Dataset preview columns: "Unnamed: 0" (int64, values 0 to 389k), "code" (string, lengths 26 to 79.6k), "docstring" (string, lengths 1 to 46.9k).
28,000
async def start(self):
    for track, task in self.__tracks.items():
        if task is None:
            self.__tracks[track] = asyncio.ensure_future(blackhole_consume(track))
Start discarding media.
28,001
def per_section(it, is_delimiter=lambda x: x.isspace()):
    ret = []
    for line in it:
        if is_delimiter(line):
            if ret:
                yield ret
                ret = []
        else:
            ret.append(line.rstrip())
    if ret:
        yield ret
From http://stackoverflow.com/a/25226944/610569
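A quick usage sketch for per_section above (input lines are made up; note that newline-only lines count as delimiters, while empty strings do not, since ''.isspace() is False):

lines = ["alpha\n", "beta\n", "\n", "gamma\n", "delta\n"]
print(list(per_section(lines)))
# -> [['alpha', 'beta'], ['gamma', 'delta']]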
28,002
def abfGroupFiles(groups, folder):
    assert os.path.exists(folder)
    files = os.listdir(folder)
    group2 = {}
    for parent in groups.keys():
        if not parent in group2.keys():
            group2[parent] = []
        for ID in groups[parent]:
            for fname in [x.lower() for x in files if ID in x.lower()]:
                group2[parent].extend([fname])
    return group2
When given a dictionary where every key maps to a list of IDs, replace each list of IDs with the list of files matching those IDs. This is how you get a list of files belonging to each child for each parent.
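An illustrative run of abfGroupFiles above, using a temporary folder and made-up file names:

import os, tempfile
folder = tempfile.mkdtemp()
for fname in ["16701001.abf", "16701002.abf", "16801001.abf"]:
    open(os.path.join(folder, fname), "w").close()
groups = {"16701": ["16701001", "16701002"], "16801": ["16801001"]}
print(abfGroupFiles(groups, folder))
# -> {'16701': ['16701001.abf', '16701002.abf'], '16801': ['16801001.abf']}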
28,003
def inc(self, name, value=1):
    clone = self._clone()
    clone._qsl = [(q, v) if q != name else (q, int(v) + value) for (q, v) in self._qsl]
    if name not in dict(clone._qsl).keys():
        clone._qsl.append((name, value))
    return clone
Increment the value of the named query-string parameter, appending it if absent.
28,004
def start_fitting(self):
    self.queue = queue.Queue()
    self.peak_vals = []
    self.fit_thread = QThread()
    self.fitobj = self.do_fit(str(self.data_filepath.text()), self.matplotlibwidget,
                              self.queue, self.peak_vals, self.peak_locs)
    self.fitobj.moveToThread(self.fit_thread)
    self.fit_thread.started.connect(self.fitobj.run)
    self.fitobj.finished.connect(self.fit_thread.quit)
    self.fitobj.status.connect(self.update_status)
    self.fit_thread.start()
Launches the fitting routine on another thread
28,005
def set_transfer_spec(self):
    _ret = False
    try:
        self._args.transfer_spec_func(self._args)
        _ret = True
    except Exception as ex:
        self.notify_exception(AsperaTransferSpecError(ex), False)
    return _ret
Run the function to set the transfer spec; on error, set the associated exception.
28,006
def save_script_file_for_state_and_source_path(state, state_path_full, as_copy=False):
    from rafcon.core.states.execution_state import ExecutionState
    if isinstance(state, ExecutionState):
        source_script_file = os.path.join(state.script.path, state.script.filename)
        destination_script_file = os.path.join(state_path_full, SCRIPT_FILE)
        try:
            write_file(destination_script_file, state.script_text)
        except Exception:
            logger.exception("Storing of script file failed: {0} -> {1}".format(
                state.get_path(), destination_script_file))
            raise
        if not source_script_file == destination_script_file and not as_copy:
            state.script.filename = SCRIPT_FILE
            state.script.path = state_path_full
Saves the script file for a state to the directory of the state. The script name will be set to the SCRIPT_FILE constant. :param state: The state of which the script file should be saved :param str state_path_full: The path to the file system storage location of the state :param bool as_copy: Temporary storage flag to signal that the given path is not the new file_system_path
28,007
def quarantineWorker(self, *args, **kwargs): return self._makeApiCall(self.funcinfo["quarantineWorker"], *args, **kwargs)
Quarantine a worker. This method takes input: ``v1/quarantine-worker-request.json#`` This method gives output: ``v1/worker-response.json#`` This method is ``experimental``
28,008
def get(self, key, default=None, reraise=False):
    if not self.enabled:
        if reraise:
            raise exceptions.DisabledCache()
        return default
    try:
        return self._get(key)
    except exceptions.NotInCache:
        if reraise:
            raise
        return default
Get the given key from the cache, if present. A default value can be provided in case the requested key is not present; otherwise, None will be returned. :param key: the key to query :type key: str :param default: the value to return if the key does not exist in cache :param reraise: whether an exception should be thrown if no value is found, defaults to False. :type reraise: bool Example usage: .. code-block:: python cache.set('my_key', 'my_value') cache.get('my_key') >>> 'my_value' cache.get('not_present', 'default_value') >>> 'default_value' cache.get('not_present', reraise=True) >>> raise lifter.exceptions.NotInCache
28,009
def onSave(self, grid):
    if self.drop_down_menu:
        self.drop_down_menu.clean_up()
    self.grid_builder.save_grid_data()
    # truncated in source; a message-dialog call survives only as the
    # fragment: style=wx.OK | wx.ICON_INFORMATION)
Save grid data in the data object
28,010
def upload_path(instance, filename):
    filename = filename.replace(" ", "_")
    filename = unicodedata.normalize(, filename).lower()
    return os.path.join(str(timezone.now().date().isoformat()), filename)
Sanitize the user-provided file name, add timestamp for uniqueness.
28,011
def filter_dict(self, query, **kwargs):
    for name, value in query.items():
        field = name.split(".")[0]
        try:
            getattr(self.type, field)
        except AttributeError:
            raise FieldNotFoundException("Field not found %s" % (field))
    self.query_bypass(query, raw_output=False, **kwargs)
    return self
Filter for :func:`~ommongo.fields.mapping.DictField`. **Examples**: ``query.filter_dict({"User.Fullname": "Oji"})``
28,012
def project_remove_folder(object_id, input_params={}, always_retry=False, **kwargs): return DXHTTPRequest( % object_id, input_params, always_retry=always_retry, **kwargs)
Invokes the /project-xxxx/removeFolder API method. For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Folders-and-Deletion#API-method%3A-%2Fclass-xxxx%2FremoveFolder
28,013
def positionIf(pred, seq):
    for i, e in enumerate(seq):
        if pred(e):
            return i
    return -1
>>> positionIf(lambda x: x > 3, range(10)) 4
28,014
def filter_by_domain(self, domain):
    query = self._copy()
    query.domain = domain
    return query
Apply the given domain to a copy of this query
28,015
def vmach2cas(M, h):
    tas = vmach2tas(M, h)
    cas = vtas2cas(tas, h)
    return cas
Mach to CAS conversion
28,016
def _theorem6p4():
    pruning_set4 = list()

    def _prune4(edges_b):
        for edges_a in pruning_set4:
            if edges_a.issubset(edges_b):
                return True
        return False

    def _explored4(edges_a):
        pruning_set4.append(edges_a)

    return _prune4, _explored4
See Theorem 6.4 in paper. Let E(x) denote the edges added when eliminating x. (edges_x below). Prunes (s,b) when (s,a) is explored and E(a) is a subset of E(b). For this theorem we only record E(a) rather than (s,E(a)) because we only need to check for pruning in the same s context (i.e the same level of recursion).
28,017
def hide_me(tb, g=globals()):
    base_tb = tb
    try:
        while tb and tb.tb_frame.f_globals is not g:
            tb = tb.tb_next
        while tb and tb.tb_frame.f_globals is g:
            tb = tb.tb_next
    except Exception as e:
        logging.exception(e)
        tb = base_tb
    if not tb:
        tb = base_tb
    return tb
Hide stack traceback of given stack
28,018
async def purgeRequests(self, *args, **kwargs): return await self._makeApiCall(self.funcinfo["purgeRequests"], *args, **kwargs)
Open purge requests for a provisionerId/workerType pair: a list of caches that need to be purged if they are from before a certain time. This is safe to be used in automation from workers. This method gives output: ``v1/purge-cache-request-list.json#`` This method is ``stable``
28,019
def write(self, vals):
    reservation_line_obj = self.env[]
    room_obj = self.env[]
    prod_id = vals.get() or self.product_id.id
    chkin = vals.get() or self.checkin_date
    chkout = vals.get() or self.checkout_date
    is_reserved = self.is_reserved
    if prod_id and is_reserved:
        prod_domain = [(, , prod_id)]
        prod_room = room_obj.search(prod_domain, limit=1)
        if (self.product_id and self.checkin_date and self.checkout_date):
            old_prd_domain = [(, , self.product_id.id)]
            old_prod_room = room_obj.search(old_prd_domain, limit=1)
            if prod_room and old_prod_room:
                srch_rmline = [(, , old_prod_room.id),
                               (, , self.checkin_date),
                               (, , self.checkout_date),
                               ]
                rm_lines = reservation_line_obj.search(srch_rmline)
                if rm_lines:
                    rm_line_vals = {: prod_room.id,
                                    : chkin,
                                    : chkout}
                    rm_lines.write(rm_line_vals)
    return super(HotelFolioLineExt, self).write(vals)
Overrides orm write method. @param self: The object pointer @param vals: dictionary of fields value. Update Hotel Room Reservation line history
28,020
def get_policy_configurations(self, project, repository_id=None, ref_name=None,
                              policy_type=None, top=None, continuation_token=None):
    route_values = {}
    if project is not None:
        route_values[] = self._serialize.url(, project, )
    query_parameters = {}
    if repository_id is not None:
        query_parameters[] = self._serialize.query(, repository_id, )
    if ref_name is not None:
        query_parameters[] = self._serialize.query(, ref_name, )
    if policy_type is not None:
        query_parameters[] = self._serialize.query(, policy_type, )
    if top is not None:
        query_parameters[] = self._serialize.query(, top, )
    if continuation_token is not None:
        query_parameters[] = self._serialize.query(, continuation_token, )
    response = self._send(http_method=,
                          location_id=,
                          version=,
                          route_values=route_values,
                          query_parameters=query_parameters)
    response_object = models.GitPolicyConfigurationResponse()
    response_object.policy_configurations = self._deserialize(, self._unwrap_collection(response))
    response_object.continuation_token = response.headers.get()
    return response_object
GetPolicyConfigurations. [Preview API] Retrieve a list of policy configurations by a given set of scope/filtering criteria. :param str project: Project ID or project name :param str repository_id: The repository id. :param str ref_name: The fully-qualified Git ref name (e.g. refs/heads/master). :param str policy_type: The policy type filter. :param int top: Maximum number of policies to return. :param str continuation_token: Pass a policy configuration ID to fetch the next page of results, up to top number of results, for this endpoint. :rtype: :class:`<GitPolicyConfigurationResponse> <azure.devops.v5_1.git.models.GitPolicyConfigurationResponse>`
28,021
def unicode_key(key):
    if not isinstance(key, (text_type, binary_type)):
        from mo_logs import Log
        Log.error("{{key|quote}} is not a valid key", key=key)
    return quote(text_type(key))
CONVERT PROPERTY VALUE TO QUOTED NAME OF SAME
28,022
def process_python(self, path):
    (pylint_stdout, pylint_stderr) = epylint.py_run(
        .join([str(path)] + self.pylint_opts), return_std=True)
    emap = {}
    print(pylint_stderr.read())
    for line in pylint_stdout:
        sys.stderr.write(line)
        key = line.split()[-1].split()[0].strip()
        if key not in self.pylint_cats:
            continue
        if key not in emap:
            emap[key] = 1
        else:
            emap[key] += 1
    sys.stderr.write()
    self.python_map[str(path)] = emap
Process a python file.
28,023
def scan(self, table, scan_filter=None, attributes_to_get=None, request_limit=None,
         max_results=None, count=False, exclusive_start_key=None, item_class=Item):
    sf = self.dynamize_scan_filter(scan_filter)
    response = True
    n = 0
    while response:
        if response is True:
            pass
        elif response.has_key("LastEvaluatedKey"):
            exclusive_start_key = response[]
        else:
            break
        response = self.layer1.scan(table.name, sf, attributes_to_get, request_limit,
                                    count, exclusive_start_key,
                                    object_hook=item_object_hook)
        if response:
            for item in response[]:
                if max_results and n == max_results:
                    break
                yield item_class(table, attrs=item)
                n += 1
Perform a scan of DynamoDB. :type table: :class:`boto.dynamodb.table.Table` :param table: The Table object that is being scanned. :type scan_filter: A list of tuples :param scan_filter: A list of tuples where each tuple consists of an attribute name, a comparison operator, and either a scalar or tuple consisting of the values to compare the attribute to. Valid comparison operators are shown below along with the expected number of values that should be supplied. * EQ - equal (1) * NE - not equal (1) * LE - less than or equal (1) * LT - less than (1) * GE - greater than or equal (1) * GT - greater than (1) * NOT_NULL - attribute exists (0, use None) * NULL - attribute does not exist (0, use None) * CONTAINS - substring or value in list (1) * NOT_CONTAINS - absence of substring or value in list (1) * BEGINS_WITH - substring prefix (1) * IN - exact match in list (N) * BETWEEN - >= first value, <= second value (2) :type attributes_to_get: list :param attributes_to_get: A list of attribute names. If supplied, only the specified attribute names will be returned. Otherwise, all attributes will be returned. :type request_limit: int :param request_limit: The maximum number of items to retrieve from Amazon DynamoDB on each request. You may want to set a specific request_limit based on the provisioned throughput of your table. The default behavior is to retrieve as many results as possible per request. :type max_results: int :param max_results: The maximum number of results that will be retrieved from Amazon DynamoDB in total. For example, if you only wanted to see the first 100 results from the query, regardless of how many were actually available, you could set max_results to 100 and the generator returned from the query method will only yield 100 results max. :type count: bool :param count: If True, Amazon DynamoDB returns a total number of items for the Scan operation, even if the operation has no matching items for the assigned filter. :type exclusive_start_key: list or tuple :param exclusive_start_key: Primary key of the item from which to continue an earlier query. This would be provided as the LastEvaluatedKey in that query. :type item_class: Class :param item_class: Allows you to override the class used to generate the items. This should be a subclass of :class:`boto.dynamodb.item.Item` :rtype: generator
28,024
def fillPelicanHole(site, username, password, tstat_name, start_time, end_time):
    start = datetime.strptime(start_time, _INPUT_TIME_FORMAT).replace(tzinfo=pytz.utc).astimezone(_pelican_time)
    end = datetime.strptime(end_time, _INPUT_TIME_FORMAT).replace(tzinfo=pytz.utc).astimezone(_pelican_time)
    heat_needs_fan = _lookupHeatNeedsFan(site, username, password, tstat_name)
    if heat_needs_fan is None:
        return None
    # [truncated in source: the request/parsing loop that builds output_rows
    # is missing; only the tail of an appended row dict survives:]
    #         "override": block.find("setBy").text != "Schedule",
    #         "fan": fanState,
    #         "mode": _mode_name_mappings[block.find("system").text],
    #         "state": _state_mappings.get(runStatus, 0),
    #         "time": timestamp,
    #     })
    df = pd.DataFrame(output_rows)
    df.drop_duplicates(subset="time", keep="first", inplace=True)
    return df
Fill a hole in a Pelican thermostat's data stream. Arguments: site -- The thermostat's Pelican site name username -- The Pelican username for the site password -- The Pelican password for the site tstat_name -- The name of the thermostat, as identified by Pelican start_time -- The start of the data hole in UTC, e.g. "2018-01-29 15:00:00" end_time -- The end of the data hole in UTC, e.g. "2018-01-29 16:00:00" Returns: A Pandas dataframe with historical Pelican data that falls between the specified start and end times. Note that this function assumes the Pelican thermostat's local time zone is US/Pacific. It will properly handle PST vs. PDT.
28,025
def ndarray_to_list_in_structure(item, squeeze=True):
    tp = type(item)
    if tp == np.ndarray:
        if squeeze:
            item = item.squeeze()
        item = item.tolist()
    elif tp == list:
        for i in range(len(item)):
            item[i] = ndarray_to_list_in_structure(item[i])
    elif tp == dict:
        for lab in item:
            item[lab] = ndarray_to_list_in_structure(item[lab])
    return item
Convert ndarrays inside a structure of lists and dicts into plain lists.
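A usage sketch for ndarray_to_list_in_structure above (the data is made up):

import numpy as np
data = {"a": np.array([[1, 2]]), "b": [np.array([3.0]), "text"]}
print(ndarray_to_list_in_structure(data))
# -> {'a': [1, 2], 'b': [3.0, 'text']}  (the (1, 2)-shaped array is squeezed to [1, 2])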
28,026
def discard(self, key):
    if key in self:
        i = self.map[key]
        del self.items[i]
        del self.map[key]
        for k, v in self.map.items():
            if v >= i:
                self.map[k] = v - 1
Remove an element. Do not raise an exception if absent. The MutableSet mixin uses this to implement the .remove() method, which *does* raise an error when asked to remove a non-existent item.
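A minimal self-contained sketch of the kind of container this discard assumes: an insertion-ordered set backed by a list plus an item-to-index map, where removing an element shifts the stored indices of everything after it. The class name and add method here are hypothetical, not from the source.

class IndexedSet:
    def __init__(self):
        self.items = []   # elements in insertion order
        self.map = {}     # element -> position in self.items

    def add(self, key):
        if key not in self.map:
            self.map[key] = len(self.items)
            self.items.append(key)

    def __contains__(self, key):
        return key in self.map

    def discard(self, key):
        if key in self:
            i = self.map[key]
            del self.items[i]
            del self.map[key]
            # every element stored after position i moved one slot left
            for k, v in self.map.items():
                if v >= i:
                    self.map[k] = v - 1

s = IndexedSet()
for x in "abc":
    s.add(x)
s.discard("b")
print(s.items, s.map)  # -> ['a', 'c'] {'a': 0, 'c': 1}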
28,027
def BFS_Tree(G, start):
    if start not in G.vertices:
        raise GraphInsertError("Vertex %s doesn't exist." % (start,))
    pred = BFS(G, start)
    T = digraph.DiGraph()
    queue = Queue()
    queue.put(start)
    while queue.qsize() > 0:
        current = queue.get()
        for element in pred:
            if pred[element] == current:
                T.add_edge(current, element)
                queue.put(element)
    return T
Return an oriented tree constructed from bfs starting at 'start'.
28,028
def get_protocols(self, device): return self._reg.device_builder(device, self._rv).protocols
Returns a list of available protocols for the specified device.
28,029
def by_player(self): return any([k.player and self.name != k.name for k in self.killers])
:class:`bool`: Whether the kill involves other characters.
28,030
def download_metadata_cli(master_token, output_csv, verbose=False, debug=False): return download_metadata(master_token, output_csv, verbose, debug)
Command line function for downloading metadata. For more information visit :func:`download_metadata<ohapi.command_line.download_metadata>`.
28,031
def guess_service_info_from_path(spec_path):
    spec_path = spec_path.lower()
    spec_path = spec_path[spec_path.index("specification"):]
    split_spec_path = spec_path.split("/")
    rp_name = split_spec_path[1]
    is_arm = split_spec_path[2] == "resource-manager"
    return {
        "rp_name": rp_name,
        "is_arm": is_arm
    }
Guess Python Autorest options based on the spec path. Expected path: specification/compute/resource-manager/readme.md
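A usage sketch for guess_service_info_from_path above (the path is made up):

path = "/git/azure-rest-api-specs/specification/compute/resource-manager/readme.md"
print(guess_service_info_from_path(path))
# -> {'rp_name': 'compute', 'is_arm': True}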
28,032
def get_bounds(tune_params):
    bounds = []
    for values in tune_params.values():
        sorted_values = numpy.sort(values)
        bounds.append((sorted_values[0], sorted_values[-1]))
    return bounds
create a bounds array from the tunable parameters
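A usage sketch for get_bounds above, with made-up tunable parameters (the bounds may print as numpy scalars depending on the numpy version):

import numpy
tune_params = {"block_size_x": [32, 128, 64], "tile_size": [1, 4, 2]}
print(get_bounds(tune_params))
# -> [(32, 128), (1, 4)]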
28,033
def itemat(iterable, index):
    result = None
    handleindex = True
    if isinstance(iterable, dict):
        handleindex = False
    else:
        try:
            result = iterable[index]
        except TypeError:
            handleindex = False
    if not handleindex:
        iterator = iter(iterable)
        if index < 0:
            index += len(iterable)
        while index >= 0:
            try:
                value = next(iterator)
            except StopIteration:
                raise IndexError(
                    "{0} index {1} out of range".format(
                        iterable.__class__, index
                    )
                )
            else:
                if index == 0:
                    result = value
                    break
                index -= 1
    return result
Get the item at the index position in iterable, falling back to iterating over its items when direct indexing is not supported. :param iterable: object which provides the method __getitem__ or __iter__. :param int index: item position to get.
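A usage sketch for itemat above, covering both the direct-indexing path and the iteration fallback:

print(itemat([10, 20, 30], 1))               # -> 20, via __getitem__
print(itemat({"a": 1, "b": 2}, 0))           # -> 'a', dicts always take the iteration path
print(itemat((x * x for x in range(5)), 3))  # -> 9, generators fall back to iteration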
28,034
def get_mode(path, follow_symlinks=True):
    return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get(, )
Return the mode of a file. path: file or directory of which to get the mode. follow_symlinks: indicates if symlinks should be followed. CLI Example: .. code-block:: bash salt '*' file.get_mode /etc/passwd .. versionchanged:: 2014.1.0 ``follow_symlinks`` option added
28,035
def get_node_type(self, node, parent=None):
    if isinstance(node, CompositeDocument):
        return
    elif isinstance(node, (Document, DocumentPointer)):
        return
    elif isinstance(node, Binder) and parent is None:
        return
    for child in node:
        if isinstance(child, TranslucentBinder):
            return
    return
If node is a document, the type is page. If node is a binder with no parent, the type is book. If node is a translucent binder, the type is either chapters (only contain pages) or unit (contains at least one translucent binder).
28,036
def _post_filter(search, urlkwargs, definitions):
    filters, urlkwargs = _create_filter_dsl(urlkwargs, definitions)
    for filter_ in filters:
        search = search.post_filter(filter_)
    return (search, urlkwargs)
Ingest post filter in query.
28,037
def as_dict(self):
    d = {k: v for (k, v) in self.__dict__.items()}
    return d
Return the URI object as a dictionary
28,038
def get_random_mass(numPoints, massRangeParams):
    if massRangeParams.remnant_mass_threshold is None:
        mass1, mass2, spin1z, spin2z = \
            get_random_mass_point_particles(numPoints, massRangeParams)
    else:
        _, max_ns_g_mass = load_ns_sequence(massRangeParams.ns_eos)
        if not os.path.isfile():
            logging.info()
            generate_em_constraint_data(massRangeParams.minMass2, massRangeParams.maxMass2,
                                        massRangeParams.delta_ns_mass,
                                        -1.0, massRangeParams.maxBHSpinMag,
                                        massRangeParams.delta_bh_spin,
                                        massRangeParams.ns_eos,
                                        massRangeParams.remnant_mass_threshold, 0.0)
        constraint_datafile = numpy.load()
        mNS_pts = constraint_datafile[]
        bh_spin_z_pts = constraint_datafile[]
        eta_mins = constraint_datafile[]
        mass1_out = []
        mass2_out = []
        spin1z_out = []
        spin2z_out = []
        numPointsFound = 0
        while numPointsFound < numPoints:
            mass1, mass2, spin1z, spin2z = \
                get_random_mass_point_particles(numPoints - numPointsFound, massRangeParams)
            _, eta = pnutils.mass1_mass2_to_mtotal_eta(mass1, mass2)
            mask = numpy.ones(len(mass1), dtype=bool)
            min_eta_em = min_eta_for_em_bright(spin1z, mass2, mNS_pts, bh_spin_z_pts, eta_mins)
            mask[(mass1 >= massRangeParams.ns_bh_boundary_mass) &
                 (mass2 <= max_ns_g_mass) & (eta < min_eta_em)] = False
            mass1_out = numpy.concatenate((mass1_out, mass1[mask]))
            mass2_out = numpy.concatenate((mass2_out, mass2[mask]))
            spin1z_out = numpy.concatenate((spin1z_out, spin1z[mask]))
            spin2z_out = numpy.concatenate((spin2z_out, spin2z[mask]))
            numPointsFound = len(mass1_out)
        mass1 = mass1_out
        mass2 = mass2_out
        spin1z = spin1z_out
        spin2z = spin2z_out
    return mass1, mass2, spin1z, spin2z
This function will generate a large set of points within the chosen mass and spin space, and with the desired minimum remnant disk mass (this applies to NS-BH systems only). It will also return the corresponding PN spin coefficients for ease of use later (though these may be removed at some future point). Parameters ---------- numPoints : int Number of systems to simulate massRangeParams : massRangeParameters instance Instance holding all the details of mass ranges and spin ranges. Returns -------- mass1 : float Mass of heavier body. mass2 : float Mass of lighter body. spin1z : float Spin of body 1. spin2z : float Spin of body 2.
28,039
def log_pdf(self, y, mu, weights=None):
    if weights is None:
        weights = np.ones_like(mu)
    n = self.levels
    p = mu / self.levels
    return sp.stats.binom.logpmf(y, n, p)
computes the log of the pdf or pmf of the values under the current distribution Parameters ---------- y : array-like of length n target values mu : array-like of length n expected values weights : array-like shape (n,) or None, default: None sample weights if None, defaults to array of ones Returns ------- pdf/pmf : np.array of length n
28,040
def is_finalized(self): return self.state == self.STATES.FINALIZED or self.state == self.STATES.INSTALLED
Return True if the bundle is finalized or installed.
28,041
def stream_command_dicts(commands, parallel=False):
    if parallel is True:
        threads = []
        for command in commands:
            # bind command per-iteration; a bare lambda would capture the loop
            # variable late and could run with the wrong command
            target = lambda command=command: stream_command(**command)
            thread = Thread(target=target)
            thread.start()
            threads.append(thread)
        for t in threads:
            t.join()
    else:
        for command in commands:
            stream_command(**command)
Takes a list of dictionaries with keys corresponding to ``stream_command`` arguments, and runs all concurrently. :param commands: A list of dictionaries, the keys of which should line up with the arguments to ``stream_command`` function. :type commands: ``list`` of ``dict`` :param parallel: If true, commands will be run in parallel. :type parallel: ``bool``
28,042
def _machine_actions(self, action):
    payload = {
        : action
    }
    data = json.dumps(payload)
    req = self.request(self.mist_client.uri++self.cloud.id++self.id, data=data)
    req.post()
    self.cloud.update_machines()
Actions for the machine (e.g. stop, start etc) :param action: Can be "reboot", "start", "stop", "destroy" :returns: An updated list of the added machines
28,043
def set_options(self, **kwargs):
    p = Unskin(self.options)
    p.update(kwargs)
Set options. @param kwargs: keyword arguments. @see: L{Options}
28,044
def set_public_transport_route(self, public_transport_route): self._query_params += str(QueryParam.ROUTE_ID) + str(public_transport_route)
Set the public transport route. :param public_transport_route: TransportRoute
28,045
def get_app_instance(request):
    app = None
    if getattr(request, , None) and request.current_page.application_urls:
        app = apphook_pool.get_apphook(request.current_page.application_urls)
    if app and app.app_config:
        try:
            config = None
            with override(get_language_from_request(request, check_path=True)):
                namespace = resolve(request.path_info).namespace
                config = app.get_config(namespace)
            return namespace, config
        except Resolver404:
            pass
    return , None
Returns a tuple containing the current namespace and the AppHookConfig instance :param request: request object :return: namespace, config
28,046
def process_increase_expression_amount(self):
    statements = []
    pwcs = self.find_event_parent_with_event_child(, )
    for pair in pwcs:
        pos_reg = pair[0]
        expression = pair[1]
        cause = self.get_entity_text_for_relation(pos_reg, )
        target = self.get_entity_text_for_relation(expression, )
        if cause is not None and target is not None:
            theme_node = self.get_related_node(expression, )
            assert(theme_node is not None)
            evidence = self.node_to_evidence(theme_node, is_direct=False)
            statements.append(IncreaseAmount(s2a(cause), s2a(target), evidence=evidence))
    return statements
Looks for Positive_Regulation events with a specified Cause and a Gene_Expression theme, and processes them into INDRA statements.
28,047
def id_pools_vsn_ranges(self):
    if not self.__id_pools_vsn_ranges:
        self.__id_pools_vsn_ranges = IdPoolsRanges(, self.__connection)
    return self.__id_pools_vsn_ranges
Gets the IdPoolsRanges API Client for VSN Ranges. Returns: IdPoolsRanges:
28,048
def plot_pdf(self, names=None, Nbest=5, lw=2):
    assert Nbest > 0
    if Nbest > len(self.distributions):
        Nbest = len(self.distributions)
    if isinstance(names, list):
        for name in names:
            pylab.plot(self.x, self.fitted_pdf[name], lw=lw, label=name)
    elif names:
        pylab.plot(self.x, self.fitted_pdf[names], lw=lw, label=names)
    else:
        try:
            names = self.df_errors.sort_values(by="sumsquare_error").index[0:Nbest]
        except:
            names = self.df_errors.sort("sumsquare_error").index[0:Nbest]
        for name in names:
            if name in self.fitted_pdf.keys():
                pylab.plot(self.x, self.fitted_pdf[name], lw=lw, label=name)
            else:
                print("%s was not fitted. no parameters available" % name)
    pylab.grid(True)
    pylab.legend()
Plots Probability density functions of the distributions :param str,list names: names can be a single distribution name, or a list of distribution names, or kept as None, in which case, the first Nbest distribution will be taken (default to best 5)
28,049
def update(self, dtrain, iteration, fobj=None):
    if not isinstance(dtrain, DMatrix):
        raise TypeError(.format(type(dtrain).__name__))
    self._validate_features(dtrain)
    if fobj is None:
        _check_call(_LIB.XGBoosterUpdateOneIter(self.handle, iteration, dtrain.handle))
    else:
        pred = self.predict(dtrain)
        grad, hess = fobj(pred, dtrain)
        self.boost(dtrain, grad, hess)
Update for one iteration, with objective function calculated internally. Parameters ---------- dtrain : DMatrix Training data. iteration : int Current iteration number. fobj : function Customized objective function.
28,050
def get_gene_count_tab(infile, bc_getter=None):
    gene = None
    counts = collections.Counter()
    for line in infile:
        values = line.strip().split("\t")
        assert len(values) == 2, "line: %s does not contain 2 columns" % line
        read_id, assigned_gene = values
        if assigned_gene != gene:
            if gene:
                yield gene, counts
            gene = assigned_gene
            counts = collections.defaultdict(collections.Counter)
        cell, umi = bc_getter(read_id)
        counts[cell][umi] += 1
    yield gene, counts
Yields the counts per umi for each gene bc_getter: method to get umi (plus optionally, cell barcode) from read, e.g get_umi_read_id or get_umi_tag TODO: ADD FOLLOWING OPTION skip_regex: skip genes matching this regex. Useful to ignore unassigned reads (as per get_bundles class above)
28,051
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    local_stream = BytearrayStream()
    if self._username:
        self._username.write(local_stream, kmip_version=kmip_version)
    else:
        raise ValueError(
            "Username/password credential struct missing the username."
        )
    if self._password:
        self._password.write(local_stream, kmip_version=kmip_version)
    self.length = local_stream.length()
    super(UsernamePasswordCredential, self).write(
        output_stream,
        kmip_version=kmip_version
    )
    output_stream.write(local_stream.buffer)
Write the data encoding the UsernamePasswordCredential struct to a stream. Args: output_stream (stream): A data stream in which to encode object data, supporting a write method; usually a BytearrayStream object. kmip_version (KMIPVersion): An enumeration defining the KMIP version with which the object will be encoded. Optional, defaults to KMIP 1.0. Raises: ValueError: Raised if the username is not defined.
28,052
def _stage_input_files(self, file_mapping, dry_run=True):
    if self._file_stage is None:
        return
    self._file_stage.copy_to_scratch(file_mapping, dry_run)
Stage the input files to the scratch area and adjust the arguments accordingly
28,053
def geometricmean(inlist):
    mult = 1.0
    one_over_n = 1.0 / len(inlist)
    for item in inlist:
        mult = mult * pow(item, one_over_n)
    return mult
Calculates the geometric mean of the values in the passed list. That is: n-th root of (x1 * x2 * ... * xn). Assumes a '1D' list. Usage: geometricmean(inlist)
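A worked example for geometricmean above: the geometric mean of [2, 8] is the square root of 2 * 8 = 16, i.e. 4.

print(geometricmean([2, 8]))     # -> 4.0 (up to float rounding)
print(geometricmean([1, 3, 9]))  # -> 3.0, the cube root of 27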
28,054
def validate_id(request):
    if in request:
        correct_id = isinstance(
            request[], (string_types, int, None),
        )
        error =
        assert correct_id, error
Validate request id.
28,055
def tagExplicitly(self, superTag):
    if superTag.tagClass == tagClassUniversal:
        raise error.PyAsn1Error("Can't tag with UNIVERSAL class tag")
    if superTag.tagFormat != tagFormatConstructed:
        superTag = Tag(superTag.tagClass, tagFormatConstructed, superTag.tagId)
    return self + superTag
Return explicitly tagged *TagSet* Create a new *TagSet* representing callee *TagSet* explicitly tagged with passed tag(s). With explicit tagging mode, new tags are appended to existing tag(s). Parameters ---------- superTag: :class:`~pyasn1.type.tag.Tag` *Tag* object to tag this *TagSet* Returns ------- : :class:`~pyasn1.type.tag.TagSet` New *TagSet* object
28,056
def open_las(source, closefd=True):
    if isinstance(source, str):
        stream = open(source, mode="rb")
        if not closefd:
            raise ValueError("Cannot use closefd with filename")
    elif isinstance(source, bytes):
        stream = io.BytesIO(source)
    else:
        stream = source
    return LasReader(stream, closefd=closefd)
Opens and reads the header of the las content in the source >>> with open_las('pylastests/simple.las') as f: ... print(f.header.point_format_id) 3 >>> f = open('pylastests/simple.las', mode='rb') >>> with open_las(f, closefd=False) as flas: ... print(flas.header) <LasHeader(1.2)> >>> f.closed False >>> f = open('pylastests/simple.las', mode='rb') >>> with open_las(f) as flas: ... las = flas.read() >>> f.closed True Parameters ---------- source : str or io.BytesIO if source is a str it must be a filename a stream if a file object with the methods read, seek, tell closefd: bool Whether the stream/file object shall be closed, this only work when using open_las in a with statement. An exception is raised if closefd is specified and the source is a filename Returns ------- pylas.lasreader.LasReader
28,057
def SingleModeCombine(pupils, modeDiameter=None):
    if modeDiameter is None:
        modeDiameter = 0.9 * pupils.shape[-1]
    amplitudes = FibreCouple(pupils, modeDiameter)
    cc = np.conj(amplitudes)
    fluxes = (amplitudes * cc).real
    coherentFluxes = [amplitudes[i] * cc[j]
                      for i in range(1, len(amplitudes))
                      for j in range(i)]
    return fluxes, coherentFluxes
Return the instantaneous coherent fluxes and photometric fluxes for a multiway single-mode fibre combiner
28,058
def p_paramlist(self, p):
    p[0] = Paramlist(params=p[3], lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
paramlist : DELAY LPAREN params RPAREN
28,059
def get_value_of_splits_for_account(self, account_id: str) -> Decimal:
    splits = self.get_split_for_account(account_id)
    result = Decimal(0)
    for split in splits:
        result += split.value
    return result
Returns the sum of values for all splits for the given account
28,060
def viewbox_mouse_event(self, event):
    if event.handled or not self.interactive:
        return
    PerspectiveCamera.viewbox_mouse_event(self, event)
    if event.type == :
        self._event_value = None
    elif event.type == :
        event.handled = True
    elif event.type == :
        if event.press_event is None:
            return
        modifiers = event.mouse_event.modifiers
        p1 = event.mouse_event.press_event.pos
        p2 = event.mouse_event.pos
        d = p2 - p1
        if 1 in event.buttons and not modifiers:
            self._update_rotation(event)
        elif 2 in event.buttons and not modifiers:
            if self._event_value is None:
                self._event_value = (self._scale_factor, self._distance)
            zoomy = (1 + self.zoom_factor) ** d[1]
            self.scale_factor = self._event_value[0] * zoomy
            if self._distance is not None:
                self._distance = self._event_value[1] * zoomy
            self.view_changed()
        elif 1 in event.buttons and keys.SHIFT in modifiers:
            norm = np.mean(self._viewbox.size)
            if self._event_value is None or len(self._event_value) == 2:
                self._event_value = self.center
            dist = (p1 - p2) / norm * self._scale_factor
            dist[1] *= -1
            dx, dy, dz = self._dist_to_trans(dist)
            ff = self._flip_factors
            up, forward, right = self._get_dim_vectors()
            dx, dy, dz = right * dx + forward * dy + up * dz
            dx, dy, dz = ff[0] * dx, ff[1] * dy, dz * ff[2]
            c = self._event_value
            self.center = c[0] + dx, c[1] + dy, c[2] + dz
        elif 2 in event.buttons and keys.SHIFT in modifiers:
            if self._event_value is None:
                self._event_value = self._fov
            fov = self._event_value - d[1] / 5.0
            self.fov = min(180.0, max(0.0, fov))
The viewbox received a mouse event; update transform accordingly. Parameters ---------- event : instance of Event The event.
28,061
def derelativise_url(url):
    parsed = six.moves.urllib.parse.urlparse(url)
    newpath = []
    for chunk in parsed.path[1:].split():
        if chunk == :
            continue
        elif chunk == :
            newpath = newpath[:-1]
            continue
        elif _fullmatch(r, chunk) is not None:
            newpath = newpath[:-1]
            continue
        newpath += [chunk]
    return six.moves.urllib.parse.urlunparse(parsed[:2]+(+(.join(newpath)),)+parsed[3:])
Normalizes URLs, gets rid of .. and .
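Since the split and regex literals were stripped from the code above, here is a minimal re-implementation sketch of the same normalization, assuming paths split on "/"; the function name is hypothetical and the percent-encoded-dots branch is omitted:

from urllib.parse import urlparse, urlunparse

def normalize_path_url(url):
    parsed = urlparse(url)
    newpath = []
    for chunk in parsed.path[1:].split("/"):
        if chunk == ".":
            continue
        if chunk == "..":
            newpath = newpath[:-1]  # drop the previous segment
            continue
        newpath.append(chunk)
    return urlunparse(parsed[:2] + ("/" + "/".join(newpath),) + parsed[3:])

print(normalize_path_url("http://example.com/a/b/../c/./d"))
# -> http://example.com/a/c/d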
28,062
def read_actions():
    while True:
        key = get_key()
        if key in (const.KEY_UP, const.KEY_CTRL_N, , ):
            yield const.ACTION_PREVIOUS
        elif key in (const.KEY_DOWN, const.KEY_CTRL_P, , ):
            yield const.ACTION_NEXT
        elif key in (const.KEY_CTRL_C, ):
            yield const.ACTION_ABORT
        elif key in (, ):
            yield const.ACTION_SELECT
Yields actions for pressed keys.
28,063
def get_branches(self, project=None, include_parent=None, include_children=None,
                 include_deleted=None, include_links=None):
    route_values = {}
    if project is not None:
        route_values[] = self._serialize.url(, project, )
    query_parameters = {}
    if include_parent is not None:
        query_parameters[] = self._serialize.query(, include_parent, )
    if include_children is not None:
        query_parameters[] = self._serialize.query(, include_children, )
    if include_deleted is not None:
        query_parameters[] = self._serialize.query(, include_deleted, )
    if include_links is not None:
        query_parameters[] = self._serialize.query(, include_links, )
    response = self._send(http_method=,
                          location_id=,
                          version=,
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize(, self._unwrap_collection(response))
GetBranches. Get a collection of branch roots -- first-level children, branches with no parents. :param str project: Project ID or project name :param bool include_parent: Return the parent branch, if there is one. Default: False :param bool include_children: Return the child branches for each root branch. Default: False :param bool include_deleted: Return deleted branches. Default: False :param bool include_links: Return links. Default: False :rtype: [TfvcBranch]
28,064
def get_last_block(working_dir):
    impl = sys.modules[__name__]
    return BlockstackDB.get_lastblock(impl, working_dir)
Get the last block processed Return the integer on success Return None on error
28,065
def ext_pillar(minion_id, pillar, bucket, key=None, keyid=None, verify_ssl=True,
               location=None, multiple_env=False, environment=, prefix=,
               service_url=None, kms_keyid=None, s3_cache_expire=30,
               s3_sync_on_update=True, path_style=False, https_enable=True):
    s3_creds = S3Credentials(key, keyid, bucket, service_url, verify_ssl,
                             kms_keyid, location, path_style, https_enable)
    pillar_dir = os.path.normpath(os.path.join(_get_cache_dir(), environment, bucket))
    if prefix:
        pillar_dir = os.path.normpath(os.path.join(pillar_dir, prefix))
    if __opts__[].get(environment, []) == [pillar_dir]:
        return {}
    metadata = _init(s3_creds, bucket, multiple_env, environment, prefix, s3_cache_expire)
    if s3_sync_on_update:
        log.info()
        for saltenv, env_meta in six.iteritems(metadata):
            for bucket, files in six.iteritems(_find_files(env_meta)):
                for file_path in files:
                    cached_file_path = _get_cached_file_name(bucket, saltenv, file_path)
                    log.info(, bucket, saltenv, file_path)
                    _get_file_from_s3(s3_creds, metadata, saltenv, bucket,
                                      file_path, cached_file_path)
        log.info()
    opts = deepcopy(__opts__)
    opts[][environment] = [os.path.join(pillar_dir, environment)] if multiple_env else [pillar_dir]
    opts[] = [x for x in opts[] if not in x]
    pil = Pillar(opts, __grains__, minion_id, environment)
    compiled_pillar = pil.compile_pillar(ext=False)
    return compiled_pillar
Compile pillar data from files stored in an S3 bucket, caching them locally and optionally syncing the cache on update.
28,066
def map(self, mapper):
    if isinstance(mapper, ABCSeries):
        mapper = mapper.to_dict()
    if isinstance(mapper, abc.Mapping):
        fill_value = mapper.get(self.fill_value, self.fill_value)
        sp_values = [mapper.get(x, None) for x in self.sp_values]
    else:
        fill_value = mapper(self.fill_value)
        sp_values = [mapper(x) for x in self.sp_values]
    return type(self)(sp_values, sparse_index=self.sp_index, fill_value=fill_value)
Map categories using input correspondence (dict, Series, or function). Parameters ---------- mapper : dict, Series, callable The correspondence from old values to new. Returns ------- SparseArray The output array will have the same density as the input. The output fill value will be the result of applying the mapping to ``self.fill_value`` Examples -------- >>> arr = pd.SparseArray([0, 1, 2]) >>> arr.map(lambda x: x + 10) [10, 11, 12] Fill: 10 IntIndex Indices: array([1, 2], dtype=int32) >>> arr.map({0: 10, 1: 11, 2: 12}) [10, 11, 12] Fill: 10 IntIndex Indices: array([1, 2], dtype=int32) >>> arr.map(pd.Series([10, 11, 12], index=[0, 1, 2])) [10, 11, 12] Fill: 10 IntIndex Indices: array([1, 2], dtype=int32)
28,067
def listDF(option=, token=, version=):
    df = pd.DataFrame(list(option, token, version))
    _toDatetime(df)
    _reindex(df, )
    return df
Returns an array of quotes for the top 10 symbols in a specified list. https://iexcloud.io/docs/api/#list Updated intraday Args: option (string); Option to query token (string); Access token version (string); API version Returns: DataFrame: result
28,068
def build_effects_to_residuals_matrix(num_seasons, dtype):
    effects_to_residuals_fullrank = np.eye(num_seasons) - 1. / num_seasons
    effects_to_residuals_fullrank[-1, :] = 1. / num_seasons
    residuals_to_effects_fullrank = np.linalg.inv(effects_to_residuals_fullrank)
    effects_to_residuals = effects_to_residuals_fullrank[:-1, :]
    residuals_to_effects = residuals_to_effects_fullrank[:, :-1]
    effects_to_residuals = tf.cast(effects_to_residuals, dtype=dtype, name=)
    residuals_to_effects = tf.cast(residuals_to_effects, dtype=dtype, name=)
    return effects_to_residuals, residuals_to_effects
Build change-of-basis matrices for constrained seasonal effects. This method builds the matrix that transforms seasonal effects into effect residuals (differences from the mean effect), and additionally projects these residuals onto the subspace where the mean effect is zero. See `ConstrainedSeasonalStateSpaceModel` for mathematical details. Args: num_seasons: scalar `int` number of seasons. dtype: TensorFlow `dtype` for the returned values. Returns: effects_to_residuals: `Tensor` of shape `[num_seasons-1, num_seasons]`, such that `differences_from_mean_effect = matmul(effects_to_residuals, seasonal_effects)`. In the notation of `ConstrainedSeasonalStateSpaceModel`, this is `effects_to_residuals = P * R`. residuals_to_effects: the (pseudo)-inverse of the above; a `Tensor` of shape `[num_seasons, num_seasons-1]`. In the notation of `ConstrainedSeasonalStateSpaceModel`, this is `residuals_to_effects = R^{-1} * P'`.
28,069
def remove_filtered_edges(graph, edge_predicates=None):
    edges = list(filter_edges(graph, edge_predicates=edge_predicates))
    graph.remove_edges_from(edges)
Remove edges passing the given edge predicates. :param pybel.BELGraph graph: A BEL graph :param edge_predicates: A predicate or list of predicates :type edge_predicates: None or ((pybel.BELGraph, tuple, tuple, int) -> bool) or iter[(pybel.BELGraph, tuple, tuple, int) -> bool]] :return:
28,070
def _check_timeouts(self):
    for conn_id, data in self._connections.items():
        if in data and data[].expired:
            if data[] == self.Connecting:
                self.finish_connection(conn_id, False, )
            elif data[] == self.Disconnecting:
                self.finish_disconnection(conn_id, False, )
            elif data[] == self.InProgress:
                if data[] == :
                    self.finish_operation(conn_id, False, , None, None)
                elif data[] == :
                    self.finish_operation(conn_id, False, )
Check if any operations in progress need to be timed out Adds the corresponding finish action that fails the request due to a timeout.
28,071
def EnumerateClasses(self, namespace=None, ClassName=None, DeepInheritance=None,
                     LocalOnly=None, IncludeQualifiers=None, IncludeClassOrigin=None,
                     **extra):
    exc = None
    classes = None
    method_name =
    if self._operation_recorders:
        self.operation_recorder_reset()
        self.operation_recorder_stage_pywbem_args(
            method=method_name,
            namespace=namespace,
            ClassName=ClassName,
            DeepInheritance=DeepInheritance,
            LocalOnly=LocalOnly,
            IncludeQualifiers=IncludeQualifiers,
            IncludeClassOrigin=IncludeClassOrigin,
            **extra)
    try:
        stats = self.statistics.start_timer(method_name)
        if namespace is None and isinstance(ClassName, CIMClassName):
            namespace = ClassName.namespace
        namespace = self._iparam_namespace_from_namespace(namespace)
        classname = self._iparam_classname(ClassName, )
        result = self._imethodcall(
            method_name,
            namespace,
            ClassName=classname,
            DeepInheritance=DeepInheritance,
            LocalOnly=LocalOnly,
            IncludeQualifiers=IncludeQualifiers,
            IncludeClassOrigin=IncludeClassOrigin,
            **extra)
        if result is None:
            classes = []
        else:
            classes = result[0][2]
        for klass in classes:
            if not isinstance(klass, CIMClass):
                raise CIMXMLParseError(
                    _format("Expecting CIMClass object in result list, "
                            "got {0} object", klass.__class__.__name__),
                    conn_id=self.conn_id)
            klass.path = CIMClassName(
                classname=klass.classname, host=self.host, namespace=namespace)
        return classes
    except (CIMXMLParseError, XMLParseError) as exce:
        exce.request_data = self.last_raw_request
        exce.response_data = self.last_raw_reply
        exc = exce
        raise
    except Exception as exce:
        exc = exce
        raise
    finally:
        self._last_operation_time = stats.stop_timer(
            self.last_request_len, self.last_reply_len,
            self.last_server_response_time, exc)
        if self._operation_recorders:
            self.operation_recorder_stage_result(classes, exc)
Enumerate the subclasses of a class, or the top-level classes in a namespace. This method performs the EnumerateClasses operation (see :term:`DSP0200`). See :ref:`WBEM operations` for a list of all methods performing such operations. If the operation succeeds, this method returns. Otherwise, this method raises an exception. Parameters: namespace (:term:`string`): Name of the namespace in which the classes are to be enumerated (case independent). Leading and trailing slash characters will be stripped. The lexical case will be preserved. If `None`, the namespace of the `ClassName` parameter will be used, if specified as a :class:`~pywbem.CIMClassName` object. If that is also `None`, the default namespace of the connection will be used. ClassName (:term:`string` or :class:`~pywbem.CIMClassName`): Name of the class whose subclasses are to be retrieved (case independent). If specified as a :class:`~pywbem.CIMClassName` object, its host attribute will be ignored. If `None`, the top-level classes in the namespace will be retrieved. DeepInheritance (:class:`py:bool`): Indicates that all (direct and indirect) subclasses of the specified class or of the top-level classes are to be included in the result, as follows: * If `False`, only direct subclasses of the specified class or only top-level classes are included in the result. * If `True`, all direct and indirect subclasses of the specified class or the top-level classes and all of their direct and indirect subclasses are included in the result. * If `None`, this parameter is not passed to the WBEM server, and causes the server-implemented default to be used. :term:`DSP0200` defines that the server-implemented default is `False`. Note, the semantics of the `DeepInheritance` parameter in :meth:`~pywbem.WBEMConnection.EnumerateInstances` is different. LocalOnly (:class:`py:bool`): Indicates that inherited properties, methods, and qualifiers are to be excluded from the returned classes, as follows. * If `False`, inherited elements are not excluded. * If `True`, inherited elements are excluded. * If `None`, this parameter is not passed to the WBEM server, and causes the server-implemented default to be used. :term:`DSP0200` defines that the server-implemented default is `True`. IncludeQualifiers (:class:`py:bool`): Indicates that qualifiers are to be included in the returned classes, as follows: * If `False`, qualifiers are not included. * If `True`, qualifiers are included. * If `None`, this parameter is not passed to the WBEM server, and causes the server-implemented default to be used. :term:`DSP0200` defines that the server-implemented default is `True`. IncludeClassOrigin (:class:`py:bool`): Indicates that class origin information is to be included on each property and method in the returned classes, as follows: * If `False`, class origin information is not included. * If `True`, class origin information is included. * If `None`, this parameter is not passed to the WBEM server, and causes the server-implemented default to be used. :term:`DSP0200` defines that the server-implemented default is `False`. **extra : Additional keyword arguments are passed as additional operation parameters to the WBEM server. Note that :term:`DSP0200` does not define any additional parameters for this operation. Returns: A list of :class:`~pywbem.CIMClass` objects that are representations of the enumerated classes, with their `path` attributes set. Raises: Exceptions described in :class:`~pywbem.WBEMConnection`.
28,072
def make_timing_logger(logger, precision=3, level=logging.DEBUG):
    @contextmanager
    def log_time(msg, *args):
        start_time = time.time()
        try:
            yield
        finally:
            message = "{} in %0.{}fs".format(msg, precision)
            duration = time.time() - start_time
            args = args + (duration,)
            logger.log(level, message, *args)
    return log_time
Return a timing logger. Usage:: >>> logger = logging.getLogger('foobar') >>> log_time = make_timing_logger( ... logger, level=logging.INFO, precision=2) >>> >>> with log_time("hello %s", "world"): ... time.sleep(1) INFO:foobar:hello world in 1.00s
28,073
def p_scope(self, p):
    scope = () if p[1].scope is None else p[1].scope.labellist
    p[0] = IdentifierScope(
        scope + (IdentifierScopeLabel(p[1].name, lineno=p.lineno(1)),),
        lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
scope : identifier DOT
28,074
def _unlock_temporarily(self):
    if not self._is_locked:
        yield
    else:
        try:
            self._is_locked = False
            yield
        finally:
            self._is_locked = True
Allow tokens to modify the world for the duration of a with-block. It's important that tokens only modify the world at appropriate times, otherwise the changes they make may not be communicated across the network to other clients. To help catch and prevent these kinds of errors, the game engine keeps the world locked most of the time and only briefly unlocks it (using this method) when tokens are allowed to make changes. When the world is locked, token methods that aren't marked as being read-only can't be called. When the world is unlocked, any token method can be called. These checks can be disabled by running python with optimization enabled. You should never call this method manually from within your own game. This method is intended to be used by the game engine, which was carefully designed to allow the world to be modified only when safe. Calling this method yourself disables an important safety check.
28,075
def _catch_nonjson_streamresponse(rawresponse):
    try:
        response = json.loads(rawresponse)
    except (ValueError, TypeError):
        if rawresponse:
            response = {
                : [
                    {
                        : ,
                        : rawresponse,
                    }
                ]
            }
        else:
            response = {}
    return response
Validate a streamed response is JSON. Return a Python dictionary either way. **Parameters:** - **rawresponse:** Streamed Response from Requests. **Returns:** Dictionary
28,076
def rename(self, from_name, to_name):
    log.info( % (from_name, to_name))
    self._run_stmt( % (from_name, to_name))
Renames an existing database.
28,077
def _edit_name(name, code, add_code=None, delete_end=False):
    info = name.split()
    info[2] = code
    if add_code is not None:
        info[3] = add_code
    if delete_end:
        info.pop()
    return .join(info)
Helping function for creating file names in .SAFE format :param name: initial string :type name: str :param code: :type code: str :param add_code: :type add_code: str or None :param delete_end: :type delete_end: bool :return: edited string :rtype: str
28,078
def filter(self, all_records=False, **filters):
    clone = copy.deepcopy(self)
    clone.adapter.add_query(filters.items())
    clone_length = clone.count()
    if clone_length > self._cfg[] and not all_records:
        raise Exception( % (clone_length, self._cfg[], filters, self._cfg[]))
    return clone
Applies given query filters. If wanted result is more than specified size, exception is raised about using all() method instead of filter. Args: all_records (bool): **filters: Query filters as keyword arguments. Returns: Self. Queryset object. Examples: >>> Person.objects.filter(name='John') # same as .filter(name__exact='John') >>> Person.objects.filter(age__gte=16, name__startswith='jo') >>> # Assume u1 and u2 as related model instances. >>> Person.objects.filter(work_unit__in=[u1, u2], name__startswith='jo')
28,079
def create_certificate_id(vault, name, version=None): return CertificateId(vault=vault, name=name, version=version)
:param vault: The vault uri. :type vault: str :param name: The certificate name. :type name: str :param version: The certificate version. :type version: str :rtype: KeyVaultId
28,080
def get_qseq_dir(fc_dir):
    machine_bc = os.path.join(fc_dir, "Data", "Intensities", "BaseCalls")
    if os.path.exists(machine_bc):
        return machine_bc
    else:
        return fc_dir
Retrieve the qseq directory within Solexa flowcell output.
28,081
def spectrogram_from_file(filename, step=10, window=20, max_freq=None,
                          eps=1e-14, overwrite=False, save_feature_as_csvfile=False):
    csvfilename = filename.replace(".wav", ".csv")
    if (os.path.isfile(csvfilename) is False) or overwrite:
        with soundfile.SoundFile(filename) as sound_file:
            audio = sound_file.read(dtype=)
            sample_rate = sound_file.samplerate
            if audio.ndim >= 2:
                audio = np.mean(audio, 1)
            if max_freq is None:
                max_freq = sample_rate / 2
            if max_freq > sample_rate / 2:
                raise ValueError("max_freq must not be greater than half of "
                                 " sample rate")
            if step > window:
                raise ValueError("step size must not be greater than window size")
            hop_length = int(0.001 * step * sample_rate)
            fft_length = int(0.001 * window * sample_rate)
            pxx, freqs = spectrogram(
                audio, fft_length=fft_length, sample_rate=sample_rate,
                hop_length=hop_length)
            ind = np.where(freqs <= max_freq)[0][-1] + 1
            res = np.transpose(np.log(pxx[:ind, :] + eps))
            if save_feature_as_csvfile:
                np.savetxt(csvfilename, res)
            return res
    else:
        return np.loadtxt(csvfilename)
Calculate the log of linear spectrogram from FFT energy Params: filename (str): Path to the audio file step (int): Step size in milliseconds between windows window (int): FFT window size in milliseconds max_freq (int): Only FFT bins corresponding to frequencies between [0, max_freq] are returned eps (float): Small value to ensure numerical stability (for ln(x))
28,082
def parse_repeating_time_interval_to_str(date_str):
    with open(os.path.join(ABSOLUTE_SCHEMA_DIR, "accrualPeriodicity.json"), "r") as f:
        freqs_map = {freq["id"]: freq["description"] for freq in json.load(f)}
    return freqs_map[date_str]
Return a human-readable description of a repeating time interval. TODO: For now it only interprets a fixed list of intervals. It should be able to parse any case.
28,083
def get_instance(self): return Instance(self.rest_client.make_request(self.instance), self.rest_client)
Get the Streams instance that owns this view. Returns: Instance: Streams instance owning this view.
28,084
def ldap_server_definitions(self):
    if not self._ldap_server_definitions:
        self._ldap_server_definitions = LdapServerDefinitionManager(self)
    return self._ldap_server_definitions
:class:`~zhmcclient.LdapServerDefinitionManager`: Access to the :term:`LDAP Server Definitions <LDAP Server Definition>` in this Console.
28,085
def forms(self, req, tag):
    liveForms = self.parameter.getInitialLiveForms()
    for liveForm in liveForms:
        liveForm.setFragmentParent(self)
    return liveForms
Make and return some forms, using L{self.parameter.getInitialLiveForms}. @return: some subforms. @rtype: C{list} of L{LiveForm}
28,086
def sendpfast(x, pps=None, mbps=None, realtime=None, loop=0, file_cache=False,
              iface=None, replay_args=None, parse_results=False):
    if iface is None:
        iface = conf.iface
    argv = [conf.prog.tcpreplay, "--intf1=%s" % iface]
    if pps is not None:
        argv.append("--pps=%i" % pps)
    elif mbps is not None:
        argv.append("--mbps=%f" % mbps)
    elif realtime is not None:
        argv.append("--multiplier=%f" % realtime)
    else:
        argv.append("--topspeed")
    if loop:
        argv.append("--loop=%i" % loop)
    if file_cache:
        argv.append("--preload-pcap")
    if replay_args is not None:
        argv.extend(replay_args)
    f = get_temp_file()
    argv.append(f)
    wrpcap(f, x)
    results = None
    with ContextManagerSubprocess("sendpfast()", conf.prog.tcpreplay):
        try:
            cmd = subprocess.Popen(argv, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except KeyboardInterrupt:
            log_interactive.info("Interrupted by user")
        except Exception:
            os.unlink(f)
            raise
        else:
            stdout, stderr = cmd.communicate()
            if stderr:
                log_runtime.warning(stderr.decode())
            if parse_results:
                results = _parse_tcpreplay_result(stdout, stderr, argv)
            elif conf.verb > 2:
                log_runtime.info(stdout.decode())
    os.unlink(f)
    return results
Send packets at layer 2 using tcpreplay for performance pps: packets per second mbps: MBits per second realtime: use packet's timestamp, bending time with real-time value loop: number of times to process the packet list file_cache: cache packets in RAM instead of reading from disk at each iteration # noqa: E501 iface: output interface replay_args: List of additional tcpreplay args (List[str]) parse_results: Return a dictionary of information outputted by tcpreplay (default=False) # noqa: E501 :returns stdout, stderr, command used
28,087
def train(self, training_set, iterations=500):
    if len(training_set) > 2:
        self.__X = np.matrix([example[0] for example in training_set])
        if self.__num_labels == 1:
            self.__y = np.matrix([example[1] for example in training_set]).reshape((-1, 1))
        else:
            eye = np.eye(self.__num_labels)
            self.__y = np.matrix([eye[example[1]] for example in training_set])
    else:
        self.__X = np.matrix(training_set[0])
        if self.__num_labels == 1:
            self.__y = np.matrix(training_set[1]).reshape((-1, 1))
        else:
            eye = np.eye(self.__num_labels)
            self.__y = np.matrix([eye[index] for sublist in training_set[1] for index in sublist])
    self.__m = self.__X.shape[0]
    self.__input_layer_size = self.__X.shape[1]
    self.__sizes = [self.__input_layer_size]
    self.__sizes.extend(self.__hidden_layers)
    self.__sizes.append(self.__num_labels)
    initial_theta = []
    for count in range(len(self.__sizes) - 1):
        epsilon = np.sqrt(6) / np.sqrt(self.__sizes[count] + self.__sizes[count + 1])
        initial_theta.append(np.random.rand(self.__sizes[count + 1],
                                            self.__sizes[count] + 1) * 2 * epsilon - epsilon)
    initial_theta = self.__unroll(initial_theta)
    self.__thetas = self.__roll(fmin_bfgs(self.__cost_function, initial_theta,
                                          fprime=self.__cost_grad_function,
                                          maxiter=iterations))
Trains itself using the sequence data.
28,088
def add_source(source, key=None):
    if source is None:
        log()
        return
    if source.startswith():
        directory =
        for filename in os.listdir(directory):
            with open(directory + filename, ) as rpm_file:
                if source in rpm_file.read():
                    break
        else:
            log("Add source: {!r}".format(source))
            with open(directory + , ) as rpm_file:
                rpm_file.write( % source[7:].replace(, ))
                rpm_file.write( % source[7:])
                rpm_file.write( % source)
    else:
        log("Unknown source: {!r}".format(source))
    if key:
        if in key:
            with NamedTemporaryFile() as key_file:
                key_file.write(key)
                key_file.flush()
                key_file.seek(0)
                subprocess.check_call([, , key_file.name])
        else:
            subprocess.check_call([, , key])
Add a package source to this system. @param source: a URL with a rpm package @param key: A key to be added to the system's keyring and used to verify the signatures on packages. Ideally, this should be an ASCII format GPG public key including the block headers. A GPG key id may also be used, but be aware that only insecure protocols are available to retrieve the actual public key from a public keyserver placing your Juju environment at risk.
28,089
def p_scalar__folded(self, p):
    scalar_group = .join(p[2])
    folded_scalar = fold(dedent(scalar_group)).rstrip()
    p[0] = ScalarDispatch( % folded_scalar, cast=)
scalar : B_FOLD_START scalar_group B_FOLD_END
28,090
def _from_float(cls, xmin, xmax, ymin, ymax):
    ixmin = int(np.floor(xmin + 0.5))
    ixmax = int(np.ceil(xmax + 0.5))
    iymin = int(np.floor(ymin + 0.5))
    iymax = int(np.ceil(ymax + 0.5))
    return cls(ixmin, ixmax, iymin, iymax)
Return the smallest bounding box that fully contains a given rectangle defined by float coordinate values. Following the pixel index convention, an integer index corresponds to the center of a pixel and the pixel edges span from (index - 0.5) to (index + 0.5). For example, the pixel edge spans of the following pixels are: - pixel 0: from -0.5 to 0.5 - pixel 1: from 0.5 to 1.5 - pixel 2: from 1.5 to 2.5 In addition, because `BoundingBox` upper limits are exclusive (by definition), 1 is added to the upper pixel edges. See examples below. Parameters ---------- xmin, xmax, ymin, ymax : float Float coordinates defining a rectangle. The lower values (``xmin`` and ``ymin``) must not be greater than the respective upper values (``xmax`` and ``ymax``). Returns ------- bbox : `BoundingBox` object The minimal ``BoundingBox`` object fully containing the input rectangle coordinates. Examples -------- >>> from photutils import BoundingBox >>> BoundingBox._from_float(xmin=1.0, xmax=10.0, ymin=2.0, ymax=20.0) BoundingBox(ixmin=1, ixmax=11, iymin=2, iymax=21) >>> BoundingBox._from_float(xmin=1.4, xmax=10.4, ymin=1.6, ymax=10.6) BoundingBox(ixmin=1, ixmax=11, iymin=2, iymax=12)
28,091
def find_referenced_subfiles(self, directory):
    if directory is None:
        return []
    # pattern reconstructed below; the original regex string was garbled in the source
    pattern = r"""['\"]?(%s%s[^\s'\"]+)""" % (escape(directory), escape(sep))
    return self.find_pattern_references(pattern)
Return list of files below specified `directory` in job inputs. Could use more sophisticated logic (match quotes to handle spaces, handle subdirectories, etc...). **Parameters** directory : str Full path to directory to search.
28,092
def build(self, builder):
    params = dict(
        OID=self.oid,
        Active=bool_to_true_false(self.active),
        BypassDuringMigration=bool_to_true_false(self.bypass_during_migration),
        NeedsRetesting=bool_to_true_false(self.needs_retesting),
    )
    builder.start("mdsol:EditCheckDef", params)
    for step in self.check_steps:
        step.build(builder)
    for action in self.check_actions:
        action.build(builder)
    builder.end("mdsol:EditCheckDef")
Build XML by appending to builder
28,093
def federation(self):
    url = self._url + "/federation"
    return _Federation(url=url,
                       securityHandler=self._securityHandler,
                       proxy_url=self._proxy_url,
                       proxy_port=self._proxy_port)
returns the class that controls federation
28,094
def get_submission_ids(self, tournament=1):
    query =
    arguments = {: tournament}
    data = self.raw_query(query, arguments)[][][0]
    if data is None:
        return None
    mapping = {item[]: item[] for item in data[]}
    return mapping
Get dict with username->submission_id mapping. Args: tournament (int): ID of the tournament (optional, defaults to 1) Returns: dict: username->submission_id mapping, string->string Example: >>> NumerAPI().get_submission_ids() {'1337ai': '93c46857-fed9-4594-981e-82db2b358daf', '1x0r': '108c7601-822c-4910-835d-241da93e2e24', ... }
28,095
def inv(self):
    result = Complete(self.r.transpose(), np.dot(self.r.transpose(), -self.t))
    result._cache_inv = self
    return result
The inverse transformation
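A numpy check of the inverse rule the method above relies on: for a rigid transform x -> R x + t, the inverse is x -> R^T x + R^T (-t):

import numpy as np

theta = 0.3
R = np.array([[np.cos(theta), -np.sin(theta)],
              [np.sin(theta),  np.cos(theta)]])
t = np.array([1.0, 2.0])
x = np.array([0.5, -0.5])

y = R @ x + t                  # forward transform
x_back = R.T @ y + R.T @ (-t)  # inverse per the rule above
print(np.allclose(x, x_back))  # -> True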
28,096
def GetRootFileEntry(self):
    path_spec = fake_path_spec.FakePathSpec(location=self.LOCATION_ROOT)
    return self.GetFileEntryByPathSpec(path_spec)
Retrieves the root file entry. Returns: FakeFileEntry: a file entry or None if not available.
28,097
def update_index(self, project_name, logstore_name, index_detail):
    headers = {}
    params = {}
    resource = "/logstores/" + logstore_name + "/index"
    headers[] =
    body = six.b(json.dumps(index_detail.to_json()))
    headers[] = str(len(body))
    (resp, header) = self._send("PUT", project_name, body, resource, params, headers)
    return UpdateIndexResponse(header, resp)
update index for a logstore Unsuccessful operation will cause an LogException. :type project_name: string :param project_name: the Project name :type logstore_name: string :param logstore_name: the logstore name :type index_detail: IndexConfig :param index_detail: the index config detail used to update index :return: UpdateIndexResponse :raise: LogException
28,098
def run_bang(alt_args=None):
    parser = get_parser()
    args = parser.parse_args(alt_args)
    source = args.config_specs or get_env_configs()
    if not source:
        return
    config = Config.from_config_specs(source)
    if args.playbooks:
        config[A.PLAYBOOKS] = args.playbooks
    if args.dump_config:
        if args.dump_config in (, ):
            import yaml
            print yaml.safe_dump(dict(config))
        elif args.dump_config == :
            import json
            print json.dumps(config)
        else:
            print config
        sys.exit()
    set_ssh_creds(config, args)
    annoy(config)
    stack = Stack(config)
    if args.ansible_list:
        stack.show_inventory(os.isatty(sys.stdout.fileno()))
        return
    initialize_logging(config)
    if args.deploy:
        stack.deploy()
    if args.configure:
        stack.configure()
    config.autoinc()
Runs bang with optional list of strings as command line options. If ``alt_args`` is not specified, defaults to parsing ``sys.argv`` for command line options.
28,099
def OnTableChanged(self, event):
    if hasattr(event, ):
        self.SetValue(event.table)
    wx.TextCtrl.SetInsertionPoint(self, self.cursor_pos)
    event.Skip()
Table changed event handler