Unnamed: 0 (int64): 0 to 389k
code (string): lengths 26 to 79.6k
docstring (string): lengths 1 to 46.9k
13,600
def stop(self):
    self.my_server.stop()
    self.http_thread.join()
    logging.info("HTTP server: Stopped")
Stop the HTTP server thread.
13,601
def do_perf_counter_check(self, instance): custom_tags = instance.get(, []) if custom_tags is None: custom_tags = [] instance_key = self._conn_key(instance, self.DEFAULT_DB_KEY) instance_by_key = self.instances_per_type_metrics[instance_key] with self.open_managed_db_connections(instance, self.DEFAULT_DB_KEY): if instance_key not in self.instances_metrics: self._make_metric_list_to_collect(instance, self.custom_metrics) metrics_to_collect = self.instances_metrics[instance_key] with self.get_managed_cursor(instance, self.DEFAULT_DB_KEY) as cursor: simple_rows = SqlSimpleMetric.fetch_all_values(cursor, instance_by_key["SqlSimpleMetric"], self.log) fraction_results = SqlFractionMetric.fetch_all_values( cursor, instance_by_key["SqlFractionMetric"], self.log ) waitstat_rows, waitstat_cols = SqlOsWaitStat.fetch_all_values( cursor, instance_by_key["SqlOsWaitStat"], self.log ) vfs_rows, vfs_cols = SqlIoVirtualFileStat.fetch_all_values( cursor, instance_by_key["SqlIoVirtualFileStat"], self.log ) clerk_rows, clerk_cols = SqlOsMemoryClerksStat.fetch_all_values( cursor, instance_by_key["SqlOsMemoryClerksStat"], self.log ) for metric in metrics_to_collect: try: if type(metric) is SqlSimpleMetric: metric.fetch_metric(cursor, simple_rows, custom_tags) elif type(metric) is SqlFractionMetric or type(metric) is SqlIncrFractionMetric: metric.fetch_metric(cursor, fraction_results, custom_tags) elif type(metric) is SqlOsWaitStat: metric.fetch_metric(cursor, waitstat_rows, waitstat_cols, custom_tags) elif type(metric) is SqlIoVirtualFileStat: metric.fetch_metric(cursor, vfs_rows, vfs_cols, custom_tags) elif type(metric) is SqlOsMemoryClerksStat: metric.fetch_metric(cursor, clerk_rows, clerk_cols, custom_tags) except Exception as e: self.log.warning("Could not fetch metric {} : {}".format(metric.datadog_name, e))
Fetch the metrics from the sys.dm_os_performance_counters table
13,602
def register_extensions(self, exts, force=False):
    for ext_in, ext_out in exts.items():
        self.register_extension(ext_in, ext_out, force)
Add/register extensions. Args: exts (dict): force (bool): If ``force`` is set to ``True``, simply overwrite existing extensions, otherwise do nothing. If the ``logger`` is set, log a warning about the duplicate extension if ``force == False``.
13,603
def tangelo_import(*args, **kwargs): try: return builtin_import(*args, **kwargs) except ImportError: if not hasattr(cherrypy.thread_data, "modulepath"): raise path = os.path.abspath(cherrypy.thread_data.modulepath) root = os.path.abspath(cherrypy.config.get("webroot")) result = None imp.acquire_lock() oldpath = sys.path try: if path not in sys.path and (path == root or path.startswith(root + os.path.sep)): sys.path = [path] + sys.path result = builtin_import(*args, **kwargs) finally: sys.path = oldpath imp.release_lock() if result is not None: return result raise
When we are asked to import a module, if we get an import error and the calling script is one we are serving (not one in the python libraries), try again in the same directory as the script that is calling import. It seems like we should use sys.meta_path and combine our path with the path sent to imp.find_module. This requires duplicating a bunch of logic from the imp module and is actually heavier than this technique. :params: see __builtin__.__import__
13,604
def update_server_cert(self, cert_name, new_cert_name=None, new_path=None):
    params = {'ServerCertificateName': cert_name}
    if new_cert_name:
        params['NewServerCertificateName'] = new_cert_name
    if new_path:
        params['NewPath'] = new_path
    return self.get_response('UpdateServerCertificate', params)
Updates the name and/or the path of the specified server certificate. :type cert_name: string :param cert_name: The name of the server certificate that you want to update. :type new_cert_name: string :param new_cert_name: The new name for the server certificate. Include this only if you are updating the server certificate's name. :type new_path: string :param new_path: If provided, the path of the certificate will be changed to this path.
13,605
def process_result_value(self, value, dialect):
    masks = list()
    if value:
        for e in enums.CryptographicUsageMask:
            if e.value & value:
                masks.append(e)
    return masks
Returns a new list of enums.CryptographicUsageMask Enums. This converts the integer value into the list of enums. Args: value(int): The integer value stored in the database that is used to create the list of enums.CryptographicUsageMask Enums. dialect(string): SQL dialect
13,606
def add_error_marker(text, position, start_line=1):
    indent = "  "
    lines = []
    caret_line = start_line
    for line in text.split("\n"):
        lines.append(indent + line)
        if 0 <= position <= len(line):
            lines.append(indent + (" " * position) + "^")
            caret_line = start_line
        position -= len(line)
        position -= 1
        start_line += 1
    return "\n".join(lines), caret_line
Add a caret marking a given position in a string of input. Returns (new_text, caret_line).
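A quick usage sketch of the function above (the input values are made up):

text_with_marker, caret_line = add_error_marker("(foo\n(bar baz", position=5)
# position counts characters across the whole text, newlines included; here it
# falls at the start of the second line, so a "^" row is inserted beneath
# "(bar baz" and caret_line == 2.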
13,607
def add_path(self, nodes, **attr):
    if nx.__version__[0] == "1":
        return super().add_path(nodes, **attr)
    else:
        return nx.add_path(self, nodes, **attr)
Replacement for the deprecated add_path method.
13,608
def varYSizeGaussianFilter(arr, stdyrange, stdx=0, modex=, modey=): assert arr.ndim == 2, s0 = arr.shape[0] if isinstance(stdyrange, np.ndarray): assert len(stdyrange)==s0, stdys = stdyrange else: if type(stdyrange) not in (list, tuple): stdyrange = (0,stdyrange) mn,mx = stdyrange stdys = np.linspace(mn,mx,s0) kx = int(stdx*2.5) kx += 1-kx%2 ky = int(mx*2.5) ky += 1-ky%2 arr2 = extendArrayForConvolution(arr, (kx, ky), modex, modey) inp = np.zeros((ky,kx)) inp[ky//2, kx//2] = 1 kernels = np.empty((s0,ky,kx)) for i in range(s0): stdy = stdys[i] kernels[i] = gaussian_filter(inp, (stdy,stdx)) out = np.empty_like(arr) _2dConvolutionYdependentKernel(arr2, out, kernels) return out
applies gaussian_filter on input array but allowing variable ksize in y stdyrange(int) -> maximum ksize - ksizes will increase from 0 to given value stdyrange(tuple,list) -> minimum and maximum size as (mn,mx) stdyrange(np.array) -> all different ksizes in y
13,609
def subsets(self): source, target = self.builder_config.language_pair filtered_subsets = {} for split, ss_names in self._subsets.items(): filtered_subsets[split] = [] for ss_name in ss_names: ds = DATASET_MAP[ss_name] if ds.target != target or source not in ds.sources: logging.info( "Skipping sub-dataset that does not include language pair: %s", ss_name) else: filtered_subsets[split].append(ss_name) logging.info("Using sub-datasets: %s", filtered_subsets) return filtered_subsets
Subsets that make up each split of the dataset for the language pair.
13,610
def write(self, sync_map_format, output_file_path, parameters=None): def select_levels(syncmap, levels): self.log([u"Levels: ", levels]) if levels is None: return try: levels = [int(l) for l in levels if int(l) > 0] syncmap.fragments_tree.keep_levels(levels) self.log([u"Selected levels: %s", levels]) except ValueError: self.log_warn(u"Cannot convert levels to list of int, returning unchanged") def set_head_tail_format(syncmap, head_tail_format=None): self.log([u"Head/tail format: ", str(head_tail_format)]) tree = syncmap.fragments_tree head = tree.get_child(0) first = tree.get_child(1) last = tree.get_child(-2) tail = tree.get_child(-1) if head_tail_format == SyncMapHeadTailFormat.ADD: head.value.fragment_type = SyncMapFragment.REGULAR self.log(u"Marked HEAD as REGULAR") if head_tail_format == SyncMapHeadTailFormat.STRETCH: self.log([u"Stretched first.begin: %.3f => %.3f (head)", first.value.begin, head.value.begin]) self.log([u"Stretched last.end: %.3f => %.3f (tail)", last.value.end, tail.value.end]) first.value.begin = head.value.begin last.value.end = tail.value.end if head_tail_format == SyncMapHeadTailFormat.ADD: tail.value.fragment_type = SyncMapFragment.REGULAR self.log(u"Marked TAIL as REGULAR") for node in list(tree.dfs): if (node.value is not None) and (node.value.fragment_type != SyncMapFragment.REGULAR): node.remove() if sync_map_format is None: self.log_exc(u"Sync map format is None", None, True, ValueError) if sync_map_format not in SyncMapFormat.CODE_TO_CLASS: self.log_exc(u"Sync map format is not allowed" % (sync_map_format), None, True, ValueError) if not gf.file_can_be_written(output_file_path): self.log_exc(u"Cannot write sync map file . Wrong permissions?" % (output_file_path), None, True, OSError) self.log([u"Output format: ", sync_map_format]) self.log([u"Output path: ", output_file_path]) self.log([u"Output parameters: ", parameters]) pruned_syncmap = self.clone() try: select_levels(pruned_syncmap, parameters[gc.PPN_TASK_OS_FILE_LEVELS]) except: self.log_warn([u"No %s parameter specified", gc.PPN_TASK_OS_FILE_LEVELS]) try: set_head_tail_format(pruned_syncmap, parameters[gc.PPN_TASK_OS_FILE_HEAD_TAIL_FORMAT]) except: self.log_warn([u"No %s parameter specified", gc.PPN_TASK_OS_FILE_HEAD_TAIL_FORMAT]) writer = (SyncMapFormat.CODE_TO_CLASS[sync_map_format])( variant=sync_map_format, parameters=parameters, rconf=self.rconf, logger=self.logger ) gf.ensure_parent_directory(output_file_path) self.log(u"Writing output file...") with io.open(output_file_path, "w", encoding="utf-8") as output_file: output_file.write(writer.format(syncmap=pruned_syncmap)) self.log(u"Writing output file... done")
Write the current sync map to file in the requested format. Return ``True`` if the call succeeded, ``False`` if an error occurred. :param sync_map_format: the format of the sync map :type sync_map_format: :class:`~aeneas.syncmap.SyncMapFormat` :param string output_file_path: the path to the output file to write :param dict parameters: additional parameters (e.g., for ``SMIL`` output) :raises: ValueError: if ``sync_map_format`` is ``None`` or it is not an allowed value :raises: TypeError: if a required parameter is missing :raises: OSError: if ``output_file_path`` cannot be written
13,611
def expand_no_defaults(property_sets):
    assert is_iterable_typed(property_sets, property_set.PropertySet)
    expanded_property_sets = [ps.expand_subfeatures() for ps in property_sets]
    product = __x_product(expanded_property_sets)
    return [property_set.create(p) for p in product]
Expand the given build request by combining all property_sets which don't specify conflicting non-free features.
13,612
def nack(self, id, subscription, transaction=None, receipt=None):
    assert id is not None, "'id' is required"
    assert subscription is not None, "'subscription' is required"
    headers = {HDR_MESSAGE_ID: id, HDR_SUBSCRIPTION: subscription}
    if transaction:
        headers[HDR_TRANSACTION] = transaction
    if receipt:
        headers[HDR_RECEIPT] = receipt
    self.send_frame(CMD_NACK, headers)
Let the server know that a message was not consumed. :param str id: the unique id of the message to nack :param str subscription: the subscription this message is associated with :param str transaction: include this nack in a named transaction
13,613
def select(dataspec, testsuite, mode='list', cast=True):
    if isinstance(testsuite, itsdb.ItsdbProfile):
        testsuite = itsdb.TestSuite(testsuite.root)
    elif not isinstance(testsuite, itsdb.TestSuite):
        testsuite = itsdb.TestSuite(testsuite)
    return tsql.select(dataspec, testsuite, mode=mode, cast=cast)
Select data from [incr tsdb()] profiles. Args: query (str): TSQL select query (e.g., `'i-id i-input mrs'` or `'* from item where readings > 0'`) testsuite (str, TestSuite): testsuite or path to testsuite containing data to select mode (str): see :func:`delphin.itsdb.select_rows` for a description of the *mode* parameter (default: `list`) cast (bool): if `True`, cast column values to their datatype according to the relations file (default: `True`) Returns: a generator that yields selected data
13,614
def _update_service_current_state(service: ServiceState):
    LOG.debug("Setting current state from target state for %s", service.id)
    service.update_current_state(service.target_state)
Update the current state of a service. Updates the current state of services after their target state has changed. Args: service (ServiceState): Service state object to update
13,615
def forkandlog(function, filter=, debug=False): import sys, os readfd, writefd = os.pipe() pid = os.fork() if pid == 0: os.close(readfd) if not debug: f = open(os.devnull, ) os.dup2(f.fileno(), 1) os.dup2(f.fileno(), 2) sink = logger(filter=filter) sink.setlogfile(b % writefd) function(sink) sys.exit(0) os.close(writefd) with os.fdopen(readfd) as readhandle: for line in readhandle: yield line info = os.waitpid(pid, 0) if info[1]: e.pid, e.exitcode = info raise e
Fork a child process and read its CASA log output. function A function to run in the child process filter The CASA log level filter to apply in the child process: less urgent messages will not be shown. Valid values are strings: "DEBUG1", "INFO5", ... "INFO1", "INFO", "WARN", "SEVERE". debug If true, the standard output and error of the child process are *not* redirected to /dev/null. Some CASA tools produce important results that are *only* provided via log messages. This is a problem for automation, since there’s no way for Python code to intercept those log messages and extract the results of interest. This function provides a framework for working around this limitation: by forking a child process and sending its log output to a pipe, the parent process can capture the log messages. This function is a generator. It yields lines from the child process’ CASA log output. Because the child process is a fork of the parent, it inherits a complete clone of the parent’s state at the time of forking. That means that the *function* argument you pass it can do just about anything you’d do in a regular program. The child process’ standard output and error streams are redirected to ``/dev/null`` unless the *debug* argument is true. Note that the CASA log output is redirected to a pipe that is neither of these streams. So, if the function raises an unhandled Python exception, the Python traceback will not pollute the CASA log output. But, by the same token, the calling program will not be able to detect that the exception occurred except by its impact on the expected log output.
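A consumption sketch, assuming a worker function that runs some CASA task (the names below are illustrative, not from the source):

def work(sink):
    # call CASA tasks here; their log output goes to the forwarded pipe
    pass

for line in forkandlog(work, filter="INFO"):
    if "Flagged" in line:        # pull out whichever log lines carry results
        print(line.strip())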
13,616
def _fit_RSA_marginalized_null(self, Y, X_base, scan_onsets): n_subj = len(Y) t_start = time.time() logger.info( .format(self.n_iter)) rho_grids, rho_weights = self._set_rho_grids() logger.info( .format(rho_grids)) n_grid = self.rho_bins log_weights = np.log(rho_weights) rho_post = [None] * n_subj sigma_post = [None] * n_subj beta0_post = [None] * n_subj X0 = [None] * n_subj LL_null = np.zeros(n_subj) for subj in range(n_subj): logger.debug(.format(subj)) [n_T, n_V] = np.shape(Y[subj]) D, F, run_TRs, n_run = self._prepare_DF( n_T, scan_onsets=scan_onsets[subj]) YTY_diag = np.sum(Y[subj] * Y[subj], axis=0) YTDY_diag = np.sum(Y[subj] * np.dot(D, Y[subj]), axis=0) YTFY_diag = np.sum(Y[subj] * np.dot(F, Y[subj]), axis=0) X_DC = self._gen_X_DC(run_TRs) X_DC, X_base[subj], idx_DC = self._merge_DC_to_base( X_DC, X_base[subj], no_DC=False) X_res = np.empty((n_T, 0)) for it in range(0, self.n_iter): X0[subj] = np.concatenate( (X_base[subj], X_res), axis=1) n_X0 = X0[subj].shape[1] X0TX0, X0TDX0, X0TFX0 = self._make_templates( D, F, X0[subj], X0[subj]) X0TY, X0TDY, X0TFY = self._make_templates( D, F, X0[subj], Y[subj]) YTAY_diag = YTY_diag - rho_grids[:, None] * YTDY_diag \ + rho_grids[:, None]**2 * YTFY_diag return beta0_post, sigma_post, rho_post, X0, LL_null
The marginalized version of the null model for Bayesian RSA. The null model assumes no task-related response to the design matrix. Note that there is a naming change of variable. X in fit() is changed to Y here. This is because we follow the tradition that Y corresponds to data. However, in wrapper function fit(), we follow the naming routine of scikit-learn.
13,617
def crosscorrfunc(freq, cross): tbin = 1. / (2. * np.max(freq)) * 1e3 time = np.arange(-len(freq) / 2. + 1, len(freq) / 2. + 1) * tbin multidata = False if len(np.shape(cross)) > 1: multidata = True if multidata: N = len(cross) crossf = np.zeros((N, N, len(freq))) for i in range(N): for j in range(N): raw_crossf = np.real(np.fft.ifft(cross[i, j])) mid = int(len(raw_crossf) / 2.) crossf[i, j] = np.hstack( [raw_crossf[mid + 1:], raw_crossf[:mid + 1]]) assert(len(time) == len(crossf[0, 0])) else: raw_crossf = np.real(np.fft.ifft(cross)) mid = int(len(raw_crossf) / 2.) crossf = np.hstack([raw_crossf[mid + 1:], raw_crossf[:mid + 1]]) assert(len(time) == len(crossf)) return time, crossf
Calculate crosscorrelation function(s) for given cross spectra. Parameters ---------- freq : numpy.ndarray 1 dimensional array of frequencies. cross : numpy.ndarray 2 dimensional array of cross spectra, 1st axis units, 2nd axis units, 3rd axis frequencies. Returns ------- time : tuple 1 dim numpy.ndarray of times. crossf : tuple 3 dim numpy.ndarray, crosscorrelation functions, 1st axis first unit, 2nd axis second unit, 3rd axis times.
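A small sketch with synthetic cross spectra, shapes chosen to match the code above:

import numpy as np
freq = np.linspace(0.0, 500.0, 256)         # frequencies in Hz, illustrative
cross = np.random.rand(2, 2, len(freq))     # cross spectra for two units
time, crossf = crosscorrfunc(freq, cross)
# crossf has shape (2, 2, 256): one correlation function per pair of units;
# time is the matching lag axis (in ms if freq is given in Hz).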
13,618
def stop(self, timeout=5):
    self.inner().stop(timeout=timeout)
    self.inner().reload()
Stop the container. The container must have been created. :param timeout: Timeout in seconds to wait for the container to stop before sending a ``SIGKILL``. Default: 5 (half the Docker default)
13,619
def degToHms(ra):
    assert (ra >= 0.0), WCSError("RA (%f) is negative" % (ra))
    assert ra < 360.0, WCSError("RA (%f) > 360.0" % (ra))
    rah = ra / degPerHMSHour
    ramin = (ra % degPerHMSHour) * HMSMinPerDeg
    rasec = (ra % degPerHMSMin) * HMSSecPerDeg
    return (int(rah), int(ramin), rasec)
Converts the ra (in degrees) to an HMS three-tuple. H and M are integers and the S part is a float.
13,620
def t_text_end(self, t): r t.type = t.value = t.lexer.lexdata[ t.lexer.text_start:t.lexer.lexpos] t.lexer.lineno += t.value.count() t.value = t.value.strip() t.lexer.begin() return t
r'</text>\s*'
13,621
def DECLARE_key_flag(flag_name, flag_values=FLAGS):
    if flag_name in _helpers.SPECIAL_FLAGS:
        _internal_declare_key_flags([flag_name],
                                    flag_values=_helpers.SPECIAL_FLAGS,
                                    key_flag_values=flag_values)
        return
    _internal_declare_key_flags([flag_name], flag_values=flag_values)
Declares one flag as key to the current module. Key flags are flags that are deemed really important for a module. They are important when listing help messages; e.g., if the --helpshort command-line flag is used, then only the key flags of the main module are listed (instead of all flags, as in the case of --helpfull). Sample usage: gflags.DECLARE_key_flag('flag_1') Args: flag_name: A string, the name of an already declared flag. (Redeclaring flags as key, including flags implicitly key because they were declared in this module, is a no-op.) flag_values: A FlagValues object. This should almost never need to be overridden.
13,622
def GetPackages(classification,visibility): r = clc.v1.API.Call(,, {: Blueprint.classification_stoi[classification],: Blueprint.visibility_stoi[visibility]}) if int(r[]) == 0: return(r[])
Gets a list of Blueprint Packages filtered by classification and visibility. https://t3n.zendesk.com/entries/20411357-Get-Packages :param classification: package type filter (System, Script, Software) :param visibility: package visibility filter (Public, Private, Shared)
13,623
def _disconnect(self, error):
    "done"
    if self._on_disconnect:
        self._on_disconnect(str(error))
    if self._sender:
        self._sender.connectionLost(Failure(error))
    self._when_done.fire(Failure(error))
done
13,624
def add_scalar_data(self, token, community_id, producer_display_name, metric_name, producer_revision, submit_time, value, **kwargs): parameters = dict() parameters[] = token parameters[] = community_id parameters[] = producer_display_name parameters[] = metric_name parameters[] = producer_revision parameters[] = submit_time parameters[] = value optional_keys = [ , , , , , , , , , , , , ] for key in optional_keys: if key in kwargs: if key == : parameters[] = kwargs[key] elif key == : parameters[] = kwargs[key] elif key == : parameters[] = kwargs[key] elif key == : parameters[] = kwargs[key] elif key == : parameters[] = json.dumps(kwargs[key]) elif key == : parameters[key] = json.dumps(kwargs[key]) elif key == : if kwargs[key]: parameters[key] = kwargs[key] elif key == : if kwargs[key]: parameters[key] = kwargs[key] elif key == : parameters[] = kwargs[key] elif key == : parameters[] = kwargs[key] elif key == : parameters[] = kwargs[key] elif key == : parameters[] = kwargs[key] else: parameters[key] = kwargs[key] response = self.request(, parameters) return response
Create a new scalar data point. :param token: A valid token for the user in question. :type token: string :param community_id: The id of the community that owns the producer. :type community_id: int | long :param producer_display_name: The display name of the producer. :type producer_display_name: string :param metric_name: The metric name that identifies which trend this point belongs to. :type metric_name: string :param producer_revision: The repository revision of the producer that produced this value. :type producer_revision: int | long | string :param submit_time: The submit timestamp. Must be parsable with PHP strtotime(). :type submit_time: string :param value: The value of the scalar. :type value: float :param config_item_id: (optional) If this value pertains to a specific configuration item, pass its id here. :type config_item_id: int | long :param test_dataset_id: (optional) If this value pertains to a specific test dataset, pass its id here. :type test_dataset_id: int | long :param truth_dataset_id: (optional) If this value pertains to a specific ground truth dataset, pass its id here. :type truth_dataset_id: int | long :param silent: (optional) If true, do not perform threshold-based email notifications for this scalar. :type silent: bool :param unofficial: (optional) If true, creates an unofficial scalar visible only to the user performing the submission. :type unofficial: bool :param build_results_url: (optional) A URL for linking to build results for this submission. :type build_results_url: string :param branch: (optional) The branch name in the source repository for this submission. :type branch: string :param submission_id: (optional) The id of the submission. :type submission_id: int | long :param submission_uuid: (optional) The uuid of the submission. If one does not exist, it will be created. :type submission_uuid: string :type branch: string :param params: (optional) Any key/value pairs that should be displayed with this scalar result. :type params: dict :param extra_urls: (optional) Other URL's that should be displayed with with this scalar result. Each element of the list should be a dict with the following keys: label, text, href :type extra_urls: list[dict] :param unit: (optional) The unit of the scalar value. :type unit: string :param reproduction_command: (optional) The command to reproduce this scalar. :type reproduction_command: string :returns: The scalar object that was created. :rtype: dict
13,625
def updateResultsView(self, index):
    flags = (QItemSelectionModel.Clear | QItemSelectionModel.Rows
             | QItemSelectionModel.Select)
    self.resultsView.selectionModel().select(index, flags)
    self.resultsView.resizeColumnsToContents()
    self.resultsView.setFocus()
Update the selection to contain only the result specified by the index. This should be the last index of the model. Finally updade the context menu. The selectionChanged signal is used to trigger the update of the Quanty dock widget and result details dialog. :param index: Index of the last item of the model. :type index: QModelIndex
13,626
def returns(ts, **kwargs): returns_type = kwargs.get(, ) cumulative = kwargs.get(, False) if returns_type == : relative = 0 else: relative = 1 start = kwargs.get(, None) end = kwargs.get(, dt.datetime.today()) period = kwargs.get(, 1) if isinstance(start, dt.datetime): log.debug(.format(ts[end], ts[start])) return ts[end] / ts[start] - 1 + relative rets_df = ts / ts.shift(period) - 1 + relative if cumulative: return rets_df.cumprod() return rets_df[1:]
Compute returns on the given period @param ts : time serie to process @param kwargs.type: gross or simple returns @param delta : period betweend two computed returns @param start : with end, will return the return betweend this elapsed time @param period : delta is the number of lines/periods provided @param end : so said @param cumulative: compute cumulative returns
13,627
def perlin2(size, units=None, repeat=(10.,)*2, scale=None, shift=(0, 0)): if scale: if np.isscalar(scale): scale = (scale,)*2 repeat = scale units = (1.,)*2 wx, wy = repeat dx, dy = units offset_x, offset_y = shift prog = OCLProgram(abspath("kernels/perlin.cl")) d = OCLArray.empty(size[::-1], np.float32) prog.run_kernel("perlin2d", d.shape[::-1], None, d.data, np.float32(dx), np.float32(dy), np.float32(wx), np.float32(wy), np.float32(offset_x), np.float32(offset_y), ) return d.get()
2d perlin noise either scale =(10.,10.) or units (5.,5.) have to be given.... scale is the characteristic length in pixels Parameters ---------- size: units repeat scale shift Returns -------
13,628
def format_string(self, fmat_string): try: return fmat_string.format(**vars(self)) except KeyError as e: raise ValueError( .format(repr(fmat_string), repr(e)))
Takes a string containing 0 or more {variables} and formats it according to this instance's attributes. :param fmat_string: A string, e.g. '{name}-foo.txt' :type fmat_string: ``str`` :return: The string formatted according to this instance. E.g. 'production-runtime-foo.txt' :rtype: ``str``
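A hedged illustration of the intended call pattern; the owning object and its attributes here are hypothetical:

cfg.name = "production-runtime"        # any instance attribute becomes available
cfg.format_string("{name}-foo.txt")    # -> "production-runtime-foo.txt"
cfg.format_string("{oops}-foo.txt")    # -> raises ValueError (unknown key)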
13,629
def remove_available_work_units(self, work_spec_name, work_unit_names): return self._remove_some_work_units( work_spec_name, work_unit_names, priority_max=time.time())
Remove some work units in the available queue. If `work_unit_names` is :const:`None` (which must be passed explicitly), all available work units in `work_spec_name` are removed; otherwise only the specific named work units will be. :param str work_spec_name: name of the work spec :param list work_unit_names: names of the work units, or :const:`None` for all in `work_spec_name` :return: number of work units removed
13,630
def resolve(self, _):
    if self.default_value == DUMMY_VALUE:
        if self.name in os.environ:
            return os.environ[self.name]
        else:
            raise VelException(f"Undefined environment variable: {self.name}")
    else:
        return os.environ.get(self.name, self.default_value)
Resolve given variable
13,631
def transform_paragraph(self, paragraph, epochs=50, ignore_missing=False): if self.word_vectors is None: raise Exception() if self.dictionary is None: raise Exception( ) cooccurrence = collections.defaultdict(lambda: 0.0) for token in paragraph: try: cooccurrence[self.dictionary[token]] += self.max_count / 10.0 except KeyError: if not ignore_missing: raise random_state = check_random_state(self.random_state) word_ids = np.array(cooccurrence.keys(), dtype=np.int32) values = np.array(cooccurrence.values(), dtype=np.float64) shuffle_indices = np.arange(len(word_ids), dtype=np.int32) paragraph_vector = np.mean(self.word_vectors[word_ids], axis=0) sum_gradients = np.ones_like(paragraph_vector) random_state.shuffle(shuffle_indices) transform_paragraph(self.word_vectors, self.word_biases, paragraph_vector, sum_gradients, word_ids, values, shuffle_indices, self.learning_rate, self.max_count, self.alpha, epochs) return paragraph_vector
Transform an iterable of tokens into its vector representation (a paragraph vector). Experimental. This will return something close to a tf-idf weighted average of constituent token vectors by fitting rare words (with low word bias values) more closely.
13,632
def get_contract_from_name(self, contract_name): return next((c for c in self.contracts if c.name == contract_name), None)
Return a contract from a name Args: contract_name (str): name of the contract Returns: Contract
13,633
def resolve_dynamic_values(env): if env.needs_workflow[]: return needs = env.needs_all_needs for key, need in needs.items(): for need_option in need: if need_option in [, , , ]: continue if not isinstance(need[need_option], (list, set)): func_call = True while func_call: try: func_call, func_return = _detect_and_execute(need[need_option], need, env) except FunctionParsingException: raise SphinxError("Function definition of {option} in file {file}:{line} has " "unsupported parameters. " "supported are str, int/float, list".format(option=need_option, file=need[], line=need[])) if func_call is None: continue if func_return is None: need[need_option] = need[need_option].replace(.format(func_call), ) else: need[need_option] = need[need_option].replace(.format(func_call), str(func_return)) if need[need_option] == : need[need_option] = None else: new_values = [] for element in need[need_option]: try: func_call, func_return = _detect_and_execute(element, need, env) except FunctionParsingException: raise SphinxError("Function definition of {option} in file {file}:{line} has " "unsupported parameters. " "supported are str, int/float, list".format(option=need_option, file=need[], line=need[])) if func_call is None: new_values.append(element) else: if isinstance(need[need_option], (str, int, float)): new_values.append(element.replace(.format(func_call), str(func_return))) else: if isinstance(need[need_option], (list, set)): new_values += func_return need[need_option] = new_values env.needs_workflow[] = True
Resolve dynamic values inside need data. Rough workflow: #. Parse all needs and their data for a string like [[ my_func(a,b,c) ]] #. Extract function name and call parameters #. Execute registered function name with extracted call parameters #. Replace original string with return value :param env: Sphinx environment :return: return value of given function
13,634
def grid(children=[], sizing_mode=None, nrows=None, ncols=None): row = namedtuple("row", ["children"]) col = namedtuple("col", ["children"]) def flatten(layout): Item = namedtuple("Item", ["layout", "r0", "c0", "r1", "c1"]) Grid = namedtuple("Grid", ["nrows", "ncols", "items"]) def gcd(a, b): a, b = abs(a), abs(b) while b != 0: a, b = b, a % b return a def lcm(a, *rest): for b in rest: a = (a*b) // gcd(a, b) return a nonempty = lambda child: child.nrows != 0 and child.ncols != 0 def _flatten(layout): if isinstance(layout, row): children = list(filter(nonempty, map(_flatten, layout.children))) if not children: return Grid(0, 0, []) nrows = lcm(*[ child.nrows for child in children ]) ncols = sum([ child.ncols for child in children ]) items = [] offset = 0 for child in children: factor = nrows//child.nrows for (layout, r0, c0, r1, c1) in child.items: items.append((layout, factor*r0, c0 + offset, factor*r1, c1 + offset)) offset += child.ncols return Grid(nrows, ncols, items) elif isinstance(layout, col): children = list(filter(nonempty, map(_flatten, layout.children))) if not children: return Grid(0, 0, []) nrows = sum([ child.nrows for child in children ]) ncols = lcm(*[ child.ncols for child in children ]) items = [] offset = 0 for child in children: factor = ncols//child.ncols for (layout, r0, c0, r1, c1) in child.items: items.append((layout, r0 + offset, factor*c0, r1 + offset, factor*c1)) offset += child.nrows return Grid(nrows, ncols, items) else: return Grid(1, 1, [Item(layout, 0, 0, 1, 1)]) grid = _flatten(layout) children = [] for (layout, r0, c0, r1, c1) in grid.items: if layout is not None: children.append((layout, r0, c0, r1 - r0, c1 - c0)) return GridBox(children=children) if isinstance(children, list): if nrows is not None or ncols is not None: N = len(children) if ncols is None: ncols = math.ceil(N/nrows) layout = col([ row(children[i:i+ncols]) for i in range(0, N, ncols) ]) else: def traverse(children, level=0): if isinstance(children, list): container = col if level % 2 == 0 else row return container([ traverse(child, level+1) for child in children ]) else: return children layout = traverse(children) elif isinstance(children, LayoutDOM): def is_usable(child): return _has_auto_sizing(child) and child.spacing == 0 def traverse(item, top_level=False): if isinstance(item, Box) and (top_level or is_usable(item)): container = col if isinstance(item, Column) else row return container(list(map(traverse, item.children))) else: return item layout = traverse(children, top_level=True) elif isinstance(children, string_types): raise NotImplementedError else: raise ValueError("expected a list, string or model") grid = flatten(layout) if sizing_mode is not None: grid.sizing_mode = sizing_mode for child in grid.children: layout = child[0] if _has_auto_sizing(layout): layout.sizing_mode = sizing_mode return grid
Conveniently create a grid of layoutable objects. Grids are created by using ``GridBox`` model. This gives the most control over the layout of a grid, but is also tedious and may result in unreadable code in practical applications. ``grid()`` function remedies this by reducing the level of control, but in turn providing a more convenient API. Supported patterns: 1. Nested lists of layoutable objects. Assumes the top-level list represents a column and alternates between rows and columns in subsequent nesting levels. One can use ``None`` for padding purpose. >>> grid([p1, [[p2, p3], p4]]) GridBox(children=[ (p1, 0, 0, 1, 2), (p2, 1, 0, 1, 1), (p3, 2, 0, 1, 1), (p4, 1, 1, 2, 1), ]) 2. Nested ``Row`` and ``Column`` instances. Similar to the first pattern, just instead of using nested lists, it uses nested ``Row`` and ``Column`` models. This can be much more readable that the former. Note, however, that only models that don't have ``sizing_mode`` set are used. >>> grid(column(p1, row(column(p2, p3), p4))) GridBox(children=[ (p1, 0, 0, 1, 2), (p2, 1, 0, 1, 1), (p3, 2, 0, 1, 1), (p4, 1, 1, 2, 1), ]) 3. Flat list of layoutable objects. This requires ``nrows`` and/or ``ncols`` to be set. The input list will be rearranged into a 2D array accordingly. One can use ``None`` for padding purpose. >>> grid([p1, p2, p3, p4], ncols=2) GridBox(children=[ (p1, 0, 0, 1, 1), (p2, 0, 1, 1, 1), (p3, 1, 0, 1, 1), (p4, 1, 1, 1, 1), ])
13,635
def status(self, event): self.log() response = { : , : , : self.config.allow_registration } self.fire(send(event.client.uuid, response))
An anonymous client wants to know if we're open for enrollment
13,636
def _broker_exit(self): for _, (side, _) in self.poller.readers + self.poller.writers: LOG.debug(, side) side.stream.on_disconnect(self) self.poller.close()
Forcefully call :meth:`Stream.on_disconnect` on any streams that failed to shut down gracefully, then discard the :class:`Poller`.
13,637
def _etree_py26_write(f, tree): f.write("<?xml version= encoding=?>\n".encode()) if etree.VERSION[:3] == : def fixtag(tag, namespaces): if tag == XML_NS + : return , "" if in tag: j = tag.index() + 1 tag = tag[j:] xmlns = if tag == : xmlns = (, str()) namespaces[] = return tag, xmlns else: fixtag = etree.fixtag old_fixtag = etree.fixtag etree.fixtag = fixtag try: tree.write(f, encoding=str()) finally: etree.fixtag = old_fixtag
Compatibility workaround for ElementTree shipped with py2.6
13,638
def normalized(self):
    total = self.total()
    result = Histogram()
    for value, count in iteritems(self):
        try:
            result[value] = count / float(total)
        except UnorderableElements as e:
            result = Histogram.from_dict(dict(result), key=hash)
            result[value] = count / float(total)
    return result
Return a normalized version of the histogram where the values sum to one.
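A rough usage sketch, assuming Histogram supports dict-style assignment as the code above implies:

h = Histogram()
h["a"] = 3
h["b"] = 1
n = h.normalized()
# n["a"] == 0.75 and n["b"] == 0.25; the normalized counts sum to 1.0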
13,639
def count_nulls(self, field):
    try:
        n = self.df[field].isnull().sum()
    except KeyError:
        self.warning("Can not find column", field)
        return
    except Exception as e:
        self.err(e, "Can not count nulls")
        return
    self.ok("Found", n, "nulls in column", field)
Count the number of null values in a column
13,640
def logged_exec(cmd):
    logger = logging.getLogger()
    logger.debug("Executing external command: %r", cmd)
    p = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
        universal_newlines=True)
    stdout = []
    for line in p.stdout:
        line = line[:-1]
        stdout.append(line)
        logger.debug(STDOUT_LOG_PREFIX + line)
    retcode = p.wait()
    if retcode:
        raise ExecutionError(retcode, cmd, stdout)
    return stdout
Execute a command, redirecting the output to the log.
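Call sketch for the function above (the command itself is arbitrary):

try:
    out = logged_exec(["git", "status", "--short"])
except ExecutionError:
    pass    # raised with (retcode, cmd, stdout) when the command exits non-zero
else:
    for line in out:
        print(line)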
13,641
def definitions_help():
    message = m.Message()
    message.add(m.Brand())
    message.add(heading())
    message.add(content())
    return message
Help message for Definitions. .. versionadded:: 4.0.0 :returns: A message object containing helpful information. :rtype: messaging.message.Message
13,642
def get_redis_connection(config, use_strict_redis=False): redis_cls = redis.StrictRedis if use_strict_redis else redis.Redis if in config: return redis_cls.from_url(config[], db=config.get()) if in config.keys(): try: try: return cache._client except AttributeError: return cache.get_master_client() if in config: return redis_cls(unix_socket_path=config[], db=config[]) if in config: sentinel_kwargs = { : config.get(), : config.get(), : config.get(), } sentinel_kwargs.update(config.get(, {})) sentinel = Sentinel(config[], **sentinel_kwargs) return sentinel.master_for( service_name=config[], redis_class=redis_cls, ) return redis_cls(host=config[], port=config[], db=config[], password=config.get(), ssl=config.get(, False))
Returns a redis connection from a connection config
13,643
def collect(self, cert_id, format_type): result = self.client.service.collect(authData=self.auth, id=cert_id, formatType=ComodoCA.format_type[format_type]) if result.statusCode == 2: return jsend.success({: result.SSL.certificate, : , : cert_id}) elif result.statusCode == 0: return jsend.fail({: cert_id, : , : }) else: return self._create_error(result.statusCode)
Poll for certificate availability after submission. :param int cert_id: The certificate ID :param str format_type: The format type to use (example: 'X509 PEM Certificate only') :return: The certificate_id or the certificate depending on whether the certificate is ready (check status code) :rtype: dict
13,644
def debug(self, *debugReqs): return self._client.send(debug=sc2api_pb2.RequestDebug(debug=debugReqs))
send a debug command to control the game state's setup
13,645
def port_profile_qos_profile_qos_cos(self, **kwargs): config = ET.Element("config") port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile") name_key = ET.SubElement(port_profile, "name") name_key.text = kwargs.pop() qos_profile = ET.SubElement(port_profile, "qos-profile") qos = ET.SubElement(qos_profile, "qos") cos = ET.SubElement(qos, "cos") cos.text = kwargs.pop() callback = kwargs.pop(, self._callback) return callback(config)
Auto Generated Code
13,646
def generate_keypair(curve=, randfunc=None): if randfunc is None: randfunc = Crypto.Random.new().read curve = Curve.by_name(curve) raw_privkey = randfunc(curve.order_len_bin) privkey = serialize_number(deserialize_number(raw_privkey), SER_COMPACT) pubkey = str(passphrase_to_pubkey(privkey)) return (privkey, pubkey)
Convenience function to generate a random new keypair (passphrase, pubkey).
13,647
def _tarboton_slopes_directions(self, data, dX, dY): return _tarboton_slopes_directions(data, dX, dY, self.facets, self.ang_adj)
Calculate the slopes and directions based on the 8 sections from Tarboton http://www.neng.usu.edu/cee/faculty/dtarb/96wr03137.pdf
13,648
def get_colmin(data):
    data = data.T
    colmins = []
    for col in data:
        colmins.append(data[col].idxmin())
    return colmins
Get rowwise column names with minimum values :param data: pandas dataframe
13,649
def clean_column_values(df, inplace=True): r dollars_percents = re.compile(r) if not inplace: df = df.copy() for c in df.columns: values = None if df[c].dtype.char in .split(): try: values = df[c].copy() values = values.fillna() values = values.astype(str).str.replace(dollars_percents, ) if values.str.len().sum() > .2 * df[c].astype(str).str.len().sum(): values[values.isnull()] = np.nan values[values == ] = np.nan values = values.astype(float) except ValueError: values = None except: logger.error(.format(c, df[c].dtype)) raise if values is not None: if values.isnull().sum() < .6 * len(values) and values.any(): df[c] = values return df
r""" Convert dollar value strings, numbers with commas, and percents into floating point values >>> df = get_data('us_gov_deficits_raw') >>> df2 = clean_column_values(df, inplace=False) >>> df2.iloc[0] Fiscal year 10/2017-3/2018 President's party R Senate majority party R House majority party R Top-bracket marginal income tax rate 38.3 National debt millions 2.10896e+07 National debt millions of 1983 dollars 8.47004e+06 Deficit\n(millions of 1983 dollars) 431443 Surplus string in 1983 dollars NaN Deficit string in 1983 dollars ($ = $10B) $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ Net surplus in 1983 dollars ($B) -430 Name: 0, dtype: object
13,650
def do_login(self, line):
    "login aws-acces-key aws-secret"
    if line:
        args = self.getargs(line)
        self.connect(args[0], args[1])
    else:
        self.connect()
    self.do_tables()
login aws-acces-key aws-secret
13,651
def _prepare_connection(**kwargs): init_args = {} fun_kwargs = {} netmiko_kwargs = __salt__[](, {}) netmiko_kwargs.update(kwargs) netmiko_init_args, _, _, netmiko_defaults = inspect.getargspec(BaseConnection.__init__) check_self = netmiko_init_args.pop(0) for karg, warg in six.iteritems(netmiko_kwargs): if karg not in netmiko_init_args: if warg is not None: fun_kwargs[karg] = warg continue if warg is not None: init_args[karg] = warg conn = ConnectHandler(**init_args) return conn, fun_kwargs
Prepare the connection with the remote network device, and clean up the key value pairs, removing the args used for the connection init.
13,652
def GET_query(self, req_hook, req_args): headers = {: , : self.__session__} try: if req_args is None: response = requests.get(self.__url__ + req_hook, headers=headers, verify=True) else: response = requests.get(self.__url__ + req_hook + str(req_args), headers=headers, verify=True) except requests.exceptions.RequestException as err: self.logger.error(err) return , return response.status_code, response.text
Generic GET query method
13,653
def fcoe_get_interface_output_fcoe_intf_list_fcoe_intf_port_type(self, **kwargs): config = ET.Element("config") fcoe_get_interface = ET.Element("fcoe_get_interface") config = fcoe_get_interface output = ET.SubElement(fcoe_get_interface, "output") fcoe_intf_list = ET.SubElement(output, "fcoe-intf-list") fcoe_intf_fcoe_port_id_key = ET.SubElement(fcoe_intf_list, "fcoe-intf-fcoe-port-id") fcoe_intf_fcoe_port_id_key.text = kwargs.pop() fcoe_intf_port_type = ET.SubElement(fcoe_intf_list, "fcoe-intf-port-type") fcoe_intf_port_type.text = kwargs.pop() callback = kwargs.pop(, self._callback) return callback(config)
Auto Generated Code
13,654
def insert(self, fields, values): if fields: _fields = % .join(fields) else: _fields = _values = .join( * len(values)) query = % (self._name, _fields, _values) self._cursor.execute(query, tuple(values)) self._connection.commit() return self._cursor.lastrowid
insert new db entry :param fields: list of fields to insert :param values: list of values to insert :return: row id of the new row
13,655
def read(cls, five9, external_id):
    results = cls.search(five9, {cls.__uid_field__: external_id})
    if not results:
        return None
    return results[0]
Return a record singleton for the ID. Args: five9 (five9.Five9): The authenticated Five9 remote. external_id (mixed): The identified on Five9. This should be the value that is in the ``__uid_field__`` field on the record. Returns: BaseModel: The record, if found. Otherwise ``None``
13,656
def get_by_id(self, id_networkv4): uri = % id_networkv4 return super(ApiNetworkIPv4, self).get(uri)
Get IPv4 network :param id_networkv4: ID for NetworkIPv4 :return: IPv4 Network
13,657
def resolve_admin_type(admin): if admin is current_user or isinstance(admin, UserMixin): return else: return admin.__class__.__name__
Determine admin type.
13,658
def convert_general(value): if isinstance(value, bool): return "true" if value else "false" elif isinstance(value, list): value = [convert_general(item) for item in value] value = convert_to_imgur_list(value) elif isinstance(value, Integral): return str(value) elif in str(type(value)): return str(getattr(value, , value)) return value
Take a python object and convert it to the format Imgur expects.
13,659
def rpc_get_name_DID(self, name, **con_info): did_info = None if check_name(name): did_info = self.get_name_DID_info(name) elif check_subdomain(name): did_info = self.get_subdomain_DID_info(name) else: return {: , : 400} if did_info is None: return {: , : 404} did = make_DID(did_info[], did_info[], did_info[]) return self.success_response({: did})
Given a name or subdomain, return its DID.
13,660
def set_loader(self, loader, destructor, state): return lib.zcertstore_set_loader(self._as_parameter_, loader, destructor, state)
Override the default disk loader with a custom loader fn.
13,661
def syllabify(self, unsyllabified_tokens):
    syllables = self.make_syllables(unsyllabified_tokens)
    qu_fixed_syllables = self._qu_fix(syllables)
    elision_fixed_syllables = self._elision_fixer(qu_fixed_syllables)
    return elision_fixed_syllables
Helper class for calling syllabification-related methods. :param unsyllabified_tokens: :return: List of syllables. :rtype : list
13,662
def within_rupture_distance(self, surface, distance, **kwargs):
    upper_depth, lower_depth = _check_depth_limits(kwargs)
    rrupt = surface.get_min_distance(self.catalogue.hypocentres_as_mesh())
    is_valid = np.logical_and(
        rrupt <= distance,
        np.logical_and(self.catalogue.data['depth'] >= upper_depth,
                       self.catalogue.data['depth'] < lower_depth))
    return self.select_catalogue(is_valid)
Select events within a rupture distance from a fault surface :param surface: Fault surface as instance of nhlib.geo.surface.base.BaseSurface :param float distance: Rupture distance (km) :returns: Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue` containing only selected events
13,663
def _get_param_names(cls):
    init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
    if init is object.__init__:
        return []
    args, varargs, kw, default = inspect.getargspec(init)
    if varargs is not None:
        raise RuntimeError("scikit-learn estimators should always "
                           "specify their parameters in the signature"
                           " of their __init__ (no varargs). "
                           "%s doesn't follow this convention." % (cls,))
    # Remove 'self'
    args.pop(0)
    args.sort()
    return args
Get parameter names for the estimator
13,664
def _handle_relation(self, tokens: ParseResults) -> str: subject_node_dsl = self.ensure_node(tokens[SUBJECT]) object_node_dsl = self.ensure_node(tokens[OBJECT]) subject_modifier = modifier_po_to_dict(tokens[SUBJECT]) object_modifier = modifier_po_to_dict(tokens[OBJECT]) annotations = { annotation_name: ( { ae: True for ae in annotation_entry } if isinstance(annotation_entry, set) else { annotation_entry: True } ) for annotation_name, annotation_entry in self.control_parser.annotations.items() } return self._add_qualified_edge( subject_node_dsl, object_node_dsl, relation=tokens[RELATION], annotations=annotations, subject_modifier=subject_modifier, object_modifier=object_modifier, )
Handle a relation.
13,665
def extract_env(self): environ = self._get_config() if environ is not None: if not isinstance(environ, list): environ = [environ] lines = [] for line in environ: line = re.findall("(?P<var_name>.+?)=(?P<var_value>.+)", line) line = [ % (x[0], x[1]) for x in line] lines = lines + line environ = "\n".join(lines) bot.verbose3("Found Docker container environment!") return environ
extract the environment from the manifest, or return None. Used by functions env_extract_image, and env_extract_tar
13,666
def _get_resource_id_from_stack(cfn_client, stack_name, logical_id): LOG.debug("Getting resource's PhysicalId from AWS CloudFormation stack. StackName=%s, LogicalId=%s", stack_name, logical_id) try: response = cfn_client.describe_stack_resource(StackName=stack_name, LogicalResourceId=logical_id) LOG.debug("Response from AWS CloudFormation %s", response) return response["StackResourceDetail"]["PhysicalResourceId"] except botocore.exceptions.ClientError as ex: LOG.debug("Unable to fetch resource name from CloudFormation Stack: " "StackName=%s, ResourceLogicalId=%s, Response=%s", stack_name, logical_id, ex.response) raise UserException(str(ex))
Given the LogicalID of a resource, call AWS CloudFormation to get physical ID of the resource within the specified stack. Parameters ---------- cfn_client CloudFormation client provided by AWS SDK stack_name : str Name of the stack to query logical_id : str LogicalId of the resource Returns ------- str Physical ID of the resource Raises ------ samcli.commands.exceptions.UserException If the stack or resource does not exist
13,667
def owner(self, owner): if owner is None: raise ValueError("Invalid value for `owner`, must not be `None`") if owner is not None and len(owner) > 31: raise ValueError("Invalid value for `owner`, length must be less than or equal to `31`") if owner is not None and len(owner) < 3: raise ValueError("Invalid value for `owner`, length must be greater than or equal to `3`") if owner is not None and not re.search(, owner): raise ValueError("Invalid value for `owner`, must be a follow pattern or equal to `/[a-z0-9](?:-(?!-)|[a-z0-9])+[a-z0-9]/`") self._owner = owner
Sets the owner of this OauthTokenReference. User name of the owner of the OAuth token within data.world. :param owner: The owner of this OauthTokenReference. :type: str
13,668
def jinja_env(template_path): fs_loader = FileSystemLoader(os.path.dirname(template_path)) env = Environment(loader=fs_loader, autoescape=True, trim_blocks=True, lstrip_blocks=True) env.filters[] = portable_b64encode env.filters[] = f_b64decode return env
Sets up our Jinja environment, loading the few filters we have
13,669
def check_local() -> None: to_check = [, , ] for i in to_check: if not os.path.exists(i): os.makedirs(i)
Verify required directories exist. This functions checks the current working directory to ensure that the required directories exist. If they do not exist, it will create them.
13,670
def get_desktop_size(self): _ptr = ffi.new() check_int_err(lib.SDL_GetDesktopDisplayMode(self._index, _ptr)) return (_ptr.w, _ptr.h)
Get the size of the desktop display
13,671
def export_trials_data(args): nni_config = Config(get_config_filename(args)) rest_port = nni_config.get_config() rest_pid = nni_config.get_config() if not detect_process(rest_pid): print_error() return running, response = check_rest_server_quick(rest_port) if running: response = rest_get(trial_jobs_url(rest_port), 20) if response is not None and check_response(response): content = json.loads(response.text) records = parse_trial_data(content) if args.type == : json_records = [] for trial in records: value = trial.pop(, None) trial_id = trial.pop(, None) json_records.append({: trial, : value, : trial_id}) with open(args.path, ) as file: if args.type == : writer = csv.DictWriter(file, set.union(*[set(r.keys()) for r in records])) writer.writeheader() writer.writerows(records) else: json.dump(json_records, file) else: print_error() else: print_error()
export experiment metadata to csv
13,672
def reopen(self, file_obj): file_obj.open() if sys.version_info[0] <= 2: return file_obj else: return codecs.getreader()(file_obj)
Reopen the file-like object in a safe manner.
13,673
def p2x(self, p): if hasattr(p, ): dp = BufferDict(p, keys=self.g.keys())._buf[:self.meanflat.size] - self.meanflat else: dp = numpy.asarray(p).reshape(-1) - self.meanflat return self.vec_isig.dot(dp)
Map parameters ``p`` to vector in x-space. x-space is a vector space of dimension ``p.size``. Its axes are in the directions specified by the eigenvectors of ``p``'s covariance matrix, and distance along an axis is in units of the standard deviation in that direction.
13,674
def checkfilesexist(self, on_missing="warn"): if on_missing not in ("warn", "error", "ignore"): raise ValueError("on_missing must be \"warn\", \"error\", or \"ignore\".") c_found = [] c_missed = [] for entry in self: if os.path.isfile(entry.path): c_found.append(entry) else: c_missed.append(entry) if len(c_missed) > 0: msg = "%d of %d files in the cache were not found "\ "on disk" % (len(c_missed), len(self)) if on_missing == "warn": print >>sys.stderr, "warning: " + msg elif on_missing == "error": raise ValueError(msg) elif on_missing == "ignore": pass else: raise ValueError("Why am I here? "\ "Please file a bug report!") return self.__class__(c_found), self.__class__(c_missed)
Runs through the entries of the Cache() object and checks each entry if the file which it points to exists or not. If the file does exist then it adds the entry to the Cache() object containing found files, otherwise it adds the entry to the Cache() object containing all entries that are missing. It returns both in the follwing order: Cache_Found, Cache_Missed. Pass on_missing to control how missing files are handled: "warn": print a warning message saying how many files are missing out of the total checked. "error": raise an exception if any are missing "ignore": do nothing
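Usage sketch for a populated Cache instance:

found, missed = cache.checkfilesexist(on_missing="warn")
# both are new Cache objects; with on_missing="error" a ValueError is raised
# instead when any referenced file is missing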
13,675
def _match_vcs_scheme(url):
    from pipenv.patched.notpip._internal.vcs import VcsSupport
    for scheme in VcsSupport.schemes:
        if url.lower().startswith(scheme) and url[len(scheme)] in '+:':
            return scheme
    return None
Look for VCS schemes in the URL. Returns the matched VCS scheme, or None if there's no match.
13,676
def query(self, query_samples): self.sampled_topics = np.zeros((self.samples, self.N), dtype=np.int) for s in range(self.samples): self.sampled_topics[s, :] = \ samplers_lda.sampler_query(self.docid, self.tokens, self.topic_seed, np.ascontiguousarray( self.tt[:, :, s], dtype=np.float), self.N, self.K, self.D, self.alpha, query_samples) print("Sample %d queried" % s) self.dt = np.zeros((self.D, self.K, self.samples)) for s in range(self.samples): self.dt[:, :, s] = \ samplers_lda.dt_comp(self.docid, self.sampled_topics[s, :], self.N, self.K, self.D, self.alpha)
Query docs with query_samples number of Gibbs sampling iterations.
13,677
def layout_(self, chart_objs, cols=3):
    try:
        return hv.Layout(chart_objs).cols(cols)
    except Exception as e:
        self.err(e, self.layout_, "Can not build layout")
Returns a Holoview Layout from chart objects
13,678
def is_suitable(self, request): if self.key_type: validation = KEY_TYPE_VALIDATIONS.get( self.get_type() ) return validation( request ) if validation else None return True
Checks if key is suitable for given request according to key type and request's user agent.
13,679
def get_diff(self, commit, other_commit):
    print(other_commit, "VS", commit)
    diff = self.repo.git.diff(commit, other_commit)
    return Diff(diff).get_totals()
Calculates total additions and deletions :param commit: First commit :param other_commit: Second commit :return: dictionary: Dictionary with total additions and deletions
13,680
def generate_semantic_data_key(used_semantic_keys):
    semantic_data_id_counter = -1
    while True:
        semantic_data_id_counter += 1
        if "semantic data key " + str(semantic_data_id_counter) not in used_semantic_keys:
            break
    return "semantic data key " + str(semantic_data_id_counter)
Create a new and unique semantic data key :param list used_semantic_keys: Handed list of keys already in use :rtype: str :return: semantic_data_id
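Quick illustration of the behaviour of the function above:

generate_semantic_data_key([])
# -> "semantic data key 0"
generate_semantic_data_key(["semantic data key 0", "semantic data key 1"])
# -> "semantic data key 2"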
13,681
def _parse_prop(self, dd, row): key = row[] if key.startswith(): deprecated = True else: deprecated = False v = dd.get(key) _value = self._get_value(row) if not v: v = dd.setdefault(key, {}) v[_value] = deprecated else: if not _value in v: v[_value] = deprecated
:param dd: datadict :param _row: (tablename, row) :return:
13,682
def get_sdc_by_name(self, name):
    for sdc in self.sdc:
        if sdc.name == name:
            return sdc
    raise KeyError("SDC of that name not found")
Get ScaleIO SDC object by its name :param name: Name of SDC :return: ScaleIO SDC object :raise KeyError: No SDC with specified name found :rtype: SDC object
13,683
def unregister_finders():
    global __PREVIOUS_FINDER
    if not __PREVIOUS_FINDER:
        return
    pkg_resources.register_finder(zipimport.zipimporter, __PREVIOUS_FINDER)
    _remove_finder(pkgutil.ImpImporter, find_wheels_on_path)
    if importlib_machinery is not None:
        _remove_finder(importlib_machinery.FileFinder, find_wheels_on_path)
    __PREVIOUS_FINDER = None
Unregister finders necessary for PEX to function properly.
13,684
def update_log(self, *args, **kwargs): return Log( self._provider_manager, self._get_provider_session().update_log(*args, **kwargs), self._runtime, self._proxy)
Pass through to provider LogAdminSession.update_log
13,685
def QA_fetch_financial_report_adv(code, start, end=None, ltype=): if end is None: return QA_DataStruct_Financial(QA_fetch_financial_report(code, start, ltype=ltype)) else: series = pd.Series( data=month_data, index=pd.to_datetime(month_data), name=) timerange = series.loc[start:end].tolist() return QA_DataStruct_Financial(QA_fetch_financial_report(code, timerange, ltype=ltype))
Advanced financial report query interface. Arguments: code {[type]} -- [description] start {[type]} -- [description] Keyword Arguments: end {[type]} -- [description] (default: {None})
13,686
def get_metrics(self, reset: bool = False) -> Dict[str, float]: return { 'dpd_acc': self._action_sequence_accuracy.get_metric(reset), 'denotation_acc': self._denotation_accuracy.get_metric(reset), 'lf_percent': self._has_logical_form.get_metric(reset), }
We track three metrics here: 1. dpd_acc, which is the percentage of the time that our best output action sequence is in the set of action sequences provided by DPD. This is an easy-to-compute lower bound on denotation accuracy for the set of examples where we actually have DPD output. We only score dpd_acc on that subset. 2. denotation_acc, which is the percentage of examples where we get the correct denotation. This is the typical "accuracy" metric, and it is what you should usually report in an experimental result. You need to be careful, though, that you're computing this on the full data, and not just the subset that has DPD output (make sure you pass "keep_if_no_dpd=True" to the dataset reader, which we do for validation data, but not training data). 3. lf_percent, which is the percentage of time that decoding actually produces a finished logical form. We might not produce a valid logical form if the decoder gets into a repetitive loop, or we're trying to produce a super long logical form and run out of time steps, or something.
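A small, hypothetical consumer of the returned dict (the key names are the ones listed above; `model` stands in for an instance of the parser class):
metrics = model.get_metrics(reset=True)
print("dpd_acc={:.3f}  denotation_acc={:.3f}  lf_percent={:.3f}".format(
    metrics['dpd_acc'], metrics['denotation_acc'], metrics['lf_percent']))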
13,687
def add_to_obj(obj, dictionary, objs=None, exceptions=None, verbose=0): if exceptions is None: exceptions = [] for item in dictionary: if item in exceptions: continue if dictionary[item] is not None: if verbose: print("process: ", item, dictionary[item]) key, value = get_key_value(dictionary[item], objs, key=item) if verbose: print("assign: ", key, value) try: setattr(obj, key, value) except AttributeError: raise AttributeError("Can't set {0}={1} on object: {2}".format(key, value, obj))
Cycles through a dictionary and adds the key-value pairs to an object. :param obj: :param dictionary: :param objs: :param exceptions: :param verbose: :return:
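A hedged usage sketch, assuming the helper `get_key_value` (not shown here) passes simple values through unchanged:
class Settings:
    pass

obj = Settings()
params = {"height": 2.5, "name": "wall_1", "comment": None}
# "comment" is excluded via `exceptions`; None values are skipped anyway.
add_to_obj(obj, params, exceptions=["comment"], verbose=1)
print(obj.height, obj.name)  # -> 2.5 wall_1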
13,688
def set_field(self, field_name, data): if self.handle is None: raise Exception("Cannot set %s before construct dataset" % field_name) if data is None: _safe_call(_LIB.LGBM_DatasetSetField( self.handle, c_str(field_name), None, ctypes.c_int(0), ctypes.c_int(FIELD_TYPE_MAPPER[field_name]))) return self dtype = np.float32 if field_name == 'group': dtype = np.int32 elif field_name == 'init_score': dtype = np.float64 data = list_to_1d_numpy(data, dtype, name=field_name) if data.dtype == np.float32 or data.dtype == np.float64: ptr_data, type_data, _ = c_float_array(data) elif data.dtype == np.int32: ptr_data, type_data, _ = c_int_array(data) else: raise TypeError("Expected np.float32/64 or np.int32, got type({})".format(data.dtype)) if type_data != FIELD_TYPE_MAPPER[field_name]: raise TypeError("Input type error for set_field") _safe_call(_LIB.LGBM_DatasetSetField( self.handle, c_str(field_name), ptr_data, ctypes.c_int(len(data)), ctypes.c_int(type_data))) return self
Set property into the Dataset. Parameters ---------- field_name : string The field name of the information. data : list, numpy 1-D array, pandas Series or None The array of data to be set. Returns ------- self : Dataset Dataset with set property.
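A short usage sketch, assuming lightgbm and numpy are installed; the Dataset must be constructed before fields can be set:
import numpy as np
import lightgbm as lgb

data = np.random.rand(100, 5)
train = lgb.Dataset(data, label=np.random.randint(0, 2, size=100))
train.construct()                          # set_field needs a live dataset handle
train.set_field('weight', np.ones(100))    # coerced to float32 internally
print(train.get_field('weight')[:3])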
13,689
def run(self): stderr = os.path.abspath(os.path.join(self.outdir, self.name + )) if self.pipe: self.args += (, self.pipe, +stderr) if self.gzip: self.args += (, , , self.gzip) else: self.args.append(+stderr) self.args.append(+stderr) log = open(stderr, ) log.write("[gloTK] timestamp={}\n".format(utils.timestamp())) cmd = ' '.join(map(str, self.args)) print(cmd) log.write(cmd) start = time.time() save_cwd = os.getcwd() try: utils.safe_mkdir(self.outdir) os.chdir(self.outdir) spawn_pid = os.spawnle(os.P_NOWAIT, self.shell, self.shell, '-c', cmd, self.env) wait_pid, retcode, rusage = os.wait4(spawn_pid, 0) if wait_pid != spawn_pid: utils.die("could not wait for process %d: got %d" % (spawn_pid, wait_pid)) os.chdir(save_cwd) except OSError as e: utils.info(e) utils.die("could not run wrapper for command:\n%s" % cmd) elapsed = time.time() - start retcode = os.WEXITSTATUS(retcode) if (self.return_ok is not None) and (self.return_ok != retcode): if os.path.isfile(stderr): subprocess.call([, , stderr]) utils.die("non-zero return (%d) from command:\n%s" % (retcode, cmd)) log.close()
Call this function at the end of your class's `__init__` function.
13,690
def hold(model: Model, reducer: Optional[Callable] = None) -> Iterator[list]: if not isinstance(model, Model): raise TypeError("Expected a Model, not %r." % model) events = [] restore = model.__dict__.get("_notify_model_views") model._notify_model_views = lambda e: events.extend(e) try: yield events finally: if restore is None: del model._notify_model_views else: model._notify_model_views = restore events = tuple(events) if reducer is not None: events = tuple(map(Data, reducer(model, events))) model._notify_model_views(events)
Temporarily withhold change events in a modifiable list. All changes that are captured within a "hold" context are forwarded to a list which is yielded to the user before being sent to views of the given ``model``. If desired, the user may modify the list of events before the context is left in order to change the events that are ultimately sent to the model's views. Parameters: model: The model object whose change events will be temporarily withheld. reducer: A function for modifying the events list at the end of the context. Its signature is ``(model, events) -> new_events`` where ``model`` is the given model, ``events`` is the complete list of events produced in the context, and the returned ``new_events`` is a list of events that will actually be distributed to views. Notes: All changes withheld from views will be sent as a single notification. For example, if you view a :class:`spectate.mvc.models.List` and its ``append()`` method is called three times within a :func:`hold` context, views receive one notification containing all three change events. Examples: Note how the event from ``l.append(1)`` is omitted from the printed statements. .. code-block:: python from spectate import mvc l = mvc.List() mvc.view(l, lambda l, e: list(map(print, e))) with mvc.hold(l) as events: l.append(1) l.append(2) del events[0] .. code-block:: text {'index': 1, 'old': Undefined, 'new': 2}
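Building on the docstring's own example, a hedged sketch of the `reducer` hook, which trims the event list before it reaches the views:
from spectate import mvc

def last_event_only(model, events):
    # keep only the final change event produced inside the hold block
    return events[-1:]

items = mvc.List()
mvc.view(items, lambda model, events: list(map(print, events)))

with mvc.hold(items, reducer=last_event_only):
    items.append(1)
    items.append(2)
    items.append(3)
# only the event for items.append(3) is printed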
13,691
def _parse(self, stream, context, path): objs = [] while True: start = stream.tell() test = stream.read(len(self.find)) stream.seek(start) if test == self.find: break else: subobj = self.subcon._parse(stream, context, path) objs.append(subobj) return objs
Parse until a given byte string is found.
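The loop above is generic "read until a sentinel" logic; a self-contained sketch of the same idea over an in-memory stream, where a single-byte read stands in for the subconstruct's parse call:
import io

def parse_until(stream, find):
    objs = []
    while True:
        start = stream.tell()
        test = stream.read(len(find))
        stream.seek(start)
        if test == find:
            break
        objs.append(stream.read(1))   # stand-in for self.subcon._parse(...)
    return objs

print(parse_until(io.BytesIO(b"abc\x00\x00tail"), b"\x00\x00"))  # [b'a', b'b', b'c']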
13,692
def dwelling_type(self): try: if self._data_from_search: info = self._data_from_search.find( , {"class": "info"}).text s = info.split() return s[0].strip() else: return self._ad_page_content.find( , {: } ).find(, {: }).text except Exception as e: if self._debug: logging.error( "Error getting dwelling_type. Error message: " + e.args[0]) return
This method returns the dwelling type. :return:
13,693
def _ppf(self, uloc, dist, length, cache): output = evaluation.evaluate_inverse( dist, uloc.reshape(1, -1)).reshape(length, -1) assert uloc.shape == output.shape return output
Point percentile function. Example: >>> print(chaospy.Iid(chaospy.Uniform(0, 2), 2).inv( ... [[0.1, 0.2, 0.3], [0.2, 0.2, 0.3]])) [[0.2 0.4 0.6] [0.4 0.4 0.6]]
13,694
def unzoom(self, full=False, delay_draw=False): if full: self.zoom_lims = self.zoom_lims[:1] self.zoom_lims = [] elif len(self.zoom_lims) > 0: self.zoom_lims.pop() self.set_viewlimits() if not delay_draw: self.canvas.draw()
unzoom display 1 level or all the way
13,695
async def pin_6_pwm_128(my_board): await my_board.set_pin_mode(6, Constants.PWM) await my_board.analog_write(6, 128) await asyncio.sleep(3) await my_board.shutdown()
Set digital pin 6 as a PWM output and set its output value to 128 @param my_board: A PymataCore instance @return: No Return Value
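A hedged sketch of driving the coroutine; constructing and starting the PymataCore board is assumed to have happened already (the exact startup call varies by pymata_aio version):
import asyncio

loop = asyncio.get_event_loop()
loop.run_until_complete(pin_6_pwm_128(my_board))  # my_board: an initialised PymataCore instance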
13,696
def prep_vectors_for_gradient(nest_coefs, index_coefs, design, choice_vec, rows_to_obs, rows_to_nests, *args, **kwargs): long_nest_params = (rows_to_nests.multiply(nest_coefs[None, :]) .sum(axis=1) .A .ravel()) scaled_y = choice_vec / long_nest_params inf_index = np.isinf(scaled_y) scaled_y[inf_index] = max_comp_value p_tilde_row_given_nest = (prob_dict["prob_given_nest"] * long_chosen_nest / long_nest_params) inf_index = np.isinf(p_tilde_row_given_nest) p_tilde_row_given_nest[inf_index] = max_comp_value desired_arrays = {} desired_arrays["long_nest_params"] = long_nest_params.ravel() desired_arrays["scaled_y"] = scaled_y.ravel() desired_arrays["long_chosen_nest"] = long_chosen_nest desired_arrays["obs_to_chosen_nests"] = obs_to_chosen_nests desired_arrays["p_tilde_given_nest"] = p_tilde_row_given_nest desired_arrays["long_probs"] = prob_dict["long_probs"] desired_arrays["prob_given_nest"] = prob_dict["prob_given_nest"] desired_arrays["nest_choice_probs"] = prob_dict["nest_choice_probs"] desired_arrays["ind_sums_per_nest"] = prob_dict["ind_sums_per_nest"] return desired_arrays
Parameters ---------- nest_coefs : 1D or 2D ndarray. All elements should be ints, floats, or longs. If 1D, should have 1 element for each nesting coefficient being estimated. If 2D, should have 1 column for each set of nesting coefficients being used to predict the probabilities of each alternative being chosen. There should be one row per nesting coefficient. Elements denote the inverse of the scale coefficients for each of the lower level nests. Note, this is NOT THE LOGIT of the inverse of the scale coefficients. index_coefs : 1D or 2D ndarray. All elements should be ints, floats, or longs. If 1D, should have 1 element for each utility coefficient being estimated (i.e. num_features). If 2D, should have 1 column for each set of coefficients being used to predict the probabilities of choosing each alternative. There should be one row per index coefficient. design : 2D ndarray. There should be one row per observation per available alternative. There should be one column per utility coefficient being estimated. All elements should be ints, floats, or longs. choice_vec : 1D ndarray. All elements should be ints, floats, or longs. Each element represents whether the individual associated with the given row chose the alternative associated with the given row. Should have the same number of rows as `design`. rows_to_obs : 2D scipy sparse array. There should be one row per observation per available alternative and one column per observation. This matrix maps the rows of the design matrix to the unique observations (on the columns). rows_to_nests : 2D scipy sparse array. There should be one row per observation per available alternative and one column per nest. This matrix maps the rows of the design matrix to the unique nests (on the columns). Returns ------- desired_arrays : dict. Will contain the arrays necessary for calculating the gradient of the nested logit log-likelihood. The keys will be: `["long_nest_params", "scaled_y", "long_chosen_nest", "obs_to_chosen_nests", "p_tilde_given_nest", "long_probs", "prob_given_nest", "nest_choice_probs", "ind_sums_per_nest"]`
13,697
def enter_cli_mode(self): delay_factor = self.select_delay_factor(delay_factor=0) count = 0 cur_prompt = "" while count < 50: self.write_channel(self.RETURN) time.sleep(0.1 * delay_factor) cur_prompt = self.read_channel() if re.search(r"admin@", cur_prompt) or re.search( r"^\$$", cur_prompt.strip() ): self.write_channel("cli" + self.RETURN) time.sleep(0.3 * delay_factor) self.clear_buffer() break elif ">" in cur_prompt or "%" in cur_prompt: break count += 1
Check if at shell prompt root@ and go into CLI.
13,698
def _insert_breathe_configs(c, *, project_name, doxygen_xml_dirname): if doxygen_xml_dirname is not None: c['breathe_projects'] = {project_name: doxygen_xml_dirname} c['breathe_default_project'] = project_name return c
Add breathe extension configurations to the state.
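A hedged example of its effect on a Sphinx config dict; the project name and XML path below are made up:
c = {}
c = _insert_breathe_configs(c, project_name='myproject',
                            doxygen_xml_dirname='_doxygen/xml')
# c == {'breathe_projects': {'myproject': '_doxygen/xml'},
#       'breathe_default_project': 'myproject'}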
13,699
def make_model(self, grounding_ontology='UN', grounding_threshold=None): if grounding_threshold is not None: self.grounding_threshold = grounding_threshold self.grounding_ontology = grounding_ontology statements = [stmt for stmt in self.statements if isinstance(stmt, Influence)] self.CAG = nx.MultiDiGraph() for s in statements: has_both_polarity = (s.subj.delta['polarity'] is not None and s.obj.delta['polarity'] is not None) for node, delta in zip((s.subj.concept, s.obj.concept), (s.subj.delta, s.obj.delta)): self.CAG.add_node(self._node_name(node), simulable=has_both_polarity, mods=delta['adjectives']) linestyle = 'solid' if has_both_polarity else 'dotted' if has_both_polarity: same_polarity = (s.subj.delta['polarity'] == s.obj.delta['polarity']) if same_polarity: target_arrow_shape, linecolor = (, ) else: target_arrow_shape, linecolor = (, ) else: target_arrow_shape, linecolor = (, ) provenance = [] if s.evidence: provenance = s.evidence[0].annotations.get('provenance', []) if provenance: provenance[0]['text'] = s.evidence[0].text self.CAG.add_edge( self._node_name(s.subj.concept), self._node_name(s.obj.concept), subj_polarity=s.subj.delta['polarity'], subj_adjectives=s.subj.delta['adjectives'], obj_polarity=s.obj.delta['polarity'], obj_adjectives=s.obj.delta['adjectives'], linestyle=linestyle, linecolor=linecolor, targetArrowShape=target_arrow_shape, provenance=provenance, ) return self.CAG
Return a networkx MultiDiGraph representing a causal analysis graph. Parameters ---------- grounding_ontology : Optional[str] The ontology from which the grounding should be taken (e.g. UN, FAO) grounding_threshold : Optional[float] Minimum threshold score for Eidos grounding. Returns ------- nx.MultiDiGraph The assembled CAG.
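A hedged usage sketch; the assembler class name and the way `statements` were produced are assumptions not shown in the snippet:
assembler = CAGAssembler(statements)   # hypothetical: constructed with Influence statements
cag = assembler.make_model(grounding_ontology='UN', grounding_threshold=0.7)
print(cag.number_of_nodes(), "nodes,", cag.number_of_edges(), "edges")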