Columns: Unnamed: 0 (int64, 0 to 389k); code (string, lengths 26 to 79.6k); docstring (string, lengths 1 to 46.9k)
378,100
def initializePhase(self, features, targets): assert features.shape[0] == targets.shape[0] assert features.shape[1] == self.inputs assert targets.shape[1] == self.outputs self.inputWeights = np.random.random((self.numHiddenNeurons, self.inputs)) self.inputWeights = self.inputWeights * 2 - 1 if self.activationFunction == "sig": self.bias = np.random.random((1, self.numHiddenNeurons)) * 2 - 1 else: print(" Unknown activation function type") raise NotImplementedError H0 = self.calculateHiddenLayerActivation(features) self.M = pinv(np.dot(np.transpose(H0), H0)) self.beta = np.dot(pinv(H0), targets)
Step 1: Initialization phase :param features: feature matrix with dimension (numSamples, numInputs) :param targets: target matrix with dimension (numSamples, numOutputs)
378,101
def append(self, other): try: return numpy.append(self, other).view(type=self.__class__) except TypeError: str_fields = [name for name in self.fieldnames if _isstring(self.dtype[name])] new_strlens = dict( [[name, max(self.dtype[name].itemsize, other.dtype[name].itemsize)] for name in str_fields] ) new_dt = [] for dt in self.dtype.descr: name = dt[0] if name in new_strlens: dt = (name, self.dtype[name].type, new_strlens[name]) new_dt.append(dt) new_dt = numpy.dtype(new_dt) return numpy.append( self.astype(new_dt), other.astype(new_dt) ).view(type=self.__class__)
Appends another array to this array. The returned array will have all of the class methods and virtual fields of this array, including any that were added using `add_method` or `add_virtualfield`. If this array and the other array have one or more string fields, the dtype for those fields is updated to a string length that can encompass the longest string in both arrays. .. note:: Increasing the length of strings only works for fields, not sub-fields. Parameters ---------- other : array The array to append values from. It must have the same fields and dtype as this array, modulo the length of strings. If the other array does not have the same dtype, a TypeError is raised. Returns ------- array An array with the other's values appended to this array's values. The returned array is an instance of the same class as this array, including all methods and virtual fields.
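A minimal sketch of the string-widening idea on plain numpy structured arrays; the field names and values below are made up for illustration.

import numpy as np

# Two structured arrays whose string field lengths differ; the narrower
# dtype is widened before appending so no values are truncated.
a = np.array([(1, "cat")], dtype=[("id", "i4"), ("name", "U3")])
b = np.array([(2, "giraffe")], dtype=[("id", "i4"), ("name", "U7")])

widened = np.dtype([("id", "i4"), ("name", "U7")])
merged = np.append(a.astype(widened), b.astype(widened))
print(merged["name"])  # ['cat' 'giraffe']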
378,102
def __update(self, row): expr = self.__table.update().values(row) for key in self.__update_keys: expr = expr.where(getattr(self.__table.c, key) == row[key]) if self.__autoincrement: expr = expr.returning(getattr(self.__table.c, self.__autoincrement)) res = expr.execute() if res.rowcount > 0: if self.__autoincrement: first = next(iter(res)) last_row_id = first[0] return last_row_id return 0 return None
Update rows in table
378,103
def chdir(self, path=None): if path is None: self._cwd = None return if not stat.S_ISDIR(self.stat(path).st_mode): raise SFTPError(errno.ENOTDIR, "%s: %s" % (os.strerror(errno.ENOTDIR), path)) self._cwd = b(self.normalize(path))
Change the "current directory" of this SFTP session. Since SFTP doesn't really have the concept of a current working directory, this is emulated by Paramiko. Once you use this method to set a working directory, all operations on this `.SFTPClient` object will be relative to that path. You can pass in ``None`` to stop using a current working directory. :param str path: new current working directory :raises IOError: if the requested path doesn't exist on the server .. versionadded:: 1.4
378,104
def _delete_iapp(self, iapp_name, deploying_device): iapp = deploying_device.tm.sys.application iapp_serv = iapp.services.service.load( name=iapp_name, partition=self.partition ) iapp_serv.delete() iapp_tmpl = iapp.templates.template.load( name=iapp_name, partition=self.partition ) iapp_tmpl.delete()
Delete an iapp service and template on the root device. :param iapp_name: str -- name of iapp :param deploying_device: ManagementRoot object -- device where the iapp will be deleted
378,105
def get_first_pos_of_char(char, string): first_pos = -1 pos = len(string) while pos > 0: pos = string[:pos].rfind(char) if pos == -1: return first_pos num_backslashes = 0 test_index = pos - 1 while test_index >= 0 and string[test_index] == "\\": num_backslashes += 1 test_index -= 1 if num_backslashes % 2 == 0: first_pos = pos return first_pos
:param char: The character to find :type char: string :param string: The string in which to search for *char* :type string: string :returns: Index in *string* where *char* first appears (unescaped by a preceding "\\"), -1 if not found :rtype: int Finds the first occurrence of *char* in *string* in which *char* is not present as an escaped character.
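A small behavioural check, assuming the function above is importable as get_first_pos_of_char: in the string below, the quote at index 2 is escaped by the preceding backslash, so the first unescaped quote is at index 4.

s = r'a\"b"c'
assert get_first_pos_of_char('"', s) == 4   # index 2 is skipped: it is escaped
assert get_first_pos_of_char('x', s) == -1  # character not present at all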
378,106
def _wrpy_ncbi_gene_nts(fout_py, geneid2nt, log): num_genes = len(geneid2nt) with open(fout_py, ) as ofstrm: docstr = "Data downloaded from NCBI Gene converted into Python namedtuples." ofstrm.write(.format(PYDOC=docstr)) ofstrm.write("from collections import namedtuple\n\n") ofstrm.write(.format( DATE=re.sub(, , str(datetime.date.today())))) ofstrm.write(.format(N=num_genes)) ntd = next(iter(geneid2nt.values())) ofstrm.write(" ofstrm.write("{NtName} = namedtuple(, )\n\n".format( NtName=type(ntd).__name__, FLDS=.join(ntd._fields))) ofstrm.write("GENEID2NT = {{ for geneid, ntd in sorted(geneid2nt.items(), key=lambda t: t[0]): ofstrm.write(" {GeneID} : {NT},\n".format(GeneID=geneid, NT=ntd)) ofstrm.write("}\n") log.write(" {N:9} geneids WROTE: {PY}\n".format(N=num_genes, PY=fout_py))
Write namedtuples to a dict in a Python module.
378,107
def printc(cls, txt, color=colors.red): print(cls.color_txt(txt, color))
Print in color.
378,108
def next(self): try: self.event, self.element = next(self.iterator) self.elementTag = clearTag(self.element.tag) except StopIteration: clearParsedElements(self.element) raise StopIteration return self.event, self.element, self.elementTag
#TODO: docstring :returns: #TODO: docstring
378,109
def _siftdown_max(heap, startpos, pos): newitem = heap[pos] while pos > startpos: parentpos = (pos - 1) >> 1 parent = heap[parentpos] if parent < newitem: heap[pos] = parent pos = parentpos continue break heap[pos] = newitem
Maxheap variant of _siftdown
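A minimal usage sketch, assuming _siftdown_max above is in scope: append a new value to the end of a valid max-heap and let it bubble up toward the root.

heap = [9, 5, 7, 1, 3]                  # a valid max-heap
heap.append(8)                          # new item sits at the last index
_siftdown_max(heap, 0, len(heap) - 1)   # restore the heap invariant
print(heap)                             # [9, 5, 8, 1, 3, 7] -- 8 rose above 7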
378,110
def list_subgroups_global(self, id): path = {} data = {} params = {} path["id"] = id self.logger.debug("GET /api/v1/global/outcome_groups/{id}/subgroups with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("GET", "/api/v1/global/outcome_groups/{id}/subgroups".format(**path), data=data, params=params, all_pages=True)
List subgroups. List the immediate OutcomeGroup children of the outcome group. Paginated.
378,111
def getUsers(context, roles, allow_empty=True): mtool = getToolByName(context, ) pairs = allow_empty and [[, ]] or [] users = mtool.searchForMembers(roles=roles) for user in users: uid = user.getId() fullname = user.getProperty() if not fullname: fullname = uid pairs.append((uid, fullname)) pairs.sort(lambda x, y: cmp(x[1], y[1])) return DisplayList(pairs)
Present a DisplayList containing users in the specified list of roles
378,112
def get_data_info(self): Data_info = {} data_er_samples = {} data_er_sites = {} data_er_locations = {} data_er_ages = {} if self.data_model == 3.0: print(("data model: %1.1f" % (self.data_model))) Data_info["er_samples"] = [] Data_info["er_sites"] = [] Data_info["er_locations"] = [] Data_info["er_ages"] = [] try: data_er_sites = self.read_magic_file( os.path.join(self.WD, "er_sites.txt"), ) except: print("-W- Caner_location_namet find er_locations.txt in project directory") try: data_er_ages = self.read_magic_file( os.path.join(self.WD, "er_ages.txt"), ) except: try: data_er_ages = self.read_magic_file( os.path.join(self.WD, "er_ages.txt"), ) except: print("-W- Can't find er_ages in project directory") Data_info["er_samples"] = data_er_samples Data_info["er_sites"] = data_er_sites Data_info["er_locations"] = data_er_locations Data_info["er_ages"] = data_er_ages return(Data_info)
Imports er tables and places data into the Data_info data structure outlined below: Data_info - {er_samples: {er_samples.txt info} er_sites: {er_sites.txt info} er_locations: {er_locations.txt info} er_ages: {er_ages.txt info}}
378,113
def get_sideplot_ranges(plot, element, main, ranges): key = plot.current_key dims = element.dimensions() dim = dims[0] if in dims[1].name else dims[1] range_item = main if isinstance(main, HoloMap): if issubclass(main.type, CompositeOverlay): range_item = [hm for hm in main._split_overlays()[1] if dim in hm.dimensions()][0] else: range_item = HoloMap({0: main}, kdims=[]) ranges = match_spec(range_item.last, ranges) if dim.name in ranges: main_range = ranges[dim.name][] else: framewise = plot.lookup_options(range_item.last, ).options.get() if framewise and range_item.get(key, False): main_range = range_item[key].range(dim) else: main_range = range_item.range(dim) if isinstance(range_item, HoloMap): range_item = range_item.last if isinstance(range_item, CompositeOverlay): range_item = [ov for ov in range_item if dim in ov.dimensions()][0] return range_item, main_range, dim
Utility to find the range for an adjoined plot given the plot, the element, the Element the plot is adjoined to and the dictionary of ranges.
378,114
def _checkremove_que(self, word): in_que_pass_list = False que_pass_list = [, , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , , ] if word not in que_pass_list: word = re.sub(r, , word) else: in_que_pass_list = True return word, in_que_pass_list
If word ends in -que and if word is not in pass list, strip -que
378,115
def streamify(self, state, frame): pieces = frame.split(self.prefix) return % (self.prefix, self.begin, (self.prefix + self.nop).join(pieces), self.prefix, self.end)
Prepare frame for output as a byte-stuffed stream.
378,116
def stop_gradient(self, stop_layers, bigdl_type="float"): callBigDlFunc(bigdl_type, "setStopGradient", self.value, stop_layers) return self
Stop the input gradient of layers that match the given ```names```. Their input gradients are not computed, and they will not contribute to the input gradient computation of layers that depend on them. :param stop_layers: an array of layer names :param bigdl_type: :return:
378,117
def format_tb(tb=None, limit=None, allLocals=None, allGlobals=None, withTitle=False, with_color=None, with_vars=None): color = Color(enable=with_color) output = _Output(color=color) def format_filename(s): base = os.path.basename(s) return ( color( + s[:-len(base)], color.fg_colors[2]) + color(base, color.fg_colors[2], bold=True) + color(, color.fg_colors[2])) format_py_obj = output.pretty_print if tb is None: try: tb = get_current_frame() assert tb except Exception: output(color("format_tb: tb is None and sys._getframe() failed", color.fg_colors[1], bold=True)) return output.lines def is_stack_summary(_tb): return isinstance(_tb, StackSummary) isframe = inspect.isframe if withTitle: if isframe(tb) or is_stack_summary(tb): output(color(, color.fg_colors[0])) else: output(color(, color.fg_colors[0])) if with_vars is None and is_at_exit(): with_vars = False if withTitle: output("(Exclude vars because we are exiting.)") if with_vars is None: if any([f.f_code.co_name == "__del__" for f in iter_traceback()]): with_vars = False if withTitle: output("(Exclude vars because we are on a GC stack.)") if with_vars is None: with_vars = True try: if limit is None: if hasattr(sys, ): limit = sys.tracebacklimit n = 0 _tb = tb class NotFound(Exception): def _resolve_identifier(namespace, keys): if keys[0] not in namespace: raise NotFound() obj = namespace[keys[0]] for part in keys[1:]: obj = getattr(obj, part) return obj def _try_set(old, prefix, func): if old is not None: return old try: return add_indent_lines(prefix, func()) except NotFound: return old except Exception as e: return prefix + "!" + e.__class__.__name__ + ": " + str(e) while _tb is not None and (limit is None or n < limit): if isframe(_tb): f = _tb elif is_stack_summary(_tb): if isinstance(_tb[0], ExtendedFrameSummary): f = _tb[0].tb_frame else: f = DummyFrame.from_frame_summary(_tb[0]) else: f = _tb.tb_frame if allLocals is not None: allLocals.update(f.f_locals) if allGlobals is not None: allGlobals.update(f.f_globals) if hasattr(_tb, "tb_lineno"): lineno = _tb.tb_lineno elif is_stack_summary(_tb): lineno = _tb[0].lineno else: lineno = f.f_lineno co = f.f_code filename = co.co_filename name = co.co_name file_descr = "".join([ , color("File ", color.fg_colors[0], bold=True), format_filename(filename), ", ", color("line ", color.fg_colors[0]), color("%d" % lineno, color.fg_colors[4]), ", ", color("in ", color.fg_colors[0]), name]) with output.fold_text_ctx(file_descr): if not os.path.isfile(filename): alt_fn = fallback_findfile(filename) if alt_fn: output( color(" -- couldn locals: %s -- code not available --', color.fg_colors[0])) if isframe(_tb): _tb = _tb.f_back elif is_stack_summary(_tb): _tb = StackSummary.from_list(_tb[1:]) if not _tb: _tb = None else: _tb = _tb.tb_next n += 1 except Exception: output(color("ERROR: cannot get more detailed exception info because:", color.fg_colors[1], bold=True)) import traceback for l in traceback.format_exc().split("\n"): output(" " + l) return output.lines
:param types.TracebackType|types.FrameType|StackSummary tb: traceback. if None, will use sys._getframe :param int|None limit: limit the traceback to this number of frames. by default, will look at sys.tracebacklimit :param dict[str]|None allLocals: if set, will update it with all locals from all frames :param dict[str]|None allGlobals: if set, will update it with all globals from all frames :param bool withTitle: :param bool|None with_color: output with ANSI escape codes for color :param bool with_vars: will print var content which are referenced in the source code line. by default enabled. :return: list of strings (line-based) :rtype: list[str]
378,118
def set(self, name, default=0, editable=True, description=""): var, created = ConfigurationVariable.objects.get_or_create(name=name) if created: var.value = default if not editable: var.value = default var.editable = editable var.description = description var.save(reload=False) self.ATTRIBUTES[var.name] = var.value
Define a variable in DB and in memory
378,119
def fnFromDate(self, date): fn = time.strftime(, date) fn = os.path.join(self.basepath, , fn + ".html") fn = os.path.abspath(fn) return fn
Get filename from date.
378,120
def from_node(index, value): try: lines = json.loads(value) except (TypeError, ValueError): lines = None if not isinstance(lines, list): lines = [] return TimelineHistory(index, value, lines)
>>> h = TimelineHistory.from_node(1, 2) >>> h.lines []
378,121
def run_top_task(self, task_name=None, sort=None, **kwargs): if not isinstance(task_name, str): raise Exception("task_name should be string") self._fill_project_info(kwargs) kwargs.update({: }) task = self.db.Task.find_one_and_update(kwargs, {: {: }}, sort=sort) try: if task is None: logging.info("[Database] Find Task FAIL: key: {} sort: {}".format(task_name, sort)) return False else: logging.info("[Database] Find Task SUCCESS: key: {} sort: {}".format(task_name, sort)) _datetime = task[] _script = task[] _id = task[] _hyper_parameters = task[] _saved_result_keys = task[] logging.info(" hyper parameters:") for key in _hyper_parameters: globals()[key] = _hyper_parameters[key] logging.info(" {}: {}".format(key, _hyper_parameters[key])) s = time.time() logging.info("[Database] Start Task: key: {} sort: {} push time: {}".format(task_name, sort, _datetime)) _script = _script.decode() with tf.Graph().as_default(): exec(_script, globals()) _ = self.db.Task.find_one_and_update({: _id}, {: {: }}) __result = {} for _key in _saved_result_keys: logging.info(" result: {}={} {}".format(_key, globals()[_key], type(globals()[_key]))) __result.update({"%s" % _key: globals()[_key]}) _ = self.db.Task.find_one_and_update( { : _id }, {: { : __result }}, return_document=pymongo.ReturnDocument.AFTER ) logging.info( "[Database] Finished Task: task_name - {} sort: {} push time: {} took: {}s". format(task_name, sort, _datetime, time.time() - s) ) return True except Exception as e: exc_type, exc_obj, exc_tb = sys.exc_info() fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1] logging.info("{} {} {} {} {}".format(exc_type, exc_obj, fname, exc_tb.tb_lineno, e)) logging.info("[Database] Fail to run task") _ = self.db.Task.find_one_and_update({: _id}, {: {: }}) return False
Finds and runs a pending task that is first in the sorted list. Parameters ----------- task_name : str The task name. sort : List of tuple PyMongo sort argument, search "PyMongo find one sorting" and `collection level operations <http://api.mongodb.com/python/current/api/pymongo/collection.html>`__ for more details. kwargs : other parameters User-customized parameters such as description, version number. Examples --------- Monitors the database and pulls tasks to run >>> while True: >>> print("waiting task from distributor") >>> db.run_top_task(task_name='mnist', sort=[("time", -1)]) >>> time.sleep(1) Returns -------- boolean : True for success, False for failure.
378,122
def logReload(options): event_handler = Reload(options) observer = Observer() observer.schedule(event_handler, path=, recursive=True) observer.start() try: while True: time.sleep(1) except KeyboardInterrupt: observer.stop() pid = os.getpid() chalk.eraser() chalk.green() os.kill(pid, 15) observer.join() exit()
Encompasses all the logic for reloading the observer.
378,123
def set_log_file_maximum_size(self, logFileMaxSize): assert _is_number(logFileMaxSize), "logFileMaxSize must be a number" logFileMaxSize = float(logFileMaxSize) assert logFileMaxSize>=1, "logFileMaxSize minimum size is 1 megabytes" self.__maxlogFileSize = logFileMaxSize
Set the log file maximum size in megabytes :Parameters: #. logFileMaxSize (number): The maximum size in Megabytes of a logging file. Once exceeded, another logging file as logFileBasename_N.logFileExtension will be created. Where N is an automatically incremented number.
378,124
def create_refresh_token(self, access_token_value): if access_token_value not in self.access_tokens: raise InvalidAccessToken(.format(access_token_value)) if not self.refresh_token_lifetime: logger.debug(, access_token_value) return None refresh_token = rand_str() authz_info = {: access_token_value, : int(time.time()) + self.refresh_token_lifetime} self.refresh_tokens[refresh_token] = authz_info logger.debug(, refresh_token, authz_info[], access_token_value) return refresh_token
Creates a refresh token bound to the specified access token.
378,125
def parse_expmethodresponse(self, tup_tree): raise CIMXMLParseError( _format("Internal Error: Parsing support for element {0!A} is not " "implemented", name(tup_tree)), conn_id=self.conn_id)
This function is not implemented.
378,126
def normalize_uri(u: URI) -> URIRef: return u if isinstance(u, URIRef) else URIRef(str(u))
Return a URIRef for a str or URIRef
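A quick usage sketch, assuming rdflib is installed and normalize_uri above is in scope.

from rdflib import URIRef

u1 = normalize_uri("http://example.org/thing")
u2 = normalize_uri(URIRef("http://example.org/thing"))
assert isinstance(u1, URIRef) and u1 == u2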
378,127
def serialize_raw_master_key_prefix(raw_master_key): if raw_master_key.config.wrapping_key.wrapping_algorithm.encryption_type is EncryptionType.ASYMMETRIC: return to_bytes(raw_master_key.key_id) return struct.pack( ">{}sII".format(len(raw_master_key.key_id)), to_bytes(raw_master_key.key_id), raw_master_key.config.wrapping_key.wrapping_algorithm.algorithm.tag_len * 8, raw_master_key.config.wrapping_key.wrapping_algorithm.algorithm.iv_len, )
Produces the prefix that a RawMasterKey will always use for the key_info value of keys which require additional information. :param raw_master_key: RawMasterKey for which to produce a prefix :type raw_master_key: aws_encryption_sdk.key_providers.raw.RawMasterKey :returns: Serialized key_info prefix :rtype: bytes
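A stand-alone illustration of the same struct.pack layout with made-up values (an 11-byte key id, a 128-bit tag, a 12-byte IV); only the packing call mirrors the code above.

import struct

key_id = b"example-key"                     # hypothetical key id
tag_bits, iv_len = 16 * 8, 12               # hypothetical algorithm parameters
prefix = struct.pack(">{}sII".format(len(key_id)), key_id, tag_bits, iv_len)
assert len(prefix) == len(key_id) + 4 + 4   # key id bytes + two big-endian uint32s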
378,128
def docs(context: Context): try: from sphinx.application import Sphinx except ImportError: context.pip_command(, ) from sphinx.application import Sphinx context.shell(, , ) app = Sphinx(, , , , buildername=, parallel=True, verbosity=context.verbosity) app.build()
Generates static documentation
378,129
def Add(self, service, method, request, global_params=None): method_config = service.GetMethodConfig(method) upload_config = service.GetUploadConfig(method) http_request = service.PrepareHttpRequest( method_config, request, global_params=global_params, upload_config=upload_config) api_request = self.ApiCall( http_request, self.retryable_codes, service, method_config) self.api_requests.append(api_request)
Add a request to the batch. Args: service: A class inheriting base_api.BaseApiService. method: A string indicating the desired method from the service. See the example in the class docstring. request: An input message appropriate for the specified service.method. global_params: Optional additional parameters to pass into method.PrepareHttpRequest. Returns: None
378,130
def _read_fd(file_descr): try: line = os.read(file_descr, 1024 * 1024) except OSError: stream_desc = NonBlockingStreamReader._get_sd(file_descr) if stream_desc is not None: stream_desc.has_error = True if stream_desc.callback is not None: stream_desc.callback() return 0 if line: stream_desc = NonBlockingStreamReader._get_sd(file_descr) if stream_desc is None: return 0 if IS_PYTHON3: try: line = line.decode("ascii") except UnicodeDecodeError: line = repr(line) stream_desc.buf += line split = stream_desc.buf.split(os.linesep) for line in split[:-1]: stream_desc.read_queue.appendleft(strip_escape(line.strip())) if stream_desc.callback is not None: stream_desc.callback() stream_desc.buf = split[-1] return len(line) return 0
Read incoming data from file handle. Then find the matching StreamDescriptor by file_descr value. :param file_descr: file object :return: Return number of bytes read
378,131
def extra_context(self, request, context): if settings.PAGE_EXTRA_CONTEXT: context.update(settings.PAGE_EXTRA_CONTEXT())
Call the PAGE_EXTRA_CONTEXT function if there is one.
378,132
def posthoc_tamhane(a, val_col=None, group_col=None, welch=True, sort=False): s T2 all-pairs comparison test for normally distributed data with unequal variances. Tamhanes approximate solution for calculating the degree of freedom. T2 test uses the usual df = N - 2 approximation. sort : bool, optional If True, sort data by block and group columns. Returns ------- result : pandas DataFrame P values. Notes ----- The p values are computed from the t-distribution and adjusted according to Dunn-Sidak. References ---------- .. [1] A.C. Tamhane (1979), A Comparison of Procedures for Multiple Comparisons of Means with Unequal Variances. Journal of the American Statistical Association, 74, 471-480. Examples -------- >>> import scikit_posthocs as sp >>> import pandas as pd >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]}) >>> x = x.melt(var_name=, value_name=) >>> sp.posthoc_tamhane(x, val_col=, group_col=) ' x, _val_col, _group_col = __convert_to_df(a, val_col, group_col) if not sort: x[_group_col] = Categorical(x[_group_col], categories=x[_group_col].unique(), ordered=True) x.sort_values(by=[_group_col], ascending=True, inplace=True) groups = x[_group_col].unique() x_grouped = x.groupby(_group_col)[_val_col] ni = x_grouped.count() n = ni.sum() xi = x_grouped.mean() si = x_grouped.var() sin = 1. / (n - groups.size) * np.sum(si * (ni - 1)) def compare(i, j): dif = xi[i] - xi[j] A = si[i] / ni[i] + si[j] / ni[j] t_val = dif / np.sqrt(A) if welch: df = A ** 2. / (si[i] ** 2. / (ni[i] ** 2. * (ni[i] - 1.)) + si[j] ** 2. / (ni[j] ** 2. * (ni[j] - 1.))) else: ok1 = (9./10. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 10./9.) ok2 = (9./10. <= (s2i[i] / ni[i]) / (s2i[j] / ni[j])) and ((s2i[i] / ni[i]) / (s2i[j] / ni[j]) <= 10./9.) ok3 = (4./5. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 5./4.) and (1./2. <= (s2i[i] / ni[i]) / (s2i[j] / ni[j])) and ((s2i[i] / ni[i]) / (s2i[j] / ni[j]) <= 2.) ok4 = (2./3. <= ni[i]/ni[j]) and (ni[i]/ni[j] <= 3./2.) and (3./4. <= (s2i[i] / ni[i]) / (s2i[j] / ni[j])) and ((s2i[i] / ni[i]) / (s2i[j] / ni[j]) <= 4./3.) OK = any(ok1, ok2, ok3, ok4) if not OK: print("Sample sizes or standard errors are not balanced. T2 test is recommended.") df = ni[i] + ni[j] - 2. p_val = 2. * ss.t.sf(np.abs(t_val), df=df) return p_val vs = np.zeros((groups.size, groups.size), dtype=np.float) tri_upper = np.triu_indices(vs.shape[0], 1) tri_lower = np.tril_indices(vs.shape[0], -1) vs[:,:] = 0 combs = it.combinations(range(groups.size), 2) for i,j in combs: vs[i, j] = compare(groups[i], groups[j]) vs[tri_upper] = 1. - (1. - vs[tri_upper]) ** groups.size vs[tri_lower] = vs.T[tri_lower] vs[vs > 1] = 1 np.fill_diagonal(vs, -1) return DataFrame(vs, index=groups, columns=groups)
Tamhane's T2 all-pairs comparison test for normally distributed data with unequal variances. Tamhane's T2 test can be performed for all-pairs comparisons in an one-factorial layout with normally distributed residuals but unequal groups variances. A total of m = k(k-1)/2 hypotheses can be tested. The null hypothesis is tested in the two-tailed test against the alternative hypothesis [1]_. Parameters ---------- a : array_like or pandas DataFrame object An array, any object exposing the array interface or a pandas DataFrame. val_col : str, optional Name of a DataFrame column that contains dependent variable values (test or response variable). Values should have a non-nominal scale. Must be specified if `a` is a pandas DataFrame object. group_col : str, optional Name of a DataFrame column that contains independent variable values (grouping or predictor variable). Values should have a nominal scale (categorical). Must be specified if `a` is a pandas DataFrame object. welch : bool, optional If True, use Welch's approximate solution for calculating the degree of freedom. T2 test uses the usual df = N - 2 approximation. sort : bool, optional If True, sort data by block and group columns. Returns ------- result : pandas DataFrame P values. Notes ----- The p values are computed from the t-distribution and adjusted according to Dunn-Sidak. References ---------- .. [1] A.C. Tamhane (1979), A Comparison of Procedures for Multiple Comparisons of Means with Unequal Variances. Journal of the American Statistical Association, 74, 471-480. Examples -------- >>> import scikit_posthocs as sp >>> import pandas as pd >>> x = pd.DataFrame({"a": [1,2,3,5,1], "b": [12,31,54,62,12], "c": [10,12,6,74,11]}) >>> x = x.melt(var_name='groups', value_name='values') >>> sp.posthoc_tamhane(x, val_col='values', group_col='groups')
378,133
def invokeCompletionIfAvailable(self, requestedByUser=False): if self._qpart.completionEnabled and self._wordSet is not None: wordBeforeCursor = self._wordBeforeCursor() wholeWord = wordBeforeCursor + self._wordAfterCursor() forceShow = requestedByUser or self._completionOpenedManually if wordBeforeCursor: if len(wordBeforeCursor) >= self._qpart.completionThreshold or forceShow: if self._widget is None: model = _CompletionModel(self._wordSet) model.setData(wordBeforeCursor, wholeWord) if self._shouldShowModel(model, forceShow): self._createWidget(model) return True else: self._widget.model().setData(wordBeforeCursor, wholeWord) if self._shouldShowModel(self._widget.model(), forceShow): self._widget.updateGeometry() return True self._closeCompletion() return False
Invoke completion, if available. Called after text has been typed in qpart. Returns True if invoked.
378,134
def get_current_desktop(self): desktop = ctypes.c_long(0) _libxdo.xdo_get_current_desktop(self._xdo, ctypes.byref(desktop)) return desktop.value
Get the current desktop. Uses ``_NET_CURRENT_DESKTOP`` of the EWMH spec.
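The out-parameter pattern used here is standard ctypes: the C function writes through a pointer and .value reads the result back. A minimal self-contained illustration with a stand-in for the library call (no libxdo required):

import ctypes

value = ctypes.c_long(0)

def fake_library_call(ptr):
    # Stands in for _libxdo.xdo_get_current_desktop writing through the pointer.
    ptr.contents.value = 3

fake_library_call(ctypes.pointer(value))
print(value.value)   # 3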
378,135
def crashlog_status(**kwargs): ctx = Context(**kwargs) ctx.execute_action(, **{ : ctx.repo.create_secure_service(), })
Show crashlogs status.
378,136
def upload(self, resource_id, data): self.body = data self.content_type = self.resource_id(str(resource_id)) self._request_uri = .format(self._request_uri)
Update the request URI to upload a document to this resource. Args: resource_id (integer): The group id. data (any): The raw data to upload.
378,137
def on_plugin_install(plugin_directory, ostream=sys.stdout): current_directory = os.getcwd() plugin_directory = ph.path(plugin_directory).realpath() print >> ostream, ( .format(plugin_directory.name)) hooks_dir_i = plugin_directory.joinpath().realpath() hook_path_i = hooks_dir_i.joinpath() if hook_path_i.isfile(): logger.info(, plugin_directory.name) os.chdir(hook_path_i.parent) try: process = sp.Popen([hook_path_i, sys.executable], shell=True, stdin=sp.PIPE) process.communicate(input=) if process.returncode != 0: raise RuntimeError( .format(process.returncode)) return hook_path_i except Exception, exception: raise RuntimeError(.format(hook_path_i, exception)) finally: os.chdir(current_directory)
Run ``on_plugin_install`` script for specified plugin directory (if available). **TODO** Add support for Linux, OSX. Parameters ---------- plugin_directory : str File system path to plugin directory. ostream : file-like Output stream for status messages (default: ``sys.stdout``).
378,138
def ResolvePrefix(self, subject, attribute_prefix, timestamp=None, limit=None): for _, values in self.MultiResolvePrefix([subject], attribute_prefix, timestamp=timestamp, limit=limit): values.sort(key=lambda a: a[0]) return values return []
Retrieve a set of values matching this subject's attribute prefix. Args: subject: The subject that we will search. attribute_prefix: The attribute prefix. timestamp: A range of times for consideration (in microseconds). Can be a constant such as ALL_TIMESTAMPS or NEWEST_TIMESTAMP or a tuple of ints (start, end). limit: The number of results to fetch. Returns: A list of (attribute, value string, timestamp). Values with the same attribute (happens when timestamp is not NEWEST_TIMESTAMP, but ALL_TIMESTAMPS or a time range) are guaranteed to be ordered in decreasing timestamp order. Raises: AccessError: if anything goes wrong.
378,139
def decode(self, data: bytes) -> bytes: if CONTENT_TRANSFER_ENCODING in self.headers: data = self._decode_content_transfer(data) if CONTENT_ENCODING in self.headers: return self._decode_content(data) return data
Decodes data according to the specified Content-Encoding or Content-Transfer-Encoding header values.
378,140
def apply_calibration(df, calibration_df, calibration): from dmf_control_board_firmware import FeedbackResults for i, (fb_resistor, R_fb, C_fb) in calibration_df[[, , ]].iterrows(): calibration.R_fb[int(fb_resistor)] = R_fb calibration.C_fb[int(fb_resistor)] = C_fb cleaned_df = df.dropna() grouped = cleaned_df.groupby([, , ]) for (f, channel, repeat_index), group in grouped: r = FeedbackResults(group.V_actuation.iloc[0], f, 5.0, group.V_hv.values, group.hv_resistor.values, group.V_fb.values, group.fb_resistor.values, calibration) df.loc[group.index, ] = r.capacitance()
Apply calibration values from `fit_fb_calibration` result to `calibration` object.
378,141
def from_config(cls, cp, data=None, delta_f=None, delta_t=None, gates=None, recalibration=None, **kwargs): prior_section = "marginalized_prior" args = cls._init_args_from_config(cp) marg_prior = read_distributions_from_config(cp, prior_section) if len(marg_prior) == 0: raise AttributeError("No priors are specified for the " "marginalization. Please specify this in a " "section in the config file with heading " "{}-variable".format(prior_section)) params = [i.params[0] for i in marg_prior] marg_args = [k for k, v in args.items() if "_marginalization" in k] if len(marg_args) != len(params): raise ValueError("There is not a prior for each keyword argument") kwargs[] = marg_prior for i in params: kwargs[i+"_marginalization"] = True args.update(kwargs) variable_params = args[] args["data"] = data try: static_params = args[] except KeyError: static_params = {} try: approximant = static_params[] except KeyError: raise ValueError("no approximant provided in the static args") generator_function = generator.select_waveform_generator(approximant) waveform_generator = generator.FDomainDetFrameGenerator( generator_function, epoch=data.values()[0].start_time, variable_args=variable_params, detectors=data.keys(), delta_f=delta_f, delta_t=delta_t, recalib=recalibration, gates=gates, **static_params) args[] = waveform_generator args["f_lower"] = static_params["f_lower"] return cls(**args)
Initializes an instance of this class from the given config file. Parameters ---------- cp : WorkflowConfigParser Config file parser to read. data : dict A dictionary of data, in which the keys are the detector names and the values are the data. This is not retrieved from the config file, and so must be provided. delta_f : float The frequency spacing of the data; needed for waveform generation. delta_t : float The time spacing of the data; needed for time-domain waveform generators. recalibration : dict of pycbc.calibration.Recalibrate, optional Dictionary of detectors -> recalibration class instances for recalibrating data. gates : dict of tuples, optional Dictionary of detectors -> tuples specifying gate times. The sort of thing returned by `pycbc.gate.gates_from_cli`. \**kwargs : All additional keyword arguments are passed to the class. Any provided keyword will override what is in the config file.
378,142
def omit_deep(omit_props, dct): omit_partial = lambda d: omit_deep(omit_props, d) if isinstance(dct, dict): return map_dict(omit_partial, compact_dict(omit(omit_props, dct))) if isinstance(dct, (list, tuple)): return map(omit_partial, dct) return dct
Implementation of omit that recurses. This tests the same keys at every level of dict and in lists :param omit_props: :param dct: :return:
378,143
def update_event_types(self): self.idx_evt_type.clear() self.idx_evt_type.setSelectionMode(QAbstractItemView.ExtendedSelection) event_types = sorted(self.parent.notes.annot.event_types, key=str.lower) for ty in event_types: item = QListWidgetItem(ty) self.idx_evt_type.addItem(item)
Update event types in event type box.
378,144
def predictions(self, stpid="", rt="", vid="", maxpredictions=""): if (stpid and vid) or (rt and vid): raise ValueError("These parameters cannot be specified simultaneously.") elif not (stpid or rt or vid): raise ValueError("You must specify a parameter.") if listlike(stpid): stpid = ",".join(stpid) if listlike(rt): rt = ",".join(rt) if listlike(vid): vid = ",".join(vid) if stpid or (rt and stpid) or vid: url = self.endpoint(, dict(rt=rt, stpid=stpid, vid=vid, top=maxpredictions)) return self.response(url)
Retrieve predictions for 1+ stops or 1+ vehicles. Arguments: `stpid`: unique ID number for bus stop (single or comma-separated list or iterable) or `vid`: vehicle ID number (single or comma-separated list or iterable) or `stpid` and `rt` `maxpredictions` (optional): limit number of predictions returned Response: `prd`: (prediction container) contains list of `tmstp`: when prediction was generated `typ`: prediction type ('A' = arrival, 'D' = departure) `stpid`: stop ID for prediction `stpnm`: stop name for prediction `vid`: vehicle ID for prediction `dstp`: vehicle distance to stop (feet) `rt`: bus route `des`: bus destination `prdtm`: ETA/ETD `dly`: True if bus delayed `tablockid`, `tatripid`, `zone`: internal, see `self.vehicles` http://realtime.portauthority.org/bustime/apidoc/v1/main.jsp?section=predictions.jsp
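A hedged usage sketch, assuming an authenticated API client object named pa that exposes the method above; the stop, route, and vehicle IDs are made up.

resp = pa.predictions(stpid="8189")                      # one stop
resp = pa.predictions(stpid=["8189", "3140"], rt="61C")  # several stops on a route
resp = pa.predictions(vid="5432", maxpredictions="2")    # a single vehicle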
378,145
def hav_dist(locs1, locs2): cos_lat1 = np.cos(locs1[..., 0]) cos_lat2 = np.cos(locs2[..., 0]) cos_lat_d = np.cos(locs1[..., 0] - locs2[..., 0]) cos_lon_d = np.cos(locs1[..., 1] - locs2[..., 1]) return 6367000 * np.arccos( cos_lat_d - cos_lat1 * cos_lat2 * (1 - cos_lon_d))
Return a distance matrix between two sets of coordinates, using the haversine (great-circle) distance. Parameters ---------- locs1 : numpy.array The first set of coordinates as [(long, lat), (long, lat)]. locs2 : numpy.array The second set of coordinates as [(long, lat), (long, lat)]. Returns ------- mat_dist : numpy.array The distance matrix between locs1 and locs2
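A worked check with two real coordinates, assuming the cosine terms above take radians with latitude in column 0, so degrees are converted first; Paris to London is roughly 343 km.

import numpy as np

paris = np.radians([48.8566, 2.3522])     # (lat, lon) in radians
london = np.radians([51.5074, -0.1278])
d = hav_dist(paris, london)
print(round(d / 1000))                     # ~343 (km)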
378,146
def getSequenceCombinaisons(polymorphipolymorphicDnaSeqSeq, pos = 0) : if type(polymorphipolymorphicDnaSeqSeq) is not types.ListType : seq = list(polymorphipolymorphicDnaSeqSeq) else : seq = polymorphipolymorphicDnaSeqSeq if pos >= len(seq) : return ["".join(seq)] variants = [] if seq[pos] in polymorphicNucleotides : chars = decodePolymorphicNucleotide(seq[pos]) else : chars = seq[pos] for c in chars : rseq = copy.copy(seq) rseq[pos] = c variants.extend(getSequenceCombinaisons(rseq, pos + 1)) return variants
Takes a DNA sequence with polymorphisms and returns all the possible sequences that it can yield
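A self-contained sketch of the same expansion idea using a tiny IUPAC table; the real module supplies its own polymorphicNucleotides mapping and decoder, so the table below is only illustrative.

IUPAC = {"R": "AG", "Y": "CT", "N": "ACGT"}   # minimal subset for illustration

def expand(seq):
    variants = [""]
    for ch in seq:
        options = IUPAC.get(ch, ch)   # polymorphic code -> choices, else the base itself
        variants = [v + o for v in variants for o in options]
    return variants

print(expand("ART"))   # ['AAT', 'AGT']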
378,147
def _get_model_table(self, part): rows = self.parser.find(part).find_children().list_results() table = [] for row in rows: table.append(self._get_model_row(self.parser.find( row ).find_children().list_results())) return self._get_valid_model_table(table)
Returns a list that represents the table. :param part: The table header, table footer or table body. :type part: hatemile.util.html.htmldomelement.HTMLDOMElement :return: The list that represents the table. :rtype: list(list(hatemile.util.html.htmldomelement.HTMLDOMElement))
378,148
def calcNewEdges(wcs, shape): naxis1 = shape[1] naxis2 = shape[0] numpix = naxis1*2 + naxis2*2 border = np.zeros(shape=(numpix,2),dtype=np.float64) xmin = 1. xmax = naxis1 ymin = 1. ymax = naxis2 xside = np.arange(naxis1) + xmin yside = np.arange(naxis2) + ymin _range0 = 0 _range1 = naxis1 border[_range0:_range1,0] = xside border[_range0:_range1,1] = ymin _range0 = _range1 _range1 = _range0 + naxis1 border[_range0:_range1,0] = xside border[_range0:_range1,1] = ymax _range0 = _range1 _range1 = _range0 + naxis2 border[_range0:_range1,0] = xmin border[_range0:_range1,1] = yside _range0 = _range1 _range1 = _range0 + naxis2 border[_range0:_range1,0] = xmax border[_range0:_range1,1] = yside edges = wcs.all_pix2world(border[:,0],border[:,1],1) return edges
This method will compute sky coordinates for all the pixels around the edge of an image AFTER applying the geometry model. Parameters ---------- wcs : obj HSTWCS object for image shape : tuple numpy shape tuple for size of image Returns ------- border : arr array which contains the new positions for all pixels around the border of the edges in alpha,dec
378,149
def get_asset_lookup_session_for_repository(self, repository_id, proxy, *args, **kwargs): if not repository_id: raise NullArgument() if not self.supports_asset_lookup(): raise Unimplemented() try: from . import sessions except ImportError: raise OperationFailed() proxy = self._convert_proxy(proxy) try: session = sessions.AssetLookupSession(repository_id, proxy, runtime=self._runtime, **kwargs) except AttributeError: raise OperationFailed() return session
Gets the OsidSession associated with the asset lookup service for the given repository. arg: repository_id (osid.id.Id): the Id of the repository arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.AssetLookupSession) - the new AssetLookupSession raise: NotFound - repository_id not found raise: NullArgument - repository_id is null raise: OperationFailed - unable to complete request raise: Unimplemented - supports_asset_lookup() or supports_visible_federation() is false compliance: optional - This method must be implemented if supports_asset_lookup() and supports_visible_federation() are true.
378,150
def schemaValidCtxtGetParserCtxt(self): ret = libxml2mod.xmlSchemaValidCtxtGetParserCtxt(self._o) if ret is None:raise parserError() __tmp = parserCtxt(_obj=ret) return __tmp
allow access to the parser context of the schema validation context
378,151
def function_call_prepare_action(self, text, loc, fun): exshared.setpos(loc, text) if DEBUG > 0: print("FUN_PREP:",fun) if DEBUG == 2: self.symtab.display() if DEBUG > 2: return index = self.symtab.lookup_symbol(fun.name, SharedData.KINDS.FUNCTION) if index == None: raise SemanticException(" is not a function" % fun.name) self.function_call_stack.append(self.function_call_index) self.function_call_index = index self.function_arguments_stack.append(self.function_arguments[:]) del self.function_arguments[:] self.codegen.save_used_registers()
Code executed after recognising a function call (type and function name)
378,152
def profile_remove(name, **kwargs): ctx = Context(**kwargs) ctx.execute_action(, **{ : ctx.repo.create_secure_service(), : name, })
Remove profile from the storage.
378,153
def domain_delete(auth=None, **kwargs): cloud = get_operator_cloud(auth) kwargs = _clean_kwargs(**kwargs) return cloud.delete_domain(**kwargs)
Delete a domain CLI Example: .. code-block:: bash salt '*' keystoneng.domain_delete name=domain1 salt '*' keystoneng.domain_delete name=b62e76fbeeff4e8fb77073f591cf211e
378,154
def get_related_galleries(gallery, count=5): try: cat = gallery.sections.all()[0] related = cat.gallery_categories.filter(published=True).exclude(id=gallery.id).order_by()[:count] except: related = None return {: related, : settings.MEDIA_URL}
Gets latest related galleries from same section as originating gallery. Count defaults to five but can be overridden. Usage: {% get_related_galleries gallery <10> %}
378,155
def create_examples_train(candidate_dialog_paths, rng, positive_probability=0.5, max_context_length=20): i = 0 examples = [] for context_dialog in candidate_dialog_paths: if i % 1000 == 0: print(str(i)) dialog_path = candidate_dialog_paths[i] examples.append(create_single_dialog_train_example(dialog_path, candidate_dialog_paths, rng, positive_probability, max_context_length=max_context_length)) i += 1 return examples
Creates training examples, one per candidate dialog. :param candidate_dialog_paths: :param rng: :param positive_probability: probability of selecting a positive training example :return:
378,156
def __build_lxml(target, source, env): from lxml import etree xslt_ac = etree.XSLTAccessControl(read_file=True, write_file=True, create_dir=True, read_network=False, write_network=False) xsl_style = env.subst() xsl_tree = etree.parse(xsl_style) transform = etree.XSLT(xsl_tree, access_control=xslt_ac) doc = etree.parse(str(source[0])) parampass = {} if parampass: result = transform(doc, **parampass) else: result = transform(doc) try: of = open(str(target[0]), "wb") of.write(etree.tostring(result, pretty_print=True)) of.close() except: pass return None
General XSLT builder (HTML/FO), using the lxml module.
378,157
def get(self, client_method, get_params, is_json=True): url = self._wa.apollo_url + self.CLIENT_BASE + client_method headers = {} response = requests.get(url, headers=headers, verify=self.__verify, params=get_params, **self._request_args) if response.status_code == 200: if is_json: data = response.json() return self._scrub_data(data) else: return response.text raise Exception("Unexpected response from apollo %s: %s" % (response.status_code, response.text))
Make a GET request
378,158
def checkRequirements(sender,**kwargs): if not getConstant(): return logger.debug() formData = kwargs.get(,{}) first = formData.get() last = formData.get() email = formData.get() request = kwargs.get(,{}) registration = kwargs.get(,None) customer = Customer.objects.filter( first_name=first, last_name=last, email=email).first() requirement_warnings = [] requirement_errors = [] for ter in registration.temporaryeventregistration_set.all(): if hasattr(ter.event,): for req in ter.event.getRequirements(): if not req.customerMeetsRequirement( customer=customer, danceRole=ter.role ): if req.enforcementMethod == Requirement.EnforcementChoice.error: requirement_errors.append((ter.event.name, req.name)) if req.enforcementMethod == Requirement.EnforcementChoice.warning: requirement_warnings.append((ter.event.name,req.name)) if requirement_errors: raise ValidationError(format_html( , ugettext(), mark_safe(.join([ % x for x in requirement_errors])), getConstant() or , )) if requirement_warnings: messages.warning(request,format_html( , mark_safe(ugettext()), mark_safe(.join([ % x for x in requirement_warnings])), getConstant() or , ))
Check that the customer meets all prerequisites for the items in the registration.
378,159
def refresh_lock(lock_file): unique_id = % ( os.getpid(), .join([str(random.randint(0, 9)) for i in range(10)]), hostname) try: lock_write = open(lock_file, ) lock_write.write(unique_id + ) lock_write.close() except Exception: while get_lock.n_lock > 0: release_lock() raise return unique_id
'Refresh' an existing lock by re-writing the file containing the owner's unique id, using a new (randomly generated) id, which is also returned.
378,160
def as_tuple(obj): " Given obj return a tuple " if not obj: return tuple() if isinstance(obj, (tuple, set, list)): return tuple(obj) if hasattr(obj, ) and not isinstance(obj, dict): return obj return obj,
Given obj return a tuple
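Behaviour sketch for the helper above (the falsy check means None and empty containers collapse to an empty tuple):

assert as_tuple(None) == ()
assert as_tuple([1, 2]) == (1, 2)
assert as_tuple({3}) == (3,)
assert as_tuple(5) == (5,)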
378,161
def main(args=sys.argv[1:]): if not args: sys.stderr.write(_usage() + ) sys.exit(4) else: parsed = _parse_args(args) delim = parsed.delimiter if parsed.regex else re.escape(parsed.delimiter) num_cutters = 0 read_mode = if parsed.bytes: positions = parsed.bytes cutter = ByteCutter(positions) num_cutters += 1 read_mode = if parsed.chars: positions = parsed.chars cutter = CharCutter(positions) num_cutters += 1 if parsed.fields: positions = parsed.fields cutter = FieldCutter(positions, delim, parsed.separator) num_cutters += 1 if num_cutters > 1: sys.stderr.write() sys.stderr.write(_usage() + ) sys.exit(1) if [n for n in positions if re.search("0:?|0$", n)]: sys.stderr.write() sys.stderr.write(_usage() + ) sys.exit(2) try: for line in fileinput.input(parsed.file, mode=read_mode): if parsed.skip and not re.search(parsed.delimiter, line): pass else: print(cutter.cut(line)) except IOError: sys.stderr.write(\) sys.exit(3) fileinput.close()
Processes command line arguments and file i/o
378,162
def get(self, request, slug): matching_datasets = self.generate_matching_datasets(slug) if matching_datasets is None: raise Http404("Datasets meeting these criteria do not exist.") base_context = { : matching_datasets, : matching_datasets.count(), : self.generate_page_title(slug), } additional_context = self.generate_additional_context( matching_datasets ) base_context.update(additional_context) context = base_context return render( request, self.template_path, context )
Basic functionality for GET request to view.
378,163
def clean_proc_dir(opts): for basefilename in os.listdir(salt.minion.get_proc_dir(opts[])): fn_ = os.path.join(salt.minion.get_proc_dir(opts[]), basefilename) with salt.utils.files.fopen(fn_, ) as fp_: job = None try: job = salt.payload.Serial(opts).load(fp_) except Exception: ) else: if salt.utils.platform.is_windows(): fp_.close() try: os.unlink(fn_) except OSError: pass
Loop through jid files in the minion proc directory (default /var/cache/salt/minion/proc) and remove any that refer to processes that no longer exist
378,164
def _clean_value(key, val): t clean (for example has value ), return None. Otherwise, return the original value. NOTE: This logic also exists in the smbios module. This function is for use when not using smbios to retrieve the value. noneuuids actually a UUID for uuidver in range(1, 5): try: uuid.UUID(val, version=uuidver) return val except ValueError: continue log.trace(, key, val.replace(, )) return None elif re.search(, key): val, flags=re.IGNORECASE)): return None return val
Clean out well-known bogus values. If it isn't clean (for example has value 'None'), return None. Otherwise, return the original value. NOTE: This logic also exists in the smbios module. This function is for use when not using smbios to retrieve the value.
378,165
def eventFilter(self, object, event): if not self.isVisible(): return False links = self.positionLinkedTo() is_dialog = self.currentMode() == self.Mode.Dialog if object not in links: return False if event.type() == event.Close: self.close() return False if event.type() == event.Hide and not is_dialog: self.hide() return False if event.type() == event.Move and not is_dialog: deltaPos = event.pos() - event.oldPos() self.move(self.pos() + deltaPos) return False if self.currentMode() != self.Mode.ToolTip: return False if event.type() == event.Leave: pos = object.mapFromGlobal(QCursor.pos()) if (not object.rect().contains(pos)): self.close() event.accept() return True if event.type() in (event.MouseButtonPress, event.MouseButtonDblClick): self.close() event.accept() return True return False
Processes window move events to update the popup position when in popup mode. :param object | <QObject> event | <QEvent>
378,166
def compile(pattern, namespaces=None, flags=0, **kwargs): if namespaces is not None: namespaces = ct.Namespaces(**namespaces) custom = kwargs.get() if custom is not None: custom = ct.CustomSelectors(**custom) if isinstance(pattern, SoupSieve): if flags: raise ValueError("Cannot process argument on a compiled selector list") elif namespaces is not None: raise ValueError("Cannot process argument on a compiled selector list") elif custom is not None: raise ValueError("Cannot process argument on a compiled selector list") return pattern return cp._cached_css_compile(pattern, namespaces, custom, flags)
Compile CSS pattern.
378,167
def stats(self): per_utt_stats = self.stats_per_utterance() return stats.DataStats.concatenate(per_utt_stats.values())
Return statistics calculated over all samples of all utterances in the corpus. Returns: DataStats: A DataStats object containing statistics over all samples in the corpus.
378,168
def _list_files(root): dir_patterns, file_patterns = _gitignore(root) paths = [] prefix = os.path.abspath(root) + os.sep for base, dirs, files in os.walk(root): for d in dirs: for dir_pattern in dir_patterns: if fnmatch(d, dir_pattern): dirs.remove(d) break for f in files: skip = False for file_pattern in file_patterns: if fnmatch(f, file_pattern): skip = True break if skip: continue full_path = os.path.join(base, f) if full_path[:len(prefix)] == prefix: full_path = full_path[len(prefix):] paths.append(full_path) return sorted(paths)
Lists all of the files in a directory, taking into account any .gitignore file that is present. :param root: A unicode filesystem path :return: A list of unicode strings containing the paths of all files not ignored by .gitignore, relative to root
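A tiny illustration of the fnmatch-based filtering used above, with hypothetical ignore patterns.

from fnmatch import fnmatch

file_patterns = ["*.pyc", ".DS_Store"]          # patterns as parsed from .gitignore
files = ["setup.py", "module.pyc", ".DS_Store"]
kept = [f for f in files if not any(fnmatch(f, p) for p in file_patterns)]
print(kept)   # ['setup.py']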
378,169
def main(arguments=None): su = tools( arguments=arguments, docString=__doc__, logLevel="DEBUG", options_first=False, projectName="picaxe" ) arguments, settings, log, dbConn = su.setup() startTime = times.get_now_sql_datetime() for arg, val in arguments.iteritems(): if arg[0] == "-": varname = arg.replace("-", "") + "Flag" else: varname = arg.replace("<", "").replace(">", "") if isinstance(val, str) or isinstance(val, unicode): exec(varname + " = " % (val,)) else: exec(varname + " = %s" % (val,)) if arg == "--dbConn": dbConn = val log.debug( % (varname, val,)) if init: from os.path import expanduser home = expanduser("~") filepath = home + "/.config/picaxe/picaxe.yaml" try: cmd = % locals() p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) except: pass try: cmd = % locals() p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) except: pass if auth: from picaxe import picaxe client = picaxe( log=log, settings=settings, pathToSettingsFile=pathToSettingsFile ) client.authenticate() if md: from picaxe import picaxe Flickr = picaxe( log=log, settings=settings ) if not width: width = "original" mdLink = Flickr.md( url=urlOrPhotoid, width=width ) print mdLink if albums: from picaxe import picaxe flickr = picaxe( log=log, settings=settings ) albumList = flickr.list_album_titles() for a in albumList: print a if upload: from picaxe import picaxe flickr = picaxe( log=log, settings=settings ) imageType = "photo" if screenGrabFlag: imageType = "screengrab" elif imageFlag: imageType = "image" album = "inbox" if albumFlag: album = albumFlag photoid = flickr.upload( imagePath=imagePath, title=titleFlag, private=publicFlag, tags=tagsFlag, description=descFlag, imageType=imageType, album=albumFlag, openInBrowser=openFlag ) print photoid if grab: try: os.remove("/tmp/screengrab.png") except: pass if delayFlag: time.sleep(int(delayFlag)) from subprocess import Popen, PIPE, STDOUT cmd = % locals() p = Popen(cmd, stdout=PIPE, stderr=PIPE, shell=True) stdout, stderr = p.communicate() log.debug( % locals()) exists = os.path.exists("/tmp/screengrab.png") if exists: from picaxe import picaxe flickr = picaxe( log=log, settings=settings ) if not albumFlag: albumFlag = "screengrabs" photoid = flickr.upload( imagePath="/tmp/screengrab.png", title=titleFlag, private=publicFlag, tags=tagsFlag, description=descFlag, imageType="screengrab", album=albumFlag, openInBrowser=openFlag ) mdLink = flickr.md( url=photoid, width="original" ) print mdLink if "dbConn" in locals() and dbConn: dbConn.commit() dbConn.close() endTime = times.get_now_sql_datetime() runningTime = times.calculate_time_difference(startTime, endTime) log.info( % (endTime, runningTime, )) return
*The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command*
378,170
def _put_bucket_website(self): if self.s3props[][]: website_config = { : { : self.s3props[][] }, : { : self.s3props[][] } } _response = self.s3client.put_bucket_website(Bucket=self.bucket, WebsiteConfiguration=website_config) self._put_bucket_cors() self._set_bucket_dns() else: _response = self.s3client.delete_bucket_website(Bucket=self.bucket) self._put_bucket_cors() LOG.debug(, _response) LOG.info()
Configure static website on S3 bucket.
378,171
async def seek(self, pos, *, device: Optional[SomeDevice] = None): await self._user.http.seek_playback(pos, device_id=str(device))
Seeks to the given position in the user’s currently playing track. Parameters ---------- pos : int The position in milliseconds to seek to. Must be a positive number. Passing in a position that is greater than the length of the track will cause the player to start playing the next song. device : Optional[:obj:`SomeDevice`] The Device object or id of the device this command is targeting. If not supplied, the user’s currently active device is the target.
378,172
def get_nas_credentials(self, identifier, **kwargs): result = self.network_storage.getObject(id=identifier, **kwargs) return result
Returns credentials for the given NAS storage instance. :param integer identifier: the NAS storage instance ID :returns: A dictionary containing a large amount of information about the specified instance.
378,173
def get_lemma_by_id(self, mongo_id): cache_hit = None if self._lemma_cache is not None: cache_hit = self._lemma_cache.get(mongo_id) if cache_hit is not None: return cache_hit lemma_dict = self._mongo_db.lexunits.find_one({: mongo_id}) if lemma_dict is not None: lemma = Lemma(self, lemma_dict) if self._lemma_cache is not None: self._lemma_cache.put(mongo_id, lemma) return lemma
Builds a Lemma object from the database entry with the given ObjectId. Arguments: - `mongo_id`: a bson.objectid.ObjectId object
378,174
def _example_short_number_for_cost(region_code, cost): metadata = PhoneMetadata.short_metadata_for_region(region_code) if metadata is None: return U_EMPTY_STRING desc = None if cost == ShortNumberCost.TOLL_FREE: desc = metadata.toll_free elif cost == ShortNumberCost.STANDARD_RATE: desc = metadata.standard_rate elif cost == ShortNumberCost.PREMIUM_RATE: desc = metadata.premium_rate else: pass if desc is not None and desc.example_number is not None: return desc.example_number return U_EMPTY_STRING
Gets a valid short number for the specified cost category. Arguments: region_code -- the region for which an example short number is needed. cost -- the cost category of number that is needed. Returns a valid short number for the specified region and cost category. Returns an empty string when the metadata does not contain such information, or the cost is UNKNOWN_COST.
378,175
def _check_compound_minions(self, expr, delimiter, greedy, pillar_exact=False): if not isinstance(expr, six.string_types) and not isinstance(expr, (list, tuple)): log.error() return {: [], : []} minions = set(self._pki_minions()) log.debug(, minions) nodegroups = self.opts.get(, {}) if self.opts.get(, False): ref = {: self._check_grain_minions, : self._check_grain_pcre_minions, : self._check_pillar_minions, : self._check_pillar_pcre_minions, : self._check_list_minions, : None, : self._check_ipcidr_minions, : self._check_pcre_minions, : self._all_minions} if pillar_exact: ref[] = self._check_pillar_exact_minions ref[] = self._check_pillar_exact_minions results = [] unmatched = [] opers = [, , , , ] missing = [] if isinstance(expr, six.string_types): words = expr.split() else: words = expr[:] while words: word = words.pop(0) target_info = parse_target(word) if word in opers: if results: if results[-1] == and word in (, ): log.error(, word) return {: [], : []} if word == : if not results[-1] in (, , ): results.append() results.append() results.append(six.text_type(set(minions))) results.append() unmatched.append() elif word == : results.append() elif word == : results.append() elif word == : results.append(word) unmatched.append(word) elif word == : if not unmatched or unmatched[-1] != : log.error( , expr) return {: [], : []} results.append(word) unmatched.pop() if unmatched and unmatched[-1] == : results.append() unmatched.pop() else:
Return the minions found by looking via compound matcher
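The heart of the matcher is rewriting and/or/not into set operators and evaluating the result. A simplified, runnable sketch of that idea (the match table and target names are made up; Salt's real code resolves each non-operator word through the ref dispatch table above):

# Map each non-operator target to the set of minions it matches, turn
# and/or/not into &, | and set difference, then eval the expression.
all_minions = {"web1", "web2", "db1"}
matches = {"web*": {"web1", "web2"}, "G@os:Debian": {"web1", "db1"}}

def eval_compound(expr):
    results = []
    for word in expr.split():
        if word == "and":
            results.append("&")
        elif word == "or":
            results.append("|")
        elif word == "not":
            results.append("{!r} -".format(all_minions))   # complement w.r.t. all minions
        elif word in ("(", ")"):
            results.append(word)
        else:
            results.append(repr(matches.get(word, set())))
    return eval(" ".join(results))

print(eval_compound("web* and G@os:Debian"))      # {'web1'}
print(eval_compound("web* and not G@os:Debian"))  # {'web2'}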
378,176
def reverse(self): def reverse_trans(pipe): if self.writeback: self._sync_helper(pipe) n = self.__len__(pipe) for i in range(n // 2): left = pipe.lindex(self.key, i) right = pipe.lindex(self.key, n - i - 1) pipe.lset(self.key, i, right) pipe.lset(self.key, n - i - 1, left) self._transaction(reverse_trans)
Reverses the items of this collection "in place" (only two values are retrieved from Redis at a time).
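The same two-slot swap, sketched on a plain Python list so it runs without Redis (indexing and assignment stand in for LINDEX and LSET):

def reverse_in_place(seq):
    # Swap the i-th item with its mirror, touching only two slots per step.
    n = len(seq)
    for i in range(n // 2):
        left, right = seq[i], seq[n - i - 1]   # LINDEX i, LINDEX n-i-1
        seq[i], seq[n - i - 1] = right, left   # LSET i, LSET n-i-1

items = [1, 2, 3, 4, 5]
reverse_in_place(items)
print(items)   # [5, 4, 3, 2, 1]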
378,177
def mark_read(user, message): BackendClass = stored_messages_settings.STORAGE_BACKEND backend = BackendClass() backend.inbox_delete(user, message)
Mark message instance as read for user. Returns True if the message was `unread` and thus actually marked as `read` or False in case it is already `read` or it does not exist at all. :param user: user instance for the recipient :param message: a Message instance to mark as read
378,178
def translate(self, text, to_template='{name} ({url})', from_template=None, name_matcher=None, url_matcher=None):
    # default `to_template` is assumed from the doctest below ("Total Good (http://totalgood.com)")
    return self.replace(text, to_template=to_template, from_template=from_template,
                        name_matcher=name_matcher, url_matcher=url_matcher)
Translate hyperinks into printable book style for Manning Publishing >>> translator = HyperlinkStyleCorrector() >>> adoc = 'See http://totalgood.com[Total Good] about that.' >>> translator.translate(adoc) 'See Total Good (http://totalgood.com) about that.'
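A standalone regex sketch of the same rewrite, turning http://url[Link text] AsciiDoc links into 'Link text (url)'; the pattern and template below are illustrative, not the library's own matchers:

import re

# Match a URL immediately followed by [link text], the AsciiDoc link form.
LINK = re.compile(r'(?P<url>https?://[^\s\[\]]+)\[(?P<name>[^\]]+)\]')

def to_print_style(text, template='{name} ({url})'):
    return LINK.sub(lambda m: template.format(**m.groupdict()), text)

print(to_print_style('See http://totalgood.com[Total Good] about that.'))
# See Total Good (http://totalgood.com) about that.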
378,179
def set_regs(self, regs_dump):
    if self.real_stack_top == 0 and self.adjust_stack is True:
        raise SimStateError("You need to set the stack first, or set "
                            "adjust_stack to False. Beware that in this case, "
                            "sp and bp won't be updated")
    # The loop that parses the ``info registers`` dump and stores each value is
    # truncated in the source; registers such as cs, ds or eflags aren't
    # supported in angr, which is what the KeyError handling below guards against.
    try:
        pass
    except KeyError as e:
        l.warning("Reg %s was not set", e)
    self._adjust_regs()
Initialize register values within the state :param regs_dump: The output of ``info registers`` in gdb.
378,180
def partition_key(self, value): annotations = dict(self._annotations) annotations[self._partition_key] = value header = MessageHeader() header.durable = True self.message.annotations = annotations self.message.header = header self._annotations = annotations
Set the partition key of the event data object. :param value: The partition key to set. :type value: str or bytes
378,181
def _is_output(part): if part[0].lower() == : return True elif part[0][:2].lower() == : return True elif part[0][:2].lower() == : return True else: return False
Returns whether the given part represents an output variable.
378,182
def compose_dynamic_tree(src, target_tree_alias=None, parent_tree_item_alias=None, include_trees=None):
    def result(sitetrees=src):
        if include_trees is not None:
            sitetrees = [tree for tree in sitetrees if tree.alias in include_trees]
        # dictionary keys below are assumed from sitetree's dynamic-tree conventions
        return {
            'app': src,
            'sitetrees': sitetrees,
            'tree': target_tree_alias,
            'dynamic_attach_to': parent_tree_item_alias}

    if isinstance(src, six.string_types):
        # `src` is an application name: import its sitetree definitions module
        try:
            module = import_app_sitetree_module(src)
            return None if module is None else result(getattr(module, 'sitetrees', None))
        except ImportError as e:
            if settings.DEBUG:
                warnings.warn('Unable to register dynamic sitetree(s) for `%s` application: %s' % (src, e))
            return None

    return result()
Returns a structure describing a dynamic sitetree. The structure can be built from various sources. :param str|iterable src: If a string is passed to `src`, it'll be treated as the name of an app from which to import sitetrees definitions. `src` can be an iterable of tree definitions (see `sitetree.toolbox.tree()` and `item()` functions). :param str|unicode target_tree_alias: Static tree alias to attach items from dynamic trees to. :param str|unicode parent_tree_item_alias: Tree item alias from a static tree to attach items from dynamic trees to. :param list include_trees: Sitetree aliases to filter `src`. :rtype: dict
378,183
def merge_with(self, other): result = ValuesAggregation() result.total = self.total + other.total result.count = self.count + other.count result.min = min(self.min, other.min) result.max = max(self.max, other.max) return result
Merge this ``ValuesAggregation`` with another one
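A small self-contained version of the merge, keeping total/count/min/max per aggregate and combining two of them field by field (illustrative class, not the library's):

class Agg:
    def __init__(self, values=()):
        self.total = sum(values)
        self.count = len(values)
        self.min = min(values) if values else float('inf')
        self.max = max(values) if values else float('-inf')

    def merge_with(self, other):
        merged = Agg()
        merged.total = self.total + other.total
        merged.count = self.count + other.count
        merged.min = min(self.min, other.min)
        merged.max = max(self.max, other.max)
        return merged

a, b = Agg([1, 2, 3]), Agg([10, 20])
m = a.merge_with(b)
print(m.total, m.count, m.min, m.max)   # 36 5 1 20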
378,184
def derived(self, name, relative_coords, formula):
    relZ, relN = relative_coords
    daughter_idx = [(x[0] + relZ, x[1] + relN) for x in self.df.index]
    values = formula(self.df.values, self.df.loc[daughter_idx].values)
    # the derived series is named "<name>(<parent table name>)"; the literal
    # parentheses are assumed, since the source string was stripped
    return Table(df=pd.Series(values, index=self.df.index,
                              name=name + '(' + self.name + ')'))
Helper function for derived quantities
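The pattern, evaluating a formula between each (Z, N) entry and a shifted "daughter" entry, can be shown with a tiny pandas Series; the numbers and the formula below are made up for illustration:

import pandas as pd

# Toy "mass table": index is (Z, N), values are binding energies (made-up numbers).
s = pd.Series({(2, 2): 28.3, (2, 3): 27.4, (3, 3): 32.0, (3, 4): 39.2})
s.index = pd.MultiIndex.from_tuples(s.index)

relZ, relN = 0, 1                      # "daughter" = same Z, one more neutron
daughter_idx = [(z + relZ, n + relN) for z, n in s.index]
daughter = s.reindex(daughter_idx)     # NaN where the shifted nucleus is absent
derived = pd.Series(s.values - daughter.values, index=s.index, name='delta')
print(derived)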
378,185
def xml_report(self, morfs=None, outfile=None, ignore_errors=None, omit=None, include=None):
    self._harvest_data()
    self.config.from_args(
        ignore_errors=ignore_errors, omit=omit, include=include,
        xml_output=outfile,
    )
    file_to_close = None
    delete_file = False
    if self.config.xml_output:
        if self.config.xml_output == '-':
            outfile = sys.stdout
        else:
            outfile = open(self.config.xml_output, "w")
            file_to_close = outfile
    try:
        try:
            reporter = XmlReporter(self, self.config)
            return reporter.report(morfs, outfile=outfile)
        except CoverageException:
            delete_file = True
            raise
    finally:
        if file_to_close:
            file_to_close.close()
            if delete_file:
                file_be_gone(self.config.xml_output)
Generate an XML report of coverage results. The report is compatible with Cobertura reports. Each module in `morfs` is included in the report. `outfile` is the path to write the file to, "-" will write to stdout. See `coverage.report()` for other arguments. Returns a float, the total percentage covered.
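Typical use from the public API looks roughly like this; the constructor is spelled coverage.Coverage() in recent releases and coverage.coverage() in older ones, so treat the sketch as approximate:

import coverage

cov = coverage.Coverage()            # older releases spell this coverage.coverage()
cov.start()
import json                          # the code under measurement would run here
json.dumps({"covered": True})
cov.stop()
pct = cov.xml_report(outfile='coverage.xml')   # '-' would write to stdout instead
print('total coverage: %.1f%%' % pct)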
378,186
def as_dict(self, join='.'):  # default separator assumed; the source literal is stripped
    result = {}
    for e in self.errors:
        result.update(e.as_dict(join))
    return result
Returns all the errors in this collection as a path to message dictionary. Paths are joined with the ``join`` string.
378,187
def _render_bundle(bundle_name):
    try:
        bundle = get_bundles()[bundle_name]
    except KeyError:
        raise ImproperlyConfigured("Bundle %s is not defined" % bundle_name)
    if bundle.use_bundle:
        return _render_file(bundle.bundle_type, bundle.get_url(),
                            attrs=({'media': bundle.media} if bundle.media else {}))
    bundle_files = []
    for bundle_file in bundle.files:
        if bundle_file.precompile_in_debug:
            bundle_files.append(
                _render_file(bundle_file.bundle_type, bundle_file.precompile_url,
                             attrs=({'media': bundle_file.media} if bundle.media else {})))
        else:
            bundle_files.append(
                _render_file(bundle_file.file_type, bundle_file.file_url,
                             attrs=({'media': bundle_file.media} if bundle.media else {})))
    return '\n'.join(bundle_files)
Renders the HTML for a bundle in place - one HTML tag or many depending on settings.USE_BUNDLES
378,188
async def check_authorized(self, identity): identify = await self.identify(identity) if identify is None: raise UnauthorizedError() return identify
Works like :func:`Security.identity`, but raises an :func:`UnauthorizedError` exception when the check fails. :param identity: Claim :return: Checked claim or ``None`` :raise: :func:`UnauthorizedError`
378,189
def process(self, element): import apache_beam as beam import six import tensorflow as tf
Run the transformation graph on batched input data Args: element: list of csv strings, representing one batch input to the TF graph. Returns: dict containing the transformed data. Results are un-batched. Sparse tensors are converted to lists.
378,190
def do_use(self, args):
    self.instance = args
    self.prompt = self.instance + '> '  # prompt suffix assumed; the source literal is stripped
    archive = self._client.get_archive(self.instance)
    self.streams = [s.name for s in archive.list_streams()]
    self.tables = [t.name for t in archive.list_tables()]
Use another instance, provided as argument.
378,191
def base_url(self):
    # format template reconstructed from the keyword names and the docstring below
    return '{proto}://{host}:{port}{url_path}'.format(
        proto=self.protocol,
        host=self.host,
        port=self.port,
        url_path=self.url_path,
    )
A base_url that will be used to construct the final URL we're going to query against. :returns: A URL of the form: ``proto://host:port``. :rtype: :obj:`string`
378,192
def unit(n, d=None, j=None, tt_instance=True): if isinstance(n, int): if d is None: d = 1 n = n * _np.ones(d, dtype=_np.int32) else: d = len(n) if j is None: j = 0 rv = [] j = _ind2sub(n, j) for k in xrange(d): rv.append(_np.zeros((1, n[k], 1))) rv[-1][0, j[k], 0] = 1 if tt_instance: rv = _vector.vector.from_list(rv) return rv
Generates the e_j vector in tt.vector format --------- Parameters: n - modes (either integer or array) d - dimensionality (integer) j - position of the 1 in the full-format e_j (integer) tt_instance - if True, returns tt.vector; if False, returns tt cores as a list
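What the cores represent can be checked with plain numpy: a rank-1 TT decomposition of e_j is one (1, n_k, 1) one-hot core per mode, and contracting the cores reproduces the full one-hot tensor (helper names below are illustrative):

import numpy as np

def unit_cores(n, j_multi):
    # One (1, n_k, 1) core per mode, with a single 1 at position j_k.
    cores = []
    for n_k, j_k in zip(n, j_multi):
        core = np.zeros((1, n_k, 1))
        core[0, j_k, 0] = 1.0
        cores.append(core)
    return cores

def contract(cores):
    # Multiply the cores together to recover the full tensor.
    full = cores[0]
    for core in cores[1:]:
        full = np.tensordot(full, core, axes=([full.ndim - 1], [0]))
    return full.squeeze()

cores = unit_cores([2, 3, 2], [1, 2, 0])
full = contract(cores)
print(full.shape, full[1, 2, 0], full.sum())   # (2, 3, 2) 1.0 1.0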
378,193
def fixminimized(self, alphabet):
    endstate = len(list(self.states))
    for state in self.states:
        for char in alphabet:
            found = 0
            for arc in state.arcs:
                if self.isyms.find(arc.ilabel) == char:
                    found = 1
                    break
            if found == 0:
                self.add_arc(state.stateid, endstate, char)

    # infinite tropical weight marks the new sink state as non-final
    self[endstate].final = TropicalWeight(float('inf'))

    for char in alphabet:
        self.add_arc(endstate, endstate, char)
After pyfst minimization, all unused arcs are removed, and all sink states are removed. However this may break compatibility. Args: alphabet (list): The input alphabet Returns: None
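The underlying idea, completing an automaton by routing every missing transition to a fresh non-accepting sink state, is easy to see with a plain dict transition table (a sketch, not the pyfst API):

def complete_dfa(transitions, states, alphabet):
    # transitions: dict mapping (state, symbol) -> next state.
    sink = max(states) + 1
    completed = dict(transitions)
    for state in list(states) + [sink]:
        for symbol in alphabet:
            completed.setdefault((state, symbol), sink)   # missing arc -> sink
    return completed, sink

trans = {(0, 'a'): 1, (1, 'b'): 0}
completed, sink = complete_dfa(trans, {0, 1}, ['a', 'b'])
print(sink, completed[(0, 'b')], completed[(sink, 'a')])   # 2 2 2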
378,194
def citations(val): retCites = [] for c in val: retCites.append(Citation(c)) return retCites
# The CR Tag extracts a list of all the citations in the record, the citations are the [metaknowledge.Citation](../classes/Citation.html#metaknowledge.citation.Citation) class. # Parameters _val_: `list[str]` > The raw data from a WOS file # Returns ` list[metaknowledge.Citation]` > A list of Citations
378,195
def _closure_deletelink(self, oldparentpk): self._closure_model.objects.filter( **{ "parent__%s__child" % self._closure_parentref(): oldparentpk, "child__%s__parent" % self._closure_childref(): self.pk } ).delete()
Remove incorrect links from the closure tree.
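The delete removes every (ancestor, descendant) pair that passes through the old parent; the same filter is easy to see on a bare closure table in sqlite3 (schema and data below are made up for illustration):

import sqlite3

con = sqlite3.connect(':memory:')
con.execute('CREATE TABLE closure (parent INTEGER, child INTEGER, depth INTEGER)')
# Tree 1 -> 2 -> 3, stored as all ancestor/descendant pairs (plus self-links).
rows = [(1, 1, 0), (2, 2, 0), (3, 3, 0), (1, 2, 1), (2, 3, 1), (1, 3, 2)]
con.executemany('INSERT INTO closure VALUES (?, ?, ?)', rows)

old_parent, node = 2, 3
# Drop every link whose ancestor is an ancestor of the old parent and whose
# descendant is a descendant of the moved node -- the links that went "through" it.
con.execute('''
    DELETE FROM closure
    WHERE parent IN (SELECT parent FROM closure WHERE child = ?)
      AND child  IN (SELECT child  FROM closure WHERE parent = ?)''',
    (old_parent, node))
print(sorted(con.execute('SELECT parent, child FROM closure')))
# [(1, 1), (1, 2), (2, 2), (3, 3)]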
378,196
def template_filter(self, name=None): def decorator(f): self.add_template_filter(f, name=name) return f return decorator
A decorator that is used to register custom template filter. You can specify a name for the filter, otherwise the function name will be used. Example:: @app.template_filter() def reverse(s): return s[::-1] :param name: the optional name of the filter, otherwise the function name will be used.
378,197
def validate_proxy_granting_ticket(pgt, target_service): logger.debug("Proxy ticket request received for %s using %s" % (target_service, pgt)) pgt = ProxyGrantingTicket.objects.validate_ticket(pgt, target_service) pt = ProxyTicket.objects.create_ticket(service=target_service, user=pgt.user, granted_by_pgt=pgt) return pt
Validate a proxy granting ticket string. Return an ordered pair containing a ``ProxyTicket``, or a ``ValidationError`` if ticket validation failed.
378,198
def list_keyvaults_sub(access_token, subscription_id):
    # path segments assumed from the standard ARM provider layout for Key Vault
    endpoint = ''.join([get_rm_endpoint(),
                        '/subscriptions/', subscription_id,
                        '/providers/Microsoft.KeyVault/vaults',
                        '?api-version=', KEYVAULT_API])
    return do_get_next(endpoint, access_token)
Lists key vaults belonging to this subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. 200 OK.
378,199
def remove_env(environment):
    if not environment:
        print("You need to supply an environment name")
        return
    parser = read_config()
    if not parser.remove_section(environment):
        print("Unknown environment type '%s'" % environment)
        return
    write_config(parser)
    print("Removed environment '%s'" % environment)
Remove an environment from the configuration.
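The behaviour the messages rely on comes straight from ConfigParser: remove_section returns False for an unknown section and True when it actually removed one, as a quick standalone check shows:

import configparser

parser = configparser.ConfigParser()
parser.read_string('[staging]\nhost = example.org\n')

print(parser.remove_section('production'))   # False: nothing to remove
print(parser.remove_section('staging'))      # True: section dropped
print(parser.sections())                     # []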