Columns: "Unnamed: 0" (int64, values 0 to 389k), "code" (string, lengths 26 to 79.6k), "docstring" (string, lengths 1 to 46.9k)
387,000
def forward_substitution(matrix_l, matrix_b):
    q = len(matrix_b)
    matrix_y = [0.0 for _ in range(q)]
    matrix_y[0] = float(matrix_b[0]) / float(matrix_l[0][0])
    for i in range(1, q):
        matrix_y[i] = float(matrix_b[i]) - sum([matrix_l[i][j] * matrix_y[j] for j in range(0, i)])
        matrix_y[i] /= float(matrix_l[i][i])
    return matrix_y
Forward substitution method for the solution of linear systems. Solves the equation :math:`Ly = b` using the forward substitution method, where :math:`L` is a lower triangular matrix and :math:`b` is a column matrix. :param matrix_l: L, lower triangular matrix :type matrix_l: list, tuple :param matrix_b: b, column matrix :type matrix_b: list, tuple :return: y, column matrix :rtype: list
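A minimal usage sketch (the matrix and right-hand side are illustrative): with a unit lower-triangular L, each component of y follows from the ones already computed.

    L = [[1.0, 0.0, 0.0],
         [2.0, 1.0, 0.0],
         [3.0, 4.0, 1.0]]
    b = [1.0, 4.0, 14.0]
    print(forward_substitution(L, b))  # [1.0, 2.0, 3.0]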
387,001
def _get_qvm_based_on_real_device(name: str, device: Device, noisy: bool,
                                  connection: ForestConnection = None, qvm_type: str = 'qvm'):
    if noisy:
        noise_model = device.noise_model
    else:
        noise_model = None
    return _get_qvm_qc(name=name, connection=connection, device=device,
                       noise_model=noise_model, requires_executable=True,
                       qvm_type=qvm_type)
A QVM based on a real device. This is the most realistic QVM. :param name: The full name of this QVM :param device: The device from :py:func:`get_lattice`. :param noisy: Whether to construct a noisy quantum computer by using the device's associated noise model. :param connection: An optional :py:class:`ForestConnection` object. If not specified, the default values for URL endpoints will be used. :return: A pre-configured QuantumComputer based on the named device.
387,002
def valuefrompostdata(self, postdata):
    if self.id in postdata and postdata[self.id] != '':
        return int(postdata[self.id])
    else:
        return None
This parameter method searches the POST data and retrieves the values it needs. It does not set the value yet though, but simply returns it. Needs to be explicitly passed to parameter.set()
387,003
def compute(cls, observation, prediction):
    assert isinstance(observation, dict)
    assert isinstance(prediction, dict)
    # The dict keys and several lines were lost in extraction; they are
    # reconstructed from the pooled-variance formula below.
    p_mean = prediction['mean']
    p_std = prediction['std']
    o_mean = observation['mean']
    o_std = observation['std']
    try:
        # Pool the standard deviations, weighting by sample size.
        p_n = prediction['n']
        o_n = observation['n']
        s = (((p_n - 1) * (p_std ** 2) + (o_n - 1) * (o_std ** 2)) / (p_n + o_n - 2)) ** 0.5
    except KeyError:
        # Sample sizes unavailable; fall back to unweighted pooling.
        s = (p_std ** 2 + o_std ** 2) ** 0.5
    value = (p_mean - o_mean) / s
    value = utils.assert_dimensionless(value)
    return CohenDScore(value)
Compute a Cohen's D from an observation and a prediction.
387,004
def _line_wrapper(self, diffs):
    for fromdata, todata, flag in diffs:
        if flag is None:
            yield fromdata, todata, flag
            continue
        (fromline, fromtext), (toline, totext) = fromdata, todata
        # for each line of the text, split off the wrapped portions
        fromlist, tolist = [], []
        self._split_line(fromlist, fromline, fromtext)
        self._split_line(tolist, toline, totext)
        # yield from/to lines in pairs, inserting blank lines as necessary
        while fromlist or tolist:
            if fromlist:
                fromdata = fromlist.pop(0)
            else:
                fromdata = ('', ' ')
            if tolist:
                todata = tolist.pop(0)
            else:
                todata = ('', ' ')
            yield fromdata, todata, flag
Returns iterator that splits (wraps) mdiff text lines
387,005
def svd(g, svdcut=1e-12, wgts=False, add_svdnoise=False):
    # replace g by a copy of g
    if hasattr(g, 'keys'):
        is_dict = True
        g = BufferDict(g)
    else:
        is_dict = False

        class svdarray(numpy.ndarray):
            def __new__(cls, inputarray):
                obj = numpy.array(g).view(cls)
                return obj

        g = svdarray(g)
    idx_bcov = evalcov_blocks(g.flat)
    g.logdet = 0.0
    svdcorrection = numpy.zeros(len(g.flat), object)
    svdcorrection[:] = gvar(0, 0)
    g.eigen_range = 1.
    g.nmod = 0
    if wgts is not False:
        i_wgts = [([], [])]
    lost_modes = 0
    g.nblocks = {}
    for idx, block_cov in idx_bcov:
        g.nblocks[len(idx)] = g.nblocks.get(len(idx), 0) + 1
        if len(idx) == 1:
            i = idx[0]
            if block_cov[0, 0] == 0:
                g.logdet = numpy.inf
            else:
                g.logdet += numpy.log(block_cov[0, 0])
            if wgts is not False:
                i_wgts[0][0].append(i)
                i_wgts[0][1].append(block_cov[0, 0] ** (wgts * 0.5))
        else:
            s = SVD(block_cov, svdcut=svdcut, rescale=True, compute_delta=True)
            if s.D is not None:
                g.logdet -= 2 * sum(numpy.log(di) for di in s.D)
            g.logdet += sum(numpy.log(vali) for vali in s.val)
            g.nmod += s.nmod
            if s.delta is not None:
                if add_svdnoise:
                    for vali, valorigi, veci in zip(s.val, s.valorig, s.vec):
                        if vali > valorigi:
                            s.delta += (veci / s.D) * (
                                numpy.random.normal(0.0, (vali - valorigi) ** 0.5)
                            )
                svdcorrection[idx] = s.delta
                g.flat[idx] += s.delta
            elif svdcut is not None and svdcut < 0:
                newg = numpy.zeros(len(idx), object)
                for veci in s.vec:
                    veci_D = veci / s.D
                    newg += veci_D * (veci.dot(s.D * g.flat[idx]))
                lost_modes += len(idx) - len(s.vec)
                g.flat[idx] = newg
            if wgts is not False:
                i_wgts.append(
                    (idx, [w for w in s.decomp(wgts)[::-1]])
                )
            if s.eigen_range < g.eigen_range:
                g.eigen_range = s.eigen_range
    g.nmod += lost_modes
    g.dof = len(g.flat) - lost_modes
    g.svdcut = svdcut
    if is_dict:
        g.svdcorrection = BufferDict(g, buf=svdcorrection)
    else:
        g.svdcorrection = svdcorrection.reshape(g.shape)
    svd.dof = g.dof
    svd.nmod = g.nmod
    svd.eigen_range = g.eigen_range
    svd.logdet = g.logdet
    svd.correction = g.svdcorrection.flat[:]
    svd.nblocks = g.nblocks
    if wgts is not False:
        tmp = []
        for iw, wgts in i_wgts:
            tmp.append(
                (numpy.array(iw, numpy.intp), numpy.array(wgts, numpy.double))
            )
        i_wgts = tmp
        return (g, i_wgts)
    else:
        return g
Apply SVD cuts to collection of |GVar|\s in ``g``. Standard usage is, for example, :: svdcut = ... gmod = svd(g, svdcut=svdcut) where ``g`` is an array of |GVar|\s or a dictionary containing |GVar|\s and/or arrays of |GVar|\s. When ``svdcut>0``, ``gmod`` is a copy of ``g`` whose |GVar|\s have been modified to make their correlation matrix less singular than that of the original ``g``: each eigenvalue ``eig`` of the correlation matrix is replaced by ``max(eig, svdcut * max_eig)`` where ``max_eig`` is the largest eigenvalue. This SVD cut, which is applied separately to each block-diagonal sub-matrix of the correlation matrix, increases the variance of the eigenmodes with eigenvalues smaller than ``svdcut * max_eig``. The modification of ``g``'s covariance matrix is implemented by adding (to ``g``) a set of |GVar|\s with zero means:: gmod = g + gmod.svdcorrection where ``gmod.svdcorrection`` is an array/dictionary containing the |GVar|\s. If parameter ``add_svdnoise=True``, noise is included in ``gmod.svdcorrection``, :: gmod.svdcorrection += gv.sample(gmod.svdcorrection), before it is added to ``g``. The noise can be useful for testing fits and other applications. When ``svdcut`` is negative, eigenmodes of the correlation matrix whose eigenvalues are smaller than ``|svdcut| * max_eig`` are dropped from the new matrix and the corresponding components of ``g`` are zeroed out (that is, replaced by 0(0)) in ``gmod``. There is an additional parameter ``wgts`` in :func:`gvar.svd` whose default value is ``False``. Setting ``wgts=1`` or ``wgts=-1`` instead causes :func:`gvar.svd` to return a tuple ``(gmod, i_wgts)`` where ``gmod`` is the modified copy of ``g``, and ``i_wgts`` contains a spectral decomposition of the covariance matrix corresponding to the modified correlation matrix if ``wgts=1``, or a decomposition of its inverse if ``wgts=-1``. The first entry ``i, wgts = i_wgts[0]`` specifies the diagonal part of the matrix: ``i`` is a list of the indices in ``gmod.flat`` corresponding to diagonal elements, and ``wgts ** 2`` gives the corresponding matrix elements. The second and subsequent entries, ``i, wgts = i_wgts[n]`` for ``n > 0``, each correspond to block-diagonal sub-matrices, where ``i`` is the list of indices corresponding to the block, and ``wgts[j]`` are eigenvectors of the sub-matrix rescaled so that :: numpy.sum(numpy.outer(wi, wi) for wi in wgts[j]) is the sub-matrix (``wgts=1``) or its inverse (``wgts=-1``). To compute the inverse of the covariance matrix from ``i_wgts``, for example, one could use code like:: gmod, i_wgts = svd(g, svdcut=svdcut, wgts=-1) inv_cov = numpy.zeros((n, n), float) i, wgts = i_wgts[0] # 1x1 sub-matrices if len(i) > 0: inv_cov[i, i] = numpy.array(wgts) ** 2 for i, wgts in i_wgts[1:]: # nxn sub-matrices (n>1) for w in wgts: inv_cov[i[:, None], i] += numpy.outer(w, w) This sets ``inv_cov`` equal to the inverse of the covariance matrix of the ``gmod``\s. Similarly, we can compute the expectation value, ``u.dot(inv_cov.dot(v))``, between two vectors (:mod:`numpy` arrays) using:: result = 0.0 i, wgts = i_wgts[0] # 1x1 sub-matrices if len(i) > 0: result += numpy.sum((u[i] * wgts) * (v[i] * wgts)) for i, wgts in i_wgts[1:]: # nxn sub-matrices (n>1) result += numpy.sum(wgts.dot(u[i]) * wgts.dot(v[i])) where ``result`` is the desired expectation value. Args: g: An array of |GVar|\s or a dictionary whose values are |GVar|\s and/or arrays of |GVar|\s.
svdcut (None or float): If positive, replace eigenvalues ``eig`` of the correlation matrix with ``max(eig, svdcut * max_eig)`` where ``max_eig`` is the largest eigenvalue; if negative, discard eigenmodes with eigenvalues smaller than ``|svdcut| * max_eig``. Note ``|svdcut| < 1``. Default is 1e-12. wgts: Setting ``wgts=1`` causes :func:`gvar.svd` to compute and return a spectral decomposition of the covariance matrix of the modified |GVar|\s, ``gmod``. Setting ``wgts=-1`` results in a decomposition of the inverse of the covariance matrix. The default value is ``False``, in which case only ``gmod`` is returned. add_svdnoise: If ``True``, noise is added to the SVD correction (see above). Returns: A copy ``gmod`` of ``g`` whose correlation matrix is modified by SVD cuts. If ``wgts`` is not ``False``, a tuple ``(g, i_wgts)`` is returned where ``i_wgts`` contains a spectral decomposition of ``gmod``'s covariance matrix or its inverse. Data from the SVD analysis is stored in ``gmod``: .. attribute:: gmod.svdcut SVD cut used to create ``gmod``. .. attribute:: gmod.dof Number of independent degrees of freedom left after the SVD cut. This is the same as the number initially unless ``svdcut < 0`` in which case it may be smaller. .. attribute:: gmod.nmod Number of modes whose eigenvalue was modified by the SVD cut. .. attribute:: gmod.nblocks A dictionary where ``gmod.nblocks[s]`` contains the number of block-diagonal ``s``-by-``s`` sub-matrices in the correlation matrix. .. attribute:: gmod.eigen_range Ratio of the smallest to largest eigenvalue before SVD cuts are applied (but after rescaling). .. attribute:: gmod.logdet Logarithm of the determinant of the covariance matrix after SVD cuts are applied (excluding any omitted modes when ``svdcut < 0`` and any diagonal zero modes). .. attribute:: gmod.svdcorrection Array or dictionary containing the SVD corrections added to ``g`` to create ``gmod``: ``gmod = g + gmod.svdcorrection``.
387,006
def build_tensor_serving_input_receiver_fn(shape, dtype=tf.float32, batch_size=1):
    def serving_input_receiver_fn():
        # Prep a placeholder where the input example will be fed in
        # (the tensor name literal was lost in extraction; 'input_tensor' assumed).
        features = tf.placeholder(
            dtype=dtype, shape=[batch_size] + shape, name='input_tensor')
        return tf.estimator.export.TensorServingInputReceiver(
            features=features, receiver_tensors=features)

    return serving_input_receiver_fn
Returns an input_receiver_fn that can be used during serving. This expects examples to come through as float tensors, and simply wraps them as TensorServingInputReceivers. Arguably, this should live in tf.estimator.export. Testing here first. Args: shape: list representing target size of a single example. dtype: the expected datatype for the input example batch_size: number of input tensors that will be passed for prediction Returns: A function that itself returns a TensorServingInputReceiver.
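A hedged usage sketch: `estimator` is assumed to be an already-trained tf.estimator.Estimator, and the shape and export path are illustrative.

    receiver_fn = build_tensor_serving_input_receiver_fn(shape=[28, 28, 1])
    estimator.export_savedmodel('/tmp/export', receiver_fn)  # export_saved_model in newer TF 1.x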
387,007
def walk(self, basedir):
    system_d = SitePackagesDir()
    filter_system_d = system_d and os.path.commonprefix([system_d, basedir]) != system_d
    for root, dirs, files in os.walk(basedir, topdown=True):
        # skip hidden directories and private directories (leading underscore)
        dirs[:] = [d for d in dirs if d[0] != '.' and d[0] != "_"]
        if filter_system_d:
            dirs[:] = [d for d in dirs if not d.startswith(system_d)]
        yield root, dirs, files
Walk all the directories of basedir except hidden directories :param basedir: string, the directory to walk :returns: generator, same as os.walk
387,008
def on_play_speed(self, *args):
    Clock.unschedule(self.play)
    Clock.schedule_interval(self.play, 1.0 / self.play_speed)
Change the interval at which ``self.play`` is called to match my current ``play_speed``.
387,009
def NRTL(xs, taus, alphas):
    gammas = []
    cmps = range(len(xs))
    Gs = [[exp(-alphas[i][j]*taus[i][j]) for j in cmps] for i in cmps]
    for i in cmps:
        tn1, td1, total2 = 0., 0., 0.
        for j in cmps:
            # First term numerator and denominator
            tn1 += xs[j]*taus[j][i]*Gs[j][i]
            td1 += xs[j]*Gs[j][i]
            # Second term
            tn2 = xs[j]*Gs[i][j]
            td2 = td3 = sum([xs[k]*Gs[k][j] for k in cmps])
            tn3 = sum([xs[m]*taus[m][j]*Gs[m][j] for m in cmps])
            total2 += tn2/td2*(taus[i][j] - tn3/td3)
        gamma = exp(tn1/td1 + total2)
        gammas.append(gamma)
    return gammas
r'''Calculates the activity coefficients of each species in a mixture using the Non-Random Two-Liquid (NRTL) method, given their mole fractions, dimensionless interaction parameters, and nonrandomness constants. Those are normally correlated with temperature in some form, and need to be calculated separately. .. math:: \ln(\gamma_i)=\frac{\displaystyle\sum_{j=1}^{n}{x_{j}\tau_{ji}G_{ji}}} {\displaystyle\sum_{k=1}^{n}{x_{k}G_{ki}}}+\sum_{j=1}^{n} {\frac{x_{j}G_{ij}}{\displaystyle\sum_{k=1}^{n}{x_{k}G_{kj}}}} {\left ({\tau_{ij}-\frac{\displaystyle\sum_{m=1}^{n}{x_{m}\tau_{mj} G_{mj}}}{\displaystyle\sum_{k=1}^{n}{x_{k}G_{kj}}}}\right )} G_{ij}=\text{exp}\left ({-\alpha_{ij}\tau_{ij}}\right ) Parameters ---------- xs : list[float] Liquid mole fractions of each species, [-] taus : list[list[float]] Dimensionless interaction parameters of each compound with each other, [-] alphas : list[list[float]] Nonrandomness constants of each compound interacting with each other, [-] Returns ------- gammas : list[float] Activity coefficient for each species in the liquid mixture, [-] Notes ----- This model needs N^2 parameters. One common temperature dependence of the nonrandomness constants is: .. math:: \alpha_{ij}=c_{ij}+d_{ij}T Most correlations for the interaction parameters include some of the terms shown in the following form: .. math:: \tau_{ij}=A_{ij}+\frac{B_{ij}}{T}+\frac{C_{ij}}{T^{2}}+D_{ij} \ln{\left ({T}\right )}+E_{ij}T^{F_{ij}} Examples -------- Ethanol-water example, at 343.15 K and 1 MPa: >>> NRTL(xs=[0.252, 0.748], taus=[[0, -0.178], [1.963, 0]], ... alphas=[[0, 0.2974],[.2974, 0]]) [1.9363183763514304, 1.1537609663170014] References ---------- .. [1] Renon, Henri, and J. M. Prausnitz. "Local Compositions in Thermodynamic Excess Functions for Liquid Mixtures." AIChE Journal 14, no. 1 (1968): 135-144. doi:10.1002/aic.690140124. .. [2] Gmehling, Jurgen, Barbel Kolbe, Michael Kleiber, and Jurgen Rarey. Chemical Thermodynamics for Process Simulation. 1st edition. Weinheim: Wiley-VCH, 2012.
387,010
def resolve_object(self, object_arg_name, resolver):
    def decorator(func_or_class):
        if isinstance(func_or_class, type):
            # Handle Resource classes decorated as a whole.
            func_or_class._apply_decorator_to_methods(decorator)
            return func_or_class

        @wraps(func_or_class)
        def wrapper(*args, **kwargs):
            kwargs[object_arg_name] = resolver(kwargs)
            return func_or_class(*args, **kwargs)

        return wrapper

    return decorator
A helper decorator to resolve object instance from arguments (e.g. identity). Example: >>> @namespace.route('/<int:user_id>') ... class MyResource(Resource): ... @namespace.resolve_object( ... object_arg_name='user', ... resolver=lambda kwargs: User.query.get_or_404(kwargs.pop('user_id')) ... ) ... def get(self, user): ... # user is a User instance here
387,011
def _get(self, uri, params={}):
    if not uri.startswith(self.remote):
        # prefix relative endpoints with the remote base URL
        uri = '{}{}'.format(self.remote, uri)
    return self._make_request(uri, params)
HTTP GET function :param uri: REST endpoint :param params: optional HTTP params to pass to the endpoint :return: list of results (usually a list of dicts) Example: ret = cli.get('/search', params={ 'q': 'example.org' })
387,012
def _parse_csv_col_rules(self):
    self.cols = self.csv_line.split(',')
    self.table = self.extract_col(0)
    self.column = self.extract_col(1)
    self.data_type = self.extract_col(2)
    self.aikif_map = self.extract_col(3)
    self.aikif_map_name = self.extract_col(4)
    self.extract = self.extract_col(5)
    self.format = self.extract_col(6)
    self.where = self.extract_col(7)
    self.index = self.extract_col(8)
splits the CSV line of the current format and puts into local class variables - mainly for testing, though this is not the best method long term. (TODO - fix this)
387,013
def _join(segments):
    new = []
    start = segments[0][0]
    end = segments[0][1]
    for i in range(len(segments) - 1):
        if segments[i+1][0] != segments[i][1]:
            # gap between segments: close the current span
            new.append((start, end))
            start = segments[i+1][0]
        end = segments[i+1][1]
    new.append((start, end))
    return new
Simplify a segment list by joining adjacent segments.
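For example (input is illustrative): adjacent segments merge into one span, and a gap starts a new one.

    segments = [(0, 2), (2, 5), (7, 9), (9, 10)]
    print(_join(segments))  # [(0, 5), (7, 10)]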
387,014
def timestr_mod24(timestr: str) -> str:
    # Note: the original annotated the return as ``int``, but the function
    # returns a formatted time string, or None on failure.
    try:
        hours, mins, secs = [int(x) for x in timestr.split(":")]
        hours %= 24
        result = f"{hours:02d}:{mins:02d}:{secs:02d}"
    except Exception:
        result = None
    return result
Given a GTFS HH:MM:SS time string, return a timestring in the same format but with the hours taken modulo 24.
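For example (illustrative call):

    timestr_mod24("26:15:00")  # returns "02:15:00"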
387,015
def write_template(fn, lang="python"):
    with open(fn, "wb") as fh:
        if lang == "python":
            fh.write(PY_TEMPLATE)
        elif lang == "bash":
            fh.write(SH_TEMPLATE)
Write language-specific script template to file. Arguments: - fn(``string``) path to save the template to - lang('python', 'bash') which programming language
387,016
def fix_config(self, options):
    options = super(RenameRelation, self).fix_config(options)
    opt = "name"
    if opt not in options:
        options[opt] = "newname"
    if opt not in self.help:
        self.help[opt] = "The new relation name to use (string)."
    return options
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary. :param options: the options to fix :type options: dict :return: the (potentially) fixed options :rtype: dict
387,017
def get_link_name(self, tag, attrs, attr):
    # The tag/attribute literals were lost in extraction; the values below
    # follow the anchor/image logic implied by the surrounding code.
    if tag == 'a' and attr == 'href':
        data = self.parser.peek(MAX_NAMELEN)
        data = data.decode(self.parser.encoding, "ignore")
        name = linkname.href_name(data)
        if not name:
            name = attrs.get_true('title', u'')
    elif tag == 'img':
        name = attrs.get_true('alt', u'')
        if not name:
            name = attrs.get_true('title', u'')
    else:
        name = u""
    return name
Parse attrs for link name. Return name of link.
387,018
def memory_read16(self, addr, num_halfwords, zone=None):
    return self.memory_read(addr, num_halfwords, zone=zone, nbits=16)
Reads memory from the target system in units of 16-bits. Args: self (JLink): the ``JLink`` instance addr (int): start address to read from num_halfwords (int): number of half words to read zone (str): memory zone to read from Returns: List of halfwords read from the target system. Raises: JLinkException: if memory could not be read
387,019
def cli(env, identifier, name, all, note):
    vsi = SoftLayer.VSManager(env.client)
    vs_id = helpers.resolve_id(vsi.resolve_ids, identifier, 'VS')
    capture = vsi.capture(vs_id, name, all, note)

    # Table labels and dict keys below were lost in extraction and are
    # reconstructed from the SoftLayer CLI conventions.
    table = formatting.KeyValueTable(['name', 'value'])
    table.align['name'] = 'r'
    table.align['value'] = 'l'

    table.add_row(['vs_id', capture['guestId']])
    table.add_row(['date', capture['createDate'][:10]])
    table.add_row(['time', capture['createDate'][11:19]])
    table.add_row(['transaction', formatting.transaction_status(capture)])
    table.add_row(['transaction_id', capture['id']])
    table.add_row(['all_disks', all])
    env.fout(table)
Capture one or all disks from a virtual server to a SoftLayer image.
387,020
def _get_position(self, position, prev=False):
    if position == self.POSITION_LOADING:
        if prev:
            raise IndexError()
        else:
            return self._conversation.events[0].id_
    else:
        ev = self._conversation.next_event(position, prev=prev)
        if ev is None:
            if prev:
                return self.POSITION_LOADING
            else:
                raise IndexError()
        else:
            return ev.id_
Return the next/previous position or raise IndexError.
387,021
def store(self, deferred_result):
    self._counter += 1
    self._stored[self._counter] = deferred_result
    return self._counter
Store an EventualResult. Return an integer, a unique identifier that can be used to retrieve the object.
387,022
def mean_abs_tree_shap(model, data):
    def f(X):
        v = TreeExplainer(model).shap_values(X)
        if isinstance(v, list):
            return [np.tile(np.abs(sv).mean(0), (X.shape[0], 1)) for sv in v]
        else:
            return np.tile(np.abs(v).mean(0), (X.shape[0], 1))
    return f
mean(|TreeExplainer|). Plot metadata: color = red_blue_circle(0.25), linestyle = solid.
387,023
def changelist_view(self, request, extra_context=None):
    extra_context = extra_context or {}
    # The GET parameter name, separator, and context keys were lost in
    # extraction; 'object', ':', and descriptive key names are assumed.
    if 'object' in request.GET.keys():
        value = request.GET['object'].split(':')
        content_type = get_object_or_404(
            ContentType,
            id=value[0],
        )
        tracked_object = get_object_or_404(
            content_type.model_class(),
            id=value[1],
        )
        extra_context['tracked_object'] = tracked_object
        extra_context['tracked_object_opts'] = tracked_object._meta
    return super(TrackingEventAdmin, self).changelist_view(
        request, extra_context)
Get object currently tracked and add a button to get back to it
387,024
def resolve_redirects(self, resp, req, stream=False, timeout=None,
                      verify=True, cert=None, proxies=None,
                      yield_requests=False, **adapter_kwargs):
    hist = []  # keep track of history

    url = self.get_redirect_target(resp)
    while url:
        prepared_request = req.copy()

        # Update history and keep track of redirects.
        # resp.history must ignore the original request in this loop
        hist.append(resp)
        resp.history = hist[1:]

        try:
            resp.content  # Consume socket so it can be released
        except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
            resp.raw.read(decode_content=False)

        if len(resp.history) >= self.max_redirects:
            raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects, response=resp)

        # Release the connection back into the pool.
        resp.close()

        # Handle redirection without scheme (see: RFC 1808 Section 4)
        if url.startswith('//'):
            parsed_rurl = urlparse(resp.url)
            url = '%s:%s' % (to_native_string(parsed_rurl.scheme), url)

        parsed = urlparse(url)
        url = parsed.geturl()

        # Facilitate relative 'location' headers, as allowed by RFC 7231.
        if not parsed.netloc:
            url = urljoin(resp.url, requote_uri(url))
        else:
            url = requote_uri(url)

        prepared_request.url = to_native_string(url)

        self.rebuild_method(prepared_request, resp)

        # Purge body-related headers unless this is a 307/308 redirect.
        if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
            purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding')
            for header in purged_headers:
                prepared_request.headers.pop(header, None)
            prepared_request.body = None

        headers = prepared_request.headers
        try:
            del headers['Cookie']
        except KeyError:
            pass

        # Extract any cookies sent on the response to the cookiejar in the
        # new request. Use the old request, which we haven't mutated.
        extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
        merge_cookies(prepared_request._cookies, self.cookies)
        prepared_request.prepare_cookies(prepared_request._cookies)

        # Rebuild auth and proxy information.
        proxies = self.rebuild_proxies(prepared_request, proxies)
        self.rebuild_auth(prepared_request, resp)

        # Attempt to rewind the consumed file-like body, if needed.
        rewindable = (
            prepared_request._body_position is not None and
            ('Content-Length' in headers or 'Transfer-Encoding' in headers)
        )
        if rewindable:
            rewind_body(prepared_request)

        # Override the original request.
        req = prepared_request

        if yield_requests:
            yield req
        else:
            resp = self.send(
                req,
                stream=stream,
                timeout=timeout,
                verify=verify,
                cert=cert,
                proxies=proxies,
                allow_redirects=False,
                **adapter_kwargs
            )

            extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)

            # extract redirect url, if any, for the next loop
            url = self.get_redirect_target(resp)

        yield resp
Receives a Response. Returns a generator of Responses or Requests.
387,025
def _initialize(self, chain, length):
    if self._getfunc is None:
        self._getfunc = self.db.model._funs_to_tally[self.name]
    try:
        self._shape = np.shape(self._getfunc())
    except TypeError:
        self._shape = None
    self._vstr = ', '.join(var_str(self._shape))
    # If the table already exists, exit now.
    if chain != 0:
        return
    # Create the variable name strings; the SQL literal was lost in
    # extraction and is reconstructed here.
    vstr = ', '.join(v + ' FLOAT' for v in var_str(self._shape))
    query = ('CREATE TABLE IF NOT EXISTS [%s] '
             '(recid INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, '
             'trace int(5), %s)') % (self.name, vstr)
    self.db.cur.execute(query)
Create an SQL table.
387,026
def _read_execute_info(path, parents):
    path = os.path.join(path, "StarCraft II/ExecuteInfo.txt")
    if os.path.exists(path):
        with open(path, "rb") as f:
            for line in f:
                parts = [p.strip() for p in line.decode("utf-8").split("=")]
                if len(parts) == 2 and parts[0] == "executable":
                    exec_path = parts[1].replace("\\", "/")  # For windows compatibility.
                    for _ in range(parents):  # Skip the parent directories.
                        exec_path = os.path.dirname(exec_path)
                    return exec_path
Read the ExecuteInfo.txt file and return the base directory.
387,027
def Clift(Re):
    if Re < 0.01:
        return 24./Re + 3/16.
    elif Re < 20:
        return 24./Re*(1 + 0.1315*Re**(0.82 - 0.05*log10(Re)))
    elif Re < 260:
        return 24./Re*(1 + 0.1935*Re**(0.6305))
    elif Re < 1500:
        return 10**(1.6435 - 1.1242*log10(Re) + 0.1558*(log10(Re))**2)
    elif Re < 12000:
        return 10**(-2.4571 + 2.5558*log10(Re) - 0.9295*(log10(Re))**2
                    + 0.1049*log10(Re)**3)
    elif Re < 44000:
        return 10**(-1.9181 + 0.6370*log10(Re) - 0.0636*(log10(Re))**2)
    elif Re < 338000:
        return 10**(-4.3390 + 1.5809*log10(Re) - 0.1546*(log10(Re))**2)
    elif Re < 400000:
        return 29.78 - 5.3*log10(Re)
    else:
        return 0.19*log10(Re) - 0.49
r'''Calculates drag coefficient of a smooth sphere using the method in [1]_ as described in [2]_. .. math:: C_D = \left\{ \begin{array}{ll} \frac{24}{Re} + \frac{3}{16} & \mbox{if $Re < 0.01$}\\ \frac{24}{Re}(1 + 0.1315Re^{0.82 - 0.05\log Re}) & \mbox{if $0.01 < Re < 20$}\\ \frac{24}{Re}(1 + 0.1935Re^{0.6305}) & \mbox{if $20 < Re < 260$}\\ 10^{[1.6435 - 1.1242\log Re + 0.1558[\log Re]^2]} & \mbox{if $260 < Re < 1500$}\\ 10^{[-2.4571 + 2.5558\log Re - 0.9295[\log Re]^2 + 0.1049[\log Re]^3]} & \mbox{if $1500 < Re < 12000$}\\ 10^{[-1.9181 + 0.6370\log Re - 0.0636[\log Re]^2]} & \mbox{if $12000 < Re < 44000$}\\ 10^{[-4.3390 + 1.5809\log Re - 0.1546[\log Re]^2]} & \mbox{if $44000 < Re < 338000$}\\ 29.78 - 5.3\log Re & \mbox{if $338000 < Re < 400000$}\\ 0.19\log Re - 0.49 & \mbox{if $400000 < Re < 1000000$}\end{array} \right. Parameters ---------- Re : float Particle Reynolds number of the sphere using the surrounding fluid density and viscosity, [-] Returns ------- Cd : float Drag coefficient [-] Notes ----- Range is Re <= 1E6. Examples -------- >>> Clift(200) 0.7756342422322543 References ---------- .. [1] R. Clift, J.R. Grace, M.E. Weber, Bubbles, Drops, and Particles, Academic, New York, 1978. .. [2] Barati, Reza, Seyed Ali Akbar Salehi Neyshabouri, and Goodarz Ahmadi. "Development of Empirical Models with High Accuracy for Estimation of Drag Coefficient of Flow around a Smooth Sphere: An Evolutionary Approach." Powder Technology 257 (May 2014): 11-19. doi:10.1016/j.powtec.2014.02.045.
387,028
def list_clusters(self):
    resp = self._client.instance_admin_client.list_clusters(self.name)
    clusters = [Cluster.from_pb(cluster, self) for cluster in resp.clusters]
    return clusters, resp.failed_locations
List the clusters in this instance. For example: .. literalinclude:: snippets.py :start-after: [START bigtable_list_clusters_on_instance] :end-before: [END bigtable_list_clusters_on_instance] :rtype: tuple :returns: (clusters, failed_locations), where 'clusters' is list of :class:`google.cloud.bigtable.instance.Cluster`, and 'failed_locations' is a list of locations which could not be resolved.
387,029
def scaleToSeconds(requestContext, seriesList, seconds):
    for series in seriesList:
        series.name = "scaleToSeconds(%s,%d)" % (series.name, seconds)
        series.pathExpression = series.name
        factor = seconds * 1.0 / series.step
        for i, value in enumerate(series):
            series[i] = safeMul(value, factor)
    return seriesList
Takes one metric or a wildcard seriesList and returns "value per seconds", where seconds is the last argument to this function. Useful in conjunction with the derivative or integral functions if you want to normalize their results to a known resolution for arbitrary retentions.
387,030
def get_pull_request_query(self, queries, repository_id, project=None):
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if repository_id is not None:
        route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str')
    content = self._serialize.body(queries, 'GitPullRequestQuery')
    response = self._send(http_method='POST',
                          location_id='...',  # REST location GUID lost in extraction
                          version='5.1-preview.1',
                          route_values=route_values,
                          content=content)
    return self._deserialize('GitPullRequestQuery', response)
GetPullRequestQuery. [Preview API] This API is used to find what pull requests are related to a given commit. It can be used to either find the pull request that created a particular merge commit or it can be used to find all pull requests that have ever merged a particular commit. The input is a list of queries which each contain a list of commits. For each commit that you search against, you will get back a dictionary of commit -> pull requests. :param :class:`<GitPullRequestQuery> <azure.devops.v5_1.git.models.GitPullRequestQuery>` queries: The list of queries to perform. :param str repository_id: ID of the repository. :param str project: Project ID or project name :rtype: :class:`<GitPullRequestQuery> <azure.devops.v5_1.git.models.GitPullRequestQuery>`
387,031
def punchcard(self, branch='master', limit=None, days=None, by=None,
              normalize=None, ignore_globs=None, include_globs=None):
    ch = self.commit_history(
        branch=branch,
        limit=limit,
        days=days,
        ignore_globs=ignore_globs,
        include_globs=include_globs
    )

    # add in the date fields (column names follow the docstring)
    ch['day_of_week'] = ch.index.map(lambda x: x.weekday())
    ch['hour_of_day'] = ch.index.map(lambda x: x.hour)

    aggs = ['hour_of_day', 'day_of_week']
    if by is not None:
        aggs.append(by)

    punch_card = ch.groupby(aggs).agg({
        'lines': np.sum,
        'insertions': np.sum,
        'deletions': np.sum,
        'net': np.sum
    })
    punch_card.reset_index(inplace=True)

    # normalize all columns if requested
    if normalize is not None:
        for col in ['lines', 'insertions', 'deletions', 'net']:
            punch_card[col] = (punch_card[col] / punch_card[col].sum()) * normalize

    return punch_card
Returns a pandas DataFrame containing all of the data for a punchcard. * day_of_week * hour_of_day * author / committer * lines * insertions * deletions * net :param branch: the branch to return commits for :param limit: (optional, default=None) a maximum number of commits to return, None for no limit :param days: (optional, default=None) number of days to return, if limit is None :param by: (optional, default=None) agg by options, None for no aggregation (just a high level punchcard), or 'committer', 'author' :param normalize: (optional, default=None) if an integer, returns the data normalized to max value of that (for plotting) :param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing :param include_globs: (optional, default=None) a list of globs to include, default of None includes everything. :return: DataFrame
387,032
def __draw_cmp(self, obj1, obj2):
    if obj1.draw_order > obj2.draw_order:
        return 1
    elif obj1.draw_order < obj2.draw_order:
        return -1
    else:
        return 0
Defines how our drawable objects should be sorted
387,033
def destroy_iam(app='', env='dev', **_):
    # The client name, action names, and log messages below were lost in
    # extraction; action names follow the boto3 IAM API, log messages are
    # plausible reconstructions.
    session = boto3.Session(profile_name=env)
    client = session.client('iam')

    generated = get_details(env=env, app=app)
    generated_iam = generated.iam()
    app_details = collections.namedtuple('AppDetails', generated_iam.keys())
    details = app_details(**generated_iam)

    LOG.debug('Application details: %s', details)

    resource_action(
        client,
        action='remove_user_from_group',
        log_format='Removed user from group: %(UserName)s ~> %(GroupName)s',
        GroupName=details.group,
        UserName=details.user)
    resource_action(client, action='delete_user',
                    log_format='Destroyed user: %(UserName)s',
                    UserName=details.user)
    resource_action(client, action='delete_group',
                    log_format='Destroyed group: %(GroupName)s',
                    GroupName=details.group)
    resource_action(
        client,
        action='remove_role_from_instance_profile',
        log_format='Removed role from instance profile: %(InstanceProfileName)s ~> %(RoleName)s',
        InstanceProfileName=details.profile,
        RoleName=details.role)
    resource_action(
        client,
        action='delete_instance_profile',
        log_format='Destroyed instance profile: %(InstanceProfileName)s',
        InstanceProfileName=details.profile)

    role_policies = []
    try:
        role_policies = resource_action(
            client, action='list_role_policies',
            log_format='Found role policies for %(RoleName)s.',
            RoleName=details.role)['PolicyNames']
    except TypeError:
        LOG.info('Role %s not found.', details.role)

    for policy in role_policies:
        resource_action(
            client,
            action='delete_role_policy',
            log_format='Removed inline policy from role: %(PolicyName)s ~> %(RoleName)s',
            RoleName=details.role,
            PolicyName=policy)

    attached_role_policies = []
    try:
        attached_role_policies = resource_action(
            client, action='list_attached_role_policies',
            log_format='Found attached role policies for %(RoleName)s.',
            RoleName=details.role)['AttachedPolicies']
    except TypeError:
        LOG.info('Role %s not found.', details.role)

    for policy in attached_role_policies:
        resource_action(
            client,
            action='detach_role_policy',
            log_format='Detached policy from role: %(PolicyArn)s ~> %(RoleName)s',
            RoleName=details.role,
            PolicyArn=policy['PolicyArn'])

    resource_action(client, action='delete_role',
                    log_format='Destroyed role: %(RoleName)s',
                    RoleName=details.role)
Destroy IAM Resources. Args: app (str): Spinnaker Application name. env (str): Deployment environment, i.e. dev, stage, prod. Returns: True upon successful completion.
387,034
def undersampling(X, y, cost_mat=None, per=0.5):
    n_samples = X.shape[0]
    num_y1 = y.sum()
    num_y0 = n_samples - num_y1

    filter_rand = np.random.rand(int(num_y1 + num_y0))

    # Randomly drop examples of the majority class until the minority class
    # makes up the requested fraction ``per``.
    if num_y1 < num_y0:
        num_y0_new = num_y1 * 1.0 / per - num_y1
        num_y0_new_per = num_y0_new * 1.0 / num_y0
        filter_0 = np.logical_and(y == 0, filter_rand <= num_y0_new_per)
        filter_ = np.nonzero(np.logical_or(y == 1, filter_0))[0]
    else:
        num_y1_new = num_y0 * 1.0 / per - num_y0
        num_y1_new_per = num_y1_new * 1.0 / num_y1
        filter_1 = np.logical_and(y == 1, filter_rand <= num_y1_new_per)
        filter_ = np.nonzero(np.logical_or(y == 0, filter_1))[0]

    X_u = X[filter_, :]
    y_u = y[filter_]

    if cost_mat is not None:
        cost_mat_u = cost_mat[filter_, :]
        return X_u, y_u, cost_mat_u
    else:
        return X_u, y_u
Under-sampling. Parameters ---------- X : array-like of shape = [n_samples, n_features] The input samples. y : array-like of shape = [n_samples] Ground truth (correct) labels. cost_mat : array-like of shape = [n_samples, 4], optional (default=None) Cost matrix of the classification problem Where the columns represents the costs of: false positives, false negatives, true positives and true negatives, for each example. per: float, optional (default = 0.5) Percentage of the minority class in the under-sampled data
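A hedged usage sketch on synthetic data (array sizes are illustrative):

    import numpy as np
    X = np.random.rand(200, 3)
    y = (np.random.rand(200) < 0.1).astype(int)  # imbalanced, roughly 10% positives
    X_u, y_u = undersampling(X, y, per=0.5)      # positives now about 50% of X_u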
387,035
def respond_from_question(self, question, user_question, importance):
    option_index = user_question.answer_text_to_option[
        question.their_answer
    ].id
    self.respond(question.id, [option_index], [option_index], importance)
Copy the answer given in `question` to the logged in user's profile. :param question: A :class:`~.Question` instance to copy. :param user_question: An instance of :class:`~.UserQuestion` that corresponds to the same question as `question`. This is needed to retrieve the answer id from the question text answer on question. :param importance: The importance to assign to the response to the answered question.
387,036
def unpublish(namespace, name, version, registry=None):
    registry = registry or Registry_Base_URL
    # The URL template literal was lost in extraction; the path layout
    # below is a plausible reconstruction.
    url = '%s/%s/%s/versions/%s' % (
        registry,
        namespace,
        name,
        version
    )
    headers = _headersForRegistry(registry)
    response = requests.delete(url, headers=headers)
    response.raise_for_status()
    return None
Try to unpublish a recently published version. Return any errors that occur.
387,037
def solarzenithangle(time: datetime, glat: float, glon: float, alt_m: float) -> tuple:
    time = totime(time)
    obs = EarthLocation(lat=glat*u.deg, lon=glon*u.deg, height=alt_m*u.m)
    times = Time(time, scale='ut1')  # time scale literal lost in extraction; 'ut1' assumed
    sun = get_sun(times)
    sunobs = sun.transform_to(AltAz(obstime=times, location=obs))
    return 90 - sunobs.alt.degree, sun, sunobs
Compute the solar zenith angle. Input: time: scalar or array of datetime; glat, glon: observer geodetic latitude and longitude [deg]; alt_m: observer altitude [m]. Returns: solar zenith angle [deg], the sun's coordinates, and the sun in the observer's AltAz frame.
387,038
def is_ancestor_of_bin(self, id_, bin_id):
    if self._catalog_session is not None:
        return self._catalog_session.is_ancestor_of_catalog(id_=id_, catalog_id=bin_id)
    return self._hierarchy_session.is_ancestor(id_=id_, ancestor_id=bin_id)
Tests if an ``Id`` is an ancestor of a bin. arg: id (osid.id.Id): an ``Id`` arg: bin_id (osid.id.Id): the ``Id`` of a bin return: (boolean) - ``true`` if this ``id`` is an ancestor of ``bin_id,`` ``false`` otherwise raise: NotFound - ``bin_id`` is not found raise: NullArgument - ``id`` or ``bin_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* *implementation notes*: If ``id`` not found return ``false``.
387,039
def report_question(self, concern, pub_name, ext_name, question_id):
    route_values = {}
    if pub_name is not None:
        route_values['publisherName'] = self._serialize.url('pub_name', pub_name, 'str')
    if ext_name is not None:
        route_values['extensionName'] = self._serialize.url('ext_name', ext_name, 'str')
    if question_id is not None:
        route_values['questionId'] = self._serialize.url('question_id', question_id, 'long')
    content = self._serialize.body(concern, 'Concern')
    response = self._send(http_method='POST',
                          location_id='...',  # REST location GUID lost in extraction
                          version='5.1-preview.1',
                          route_values=route_values,
                          content=content)
    return self._deserialize('Concern', response)
ReportQuestion. [Preview API] Flags a concern with an existing question for an extension. :param :class:`<Concern> <azure.devops.v5_1.gallery.models.Concern>` concern: User reported concern with a question for the extension. :param str pub_name: Name of the publisher who published the extension. :param str ext_name: Name of the extension. :param long question_id: Identifier of the question to be updated for the extension. :rtype: :class:`<Concern> <azure.devops.v5_1.gallery.models.Concern>`
387,040
def spearmanr(x, y):
    from scipy import stats
    if not x or not y:
        return 0
    corr, pvalue = stats.spearmanr(x, y)
    return corr
Michiel de Hoon's library (available in BioPython or standalone as PyCluster) returns Spearman rsb which does include a tie correction. >>> x = [5.05, 6.75, 3.21, 2.66] >>> y = [1.65, 26.5, -5.93, 7.96] >>> z = [1.65, 2.64, 2.64, 6.95] >>> round(spearmanr(x, y), 4) 0.4 >>> round(spearmanr(x, z), 4) -0.6325
387,041
def form_user_label_matrix(user_twitter_list_keywords_gen, id_to_node, max_number_of_labels):
    user_label_matrix, annotated_nodes, label_to_lemma, node_to_lemma_tokeywordbag = \
        form_user_term_matrix(user_twitter_list_keywords_gen, id_to_node, None)
    user_label_matrix, annotated_nodes, label_to_lemma = \
        filter_user_term_matrix(user_label_matrix, annotated_nodes, label_to_lemma, max_number_of_labels)
    lemma_to_keyword = form_lemma_tokeyword_map(annotated_nodes, node_to_lemma_tokeywordbag)
    return user_label_matrix, annotated_nodes, label_to_lemma, lemma_to_keyword
Forms the user-label matrix to be used in multi-label classification. Input: - user_twitter_list_keywords_gen: - id_to_node: A Twitter id to node map as a python dictionary. Outputs: - user_label_matrix: A user-to-label matrix in scipy sparse matrix format. - annotated_nodes: A numpy array containing graph nodes. - label_to_lemma: A python dictionary that maps a numerical label to a string topic lemma. - lemma_to_keyword: A python dictionary that maps a lemma to the original keyword.
387,042
def _mapping_to_tuple_pairs(d):
    # Sort keys so the output order is deterministic.
    t = []
    ord_keys = sorted(d.keys())
    for k in ord_keys:
        t.append(_product(k, d[k]))
    return tuple(product(*t))
Convert a mapping object (such as a dictionary) to tuple pairs, using its keys and values to generate the pairs and then generating all possible combinations between those e.g. {1: (1,2,3)} -> (((1, 1),), ((1, 2),), ((1, 3),))
387,043
def set_record(self, record, **kw):
    if isstring(record):
        card = FITSCard(record)
        self.update(card)
        self.verify()
    else:
        if isinstance(record, FITSRecord):
            self.update(record)
        elif isinstance(record, dict):
            # Dict key literals were lost in extraction; 'name', 'value',
            # and 'card_string' are assumed.
            if 'name' in record and 'value' in record:
                self.update(record)
            elif 'card_string' in record:
                self.set_record(record['card_string'])
            else:
                raise ValueError(
                    'record dict must have name and value, or card_string')
        else:
            raise ValueError("record must be a string card or "
                             "dictionary or FITSRecord")
Check the record is valid and set keys in the dict. parameters ---------- record: string or dict A dict representing a record, or a string representing a FITS header card
387,044
def assert_strong_password(username, password, old_password=None):
    try:
        minlength = settings.MIN_PASSWORD_LENGTH
    except AttributeError:
        minlength = 12
    if len(password) < minlength:
        raise ValueError(
            "Password must be at least %s characters long" % minlength)
    if username is not None and username in password:
        raise ValueError("Password contains username")
    return _assert_password(password, old_password)
Raises ValueError if the password isn't strong. Returns the password otherwise.
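For example (values are illustrative; assumes the default 12-character minimum and that `_assert_password` accepts the first password):

    assert_strong_password("alice", "correct-horse-battery")  # returns the password
    assert_strong_password("alice", "alice-has-a-password")   # ValueError: Password contains username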
387,045
def libvlc_audio_set_format(mp, format, rate, channels):
    f = _Cfunctions.get('libvlc_audio_set_format', None) or \
        _Cfunction('libvlc_audio_set_format', ((1,), (1,), (1,), (1,),), None,
                   None, MediaPlayer, ctypes.c_char_p, ctypes.c_uint, ctypes.c_uint)
    return f(mp, format, rate, channels)
Set decoded audio format. This only works in combination with L{libvlc_audio_set_callbacks}(), and is mutually exclusive with L{libvlc_audio_set_format_callbacks}(). @param mp: the media player. @param format: a four-characters string identifying the sample format (e.g. "S16N" or "FL32"). @param rate: sample rate (expressed in Hz). @param channels: channels count. @version: LibVLC 2.0.0 or later.
387,046
def event_update_status(self, event_id, status, scores=[], account=None, **kwargs):
    if not account:
        if "default_account" in self.config:
            account = self.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")
    account = Account(account)
    event = Event(event_id)

    # do not attempt to set the same status again
    if event["status"] == status:
        status = None

    op = operations.Event_update_status(
        **{
            "fee": {"amount": 0, "asset_id": "1.3.0"},
            "event_id": event["id"],
            "status": status,
            "scores": scores,
            "prefix": self.prefix,
        }
    )
    return self.finalizeOp(op, account["name"], "active", **kwargs)
Update the status of an event. This needs to be **proposed**. :param str event_id: Id of the event to update :param str status: Event status :param list scores: List of strings that represent the scores of a match (defaults to []) :param str account: (optional) the account to allow access to (defaults to ``default_account``)
387,047
def _get_first_urn(self, urn):
    urn = URN(urn)
    subreference = None
    textId = urn.upTo(URN.NO_PASSAGE)
    if urn.reference is not None:
        subreference = str(urn.reference)
    firstId = self.resolver.getTextualNode(textId=textId, subreference=subreference).firstId
    r = render_template(
        "cts/GetFirstUrn.xml",
        firstId=firstId,
        full_urn=textId,
        request_urn=str(urn)
    )
    return r, 200, {"content-type": "application/xml"}
Provisional route for GetFirstUrn request :param urn: URN to filter the resource :param inv: Inventory Identifier :return: GetFirstUrn response
387,048
def _startMqtt(self):
    # The info-message literal was lost in extraction; a plausible one is used.
    LOGGER.info('Connecting to MQTT... {}:{}'.format(self._server, self._port))
    try:
        self._mqttc.connect_async('{}'.format(self._server), int(self._port), 10)
        self._mqttc.loop_forever()
    except Exception as ex:
        template = "An exception of type {0} occurred. Arguments:\n{1!r}"
        message = template.format(type(ex).__name__, ex.args)
        LOGGER.error("MQTT Connection error: {}".format(message), exc_info=True)
The client start method. Starts the thread for the MQTT Client and publishes the connected message.
387,049
def get(self, request, provider=None):
    if USING_ALLAUTH:
        self.social_auths = request.user.socialaccount_set.all()
    else:
        self.social_auths = request.user.social_auth.all()

    self.social_friend_lists = []

    # if the user does not have any social account, redirect or render empty
    if self.social_auths.count() == 0:
        if REDIRECT_IF_NO_ACCOUNT:
            return HttpResponseRedirect(REDIRECT_URL)
        return super(FriendListView, self).get(request)

    self.social_friend_lists = SocialFriendList.objects.get_or_create_with_social_auths(self.social_auths)
    return super(FriendListView, self).get(request)
prepare the social friend model
387,050
def rates_angles(fk_candidate_observations):
    detections = fk_candidate_observations.get_sources()
    for detection in detections:
        measures = detection.get_readings()
        for measure in measures:
            pass  # per-measure processing lost in extraction


def main():
    # NOTE: most option strings below were lost in extraction and are
    # inferred from how the parsed arguments are used afterwards;
    # args.field and args.ccd are also referenced later, but their
    # add_argument calls do not appear in the extracted text.
    parser = argparse.ArgumentParser()
    parser.add_argument('--astrom-filename', default=None,
                        help="Give the astrom file directly instead of looking-up "
                             "using the field/ccd naming scheme.")
    parser.add_argument('--reals', action='store_true', default=False)
    parser.add_argument('--type', choices=['o', 'p', 's'],
                        help="Which type of image.", default='p')
    parser.add_argument('--measure3', default='vos:OSSOS/measure3')
    parser.add_argument('--dbimages', default=None)
    parser.add_argument('--debug', action='store_true', default=False)
    parser.add_argument('--verbose', action='store_true', default=False)
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO)

    prefix = ''
    ext = args.reals and 'reals' or 'cands'
    storage.MEASURE3 = args.measure3
    if args.dbimages is not None:
        storage.DBIMAGES = args.dbimages
        astrom.DATASET_ROOT = args.dbimages

    astrom_uri = storage.get_cands_uri(args.field,
                                       ccd=args.ccd,
                                       version=args.type,
                                       prefix=prefix,
                                       ext="measure3.{}.astrom".format(ext))
    if args.astrom_filename is None:
        astrom_filename = os.path.basename(astrom_uri)
    else:
        astrom_filename = args.astrom_filename

    if not os.access(astrom_filename, os.F_OK):
        astrom_filename = os.path.dirname(astrom_uri) + "/" + astrom_filename

    fk_candidate_observations = astrom.parse(astrom_filename)
:param fk_candidate_observations: name of the fk*reals.astrom file to check against Object.planted
387,051
def keep_mask(nkeep, X_train, y_train, X_test, y_test, attr_test, model_generator, metric, trained_model, random_state):
    X_train, X_test = to_array(X_train, X_test)

    assert X_train.shape[1] == X_test.shape[1]

    # mask everything but the nkeep most important features for each sample
    X_test_tmp = X_test.copy()
    yp_masked_test = np.zeros(y_test.shape)
    tie_breaking_noise = const_rand(X_train.shape[1], random_state) * 1e-6
    mean_vals = X_train.mean(0)
    for i in range(len(y_test)):
        if nkeep[i] < X_test.shape[1]:
            ordering = np.argsort(-attr_test[i, :] + tie_breaking_noise)
            X_test_tmp[i, ordering[nkeep[i]:]] = mean_vals[ordering[nkeep[i]:]]

    yp_masked_test = trained_model.predict(X_test_tmp)

    return metric(y_test, yp_masked_test)
The model is re-evaluated for each test sample with the non-important features set to their mean.
387,052
def logpdf(self, mu):
    if self.transform is not None:
        mu = self.transform(mu)
    return ss.t.logpdf(mu, df=self.df0, loc=self.loc0, scale=self.scale0)
Log PDF for t prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - log(p(mu))
387,053
def linear_elasticity(grid, spacing=None, E=1e5, nu=0.3, format=None):
    if len(grid) == 2:
        return q12d(grid, spacing=spacing, E=E, nu=nu, format=format)
    else:
        # the original raised the non-exception ``NotImplemented``;
        # NotImplementedError is the correct type here
        raise NotImplementedError('no support for grid=%s' % str(grid))
Linear elasticity problem discretized with Q1 finite elements on a regular rectangular grid. Parameters ---------- grid : tuple length 2 tuple of grid sizes, e.g. (10, 10) spacing : tuple length 2 tuple of grid spacings, e.g. (1.0, 0.1) E : float Young's modulus nu : float Poisson's ratio format : string Format of the returned sparse matrix (eg. 'csr', 'bsr', etc.) Returns ------- A : csr_matrix FE Q1 stiffness matrix B : array rigid body modes See Also -------- linear_elasticity_p1 Notes ----- - only 2d for now Examples -------- >>> from pyamg.gallery import linear_elasticity >>> A, B = linear_elasticity((4, 4)) References ---------- .. [1] J. Alberty, C. Carstensen, S. A. Funken, and R. Klose. "Matlab implementation of the finite element method in elasticity" Computing, Volume 69, Issue 3 (November 2002) Pages: 239 - 263 http://www.math.hu-berlin.de/~cc/
387,054
def get_meta_image_url(request, image):
    # The rendition filter spec literal was lost in extraction;
    # 'original' is assumed.
    rendition = image.get_rendition(filter='original')
    return request.build_absolute_uri(rendition.url)
Resize an image for metadata tags, and return an absolute URL to it.
387,055
def StartAFF4Flow(args=None, runner_args=None, parent_flow=None, sync=True,
                  token=None, **kwargs):
    if runner_args is None:
        runner_args = rdf_flow_runner.FlowRunnerArgs()
    FilterArgsFromSemanticProtobuf(runner_args, kwargs)

    # Find the flow class.
    try:
        flow_cls = registry.AFF4FlowRegistry.FlowClassByName(runner_args.flow_name)
    except ValueError:
        stats_collector_instance.Get().IncrementCounter(
            "grr_flow_invalid_flow_count")
        raise RuntimeError("Unable to locate flow %s" % runner_args.flow_name)

    if not token:
        raise access_control.UnauthorizedAccess("A token must be specified.")

    # Make the token a superuser token with a long expiry.
    token = token.SetUID()
    token.expiry = rdfvalue.RDFDatetime.FromHumanReadable("2997-01-01")

    if flow_cls.category and not runner_args.client_id:
        raise RuntimeError("Flow with category (user-visible flow) has to be "
                           "started on a client, but runner_args.client_id "
                           "is missing.")

    flow_obj = aff4.FACTORY.Create(None, flow_cls, token=token)

    if args is None:
        args = flow_obj.args_type()
    FilterArgsFromSemanticProtobuf(args, kwargs)
    args.Validate()
    flow_obj.args = args
    flow_obj.runner_args = runner_args

    # At this point args are assigned to the flow, so any remaining kwargs
    # are invalid.
    if kwargs:
        raise type_info.UnknownArg("Unknown parameters to StartAFF4Flow: %s" % kwargs)

    if parent_flow:
        parent_runner = parent_flow.runner
    else:
        parent_runner = None
    runner = flow_obj.CreateRunner(
        parent_runner=parent_runner, runner_args=runner_args)

    logging.info(u"Scheduling %s(%s) on %s", flow_obj.urn,
                 runner_args.flow_name, runner_args.client_id)
    if sync:
        flow_obj.Start()
    else:
        runner.CallState(next_state="Start")

    if not flow_obj.outstanding_requests:
        flow_obj.Terminate()
    flow_obj.Close()

    if parent_flow is None:
        events.Events.PublishEvent(
            "Audit",
            rdf_events.AuditEvent(
                user=token.username,
                action="RUN_FLOW",
                flow_name=runner_args.flow_name,
                urn=flow_obj.urn,
                client=runner_args.client_id),
            token=token)

    return flow_obj.urn
The main factory function for creating and executing a new flow. Args: args: An arg protocol buffer which is an instance of the required flow's args_type class attribute. runner_args: an instance of FlowRunnerArgs() protocol buffer which is used to initialize the runner for this flow. parent_flow: A parent flow or None if this is a top level flow. sync: If True, the Start method of this flow will be called inline. Otherwise we schedule the starting of this flow on another worker. token: Security credentials token identifying the user. **kwargs: If args or runner_args are not specified, we construct these protobufs from these keywords. Returns: the session id of the flow. Raises: RuntimeError: Unknown or invalid parameters were provided.
387,056
def _handle_chat_name(self, data):
    self.room.user.nick = data
    self.conn.enqueue_data("user", self.room.user)
Handle user name changes
387,057
def get_PSD(self, NPerSegment=1000000, window="hann", timeStart=None, timeEnd=None, override=False):
    if timeStart == None and timeEnd == None:
        freqs, PSD = calc_PSD(self.voltage, self.SampleFreq, NPerSegment=NPerSegment)
        self.PSD = PSD
        self.freqs = freqs
    else:
        if timeStart == None:
            timeStart = self.timeStart
        if timeEnd == None:
            timeEnd = self.timeEnd

        time = self.time.get_array()

        StartIndex = _np.where(time == take_closest(time, timeStart))[0][0]
        EndIndex = _np.where(time == take_closest(time, timeEnd))[0][0]

        if EndIndex == len(time) - 1:
            EndIndex = EndIndex + 1  # include the last element

        freqs, PSD = calc_PSD(self.voltage[StartIndex:EndIndex], self.SampleFreq, NPerSegment=NPerSegment)
        if override == True:
            self.freqs = freqs
            self.PSD = PSD

    return freqs, PSD
Extracts the power spectral density (PSD) from the data. Parameters ---------- NPerSegment : int, optional Length of each segment used in scipy.welch default = 1000000 window : str or tuple or array_like, optional Desired window to use. See get_window for a list of windows and required parameters. If window is array_like it will be used directly as the window and its length will be used for nperseg. default = "hann" Returns ------- freqs : ndarray Array containing the frequencies at which the PSD has been calculated PSD : ndarray Array containing the value of the PSD at the corresponding frequency value in V**2/Hz
387,058
def action_draft(self):
    for rec in self:
        # The state and message literals were lost in extraction;
        # plausible values are substituted.
        if not rec.state == 'rejected':
            raise UserError(
                _('Only rejected change requests can be reset to draft.'))
        if not (rec.am_i_owner or rec.am_i_approver):
            raise UserError(
                _('You must be the owner or an approver '
                  'to reset this change request to draft.'))
        rec.write({'state': 'draft'})
Set a change request as draft
387,059
def extract(data, items, out_dir=None):
    if vcfutils.get_paired_phenotype(data):
        if len(items) == 1:
            germline_vcf = _remove_prioritization(data["vrn_file"], data, out_dir)
            germline_vcf = vcfutils.bgzip_and_index(germline_vcf, data["config"])
            data["vrn_file_plus"] = {"germline": germline_vcf}
    return data
Extract germline calls for the given sample, if tumor only.
387,060
def on_failure(self, exc, task_id, args, kwargs, einfo):
    use_exc = str(exc)
    log.error(("{} FAIL - exc={} "
               "args={} kwargs={}")
              .format(
                  self.log_label,
                  use_exc,
                  args,
                  kwargs))
on_failure http://docs.celeryproject.org/en/latest/userguide/tasks.html#task-inheritance :param exc: exception :param task_id: task id :param args: arguments passed into task :param kwargs: keyword arguments passed into task :param einfo: exception info
387,061
def anonymous_user_required(*decorator_args, msg=None, category=None, redirect_url=None):
    def wrapper(fn):
        @wraps(fn)
        def decorated(*args, **kwargs):
            if current_user.is_authenticated:
                if request.is_json:
                    abort(HTTPStatus.FORBIDDEN)
                else:
                    if msg:
                        flash(msg, category)
                    # the redirect endpoint literal was lost in extraction;
                    # a post-login endpoint setting is assumed here
                    return redirect('SECURITY_POST_LOGIN_REDIRECT_ENDPOINT',
                                    override=redirect_url)
            return fn(*args, **kwargs)
        return decorated

    if decorator_args and callable(decorator_args[0]):
        return wrapper(decorator_args[0])
    return wrapper
Decorator requiring that there is no user currently logged in. Aborts with ``HTTP 403: Forbidden`` if there is an authenticated user.
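A hedged usage sketch on a Flask view (route, template, and messages are illustrative):

    @app.route('/register')
    @anonymous_user_required(msg='You are already logged in.', category='info')
    def register():
        return render_template('register.html')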
387,062
def check(self, **kwargs):
    errors = super().check(**kwargs)
    multitenant_staticfiles_dirs = settings.MULTITENANT_STATICFILES_DIRS
    if not isinstance(multitenant_staticfiles_dirs, (list, tuple)):
        errors.append(
            Error(
                "Your MULTITENANT_STATICFILES_DIRS setting is not a tuple or list.",
                hint="Perhaps you forgot a trailing comma?",
            )
        )
    return errors
In addition to parent class' checks, also ensure that MULTITENANT_STATICFILES_DIRS is a tuple or a list.
387,063
def draft_pick(self):
    doc = self.get_main_doc()
    try:
        # The selector, prefix, and regex literals were lost in extraction;
        # plausible values are substituted.
        p_tags = doc('p')
        draft_p_tag = next(p for p in p_tags.items()
                           if p.text().lower().startswith('draft'))
        draft_pick = int(re.search(r'(\d+)\w{2} overall', draft_p_tag.text()).group(1))
        return draft_pick
    except Exception:
        return None
Returns where in the draft the player was picked. :returns: TODO
387,064
def log_train_metric(period, auto_reset=False):
    def _callback(param):
        if param.nbatch % period == 0 and param.eval_metric is not None:
            name_value = param.eval_metric.get_name_value()
            for name, value in name_value:
                logging.info('Iter[%d] Batch[%d] Train-%s=%f',
                             param.epoch, param.nbatch, name, value)
            if auto_reset:
                param.eval_metric.reset_local()
    return _callback
Callback to log the training evaluation result every period. Parameters ---------- period : int The number of batch to log the training evaluation metric. auto_reset : bool Reset the metric after each log. Returns ------- callback : function The callback function that can be passed as iter_epoch_callback to fit.
387,065
def get_attributes(file, *, attributes=None, mime_type=None,
                   force_document=False, voice_note=False, video_note=False,
                   supports_streaming=False):
    # Metadata key literals below were lost in extraction and are
    # reconstructed from the attribute names they feed.
    name = file if isinstance(file, str) else getattr(file, 'name', 'unnamed')
    if mime_type is None:
        mime_type = mimetypes.guess_type(name)[0]

    attr_dict = {types.DocumentAttributeFilename:
                 types.DocumentAttributeFilename(os.path.basename(name))}

    if is_audio(file):
        m = _get_metadata(file)
        if m:
            attr_dict[types.DocumentAttributeAudio] = \
                types.DocumentAttributeAudio(
                    voice=voice_note,
                    title=m.get('title') if m.has('title') else None,
                    performer=m.get('author') if m.has('author') else None,
                    duration=int(m.get('duration').seconds if m.has('duration') else 0)
                )

    if not force_document and is_video(file):
        m = _get_metadata(file)
        if m:
            doc = types.DocumentAttributeVideo(
                round_message=video_note,
                w=m.get('width') if m.has('width') else 0,
                h=m.get('height') if m.has('height') else 0,
                duration=int(m.get('duration').seconds if m.has('duration') else 0),
                supports_streaming=supports_streaming
            )
        else:
            doc = types.DocumentAttributeVideo(
                0, 1, 1, round_message=video_note,
                supports_streaming=supports_streaming)

        attr_dict[types.DocumentAttributeVideo] = doc

    if voice_note:
        if types.DocumentAttributeAudio in attr_dict:
            attr_dict[types.DocumentAttributeAudio].voice = True
        else:
            attr_dict[types.DocumentAttributeAudio] = \
                types.DocumentAttributeAudio(0, voice=True)

    if attributes:
        for a in attributes:
            attr_dict[type(a)] = a

    if not mime_type:
        mime_type = 'application/octet-stream'

    return list(attr_dict.values()), mime_type
Get a list of attributes for the given file and the mime type as a tuple ([attribute], mime_type).
387,066
def delimited_file(
    self,
    hdfs_dir,
    schema,
    name=None,
    database=None,
    delimiter=',',
    na_rep=None,
    escapechar=None,
    lineterminator=None,
    external=True,
    persist=False,
):
    name, database = self._get_concrete_table_path(
        name, database, persist=persist
    )
    stmt = ddl.CreateTableDelimited(
        name,
        hdfs_dir,
        schema,
        database=database,
        delimiter=delimiter,
        external=external,
        na_rep=na_rep,
        lineterminator=lineterminator,
        escapechar=escapechar,
    )
    self._execute(stmt)
    return self._wrap_new_table(name, database, persist)
Interpret delimited text files (CSV / TSV / etc.) as an Ibis table. See `parquet_file` for more exposition on what happens under the hood. Parameters ---------- hdfs_dir : string HDFS directory name containing delimited text files schema : ibis Schema name : string, default None Name for temporary or persistent table; otherwise random one generated database : string Database to create the (possibly temporary) table in delimiter : length-1 string, default ',' Pass None if there is no delimiter escapechar : length-1 string Character used to escape special characters lineterminator : length-1 string Character used to delimit lines external : boolean, default True Create table as EXTERNAL (data will not be deleted on drop). Note that if persist=False and external=False, whatever data you reference will be deleted persist : boolean, default False If True, do not delete the table upon garbage collection of ibis table object Returns ------- delimited_table : ImpalaTable
387,067
def _to_dict(self):
    _dict = {}
    if hasattr(self, 'document') and self.document is not None:
        _dict['document'] = self.document
    if hasattr(self, 'targets') and self.targets is not None:
        _dict['targets'] = self.targets
    return _dict
Return a json dictionary representing this model.
387,068
def ImportConfig(filename, config):
    sections_to_import = ["PrivateKeys"]
    entries_to_import = [
        "Client.executable_signing_public_key", "CA.certificate",
        "Frontend.certificate"
    ]
    options_imported = 0
    old_config = grr_config.CONFIG.MakeNewConfig()
    old_config.Initialize(filename)

    for entry in old_config.raw_data:
        try:
            section = entry.split(".")[0]
            if section in sections_to_import or entry in entries_to_import:
                config.Set(entry, old_config.Get(entry))
                print("Imported %s." % entry)
                options_imported += 1
        except Exception as e:
            print("Exception during import of %s: %s" % (entry, e))
    return options_imported
Reads an old config file and imports keys and user accounts.
387,069
def _create_node(self, index: int, name: str, external_id: Optional[str] = None) -> IGraphNode:
    return IGraphNode(graph=self._graph, index=index, name=name,
                      external_id=external_id)
Returns a new `IGraphNode` instance with the given index and name. Arguments: index (int): The index of the node to create. name (str): The name of the node to create. external_id (Optional[str]): The external ID of the node.
387,070
async def info(self, fields: Iterable[str] = None) -> dict:
    # Reconstructed from the garbled source: the default field list, the
    # GraphQL query template, and the request path survive in the residue;
    # the Request call signature is assumed.
    if fields is None:
        fields = ('access_key', 'secret_key', 'is_active', 'is_admin')
    q = 'query { keypair { $fields } }'
    q = q.replace('$fields', ' '.join(fields))
    rqst = Request(self.session, 'POST', '/admin/graphql')
    rqst.set_json({'query': q})
    async with rqst.fetch() as resp:
        data = await resp.json()
        return data['keypair']
Returns the keypair's information such as resource limits. :param fields: Additional per-agent query fields to fetch. .. versionadded:: 18.12
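A hedged sketch of calling this coroutine; `KeyPair` and `session` stand in for the client library's actual objects and are assumptions here.

import asyncio

async def main():
    kp = KeyPair(session)  # assumed constructor and session object
    info = await kp.info(fields=['access_key', 'is_active'])
    print(info)

asyncio.run(main())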
387,071
def from_points(cls, iterable_of_points): return MultiPoint([(p.lon, p.lat) for p in iterable_of_points])
Creates a MultiPoint from an iterable collection of `pyowm.utils.geo.Point` instances :param iterable_of_points: iterable whose items are `pyowm.utils.geo.Point` instances :type iterable_of_points: iterable :return: a *MultiPoint* instance
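A short usage example, assuming pyowm's geo helpers are importable as documented:

from pyowm.utils.geo import Point, MultiPoint

points = [Point(20.8, 30.9), Point(1.2, 0.4)]  # Point takes (lon, lat)
mp = MultiPoint.from_points(points)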
387,072
def es_version(self, url):
    try:
        res = self.grimoire_con.get(url)
        res.raise_for_status()
        major = res.json()['version']['number'].split(".")[0]
    except Exception:
        logger.error("Error retrieving Elasticsearch version: " + url)
        raise
    return major
Get the Elasticsearch version. This is useful because Elasticsearch and Kibiter are paired (same major version for 5, 6). :param url: Elasticsearch url hosting Kibiter indices :returns: major version, as string
387,073
def get_iam_policy(self): checker = AwsLimitChecker() policy = checker.get_required_iam_policy() return json.dumps(policy, sort_keys=True, indent=2)
Return the current IAM policy as a json-serialized string
387,074
def default_project(self, value):
    if value is not None:
        assert type(value) is unicode, \
            "'{0}' attribute: '{1}' type is not 'unicode'!".format(
                "default_project", value)
    self.__default_project = value
Setter for **self.__default_project** attribute. :param value: Attribute value. :type value: unicode
387,075
def ConsumeCommentOrTrailingComment(self): just_started = self._line == 0 and self._column == 0 before_parsing = self._previous_line comment = self.ConsumeComment() trailing = (self._previous_line == before_parsing and not just_started) return trailing, comment
Consumes a comment, returns a 2-tuple (trailing bool, comment str).
387,076
def run(self):
    # Seed the RNGs in the worker so results are reproducible
    random.seed(self.seed)
    np.random.seed(self.np_seed)
    if not isinstance(self, multiprocessing.Process):
        mx.random.seed(self.mx_seed)

    # Signal whether the stream could be iterated at all
    try:
        stream_iter = iter(self.stream)
        self._errorq.put(None)
    except Exception as e:
        tb = traceback.format_exc()
        self._errorq.put((e, tb))

    while True:
        try:  # Check the control queue for a shutdown request
            c = self._controlq.get(False)
            if c is None:
                break
            else:
                raise RuntimeError('Got unexpected control code {}'.format(repr(c)))
        except queue.Empty:
            pass
        except RuntimeError as e:
            tb = traceback.format_exc()
            self._errorq.put((e, tb))
            self._dataq.put(None)

        try:
            data = next(stream_iter)
            error = None
        except Exception as e:
            tb = traceback.format_exc()
            error = (e, tb)
            data = None
        finally:
            self._errorq.put(error)
            self._dataq.put(data)
Method representing the process’s activity.
387,077
def process_frame(self, f, frame_str):
    frame_type = f.cmd.lower()

    if frame_type in ['disconnect']:
        return

    if frame_type == 'send':
        frame_type = 'message'
        f.cmd = 'MESSAGE'

    if frame_type in ['connected', 'message', 'receipt', 'error', 'heartbeat']:
        if frame_type == 'message':
            if f.headers['destination'] not in self.subscriptions.values():
                return
            (f.headers, f.body) = self.notify('before_message', f.headers, f.body)
        self.notify(frame_type, f.headers, f.body)

    if 'receipt' in f.headers:
        receipt_frame = Frame('RECEIPT', {'receipt-id': f.headers['receipt']})
        lines = convert_frame(receipt_frame)
        self.send(encode(pack(lines)))

    log.debug("Received frame: %r, headers=%r, body=%r", f.cmd, f.headers, f.body)
:param Frame f: Frame object :param bytes frame_str: Raw frame content
387,078
def rmd_options_to_metadata(options):
    options = re.split(r'\s|,', options, 1)
    if len(options) == 1:
        language = options[0]
        chunk_options = []
    else:
        language, others = options
        language = language.rstrip(' ,')
        others = others.lstrip(' ,')
        chunk_options = parse_rmd_options(others)

    language = 'R' if language == 'r' else language
    metadata = {}
    for i, opt in enumerate(chunk_options):
        name, value = opt
        if i == 0 and name == '':
            metadata['name'] = value
            continue
        else:
            if update_metadata_from_rmd_options(name, value, metadata):
                continue
        try:
            metadata[name] = _py_logical_values(value)
            continue
        except RLogicalValueError:
            metadata[name] = value

    for name in metadata:
        try_eval_metadata(metadata, name)

    if ('active' in metadata or
            metadata.get('run_control', {}).get('frozen') is True) \
            and 'eval' in metadata:
        del metadata['eval']

    return metadata.get('language') or language, metadata
Parse rmd options and return a metadata dictionary :param options: :return:
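An illustrative call, assuming the helpers above are importable; the options string mimics an Rmd chunk header such as {r setup, echo=FALSE}:

language, metadata = rmd_options_to_metadata('r setup, echo=FALSE')
print(language)  # 'R'
print(metadata)  # a dict of parsed chunk options, e.g. a 'name' entry of 'setup'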
387,079
def add_file(self, name, filename, compress_hint=True):
    return self.add_stream(name, open(filename, 'rb'))
Saves the actual file in the store. ``compress_hint`` suggests whether the file should be compressed before transfer Works like :meth:`add_stream`, but ``filename`` is the name of an existing file in the filesystem.
387,080
def zone_absent(domain, profile):
    zones = __salt__['libcloud_dns.list_zones'](profile)
    matching_zone = [z for z in zones if z['domain'] == domain]
    if not matching_zone:
        return state_result(True, 'Zone already absent', domain)
    else:
        result = __salt__['libcloud_dns.delete_zone'](
            matching_zone[0]['id'], profile)
        return state_result(result, 'Deleted zone', domain)
Ensures a zone is absent. :param domain: Zone name, i.e. the domain name :type domain: ``str`` :param profile: The profile key :type profile: ``str``
387,081
def view_cancel_edit(name=None):
    if name is None:
        return redirect('/')
    else:
        files = glob.glob("{0}.rst".format(name))
        if len(files) > 0:
            reset_to_last_commit()
            return redirect('/' + name)
        else:
            return abort(404)
Cancel the edition of an existing page, then render the page as of the last commit. .. note:: this is a bottle view If no page name is given, do nothing (it may leave some .tmp. files in the directory). Keyword Arguments: :name: (str) -- name of the page (OPTIONAL) Returns: bottle response object
387,082
def timezone(self, value=0.0):
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError(
                'value {} need to be of type float '
                'for field `timezone`'.format(value))
        if value < -12.0:
            raise ValueError('value need to be greater or equal -12.0 '
                             'for field `timezone`')
        if value > 12.0:
            raise ValueError('value need to be smaller 12.0 '
                             'for field `timezone`')
    self._timezone = value
Corresponds to IDD Field `timezone` Time relative to GMT. Args: value (float): value for IDD Field `timezone` Unit: hr - not on standard units list??? Default value: 0.0 value >= -12.0 value <= 12.0 if `value` is None it will not be checked against the specification and is assumed to be a missing value Raises: ValueError: if `value` is not a valid value
387,083
def view_structure(self, only_chains=None, opacity=1.0, recolor=False, gui=False):
    if ssbio.utils.is_ipynb():
        import nglview as nv
    else:
        raise EnvironmentError('Not in a Jupyter notebook environment')

    if not self.structure_file:
        raise ValueError("Structure file not loaded")

    only_chains = ssbio.utils.force_list(only_chains)
    # Build an NGL selection string like "( :A or :B )"
    to_show_chains = '( '
    for c in only_chains:
        to_show_chains += ':{} or'.format(c)
    to_show_chains = to_show_chains.strip(' or')
    to_show_chains += ' )'

    if self.file_type == 'mmtf' or self.file_type == 'mmtf.gz':
        view = nv.NGLWidget()
        view.add_component(self.structure_path)
    else:
        view = nv.show_structure_file(self.structure_path, gui=gui)

    if recolor:
        view.clear_representations()
        if only_chains:
            view.add_cartoon(selection='protein and ({})'.format(to_show_chains),
                             color='silver', opacity=opacity)
        else:
            view.add_cartoon(selection='protein', color='silver', opacity=opacity)
    elif only_chains:
        view.clear_representations()
        view.add_cartoon(selection='protein and ({})'.format(to_show_chains),
                         color='silver', opacity=opacity)

    return view
Use NGLviewer to display a structure in a Jupyter notebook Args: only_chains (str, list): Chain ID or IDs to display opacity (float): Opacity of the structure recolor (bool): If structure should be cleaned and recolored to silver gui (bool): If the NGLview GUI should show up Returns: NGLviewer object
387,084
def upload_feature_value_file(self, mapobject_type_name, plate_name,
                              well_name, well_pos_y, well_pos_x, tpoint,
                              filename, index_col):
    logger.info('upload feature value file "%s"', filename)
    if not filename.lower().endswith('.csv'):
        raise IOError('Filename must have ".csv" extension.')
    filename = os.path.expanduser(os.path.expandvars(filename))
    data = pd.read_csv(filename, index_col=index_col)
    self._upload_feature_values(
        mapobject_type_name, plate_name, well_name, well_pos_y, well_pos_x,
        tpoint, data
    )
Uploads feature values for the given :class:`MapobjectType <tmlib.models.mapobject.MapobjectType>` at the specified :class:`Site <tmlib.models.site.Site>`. Parameters ---------- mapobject_type_name: str type of the segmented objects plate_name: str name of the plate well_name: str name of the well well_pos_y: int y-position of the site relative to the well grid well_pos_x: int x-position of the site relative to the well grid tpoint: int zero-based time point index filename: str path to the file on disk index_col: str column name containing the object labels See also -------- :func:`tmserver.api.feature.add_feature_values` :class:`tmlib.models.feature.FeatureValues`
387,085
def answer(self, headers, **options):
    self._steps.append(Answer(headers, **options).obj)
Places a call or sends an IM, Twitter, or SMS message. To start a call, use the Session API to tell Tropo to launch your code. Arguments: headers is a String. Argument: **options is a set of optional keyword arguments. See https://www.tropo.com/docs/webapi/answer
387,086
def semilocal_linear_trend_transition_matrix(autoregressive_coef): fixed_entries = tf.constant( [[1., 1.], [0., 0.]], dtype=autoregressive_coef.dtype) autoregressive_coef_mask = tf.constant([[0., 0.], [0., 1.]], dtype=autoregressive_coef.dtype) bottom_right_entry = (autoregressive_coef[..., tf.newaxis, tf.newaxis] * autoregressive_coef_mask) return tf.linalg.LinearOperatorFullMatrix( fixed_entries + bottom_right_entry)
Build the transition matrix for a semi-local linear trend model.
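A quick check of what the operator materializes for a scalar coefficient rho: the fixed part contributes [[1, 1], [0, 0]] and the masked part adds rho in the bottom-right, giving the semi-local linear trend transition [[1, 1], [0, rho]].

import tensorflow as tf

rho = tf.constant(0.5)
op = semilocal_linear_trend_transition_matrix(rho)
print(op.to_dense())  # [[1., 1.], [0., 0.5]]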
387,087
def __handle_changed_state(self, state): timeval = self.__get_timeval() events = self.__get_button_events(state, timeval) events.extend(self.__get_axis_events(state, timeval)) if events: self.__write_to_character_device(events, timeval)
We need to pack a struct with the following five numbers: tv_sec, tv_usec, ev_type, code, value, then write it using __write_to_character_device. For the time (seconds, microseconds) we just use now; ev_type and code we look up; value is 0 or 1 for the buttons. Axis value is maybe the same as Linux? Hope so!
387,088
def update_title_to_proceeding(self):
    titles = record_get_field_instances(self.record, tag="245")
    for title in titles:
        subs = field_get_subfields(title)
        new_subs = []
        if "a" in subs:
            new_subs.append(("a", subs["a"][0]))
        if "b" in subs:
            new_subs.append(("c", subs["b"][0]))
        record_add_field(self.record, tag="111", subfields=new_subs)
    record_delete_fields(self.record, tag="245")
    record_delete_fields(self.record, tag="246")
Move title info from 245 to 111 proceeding style.
387,089
def popup(self, title, callfn, initialdir=None): super(DirectorySelection, self).popup(title, callfn, initialdir)
Let user select a directory.
387,090
def after_request(self, f): self.record_once(lambda s: s.app.after_request_funcs .setdefault(self.name, []).append(f)) return f
Like :meth:`Flask.after_request` but for a blueprint. This function is only executed after each request that is handled by a function of that blueprint.
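Typical usage of the decorator on a blueprint (standard Flask API, shown here as a sketch):

from flask import Blueprint

bp = Blueprint('api', __name__)

@bp.after_request
def add_header(response):
    response.headers['X-Served-By'] = 'api-blueprint'  # hypothetical header
    return response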
387,091
def load_directory(self, directory, ext=None):
    self._say("Loading from directory: " + directory)

    if ext is None:
        # Use the default extensions.
        ext = ['.rive', '.rs']
    elif type(ext) == str:
        # Convert a single extension into a list.
        ext = [ext]

    if not os.path.isdir(directory):
        self._warn("Error: " + directory + " is not a directory.")
        return

    for root, subdirs, files in os.walk(directory):
        for file in files:
            for extension in ext:
                if file.lower().endswith(extension):
                    # Load this file.
                    self.load_file(os.path.join(root, file))
                    break
Load RiveScript documents from a directory. :param str directory: The directory of RiveScript documents to load replies from. :param []str ext: List of file extensions to consider as RiveScript documents. The default is ``[".rive", ".rs"]``.
387,092
def _preprocess(df): df = df.stack() df.index.rename(["id", "time"], inplace=True) df.name = "value" df = df.reset_index() return df
given a DataFrame where records are stored row-wise, rearrange it such that records are stored column-wise.
387,093
def receive_loop_with_callback(self, queue_name, callback): self.connect() channel = self.create_channel(queue_name) channel.basic_qos(prefetch_count=1) channel.basic_consume(callback, queue=queue_name) channel.start_consuming()
Process incoming messages with callback until close is called. :param queue_name: str: name of the queue to poll :param callback: func(ch, method, properties, body) called with data when data arrives :return:
387,094
def install_python_package(self, arch, name=None, env=None, is_dir=True):
    if env is None:
        env = self.get_recipe_env(arch)

    with current_directory(self.get_build_dir(arch.arch)):
        hostpython = sh.Command(self.ctx.hostpython)

        # 'clean --all' is an assumed first step; the literals were elided
        shprint(hostpython, 'setup.py', 'clean', '--all', _env=env)
        shprint(hostpython, 'setup.py', 'install', '-O2',
                '--root={}'.format(self.ctx.get_python_install_dir()),
                '--install-lib=lib/python2.7/site-packages', _env=env)
Automate the installation of a Python package (or a cython package where the cython components are pre-built).
387,095
def _generate_main_scripts(self):
    head = self.parser.find('head').first_result()
    if head is not None:
        common_functions_script = self.parser.find(
            '#' + AccessibleEventImplementation.ID_SCRIPT_COMMON_FUNCTIONS
        ).first_result()
        if common_functions_script is None:
            common_functions_file = open(
                os.path.join(
                    os.path.dirname(os.path.dirname(os.path.dirname(
                        os.path.realpath(__file__)
                    ))),
                    'js',
                    'common.js'  # assumed script file name
                ),
                'r'
            )
            common_functions_content = common_functions_file.read()
            common_functions_file.close()

            common_functions_script = self.parser.create_element('script')
            common_functions_script.set_attribute(
                'id',
                AccessibleEventImplementation.ID_SCRIPT_COMMON_FUNCTIONS
            )
            common_functions_script.set_attribute('type', 'text/javascript')
            common_functions_script.append_text(common_functions_content)
            head.prepend_element(common_functions_script)
        if (
            self.parser.find(
                '#' + AccessibleEventImplementation.ID_SCRIPT_EVENT_LISTENER
            ).first_result() is None
        ):
            event_listener_file = open(
                os.path.join(
                    os.path.dirname(os.path.dirname(os.path.dirname(
                        os.path.realpath(__file__)
                    ))),
                    'js',
                    'eventlistener.js'  # assumed script file name
                ),
                'r'
            )
            event_listener_script_content = event_listener_file.read()
            event_listener_file.close()

            script = self.parser.create_element('script')
            script.set_attribute(
                'id',
                AccessibleEventImplementation.ID_SCRIPT_EVENT_LISTENER
            )
            script.set_attribute('type', 'text/javascript')
            script.append_text(event_listener_script_content)
            common_functions_script.insert_after(script)
    local = self.parser.find('body').first_result()
    if local is not None:
        self.script_list = self.parser.find(
            '#' + AccessibleEventImplementation.ID_LIST_IDS_SCRIPT
        ).first_result()
        if self.script_list is None:
            self.script_list = self.parser.create_element('script')
            self.script_list.set_attribute(
                'id',
                AccessibleEventImplementation.ID_LIST_IDS_SCRIPT
            )
            self.script_list.set_attribute('type', 'text/javascript')
            # Assumed names for the id lists consumed by the included scripts;
            # the exact string literals were elided in the source.
            self.script_list.append_text('var activeElements = [];')
            self.script_list.append_text('var hoverElements = [];')
            self.script_list.append_text('var dragElements = [];')
            self.script_list.append_text('var dropElements = [];')
            local.append_element(self.script_list)
        if self.parser.find(
            '#' + AccessibleEventImplementation.ID_FUNCTION_SCRIPT_FIX
        ).first_result() is None:
            include_file = open(
                os.path.join(
                    os.path.dirname(os.path.dirname(os.path.dirname(
                        os.path.realpath(__file__)
                    ))),
                    'js',
                    'include.js'  # assumed script file name
                ),
                'r'
            )
            local_include_script_content = include_file.read()
            include_file.close()

            script_function = self.parser.create_element('script')
            script_function.set_attribute(
                'id',
                AccessibleEventImplementation.ID_FUNCTION_SCRIPT_FIX
            )
            script_function.set_attribute('type', 'text/javascript')
            script_function.append_text(local_include_script_content)
            local.append_element(script_function)
    self.main_script_added = True
Include the scripts used by solutions.
387,096
def translate(self, dx, dy): vec = numpy.array((dx, dy)) self.polygons = [points + vec for points in self.polygons] return self
Move the polygons from one place to another Parameters ---------- dx : number distance to move in the x-direction dy : number distance to move in the y-direction Returns ------- out : ``PolygonSet`` This object.
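A short usage sketch, assuming gdspy-style polygon sets:

import gdspy

poly = gdspy.PolygonSet([[(0, 0), (1, 0), (1, 1)]])
poly.translate(2.0, 0.5)  # shifts every vertex by (2.0, 0.5) and returns self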
387,097
def project(self, **kwargs: Dict[str, Any]) -> Union[Hist, Dict[str, Hist]]: if self.single_observable_projection: return self._project_single_observable(**kwargs) else: return self._project_dict(**kwargs)
Perform the requested projection(s). Note: All cuts on the original histograms will be reset when this function is completed. Args: kwargs (dict): Additional named args to be passed to projection_name(...) and output_key_name(...) Returns: The projected histogram(s). The projected histograms are also stored in ``output_observable``.
387,098
def pretty_exe_doc(program, parser, stack=1, under='-'):
    # Only rewrite the docstring when building docs; the invoking-program
    # check ('sphinx-build') is an assumption, as the literal was elided
    if os.path.basename(sys.argv[0]) == 'sphinx-build':
        # Get the module that called this function
        mod = inspect.getmodule(inspect.stack()[stack][0])
        # Accept either a parser or a callable producing one
        _parser = parser() if '__call__' in dir(parser) else parser
        _parser.set_usage(mod.__usage__.replace('%prog', program))
        # Prepend the program header and the formatted help to the docstring
        mod.__doc__ = '\n'.join(['', program, under * len(program), '']
                                + ['    %s' % l for l in
                                   _parser.format_help().split('\n')]) + \
            mod.__doc__
Takes the name of a script and a parser that will give the help message for it. The docstring of the module that called this function will then get a header, followed immediately by the help message generated by the OptionParser :param str program: Name of the program that we want to make the header for :param optparse.OptionParser parser: Either a parser or a callable with no arguments that will give the desired parser :param int stack: How far up the stack to get the docstring to change :param str under: The character you want for the program underline
387,099
def _setup_process_environment(self, env):
    environ = self._process.processEnvironment()
    if env is None:
        env = {}
    for k, v in os.environ.items():
        environ.insert(k, v)
    for k, v in env.items():
        environ.insert(k, v)
    if sys.platform != 'win32':
        # Assumed variable names/values; the literals were elided in the source
        environ.insert('TERM', 'xterm')
        environ.insert('LINES', '24')
        environ.insert('COLUMNS', '450')
        environ.insert('PYTHONUNBUFFERED', '1')
        environ.insert('QT_LOGGING_TO_CONSOLE', '1')
    return environ
Sets up the process environment.