Dataset columns: content (string, length 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def passwordbox(**kwargs): """ This wrapper is for making a dialog for changing your password. It will return the old password, the new password, and a confirmation. The remaining keywords are passed on to the autobox class. """ additional_fields = kwargs.get("additional_fields") and kwargs.pop("additional_fields") or [] title = kwargs.pop("title_string", "Change your password") header = kwargs.get("header_string") and kwargs.pop("header_string") or "Change your password" default_fields = [ {"type" : "label", "label" : "First type your old password"}, {"name" : "old_password", "type" : "hidden_text", "label" : "Old Password: "}, {"type" : "label", "label": "Now enter your new password twice"}, {"name" : "new_password", "type" : "hidden_text", "label" : "New Password: "}, {"name" : "confirm_password", "type" : "hidden_text", "label" : "Confirm Password: "} ] fields = default_fields + additional_fields return autobox(fields = fields, title_string = title, header_string = header, **kwargs)
ff38d854a8d7303bbf58654e220c0b24b3ede105
3,652,313
def unravel_hpx_index(idx, npix): """Convert flattened global map index to an index tuple. Parameters ---------- idx : `~numpy.ndarray` Flat index. npix : `~numpy.ndarray` Number of pixels in each band. Returns ------- idx : tuple of `~numpy.ndarray` Index array for each dimension of the map. """ if npix.size == 1: return tuple([idx]) dpix = np.zeros(npix.size, dtype="i") dpix[1:] = np.cumsum(npix.flat[:-1]) bidx = np.searchsorted(np.cumsum(npix.flat), idx + 1) pix = idx - dpix[bidx] return tuple([pix] + list(np.unravel_index(bidx, npix.shape)))
c7fa097ffeae3219d59526ed76d62383277d317b
3,652,314
def parse_revdep(value): """Value should be an atom, packages with deps intersecting that match.""" try: targetatom = atom.atom(value) except atom.MalformedAtom as e: raise argparser.error(e) val_restrict = values.FlatteningRestriction( atom.atom, values.AnyMatch(values.FunctionRestriction(targetatom.intersects))) return packages.OrRestriction(*list( packages.PackageRestriction(dep, val_restrict) for dep in ('bdepend', 'depend', 'rdepend', 'pdepend')))
eb2118af7644fac15fa4ebedba6684d20ab18d47
3,652,316
def is_context_word(model, word_a, word_b): """Calculates probability that both words appear in context with each other by executing forward pass of model. Args: model (Model): keras model word_a (int): index of first word word_b (int): index of second word """ # define inputs input_a = np.zeros((1,)) input_b = np.zeros((1,)) input_a[0,] = word_a input_b[0,] = word_b # compute forward pass of model prediction = model.predict_on_batch([input_a, input_b]) # retrieve value from tf tensor prediction = prediction.numpy()[0][0] return prediction
a7b0642cfc97b21e53f8b42eaddbed69689a0f1d
3,652,317
def next_method(): """next, for: Get one item of an iterators.""" class _Iterator: def __init__(self): self._stop = False def __next__(self): if self._stop: raise StopIteration() self._stop = True return "drums" return next(_Iterator())
85cdd08a65ae66c2869ba2067db81ff37f40d0b8
3,652,319
import json def get_ingress_deployment( serve_dag_root_node: DAGNode, pipeline_input_node: PipelineInputNode ) -> Deployment: """Return an Ingress deployment to handle user HTTP inputs. Args: serve_dag_root_node (DAGNode): Transformed as serve DAG's root. User inputs are translated to serve_dag_root_node.execute(). pipeline_input_node (DAGNode): Singleton PipelineInputNode instance that contains input preprocessor info. Returns: ingress (Deployment): Generated pipeline ingress deployment to serve user HTTP requests. """ serve_dag_root_json = json.dumps(serve_dag_root_node, cls=DAGNodeEncoder) preprocessor_import_path = pipeline_input_node.get_preprocessor_import_path() serve_dag_root_deployment = serve.deployment(Ingress).options( name=DEFAULT_INGRESS_DEPLOYMENT_NAME, init_args=( serve_dag_root_json, preprocessor_import_path, ), ) return serve_dag_root_deployment
33f7ca9218e59af168fccdd8e0d0392964febaf2
3,652,320
def get_project_settings(project): """Gets project's settings. Return value example: [{ "attribute" : "Brightness", "value" : 10, ...},...] :param project: project name or metadata :type project: str or dict :return: project settings :rtype: list of dicts """ if not isinstance(project, dict): project = get_project_metadata_bare(project) team_id, project_id = project["team_id"], project["id"] params = { "team_id": team_id, } response = _api.send_request( req_type='GET', path=f'/project/{project_id}/settings', params=params ) if not response.ok: raise SABaseException( response.status_code, "Couldn't get project settings " + response.text ) res = response.json() for val in res: if val['attribute'] == 'ImageQuality': if val['value'] == 60: val['value'] = 'compressed' elif val['value'] == 100: val['value'] = 'original' else: raise SABaseException(0, "NA ImageQuality value") return res
298d00eedff7c70ae8745e47f2eff48642988c7b
3,652,321
def guard(M, test): """Monadic guard. What it does:: return M.pure(Unit) if test else M.empty() https://en.wikibooks.org/wiki/Haskell/Alternative_and_MonadPlus#guard """ return M.pure(Unit) if test else M.empty()
9184310fcebec10ca1cc7cdb25e36831b327cbb0
3,652,322
def get_git_hash() -> str: """Get the git hash.""" rv = _run("git", "rev-parse", "HEAD") if rv is None: return "UNHASHED" return rv
978eca015aeb534e500dbbc5e9ab7aad5b487865
3,652,323
def primary_style(): """ a blue green style """ return color_mapping( 'bg:#449adf #ffffff', 'bg:#002685 #ffffff', '#cd1e10', '#007e3a', '#fe79d1', '#4cde77', '#763931', '#64d13e', '#7e77d2', 'bg:#000000 #ffffff', )
aecbe4cccb18763cf961ba08d6d9c04188080989
3,652,324
def decrypt_files(rsa_key): """ Decrypt all encrypted files on host machine `Required` :param str rsa_key: RSA private key in PEM format """ try: if not isinstance(rsa_key, Crypto.PublicKey.RSA.RsaKey): rsa_key = Crypto.PublicKey.RSA.importKey(rsa_key) if not rsa_key.has_private(): return "Error: RSA key cannot decrypt" globals()['threads']['iter_files'] = _iter_files(rsa_key) globals()['threads']['decrypt_files'] = _threader() return "Decrypting files" except Exception as e: util.log("{} error: {}".format(decrypt_files.__name__, str(e)))
ccc5d253b5ab7a7851195751a798ba4e18fef983
3,652,325
def _bivariate_uc_uc( lhs,rhs, z, dz_dl, # (dz_re_dl_re, dz_re_dl_im, dz_im_dl_re, dz_im_dl_im) dz_dr # (dz_re_dr_re, dz_re_dr_im, dz_im_dr_re, dz_im_dr_im) ): """ Create an uncertain complex number as a bivariate function This is a utility method for implementing mathematical functions of uncertain complex numbers. The parameters 'lhs' and 'rhs' are the UncertainComplex arguments to the function, 'z' is the complex value of the function and 'dz_dl' and 'dz_dr' are the Jacobian matrices of the function value z with respect to the real and imaginary components of the function's left and right arguments. Parameters ---------- lhs, rhs : :class:`UncertainComplex` z : complex dz_dl, dz_dr : 4-element sequence of float Returns ------- :class:`UncertainComplex` """ lhs_r = lhs.real lhs_i = lhs.imag rhs_r = rhs.real rhs_i = rhs.imag u_lhs_real, u_lhs_imag = vector.merge_weighted_vectors_twice( lhs_r._u_components,(dz_dl[0],dz_dl[2]), lhs_i._u_components,(dz_dl[1],dz_dl[3]) ) u_rhs_real, u_rhs_imag = vector.merge_weighted_vectors_twice( rhs_r._u_components,(dz_dr[0],dz_dr[2]), rhs_i._u_components,(dz_dr[1],dz_dr[3]) ) d_lhs_real, d_lhs_imag = vector.merge_weighted_vectors_twice( lhs_r._d_components,(dz_dl[0],dz_dl[2]), lhs_i._d_components,(dz_dl[1],dz_dl[3]) ) d_rhs_real, d_rhs_imag = vector.merge_weighted_vectors_twice( rhs_r._d_components,(dz_dr[0],dz_dr[2]), rhs_i._d_components,(dz_dr[1],dz_dr[3]) ) i_lhs_real, i_lhs_imag = vector.merge_weighted_vectors_twice( lhs_r._i_components,(dz_dl[0],dz_dl[2]), lhs_i._i_components,(dz_dl[1],dz_dl[3]) ) i_rhs_real, i_rhs_imag = vector.merge_weighted_vectors_twice( rhs_r._i_components,(dz_dr[0],dz_dr[2]), rhs_i._i_components,(dz_dr[1],dz_dr[3]) ) return UncertainComplex( UncertainReal( z.real, vector.merge_vectors( u_lhs_real, u_rhs_real ), vector.merge_vectors( d_lhs_real, d_rhs_real ), vector.merge_vectors( i_lhs_real, i_rhs_real ) ), UncertainReal( z.imag, vector.merge_vectors( u_lhs_imag,u_rhs_imag ), vector.merge_vectors( d_lhs_imag,d_rhs_imag ), vector.merge_vectors( i_lhs_imag, i_rhs_imag ) ) )
f3b2c778cd1152910c951e893861f0c900978a4e
3,652,326
def smoothing_filter(time_in, val_in, time_out=None, relabel=None, params=None): """ @brief Smoothing filter with relabeling and resampling features. @details It supports evenly sampled multidimensional input signal. Relabeling can be used to infer the value of samples at time steps before and after the explicitly provided samples. As a reminder, relabeling is a generalization of periodicity. @param[in] time_in Time steps of the input signal (1D numpy array) @param[in] val_in Sampled values of the input signal (2D numpy array: row = sample, column = time) @param[in] time_out Time steps of the output signal (1D numpy array) @param[in] relabel Relabeling matrix (identity for periodic signals) Optional: Disable if omitted @param[in] params Parameters of the filter. Dictionary with keys: 'mixing_ratio_1': Relative time at the beginning of the signal during which the output signal corresponds to a linear mixing over time of the filtered and original signal. (only used if relabel is omitted) 'mixing_ratio_2': Relative time at the end of the signal during which the output signal corresponds to a linear mixing over time of the filtered and original signal. (only used if relabel is omitted) 'smoothness'[0]: Smoothing factor to filter the beginning of the signal (only used if relabel is omitted) 'smoothness'[1]: Smoothing factor to filter the end of the signal (only used if relabel is omitted) 'smoothness'[2]: Smoothing factor to filter the middle part of the signal @return Filtered signal (2D numpy array: row = sample, column = time) """ if time_out is None: time_out = time_in if params is None: params = dict() params['mixing_ratio_1'] = 0.12 params['mixing_ratio_2'] = 0.04 params['smoothness'] = [0.0,0.0,0.0] params['smoothness'][0] = 5e-3 params['smoothness'][1] = 5e-3 params['smoothness'][2] = 3e-3 if relabel is None: mix_fit = [None,None,None] mix_fit[0] = lambda t: 0.5*(1+np.sin(1/params['mixing_ratio_1']*((t-time_in[0])/(time_in[-1]-time_in[0]))*np.pi-np.pi/2)) mix_fit[1] = lambda t: 0.5*(1+np.sin(1/params['mixing_ratio_2']*((t-(1-params['mixing_ratio_2'])*time_in[-1])/(time_in[-1]-time_in[0]))*np.pi+np.pi/2)) mix_fit[2] = lambda t: 1 val_fit = [] for jj in range(val_in.shape[0]): val_fit_jj = [] for kk in range(len(params['smoothness'])): val_fit_jj.append(UnivariateSpline(time_in, val_in[jj], s=params['smoothness'][kk])) val_fit.append(val_fit_jj) time_out_mixing = [None, None, None] time_out_mixing_ind = [None, None, None] time_out_mixing_ind[0] = time_out < time_out[-1]*params['mixing_ratio_1'] time_out_mixing[0] = time_out[time_out_mixing_ind[0]] time_out_mixing_ind[1] = time_out > time_out[-1]*(1-params['mixing_ratio_2']) time_out_mixing[1] = time_out[time_out_mixing_ind[1]] time_out_mixing_ind[2] = np.logical_and(np.logical_not(time_out_mixing_ind[0]), np.logical_not(time_out_mixing_ind[1])) time_out_mixing[2] = time_out[time_out_mixing_ind[2]] val_out = np.zeros((val_in.shape[0],len(time_out))) for jj in range(val_in.shape[0]): for kk in range(len(time_out_mixing)): val_out[jj,time_out_mixing_ind[kk]] = \ (1 - mix_fit[kk](time_out_mixing[kk])) * val_fit[jj][kk](time_out_mixing[kk]) + \ mix_fit[kk](time_out_mixing[kk]) * val_fit[jj][-1](time_out_mixing[kk]) else: time_tmp = np.concatenate([time_in[:-1]-time_in[-1],time_in,time_in[1:]+time_in[-1]]) val_in_tmp = np.concatenate([relabel.dot(val_in[:,:-1]),val_in,relabel.dot(val_in[:,1:])], axis=1) val_out = np.zeros((val_in.shape[0],len(time_out))) for jj in range(val_in_tmp.shape[0]): f = UnivariateSpline(time_tmp, val_in_tmp[jj], s=params['smoothness'][-1]) val_out[jj] = f(time_out) return val_out
7af0f6925d255c0445c7b5dfdfb330f4058f8afc
3,652,327
def get_selector_qty(*args): """get_selector_qty() -> int""" return _idaapi.get_selector_qty(*args)
82ea62d3220893456358c42b0ec931e5c2cf9053
3,652,328
from typing import Optional from typing import Dict from typing import Any import requests def get( host: str, path: str, params: Optional[Dict[str, Any]] = None, headers: Optional[Dict[str, str]] = None, authenticated: bool = True, stream: bool = False, ) -> requests.Response: """ Send a GET request to the remote API. """ return do_request( "GET", host, path, params=params, headers=headers, authenticated=authenticated, stream=stream, )
7f0188ad2d678c0edef5d4fce623a5faee5c13db
3,652,329
def inner_xml(xml_text): """ Get the inner xml of an element. >>> inner_xml('<div>This is some <i><b>really</b> silly</i> text!</div>') u'This is some <i><b>really</b> silly</i> text!' """ return unicode(INNER_XML_RE.match(xml_text).groupdict()['body'])
dcba13de5a75d4b9956c2a27f02a289212d9789e
3,652,330
def store_tags(): """Routing: Stores the (updated) tag data for the image.""" data = { "id": request.form.get("id"), "tag": request.form.get('tags'), "SHOWN": 0 } loader.store(data) next_image = loader.next_data() if next_image is None: return redirect("/finished") target = "/" if next_image: target = f"/?image_id={next_image['id']}" return redirect(location=target)
ec433586e7ad60d2b85ac8ff2ccc209f4c00a110
3,652,331
def getAssets(public_key: str) -> list: """ Get all the balances an account has. """ balances = server.accounts().account_id(public_key).call()['balances'] balances_to_return = [ {"asset_code": elem.get("asset_code"), "issuer": elem.get("asset_issuer"), "balance": elem.get("balance")} for elem in balances ] balances_to_return[-1]["asset_code"] = "XLM" return balances_to_return
71c1b89edd79f0dc4092b909c2d7f505b35d5391
3,652,332
def parse_pattern(format_string, env, wrapper=lambda x, y: y): """ Parse the format_string and return prepared data according to the env. Pick each field found in the format_string from the env(ironment), apply the wrapper on each data and return a mapping between field-to-replace and values for each. """ formatter = Formatter() fields = [x[1] for x in formatter.parse(format_string) if x[1] is not None] prepared_env = {} # Create a prepared environment with only used fields, all as list: for field in fields: # Search for a movie attribute for each alternative field separated # by a pipe sign: for field_alt in (x.strip() for x in field.split('|')): # Handle default values (enclosed by quotes): if field_alt[0] in '\'"' and field_alt[-1] in '\'"': field_values = field_alt[1:-1] else: field_values = env.get(field_alt) if field_values is not None: break else: field_values = [] if not isinstance(field_values, list): field_values = [field_values] prepared_env[field] = wrapper(field_alt, field_values) return prepared_env
fdd5057929ed06f5ee984019e967df45d683fb75
3,652,333
def u1_series_summation(xarg, a, kmax): """ 5.3.2 ROUTINE - U1 Series Summation PLATE 5-10 (p32) :param xarg: :param a: :param kmax: :return: u1 """ du1 = 0.25*xarg u1 = du1 f7 = -a*du1**2 k = 3 while k < kmax: du1 = f7*du1 / (k*(k-1)) u1old = u1 u1 = u1+du1 if u1 == u1old: break k = k+2 return u1
e54cb5f68dd5ecba5dd7f540ac645ff8d70ae0e3
3,652,334
def mask_iou(masks_a, masks_b, iscrowd=False): """ Computes the pairwise mask IoU between two sets of masks of size [a, h, w] and [b, h, w]. The output is of size [a, b]. Wait I thought this was "box_utils", why am I putting this in here? """ masks_a = masks_a.view(masks_a.size(0), -1) masks_b = masks_b.view(masks_b.size(0), -1) matmul = nn.MatMul() intersection = matmul(masks_a, masks_b.T) mask_iou_sum = P.ReduceSum() expand_dims = P.ExpandDims() area_a = expand_dims(mask_iou_sum(masks_a, 1), 1) area_b = expand_dims(mask_iou_sum(masks_b, 1), 0) return intersection / (area_a + area_b - intersection) if not iscrowd else intersection / area_a
585bb48b3b8460660739acd102d8a0f5e1716078
3,652,335
import torch def normalized_grid_coords(height, width, aspect=True, device="cuda"): """Return the normalized [-1, 1] grid coordinates given height and width. Args: height (int) : height of the grid. width (int) : width of the grid. aspect (bool) : if True, use the aspect ratio to scale the coordinates, in which case the coords will not be normalzied to [-1, 1]. (Default: True) device : the device the tensors will be created on. """ aspect_ratio = width/height if aspect else 1.0 window_x = torch.linspace(-1, 1, steps=width, device=device) * aspect_ratio window_y = torch.linspace(1, -1, steps=height, device=device) coord = torch.stack(torch.meshgrid(window_x, window_y, indexing='ij')).permute(2,1,0) return coord
7ddd1c5eda2e28116e40fa99f6cd794d9dfd48cc
3,652,336
from typing import Optional from pathlib import Path from typing import Iterable from typing import List from typing import Any import ray import traceback def ray_map(task: Task, *item_lists: Iterable[List[Any]], log_dir: Optional[Path] = None) -> List[Any]: """ Initialize ray, align item lists and map each item of a list of arguments to a callable and executes in parallel. :param task: callable to be run :param item_lists: items to be parallelized :param log_dir: directory to store worker logs :return: list of outputs """ try: results = _ray_map_items(task, *item_lists, log_dir=log_dir) return results except (RayTaskError, Exception) as exc: ray.shutdown() traceback.print_exc() raise RuntimeError(exc)
a033bb1f2d84b7a37bffd4db4643ed5c2291b3ba
3,652,337
def consensus_kmeans(data=None, k=0, linkage='average', nensemble=100, kmin=None, kmax=None): """Perform clustering based on an ensemble of k-means partitions. Parameters ---------- data : array An m by n array of m data samples in an n-dimensional space. k : int, optional Number of clusters to extract; if 0 uses the life-time criterion. linkage : str, optional Linkage criterion for final partition extraction; one of 'average', 'centroid', 'complete', 'median', 'single', 'ward', or 'weighted'. nensemble : int, optional Number of partitions in the ensemble. kmin : int, optional Minimum k for the k-means partitions; defaults to :math:`\\sqrt{m}/2`. kmax : int, optional Maximum k for the k-means partitions; defaults to :math:`\\sqrt{m}`. Returns ------- clusters : dict Dictionary with the sample indices (rows from 'data') for each found cluster; outliers have key -1; clusters are assigned integer keys starting at 0. """ # check inputs if data is None: raise TypeError("Please specify input data.") N = len(data) if kmin is None: kmin = int(round(np.sqrt(N) / 2.)) if kmax is None: kmax = int(round(np.sqrt(N))) # initialization grid grid = { 'k': np.random.random_integers(low=kmin, high=kmax, size=nensemble) } # run consensus clusters, = consensus(data=data, k=k, linkage=linkage, fcn=kmeans, grid=grid) return utils.ReturnTuple((clusters,), ('clusters',))
25ee74ac24883a4981db98c730c9010d13866840
3,652,338
def to_cftime(date, calendar="gregorian"): """Convert datetime object to cftime object. Parameters ---------- date : datetime object Datetime object. calendar : str Calendar of the cftime object. Returns ------- cftime : cftime object Cftime ojbect. """ if type(date) == dt.date: date = dt.datetime.combine(date, dt.time()) elif isinstance(date, cfdt.datetime): # do nothing return date return cfdt.datetime( date.year, date.month, date.day, date.hour, date.minute, date.second, date.microsecond, calendar=calendar, )
cfd968e1fd74f105ef7b44ce6700d646d4470910
3,652,339
def poly_to_mask(mask_shape, vertices): """Converts a polygon to a boolean mask with `True` for points lying inside the shape. Uses the bounding box of the vertices to reduce computation time. Parameters ---------- mask_shape : np.ndarray | tuple 1x2 array of shape of mask to be generated. vertices : np.ndarray Nx2 array of the vertices of the polygon. Returns ------- mask : np.ndarray Boolean array with `True` for points inside the polygon """ return polygon2mask(mask_shape, vertices)
13dec3d1057cff4823fa989e268f5103756bc263
3,652,340
def get_nn_edges( basis_vectors, extent, site_offsets, pbc, distance_atol, order, ): """For :code:`order == k`, generates all edges between up to :math:`k`-nearest neighbor sites (measured by their Euclidean distance). Edges are colored by length with colors between 0 and `order - 1` in order of increasing length.""" positions, ids = create_padded_sites( basis_vectors, extent, site_offsets, pbc, order ) naive_edges_by_order = get_naive_edges( positions, order * np.linalg.norm(basis_vectors, axis=1).max() + distance_atol, order, ) colored_edges = [] for k, naive_edges in enumerate(naive_edges_by_order): true_edges = set() for node1, node2 in naive_edges: # switch to real node indices node1 = ids[node1] node2 = ids[node2] if node1 == node2: raise RuntimeError( f"Lattice contains self-referential edge {(node1, node2)} of order {k}" ) elif node1 > node2: node1, node2 = node2, node1 true_edges.add((node1, node2)) for edge in true_edges: colored_edges.append((*edge, k)) return colored_edges
dfc55a3696c18769bbe3d4b15f068afbc763b6bf
3,652,341
import math def unit_vector(data, axis=None, out=None): """Return ndarray normalized by length, i.e. eucledian norm, along axis. >>> v0 = numpy.random.random(3) >>> v1 = unit_vector(v0) >>> numpy.allclose(v1, v0 / numpy.linalg.norm(v0)) True >>> v0 = numpy.random.rand(5, 4, 3) >>> v1 = unit_vector(v0, axis=-1) >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=2)), 2) >>> numpy.allclose(v1, v2) True >>> v1 = unit_vector(v0, axis=1) >>> v2 = v0 / numpy.expand_dims(numpy.sqrt(numpy.sum(v0*v0, axis=1)), 1) >>> numpy.allclose(v1, v2) True >>> v1 = numpy.empty((5, 4, 3), dtype=numpy.float64) >>> unit_vector(v0, axis=1, out=v1) >>> numpy.allclose(v1, v2) True >>> list(unit_vector([])) [] >>> list(unit_vector([1.0])) [1.0] see: https://github.com/ros/geometry/blob/hydro-devel/tf/src/tf/transformations.py """ if out is None: data = np.array(data, dtype=np.float64, copy=True) if data.ndim == 1: data /= math.sqrt(np.dot(data, data)) return data else: if out is not data: out[:] = np.array(data, copy=False) data = out length = np.atleast_1d(np.sum(data*data, axis)) np.sqrt(length, length) if axis is not None: length = np.expand_dims(length, axis) data /= length if out is None: return data
eb29e86d33ff576f290ea11aa6e2e6180a04d56e
3,652,344
import torch def negative_f1_score(probs, labels): """ Computes the f1 score between output and labels for k classes. args: probs (tensor) (size, k) labels (tensor) (size, 1) """ probs = torch.nn.functional.softmax(probs, dim=1) probs = probs.numpy() labels = labels.numpy() pred = np.argmax(probs, axis=1) return skl.f1_score(labels, pred, pos_label=0)
bd308d70934ed5ada0868f454b07c0f554384f32
3,652,345
import requests def search_usb_devices_facets(): """Facet USB Devices""" data = {"terms": {"fields": ["status"]}} usb_url = USB_DEVICES_FACETS.format(HOSTNAME, ORG_KEY) return requests.post(usb_url, json=data, headers=HEADERS)
d4f09b8374fe2461ac5e7c121822287bf8e80494
3,652,346
import struct def pack4(v): """ Takes a 32 bit integer and returns a 4 byte string representing the number in little endian. """ assert 0 <= v <= 0xffffffff # The < is for little endian, the I is for a 4 byte unsigned int. # See https://docs.python.org/2/library/struct.html for more info. return struct.pack('<I', v)
bbaeb0026624a7ec30ec379466ef11398f93d573
3,652,347
def index(): """ """ category = Category.get_categories() pitch = Pitch.get_all_pitches() title = "Welcome to Pitch Hub" return render_template('index.html', title = title, category = category, pitch =pitch)
6758964bf9a304d62d9048e9b9248cee39d04742
3,652,348
def maximum_sum_increasing_subsequence(numbers, size): """ Given an array of n positive integers. Write a program to find the sum of maximum sum subsequence of the given array such that the integers in the subsequence are sorted in increasing order. """ results = [numbers[i] for i in range(size)] for i in range(1, size): for j in range(i): if numbers[i] > numbers[j] and results[i] < results[j] + numbers[i]: results[i] = results[j] + numbers[i] return max(results)
a684ead4dcd9acbf8c796f5d24a3bf826fb5ad9d
3,652,349
def lstsqb(a, b): """ Return least-squares solution to a = bx. Similar to MATLAB / operator for rectangular matrices. If b is invertible then the solution is la.solve(a, b).T """ return la.lstsq(b.T, a.T, rcond=None)[0].T
4b046896ce29b79e9edcb434b1a01c652654867c
3,652,350
def multivariateGaussian(X, mu, sigma2): """ Multivariate Gaussian distribution :param X: :param mu: :param sigma2: :return: """ k = len(mu) if sigma2.shape[0] > 1: sigma2 = np.diag(sigma2) X = X - mu argu = (2 * np.pi) ** (-k / 2) * np.linalg.det(sigma2) ** (-0.5) p = argu * np.exp(-0.5 * np.sum(np.dot(X, np.linalg.inv(sigma2)) * X, axis=1)) return p
67a466318c473eef2749bf23e26d45de1149c5dc
3,652,351
import datetime def get_day(input): """ Convert input to a datetime object and extract the Day part """ if isinstance(input, str): input = parse_iso(input) if isinstance(input, (datetime.date, datetime.datetime)): return input.day return None
8a18e1832b85faf0612667ce3431176301502523
3,652,352
def read_ds(tier, pos_source=None): """ Like read_pt above, given a DS tier, return the DepTree object :param tier: :type tier: RGTier """ # First, assert that the type we're looking at is correct. assert tier.type == DS_TIER_TYPE # --1) Root the tree. root = DepTree.root() # --2) We will build up a list of edges, then attach the edges to the tree. edges = [] # --2b) Retrieve the POS tier, if it exists, in advance. pos_tier = tier.igt.get_pos_tags(tier.attributes.get(DS_DEP_ATTRIBUTE), tag_method=pos_source) for item in tier: dep = item.attributes.get(DS_DEP_ATTRIBUTE) head = item.attributes.get(DS_HEAD_ATTRIBUTE) # Get the POS tag if it exists pos = None if pos_tier: pos_item = pos_tier.find(alignment=dep) if pos_item: pos = pos_item.value() # Get the word value... dep_w = tier.igt.find(id=dep) dep_t = Terminal(dep_w.value(), dep_w.index) if head is not None: head_w = tier.igt.find(id=head) head_t = Terminal(head_w.value(), head_w.index) else: head_t = Terminal('ROOT', 0) e = DepEdge(head=head_t, dep=dep_t, type=item.value(), pos=pos) edges.append(e) dt = build_dep_edges(edges) return dt
797503380a3ff697440da8cd5d409b5c89384f4f
3,652,353
def get_local_ontology_from_file(ontology_file): """ return ontology class from a local OWL file """ return ow.get_ontology("file://" + ontology_file).load()
c022aac464c4afdbc088455a5edf8a4d91bc5586
3,652,354
import urllib def get_wolframalpha_imagetag(searchterm): """ Used to get the first image tag from the Wolfram Alpha API. The return value is a dictionary with keys that can go directly into html. Takes in: searchterm: the term to search with in the Wolfram Alpha API """ base_url = 'http://api.wolframalpha.com/v2/query?' app_id = credentials['wolframkey'] # api key url_params = {'input': searchterm, 'appid': app_id} headers = {'User-Agent': None} data = urllib.urlencode(url_params) req = urllib2.Request(base_url, data, headers) xml = urllib2.urlopen(req).read() tree = ET.fromstring(xml) for e in tree.findall('pod'): for item in [ef for ef in list(e) if ef.tag == 'subpod']: for it in [i for i in list(item) if i.tag == 'img']: if it.tag == 'img': if float(it.attrib['width']) > 50 and float(it.attrib['height']) > 50: return it.attrib['src']
958e09d6498b1f1d98de72fe9089e45e48988f20
3,652,355
def get_synset_definitions(word): """Return all possible definitions for synsets in a word synset ring. :param word (str): The word to lookup. :rtype definitions (list): The synset definitions list. """ definitions = [] synsets = get_word_synsets(word) for _synset in synsets: definitions.append(_synset.definition().split()) return definitions
70d522777cd413902157df6c0d96bdf378d7cf69
3,652,356
import json def getResourceDefUsingSession(url, session, resourceName, sensitiveOptions=False): """ get the resource definition - given a resource name (and catalog url) catalog url should stop at port (e.g. not have ldmadmin, ldmcatalog etc... or have v2 anywhere since we are using v1 api's returns rc=200 (valid) & other rc's from the get resourceDef (json) """ print( "getting resource for catalog:-" + url + " resource=" + resourceName ) apiURL = url + "/access/1/catalog/resources/" + resourceName if sensitiveOptions: apiURL += "?sensitiveOptions=true" # print("\turl=" + apiURL) header = {"Accept": "application/json"} tResp = session.get(apiURL, params={}, headers=header, ) print("\tresponse=" + str(tResp.status_code)) if tResp.status_code == 200: # valid - return the jsom return tResp.status_code, json.loads(tResp.text) else: # not valid return tResp.status_code, None
883a393018b068b8f15a8c0ea5ac6969c1a386b6
3,652,357
def _merge_sse(sum1, sum2): """Merge the partial SSE.""" sum_count = sum1 + sum2 return sum_count
0aae96262cfb56c6052fdbe5bbd92437d37b1f76
3,652,358
def earliest_deadline_first(evs, iface): """ Sort EVs by departure time in increasing order. Args: evs (List[EV]): List of EVs to be sorted. iface (Interface): Interface object. (not used in this case) Returns: List[EV]: List of EVs sorted by departure time in increasing order. """ return sorted(evs, key=lambda x: x.departure)
f1a57586b9993d890ddda6c309dafbea4ae16554
3,652,359
import re def auto_load(filename): """Load any supported raw battery cycler file to the correct Datapath automatically. Matches raw file patterns to the correct datapath and returns the datapath object. Example: auto_load("2017-05-09_test-TC-contact_CH33.csv") >>> <ArbinDatapath object> auto_load("PreDiag_000287_000128short.092") >>> <MaccorDatapath object> Args: filename (str, Pathlike): string corresponding to battery cycler file filename. Returns: (beep.structure.base.BEEPDatapath): The datapath child class corresponding to this file. """ if re.match(ARBIN_CONFIG["file_pattern"], filename) or re.match(FastCharge_CONFIG["file_pattern"], filename): return ArbinDatapath.from_file(filename) elif re.match(MACCOR_CONFIG["file_pattern"], filename) or re.match(xTesladiag_CONFIG["file_pattern"], filename): return MaccorDatapath.from_file(filename) elif re.match(INDIGO_CONFIG["file_pattern"], filename): return IndigoDatapath.from_file(filename) elif re.match(BIOLOGIC_CONFIG["file_pattern"], filename): return BiologicDatapath.from_file(filename) elif re.match(NEWARE_CONFIG["file_pattern"], filename): return NewareDatapath.from_file(filename) else: raise ValueError("{} does not match any known file pattern".format(filename))
6b3ccf40296f62c15ea005cfe5e87e397d8e9f88
3,652,360
def print_param_list(param_list, result, decimal_place=2, unit=''): """ Return a result string with parameter data appended. The input `param_list` is a list of a tuple (param_value, param_name), where `param_value` is a float and `param_name` is a string. If `param_value` is None, it writes 'N/A'. """ for param_value, param_name in param_list: result += '<tr>' result += r' <td class = "key"><span>{0}</span></td>'.format(param_name) result += r' <td class="equals">=</td>' if param_value is None: result += r' <td class="value">N/A</td>' else: param_value = '%.*f' % (decimal_place, param_value) result += r' <td class="value"><script type="math/tex">{0} \ \mathrm{{ {1!s} }}</script></td>'.format( param_value, unit) result += '</tr>\n' return result
f92fd926eaf312e625058c394c42e9909cac7a43
3,652,361
def get_veh_id(gb_data): """ Mapping function for vehicle id """ veh_ref = gb_data['Vehicle_Reference'] acc_id = get_acc_id_from_data(gb_data) veh_id = common.get_gb_veh_id(acc_id, int(veh_ref)) return veh_id
de3a8f99a099737cedb00534ad21bc7dd1a900c5
3,652,362
def linreg_qr_gramschmidt_unencrypted(clientMap, coordinator, encryLv=3, colTrunc=False): """ Compute vertical federated linear regression using QR. QR decomposition is computed by means of Numpy/Scipy builtin algorithm and Gram-Schmidt method. Parameters ---------- clientMap : List The list of qrClient objects. clientInfos : List The list of machine information of the corresponding qrClient objects. encryLv : int The least number of columns the feature matrix of a single client should have to protect its privacy. colTrunc : bool Do the column pivoting and truncation or not. Returns ------- numpy.array The computed weights of all the clients. The weights corresponding to the constant term is at the last position. """ preprocessing_wo_constaint(clientMap, coordinator.machine_info_client, encryLv, colTrunc) compute_qr_gramschmidt_unencrypted(clientMap, coordinator.machine_info_client) apply_q_unencrypted(clientMap, coordinator.machine_info_client) weights = apply_back_solve_wo_constraint(clientMap, coordinator.machine_info_client) return weights
59fee17cff911a22c4e6cfc6daf13ce7559d32a7
3,652,363
def has_soa_perm(user_level, obj, ctnr, action): """ Permissions for SOAs SOAs are global, related to domains and reverse domains """ return { 'cyder_admin': True, #? 'ctnr_admin': action == 'view', 'user': action == 'view', 'guest': action == 'view', }.get(user_level, False)
6b32c9f3411d9341d9692c46e84a7506d649f36d
3,652,364
def check_skyscrapers(input_path: str) -> bool: """ Main function to check the status of skyscraper game board. Return True if the board status is compliant with the rules, False otherwise. """ board = read_input(input_path) return check_not_finished_board(board) and check_uniqueness_in_rows(board) and \ check_horizontal_visibility(board) and check_columns(board)
a4a2c77049bad429e548c749ef3e34ef27081de4
3,652,367
from typing import Optional async def get_station(station: avwx.Station, token: Optional[Token]) -> dict: """Log and returns station data as dict""" await app.station.add(station.lookup_code, "station") return await station_data_for(station, token=token) or {}
659bf56ff274ccd460dfdf240d6f4776fb7586a6
3,652,368
def add_check_numerics_ops(): """Connect a `check_numerics` to every floating point tensor. `check_numerics` operations themselves are added for each `half`, `float`, or `double` tensor in the graph. For all ops in the graph, the `check_numerics` op for all of its (`half`, `float`, or `double`) inputs is guaranteed to run before the `check_numerics` op on any of its outputs. Note: This API is not compatible with the use of `tf.cond` or `tf.while_loop`, and will raise a `ValueError` if you attempt to call it in such a graph. Returns: A `group` op depending on all `check_numerics` ops added. Raises: ValueError: If the graph contains any numeric operations in a control flow structure. RuntimeError: If called with eager execution enabled. @compatibility(eager) Not compatible with eager execution. To check for `Inf`s and `NaN`s under eager execution, call tfe.seterr(inf_or_nan='raise') once before executing the checked operations. @enc_compatibility """ if context.executing_eagerly(): raise RuntimeError( "add_check_numerics_ops() is not compatible with eager execution. " "To check for Inf's and NaN's under eager execution, call " "tfe.seterr(inf_or_nan='raise') once before executing the " "checked operations.") check_op = [] # This code relies on the ordering of ops in get_operations(). # The producer of a tensor always comes before that tensor's consumer in # this list. This is true because get_operations() returns ops in the order # added, and an op can only be added after its inputs are added. for op in ops.get_default_graph().get_operations(): for output in op.outputs: if output.dtype in [dtypes.float16, dtypes.float32, dtypes.float64]: if op._get_control_flow_context() is not None: # pylint: disable=protected-access raise ValueError("`tf.add_check_numerics_ops() is not compatible " "with TensorFlow control flow operations such as " "`tf.cond()` or `tf.while_loop()`.") message = op.name + ":" + str(output.value_index) with ops.control_dependencies(check_op): check_op = [array_ops.check_numerics(output, message=message)] return control_flow_ops.group(*check_op)
8a5026ff07a0cfce7f0acac58641996cef76fb2e
3,652,369
def get_text(part): """Decode the Gmail message body""" if not part['filename'] and \ part['body']['size'] > 0 and \ 'data' in part['body'].keys(): content_type = header(part['headers'], 'Content-Type') encode_type = header(part['headers'], 'Content-Transfer-Encoding') data = decode_data(content_type, encode_type, part['filename'], part['body']['data']) if data["data_type"]=="text": return data['data'] return ''
2d32b30539c39dc89cb3680e2d21e14eb9ce24c4
3,652,370
import dataclasses def run(ex: "interactivity.Execution"): """Specify the target function(s) and/or layer(s) to target.""" selection: "definitions.Selection" = ex.shell.selection is_exact = ex.args.get("exact", False) functions = ex.args.get("functions", False) layers = ex.args.get("layers", False) both = not functions and not layers names = _get_names(ex) if both and names == ["*"]: status = "ALL" message = "Selection has been cleared. All items are now selected." ex.shell.selection = dataclasses.replace( selection, function_needles=["*"], layer_needles=["*"], bundle_all=True, ) elif is_exact: status = "EXACT" message = "Exact selection has been applied." ex.shell.selection = _update_exact_selection( names=names, functions=functions, layers=layers, selection=selection, ) else: status = "MATCH" message = "Matching items have been selected." ex.shell.selection = _update_fuzzy_selection( names=names, functions=functions, layers=layers, selection=selection, ) targets = ex.shell.context.get_selected_targets(ex.shell.selection) return ex.finalize( status=status, message=message, echo=True, info={ "functions": _to_names(targets.function_targets), "layers": _to_names(targets.layer_targets), }, )
9389ada1c657b1f2794650e9b2b2d9a40039b64f
3,652,371
def ByName(breakdown_metric_name): """Return a BreakdownMetric class by name.""" breakdown_mapping = { 'distance': ByDistance, 'num_points': ByNumPoints, 'rotation': ByRotation, 'difficulty': ByDifficulty } if breakdown_metric_name not in breakdown_mapping: raise ValueError('Invalid breakdown name: %s, valid names are %s' % (breakdown_metric_name, list(breakdown_mapping.keys()))) return breakdown_mapping[breakdown_metric_name]
06a1a44f8453375cfc83729339062948829d950c
3,652,373
def deserialize_structure(serialized_structure, dtype=np.int32): """Converts a string to a structure. Args: serialized_structure: A structure produced by `serialize_structure`. dtype: The data type of the output numpy array. Returns: A numpy array with `dtype`. """ return np.asarray( [token for token in serialized_structure.split(domains.SEP_TOKEN)], dtype=dtype)
ec8f3d096f3eedea4343576f7b204da15ae73ca6
3,652,374
from typing import List def get_all_text_elements(dataset_name: str) -> List[TextElement]: """ get all the text elements of the given dataset :param dataset_name: """ return data_access.get_all_text_elements(dataset_name=dataset_name)
fa4c2e0bff9818f1026095b5b6b774b09652b989
3,652,375
def form_x(form_file,*args): """ same as above, except assumes all tags in the form are number, and uses the additional arguments in *args to fill out those tag values. :param form_file: file which we use for replacements :param *args: optional arguments which contain the form entries for the file in question, by number. """ form_dict = {} count = 0 for arg in args: count += 1 form_dict[str(count)] = str(arg) return form(form_file,form_dict)
e2d45e71ff18ce626a89d9a097389fc27b34fa82
3,652,376
import click def init(): """Manage IAM users.""" formatter = cli.make_formatter('aws_user') @click.group() def user(): """Manage IAM users.""" pass @user.command() @click.option('--create', is_flag=True, default=False, help='Create if it does not exist') @click.option('--path', default='/', help='Path for user name.') @click.option('--inline-policy', type=cli.LIST, required=False, help='Inline user policy name:file') @click.option('--attached-policy', type=cli.LIST, required=False, help='global:PolicyName or local:PolicyName') @click.option('--attached-policy', type=cli.LIST, required=False, help='global:PolicyName or local:PolicyName') @click.argument('user-name', required=True, callback=aws_cli.sanitize_user_name) @cli.admin.ON_EXCEPTIONS def configure(create, path, inline_policy, attached_policy, user_name): """Create/configure/get IAM user.""" iam_conn = awscontext.GLOBAL.iam try: user = iamclient.get_user(iam_conn, user_name) except exc.NotFoundError: if not create: raise user = None if not user: user = iamclient.create_user(iam_conn, user_name, path) if inline_policy: _set_user_policy(iam_conn, user_name, inline_policy) if attached_policy: _set_attached_policy(iam_conn, user_name, attached_policy) user['UserPolicies'] = iamclient.list_user_policies(iam_conn, user_name) user['AttachedPolicies'] = iamclient.list_attached_user_policies( iam_conn, user_name) cli.out(formatter(user)) @user.command(name='list') @cli.admin.ON_EXCEPTIONS @click.option('--path', default='/', help='Path for user name.') def list_users(path): """List IAM users. """ iam_conn = awscontext.GLOBAL.iam users = iamclient.list_users(iam_conn, path) cli.out(formatter(users)) @user.command() @click.option('--force', is_flag=True, default=False, help='Delete user, even is user has policies attached.') @click.argument('user-name') @cli.admin.ON_EXCEPTIONS def delete(force, user_name): """Delete IAM user.""" iam_conn = awscontext.GLOBAL.iam if force: user_policies = iamclient.list_user_policies(iam_conn, user_name) for policy in user_policies: _LOGGER.info('deleting inline policy: %s', policy) iamclient.delete_user_policy(iam_conn, user_name, policy) attached_pols = iamclient.list_attached_user_policies(iam_conn, user_name) for policy in attached_pols: _LOGGER.info('detaching policy: %s', policy['PolicyArn']) iamclient.detach_user_policy(iam_conn, user_name, policy['PolicyArn']) groups = iamclient.list_groups_for_user(iam_conn, user_name) for group in groups: _LOGGER.info('removing user from group: %s', group) iamclient.remove_user_from_group(iam_conn, user_name, group) try: iamclient.delete_user(iam_conn=iam_conn, user_name=user_name) except iam_conn.exceptions.DeleteConflictException: raise click.UsageError('User [%s] has inline or attached ' 'policies, or is a member of one or ' 'more group, use --force to force ' 'delete.' % user_name) del configure del list_users del delete return user
b237e6ba7c10aafa1a499944f1553eaceed0fb2a
3,652,377
def fix_units(dims): """Fill in missing units.""" default = [d.get("units") for d in dims][-1] for dim in dims: dim["units"] = dim.get("units", default) return dims
d3a47ad84e1b4e44bedebb1e5739778df975a6fe
3,652,378
def annotate_movement(raw, pos, rotation_velocity_limit=None, translation_velocity_limit=None, mean_distance_limit=None, use_dev_head_trans='average'): """Detect segments with movement. Detects segments periods further from rotation_velocity_limit, translation_velocity_limit and mean_distance_limit. It returns an annotation with the bad segments. Parameters ---------- raw : instance of Raw Data to compute head position. pos : array, shape (N, 10) The position and quaternion parameters from cHPI fitting. Obtained with `mne.chpi` functions. rotation_velocity_limit : float Head rotation velocity limit in radians per second. translation_velocity_limit : float Head translation velocity limit in meters per second. mean_distance_limit : float Head position limit from mean recording in meters. use_dev_head_trans : 'average' (default) | 'info' Identify the device to head transform used to define the fixed HPI locations for computing moving distances. If ``average`` the average device to head transform is computed using ``compute_average_dev_head_t``. If ``info``, ``raw.info['dev_head_t']`` is used. Returns ------- annot : mne.Annotations Periods with head motion. hpi_disp : array Head position over time with respect to the mean head pos. See Also -------- compute_average_dev_head_t """ sfreq = raw.info['sfreq'] hp_ts = pos[:, 0].copy() - raw.first_time dt = np.diff(hp_ts) hp_ts = np.concatenate([hp_ts, [hp_ts[-1] + 1. / sfreq]]) orig_time = raw.info['meas_date'] annot = Annotations([], [], [], orig_time=orig_time) # Annotate based on rotational velocity t_tot = raw.times[-1] if rotation_velocity_limit is not None: assert rotation_velocity_limit > 0 # Rotational velocity (radians / sec) r = _angle_between_quats(pos[:-1, 1:4], pos[1:, 1:4]) r /= dt bad_mask = (r >= np.deg2rad(rotation_velocity_limit)) onsets, offsets = _mask_to_onsets_offsets(bad_mask) onsets, offsets = hp_ts[onsets], hp_ts[offsets] bad_pct = 100 * (offsets - onsets).sum() / t_tot logger.info(u'Omitting %5.1f%% (%3d segments): ' u'ω >= %5.1f°/s (max: %0.1f°/s)' % (bad_pct, len(onsets), rotation_velocity_limit, np.rad2deg(r.max()))) annot += _annotations_from_mask( hp_ts, bad_mask, 'BAD_mov_rotat_vel', orig_time=orig_time) # Annotate based on translational velocity limit if translation_velocity_limit is not None: assert translation_velocity_limit > 0 v = np.linalg.norm(np.diff(pos[:, 4:7], axis=0), axis=-1) v /= dt bad_mask = (v >= translation_velocity_limit) onsets, offsets = _mask_to_onsets_offsets(bad_mask) onsets, offsets = hp_ts[onsets], hp_ts[offsets] bad_pct = 100 * (offsets - onsets).sum() / t_tot logger.info(u'Omitting %5.1f%% (%3d segments): ' u'v >= %5.4fm/s (max: %5.4fm/s)' % (bad_pct, len(onsets), translation_velocity_limit, v.max())) annot += _annotations_from_mask( hp_ts, bad_mask, 'BAD_mov_trans_vel', orig_time=orig_time) # Annotate based on displacement from mean head position disp = [] if mean_distance_limit is not None: assert mean_distance_limit > 0 # compute dev to head transform for fixed points use_dev_head_trans = use_dev_head_trans.lower() if use_dev_head_trans not in ['average', 'info']: raise ValueError('use_dev_head_trans must be either' + ' \'average\' or \'info\': got \'%s\'' % (use_dev_head_trans,)) if use_dev_head_trans == 'average': fixed_dev_head_t = compute_average_dev_head_t(raw, pos) elif use_dev_head_trans == 'info': fixed_dev_head_t = raw.info['dev_head_t'] # Get static head pos from file, used to convert quat to cartesian chpi_pos = sorted([d for d in raw.info['hpi_results'][-1]['dig_points']], key=lambda x: x['ident']) chpi_pos = np.array([d['r'] for d in chpi_pos]) # Get head pos changes during recording chpi_pos_mov = np.array([apply_trans(_quat_to_affine(quat), chpi_pos) for quat in pos[:, 1:7]]) # get fixed position chpi_pos_fix = apply_trans(fixed_dev_head_t, chpi_pos) # get movement displacement from mean pos hpi_disp = chpi_pos_mov - np.tile(chpi_pos_fix, (pos.shape[0], 1, 1)) # get positions above threshold distance disp = np.sqrt((hpi_disp ** 2).sum(axis=2)) bad_mask = np.any(disp > mean_distance_limit, axis=1) onsets, offsets = _mask_to_onsets_offsets(bad_mask) onsets, offsets = hp_ts[onsets], hp_ts[offsets] bad_pct = 100 * (offsets - onsets).sum() / t_tot logger.info(u'Omitting %5.1f%% (%3d segments): ' u'disp >= %5.4fm (max: %5.4fm)' % (bad_pct, len(onsets), mean_distance_limit, disp.max())) annot += _annotations_from_mask( hp_ts, bad_mask, 'BAD_mov_dist', orig_time=orig_time) _adjust_onset_meas_date(annot, raw) return annot, disp
f89e48281cb70da6aa27b7dde737a8a587024f08
3,652,379
from typing import Any def run_in_executor( func: F, executor: ThreadPoolExecutor = None, args: Any = (), kwargs: Any = MappingProxyType({}), ) -> Future: """将耗时函数加入到线程池 .""" loop = get_event_loop() # noinspection PyTypeChecker return loop.run_in_executor( # type: ignore executor, context_partial(func, *args, **kwargs), )
dfa40f30e359d785e3582f48910d3936659bd2fa
3,652,380
def find_entry_with_minimal_scale_at_prime(self, p): """ Finds the entry of the quadratic form with minimal scale at the prime p, preferring diagonal entries in case of a tie. (I.e. If we write the quadratic form as a symmetric matrix M, then this entry M[i,j] has the minimal valuation at the prime p.) Note: This answer is independent of the kind of matrix (Gram or Hessian) associated to the form. INPUT: `p` -- a prime number > 0 OUTPUT: a pair of integers >= 0 EXAMPLES:: sage: Q = QuadraticForm(ZZ, 2, [6, 2, 20]); Q Quadratic form in 2 variables over Integer Ring with coefficients: [ 6 2 ] [ * 20 ] sage: Q.find_entry_with_minimal_scale_at_prime(2) (0, 1) sage: Q.find_entry_with_minimal_scale_at_prime(3) (1, 1) sage: Q.find_entry_with_minimal_scale_at_prime(5) (0, 0) """ n = self.dim() min_val = Infinity ij_index = None val_2 = valuation(2, p) for d in range(n): ## d = difference j-i for e in range(n - d): ## e is the length of the diagonal with value d. ## Compute the valuation of the entry if d == 0: tmp_val = valuation(self[e, e+d], p) else: tmp_val = valuation(self[e, e+d], p) - val_2 ## Check if it's any smaller than what we have if tmp_val < min_val: ij_index = (e,e+d) min_val = tmp_val ## Return the result return ij_index
737a6dd1c3a1f416f4e22b79440b7731a5048fe0
3,652,381
import awkward._v2._connect.pyarrow def from_arrow(array, highlevel=True, behavior=None): """ Args: array (`pyarrow.Array`, `pyarrow.ChunkedArray`, `pyarrow.RecordBatch`, or `pyarrow.Table`): Apache Arrow array to convert into an Awkward Array. highlevel (bool): If True, return an #ak.Array; otherwise, return a low-level #ak.layout.Content subclass. behavior (None or dict): Custom #ak.behavior for the output array, if high-level. """ out = awkward._v2._connect.pyarrow.handle_arrow(array, pass_empty_field=True) return ak._v2._util.wrap(out, behavior, highlevel)
a3c0cea2f2f3763f8997978e2963654cc08ed4e1
3,652,382
def Get_EstimatedRedshifts( scenario={} ): """ obtain estimated source redshifts written to npy file """ return np.genfromtxt( FilenameEstimatedRedshift( scenario ), dtype=None, delimiter=',', names=True, encoding='UTF-8')
0696cfee6783c093b8cf4b7c9703fec18e9799a4
3,652,384
def get_national_museums(db_connection, export_to_csv, export_path): """ Get national museum data from DB """ df = pd.read_sql('select * from optourism.state_national_museum_visits', con=db_connection) if export_to_csv: df.to_csv(f"{export_path}_nationalmuseums_raw.csv", index=False) return df
d34b9ff8f7f95025f932078e8d6e8b179bcff27e
3,652,385
from re import A def hrm_configure_pr_group_membership(): """ Configures the labels and CRUD Strings of pr_group_membership """ T = current.T s3db = current.s3db settings = current.deployment_settings request = current.request function = request.function table = s3db.pr_group_membership if settings.get_hrm_teams() == "Team": table.group_id.label = T("Team Name") table.group_head.label = T("Team Leader") if function == "group": current.response.s3.crud_strings["pr_group_membership"] = Storage( title_create = T("Add Member"), title_display = T("Membership Details"), title_list = T("Team Members"), title_update = T("Edit Membership"), title_search = T("Search Members"), subtitle_create = T("Add New Team Member"), label_list_button = T("List Members"), label_create_button = T("Add Team Member"), label_delete_button = T("Delete Membership"), msg_record_created = T("Team Member added"), msg_record_modified = T("Membership updated"), msg_record_deleted = T("Membership deleted"), msg_list_empty = T("No Members currently registered")) else: table.group_head.label = T("Group Leader") phone_label = settings.get_ui_label_mobile_phone() site_label = settings.get_org_site_label() if function == "group": db = current.db ptable = db.pr_person controller = request.controller def hrm_person_represent(id, row=None): if row: id = row.id elif id: row = db(ptable.id == id).select(ptable.first_name, limitby=(0, 1) ).first() else: return current.messages["NONE"] return A(row.first_name, _href=URL(c=controller, f="person", args=id)) table.person_id.represent = hrm_person_represent list_fields = ["id", (T("First Name"), "person_id"), "person_id$middle_name", "person_id$last_name", "group_head", (T("Email"), "person_id$email.value"), (phone_label, "person_id$phone.value"), (current.messages.ORGANISATION, "person_id$human_resource.organisation_id"), (site_label, "person_id$human_resource.site_id"), ] orderby = "pr_person.first_name" else: list_fields = ["id", "group_id", "group_head", "group_id$description", ] orderby = table.group_id s3db.configure("pr_group_membership", list_fields=list_fields, orderby=orderby)
f5ec66e00063bf8101505de8b1b8a767227b6bbd
3,652,386
import torch def inverse_sphere_distances(batch, dist, labels, anchor_label): """ Function to utilise the distances of batch samples to compute their probability of occurence, and using the inverse to sample actual negatives to the resp. anchor. Args: batch: torch.Tensor(), batch for which the sampling probabilities w.r.t to the anchor are computed. Used only to extract the shape. dist: torch.Tensor(), computed distances between anchor to all batch samples. labels: np.ndarray, labels for each sample for which distances were computed in dist. anchor_label: float, anchor label Returns: distance_matrix, clamped to ensure no zero values are passed. """ bs,dim = len(dist),batch.shape[-1] #negated log-distribution of distances of unit sphere in dimension <dim> log_q_d_inv = ((2.0 - float(dim)) * torch.log(dist) - (float(dim-3) / 2) * torch.log(1.0 - 0.25 * (dist.pow(2)))) #Set sampling probabilities of positives to zero log_q_d_inv[np.where(labels==anchor_label)[0]] = 0 q_d_inv = torch.exp(log_q_d_inv - torch.max(log_q_d_inv)) # - max(log) for stability #Set sampling probabilities of positives to zero q_d_inv[np.where(labels==anchor_label)[0]] = 0 ### NOTE: Cutting of values with high distances made the results slightly worse. # q_d_inv[np.where(dist>upper_cutoff)[0]] = 0 #Normalize inverted distance for probability distr. q_d_inv = q_d_inv/q_d_inv.sum() return q_d_inv.detach().cpu().numpy()
9bcb7f56f08fd850f6a9fa70175e1f83df603705
3,652,387
from functools import reduce def wrap_onspace(text, width): """ A word-wrap function that preserves existing line breaks and most spaces in the text. Expects that existing line breaks are posix newlines (\n). """ return reduce(lambda line, word, width=width: '%s%s%s' % (line, ' \n'[(len(line[line.rfind('\n')+1:]) + len(word.split('\n', 1)[0]) >= width)], word), text.split(' '))
13387fa67dcff2b0329463dfe1ab7d6721255afc
3,652,389
def xsd_simple_type_factory(elem, schema, parent): """ Factory function for XSD simple types. Parses the xs:simpleType element and its child component, that can be a restriction, a list or an union. Annotations are linked to simple type instance, omitting the inner annotation if both are given. """ annotation = None try: child = elem[0] except IndexError: return schema.maps.types[XSD_ANY_SIMPLE_TYPE] else: if child.tag == XSD_ANNOTATION: annotation = XsdAnnotation(elem[0], schema, child) try: child = elem[1] except IndexError: schema.parse_error("(restriction | list | union) expected", elem) return schema.maps.types[XSD_ANY_SIMPLE_TYPE] if child.tag == XSD_RESTRICTION: xsd_type = schema.BUILDERS.restriction_class(child, schema, parent) elif child.tag == XSD_LIST: xsd_type = XsdList(child, schema, parent) elif child.tag == XSD_UNION: xsd_type = schema.BUILDERS.union_class(child, schema, parent) else: schema.parse_error("(restriction | list | union) expected", elem) return schema.maps.types[XSD_ANY_SIMPLE_TYPE] if annotation is not None: xsd_type.annotation = annotation try: xsd_type.name = get_qname(schema.target_namespace, elem.attrib['name']) except KeyError: if parent is None: schema.parse_error("missing attribute 'name' in a global simpleType", elem) xsd_type.name = 'nameless_%s' % str(id(xsd_type)) else: if parent is not None: schema.parse_error("attribute 'name' not allowed for a local simpleType", elem) xsd_type.name = None if 'final' in elem.attrib: try: xsd_type._final = get_xsd_derivation_attribute(elem, 'final') except ValueError as err: xsd_type.parse_error(err, elem) return xsd_type
27ab47787923fadef6364828e2cc7604b006d76d
3,652,390
def amen_solve(A, f, x0, eps, kickrank=4, nswp=20, local_prec='n', local_iters=2, local_restart=40, trunc_norm=1, max_full_size=50, verb=1): """ Approximate linear system solution in the tensor-train (TT) format using Alternating minimal energy (AMEN approach) :References: Sergey Dolgov, Dmitry. Savostyanov Paper 1: http://arxiv.org/abs/1301.6068 Paper 2: http://arxiv.org/abs/1304.1222 :param A: Matrix in the TT-format :type A: matrix :param f: Right-hand side in the TT-format :type f: tensor :param x0: TT-tensor of initial guess. :type x0: tensor :param eps: Accuracy. :type eps: float :Example: >>> import tt >>> import tt.amen #Needed, not imported automatically >>> a = tt.qlaplace_dd([8, 8, 8]) #3D-Laplacian >>> rhs = tt.ones(2, 3 * 8) #Right-hand side of all ones >>> x = tt.amen.amen_solve(a, rhs, rhs, 1e-8) amen_solve: swp=1, max_dx= 9.766E-01, max_res= 3.269E+00, max_rank=5 amen_solve: swp=2, max_dx= 4.293E-01, max_res= 8.335E+00, max_rank=9 amen_solve: swp=3, max_dx= 1.135E-01, max_res= 5.341E+00, max_rank=13 amen_solve: swp=4, max_dx= 9.032E-03, max_res= 5.908E-01, max_rank=17 amen_solve: swp=5, max_dx= 9.500E-04, max_res= 7.636E-02, max_rank=21 amen_solve: swp=6, max_dx= 4.002E-05, max_res= 5.573E-03, max_rank=25 amen_solve: swp=7, max_dx= 4.949E-06, max_res= 8.418E-04, max_rank=29 amen_solve: swp=8, max_dx= 9.618E-07, max_res= 2.599E-04, max_rank=33 amen_solve: swp=9, max_dx= 2.792E-07, max_res= 6.336E-05, max_rank=37 amen_solve: swp=10, max_dx= 4.730E-08, max_res= 1.663E-05, max_rank=41 amen_solve: swp=11, max_dx= 1.508E-08, max_res= 5.463E-06, max_rank=45 amen_solve: swp=12, max_dx= 3.771E-09, max_res= 1.847E-06, max_rank=49 amen_solve: swp=13, max_dx= 7.797E-10, max_res= 6.203E-07, max_rank=53 amen_solve: swp=14, max_dx= 1.747E-10, max_res= 2.058E-07, max_rank=57 amen_solve: swp=15, max_dx= 8.150E-11, max_res= 8.555E-08, max_rank=61 amen_solve: swp=16, max_dx= 2.399E-11, max_res= 4.215E-08, max_rank=65 amen_solve: swp=17, max_dx= 7.871E-12, max_res= 1.341E-08, max_rank=69 amen_solve: swp=18, max_dx= 3.053E-12, max_res= 6.982E-09, max_rank=73 >>> print (tt.matvec(a, x) - rhs).norm() / rhs.norm() 5.5152374305127345e-09 """ m = A.m.copy() rx0 = x0.r.copy() psx0 = x0.ps.copy() if A.is_complex or f.is_complex: amen_f90.amen_f90.ztt_amen_wrapper(f.d, A.n, m, A.tt.r, A.tt.ps, A.tt.core, f.r, f.ps, f.core, rx0, psx0, x0.core, eps, kickrank, nswp, local_iters, local_restart, trunc_norm, max_full_size, verb, local_prec) else: if x0.is_complex: x0 = x0.real() rx0 = x0.r.copy() psx0 = x0.ps.copy() amen_f90.amen_f90.dtt_amen_wrapper(f.d, A.n, m, A.tt.r, A.tt.ps, A.tt.core, f.r, f.ps, f.core, rx0, psx0, x0.core, eps, kickrank, nswp, local_iters, local_restart, trunc_norm, max_full_size, verb, local_prec) x = tt.tensor() x.d = f.d x.n = m.copy() x.r = rx0 if A.is_complex or f.is_complex: x.core = amen_f90.amen_f90.zcore.copy() else: x.core = amen_f90.amen_f90.core.copy() amen_f90.amen_f90.deallocate_result() x.get_ps() return x
15b35bedd6e07f867ae1bae54992f8988b1b56cb
3,652,391
def get_vss(ts, tau_p):
    """ Compute candidates of VS for specified task tau_p """
    if tau_p is None:
        return []
    C, T, D = extract(ts)
    R = rta(C, T)
    _VS = _get_vs(C, T, R, task_name_to_index(ts, tau_p))
    _VS.sort()

    VS = []
    vs = Server(0, 0, None)
    # ignore duplicates
    for s in _VS:
        if vs.C == s[0] and vs.T == s[1]:
            continue
        vs = Server(s[0], s[1], tau_p)
        VS.append(vs)

    return VS
a6b0abc26d32d8e62e4026ee59a6491a02dd6a32
3,652,392
from typing import Iterable from typing import Dict from typing import Hashable from typing import List def groupby( entities: Iterable["DXFEntity"], dxfattrib: str = "", key: "KeyFunc" = None ) -> Dict[Hashable, List["DXFEntity"]]: """ Groups a sequence of DXF entities by a DXF attribute like ``'layer'``, returns a dict with `dxfattrib` values as key and a list of entities matching this `dxfattrib`. A `key` function can be used to combine some DXF attributes (e.g. layer and color) and should return a hashable data type like a tuple of strings, integers or floats, `key` function example:: def group_key(entity: DXFEntity): return entity.dxf.layer, entity.dxf.color For not suitable DXF entities return ``None`` to exclude this entity, in this case it's not required, because :func:`groupby` catches :class:`DXFAttributeError` exceptions to exclude entities, which do not provide layer and/or color attributes, automatically. Result dict for `dxfattrib` = ``'layer'`` may look like this:: { '0': [ ... list of entities ], 'ExampleLayer1': [ ... ], 'ExampleLayer2': [ ... ], ... } Result dict for `key` = `group_key`, which returns a ``(layer, color)`` tuple, may look like this:: { ('0', 1): [ ... list of entities ], ('0', 3): [ ... ], ('0', 7): [ ... ], ('ExampleLayer1', 1): [ ... ], ('ExampleLayer1', 2): [ ... ], ('ExampleLayer1', 5): [ ... ], ('ExampleLayer2', 7): [ ... ], ... } All entity containers (modelspace, paperspace layouts and blocks) and the :class:`~ezdxf.query.EntityQuery` object have a dedicated :meth:`groupby` method. Args: entities: sequence of DXF entities to group by a DXF attribute or a `key` function dxfattrib: grouping DXF attribute like ``'layer'`` key: key function, which accepts a :class:`DXFEntity` as argument and returns a hashable grouping key or ``None`` to ignore this entity """ if all((dxfattrib, key)): raise DXFValueError( "Specify a dxfattrib or a key function, but not both." ) if dxfattrib != "": key = lambda entity: entity.dxf.get_default(dxfattrib) if key is None: raise DXFValueError( "no valid argument found, specify a dxfattrib or a key function, " "but not both." ) result: Dict[Hashable, List["DXFEntity"]] = dict() for dxf_entity in entities: if not dxf_entity.is_alive: continue try: group_key = key(dxf_entity) except DXFAttributeError: # ignore DXF entities, which do not support all query attributes continue if group_key is not None: group = result.setdefault(group_key, []) group.append(dxf_entity) return result
0eecfc2263c1f5716615cb4add6bc092edbb2b8b
3,652,393
import numpy as np
import pandas as pd
def train_test_split(data_filepath, num_train=10, num_test=10):
    """Split a dataset into training and test sets."""
    df = pd.read_csv(data_filepath, sep=',', header=None)
    data = df.values
    train = data[:2*num_train, :]
    test = data[2*num_train:2*(num_train+num_test), :]
    ind = np.argsort(train[:, -1])
    X_train = train[ind][:, :-1]
    y_train = train[ind][:, -1]
    ind = np.argsort(test[:, -1])
    X_test = test[ind][:, :-1]
    y_test = test[ind][:, -1]
    return X_train, y_train, X_test, y_test
650979e62667ade3f88d89f2058bedf8675a5ae5
3,652,394
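A minimal usage sketch for train_test_split above, assuming a small hypothetical CSV (no header, label in the last column); note that the function takes the first 2*num_train rows as the training block and the next 2*num_test rows as the test block, each re-sorted by label.

import numpy as np
import pandas as pd

# hypothetical demo file: 40 rows, 3 feature columns plus a trailing label column
demo = np.column_stack([np.random.rand(40, 3), np.repeat([0.0, 1.0], 20)])
pd.DataFrame(demo).to_csv("demo.csv", header=False, index=False)

X_train, y_train, X_test, y_test = train_test_split("demo.csv", num_train=10, num_test=10)
print(X_train.shape, X_test.shape)   # (20, 3) (20, 3): 2*num_train and 2*num_test rows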
import requests def get_filings(app: Flask = None): """Get a filing with filing_id.""" r = requests.get(f'{app.config["LEGAL_URL"]}/internal/filings') if not r or r.status_code != 200: app.logger.error(f'Failed to collect filings from legal-api. {r} {r.json()} {r.status_code}') raise Exception return r.json()
4b2ba1a3d15918fe5d7b706a20006d0c85818176
3,652,395
def _uno_struct__setattr__(self, name, value): """Sets attribute on UNO struct. Referenced from the pyuno shared library. """ return setattr(self.__dict__["value"], name, value)
6b66213e33bb8b882407ff33bcca177701fb98cd
3,652,396
import datetime  # module import so that datetime.datetime.utcnow() below resolves
def register():
    """Registers the user."""
    if g.user:
        return redirect(url_for('user_home'))
    error = None
    if request.method == 'POST':
        if not request.form['username']:
            error = 'You have to enter a username'
        elif not request.form['email'] or '@' not in request.form['email']:
            error = 'You have to enter a valid email address'
        elif not request.form['password']:
            error = 'You have to enter a password'
        elif request.form['password'] != request.form['password2']:
            error = 'The two passwords do not match'
        elif get_uid(request.form['username']) is not None:
            error = 'The username is already taken'
        else:
            db = get_db()
            db.execute('''insert into user (
                username, email, pw_hash, day, inc_log, dec_log, phase) values (?, ?, ?, 1, ?, ?, 1)''',
                [request.form['username'], request.form['email'],
                 generate_password_hash(request.form['password']),
                 datetime.datetime.utcnow(), datetime.datetime.utcnow()])
            db.commit()
            flash('You were successfully registered and can login now')
            return redirect(url_for('login'))
    return render_template('register.html', error=error)
572ee30c9f4981f6d526f115178ba8988e2b93c1
3,652,398
from mindspore import Tensor, ms_function  # assumed import path for the decorator and Tensor
def test_single_while_2():
    """
    Feature: JIT Fallback
    Description: Test fallback with control flow.
    Expectation: No exception.
    """
    @ms_function
    def control_flow_while():
        x = Tensor(7).astype("int32")
        y = Tensor(0).astype("int32")
        while x >= y:
            y += x
        return y
    res = control_flow_while()
    assert res == 14
8334819ee7d4ea24085e2a2f1ab3d18fb732c8cc
3,652,399
import cuml from cuml.metrics import confusion_matrix def build_and_predict_model(ml_input_df): """ Create a standardized feature matrix X and target array y. Returns the model and accuracy statistics """ feature_names = ["college_education", "male"] + [ "clicks_in_%d" % i for i in range(1, 8) ] X = ml_input_df[feature_names] # Standardize input matrix X = (X - X.mean()) / X.std() y = ml_input_df["clicks_in_category"] model = cuml.LogisticRegression( tol=convergence_tol, penalty="none", solver="qn", fit_intercept=True, max_iter=iterations, C=C, ) model.fit(X, y) # # Predict and evaluate accuracy # (Should be 1.0) at SF-1 # results_dict = {} y_pred = model.predict(X) results_dict["auc"] = roc_auc_score(y.to_array(), y_pred.to_array()) results_dict["precision"] = cupy_precision_score(cp.asarray(y), cp.asarray(y_pred)) results_dict["confusion_matrix"] = confusion_matrix( cp.asarray(y, dtype="int32"), cp.asarray(y_pred, dtype="int32") ) results_dict["output_type"] = "supervised" return results_dict
2d3e192986c680d910a401c9a4da295595fe236e
3,652,400
def codes_new_from_file(fileobj, product_kind, headers_only=False): """ @brief Load in memory a message from a file for a given product. The message can be accessed through its id and will be available\n until @ref grib_release is called.\n \b Examples: \ref get_product_kind.py "get_product_kind.py" @param fileobj python file object @param product_kind one of CODES_PRODUCT_GRIB, CODES_PRODUCT_BUFR, CODES_PRODUCT_GTS or CODES_PRODUCT_ANY @param headers_only whether or not to load the message with the headers only @return id of the message loaded in memory @exception GribInternalError """ if product_kind == CODES_PRODUCT_GRIB: return grib_new_from_file(fileobj, headers_only) if product_kind == CODES_PRODUCT_BUFR: return bufr_new_from_file(fileobj, headers_only) if product_kind == CODES_PRODUCT_METAR: return metar_new_from_file(fileobj, headers_only) if product_kind == CODES_PRODUCT_GTS: return gts_new_from_file(fileobj, headers_only) if product_kind == CODES_PRODUCT_ANY: return any_new_from_file(fileobj, headers_only) raise Exception("Invalid product kind: " + product_kind)
47ed09dbf5bf59160dcab4c36dfd202ae7b190a5
3,652,401
from datetime import datetime def list_resources(path, long_format=None, relations=False): """List resources in a given DMF workspace. Args: path (str): Path to the workspace long_format (bool): List in long format flag relations (bool): Show relationships, in long format Returns: None """ t = ColorTerm() d = DMF(path) if long_format: resources = list(d.find()) uuid_pfx = _uuid_prefix([r.uuid for r in resources]) fields = ("uuid", "name", "type", "modified", "created") widths = (uuid_pfx, 30, 20, 19, 19) colors = (t.green, t.white, t.yellow, t.white, t.white) fmts = [f"{{:{w}s}}" for w in widths] left_gutter = "| " if relations else "" # table header print( " " * len(left_gutter) + t.bold + " ".join([f.format(v) for f, v in zip(fmts, fields)]) + t.reset ) def datestr(t): return datetime.isoformat(datetime.fromtimestamp(t)) # table body for r in resources: values = list(getattr(r, k) for k in fields[:-2]) values.append(datestr(r.modified)) values.append(datestr(r.created)) if not values[1] and r.desc: values[1] = r.desc[: widths[1]] else: values[1] = values[1][: widths[1]] if uuid_pfx < 32: values[0] = values[0][:uuid_pfx] print( left_gutter + " ".join([c + f.format(v) for c, f, v in zip(colors, fmts, values)]) + t.reset ) if relations and len(r.relations) > 0: relitems = [] for rel in r.relations: if rel.subject == r.uuid: fmt = f"{t.white}{{p}}->{t.blue}{{o}}" else: fmt = f"{t.blue}{{s}}->{t.white}{{p}}" item = fmt.format( s=rel.subject[:uuid_pfx], p=rel.predicate, o=rel.object[:uuid_pfx], ) relitems.append(item) print(f"+-- {' / '.join(relitems)}") else: items = [] for r in d.find(): name_color = "w" if r.name: name = r.name elif r.desc: name = r.desc[:40] name_color = t.blue else: name = r.uuid name_color = t.green item = f"{name_color}{name}{t.yellow}:{r.type}" items.append(item) if items: columnized = _display_in_columns(items, max_line=t.width) print(columnized + t.reset)
904f04a008efb8add2d7744bfe1dc71009faff17
3,652,402
import numpy as np
def calc_streamtemp(tas):
    """ Global standard regression equation from Punzet et al. (2012)
    Calculates grid cell stream temperature based on air temperature
    Both input and output temperature are in K"""
    # global constants, taken from Punzet et al., 2012
    c0 = 32; c1 = -0.13; c2 = 1.94
    tas_C = tas - 273.15
    streamtemp_C = c0/(1+np.exp(c1*tas_C+c2))
    streamtemp = streamtemp_C + 273.15
    return streamtemp
493d1ca3b3543db9bfabc8c0e2a4f013da794028
3,652,403
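A quick worked check of calc_streamtemp, assuming an air temperature of 293.15 K (20 °C); with the constants above the regression gives roughly 294.2 K, and it applies elementwise to arrays.

print(calc_streamtemp(293.15))                               # scalar input, roughly 294.2 K
print(calc_streamtemp(np.array([273.15, 293.15, 303.15])))   # elementwise on an array of air temperatures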
def _process(proc_data): """ Final processing to conform to the schema. Parameters: proc_data: (List of Dictionaries) raw structured data to process Returns: List of Dictionaries. Structured data to conform to the schema. """ # nothing more to process return proc_data
7585a8810667f39d6d6a787f2617590aee1ec8cf
3,652,404
import requests def get_station_info(my_token, station_id): """ This function gets all the information on the station ---------- Input: my_token (str) token generated from "token request page" station_id (str) ---------- Output: dictionary of station information """ station_url = '{}stations/{}'.format(base_url, station_id) return requests.get(station_url, headers = {'token': my_token}).json()
9abcb5b74cb0be45396c1b182845467e3fc0829c
3,652,405
def check_fit_input(coordinates, data, weights, unpack=True): """ Validate the inputs to the fit method of gridders. Checks that the coordinates, data, and weights (if given) all have the same shape. Weights arrays are raveled. Parameters ---------- coordinates : tuple of arrays Arrays with the coordinates of each data point. Should be in the following order: (easting, northing, vertical, ...). data : array or tuple of arrays The data values of each data point. Data can have more than one component. In such cases, data should be a tuple of arrays. weights : None or array If not None, then the weights assigned to each data point. Typically, this should be 1 over the data uncertainty squared. If the data has multiple components, the weights have the same number of components. unpack : bool If False, data and weights will be tuples always. If they are single arrays, then they will be returned as a 1-element tuple. If True, will unpack the tuples if there is only 1 array in each. Returns ------- validated_inputs The validated inputs in the same order. If weights are given, will ravel the array before returning. """ data = check_data(data) weights = check_data(weights) coordinates = check_coordinates(coordinates) if any(i.shape != coordinates[0].shape for i in data): raise ValueError( "Data arrays must have the same shape {} as coordinates. Data shapes: {}.".format( coordinates[0].shape, [i.shape for i in data] ) ) if any(w is not None for w in weights): if len(weights) != len(data): raise ValueError( "Number of data '{}' and weights '{}' must be equal.".format( len(data), len(weights) ) ) if any(i.size != j.size for i in weights for j in data): raise ValueError("Weights must have the same size as the data array.") weights = tuple(i.ravel() for i in weights) else: weights = tuple([None] * len(data)) if unpack: if len(weights) == 1: weights = weights[0] if len(data) == 1: data = data[0] return coordinates, data, weights
eef22bb026aad657f096ac4de00a6d2a5a6fa0f8
3,652,407
def get_local_beneficiaries(map_lat: float, map_lng: float, map_zoom: int) -> DataFrame: """Return only projects that are fairly close to the map's centre.""" return beneficiaries[ (map_lat - 100 / map_zoom < beneficiaries.lat) & (beneficiaries.lat < map_lat + 100 / map_zoom) & (map_lng - 100 / map_zoom < beneficiaries.lng) & (beneficiaries.lng < map_lng + 100 / map_zoom) ][:500]
30e0d7255a70c15a25f499e867974c711ae3f750
3,652,409
import numpy as np
def create_atoms(atoms, atom_dict):
    """Transform the atom types in a molecule (e.g., H, C, and O)
    into the indices (e.g., H=0, C=1, and O=2).
    """
    atoms = [atom_dict[a] for a in atoms]
    return np.array(atoms)
10f0e0d0669c2148db7cdcd487a6b72ade2e2f06
3,652,411
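A small usage sketch for create_atoms, with a hypothetical atom_dict mapping element symbols to indices.

atom_dict = {"H": 0, "C": 1, "O": 2}                   # hypothetical mapping
print(create_atoms(["C", "H", "H", "O"], atom_dict))   # -> array([1, 0, 0, 2])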
def uint2float(A,bits,x_min,x_max=None): """ Converts uint[bits] to the corresponding floating point value in the range [x_min,x_max]. """ if x_max is None: x_min,x_max = x_range(x_min) return x_min + (x_max - x_min) * A / ((1 << bits) - 1)
242b72824309a7e0724c42f29e259e52b11a90d2
3,652,412
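Two quick sanity checks for uint2float when x_max is given explicitly (so the x_range helper is not needed).

print(uint2float(255, 8, 0.0, 1.0))     # full-scale 8-bit value maps to x_max -> 1.0
print(uint2float(0, 8, -1.0, 1.0))      # zero maps to x_min -> -1.0
print(uint2float(128, 12, 0.0, 4.095))  # 0.0 + 4.095 * 128 / 4095 = 0.128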
def partCmp(verA: str, verB: str) -> int: """Compare parts of a semver. Args: verA (str): lhs part to compare verB (str): rhs part to compare Returns: int: 0 if equal, 1 if verA > verB and -1 if verA < verB """ if verA == verB or verA == "*" or verB == "*": return 0 if int(verA) > int(verB): return 1 return -1
d9417ce482bf0c2332175412ba3125435f884336
3,652,413
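A few example calls for partCmp; the wildcard '*' matches anything, and numeric parts are compared as integers rather than strings.

print(partCmp("2", "10"))   # -1: integer comparison, not lexicographic
print(partCmp("3", "3"))    # 0
print(partCmp("*", "7"))    # 0: wildcard matches any part
print(partCmp("12", "9"))   # 1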
import io
from urllib.parse import unquote_plus
from pyspark.sql.functions import col  # assumed: sentences/annots are PySpark DataFrames
def OrigPosLemConcordancer(sentences, annots, textMnt, wordType="word", nrows=10):
    """Output HTML for the text (including lemma and pos tags) identified by the AQAnnotation (typically a sentence annotation).
       Below the sentence (in successive rows) output the original terms, parts of speech, and lemma terms for the text identified by the AQAnnotation.

    Args:
        sentences: Sentence annotations that you would like to display.
        annots: The Dataframe of AQAnnotations that will contain the AQAnnotations (orig, lemma, pos) for the above sentences
        textMnt: Path for the str files. The sentence annotations must be for documents contained in these str files.
        wordType: The annotType that identifies the AQAnnotation in the above annotations.
        nrows: Number of sentences to display

    Returns:
        HTML
    """

    def _buildOrigPosLemmaRow(entryType, entry):
        return ("<tr>" +
                "<td>" + entryType + "</td>" +
                "<td bgcolor='grey'/>" +
                "<td bgcolor='grey'/>" +
                entry +
                "</tr>")

    sentenceAnnots = sentences.sort("docId", "startOffset").limit(nrows).collect()
    tmpStr = ""
    docId = ""
    docText = ""
    text = ""
    lastDoc = ""
    curDoc = ""

    # Get the TextAnnotations (for the specified annotType) for each sentence
    for sentence in sentenceAnnots:
        textAnnots = annots.filter((col("docId") == sentence.docId) &
                                   (col("annotType") == wordType) &
                                   (col("startOffset") >= sentence.startOffset) &
                                   (col("endOffset") <= sentence.endOffset)) \
                           .sort("startOffset") \
                           .collect()

        # Get the raw text for the sentence annotation, re-reading the file
        # only when the document changes
        if docId != sentence.docId:
            docId = sentence.docId
            try:
                with io.open(textMnt + sentence.docId, 'r', encoding='utf-8') as f:
                    docText = f.read()
            except Exception as ex:
                print(ex)
                docText = ""

        if docText != "":
            text = docText[sentence.startOffset:sentence.endOffset]
        else:
            text = ""

        tmpStr += "<table border='1' style='font-family: monospace;table-layout: fixed;'><tr>"
        tmpStr += ("<td>" + sentence.docId + "</td>")
        tmpStr += ("<td>" + str(sentence.startOffset) + "</td>")
        tmpStr += ("<td>" + str(sentence.endOffset) + "</td>")
        tmpStr += ("<td colspan='" + str(len(textAnnots)) + "'>" + text + "</td>")
        tmpStr += "</tr>"

        # Get original row
        origEntry = ""
        for annot in textAnnots:
            if (annot.properties != None) and ('orig' in annot.properties) and (len(annot.properties['orig']) > 0):
                origEntry += ("<td>" + unquote_plus(annot.properties['orig']) + "</td>")
            else:
                origEntry += ("<td> </td>")
        tmpStr += _buildOrigPosLemmaRow('orig', origEntry)

        # Get pos row
        posEntry = ""
        for annot in textAnnots:
            if (annot.properties != None) and ('pos' in annot.properties) and (len(annot.properties['pos']) > 0):
                posEntry += ("<td>" + unquote_plus(annot.properties['pos']) + "</td>")
            else:
                posEntry += ("<td> </td>")
        tmpStr += _buildOrigPosLemmaRow('pos', posEntry)

        # Get lemma row
        lemmaEntry = ""
        for annot in textAnnots:
            if (annot.properties != None) and ('lemma' in annot.properties) and (len(annot.properties['lemma']) > 0):
                lemmaEntry += ("<td>" + unquote_plus(annot.properties['lemma']) + "</td>")
            else:
                lemmaEntry += ("<td> </td>")
        tmpStr += _buildOrigPosLemmaRow('lemma', lemmaEntry)

        tmpStr += "</table><p/><p/><p/>"

    return "<html><body>" + tmpStr + "</body></html>"
f11f0240cbee58954901bd57e728cd54ab51b6dd
3,652,414
def show_hidden_article(request, id):
    """ Display a hidden (non-public) article """
    db = connect_mongodb_database(request)
    article = db.articles.find_one({
        'Id': int(id), 'IsPublic': False
    })
    if article is None:
        return HttpResponse(404)
    return render_admin_and_back(request, 'show-hidden-article.html', {
        'page': u'隐私文章 - ' + article['Title'],
        'article': article,
    })
5f9f9c3bc21ed267d8c13226d51a7f44877af976
3,652,416
def MakeNormalPmf(mu, sigma, num_sigmas, n=201): """Makes a PMF discrete approx to a Normal distribution. mu: float mean sigma: float standard deviation num_sigmas: how many sigmas to extend in each direction n: number of values in the Pmf returns: normalized Pmf """ pmf = Pmf() low = mu - num_sigmas * sigma high = mu + num_sigmas * sigma for x in np.linspace(low, high, n): p = EvalNormalPdf(x, mu, sigma) pmf.Set(x, p) pmf.Normalize() return pmf
9bca22cfd3b3b94fa233be9f78696361d9e36726
3,652,417
def histogram(a, bins, ranges): """ Examples -------- >>> x = np.random.uniform(0., 1., 100) >>> H, xedges = np.histogram(x, bins=5, range=[0., 1.]) >>> Hn = histogram(x, bins=5, ranges=[0., 1.]) >>> assert np.all(H == Hn) """ hist_arr = np.zeros((bins,), dtype=a.dtype) return _hist1d_numba_seq(hist_arr, a, bins, np.asarray(ranges))
a53a8204180c8f9d2a3fb36595d14c07da262cbd
3,652,419
from ast import parse as ast_parse  # assumed alias for ast.parse, matching the call below
def is_valid_python_code(src_string: str):
    """True if, and only if, ``src_string`` is valid python.

    Valid python is defined as '``ast.parse(src_string)`` doesn't raise a ``SyntaxError``'
    """
    try:
        ast_parse(src_string)
        return True
    except SyntaxError:
        return False
03ee9d915797ba1cfcbcaf8630d38df529744f1b
3,652,421
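Example calls for is_valid_python_code (assuming ast_parse aliases ast.parse as noted above).

print(is_valid_python_code("x = 1 + 2"))   # True
print(is_valid_python_code("def f(:"))     # False: the SyntaxError is caught
print(is_valid_python_code("for x in"))    # False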
def rss_format_export_post(): """ :return: """ try: payload = request.get_json(force=True) # post data in json except: payload = dict(request.form) # post data in form encoding if 'link' in payload: link = read_value_list_or_not(payload, 'link') else: link = '' results, status = export_post(request, 'RSS') if status == 200: return return_rss_format_export(solr_data=results, link=link) return return_response(results, status)
2ddc5b814cabec3fd84f024d44cb04b0063890c5
3,652,422
def setup_agents(model, initial_locations): """Load the simulated initial locations and return a list that holds all agents. """ initial_locations = initial_locations.reshape(2, model["n_types"], 30000) agents = [] for typ in range(model["n_types"]): for i in range(model["n_agents_by_type"][typ]): agents.append( Agent( typ=typ, initial_location=initial_locations[typ, :, i], n_neighbours=model["n_neighbours"], require_same_type=model["require_same_type"], max_moves=model["max_moves"], ) ) return agents
40368819f0968b3fcaed6a1953f7e4fec453f471
3,652,425
async def get_active_infraction( ctx: Context, user: MemberOrUser, infr_type: str, send_msg: bool = True ) -> t.Optional[dict]: """ Retrieves an active infraction of the given type for the user. If `send_msg` is True and the user has an active infraction matching the `infr_type` parameter, then a message for the moderator will be sent to the context channel letting them know. Otherwise, no message will be sent. """ log.trace(f"Checking if {user} has active infractions of type {infr_type}.") active_infractions = await ctx.bot.api_client.get( 'bot/infractions', params={ 'active': 'true', 'type': infr_type, 'user__id': str(user.id) } ) if active_infractions: # Checks to see if the moderator should be told there is an active infraction if send_msg: log.trace(f"{user} has active infractions of type {infr_type}.") await send_active_infraction_message(ctx, active_infractions[0]) return active_infractions[0] else: log.trace(f"{user} does not have active infractions of type {infr_type}.")
63361319b75e072489544e2956d21ff5cbe08590
3,652,426
import numpy as _n
import matplotlib.pyplot as _p  # aliases assumed from usage; only generic pyplot/numpy calls appear below
def plot_arb_images(label, data, label_string):
    """
    Neatly displays an arbitrary number of images from the camera and returns the figure.

    Parameters:
    -----------
    label: array of values that each image is labeled by, e.g. time
    data: array of arrays of image data
    label_string: string describing label, e.g. 's'
    """
    length = len(data)
    columns = 10
    if length % columns != 0:
        rows = length // columns + 1
    else:
        rows = length // columns
    fig = _p.figure()
    fig.set_figheight(rows * 5)
    fig.set_figwidth(10)
    for i in range(length):
        ax = fig.add_subplot(rows, columns, i + 1)
        ax.matshow(data[i], vmin=_n.min(data), vmax=_n.max(data))
        ax.set_title('%s\n%.1f%s' % (i, label[i], label_string))
        if i % 10 == 0:
            ax.set_xticks([])
            ax.set_ylabel('pixels')
        else:
            ax.set_xticks([])
            ax.set_yticks([])
    fig.tight_layout()
    return fig
56e22d9f7a0651d56e93873b95c2228162f4a602
3,652,428
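A small usage sketch for plot_arb_images with synthetic 8x8 "camera" frames labelled by time in seconds (assumes the numpy/pyplot aliases added above).

label = _n.linspace(0.0, 1.1, 12)                 # one time stamp per frame
data = [_n.random.rand(8, 8) for _ in range(12)]  # synthetic image stack
fig = plot_arb_images(label, data, 's')
fig.savefig("frames.png")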
def listGslbServer(**kargs): """ List the Servers of KT Cloud GSLB. * Args: - zone(String, Required) : [KR-CA, KR-CB, KR-M, KR-M2] * Examples: print(gslb.listGslbServer(zone='KR-M')) """ my_apikey, my_secretkey = c.read_config() if not 'zone' in kargs: return c.printZoneHelp() ZoneName = kargs['zone'] del kargs['zone'] kargs['zoneid'] = c.getzoneidbyhname(ZoneName) M2Bool = c.IsM2(ZoneName) baseurl = c.geturl(ctype='gslb', m2=M2Bool) kargs['command'] = 'listGslbServer' kargs['response'] = 'json' kargs['apikey'] = my_apikey return c.makerequest(kargs, baseurl, my_secretkey)
18d8f0e6699b7cb3080eef5a3d040420ea45329d
3,652,429
import numpy
import cv2
def imencode(image, pix_fmt=IM_RGB, quality=DEFAULT_QUALITY):
    """
    Encode an image with the JPEG codec, converting the pixel colour layout
    according to pix_fmt.
    Parameters
    ----------
    image: source image as a numpy array
    pix_fmt: format of pixel color. Default: RGB
    quality: JPEG quality of the encoded image.

    Returns
    -------
    Buffer of the encoded image
    """
    check_type("image", image, numpy.ndarray)

    if pix_fmt == IM_RGB:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

    _, buf = cv2.imencode('.jpeg', image, params=[cv2.IMWRITE_JPEG_QUALITY, int(quality)])
    return buf
65750d176275e56da55c0660370a56f44baa6e48
3,652,430
import numpy as np
def split_train_test_data(X, Y, train_rate):
    """
    Split a dataset into a training set and a test set.
    :param X: features of the dataset
    :param Y: labels of the dataset
    :param train_rate: fraction of samples assigned to the training set
    :return: training features; training labels; test features; test labels
    """
    number = len(X)
    number_train = int(number * train_rate)
    number_test = number - number_train
    train_X = []
    train_Y = []
    test_X = []
    test_Y = []
    for i in range(number):
        if number_test > 0:
            if number_train == 0 or np.random.randint(2) == 0:
                number_test -= 1
                test_X.append(X[i])
                test_Y.append(Y[i])
            else:
                number_train -= 1
                train_X.append(X[i])
                train_Y.append(Y[i])
        else:
            number_train -= 1
            train_X.append(X[i])
            train_Y.append(Y[i])
    return np.array(train_X), np.array(train_Y), np.array(test_X), np.array(test_Y)
7943278bc662968f8e019368ed8744d9e2a23929
3,652,431
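A usage sketch for split_train_test_data; the assignment is randomized but the counts are exact, so a 0.8 rate on 10 samples always yields 8 training and 2 test rows.

X = np.arange(20).reshape(10, 2)
Y = np.arange(10)
train_X, train_Y, test_X, test_Y = split_train_test_data(X, Y, train_rate=0.8)
print(train_X.shape, test_X.shape)   # (8, 2) (2, 2)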