content: string (lengths 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
def structures_at_boundaries(gdf, datamodel, areas, structures, tolerance, distance):
    """
    Check if there are structures near area (typically water-level areas) boundaries.

    Parameters
    ----------
    gdf : ExtendedGeoDataframe
        ExtendedGeoDataFrame, HyDAMO hydroobject layer
    datamodel : HyDAMO
        HyDAMO datamodel class
    areas : str
        HyDAMO datamodel class with areas ("peilgebiedenpraktijk")
    structures : str
        List with structure-types to be expected on the boundary
    tolerance : numeric
        Tolerance to determine if a structure is on the hydroobject
    distance : numeric
        Max distance between structure and area-boundary

    Returns
    -------
    Pandas Series
        Default dtype is bool
    """
    areas_gdf = getattr(datamodel, areas)
    areas_sindex = areas_gdf.sindex
    struc_series = _layers_from_datamodel(structures, datamodel)
    struc_sindex = struc_series.sindex
    return gdf.apply(
        lambda x: _structures_at_boundaries(
            x, areas_gdf, areas_sindex, struc_series, struc_sindex, tolerance, distance
        ),
        axis=1,
    )
6f1c83f2ac02b6773bf51f64326ed4f6e3c7c354
3,654,624
from typing import List
from typing import Tuple
from typing import Union


def above_cutoff(gene_freq_tup_list: List[Tuple[Union[str, tuple], Tuple[str, str]]],
                 cutoff: int) -> List[str]:
    """Return the genes/edges that are in at least the given cutoff's networks

    Parameters
    ----------
    gene_freq_tup_list : List[Tuple[Union[str, tuple], Tuple[str, str]]]
        list of (comparison_object, (frequency_count, percent)) tuples in order of most
        common; should be the return value of most_common()
    cutoff : int
        number to be used as the minimum for how many networks the object must be
        present in to be returned

    Returns
    -------
    list of objects that were in at least as many networks as the cutoff given
    """
    above = []
    for gene, freq in gene_freq_tup_list:
        if count_in_freq(freq) >= cutoff:
            above.append(gene)
        else:
            break  # since it's ordered, no need wasting time checking the rest
    return above
c5679743d0b87fcbf7b6955a755aa8bbb11f5f95
3,654,625
import numpy as np


def normalizeWindows(X):
    """
    Do point centering and sphere normalizing to each window
    to control for linear drift and global amplitude

    Parameters
    ----------
    X: ndarray(N, Win)
        An array of N sliding windows

    Returns
    -------
    XRet: ndarray(N, Win)
        An array in which the mean of each row is zero
        and the norm of each row is 1
    """
    XRet = X - np.mean(X, 1)[:, None]
    Norm = np.sqrt(np.sum(XRet**2, 1))
    Norm[Norm == 0] = 1
    XRet /= Norm[:, None]
    return XRet
013b5829153ee21979bcf9dac8457beb1adbe2a2
3,654,626
import numpy as np
import torch


def cost_matrix_slow(x, y):
    """
    Input: x is a Nxd matrix
           y is an optional Mxd matrix
    Output: dist is a NxM matrix where dist[i,j] is the square norm between x[i,:] and y[j,:]
            if y is not given then use 'y=x'.
    i.e. dist[i,j] = ||x[i,:]-y[j,:]||^2
    """
    x_norm = (x ** 2).sum(1).view(-1, 1)
    if y is not None:
        y_t = torch.transpose(y, 0, 1)
        y_norm = (y ** 2).sum(1).view(1, -1)
    else:
        y_t = torch.transpose(x, 0, 1)
        y_norm = x_norm.view(1, -1)

    dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)
    # Ensure diagonal is zero if x=y
    # if y is None:
    #     dist = dist - torch.diag(dist.diag)
    return torch.clamp(dist, 0.0, np.inf)
c27889346d6fb1a075eabf908f5e56ececb554d0
3,654,627
import numpy as np


def get_dists(ts1_sax, ts2_sax, lookup_table):
    """
    Compute the distance between each symbol of two words (series) using a lookup table.
    ts1_sax and ts2_sax are two SAX representations (strings) built under the same conditions.
    """
    # Verify integrity
    if ts1_sax.shape[0] != ts2_sax.shape[0]:
        return -1

    # convert symbol series into series of indexes (symbol indexes)
    ts1_sax_id = symbol2index(ts1_sax)
    ts2_sax_id = symbol2index(ts2_sax)

    # array of distances between symbols
    dists = np.zeros(ts1_sax.shape[0])
    for i in range(ts1_sax_id.shape[0]):
        dists[i] = lookup_table[ts1_sax_id[i], ts2_sax_id[i]]
    return dists
3da21a1c57952225326c97bb7a58238545131e94
3,654,628
def get_dom_coords(string, dom):
    """Get coordinates of a DOM specified by the string and DOM number.

    Parameters
    ----------
    string : int
        String number (between 1 and 86)
    dom : int
        DOM number (between 1 and 60)

    Returns
    -------
    tuple(float, float, float)
        The x, y, z coordinates of the DOM.
    """
    assert string > 0 and string <= 86, 'String must be within [1, 86]'
    assert dom > 0 and dom <= 60, 'DOM must be within [1, 60]'

    a, b = get_matrix_indices(string)
    dom_id = dom - 1
    return x_ic78_coords[a, b, dom_id]
08e0817ab85e71caa38e6c247e1cc488d03ce1c0
3,654,629
import numpy as np


def relevance_ka(x):
    """
    Based on code from https://www.kaggle.com/aleksandradeis/regression-addressing-extreme-rare-cases
    See paper: https://www.researchgate.net/publication/220699419_Utility-Based_Regression

    Use the sigmoid function to create the relevance function, so that the relevance
    function has values close to 1 when the target variable is greater than 0.6.

    Args:
        x: the x values for which the relevance should be returned
    """
    x = np.array(x)
    return sigmoid((x - 0.5) * 15)
e056c8ae1b1c527dc1a875c201967eacf914d7e0
3,654,630
from datetime import datetime


def now(mydateformat='%Y%m%dT%H%M%S'):
    """ Return current datetime as string.

    Just a shorthand to abbreviate the common task to obtain the current
    datetime as a string, e.g. for result versioning.

    Args:
        mydateformat: optional format string (default: '%Y%m%dT%H%M%S')

    Returns:
        datetime.now(), formatted to string with argument mydateformat, e.g.
        YYYYMMDDThhmmss ==> 20131007T123456
    """
    return datetime.now().strftime(mydateformat)
f4f98116700888a4be273143d635c62859c96e03
3,654,631
from datetime import date
from datetime import datetime


def cmp_point_identities(a, b):
    """
    Given point identities a, b (may be string, number, date, etc),
    the collation algorithm compares:

      (a) strings case-insensitively
      (b) dates and datetimes, compared by normalizing date -> datetime.
      (c) all other types use __cmp__(self, other) defaults from type.
    """
    # Note: cmp() and basestring are Python 2 built-ins; this is legacy Python 2 code.
    dt = lambda d: datetime(*d.timetuple()[0:6])  # date|datetime -> datetime
    if isinstance(a, basestring) and isinstance(b, basestring):
        return cmp(a.upper(), b.upper())
    if isinstance(a, date) or isinstance(b, date):
        return cmp(dt(a), dt(b))
    return cmp(a, b)
475206398fc0c2f301446c5c264bf67d1671a2ad
3,654,633
def shortest_complement(t, m, l):
    """
    Given a primitive slope t and the holonomies of the current meridian
    and longitude, returns a shortest complementary slope s so that s.t = +1.
    """
    c, d = t  # second slope
    _, a, b = xgcd(d, c)  # first slope
    b = -b
    assert a*d - b*c == 1
    return a_shortest_lattice_point_on_line((a, b), (c, d), m, l)
4653a7eac7af7ed8a67ce298f1453236cfeabf73
3,654,634
def run_pii(text, lang):
    """
    Runs the given set of regexes on the data "lines" and pulls out the
    tagged items.

    The lines structure stores the language type(s). This can be used for
    language-specific regexes, although we're dropping that for now and using
    only "default"/non-language-specific regexes.
    """
    # print('Detecting....')
    # What is this for...?
    text = text.encode().decode()
    matches = detect_pii(text, lang, high_risk_tags)
    # print(matches)
    match_set = (text, {})
    if len(matches) > 0:
        # !!! REDACTION HAPPENS HERE !!!
        redacted_str, metadata = redact_pii(text, matches)
        metadata_out = {"regex metadata": metadata, "original": text, "redacted": redacted_str}
        match_set = (redacted_str, metadata_out)
    return match_set
e9f34686be27773952f64a9231e86c76c0170483
3,654,635
def get_ref_cat(butler, visit, center_radec, radius=2.1):
    """
    Get the reference catalog for the desired visit for the requested
    sky location and sky cone radius.
    """
    ref_cats = RefCat(butler)
    try:
        band = list(butler.subset('src', visit=visit))[0].dataId['filter']
    except dp.butlerExceptions.NoResults:
        band = list(butler.subset('src', expId=visit))[0].dataId['filter']
    centerCoord = lsst_geom.SpherePoint(center_radec[0]*lsst_geom.degrees,
                                        center_radec[1]*lsst_geom.degrees)
    return ref_cats(centerCoord, band, radius)
d2814729aeb775668d6eff6fdc68a0676168b16e
3,654,636
def replace_dict(d, **kwargs):
    """
    Replace values by keyword on a dict, returning a new dict.
    """
    e = d.copy()
    e.update(kwargs)
    return e
be1cc21be5320eeea13307dd4ed5025b51339eec
3,654,637
def pageHeader(
        headline="",
        tagline=""):
    """
    *Generate a pageHeader - TBS style*

    **Key Arguments:**
        - ``headline`` -- the headline text
        - ``tagline`` -- the tagline text for below the headline

    **Return:**
        - ``pageHeader`` -- the pageHeader
    """
    pageHeader = """
    <div class="page-header" id=" ">
        <h1>%(headline)s<br><small>%(tagline)s</small></h1>
    </div>""" % locals()

    return pageHeader
7d9e91df8af2fff92b0b7096cd1a13198d899e15
3,654,638
def get_counter_merge_suggestion(merge_suggestion_tokens):
    """Return opposite of merge suggestion

    Args:
        merge_suggestion_tokens (list): tokens in merge suggestion

    Returns:
        str: opposite of merge suggestion
    """
    counter_merge_suggestion = ' '.join(merge_suggestion_tokens)
    if merge_suggestion_tokens[-1][-1] == '་':
        counter_merge_suggestion += " "
    return counter_merge_suggestion
e32e0f1b64fe77acaa8d88d72dca9304b7427674
3,654,639
import re
from datetime import datetime

import pytz


def parse_rfc3339_utc_string(rfc3339_utc_string):
    """Converts a datestamp from RFC3339 UTC to a datetime.

    Args:
        rfc3339_utc_string: a datetime string in RFC3339 UTC "Zulu" format

    Returns:
        A datetime.
    """
    # The timestamps from the Google Operations are all in RFC3339 format, but
    # they are sometimes formatted to milliseconds, microseconds, sometimes
    # nanoseconds, and sometimes only seconds:
    # * 2016-11-14T23:05:56Z
    # * 2016-11-14T23:05:56.010Z
    # * 2016-11-14T23:05:56.010429Z
    # * 2016-11-14T23:05:56.010429380Z
    m = re.match(r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}).?(\d*)Z',
                 rfc3339_utc_string)

    # It would be unexpected to get a different date format back from Google.
    # If we raise an exception here, we can break people completely.
    # Instead, let's just return None and people can report that some dates
    # are not showing up.
    # We might reconsider this approach in the future; it was originally
    # established when dates were only used for display.
    if not m:
        return None

    groups = m.groups()
    if len(groups[6]) not in (0, 3, 6, 9):
        return None

    # Create a UTC datestamp from parsed components
    # 1- Turn components 0-5 from strings to integers
    # 2- If the fractional component does not exist, set it to 0.
    #    If it does exist, convert it to microseconds.
    g = [int(val) for val in groups[:6]]

    fraction = groups[6]
    if not fraction:
        micros = 0
    elif len(fraction) == 3:
        micros = int(fraction) * 1000
    elif len(fraction) == 6:
        micros = int(fraction)
    elif len(fraction) == 9:
        # When nanoseconds are provided, we round
        micros = int(round(int(fraction) // 1000))
    else:
        assert False, 'Fraction length not 0, 3, 6, or 9: {}'.format(len(fraction))

    try:
        return datetime(
            g[0], g[1], g[2], g[3], g[4], g[5], micros, tzinfo=pytz.utc)
    except ValueError as e:
        assert False, 'Could not parse RFC3339 datestring: {} exception: {}'.format(
            rfc3339_utc_string, e)
04653bd5673c5ca7713c9e6014947886781f3f5e
3,654,640
from datetime import datetime def response(code, body='', etag=None, last_modified=None, expires=None, **kw): """Helper to build an HTTP response. Parameters: code : An integer status code. body : The response body. See `Response.__init__` for details. etag : A value for the ETag header. Double quotes will be added unless the string starts and ends with a double quote. last_modified : A value for the Last-Modified header as a datetime.datetime object or Unix timestamp. expires : A value for the Expires header as number of seconds, datetime.timedelta or datetime.datetime object. Note: a value of type int or float is interpreted as a number of seconds in the future, *not* as Unix timestamp. **kw : All other keyword arguments are interpreted as response headers. The names will be converted to header names by replacing underscores with hyphens and converting to title case (e.g. `x_powered_by` => `X-Powered-By`). """ if etag is not None: if not (etag[0] == '"' and etag[-1] == '"'): etag = '"%s"' % etag kw['etag'] = etag if last_modified is not None: kw['last_modified'] = datetime_to_httpdate(last_modified) if expires is not None: if isinstance(expires, datetime): kw['expires'] = datetime_to_httpdate(expires) else: kw['expires'] = timedelta_to_httpdate(expires) headers = [(k.replace('_', '-').title(), v) for k, v in sorted(kw.items())] return Response(code, headers, body)
094e7dc99114d4b742808c0aa123001fb301fb14
3,654,641
from datetime import datetime
from datetime import timezone

import click


def parse_tweet(raw_tweet, source, now=None):
    """
    Parses a single raw tweet line from a twtxt file
    and returns a :class:`Tweet` object.

    :param str raw_tweet: a single raw tweet line
    :param Source source: the source of the given tweet
    :param Datetime now: the current datetime

    :returns: the parsed tweet
    :rtype: Tweet
    """
    if now is None:
        now = datetime.now(timezone.utc)

    raw_created_at, text = raw_tweet.split("\t", 1)
    created_at = parse_iso8601(raw_created_at)

    if created_at > now:
        raise ValueError("Tweet is from the future")

    return Tweet(click.unstyle(text.strip()), created_at, source)
85f90ce469091cc82dd120e6c100859f8bcc8f2c
3,654,643
import requests


def scopes(request, coalition_id):
    """
    Update coalition required scopes with a specific set of scopes
    """
    scopes = []
    for key in request.POST:
        if key in ESI_SCOPES:
            scopes.append(key)

    url = f"{GLOBAL_URL}/{coalition_id}"
    headers = global_headers(request, {"Content-type": "application/json"})
    data = "{\"mandatory_esi_scopes\": [\"" + "\",\"".join(scopes) + "\"]}"

    request_change_scopes = requests.put(url, headers=headers, data=data)
    if request_change_scopes.status_code != 200:
        return render_error(request_change_scopes)

    params = urlencode({"changed_scopes": "true"})
    return_url = reverse("coalition-sheet", args=[coalition_id]) + "?" + params
    return redirect(return_url)
71d07be26815a8e30ed37074b4452fa7574d07b5
3,654,644
def recursive_dictionary_cleanup(dictionary): """Recursively enrich the dictionary and replace object links with names etc. These patterns are replaced: [phobostype, bpyobj] -> {'object': bpyobj, 'name': getObjectName(bpyobj, phobostype)} Args: dictionary(dict): dictionary to enrich Returns: : dict -- dictionary with replace/enriched patterns """ for key, value in dictionary.items(): # handle everything as list, so we can loop over it unlist = False if not isinstance(value, list): value = [value] unlist = True itemlist = [] for item in value: if isinstance(item, list) and item: # (phobostype, bpyobj) -> {'object': bpyobj, 'name': getObjectName(bpyobj)} if ( len(item) == 2 and isinstance(item[0], str) and (item[0] in ['joint'] + [enum[0] for enum in defs.phobostypes]) and isinstance(item[1], bpy.types.Object) ): itemlist.append( { 'object': item[1], 'name': nUtils.getObjectName(item[1], phobostype=item[0]), } ) # recursion on subdictionaries elif isinstance(item, dict): itemlist.append(recursive_dictionary_cleanup(item)) else: itemlist.append(item) # extract single items back out of the list dictionary[key] = itemlist if not unlist else itemlist[0] return dictionary
b49798dd1918401951bae57e544406ee1d14ebd6
3,654,645
def validate_dtype(dtype_in): """ Input is an argument represention one, or more datatypes. Per column, number of columns have to match number of columns in csv file: dtype = [pa.int32(), pa.int32(), pa.int32(), pa.int32()] dtype = {'__columns__': [pa.int32(), pa.int32(), pa.int32(), pa.int32()]} Default: dtype_in = pa.int32() dtype_out = {'__default__': pa.int32()} Not yet supported: Default, optional column overwrite: dtype_in = {'__default__': pa.int32(), '__columns__': {'colname': pa.int32()}} dtype_out = raise ValueError dtype_in = {'colname': pa.int32()} dtype_out = raise ValueError """ if dtype_in is None: # use default datatype dtype_in = pa.float32() argtype = type(dtype_in) valid_types = _dtypes_from_arrow() if argtype is pa.DataType: if dtype_in not in list(valid_types.keys()): raise ValueError('Not supporting type: ' + dtype_in.__str__()) return {'__default__': valid_types[dtype_in]} if argtype is dict: raise ValueError('Not yet supported dict') if argtype is list and dtype_in.__len__() > 0: matches = [dtype in list(valid_types.keys()) for dtype in dtype_in] if False in matches: mismatches = [dtype_in[j].__str__() + '(column:' + str(j) + ')' for j in range(0, len(matches)) if matches[j] is False] raise ValueError('List contains unsupported datatype: ' + ','.join(mismatches)) if set(dtype_in).__len__() == 1: # all list members are of same type return {'__default__': valid_types[dtype_in[0]]} return {'__columns__': list([valid_types[dtype] for dtype in dtype_in])} raise ValueError('No input to match datatypes')
274df2e010314867f31c14951f1e0b18190218ad
3,654,646
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import TypeVar

# T is assumed to be a type variable for the cached value type
# (it stands in for the stray `T` imported from `re` in the original).
T = TypeVar("T")


def cache(
        cache_class: Callable[[], base_cache.BaseCache[T]],
        serializer: Callable[[], cache_serializer.CacheSerializer],
        conditional: Callable[[List[Any], Dict[str, Any]], bool] = _always_true):
    """
    cache
    =====
    parameters:
        cache_class (base_cache.BaseCache)
        conditional (Callable[[List[Any], Dict[str, Any]])

    Decorator that caches function results using the provided class. The class
    must be a subclass of base_cache, providing get and set methods with
    appropriate signatures.

    An optional conditional can be passed, which receives the *args and
    **kwargs of the called function. This function determines whether or not
    to cache, or to always recompute, based on whether it returns True or False.
    """
    serializer_instance = serializer()
    cache_instance = cache_class()
    return curry(_wrapper, cache_instance, serializer_instance, conditional)
c4d5318d471e13f5001eb12b6d3dc4f278478855
3,654,648
import numpy as np


def words2chars(images, labels, gaplines):
    """ Transform word images with gaplines into individual chars """
    # Total number of chars
    length = sum([len(l) for l in labels])

    imgs = np.empty(length, dtype=object)
    newLabels = []

    height = images[0].shape[0]

    idx = 0
    for i, gaps in enumerate(gaplines):
        for pos in range(len(gaps) - 1):
            imgs[idx] = images[i][0:height, gaps[pos]:gaps[pos+1]]
            newLabels.append(char2idx(labels[i][pos]))
            idx += 1

    print("Loaded chars from words:", length)
    return imgs, newLabels
e04bf5b1e9b47c2f930600433b4214343e067f26
3,654,649
def create_spark_session(spark_jars: str) -> SparkSession:
    """
    Create Spark session

    :param spark_jars: Hadoop-AWS JARs
    :return: SparkSession
    """
    spark = SparkSession \
        .builder \
        .config("spark.jars.packages", spark_jars) \
        .appName("Sparkify ETL") \
        .getOrCreate()
    return spark
576072460e465610fff98da377cc20a8472c537f
3,654,650
from datetime import datetime


def make_expired(request, pk):
    """
    Change the ticket status to "expired" (the number was missed)
    """
    try:
        reg = Registration.objects.get(pk=pk)
    except Registration.DoesNotExist:
        return Response('registration not found', status=status.HTTP_404_NOT_FOUND)

    data = {
        'status': REGISTRATION_STATUS_EXPIRED
    }
    serializer = RegistrationSerializer(reg, data=data, partial=True)
    if serializer.is_valid():
        reg = serializer.save()
        reg.end_time = datetime.now()
        reg.save()

        # Notify the n-th customer waiting behind that their table is ready
        _notify_ready(reg.table_type)

        return Response(serializer.data, status=status.HTTP_200_OK)
    return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
827f45c12bcbb973eb073662d8a14765422fdf51
3,654,651
def word2vec_similarity(segmented_topics, accumulator, with_std=False, with_support=False): """For each topic segmentation, compute average cosine similarity using a :class:`~gensim.topic_coherence.text_analysis.WordVectorsAccumulator`. Parameters ---------- segmented_topics : list of lists of (int, `numpy.ndarray`) Output from the :func:`~gensim.topic_coherence.segmentation.s_one_set`. accumulator : :class:`~gensim.topic_coherence.text_analysis.WordVectorsAccumulator` or :class:`~gensim.topic_coherence.text_analysis.InvertedIndexAccumulator` Word occurrence accumulator. with_std : bool, optional True to also include standard deviation across topic segment sets in addition to the mean coherence for each topic. with_support : bool, optional True to also include support across topic segments. The support is defined as the number of pairwise similarity comparisons were used to compute the overall topic coherence. Returns ------- list of (float[, float[, int]]) Сosine word2vec similarities per topic (with std/support if `with_std`, `with_support`). Examples -------- .. sourcecode:: pycon >>> import numpy as np >>> from gensim.corpora.dictionary import Dictionary >>> from gensim.topic_coherence import indirect_confirmation_measure >>> from gensim.topic_coherence import text_analysis >>> >>> # create segmentation >>> segmentation = [[(1, np.array([1, 2])), (2, np.array([1, 2]))]] >>> >>> # create accumulator >>> dictionary = Dictionary() >>> dictionary.id2token = {1: 'fake', 2: 'tokens'} >>> accumulator = text_analysis.WordVectorsAccumulator({1, 2}, dictionary) >>> _ = accumulator.accumulate([['fake', 'tokens'], ['tokens', 'fake']], 5) >>> >>> # should be (0.726752426218 0.00695475919227) >>> mean, std = indirect_confirmation_measure.word2vec_similarity(segmentation, accumulator, with_std=True)[0] """ topic_coherences = [] total_oov = 0 for topic_index, topic_segments in enumerate(segmented_topics): segment_sims = [] num_oov = 0 for w_prime, w_star in topic_segments: if not hasattr(w_prime, '__iter__'): w_prime = [w_prime] if not hasattr(w_star, '__iter__'): w_star = [w_star] try: segment_sims.append(accumulator.ids_similarity(w_prime, w_star)) except ZeroDivisionError: num_oov += 1 if num_oov > 0: total_oov += 1 logger.warning( "%d terms for topic %d are not in word2vec model vocabulary", num_oov, topic_index) topic_coherences.append(aggregate_segment_sims(segment_sims, with_std, with_support)) if total_oov > 0: logger.warning("%d terms for are not in word2vec model vocabulary", total_oov) return topic_coherences
1a3d439e75c4732138f42ea14e7fd50eb6e7d5cb
3,654,652
def addGroupsToKey(server, activation_key, groups):
    """
    Add server groups to an activation key

    CLI Example:

    .. code-block:: bash

        salt-run spacewalk.addGroupsToKey spacewalk01.domain.com 1-my-key '[group1, group2]'
    """
    try:
        client, key = _get_session(server)
    except Exception as exc:  # pylint: disable=broad-except
        err_msg = "Exception raised when connecting to spacewalk server ({}): {}".format(
            server, exc
        )
        log.error(err_msg)
        return {"Error": err_msg}

    all_groups = client.systemgroup.listAllGroups(key)
    groupIds = []
    for group in all_groups:
        if group["name"] in groups:
            groupIds.append(group["id"])

    if client.activationkey.addServerGroups(key, activation_key, groupIds) == 1:
        return {activation_key: groups}
    else:
        return {activation_key: "Failed to add groups to activation key"}
346690a9eac24f62f4410b23f60bb589d174c9ed
3,654,653
def get_user_for_delete():
    """Query for Users table."""
    delete_user = Users.query \
        .get(DELETE_USER_ID)
    return delete_user
208dbbe47550c6889848b7ff61324acf23a4c495
3,654,654
from typing import List
from typing import Optional


def station_code_from_duids(duids: List[str]) -> Optional[str]:
    """
    Derives a station code from a list of duids

    ex.

    BARRON1,BARRON2 => BARRON
    OSBAG,OSBAG => OSBAG
    """
    if type(duids) is not list:
        return None

    if not duids:
        return None

    if len(duids) == 0:
        return None

    duids_uniq = list(set(duids))

    common = findcommonstart(duids_uniq)

    if not common:
        return None

    # strip last character if we have one
    if is_single_number(common[-1]):
        common = common[:-1]

    if common.endswith("_"):
        common = common[:-1]

    if len(common) > 2:
        return common

    return None
1f976ee0b7a82453673ea07c20070e502df5fcf5
3,654,655
def erosion(image, selem, out=None, shift_x=False, shift_y=False): """Return greyscale morphological erosion of an image. Morphological erosion sets a pixel at (i,j) to the minimum over all pixels in the neighborhood centered at (i,j). Erosion shrinks bright regions and enlarges dark regions. Parameters ---------- image : ndarray Image array. selem : ndarray The neighborhood expressed as a 2-D array of 1's and 0's. out : ndarray The array to store the result of the morphology. If None is passed, a new array will be allocated. shift_x, shift_y : bool shift structuring element about center point. This only affects eccentric structuring elements (i.e. selem with even numbered sides). Returns ------- eroded : uint8 array The result of the morphological erosion. Examples -------- >>> # Erosion shrinks bright regions >>> import numpy as np >>> from skimage.morphology import square >>> bright_square = np.array([[0, 0, 0, 0, 0], ... [0, 1, 1, 1, 0], ... [0, 1, 1, 1, 0], ... [0, 1, 1, 1, 0], ... [0, 0, 0, 0, 0]], dtype=np.uint8) >>> erosion(bright_square, square(3)) array([[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 1, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]], dtype=uint8) """ if image is out: raise NotImplementedError("In-place erosion not supported!") image = img_as_ubyte(image) selem = img_as_ubyte(selem) return cmorph._erode(image, selem, out=out, shift_x=shift_x, shift_y=shift_y)
2e7c2547b862add24cc6a4355cf3e0308cb2f342
3,654,656
def NE(x=None, y=None):
    """
    Compares two values and returns:
        true when the values are not equivalent.
        false when the values are equivalent.

    See https://docs.mongodb.com/manual/reference/operator/aggregation/ne/
    for more details

    :param x: first value or expression
    :param y: second value or expression
    :return: Aggregation operator
    """
    if x is None and y is None:
        return {'$ne': []}
    return {'$ne': [x, y]}
be721daf480ec0cb465a3c010c4f910a10fbbb1d
3,654,657
def TCPs_from_tc(type_constraint):
    """
    Take type_constraint(type_param_str, allowed_type_strs)
    and return a list of TypeConstraintParam
    """
    tys = type_constraint.allowed_type_strs  # Get all ONNX types
    # Convert to Knossos and uniquify
    tys = set(onnxType_to_Type_with_mangler(ty) for ty in tys)
    return [
        TypeConstraintParam(type_constraint.type_param_str, ty)
        for ty in tys
    ]
7c2162bb2dde0b00caf289511f20804cadaa17e5
3,654,658
import numpy as np
import pandas as pd


def _randomde(allgenes, allfolds, size):
    """Randomly select genes from the allgenes array and fold changes from the
    allfolds array. The size argument indicates how many to draw.

    Parameters
    ----------
    allgenes : numpy array
        numpy array with all the genes expressed in the cells where de is generated
    allfolds : numpy array
        an array of fold changes from which the simulation should draw
    size : int
        number of non-zero weights (typically number of DE genes)

    Returns
    -------
    type : PandasDataFrame
        DataFrame with randomly chosen genes and weights.
    """
    rdgenes = np.random.choice(allgenes, size, replace=False)
    rdfolds = np.random.choice(allfolds, size, replace=False)

    rdDF = pd.DataFrame({'id': rdgenes, 'weights': rdfolds})
    return rdDF
1a5f38eab8933b90697f4999cb7571fe602db3f9
3,654,659
import time def XCor(spectra, mask_l, mask_h, mask_w, vel, lbary_ltopo, vel_width=30,\ vel_step=0.3, start_order=0, spec_order=9,iv_order=10,sn_order=8,max_vel_rough=300.): """ Calculates the cross-correlation function for a Coralie Spectra """ # speed of light, km/s c = 2.99792458E5 # loop over orders norders = spectra.shape[1] # determine minimum velocities vel_min = vel - vel_width vel_max = vel + vel_width N = int(np.ceil( (2*vel_width) / vel_step )) Xcor_full = np.zeros( (N, norders+1) ) sn = np.zeros( (norders) ) nlines_used = np.zeros( (norders) ) velocities = vel_min + np.arange( N ) * vel_step Xcor_full[:,0] = velocities weight=0.0 mask_middle = 0.5*(mask_l + mask_h) W = np.zeros( norders ) vwt = 300 for j in range(start_order,norders): t1 = time.time() LL = np.where( spectra[spec_order,j,:] != 0 ) if len(LL[0]) > 0: x1 = np.min( LL ) x2 = np.max( LL ) w1 = np.argmin( np.absolute( spectra[0,j,:] - spectra[0,j,x1] ) ) w2 = np.argmin( np.absolute( spectra[0,j,:] - spectra[0,j,x2] ) ) l1_0 = spectra[0,j,w1] / lbary_ltopo l2_0 = spectra[0,j,w2] / lbary_ltopo ww1 = np.argmin( np.abs( spectra[0,j,:] - l1_0*(1+(31+max_vel_rough)/c) ) ) ww2 = np.argmin( np.abs( spectra[0,j,:] - l2_0*(1-(31+max_vel_rough)/c) ) ) # should not happen, but hey, just in case... if (ww1 < w1): ww1 = w1 if (ww2 > w2): ww2 = w2 l1 = spectra[0,j,ww1] l2 = spectra[0,j,ww2] II = np.where( (mask_l > l1) & (mask_h < l2) ) #if len(II[0])>0: #print j,II[0][0],II[0][-1] nlu = len(II[0]) nlines_used[j] = nlu snw1 = int(0.25*spectra.shape[2]) snw2 = int(0.75*spectra.shape[2]) if (nlu > 0): # calculate median S/N #median_sn = np.median( spectra[5,j,w1:w2] * np.sqrt( spectra[6,j,w1:w2] ) ) median_sn = np.median( spectra[sn_order,j,snw1:snw2] ) sn[j] = median_sn S = spectra[spec_order,j,w1:w2] #iv = spectra[iv_order,j,w1:w2] signal2noise = spectra[sn_order,j,w1:w2] snwa = np.zeros(N) for k in range(N): #print k Xcor_full[k,j+1], snw = CCF.ccfcos(mask_l[II], mask_h[II], spectra[0,j,w1:w2], S,\ mask_w[II], signal2noise, vel_min + k*vel_step) snwa[k] = snw if np.isnan(Xcor_full[k,j+1]): Xcor_full[k,j+1] = Xcor_full[k-1,j+1] snwa[k] = snwa[k-1] #if k ==182 and j==35: # #print mask_l[II], mask_h[II], spectra[0,j,w1:w2], S,mask_w[II], signal2noise, vel_min + k*vel_step # #for z in range(len(mask_l[II])): # # III = np.where((spectra[0,j,w1:w2]>=mask_l[II][z])&(spectra[0,j,w1:w2]<=mask_h[II][z]))[0] # # print spectra[0,j,w1:w2][III],S[III] # #print Xcor_full[k,j+1] # #print snw # #print gfd xc_weight = np.median( snwa ) Xcor_full[:,j+1] /= snwa #xc_weight W[j] = xc_weight return velocities, Xcor_full, sn, nlines_used, W
7007c56e5173999b9d20dfbd8018133a59bb777c
3,654,660
import json def marks_details(request, pk): """ Display details for a given Mark """ # Check permission if not has_access(request): raise PermissionDenied # Get context context = get_base_context(request) # Get object mark = get_object_or_404(Mark, pk=pk) mark.category_clean = mark.get_category_display() context['mark'] = mark # Get users connected to the mark context['mark_users'] = mark.given_to.all() # AJAX if request.method == 'POST': if request.is_ajax and 'action' in request.POST: resp = {'status': 200} context, resp = _handle_mark_detail(request, context, resp) # Set mark resp['mark'] = {'last_changed_date': context['mark'].last_changed_date.strftime("%Y-%m-%d"), 'last_changed_by': context['mark'].last_changed_by.get_full_name()} # Return ajax return HttpResponse(json.dumps(resp), status=resp['status']) # Render view return render(request, 'marks/dashboard/marks_details.html', context)
1f193c67f1e047ecd6da0e5eec1d29da50f6595e
3,654,661
from bs4 import BeautifulSoup


def clean_text(text):
    """
        text: a string

        return: modified initial string
    """
    text = BeautifulSoup(text, "lxml").text  # HTML decoding
    text = text.lower()  # lowercase text
    # replace REPLACE_BY_SPACE_RE symbols by space in text
    text = REPLACE_BY_SPACE_RE.sub(' ', text)
    # delete symbols which are in BAD_SYMBOLS_RE from text
    text = BAD_SYMBOLS_RE.sub('', text)
    # delete stopwords from text
    text = ' '.join(word for word in text.split() if word not in STOPWORDS)
    return text
6594bd61c2f1ff885948755a0dfc74e7256b9a3e
3,654,662
import numpy as np


def _simpsons_interaction(data, groups):
    """
    Calculation of Simpson's Interaction index

    Parameters
    ----------
    data   : a pandas DataFrame
    groups : list of strings.
             The variables names in data of the groups of interest of the analysis.

    Returns
    -------
    statistic : float
                Simpson's Interaction Index
    core_data : a pandas DataFrame
                A pandas DataFrame that contains the columns used to perform the estimate.

    Notes
    -----
    Based on Equation 1 of page 37 of Reardon, Sean F., and Glenn Firebaugh.
    "Measures of multigroup segregation." Sociological methodology 32.1 (2002): 33-67.

    Simpson's interaction index (I) can be simply interpreted as the probability that
    two individuals chosen at random and independently from the population will be
    found to not belong to the same group.

    Higher values mean lesser segregation.

    Simpson's Concentration + Simpson's Interaction = 1

    Reference: :cite:`reardon2002measures`.
    """
    core_data = data[groups]
    data = _nan_handle(core_data)
    df = np.array(core_data)

    Pk = df.sum(axis=0) / df.sum()

    I = (Pk * (1 - Pk)).sum()

    return I, core_data, groups
d7c4bc8bfb2d6db17868f0d140c2547e65cfd666
3,654,664
import json


def run(target='192.168.1.1',
        ports=[21, 22, 23, 25, 80, 110, 111, 135, 139, 443, 445, 554, 993, 995,
               1433, 1434, 3306, 3389, 8000, 8008, 8080, 8888]):
    """
    Run a portscan against a target hostname/IP address

    `Optional`
    :param str target: Valid IPv4 address
    :param list ports: Port numbers to scan on target host

    :returns: Results in a nested dictionary object in JSON format

    Returns online targets & open ports as key-value pairs in dictionary (JSON) object
    """
    global tasks
    global threads
    global results
    if not util.ipv4(target):
        raise ValueError("target is not a valid IPv4 address")
    if _ping(target):
        for port in ports:
            tasks.put_nowait((_scan, (target, port)))
        for i in range(1, tasks.qsize()):
            threads['portscan-%d' % i] = _threader()
        for t in threads:
            threads[t].join()
        return json.dumps(results[target])
    else:
        return "Target offline"
201e4dc1809553eb4fb57848d9e5f8001ccdef23
3,654,667
import numpy as np
import torch


def subsequent_mask(size: int):
    """
    Mask out subsequent positions (to prevent attending to future positions)
    Transformer helper function.

    :param size: size of mask (2nd and 3rd dim)
    :return: Tensor with 0s and 1s of shape (1, size, size)
    """
    mask = np.triu(np.ones((1, size, size)), k=1).astype("uint8")
    return torch.from_numpy(mask) == 0
f4e40d2e9ac944d3582ed16088e8096f75a5f29e
3,654,668
from typing import Type


def _get_dist_class(
    policy: Policy, config: TrainerConfigDict, action_space: gym.spaces.Space
) -> Type[TorchDistributionWrapper]:
    """Helper function to return a dist class based on config and action space.

    Args:
        policy (Policy): The policy for which to return the action dist class.
        config (TrainerConfigDict): The Trainer's config dict.
        action_space (gym.spaces.Space): The action space used.

    Returns:
        Type[TorchDistributionWrapper]: A torch distribution class.
    """
    if hasattr(policy, "dist_class") and policy.dist_class is not None:
        return policy.dist_class
    elif config["model"].get("custom_action_dist"):
        action_dist_class, _ = ModelCatalog.get_action_dist(
            action_space, config["model"], framework="torch"
        )
        return action_dist_class
    elif isinstance(action_space, Discrete):
        return TorchCategorical
    elif isinstance(action_space, Simplex):
        return TorchDirichlet
    else:
        assert isinstance(action_space, Box)
        if config["normalize_actions"]:
            return (
                TorchSquashedGaussian
                if not config["_use_beta_distribution"]
                else TorchBeta
            )
        else:
            return TorchDiagGaussian
6511786dff734ddb78ce7c28e19b651c70fe86e2
3,654,669
def timeexec(fct, number, repeat):
    """
    Measures the time for a given expression.

    :param fct: function to measure (as a string)
    :param number: number of times to run the expression
        (and then divide by this number to get an average)
    :param repeat: number of times to repeat the computation
        of the above average
    :return: dictionary
    """
    rep = timeit_repeat(fct, number=number, repeat=repeat)
    ave = sum(rep) / (number * repeat)
    std = (sum((x / number - ave)**2 for x in rep) / repeat)**0.5
    fir = rep[0] / number
    fir3 = sum(rep[:3]) / (3 * number)
    las3 = sum(rep[-3:]) / (3 * number)
    rep.sort()
    mini = rep[len(rep) // 20] / number
    maxi = rep[-len(rep) // 20] / number
    return dict(average=ave, deviation=std, first=fir, first3=fir3,
                last3=las3, repeat=repeat, min5=mini, max5=maxi, run=number)
01ea6d74bed9d8a7d1b7793d3f8473bc6442f83f
3,654,670
def regexify(w, tags):
    """Convert a single component of a decomposition rule from Weizenbaum notation to regex.

    Parameters
    ----------
    w : str
        Component of a decomposition rule.
    tags : dict
        Tags to consider when converting to regex.

    Returns
    -------
    w : str
        Component of a decomposition rule converted to regex form.
    """
    # 0 means "an indefinite number of words"
    if w == '0':
        w = '.*'
    # A positive non-zero integer means "this specific amount of words"
    elif w.isnumeric() and int(w) > 0:
        w = r'(?:\b\w+\b[\s\r\n]*){' + w + '}'
    # A word starting with @ signifies a tag
    elif w[0] == "@":
        # Get tag name
        tag_name = w[1:].lower()
        w = tag_to_regex(tag_name, tags)
    else:
        # Add word boundaries to match on a whole word basis
        w = r'\b' + w + r'\b'
    return w
113a631674c5984d81f830c5e8ca840d95678aa1
3,654,672
import numpy as np


def row_dot_product(a: np.ndarray, b: np.ndarray) -> np.ndarray:
    """
    Returns a vectorized dot product between the rows of a and b

    :param a: An array of shape (N, M) or (M, ) (or a shape that can be broadcast to (N, M))
    :param b: An array of shape (N, M) or (M, ) (or a shape that can be broadcast to (N, M))
    :return: A vector of shape (N, ) whose elements are the dot product of rows a, b
    """
    return np.einsum('ij,ij->i', np.atleast_2d(a), np.atleast_2d(b))
d2544f2957963d343bdeb079418a1a5d96373eb4
3,654,673
def pmg_pickle_dump(obj, filobj, **kwargs):
    """
    Dump an object to a pickle file using PmgPickler.

    Args:
        obj : Object to dump.
        filobj: File-like object
        \\*\\*kwargs: Any of the keyword arguments supported by PmgPickler
    """
    return PmgPickler(filobj, **kwargs).dump(obj)
4ac72623538ce463b1bfc183bcac90919e47c513
3,654,674
def condition_header(header, needed_keys=None):
    """Return a dictionary of all `needed_keys` from `header` after passing
    their values through the CRDS value conditioner.
    """
    header = {key.upper(): val for (key, val) in header.items()}
    if not needed_keys:
        needed_keys = header.keys()
    else:
        needed_keys = [key.upper() for key in needed_keys]
    conditioned = {key: condition_value(header[key]) for key in needed_keys}
    return conditioned
cd8c39e355a05367d479e76bda6f0869c10f8130
3,654,675
from typing import OrderedDict def get_generic_path_information(paths, stat_prefix=""): """ Get an OrderedDict with a bunch of statistic names and values. """ statistics = OrderedDict() returns = [sum(path["rewards"]) for path in paths] # rewards = np.vstack([path["rewards"] for path in paths]) rewards = np.concatenate([path["rewards"] for path in paths]) statistics.update( create_stats_ordered_dict( "Rewards", rewards, stat_prefix=stat_prefix, always_show_all_stats=True ) ) statistics.update( create_stats_ordered_dict( "Returns", returns, stat_prefix=stat_prefix, always_show_all_stats=True ) ) # print(paths[0]["env_infos"]) if "is_success" in paths[0]["env_infos"][0].keys(): acc_sum = [(np.sum([x['is_success'] for x in path["env_infos"]])>0).astype(float) for path in paths] acc = np.sum(acc_sum) * 1.0 / len(paths) statistics.update( create_stats_ordered_dict( "Success Num", np.sum(acc_sum), stat_prefix=stat_prefix, always_show_all_stats=True ) ) statistics.update( create_stats_ordered_dict( "Traj Num", len(paths), stat_prefix=stat_prefix, always_show_all_stats=True ) ) statistics.update( create_stats_ordered_dict( "Success Rate", acc, stat_prefix=stat_prefix, always_show_all_stats=True ) ) actions = [path["actions"] for path in paths] # if isinstance(actions[0][0], np.ndarray): # actions = np.vstack([path["actions"] for path in paths]) # else: # actions = np.hstack([path["actions"] for path in paths]) statistics.update( create_stats_ordered_dict( "Actions", actions, stat_prefix=stat_prefix, always_show_all_stats=True ) ) statistics.update( create_stats_ordered_dict( "Ep. Len.", np.array([len(path["terminals"]) for path in paths]), stat_prefix=stat_prefix, always_show_all_stats=True, ) ) statistics["Num Paths"] = len(paths) return statistics
a90995c43d588cee4869bfa8b3f6a1026d265aab
3,654,676
import math

import numpy as np


def pad_images(images, nlayers):
    """
    In Unet, every layer the dimension gets divided by 2 in the encoder path.
    Therefore the image size should be divisible by 2^nlayers.
    """
    divisor = 2**nlayers
    nlayers, x, y = images.shape  # essentially setting nlayers to z direction so return is z, x, y
    x_pad = int((math.ceil(x / float(divisor)) * divisor) - x)
    y_pad = int((math.ceil(y / float(divisor)) * divisor) - y)
    padded_image = np.pad(images, ((0, 0), (0, x_pad), (0, y_pad)),
                          'constant', constant_values=(0, 0))
    return padded_image
671fa940d0a0ed87819335b60d12d9e268bf9932
3,654,677
from copy import deepcopy


def remove_measurements(measurements, model_dict, params=None):
    """Remove measurements from a model specification.

    If provided, a params DataFrame is also reduced correspondingly.

    Args:
        measurements (str or list): Name(s) of the measurement(s) to remove.
        model_dict (dict): The model specification. See: :ref:`model_specs`.
        params (pandas.DataFrame or None): The params DataFrame for the full model.

    Returns:
        dict: The reduced model dictionary
        pandas.DataFrame: The reduced parameter DataFrame (only if params is not None)

    """
    out = deepcopy(model_dict)
    for factor in model_dict["factors"]:
        full = model_dict["factors"][factor]["measurements"]
        reduced = [_remove_from_list(meas_list, measurements) for meas_list in full]
        out["factors"][factor]["measurements"] = reduced

        norminfo = model_dict["factors"][factor].get("normalizations", {})
        if "loadings" in norminfo:
            out["factors"][factor]["normalizations"][
                "loadings"
            ] = _remove_measurements_from_normalizations(
                measurements, norminfo["loadings"]
            )
        if "intercepts" in norminfo:
            out["factors"][factor]["normalizations"][
                "intercepts"
            ] = _remove_measurements_from_normalizations(
                measurements, norminfo["intercepts"]
            )

    if params is not None:
        out_params = _reduce_params(params, out)
        out = (out, out_params)

    return out
fffddf4368579c999648c29b4746006b38de140c
3,654,678
def good2Go(SC, L, CC, STR):
    """
    Check if all input is correct and runnable.
    """
    if SC == 1 and L == 1 and CC == 1 and STR == 1:
        return True
    else:
        print(SC, L, CC, STR)
        return False
e49229df6b9b187e1840d5bc5c8a1a8e087a5a4e
3,654,679
def __validate_tweet_name(tweet_name: str, error_msg: str) -> str:
    """Validate the tweet's name.

    Parameters
    ----------
    tweet_name : str
        Tweet's name.
    error_msg : str
        Error message to display for an invalid name.

    Returns
    -------
    str
        Validated tweet name.

    Raises
    ------
    InvalidTweetName
        Raised for invalid tweet names.
    """
    if tweet_name == "":
        raise InvalidTweetName(error_msg)
    else:
        return tweet_name
7086aeac6ccd0afcad0d13e947f3b454f7333b9f
3,654,680
def convert_event(obj): """ :type obj: :class:`sir.schema.modelext.CustomEvent` """ event = models.event(id=obj.gid, name=obj.name) if obj.comment: event.set_disambiguation(obj.comment) if obj.type is not None: event.set_type(obj.type.name) event.set_type_id(obj.type.gid) lifespan = convert_life_span(obj.begin_date, obj.end_date, obj.ended) if lifespan.get_begin() is not None or lifespan.get_end() is not None: event.set_life_span(lifespan) if obj.time is not None: event.set_time(datetime_to_string(obj.time)) if obj.area_links: event.add_relation_list(convert_event_area_relation_list(obj.area_links)) if obj.artist_links: event.add_relation_list(convert_artist_relation_list(obj.artist_links)) if obj.place_links: event.add_relation_list(convert_place_relation_list(obj.place_links)) if obj.aliases: event.set_alias_list(convert_alias_list(obj.aliases)) if obj.tags: event.set_tag_list(convert_tag_list(obj.tags)) return event
23a6a31abca03d0c92f6162ce28b8548dc95bdda
3,654,681
import json
from typing import Any

import requests


def get_pr_review_status(pr: PullRequestDetails, per_page: int = 100) -> Any:
    """
    References:
        https://developer.github.com/v3/pulls/reviews/#list-reviews-on-a-pull-request
    """
    url = (f"https://api.github.com/repos/{pr.repo.organization}/{pr.repo.name}"
           f"/pulls/{pr.pull_id}/reviews"
           f"?per_page={per_page};access_token={pr.repo.access_token}")
    response = requests.get(url)

    if response.status_code != 200:
        raise RuntimeError(
            'Get review failed. Code: {}. Content: {}.'.format(
                response.status_code, response.content))

    return json.JSONDecoder().decode(response.content.decode())
5ce662ab5d82e374def95e5f3cc4da9f2d4dbf96
3,654,682
def make_sph_model(filename):
    """Read a spherical model text file and generate interpolated values.

    Args:
        filename:

    Returns:
        model:
    """
    M = np.loadtxt(filename,
                   dtype={'names': ('rcurve', 'potcurve', 'dpotcurve'),
                          'formats': ('f4', 'f4', 'f4')},
                   skiprows=1)

    model = spherical_model()
    model.rcurve = M['rcurve']
    model.potcurve = M['potcurve']
    model.dpotcurve = M['dpotcurve']

    # replace the raw curves with spline interpolations over the radius curve
    model.potcurve = UnivariateSpline(model.rcurve, M['potcurve'], k=3)
    model.dpotcurve = UnivariateSpline(model.rcurve, M['dpotcurve'], k=3)

    return model
d86a88ffca93ee0618cf5a19aa015077247cffb0
3,654,683
def minimum(x, y):
    """
    Returns the min of x and y (i.e. x < y ? x : y) element-wise.

    Parameters
    ----------
    x : tensor.
        Must be one of the following types: bfloat16, half, float32, float64, int32, int64.
    y : A Tensor. Must have the same type as x.
    name : str
        A name for the operation (optional).

    Returns
    -------
    A Tensor. Has the same type as x
    """
    return pd.minimum(x, y)
384e7d15687d03f7b639fc50707712c94029620f
3,654,685
def seconds_to_timestamp(seconds):
    """
    Convert from seconds to a timestamp
    """
    minutes, seconds = divmod(float(seconds), 60)
    hours, minutes = divmod(minutes, 60)
    return "%02d:%02d:%06.3f" % (hours, minutes, seconds)
8b9806f05fe4796baae51001e69455e82fb51eed
3,654,686
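A minimal usage sketch for the seconds_to_timestamp row above; the input values are made up for illustration and are not part of the dataset.

# Usage sketch (assumes seconds_to_timestamp as defined in the row above)
print(seconds_to_timestamp(3661.5))  # -> "01:01:01.500"
print(seconds_to_timestamp(59.25))   # -> "00:00:59.250"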
def query(querystring: str, db: tsdb.Database, **kwargs): """ Perform query *querystring* on the testsuite *ts*. Note: currently only 'select' queries are supported. Args: querystring (str): TSQL query string ts (:class:`delphin.itsdb.TestSuite`): testsuite to query over kwargs: keyword arguments passed to the more specific query function (e.g., :func:`select`) Example: >>> list(tsql.query('select i-id where i-length < 4', ts)) [[142], [1061]] """ queryobj = _parse_query(querystring) if queryobj['type'] in ('select', 'retrieve'): return _select( queryobj['projection'], queryobj['relations'], queryobj['condition'], db, record_class=kwargs.get('record_class', None)) else: # not really a syntax error; replace with TSQLError or something # when the proper exception class exists raise TSQLSyntaxError(queryobj['type'] + ' queries are not supported', text=querystring)
fa43123b3e0c4706b738104c641836fa08a4fc35
3,654,687
def TTF_SizeUTF8(font, text, w, h):
    """Calculates the size of a UTF8-encoded string rendered with a given font.

    See :func:`TTF_SizeText` for more info.

    Args:
        font (:obj:`TTF_Font`): The font object to use.
        text (bytes): A UTF8-encoded bytestring of text for which the rendered
            surface size should be calculated.
        w (byref(:obj:`~ctypes.c_int`)): A pointer to an integer in which to
            store the calculated surface width (in pixels).
        h (byref(:obj:`~ctypes.c_int`)): A pointer to an integer in which to
            store the calculated surface height (in pixels).

    Returns:
        int: 0 on success, or -1 on error (e.g. if a glyph is not found in
        the font).

    """
    return _funcs["TTF_SizeUTF8"](font, text, w, h)
3d24382222b1795caa0981c659d00a717c22fc86
3,654,688
import numpy as np


def get_mse(y_true, y_hat):
    """
    Return the mean squared error between the ground truth and the prediction

    :param y_true: ground truth
    :param y_hat: prediction
    :return: mean squared error
    """
    return np.mean(np.square(y_true - y_hat))
3d4c1828abf5bf88607e4ca1a263c483105733aa
3,654,689
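A short usage sketch for the get_mse row above; the example arrays are invented for illustration only.

# Usage sketch (assumes get_mse as defined in the row above)
import numpy as np

y_true = np.array([1.0, 2.0, 3.0])
y_hat = np.array([1.0, 2.0, 5.0])
print(get_mse(y_true, y_hat))  # squared errors [0, 0, 4] -> mean = 1.333...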
def generate_v2_token(username, version, client_ip, issued_at_timestamp, email=''):
    """Creates the JSON Web Token with a new schema

    :Returns: String

    :param username: The name of person who the token identifies
    :type username: String

    :param version: The version number for the token
    :type version: Integer/String

    :param client_ip: The IP of machine the client used to request a token.
    :type client_ip: String

    :param email: The email address associated with a user.
    :type email: String
    """
    claims = {'exp': issued_at_timestamp + const.AUTH_TOKEN_TIMEOUT,
              'iat': issued_at_timestamp,
              'iss': const.VLAB_URL,
              'username': username,
              'version': version,
              'client_ip': client_ip,
              'email': email,
              }
    return jwt.encode(claims, const.AUTH_TOKEN_SECRET, algorithm=const.AUTH_TOKEN_ALGORITHM)
dee10b68fc15ec730a7b8921f95a77804618879c
3,654,690
import math


def choose(n, k):
    """return n choose k

    resilient (though not immune) to integer overflow"""
    if n == 1:
        # optimize by far most-common case
        return 1
    return fact_div(n, max(k, n - k)) / math.factorial(min(k, n - k))
fecd411a4148127f998f58d8d27668777bf5efbe
3,654,691
from typing import List


def part_one(puzzle_input: List[str]) -> int:
    """Find the highest seat ID on the plane"""
    return max(boarding_pass_to_seat_id(line) for line in puzzle_input)
1ae95a7784f5348bb435483228630c8795d62d30
3,654,692
def readbit(val, bitidx):
    """ Direct word value """
    return int((val & (1 << bitidx)) != 0)
4ca368f89b2496ec46c1641835c1f2a0a1cdd573
3,654,693
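A quick illustration of the readbit row above; the bit patterns are chosen only for the example.

# Usage sketch (assumes readbit as defined in the row above)
# 0b1010 has bits 1 and 3 set
print(readbit(0b1010, 1))  # -> 1
print(readbit(0b1010, 2))  # -> 0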
from copy import deepcopy


def matrix_bombing_plan(m):
    """
    This method calculates the sum of the matrix by trying every possible
    position of the bomb and returns a dictionary. The dictionary's keys are
    the positions of the bomb and the values are the sums of the matrix after
    the damage.
    """
    matrix = deepcopy(m)
    rows = len(m)
    columns = len(m[0])
    d = {}
    for x in range(0, rows):
        for y in range(0, columns):
            p = (x, y)
            neighbours = find_neighbour(matrix, (x, y))
            d[p] = sum_matrix(neighbours)

    return d
013d1dc3685013fa6fd5c87cfc2513e07e66e310
3,654,694
def coord_to_gtp(coord, board_size):
    """ From 1d coord (0 for position 0,0 on the board) to A1 """
    if coord == board_size ** 2:
        return "pass"
    # GTP column letters skip "I"
    return "{}{}".format("ABCDEFGHJKLMNOPQRSTUVWXYZ"[int(coord % board_size)],
                         int(board_size - coord // board_size))
a0419e8a7f39cd282585ed1d29d94bbded0e3f1c
3,654,695
import numpy as np
from sklearn.datasets import make_blobs


def test_alternative_clustering_method(ClusterModel):
    """
    Test that users can supply an alternative clustering method as dep injection
    """

    def clusterer(X: np.ndarray, k: int, another_test_arg):
        """
        Function to wrap a sklearn model as a clusterer for OptimalK.
        The first two arguments are always the data matrix and k.
        """
        m = ClusterModel()
        m.fit(X)
        assert another_test_arg == "test"
        return m.cluster_centers_, m.predict(X)

    optimalk = OptimalK(
        n_jobs=-1,
        parallel_backend="joblib",
        clusterer=clusterer,
        clusterer_kwargs={"another_test_arg": "test"},
    )

    X, y = make_blobs(n_samples=50, n_features=2, centers=3)
    n_clusters = optimalk(X, n_refs=3, cluster_array=np.arange(1, 5))
    assert isinstance(n_clusters, int)
173e376726abe943f15fae44aa746bf9abe7dd53
3,654,696
def load_dataset(spfile, twfile): """Loads dataset given the span file and the tweets file Arguments: spfile {string} -- path to span file twfile {string} -- path to tweets file Returns: dict -- dictionary of tweet-id to Tweet object """ tw_int_map = {} # for filen in os.listdir(txt_dir): # twid = filen.split(".")[0] # if twid == "tweet_id": # continue # tweet = Tweet(twid) # tw_int_map[twid] = tweet for line in open(twfile, 'r'): #parts = line.split("\t") #twid, text = parts[0], parts[1] twid = get_basename_without_extension(line.strip('\n')) # ANTONIO tweet = Tweet(twid) if twid in tw_int_map: log.warning("Possible duplicate %s", twid) tw_int_map[twid] = tweet # Load annotations for line in open(spfile, 'r'): parts = [x.strip() for x in line.split("\t")] if len(parts) != 5: log.warning("Tab delimited not correct:" + str(len(parts))) continue if len(parts) == 5: twid, start, end, atype, prof = parts if twid == "tweet_id": continue if twid in tw_int_map: tweet = tw_int_map[twid] else: log.warning("Invalid tweetid %s not found.", twid) continue valid_labels = ["PROTEINAS", "NORMALIZABLES", "UNCLEAR","NO-NORMALIZABLES"] if atype in valid_labels: ann = Ann(prof.strip(), atype, start, end) tweet.anns.append(ann) tweet.has_ann = (tweet.has_ann or atype in valid_labels) num_anns = sum([len(x.anns) for _, x in tw_int_map.items()]) log.info("Loaded dataset %s tweets. %s annotations.", len(tw_int_map), num_anns) return tw_int_map
e25c382b3fe8c321b70206894e483c3f04ade2ed
3,654,697
from typing import Union from typing import Tuple def nameof(var, *more_vars, # *, keyword only argument, supported with python3.8+ frame: int = 1, vars_only: bool = True) -> Union[str, Tuple[str]]: """Get the names of the variables passed in Examples: >>> a = 1 >>> nameof(a) # 'a' >>> b = 2 >>> nameof(a, b) # ('a', 'b') >>> x = lambda: None >>> x.y = 1 >>> nameof(x.y, full=True) # 'x.y' Note: This function works with the environments where source code is available, in other words, the callee's node can be retrieved by `executing`. In some cases, for example, running code from python shell/REPL or from `exec`/`eval`, we try to fetch the variable name from the bytecode. This requires only a single variable name is passed to this function and no keyword arguments, meaning that getting full names of attribute calls are not supported in such cases. Args: var: The variable to retrieve the name of *more_vars: Other variables to retrieve the names of frame: The this function is called from the wrapper of it. `frame=1` means no wrappers. Note that the calls from standard libraries are ignored. Also note that the wrapper has to have signature as this one. vars_only: Whether only allow variables/attributes as arguments or any expressions. If `True`, then the sources of the arguments will be returned. Returns: The names/sources of variables/expressions passed in. If a single argument is passed, return the name/source of it. If multiple variables are passed, return a tuple of their names/sources. If the argument is an attribute (e.g. `a.b`) and `vars_only` is `False`, only `"b"` will returned. Set `vars_only` to `True` to get `"a.b"`. Raises: VarnameRetrievingError: When the callee's node cannot be retrieved or trying to retrieve the full name of non attribute series calls. """ # Frame is anyway used in get_node frameobj = IgnoreList.create( ignore_lambda=False, ignore_varname=False ).get_frame(frame) node = get_node_by_frame(frameobj, raise_exc=True) if not node: # We can't retrieve the node by executing. # It can be due to running code from python/shell, exec/eval or # other environments where sourcecode cannot be reached # make sure we keep it simple (only single variable passed and no # full passed) to use bytecode_nameof # # We don't have to check keyword arguments here, as the instruction # will then be CALL_FUNCTION_KW. if not more_vars: return bytecode_nameof(frameobj.f_code, frameobj.f_lasti) # We are anyway raising exceptions, no worries about additional burden # of frame retrieval again source = frameobj.f_code.co_filename if source == '<stdin>': raise VarnameRetrievingError( "Are you trying to call nameof in REPL/python shell? " "In such a case, nameof can only be called with single " "argument and no keyword arguments." ) if source == '<string>': raise VarnameRetrievingError( "Are you trying to call nameof from exec/eval? " "In such a case, nameof can only be called with single " "argument and no keyword arguments." ) raise VarnameRetrievingError( "Source code unavailable, nameof can only retrieve the name of " "a single variable, and argument `full` should not be specified." ) return argname( var, *more_vars, func=nameof, frame=frame, vars_only=vars_only, pos_only=True )
4a7c7d8390dad2597cad65409aaa6cd3f716a8a8
3,654,698
def scorer(func):
    """This function is a decorator for a scoring function.
    This is a hack to get around self being passed as the first
    argument to the scoring function."""

    def wrapped(a, b=None):
        if b is not None:
            return func(b)
        return func(a)

    return wrapped
39ec390982d26d10a6ce827800df654ff6c4ab42
3,654,700
def print_stats(yards):
    """
    This function prints the final stats after a skier has crashed.
    """
    # Note: legacy Python 2 print statements.
    print
    print "You skied a total of", yards, "yards!"
    # print "Want to take another shot?"
    print
    return 0
72b56bf8cfb0691636e41ccfcfe9b3893ab870eb
3,654,701
def _calculate_risk_reduction(module): """ Function to calculate the risk reduction due to testing. The algorithms used are based on the methodology presented in RL-TR-92-52, "SOFTWARE RELIABILITY, MEASUREMENT, AND TESTING Guidebook for Software Reliability Measurement and Testing." Rather than attempting to estimate the software failure rate, RTK provides a risk index for the software based on the same factors used in RL-TR-92-52 for estimating software failure rates. RTK also provides test planning guidance in the same manner as RL-TR-92-52. :param module: the :py:class:`rtk.software.CSCI.Model` or :py:class:`rtk.software.Unit.Model` data model to calculate. :return: _error_code :rtype: int """ # WARNING: Refactor _calculate_risk_reduction; current McCabe Complexity metric = 13. _error_code = 0 # Calculate the risk reduction due to the test effort. try: if module.test_effort == 1: # Labor hours _test_ratio = float(module.labor_hours_test) / \ float(module.labor_hours_dev) elif module.test_effort == 2: # Budget _test_ratio = float(module.budget_test) / \ float(module.budget_dev) elif module.test_effort == 3: # Schedule _test_ratio = float(module.schedule_test) / \ float(module.schedule_dev) else: _test_ratio = 1.0 except ZeroDivisionError: _error_code = 10 _test_ratio = 0.0 module.te = 1.0 if _test_ratio > 0.4: module.te = 0.9 # Calculate the risk reduction due to test methods used. module.tm = 1.0 module.tu = sum([_tu[0] for _tu in module.lst_test_selection]) module.tt = sum([_tt[1] for _tt in module.lst_test_selection]) try: if module.tu / module.tt > 0.75: module.tm = 0.9 elif module.tu / module.tt < 0.5: module.tm = 1.1 except ZeroDivisionError: _error_code = 10 # Calculate the risk reduction due to test coverage. try: if module.level_id == 2: # Module _VS = ((float(module.nm_test) / float(module.nm)) + (float(module.interfaces_test) / float(module.interfaces))) / 2.0 elif module.level_id == 3: # Unit _VS = ((float(module.branches_test) / float(module.branches)) + (float(module.inputs_test) / float(module.inputs))) / 2.0 else: _VS = 1.0 except ZeroDivisionError: _error_code = 10 _VS = 1.0 module.tc = 1.0 / _VS module.t_risk = module.te * module.tm * module.tc return _error_code
c8876bc247243f13572d49c07063a063ba4eb42a
3,654,702
def run_metarl(env, test_env, seed, log_dir): """Create metarl model and training.""" deterministic.set_seed(seed) snapshot_config = SnapshotConfig(snapshot_dir=log_dir, snapshot_mode='gap', snapshot_gap=10) runner = LocalRunner(snapshot_config) obs_dim = int(np.prod(env[0]().observation_space.shape)) action_dim = int(np.prod(env[0]().action_space.shape)) reward_dim = 1 # instantiate networks encoder_in_dim = obs_dim + action_dim + reward_dim encoder_out_dim = params['latent_size'] * 2 net_size = params['net_size'] context_encoder = MLPEncoder(input_dim=encoder_in_dim, output_dim=encoder_out_dim, hidden_sizes=[200, 200, 200]) space_a = akro.Box(low=-1, high=1, shape=(obs_dim + params['latent_size'], ), dtype=np.float32) space_b = akro.Box(low=-1, high=1, shape=(action_dim, ), dtype=np.float32) augmented_env = EnvSpec(space_a, space_b) qf1 = ContinuousMLPQFunction(env_spec=augmented_env, hidden_sizes=[net_size, net_size, net_size]) qf2 = ContinuousMLPQFunction(env_spec=augmented_env, hidden_sizes=[net_size, net_size, net_size]) obs_space = akro.Box(low=-1, high=1, shape=(obs_dim, ), dtype=np.float32) action_space = akro.Box(low=-1, high=1, shape=(params['latent_size'], ), dtype=np.float32) vf_env = EnvSpec(obs_space, action_space) vf = ContinuousMLPQFunction(env_spec=vf_env, hidden_sizes=[net_size, net_size, net_size]) policy = TanhGaussianMLPPolicy2( env_spec=augmented_env, hidden_sizes=[net_size, net_size, net_size]) context_conditioned_policy = ContextConditionedPolicy( latent_dim=params['latent_size'], context_encoder=context_encoder, policy=policy, use_ib=params['use_information_bottleneck'], use_next_obs=params['use_next_obs_in_context'], ) train_task_names = ML10.get_train_tasks()._task_names test_task_names = ML10.get_test_tasks()._task_names pearlsac = PEARLSAC( env=env, test_env=test_env, policy=context_conditioned_policy, qf1=qf1, qf2=qf2, vf=vf, num_train_tasks=params['num_train_tasks'], num_test_tasks=params['num_test_tasks'], latent_dim=params['latent_size'], meta_batch_size=params['meta_batch_size'], num_steps_per_epoch=params['num_steps_per_epoch'], num_initial_steps=params['num_initial_steps'], num_tasks_sample=params['num_tasks_sample'], num_steps_prior=params['num_steps_prior'], num_extra_rl_steps_posterior=params['num_extra_rl_steps_posterior'], num_evals=params['num_evals'], num_steps_per_eval=params['num_steps_per_eval'], batch_size=params['batch_size'], embedding_batch_size=params['embedding_batch_size'], embedding_mini_batch_size=params['embedding_mini_batch_size'], max_path_length=params['max_path_length'], reward_scale=params['reward_scale'], train_task_names=train_task_names, test_task_names=test_task_names, ) tu.set_gpu_mode(params['use_gpu'], gpu_id=0) if params['use_gpu']: pearlsac.to() tabular_log_file = osp.join(log_dir, 'progress.csv') tensorboard_log_dir = osp.join(log_dir) dowel_logger.add_output(dowel.StdOutput()) dowel_logger.add_output(dowel.CsvOutput(tabular_log_file)) dowel_logger.add_output(dowel.TensorBoardOutput(tensorboard_log_dir)) runner.setup(algo=pearlsac, env=env, sampler_cls=PEARLSampler, sampler_args=dict(max_path_length=params['max_path_length'])) runner.train(n_epochs=params['num_epochs'], batch_size=params['batch_size']) dowel_logger.remove_all() return tabular_log_file
adc4041539d55d9cddba69a44a0d0fcfbbc1c16e
3,654,703
from ..nn.nn_modifiers import get_single_nn_mutation_op


def get_default_mutation_op(dom):
    """ Returns the default mutation operator for the domain. """
    if dom.get_type() == 'euclidean':
        return lambda x: euclidean_gauss_mutation(x, dom.bounds)
    elif dom.get_type() == 'integral':
        return lambda x: integral_gauss_mutation(x, dom.bounds)
    elif dom.get_type() == 'discrete':
        return lambda x: discrete_random_mutation(x, dom.list_of_items)
    elif dom.get_type() == 'prod_discrete':
        return lambda x: prod_discrete_random_mutation(x, dom.list_of_list_of_items)
    elif dom.get_type() == 'discrete_numeric':
        return lambda x: discrete_numeric_exp_mutation(x, dom.list_of_items)
    elif dom.get_type() == 'prod_discrete_numeric':
        return lambda x: prod_discrete_numeric_exp_mutation(x, dom.list_of_list_of_items)
    elif dom.get_type() == 'discrete_euclidean':
        return lambda x: discrete_euclidean_mutation(x, dom.list_of_items)
    elif dom.get_type() == 'neural_network':
        return get_single_nn_mutation_op(dom, [0.5, 0.25, 0.125, 0.075, 0.05])
    else:
        raise ValueError('No default mutation implemented for domain type %s.'%(
                         dom.get_type()))
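# Hedged usage sketch: `dom` is assumed to be a domain object exposing
# get_type() and bounds, and the mutation helpers dispatched above come from
# the surrounding module; the names below are illustrative only.
# mutate = get_default_mutation_op(dom)   # e.g. dom.get_type() == 'euclidean'
# child = mutate(parent_point)            # perturbed copy of `parent_point`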
8e9455ca96dac89b11bebcc3e4f779f62111a010
3,654,704
import itertools


def chunked(src, size, count=None, **kw):
    """Returns a list of *count* chunks, each with *size* elements,
    generated from iterable *src*. If *src* is not evenly divisible by
    *size*, the final chunk will have fewer than *size* elements.
    Provide the *fill* keyword argument to provide a pad value and
    enable padding, otherwise no padding will take place.

    >>> chunked(range(10), 3)
    [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
    >>> chunked(range(10), 3, fill=None)
    [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9, None, None]]
    >>> chunked(range(10), 3, count=2)
    [[0, 1, 2], [3, 4, 5]]

    See :func:`chunked_iter` for more info.
    """
    chunk_iter = chunked_iter(src, size, **kw)
    if count is None:
        return list(chunk_iter)
    else:
        return list(itertools.islice(chunk_iter, count))
6f35735d9294f4c245643609641fb86b0f988fb1
3,654,705
def doc_to_schema_fields(doc, schema_file_name='_schema.yaml'):
    """Parse a doc to retrieve the schema file."""
    return doc_to_schema(doc, schema_file_name=schema_file_name)[
        'schema_fields']
b9d88f52ff49e43cae0ad5373a8d841f0236bb50
3,654,706
from typing import Tuple
from typing import OrderedDict
from typing import Counter
from tqdm import tqdm


def cluster(df: pd.DataFrame, k: int, knn: int = 10, m: int = 30,
            alpha: float = 2.0, verbose0: bool = False,
            verbose1: bool = False, verbose2: bool = True,
            plot: bool = True) -> Tuple[pd.DataFrame, OrderedDict]:
    """
    Chameleon clustering: build the K-NN graph, partition it into m clusters

    :param df: input dataframe.
    :param k: desired number of clusters.
    :param knn: parameter k of K-nearest_neighbors.
    :param m: number of clusters to reach in the initial clustering phase.
    :param alpha: exponent of relative closeness; the larger, the more
        important relative closeness is than relative interconnectivity.
    :param verbose0: if True, print general infos.
    :param verbose1: if True, print infos about the prepartitioning phase.
    :param verbose2: if True, print labels of merging clusters and their
        scores in the merging phase.
    :param plot: if True, show plots.
    :return: dataframe with cluster labels and dictionary of merging scores
        (similarities).
    """
    if k is None:
        k = 1

    if verbose0:
        print(f"Building kNN graph (k = {knn})...")

    graph = knn_graph(df=df, k=knn, symmetrical=False, verbose=verbose1)

    if plot is True:
        plot2d_graph(graph, print_clust=False)

    graph = pre_part_graph(graph, m, df, verbose1, plotting=plot)

    # to account for cases where initial_clust is too big or k is already
    # reached before the merging phase
    cl_dict = OrderedDict({
        list(graph.nodes)[i]: graph.nodes[i]["cluster"]
        for i in range(len(graph))
    })
    m = len(Counter(cl_dict.values()))

    if verbose0:
        print(f"actual init_clust: {m}")

    merging_similarities = OrderedDict({})
    iterm = (tqdm(enumerate(range(m - k)), total=m - k)
             if verbose1 else enumerate(range(m - k)))

    for i, _ in iterm:
        df, ms, ci = merge_best(graph, df, alpha, k, False, verbose2)
        if ms == 0:
            break
        merging_similarities[m - (i + 1)] = ms
        if plot:
            plot2d_data(df, ci)

    res = rebuild_labels(df)

    return res, merging_similarities
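# Hypothetical call sketch: the knn_graph / pre_part_graph / merge_best /
# rebuild_labels helpers are assumed to come from the surrounding
# chameleon-clustering module, and `points` to be a 2-D array of samples.
# df = pd.DataFrame(points, columns=["x", "y"])
# labelled, similarities = cluster(df, k=3, knn=10, m=30, plot=False)
# print(similarities)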
2363df84104da1f182c63faaac21006033e23083
3,654,707
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000):
    """
    Load the CIFAR-10 dataset from disk and perform preprocessing to prepare
    it for the two-layer neural net classifier. These are the same steps as
    we used for the SVM, but condensed to a single function.
    """
    # Load the raw CIFAR-10 data.
    # Use a raw string so the backslashes in the Windows path are not
    # interpreted as escape sequences.
    cifar10_dir = r"C:\Users\Pomodori\workspace\cifar-10-batches-py"
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)

    # Subsample the data
    mask = range(num_training, num_training + num_validation)
    X_val = X_train[mask]
    y_val = y_train[mask]
    mask = range(num_training)
    X_train = X_train[mask]
    y_train = y_train[mask]
    mask = range(num_test)
    X_test = X_test[mask]
    y_test = y_test[mask]

    # Normalize the data: subtract the mean image
    mean_image = np.mean(X_train, axis=0)
    X_train -= mean_image
    X_val -= mean_image
    X_test -= mean_image

    # Reshape data to rows
    X_train = X_train.reshape(num_training, -1)
    X_val = X_val.reshape(num_validation, -1)
    X_test = X_test.reshape(num_test, -1)

    return X_train, y_train, X_val, y_val, X_test, y_test
515777ca498ae9a234a1503660f2cde40f0b0244
3,654,708
def timeframe_int_to_str(timeframe: int) -> str:
    """
    Convert timeframe from integer to string
    :param timeframe: minutes per candle (240)
    :return: string representation for API (4h)
    """
    if timeframe < 60:
        return f"{timeframe}m"
    elif timeframe < 1440:
        return f"{int(timeframe / 60)}h"
    else:
        return f"{int(timeframe / 1440)}d"
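# Quick sanity checks for the conversion above.
assert timeframe_int_to_str(15) == "15m"
assert timeframe_int_to_str(240) == "4h"
assert timeframe_int_to_str(1440) == "1d"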
75778742dea8204c74a47bfe92c25aef43ebbad8
3,654,709
def FIT(individual):
    """Sphere test objective function.
        F(x) = sum_{i=1}^d xi^2
        d=1,2,3,...
        Range: [-100,100]
        Minima: 0
    """
    y = sum(x**2 for x in individual)
    return y
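# The sphere function is minimised at the origin; a couple of spot checks:
assert FIT([0, 0, 0]) == 0
assert FIT([3, 4]) == 25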
d6aadf620f85bd9cb27cef661e2ec664a4eb43b1
3,654,710
def update_range(value):
    """ For user selections, return the relevant range """
    global df
    min, max = df.timestamp.iloc[value[0]], df.timestamp.iloc[value[-1]]
    return 'timestamp slider: {} | {}'.format(min, max)
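# Illustrative sketch: `df` is assumed to be a module-level DataFrame with a
# `timestamp` column (e.g. the data behind a dashboard slider callback).
import pandas as pd
df = pd.DataFrame({"timestamp": pd.date_range("2021-01-01", periods=100, freq="T")})
print(update_range([10, 50]))
# timestamp slider: 2021-01-01 00:10:00 | 2021-01-01 00:50:00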
c4819b46cdd78be3c86fc503791a7a0ff9cd96b3
3,654,711
def simplify(tile):
    """
    :param tile: tile in 34-tile format
    :return: tile in 0-8 representation
    """
    return tile - 9 * (tile // 9)
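# 34-tile indices run 0-8 (man), 9-17 (pin), 18-26 (sou); `simplify` maps a
# suited tile back onto its 0-8 position, equivalent to `tile % 9`.
assert simplify(0) == 0    # 1 man
assert simplify(9) == 0    # 1 pin
assert simplify(26) == 8   # 9 sou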
c8543d73e37d4fa1d665d3d28277ff99095e0635
3,654,712
def vep(dataset, config, block_size=1000, name='vep', csq=False) -> MatrixTable: """Annotate variants with VEP. .. include:: ../_templates/req_tvariant.rst :func:`.vep` runs `Variant Effect Predictor <http://www.ensembl.org/info/docs/tools/vep/index.html>`__ with the `LOFTEE plugin <https://github.com/konradjk/loftee>`__ on the current dataset and adds the result as a row field. Examples -------- Add VEP annotations to the dataset: >>> result = hl.vep(dataset, "data/vep.properties") # doctest: +SKIP Notes ----- **Configuration** :func:`.vep` needs a configuration file to tell it how to run VEP. The format is a `.properties file <https://en.wikipedia.org/wiki/.properties>`__. Roughly, each line defines a property as a key-value pair of the form `key = value`. :func:`.vep` supports the following properties: - **hail.vep.perl** -- Location of Perl. Optional, default: perl. - **hail.vep.perl5lib** -- Value for the PERL5LIB environment variable when invoking VEP. Optional, by default PERL5LIB is not set. - **hail.vep.path** -- Value of the PATH environment variable when invoking VEP. Optional, by default PATH is not set. - **hail.vep.location** -- Location of the VEP Perl script. Required. - **hail.vep.cache_dir** -- Location of the VEP cache dir, passed to VEP with the ``--dir`` option. Required. - **hail.vep.fasta** -- Location of the FASTA file to use to look up the reference sequence, passed to VEP with the `--fasta` option. Required. - **hail.vep.assembly** -- Genome assembly version to use. Optional, default: GRCh37 - **hail.vep.plugin** -- VEP plugin, passed to VEP with the `--plugin` option. Optional. Overrides `hail.vep.lof.human_ancestor` and `hail.vep.lof.conservation_file`. - **hail.vep.lof.human_ancestor** -- Location of the human ancestor file for the LOFTEE plugin. Ignored if `hail.vep.plugin` is set. Required otherwise. - **hail.vep.lof.conservation_file** -- Location of the conservation file for the LOFTEE plugin. Ignored if `hail.vep.plugin` is set. Required otherwise. Here is an example ``vep.properties`` configuration file .. code-block:: text hail.vep.perl = /usr/bin/perl hail.vep.path = /usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin hail.vep.location = /path/to/vep/ensembl-tools-release-81/scripts/variant_effect_predictor/variant_effect_predictor.pl hail.vep.cache_dir = /path/to/vep hail.vep.lof.human_ancestor = /path/to/loftee_data/human_ancestor.fa.gz hail.vep.lof.conservation_file = /path/to/loftee_data/phylocsf.sql **VEP Invocation** .. code-block:: text <hail.vep.perl> <hail.vep.location> --format vcf --json --everything --allele_number --no_stats --cache --offline --dir <hail.vep.cache_dir> --fasta <hail.vep.fasta> --minimal --assembly <hail.vep.assembly> --plugin LoF,\ human_ancestor_fa:$<hail.vep.lof.human_ancestor>,\ filter_position:0.05,\ min_intron_size:15,\ conservation_file:<hail.vep.lof.conservation_file> -o STDOUT **Annotations** A new row field is added in the location specified by `name` with the following schema: .. 
code-block:: text struct { assembly_name: str, allele_string: str, ancestral: str, colocated_variants: array<struct { aa_allele: str, aa_maf: float64, afr_allele: str, afr_maf: float64, allele_string: str, amr_allele: str, amr_maf: float64, clin_sig: array<str>, end: int32, eas_allele: str, eas_maf: float64, ea_allele: str, ea_maf: float64, eur_allele: str, eur_maf: float64, exac_adj_allele: str, exac_adj_maf: float64, exac_allele: str, exac_afr_allele: str, exac_afr_maf: float64, exac_amr_allele: str, exac_amr_maf: float64, exac_eas_allele: str, exac_eas_maf: float64, exac_fin_allele: str, exac_fin_maf: float64, exac_maf: float64, exac_nfe_allele: str, exac_nfe_maf: float64, exac_oth_allele: str, exac_oth_maf: float64, exac_sas_allele: str, exac_sas_maf: float64, id: str, minor_allele: str, minor_allele_freq: float64, phenotype_or_disease: int32, pubmed: array<int32>, sas_allele: str, sas_maf: float64, somatic: int32, start: int32, strand: int32 }>, context: str, end: int32, id: str, input: str, intergenic_consequences: array<struct { allele_num: int32, consequence_terms: array<str>, impact: str, minimised: int32, variant_allele: str }>, most_severe_consequence: str, motif_feature_consequences: array<struct { allele_num: int32, consequence_terms: array<str>, high_inf_pos: str, impact: str, minimised: int32, motif_feature_id: str, motif_name: str, motif_pos: int32, motif_score_change: float64, strand: int32, variant_allele: str }>, regulatory_feature_consequences: array<struct { allele_num: int32, biotype: str, consequence_terms: array<str>, impact: str, minimised: int32, regulatory_feature_id: str, variant_allele: str }>, seq_region_name: str, start: int32, strand: int32, transcript_consequences: array<struct { allele_num: int32, amino_acids: str, biotype: str, canonical: int32, ccds: str, cdna_start: int32, cdna_end: int32, cds_end: int32, cds_start: int32, codons: str, consequence_terms: array<str>, distance: int32, domains: array<struct { db: str, name: str }>, exon: str, gene_id: str, gene_pheno: int32, gene_symbol: str, gene_symbol_source: str, hgnc_id: str, hgvsc: str, hgvsp: str, hgvs_offset: int32, impact: str, intron: str, lof: str, lof_flags: str, lof_filter: str, lof_info: str, minimised: int32, polyphen_prediction: str, polyphen_score: float64, protein_end: int32, protein_start: int32, protein_id: str, sift_prediction: str, sift_score: float64, strand: int32, swissprot: str, transcript_id: str, trembl: str, uniparc: str, variant_allele: str }>, variant_class: str } Parameters ---------- dataset : :class:`.MatrixTable` Dataset. config : :obj:`str` Path to VEP configuration file. block_size : :obj:`int` Number of rows to process per VEP invocation. name : :obj:`str` Name for resulting row field. csq : :obj:`bool` If ``True``, annotates VCF CSQ field as a :py:data:`.tstr`. If ``False``, annotates with the full nested struct schema. Returns ------- :class:`.MatrixTable` Dataset with new row-indexed field `name` containing VEP annotations. """ require_row_key_variant(dataset, 'vep') mt = MatrixTable(Env.hail().methods.VEP.apply(dataset._jvds, config, 'va.`{}`'.format(name), csq, block_size)) return mt.annotate_rows(vep=mt['vep']['vep'])
e9433db17e82d00aba275066026a301a9b97e5e0
3,654,713
def __get_ll_type__(ll_type):
    """
    Given an lltype value, retrieve its definition.
    """
    res = [llt for llt in __LL_TYPES__ if llt[1] == ll_type]

    assert len(res) < 2, 'Duplicate linklayer types.'

    if res:
        return res[0]
    else:
        return None
f2e86ddd027ec26546a4be8ff8060c1cd8c64aca
3,654,714
import docutils.nodes
from docutils.nodes import Text


def slice_node(node, split):
    """Splits a node up into two sides.

    For text nodes, this will return two text nodes. For text elements, this
    will return two of the source nodes with children distributed on either
    side. Children that live on the split will be split further.

    Parameters
    ----------
    node : docutils.nodes.Text or docutils.nodes.TextElement
    split : int
        Location of the represented text to split at.

    Returns
    -------
    (left, right) : (type(node), type(node))
    """
    if isinstance(node, Text):
        return Text(node[:split]), Text(node[split:])
    elif isinstance(node, docutils.nodes.TextElement):
        if split < 0:
            split = len(node.astext()) + split
        right = node.deepcopy()
        left = node.deepcopy()
        left.clear()
        offset = 0
        while offset < split:
            try:
                child = right.pop(0)
            except IndexError:
                break
            child_strlen = len(child.astext())
            if offset + child_strlen < split:
                left.append(child)
                offset += child_strlen
                continue
            elif offset + child_strlen != split:
                child_left, child_right = slice_node(child, split - offset)
                left.append(child_left)
                right.insert(0, child_right)
            offset += child_strlen
        return left, right
    else:
        raise ValueError('Cannot split {}'.format(repr(node)))
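# Splitting a plain docutils Text node at index 3:
left, right = slice_node(Text("hello world"), 3)
print(left.astext(), "|", right.astext())   # hel | lo world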
5958afbb61160f7e00c42e80c4c69aa7f8644925
3,654,716
def k_radius(x, centroids):
    """
    Maximal distance between centroids and corresponding samples in partition
    """
    labels = partition_labels(x, centroids)
    radii = []
    for idx in range(centroids.shape[0]):
        mask = labels == idx
        radii.append(np.max(np.linalg.norm(x[mask] - centroids[idx], axis=-1)))
    return np.asarray(radii)
de010609e726ce250d72d773a9c1ffb772315b0c
3,654,717
from itertools import chain

import pandas as pd


def build_feature_df(data, default=True, custom_features={}):
    """
    Computes the feature matrix for the dataset of components.

    Args:
        data (dataset): A mapping of {ic_id: IC}. Compatible with the dataset
            representation produced by load_dataset().
        default (bool, optional): Determines whether to compute a standard
            selection of features for the dataset. Defaults to True.
        custom_features (dict, optional): A mapping of custom features that
            will be computed for the dataset. The format is
            {feature_name: compute_feature} where compute_feature is a
            function whose only argument is an IC. Defaults to {}.

    Returns:
        pd.DataFrame: The feature matrix for the dataset.
    """
    def get_iter():
        # Custom features are always computed; the standard selection is
        # included only when `default` is True (as described in the docstring).
        if default:
            return chain(default_features.items(), custom_features.items())
        return custom_features.items()

    features = [feature_name for feature_name, _ in get_iter()]

    idx = []
    rows = []
    for ic_id, ic in data.items():
        row = []
        idx.append(ic_id)
        for feature_name, compute_feature in get_iter():
            row.append(compute_feature(ic))
        rows.append(row)

    feature_df = pd.DataFrame(rows, index=idx, columns=features)
    return feature_df
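# Minimal sketch using only custom features (so the module-level
# `default_features` mapping is not needed); "n_points" is a made-up feature.
data = {"ic_1": [1, 2, 3], "ic_2": [4, 5]}
custom = {"n_points": len}
print(build_feature_df(data, default=False, custom_features=custom))
#       n_points
# ic_1         3
# ic_2         2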
bbd2543a5043ae11305fe86449778a74f7e7ceb3
3,654,718
def decode_complex(data, complex_names=(None, None)): """ Decodes possibly complex data read from an HDF5 file. Decodes possibly complex datasets read from an HDF5 file. HDF5 doesn't have a native complex type, so they are stored as H5T_COMPOUND types with fields such as 'r' and 'i' for the real and imaginary parts. As there is no standardization for field names, the field names have to be given explicitly, or the fieldnames in `data` analyzed for proper decoding to figure out the names. A variety of reasonably expected combinations of field names are checked and used if available to decode. If decoding is not possible, it is returned as is. Parameters ---------- data : arraylike The data read from an HDF5 file, that might be complex, to decode into the proper Numpy complex type. complex_names : tuple of 2 str and/or Nones, optional ``tuple`` of the names to use (in order) for the real and imaginary fields. A ``None`` indicates that various common field names should be tried. Returns ------- c : decoded data or data If `data` can be decoded into a complex type, the decoded complex version is returned. Otherwise, `data` is returned unchanged. See Also -------- encode_complex Notes ----- Currently looks for real field names of ``('r', 're', 'real')`` and imaginary field names of ``('i', 'im', 'imag', 'imaginary')`` ignoring case. """ # Now, complex types are stored in HDF5 files as an H5T_COMPOUND type # with fields along the lines of ('r', 're', 'real') and ('i', 'im', # 'imag', 'imaginary') for the real and imaginary parts, which most # likely won't be properly extracted back into making a Python # complex type unless the proper h5py configuration is set. Since we # can't depend on it being set and adjusting it is hazardous (the # setting is global), it is best to just decode it manually. These # fields are obtained from the fields of its dtype. Obviously, if # there are no fields, then there is nothing to do. if data.dtype.fields is None: return data fields = list(data.dtype.fields) # If there aren't exactly two fields, then it can't be complex. if len(fields) != 2: return data # We need to grab the field names for the real and imaginary # parts. This will be done by seeing which list, if any, each field # is and setting variables to the proper name if it is in it (they # are initialized to None so that we know if one isn't found). real_fields = ['r', 're', 'real'] imag_fields = ['i', 'im', 'imag', 'imaginary'] cnames = list(complex_names) for s in fields: if s.lower() in real_fields: cnames[0] = s elif s.lower() in imag_fields: cnames[1] = s # If the real and imaginary fields were found, construct the complex # form from the fields. This is done by finding the complex type # that they cast to, making an array, and then setting the # parts. Otherwise, return what we were given because it isn't in # the right form. if cnames[0] is not None and cnames[1] is not None: cdata = np.result_type(data[cnames[0]].dtype, \ data[cnames[1]].dtype, 'complex64').type(data[cnames[0]]) cdata.imag = data[cnames[1]] return cdata else: return data
4c2fad09751ddfe4c5623d47a187f710ab62532f
3,654,720
def CLYH( directed = False, preprocess = "auto", load_nodes = True, load_node_types = True, load_edge_weights = True, auto_enable_tradeoffs = True, sort_tmp_dir = None, verbose = 2, cache = True, cache_path = None, cache_sys_var = "GRAPH_CACHE_DIR", version = "2020-05-29", **kwargs ) -> Graph: """Return CLYH graph Parameters ---------- directed = False preprocess = "auto" Preprocess for optimal load time & memory peak. Will preprocess in Linux/macOS but not Windows. load_nodes = True Load node names or use numeric range auto_enable_tradeoffs = True Enable when graph has < 50M edges cache_path = None Path to store graphs Defaults either to `GRAPH_CACHE_DIR` sys var or `graphs` cache_sys_var = "GRAPH_CACHE_DIR" version = "2020-05-29" Version to retrieve The available versions are: - 2020-05-29 """ return AutomaticallyRetrievedGraph( "CLYH", version, "kgobo", directed, preprocess, load_nodes, load_node_types, load_edge_weights, auto_enable_tradeoffs, sort_tmp_dir, verbose, cache, cache_path, cache_sys_var, kwargs )()
6dcddfff1411ea71d1743fabde782066c41ace9f
3,654,721
def lineParPlot(parDict, FigAx=None, **kwargs):
    """
    Plot the results of lineParameters().

    Parameters
    ----------
    parDict : dict
        The relevant parameters:
            xPerc : tuple, (xPerc1, xPerc2)
                Left and right x-axis values of the line profile at perc% of
                the peak flux.
            Xc : float
                The center of x-axis value calculated at perc% of the peak
                flux.
            Fperc : float
                Fpeak * perc / 100.
    FigAx : tuple (optional)
        The tuple of (fig, ax) of the figure.
    **kwargs : dict
        The keywords for the plotting.

    Returns
    -------
    FigAx : tuple
        The tuple of (fig, ax) of the figure.
    """
    if FigAx is None:
        fig = plt.figure(figsize=(8, 4))
        ax = plt.gca()
    else:
        fig, ax = FigAx
    x1, x2 = parDict["xPerc"]
    xc = parDict["Xc"]
    yperc = parDict["Fperc"]
    ax.axvline(x=x1, **kwargs)
    kwargs["label"] = None
    ax.axvline(x=x2, **kwargs)
    ax.axhline(y=yperc, **kwargs)
    kwargs["ls"] = "-"
    ax.axvline(x=xc, **kwargs)
    return (fig, ax)
4767446fb983902ea0a3ce631420c61f032970f9
3,654,723
def prepare_data_arrays(tr_df, te_df, target):
    """
    tr_df: train dataset made by "prepare_dataset" function
    te_df: test dataset made by "prepare_dataset" function
    target: name of target y
    return: (numpy array of train dataset),
            (numpy array of test dataset: y will be filled with NaN),
            (column ID of y)
    """
    col_to_id = {k: v for v, k in enumerate(tr_df.columns)}

    train_array = np.array(tr_df)
    test_array = np.array(te_df)
    target_id = col_to_id[target]

    # fill target values with nan
    test_array[:, target_id] = np.nan

    return train_array, test_array, target_id
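# Illustrative run with tiny float-valued frames (float dtype so the target
# column can be overwritten with NaN).
import numpy as np
import pandas as pd
tr = pd.DataFrame({"x": [1.0, 2.0], "y": [10.0, 20.0]})
te = pd.DataFrame({"x": [3.0], "y": [30.0]})
train_arr, test_arr, y_id = prepare_data_arrays(tr, te, target="y")
print(y_id)       # 1
print(test_arr)   # [[ 3. nan]]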
097f376263dfeecffaf201f4ea1cd29980d88746
3,654,724
def plot(model_set, actual_mdot=True, qnuc=0.0, verbose=True, ls='-',
         offset=True, bprops=('rate', 'fluence', 'peak'),
         display=True, grid_version=0):
    """Plot predefined set of mesa model comparisons

    model_set : int
        ID for set of models (defined below)
    """
    mesa_info = get_mesa_set(model_set)

    if actual_mdot:
        mdots = mesa_info['mdots_actual']
    else:
        mdots = mesa_info['mdots']

    mesa_info['params']['qnuc'] = qnuc
    fig, ax = plot_compare(mesa_runs=mesa_info['runs'], display=display,
                           mesa_mdots=mdots, bprops=bprops,
                           params=mesa_info['params'], verbose=verbose,
                           grid_version=grid_version, ls=ls, offset=offset)
    return fig, ax
2ceec63d162fe07dd4a00a508657095528243421
3,654,726
def preprocessing_fn(batch):
    """
    Standardize, then normalize sound clips
    """
    processed_batch = []
    for clip in batch:
        signal = clip.astype(np.float64)
        # Signal normalization
        signal = signal / np.max(np.abs(signal))
        # get pseudorandom chunk of fixed length (from SincNet's create_batches_rnd)
        signal_length = len(signal)
        if signal_length < WINDOW_LENGTH:
            signal = np.concatenate((signal, np.zeros(WINDOW_LENGTH - signal_length)))
        else:
            np.random.seed(signal_length)
            signal_start = np.random.randint(0, signal_length - WINDOW_LENGTH)
            signal_stop = signal_start + WINDOW_LENGTH
            signal = signal[signal_start:signal_stop]
        processed_batch.append(signal)
    return np.array(processed_batch)
d277cd95d174e1ec104a8b8a8d72e23e2dd7f991
3,654,728
def generate_random_bond_list(atom_count, bond_count, seed=0):
    """
    Generate a random :class:`BondList`.
    """
    np.random.seed(seed)
    # Create random bonds between atoms of
    # a potential atom array of length ATOM_COUNT
    bonds = np.random.randint(atom_count, size=(bond_count, 3))
    # Clip bond types to allowed BondType values
    bonds[:, 2] %= len(struc.BondType)
    # Remove bonds of atoms to itself
    bonds = bonds[bonds[:, 0] != bonds[:, 1]]
    assert len(bonds) > 0
    return struc.BondList(atom_count, bonds)
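# Hedged usage sketch, assuming `struc` is biotite.structure (as the
# BondList/BondType references suggest) and numpy is imported as np.
# bond_list = generate_random_bond_list(atom_count=50, bond_count=20, seed=1)
# print(bond_list.as_array().shape)   # (n_bonds, 3): atom_i, atom_j, bond type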
cb7784f8561be2ea7c54d5f46c2e6e697164b1b8
3,654,729
def open_cosmos_files():
    """
    This function opens files related to the COSMOS field.

    Returns:
        The ZFIRE COSMOS master table; the ZFOURGE photometric, EAZY, and
        FAST catalogues; rest-frame U/V/J fluxes derived with spec-z and with
        photo-z; Lee's U-V and V-J colours; UV+IR SFRs; the MOSDEF and VUDS
        cross-matched catalogues (plus extra VUDS info); and rest-frame U/V/J
        fluxes for the ZFIRE+MOSDEF and ZFIRE+VUDS samples, in the order
        listed in the return statement.
    """
    COSMOS_mastertable = pd.read_csv('data/zfire/zfire_cosmos_master_table_dr1.1.csv', index_col='Nameobj')

    ZF_cat = ascii.read('data/zfourge/spitler2014/cosmos.v0.10.7.a.cat')
    ZF_EAZY = ascii.read('data/zfourge/spitler2014/cosmos.v0.10.7.a.zout')
    ZF_FAST = ascii.read('data/zfourge/spitler2014/cosmos.v0.10.7.a.fout')

    # load in colours using spec-z
    # only ZFIRE
    U_spec = ascii.read('data/zfourge/uvj/specz_zfire/cosmos.v0.10.7.a.153.rf')
    V_spec = ascii.read('data/zfourge/uvj/specz_zfire/cosmos.v0.10.7.a.155.rf')
    J_spec = ascii.read('data/zfourge/uvj/specz_zfire/cosmos.v0.10.7.a.161.rf')

    # load in colours using photo-z
    U_photo = ascii.read('data/zfourge/uvj/photoz/cosmos.v0.10.7.a.153.rf')
    V_photo = ascii.read('data/zfourge/uvj/photoz/cosmos.v0.10.7.a.155.rf')
    J_photo = ascii.read('data/zfourge/uvj/photoz/cosmos.v0.10.7.a.161.rf')

    # galaxy colours derived by Lee's catalogue
    # This uses the older EAZY method of fitting colours
    UV_lee = ascii.read('data/zfourge/spitler2014/cosmos.v0.10.7.a.153-155.rf')
    VJ_lee = ascii.read('data/zfourge/spitler2014/cosmos.v0.10.7.a.155-161.rf')

    UV_IR_SFRs = ascii.read('data/zfourge/sfrs/cosmos.sfr.v0.5.cat')

    MOSDEF_ZFOURGE = ascii.read('data/catalogue_crossmatch/MOSDEF_COSMOS.dat')
    # ZFIRE and MOSDEF colours
    U_ZM = ascii.read('data/zfourge/uvj/specz_zfire_mosdef/cosmos.v0.10.7.a.153.rf')
    V_ZM = ascii.read('data/zfourge/uvj/specz_zfire_mosdef/cosmos.v0.10.7.a.155.rf')
    J_ZM = ascii.read('data/zfourge/uvj/specz_zfire_mosdef/cosmos.v0.10.7.a.161.rf')

    VUDS_ZFOURGE = ascii.read('data/catalogue_crossmatch/VUDS_COSMOS.dat')
    VUDS_extra = ascii.read('data/vuds/cesam_vuds_spectra_dr1_cosmos_catalog_additional_info.txt')
    # ZFIRE and VUDS colours
    U_ZV = ascii.read('data/zfourge/uvj/specz_vuds/cosmos.v0.10.7.a.153.rf')
    V_ZV = ascii.read('data/zfourge/uvj/specz_vuds/cosmos.v0.10.7.a.155.rf')
    J_ZV = ascii.read('data/zfourge/uvj/specz_vuds/cosmos.v0.10.7.a.161.rf')

    return COSMOS_mastertable, ZF_cat, ZF_EAZY, ZF_FAST, U_spec, V_spec, J_spec,\
           U_photo, V_photo, J_photo, UV_lee, VJ_lee, UV_IR_SFRs, MOSDEF_ZFOURGE,\
           U_ZM, V_ZM, J_ZM, VUDS_ZFOURGE, VUDS_extra, U_ZV, V_ZV, J_ZV
229aa967dce5faaf42b488ebf2768b280ced9359
3,654,730
import numpy


def convert_image_points_to_points(image_positions, distances):
    """Convert image points to 3d points.

    Returns:
        positions
    """
    hypotenuse_small = numpy.sqrt(
        image_positions[:, 0]**2 +
        image_positions[:, 1]**2 + 1.0)
    ratio = distances / hypotenuse_small
    n = image_positions.shape[0]
    positions = numpy.zeros([n, 3])
    positions[:, 0] = -image_positions[:, 0] * ratio
    positions[:, 1] = ratio
    positions[:, 2] = -image_positions[:, 1] * ratio
    return positions
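# A point on the optical axis (0, 0) at distance 5 maps to a 3-D point five
# units straight ahead along the +y axis.
pts = numpy.array([[0.0, 0.0]])
print(convert_image_points_to_points(pts, numpy.array([5.0])))  # ~[[0., 5., 0.]]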
3680a02997cf1109fd08f61c6642b29ea3433f1d
3,654,731
def W(i, j):
    """The Wilson functions.

    :func:`W` corresponds to formula (2) on page 16 in `the technical paper`_,
    defined as:

    .. math::

        W(t, u_j) = e^{-UFR\\cdot (t+u_j)}\\cdot
        \\left\\{
        \\alpha\\cdot\\min(t, u_j)
        - 0.5\\cdot e^{-\\alpha\\cdot\\max(t, u_j)}\\cdot (
        e^{\\alpha\\cdot\\min(t, u_j)}
        - e^{-\\alpha\\cdot\\min(t, u_j)}
        )
        \\right\\}

    where :math:`t = u_i`.

    Args:
        i(int): Time index (1, 2, ..., :attr:`N`)
        j(int): Time index (1, 2, ..., :attr:`N`)
    """
    t = u[i]
    uj = u[j]

    return exp(-UFR * (t+uj)) * (
        alpha * min(t, uj)
        - 0.5 * exp(-alpha * max(t, uj)) * (
            exp(alpha*min(t, uj))
            - exp(-alpha*min(t, uj))
        ))
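# Hedged setup sketch: W relies on module-level globals; the names below
# match the function body, but the values are illustrative assumptions only.
# from math import exp
# u = {1: 1.0, 2: 2.0, 3: 3.0}   # liability maturities, 1-indexed
# UFR = 0.036                    # ultimate forward rate
# alpha = 0.15                   # convergence speed
# W(1, 2)                        # Wilson function value for t = u[1], u_j = u[2]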
37266db68fb51a87f15290edae06eb6397796b6f
3,654,732
from scipy.interpolate import interp1d def reddening_fm(wave, ebv=None, a_v=None, r_v=3.1, model='f99'): """Determines a Fitzpatrick & Massa reddening curve. Parameters ---------- wave: ~numpy.ndarray wavelength in Angstroms ebv: float E(B-V) differential extinction; specify either this or a_v. a_v: float A(V) extinction; specify either this or ebv. r_v: float, optional defaults to standard Milky Way average of 3.1 model: {'f99', 'fm07'}, optional * 'f99' is the default Fitzpatrick (1999) [1]_ * 'fm07' is Fitzpatrick & Massa (2007) [2]_. Currently not R dependent. Returns ------- reddening_curve: ~numpy.ndarray Multiply to deredden flux, divide to redden. Notes ----- Uses Fitzpatrick (1999) [1]_ by default, which relies on the UV parametrization of Fitzpatrick & Massa (1990) [2]_ and spline fitting in the optical and IR. This function is defined from 910 A to 6 microns, but note the claimed validity goes down only to 1150 A. The optical spline points are not taken from F99 Table 4, but rather updated versions from E. Fitzpatrick (this matches the Goddard IDL astrolib routine FM_UNRED). The fm07 model uses the Fitzpatrick & Massa (2007) [3]_ parametrization, which has a slightly different functional form. That paper claims it preferable, although it is unclear if signficantly (Gordon et al. 2009) [4]_. It is not the literature standard, so not default here. References ---------- [1] Fitzpatrick, E. L. 1999, PASP, 111, 63 [2] Fitpatrick, E. L. & Massa, D. 1990, ApJS, 72, 163 [3] Fitpatrick, E. L. & Massa, D. 2007, ApJ, 663, 320 [4] Gordon, K. D., Cartledge, S., & Clayton, G. C. 2009, ApJ, 705, 1320 """ model = model.lower() if model not in ['f99','fm07']: raise ValueError('model must be f99 or fm07') if (a_v is None) and (ebv is None): raise ValueError('Must specify either a_v or ebv') if (a_v is not None) and (ebv is not None): raise ValueError('Cannot specify both a_v and ebv') if a_v is not None: ebv = a_v / r_v if model == 'fm07': raise ValueError('TEMPORARY: fm07 currently not properly R dependent') x = 1e4 / wave # inverse microns k = np.zeros(x.size) if any(x < 0.167) or any(x > 11): raise ValueError('fm_dered valid only for wavelengths from 910 A to '+ '6 microns') # UV region uvsplit = 10000. / 2700. # Turn 2700A split into inverse microns. uv_region = (x >= uvsplit) y = x[uv_region] k_uv = np.zeros(y.size) # Fitzpatrick (1999) model if model == 'f99': x0, gamma = 4.596, 0.99 c3, c4 = 3.23, 0.41 c2 = -0.824 + 4.717 / r_v c1 = 2.030 - 3.007 * c2 D = y**2 / ((y**2-x0**2)**2 + y**2 * gamma**2) F = np.zeros(y.size) valid = (y >= 5.9) F[valid] = 0.5392 * (y[valid]-5.9)**2 + 0.05644 * (y[valid]-5.9)**3 k_uv = c1 + c2*y + c3*D + c4*F # Fitzpatrick & Massa (2007) model if model == 'fm07': x0, gamma = 4.592, 0.922 c1, c2, c3, c4, c5 = -0.175, 0.807, 2.991, 0.319, 6.097 D = y**2 / ((y**2-x0**2)**2 + y**2 * gamma**2) valid = (y <= c5) k_uv[valid] = c1 + c2*y[valid] + c3*D[valid] valid = (y > c5) k_uv[valid] = c1 + c2*y[valid] + c3*D[valid] + c4*(y[valid]-c5)**2 k[uv_region] = k_uv # Calculate values for UV spline points to anchor OIR fit x_uv_spline = 10000. / np.array([2700., 2600.]) D = x_uv_spline**2 / ((x_uv_spline**2-x0**2)**2 + x_uv_spline**2 * gamma**2) k_uv_spline = c1 + c2*x_uv_spline +c3*D # Optical / IR OIR_region = (x < uvsplit) y = x[OIR_region] k_OIR = np.zeros(y.size) # Fitzpatrick (1999) model if model == 'f99': # The OIR anchors are up from IDL astrolib, not F99. 
anchors_extinction = np.array([0, 0.26469*r_v/3.1, 0.82925*r_v/3.1, # IR -0.422809 + 1.00270*r_v + 2.13572e-04*r_v**2, # optical -5.13540e-02 + 1.00216*r_v - 7.35778e-05*r_v**2, 0.700127 + 1.00184*r_v - 3.32598e-05*r_v**2, (1.19456 + 1.01707*r_v - 5.46959e-03*r_v**2 + 7.97809e-04*r_v**3 + -4.45636e-05*r_v**4)]) anchors_k = np.append(anchors_extinction-r_v, k_uv_spline) # Note that interp1d requires that the input abscissa is monotonically # _increasing_. This is opposite the usual ordering of a spectrum, but # fortunately the _output_ abscissa does not have the same requirement. anchors_x = 1e4 / np.array([26500., 12200., 6000., 5470., 4670., 4110.]) anchors_x = np.append(0., anchors_x) # For well-behaved spline. anchors_x = np.append(anchors_x, x_uv_spline) OIR_spline = interp1d(anchors_x, anchors_k, kind='cubic') k_OIR = OIR_spline(y) # Fitzpatrick & Massa (2007) model if model == 'fm07': anchors_k_opt = np.array([0., 1.322, 2.055]) IR_wave = np.array([float('inf'), 4., 2., 1.333, 1.]) anchors_k_IR = (-0.83 + 0.63*r_v) * IR_wave**-1.84 - r_v anchors_k = np.append(anchors_k_IR, anchors_k_opt) anchors_k = np.append(anchors_k, k_uv_spline) anchors_x = np.array([0., 0.25, 0.50, 0.75, 1.]) # IR opt_x = 1e4 / np.array([5530., 4000., 3300.]) # optical anchors_x = np.append(anchors_x, opt_x) anchors_x = np.append(anchors_x, x_uv_spline) OIR_spline = interp1d(anchors_x, anchors_k, kind='cubic') k_OIR = OIR_spline(y) k[OIR_region] = k_OIR reddening_curve = 10**(0.4 * ebv * (k+r_v)) return reddening_curve
1f47b360044613c9bbb18bf3446bcd7e3ad20344
3,654,733
def list_registered_stateful_ops_without_inputs():
    """Returns set of registered stateful ops that do not expect inputs.

    This list is used to identify the ops to be included in the state-graph
    and that are subsequently fed into the apply-graphs.

    Returns:
        A set of strings.
    """
    return set([
        name for name, op in op_def_registry.get_registered_ops().items()
        if op.is_stateful and not op.input_arg
    ])
aa089bc4157c6a3c36121c6e880ffbd546723f0e
3,654,734
def load_frame_from_video(path: str, frame_index: int) -> np.ndarray:
    """load a full trajectory video file and return a single frame from it"""
    vid = load_video(path)
    img = vid[frame_index]
    return img
7b8747df38dfcf1f2244166002126d6d25170506
3,654,735
from typing import Dict
from typing import List


def get_settings_patterns(project_id: int) -> Dict[str, str]:
    """Return the project's tracking-pattern settings."""
    track_patterns: List[Dict[str, str]] = ProjectSettings.objects.get(project_id=project_id).trackPatterns
    return {pattern['pattern']: pattern['regex'] for pattern in track_patterns}
d566ad5ec2fd72e2384fea90aa9cae9d99d9f441
3,654,736
import cv2
import numpy as np


def video_to_array(filepath):
    """Process the video into an array."""
    cap = cv2.VideoCapture(filepath)
    num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    channel = 3
    frame_buffer = np.empty((num_frames, height, width, channel), dtype=np.float32)
    frame_num = 0
    returned = True
    while (frame_num < num_frames and returned):
        returned, frame = cap.read()
        if frame is not None:
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame = frame.astype(np.float32)
            frame = frame / 255.0
            if np.sum(frame) > 0.0:
                frame_buffer[frame_num] = frame
            frame_num += 1
    cap.release()
    return frame_buffer
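# Hypothetical usage, assuming OpenCV can open "clip.mp4" in the working
# directory; the file name is illustrative.
frames = video_to_array("clip.mp4")
print(frames.shape)   # (num_frames, height, width, 3), float32 values in [0, 1]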
2034ce56c7ca4fe61d0e0eb443c6fa9910d8a232
3,654,737