def freq2Index(freq, sampleRate, nFFT):
    """
    Return the FFT index for a given frequency.

    :param freq: Frequency, of which the bins should be returned
    :type freq: int
    :param sampleRate: Number of data samples per second
    :type sampleRate: int
    :param nFFT: Length of the Fourier transform
    :type nFFT: int
    :return: FFT index of the given frequency
    :rtype: int
    """
    return int(round(freq / (sampleRate / (nFFT * 2)), 3))
167f6ac2c3e1c170162896c9af5e9641f56d6983
539,212
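A minimal usage sketch for freq2Index above; the tone, sample rate, and FFT length are illustrative values, not from the source.

# Hypothetical values: 440 Hz tone, 44.1 kHz sample rate, 1024-point FFT.
# The bin width here is sampleRate / (nFFT * 2) = 44100 / 2048 ≈ 21.53 Hz.
idx = freq2Index(440, 44100, 1024)
print(idx)  # -> 20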
from typing import List
from typing import Any


def get_range_command(indexes: List[str], val: List[Any]) -> List[Any]:
    """
    Filter list with a given range.

    Args:
        indexes (list): indexes to filter.
        val (list): list to filter.

    Returns:
        filtered list.
    """
    result = []
    for index in indexes:
        if '-' in str(index):
            start, end = index.split('-')
            start = int(start) if start else 0
            end = int(end) if end else len(val)
            for element in val[start:end + 1]:
                result.append(element)
        else:
            result.append(val[int(index)])
    return result
cc8f036e6a9cbb62d5bf6670a826f45e750f88a1
135,591
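A quick usage sketch (inputs invented). Note that the end of a 'start-end' range is inclusive, since the slice runs to end + 1.

vals = ['a', 'b', 'c', 'd', 'e', 'f']
print(get_range_command(['1-3', '5'], vals))  # -> ['b', 'c', 'd', 'f']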
def _preprocess_label(lbl):
    """
    The provided datasets have labels that are 1-indexed.
    We convert them to 0-indexed here.
    """
    return lbl - 1
bb91a6b3221f656142a0479bc4aeca0163970c73
143,597
def convert_datetime_to_timestamp_str(date):
    """Converts the given datetime to a formatted date str.

    The format matches strftime directives %Y-%m-%dT%H:%M:%S.%f.

    Args:
        date (datetime): The datetime object to convert.

    Returns:
        (str): A str representing the given date. Example output looks like
            '2020-03-25T15:29:04.465Z'.
    """
    prefix = date.strftime(u"%Y-%m-%dT%H:%M:%S.%f")[:-3]
    return u"{}Z".format(prefix)
b3f5aa73f9e578da9e7a94a88473621d16a2cc3d
515,287
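A usage sketch reproducing the docstring's example output (input datetime invented to match):

from datetime import datetime

stamp = convert_datetime_to_timestamp_str(datetime(2020, 3, 25, 15, 29, 4, 465000))
print(stamp)  # -> '2020-03-25T15:29:04.465Z'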
from pathlib import Path


def read_certificate_file(certificate_file: Path) -> str:
    """Read the contents of the local X.509 certificate file and return the string.

    :param certificate_file: X.509 client certificate to read
    :type certificate_file: Path
    :return: PEM encoded certificate
    :rtype: str
    """
    # the certificate format is not modified
    with open(certificate_file, "r") as f:
        data = f.read()
    return data
1b014b03880cca14e830f1a6dba0a9529b64b239
611,305
def ngrams(words, n):
    """
    Note: generate ngrams of certain length with a list of words

    Args:
        words: words list
        n: length of ngram

    Returns:
        list of ngrams of certain length
    """
    if len(words) <= n:
        print(len(words), '/', n)
        return words
    else:
        ngram_set = set()
        for i in range(len(words) - n + 1):
            ngram_set.add(' '.join(words[i:i + n]))
        return list(ngram_set)
2e9f8b4774dcb8d0567c9826b9b03ecbafb9968f
333,440
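A usage sketch (word list invented). The order of the returned ngrams is not guaranteed, since they pass through a set, so we sort for display.

grams = ngrams(['the', 'cat', 'sat', 'down'], 2)
print(sorted(grams))  # -> ['cat sat', 'sat down', 'the cat']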
def rgb_to_RGB(r, g, b):
    """
    Convert rgb values to RGB values

    :param r,g,b: (0,1) range floats
    :return: a 3 element tuple of RGB values in the range (0, 255)
    """
    return (int(r * 255), int(g * 255), int(b * 255))
7358f0a0263125c4c036cb6ebc170455c24bf05b
599,467
import math


def deg2rad(x):
    """Convert the unit of x from degree to radian"""
    return x * math.pi / 180.0
679d70a7c209f2487d3849670653dcfd01a9f2fa
441,949
def get_converter_type_uuid(*args, **kwargs):
    """
    Handle converter type "uuid"

    :param args:
    :param kwargs:
    :return: return schema dict
    """
    schema = {
        'type': 'string',
        'format': 'uuid',
    }
    return schema
2841ba2ff4d0994371dabe581f8350b4971a9144
609,843
def in_cksum_done(s):
    """Fold and return Internet checksum."""
    while (s >> 16):
        s = (s >> 16) + (s & 0xffff)
    return (~s & 0xffff)
25011c254e89179fe4232ad0ecfa0a847bf0b30b
23,609
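A worked example (input value invented): folding a 32-bit partial sum down to 16 bits, then taking the one's complement.

s = 0x1ABCD  # partial sum with a carry above 16 bits
# fold: (0x1ABCD >> 16) + (0x1ABCD & 0xFFFF) = 0x1 + 0xABCD = 0xABCE
print(hex(in_cksum_done(s)))  # -> 0x5431, the one's complement of 0xABCE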
def filter_entries_by_tag(search, entry):
    """Check if search matches any tags as stored in the Entry namedtuple
    (case insensitive, only whole, not partial string matches).

    Returns bool: True if match, False if not.

    Supported searches:
    1. If & in search do AND match, e.g. flask&api should match entries
       with both tags
    2. Elif | in search do an OR match, e.g. flask|django should match
       entries with either tag
    3. Else: match if search is in tags
    """
    search = search.lower()
    truth = []
    if '&' in search:
        for term in search.split('&'):
            truth.append(term in entry.tags)
        return all(truth)
    elif '|' in search:
        for term in search.split('|'):
            truth.append(term in entry.tags)
        return any(truth)
    else:
        return search in entry.tags
13a722166efee98eac5610beec4f1e38d974a409
427,558
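A usage sketch assuming a hypothetical Entry namedtuple with a tags field holding lowercase tag strings (the source does not show its definition):

from collections import namedtuple

Entry = namedtuple('Entry', 'title tags')
post = Entry('demo', ['flask', 'api'])

print(filter_entries_by_tag('flask&api', post))     # -> True (has both tags)
print(filter_entries_by_tag('flask|django', post))  # -> True (has at least one)
print(filter_entries_by_tag('django', post))        # -> False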
def ratio(value, count):
    """compute ratio but ignore count=0"""
    if count == 0:
        return 0.0
    else:
        return value / count
0688000ae2685ffd2f377d4d136d41964acef416
267,071
def normalize_wiki_text(text):
    """
    Normalizes a text such as a wikipedia title.

    @param text text to normalize
    @return normalized text
    """
    return text.replace("_", " ").replace("''", '"')
ec3855b8661ae67abc6ca84d03634ff48437ad58
670,483
def trigger_event(connection, id, fields=None, error_msg=None):
    """Trigger an event.

    Args:
        connection(object): MicroStrategy connection object returned by
            `connection.Connection()`.
        id(str): ID of the event
        fields(optional): value passed through as the 'fields' query parameter
        error_msg (string, optional): Custom Error Message for Error Handling

    Returns:
        HTTP response object returned by the MicroStrategy REST server.
    """
    url = f'{connection.base_url}/api/events/{id}/trigger'
    return connection.post(
        url=url,
        params={'fields': fields}
    )
418994565cd20cac681575286553d4fa92cf89c9
698,445
def make_course_key_str(org, number, run='test-run'):
    """
    Helper method to create a string representation of a CourseKey
    """
    return 'course-v1:{}+{}+{}'.format(org, number, run)
3fcc10ac2b6380ed88e53dd37ae2978584d26447
259,460
def find_asymetric_coords(M):
    """
    Given a square numpy array M return i,j such that M[i,j] != M[j,i]
    """
    for i in range(M.shape[0]):
        for j in range(i):
            if M[i, j] != M[j, i]:
                return i, j
    return None
4e4f2d0b65a6074686eac041054f2af50940e8ea
527,865
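A quick check with an invented 2x2 matrix; only the lower triangle is scanned, so the first asymmetric pair found is (1, 0).

import numpy as np

M = np.array([[0, 1],
              [2, 0]])
print(find_asymetric_coords(M))         # -> (1, 0), since M[1, 0] = 2 but M[0, 1] = 1
print(find_asymetric_coords(np.eye(3)))  # -> None for a symmetric matrix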
def resolve_url(properties, seqid, path_type):
    """Resolves a refget request to a cloud storage object url

    Arguments:
        properties (Properties): runtime properties containing cloud storage
            base url, and url paths to sequences and metadata
        seqid (str): Requested sequence (checksum identifier)
        path_type (str): 'sequence' for sequence requests, 'metadata' for
            metadata requests

    Returns:
        (str): url pointing to requested object in cloud storage
    """
    path_props = {
        "sequence": "source.sequence_path",
        "metadata": "source.metadata_path"
    }
    base_url = properties.get("source.base_url")
    url_path = properties.get(path_props[path_type])
    template = base_url + url_path
    url = template.format(seqid=seqid)
    return url
9d4e96cbf097ab7ea08894d0482da5ce4ca930b2
461,114
def in_slots(obj, key, default=False):
    """
    Returns true if key exists in obj.__slots__; false if not in.
    If obj.__slots__ is absent, return default
    """
    return (key in obj.__slots__) if getattr(obj, '__slots__', None) else default
c64473a323302eacefcf211c1114686ef1feec2d
661,754
def iterator_to_list(iterator):
    """
    Transform the input iterator into a list.

    :param iterator: iterator to consume
    :return: list with the iterator's elements
    """
    return [elem for elem in iterator]
1a582f76763dba5728a7011597312ece819dc6af
293,737
def getChildrenText(tag, childtag):
    """returns the text of all children of tag which are <childtag>"""
    tags = []
    for child in tag:
        if child.tag == childtag:
            tags.append(child.text)
    return tags
59ae3eca5dcd393907115e208cd2ed4cf0fa1b8a
252,770
import csv


def read_layout(layout):
    """
    Reads out a layout from .csv and puts it in a dictionary

    Args:
        layout(str): filename of the .csv

    Returns:
        pts(dict): A dictionary with all the relevant information from the .csv
    """
    pts = {}
    with open(layout) as csvfile:
        readCSV = csv.reader(csvfile, delimiter=',')
        for row in readCSV:
            if row[0][0] == 'D':
                pts[row[0]] = float(row[1]), float(row[2])
            if row[0][0] == 'T':
                pts[row[0]] = row[1], row[2]
            if row[0][0] == 'U':
                pts[row[0]] = int(row[1]), int(row[2])
    return pts
f28a98284226ff13652588568c33be19b6fb3245
496,037
def get_request_id(request_json):
    """Get the request_id from the request"""
    request_id = request_json['requestInfo'].get('requestId')
    if not request_id:
        request_id = request_json['requestInfo'].get('requestID')
    return request_id
68b3e9e8a15d84d1042173b8fc8c480f996d616a
16,664
def all(*args, span=None):
    """Create a new expression of the intersection of all conditions in the
    arguments

    Parameters
    ----------
    args : list
        List of symbolic boolean expressions

    span : Optional[Span]
        The location of this operator in the source code.

    Returns
    -------
    expr: Expr
        Expression
    """
    if not args:
        raise ValueError("All must take at least 1 argument")
    if len(args) == 1:
        return args[0]
    val = _ffi_api._OpAnd(args[0], args[1], span)  # type: ignore
    for i in range(2, len(args)):
        val = _ffi_api._OpAnd(val, args[i], span)  # type: ignore
    return val
f0cebfb241c10c2d53c58a8b4fb186e9d65a1b7a
9,430
def encode_phrase_bank_to_id_lists(phrase_bank_list, vocab, max_bank_size):
    """Encode keyphrase bank into word ids.

    Args:
        phrase_bank_list (list): the list of keyphrase bank for each training
            data, each list is a list of keyphrase (already deduplicated),
            each keyphrase is a string.
        vocab (Vocab): vocabulary to convert words into ids
        max_bank_size (int): maximum allowed number of keyphrase per instance.

    Returns:
        phrase_bank_word_ids (list): the list of phrase bank word ids
        phrase_bank_words (list): the list of tokenized phrase words, with the
            same dimension as phrase_bank_word_ids
    """
    phrase_bank_word_ids = []
    phrase_bank_words = []
    for sample in phrase_bank_list:
        cur_ph_bank_wids = []
        cur_ph_bank_words = []
        for ph in sample:
            ph = ph.lower()
            cur_ph = []
            cur_ph_ids = []
            for w in ph.split():
                if w in vocab._word2id:
                    cur_ph.append(w)
                    cur_ph_ids.append(vocab.word2id(w))
            if len(cur_ph_ids) == 0:
                continue
            cur_ph_bank_wids.append(cur_ph_ids)
            cur_ph_bank_words.append(cur_ph)
            if len(cur_ph_bank_wids) == max_bank_size:
                break
        phrase_bank_word_ids.append(cur_ph_bank_wids)
        phrase_bank_words.append(cur_ph_bank_words)
    return phrase_bank_word_ids, phrase_bank_words
5344f5090e4728a2cffc835690584b5901c28d5a
149,571
def _get(entry):
    """A helper to get the value, be it a plain value, a callable, or a
    (callable, args) tuple
    """
    if isinstance(entry, (tuple, list)):
        func, args = entry
        return func(*args)
    elif callable(entry):
        return entry()
    else:
        return entry
40742e0f86ea1a89b05e0354912c64683a9b9160
31,562
from typing import Union
from typing import Tuple


def safe_issubclass(cls: type, class_or_tuple: Union[type, Tuple[type, ...]]) -> bool:
    """Safe version of ``issubclass()``.

    Apart from incorrect arguments, ``issubclass(a, b)`` can throw an error
    only if `b` has a ``__subclasscheck__`` method that throws an error.
    Therefore, it is not necessary to use ``safe_issubclass()`` if the class
    is known to not override ``__subclasscheck__``.

    Defaults to False if ``issubclass()`` throws.
    """
    try:
        return issubclass(cls, class_or_tuple)
    except Exception:
        return False
70509472c577b6c97576b0a6925d16314773be30
453,114
def countAlleles(columns):
    """For each alt allele in the 5th column, keep a count of how many times
    that alt's index appears in the genotype columns, for the AC tag in the
    INFO field.  For the AN tag, tally up the genotype columns that don't
    have '.' (no call).  Return the total number of calls for AN and an
    array of alternate allele counts for AC.

    NOTE: this does not handle multiploid VCF where genotypes can be
    /- or |-separated."""
    totalCalls = 0
    alts = columns[4]
    altCounts = [0 for alt in alts.split(',')]
    for colVal in columns[9:]:
        subVals = colVal.split(':')
        gt = subVals[0]
        # '!=' rather than 'is not': identity comparison of strings is a bug
        if gt != '.':
            totalCalls += 1
            altIx = int(gt) - 1
            if altIx >= 0:
                altCounts[altIx] += 1
    return totalCalls, altCounts
e270250c8a2a018055075951dd7cbd307ecebba6
428,624
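A small worked example with invented VCF columns (CHROM through FORMAT, followed by four genotype columns):

#           CHROM  POS    ID   REF  ALT    QUAL FILTER INFO FORMAT  genotypes...
columns = ['chr1', '100', '.', 'A', 'G,T', '.', '.',   '.', 'GT',  '1', '2', '.', '0']
an, ac = countAlleles(columns)
print(an, ac)  # -> 3 [1, 1]  (three calls; one 'G', one 'T'; '0' is the ref allele)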
def flatten(d):
    """Recursive method to flatten a dict into a list

    Parameters:
        d (dict): dict

    Returns:
        res (list)
    """
    res = []  # Result list
    if isinstance(d, dict):
        for key, val in sorted(d.items()):
            res.extend(flatten(val))
    elif isinstance(d, list):
        res = d
    else:
        res = [d]
    return res
d1fe6c38c5c875799fa9793d5bea470a9c490153
183,742
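A usage sketch (nested dict invented); keys are visited in sorted order at each level:

nested = {'b': 2, 'a': {'y': [3, 4], 'x': 1}}
print(flatten(nested))  # -> [1, 3, 4, 2]  ('a' before 'b', 'x' before 'y')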
import six


def safe_filename(filename, extension=None):
    """
    Returns a filename with FAT32-, NTFS- and HFS+-illegal characters removed.

    Bytestring filenames are decoded as UTF-8, so the result is always a
    unicode string.

    >>> safe_filename(u'spam*?: 𐍃𐍀𐌰𐌼-&.txt')
    u'spam 𐍃𐍀𐌰𐌼-&.txt'
    """
    filename = filename if isinstance(filename, six.text_type) else filename.decode('utf8')
    if extension is not None:
        filename = "{}.{}".format(filename, extension)
    unsafe_chars = ':*?"<>|/\\\r\n'
    for c in unsafe_chars:
        filename = filename.replace(c, '')
    return filename
d3571ea8d272d1081fc132c204b1a8c5544c2cd2
115,202
import yaml


def get_rule_set(args):
    """Read and parse the config file at the location given when the linter
    script is run.

    Parameters
    ----------
    args : argparse.Namespace
        Arguments input when script is called from the console

    Returns
    -------
    dict
        Contains structured data from the parsed configuration file
    """
    with open(args.rules, 'r') as f:
        text = f.read()
    rule_set = yaml.safe_load(text)
    return rule_set
335b5e73c721f8ca276690444ede1f88fd33c2e9
644,712
def _is_plottable(structure):
    """Determines if a PDS4 structure is plottable.

    Plottable structures are either: (1) 1D arrays, or (2) Tables

    Parameters
    ----------
    structure : Structure
        PDS4 structure to check.

    Returns
    -------
    bool
        True if *structure* can be plotted, False otherwise.
    """
    plottable = False

    if structure.is_table():
        plottable = True
    elif structure.is_array() and structure.meta_data.num_axes() == 1:
        plottable = True

    return plottable
534529daf211c68bb08983918c463aaf1a8640bd
290,683
def _buffer_word(word, length, backward):
    """
    Simple helper that adds spaces to the end of the provided word so that
    it will look uniform.

    :param word: word (as a string) to buffer
    :param length: 1 less than the length to buffer to
    :param backward: Whether the provided string should be made backwards
        (used by the solver for convenience)
    :return: buffered word
    """
    if backward:
        word = word[::-1]
    while len(word) < length + 1:
        word += " "
    return word
80638e77045e96127a91084c394c6f210d2583f5
441,743
def parseVersion(stringVersion):
    """Parses a version string like "6.1.0.3" and returns a python list of
    ints like [ 6,1,0,3 ]"""
    m = "parseVersion:"
    # sop(m,"Entry. stringVersion=%s" % ( stringVersion ))
    listVersion = []
    parts = stringVersion.split('.')
    for part in parts:
        # sop(m,"Adding part=%s" % part)
        listVersion.append(int(part))
    # sop(m,"Exit. Returning listVersion=%s" % ( listVersion ))
    return listVersion
6510006c314dfbaed9397dc5b3e19e6bc13787a0
87,162
def get_local_images(docker_client):
    """Return a list of local image names (repo+tag).

    :param docker.client.APIClient docker_client: docker api client.
    :return list local_images: local image names (repo+tag).
    """
    local_images = []
    for image in docker_client.images():
        image_repo_tag = image.get('RepoTags')
        if image_repo_tag:
            local_images.extend(image_repo_tag)
    return local_images
f1f2003e5172144790841ee9b5781fa40143fa01
403,502
def sum_two_to_2020_product(expense_report_entries):
    """Find the product of the two entries in the expense report that sum
    to 2020."""
    for i, entry in enumerate(expense_report_entries):
        for j in range(i + 1, len(expense_report_entries)):
            if entry + expense_report_entries[j] == 2020:
                return entry * expense_report_entries[j]
    return None
dacf22295105ba2ad443a90420f785a8b497cced
390,666
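A usage sketch with the well-known Advent of Code 2020 day-1 sample input:

entries = [1721, 979, 366, 299, 675, 1456]
print(sum_two_to_2020_product(entries))  # -> 514579, since 1721 + 299 == 2020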
def lookup_from_kwargs_env(kwargs, env, attr, default=None):
    """
    __getitem__ from kwargs, env, or default.

    Args:
        kwargs (dict): kwargs dict
        env (Env): :py:mod:`Env` dict
        attr (str): attribute name
        default (obj): default value to return if not found in kwargs or env

    Returns:
        obj: kwargs.get(attr, env.get(attr, default))
    """
    return kwargs.get(attr, env.get(attr, default))
781367e396800e98c262a05ef6bb01668cc6f536
284,518
import inspect


def is_coroutine(coro):
    """Returns true if the argument is a coroutine or coroutine function."""
    return inspect.iscoroutine(coro) or inspect.iscoroutinefunction(coro)
c4b659fd469b4d50a93019001b83729de693a09f
31,221
import json


def parse_advice(json_response) -> str:
    """Get the advice from the JSON response."""
    json_slip = json.loads(json_response)
    advice = json_slip['slip']['advice']
    return advice
d4a8680602917032ecd8a463fafe083851d0446c
73,572
def get_mbean_name(location, existing_names, alias_helper):
    """
    Return the mbean name for the specified location.

    For unpredictable single folders:
      1. if an existing folder name is present, use that name as the mbean name.
      2. set the location's token to the mbean name.

    :param location: the location to examine
    :param existing_names: a list of existing names at the location
    :param alias_helper: the alias helper to use for name and path resolution
    """
    mbean_name = alias_helper.get_wlst_mbean_name(location)

    if alias_helper.requires_unpredictable_single_name_handling(location):
        if len(existing_names) > 0:
            mbean_name = existing_names[0]
        token = alias_helper.get_name_token(location)
        location.add_name_token(token, mbean_name)

    return mbean_name
c973073fd02c82179326d089ec4680012f7fd724
636,319
def differences(scansion: str, candidate: str) -> list:
    """Given two strings, return a list of index positions where the
    contents differ.

    >>> differences("abc", "abz")
    [2]
    """
    before = scansion.replace(" ", "")
    after = candidate.replace(" ", "")
    diffs = []
    for idx, tmp in enumerate(before):
        if before[idx] != after[idx]:
            diffs.append(idx)
    return diffs
3afa87616168b333ffd4ccb50002509292e1d271
308,699
import copy


def uvp_zscore(uvp, error_field='bs_std', inplace=False):
    """
    Calculate a zscore of a UVPSpec object using entry 'error_field'
    in its stats_array. This assumes that the UVPSpec object has already
    been mean subtracted using hera_pspec.uvpspec_utils.subtract_uvp().

    The resultant zscore is stored in the stats_array as
    error_field + "_zscore".

    Parameters
    ----------
    uvp : UVPSpec object

    error_field : str, optional
        Key of stats_array to use as z-score normalization.

    inplace : bool, optional
        If True, add zscores into input uvp, else make a copy of uvp and
        return with zscores.

    Returns
    -------
    if not inplace:
        uvp : UVPSpec object
    """
    if not inplace:
        uvp = copy.deepcopy(uvp)

    # check error_field
    assert error_field in list(uvp.stats_array.keys()), \
        "{} not found in stats_array".format(error_field)
    new_field = "{}_zscore".format(error_field)

    # iterate over spectral windows
    for i, spw in enumerate(uvp.spw_array):
        # iterate over polarizations
        for j, polpair in enumerate(uvp.polpair_array):
            # iterate over blpairs
            for k, blp in enumerate(uvp.blpair_array):
                key = (spw, blp, polpair)

                # calculate z-score: real and imag separately
                d = uvp.get_data(key)
                e = uvp.get_stats(error_field, key)
                zsc = d.real / e.real + 1j * d.imag / e.imag

                # set into uvp
                uvp.set_stats(new_field, key, zsc)

    if not inplace:
        return uvp
98ceb99b06a258d98e0dc913177186aba119bf08
526,090
def dictlist_lookup(dictlist, key, value):
    """
    From a list of dicts, retrieve those elements for which <key> is <value>.
    """
    return [el for el in dictlist if el.get(key) == value]
94e90b29c8f4034357be2b8683115f725c24b374
16,218
import re


def _extract_sentence_tags(tagged_sentence):
    """Given a tagged sentence, extracts a dictionary mapping tags to the
    words or phrases that they tag.

    Parameters
    ----------
    tagged_sentence : str
        The sentence with Medscan annotations and tags

    Returns
    -------
    tags : dict
        A dictionary mapping tags to the words or phrases that they tag.
    """
    p = re.compile('ID{([0-9,]+)=([^}]+)}')
    tags = {}

    # Iteratively look for all matches of this pattern
    endpos = 0
    while True:
        match = p.search(tagged_sentence, pos=endpos)
        if not match:
            break
        endpos = match.end()
        tags[match.group(1)] = match.group(2)
    return tags
2a913707d2f48736f70df2141779c00a0068ee54
636,939
def is_date_within_date_range(date_in_question, start, end) -> bool:
    """
    Return whether a given date falls within the range of two other dates.
    This function assumes start < end.
    """
    return start <= date_in_question <= end
3220cb8c49f6c7b18166032f9cd2ca400ffd1fea
416,797
import json


def json_load(file: str):
    """
    loads json file.

    :param file:
    :return: loaded json file as dict/list
    """
    with open(file) as f:
        return json.load(f)
71e2360a0dfeee339a612f13d4929954c7f8bcd0
480,896
def DecodeDecimal(hexinput):
    """
    Converts to decimal each byte in the input tuple, concatenates the
    result and converts the resulting string into decimal.
    """
    return int(''.join('{0}'.format(x) for x in hexinput), 10)
f400a5c8cc0e575bee48d16843cbce5b43f9a5a6
390,966
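A quick sketch (input tuples invented): each element is stringified, the pieces are concatenated, and the result is read back as a base-10 integer.

print(DecodeDecimal((1, 2, 3)))   # -> 123
print(DecodeDecimal((10, 0, 5)))  # -> 1005, i.e. '10' + '0' + '5'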
def api_error(api, error):
    """format error message for api error, if error is present"""
    if error is not None:
        return "calling: %s: got %s" % (api, error)
    return None
a9269a93d51e3203646886a893998ffec6488c95
705,403
def delay(lag):
    """Create a function that applies a delay to a trajectory."""
    def func(traj, length):
        return traj[lag:length + lag]
    return func
e65d6094ef44869a0b411fb9762a5fdb14271d56
180,169
def _add_comp_def(doc, comp_def):
    """Add the component definition while checking if it already exists

    :param doc: The SBOL Document object
    :param comp_def: Component definition
    :type doc: Document
    :type comp_def: ComponentDefinition
    :rtype: ComponentDefinition
    :return: The updated component definition
    """
    # use a distinct loop variable so comp_def is not shadowed
    existing_ids = [cd.identity for cd in doc.componentDefinitions]
    if comp_def.identity not in existing_ids:
        doc.addComponentDefinition(comp_def)
    else:
        comp_def = doc.getComponentDefinition(comp_def.identity)
    return comp_def
c718de9c0c42fd88de2a4eddd6485f8ca02a3c4f
590,506
def create_vocab(corpus):
    """
    This function creates a set of unique and preprocessed words from a corpus

    Arguments
        corpus : pandas df column or list-like

    Returns
        vocab : dictionary with the words as keys and a unique integer for
            each as values
    """
    vocab = set()
    for doc in corpus:
        vocab.update(set(doc))
    return {word: idx for idx, word in enumerate(vocab)}
ed6d02f11202044806aaca94ac56fa7400ab3f9a
470,310
def process_output(output):
    """
    Given the output file, filter out photons, electrons and positrons
    """
    if output is None:
        return (None, None, None)  # no photons, electrons or positrons

    photons = list()
    electrons = list()
    positrons = list()

    with open(output, "r") as f:
        for cnt, line in enumerate(f):
            if "GGG" in line:
                photons.append(line)
            elif "EEE" in line:
                electrons.append(line)
            elif "PPP" in line:
                positrons.append(line)

    return (photons if len(photons) > 0 else None,
            electrons if len(electrons) > 0 else None,
            positrons if len(positrons) > 0 else None)
201523c7a364cc523882aec7736fc9e00e016bb7
482,158
def type_rank(hierarchy, feature_type):
    """Return rank or code 1000 if feature_type not found."""
    if feature_type in hierarchy:
        return hierarchy[feature_type]["rank"]
    else:
        return 1000
9e33649db0d0d28aa3884cb3a2f3f243858cab5e
174,768
import torch
from typing import Optional


def torch_one_hot(target: torch.Tensor, num_classes: Optional[int] = None) -> torch.Tensor:
    """
    Compute one hot encoding of input tensor

    Args:
        target: tensor to be converted
        num_classes: number of classes. If :attr:`num_classes` is None,
            the maximum of target is used

    Returns:
        torch.Tensor: one hot encoded tensor
    """
    if num_classes is None:
        num_classes = int(target.max().detach().item() + 1)
    dtype, device = target.dtype, target.device
    target_onehot = torch.zeros(*target.shape, num_classes,
                                dtype=dtype, device=device)
    # out-of-place unsqueeze so the caller's tensor is not mutated
    # (the original used the in-place unsqueeze_, a subtle side effect)
    return target_onehot.scatter_(1, target.unsqueeze(1), 1.0)
bb2aee6012ead3dcd08ddcdce05a73ac3693fc1d
588,172
from typing import List


def athlete_sort(k: int, arr: List[List[int]]) -> List[List[int]]:
    """
    >>> athlete_sort(1, [[10, 2, 5], [7, 1, 0], [9, 9, 9],
    ...                  [1, 23, 12], [6, 5, 9]])
    [[7, 1, 0], [10, 2, 5], [6, 5, 9], [9, 9, 9], [1, 23, 12]]
    """
    arr.sort(key=lambda x: x[k])
    return arr
1655882b7760a705afcbbeba55f5051eaf7d448f
698,368
def create_row(username, results, release_count):
    """
    Returns string with format UserName,M1,M2,...,Mn.

    :param username: string
    :param results: queryset object
    :param release_count: int
    :return: Formatted string
    """
    row = "{}".format(username)
    count = 1
    while count <= release_count:
        result = results.filter(comment__release=count).first()
        if result:
            row += ",{}".format(result.mark)
        else:
            row += ",0"
        count += 1
    return row
fa649addb606ad2519078e41779eb7a8422d4466
172,467
def uniqueListWithOrder(lst):
    """
    Return a new list with duplicates removed, preserving the original order
    of the list. Each element in lst must be hashable.
    """
    # pylint: disable = simplifiable-condition
    used = set()
    return [x for x in lst if x not in used and (used.add(x) or True)]
f926ce6b39f1d5c22af010b9a903c6567ca6c2f3
258,947
def bBalancedPar(p):
    """check if parenthesis are balanced no matter the content"""
    l = 0
    for c in p:
        if c == ord(b"("):
            l += 1
        elif c == ord(b")"):
            l -= 1
        if l < 0:
            return False
    if l != 0:
        return False
    return True
9480891316cc9661afdaae5bf1a76915d712e34f
649,123
def remove_underscore(data_str: str) -> str:
    """
    A function to remove underscores from a string, returning the string
    with spaces instead.

    :param data_str:
    :return:
    """
    return data_str.replace('_', ' ')
edd902c0243c4fec2135de5d5f39265bc5486ad1
350,385
def show_graph(ticker):
    """
    Displays the graph based on ticker

    :param ticker: the ticker
    :return: two dicts setting visibility of graph and sentiment information
    """
    if not ticker:
        return {'display': 'none'}, {'display': 'none'}
    else:
        return {'display': 'block', 'height': '450px'}, {'display': 'block'}
eae748d4506c38011ff9171ac98f760c0c32d918
439,596
def _rhs(model_expression):
    """
    Get only the right-hand side of a patsy model expression.

    Parameters
    ----------
    model_expression : str

    Returns
    -------
    rhs : str
    """
    if '~' not in model_expression:
        return model_expression
    else:
        return model_expression.split('~')[1].strip()
c28bdfc7358faae3f052d37c212371792cbb4d02
210,911
def labels_trick(outputs, labels, criterion):
    """
    Labels trick calculates the loss only on labels which appear on the
    current mini-batch. It is implemented for classification loss types
    (e.g. CrossEntropyLoss()).

    :param outputs: The DNN outputs of the current mini-batch (torch Tensor).
    :param labels: The ground-truth (correct tags) (torch Tensor).
    :param criterion: Criterion (loss).
    :return: Loss value, after applying the labels trick.
    """
    # Get current batch labels (and sort them for reassignment)
    unq_lbls = labels.unique().sort()[0]
    # Create a copy of the labels to avoid in-place modification
    labels_copy = labels.clone()
    # Assign new labels (0, 1, ...) because we will select from the outputs
    # only the columns of labels of the current mini-batch
    # (outputs[:, unq_lbls]), so their "tagging" will be changed (e.g. column
    # number 3, which corresponds to label number 3, will become column
    # number 0 if labels 0, 1, 2 do not appear in the current mini-batch, so
    # its ground-truth should be changed accordingly to label #0).
    for lbl_idx, lbl in enumerate(unq_lbls):
        labels_copy[labels_copy == lbl] = lbl_idx
    # Calculate loss only over the heads that appear in the batch:
    return criterion(outputs[:, unq_lbls], labels_copy)
5f74a1b73b6903816ff12f1e63f403754ed4810e
654,358
def reshape_signal_batch(signal):
    """Convert the signal into a standard batch shape for use with
    cochleagram.py functions. The first dimension is the batch dimension.

    Args:
        signal (array): The sound signal (waveform) in the time domain.
            Should be either a flattened array with shape (n_samples,), a
            row vector with shape (1, n_samples), a column vector with shape
            (n_samples, 1), or a 2D matrix of the form [batch, waveform].

    Returns:
        array: **out_signal**: If the input `signal` has a valid shape,
            returns a 2D version of the signal with the first dimension as
            the batch dimension.

    Raises:
        ValueError: Raises an error if the input `signal` has invalid shape.
    """
    if signal.ndim == 1:  # signal is a flattened array
        out_signal = signal.reshape((1, -1))
    elif signal.ndim == 2:  # signal is a row or column vector
        if signal.shape[0] == 1:
            out_signal = signal
        elif signal.shape[1] == 1:
            out_signal = signal.reshape((1, -1))
        else:  # first dim is batch dim
            out_signal = signal
    else:
        raise ValueError('signal should be flat array, row or column vector, '
                         'or a 2D matrix with dimensions [batch, waveform]; '
                         'found %s' % signal.ndim)
    return out_signal
344ce1a9a695e99fa470a5d849afb40bc381c9df
706,145
def remove_chars(string: str) -> str:
    """Remove all characters except letters and spaces.

    Args:
        string: <str> input string sequence.

    Returns:
        string: <str> the string with only letters and spaces kept.

    Examples:
        >>> assert remove_chars('.tree1') == 'tree'
    """
    return ''.join(
        filter(lambda letter: letter.isalpha() or letter.isspace(), string)
    )
79baf3839c3c355052ac68bbd2bd793318023d58
608,172
import re


def _get_custom_display_values(df, translated_style):
    """Parses pandas.Styler style dictionary into a
    {(row, col): display_value} dictionary for cells whose display format
    has been customized.
    """
    # Create {(row, col): display_value} from translated_style['body']
    # translated_style['body'] has the shape:
    # [
    #   [  // row
    #     {  // cell or header
    #       'id': 'level0_row0' (for row header) | 'row0_col0' (for cells)
    #       'value': 1.329212
    #       'display_value': '132.92%'
    #       ...
    #     }
    #   ]
    # ]
    default_formatter = df.style._display_funcs[(0, 0)]

    def has_custom_display_value(cell):
        value = str(cell["value"])
        display_value = str(cell["display_value"])
        if value == display_value:
            return False

        # Pandas applies a default style to all float values, regardless
        # of whether they have a user-specified display format. We test
        # for that here.
        return default_formatter(value) != display_value

    cell_selector_regex = re.compile(r"row(\d+)_col(\d+)")
    header_selector_regex = re.compile(r"level(\d+)_row(\d+)")

    display_values = {}
    for row in translated_style["body"]:
        # row is a List[Dict], containing format data for each cell in the
        # row, plus an extra first entry for the row header, which we skip
        found_row_header = False
        for cell in row:
            cell_id = cell["id"]  # a string in the form 'row0_col0'
            if header_selector_regex.match(cell_id):
                if not found_row_header:
                    # We don't care about processing row headers, but as
                    # a sanity check, ensure we only see one per row
                    found_row_header = True
                    continue
                else:
                    raise RuntimeError('Found unexpected row header "%s"' % cell)
            match = cell_selector_regex.match(cell_id)
            if not match:
                raise RuntimeError('Failed to parse cell selector "%s"' % cell_id)

            # Only store display values that differ from the cell's default.
            # Use distinct names so the outer `row` loop variable isn't shadowed.
            if has_custom_display_value(cell):
                row_idx = int(match.group(1))
                col_idx = int(match.group(2))
                display_values[(row_idx, col_idx)] = str(cell["display_value"])

    return display_values
9a08d6ecd9d79e3c85ef8ae6053366388bcd43cb
573,779
def decode(data):
    """
    Normalize a "compressed" dictionary with special 'map' entry.

    This format looks like a way to reduce bandwidth by avoiding repeated
    key strings. Maybe it's a JSON standard with a built-in method to
    decode it? But since I'm REST illiterate, we decode it manually!

    For example, the following data object:

    data = {
        "244526" : [
            "Starter Songs",
            [ 134082068, 134082066, 134082069, 134082067 ],
            "1234-1234-1234-1234",
            false,
            null,
            null,
            null,
            null,
            1
        ],
        "map" : {
            "artwork_id" : 7,
            "description" : 6,
            "name" : 0,
            "public_id" : 4,
            "sort" : 8,
            "system_created" : 3,
            "tracks" : 1,
            "type" : 5,
            "uid" : 2
        }
    }

    will be decoded to:

    data = {
        "244526" : {
            "name": "Starter Songs",
            "tracks": [ 134082068, 134082066, 134082069, 134082067 ],
            "uid": "1234-1234-1234-1234",
            "system_created": false,
            "public_id": null,
            "type": null,
            "description": null,
            "artwork_id": null,
            "sort": 1
        }
    }
    """
    if 'map' not in data or type(data['map']) is not dict:
        return data
    keymap = {v: k for (k, v) in data['map'].items()}
    result = {}
    for k, v in data.items():
        if type(v) is list:
            result[k] = {keymap[i]: v[i] for i in range(len(v))}
    return result
c952e08e300dbf0e9574a1076e3342ce0c3939f6
685,242
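A small usage sketch with an invented payload; only list-valued entries are expanded, so the 'map' entry itself is dropped from the result:

payload = {
    "1": ["Mix", [101, 102], "uid-1"],
    "map": {"name": 0, "tracks": 1, "uid": 2},
}
print(decode(payload))
# -> {'1': {'name': 'Mix', 'tracks': [101, 102], 'uid': 'uid-1'}}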
def _start_of_option(value: str) -> bool:
    """Check if the value looks like the start of an option."""
    if not value:
        return False

    c = value[0]
    # Allow "/" since that starts a path.
    return not c.isalnum() and c != "/"
7ade3d78f7778e522c02e992d6f6cf9efca7eaf4
463,334
import six


def to_unicode(value):
    """
    Return the input value as a unicode string.

    The input value may be and will result in:
    * None -> None
    * binary string -> decoded using UTF-8 to unicode string
    * unicode string -> unchanged
    * list or tuple with items of any of the above -> list with converted items
    """
    if isinstance(value, (list, tuple)):
        list_uval = []
        for val in value:
            uval = to_unicode(val)
            list_uval.append(uval)
        return list_uval
    elif isinstance(value, six.binary_type):
        return value.decode('utf-8')
    elif isinstance(value, six.text_type):
        return value
    elif value is None:
        return None
    else:
        raise TypeError("Value of {0} cannot be converted to unicode: {1!r}".
                        format(type(value), value))
2b924a7d187c81d76929110b085988ffb175715e
611,337
def IoU(rect1, rect2):
    """
    Calculates IoU of two rectangles. Assumes rectangles are in ltrb
    (left, top, right, bottom) format. ltrb is also known as x1y1x2y2
    format, which is two corners.
    """
    intersection = max(min(rect1[2], rect2[2]) - max(rect1[0], rect2[0]), 0) * \
                   max(min(rect1[3], rect2[3]) - max(rect1[1], rect2[1]), 0)
    # A1 + A2 - I
    union = (rect1[2] - rect1[0]) * (rect1[3] - rect1[1]) + \
            (rect2[2] - rect2[0]) * (rect2[3] - rect2[1]) - \
            intersection
    return float(intersection) / max(union, .00001)
5b2ece3afb5fa22a3c42ae108467770c0c880e70
464,602
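A worked example with invented boxes: two 2x2 squares offset by one unit overlap in a 1x1 region, so IoU = 1 / (4 + 4 - 1).

rect1 = [0, 0, 2, 2]
rect2 = [1, 1, 3, 3]
print(IoU(rect1, rect2))  # -> 0.14285714..., i.e. 1/7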
def common(n):
    """
    Returns true if the node is in the common directory.
    """
    filename = n.filename.replace("\\", "/")

    if filename.startswith("common/") or filename.startswith("renpy/common/"):
        return True
    else:
        return False
a14b680b6a016b5d13385df07071091127d7b72e
626,240
import torch


def get_cam_mat(width, height, focal_length):
    """
    Get intrinsic camera matrix (tensor)
    """
    cam_mat = torch.eye(3)
    cam_mat[0, 0] = focal_length
    cam_mat[1, 1] = focal_length
    cam_mat[0, 2] = width / 2
    cam_mat[1, 2] = height / 2
    cam_mat = cam_mat.cuda()
    return cam_mat
e5580df1e3a6cfd68e1af757bae65d833de2faf4
226,767
def orm_query_keys(query):
    """Given a SQLAlchemy ORM query, extract the list of column keys
    expected in the result."""
    return [c["name"] for c in query.column_descriptions]
cf9dbe457d369e6da3f83c4cdf74595ad8dcbc83
678,954
import json


def parse_line(header, line):
    """Parse one line of data from the message file.

    Each line is expected to contain chunk key - comma - tile key (CSV style).

    Args:
        header (dict): Data to join with contents of line to construct a
            full message.
        line (string): Contents of the line.

    Returns:
        (string): JSON encoded data ready for enqueuing.

    Raises:
        (RuntimeError): if less than 2 columns found on a line.
    """
    msg = {}
    msg['job_id'] = header['job_id']
    msg['upload_queue_arn'] = header['upload_queue_url']
    msg['ingest_queue_arn'] = header['ingest_queue_url']

    tokens = line.split(',')
    if len(tokens) < 2:
        raise RuntimeError('Bad message line encountered.')

    msg['chunk_key'] = tokens[0].strip()
    msg['tile_key'] = tokens[1].strip()

    return json.dumps(msg)
452dd80f84a35f6e3532330155bade7f424c102a
9,179
import copy


def merge_to_panoptic(detection_dicts, sem_seg_dicts):
    """
    Create dataset dicts for panoptic segmentation, by merging two dicts
    using "file_name" field to match their entries.

    Args:
        detection_dicts (list[dict]): lists of dicts for object detection or
            instance segmentation.
        sem_seg_dicts (list[dict]): lists of dicts for semantic segmentation.

    Returns:
        list[dict] (one per input image): Each dict contains all (key, value)
        pairs from dicts in both detection_dicts and sem_seg_dicts that
        correspond to the same image. The function assumes that the same key
        in different dicts has the same value.
    """
    results = []
    sem_seg_file_to_entry = {x["file_name"]: x for x in sem_seg_dicts}
    assert len(sem_seg_file_to_entry) > 0

    for det_dict in detection_dicts:
        dic = copy.copy(det_dict)
        dic.update(sem_seg_file_to_entry[dic["file_name"]])
        results.append(dic)
    return results
e28916b97bf1955f06a6e24208dd6d47422e23c4
682,440
from typing import List
import re


def find_url(url_raw: List[str]) -> List[str]:
    """Function to find all URL matching a specific regex in a list of URL.

    Here, this function will fetch all URL beginning with:
    "https://geodatamine.fr/dump/" and continuing with
    "t3xt-4nd-numb3rs.text". This allows to fetch the direct URL of the
    files to download.

    Args:
        url_raw (str): The list of URL from which to find a corresponding URL.

    Returns:
        List[str]: The list of URL corresponding to the given format.
    """
    regex = re.compile(r"^https:\/\/geodatamine\.fr\/dump\/[\w-]+geojson\.[A-Za-z]+$")
    url = [str(link) for link in url_raw if regex.match(str(link))]
    return url
2f38f161ccb11c5cfde08dd11a3d3c08ae84c516
74,588
def generateQueryRecommendations(dataFrame, dimensionAttributes, measureAttributes):
    """Function that returns a list of query recommendations to the user.

    NOTE: This function assumes the underlying dataframe is called 'dataBase'

    Args:
        dataFrame: a pandas dataframe
        dimensionAttributes: list of dimension attributes
        measureAttributes: list of measure attributes

    Return:
        A list of strings, each representing a different query on the dataBase
    """
    generatedQueryRecommendations = []
    for dim in dimensionAttributes:
        dimValues = dataFrame[dim].unique()
        for value in dimValues:
            value = value.replace("'", "''")
            query = '''SELECT * FROM dataBase WHERE [''' + dim + "] = " + "'" + value + "'"
            generatedQueryRecommendations.append(query)
    return generatedQueryRecommendations
4b6e53caa57c718bfbddddd090fa085a60df88d6
454,139
def _need_exponent_sign_bit_check(max_value):
    """Checks whether the sign bit of exponent is needed.

    This is used by quantized_po2 and quantized_relu_po2.

    Args:
        max_value: the maximum value allowed.

    Returns:
        An integer. 1: sign_bit is needed. 0: sign_bit is not needed.
    """

    if max_value is not None:
        if max_value < 0:
            raise ValueError("po2 max_value should be non-negative.")
        if max_value > 1:
            # if max_value is larger than 1,
            # the exponent could be positive and negative.
            # e.g., log(max_value) > 0 when max_value > 1
            need_exponent_sign_bit = 1
        else:
            need_exponent_sign_bit = 0
    else:
        # max_value is not specified, so we cannot decide the range.
        # Then we need to put sign_bit for exponent to be safe
        need_exponent_sign_bit = 1
    return need_exponent_sign_bit
a4dc3bccc8a7e8a7f4f7af2784eccd4ebb74f393
288,627
def merge_dicts(*my_dicts):
    """Combines a bunch of dictionaries together, later dictionaries taking
    precedence if there is a key conflict."""
    return dict((k, v) for d in my_dicts for (k, v) in d.items())
77ecdcef57866b1307cbaabdfa578971be70b811
607,656
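A quick usage sketch (dicts invented): the later dict wins on the conflicting key 'a'.

print(merge_dicts({'a': 1, 'b': 2}, {'a': 10, 'c': 3}))
# -> {'a': 10, 'b': 2, 'c': 3}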
import json


def build_event(body_data, query_params=dict(), path_params=dict()):
    """
    Builds AWS Lambda event using the provided data

    Args:
        body_data (dict): data loaded to the body of event
        query_params (dict): data loaded to query params of event
        path_params (dict): data loaded to path params of event

    Returns:
        event (dict): AWS Lambda event
    """
    body = json.dumps(body_data) if body_data else None
    event = {
        'body': body,
        'queryStringParameters': query_params,
        'pathParameters': path_params
    }
    return event
ca9353d79f630ec9e58baa6942c2e319c1e7fa60
238,279
def quote_normalization(question):
    """ Normalize all usage of quotation marks into a separate \" """
    new_question, quotation_marks = [], ['“', '”', '``', "''", "‘‘", "’’", "'", '"', '`', '‘', '’']
    for idx, tok in enumerate(question):
        for mark in quotation_marks:
            tok = tok.replace(mark, "\"")
        new_question.append(tok)
    return new_question
da5b62e821fc203868fb1c5d76460de0904f9efe
180,507
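A usage sketch with invented tokens; note that even apostrophes are rewritten, since ' is in the list of quotation marks.

print(quote_normalization(['``hello', "world''"]))  # -> ['"hello', 'world"']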
def __points_to_dict(points):
    """transform list of [x, y] into a dict() where {x: y}"""
    return {p[0]: p[1] for p in points}
83c7445e1c4c2c034f609f19da0d9638b3d9dbf3
633,087
def confusion_matrix(classify=lambda document: False, documents=[(None, False)]):
    """
    Returns the performance of a binary classification task (i.e., predicts
    True or False) as a tuple of (TP, TN, FP, FN):

    - TP: true positives = correct hits,
    - TN: true negatives = correct rejections,
    - FP: false positives = false alarm (= type I error),
    - FN: false negatives = misses (= type II error).

    The given classify() function returns True or False for a document.
    The list of documents contains (document, bool)-tuples for testing,
    where True means a document that should be identified as True by
    classify().
    """
    TN = TP = FN = FP = 0
    for document, b1 in documents:
        b2 = classify(document)
        if b1 and b2:
            TP += 1  # true positive
        elif not b1 and not b2:
            TN += 1  # true negative
        elif not b1 and b2:
            FP += 1  # false positive (type I error)
        elif b1 and not b2:
            FN += 1  # false negative (type II error)
    return TP, TN, FP, FN
6521e24c3283fff6409609f9ace760b5d318c8f3
661,263
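A usage sketch with an invented keyword classifier and a tiny labeled set:

docs = [('spam offer', True), ('hello friend', False),
        ('more spam', True), ('spam-free note', False)]
tp, tn, fp, fn = confusion_matrix(lambda d: 'spam' in d, docs)
print(tp, tn, fp, fn)  # -> 2 1 1 0 ('spam-free note' is the false positive)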
def _CreateClassfileArgs(class_files, exclude_suffix=None):
    """Returns a list of files that don't have a given suffix.

    Args:
        class_files: A list of class files.
        exclude_suffix: Suffix to look for to exclude.

    Returns:
        A list of files that don't use the suffix.
    """
    result_class_files = []
    for f in class_files:
        if exclude_suffix:
            if not f.endswith(exclude_suffix):
                result_class_files += ['--classfiles', f]
        else:
            result_class_files += ['--classfiles', f]
    return result_class_files
d64005842a3af07909c50b961ef18f8646d05231
428,587
def to_argb_int(diffuse_colour) -> int:
    """Converts an RGBA array to an ARGB integer"""
    diffuse_colour = diffuse_colour[-1:] + diffuse_colour[:3]
    diffuse_colour = [int(val * 255) for val in diffuse_colour]
    return int.from_bytes(diffuse_colour, byteorder="big", signed=True)
0a2d5e30c181d191c3ead3bd4ffc1c007163cc92
187,195
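A worked example with an invented colour: opaque red [1.0, 0.0, 0.0, 1.0] reorders to ARGB bytes [255, 255, 0, 0], which as a signed 32-bit big-endian integer is -65536.

print(to_argb_int([1.0, 0.0, 0.0, 1.0]))  # -> -65536, i.e. 0xFFFF0000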
def getminmax(coords):
    """
    Given an iterable of (x, y), return minx, miny, maxx, maxy.
    """
    i = iter(coords)
    try:
        # next(i) rather than the Python-2-only i.next()
        x, y = next(i)
        minx = maxx = x
        miny = maxy = y
    except StopIteration:
        return None, None, None, None
    for x, y in i:
        if x < minx:
            minx = x
        if x > maxx:
            maxx = x
        if y < miny:
            miny = y
        if y > maxy:
            maxy = y
    return minx, miny, maxx, maxy
451b9041612d1f5ad4de41b335d7fb15d8fee386
581,330
import struct


def int2vector_pack(seq, pack=struct.pack):
    """
    Given a sequence of integers, pack them into the serialized form.

    An int2vector is a type used by the PostgreSQL catalog.
    """
    # struct.pack returns bytes, so the ljust fill character must be bytes too
    return pack("!%dh" % (len(seq),), *seq).ljust(64, b'\x00')
41e2b494da1cf44effdbe6c02cb870804b724695
376,417
def build_catalog_url(account: str, image_digest: str) -> str:
    """
    Returns the URL as a string that the policy engine will use to fetch the
    loaded analysis result from the catalog

    :param account:
    :param image_digest:
    :return:
    """
    return "catalog://{}/analysis_data/{}".format(account, image_digest)
cdcdd003e05c5d51ddcd7e05a6279897a226fd59
685,519
def comp_cols(comps):
    """Return columns corresponding to the average composition

    :comps: str
    :returns: list str
    """
    return ["<comp({})>".format(c) for c in comps]
ab8afc1c69d4e26f1febbd8791fd3825d002a14d
606,474
import logging


def timestamp_formatter() -> logging.Formatter:
    """Returns a logging formatter which outputs in the style of
    ``YEAR-MONTH-DAY HOUR:MINUTE:SECOND.MILLISECOND LEVEL MESSAGE``.
    """
    return logging.Formatter(
        fmt="%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
    )
05e1d5878e669a0ae4bd8c5d6aef875967c9b206
531,877
import codecs


def read_file(filename):
    """
    Read a utf8 encoded text file and return its contents.
    """
    with codecs.open(filename, 'r', 'utf8') as f:
        return f.read()
0e4b7da7e056c5603146ea2feebff1d9c6f61cd3
455,464
import ast


def validate_python_file(filename):
    """ Determine if a Python file is valid

    Args:
        filename (:obj:`str`): path to Python file

    Returns:
        :obj:`tuple`:

            * nested :obj:`list` of :obj:`str`: nested list of errors
            * nested :obj:`list` of :obj:`str`: nested list of warnings
    """
    try:
        with open(filename, 'rb') as file:
            ast.parse(file.read())
        return [], []
    except (SyntaxError, ValueError) as exception:
        # ast.parse raises SyntaxError for invalid code (not a ValueError
        # subclass) and ValueError for e.g. null bytes in the source
        return [[str(exception)]], []
daad336486d39c40fa8ad525a40b04d2b042358e
332,949
def add_suffix(fname, suffix):
    """Adds a suffix to a file name."""
    # rsplit so names with more than one dot (e.g. 'a.b.txt') don't break
    name, extension = fname.rsplit(".", 1)
    return name + "_" + suffix + "." + extension
53e8772bd5b974635010974d6373fbf5816ae520
18,709
import re


def replace_vars(s, values):
    """Replace all occurrences of variables in the given string with values"""
    retval = s
    # .items() rather than the Python-2-only .iteritems()
    for v_key, v_value in values.items():
        replace = re.compile(re.escape('{{' + v_key + '}}'), re.IGNORECASE)
        retval = re.sub(replace, v_value, retval)
    return retval
2dfef62d1db2d5c84f10d610893551c2f1b97a3d
400,058
def font_color(label, f_color):
    """
    Changes font color in Message object attached to GUI

    :param label: font used by Message in Tkinter GUI
    :param f_color: font color the user selects
    :return: label with new font color
    """
    label.config(fg=f_color)
    return label
40bd7af9ba5b558f11c3691901dca804916fb07c
55,679
def names_from_results(response):
    """Returns card names from results as a list"""
    return [x["name"] for x in response.json()["results"]]
c879b2cdb8f78150e50be3e115a5103992e93b79
47,450
def get_repository_name(repository) -> str:
    """Return the name of the repository for use in the frontend."""
    name = None
    if repository.repository_manifest.name:
        name = repository.repository_manifest.name
    else:
        name = repository.data.full_name.split("/")[-1]

    name = name.replace("-", " ").replace("_", " ").strip()
    if name.isupper():
        return name

    return name.title()
402713e7d433442e26f49d01d8dd13bd5a095514
114,620
def max_cw(l):
    """Return max value of a list."""
    a = sorted(l)
    return a[-1]
21b079b5c3dd4cb7aba55588d38ba57a058bbb97
682,539
def doubleNear(a, b, tol=0.0000000001):
    """
    Tests whether two floats are near, within a specified tolerance
    """
    return abs(float(a) - float(b)) < tol
7ab16ab19874e3e993eb86629699e5f4cd1b0cfd
404,020
import json


def load_data(filename):
    """Load source file."""
    with open(filename, "r") as myfile:
        text = myfile.read()
    data = json.loads(text)
    return data
82271322f67ed1bedb4dd43fa6c06f99cf3594ea
307,782
def codonify(seq):
    """
    input: a nucleotide sequence (not necessarily a string)
    output: a list of codons
    """
    seq = str(seq)
    l = len(seq)
    return [seq[i:i + 3] for i in range(0, l, 3)]
64852cd1830b29ea53359a9b103bc5a7a135b730
307,533
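A quick usage sketch (sequences invented); note a trailing partial codon is kept as-is:

print(codonify('ATGGCC'))  # -> ['ATG', 'GCC']
print(codonify('ATGGC'))   # -> ['ATG', 'GC']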
def listar_aeropuertos_sin_salida(vuelos: dict) -> list:
    """List airports with no departures.

    Parameters:
        vuelos (dict): A dictionary of dictionaries with the flight
            information.

    Returns:
        list: A list of strings with the codes of the airports from which
            no flights departed.
    """
    aerosinvuelo = []
    destinos = []
    origenes = []
    # vuelos = {'codigovuelo': {'aerolinea': name, 'retraso': minutes}}
    for vuelo in vuelos:
        origen = vuelos[vuelo]['origen']
        destino = vuelos[vuelo]['destino']
        origenes.append(origen)
        destinos.append(destino)
    for destino in destinos:
        if destino not in origenes and destino not in aerosinvuelo:
            aerosinvuelo.append(destino)
    return aerosinvuelo
9424cb245e4db61f44088d11a43e9e37c27c4ea0
461,983