Dataset schema: content — string (39 to 9.28k chars) · sha1 — string (40 chars) · id — int64 (8 to 710k)
import torch


def seq_mask_by_lens(lengths: torch.Tensor, maxlen=None, dtype=torch.bool):
    """Given sequence lengths, return a mask over the sequences.

    Example:
        input:  lengths = torch.tensor([4, 5, 1, 3])
        output: tensor([[ True,  True,  True,  True, False],
                        [ True,  True,  True,  True,  True],
                        [ True, False, False, False, False],
                        [ True,  True,  True, False, False]])
    """
    if maxlen is None:
        maxlen = lengths.max()
    # int() accepts both a Python int and a zero-dim tensor for maxlen
    row_vector = torch.arange(start=0, end=int(maxlen), step=1)
    matrix = torch.unsqueeze(lengths, dim=-1)
    mask = row_vector < matrix
    # .type() is not in-place; the cast result must be reassigned
    mask = mask.type(dtype)
    return mask
531ec65780f53bcefa7a89acdad82a9c23672904
326,189
def parse_info_field(field):
    """
    Parses a vcf info field into a dictionary

    Args:
        field (str): info field from vcf

    Returns:
        Dict: {name: [val1, val2]}
    """
    d = {}
    for entry in field.split(";"):
        try:
            # split only on the first '=' so values containing '=' survive
            key, value = entry.split("=", 1)
            d[key] = value.split(",")
        except ValueError:
            d[entry] = []
    return d
9d3f61f54b63a1c390b8f076a85d49167e71bb47
396,302
def inc_dec_ing(x, y):
    """
    Builds the Korean phrase describing the change at market close.

    params:
        x: change versus the previous trading day (won)
        y: change versus the previous trading day (%)
    return:
        result:
            1) on a gain:  "<x> won (<y>%), rising"
            2) on a loss:  "<x> won (<y>%), falling"
            3) unchanged:  "no change"
    """
    result = ''
    if float(x) > 0:
        result = x + '์›' + '(' + y + '%) ์ƒ์Šนํ•˜๋ฉฐ'
    elif float(x) < 0:
        result = x + '์›' + '(' + y + '%) ํ•˜๋ฝํ•˜๋ฉฐ'
    elif float(x) == 0:
        result = '๋ณ€๋™์ด ์—†์œผ๋ฉฐ'
    return result
17799e738c03c3f811a01db02d7264f87f9224de
445,426
def _idempotent_append(element, data):
    """Append to a list if that element is not already in the list.

    :param element: The element to add to the list.
    :param data: `List` the list to add to.
    :returns: `List` the list with the element in it.
    """
    if element not in data:
        data.append(element)
    return data
50e1ce9aff417b04022dd46020ec4ece247d78f9
315,537
import time


def neg(value):
    """Return a negative copy of the value"""
    time.sleep(2.0)
    return -value
717635b890262657b9c95dfa298d0824bc0ba6ea
591,375
def rad2rad(ang=1):
    """Dummy converter."""
    return ang
334bcc173d6f5e4cf2f5a9ac8c87ffdaa024f4b3
323,853
from typing import Dict


def reorder_components(content: Dict) -> Dict:
    """Sort components properties in required order.

    Arguments
    ---------
    content
        OpenAPI content to be cleaned up.

    Returns
    -------
    Sorted OpenAPI content's components.
    """
    key_section = 'components'
    if key_section in content and isinstance(content[key_section], dict):
        target = {}
        for comp_prop in sorted(content[key_section].keys()):
            target.setdefault(comp_prop, {})
            # Sort components' subproperties
            for key in sorted(content[key_section][comp_prop].keys()):
                target[comp_prop].setdefault(
                    key, content[key_section][comp_prop][key])
        content[key_section] = target
    return content
2735c939fcfb05e1eb21da939cd914fc58413c65
264,787
def clone_model(model, **new_values):
    """Clones the entity, adding or overriding constructor attributes.

    The cloned entity will have exactly the same property values as the
    original entity, except where overridden. By default, it will have no
    parent entity or key name, unless supplied.

    Args:
        model: datastore_services.Model. Model to clone.
        **new_values: dict(str: *). Keyword arguments to override when
            invoking the cloned entity's constructor.

    Returns:
        datastore_services.Model. A cloned, and possibly modified, copy of
        self. Subclasses of BaseModel will return a clone with the same type.
    """
    # Reference implementation: https://stackoverflow.com/a/2712401/4859885.
    cls = model.__class__
    model_id = new_values.pop('id', model.id)
    props = {k: v.__get__(model, cls) for k, v in cls._properties.items()}  # pylint: disable=protected-access
    props.update(new_values)
    return cls(id=model_id, **props)
ed668632c8917ad685b86fb5c71146be7c9b3b96
709,408
def mutateScript(context, script, mutator):
    """Apply `mutator` function to every command in the `script` array of
    strings. The mutator function is called with `context` and the string to
    be mutated and must return the modified string. Sets `context.tmpBase`
    to a path unique to every command."""
    previous_tmpbase = context.tmpBase
    i = 0
    mutated_script = []
    for line in script:
        number = ""
        if len(script) > 1:
            number = "-%s" % (i,)
            i += 1
        context.tmpBase = previous_tmpbase + number
        mutated_line = mutator(context, line)
        mutated_script.append(mutated_line)
    return mutated_script
e6aa1d1c021505f67e5025b6100bed43bd03d44c
39,697
import curses


def create_vline_window(y_start, x_start, height):
    """Create a window with a single vline and return prepared window."""
    line = curses.newwin(
        height + 1, 1,
        y_start, x_start,
    )
    line.vline(0, 0, "#", height)
    line.noutrefresh()
    return line
1d364a1239c336e25aee412c7426508366ca313e
314,962
def count_chars(text: str, include_spaces=False) -> int:
    """
    Count number of characters in a text.

    Arguments:
    ---------
    text: str
        Text whose characters are to be counted.
    include_spaces: bool, default=False
        Count spaces as characters.

    Returns:
    -------
    Number of Characters: int
        Number of characters in the text.
    """
    if not isinstance(text, str):
        raise TypeError("Text must be in string format not {}".format(type(text)))
    if not include_spaces:
        text = text.replace(" ", "")  # drop spaces before counting
    return len(text)
bc29985c34eec5c3d79f5d1e1c6da0208ddc82e3
301,015
from datetime import datetime


def get_today_date(the_date=None):
    """Return today's date (no time) as string."""
    # A default of datetime.now() in the signature is evaluated only once,
    # at import time, so the current date is resolved at call time instead.
    if the_date is None:
        the_date = datetime.now()
    return the_date.strftime("%Y-%m-%d")
e506d2735e66c0bfaa4fd5572147c0669a935190
614,108
import math


def is_prime(i: int) -> bool:
    """Tests if number is prime or not"""
    if i <= 1:
        return False
    _max = int(math.floor(math.sqrt(i)))
    for j in range(2, _max + 1):
        if not i % j:
            return False
    return True
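A quick usage sketch (not part of the original snippet), assuming is_prime as defined above is in scope:

# hypothetical usage: list the primes below 20
print([n for n in range(20) if is_prime(n)])  # [2, 3, 5, 7, 11, 13, 17, 19]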
477aeb6bbbf46377aabffe2da32a17bee2dd6f9d
470,689
from datetime import datetime


def convert_timestamp(ts):
    """Convert Windows SYSTEMTIME structure to a datetime object."""
    # datetime takes microseconds, hence wMilliseconds * 1000
    return datetime(ts.wYear, ts.wMonth, ts.wDay, ts.wHour, ts.wMinute,
                    ts.wSecond, ts.wMilliseconds * 1000)
d0d20900e2b83ebdbd707ea572482c3d0d804e02
455,627
def load_users(ids_file):
    """
    Load all user IDs into a list.

    Note: The input file must be a plain text file where each line contains
    only a single unique Twitter user ID.

    Parameters:
    -----------
    - ids_file (str) : the full path to the Twitter user IDs file that you'd
      like to load

    Returns:
    ----------
    - users (list) : a list of Twitter user IDs
    """
    with open(ids_file, 'r') as f:
        users = [x.strip('\n') for x in f.readlines()]
    return users
9d1e5b3281a0d1633228887034137bdbe0aec74c
136,795
def selection_sort(arr):
    """ Selection Sort
        Complexity: O(n^2)
    """
    for i in range(len(arr)):
        minimum = i
        for j in range(i + 1, len(arr)):
            # "Select" the correct value
            if arr[j] < arr[minimum]:
                minimum = j
        # Using a pythonic swap
        arr[minimum], arr[i] = arr[i], arr[minimum]
    return arr
d1fa69696cdcedf18c8864490a23bfc82b3f3464
273,503
def map_from_to(x, a, b, c, d):
    """Maps a value x from the range a-b to the range c-d."""
    return (x - a) / (b - a) * (d - c) + c
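A short worked example (assumed usage, not from the source): the midpoint of 0-10 maps to the midpoint of 0-100.

# hypothetical call
map_from_to(5, 0, 10, 0, 100)  # -> 50.0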
6ea6c68c09c0cbd76d6443287229165c1918fd3d
191,138
from collections import OrderedDict


def dict_to_ordered_dict(dict_in, key, reverse=True):
    """
    Takes in a dict and returns an OrderedDict object sorted by a given key
    (in the form of a lambda expression).

    :param dict dict_in: a dictionary to be turned into an ordered dictionary.
    :param key: lambda expression.
    :param bool reverse: whether to sort the dict in reverse.
    :return: the dictionary input, ordered by the given key.
    :rtype: collections.OrderedDict
    """
    return OrderedDict(sorted(dict_in.items(), key=key, reverse=reverse))
10b94e1338f5bfa45f3f41eba0a71b411299e35b
405,159
def _count_set_bits(i):
    """
    Counts the number of set bits in a uint (or a numpy array of uints).
    """
    i = i - ((i >> 1) & 0x55555555)
    i = (i & 0x33333333) + ((i >> 2) & 0x33333333)
    return (((i + (i >> 4) & 0xF0F0F0F) * 0x1010101) & 0xffffffff) >> 24
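A brief usage sketch (assumed, not from the source) of this SWAR popcount on plain Python ints:

# hypothetical calls
_count_set_bits(0b1011)  # -> 3 (three bits set)
_count_set_bits(0xFF)    # -> 8 (all eight low bits set)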
f3f4b2ac777f3a7eadcd6c92306b143a57942035
314,239
def convertAtom(oldAF, newAF, atom):
    """Convert an atom from one AtomFactory to another.

    @param oldAF : The old AtomFactory to which atom belongs
    @param newAF : The new AtomFactory
    @param atom  : The atom to convert
    @return      : The converted atom
    @raise Exception : If atom cannot be found in oldAF
    """
    o = oldAF.get_object(atom)
    if o not in newAF:
        # Python 3 raise syntax (the original used the Python 2 statement form)
        raise Exception("%r not in newAF" % o)
    return newAF[o]
47b33e7bf9d3e9bc1728d16d639be27004f517db
444,500
import base64
import json


def encode_additional_data(additional_data):
    """Encodes additional_data object"""
    if isinstance(additional_data, dict):
        return base64.b64encode(json.dumps(additional_data).encode()).decode('utf-8')
    raise ValueError("additional_data must be of type dict")
a053efb12b2e8be5f1ebc01293ee925d6217c50e
126,552
def check_for_non_alpha_character(word):
    """Check if the word starts with an alphabetic character"""
    if not word[0].isalpha():
        raise ValueError
    return word
88babc047f59a69bc08e4f5b2603e05572768065
428,519
import difflib


def _diff(a, b):
    """
    diff of strings; returns a generator, 3 lines of context by default,
    - or + for changes
    """
    return difflib.unified_diff(a, b)
14d2821e214ed9b1e3203ecdfe3ded7b8e9018d7
89,022
def _get_bin_sum(bin_result):
    """
    Get the [min, max] of the sample; best-fit scatter and error.
    """
    min_val = bin_result['samples'].min()
    max_val = bin_result['samples'].max()
    sig = bin_result['sig_med_bt']
    err = bin_result['sig_err_bt']
    return min_val, max_val, sig, err
7f7d7a9705fb0e4da80689e1c98cb159e90e3b32
650,108
def ordenar_alinhamento(elemento_frasico, alinhamento):
    """
    Orders the aligned pairs according to the original sentences.

    :param elemento_frasico: list of tuples with the information
        (word/gesture, lemma, part-of-speech tag) of the sentence element
    :param alinhamento: dictionary with the aligned words/gestures and their
        part-of-speech tags
    :return: list with the aligned words/gestures ordered as they appear in
        the original sentence.
    """
    alinhamento_ordenado = []
    for t in elemento_frasico:
        for k, v in alinhamento.items():
            if k == t[1]:
                alinhamento_ordenado.append(v)
    return alinhamento_ordenado
6994fdc7576d2e1820f6edea25046c03f1589eaf
690,493
def mock_get_default_site(self):
    """Simulates actual metadata for the Default site.

    The `contentUrl` is always present and its value is an empty string.
    """
    return [{'contentUrl': ''}]
694144f9940215bceba60e45fb1786706d380964
222,471
import math


def stats(data):
    """
    Computes basic distribution statistics from the list *data*.  Returns a
    tuple ``(m, v, sd, se)`` where *m* is the mean, *v* the population
    (biased) variance, *sd* the corresponding standard deviation and *se*
    the standard error ``sd / sqrt(n)``.

    .. note:: Returns a tuple of zeros if data is empty.
    """
    n = len(data)
    if not n:
        return (0., 0., 0., 0.)

    SUM = 0.
    SUM2 = 0.
    for i in data:
        SUM += i
        SUM2 += i ** 2

    m = SUM / n
    v = SUM2 / n - m ** 2          # population variance (divides by n)
    sd = math.sqrt(math.fabs(v))   # fabs guards against tiny negative rounding
    se = sd / math.sqrt(n)

    return (m, v, sd, se)
8e9f6c11c17bbc3dad96f5ea2d2597de2bbd8701
310,027
def import_backend(path):
    """Import a backend class by dotted path."""
    module_name, class_name = path.rsplit(".", 1)
    module = __import__(module_name, fromlist=[class_name])
    return getattr(module, class_name)
13dc496dab343d75b97a40adeda4e378f0627885
141,359
from datetime import datetime


def get_eso_file_timestamp(timestamp):
    """Return date and time of the eso file generation as a Datetime."""
    timestamp = timestamp.split("=")[1].strip()
    return datetime.strptime(timestamp, "%Y.%m.%d %H:%M")
22049ef27409fc9d9b4a0d3b91fb4e62c75a2558
487,593
def split(str, num):
    """Split strings every 'num' characters."""
    return [str[start: start + num] for start in range(0, len(str), num)]
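A usage sketch (assumed, not from the original): chunking a string into groups of three characters; the final chunk may be shorter than num.

# hypothetical call
split("abcdefgh", 3)  # -> ['abc', 'def', 'gh']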
2bfeed1f5dcc783aef33351895440c8d032bfaeb
446,052
def pad_chunk_columns(chunk):
    """Given a set of items to be inserted, make sure they all have the
    same columns by padding columns with None if they are missing."""
    columns = set()
    for record in chunk:
        columns.update(record.keys())
    for record in chunk:
        for column in columns:
            record.setdefault(column, None)
    return chunk
2e5d91ad03ad613b55bcaea97fd8c0785eec977f
7,756
from bs4 import BeautifulSoup


def strip_html(value):
    """Strip HTML from a string."""
    if value is None:
        return None
    return BeautifulSoup(value, "html.parser").text
100bbfdc82550bd367cd66a6cf2429e2bffd2dd8
320,460
def getProcessRequestRedirect(entity, _):
    """Returns the redirect for processing the specified request entity.
    """
    result = '/%s/process_request/%s/%s' % (
        entity.role, entity.scope_path, entity.link_id)
    return result
f90d101c230a4c3ea44e78ca922b011ba4f7589f
474,544
def lda_topic_top_words(lda_mod, n_top_words=6):
    """
    Extract the top n words for each of the K topics, and return the results
    as a dictionary:
    - keys are the K topics
    - 2 values: the first value is the list of top words, the second value is
      the list of corresponding word probabilities

    Input parameters:
    ----------------
    lda_mod     : gensim lda model
    n_top_words : number of top words per topic to be extracted
    """
    # extract the n top words for each topic and their probabilities,
    # and save them as a dictionary
    mydict = {}
    for t in range(lda_mod.num_topics):
        ws = []
        w_probs = []
        for n in range(n_top_words):
            w, w_prob = lda_mod.show_topic(t, n_top_words)[n]
            ws.append(w)
            w_probs.append(w_prob)
        mydict[t] = ws, w_probs
    return mydict
7c20da76dc2ea01bdbba9fed6355035977cb0e2e
295,684
def find_multiple_measurement(column_name_list: list,
                              spec_dict: dict,
                              delimiter: str = '!!') -> list:
    """Check if any column has multiple measurements associated with it.

    Args:
        column_name_list: List of all columns after dropping ignored columns.
        spec_dict: Dict obj containing configurations for the import.
        delimiter: delimiter separating tokens within a single column name string.

    Returns:
        List of columns that have multiple measurements associated with them.
    """
    ret_list = []
    # tokenList = getTokensListFromColumnList(columnNameList, delimiter)
    for column_name in column_name_list:
        if 'measurement' in spec_dict:
            temp_flag = False
            for token in column_name.split(delimiter):
                if token in spec_dict['measurement']:
                    # a second measurement token flags the column
                    if temp_flag:
                        ret_list.append(column_name)
                    temp_flag = True
    return ret_list
9944221f80910f3c9fc504ae7e53372c33f4ddc0
217,946
import hashlib


def compute_file_hash(file_path, alg='md5'):
    """
    Computes the file's hash based on the specified algorithm and its given path

    Supported algorithms::

        * md5

    :param file_path: the file path from which to get hash
    :type file_path: str
    :param alg: used algorithm (MD5, SHA{1, 2, ...}, CRC, ...)
    :type alg: str
    :return: the file's hash according to the desired algorithm
    :rtype: str
    """
    if alg == 'md5':
        md5_obj = hashlib.md5()
        block_size = 65536
        # read chunk by chunk for big files; the file is opened in binary
        # mode, so the iter() sentinel must be b"" rather than ""
        with open(file_path, 'rb') as f:
            for block in iter(lambda: f.read(block_size), b""):
                md5_obj.update(block)
        local_md5 = md5_obj.hexdigest()
        file_hash = local_md5
    else:
        raise NotImplementedError("ALGORITHM {0} NOT IMPLEMENTED!".format(alg))
    return file_hash
b7e1b06272f9560f6b818eef1903c97fe1f8d6cb
281,952
def mask_list(mask):
    """Return the list of set bits in a mask as integers."""
    set_bits = []
    for position, bit in enumerate(reversed(bin(mask)[2:])):
        if bit == "1":
            set_bits.append(int("1{}".format("0" * position), base=2))
    return set_bits
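A short example (assumed usage, not in the source) decomposing a bitmask into its power-of-two components:

# hypothetical call: 0b10110 has bits 1, 2 and 4 set
mask_list(0b10110)  # -> [2, 4, 16]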
bf856acf732e06eed6283ba0fc60c234a48dfe52
232,836
def pids2sql(pids):
    """
    Convert list of pids to SQL sequence expression

    :param pids: list of person_id
    :return: str representation of SQL expression
    """
    str_pids = map(str, pids)
    return ', '.join(str_pids)
e7c3b6c16ca9a85498f90caed2fff6e57eabe3f8
397,198
import json


def convert_json_to_dict(filename):
    """Convert json file to python dictionary"""
    with open(filename, 'r') as JSON:
        json_dict = json.load(JSON)
    return json_dict
11f8ea6d4999a204b9d239afde1aa7538d262208
628,640
def uri_parser(uri):
    """Split S3 URI into bucket, key, filename"""
    if uri[0:5] != 's3://':
        raise Exception('Invalid S3 uri %s' % uri)

    uri_obj = uri.replace('s3://', '').split('/')

    # remove empty items
    uri_obj = list(filter(lambda x: x, uri_obj))

    return {
        'bucket': uri_obj[0],
        'key': '/'.join(uri_obj[1:]),
        'filename': uri_obj[-1]
    }
7a922d6ff30c45b5757dbd41dbad9d48b3cf8cc0
494,046
import re


def skip_invalid_files(filename):
    """Ignore certain files during upload (such as dot files, .DS_Store, .git, etc)"""
    if filename == '.bidsignore':
        return False
    else:
        return re.match(r'^\.|.*\/\.|.*Icon\r', filename)
8d6dd4575fa5f39e0c1af84a08055a510a530486
476,366
import datetime


def time_diff(dt_abs, dt_stamp):
    """
    Return the time difference between two datetime objects in seconds
    (incl. fractions).  Exceptions (like on improper data types) fall
    through.
    """
    delta = dt_stamp - dt_abs

    if not isinstance(delta, datetime.timedelta):
        raise TypeError("difference between '%s' and '%s' is not a .timedelta"
                        % (type(dt_abs), type(dt_stamp)))

    # get seconds as float; delta.seconds alone would drop whole days
    seconds = delta.days * 86400 + delta.seconds + delta.microseconds / 1E6
    return seconds
cecfbc25fbe3f3ca2ea58651a2c596617a20f886
367,346
import re


def is_cusip(value):
    """Checks whether a string is a valid CUSIP identifier.

    Regex from here: https://regex101.com/r/vN3tE5/1

    :param value: A string to evaluate.
    :returns: True if string is in the form of a valid CUSIP number."""
    return bool(re.match(r'^([\w\d]{6})([\w\d]{2})([\w\d]{1})$', value))
92825395ead76a769bc938f4843e8ca50d9385f8
547,628
def _parse_instructors(details):
    """
    Extract instructor names from the course detail page

    Args:
        details(Tag): BeautifulSoup Tag for course details

    Returns:
        list of dict: List of first & last names of each instructor
    """
    try:
        instructors = details.findAll(
            "div", {"class": "field--name-field-learn-more-links"}
        )[-1].findAll("div", {"class": "field__item"})
        return [
            instructor.get_text().strip().split(",", 1)[0].split(" ", 1)
            for instructor in instructors
        ]
    except (AttributeError, IndexError):
        return []
3dc4f4ddf62dc9dae894d218e249a279941eca5f
13,060
def encrypt(plaintext, cipher, shift):
    """
    Caesar encryption of a plaintext using a shifted cipher.  You can specify
    a positive shift to rotate the cipher to the right and a negative shift
    to rotate the cipher to the left.

    :param plaintext: the text to encrypt.
    :param cipher: set of characters, shifted in a direction, used for
        character substitution.
    :param shift: offset to rotate cipher.

    :returns: encrypted plaintext (ciphertext).

    See: https://en.wikipedia.org/wiki/Caesar_cipher

    Example:
    >>> encrypt("hello world", "abcdefghijklmnopqrstuvwxyz ", 1)
    'gdkknzvnqkc'
    >>> encrypt("hello world", "abcdefghijklmnopqrstuvwxyz ", -1)
    'ifmmpaxpsme'
    """
    # calculating shifted cipher
    shifted_cipher = cipher
    if shift > 0:
        while shift > 0:
            shifted_cipher = shifted_cipher[-1] + shifted_cipher[0:len(shifted_cipher) - 1]
            shift -= 1
    else:
        while shift < 0:
            shifted_cipher = shifted_cipher[1:] + shifted_cipher[0]
            shift += 1

    return "".join([shifted_cipher[cipher.index(character)] for character in plaintext])
ba932f3582745bcd41618337b0e8d6dbc4bf0c2d
126,162
def session_path(info):
    """Construct a session group path from a dict of values."""
    return "/data/rat{rat:02d}/day{day:02d}/{comment}".format(**info)
1b849bc26df70adc29a30435dd992f2364143d85
233,655
def count_words(filepath, words_list):
    """
    Parameters
    ----------
    filepath : str
        Path to text file
    words_list : list of str
        Count the total number of appearances of these words

    Returns
    -------
    n : int
        Total number of times the words appear

    Usage:
        count_words('../alice.txt', ['cat', 'dog'])
    """
    # Open the text file
    with open(filepath) as file:
        text = file.read()

    n = 0
    for word in text.split():
        # Count the number of times the words in the list appear
        if word.lower() in words_list:
            n += 1

    print('The words {} appear {} times'.format(words_list, n))
    return n
fa69ca7bcb83b5a3c2db99823c6e1cada1e47c33
524,798
import itertools


def flatten(lst):
    """
    Flattens one level of a list.

    Parameters
    ----------
    lst : list

    Returns
    -------
    list
        List flattened by one level.
    """
    return list(itertools.chain.from_iterable(lst))
5d30ca71acabeec57252f1e466dbe250ce6742cd
694,423
def get_route_policy(
    self,
    ne_id: str,
    cached: bool,
) -> dict:
    """Get route policy configurations from Edge Connect appliance

    .. list-table::
        :header-rows: 1

        * - Swagger Section
          - Method
          - Endpoint
        * - routePolicy
          - GET
          - GET /routeMaps/{neId}?cached={cached}

    :param ne_id: Appliance id in the format of integer.NE e.g. ``3.NE``
    :type ne_id: str
    :param cached: ``True`` retrieves last known value to Orchestrator,
        ``False`` retrieves values directly from Appliance
    :type cached: bool
    :return: Returns dictionary with options object and data object. The data
        object includes Route map(s); each map includes Route rule(s); each
        rule has a match part and a set part. The schema of the return value
        is ``{'options': {'activeMap': 'map1'},
        'data': {'map1': {'prio': {...}, 'self': 'map1'}}}``. The ``prio``
        object contains rule key-value pairs; each key is the priority of a
        rule, each value is the rule object.
    :rtype: dict
    """
    return self._get("/routeMaps/{}?cached={}".format(ne_id, cached))
5b6c7654602c198570c710492c311eb9de51a024
664,700
def _get_version() -> str:
    """Returns the package version.

    Returns:
        Version number.
    """
    return '0.1'
049bc5a9f1cea90a70e4a4b4dba3a0a2833227d8
304,460
import torch


def min_tensor_value(tensor):
    """Returns the minimum value in a tensor, as a float (primitive)."""
    # return float(tensor.min(tensor.Tensor.type(torch.float32)))  # old
    if type(tensor) is int:
        return tensor
    return float(torch.min(tensor))
e21365aedfd8281e9a994a4fbcaeb2f163cbc053
342,457
from typing import Iterable
from typing import Mapping


def make_json_friendly(data):
    """
    Recursively transform data so that the output only contains objects
    readily json-ifiable.

    Right now, converts anything iterable (but not a mapping) to a list and
    anything that's a Mapping to a dict.

    Returns a completely new copy of data.
    """
    if isinstance(data, Iterable) and not isinstance(data, str):
        if isinstance(data, Mapping):
            return {key: make_json_friendly(val) for key, val in data.items()}
        else:
            return [make_json_friendly(val) for val in data]
    else:
        return data
13d98148faeb7acaa0f2c67e5040704c1aaf8685
163,654
def mat31_mod(b, m):
    """Compute moduli of a 3x1 matrix.

    Parameters
    ----------
    b : 'list' ['float']
        3x1 matrix.
    m : 'float'
        modulus.

    Returns
    -------
    res : 'list' ['float']
        3x1 matrix.
    """
    res = [0, 0, 0]
    for i in range(3):
        res[i] = int(b[i] - int(b[i] / m) * m)
        # if negative, add back modulus m
        if res[i] < 0:
            res[i] += m
    return res
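A usage sketch (assumed, not from the source), including a negative entry that is wrapped back into [0, m):

# hypothetical call with modulus 4
mat31_mod([10, -3, 7], 4)  # -> [2, 1, 3]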
f87ee517eef4bdf5aa3b5ef4b4b01bf42b729c49
432,523
import ipaddress


def is_valid_ip(ip_address):
    """Check Validity of an IP address"""
    try:
        ip = ipaddress.ip_address(u'' + ip_address)
        return True
    except ValueError as e:
        return False
c6b5011610249af389fc0e16c282cdaab245faeb
600,732
import functools


def with_lock(lock):
    """
    Call function with a lock

    :param lock: lock context manager
    """
    def _(func):
        @functools.wraps(func)
        def f(*args, **kwg):
            with lock:
                return func(*args, **kwg)
        return f
    return _
b7611b7b83dfb59f982714bb3fdf5dd4eb25a0ae
310,615
def flatten_forward(x_input):
    """Perform the reshaping of the tensor of size `(K, L, M, N)`
    to the tensor of size `(K, L*M*N)`

    # Arguments
        x_input: np.array of size `(K, L, M, N)`

    # Output
        output: np.array of size `(K, L*M*N)`
    """
    K, L, M, N = x_input.shape
    output = x_input.reshape(K, L * M * N)
    return output
0767220baa652e5f4c6efbca7b3ffdb03f71902b
629,482
def format_card(note: str) -> str:
    """Formats a single org-mode note as an HTML card."""
    lines = note.split('\n')
    if lines[0].startswith('* '):
        card_front = '<br />{}<br />'.format(lines[0][2:])
    else:
        print('Received an incorrectly formatted note: {}'.format(note))
        return ''
    card_back = '<br />'.join(lines[1:])
    return card_front + '\t' + card_back
b05c6d9e2419f3aa7c78811faf7c4ebf171d3073
647,711
from typing import Any
from typing import Dict


def serialize_arg(name: str, value: Any) -> Dict:
    """Get serialization for a run argument.

    Parameters
    ----------
    name: string
        Unique parameter identifier.
    value: any
        Argument value.

    Returns
    -------
    dict
    """
    return {'name': name, 'value': value}
ed5c8b3bf3792585d4b6a81ec049ea5a9e5a46af
543,727
def filter_action_by_name(actions, name):
    """Filter action list by name and return filtered list"""
    return list(filter(lambda x: x['name'] == name, actions))
18b1f5a816bdd02bf2fd15086a3824fadddbb4bd
496,940
def check_dna_sequence(sequence):
    """Check if a given sequence contains only the allowed letters A, C, T, G."""
    return len(sequence) != 0 and all(base.upper() in ['A', 'C', 'T', 'G'] for base in sequence)
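Two illustrative calls (assumed usage, not from the source); the check is case-insensitive and an empty sequence is rejected:

# hypothetical usage
check_dna_sequence("acgtACGT")  # -> True
check_dna_sequence("ACGU")      # -> False (U is not allowed)
check_dna_sequence("")          # -> False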
2f561c83773ddaaad2fff71a6b2e5d48c5a35f87
6,209
def drop_users_to_ignore(ignore, license_lists):
    """This function drops the users to ignore during the comparison from
    each license type list.

    Parameters
    ----------
    ignore (DataFrame) : Users to ignore during comparison
    license_lists (dict) : dictionary of DataFrames, one for each license type

    Returns
    -------
    license_lists (dict) : same as input minus DataFrame records whose email
        matched an email in the *ignore* DataFrame
    """
    for license in license_lists.keys():
        license_lists[license] = license_lists[license][
            ~license_lists[license]['User principal name'].isin(ignore['email'])
        ]
    return license_lists
3aaee5d9c49ee776f6fc5dbd2a8062e104c78845
44,756
def prompt_user_yes_no(question):
    """Prompt the user for a question with answer Y/N.

    :return: True if yes, False if no, ask again if any other answer
    """
    value = ''
    while value.lower() not in ['yes', 'no', 'n', 'y']:
        value = input("%s [yes/no] " % question)
        if value.lower() in ['yes', 'y']:
            return True
        elif value.lower() in ['no', 'n']:
            return False
        # any other answer: fall through and ask again
7c2658a3e75038f7f8f04564bf6fc2b408d07bc1
471,544
def _loss(dz, gamma=0.15):
    """Risk / Loss function, Tanaka et al. (https://arxiv.org/abs/1704.05988)

    Parameters
    ----------
    gamma : float

    Returns
    -------
    loss : float
    """
    return 1 - 1 / (1 + (dz / gamma) ** 2)
abec3bed11eeebcd7bfe94f8cb640a9032cc9d84
372,707
from typing import Dict


def _read_requirements_file(f) -> Dict:
    """Read a requirements.txt file

    :param f: req file object (opened in binary mode)
    :return: dict representing pip requirements
    """
    requirements = {}
    for line in f.readlines():
        line = line.decode()
        # key on the package name (text before the version specifier)
        requirements[line.split('=')[0]] = line.replace("\n", "")
    return requirements
6f598e41c84d4d3f3d0adf743cf55e5bce48c9ef
496,221
from typing import Any
from functools import reduce


def dgetattr(obj: Any, attr: str, default: Any) -> Any:
    """getattr with dot-separated attribute list"""
    try:
        return reduce(getattr, attr.split("."), obj)
    except AttributeError:
        return default
12361f12ef40c7c34fac182c47e0e0a8f784f18e
620,545
def get_delta_frame_id(span0, span1):
    """Computes the minimum distance between two non-overlapping spans.

    Args:
        span0 (Span): First span.
        span1 (Span): Second span.
    """
    if span0.overlaps(span1):
        assert False, (span0, span1)
        return 0
    if span0.end < span1.start:
        return span1.start - span0.end
    else:
        return span0.start - span1.end
93e61c6aa338955751a7eeaa52c4f2bf036aa6b4
122,748
def verify_interval_info(in_dict):
    """Verifies post request was made with correct format

    The input dictionary must have the appropriate data keys and types, or be
    convertible to correct types, to elicit the correct patient heart rate
    data.

    Args:
        in_dict (dict): input with patient ID and datetime

    Returns:
        str: if error, returns error message
        bool: if input verified, returns True
    """
    expected_keys = ("patient_id", "heart_rate_average_since")
    expected_types = (int, str)
    for i, key in enumerate(expected_keys):
        if key not in in_dict.keys():
            return "{} key not found".format(key)
        if type(in_dict[key]) is not expected_types[i]:
            if key == "patient_id":
                try:
                    in_dict[key] = int(in_dict[key])
                except ValueError:
                    return "{} value not correct type".format(key)
            else:
                return "{} value not correct type".format(key)
    return True
a24ccfabb11a3004a56db28b5ef7ca346a2085fb
545,710
def get_token_string(auth):
    """
    Retrieve the actual token string from a token creation.

    Used for knox support.

    :param auth: The instance or tuple returned by the token's .create()
    :type auth: tuple | rest_framework.authtoken.models.Token
    :return: The actual token string
    :rtype: str
    """
    return auth[1] if isinstance(auth, tuple) else auth.key
b8444ba3ca8bb5fb72c20dcdb5fde60a075ab378
241,990
def edges_flux_to_node_flux(G, attribute_name='flux'):
    """Sum all flux from incoming edges for each node in networkx object"""
    node_fluxes = {}
    for node in G.nodes:
        node_flux = sum([edge[2] for edge in list(G.in_edges(node, data=attribute_name)) if edge[2]])
        node_fluxes[node] = node_flux
    return node_fluxes
8e70e44b38e2f8e2b48b070bb03234d2df75e810
19,244
def MSE(gridA, gridB):
    """A function to calculate mean square error or difference between two grids"""
    diff = gridA.subtract(gridB)
    mse = diff.multiply(diff)
    return mse
f641f1f0b1b470b8a6570137fa8286157ffc1901
579,679
def mongodb_uri(mongodb_url, mongodb_port, db_name='tuplex-history'):
    """
    constructs a fully qualified MongoDB URI

    Args:
        mongodb_url: hostname
        mongodb_port: port
        db_name: database name

    Returns:
        string representing MongoDB URI
    """
    return 'mongodb://{}:{}/{}'.format(mongodb_url, mongodb_port, db_name)
a7a7ab645d6490669907f1f016c116cf65d75980
594,937
def _LookupTargets(names, mapping):
    """Returns a list of the mapping[name] for each value in |names| that is in
    |mapping|."""
    return [mapping[name] for name in names if name in mapping]
03a038150f7f757a99e9cfbb9cdaf6e98f441d57
651,868
def object_sizes(sobjs):
    """Return an array of the object sizes"""
    return [obj['Size'] for obj in sobjs]
34ddc0191cb3c96ffac471ac85061ca10f13b08d
118,676
from re import VERBOSE


def is_verbose(verbosity: str) -> bool:
    """
    Validates if verbosity is verbose.

    :param verbosity: String. verbosity value
    :return: Boolean. is verbosity verbose
    """
    # Note: re.VERBOSE is an integer regex flag, so comparing it against a
    # string always yields False; the VERBOSE constant was presumably meant
    # to come from the project's own settings rather than the re module.
    return verbosity == VERBOSE
df30899b6179910ff1bc403741e905e94692af3e
567,379
def convert_move_to_action(move_str: str):
    """
    :param move_str: A1 -> 0, H8 -> 63
    :return:
    """
    if move_str[:2].lower() == "pa":
        return None
    pos = move_str.lower()
    x = ord(pos[0]) - ord("a")
    y = int(pos[1]) - 1
    return y * 8 + x
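A brief usage sketch (assumed, not from the source) covering the corner squares and a pass move:

# hypothetical calls
convert_move_to_action("A1")    # -> 0
convert_move_to_action("h8")    # -> 63
convert_move_to_action("pass")  # -> None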
5ebfc7bbfc3c7736bea088a3d6bb38678c33bd8e
336,543
from datetime import datetime


def get_current_semester() -> str:
    """
    Given today's date, generates a three character code that represents the
    semester to use for courses such that the first half of the year is
    considered "Spring" and the last half is considered "Fall". The "Spring"
    semester gets an "s" as the first letter while "Fall" gets an "f". The
    next two characters are the last two digits in the current year.
    """
    today = datetime.today()
    semester = "f" + str(today.year)[-2:]
    if today.month < 7:
        semester = "s" + str(today.year)[-2:]
    return semester
039c5ae2b22c7e319320b09b155cb4a932fabd06
191,130
def validate_neighbor_entry_exist(duthost, neighbor_addr):
    """Validate if neighbor entry exist on duthost

    Args:
        duthost (AnsibleHost): Device Under Test (DUT)
        neighbor_addr (str): neighbor's ip address

    Returns:
        bool: True if neighbor exists. Otherwise, return False.
    """
    command = "ip neighbor show %s" % neighbor_addr
    output = [_.strip() for _ in duthost.shell(command)["stdout_lines"]]
    if not output or "REACHABLE" not in output[0]:
        return False
    return True
4009f3e7fbb2ce3f24ff39bb2faa07f2048d3006
524,800
import itertools


def get_all_combinations(elements):
    """get all combinations for a venn diagram from a list of elements"""
    result = []
    n = len(elements)
    for i in range(n):
        idx = n - i
        result.append(list(set(itertools.combinations(elements, idx))))
    return result
d6423d46fe8104fdb0f338446c8f91c655e89062
655,259
def guessPeriodicity(srcBounds):
    """
    Guess if a src grid is periodic

    Parameters
    ----------
    srcBounds : the nodal src set of coordinates

    Returns
    -------
    1 if periodic (wrap around), 0 otherwise
    """
    res = 0
    if srcBounds is not None:
        res = 1
        # assume longitude to be the last coordinate
        lonsb = srcBounds[-1]
        nlon = lonsb.shape[-1]
        dlon = (lonsb.max() - lonsb.min()) / float(nlon)
        tol = 1.e-2 * dlon
        if abs((lonsb[..., -1] - 360.0 - lonsb[..., 0]).sum() / float(lonsb.size)) > tol:
            # looks like a regional model
            res = 0
    return res
93c6e58785aeeadb642262cd12ca937aded996fb
404,343
def bin_to_dec(bin_str):
    """Convert a string of bits to decimal."""
    result = 0
    for i, bit in enumerate(bin_str[::-1]):
        result += int(bit) * 2 ** i
    return result
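A quick worked example (assumed usage, not from the source):

# hypothetical call: 1*8 + 0*4 + 1*2 + 1*1
bin_to_dec("1011")  # -> 11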
f49a9f0ee25e886111407bd20688567ee7e9a16f
657,313
def sort_wc(w_c, sort_key):
    """Sorts the dictionary and returns sorted dictionary

    Args:
        dictionary, 0 or 1
        0 - sort by key
        1 - sort by value
    Returns:
        sorted dictionary
    """
    sorted_w_c = {}
    # sorted is a built-in function and returns a sorted list
    # if sort_key is 1 - sort on value in the dictionary
    # if sort_key is 0 - sort on key in the dictionary
    if sort_key == 1:
        sorted_list = sorted(w_c, key=w_c.get, reverse=True)
    else:
        sorted_list = sorted(w_c, reverse=True)

    # build the sorted dictionary
    for word in sorted_list:
        sorted_w_c[word] = w_c[word]

    return sorted_w_c
727714b0d32d894272037f98221d52d9bc180b53
74,500
def GetDeferGroups(env):
    """Returns the dict of defer groups from the root defer environment.

    Args:
        env: Environment context.

    Returns:
        The dict of defer groups from the root defer environment.
    """
    return env.GetDeferRoot()['_DEFER_GROUPS']
d623ada67c1e49e00a678ce26f97b53f579c4379
47,260
from typing import Iterable


def check_for_func(sequence: Iterable) -> bool:
    """Used to determine if a list or tuple of columns passed into a sql
    function has parentheses '()', which indicate a function that needs to be
    parsed out

    Args:
        sequence (Iterable): list/tuple of column names

    Returns:
        bool: True if function found
    """
    # make all elements strings
    it = map(str, sequence)
    combined = "".join(it)
    return "(" in combined
8832c70f98a36eb761fdec5b47385575b92c6ca8
48,686
def check_sa_ea_for_each_branch(conn_components, start_activities, end_activities):
    """
    Checks if each branch of the parallel cut has a start
    and an end node of the subgraph

    Parameters
    --------------
    conn_components
        Parallel cut

    Returns
    -------------
    boolean
        True if each branch of the parallel cut has a start and an end node
    """
    if conn_components is None:
        return False
    for comp in conn_components:
        comp_sa_ok = False
        comp_ea_ok = False
        for sa in start_activities:
            if sa in comp:
                comp_sa_ok = True
                break
        for ea in end_activities:
            if ea in comp:
                comp_ea_ok = True
                break
        if not (comp_sa_ok and comp_ea_ok):
            return False
    return True
08cadae5e2665fefa4b85ffac85c90a317746e7c
225,591
def ngrams_of(sequence, ngram_size, ngram_behaviour="exact"):
    """Produce n-grams of a sequence of tokens. The n-gram behaviour can
    either be "exact", meaning that only n-grams of exactly size n are
    produced, or "subgrams" meaning that all n-grams of size less than or
    equal to n are produced.

    Parameters
    ----------
    sequence: Iterable
        The sequence of tokens to produce n-grams of.

    ngram_size: int
        The size of n-grams to use.

    ngram_behaviour: string (optional, default="exact")
        The n-gram behaviour. Should be one of:
            * "exact"
            * "subgrams"

    Returns
    -------
    ngrams: list
        A list of the n-grams of the sequence.
    """
    result = []
    for i in range(len(sequence)):
        if ngram_behaviour == "exact":
            if i + ngram_size <= len(sequence):
                result.append(sequence[i: i + ngram_size])
        elif ngram_behaviour == "subgrams":
            for j in range(1, ngram_size + 1):
                if i + j <= len(sequence):
                    result.append(sequence[i: i + j])
        else:
            raise ValueError("Unrecognized ngram_behaviour!")
    return result
2b42ced6bed4db9bf15c6aae4ec730622225bfbd
449,339
def edgesToVerts(s):
    """Turn a list of edges into a list of vertices"""
    o = set()
    for e in s:
        o.add(e[0])
        o.add(e[1])
    return o
7f793a66a6aa5761723ce538f5e2186b154581f5
559,338
import csv


def get_latencies(latencies_results_file):
    """
    Read the resulting latencies from the csv file.

    Parameters:
        - latencies_results_file: the path to the result file.

    Return:
        - A list of the filt, packet and network latencies.
    """
    latencies = []
    try:
        with open(latencies_results_file, newline='') as f:
            spamreader = csv.reader(f, delimiter=' ', quotechar='|')
            for row in spamreader:
                latencies.append(row[1])
    except Exception:
        # Add dummy values to latencies, -1.
        latencies.append(-1)
        latencies.append(-1)
        latencies.append(-1)
    return latencies
5d7b9885f846f5ed3070f5e0cc630c9b4c4e55e0
278,298
import math


def cairns(aboveground_biomass):
    """
    Calculates belowground biomass as a function of aboveground biomass.

    Derived from "Equation 1" in Cairns, Brown, Helmer & Baumgardner (1997),
    available online at:
    www.arb.ca.gov/cc/capandtrade/protocols/usforest/references/cairns1997.pdf

    Aboveground biomass expected in units of kg, and returns units of kg.
    """
    if aboveground_biomass <= 0:
        return 0
    else:
        return math.exp(-1.085 + 0.9256 * math.log(aboveground_biomass))
be0980dc94e3a19bc8dd8fb8221059c147524817
334,723
def snake_to_camel_case(name):
    """
    Accept a snake_case string and return a CamelCase string.

    For example::

        >>> snake_to_camel_case('cidr_block')
        'CidrBlock'
    """
    name = name.replace("-", "_")
    return "".join(word.capitalize() for word in name.split("_"))
b9e4a38bd26e86fa17fffc70449c6bed503314f1
442,771
def dimensions(bound):
    """
    Get the width and height of a bound

    :param bound: a bound tuple
    :return: a tuple containing the width and height of the ``bound``
        i.e ``(width, height)``
    """
    return bound[2] - bound[0], bound[3] - bound[1]
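A usage sketch (assumed, not from the source) for a (left, top, right, bottom) bound tuple:

# hypothetical call
dimensions((10, 20, 110, 60))  # -> (100, 40)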
d386cdb0fefb2ad46bd5b08826159c72d8ec1108
55,443
def get_duration(df):
    """Get duration of ECG recording

    Args:
        df (DataFrame): DataFrame with time/voltage data

    Returns:
        float: duration of ECG recording
    """
    start = df.time.iloc[0]
    end = df.time.iloc[-1]
    duration = end - start
    return duration
77698afc8ef7af557628d5fea760dc101c3e6112
1,823
def combine_names(msg_name, new_name):
    """combine msg-name with a new name."""
    if msg_name:
        return "{0}_{1}".format(msg_name, new_name)
    else:
        return new_name
908fa530021b4c17541d97571ede1aa1b546243b
126,466
def get_first_step_basement(instructions: str) -> int:
    """get the first step that reaches the basement (floor -1)"""
    floor = 0
    for step, word in enumerate(instructions):
        if word == "(":
            floor += 1
        elif word == ")":
            floor -= 1
        else:
            raise ValueError(f"{word} is not a valid instruction")
        if floor == -1:
            return step + 1
    raise ValueError("with the given instructions it did not reach the -1 floor")
101cd072de7b5eaa4c4d426193e9f3b03c19b087
528,044
def get_qualified(module, name):
    """Return a qualified module member ``name`` inside the named ``module``.

    The module (or package) first gets imported and the name is retrieved
    from the module's global namespace.
    """
    # see __import__.__doc__ for why 'fromlist' is used
    module = __import__(module, fromlist=[name])
    return getattr(module, name)
c2ebee0eaf6e5431bfc9d0fe22c88906d6fb6d4d
598,898
def identify_slack_event(event):
    """Identify the Slack event type given an event object.

    Parameters
    ----------
    event : `dict`
        The Slack event object.

    Returns
    -------
    slack_event_type : `str`
        The name of the slack event, one of https://api.slack.com/events.
    """
    primary_type = event['event']['type']
    if primary_type == 'message':
        channel_type = event['event']['channel_type']
        if channel_type == 'channel':
            return 'message.channels'
        elif channel_type == 'im':
            return 'message.im'
        elif channel_type == 'group':
            return 'message.groups'
        elif channel_type == 'mpim':
            return 'message.mpim'
        else:
            raise RuntimeError(f'Unknown channel type {channel_type!r}')
    else:
        return primary_type
05465e620d5f91294c9f0ae63793c8a582d49cca
325,872
import re


def to_slug(inp, lowercase=True):
    """Implements Aloe Slugify.to_slug"""
    # https://bitbucket.org/tacc-cic/aloe/src/master/aloe-jobslib/src/main/java/edu/utexas/tacc/aloe/jobs/utils/Slug.java
    # Remove single quote characters
    inp = re.sub("'", '', inp)
    # Remove non-ASCII characters
    inp = re.sub(r'[^\x00-\x7F]', '', inp)
    # Whitespace characters reduced to single -
    inp = re.sub(r'[\s]+', '-', inp)
    inp = re.sub(r'[^a-zA-Z0-9_]+', '-', inp)
    inp = re.sub(r'[-]+', '-', inp)
    inp = re.sub(r'^-', '', inp)
    inp = re.sub(r'-$', '', inp)
    if lowercase:
        inp = inp.lower()
    return inp
91e4a373604c0c42f1d5bc5f5789b3382dac68e0
239,953
def merge_form_errors(form):
    """Combine all form errors (except hidden) into one list."""
    errors = list(form.non_field_errors())
    for field in form.visible_fields():
        errors += field.errors
    return errors
ab5f0213fb7d6ba6559dc7681d391973c41b1a31
488,147
def get_tapis_abaco_image(base_url):
    """
    Determine the docker image name for a tapis notebook associated with a
    base_url.
    """
    # designsafe tenant:
    if 'agave.designsafe-ci.org' in base_url:
        return 'taccsciapps/jupyteruser-ds-abaco:1.2.14'
    return None
3ceb981ceed72dabfdb9adfa7e5572f7afe24411
590,742
def quote_with_backticks(identifier):
    """Quote the given identifier with backticks, converting backticks (`)
    in the identifier name with the correct escape sequence (``).

    identifier[in]  identifier to quote.

    Returns string with the identifier quoted with backticks.
    """
    return "`" + identifier.replace("`", "``") + "`"
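A short example (assumed usage, not from the source) showing how an embedded backtick is escaped:

# hypothetical call
quote_with_backticks("my`table")  # -> '`my``table`'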
e201dbe4e608131e4ced24580555c3011b7864fb
293,607
def is_weighted(G):
    """
    Determine if a graph G is weighted

    Checks each edge to see if it has attribute 'weight'; if it does, return
    True, otherwise False. This checks if the entire graph is weighted, not
    partially.

    Parameters:
    ----------
    G: A networkx Graph

    Returns:
    --------
    weighted : A bool
        Determines whether the graph is weighted.
    """
    weighted = True
    for (u, v) in G.edges():
        # G[u][v] is the edge-attribute dict in both networkx 1.x and 2.x
        # (the 1.x-only G.edge[u][v] accessor was removed in 2.0)
        weighted = weighted and ('weight' in G[u][v])
        if not weighted:
            return weighted
    return weighted
8e0799194fe6dfd94fbb495c227ec2066479a827
425,223