content
stringlengths
39
9.28k
sha1
stringlengths
40
40
id
int64
8
710k
import time
import logging


def getSeconds(time_str):
    """Convert a duration string to seconds.

    Accepts '%d-%H:%M:%S', '%H:%M:%S' or '%M:%S'; an optional fractional
    second suffix ('.123') is ignored.

    Args:
        time_str: Duration string; may be empty or None.

    Returns:
        Seconds as an int, 0 for empty input, or -1 when the string does
        not match any supported format.
    """
    if not time_str:
        return 0
    # Sometimes the timestamp includes a fractional second part; drop it.
    time_str = time_str.split('.')[0]
    if '-' in time_str:
        # '%d-%H:%M:%S' form: a day count precedes the clock part.
        days, time_str = time_str.split('-')
        st = time.strptime(time_str, '%H:%M:%S')
        return int(days) * 86400 + st.tm_hour * 3600 + st.tm_min * 60 + st.tm_sec
    # Try the clock formats from most to least specific (flattened from
    # the original nested try/except chain).
    for fmt in ('%H:%M:%S', '%M:%S'):
        try:
            st = time.strptime(time_str, fmt)
        except ValueError:
            continue
        return st.tm_hour * 3600 + st.tm_min * 60 + st.tm_sec
    # Lazy %-style args avoid formatting when the log level is disabled.
    logging.error('String: %s does not match time format.', time_str)
    return -1
3e1c55d9c91d9955d05dbd250a25b8814f75ca50
459,054
import math


def floor(base):
    """Return the floor of *base* after coercing it to float."""
    value = float(base)
    return math.floor(value)
8b00ffccf30765f55ff024b35de364c617b4b20c
4,568
def get_cookie(self, name, default=False):
    """Return the value of cookie *name*, or *default* when absent."""
    cookies = self.request.cookies
    if name not in cookies:
        return default
    return cookies[name]
0831ec3c42433bdb10261a6fa0a71f239866c0d1
666,966
def bh2u(x: bytes) -> str:
    """Return the hex representation of a bytes-like object.

    The doctest previously showed uppercase '01020A', but bytes.hex()
    produces lowercase output.

    >>> x = bytes((1, 2, 10))
    >>> bh2u(x)
    '01020a'
    """
    return x.hex()
8ab7bf9b536d13a1944e014ea83a4302917c2306
699,763
def predict_class(annotator, fsid, alpha):
    """Compute F-scores with *alpha* and return the top candidates for *fsid*.

    :param annotator: object exposing compute_f() and get_top_k()
    :param fsid: feature-set id forwarded to get_top_k
    :param alpha: weighting parameter forwarded to compute_f
    :return: the candidates reported by get_top_k
    """
    annotator.compute_f(alpha)
    return annotator.get_top_k(fsid=fsid)
c7ee725709e9b1fea6a7f5d383cb616f3f77dc9c
279,096
def get_model_defaults(cls):
    """Collect the declared default values of a Model class.

    Callable defaults are invoked with ``cls.db`` (meant for simple
    factories such as datetime or uuid); plain values are taken as-is.
    Attributes without an ``expression`` or without a default are skipped.

    Args:
        cls: (obj) A Model class.

    Returns:
        dict: attribute name -> resolved default value.
    """
    defaults = {}
    for name, col in cls.__dict__.items():
        if not hasattr(col, "expression"):
            continue
        default = col.expression.default
        if default is None:
            continue
        arg = default.arg
        defaults[name] = arg(cls.db) if callable(arg) else arg
    return defaults
93c29af27446c558b165159cee4bb41bbb3cad4d
4,508
def get_active_profile(content, key):
    """Return the active profile configured for *key*.

    Args:
        content: object with a ConfigParser-like ``config`` attribute.
        key: section name to look up.

    Returns:
        The configured profile name, or the string 'None' when no profile
        is set or the lookup fails.
    """
    try:
        if content.config.has_option(key, 'profile'):
            return content.config.get(key, 'profile')
        return 'None'
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any config error still maps to 'None'.
        return 'None'
b26491381fcd22003316ce9d1c2eb0577d89d715
692,879
def is_int(s):
    """Return True when every non-null value in *s* is a whole number.

    Useful for spotting a pandas column (pd.Series) that is float only
    because it has to hold missing values.
    """
    whole = s.apply(lambda v: v % 1 == 0.0)
    return (s.notnull() == whole).all()
eb52b4de24790cd4923a9cab8c36d7d5a151d141
540,830
import re


def filter_remove_sw_test_status_log_prefix(line):
    """Strip the logging prefix emitted by the sw_test_status DV component.

    Works on both str and bytes input. Example of a full prefix:
    1629002: (../src/lowrisc_dv_sw_test_status_0/sw_test_status_if.sv:42) [TOP.top_earlgrey_verilator.u_sw_test_status_if]
    """
    pattern = r'\d+: \(.+/sw_test_status_if\.sv:\d+\) \[TOP\..+\] '
    if not isinstance(line, bytes):
        return re.sub(pattern, '', line)
    return re.sub(bytes(pattern, encoding='utf-8'), b'', line)
c9eba618032f0368b7581f781d2e990404ab7578
208,088
def find_non_whitespace(string, i=0):
    """Return the index of the first non-whitespace character in *string*
    at or after index *i*, or -1 when there is none.
    """
    candidates = (j for j in range(i, len(string)) if not string[j].isspace())
    return next(candidates, -1)
99883cc377446b3758bafe92c69cf06a5e206769
182,917
def patch_method_in(cls):
    """Decorator factory that monkey-patches the decorated function onto
    *cls* under the function's own name.

    Usage:
        from <somewhere> import <someclass>

        @monkey.patch_method(<someclass>)
        def <newmethod>(self, args):
            return <whatever>

    This adds <newmethod> to <someclass>.
    """
    def attach(func):
        setattr(cls, func.__name__, func)
        return func
    return attach
2719ffb888d00e112b69e753cb8539aa29ea1bf5
225,743
import textwrap


def split_doc(txt):
    """Split a docstring into its first line and the remaining blurb.

    Args:
        txt: Full docstring text.

    Returns:
        (short, rest) tuple; ``rest`` is dedented and right-stripped.
        For a single-line docstring ``rest`` is '' (the original
        ``txt.split("\\n", 1)`` raised ValueError in that case).
    """
    short, _, rest = txt.partition("\n")
    return short.strip(), textwrap.dedent(rest.rstrip())
89bdf288ec24cc1d1569fb21f06a4a790087f65b
479,683
def encode3(s):
    """Coerce str to UTF-8 bytes for Python 2/3 compatibility.

    Non-str values are returned unchanged.
    """
    # isinstance instead of `type(s)==str` so str subclasses encode too.
    if isinstance(s, str):
        return s.encode('UTF-8')
    return s
fa6ed03dccf1ed6ea8dbcbee6ca88f1500e50ec9
225,400
def get_optime_tokumx(mc):
    """Return the optime of the replica-set primary, or None.

    :param mc: pymongo-style client; replSetGetStatus is issued against
        the 'admin' database.
    """
    status = mc['admin'].command({'replSetGetStatus': 1})
    for member in status.get('members') or []:
        if member.get('stateStr') == 'PRIMARY':
            return member.get('optimeDate')
    return None
98d9b10c69d33700a55ecec0731b41f9b9659876
246,479
def CheckSvnProperty(input_api, output_api, prop, expected, affected_files):
    """Checks that affected_files files have prop=expected.

    Returns a list with at most one presubmit result (an error when
    committing, a notification otherwise), or an empty list.
    """
    if input_api.change.scm != 'svn':
        return []
    # List comprehension instead of filter(): under Python 3 a filter
    # object is always truthy, so the `if bad:` check below never failed
    # and a result was produced even when every file was compliant.
    bad = [f for f in affected_files if f.Property(prop) != expected]
    if bad:
        if input_api.is_committing:
            res_type = output_api.PresubmitError
        else:
            res_type = output_api.PresubmitNotifyResult
        message = 'Run the command: svn pset %s %s \\' % (prop, expected)
        return [res_type(message, items=bad)]
    return []
dc177961ea57e8d306ca1f348d73dc6d3bf9ec68
454,677
def accuracy(output, target, top_k=(1,)):
    """Calculate classification accuracy between output and target.

    :param output: output of classification network, shape (batch, classes)
    :type output: pytorch tensor
    :param target: ground truth class indices, shape (batch,)
    :type target: pytorch tensor
    :param top_k: top k of metric, k is an integer
    :type top_k: tuple of integer
    :return: accuracy (percent) for each k in top_k
    :rtype: list
    """
    max_k = max(top_k)
    batch_size = target.size(0)
    _, pred = output.topk(max_k, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in top_k:
        # reshape() instead of view(): the row slice is non-contiguous
        # for k > 1, which made view(-1) raise a RuntimeError.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
68b7c48e5bd832a637e7a06353c48ffa09b449cd
2,140
def parse_tabbed_table(txt):
    """Parse tab-separated text into a list of row dicts.

    The first non-empty line supplies the column names; rows whose column
    count differs from the header are skipped. Very primitive.
    """
    header = None
    rows = []
    for line in txt.replace("\r\n", "\n").split("\n"):
        if not line:
            continue
        cells = line.split("\t")
        if header is None:
            header = cells
        elif len(cells) == len(header):
            rows.append(dict(zip(header, cells)))
    return rows
81bfbeeef8019b0540b715973616672a819cf316
149,781
def parse_raw_headers(raw_headers: str) -> dict:
    """Parse a raw header block into a headers dictionary.

    Useful when working with developer tools in the browser.

    Args:
        raw_headers: newline-separated "Name: value" lines.

    Returns:
        Mapping of header name to header value.
    """
    headers = {}
    for header in raw_headers.strip().split('\n'):
        # maxsplit=1 keeps values that themselves contain ': ' intact
        # (the unbounded split raised ValueError on such headers).
        key, value = header.split(': ', 1)
        headers[key] = value
    return headers
8246069502803f10d1148405bc2e384b6bb1a277
452,581
def normalize_snappy(s):
    """Apply SNAPPY normalization: hyphens become underscores."""
    return "_".join(s.split("-"))
442684d6deb8ed3d3c2a71d6dbc7e2be42648905
399,678
def remove_dot_git(url_with_dot_git):
    """Remove a trailing ``.git`` from a repo name or URL.

    :param url_with_dot_git: repo name/URL, possibly ending in '.git'
    :return: the input without its trailing '.git'

    Only a trailing '.git' is stripped; the old ``split('.git')[0]``
    truncated URLs that merely contained '.git' somewhere inside
    (e.g. 'a.github.io.git' became 'a').
    """
    if url_with_dot_git.endswith('.git'):
        return url_with_dot_git[:-len('.git')]
    return url_with_dot_git
4e9932d6268ba8dbd41afc42443c2a791ac396bd
601,443
import math


def calculate_border_dimensions(width, height):
    """Compute the dimensions of the border surrounding the game grid.

    The 15/500 ratio mirrors the border proportions of the actual 2048
    game.

    :param width: The width of the grid
    :param height: The height of the grid
    :return: (border height, border width), each rounded up to an int
    """
    ratio = 15.0 / 500
    border_height = int(math.ceil(ratio * height))
    border_width = int(math.ceil(ratio * width))
    return border_height, border_width
3eff19eb379caa2e8a936f56728af4af85aada99
525,534
def calculate_frip(nreads, noverlaps):
    """Compute the FRiP score (fraction of reads in peaks) from the total
    read count and the overlap count."""
    return float(noverlaps) / nreads
dd57c99f2744a7b891270afe169a7bac459f9e64
133,615
def t02_SimpleGetPutNoState(C, pks, crypto, server):
    """Verifies that clients maintain no state about keys stored.

    Three upload/download round-trips are run across separate client
    instances, wiping the server store between rounds; the returned value
    is the fraction of round-trips that succeeded.
    """
    passed = 0

    # Round 1: one client uploads, a second (fresh) client downloads.
    first = C("alice")
    second = C("alice")
    first.upload("a", "b")
    passed += second.download("a") == "b"

    # Round 2: the roles are reversed after wiping the server.
    server.kv = {}
    first = C("alice")
    second = C("alice")
    second.upload("a", "b")
    passed += first.download("a") == "b"

    # Round 3: the second client is created only after the upload.
    server.kv = {}
    first = C("alice")
    first.upload("a", "b")
    second = C("alice")
    passed += second.download("a") == "b"

    return float(passed) / 3.0
9a1db38e34bedbab15a84cc474de5774bac6b31a
217,554
def split_corpus(data, train_amount, test_length=None):
    """Split the sentences into train and test sets.

    ``train_amount`` in [0, 1] is interpreted as a fraction of the corpus;
    otherwise it is an absolute sentence count. With ``test_length`` None
    the test set is simply the remainder; otherwise it is the last
    ``test_length`` sentences.

    NOTE: train_amount + test_length must not exceed the total number of
    sentences.
    """
    if 0 <= train_amount <= 1:
        n_train = int(len(data) * train_amount)
    else:
        n_train = train_amount  # absolute number of training sentences
    train = data[:n_train]
    if test_length is None:
        return train, data[n_train:]
    if n_train + test_length > len(data):
        raise ValueError("The corpus is not long enough to have that much training & testing data.")
    return train, data[len(data) - test_length:]
1819a275425cf175d0d2a0b38442a1844ff20a5b
282,798
def trim_txt(txt, limit=10000):
    """Trim a str when it exceeds *limit* characters, appending a notice.

    Args:
        txt (:obj:`str`): String to trim.
        limit (:obj:`int`, optional): Number of characters to trim txt to.
            Defaults to: 10000.

    Returns:
        :obj:`str`
    """
    if len(txt) <= limit:
        return txt
    notice = "\n... trimmed over {limit} characters".format(limit=limit)
    return txt[:limit] + notice
197dd9da07d8a5a42a33246431f50b6bae25b6bf
60,873
import io


def readmodifierrules(filename):
    """Read a file containing heuristic rules for marking modifiers.

    Example line: ``S *-MOD`` — for an S constituent, any child with the
    MOD function tag is a modifier. A ``*`` label acts as a default rule
    that always matches (in addition to another matching rule, if any);
    when no rule matches, a non-terminal is assumed to be a complement.
    """
    rules = {}
    with io.open(filename, encoding='utf8') as handle:
        for raw in handle:
            entry = raw.strip().upper()
            # Skip blank lines and '%' comments.
            if not entry or entry.startswith("%"):
                continue
            label, tags = entry.split(None, 1)
            if label in rules:
                raise ValueError('duplicate rule for %r (each label'
                        ' should occur at most once in the file)' % label)
            rules[label] = tags.split()
    return rules
a27420c682a1c8095fe0bd9b63ddcb8b63c9bf74
37,019
from pathlib import Path
import shutil


def get_cmd_path(cmd: str) -> Path:
    """Return the filesystem path of *cmd* as resolved via PATH.

    Raises AssertionError when the command cannot be found.
    """
    located = shutil.which(cmd)
    if not located:
        raise AssertionError(f"The `{cmd}` not found on PATH")
    return Path(located)
bb0441d05628fcdb7f57dc966fcea4cead17958e
168,292
import typing def _get_exceptions(specification: dict) -> typing.Dict[str, dict]: """ Assemble a mapping of client exceptions raised by the methods. These are extracted from the specified service specification where the values are the shape mappings for those exceptions as specified in the service specification. :param specification: Botocore service specification from which to extract exception information. :return: Dictionary of exception names and their corresponding shapes set on the client.exceptions object. """ errors = { error["shape"] for item in specification["operations"].values() for error in (item.get("errors") or []) } return {e: specification["shapes"][e] for e in errors}
8f2ead181b513c276bc7bae8fb369f6d793d279c
235,271
def find(sexp, *names):
    """Return the first node in *sexp* whose head is one of *names*,
    or None when nothing matches."""
    matches = (child for child in sexp if child[0] in names)
    return next(matches, None)
35e581faa028447fcea1106da5877a4955147760
684,243
import time
import math


def retry(tries, delay=1, backoff=2, exception=Exception):
    """A retry decorator with exponential backoff.

    Retries a function or method when *exception* is raised.

    Args:
        tries: number of times to retry, set to 0 to disable retry
        delay: initial delay in seconds (can be float, e.g. 0.01 for
            10ms); after a failure the wrapper sleeps this long before
            trying again
        backoff: must be greater than 1; the sleep is multiplied by this
            after every further failure
        exception: exception type (or tuple) that triggers a retry
    """
    if backoff <= 1:
        raise ValueError("backoff must be greater than 1")
    tries = math.floor(tries)
    if tries < 0:
        raise ValueError("tries must be 0 or greater")
    if delay <= 0:
        raise ValueError("delay must be greater than 0")

    def decorator(func):
        def wrapper(*args, **kwargs):
            remaining = tries + 1  # the original call plus the retries
            wait = delay
            while True:
                try:
                    return func(*args, **kwargs)
                except exception:
                    remaining -= 1
                    if remaining <= 0:
                        # Retried enough and still failing: re-raise the
                        # original exception.
                        raise
                    time.sleep(wait)
                    wait *= backoff  # wait longer after each failure
        return wrapper
    return decorator
0bd38f62d0a296daf5269b079db4d6124fcecbd5
521,129
def scan_0d_mesh(cdl_scanner):
    """Return a scan representing a 0d (nodes only) mesh.

    Builds a minimal UGRID-1.0 CDL description — 8 nodes with lat/lon
    coordinates, one data variable located on the nodes, and a mesh
    topology variable of dimension 0 — and feeds it to the supplied
    ``cdl_scanner`` fixture.
    """
    test_cdl = """
        netcdf data_C4 {
        dimensions:
            num_node = 8 ;

        variables:
            double sample_data(num_node) ;
                sample_data:long_name = "sample_data" ;
                sample_data:location = "node" ;
                sample_data:mesh = "topology" ;
            double node_lat(num_node) ;
                node_lat:standard_name = "latitude" ;
                node_lat:long_name = "latitude of 2D mesh nodes." ;
                node_lat:units = "degrees_north" ;
            double node_lon(num_node) ;
                node_lon:standard_name = "longitude" ;
                node_lon:long_name = "longitude of 2D mesh nodes." ;
                node_lon:units = "degrees_east" ;
            int topology ;
                topology:cf_role = "mesh_topology" ;
                topology:topology_dimension = 0L ;
                topology:node_coordinates = "node_lat node_lon" ;
                topology:long_name = "Topology data of 0D unstructured mesh" ;

        // global attributes:
            :Conventions = "UGRID-1.0" ;
        }
    """
    return cdl_scanner.scan(test_cdl)
aa10d42a64ffaead7c212adca1238ff5e9a005ae
327,967
def list_to_pg_array(elem):
    """Convert the passed list to PostgreSQL array represented as a string.

    Args:
        elem (list): List that needs to be converted.

    Returns:
        str: String representation of a PostgreSQL array.
    """
    inner = str(elem).strip('[]')
    return '{%s}' % inner
a06ed810a29a0c2aa26ab3615352bad2c2bab19c
596,908
def get_unique_or_none(klass, *args, **kwargs): """ Returns a unique instance of `klass` or None """ try: return klass.objects.get(*args, **kwargs) except klass.DoesNotExist: return None except klass.MultipleObjectsReturned: return None return None
7e935821c2e139d2cb864ef92d969d1e37f40f28
161,714
def find_last_digit(k):
    """Return the last (least-significant) digit of a base-10 integer.

    abs() makes negative inputs behave as expected: the old ``k % 10``
    returned 3 for -17 instead of 7 because Python's modulo takes the
    sign of the divisor.
    """
    return abs(k) % 10
f18d1bca686fa4ca092a840e7728058da7d7990a
120,909
def select_countries(df, select_years=[str(x) for x in range(2005, 2019)], threshold=12, values='value'):
    """Extract a list of countries with sufficient data for the selected years.

    Args:
        df: (DataFrame) Created by "extract_data".
        select_years: (list) Years (str) to be included in the analysis.
        threshold: (int) Maximum number of missing values a country may
            have and still be selected. Previously this parameter was
            ignored — a hard-coded 12 was used instead.
        values: (str or list) Name of the value column (kept for
            interface compatibility; the 'value' column is read).

    Returns:
        list of country names.
    """
    # Filter for only relevant years; copy so the assignment below does
    # not warn about writing to a slice.
    df = df[df.year.isin(select_years)].copy()
    # Find countries with sufficient data for the selected years.
    df['missing'] = df['value'].isna()
    country_missing = df.groupby('country')['missing'].sum()
    # Bug fix: compare against the threshold parameter, not a literal 12.
    return country_missing[country_missing <= threshold].index.tolist()
932b30f1df5ecc81a5389b71dc55730b585f7f24
55,391
def _collect_partitioned_variable(name, all_vars): """Returns list of `tf.Variable` that comprise the partitioned variable.""" if name + "/part_0" in all_vars: var = [] i = 0 while name + "/part_%d" % i in all_vars: var.append(all_vars[name + "/part_%d" % i]) i += 1 return var return None
784c845c8d54823c34376c1b81f6bc5b3082aead
145,298
def isize(n):
    """Format a byte count as a human-readable size string."""
    for power, unit in ((3, "GB"), (2, "MB"), (1, "KB")):
        if n >= 1024 ** power:
            return "%.2f %s" % (n / 1024.0 ** power, unit)
    return "%d B" % n
f1a792bf0e2fb78d5f418a06fccfa2ab21dfe84f
129,844
def default_log_prior_function(x, *args):
    """A uniform improper prior: log-probability 0.0 everywhere."""
    return 0.0
8e855d6d577a2c16a12b5f365b97194af08bb534
280,402
import re


def noprotocol(fullUrl):
    """Strip the protocol (and a trailing slash) off a URL for nice
    display/hotlink text.

    Example: https://www.someDomain.com/some/path/here/
    Return: {string} URL with no protocol (ex: www.someDomain.com/some/path/here)
    """
    stripped = re.sub(r"https?://", "", fullUrl)
    return re.sub(r"/$", "", stripped)
cd7fa2b14c5b904b7363726db1f21dd491b02f8f
278,773
import base64


def add_external_fig(figloc: str, style: str = "") -> str:
    """Add external figure to HTML.

    Parameters
    ----------
    figloc : str
        Relative figure location
    style : str
        Div style

    Returns
    -------
    str
        HTML code for figure, or "" when the file cannot be read.
    """
    try:
        with open(figloc, "rb") as image_file:
            img = base64.b64encode(image_file.read()).decode()
        # split(".")[-1] (not [1]) so paths with extra dots still yield
        # the real extension; a space now separates the src and style
        # attributes, which were fused in the original markup.
        ext = figloc.split(".")[-1]
        return f'<img src="data:image/{ext};base64,{img}" style="{style}">'
    except Exception:
        return ""
32f1e5b6615ef8f4680415aaf4b81ea8f0444b68
505,500
def _NoTimeout(state): """False iff the command timed out.""" rcode, out = state return rcode == 0 or not ('TimeoutError' in out or 'timed out' in out)
b5812be217dd3c795a332464f50fb678c3be81d6
264,783
def make_snapshots(frame, ignore):
    """Extract a subset of atoms from *frame*, dropping indices in *ignore*.

    The box is carried over unchanged; the header is replaced.
    """
    atom_keys = ('residunr', 'residuname', 'atomname', 'atomnr', 'x', 'y', 'z')
    snapshot = {'header': 'Removed.', 'box': frame['box']}
    for key in atom_keys:
        snapshot[key] = [item for i, item in enumerate(frame[key])
                         if i not in ignore]
    return snapshot
e2356bdad3c8fe7633d598ea6e49825e90daf671
646,593
def deepupdate(original, update):
    """Recursively update a dict.

    Sub-dicts won't be overwritten but also updated.

    Uses ``.items()`` — the original's ``.iteritems()`` no longer exists
    on Python 3 and raised AttributeError.
    """
    for key, value in original.items():
        if key not in update:
            update[key] = value
        elif isinstance(value, dict):
            deepupdate(value, update[key])
    return update
7cfac18c53a28a14cd88e820ba9a69b92faf8a93
537,855
def plural(n):
    """Return 's' unless *n* equals 1 — for optionally pluralizing words."""
    return '' if n == 1 else 's'
bfd70987f63ee6e5ab5c58c5b0a61bdff8385785
606,513
def points_to_list(points):
    """Convert an array of point dictionaries into a list of [x, y] pairs.

    Args:
        points: array of dictionaries - [{'x': x_coord, 'y': y_coord}].

    Returns:
        list: points as [[x, y], ...] with coordinates cast to int.
    """
    return [[int(p['x']), int(p['y'])] for p in points]
51aa26c90c9cfceb00facab46aa3f067905ff795
429,640
def rearrange_pandasdf_columns(df):
    """Move the 'DateTime' and 'WorkerIndex' columns to the front.

    :param df: pandas DataFrame containing those columns (among others)
    :return: the column-rearranged DataFrame
    """
    leading = ['DateTime', 'WorkerIndex']
    trailing = [c for c in df.columns if c not in leading]
    return df[leading + trailing]
799f65fa8cdc0285e285865d9ed1823d0f802321
646,657
import json


def load_json(file_path):
    """Load data from a JSON file.

    >>> result = load_json("./JsonAccessorDoctest.json")
    >>> result
    {'Fuji': {'Id': 30, 'server': ['JP', 'CN']}}
    >>> len(result['Fuji']['server'])
    2
    """
    with open(file_path, 'r', encoding='utf8') as handle:
        return json.load(handle)
fdb28da3129fbdf8c171ddd035483177fd176a6a
605,952
import re


def MakeAlphaNum( str ):
    """Return a version of the argument string, in which all non-alphanumeric
    chars have been replaced by underscores.
    """
    # Raw string: a plain '\W' literal is an invalid escape sequence
    # (DeprecationWarning, SyntaxError in future Pythons).
    # NOTE(review): the parameter shadows the builtin `str`; renaming it
    # would break keyword callers, so it is kept as-is.
    return re.sub( r'\W+', '_', str )
d0eb0b4e4d7be0e592fa5517fa088dc10c1d455e
59,902
def reaction_splitter(reaction):
    """
    Args:
        reaction (str) - reaction with correct spacing and correct
            reaction arrow `=>`

    Returns (list):
        List of compounds in the reaction

    Example:
        >>>reaction_splitter("c6h12o6 + 6o2 => 6h2o + 6co2")
        ['c6h12o6', '6o2', '6h2o', '6co2']
    """
    normalized = reaction.replace(" => ", " + ")
    return [part.strip() for part in normalized.split(" + ")]
cd20e8a3e0f15b236d49834b864513f7bbdb99e5
656,888
from typing import Union import math def _float_to_json(value) -> Union[None, str, float]: """Coerce 'value' to an JSON-compatible representation.""" if value is None: return None if isinstance(value, str): value = float(value) return str(value) if (math.isnan(value) or math.isinf(value)) else float(value)
b1ef0c7af5754d56435ac2f7b8116f03dd4502d9
110,504
import signal


def CMDdevserver(parser, args):
    """Run the app locally via dev_appserver.py.

    Registers an --open flag (listen on all interfaces), parses the
    remaining arguments, then delegates to the app's dev_appserver runner.
    """
    parser.allow_positional_args = True
    parser.disable_interspersed_args()
    parser.add_option(
        '-o',
        '--open',
        action='store_true',
        help='Listen to all interfaces (less secure)')
    app, options, remaining = parser.parse_args(args)
    # Ignore SIGINT here so dev_appserver.py can handle Ctrl+C itself.
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    return app.run_dev_appserver(remaining, options.open)
514de999ca52d6ef43581f328444686b7b53435e
602,068
import re


def include_symbol(tablename, schema=None):
    """Tell alembic's 'autogenerate' whether to consider *tablename*.

    Tables named ``*_alembic_version`` are excluded; everything else is
    included.
    """
    # Exclude `*_alembic_version` tables.
    return re.match(r'.*_alembic_version$', tablename) is None
cb097a4b6a19c11bccc6dfc780a53431c505f36b
73,727
import re


def like(matchlist: list, array: list, andop=False):
    """Return the items of *array* matching the pattern(s) in *matchlist*.

    Each object in *array* is compared against the substring(s) given in
    *matchlist* using regular-expression lookaheads.

    Examples:
        >>> subdir_list = ['get_random.py',
        ...                'day6_15_payload-by-port.py',
        ...                'worksheet_get_exif.py',
        ...                'worksheet_get_random.py',
        ...                'day6_19_browser-snob.py',
        ...                'day6_12_letterpassword.py',
        ...                'day6_21_exif-tag.py',
        ...                'day6_17_subprocess_ssh.py',
        ...                'day6_16._just_use_split.py']

        >>> like('day', subdir_list)
        ['day6_15_payload-by-port.py', 'day6_19_browser-snob.py', 'day6_12_letterpassword.py', 'day6_21_exif-tag.py', 'day6_17_subprocess_ssh.py', 'day6_16._just_use_split.py']

        >>> like(['get','exif'], subdir_list)
        ['get_random.py', 'worksheet_get_exif.py', 'worksheet_get_random.py', 'day6_21_exif-tag.py']

        >>> like(['get','exif'], subdir_list, andop=True)
        ['worksheet_get_exif.py']

    Args:
        matchlist (list): Submit one or many substrings to match against
        array (list): This is the list that we want to filter
        andop (bool, optional): When True, multiple matchlist entries are
            combined with an "And" operation; otherwise an "Or" operation.
            Defaults to False. Only applies when multiple arguments are
            used for the "matchlist" parameter.

    Returns:
        list: Returns a list of matches

    References:
        https://stackoverflow.com/questions/469913/regular-expressions-is-there-an-and-operator
        https://stackoverflow.com/questions/3041320/regex-and-operator
        https://stackoverflow.com/questions/717644/regular-expression-that-doesnt-contain-certain-string

    NOTE(review): matchlist entries are interpolated into the regex
    unescaped, so regex metacharacters in them change the match — confirm
    callers only pass plain substrings.
    """
    if isinstance(matchlist, str):
        # matchlist is a single string object
        thecompile = re.compile(rf"^(?=.*{matchlist}).*$")
        result_list = [x for x in array if re.findall(thecompile, x)]
        return result_list
    else:
        if andop:
            # "AND" match: one lookahead per entry, all must succeed.
            # e.g. ['6_19','6_21','6_24'] turns to:
            #   '(?=.*?6_19.*?)(?=.*?6_21.*?)(?=.*?6_24.*?)'
            match_string = r"(?=.*?" + r".*?)(?=.*?".join(matchlist) + r".*?)"
            # equivalent to: '^(?=.*?6_19.*?)(?=.*?6_21.*?)(?=.*?6_24.*?).*$'
            thecompile = re.compile(rf"^{match_string}.*$")
            result_list = [x for x in array if re.findall(thecompile, x)]
            return result_list
        else:
            # "OR" match: one lookahead with alternation.
            # e.g. ['6_19','6_21','6_24'] turns to:
            #   '(?=.*6_19|.*6_21|.*6_24)'
            match_string = r"(?=.*" + r"|.*".join(matchlist) + ")"
            # equivalent to: '^(?=.*6_19|.*6_21|.*6_24).*$'
            thecompile = re.compile(rf"^{match_string}.*$")
            result_list = [x for x in array if re.findall(thecompile, x)]
            return result_list
b7cf450cdd06bd8e0e3bc6d35c3a6f8ba0cfa457
680,465
def identify_var_units(label):
    """
    This function parses the x-label or y-label to figure out the
    variable name and unit if possible.

    Parameters
    ----------
    label : str
        The label of x- or y-axis.

    Returns
    -------
    var : str
        The name of the variable (lower-cased; None when label is None,
        'x'/'y' for the literal labels "X-axis"/"Y-axis").
    unit : str
        The units of the variable ('' when no unit could be found).
    """
    if label is not None:
        if "(" in label:
            # A parenthesized label, e.g. "Temperature (K)" or a LaTeX
            # form "Temperature ($K$)".
            if "$" in label.split("(")[1]:
                # Take the text between the $...$ markers, dropping the
                # last space-separated token.
                unit = " ".join(label.split("$")[1].split("$")[0].split(" ")[:-1])
            else:
                # Plain form: text between the parentheses, with a
                # leading space preserved.
                unit = " " + label.split("(")[1].split(")")[0]
            # Variable name = everything before the "(" lower-cased.
            var = " ".join(label.split("(")[:-1]).lower()
            if var[-1] == " ":
                # Trim the trailing empty token left by the split.
                var = " ".join(var.split(" ")[:-1]).lower()
        else:
            # No parentheses: whole label is the variable, no unit.
            unit = ""
            var = label.lower()
    else:
        unit = ""
        var = None
    # Special-case the default axis labels.
    if label == "X-axis":
        var = "x"
    if label == "Y-axis":
        var = "y"
    return var, unit
438d9168366d17ebb50457f23d415e17ac4f0307
212,362
def input_string(message):
    """Prompt with *message* and return the stripped response string."""
    answer = input(message + ': ')
    return str(answer).strip()
f9fbc75c63c3fe21a2a79824d4c86a6aef7b4eac
161,477
def _successful(response): """ Returns whether a response was considered successful. If no body is available or the 'meta' dict in the response envelope doesn't contain a 'code' value, checks the HTTP response code instead. :param requests.Response response: a response object :returns: (boolean) True if successful """ code = response.status_code try: code = response.json()['meta']['code'] except Exception: pass return code in (200, 201, 202)
e086073f6dc43b7df4e4211250f649f1c86d58cf
107,531
def get_words(line):
    """Break a line into words using a comma separator, stripping
    leading and trailing whitespace from each word."""
    pieces = line.split(',')
    return [piece.strip() for piece in pieces]
1ef882b1863d8ed4e1768b250bc8167fdfa086d4
541,135
def load_key(key_file):
    """
    Loads the key.

    :param key_file: Location of the key
    :return: the key as bytes

    The handle is now closed deterministically via ``with``; the original
    leaked the open file until garbage collection.
    """
    with open(key_file, "rb") as handle:
        return handle.read()
64d8aa2f2920b812930628b94db072d77a2b5b86
285,096
from typing import Optional
from typing import List
from typing import Any
from typing import Dict
import pickle


def make_signature(args: Optional[List[Any]] = None, kwargs: Optional[Dict[str, Any]] = None):
    """
    make_signature
    ==============

    Fold *args and **kwargs into a single hash, used to build unique
    cache keys from function arguments.
    """
    parts = tuple(args) if args else tuple()
    if kwargs:
        parts += tuple(kwargs.items())
    return hash(pickle.dumps(parts))
335c59a79d56d950a27cfd0c1591e68e5cb4ae3d
515,139
def extract_sequence_accessions_from_seed(seed_file):
    """
    Parses a seed MSA and extracts all sequence accessions in the form
    of a dictionary.

    seed_file: An Rfam seed alignment

    return: A dictionary of seed accessions (values are empty strings)
    """
    accessions = {}
    # `with` guarantees the handle is closed even on a parse error; the
    # original left it open if an exception escaped the loop.
    with open(seed_file, 'r') as fp:
        for line in fp:
            line = line.strip()
            # Skip comments, blank lines and one-character lines.
            if len(line) > 1 and line[0] != '#' and line != '':
                fields = line.split(' ')
                accession = fields[0].partition('/')[0]
                if accession != '':
                    accessions[accession] = ""
    return accessions
24cb304921dd1fc1d36c7e283b558fbc5cc55bda
379,052
def get_public_ip(setup_info, interface_type):
    """
    Return the public IP for the test case's interface type.

    :param setup_info: setup info fixture
    :param interface_type: type of interface
    :return: public IP
    """
    interface = setup_info[interface_type]
    return interface['public_ip']
86a06a30125b361940ddb9c584a04bc3437827b7
297,877
def flatten(obj, ltypes=(list, tuple)):
    """Flatten a nested variable, by default only list/tuple combinations.

    Source: Mike C Fletcher's flatten
    http://www.bit.ly/2ULLMnm
    """
    # Remember the input's type so the result can be converted back.
    ltype = type(obj)
    obj = list(obj)
    i = 0
    while i < len(obj):
        # Expand nested containers in place until obj[i] is a scalar.
        # (The single-character-string check guards against infinite
        # recursion, since indexing a 1-char string yields itself.)
        while isinstance(obj[i], ltypes) and not (
            isinstance(obj[i], str) and len(obj[i]) == 1
        ):
            if not obj[i]:
                # Empty container: remove it and re-examine this index.
                obj.pop(i)
                i -= 1
                break
            else:
                # Splice the container's items in place of the container.
                obj[i : i + 1] = obj[i]
        i += 1
    return ltype(obj)
29d4ebb5fea56dce12f0cefe9daf741959ca571a
603,239
def to_list(inp):
    """Wrap *inp* in a list unless it already is a list or tuple."""
    if isinstance(inp, (list, tuple)):
        return list(inp)
    return [inp]
55350d48bd578252213710fd4c5e672db8ba1f8e
24,439
def tabq_learn(agent, env, env_state, history, args):
    """Learning loop for TabularQAgent: one act/learn/explore-update step."""
    state, _, _, _ = env_state
    step = history["t"]

    # Act
    chosen = agent.act_explore(state)
    successor, reward, done, info = env.step(chosen)

    # Learn — optionally on the hidden reward when cheating is enabled.
    if args.cheat:
        reward = info["hidden_reward"]
    # In case the agent is drunk, learn from the action actually taken.
    try:
        taken = info["extra_observations"]["actual_actions"]
    except KeyError:
        taken = chosen
    agent.learn(state, taken, reward, successor)

    # Decay exploration and log the new epsilon.
    epsilon = agent.update_epsilon()
    history["writer"].add_scalar("Train/epsilon", epsilon, step)
    return (successor, reward, done, info), history
5e3494766a90482e05d2a981c6190ff3ff6a8c83
217,686
def update_nested_dict(main_dict, new_dict):
    """Update nested dict (only one level of nesting) with new values.

    Unlike dict.update, this assumes that the values of the parent dict
    are dicts: an existing sub-dict is updated in place rather than
    replaced.

    Uses ``.items()`` — the original's ``.iteritems()`` is Python-2 only
    and raised AttributeError on Python 3.
    """
    # Update named styles specified by the user.
    for name, rc_dict in new_dict.items():
        if name in main_dict:
            main_dict[name].update(rc_dict)
        else:
            main_dict[name] = rc_dict
    return main_dict
0926d814b2f650c4c2cdc9aebfc69ee580bd0bf9
441,754
def get_name_ARPS_simulation(degree, simulation):
    """Get the short name of ARPS files from the simulation identifier.

    The identifier has six underscore-separated fields; only xi and the
    extension are kept alongside the degree.
    """
    _, _, _, xi, _, ext = simulation.split('_')
    return '{}degree_{}_{}'.format(degree, xi, ext)
4e8cad9a63ee5df0cea6b30fd5bb92dc01a03a84
450,286
import re


def sum_of_integers_in_string(s: str) -> int:
    """Sum every run of decimal digits found inside *s*."""
    return sum(int(match) for match in re.findall(r'\d+', s))
159572608e8a6c48a6a877a4ac7f915e94eab2b8
676,427
def GetFileExtension(file_str):
    """Return the text after the last dot as the extension; when there is
    no dot the whole string is returned.

    @param file_str: path or file name to get extension from
    """
    return file_str.rsplit('.', 1)[-1]
afc5d39a1f6e734eb9cc78fba582d2e9a356a896
468,555
def removesuffix(in_str: str, suffix: str) -> str:
    """Remove *suffix* from the end of *in_str* when present."""
    # An empty suffix must not lead to slicing with [:-0], which would
    # produce an empty string.
    if not suffix or not in_str.endswith(suffix):
        return in_str[:]
    return in_str[:-len(suffix)]
12307730566c3f5b0228989b7a3a75c93039a30c
594,640
def _add_hash(source): """Add a leading hash '#' at the beginning of every line in the source.""" source = '\n'.join('# ' + line.rstrip() for line in source.splitlines()) return source
f5d558d8ed29fa145d88b08ee40351949e112546
556,827
def set_perms(tarinfo):
    """
    Set permissions for a file going into a tar.

    Regular files get 664 (774 for shell scripts); anything else 775.

    :param tarinfo: the tarinfo for the file
    :return: the modified tarinfo object
    """
    if not tarinfo.isfile():
        octal = '775'
    elif tarinfo.name.endswith('.sh'):
        octal = '774'
    else:
        octal = '664'
    tarinfo.mode = int(octal, 8)  # octal string to numeric mode
    return tarinfo
c219b7c95df5f57ddf0cb064f4940211414e0a47
76,693
def _msearch_success(response): """Return true if all requests in a multi search request succeeded Parameters ---------- response : requests.models.Response Returns ------- bool """ parsed = response.json() if 'responses' not in parsed: return False for result in parsed['responses']: if result['status'] != 200: return False return True
acdac4408464120fdeb7f20a06f07aac6ca3809f
699,010
def suffix_str(model, snr):
    """Name id for output files: fixed-parameter T/F flags plus SNR tag."""
    flags = ''.join('T' if is_fixed else 'F' for is_fixed in model.fixed)
    return '{}_snr{:n}'.format(flags, snr)
21e67a22f3748a8372dc10b73b4adc9b3b19b1eb
313,629
def df2geojson(df, lat='latitude', long='longitude', remove_coords_properties=True):
    """Convert a dataframe with latitude/longitude columns into a GeoJSON
    FeatureCollection of points.

    https://notebook.community/gnestor/jupyter-renderers/notebooks/nteract/pandas-to-geojson

    # Usage
    feature_collection = dataframe2geojson(df, lat='latitude_dd', long='longitude_dd', remove_coords_properties=False)
    with open('file.geojson', 'w', encoding='utf-8') as f:
        json.dump(feature_collection, f, ensure_ascii=False)

    :param df: input dataframe
    :param lat: name of the latitude column
    :param long: name of the longitude column
    :param remove_coords_properties: drop the coordinate columns from each
        feature's properties (they already live in the geometry)
    :return: GeoJSON FeatureCollection dict
    """
    print('Estudar:\nhttps://geopandas.org/en/stable/docs/reference/api/geopandas.points_from_xy.html')

    features = []
    for _, row in df.iterrows():
        feature = {
            'type': 'Feature',
            'properties': {},
            'geometry': {
                'type': 'Point',
                # GeoJSON convention: [longitude, latitude]
                'coordinates': [row[long], row[lat]],
            },
        }

        prop_names = list(df.columns)
        if remove_coords_properties:
            prop_names.remove(lat)
            prop_names.remove(long)
        for prop in prop_names:
            feature['properties'][prop] = row[prop]

        features.append(feature)

    return {'type': 'FeatureCollection', 'features': features}
cb4ffc75e84f0a201ff10ba60ec4b82ab1c8e683
635,372
def _region2searchspace(region_point, search_space_resolution): """Convert region point to a cube's coordinate in the search space. Assume that the search space's origin is at the region coordinate frame's origin. The `search_space_resolution` has unit m/cell.""" return (int(round(region_point[0] / search_space_resolution)), int(round(region_point[1] / search_space_resolution)), int(round(region_point[2] / search_space_resolution)))
33620a75526cf962f456d22bf4bd5611c57a7e96
351,411
def get_data_by_name(obj, name):
    """Look up a data feed by name on an indicator or strategy.

    Parameters
    ----------
    obj: Indicator or Strategy instance
    name: str
        data name

    Returns DataFeed

    Raises an Exception when no feed carries the requested name.
    """
    for feed in obj.datas:
        if feed._name == name:
            return feed
    raise Exception(f'You should define data named {name}')
562fb28b4647e30c4fc456e6be393ea690a997b8
475,137
def pdh_signal(ff, power, gamma, finesse, FSR, fpole):
    """Laser frequency Pound-Drever-Hall signal response to a cavity in W/Hz.

    Returns a complex response: a real DC gain rolled off by a single
    pole at `fpole`.
    """
    dc_gain = 2 * power * gamma * finesse / FSR
    pole_term = 1 + 1j * ff / fpole
    return dc_gain / pole_term
4843079dba3bc6706c4431a7ca41250199e84141
333,532
def check_freq(dict_to_check, text_list):
    """Count each skill's occurrences across a list of posting strings.

    Params:
        dict_to_check: (dict) categories mapped to lists of skill strings,
            e.g. {'languages': ['Python', 'R'], ...}
        text_list: (list) posting strings to search in

    Returns:
        freq: (dict) {category: {skill: count}}
    """
    # One lowercase corpus; matching is case-insensitive substring counting.
    corpus = ' '.join(text_list).lower()

    freq = {}
    for category, skills in dict_to_check.items():
        counts = {}
        for skill in skills:
            needle = skill.lower()
            # Single-letter skills such as "R" are padded with spaces so
            # they only match as standalone words.
            if len(skill) == 1:
                needle = ' ' + needle + ' '
            counts[skill] = corpus.count(needle)
        freq[category] = counts
    return freq
c36813b876ff62b26c5caecd58dcafdd0bfc6ded
32,014
def format_serialized(serialized):
    """Reformat a serialized logical form to reduce its token count.

    Intent/slot labels are lower-cased and split into words; e.g.
    "[IN:SET_ALARM" becomes "[IN set alarm =". Close brackets are fused
    onto the preceding token so tokenization emits no extra space token.

    Args:
        serialized: Serialized logical form string

    Returns:
        formatted logical form string.
    """
    pieces = []
    for token in serialized.split():
        if token == "]":
            # No leading space: merge with whatever came before.
            pieces.append(token)
        elif token.startswith("["):
            label, name = token[1:].split(":")
            pieces.append(" [" + label)
            pieces.append(" " + name.lower().replace("_", " "))
            pieces.append(" =")
        else:
            pieces.append(" " + token)
    return "".join(pieces).strip()
a92fb4ca403a19730a595d2108e6b561e48f98d9
510,283
def _NextNondeletedLine(file_lines, line_number): """Returns the line number of the next not-deleted line, or None.""" for line_number in range(line_number + 1, len(file_lines)): if not file_lines[line_number].deleted: return line_number return None
99b163a5c6aac8db82df9fe3472da895820f8b81
369,066
def _has_medwall(model): """Check if structure has medial wall, which is when the model count is equal to the number of vertices. Always false for non surface models """ if ((model['type'] == 'CIFTI_MODEL_TYPE_SURFACE') and (model['count'] == model['n_indices'])): return True else: return False
7973ace19285aa28d636b08b787c492a0130ad1e
405,107
def paths_same_disk(photos_path: str, export_path: str) -> bool:
    """Tell whether the photo path and the export path share the same disk.

    Compares only the first character of each path (the drive letter),
    case-insensitively.

    :param photos_path: path to photos
    :type photos_path: str
    :param export_path: path to the directory where the photo folder
        structure will be created
    :type export_path: str
    """
    return photos_path[0].lower() == export_path[0].lower()
07476953bbaec5ad0045b064ea5ce09e246ebca1
681,135
def format_coords(df, col_to_convert, col_new_name):
    """
    Takes col_to_convert and creates a list of a set of strings that contain
    coordinates in an integer form.

    Parameters
    ----------
    df : df
        pd.DataFrame that contains Neotoma information.
    col_to_convert : string
        Name of column of df. This column has latitude and longitude coordinates
    col_new_name : string
        Name of new column of df.

    Returns
    -------
    df: pd.DataFrame
        pd.DataFrame with new column `col_new_name` which contains a list
        of unique positive integer coordinates.
    """
    # NOTE: mutates `df` in place (adds/overwrites `col_new_name`) in
    # addition to returning it.
    # Pipeline per cell (each cell is assumed to be an iterable of numeric
    # coordinates -- TODO confirm against callers):
    #   1. truncate each value to a non-negative int,
    #   2. stringify each int,
    #   3. strip '-' characters via a regex replace -- presumably a leftover
    #      safety net, since abs() already removed signs (NOTE(review):
    #      pandas .replace(regex=True) over list-valued cells; verify it
    #      behaves as intended on this data),
    #   4. de-duplicate (set) -- the resulting list order is unspecified.
    df[col_new_name] = df[col_to_convert]\
        .apply(lambda x: [int(abs(i)) for i in x])\
        .apply(lambda x: [str(i) for i in x])\
        .replace('-', '', regex=True)\
        .apply(lambda x: list(set(x)))
    return df
2c1c8591bdc968066603021bb4504fe17341edfd
618,126
from typing import List


def solution(board: List[List[int]]) -> int:
    """Return the area of the largest square of 1s in a 0/1 board.

    Uses the classic in-place DP: each cell becomes the side length of the
    largest square whose bottom-right corner sits there. The input board is
    modified in place.

    Args:
        board (List[List[int]]): 2-D array whose elements are 0 or 1.

    Returns:
        int: area of the largest all-1 square.
    """
    rows, cols = len(board), len(board[0])
    best = 0
    for r in range(rows):
        for c in range(cols):
            if r == 0 or c == 0:
                # First row/column: a cell can only start a 1x1 square.
                best = max(best, board[r][c])
                continue
            if board[r][c]:
                board[r][c] = min(board[r - 1][c - 1],
                                  board[r - 1][c],
                                  board[r][c - 1]) + 1
                best = max(best, board[r][c])
    return best * best
a34b7decafc1dc79bce566335e4edba7900e7744
24,592
def vect3_length_sqrd(v):
    """ Squared length of a 3d vector.

    v (3-tuple): 3d vector
    return (float): squared length
    """
    x, y, z = v[0], v[1], v[2]
    return x * x + y * y + z * z
1192808dfc5c52a5d57b9856d8b0a6e5067bfd8d
494,458
import re


def get_string_between(text, before, after):
    """Return the substring of *text* between *before* and *after*.

    *before* and *after* are treated as regex patterns. Returns None when
    no match is found.
    """
    match = re.search("%s(.*)%s" % (before, after), text)
    return match.group(1) if match else None
024eae6ed3d386ec3b3f79d0ca0c608da5c18d72
502,485
def new_value(start, end, frac):
    """Linearly interpolate within an interval.

    Args:
        start (number): Start of interval
        end (number): End of interval
        frac (number): Fraction of interval

    Returns:
        number: Value at the given fraction of the interval
    """
    span = end - start
    return span * frac + start
796e894322b5d090d854a7629f8dcd9755fcc6c7
389,158
def read_dysplasia_mappings(mappings):
    """Read dysplasia trigger->grade mappings from a TSV file.

    Each line is "trigger<TAB>grade<TAB>use_case1,use_case2,...". The grade
    field is itself comma-split into a list.

    Params:
        mappings (str): path to dysplasia mappings file

    Returns:
        a dict of {use_case: {trigger: [grades]}} for the four known
        use-cases (colon, cervix, celiac, lung).
    """
    result = {'colon': {}, 'cervix': {}, 'celiac': {}, 'lung': {}}
    with open(mappings, 'r') as handle:
        for line in handle:
            trigger, grade, use_cases = line.strip().split('\t')
            for use_case in use_cases.split(','):
                result[use_case][trigger] = grade.split(',')
    return result
a0801f1014176fa258f4cd8718617ebb3f4c7d19
210,865
def strip_position(pos):
    """Strip '+', '-' and surrounding blanks from a position value.

    The input is stringified first, so numbers are accepted too. Interior
    spaces are removed after the edge characters are stripped.
    """
    trimmed = str(pos).strip(' +-')
    return trimmed.replace(' ', '')
1097bf545d58cea0ef526683cf020be7678aa029
668,590
def authority(agent_authorities, other_agent, scale):
    """Trust value based on authority membership.

    Agents listed as authorities receive the scale's maximum value; every
    other agent receives the scale's minimum-to-trust value.

    :param agent_authorities: agents the current agent treats as authorities
    :type agent_authorities: list
    :param other_agent: the agent at the end of the trust relationship
    :type other_agent: str
    :param scale: the Scale object used by the agent
    :type scale: Scale
    :return: authority trust value
    :rtype: float or int
    """
    is_authority = other_agent in agent_authorities
    if is_authority:
        return scale.maximum_value()
    return scale.minimum_to_trust_others()
708b61d03948db4b9d7fcc0914da83d94c8c414e
478,357
import re


def replace_fullwidth_alpha_numeral_to_halfwidth(text: str):
    """
    Replace full-width alpha-numeral characters to half-width characters.

    Args:
        text (str): Text to replace.

    Returns:
        str: Replaced text.
    """
    # Full-width forms live at U+FF01..U+FF5E, exactly 0xFEE0 above their
    # ASCII counterparts, so subtracting 0xFEE0 maps each matched character
    # back to half-width.
    # NOTE(review): the character class mixes full-width and ASCII-looking
    # glyphs; confirm it covers exactly the intended full-width A-Z, a-z,
    # 0-9 ranges and nothing else.
    return re.sub(r'[οΌ‘-Za-z0-οΌ™]', lambda mathobj: chr(ord(mathobj.group(0)) - 0xFEE0), text)
acf0d45e63a6595955c3da1d9f70424ea95c8d95
670,815
from typing import Union


def is_false(val: Union[str, int]) -> bool:
    """Decide if `val` is false.

    The value is stringified, stripped, and upper-cased, then compared
    against common falsy spellings ("0", "FALSE", "NO", "NONE", "", ...).

    Arguments:
        val: Value to check.

    Returns:
        True or False.
    """
    normalized = str(val).strip().upper()
    return normalized in ("0", "FALSE", "F", "N", "NO", "NONE", "")
b2b532bbd56bfd1b7cf724da35792044c00f2dc0
527,020
def kgtk_empty_to_null(x):
    """Map the empty string onto NULL (None); pass anything else through
    unmodified.
    """
    return None if x == '' else x
ce345332733bb963e3bb362700b7a7c152db55c0
390,489
import struct


def signed_int_to_unsigned_hex(signed_int: int) -> str:
    """Converts a signed int value to a 64-bit hex string.

    Examples:
        1662740067609015813  => '17133d482ba4f605'
        -5270423489115668655 => 'b6dbb1c2b362bf51'

    :param signed_int: an int to convert
    :returns: unsigned hex string
    """
    # Reinterpret the 64-bit signed bit pattern as unsigned.
    packed = struct.pack('q', signed_int)
    (unsigned,) = struct.unpack('Q', packed)
    hex_string = format(unsigned, 'x')
    # Defensive trim of a Python-2-style long suffix.
    if hex_string.endswith('L'):
        return hex_string[:-1]
    return hex_string
3ac1e3dfbeeb88b07d48fdaa05085209378b4440
497,435
def extract_comment(line):
    """Extract the comment text from a line like `# This is comment`.

    Leading '#' characters are dropped and the remainder is stripped.
    Raises ValueError when the line does not start with '#'. A line made
    entirely of '#' characters yields None.
    """
    if not line.startswith("#"):
        raise ValueError
    remainder = line.lstrip("#")
    if not remainder:
        # Nothing but '#' characters -- no comment text to return.
        return None
    return remainder.strip()
eea712516ca61b567edd5381f05263e3db7fcdf4
365,412
def fillNaToCustom(data):
    """Replace NA values in the 'Functional' column with "Typ".

    Parameters:
        data (pd.DataFrame): dataset containing a 'Functional' column

    Returns:
        pd.DataFrame: the same dataset with NA 'Functional' values set to "Typ"
    """
    data["Functional"] = data["Functional"].fillna(value="Typ")
    return data
a7cc65503297a5b9bf740ed0ec41d643b59ea1f8
568,889
from datetime import datetime


def strftime(time):
    """Format a datetime in the RFC 3339 date-time format.

    https://tools.ietf.org/html/rfc3339

    %z yields an offset like "+0000"; a ":" is spliced in at index 22 to
    produce the RFC 3339 "+00:00" form.
    """
    raw = datetime.strftime(time, "%Y-%m-%dT%H:%M:%S%z")
    return raw[:22] + ":" + raw[22:]
bed55ed66a2bdf61939141adb6e4e1086733c305
181,714
def toChunk(data):
    """
    Convert string to an HTTP chunked-transfer chunk.

    @returns: a tuple of strings (hex length line, data, trailing CRLF)
              representing the chunked encoding of data
    """
    size_line = "%x\r\n" % len(data)
    return (size_line, data, "\r\n")
d11b043e631b30755ac5bc796edc9edf9edf24f5
44,363
def even_or_odd(x=0):
    """Report whether the number x is even or odd (defaults to 0)."""
    if x % 2:
        return "%d is Odd!" % x
    return "%d is Even!" % x
dca3e91eac2498aea5f21375bd90381c2ff42022
326,123
def convert_masking(sentences):
    """Convert hash masking to internal [MASK] tokens.

    Every '#' character in each sentence is replaced by the literal
    "[MASK]" token.
    """
    return [sentence.replace("#", "[MASK]") for sentence in sentences]
2ae095b06694d8f592159f5f5eee6491fe479d23
627,572