content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
def pg_db_name(worker_id):
    """Build the auto-generated Postgres database name for an xdist worker.

    One database is generated per worker, e.g. ``test_2`` for worker-2.
    """
    return "test_{}".format(worker_id)
import math
def highest_bit(num):
    """highest_bit(num) -> n

    Determine the 1-based position of the highest set bit of ``num``.
    Example: 0x800000 returns 24; 0 returns 0.

    The previous log10-based computation was subject to floating-point
    rounding for large integers and raised ValueError (not the caught
    OverflowError) for num == 0 on Python 3; int.bit_length() is exact
    for integers of any size.
    """
    return num.bit_length()
import json
def args_as_string(*args, **kwargs):
    """Render positional and keyword arguments as a single string.

    :param args: each member must support repr().
    :param kwargs: order-insensitive; each member must be JSON serializable.
    :returns: str -- string representation of all arguments.
    """
    rendered_args = ", ".join(repr(arg) for arg in args)
    rendered_kwargs = json.dumps(kwargs, ensure_ascii=False, sort_keys=True)
    return f"args: {rendered_args}, kwargs: {rendered_kwargs}"
def get_model_parameters(model):
    """Return the parameters of a sklearn LogisticRegression model.

    The intercept is included in the tuple only when the model was fit
    with one.
    """
    if not model.fit_intercept:
        return (model.coef_,)
    return (model.coef_, model.intercept_)
def join_population_data(daily_data, population_data):
    """
    Left-join population figures onto the daily observations.

    Parameters
    ----------
    daily_data : df
        dataframe of daily observation
    population_data : df
        dataframe of population
    Returns
    -------
    merged df
        merged dataframe from daily_data and population_data
    """
    merged = daily_data.merge(population_data, how='left', on='Country/Region')
    return merged
def clean(text):
    """Clean text of any weird characters."""
    stripped = (word.strip('/-_<>&') for word in text.replace('\n', ' ').split())
    return ' '.join(word for word in stripped if word)
def strip_linebreaks(text):
    """Collapse each double linebreak into a single one.

    :param text: the text to perform the replacement within
    :returns: the text with "\\n\\n" sequences replaced by "\\n"
    """
    newline = "\n"
    return text.replace(newline * 2, newline)
import uuid
def isUuid(value):
    """Return True when ``value`` can be parsed as a UUID."""
    try:
        uuid.UUID(value)
    except (ValueError, TypeError):
        return False
    return True
def groups_categories_string(groups):
    """Returns a string of group categories."""
    names = (group.category.name for group in groups)
    return ', '.join(names)
from pathlib import Path
def exists(path) -> bool:
    """
    Check whether ``path`` refers to an existing directory or regular file.

    Args:
        path: path or file; falsy values yield False
    """
    if not path:
        return False
    target = Path(path)
    return target.is_dir() or target.is_file()
def get_positional(args, kw, kw_overrides=False):
    """Interpolates keyword arguments into argument lists.

    Keywords of the form "_0", "_1", etc. are positionally interpolated
    into the argument list; all other keywords pass through unchanged.

    Args:
        args: argument list
        kw: keyword dictionary
        kw_overrides: if true, positional keywords replace existing values
    Returns:
        (new_args, new_kw), new argument list and keyword dictionary
        with values interpolated.
    """
    positional = {k: v for k, v in kw.items() if k[0] == "_"}
    if not positional:
        # Nothing to interpolate; hand back the originals untouched.
        return args, kw
    new_kw = {k: v for k, v in kw.items() if k[0] != "_"}
    new_args = list(args)
    for key, value in positional.items():
        index = int(key[1:])
        if len(new_args) <= index:
            # Grow the list so the target slot exists.
            new_args.extend([None] * (index + 1 - len(new_args)))
        if kw_overrides or new_args[index] is None:
            new_args[index] = value
    return new_args, new_kw
def n_prds_per_week_init(M):
    """
    Initialize convenience parameter n_prds_per_week
    (days per week times periods per day).
    """
    days_per_week = M.n_days_per_week()
    prds_per_day = M.n_prds_per_day()
    return days_per_week * prds_per_day
import math
def get_vector_coords(x1, y1, length, angle_deg):
    """Find the end point coordinates of a vector.

    :param x1: start point x
    :param y1: start point y
    :param length: vector magnitude
    :param angle_deg: vector angle (degrees)
    :return: [start_x, end_x], [start_y, end_y]
    """
    # The angle is mirrored about the y-axis (pi - angle), matching the
    # original plotting convention.
    mirrored = math.pi - math.radians(angle_deg)
    end_y = y1 + length * math.sin(mirrored)
    end_x = x1 + length * math.cos(mirrored)
    return [x1, end_x], [y1, end_y]
def histogram(values, mode=0, bin_function=None):
    """Return a list of (value, count) pairs, summarizing the input values.
    Sorted by increasing value, or if mode=1, by decreasing count.
    If bin_function is given, map it over values first."""
    if bin_function:
        values = map(bin_function, values)
    counts = {}
    for value in values:
        counts[value] = counts.get(value, 0) + 1
    if not mode:
        return sorted(counts.items())
    return sorted(counts.items(), key=lambda pair: (pair[1], pair[0]),
                  reverse=True)
def summary_example(field):
    """Return an example of a value in the summary of the field.

    The first value of whichever known distribution is present is used;
    None is returned when no distribution key exists in the summary.
    """
    summary = field["summary"]
    for key in ("categories", "counts", "bins", "tag_cloud", "items"):
        if key in summary:
            return repr(summary[key][0][0])
def merge_into_dict(original, secondary):
    """Merge ``secondary`` into ``original`` and return ``original``.

    Thin convenience wrapper around dict.update that returns the mutated
    dict (instead of None) so the call can be chained.

    Args:
        original: The dict which will be updated.
        secondary: The dict which will be copied.
    Returns:
        The updated original dictionary (same object).
    """
    original.update(secondary)
    return original
import binascii
def bin_compare_region(fn1, start1, end1, fn2, start2, end2):
    """Compare a byte region of two files, printing any mismatched chunks.

    :param fn1: the first file to compare.
    :param start1: start address within fn1.
    :param end1: end address within fn1.
    :param fn2: the second file to compare.
    :param start2: start address within fn2.
    :param end2: end address within fn2.
    :returns rtn: True when the regions match, else False.
    """
    rtn = True
    # Only the overlapping amount of the two regions is compared.
    size_bytes = min(end1 - start1, end2 - start2)
    with open(fn1, 'rb') as f1, open(fn2, 'rb') as f2:
        f1.seek(start1)
        f2.seek(start2)
        total, bufsize = 0, 16
        while total < size_bytes:
            temp1, temp2 = f1.read(bufsize), f2.read(bufsize)
            total += bufsize  # was a hard-coded 16 duplicating bufsize
            if temp1 != temp2:
                print("index: 0x%x, 0x%x" % (start1 + total, start2 + total))
                print("%-30s" % fn1, binascii.hexlify(temp1))
                print("%-30s" % fn2, binascii.hexlify(temp2))
                rtn = False
    return rtn
from pathlib import Path
def project_dir(tests_dir) -> Path:
    """Root directory of the project (two levels above the tests dir)."""
    return tests_dir.parent.parent
def prefer_insertions_at_309_and_315(mb):
    """Prefer alternatives that include 309.1C or 315.1C over others.

    There are two multi-C runs at the beginning of the 300's and by
    convention, any insert in one of those runs is pushed to the end.
    Mostly, the other rules will pick these, but in some circumstances
    with other substitutions or deletions in this area, these won't get
    picked - although we want them to. Thus, this special case preference.

    ``mb`` (the mismatch block: a list of alternate alignments) is
    filtered in place and returned.
    """
    special_cases = ('309.1C', '315.1C')
    if len(mb) > 1:
        # Score each alternate alignment by how many special variants it holds.
        scores = [sum(1 for variant in alignment if str(variant) in special_cases)
                  for alignment in mb]
        best = max(scores)
        if best > 0:
            # Keep only the top scorers; slice-assignment preserves identity
            # so callers holding a reference to mb see the filtered list.
            mb[:] = [alignment for alignment, score in zip(mb, scores)
                     if score == best]
    return mb
def discard_after_character(url, character='#'):
    """Return ``url`` truncated at the first occurrence of ``character``."""
    head, _, _ = url.partition(character)
    return head
def fit_model_segments(segmented_dataset_dict, fit_segment):
    """Fit a model to every segment in a dataset.

    Parameters
    ----------
    segmented_dataset_dict : :any:`dict` of :any:`pandas.DataFrame`
        A dict with keys as segment names and values as dataframes of model input.
    fit_segment : :any:`function`
        A function which fits a model to one segment's data.
    Returns
    -------
    segment_models : :any:`list` of :any:`object`
        List of fitted model objects - the return values of the fit_segment function.
    """
    segment_models = []
    for segment_name, segment_data in segmented_dataset_dict.items():
        segment_models.append(fit_segment(segment_name, segment_data))
    return segment_models
def altsumma(f, k, p):
    """Return the sum of f(i) from i=k, k+1, ... till p(i) holds true or 0.

    This is an implementation of the Summation formula from Kahan,
    see Theorem 8 in Goldberg, David 'What Every Computer Scientist
    Should Know About Floating-Point Arithmetic', ACM Computer Survey,
    Vol. 23, No. 1, March 1991.

    :param f: callable mapping an index to a (float) term.
    :param k: first index to evaluate.
    :param p: predicate; summation continues while p(index) is true.
    """
    if not p(k):
        # Empty sum: the predicate already fails for the first index.
        return 0
    else:
        S = f(k)  # running compensated sum
        C = 0     # running compensation for lost low-order bits
        j = k + 1
        while p(j):
            Y = f(j) - C      # next term, corrected by the compensation
            T = S + Y         # naive new sum; low-order bits of Y may be lost
            C = (T - S) - Y   # recover exactly what was lost (algebraically 0)
            S = T
            j += 1
        return S
import json
import logging
def process_covid_api_data(area_type: str) -> tuple:
    """ Function to process data from the covid API.
    Loads data from json files - 'region_covid_data.json' and
    'nation_covid_data.json'. Calculates 7 day infection rate
    based on api data.
    Arguments:
        area_type: the area that the data is being fetched for, determines how data is processed
            ('region' reads the region file; anything else reads the nation file)
    Returns:
        seven_day_rate: Cumulative cases in last 7 days, excluding empty and
        incomplete cell
        hospital_cases: current number of hospitalisations (nation only)
        deaths: total number of deaths in desired region (nation only)
    """
    seven_day_rate = 0
    no_of_days_ignore = 2 # the number of days of data skipped due to no data uploaded
    no_of_days_counted = 0
    hospital_cases = None
    deaths = None
    # load covid data from json
    if area_type == 'region':
        with open('region_covid_data.json', 'r', encoding='UTF-8') as covid_json:
            covid_data = json.load(covid_json)
    else:
        with open('nation_covid_data.json', 'r', encoding='UTF-8') as covid_json:
            covid_data = json.load(covid_json)
    covid_data_entries = covid_data['data']
    # Entries are walked in file order; the first `no_of_days_ignore` days
    # are skipped before the 7-day window starts accumulating.
    for daily_data in covid_data_entries:
        if daily_data['newCasesBySpecimenDate'] is None: # if the data column is empty
            no_of_days_ignore +=1
            logging.warning("Warning: More case data than expected empty.\
            Data for %s missing. API may not be up to date.", str(daily_data['date']))
            # adds an extra day to skip over to find complete data for 7 day rate
        # One skip-day is consumed for every entry processed.
        no_of_days_ignore -= 1
        # NOTE(review): if newCasesBySpecimenDate is None *after* the skip
        # window has been consumed, the accumulation below would add None
        # and raise TypeError -- confirm the API never produces that.
        if area_type == 'nation': # if data is for nation, more is processed
            # Take the first non-None value seen for deaths/hospitalisations.
            if deaths is None and daily_data['cumDailyNsoDeathsByDeathDate'] is not None:
                deaths = daily_data['cumDailyNsoDeathsByDeathDate']
            if daily_data['hospitalCases'] is not None and hospital_cases is None:
                hospital_cases = daily_data['hospitalCases']
        # Accumulate at most 7 days (counted 0..6) once the skip window is spent.
        if no_of_days_ignore <= 0 and no_of_days_counted <= 6:
            seven_day_rate += daily_data['newCasesBySpecimenDate']
            no_of_days_counted +=1
    if area_type == 'nation': # returns data
        logging.info('Returning API data for: %s', area_type)
        return 'England', seven_day_rate, hospital_cases, deaths
    else:
        logging.info('Returning API data for: %s', area_type)
        return 'Exeter', seven_day_rate
def revert(vocab, indices):
    """Convert word indices into words, using 'X' for unknown indices."""
    words = []
    for index in indices:
        words.append(vocab.get(index, 'X'))
    return words
import configparser
def get_default(parser, section, option, default):
    """helper to get config settings with a default if not present"""
    try:
        return parser.get(section, option)
    except (configparser.NoSectionError, configparser.NoOptionError):
        return default
def _find_by_prior_token(tokens, keys):
"""
If any key in keys appears in tokens, return the token following said key.
If more than one key in keys matches, use the first matching key in keys.
Otherwise, return None.
"""
# Determine which key matches
match_key = None
for key in keys:
if key in tokens:
match_key = key
break
else:
return None
# Determine the index of the following token
index = tokens.index(match_key) + 1
if index >= len(tokens):
return None
return tokens[index] | 6d1ffb604b0236e2faa229d6f4552378b6cdec2a | 231,059 |
def _create_remove_item_task(title, filepath, videoid):
"""Create a single task item"""
return {
'title': title,
'filepath': filepath,
'videoid': videoid
} | fcd7a9cd61ae2e6f96dfeed2d4574441c9c50758 | 243,795 |
def len_at_start(l, elem):
    """Return the number of leading items in ``l`` equal to ``elem``.

    Equivalently, the index of the first item that does not match ``elem``.
    When every item matches, the full length is returned; the previous
    version fell through to 0 in that case, contradicting both the
    function name and the "index of first non-match" contract.
    """
    for idx, current in enumerate(l):
        if current != elem:
            return idx
    return len(l)
def graph_to_dot(graph, **kwargs):
    """Render a graph as dot-language source.

    ``graph`` is expected to be a dict of the form
    ``{'nodes': list(), 'edges': list()}``; each node must provide a
    ``shortStr()`` method. Returns a string suitable as input to dot.
    """
    title = kwargs.get('title', '')
    # Template, including setup and formatting:
    template = """digraph G {
    ratio = "fill" ;
    size = "4.0, 4.0" ;
    page = "4, 4" ;
    margin = 0 ;
    mincross = 2.0 ;
    rankdir = "BT" ;
    nodesep = 0.25 ;
    ranksep = 0.25 ;
    node [fontname="Helvetica", fontsize=10,
    shape=oval, style=filled, fillcolor=white ] ;
    // The nodes
    %s
    // The edges
    %s
    // The title
    labelloc="t";
    label="%s";
    }"""
    # Nodes (labels have double quotes escaped for dot):
    node_lines = []
    for node in graph['nodes']:
        label = node.shortStr().replace(r'"', r'\"')
        node_lines.append('"%s" [label="%s"] ;' % (id(node), label))
    node_str = '\n    '.join(node_lines)
    # Edges:
    edge_lines = ['"%s" -> "%s" ;' % (id(src), id(dst))
                  for (src, dst) in graph['edges']]
    edge_str = '\n    '.join(edge_lines)
    return template % (node_str, edge_str, title)
def bytes_to_str(input_bytes):
    """Decode a bytes object into a string (default UTF-8 decoding)."""
    return input_bytes.decode()
def title(file):
    """Title of the file (its ``name`` attribute)."""
    return file.name
def filter_benchmarks(experiment_df, included_benchmarks):
    """Return only the rows whose benchmark is in |included_benchmarks|."""
    mask = experiment_df['benchmark'].isin(included_benchmarks)
    return experiment_df[mask]
def format_line_protocol(measurement: str,field: dict,tags: dict={}) -> str:
    """Convert the input into InfluxDB line-protocol format.

    Args:
        measurement (str): The overarching "thing" being monitored.
        field (dict): Exactly one metric name/value pair for the measurement.
        tags (dict, optional): Extra attributes to attach. Defaults to {}.
    Raises:
        Exception: More (or fewer) than one field/metric was specified.
    Returns:
        str: Line protocol formatted string ready to be pushed to InfluxDB.

    Example:
        >>> format_line_protocol('serverroom', {'humidity': 0.99},
        ...                      {'category': 'general'})
        'serverroom,category=general humidity=0.99'
    """
    if len(field) != 1:
        raise Exception("Multiple fields/metrics specified. Please format each field/metric separately.")
    parts = [measurement]
    parts.extend(f',{tag}={tags[tag]}' for tag in tags)
    (metric, value), = field.items()
    return ''.join(parts) + f' {metric}={value}'
from typing import Tuple
def determine_rep(heads, tails) -> Tuple[str, str]:
    """Determine single-character representations of each binary value.

    Parameters
    ----------
    heads: ``Face``
        The ``1`` abstraction
    tails: ``Face``
        The ``0`` abstraction
    Returns
    -------
    heads_rep, tails_rep : ``str``
        First characters of str(heads)/str(tails); "1"/"0" are used when
        those first characters collide.
    """
    first_chars = (str(heads)[0], str(tails)[0])
    if first_chars[0] == first_chars[1]:
        return "1", "0"
    return first_chars
def isInt(string):
    """ is the given string an integer? Returns 1/0. """
    try:
        int(string)
    except ValueError:
        return 0
    return 1
def _to_extended_delta_code(seconds):
"""Return the deltaCode encoding for the ExtendedZoneProcessor which is
roughtly: deltaCode = (deltaSeconds + 1h) / 15m. With 4-bits, this will
handle deltaOffsets from -1:00 to +2:45.
"""
return f"({seconds // 900} + 4)" | e24189c73900cceaa125f6f471633362ec82e27c | 630,479 |
def round2ArbatraryBase(value, direction, roundingBase):
    """Round value up or down to an arbitrary base.

    Parameters
    ----------
    value : float
        Value to be rounded.
    direction : str
        Round "up", "down", or to the "nearest" multiple of the base
        (only the first letter is significant; anything else means nearest).
    roundingBase : int
        Rounding base. (Example: if base = 5, values are rounded to a
        multiple of 5.)
    Returns
    -------
    rv : int
        Rounded value.
    """
    remainder = value % roundingBase
    if direction.lower().startswith("u"):
        # Round up; a value already on a multiple of the base stays put
        # (the previous version bumped exact multiples a full base upward).
        rv = value if remainder == 0 else value + (roundingBase - remainder)
    elif direction.lower().startswith("d"):
        rv = value - remainder  # round down to nearest base
    else:
        rv = int(roundingBase * round(float(value) / roundingBase))  # nearest
    return rv
def crop_center(img, new_shape):
    """
    Crop an image equally on each side to produce ``new_shape``.

    Args:
        img (numpy array): 2D array to crop
        new_shape: desired (rows, cols) shape of the return
    Returns:
        numpy array: array cropped according to shape
    """
    # Integer division is required here: the previous `/ 2` produced float
    # offsets, and float slice indices raise TypeError on Python 3.
    row_off = (img.shape[0] - new_shape[0]) // 2
    col_off = (img.shape[1] - new_shape[1]) // 2
    return img[row_off:row_off + new_shape[0], col_off:col_off + new_shape[1]]
import itertools
from typing import Tuple
def itertools_product(arr_1: Tuple[int, ...],
                      arr_2: Tuple[int, ...]) -> Tuple[Tuple[int, ...], ...]:
    """Cartesian product of two tuples, as a tuple of pairs.

    >>> itertools_product((1, 2), (3, 4))
    ((1, 3), (1, 4), (2, 3), (2, 4))
    """
    pairs = itertools.product(arr_1, arr_2)
    return tuple(pairs)
def build_header(token):
    """Builds the headers part of the request to Web Services API."""
    return {
        'Accept': 'application/json;odata.metadata=minimal',
        'Connection': 'Keep-Alive',
        'Host': 'services-api.lexisnexis.com',
        'Authorization': 'Bearer ' + token,
    }
def mean(rv, max_iter=int(1e5), tol=1e-5):
    """
    Return the mean of `rv`.

    In general computed numerically using up to `max_iter` successive
    approximations or until these approximations no longer change by
    more than `tol`; subclasses of `RandomVariable` may override `mean`
    to produce an exact value.

    :param rv: RandomVariable
    :param max_iter: int
    :param tol: float
    """
    return rv.mean(max_iter=max_iter, tol=tol)
def create_module(project, name, sourcefolder=None):
    """Creates a module and returns a `rope.base.resources.File`"""
    parent = project.root if sourcefolder is None else sourcefolder
    # Walk the dotted package path; the final component is the module.
    *package_path, module_name = name.split('.')
    for package in package_path:
        parent = parent.get_child(package)
    return parent.create_file(module_name + '.py')
from typing import List
import hashlib
def hashes() -> List[str]:
    """
    Return a list of available hashing algorithms.

    :rtype: list of strings
    """
    available = dir(hashlib)
    legacy = []
    if 'md5' in available:
        legacy = ['MD5']
    if 'md2' in available:
        legacy += ['MD2']
    sha_names = ['SHA-' + name[3:] for name in available
                 if name.startswith('sha')]
    return legacy + sha_names
from typing import List
import math
def _get_min_step_size(tick_values: List[float]) -> float:
    """Get the minimum step size given a list of tick vales.
    Tick values must be in ascending order.
    Args:
        tick_values (List[float]): The tick values in ascending order.
    Raises:
        ValueError: If ``tick_values`` are not found to be in ascending
            order.
    Returns:
        float
    """
    # Pair each tick with its successor and pick the pair with the smallest
    # gap (ties resolve to the first such pair).
    # NOTE: min() raises ValueError when fewer than two ticks are supplied.
    left_tick, right_tick = min(
        zip(tick_values[:-1], tick_values[1:]),
        key=lambda tick_pair: tick_pair[1] - tick_pair[0],
    )
    min_tick_step = right_tick - left_tick
    if min_tick_step < 0:
        # A negative gap means the input was not ascending.
        raise ValueError(
            "Ticks must be in ascending order."
            f" {left_tick} is greater than {right_tick}"
        )
    # Round off in case of floating point errors
    elif min_tick_step != 0:
        # If the step is within 0.1% of the next power of ten, snap to it.
        min_tick_step_place = math.floor(math.log10(min_tick_step))
        next_place_up = 10 ** (min_tick_step_place + 1)
        if ((next_place_up - min_tick_step) / min_tick_step) < 0.001:
            min_tick_step = next_place_up
    return min_tick_step
from typing import TextIO
def skip_header(reader: TextIO) -> str:
    """Skip the header in reader and return the first real piece of data.

    >>> infile = StringIO('Example\\n# Comment\\n# Comment\\nData line\\n')
    >>> skip_header(infile)
    'Data line\\n'
    """
    reader.readline()  # discard the description line
    # Skip '#' comment lines; the first non-comment line (or '' at EOF)
    # is the real data.
    line = reader.readline()
    while line.startswith('#'):
        line = reader.readline()
    return line
def _add_text_axes(axes, text):
"""Use a given axes to place given text."""
txt = axes.text(0.5, 0.5, text, ha='center', va='center')
axes.axis('off')
return txt | b8370a92a3c589044e45a5d295015e3afd5f35e2 | 370,534 |
def sed(app: str, pattern: str, repl: str):
    """sed with extended regex and edit file in-place"""
    # Escape characters that are special to the shell or to the s=...=...=
    # delimiter before embedding the replacement in the command line.
    for char, escaped in (('$', '\\$'), ('&', '\\&'), ('=', '\\='),
                          ('`', '\\`'), ('"', '\\"'), ('\n', '\\n')):
        repl = repl.replace(char, escaped)
    return f'sed -r "s={pattern}={repl}=" -i /tmp/daemon_{app}.sh'
def convert_8_to_16(value):
    """Scale an 8 bit level into 16 bits by repeating the byte."""
    return value | (value << 8)
def _is_udp_in_ipv4(pkt):
"""If UDP is in IPv4 packet return True,
else return False. False is returned also if exception occurs."""
ipv4_type = int('0x0800', 16) # IPv4
try:
if pkt.type == ipv4_type:
if pkt.payload.proto == 17: # UDP
return True
except: # pylint: disable=bare-except
return False
return False | b9ccebecd72b5336fea4ed9ec3e38490294ab126 | 193,376 |
def read_file(file_path):
    """
    Open the file and return the contents.

    Parameters
    ----------
    file_path : str
        The path of the file.
    Returns
    ----------
    file_data : str
        Data in the file, or None when the file does not exist
        (a message is printed in that case).
    """
    try:
        with open(file_path, 'r', encoding="utf-8_sig") as target_file:
            return target_file.read()
    except FileNotFoundError:
        print('File not Found!')
        return None
def every_n(sequence, n=1):
    """
    Iterate ``sequence`` in chunks (tuples) of ``n`` items; a trailing
    remainder shorter than ``n`` is dropped.

    :param sequence: iterable sequence
    :param n: number of items per chunk
    """
    cursor = iter(sequence)
    # zip over n references to the same iterator consumes n items per tuple.
    return list(zip(*([cursor] * n)))
import math
def haversine_bearing(lat1, lon1, lat2, lon2):
    """
    Calculate the initial bearing (degrees in [0, 360)) from point 1 to
    point 2 on the earth (coordinates specified in decimal degrees).
    """
    # convert decimal degrees to radians
    lat1, lon1, lat2, lon2 = (math.radians(v) for v in (lat1, lon1, lat2, lon2))
    dlon = lon2 - lon1
    bearing_rad = math.atan2(
        math.sin(dlon) * math.cos(lat2),
        math.cos(lat1) * math.sin(lat2)
        - math.sin(lat1) * math.cos(lat2) * math.cos(dlon))  # bearing calc
    bearing_deg = math.degrees(bearing_rad)
    _, normalized = divmod(bearing_deg + 360, 360)  # wrap into [0, 360)
    return normalized
def CreateNetworkResourceFromArgs(messages, network_ref, network_args):
    """Creates a new network resource from flag arguments."""
    network = messages.Network(name=network_ref.Name(),
                               description=network_args.description)
    if network_args.subnet_mode == 'LEGACY':
        network.IPv4Range = network_args.range
    else:
        # AUTO creates subnets automatically; CUSTOM (or anything else) does not.
        network.autoCreateSubnetworks = network_args.subnet_mode == 'AUTO'
    if network_args.bgp_routing_mode:
        routing_config = messages.NetworkRoutingConfig()
        routing_config.routingMode = (
            messages.NetworkRoutingConfig.RoutingModeValueValuesEnum(
                network_args.bgp_routing_mode))
        network.routingConfig = routing_config
    return network
def _amount(amount, asset='BBD'):
"""Return a dpay-style amount string given a (numeric, asset-str)."""
if asset == 'BBD':
return "%.3f BBD" % amount
raise Exception("unexpected %s" % asset) | bc6a6b10c2a6c8e440f57dfac60786d2b6b18c50 | 470,044 |
from pathlib import Path
import shutil
def copy_file(orig_filepath, copy_filename):
    """Copy a file into the same directory and return the copy's filepath.

    The previous implementation first copied the file onto its own path
    (shutil.copy into the source's own directory keeps the source name),
    which raises shutil.SameFileError; the copy is now written directly
    under its new name.

    :param orig_filepath: Filepath of file to copy
    :param copy_filename: New filename of copied file
    :return: Filepath (Path) of copied file
    """
    orig_dir = Path(orig_filepath).parent.absolute()
    copy_filepath = orig_dir / copy_filename
    shutil.copy(orig_filepath, copy_filepath)
    return copy_filepath
def isnum(a):
    """ Test if a string is all numbers [0-9] (alphanumeric but not alphabetic). """
    if a.isalpha():
        return False
    return a.isalnum()
def get_chromosome_sizes(fasta_path):
    """Return a dict mapping chromosome name to its size in base pairs.

    Args:
        fasta_path (str): Path of the reference genome .fasta. The
            corresponding .fai index file is loaded for the chrom. sizes.
    Return:
        dict: chromosome name to size in base pairs.
    """
    sizes = {}
    with open(f"{fasta_path}.fai", "rt") as fai_file:
        for line in fai_file:
            # .fai columns: name, length, offset, linebases, linewidth
            fields = line.split()
            sizes[fields[0]] = int(fields[1])
    return sizes
def h(host_num: int) -> str:
    """ Returns the host name for a given host number """
    return "h" + str(host_num)
def gen_data_dict(data, columns):
    """Fill expected data tuple based on columns list; missing keys
    become empty strings."""
    values = [data.get(attr, '') for attr in columns]
    return tuple(values)
def valid_service(value):
    """Test if a service is a valid format.

    A valid service contains a '.' separator (domain.service) and no spaces.
    """
    return '.' in value and ' ' not in value
import math
def name_generated_task(parent_name, task_index=None, total_tasks=0, variant=None):
    """
    Create a zero-padded sub-task name.

    :param parent_name: Name of the parent task.
    :param task_index: Index of this sub-task (None yields the "_misc" task).
    :param total_tasks: Total number of sub-tasks being generated.
    :param variant: Build variant to run task in.
    :return: Zero-padded name of sub-task.
    """
    suffix = f"_{variant}" if variant else ""
    if task_index is None:
        return f"{parent_name}_misc{suffix}"
    # Pad the index so all generated names sort lexicographically.
    index_width = int(math.ceil(math.log10(total_tasks)))
    return f"{parent_name}_{str(task_index).zfill(index_width)}{suffix}"
from typing import Optional
def get_charset(content_type: str) -> Optional[str]:
    """
    Get the charset from a content type header.

    Parameters
    ----------
    content_type: :class:`str`
        The content type header to get the charset from.

    Returns
    -------
    Optional[:class:`str`]
        The charset, or ``None`` if none was found.
    """
    # Inspect every parameter explicitly: the previous "split into exactly
    # two pieces" approach crashed (unpack error) on headers with more than
    # one parameter and returned the wrong value when the single parameter
    # was not a charset (e.g. "multipart/form-data; boundary=...").
    for param in content_type.split(';')[1:]:
        key, sep, value = param.strip().partition('=')
        if sep and key.strip().lower() == 'charset':
            return value
    return None
def identifier_clean(value: str) -> str:
    """Clean the identifier key: drop dashes and colons, then uppercase."""
    return value.translate(str.maketrans("", "", "-:")).upper()
def format_grid(grid):
    """
    Format each row as its values delimited by single spaces, one row
    per line (each line newline-terminated).

    :param list grid: input with nested lists to format
    """
    # str() cast lets rows contain non-string values
    rows = (' '.join(str(item) for item in row) for row in grid)
    return ''.join(row + '\n' for row in rows)
def passedCredits(classes):
    """Calculates the number of credits a student passed.

    :param dict classes:
        The class information. Format:
        classes = {className: {"grade": grade, "credits": numCredits}}
    :return:
        The number of credits the student passed (any grade but "F").
    :rtype: int
    """
    return sum(info["credits"] for info in classes.values()
               if info["grade"] != "F")
def update_and_return(dictionary: dict, **kwargs) -> dict:
    """
    dict.update() that returns the updated dictionary instead of None.

    :param dictionary: The dictionary that should be updated
    :param kwargs: Kwargs of the update method
    :return: The updated dictionary (the same object, enabling chaining)
    """
    dictionary.update(**kwargs)
    return dictionary
def gcd_recur(a, b):
    """
    Euclid's algorithm, expressed recursively.

    a, b: positive integers
    returns: a positive integer, the greatest common divisor of a & b.
    """
    return a if b == 0 else gcd_recur(b, a % b)
def _get_high_res_img_url(img_url):
""" Returns a modified url pointing to the high resolution version of
the image
>>> print(_get_high_res_img_url("https://images-na.ssl-images-amazon.com/\
images/I/513gErH1dML._AC_SX236_SY340_FMwebp_QL65_.jpg"))
https://images-na.ssl-images-amazon.com/\
images/I/513gErH1dML.jpg
>>> print(_get_high_res_img_url("https://images-na.ssl-images-amazon.com/\
images/I/51F48HFHq6L._AC_SX118_SY170_QL70_.jpg"))
https://images-na.ssl-images-amazon.com/\
images/I/51F48HFHq6L.jpg
"""
high_res_url = img_url.split("._")[0] + ".jpg"
return high_res_url | 8f16c2c603339f4e01a7f66e3859fafe5ff28b39 | 534,808 |
import math
def scientific2int(count):
    """Convert scientific notation (e.g. '1.5e+3') to an integer."""
    mantissa, exponent = count.split('e+')
    return int(float(mantissa) * float(math.pow(10, int(exponent))))
import re
def is_valid_iqn(iqn):
    """
    Validates if an iSCSI/iSER IQN is well formed.

    :type iqn: str
    :param iqn: The IQN to validate. For example:
                'iqn.1993-08.org.debian:01:dea714656496'
    :rtype: bool
    :return: True or False depending on whether iqn passes validation.
    """
    if iqn is None:
        return False
    pattern = (r'^(?:iqn\.[0-9]{4}-[0-9]{2}(?:\.[A-Za-z](?:[A-Za-z0-9\-]'
               r'*[A-Za-z0-9])?)+(?::.*)?|eui\.[0-9A-Fa-f]{16})')
    return re.match(pattern, iqn) is not None
def commits_text(commits):
    """Returns text in the form 'X commits' or '1 commit'"""
    count = len(commits)
    return "{} commit{}".format(count, "" if count == 1 else "s")
def ran_check(num, low, high):
    """
    Check whether ``num`` lies in [low, high] (inclusive of both ends)
    and return a message describing the result.

    A chained comparison is used instead of ``num in range(low, high+1)``
    so that float values inside the range are also recognized (e.g. 2.5
    in [2, 7] was previously reported as outside the range).

    ran_check(5,2,7) -> '5 is in the range between 2 and 7'
    """
    if low <= num <= high:
        return '{} is in the range between {} and {}'.format(num, low, high)
    return 'The number is outside the range'
def _get_state(inspect_results):
"""
Helper for deriving the current state of the container from the inspect
results.
"""
if inspect_results.get("State", {}).get("Paused", False):
return "paused"
elif inspect_results.get("State", {}).get("Running", False):
return "running"
else:
return "stopped" | 6e9d140518b0ae42d37c648808d6fb96cea54188 | 409,845 |
import csv
import ast
def parse(string):
    """Parse an ``args, kwargs`` call representation.

    Comma-separated elements become positional arguments; ``key=value``
    elements become keyword arguments. Every token is interpreted with
    ``ast.literal_eval`` when possible, otherwise kept as a plain string.

    Parameters
    ----------
    string: str
        String to parse.

    Returns
    -------
    tuple
        (args, kwargs)
    """
    def _coerce(token):
        # Try to read the token as a Python literal; fall back to raw text.
        token = token.strip()
        try:
            return ast.literal_eval(token)
        except Exception:
            return token

    positional = []
    keyword = {}
    for element in next(csv.reader([string], quotechar='\\')):
        pieces = [_coerce(part) for part in
                  next(csv.reader([element], delimiter='=', quotechar='\\'))]
        if len(pieces) == 1:
            positional.append(pieces[0])
        elif len(pieces) == 2:
            keyword[pieces[0]] = pieces[1]
    return (tuple(positional), keyword)
def has_family_and_given_name(po):
    """Return True when *po* has both a non-empty family name and a
    non-empty given name (each must be a str)."""
    family_ok = isinstance(po.familyName, str) and len(po.familyName) > 0
    given_ok = isinstance(po.givenName, str) and len(po.givenName) > 0
    return family_ok and given_ok
def is_number(s):
    """
    Test if a string is an int or float.

    :param s: input string (word)
    :type s: str
    :return: bool
    """
    # Tokens containing '.' must parse as float; everything else must
    # parse as int (so e.g. '1e5' is rejected, matching the dot rule).
    parser = float if "." in s else int
    try:
        parser(s)
    except ValueError:
        return False
    return True
def divide_round_up(a, b):
    """Return a / b rounded UP to the nearest integer (ceiling division).

    The previous implementation rounded to the *nearest* integer (e.g.
    it returned 1 for 5/4), contradicting the function's name and
    docstring; this version always rounds up: divide_round_up(5, 4) == 2.
    """
    # -(-a // b) is the standard integer-only ceiling-division idiom.
    return -(-a // b)
import math
def from_gps_to_xyz(latitude: float, longitude: float, altitude: float):
    """Convert GPS (latitude, longitude, altitude) to carla x/y/z coordinates."""
    # Equatorial mean radius of Earth in meters
    # https://nssdc.gsfc.nasa.gov/planetary/factsheet/earthfact.html
    EARTH_RADIUS_M = 6378137.0
    # Reference point hardcoded to (0, 0); only valid for towns 01 to 07.
    ref_lat = 0.0
    ref_lon = 0.0
    scale = math.cos(ref_lat * math.pi / 180.0)
    origin_x = scale * ref_lon * math.pi * EARTH_RADIUS_M / 180.0
    origin_y = scale * EARTH_RADIUS_M * math.log(
        math.tan((90.0 + ref_lat) * math.pi / 360.0))
    x = scale * (longitude - origin_x) * math.pi * EARTH_RADIUS_M / 180.0
    y = -(scale * EARTH_RADIUS_M * math.log(
        math.tan((90.0 + latitude) * math.pi / 360.0)) - origin_y)
    # Plain tuple mimicking carla.Location
    return (x, y, altitude)
def redirect(environ, location):
    """
    Build a 302 redirect response by mutating the wsgi-style ``environ``.
    ``environ["request"]`` must carry "headers", "status" and "done"
    entries; ``location`` is the redirect target. Returns an empty body.
    """
    request = environ["request"]
    request["headers"].append(("Location", str(location)))
    request["status"] = 302
    request["done"] = True
    return ""
def is_IPv4(ip_string):
    """Return True if the string is an IPv4 address: 4 decimal octets in
    [0, 255] separated by dots."""
    octets = ip_string.split(".")
    if len(octets) != 4:
        return False
    for octet in octets:
        # isdigit() rejects empty, signed and non-numeric parts, which
        # previously either crashed int() (e.g. "a.b.c.d") or slipped
        # through validation (e.g. "-1.0.0.0").
        if not octet.isdigit() or int(octet) > 255:
            return False
    return True
def attrs_to_dot(attrs):
    """Render a list of attributes as a <BR/>-separated dot label string."""
    if not attrs:
        return ""
    return "<BR/>".join(f"[{attr}]" for attr in attrs)
def load(f):
    """
    Read the file at path ``f`` and return its contents as a string.

    Uses a context manager so the handle is closed even if reading
    raises, unlike the previous open/read/close sequence.
    """
    with open(f) as fh:
        return fh.read()
def _drop(x): # pylint: disable=invalid-name
"""Helper: pop top element of a stack (make it a non-list if length is 1)."""
result = x[1:]
if len(result) == 1:
return result[0]
return result | b288ae562b9ac3e006d6977998bb0ce81ef89885 | 467,925 |
import inspect
def get_docs(obj: object) -> str:
    """Return the object's (cleaned) docstring, or '' when it has none."""
    docs = inspect.getdoc(obj)
    return docs if docs else ''
def exists(test_subject, keys):
    """Check that every key in *keys* is present in *test_subject* with a
    truthy (non-empty) value.

    :param test_subject: A dictionary object.
    :type test_subject: dict
    :param keys: The list of keys to be checked against the 'test_subject'.
    :type keys: list of str
    :return: True if every key is available with a value, false otherwise.
    :rtype: bool
    """
    # Example: froggy.gadgets.exists({"id": 1, "name": "Anna"}, {"createdon", "updatedon"})
    # A missing key and a present-but-falsy value are treated the same way.
    return all(bool(test_subject.get(key)) for key in keys)
def get_state_fips_code(fips_code):
    """ Returns the FIPS code of the state that the given fips_code
        belongs in. The first two chars of a FIPS code identify the
        state, so the county portion is zeroed out.
    """
    state_prefix = fips_code[:2]
    return f"{state_prefix}000"
import re
def cleanlines(lines):
    """Remove comments and blank lines from splitlines output.

    Recognised comment markers: //, % and #. Comments are stripped from
    the input list in place; a new list of the stripped, non-empty lines
    is returned.
    """
    comment_re = re.compile('(.*?)(//|%|#)')
    for idx, line in enumerate(lines):
        found = comment_re.match(line)
        if found is not None:
            lines[idx] = found.group(1)
    return [stripped for stripped in (entry.strip() for entry in lines) if stripped]
def filter_rare_cards(df, rarity_num=5, card_column='CRD_NO', date_column='dates'):
    """
    Remove rows belonging to rarely used cards. Keeps only cards that
    appear in more than ``rarity_num`` bills (distinct days).

    Args:
        df: Dataframe with bills
        rarity_num: Minimum number of bills (distinct days) per card
        card_column: Column that contains the card number
        date_column: Column that contains the date
    Returns:
        Filtered DataFrame
    """
    # Deduplicate (date, card) pairs first so each distinct visit day
    # counts only once per card.
    visit_counts = (df.drop_duplicates([date_column, card_column])
                      .groupby([card_column])
                      .size()
                      .reset_index(name='bill_counts'))
    merged = df.merge(visit_counts, on=card_column, how='left')
    frequent = merged[merged['bill_counts'] > rarity_num]
    return frequent.drop('bill_counts', axis=1)
def gcs_to_bigstore_path(gs_path):
    """Converts a gs:// path to a /bigstore/ path.

    Args:
      gs_path: Path to convert from gcs to bigstore.
    Raises:
      ValueError: If provided path is not a gs:// path.
    Returns:
      Provided path with its leading gs:// replaced by /bigstore/.
    """
    prefix = 'gs://'
    if not gs_path.startswith(prefix):
        raise ValueError('Path must start with `gs://`: {}.'.format(gs_path))
    # Only strip the leading prefix; str.replace would also rewrite any
    # later occurrence of 'gs://' inside the object name.
    return '/bigstore/' + gs_path[len(prefix):]
import pathlib
import builtins
import typing
def glob_imgs(
    path: pathlib.Path, ext: builtins.str = "*.nii*"
) -> typing.List[pathlib.Path]:
    """Collect all files matching ``ext`` in ``path``, sorted for consistency."""
    matches = path.glob(ext)
    return sorted(matches)
import math
def fix_ang(ang: float) -> float:
    """
    Wrap the given angle into the range [-pi, pi).
    """
    # Shift by pi, reduce modulo a full turn, then shift back.
    shifted = (ang + math.pi) % math.tau
    return shifted - math.pi
def lookup_drivers_license_for_person(transaction_executor, person_id):
    """
    Query the DriversLicense table by person ID.

    :type transaction_executor: :py:class:`pyqldb.execution.executor.Executor`
    :param transaction_executor: An Executor object allowing for execution of statements within a transaction.
    :type person_id: str
    :param person_id: The person ID to look up.
    :rtype: :py:class:`pyqldb.cursor.stream_cursor.StreamCursor`
    :return: Cursor on the result set of the statement query.
    """
    statement = 'SELECT * FROM DriversLicense AS d WHERE d.PersonId = ?'
    return transaction_executor.execute_statement(statement, person_id)
def zeta_a(eN, cL, w):
    """
    EnKF-N inflation estimation via w.
    Returns zeta_a = (N-1)/pre-inflation^2.

    Using this inside an iterative minimization as in the iEnKS
    effectively blends the distinction between the primal and dual EnKF-N.
    """
    ens_size = len(w)
    dof = ens_size - 1
    return dof * cL / (eN + w @ w)
import math
def NIST_helper(L, N=None, p=None):
    """
    Computes percentile p of sorted list L of length N using the definition
    in NIST handbook Sec 7.2.5.2.

    :param L: sorted list of values
    :param N: length of list (computed from L when omitted)
    :param p: percentile desired, in the interval [0,1], or None for median
    :return: computed percentile p of sorted list L
    """
    if N is None:
        N = len(L)
    if N < 1:
        return None
    if N < 2:
        return L[0]
    if p is None:
        p = 0.5
    # Rank is p*(N+1); split into an integer index and a fractional
    # weight for linear interpolation between neighbours.
    frac, idx = math.modf(float(p) * (N + 1))
    idx = int(idx) - 1
    if idx < 0:
        return L[0]
    if idx >= N - 1:
        return L[-1]
    return float(L[idx]) + frac * (float(L[idx + 1]) - float(L[idx]))
def compression(request):
    """Parametrized fixture handing back each common compression type in
    turn for compression tests."""
    return request.param
def argmax(pairs):
    """
    Given an iterable of pairs return the key corresponding to the greatest
    value.

    Parameters:
        :param pairs : iterable of (key, value) pairs
    Outputs:
        :returns argmax_val : key corresponding to greatest value
    """
    best_key, _ = max(pairs, key=lambda pair: pair[1])
    return best_key
def resize_bbox_list(label_list, img_w, img_h, outp_img_size):
    """
    Resize a list of bboxes to the desired output size. If an image is resized,
    the corresponding bboxes must be rescaled with it. img_w and img_h give the
    original image size; outp_img_size the size after resizing.

    :param label_list: list of bboxes in format [[[x,y,width,height], bbox_category_id], ...]
    :param img_w: original image width the bboxes refer to
    :param img_h: original image height the bboxes refer to
    :param outp_img_size: output size in format (width, height) after resizing
    :return: list of resized bboxes in format [[[x,y,width,height], bbox_category_id], ...]
    """
    fx = outp_img_size[0] / img_w
    fy = outp_img_size[1] / img_h
    return [
        [[x * fx, y * fy, w * fx, h * fy], category]
        for (x, y, w, h), category in label_list
    ]
def get_complete_testing_sets(playlists, test_indices_dict):
    """
    Split test playlists into buckets according to the provided indices and
    attach 'seed' / 'groundtruth' track lists to each playlist dict.

    Parameters:
    --------------
    playlists: list, original playlists included in test set
    test_indices_dict: dict, dictionary including the indices for every split

    Returns:
    --------------
    return_dict: dict, {bucket_no: [playlist1, playlist2, ..., playlistn], ...}
    """
    buckets = {}
    for bucket, indices in test_indices_dict.items():
        buckets[bucket] = [pl for idx, pl in enumerate(playlists) if idx in indices]
    # The bucket number doubles as the seed length: the first `bucket`
    # tracks seed the recommender, the rest are the ground truth.
    for seed_len, bucket_playlists in buckets.items():
        for pl in bucket_playlists:
            tracks = pl['tracks']
            pl['seed'] = list(tracks[:seed_len])
            pl['groundtruth'] = list(tracks[seed_len:])
    return buckets
def _varint_final_byte(char):
"""Return True iff the char is the last of current varint"""
return not ord(char) & 0x80 | d287347f6129e9c498b1c51ff68b7e261dedaa34 | 425,534 |
def rescale(pyr, scale_base):
    """Rescale a wavelet decomposition `pyr` by `scale_base`^level.

    Args:
      pyr: A wavelet decomposition produced by construct().
      scale_base: The base of the exponentiation used for the per-level scaling.

    Returns:
      pyr where each level has been scaled by `scale_base`^level. The first
      level is 0 and is therefore not scaled.
    """
    scaled = []
    # All but the last entry are levels of 3 bands each.
    for level in range(len(pyr) - 1):
        factor = scale_base ** level
        scaled.append([pyr[level][band] * factor for band in range(3)])
    # The residual (last entry) is scaled as a whole.
    top = len(pyr) - 1
    scaled.append(pyr[top] * (scale_base ** top))
    return scaled
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.