content (string, 39 to 9.28k chars) | sha1 (string, 40 chars) | id (int64, 8 to 710k)
---|---|---
def remove_xenon(te_img, xe_img, te_iso, xe_iso, clamp_neg=True):
"""
Based on the abundances of the Xe and Te isotopes in question and the Xe
image, calculates the expected component of the Te image that actually comes
from Xe and subtracts it.
By default the function assumes any pixels that become negative after
subtraction contain no tellurium and so clamps them to 0.
"""
# Percentage abundance of different Xe isotopes
xe_abundances = {'124': 0.095,
'126': 0.089,
'128': 1.910,
'129': 26.401,
'130': 4.071,
'131': 21.232,
'132': 26.909,
'134': 10.436,
'136': 8.857}
"""
Checks that the isotope requested is xenon contaminated.
Returns the input array if not, and prints a warning.
"""
if str(te_iso)not in xe_abundances:
print('{0} is not contaminated with Xe. Input image returned.'.format(str(te_iso)))
return te_img
ratio = xe_abundances[str(te_iso)] / xe_abundances[str(xe_iso)]
scaled_xe = xe_img * ratio
subtracted = te_img - scaled_xe
# Clamp negative pixels to zero if clamp_neg
if clamp_neg:
subtracted[subtracted < 0] = 0
return subtracted | 25c1f3d8d8f600290c1d41b520458dc6a00fc54a | 19,965 |
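A minimal usage sketch for remove_xenon (illustrative values; assumes numpy):
import numpy as np
te = np.array([[10.0, 5.0], [2.0, 8.0]])  # hypothetical 128Te-channel image
xe = np.array([[1.0, 0.5], [3.0, 0.2]])   # hypothetical 129Xe-channel image
clean = remove_xenon(te, xe, te_iso=128, xe_iso=129)  # scales xe by 1.910/26.401, subtracts, clamps negatives to 0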
import logging
def get_logger(name: str) -> logging.Logger:
""" Gets the appropriate logger for this backend name. """
return logging.getLogger('proxytest.' + name) | d0e0f9de13d9b603326b70a6acdbff7f3288b421 | 37,409 |
import importlib
def get_function(function_path):
""" find and import a function dynamically.
Parameters
----------
function_path : string
A string with the path to the function. The expected format is:
- `<function>`
- `<packages>.<module>.<function>`
- `<packages>.<module>.<class>.<method>`
"""
try:
return importlib.import_module(function_path)
except ImportError:
components = function_path.split('.')
try:
module = importlib.import_module('.'.join(components[:-1]))
except ImportError:
module = importlib.import_module('.'.join(components[:-2]))
class_object = getattr(module, components[-2])
return getattr(class_object, components[-1])
else:
return getattr(module, components[-1]) | f58459a42f5eafa7fda0ca881cec74b49e7e623d | 254,859 |
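A usage sketch for get_function; both lookups resolve through the ImportError fallback:
json_dumps = get_function('json.dumps')                 # <module>.<function>
od_keys = get_function('collections.OrderedDict.keys')  # <module>.<class>.<method>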
def all_relevant_registers_filled(registers, relevant_registers):
"""
@param registers : a dictionary of each register content { 'register #': 'value' }
@param relevant_registers : an ordered list of register indexes related to that method call
@return : True if all the relevant_registers are filled, False if not
"""
answer = True
for i in relevant_registers:
# a register left empty by a "move-result" instruction counts as unfilled
if i not in registers or len(registers[i]) < 1:
answer = False
return answer | cba6d33b168749dd989bcf850f4cbcde01e43b3f | 572,240 |
def _pretty_algorithm(opt):
"""
Return the algorithm name.
"""
if opt.algorithm == opt.algo_bit_by_bit:
return 'bit-by-bit'
elif opt.algorithm == opt.algo_bit_by_bit_fast:
return 'bit-by-bit-fast'
elif opt.algorithm == opt.algo_table_driven:
return 'table-driven'
else:
return 'UNDEFINED' | d0b54bba3c28785edc9dbd7af8f420d68f25c865 | 476,769 |
def reverse_array(n):
"""
Reverse an array (or string). For a list, you can also reverse in place with list.reverse().
>>> reverse_array([1,2,3])
[3, 2, 1]
This works for String too !!
>>> reverse_array("abhishek")
'kehsihba'
"""
return n[::-1] | c7c30ebcc0d7a3364b3cea852524c6ab08dd6b96 | 542,201 |
def predict_sklearn_model(ts, model):
"""
Parameters:
ts (pandas.DataFrame): time series values created by
src.time_series_functions.create_windowing
model (Sklearn Model): base model to predict ts
Returns:
numpy array: predicted values
"""
x = ts.drop(columns=['actual'])  # 'columns=' already implies axis=1
return model.predict(x) | 5da75b78ab1745a751aa1a3022faeedcd7d19202 | 170,285 |
def _read_band_number(file_path):
"""
:type file_path: Path
:return:
>>> _read_band_number(Path('reflectance_brdf_2.tif'))
'2'
>>> _read_band_number(Path('reflectance_terrain_7.tif'))
'7'
>>> p = Path('/tmp/something/LS8_OLITIRS_NBAR_P54_GALPGS01-002_112_079_20140126_B4.tif')
>>> _read_band_number(p)
'4'
"""
number = file_path.stem.split('_')[-1].lower()
if number.startswith('b'):
return number[1:]
return number | e02594f32d87260231951df94bbe8e3d704ddc6b | 691,794 |
def subsequent_pairs(l):
"""
Return subsequent pairs of values in a list <l>, i.e. [(x1, x2), (x2, x3), (x3, x4), .. (xn-1, xn)] for a
list [x1 .. xn]
"""
return list(zip(l[:-1], l[1:])) | c7fdb289e184723900dfb3c504b879728a95e11b | 260,220 |
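For example, subsequent_pairs([1, 2, 3, 4]) returns [(1, 2), (2, 3), (3, 4)].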
def dur_attributes_to_dur(d_half, d_semiqvr):
"""
Convert duration-attribute arrays d_half and d_semiqvr to a single duration d.
- See eq. (2) of the paper.
"""
def d_hlf_dur_sqv_to_d(d_hlf, d_sqv):
return 8 * d_hlf + d_sqv
d = d_hlf_dur_sqv_to_d(d_half, d_semiqvr)
return d | aeea74f929ef94d94178444df66a30d0d017fd4e | 6,117 |
from typing import Optional
from typing import Callable
import functools
import warnings
def deprecated_function(
last_version: Optional[str] = None,
msg: Optional[str] = None,
stacklevel: int = 2,
) -> Callable:
"""A function or method decorator to show deprecation warning.
Args:
last_version: The Qiskit Experiment version that this function is removed.
msg: Extra message, for example, to indicate alternative approach.
stacklevel: Stacklevel of this warning. See Python Warnings documentation for details.
Examples:
.. code-block::
@deprecated_function(last_version="0.3", msg="Use new_function instead.")
def old_function(*args, **kwargs):
pass
def new_function(*args, **kwargs):
pass
Returns:
Deprecated function or method.
"""
def deprecated_wrapper(func: Callable):
@functools.wraps(func)
def _wrap(*args, **kwargs):
namespace = func.__qualname__.split(".")
if len(namespace) == 1:
message = f"The function '{func.__name__}' has been deprecated and "
else:
cls_name, meth_name = namespace
message = f"The method '{meth_name}' of '{cls_name}' class has been deprecated and "
if last_version:
message += f"will be removed after Qiskit Experiments {last_version}. "
else:
message += "will be removed in future release. "
if msg:
message += msg
warnings.warn(message, DeprecationWarning, stacklevel=stacklevel)
return func(*args, **kwargs)
return _wrap
return deprecated_wrapper | 69ef02beddf1428892ec2323d6239ea3fe7cf621 | 133,629 |
import pkg_resources
def pkg_resource_filename(path):
"""Return absolute filepath for the given input path.
Args:
path: str, filepath name.
Returns:
str, absolute filepath name.
"""
path_vals = path.split('/')
abs_filename = pkg_resources.resource_filename('.'.join(path_vals[:-1]),
path_vals[-1])
return abs_filename | d76e3458ab6d86ebadc0a7e8fa78fd2d1899d6ec | 439,098 |
import hashlib
def make_hash(to_hash: str) -> str:
""" Return a hash of to_hash. """
new_hash = hashlib.md5()
new_hash.update(to_hash.encode("utf-8"))
return new_hash.hexdigest() | 7e800f7942df23256373c221428e5c24b65cabee | 699,024 |
import builtins
def _str_to_list(value, separator):
"""Convert a string to a list with sanitization."""
value_list = [item.strip() for item in value.split(separator)]
value_list_sanitized = builtins.list(filter(None, value_list))
if len(value_list_sanitized) > 0:
return value_list_sanitized
else:
raise ValueError('Invalid list variable.') | 378037df213307a6a98b54c5c0a0d5a386a76375 | 317,341 |
def is_percent(val):
"""Checks that value is a percent.
Args:
val (str): number string to verify.
Returns:
bool: True if 0<=value<=100, False otherwise.
"""
return 0 <= int(val) <= 100 | fb24d89315dbf192d57afb30f02d02c400066221 | 244,338 |
def node_is_reachable(src_node, dst_node):
"""
Returns True if a node is reachable from another node.
:param src_node: The source node to check reachability from.
:param dst_node: The destination node to check for reachability to.
:return: True only if dst is reachable from src.
"""
return 0 == src_node.account.ssh("nc -w 3 -z %s 22" % dst_node.account.hostname, allow_fail=True) | 90fc2ccdfc293508661e62dcd39cd15ce0cb813a | 140,615 |
def shorten(thelist, maxlen, shorten):
"""
If thelist has more elements than maxlen, remove elements to make it of size maxlen.
The parameter shorten is a string which can be one of left, right, both or middle
and specifies where to remove elements.
:param thelist: the list to shorten
:param maxlen: maximum length of the list
:param shorten: one of left, right, both, middle, where to remove the elements
:return: the shortened list
"""
if len(thelist) <= maxlen:
return thelist
if shorten == "right":
return thelist[0:maxlen]
elif shorten == "left":
return thelist[-maxlen-1:-1]
elif shorten == "both":
excess = len(thelist) - maxlen;
left = int(excess/2)
right = excess - left
return thelist[left:-right]
elif shorten == "middle":
excess = len(thelist) - maxlen;
left = int(excess/2)
right = excess - left
return thelist[0:left]+thelist[-right:]
else:
raise Exception("Not a valid value for the shorten setting: {}".format(shorten)) | 72aa428b04b34737225717396946bce009be6bc1 | 654,623 |
import typing
def compute_first_index(tour_nodes: typing.List[int]) -> typing.List[int]:
"""Compute first index in euler tour from euler tour on nodes.
Args:
tour_nodes (typing.List[int]): euler tour on nodes.
Returns:
typing.List[int]: first indices.
Examples:
>>> tour_nodes = [0, 1, 4, 1, 2, 1, 0, 3, 0]
>>> compute_first_index(tour_nodes)
[0, 1, 4, 7, 2]
"""
n = (len(tour_nodes) + 1) >> 1  # number of distinct nodes
first_idx = [-1] * n
for i, u in enumerate(tour_nodes):
if first_idx[u] == -1:
first_idx[u] = i
return first_idx | 349c6f76bddb98741bca50bac88a51066c5d3a79 | 353,056 |
def replace_font_name(font_name, replacement_dict):
""" Replaces all keys with vals from replacement_dict in font_name. """
for key, val in replacement_dict.items():
font_name = font_name.replace(key, val)
return font_name | 8e8762aad82b8497fb71a9b4d28e3c23c79b314f | 516,979 |
def polygon_extrema(polygon):
"""
Returns the longitude and latitude extrema from a geojson polygon.
"""
polygon_geometry = polygon.features[0].geometry.coordinates[0][0]
min_lon = polygon_geometry[0][0]
max_lon = polygon_geometry[0][0]
min_lat = polygon_geometry[0][1]
max_lat = polygon_geometry[0][1]
for [lon, lat] in polygon_geometry:
if min_lon > lon:
min_lon = lon
if max_lon < lon:
max_lon = lon
if min_lat > lat:
min_lat = lat
if max_lat < lat:
max_lat = lat
return (min_lon, max_lon, min_lat, max_lat) | d19ba548dfc2254b5cff16a546a053ec4d683338 | 330,733 |
def coerce_int_except(v, msg):
"""Convert to an int, throw an exception if it isn't"""
try:
return int(v)
except (TypeError, ValueError):
raise ValueError("Bad value: '{}'; {}".format(v, msg)) | f0d615692594e3dde9fb4a81d33278bab6807386 | 319,202 |
def rem_num(num, lis):
""" Removes all instances of a number 'num', from list lis. """
return [ele for ele in lis if ele != num] | c2fd18b49a70a01bf9d44da1b2aacf8d4e93cbe9 | 49,887 |
def transformName(name: str, isClass: bool = False):
"""Transforms a name as KSC transforms it for python language"""
if isClass:
def capitalizeFirstLetter(word):
return word[0].upper() + word[1:]
return "".join((capitalizeFirstLetter(p) for p in name.split("_")))
else:
return name | 225ef57546be69e3ea317f7375a1e52de3215cb2 | 461,011 |
import re
def decamelize(s):
"""Decamelize the string ``s``.
For example, ``MyBaseClass`` will be converted to ``my_base_class``.
"""
if not isinstance(s, str):
raise TypeError('decamelize() requires a string argument')
if not s:
return ''
return re.sub(r'([a-z])([A-Z])', r'\1_\2', s).lower() | fc30254742bcc79047dd6803d0d7b87c951a9f10 | 17,365 |
import csv
def read_csv(input_file_path, verbose=False, delimiter=','):
"""
Function reads csv file and returns list of records
:param input_file_path: path to csv file
:param verbose: defines if verbose mode
:param delimiter: fields separator in csv file
:return: list of records
"""
result = []
with open(input_file_path, newline='') as csv_file:  # newline='' is recommended when using the csv module
for row in csv.reader(csv_file, delimiter=delimiter):
result.append(row)
if verbose:
print(result)
return result | 57597e820750b11382cd27e31135fa8b6f45153e | 16,501 |
import json
def categorize_dimensions(data_files):
""" Categorize dimensions as cprops, spatial dimensions or time dimensions
Args:
data_files: list of the raw data json files
Returns:
an object: {
spatial: map of spatial dimensions to the number of occurences,
time: list of time dimensions,
cprop: sorted list of {id: dimension code as string,
num: number of occurences}
}
"""
spatial_dim = {}
time_dim = set()
cprop_dim = {}
for f in data_files:
with open(f, "r+") as indicator_data:
indicator_data = json.load(indicator_data).get("value", [])
if len(indicator_data) == 0:
continue
for data in indicator_data:
spatial_dim_type = data.get("SpatialDimType", "")
spatial_dim_count = spatial_dim.get(spatial_dim_type, 0)
spatial_dim[spatial_dim_type] = spatial_dim_count + 1
time_dim.add(data.get("TimeDimType", ""))
# Dim1/Dim2/Dim3 are hierarchical: stop at the first missing one
for dim_key in ("Dim1Type", "Dim2Type", "Dim3Type"):
dim_type = data.get(dim_key, "")
if not dim_type:
break
cprop_dim[dim_type] = cprop_dim.get(dim_type, 0) + 1
cprop_list = []
for prop in cprop_dim:
cprop_list.append({"id": prop, "num": cprop_dim.get(prop)})
sorted_cprop_list = sorted(cprop_list,
key=lambda x: x.get("num"),
reverse=True)
result = {
"spatial": spatial_dim,
"time": list(time_dim),
"cprop": sorted_cprop_list
}
return result | c150943e651cd9ecc3dd4613fd4401ee717aaecd | 298,578 |
def update_dict_recursively(dictionary, key_path, value):
"""
update or insert values to a dictionary recursively.
```
>>> update_dict_recursively({}, ['a', 'b', 1, 2], 'this_value')
{'a': {'b': {1: {2: 'this_value'}}}}
```
:param dict dictionary: the dictionary to be inserted into
:param list key_path: the path for the insertion value
:param value: value to be inserted
:returns: a dictionary with the inserted value
"""
sub_dictionary = dictionary
for key in key_path[:-1]:
if key not in sub_dictionary:
sub_dictionary[key] = {}
sub_dictionary = sub_dictionary[key]
sub_dictionary[key_path[-1]] = value
return dictionary | a8e186fcf7099de5e47d2dd9db7eb416595aaa24 | 540,693 |
def mut_pair_num(table):
"""
A function that calculates the number of pairs of codons one
mutation away from each other. Treats mutations with directionality. In
general, the number of mutational pairs is equal to the number of
codons in a table multiplied by the number of unique codons within one
mutation. Let a = alphabet length (generally 4), L = codon length
(generally 3)
n = (a^L) * L(a-1)
Parameters
----------
dict table: the codon table to analyze
Returns
-------
int mut_num: the number of distinct mutational pairs.
"""
# get list of all codons in table
codon_list = list(table)
# get alphabet size
alphabet = set()
for codon in codon_list:
for nt in codon:
alphabet.add(nt)
a = len(alphabet)
# get codon length
L = len(codon_list[0])
# calculate mut_num and return
return (a ** L) * L * (a - 1) | ce44087d295ac2cf0860c364dbf18b4f2de500b1 | 50,346 |
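A worked example with a binary alphabet (a=2) and codon length L=2, so n = (2**2) * 2 * (2-1) = 8:
table = {'00': 'A', '01': 'B', '10': 'C', '11': 'D'}
mut_pair_num(table)  # 8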
def _subsample(X, ind):
""" like X[ind, :] but works in case X has size (n,) """
if X.ndim == 1:
y = X[ind]
elif X.ndim == 2:
y = X[ind, :]
else:
raise ValueError("Expected 1D or 2D array")
return y | 6d9a3632fbe88acdf664fe919e93b14ac2933a86 | 633,635 |
def base_temp(tmp_path_factory):
"""
Creates a place to store the test outputs. Can be set using the command
line --basetemp (WARNING: WILL DELETE ALL OF ITS CURRENT CONTENT)
Parameters
----------
tmp_path_factory : fixture
PyTest's built-in fixture.
Returns
-------
str : Path for the tests results for the current session
"""
return tmp_path_factory.mktemp("dragons-tests-") | 0db95a8383dacd0de6878449de4ea7566b738374 | 297,078 |
def changeCourses(previousMessage, currentMessage):
"""
Determine whether one of the observed currencies has changed.
:param previousMessage (str) : previous message for twitter
:param currentMessage (str) : current message for twitter
:return change (bool) : True if the messages differ
"""
return previousMessage != currentMessage | 4d65e13cfa7b1e7cc73dff3670deebf02a41422b | 531,129 |
def two_bit_flip(node):
"""Give all the nodes by flipping one or two bits of the binary number
representation of a node."""
node_list = list(node)
out = set()
bit_length = len(node_list)
for i in range(bit_length):
for j in range(bit_length):
new_node = node_list[:]
if i != j:
new_node[i] = ('1' if node[i] == '0' else '0')
new_node[j] = ('1' if node[j] == '0' else '0')
else:
new_node[i] = ('1' if node[i] == '0' else '0')
out.add(''.join(new_node))
return out | b2fd25eef42bf608146e29ac1aa4471305f7420b | 528,229 |
from typing import List
def base_determinant(matrix: List[List[int]]) -> int:
"""
Find determinant of a 2 x 2 matrix.
"""
if (len(matrix[0]), len(matrix)) != (2, 2):
raise ValueError(f"Matrix mismatch: Not 2 x 2 matrix.")
return (matrix[0][0] * matrix[1][1]) - (matrix[0][1] * matrix[1][0]) | 596dc8ab03d8fdf4e8ad1ec7e88c99b52cccd4fb | 302,606 |
import random
import string
def generate_random_string(size: int = 16) -> str:
"""
Generates a random alphanumeric string of the given size (16 by default).
:return: random string: str
"""
random_string = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(size))
return random_string | b1a44e128871af07ca8aefed2c096b19e5f1d08c | 431,137 |
def get_zone_names(compute_api, project):
"""Obtains a list of zone names for a given project.
Args:
compute_api: The gcloud api instance.
project: The project name.
Returns:
A list of names of zones whose status is UP
"""
result = compute_api.zones().list(project=project).execute()
filtered_list = [
i for i in result["items"]
if (i["status"] == "UP") and (i["kind"] == "compute#zone")
]
return [i["name"] for i in filtered_list] | 76683fa5d025d6948f0b3c8f2b65ee25365008ad | 551,328 |
def qs(list_):
"""
Return a new list consisting of the elements of list_ in
ascending order.
@param list list_: list of comparables
@rtype: list
>>> qs([1, 5, 3, 2])
[1, 2, 3, 5]
"""
if len(list_) < 2:
return list_[:] # list_ is sorted, so make a copy
else:
# return (everything smaller than pivot, sorted) + pivot
# + (everything larger than pivot, sorted)
pivot = list_[0] # could choose any pivot
smaller_than_pivot = [v for v in list_ if v < pivot]
larger_than_pivot = [v for v in list_[1:] if v >= pivot]
return qs(smaller_than_pivot) + [pivot] + qs(larger_than_pivot) | e0bd6a52c5da953fe5639da401dae4601e685a3a | 344,385 |
def requires_grad(ctx, name):
"""Checks if a named variable requires gradients."""
for g, n in zip(ctx.needs_input_grad, ctx.names):
if n == name:
return g
return False | 646c011fcbfaec872bcee436ed4d80143338e779 | 402,843 |
def _uvprime_to_CCT_Duv_parabolic(tmm1, tm, tmp1, dmm1, dm, dmp1, sign):
"""Ohno 2011 parabolic technique for computing CCT.
Parameters
----------
tmm1 : `numpy.ndarray`
"T sub m minus 1", the m+1th CCT value
tm : `numpy.ndarray`
"T sub m", the mth CCT value
tmp1 : `numpy.ndarray`
"T sub m plus 1", the m+1th CCT value
dmm1 : `numpy.ndarray`
"d sub m minus 1", the m-1th distance value
dm : `numpy.ndarray`
"d sub m", the mth distance value
dmp1 : `numpy.ndarray`
"d sub m plus 1", m+1th distance value
sign : `int`
either -1 or 1, indicating the sign of the solution
Returns
-------
`tuple`
CCT, Duv values
"""
x = (tmm1 - tm) * (tmp1 - tmm1) * (tm - tmp1)
a = (tmp1 * (dmm1 - dm) + tm * (dmp1 - dmm1) + tmm1 * (dm - dmp1)) * x ** -1
b = (-(tmp1 ** 2 * (dmm1 - dm) + tm ** 2 * (dmp1 - dmm1) + tmm1 ** 2 *
(dm - dmp1)) * x ** -1)
c = (-(dmp1 * (tmm1 - tm) * tm * tmm1 + dm *
(tmp1 - tmm1) * tmp1 * tmm1 + dmm1 *
(tm - tmp1) * tmp1 * tm) * x ** -1)
CCT = -b / (2 * a)
Duv = sign * (a * CCT ** 2 + b * CCT + c)
return CCT, Duv | a48d9c30e8f959c4ce9ded3bc5910a40bb811e18 | 347,389 |
import hashlib
def get_cache_key(*args, **kwargs):
"""
Get MD5 encoded cache key for given arguments.
Note:
We convert arguments and keyword arguments to their string form to build the cache key. So do not pass
arguments that can't be converted to strings. Also, don't pass arguments that may not consistently
be converted to the same string, like an unsorted dict.
Here is the format of key before MD5 encryption `key1:value1__key2:value2`
Example:
>>> get_cache_key(site_domain="example.com", resource="catalogs")
# Here is key format for above call
# "site_domain:example.com__resource:catalogs"
a54349175618ff1659dee0978e3149ca
Arguments:
*args: Positional arguments that need to be present in the cache key
**kwargs: Key word arguments that need to be present in the cache key.
Returns:
An MD5 encoded key uniquely identified by the key word arguments.
"""
key = '__'.join(list(args) + [u'{}:{}'.format(k, v) for k, v in sorted(kwargs.items())])
return hashlib.md5(key.encode('utf-8')).hexdigest() | 5053033e3c228dc522dce1aa316c617cc65e6d0b | 155,432 |
import functools
import warnings
def deprecated(message):
"""Decorator to mark a function as deprecated"""
def deprecated_decorator(func):
@functools.wraps(func)
def deprecated_func(*args, **kwargs):
warnings.warn(
"{} is a deprecated function and will be removed in a "
"future version of scirpy. {}".format(func.__name__, message),
category=FutureWarning,
stacklevel=2,
)
return func(*args, **kwargs)
return deprecated_func
return deprecated_decorator | 3e076bc7ad1edacc1f818bccf221f5f8d6d3eefc | 224,186 |
def parse_color(color):
"""Take any css color definition and give back a tuple containing the
r, g, b, a values along with a type which can be: #rgb, #rgba, #rrggbb,
#rrggbbaa, rgb, rgba
"""
r = g = b = a = type = None
if color.startswith('#'):
color = color[1:]
if len(color) == 3:
type = '#rgb'
color = color + 'f'
if len(color) == 4:
type = type or '#rgba'
color = ''.join([c * 2 for c in color])
if len(color) == 6:
type = type or '#rrggbb'
color = color + 'ff'
assert len(color) == 8
type = type or '#rrggbbaa'
r, g, b, a = [
int(''.join(c), 16) for c in zip(color[::2], color[1::2])]
a /= 255
elif color.startswith('rgb('):
type = 'rgb'
color = color[4:-1]
r, g, b, a = [int(c) for c in color.split(',')] + [1]
elif color.startswith('rgba('):
type = 'rgba'
color = color[5:-1]
r, g, b, a = [int(c) for c in color.split(',')[:-1]] + [
float(color.split(',')[-1])]
return r, g, b, a, type | f58331287e4ccc1f2459c1ffff4c0f3ec5f29d8b | 659,269 |
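A usage sketch covering the supported formats:
parse_color('#fff')                # (255, 255, 255, 1.0, '#rgb')
parse_color('rgb(1, 2, 3)')        # (1, 2, 3, 1, 'rgb')
parse_color('rgba(1, 2, 3, 0.5)')  # (1, 2, 3, 0.5, 'rgba')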
import torch
def flipud(tensor):
"""
Flips a given tensor along the first dimension (up to down)
Parameters
----------
tensor
a tensor at least two-dimensional
Returns
-------
Tensor
the flipped tensor
"""
return torch.flip(tensor, dims=[0]) | b0fd62172b0055d9539b554a8c967c058e46b397 | 707,531 |
def massString(mass):
""" HELPER FUNCTION. Turns a float mass into a string with the correct units after
(between mg and kg)
Arguments:
mass: [float] the mass to be converted
Returns:
mass_str: [string] the mass as a string with the proper units
"""
# round to one significant figure; float("%.0e" % mass) avoids eval()
if mass < 1:
mass *= 1000
if mass < 1:
mass *= 1000
mass_str = str(float("%.0e" % mass)) + ' mg'
else:
mass_str = str(float("%.0e" % mass)) + ' g'
else:
mass_str = str(float("%.0e" % mass)) + ' kg'
return mass_str | f8e45328538e690f971ee6d2c2709954b052a0c4 | 509,819 |
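A usage sketch (the input mass is in kilograms):
massString(1.234)     # '1.0 kg'
massString(0.005)     # '5.0 g'
massString(0.000005)  # '5.0 mg'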
def flatten(lst):
"""Flattens a list of lists"""
return [sub_elem for elem in lst
for sub_elem in elem] | 553118be917f03488a1a7e9443878ddde907042e | 162,211 |
def get_text_in_quotes(string):
"""
Returns the text in double quotes from a given string, e.g.
Which one of these is “the apple”?
would return 'the apple' (without the single quotes)
"""
new_str = ""
add_to_new_str = False
for char in string:
if add_to_new_str:
new_str += char
if char == '\"' or ord(char) in [8220, 8221]: # in and out quotes are different! # the "quotation marks used in duolingo aren't acutally quotation marks!"
add_to_new_str = not(add_to_new_str)
return new_str[:-1] | 0980b633126696fd230e6cd85a1d8aa7711e5927 | 224,737 |
def slice_for_range(data, fromVal, toVal):
"""
Returns a slice indicating the region [fromVal, toVal] in a 1D ndarray data
Example:
>>> data = linspace(1, 50, 50)
>>> s = slice_for_range(data, 4, 10)
>>> data[s]
array([ 4., 5., 6., 7., 8., 9., 10.])
"""
if (data[-1] - data[0]) / (toVal - fromVal) < 0:
# Order of fromVal and toVal is reversed relative to the data; swap them
fromVal, toVal = toVal, fromVal
foundFromVal = False
start = None
stop = None
if toVal > fromVal:
# ascending order
for idx, val in enumerate(data):
if foundFromVal:
if val > toVal:
stop = idx
break
else:
if val >= fromVal:
start = idx
foundFromVal = True
else:
# descending order
for idx, val in enumerate(data):
if foundFromVal:
if val < toVal:
stop = idx
break
else:
if val <= fromVal:
start = idx
foundFromVal = True
return slice(start, stop) | 44cb009d01cb1bf4415aa867cd4cc9e2685e8d4c | 631,529 |
from typing import Set
from typing import Union
from typing import List
def canonicalize_resources(resources: Set[str]) -> Union[str, List[str]]:
"""Return the set of resources as either a single string or a sorted list of strings."""
if len(resources) == 1:
return next(iter(resources))
return sorted(resources) | 5ed51e192375b74159f9ee99fe6b1e13e6572474 | 312,180 |
import json
def getmeta_ocred(fname, cfg):
""" Get metadata info of fname.
Args:
fname (str): filename from which to get metadata.
cfg (Config): configuration data.
Return:
dict: metadata info corresponding to fname.
Raise:
ValueError: metadata for fname not found.
"""
with open(cfg.get('ocred', 'metadata file')) as meta_file:
allmeta = json.load(meta_file)
try:
return allmeta[fname]
except KeyError:
raise ValueError('Error: metadata for document %s not found.' % fname) | e2ac3b34515e219bf26564572e943e06de852b23 | 605,685 |
def check_interface_capability(interface, capability):
"""Evaluate interface to determine if capability is present.
:param interface: The interface object to check.
:param capability: The value representing the capability that
the caller wishes to check if present.
:returns: True if capability found, otherwise False.
"""
return capability in getattr(interface, 'capabilities', []) | 472b9c833de34d4edfb53d5ff5a362827ee9aa17 | 592,743 |
def array_to_list(np_array):
"""Transform a numpy array in a nested list."""
return [list(e) for e in np_array] | fbb7eb1b3848e95ca20dfc515fd4d287a228ae46 | 170,136 |
def int_or_none(val):
"""Attempt to parse an integer value, or None."""
if val is None:
return val
else:
return int(val) | 8d0a8edc56ce23b60ab9b5747d053026def1ee80 | 569,416 |
import pytz
def getLocalTime(UTC_time):
"""
Transforms message time to local timezone
:param UTC_time: message datetime in UTC timezone
:return: local date and time
"""
local_tz = pytz.timezone('Asia/Jerusalem')
local_datetime = UTC_time.replace(tzinfo=pytz.utc).astimezone(local_tz)
local_date = local_datetime.strftime('%d/%m/%Y')
local_time = str(local_datetime.time())
return local_date, local_time | 393b5fa3039e1a11f199c685e23f91f00043a800 | 201,148 |
def get_or_create_attr(obj, attr: str, fn):
"""
Sets the named attribute on the given object to the specified value if it
doesn't exist, else it'll return the attribute
setattr(obj, attr, fn) is equivalent to ``obj.attr = fn``
"""
if not hasattr(obj, attr):
setattr(obj, attr, fn)
return getattr(obj, attr) | 848b3c4075a34d9b3f3812411c1d1e9d70648781 | 247,716 |
def _is_simple_rig_root(dagNode):
"""Check if the dagNode is a simple rig ctl
Args:
dagNode (PyNode): Control to check
Returns:
bool: True if the node is a simple rig root
"""
return dagNode.hasAttr("is_simple_rig") | d3fc5e04c874c9778cd8a7cc1880e849517edee9 | 452,446 |
def scale_gradient(tensor, scale, clone_input=True):
"""Scales the gradient of `tensor` for the backward pass.
Args:
tensor (Tensor): a tensor which requires gradient.
scale (float): a scalar factor to be multiplied to the gradient
of `tensor`.
clone_input (bool): If True, clone the input tensor before applying
gradient scaling. This option is useful when there are multiple
computational branches originated from `tensor` and we want to
apply gradient scaling to part of them without impacting the rest.
If False, apply gradient scaling to the input tensor directly.
Returns:
The (cloned) tensor with gradient scaling hook registered.
"""
if clone_input:
output = tensor.clone()
else:
output = tensor
output.register_hook(lambda grad: grad * scale)
return output | 9c969307980a19b29767af82ff001ed47c9f36d0 | 426,954 |
def build_select(table, to_select, where):
"""
Build a SELECT query.
Parameters
----------
table : str
Table where query will be directed.
to_select: iterable
The list of columns to select.
where: iterable
The list of conditions to constrain the query.
Returns
-------
str
Built query.
"""
sql_q = "SELECT "
sql_q += ', '.join('{0}'.format(w) for w in to_select)
sql_q += ' FROM \"' + table + '\"'
if len(where) > 0:
sql_q += ' WHERE '
sql_q += ' AND '.join('{0} = :{0}'.format(w) for w in where)
return sql_q | be146e09de0373b614ec40b5a4f78b2289bb6c81 | 247,390 |
def bezier_line(P0, P1, P2, P3):
"""Cubic Bezier formula
Returns:
Function of parameter t (1d array)
Reference
https://en.wikipedia.org/wiki/Bezier_curve"""
return lambda t: (1 - t)**3 * P0 + 3 * (1 - t)**2 * t * P1 + 3 * (1 - t) * t**2 * P2 + t**3 * P3 | 6c7cf18ab77eafd89434528cdd090391e8528bec | 609,739 |
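A usage sketch with scalar control points (numpy arrays work the same way):
curve = bezier_line(0.0, 1.0, 2.0, 3.0)
curve(0.0), curve(0.5), curve(1.0)  # (0.0, 1.5, 3.0)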
def select_with_processing(selector, cluster_sim):
"""A selection wrapper that uses the given operator to select from
the union of the population and the currently processing individuals.
"""
def select(population):
return selector(population + cluster_sim.processing)
return select | 76cf35e02dfe391ff3e2b6f552a36b87d2ce5cf9 | 675,143 |
import re
def tidy_cols(my_csv):
"""
Tidies column names, i.e. lowercases them and replaces spaces with underscores
"""
return [re.sub(" ", "_", col.lower()) for col in my_csv.columns] | 31bfa773f8c8e064645ea613666c1329e1f23711 | 304,798 |
def get_mnsp_index(data) -> list:
"""Get MNSP index"""
mnsps = (data.get('NEMSPDCaseFile').get('NemSpdInputs')
.get('PeriodCollection').get('Period')
.get('InterconnectorPeriodCollection')
.get('InterconnectorPeriod'))
return [i['@InterconnectorID'] for i in mnsps if i['@MNSP'] == '1'] | 4dc7e80d29124910c6aac18e02635ad3a3e1c31f | 369,251 |
def check_policy_profile_exists_on_all_vsm(pprofiles, vsm_hosts):
"""Checks if policy profile is present on all VSM.
:param pprofiles: all the port profile rows for a particular profile
:param vsm_hosts: list of configured VSMs
:returns: boolean
"""
return (len(pprofiles) == len(vsm_hosts) and
len(set(pprofile['id'] for pprofile in pprofiles)) == 1) | beb6e4f31b3043d78fd57041c806e7cf3984d27f | 238,420 |
def get_chunk(total, current, items):
"""
:param total: Total number of wanted chunks
:param current: Current chunk position wanted
:param items: List of data that will be chunked
:return: The chunk
"""
length = len(items)//total
start = current*length
if current == total-1:
return items[start:]
return items[start:start+length] | 11423312c1ed71bd5b04a3b05d8326d3887f8283 | 667,422 |
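A usage sketch (the last chunk absorbs any remainder):
data = list(range(10))
get_chunk(3, 0, data)  # [0, 1, 2]
get_chunk(3, 2, data)  # [6, 7, 8, 9]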
from contextlib import suppress
import signal
def to_signal(sig):
"""
Parses a signal number or named signal.
>>> to_signal(15)
<Signals.SIGTERM: 15>
>>> to_signal("15")
<Signals.SIGTERM: 15>
>>> to_signal("SIGTERM")
<Signals.SIGTERM: 15>
>>> to_signal("TERM")
<Signals.SIGTERM: 15>
"""
with suppress(TypeError, ValueError):
return signal.Signals(int(sig))
with suppress(KeyError):
return signal.Signals[str(sig)]
with suppress(KeyError):
return signal.Signals["SIG" + str(sig)]
raise ValueError(f"not a signal: {sig}") | a71eb593b90acbe52017b5426a5903c730c7fa93 | 354,480 |
from typing import Sequence
def pool_sum(pool: Sequence[int]) -> int:
"""Sum the dice in the pool."""
return sum(pool) | 1c02faa63c6a327ede745171ff9a61fc19fcdf1e | 133,300 |
def get_nonce_bytes(n):
"""BOLT 8 requires the nonce to be 12 bytes, 4 bytes leading
zeroes and 8 bytes little endian encoded 64 bit integer.
"""
return b"\x00"*4 + n.to_bytes(8, 'little') | 3c35dd968d2b92a38937a14a1cecf73a40bd16d2 | 662,632 |
from pathlib import Path
def generate_support_matrix_jobs(
cluster_file: Path,
cluster_config: dict,
cluster_info: dict,
added_or_modified_files: set,
upgrade_support_on_this_cluster: bool = False,
upgrade_support_on_all_clusters: bool = False,
) -> list:
"""Generate a list of dictionaries describing which clusters need to undergo a helm
upgrade of their support chart based on whether their associated support chart
values files have been modified. To be parsed to GitHub Actions in order to generate
jobs in a matrix.
Args:
cluster_file (path obj): The absolute path to the cluster.yaml file of a given
cluster
cluster_config (dict): The cluster-wide config for a given cluster in
dictionary format
cluster_info (dict): A template dictionary for defining matrix jobs prepopulated
with some info. "cluster_name": The name of the given cluster; "provider":
the cloud provider the given cluster runs on; "reason_for_redeploy":
what has changed in the repository to prompt the support chart for this
cluster to be redeployed.
added_or_modified_files (set[str]): A set of all added or modified files
provided in a GitHub Pull Requests
upgrade_support_on_this_cluster (bool, optional): If True, generates jobs to
update the support chart on the given cluster. This is triggered when the
cluster.yaml file itself is modified. Defaults to False.
upgrade_support_on_all_clusters (bool, optional): If True, generates jobs to
update the support chart on all clusters. This is triggered when common
config has been modified in the support helm chart. Defaults to False.
Returns:
list[dict]: A list of dictionaries. Each dictionary contains: the name of a
cluster, the cloud provider that cluster runs on, a Boolean indicating if
the support chart should be upgraded, and a reason why the support chart
needs upgrading.
"""
cluster_info["reason_for_support_redeploy"] = cluster_info.pop(
"reason_for_redeploy"
)
# Empty list to store the matrix definitions in
matrix_jobs = []
# Double-check that support is defined for this cluster.
support_config = cluster_config.get("support", {})
if support_config:
if upgrade_support_on_all_clusters or upgrade_support_on_this_cluster:
# We already know support must be upgraded (on this cluster or on all
# clusters), so just add the cluster name to the list of matrix jobs and move on
matrix_job = cluster_info.copy()
matrix_job["upgrade_support"] = "true"
if upgrade_support_on_all_clusters:
matrix_job[
"reason_for_support_redeploy"
] = "Support helm chart has been modified"
matrix_jobs.append(matrix_job)
else:
# Have the related support values files for this cluster been modified?
values_files = [
cluster_file.parent.joinpath(values_file)
for values_file in support_config.get("helm_chart_values_files", {})
]
intersection = added_or_modified_files.intersection(values_files)
if intersection:
matrix_job = cluster_info.copy()
matrix_job["upgrade_support"] = "true"
matrix_job[
"reason_for_support_redeploy"
] = "Following helm chart values files were modified: " + ", ".join(
[path.name for path in intersection]
)
matrix_jobs.append(matrix_job)
else:
print(f"No support defined for cluster: {cluster_info['cluster_name']}")
return matrix_jobs | 9fcd7631075eb9144777c3a4218f6a123921d7c6 | 124,530 |
def documentation_link(chapter):
# type: (str)->str
"""
Creates a link to the documentation.
This method is useful for showing a link to the ZSL documentation in case of any misconfiguration, etc.
:param chapter: Chapter name to which the link points. Use underscores instead of spaces.
:return: The absolute link to the documentation.
"""
return "http://zsl.readthedocs.io/en/latest/{0}.html".format(chapter) | 3fa25188811b1fece777e95755e5ebdc5cb1a4b8 | 661,162 |
import json
def read_dataformat(file):
"""
Reads a dataformat .json file and returns it as a dict.
Parameters
----------
file : str
Path to dataformat.json file.
"""
with open(file) as fp:
return json.load(fp) | 3676bb2e2e2bb61649183aae41dfbff63018f793 | 116,888 |
def norm_minmax(a):
"""
Normalize the data by setting the minimum at 0 and the maximum at 1.
Parameters:
===========
a: numpy.array
Data to normalize
"""
return (a-a.min())/(a.max()-a.min()) | 33a65d151158c2930274d2a6bd655978196855c7 | 424,034 |
from typing import OrderedDict
def _color_bool(series, color, bg_color):
"""Converts a boolean annotation column to colors."""
color_map = OrderedDict(zip([True, False], [color, bg_color]))
mapped = series.map(color_map).fillna(bg_color)
return mapped, color_map | 7f6030eb3b312ede5564d24f47d7c7c5d9405b59 | 260,911 |
def build_scoring_matrix(alphabet, diag_score, off_diag_score, dash_score):
"""
Takes as input a set of characters alphabet
and three scores diag_score, off_diag_score,
and dash_score. The function returns a dictionary
of dictionaries whose entries are indexed by pairs
of characters in alphabet plus '-'. The score for
any entry indexed by one or more dashes is dash_score.
The score for the remaining diagonal entries is diag_score.
Finally, the score for the remaining off-diagonal entries is off_diag_score
"""
alphabet = alphabet | {'-'}  # avoid mutating the caller's set
scoring_matrix = {}
for first_ltr in alphabet:
temp = {}
for sec_ltr in alphabet:
if first_ltr == sec_ltr and first_ltr != '-':
temp[sec_ltr] = diag_score
elif first_ltr == '-' or sec_ltr == '-':
temp[sec_ltr] = dash_score
else:
temp[sec_ltr] = off_diag_score
scoring_matrix[first_ltr] = temp
return scoring_matrix | 703c3ef7fb6899a46a26d55dae740705b6953adb | 705,146 |
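A usage sketch:
m = build_scoring_matrix({'A', 'C'}, diag_score=10, off_diag_score=4, dash_score=-6)
m['A']['A'], m['A']['C'], m['A']['-']  # (10, 4, -6)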
def format_tasks(tasks):
"""Converts a list of tasks to a list of string representations.
Args:
tasks: A list of the tasks to convert.
Returns:
A list of string formatted tasks.
"""
return ['%d : %s (%s)' % (task.key.id(),
task.description,
('done' if task.done
else 'created %s' % task.created))
for task in tasks] | 3fcf5b83714d61daf7c17d0f4d0c212d8114bd59 | 587,529 |
def multi_endpoint(mocker):
"""Create a multi network manager endpoint mock"""
return [mocker.stub(), mocker.stub()] | a2c0d2ed563d09fdf0cf94e11af30571f4da22e8 | 114,904 |
from pathlib import Path
from typing import List
def fetch_scene_gifs(scene_path: Path) -> List[Path]:
"""Fetches each gif that has been rendered for a scene.
Args:
scene_path (Path): The path towards the scene.
Returns:
List[Path]: List of paths towards each gif. Paths
are constructed like so:
[project-path]/[scene-path]/gifs/[gif-name].gif
"""
gifs_path: Path = scene_path / Path("gifs")
all_gifs: List[Path] = []
for file in gifs_path.iterdir():
# Only collect files with a `.gif` suffix.
if file.suffix == ".gif":
all_gifs.append(file)
return all_gifs | 2c30f57bed62ee6db7b329d363584f0aec9d5ac4 | 446,959 |
def get_primes(n):
"""
Returns prime factors for an integer N
:param n: integer to factor
:return: list<integers> prime factors
"""
primefac = []
d = 2
while d*d <= n:
while (n % d) == 0:
primefac.append(d)  # repeated prime factors are kept
n //= d
d += 1
if n > 1:
primefac.append(n)
return primefac | 464c8fd9d50fda5a04aceb993768416146120b6d | 359,403 |
from typing import List
def average(l: List[float]) -> float:
"""Average of a list of numbers"""
n = len(l)
if n == 0:
return 0
return sum(l) / n | 715d455608a5c55aa8699bfc6823aa8c96a1fdf3 | 79,591 |
def guess_shap(model):
"""guesses which SHAP explainer to use for a particular model, based
on str(type(model)). Returns 'tree' for tree based models such as
RandomForest and XGBoost that need shap.TreeExplainer, and 'linear'
for linear models such as LinearRegression or Elasticnet that can use
shap.LinearExplainer.
Args:
model: a fitted (sklearn-compatible) model
Returns:
str: {'tree', 'linear', None}
"""
tree_models = ['RandomForestClassifier', 'RandomForestRegressor',
'DecisionTreeClassifier', 'DecisionTreeRegressor',
'ExtraTreesClassifier', 'ExtraTreesRegressor',
'GradientBoostingClassifier', 'GradientBoostingRegressor',
'HistGradientBoostingClassifier', 'HistGradientBoostingRegressor',
'XGBClassifier', 'XGBRegressor',
'LGBMClassifier', 'LGBMRegressor',
'CatBoostClassifier', 'CatBoostRegressor',
'NGBClassifier', 'NGBRegressor',
'GBTClassifier', 'GBTRegressor',
'IsolationForest'
]
linear_models = ['LinearRegression', 'LogisticRegression',
'Ridge', 'Lasso', 'ElasticNet', 'SGDClassifier']
skorch_models = ['skorch.net.NeuralNet',
'skorch.regressor.NeuralNetRegressor',
'skorch.classifier.NeuralNetClassifier']
for tree_model in tree_models:
if str(type(model)).endswith(tree_model + "'>"):
return 'tree'
for lin_model in linear_models:
if str(type(model)).endswith(lin_model + "'>"):
return 'linear'
for skorch_model in skorch_models:
if str(type(model)).endswith(skorch_model + "'>"):
return 'skorch'
return None | a1ea7e8c24b4a10610ef7b7193f1612c459befe3 | 249,600 |
import json
def _load_config(config_file):
"""
Load the configuration file
Args:
config_file (str): Configuration file
Returns:
dict: Content of the configuration file
"""
with open(config_file, 'r') as fp:
return json.load(fp) | 921af497ed5b7d8d4eb3f55be46faf59f8fe36db | 397,859 |
def expression_closure(field, operator, value):
""" Creates and returns a function expression.
Operator supports: < <= > >= !=
Example:
>>> class Obj:
>>> number = 5
>>> obj = Obj()
>>> func_expression = expression_closure('number', '<', 10)
>>> func_expression(obj)
True
>>> obj.number = 11
>>> func_expression(obj)
False
"""
if operator == '>':
return lambda obj: getattr(obj, field) > value
elif operator == '<':
return lambda obj: getattr(obj, field) < value
elif operator == '>=':
return lambda obj: getattr(obj, field) >= value
elif operator == '<=':
return lambda obj: getattr(obj, field) <= value
elif operator == '!=':
return lambda obj: getattr(obj, field) != value
else:
raise ValueError("Unknown operator %s" % operator) | 3682cc566eff48eaa67e083aa4f42240541978f2 | 442,270 |
def blob_exists(storage_client, bucket_name, filename):
"""Checks if a file exists in the bucket."""
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(filename)
return blob.exists() | 4f5fa78328401930ce6399a5cea6cdcecc10a173 | 25,553 |
from typing import Tuple
def square_box(box_size: float, image_size: Tuple[float, float]) -> Tuple[float, float]:
"""Calculate normalized square box shape.
:param box_size: initial size
:param image_size: image shape tuple
:return: normalized square box shape
"""
return box_size / image_size[1], box_size / image_size[0] | fa1e39e94afcb0378149051c8d2fefa9a5816b7e | 304,022 |
from typing import Tuple
def count_bases(dna: str) -> Tuple[int, int, int, int]:
""" Count each of the bases in DNA """
cnt_a, cnt_c, cnt_g, cnt_t = 0, 0, 0, 0
for base in dna:
if base == 'A':
cnt_a += 1
elif base == 'C':
cnt_c += 1
elif base == 'G':
cnt_g += 1
elif base == 'T':
cnt_t += 1
return (cnt_a, cnt_c, cnt_g, cnt_t) | adb3bf95d0177cb3533cbdf2dcac9ba587205647 | 83,088 |
def order_df_columns_by_run(dataframe):
"""
Returns a dataframe with re-ordered columns such that second column up to column 'mean'
are ordered by run number from low to high
"""
cols = dataframe.columns.tolist()
stats_start_idx = cols.index("mean")
to_sort = cols[:stats_start_idx]
no_sort = cols[stats_start_idx:]
to_sort.sort(key=lambda x: int(x.split(".")[0].split("_Run")[1]))
new_cols = to_sort + no_sort
return dataframe[new_cols] | 8eb25d63613654864282bf52aa56d3da6c5c934f | 215,768 |
import torch
def bounded_scale_function(bounds):
"""
Sets up a scaling function that maps `(0,1)` to `(bounds[0], bounds[1])`
and clips the values to remain in that range
Args:
bounds: tuple giving the parameter bounds
"""
return lambda x: torch.clamp(x, 0, 1)*(bounds[1]-bounds[0]) + bounds[0] | fab5d3c105ca6e4ffd094424337550191c41a52a | 464,703 |
def get_role(roles, role_id=None, role_name=None):
"""Gets a role from its id or name."""
for role in roles:
if role_id and role.id == int(role_id):
return role
if role_name and role.name == role_name:
return role
return None | 162c0786c97adae91914a5d1c14a9ccfb3d54bc1 | 430,206 |
def string_to_dict(separated_string, separator=',') -> dict:
"""
Takes a string e.g:
a,b,,,,c, ,d
and converts to:
{'a':'', 'b':'', 'c':'', 'd':''}
"""
output = {}
if isinstance(separated_string, str) and len(separated_string) > 0:
values = separated_string.split(separator)
for value in values:
stripped = value.strip()
if len(stripped) > 0:
output[stripped] = ""
return output | ccf3c1aba3e15cec7d799a22608408dde19bd048 | 291,349 |
import time
import logging
def is_even(n: int) -> bool:
"""
Checks if the number 'n' is even or odd
:param n: The number to check
:return: Whether (True) or not (False) the number is even
"""
time.sleep(0.1)
if n % 2 == 0:
logging.info(f"{n} - is even")
return True
else:
logging.info(f"{n} - is odd")
return False | 25fb81ad343aba92308fbb3c4f8878df786bb2fe | 69,175 |
def percentile(N, P):
"""
Find the percentile of a list of values
@parameter N - A list of values. N must be sorted.
@parameter P - A float value from 0.0 to 1.0
@return - The percentile of the values.
"""
if len(N) == 0:
return 0.0
n = int(round(P * len(N) + 0.5))
return N[n-1] | 4ef0e9baad1f008e28b821ee7c65177d50e5b8fd | 631,082 |
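For example (N must already be sorted): percentile([1, 2, 3, 4, 5], 0.5) returns 3, the median.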
from collections import Counter
def anf_xor(form1,form2):
"""
(1^x)^(1^x^y) = 0^y
>>> anf_xor([1,(1,)],[1,(1,),(2,)])
[0, (2,)]
"""
const = form1[0]^form2[0]
xored = Counter(form1[1:])+Counter(form2[1:])
return [const] + [ i for i in xored if xored[i]&1] | a588788d1ff9930e2399d6215700b9e45558c0ef | 492,100 |
def subtract(df, y, exclude=None):
"""Subtract array-like y from dataframe columns.
"""
if exclude is None:
exclude = []
df_subtracted = df.apply(
lambda x: x - y
if x.name not in exclude
else x
)
return df_subtracted | 448dc9a1d70ef3cdfce548b32ccfca2e42234cd7 | 439,450 |
import math
def calculate_frustration(fun, derivative_fun):
"""
Frustration value based on the given function and its derivative.
:param fun: the function to calculate the frustration
:param derivative_fun: The derivative of the provided function.
:return: fun(0) / (sqrt(2) * derivative_fun(0))
"""
return fun(0) / math.sqrt(2) / derivative_fun(0) | 9841fbf64a8a8d2de17832eb101bf1043156394a | 303,216 |
def quantize(rgb, quanta):
"""map a tuple (r,g,b) each between 0 and 255
to our discrete color buckets"""
r, g, b = rgb
r = max([q for q in quanta if q <= r])
g = max([q for q in quanta if q <= g])
b = max([q for q in quanta if q <= b])
return (r, g, b) | 239eb2f7b72d3c1c5e72948dad6e18a0ed2312f2 | 218,600 |
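A usage sketch (quanta should include a value <= every channel, e.g. 0):
quantize((200, 100, 30), [0, 64, 128, 192])  # (192, 64, 0)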
def sanitize_string(input_string):
"""Removes unwanted characters from a string and returns the result
"""
return input_string.replace('"', '') | 596bd7bf4810ce9ef2b96c6d6fead947bd4e22ec | 27,116 |
def rbgToHex(*clr):
"""Given a color in rbg, convert it to hexadecimal color"""
if len(clr) != 3 or max(clr) > 255 or min(clr) < 0: raise ValueError('invalid color')
return '#' + ''.join([
hex(c)[2:].zfill(2) for c in clr
]).upper() | 160da2269f274920fdeca77703592f74a650d10c | 341,930 |
import string
def remove_punctuation(word):
"""Removes all punctuation from word"""
return word.translate(str.maketrans(dict.fromkeys(string.punctuation))) | a8f672ce3b95247578d76874c357f89a2a1f23bc | 85,665 |
def _get_character_pairs(text):
"""Returns a defaultdict(int) of adjacent character pair counts.
>>> _get_character_pairs('Test IS')
{'IS': 1, 'TE': 1, 'ES': 1, 'ST': 1}
>>> _get_character_pairs('Test 123')
{'23': 1, '12': 1, 'TE': 1, 'ES': 1, 'ST': 1}
>>> _get_character_pairs('Test TEST')
{'TE': 2, 'ES': 2, 'ST': 2}
>>> _get_character_pairs('ai a al a')
{'AI': 1, 'AL': 1}
>>> _get_character_pairs('12345')
{'34': 1, '12': 1, '45': 1, '23': 1}
>>> _get_character_pairs('A')
{}
>>> _get_character_pairs('A B')
{}
>>> _get_character_pairs(123)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "strikeamatch.py", line 31, in _get_character_pairs
if not hasattr(text, "upper"): raise ValueError
ValueError: Invalid argument
"""
if not hasattr(text, "upper"):
raise ValueError("Invalid argument")
results = dict()
for word in text.upper().split():
for pair in [word[i]+word[i+1] for i in range(len(word)-1)]:
if pair in results:
results[pair] += 1
else:
results[pair] = 1
return results | 5abcf36cc25bc901d7a00b10c9f021f06a3878b3 | 405,736 |
def depend_on_proj_props(target, source, env):
""" Emitter which adds a dependency for the project properties file """
#sys.stderr.write("depend_on_proj_props called\n")
#sys.stderr.flush()
return (target, source + [env['XISE_PY_PROPFILE']]) | f0ea4c5aa0a6958e71dd051a6aff08cf9318d136 | 687,756 |
def gainbias(lmax, lmin, qcalmax, qcalmin):
"""Calculates gain and bias from max and min radiance"""
gain = (lmax - lmin)/(qcalmax - qcalmin)
bias = (qcalmax*lmin - qcalmin*lmax)/(qcalmax - qcalmin)
return gain, bias | cd520b0b9342ce37d2076aae0e47d4e1716fc014 | 380,381 |
def _map_column_name(key_store, target_name):
"""Figure out the internal column name for a given target name
If a column is used as the index of a table, its PyTables name is 'index'
instead of the desired name. Handle that gracefully.
"""
try:
index_name = key_store.attrs.info['index']['index_name']
except KeyError:
index_name = ''
if index_name == target_name:
return 'index'
else:
return target_name | 671b3e19b821036b3dac16064f757c53e99e5b48 | 137,558 |
def required_keys_avaiable(readme_dict):
"""
Check if the required keys in the readme dictionary are available.
Required keys are the ones needed for further processing.
These are:
* main_name
* base_runs
* username
* email
* info_block
* sub_date
* solver
Parameters
----------
readme_dict : dict
Dictionary to be tested for containing all required keys
Returns
-------
boolean
True if all required keys are available, False otherwise.
"""
REQUIRED_KEYS = [
"main_name",
"base_runs",
"username",
"email",
"info_block",
"sub_date",
"solver"
]
return all(req_key in readme_dict for req_key in REQUIRED_KEYS) | b54b50ac26e1a5c5135ceafabe96234ac5ff31ec | 611,548 |