content (string, 39–9.28k chars) | sha1 (string, 40 chars) | id (int64, 8–710k)
---|---|---|
from typing import Tuple
import itertools
def get_img_windows_list(img_shape: Tuple[int, int], tile_size: int):
"""
return list of tiles coordinate inside an image
currently the tile are computed without overlaping and by rounding
so union of all tiles are smaller than image.
Tile are square.
Parameters
----------
img_shape : Tuple[int, int]
shape of the image in pixel, similar to numpy array shape.
tile_size : int
size of the tile to extract in pixel. size in all dimension are the same.
Returns
-------
List[Tuple(int, int, int, int)] :
List of tile pixel coordinate as a tuple with row/col min and row/col max.
"""
col_step = [col for col in range(0, img_shape[0], tile_size)]
col_step.append(img_shape[0])
row_step = [row for row in range(0, img_shape[1], tile_size)]
row_step.append(img_shape[1])
windows_list = []
for i, j in itertools.product(
range(0, len(col_step) - 2), range(0, len(row_step) - 2)
):
        windows_list.append(
            (
                row_step[j],
                col_step[i],
                row_step[j + 1] - row_step[j],
                col_step[i + 1] - col_step[i],
            )
        )
return windows_list | ac245fb96587a3610b0950502276b5473b63db25 | 517,223 |
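A minimal usage sketch, assuming get_img_windows_list from above is in scope; the shape and tile size are made-up illustrative values:
windows = get_img_windows_list((100, 80), tile_size=50)
print(windows)  # [(0, 0, 50, 50)] -- trailing partial tiles are rounded away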
def groupwise_normalise(gs):
"""
Normalises each group of GeoSeries
:param gs: GeoSeries
:return: normalised GeoSeries
"""
return gs.groupby(level=0).apply(lambda x: x / x.sum()) | e0c87701658481ccea01828c75244b7a1043ee29 | 13,447 |
import tempfile
def create_named_text_file(dir: str, prefix: str, suffix: str) -> str:
"""
Create a named unique file.
"""
fd = tempfile.NamedTemporaryFile(
mode='w+', prefix=prefix, suffix=suffix, dir=dir, delete=False
)
path = fd.name
fd.close()
return path | 7876515d9243ca11d7954b0c259d1bd83c213dea | 402,248 |
def next_params(args, result):
"""Get the parameters for making a next link."""
next_offset = result['offset'] + result['limit']
if result['total'] > next_offset:
params = {'offset': next_offset}
for k, v in args.iterlists():
if k in ['offset']:
continue
params[k] = v
return params | 26475750fd50c08018ff88d89e2c473fe989a1ca | 398,777 |
def solution(array):
"""
ret = max{a[i] + a[j] + i - j} = max{a[i] + i} + max{a[j] - j}
"""
    # assume the first sightseeing spot is 0
ret, aii = 0, 0
for j, aj in enumerate(array):
# max{a[j] - j}
ret = max(ret, aj - j + aii)
# max{a[i] + i}
aii = max(aii, aj + j)
return ret | 5a1d764ee583a47ac9ab4020505c1fe37af5b8c5 | 581,761 |
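A worked example, assuming solution from above is in scope; the input array is illustrative:
print(solution([8, 1, 5, 2, 6]))  # 11, from a[0] + a[2] + 0 - 2 = 8 + 5 - 2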
import locale
import math
def parse_unit(s, unit, ceil=True):
"""Converts '123.1unit' string into ints
If ceil is True it will be rounded up (124)
and and down (123) if ceil is False.
"""
flt = locale.atof(s.split(unit)[0])
if ceil:
return int(math.ceil(flt))
return int(math.floor(flt)) | 8bd27dc8534d0bf7a5f03f41098b39bc7d5aa470 | 440,373 |
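A minimal usage sketch, assuming parse_unit from above is in scope and the default 'C' locale; 'GB' is an arbitrary example unit:
print(parse_unit('123.1GB', 'GB'))              # 124 (rounded up)
print(parse_unit('123.1GB', 'GB', ceil=False))  # 123 (rounded down)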
def get_lib_name(lib):
"""Returns the name of a library artifact, eg. libabc.a -> abc
Args:
lib (File): A library file
Returns:
str: The name of the library
"""
# On macos and windows, dynamic/static libraries always end with the
# extension and potential versions will be before the extension, and should
# be part of the library name.
# On linux, the version usually comes after the extension.
# So regardless of the platform we want to find the extension and make
# everything left to it the library name.
# Search for the extension - starting from the right - by removing any
# trailing digit.
comps = lib.basename.split(".")
for comp in reversed(comps):
if comp.isdigit():
comps.pop()
else:
break
# The library name is now everything minus the extension.
libname = ".".join(comps[:-1])
if libname.startswith("lib"):
return libname[3:]
else:
return libname | bafb2ef68cba448a2419c22977c40f103a426e9a | 517,017 |
def contains(a: str, b: str) -> bool:
"""Returns true if a contains all chars in b."""
    return all(c in a for c in b) | af90fae1f29af856e9aafbec6fd9f2a44d9331c1 | 137,042 |
def EnsureFull(path):
"""Prepends 'end_snippet', making it the full field path.
Args:
path: the path to ensure is full.
Returns:
The path made full.
"""
if not path.startswith("end_snippet."):
path = "end_snippet." + path
return path | 5de3d0a939c0c869eb9f52a9748597d574521fb3 | 510,588 |
from pathlib import Path
def file_exists(file_path):
"""Determines whether or not a file exists."""
path = Path(file_path)
exists = path.is_file()
return exists | ac6e573fe2152ec30e02e3301de948dc4a87e327 | 468,509 |
def _schultz_get_closest_extrema(contour):
"""
Part of Step 11.
Returns the closest repeating extrema to the start and end of the contour.
From Ex15B:
>>> contour = [
... [1, {1, -1}],
... [3, {1}],
... [0, {-1}],
... [3, {1}],
... [0, {-1}],
... [3, {1}],
... [0, {-1}],
... [3, {1}],
... [2, {1, -1}]
... ]
>>> maxima = [(1, [3, {1}]), (3, [3, {1}]), (5, [3, {1}]), (7, [3, {1}])]
>>> minima = [(2, [0, {-1}]), (4, [0, {-1}]), (6, [0, {-1}])]
>>> (c_start, c_end) = _schultz_get_closest_extrema(contour)
>>> c_start
('max', (1, [3, {1}]))
>>> c_end
('max', (7, [3, {1}]))
"""
    # Find the closest elements. Unflag (and store) all repetitions that are not those from above.
closest_start_extrema = next((i + 1, x) for (i, x) in enumerate(contour[1:-1]) if 1 in x[1] or -1 in x[1]) # noqa
closest_end_extrema = next((len(contour) - i - 2, x) for (i, x) in enumerate(contour[1:-1][::-1]) if 1 in x[1] or -1 in x[1]) # noqa
if -1 in closest_start_extrema[1][1]:
closest_start = ("min", closest_start_extrema)
else:
closest_start = ("max", closest_start_extrema)
if -1 in closest_end_extrema[1][1]:
closest_end = ("min", closest_end_extrema)
else:
closest_end = ("max", closest_end_extrema)
return (closest_start, closest_end) | ae3374457f3d9d7cb34c14c47c0827e6001da350 | 413,436 |
def sanitise_db_creds(creds):
"""Clean up certain values in the credentials to make sure that the DB driver
doesn't get confused.
"""
tmp = {}
for name, value in creds.items():
if name == 'port':
tmp[name] = int(value)
elif name == 'password':
tmp['passwd'] = value
else:
tmp[name] = value
return tmp | a5f3e8d4aab2f5959a8a03833f7c3be653234126 | 18,547 |
def encode_classes(df):
"""Encodes the output classes as integers and returns a
dictionary of the encodings.
Parameters
----------
df : Pandas DataFrame
The dataframe containing the photometry of all events.
Returns
-------
Pandas DataFrame
The same dataframe with encoded column Type_ID.
    dict
Dictionary of encoded classes.
"""
df['Type_ID'] = df['Type'].astype('category').cat.codes
#some clunky trickery to get the mapping from classes to values
encoding_dict = df[['Type', 'Type_ID']].drop_duplicates(subset=['Type', 'Type_ID']).sort_values(by='Type_ID').reset_index(drop=True)['Type'].to_dict()
return df, encoding_dict | fb1f6384e04e86b033e4b079676468a4e7e4ff60 | 64,317 |
def find_neighbors_hexagonal_grid(map_coordinates: list, current_position: tuple) -> list:
"""Finds the set of adjacent positions of coordinates 'current_position' in a hexagonal grid.
Args:
map_coordinates (list): List of map coordinates.
current_position (tuple): Current position of the hexagonal grid whose neighbors we want to find.
Returns:
neighbors (list): List of neighbors from the current position in the hexagonal grid map.
"""
x = current_position[0]
y = current_position[1]
candidates = [(x - 2, y), (x - 1, y + 1), (x + 1, y + 1), (x + 2, y), (x + 1, y - 1), (x - 1, y - 1)]
neighbors = [
neighbor
for neighbor in candidates
if neighbor[0] >= 0 and neighbor[1] >= 0 and (neighbor[0], neighbor[1]) in map_coordinates
]
return neighbors | 2a6071d59a69b828eb252504508fa3f706969e1b | 698,662 |
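A small usage sketch, assuming find_neighbors_hexagonal_grid from above is in scope; the grid coordinates are made up:
grid = [(0, 0), (2, 0), (4, 0), (1, 1), (3, 1)]
print(find_neighbors_hexagonal_grid(grid, (2, 0)))  # [(0, 0), (1, 1), (3, 1), (4, 0)]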
import pprint
def dict_to_string(dict_):
"""Converts a dictionary into a pretty string
"""
return pprint.pformat(dict_) | d6dec89db37ae9e1c2f32d97595ff9e02146c326 | 622,167 |
def wrap(x):
""" Ensure x is a list of float """
if x is None:
return None
elif isinstance(x,(float,int)):
return [float(x)]
else:
return list(x) | e3031a96e08466487e6874e149989d55ecf21e2a | 191,611 |
def get_gpu_count () -> int:
"""
Special handling for detecting GPU availability: an approach
recommended by the NVidia RAPIDS engineering team, since `nvml`
bindings are difficult for Python libraries to keep updated.
returns:
count of available GPUs
"""
try:
import pynvml # type: ignore # pylint: disable=E0401
pynvml.nvmlInit()
gpu_count = pynvml.nvmlDeviceGetCount()
except Exception: # pylint: disable=W0703
gpu_count = 0
return gpu_count | e51e9e803977fc5397ffa8b155a4b7b157b00d98 | 130,731 |
def list2str(l):
    """
    Convert list to a string
    :param l: list
    :returns: concatenated string
    """
    return ''.join(str(item) for item in l) | 94e70d371f4c81c08dbdd7d2a583b9c2e68500a8 | 49,152 |
def _threshold_calc(random_edge, max_edge, vertex_degree):
"""
Calculate threshold for branch_gen function.
:param random_edge: number of vertex edges
:type random_edge: int
:param max_edge : maximum edge number
:type max_edge : int
:param vertex_degree: vertex degree
:type vertex_degree: int
:return: threshold as int
"""
threshold = min(random_edge, abs(max_edge - vertex_degree))
return threshold | be50dd97241b2f5aa58c2c67bf9c52f3bde6b361 | 126,633 |
def args_to_list(items):
"""Convert an argument into a list of arguments (by splitting each element on comma)"""
result = []
if items is not None:
for item in items:
if item:
for val in item.split(','):
val = val.strip()
if val:
result.append(val)
return result | 1c73ec092160f311c0fa663d4dc4b6a4b717d5c0 | 118,872 |
def _get_sstable_proto_dict(*input_values):
"""Returns table key -> serialized proto map.
This function exists because the create_parse_tf_example_fn operates on
dequeued batches which could be 1-tuples or 2-tuples or dictionaries.
Args:
*input_values: A (string tensor,) tuple if mapping from a RecordIODataset or
TFRecordDataset, or a (key, string tensor) tuple if mapping from a
SSTableDataset, or (Dict[dataset_key, values],) if mapping from multiple
datasets.
Returns:
dict_extracted: dictionary mapping each sstable (or '' for singular) to the
batch of string tensors for the corresponding serialized protos.
"""
dict_extracted = {}
if isinstance(input_values[0], dict):
for key, serialized_proto in input_values[0].items():
if isinstance(serialized_proto, tuple):
# Assume an SSTable key, value pair.
_, dict_extracted[key] = serialized_proto
else:
dict_extracted[key] = serialized_proto
else:
if len(input_values) == 2:
_, dict_extracted[''] = input_values
else:
dict_extracted[''], = input_values
return dict_extracted | c0d3b3aa9b423115a141b9fddfe6b1aca08912c2 | 426,445 |
def multiply_two_polynomials(polynomial_1: list, polynomial_2: list) -> list:
"""
    This function expects two `polynomials` and returns a `polynomial` that contains
    their `product`.
    :param polynomial_1: First polynomial
    :param polynomial_2: Second polynomial
    :return: A polynomial representing the product of the two polynomials
"""
# initializing a list that is big enough to store the coefficients for every power of 'X'
return_polynomial = [0] * (len(polynomial_1) + len(polynomial_2) - 1)
for i in range(len(polynomial_1)):
for j in range(len(polynomial_2)):
# updating the coefficient to the appropriate value
return_polynomial[i + j] += polynomial_1[i] * polynomial_2[j]
return return_polynomial | 1e859089bb2d5178f745dcb0db13c39e9fcf7512 | 217,363 |
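A worked example, assuming multiply_two_polynomials from above is in scope and treating index i as the x^i coefficient:
print(multiply_two_polynomials([1, 2], [3, 4]))  # [3, 10, 8], i.e. (1 + 2x)(3 + 4x) = 3 + 10x + 8x^2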
def _get_shlib_stem(target, source, env, for_signature: bool) -> str:
"""Get the base name of a shared library.
Args:
target: target node containing the lib name
source: source node, not used
env: environment context for running subst
for_signature: whether this is being done for signature generation
Returns:
the library name without prefix/suffix
"""
verbose = False
target_name = str(target.name)
shlibprefix = env.subst("$SHLIBPREFIX")
shlibsuffix = env.subst("$_SHLIBSUFFIX")
if verbose and not for_signature:
print(
"_get_shlib_stem: target_name:%s shlibprefix:%s shlibsuffix:%s"
% (target_name, shlibprefix, shlibsuffix)
)
if shlibsuffix and target_name.endswith(shlibsuffix):
target_name = target_name[: -len(shlibsuffix)]
if shlibprefix and target_name.startswith(shlibprefix):
            # skip pathological case where the target _is_ the prefix
if target_name != shlibprefix:
target_name = target_name[len(shlibprefix) :]
if verbose and not for_signature:
print("_get_shlib_stem: target_name:%s AFTER" % (target_name,))
return target_name | 196cb810731944270199e3a90548a3e6e8772a6e | 152,339 |
def bool_to_string(b):
"""Convert a boolean type to string.
Args:
b (bool): A Boolean.
Raises:
TypeError
Returns:
str: String representation of a bool type.
"""
s = str(b).lower()
if s in ["true", "false"]:
return s
raise TypeError("Value must be True or False.") | 586cd8312ba071982bf7db407d568181796e8e8e | 86,586 |
def place_rfs(length,count,width):
"""
place-rfs - returns a list of receptive field index lists
for use as rf-indices in an rf-array
params
length = the length of the input vector
count = the number of rfs
width = the width of each rf
The rfs will be placed such that the first begins at 0 and the
last ends at length - 1. The rest will be (approximately) evenly
spaced in between. i.e. in 0..(length - width) step (length -
width)/(count - 1)
Note that they're assumed to overlap!
"""
if count==1:
return [(0,length)]
end_pos = length-width
step = int(round(end_pos / (count - 1.0)))
pos = 0
result = []
for i in range(count-1):
result.append((pos,pos+width))
pos += step
result.append((end_pos,end_pos+width))
return result | 94c13f594fcd9e8990792842b0020d00373c6415 | 278,454 |
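A quick usage sketch, assuming place_rfs from above is in scope; the sizes are illustrative:
print(place_rfs(length=10, count=3, width=4))  # [(0, 4), (3, 7), (6, 10)] -- first starts at 0, last ends at length - 1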
def _process(json_data):
"""Return a list of filenames grouped by iteration."""
iterations = []
for iteration in json_data[u'iterations']:
filenames = [x[u'filename'] for x in iteration[u'spreadsheets']]
iterations.append(filenames)
return iterations | 98c89b637849c53d6cb2e67467f541bcb0e4099b | 341,209 |
def is_column_fully_sorted(column, max_column_length):
"""Check if a column is fully sorted, that is, if it's empty or full of the same colour
>>> is_column_fully_sorted(['b', 'b', 'b', 'b'], 4)
True
>>> is_column_fully_sorted([], 4)
True
>>> is_column_fully_sorted(['b', 'b', 'b', 'o'], 4)
False
>>> is_column_fully_sorted(['b', 'b', 'b'], 4)
False
:param column: list to check sorted status
:param max_column_length: length of a full column
:returns: whether the column is fully sorted
"""
return len(column) == 0 or column.count(column[0]) == max_column_length | cb3c7f06e3934b33d5657e8937ad276c1a751f61 | 276,916 |
def get_cutout(image, cutout_size):
"""Takes an image, and cuts it down to `cutout_size x cutout_size`
It only affects the final two dimensions of the array, so
you can easily deal with multiple images / multiple channels
simply by setting up the array with shape (n_channels, height, width)
Inputs
------
image : np.ndarray (ndim >= 2)
cutout_size : int
the [maximum] number of pixels you want in each dimension
of the final image.
Notes
-----
    If `cutout_size` is larger than an image dimension, it'll silently
keep *the entire* range of that dimension. This won't have any
side-effects on the other dimension of non-square images.
"""
image_shape = image.shape[-2:]
center_index = (image_shape[0]//2, image_shape[1]//2)
# Check that these are actually x and y ordered
min_x = center_index[0] - (cutout_size//2)
max_x = center_index[0] + (cutout_size//2)
min_y = center_index[1] - (cutout_size//2)
max_y = center_index[1] + (cutout_size//2)
if cutout_size % 2 == 1:
# handle odd number of pixels
max_x += 1
max_y += 1
cutout = image[..., min_x:max_x, min_y:max_y]
return cutout | e6d70e395b3b5b032565fa39ee09d1b71bbfb4c8 | 601,690 |
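A minimal usage sketch, assuming get_cutout from above is in scope:
import numpy as np
img = np.arange(36).reshape(6, 6)
print(get_cutout(img, 4).shape)  # (4, 4), taken from the centre of the image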
def require_dict_kwargs(kwargs, msg=None):
""" Ensure arguments passed kwargs are either None or a dict.
If arguments are neither a dict nor None a RuntimeError
is thrown
Args:
kwargs (object): possible dict or None
msg (None, optional): Error msg
Returns:
dict: kwargs dict
Raises:
RuntimeError: if the passed value is neither a dict nor None
this error is raised
"""
if kwargs is None:
return dict()
elif isinstance(kwargs, dict):
return kwargs
else:
if msg is None:
raise RuntimeError(
"value passed as keyword argument dict is neither None nor a dict")
else:
raise RuntimeError("%s" % str(msg)) | 12a66114dd24d316e0229225126f6d2af2d3f0e6 | 472,352 |
def simd_loop_filter(loops, tuning):
""" Filter out the SIMD candidate loops based on the tuning information.
We select the legal simd loop with the highest score.
If there is no such loop, we will set "loops" to all "1"s.
AutoSA will not tile loops with the tiling factor as one for latency hiding or
SIMD vectorization.
If one such loop is found, we will set all loop bounds to 1 except the target loop.
Parameters
----------
loops: list
upper bounds of all candidate SIMD loops
tuning: dict
tuning information for the SIMD stage
"""
scores = tuning['simd']['scores']
legal = tuning['simd']['legal']
# Find the candidate loop with the highest score
simd_loop_idx = -1
max_score = -1
for i in range(len(legal)):
if legal[i] == 0:
continue
if scores[i] > max_score:
max_score = scores[i]
simd_loop_idx = i
    filter_loops = [1 for i in range(len(loops))]
    if simd_loop_idx >= 0:
        filter_loops[simd_loop_idx] = loops[simd_loop_idx]
    return filter_loops | bf9c261aa6a3af7fd4cb30205f3745c7ed29a551 | 637,149 |
import configparser
def load_status(status_file):
"""
Load pipeline status
Args:
status_file (string): name of configparser file
Returns:
configparser.RawConfigParser
"""
config = configparser.RawConfigParser()
    config.read(status_file)
return config | bd118146e10f632c1edfed7c362eb5a39fa61069 | 578,072 |
def batch_by_property(items, property_func):
"""
Takes in a list, and returns a list of tuples, (batch, prop)
such that all items in a batch have the same output when
put into property_func, and such that chaining all these
batches together would give the original list (i.e. order is
preserved)
"""
batch_prop_pairs = []
def add_batch_prop_pair(batch):
if len(batch) > 0:
batch_prop_pairs.append(
(batch, property_func(batch[0]))
)
curr_batch = []
curr_prop = None
for item in items:
prop = property_func(item)
if prop != curr_prop:
add_batch_prop_pair(curr_batch)
curr_prop = prop
curr_batch = [item]
else:
curr_batch.append(item)
add_batch_prop_pair(curr_batch)
return batch_prop_pairs | 8b01fa3f882bf3298e67432f67657476577831f8 | 549,663 |
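A quick usage sketch, assuming batch_by_property from above is in scope; batching integers by parity:
print(batch_by_property([1, 3, 2, 4, 6, 5], lambda n: n % 2))
# [([1, 3], 1), ([2, 4, 6], 0), ([5], 1)]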
def no_filter(img, m, *args):
"""filter that does nothing
Parameters
----------
img : 2D array (float)
image
m : scalar (float)
filter width
Returns
-------
output : 2D array (float)
same image as input
"""
return img | 88e42c8fd82aa27bb8a66eed59281be5fd809605 | 271,597 |
def get_I0_Phi0(self):
"""Return I0 and Phi0
Parameters
----------
self : OPslip
An OPslip object
Returns
-------
I_dict : dict
Dict with key "I0", "Phi0"
"""
return {"I0": self.I0_ref, "Phi0": self.IPhi0_ref} | ef7316f0fcbcf7b4bd5ed22e640eb8534bb1a963 | 132,087 |
def to_lower_case(given: str) -> str:
"""Returns 'given' in lower case
>>> to_lower_case("0D")
'0d'
"""
return given.lower() | 23e8298f7f4e33b827a76a7c17d1e5468f6d5fd1 | 19,278 |
def get_span_literal(span):
"""Get the literal value from an entity's TargetRule, which is set when an entity is extracted by TargetMatcher.
If the span does not have a TargetRule, it returns the lower-cased text.
"""
target_rule = span._.target_rule
if target_rule is None:
return span.text.lower()
return target_rule.literal | ccd7bd17d4cd3da56b973b8667c67ebc67d6b219 | 406,161 |
def split_path(path):
"""
Normalise GCSFS path string into bucket and key.
"""
if path.startswith('gs://'):
path = path[5:]
path = path.rstrip('/').lstrip('/')
if '/' not in path:
return path, ""
else:
return path.split('/', 1) | d4f1881b9d280f9a5bca003e69b017bf2f408e54 | 101,477 |
def fix_line_breaks(text):
""" Convert Win line breaks to Unix
"""
return text.replace("\r\n", "\n") | c4c698fce80d7c3820f689a163d0df19ea682573 | 679,240 |
def despine(chart):
"""Despine altair chart.
"""
chart = chart.configure_axis(
ticks=False,
grid=False,
domain=False,
labels=False)
return chart | fe9c713f0320ecb58e1aea89799502be0073c6ec | 277,982 |
def projects_id_contacts_get(id):
"""
List all contacts associated with this project
:param id: Project id
:type id: int
:rtype: List[Contact]
"""
return 'do some magic!' | 0dec8c0bda6f8d5a6de0d028415c83c7c09624f5 | 344,897 |
def end_ignore_marker() -> str:
"""
Creates an end ignore marker.
:returns: The end ignore marker.
"""
return "# nb--ignore-end\n" | 18d08bf9844003d841dc89003e6a4b5457d18628 | 542,017 |
def _prefixscan_combine(func, binop, pre, x, axis, dtype):
"""Combine results of a parallel prefix scan such as cumsum
Parameters
----------
func : callable
Cumulative function (e.g. ``np.cumsum``)
binop : callable
Associative function (e.g. ``add``)
pre : np.array
The value calculated in parallel from ``preop``.
For example, the sum of all the previous blocks.
x : np.array
Current block
axis : int
dtype : dtype
Returns
-------
np.array
"""
# We could compute this in two tasks.
# This would allow us to do useful work (i.e., func), while waiting on `pre`.
# Using one task may guide the scheduler to do better and reduce scheduling overhead.
return binop(pre, func(x, axis=axis, dtype=dtype)) | 95eff470bc28cb55519608391686781edb2dce1c | 479,316 |
def bool_to_str(boolean: bool) -> str:
"""Bool to str."""
return str(boolean) | d67aaf110f850108b0503e8731aabb28a4fbc89c | 610,983 |
def crosscorr(data1, data2, lag=0, wrap=False):
""" Lag-N cross correlation.
Take two time series data1 and data2 then shift series 2 by a lag (+ve or -ve)
and then see what correlation between the two is.
Either wrap data around to fill gap or shifted data filled with NaNs
Parameters
----------
lag : int, default 0
data1, data2 : pandas.Series objects of equal length
wrap: bool, default False wrap data around - useful in some situations but not for us
Returns
----------
crosscorr : float
"""
if wrap:
shifted2 = data2.shift(lag)
shifted2.iloc[:lag] = data2.iloc[-lag:].values
return data1.corr(shifted2)
else:
return data1.corr(data2.shift(lag)) | 0f7f965d10b2ca8604b46f08f279eda479aafc81 | 143,136 |
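A small usage sketch, assuming crosscorr from above is in scope; the series values are illustrative:
import pandas as pd
s1 = pd.Series([1, 2, 3, 4, 5])
s2 = pd.Series([2, 1, 2, 3, 4])
print(crosscorr(s1, s2, lag=1))  # ~0.63, correlation after shifting s2 forward by one step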
def return_default_nb_of_cores(nb_of_cores, openmp_proportion=2):
"""Function that returns the number of cores used by OpenMP and Nipype by default.
Given ``openmp_proportion``, the proportion of cores dedicated to OpenMP threads,
``openmp_nb_of_cores`` and ``nipype_nb_of_cores`` are set by default to the following:
.. code-block:: python
openmp_nb_of_cores = nb_of_cores // openmp_proportion
nipype_nb_of_cores = nb_of_cores // openmp_nb_of_cores
where ``//`` is the integer division operator.
Parameters
----------
nb_of_cores : int
Number of cores available on the computer
openmp_proportion : int
Proportion of cores dedicated to OpenMP threads
Returns
-------
openmp_nb_of_cores : int
Number of cores used by default by openmp
nipype_nb_of_cores : int
Number of cores used by default by openmp
"""
openmp_nb_of_cores = nb_of_cores // openmp_proportion
nipype_nb_of_cores = nb_of_cores // openmp_nb_of_cores
return openmp_nb_of_cores, nipype_nb_of_cores | a1f0be9b795eb64eedec01ef58afe49c98fadc54 | 335,469 |
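A worked example, assuming return_default_nb_of_cores from above is in scope:
print(return_default_nb_of_cores(8))  # (4, 2): 8 // 2 = 4 OpenMP threads, 8 // 4 = 2 Nipype processes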
def dwid_exists(dwid, cursor):
"""See if a dwid exists in the database or not. If yes, return 1,
if not return 0."""
array = (dwid,)
cursor.execute('SELECT * from dwids where dwid=?', array)
if cursor.fetchone():
return 1
else:
return 0 | 08586088a14cdc11f8308da3d3038667c672fc13 | 644,637 |
import typing
import grp
def get_group_by_name(name: str) -> typing.Optional[typing.Mapping]:
"""
Get group info by group's name from group database.
"""
try:
group = grp.getgrnam(name)
return dict(gid=group.gr_gid, name=group.gr_name, members=group.gr_mem)
except KeyError:
pass
return None | 7d23c3d5e2ac58b679df4f499d5b764e2b9d0516 | 244,127 |
def _resolve(scope, key, context):
"""
Resolve scope and key to a context item
Provides very minimal validation of presence
Parameters
----------
scope: str
singular variant of scope in context
key: str
key to lookup context item in context within scope
context: dict
Returns
-------
context item: str, dict, etc
"""
if scope not in context:
raise ValueError("Scope {0} is not found in context!".format(scope))
if key not in context[scope]:
raise ValueError("Key {0} is not found in context!".format(key))
return context[scope][key] | ac4bb1cc4ba485a34dc1c915949a4c838a64408a | 110,188 |
import random
def random_apply(img, transforms, prob):
"""
Apply a list of transformation, randomly with a given probability.
Args:
img: Image to be randomly applied a list transformations.
transforms (list): List of transformations to be applied.
prob (float): The probability to apply the transformation list.
Returns:
img, Transformed image.
"""
if prob < random.random():
return img
for transform in transforms:
img = transform(img)
return img | 849c94fe6696bd724c02826e0677e3351537d257 | 393,072 |
def parse_size(s):
"""
Converts a string to an integer
Example input can either be: '(1234)' or '1234'
Example output: 1234
"""
s = s.replace('(', '').replace(')', '')
return int(s) | 510527930f5986e623d10a7834a452c2d4b61ebc | 298,574 |
def netperf_commands(target_ips):
"""Generate latency or throughput commands for netperf
Args:
target_ips (list(str)): List of ips to use netperf to
mode (str): Generate latency or throughput commands
Returns:
list(str): List of netperf commands
"""
lat_commands = []
tp_commands = []
for ip in target_ips:
lat_commands.append(
[
"netperf",
"-H",
ip,
"-t",
"TCP_RR",
"--",
"-O",
"min_latency,mean_latency,max_latency,stddev_latency,transaction_rate,p50_latency,p90_latency,p99_latency",
]
)
tp_commands.append(["netperf", "-H", ip, "-t", "TCP_STREAM"])
return lat_commands, tp_commands | 53879db0c29f2db3c259b774e997a39bcd41efd3 | 429,227 |
import base64
def get_image(image_uri):
"""
Loads a local png to be shown in dash
Args:
image_uri: uri of the image
Returns:
the string to be placed in a html.Img container
"""
with open(image_uri, "rb") as file:
encoded = base64.b64encode(file.read())
return f"data:image/png;base64,{encoded.decode()}" | ccc360b0d37281c36ccfb2ffef4569d48ac201a9 | 211,800 |
def humanize_time(secs):
"""Convert seconds into time format.
:type secs: integer
:param secs: the time in seconds to represent in human readable format
    (hh:mm:ss,mmm)"""
mins, secs = divmod(secs, 60)
hours, mins = divmod(mins, 60)
return '%02d:%02d:%02d,%s' % (hours, mins, int(secs),
str(("%0.3f" % secs))[-3:]) | c8771ab7c65372c67f72575a0a023864cda25802 | 78,494 |
import requests
def url_ok(url):
"""
Checks that a given URL is reachable.
:param url: A URL
:rtype: bool
"""
return requests.head(url).ok | f4aa22e55a6c05948488fcb16098c4ed76f9c0d6 | 31,649 |
def get_complete_phrases(ph2parse, tf_api):
"""Retrieve phrases completely covered by the parsings.
The phrase atom parser runs on phrase_atoms, which are
component parts of a complete phrase. In some cases the
parser was unable to parse a phrase_atom, meaning that
some phrases are left without a complete parsing. This
function only selects those phrases with complete parses.
"""
F, L = tf_api.F, tf_api.L
parsed_atoms = set(ph2parse)
# add unparsed conjunctions
# these are compensated for in the Composer object
for atom in F.otype.s('phrase_atom'):
if F.rela.v(atom) == 'Link':
parsed_atoms.add(atom)
# select only those phrases completely covered by the parser
whole_phrases = []
for phrase in F.otype.s('phrase'):
ph_atoms = set(L.d(phrase, 'phrase_atom'))
if parsed_atoms.issuperset(ph_atoms):
whole_phrases.append(phrase)
return whole_phrases | c08c3880fbcbd0f92591a2467c3fc2fb3fafc3ef | 398,236 |
import socket
def find_unused_port(base=1025, maxtries=10000):
"""Helper function. Finds an unused server port"""
if base > 65535:
base = 1025
for _ in range(maxtries):
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
serversocket.bind(("localhost", base))
serversocket.close()
return base
        except OSError:
base += 1
continue
raise RuntimeError("unable to find free socket port") | 1af439ac6882b760ce5a6b0abded895c8b0d1d12 | 645,203 |
def clean_string(string):
"""Removed unwanted characters from string."""
return string.replace(" ", "_").replace("'", "_").replace(".", "").replace(",", "_").encode('ascii', errors='ignore').decode() | 828dbfde7aa9300af99a722c4a31497b7c4e1958 | 233,993 |
def clean_state_labels(system_labels, system_charges):
"""Convert system labels and charges into clean labels like C$^{+}$.
Typically used for figure axes and labels.
Parameters
----------
system_labels : :obj:`list` [:obj:`str`]
Specifies the atoms in the system.
system_charges : :obj:`list` [:obj:`int`]
Total system charges.
Returns
-------
:obj:`list`
Clean state labels.
"""
clean_labels = []
for i in range(len(system_labels)):
sys_label = ''.join([atom.capitalize() for atom in system_labels[i].split('.')])
charge = system_charges[i]
if charge > 0:
if charge == 1:
charge = '+'
else:
charge = str(charge) + '+'
elif charge < 0:
if charge == -1:
charge = '-'
else:
charge = str(charge)[1] + '-'
else:
charge = ''
        clean_labels.append(sys_label + r'$\,^{' + charge + '}$')
return clean_labels | bca32ffd837eba3d188a8f5188beb60be99a3822 | 362,883 |
def strip_query(path):
"""Strips query from a path."""
query_start = path.find('?')
if query_start != -1:
return path[:query_start]
return path | 05245f7fead449431e5af0007ad61da42b3ac246 | 623,505 |
def get_parents(cur, term_id):
"""Return a set of parents for a given term ID."""
cur.execute(
f"""SELECT DISTINCT object FROM statements
WHERE stanza = '{term_id}' AND predicate IN ('rdfs:subClassOf', 'rdfs:subPropertyOf')
AND object NOT LIKE '_:%'"""
)
return set([x[0] for x in cur.fetchall()]) | 31992acc6dfa72a60e4c9b522c665956e7c2c0c1 | 398,565 |
def euclid_dist(in_array1, in_array2):
"""
Computes the squared euclidean distance between two NumPy arrays
Parameters
----------
in_array1 : 1D ndarray of floats
NumPy array of real values.
in_array2 : 1D ndarray of floats
NumPy array of real values.
Returns
-------
distance : float
Squared Euclidean distance between two input arrays.
"""
distance = ((in_array1 - in_array2) * (in_array1 - in_array2)).sum()
return distance | 6e596d40b26d73f391f6a678af0403384b7f1d44 | 247,254 |
def xpath_class(name):
"""Returns an XPath expressions which finds a tag which has a specified
class."""
return 'contains(concat(" ", @class, " "), " %s ")' % name | c9ceb34fff0dbb251ff82ab9522018d1fe8095ad | 384,994 |
import re
def get_sub_formula(formula):
"""
"(SO3)2" -> ("(SO3)2", "(SO3)", "2")
"(SO3)" -> ("(SO3)", "(SO3)", "")
"""
    match = re.search(r'(\([a-zA-Z\d]+\))(\d*)', formula)
if not match:
return
full_sub_formula = match.group(0)
sub_formula = match.group(1)
multiplier = match.group(2)
return full_sub_formula, sub_formula, multiplier | b0f4570aff6e8678a3a4901968020d995ed81535 | 195,368 |
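A minimal usage sketch, assuming get_sub_formula from above is in scope; the formulas are illustrative:
print(get_sub_formula("Fe2(SO4)3"))  # ('(SO4)3', '(SO4)', '3')
print(get_sub_formula("H2O"))        # None -- no parenthesised sub-formula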
def clean_text(text):
"""
Given a block of text, clean it to remove unwanted characters other cruft.
This will:
1. Strip newlines - we don't care about them for word counts,
2. Cast to lowercase for consistency.
3. Replace / with a space, retaining the words on either side of the /
4. Replace "smart" quotes and apostrophes with dumb ones.
"""
text = text.replace('\n', '').lower()
text = text.replace('/', " ")
text = text.replace('’', "'")
return text | 24879e8220637a4a5f97fa1dbdb81c1c83c07477 | 149,450 |
import torch
def redistribute_errors(power_deltaE_hyab, cmax, pc, pt):
"""
Redistributes exponentiated HyAB errors to the [0,1] range
:param power_deltaE_hyab: float tensor (with Nx1xHxW layout) containing the exponentiated HyAb distance
:param cmax: float containing the exponentiated, maximum HyAB difference between two colors in Hunt-adjusted L*A*B* space
:param pc: float containing the cmax multiplier p_c (see FLIP paper)
:param pt: float containing the target value, p_t, for p_c * cmax (see FLIP paper)
:return: image tensor (with Nx1xHxW layout) containing redistributed per-pixel HyAB distances (in range [0,1])
"""
    # Re-map error to 0-1 range. Values between 0 and
    # pccmax are mapped to the range [0, pt],
    # while the rest are mapped to the range (pt, 1]
    pccmax = pc * cmax
    deltaE_c = torch.where(power_deltaE_hyab < pccmax, (pt / pccmax) * power_deltaE_hyab, pt + ((power_deltaE_hyab - pccmax) / (cmax - pccmax)) * (1.0 - pt))
    return deltaE_c | 3e5ce06b0e1ef68b3c04dbc4476ad7e890d91354 | 670,653 |
def make_words(text):
"""
make a list of words from a large bunch of text
Strips all the punctuation and other stuff from a
large string, and returns a list of words
"""
    replace_punc = [('-', ' '),
                    (',', ''),
                    ('.', ''),
                    (')', ''),
                    ('(', ''),
                    ('"', '')]
# make a translation table for str.translate
table = {}
for orig, replace in replace_punc:
table[ord(orig)] = replace
text = text.translate(table)
# lower-case everything to remove that complication:
text = text.lower()
# split into words
words = text.split()
# remove the bare single quotes: "'" is both a quote and an apostrophe
# and capitalize "i"
words2 = []
for word in words:
word = "" if word == "'" else word # remove quote by itself
word = "I" if word == 'i' else word
word = word[1:] if word.startswith("'") else word
word = word[:-1] if word.endswith("'") else word
words2.append(word)
return words2 | a45b2b0cf4c9fd83addc002426da3b5196da15e8 | 626,402 |
import re
def is_bvlapi_guid(guid):
""" Checks that a given string is potentially a GUID used by the API.
:param str guid: a string that might be a GUID
:rtype: bool
:return: is the given string a GUID?
"""
    return bool(re.match(r"^BVBL", guid)) | ee2900effa8efa2b7bd3c4327f533d63a678d2ff | 360,801 |
def xyxy2xywh(box):
"""
Convert bounding box from xyxy to xywh format
:param box: array-like, contains (x1, y1, x2, y2)
"""
x1, y1, x2, y2 = box
w, h = x2 - x1, y2 - y1
x_c = x1 + w / 2
y_c = y1 + h / 2
return x_c, y_c, w, h | af8b5d4568dfc29a71164ccef58f15b9c06f695a | 34,874 |
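A quick usage sketch, assuming xyxy2xywh from above is in scope:
print(xyxy2xywh((10, 20, 50, 80)))  # (30.0, 50.0, 40, 60): centre point plus width and height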
def get_xy(artist):
"""Gets the xy coordinates of a given artist"""
if "Collection" in str(artist):
x, y = artist.get_offsets().T
elif "Line" in str(artist):
x, y = artist.get_xdata(), artist.get_ydata()
else:
raise ValueError("This type of object isn't implemented yet")
return x, y | 535c0ee0307eac5645debc841dcdd89885c10600 | 128,213 |
def ordinal(string, d):
"""Return the ordinal of the character at dth position.
Specifically, the ordinal of end-of-string is defined to be -1.
"""
if d < len(string):
return ord(string[d])
elif d == len(string):
return -1
else:
raise IndexError | 64b96e6d1a489281ca068c59c89473a94bbc89f2 | 572,741 |
def starts_with(values, sequence):
    """Checks if a sequence starts with the provided values"""
    return sequence[: len(values)] == values | 3b1da3a42f707b9d082b0f9734fbb5b0b763dcd1 | 242,115 |
def not_none(seq):
"""Returns True if no item in seq is None."""
for i in seq:
if i is None:
return False
return True | 0995fbce79593a7202574eee8c25b45e872ec61b | 134,211 |
def calculate(nth_number):
"""Returns the difference between the sum of the squares and the
square of the sum of the specified number of the first natural numbers"""
sums = sum(number for number in range(1, nth_number + 1))
sum_of_squares = 0
for number in range(1, nth_number + 1):
sum_of_squares += number ** 2
answer = sums ** 2 - sum_of_squares
return answer | ea5ca1233524e052536a74993718803c74f7c856 | 316,007 |
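A worked example, assuming calculate from above is in scope; for the first 10 natural numbers:
print(calculate(10))  # 2640 == 55**2 - 385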
def bool_converter(value: str) -> bool:
"""
:param value: a string to convert to bool.
:return: False if lower case value in "0", "n", "no" and "false", otherwise, returns the value returned
by the bool builtin function.
"""
if value.lower() in ['n', '0', 'no', 'false']:
return False
return bool(value) | 10251dfbb0200297d191dce1eb20237aed9e5ac9 | 11,550 |
from typing import List
def format_results(json_results: List) -> str:
"""
Purpose:
Format the json to a text response
Args:
json_results: crash data
Returns:
text_string: formatted text
"""
text = ""
if len(json_results) == 0:
text += "No 311 requests in the past year"
for item in json_results:
text += f"On {str(item['adddate']).replace('+00:00','')} request {item['servicerequestid']} was put in at {item['streetaddress']}\n\n"
if item["details"]:
text += f" Here are the details: {item['details']}\n"
return text | da8d1487bc9fcafd66bd1c13e2ee19876fd68f28 | 206,023 |
import torch
def is_sparse_tensor(tensor):
"""Check if a tensor is sparse tensor.
Parameters
----------
tensor : torch.Tensor
given tensor
Returns
-------
bool
whether a tensor is sparse tensor
"""
    return tensor.layout == torch.sparse_coo | 8d7019b3bacab1e4139a00de9d69fef188477eb6 | 248,672 |
def get_block(cur, cp):
"""get block name of a codepoint"""
cur.execute("SELECT name FROM blocks WHERE first <= %s AND last >= %s", (cp,cp))
blk = cur.fetchone()
if blk:
blk = blk['name']
return blk | fbae26d4f262b6416a0d3b7abb2a1b83493ef8ca | 331,049 |
from typing import List
def group_consecutive_elements(l: list) -> List[tuple]:
"""Group consecutive elements of a list into a list of tuples
"""
return list(zip(l, l[1:])) | aea550fa3cf43bf4afd08d96c9d94803e88fb4a1 | 547,056 |
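A minimal usage sketch, assuming group_consecutive_elements from above is in scope:
print(group_consecutive_elements([1, 2, 3, 4]))  # [(1, 2), (2, 3), (3, 4)]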
def katdal_ant_name(aips_ant_nr):
"""Return antenna name, given the AIPS antenna number"""
if aips_ant_nr < 65:
res = f'm{(aips_ant_nr-1):03d}'
else:
res = f's{(aips_ant_nr-65):04d}'
return res | 0c2bd8bfe1a9db42ce4853dd7125ba9fdc5e4534 | 312,511 |
def flip_y_perspective(row:int, current_player:int, is_vwall:bool=False)->int:
"""Flip row coordinates for player 1 so that -- no matter who the 'current player' is -- the enemy's gate is down.
Note that flip_y_perspective is its own inverse, so flip_y_perspective(flip_y_perspective(r, c, v), c, v) == r
"""
# Since vertical walls are labeled by their 'top' coordinate, we need a slightly different rule. This is because
# what was the 'bottom' coordinate at (row+1) will become the top coordinate.
if current_player == 0:
return row
elif is_vwall:
# Essentially 8-(row+1) to flip 'bottom' of wall to 'top'
return 7-row
else:
return 8-row | f649010a56676b22c4491bb9fb6624d10218acd7 | 217,836 |
def calculate_fg(row, fgsum, fgsub):
"""This function calculates a new functional group to be added to the
dataframe
fgsum : list of str (functional groups to add)
fgsub : list of str (functional groups to substract)
returns : count (int)"""
count = 0
for fg in fgsum:
count += row[fg]
for fg in fgsub:
count -= row[fg]
return count | 2178e0ea01ba0dfdf9f81cb0fef867007d4e74a5 | 261,981 |
def create_stream(client, name, description, member_emails, invite_only, mandatory_streams=None):
"""
Create a stream in Zulip and invite users to it.
:param client: A Zulip client object
:param name: Name of the stream
:param description: Description of the stream
:param member_emails: List of emails of all users to be invited
:param invite_only: Option to make the stream invite only
    :param mandatory_streams: List containing dictionaries of mandatory streams
:return: Result of request
"""
# To make default value immutable
if mandatory_streams is None:
mandatory_streams = []
result = client.add_subscriptions(
streams=[
{
'name': name,
'description': description
}
] + mandatory_streams,
principals=member_emails,
invite_only=invite_only,
)
return result | 394ec06e9cca378244ff57117a4c2be9c6a39523 | 178,059 |
def GetCorrespondingResidue(seqs, i):
"""Gets the corresponding residue number for two aligned sequences.
*seqs* is a set of two aligned sequences as *(head, sequence)* 2-tuples.
*i* is the number of a residue in sequential numbering of *seqs[0]*
without considering any of the gaps induced by alignment, in 1, 2, ...
numbering.
Returns the number of the residue in sequential numbering of *seqs[1]*
without considering any of the gaps induced by alignment in 1, 2, ...
numbering. Returns *None* if residue *i* of *seqs[0]* aligns with a
gap in *seqs[1]*.
"""
assert len(seqs) == 2
s1 = seqs[0].seq
s2 = seqs[1].seq
assert len(s1) == len(s2)
assert 1 <= i <= len(s1)
s1index = s2index = 0
for j in range(len(s1)):
if s1[j] != '-':
s1index += 1
if s2[j] != '-':
s2index += 1
if s1index == i:
if s2[j] == '-':
return None
else:
return s2index | 1f9e43b7e962bfab2d4db874d26728a9f6183750 | 522,751 |
def get_parametrize_markers_args(node):
"""In pytest 3.6 new API to access markers has been introduced and it deprecated
MarkInfo objects.
This function uses that API if it is available otherwise it uses MarkInfo objects.
"""
return tuple(arg for mark in node.iter_markers("parametrize") for arg in mark.args) | b143d8be0d967bad6f6e7bee59828a45da9dea6a | 617,159 |
from typing import Dict
from collections import OrderedDict
def create_constraint_statements(schema_name: str,
table_name: str,
constraints: Dict,
**kwargs) -> Dict:
"""Function for generating primary key definitions definitions
:param str schema_name: The name of the schema the table belongs to
:param str table_name: The name of the table
:param Dict constraints: The constraint definitions
:param kwargs:
:return: The constraint statements
    :rtype: Dict
"""
statements = OrderedDict(
[
('drop', []),
('create', [])
]
)
if 'new_pk' in constraints:
statement = f"ALTER TABLE {schema_name}.{table_name} ADD CONSTRAINT {constraints['new_pk']['name']}"
statement += f" PRIMARY KEY({','.join(constraints['new_pk']['columns'])})"
statements['create'].append(statement)
if 'drop_pk' in constraints:
statement = f"ALTER TABLE {schema_name}.{table_name} DROP CONSTRAINT IF EXISTS {constraints['drop_pk']['name']} CASCADE"
statements['drop'].append(statement)
return statements | 1282eefa92682fb8214797e5b0fff4618565f3f8 | 385,272 |
def _to_iloc(data):
"""Get indexible attribute of array, so we can perform axis wise operations."""
return getattr(data, 'iloc', data) | 137dc760dc1e33e319017a0b8356a5e12add6b61 | 677,584 |
def is_harmony_cli(args):
"""
Returns True if the passed parsed CLI arguments constitute a Harmony CLI invocation, False otherwise
Parameters
----------
args : Namespace
Argument values parsed from the command line, presumably via ArgumentParser.parse_args
Returns
-------
is_harmony_cli : bool
True if the provided arguments constitute a Harmony CLI invocation, False otherwise
"""
return args.harmony_action is not None | f285bd9e904680266a93e65f82497abba7aef5f0 | 137,630 |
def fix_pyext(mod_path):
"""
Fix a module filename path extension to always end with the
modules source file (i.e. strip compiled/optimized .pyc, .pyo
extension and replace it with .py).
"""
    if mod_path[-4:] in [".pyo", ".pyc"]:
mod_path = mod_path[:-1]
return mod_path | 56adb08f200a7e63c30080472fb30c7a681eee64 | 560,203 |
import socket
def verify_inet_protocol(inet_protocol):
"""Verify the ``INET_PROTOCOL`` from a proxy protocol line.
Args:
inet_protocol (bytes): The segment from the proxy protocol line.
Returns:
socket.AddressFamily: The address family enum associated with the
protocol.
Raises:
        ValueError: If ``inet_protocol`` does not match ``TCP{4,6}``.
"""
if inet_protocol == b"TCP4":
return socket.AF_INET
if inet_protocol == b"TCP6":
return socket.AF_INET6
raise ValueError(f"Unhandled protocol type: {inet_protocol}") | d153127d0312732939141e8829c24712f76d6c2a | 532,360 |
def merge_outputs(*outputs):
"""
Merges model outputs for logging
Parameters
----------
outputs : tuple of dict
Outputs to be merged
Returns
-------
output : dict
Dictionary with a "metrics" key containing a dictionary with various metrics and
all other keys that are not "loss" (it is handled differently).
"""
ignore = ['loss'] # Keys to ignore
combine = ['metrics'] # Keys to combine
merge = {key: {} for key in combine}
for output in outputs:
# Iterate over all keys
for key, val in output.items():
# Combine these keys
if key in combine:
for sub_key, sub_val in output[key].items():
assert sub_key not in merge[key].keys(), \
'Combining duplicated key {} to {}'.format(sub_key, key)
merge[key][sub_key] = sub_val
# Ignore these keys
elif key not in ignore:
assert key not in merge.keys(), \
'Adding duplicated key {}'.format(key)
merge[key] = val
return merge | 6de690a5a8ec4f114d2fd3ab451879585b36376e | 520,849 |
def splitXY(dfXY):
"""
Takes a dataframe with all X (features) and Y (labels) information and
produces four different dataframes: nuclide concentrations only (with
input-related columns deleted) + 1 dataframe for each label column.
Parameters
----------
    dfXY : dataframe with nuclide concentrations and 3 labels: reactor type,
        enrichment, and burnup
Returns
-------
dfX : dataframe with only nuclide concentrations for each instance
r_dfY : dataframe with reactor type for each instance
e_dfY : dataframe with fuel enrichment for each instance
b_dfY : dataframe with fuel burnup for each instance
"""
x = len(dfXY.columns)-3
dfX = dfXY.iloc[:, 0:x]
r_dfY = dfXY.iloc[:, x]
e_dfY = dfXY.iloc[:, x+1]
b_dfY = dfXY.iloc[:, x+2]
return dfX, r_dfY, e_dfY, b_dfY | 8a2f0a331b8c8ece3f10c2e8e645f1c083125fb5 | 521,444 |
def _find_idx_without_numerical_difference(df, column1, column2, delta, idx=None, equal_nan=False):
"""
Returns indices which have bigger numerical difference than delta.
INPUT:
**df** (DataFrame)
**column1** (str) - name of first column within df to compare.
The values of df[column1] must be numericals.
**column2** (str) - name of second column within df to compare.
The values of df[column2] must be numericals.
**delta** (numerical) - value which defines whether indices are returned or not
OPTIONAL:
**idx** (iterable, None) - list of indices which should be considered only
        **equal_nan** (bool, False) - if True, indices are included where at least one value in
        df[column1] or df[column2] is NaN
OUTPUT:
        **index** (pandas.Index) - index within idx where df[column1] and df[column2] deviate by
        at least delta or, if equal_nan is True, one value is NaN
"""
idx = idx if idx is not None else df.index
idx_isnull = df.index[df[[column1, column2]].isnull().any(axis=1)]
idx_without_null = idx.difference(idx_isnull)
idx_no_delta = idx_without_null[(
df.loc[idx_without_null, column1] -
df.loc[idx_without_null, column2]).abs().values <= delta]
if equal_nan:
return idx.difference(idx_no_delta)
else:
return idx_without_null.difference(idx_no_delta) | 9ed9f34b1b8718ee213fd7b8832e5efe7365f116 | 683,342 |
def get_all_items(list_widget, as_string=True):
"""
Gets all the items in a listWidget as a list
:param list_widget: your QListWidget
:param as_string: <bool> if set to true, will return the text of the item. If set to false will return the actual QListWidgetItem
:return: items of your QListWidget
"""
items = []
if as_string is True:
for item in [list_widget.item(i).text() for i in
range(list_widget.count())]:
if item is not None:
items.append(item)
else:
for i in range(list_widget.count()):
items.append(list_widget.item(i))
return items | 1b3fe7c8660075d65c28ef8e235359e56d3b5e7d | 85,386 |
from typing import Dict
def add_prefix_to_param(prefix:str, param_grid:dict) -> Dict[str,list]:
"""
Create a param_grid for Pipeline from an "ordinary" param_grid.
:param prefix: name of the step
:param param_grid: ordinary grid_param
:return: modified dict
"""
return { "%s__%s" % (prefix,k): v for k,v in param_grid.items()} | 542057cb3fea7da078070860793930cae55a8e3e | 282,723 |
def get_abos_options(clang_version_info):
""" Get options to enable aggressive-binary-operation-simplification.
Returns list of options which enables
aggressive-binary-operation-simplification option (which is needed for the
iterator checker) if the Clang version is greater then 8.
Otherwise returns an empty list.
"""
if clang_version_info and clang_version_info.major_version >= 8:
return ['-Xclang',
'-analyzer-config',
'-Xclang',
'aggressive-binary-operation-simplification=true']
return [] | 3e7b78d1a085d8540accf96033e1039c9d329bea | 445,207 |
import requests
def get_remaining_rate_limit(api_key: str) -> int:
"""
Returns your remaining rate limit by
making a request to
:ref:`Apod <extensions/apod:Apod>` and
getting the header ``X-RateLimit-Remaining``,
that's returned on every API response.
For example, if you are using an
API key different from ``DEMO_KEY``,
you have a default hourly rate
    limit of 1,000 requests, according
to the `Portal <https://api.nasa.gov/>`_.
So, if you make 2 requests, your
remaining rate limit will be equal to
998.
**Example**
.. code-block:: python3
from nasawrapper.utils import get_remaining_rate_limit
remaining = get_remaining_rate_limit("DEMO_KEY")
        print(remaining)
"""
headers = requests.get(f"https://api.nasa.gov/planetary/apod?api_key={api_key}").headers
return int(headers["X-RateLimit-Remaining"]) | 39a1b49ca9148a655cc90e25f8a1b8027f4821b5 | 34,318 |
def is_upsidedown_wrong(name):
"""Tell if the string would get a different meaning if written upside down"""
chars = set(name)
mistakable = set("69NZMWpbqd")
rotatable = set("80oOxXIl").union(mistakable)
return chars.issubset(rotatable) and not chars.isdisjoint(mistakable) | bc281a66f004e29acb972007eb65d6e131eabb8e | 478,942 |
def get_related_field_model(field):
"""Returns a field's related field model's app and name or None"""
if field.rel:
model = field.rel.to
return {
'app': model._meta.app_label,
'model': model._meta.object_name
}
else:
return None | 23a872f83ec0be1778afae2fda6df5cabd2e0f24 | 448,545 |
def num2columnletters(num):
    """
    Takes a column number and converts it to the equivalent excel column letters
    :param int num: column number
    :return str: excel column letters
    """
    letters = ''
    while num > 0:
        # divmod on a zero-based value maps 1 -> 'A', 26 -> 'Z', 27 -> 'AA', ...
        num, remainder = divmod(num - 1, 26)
        letters = chr(remainder + ord('A')) + letters
    return letters | 39096fc9df99baba6ddbc1a7a48a7eb0068ddf44 | 632,398 |
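A minimal sanity check, assuming the num2columnletters above is in scope:
print(num2columnletters(1))    # 'A'
print(num2columnletters(27))   # 'AA'
print(num2columnletters(703))  # 'AAA'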
def isin(elt, seq):
"""Like (elt in seq), but compares with is, not ==.
>>> e = []; isin(e, [1, e, 3])
True
>>> isin(e, [1, [], 3])
False
"""
for x in seq:
if elt is x: return True
return False | 19d451399bcaf38d1db5ac2e9440d678bae8255b | 230,791 |