content
stringlengths 39
9.28k
| sha1
stringlengths 40
40
| id
int64 8
710k
|
---|---|---|
def kw_to_dict(**kwargs):
    """Return a dictionary built from the given keyword arguments.

    Args:
        **kwargs: Arbitrary keyword arguments to convert.

    Returns:
        dict: A new dictionary mapping each keyword name to its value.

    Example:
        ``kw_to_dict(foo='Hello', bar='World')`` returns ``{'foo': 'Hello', 'bar': 'World'}``
    """
    # kwargs is already a dict; copy it directly instead of rebuilding it
    # element-by-element with an identity comprehension.
    return dict(kwargs)
def add_key_values(d1):
    """For each (key, list) pair in ``d1``, append the key to its own
    list when it is not already present.  Mutates ``d1`` in place and
    returns it."""
    for k, members in d1.items():
        if k not in members:
            members.append(k)
    return d1
def trickySplit(line, delim):
    """trickySplit(line, delim) - split line by delimiter delim, but ignoring delimiters found inside (), [], {}, '' and "".
    eg: trickySplit("email(root,'hi there'),system('echo hi, mum')", ',')
    would return: ["email(root,'hi there')", "system('echo hi, mum')"]
    """
    parenCnt = 0     # () nesting depth
    curlyCnt = 0     # {} nesting depth
    squareCnt = 0    # [] nesting depth
    doubleqCnt = 0   # 1 while inside "...", else 0
    quoteCnt = 0     # 1 while inside '...', else 0
    splitList = []   # split strings
    current = ''     # current split string
    for c in line:
        if c == '(':
            parenCnt += 1
        elif c == ')':
            parenCnt -= 1
        elif c == '{':
            curlyCnt += 1
        elif c == '}':
            curlyCnt -= 1
        elif c == '[':
            squareCnt += 1
        elif c == ']':
            squareCnt -= 1
        elif c == '"':
            # quotes do not nest, so toggle between 0 and 1.
            # NOTE(review): a quote char of one kind still toggles while
            # inside the other kind of quotes -- confirm that is intended.
            doubleqCnt = 1 - doubleqCnt
        elif c == '\'':
            quoteCnt = 1 - quoteCnt
        elif c == delim:
            # split only when outside every bracket and quote; otherwise
            # fall through and keep the delimiter inside `current`.
            if parenCnt == 0 and curlyCnt == 0 and squareCnt == 0 and doubleqCnt == 0 and quoteCnt == 0:
                splitList.append(current)
                current = ''
                continue
        current += c
    # flush the trailing piece (dropped when empty, e.g. a trailing delim)
    if len(current) > 0:
        splitList.append(current)
    return splitList
def dummy_get_request(dummy_request):
    """Configure the given dummy request as a GET request for view tests
    (Curiosity rover, sol 1) and return it."""
    dummy_request.method = 'GET'
    dummy_request.matchdict = dict(rover_name='Curiosity', sol=1)
    return dummy_request
def youtube_mocker(mocker):
    """Patch ``videos.youtube.build`` and return the resulting mock client."""
    mock_build = mocker.patch("videos.youtube.build")
    return mock_build
def delete_duplicates(ls):
    """
    Question 6.6: Delete duplicates from a sorted array in place and
    return the number of distinct elements remaining (they occupy the
    first positions of ``ls``).
    """
    if not ls:
        return 0
    last = 0  # index of the most recently written unique element
    for pos, value in enumerate(ls):
        if pos > last and value != ls[last]:
            last += 1
            ls[last] = value
    return last + 1
import socket
def is_server_running(host, port):
    """
    Return True if a TCP server is currently accepting connections on
    the specified host and port.
    """
    probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # connect_ex returns 0 on success instead of raising an exception.
        return probe.connect_ex((host, port)) == 0
    finally:
        probe.close()
from typing import Dict
from typing import List
def transform_properties(props=None):
    """
    Transform properties.

    Turn a dict of ``{label_id: {key: value, key2: value2}}`` (one entry
    per LABEL) into a dict with one key per VALUE name, each mapping to a
    list of values (one per label), plus an ``"index"`` list of label ids:

        {
            "index": [1381342, 1381343...]
            "omero:roiId": [1381342, 1381343...],
            "omero:shapeId": [1682567, 1682567...]
        }

    Returns None when ``props`` is None.
    """
    if props is None:
        return None
    # Collect the union of property names up-front, in case some labels
    # are missing some keys.
    out: Dict[str, List] = {}
    for inner in props.values():
        for name in inner:
            out[name] = []
    names = list(out)
    out["index"] = []
    for label_id, inner in props.items():
        out["index"].append(label_id)
        # Missing keys are padded with None so all lists stay aligned.
        for name in names:
            out[name].append(inner.get(name, None))
    return out
def calc_life(trajs, ub=5, lb=-5):
    """
    Identifies transition paths and returns lifetimes of states.

    Parameters
    ----------
    trajs : list of lists
        Set of trajectories.
    ub, lb : float
        Cutoff value for upper and lower states.

    Returns
    -------
    tuple of (list, list)
        Lifetimes recorded for state "A" (q < lb) and state "B" (q > ub).
        Returns None when ub <= lb.
    """
    try:
        assert ub > lb
    except AssertionError:
        print (" Upper bound is lower than lower bound")
        return
    lifeA = []
    lifeB = []
    time = 0
    # NOTE(review): `time` is shared across trajectories and never reset
    # per trajectory -- confirm lifetimes are meant to carry over.
    for tr in trajs:
        state = None      # current assigned state: None, 'A' or 'B'
        ntp = 0           # transition-path count for this trajectory
        time_prev = 0     # time of the previous recorded transition
        for t,q in enumerate(tr):
            # assign state when beyond boundaries
            if q > ub: # state "B"
                if state == 'A':
                    # completed an A -> B transition: record A lifetime
                    ntp +=1
                    lifeA.append(time - time_prev)
                    time_prev = time
                state = 'B'
            elif q < lb: # state "A"
                if state == 'B':
                    # completed a B -> A transition: record B lifetime
                    ntp +=1
                    lifeB.append(time - time_prev)
                    time_prev = time
                state = 'A'
            else:
                # inside the band: remember the last time index at which the
                # trajectory was still strictly on its current state's side.
                if state == 'A' and q < ub:
                    time = t
                elif state == 'B' and q > lb:
                    time = t
    return lifeA, lifeB
def is_enabled(config):
    """Check if the ddtrace plugin is enabled via CLI option or ini setting."""
    from_option = config.getoption("ddtrace")
    return from_option or config.getini("ddtrace")
def get_params(parameters, deep=True, keepcontainers=True):
    """Depth-first, redundantly flatten a nested dictionary.

    Arguments
    ---------
    parameters : dict
        The dictionary to traverse and linearize.
    deep : bool, default=True
        Whether to descend into nested dictionaries.
    keepcontainers : bool, default=True
        Whether to also keep the nested containers (dicts) themselves.
        Effective only if `deep` is `True`.

    Details
    -------
    Nested keys are joined with '__'.  Adapted from scikit's
    BaseEstimator; does not handle recursive dictionaries.
    """
    flat = {}
    for name, value in parameters.items():
        if deep and isinstance(value, dict):
            inner = get_params(value, deep=True, keepcontainers=keepcontainers)
            for sub_name, sub_value in inner.items():
                flat[name + '__' + sub_name] = sub_value
            if not keepcontainers:
                continue
        flat[name] = value
    return flat
import unicodedata
def unicode_to_ascii(sentence):
    """
    Strip combining marks from ``sentence`` by decomposing it (NFD) and
    dropping every character in the 'Mn' (nonspacing mark) category.
    :param sentence: str/unicode
    :return: str
    """
    decomposed = unicodedata.normalize('NFD', sentence)
    kept = [ch for ch in decomposed if unicodedata.category(ch) != 'Mn']
    return ''.join(kept)
from typing import List
import re
def match_regex_list(text: str, patterns: List[str]) -> bool:
    """Checks if text matches any regex in list.

    The match is performed in a case insensitive way.  An empty pattern
    list matches everything (returns True).

    Args:
        text (str): input text
        patterns (List[str]): list of regex

    Returns:
        bool: match of any regex
    """
    if not patterns:
        return True
    # re.search returns None when nothing matches -- no need to build a
    # finditer iterator and catch StopIteration from next().
    return any(re.search(pattern, text, re.IGNORECASE) for pattern in patterns)
def collide(ax0, ay0, ax1, ay1, bx0, by0, bx1=None, by1=None):
    """Return True if the two rectangles intersect.

    The second rectangle may be given as a single point (bx0, by0):
    missing corners default to that point.
    """
    if bx1 is None:
        bx1 = bx0
    if by1 is None:
        by1 = by0
    # They intersect iff they are not separated along either axis.
    separated = ax1 < bx0 or ay1 < by0 or ax0 > bx1 or ay0 > by1
    return not separated
def _VersionList(release):
"""Parse a version string into a list of ints.
Args:
release: The 'release' version, e.g. '1.2.4'.
(Due to YAML parsing this may also be an int or float.)
Returns:
A list of ints corresponding to the parts of the version string
between periods. Example:
'1.2.4' -> [1, 2, 4]
'1.2.3.4' -> [1, 2, 3, 4]
Raises:
ValueError if not all the parts are valid integers.
"""
return [int(part) for part in str(release).split('.')] | 8152af83e4ad5ff2b56fc4505b4fe50f78d42336 | 201,366 |
def _ecdf_y(data, complementary=False):
"""Give y-values of an ECDF for an unsorted column in a data frame.
Parameters
----------
data : Pandas Series
Series (or column of a DataFrame) from which to generate ECDF
values
complementary : bool, default False
If True, give the ECCDF values.
Returns
-------
output : Pandas Series
Corresponding y-values for an ECDF when plotted with dots.
Notes
-----
This only works for plotting an ECDF with points, not for staircase
ECDFs
"""
if complementary:
return 1 - data.rank(method="first") / len(data) + 1 / len(data)
else:
return data.rank(method="first") / len(data) | 948d6db91d56c0f5edbaced6caa4c7927c9d90a7 | 678,889 |
import random
def choose_from_hist(hist):
    """Return a random value from the histogram,
    chosen with probability in proportion to frequency.

    Args:
        hist: mapping of value -> integer frequency.

    Returns:
        One of the keys of ``hist``, sampled by frequency.
    """
    # dict.iteritems() is Python 2 only and raises AttributeError on
    # Python 3; items() works on both.
    pool = []
    for word, freq in hist.items():
        pool.extend([word] * freq)
    return random.choice(pool)
import itertools
def Group(items, key_func):
    """Groups items based on their key.

    Note that this only associates subsequences of consecutive items of
    the same key.  If a caller requires that all items bearing the same
    key be grouped together, it is their responsibility to sort the input
    by key prior to calling this function.

    Args:
        items: Iterable of values.
        key_func: A function that returns the key of each item.

    Returns:
        A list of pairs (key, vals), where vals is a list of consecutive
        items whose key_func() equals key.
    """
    grouped = []
    for key, run in itertools.groupby(items, key_func):
        grouped.append((key, list(run)))
    return grouped
def turn_from_decimal_to_binary(decimal_adress: str) -> str:
    """
    Return the dotted binary form of a dotted-decimal IPv4 address,
    zero-padding each octet to 8 bits.

    >>> turn_from_decimal_to_binary("192.168.1.15")
    '11000000.10101000.00000001.00001111'
    """
    octets = decimal_adress.split('.')
    # format(..., '08b') is equivalent to bin(...)[2:].zfill(8)
    return '.'.join(format(int(octet), '08b') for octet in octets)
from datetime import datetime
from typing import List
def parse_text(now: datetime, key: str, group: List[str]) -> str:
    """
    Handles the Text presentation
    https://docs.oracle.com/javase/6/docs/api/java/text/SimpleDateFormat.html#text
    """
    if key == "G":
        # Era designator -- pattern length has no effect.
        return "AD"
    if key == "E":
        # Day of week: 4+ letters selects the full name, fewer the
        # abbreviated one.
        fmt = "%A" if len(group) >= 4 else "%a"
        return now.strftime(fmt)
    if key == "a":
        # AM/PM marker -- pattern length has no effect.
        return now.strftime("%p")
    raise Exception(f"Unknown character {key} for parse_text")
def _get_error_threshold_function(error_threshold, simulator):
""" Returns a boolean-valued function that indicates whether a particular (x,y) pair is
outside a specified threshold
:param error_threshold: can be None or a string; if this is a string, the first character MAY be '+' or '-', to
indicate whether values above or below the threshold (respectively) are considered "bad". If nothing is
specified, this defaults to '+'. The remainder of the string MUST either be a constant value convertible to a
float, or the name of a piecewise constant function in the simulator. In the former case, any value above/below
the constant is considered "bad", and in the latter, any value above/below the value of the piecewise constant
function at that point in time is considered "bad".
Examples:
* '100', '+100' => values greater than 100 exceed the threshold
* '-0' => values below 0 exceed the threshold
* 'cost_per_hour' => values above the simulator's cost_per_hour function exceed the threshold
* '-cpus' => values below the simulator's cpu function exceed the threshold
:param simulator: a Simulator object
:returns: a boolean-valued threshold function
"""
if not error_threshold:
return lambda x, y: False
reverse = False
if error_threshold[0] in ("-", "+"):
reverse = error_threshold[0] == "-"
error_threshold = error_threshold[1:]
try:
constant = float(error_threshold)
return lambda x, y: ((y > constant) if not reverse else (y < constant))
except ValueError:
piecewise = getattr(simulator, error_threshold)
return lambda x, y: ((y > piecewise.call(x)) if not reverse else (y < piecewise.call(x))) | 7dc15face5fd9f565494d194ec9fec5417e11a49 | 640,912 |
import struct
def encode_double(value):
    """Encode ``value`` as a big-endian IEEE 754 double, returned as a bytearray."""
    packed = struct.pack("!d", value)
    return bytearray(packed)
def calculate_efficiency_ratio(kills, deaths, assists):
    """
    Bungie's efficiency formula:
        efficiency = (kills + assists) / deaths
    falling back to kills + assists when there are no deaths.
    Rounded to two decimal places.
    """
    total = kills + assists
    ratio = total / deaths if deaths > 0 else total
    return round(ratio, 2)
def _value_to_numeric(value):
"""Convert a value string to a number.
Parameters
----------
value : str
Parameter value as a string.
Raises
------
ValueError
If the string cannot be converted.
Returns
-------
number
The value converted to ``int`` or ``float``.
"""
try:
return int(value)
except ValueError:
return float(value) | 0b067e9a20a0d0c58b23cff42dbd285437924945 | 396,013 |
import tokenize
def read_pyfile(filename):
    """Read a Python source file and return its contents as a string,
    honouring the encoding declared in the file (PEP 263)."""
    with tokenize.open(filename) as source:
        contents = source.read()
    return contents
import math
def factors(n):
    """ Return the list of n's positive divisors.
    --param
        n : int
    --return
        list (empty when n < 1)
    """
    if n < 1:
        return []
    found = {1, n}
    if n not in {1, 2, 3}:
        # Trial division up to sqrt(n); each hit adds both cofactors.
        for small in range(2, math.floor(math.sqrt(n)) + 1):
            if n % small == 0:
                found.add(small)
                found.add(n // small)
    return list(found)
def splitImageFrames(image_in):
    """Splits an image (or multiple images as a list) into its individual
    frames.

    Args:
        image_in: a multi-frame image object, or a list of such images.

    Returns:
        For a single image, a list of independent frame copies; for a
        list of images, a list of such per-image lists.
    """
    def _split_single(image):
        # Copy each frame independently so the copies survive later seek()s.
        frames = [None] * image.n_frames
        for index in range(image.n_frames):
            image.seek(index)
            frames[index] = image.copy()
        return frames

    # The original duplicated this loop for the list and scalar cases;
    # one helper removes the duplication.
    if isinstance(image_in, list):
        return [_split_single(image) for image in image_in]
    return _split_single(image_in)
from typing import Callable
def custom_reducer(separator: str) -> Callable:
    """Build a reducer for :py:meth:`flatten_dict.flatten_dict.flatten()`
    that joins key parts with ``separator`` (top-level keys pass through)."""
    def _reduce(parent_key, child_key):
        return child_key if parent_key is None else f"{parent_key}{separator}{child_key}"
    return _reduce
def _any_trits(left: int, right: int) -> int:
"""
Adds two individual trits together and returns a single trit
indicating whether the result is positive or negative.
"""
res = left + right
return (res > 0) - (res < 0) | dbf94b4a650cb06eb851c26b25f0e114d93b3868 | 623,318 |
import json
def load_json(path: str):
    """Read the JSON document at ``path`` and return the parsed data."""
    with open(path) as handle:
        return json.load(handle)
from pathlib import Path
import gzip
import json
def _read_json_from_gz(compressed_dir_path: Path) -> dict:
"""
Opens a gzip file, reads its content (JSON expected),
and returns a dictionary.
Parameters
----------
compressed_dir_path: Path
Path to the ``.gz`` file to read.
Returns
-------
dict
The information contained in the file,
converted from plain-text JSON.
"""
if not compressed_dir_path.is_file():
raise FileNotFoundError(f"Couldn't find file {compressed_dir_path!s}")
# Read content
with gzip.open(compressed_dir_path, mode='rt') as gz:
content = gz.read()
details_json = json.JSONDecoder().decode(content)
return details_json | 3323e0b84f66070885afb79844e8079655cfac61 | 361,851 |
def first_sample_of_frame(frame,
                          frame_shift_in_samples,
                          window_size_in_samples):
    """
    Return the sample-index of the first sample of frame ``frame``.
    Caution: this may be negative; out-of-range samples are treated as
    zero.  Analogous to kaldi10's FirstSampleOfFrame in
    feat/feature-window.h.

    Args:
        frame (int): The frame index >= 0.
        frame_shift_in_samples (int): The frame shift in samples.
        window_size_in_samples (int): The window size in samples.

    Returns:
        int: The first sample of this frame (caution: may be negative).
    """
    midpoint = frame * frame_shift_in_samples + frame_shift_in_samples // 2
    start = midpoint - window_size_in_samples // 2
    # Integer result doubles as an indirect check that inputs were ints.
    assert isinstance(start, int)
    return start
import random
def generate_sequence(length, alphabet):
    """
    Generate a random sequence of ``length`` elements drawn (with
    replacement) from ``alphabet``.  Useful for benchmarking and for
    creating examples in the docs.

    Example
    --------
    >>> ps.generate_sequence(12, [1,2,3])
    [2, 3, 3, 3, 2, 2, 2, 1, 3, 3, 2, 2]
    """
    sequence = []
    for _ in range(length):
        sequence.append(random.choice(alphabet))
    return sequence
def find_collection(client, dbid, id):
    """Find whether or not a CosmosDB collection exists.

    Args:
        client (obj): A pydocumentdb client object.
        dbid (str): Database ID.
        id (str): Collection ID.

    Returns:
        bool: True if the collection exists, False otherwise.
    """
    database_link = "dbs/" + dbid
    query = {
        "query": "SELECT * FROM r WHERE r.id=@id",
        "parameters": [{"name": "@id", "value": id}],
    }
    collections = list(client.QueryCollections(database_link, query))
    # A non-empty result set means the collection exists; no need for an
    # explicit if/else returning True/False.
    return len(collections) > 0
def arg_k_max(lst, k):
    """Return the indices of the ``k`` largest elements of ``lst``
    (in decreasing order of value)."""
    best = []  # (index, value) pairs kept sorted by value, descending
    for idx, item in enumerate(lst):
        if len(best) < k or best[-1][1] < item:
            if len(best) >= k:
                # Evict the current smallest before inserting.
                best.pop()
            best.append((idx, item))
            best.sort(reverse=True, key=lambda pair: pair[1])
    return [index for index, _ in best]
def _getUniqueHistogram(plt):
"""Return the histogram if there is a single histogram and no curve in
the plot. In all other cases, return None.
:param plt: :class:`.PlotWidget` instance on which to operate
:return: histogram or None
"""
histograms = plt._getItems(kind='histogram')
if len(histograms) != 1:
return None
if plt.getAllCurves(just_legend=True):
return None
return histograms[0] | a5b5eb5dc5112fc4a0fc5ee9eac597269623e469 | 349,866 |
def without_piecewise(expr):
    """
    Strip Piecewise nodes from an expression by assuming all restricting
    conditions are false, i.e. keeping only each Piecewise's final
    (fallback) branch.
    """
    if not expr.args:
        return expr
    if expr.is_Piecewise:
        # The last (expr, cond) pair is the fallback branch.
        return without_piecewise(expr.args[-1].expr)
    rebuilt = [without_piecewise(arg) for arg in expr.args]
    return expr.func(*rebuilt)
def wbgt(twb, tg, tdb=None, with_solar_load=False, **kwargs):
    """
    Calculates the Wet Bulb Globe Temperature (WBGT) index calculated in
    compliance with the ISO 7243 [11]_. The WBGT is a heat stress index that
    measures the thermal environment to which a person is exposed. In most
    situations, this index is simple to calculate. It should be used as a
    screening tool to determine whether or not heat stress is present.

    The WBGT is defined as a function of only twb and tg if the person is
    not exposed to direct radiant heat from the sun. When a person is
    exposed to direct radiant heat, tdb must also be specified.

    Parameters
    ----------
    twb : float
        natural (no forced air flow) wet bulb temperature, [°C]
    tg : float
        globe temperature, [°C]
    tdb : float
        dry bulb air temperature, [°C]. This value is needed as input if the
        person is exposed to direct solar radiation
    with_solar_load: bool
        True if the globe sensor is exposed to direct solar radiation

    Other Parameters
    ----------------
    round: boolean, default True
        if True rounds output value, if False it does not round it

    Returns
    -------
    wbgt : float
        Wet Bulb Globe Temperature Index, [°C]

    Examples
    --------
    .. code-block:: python

        >>> from pythermalcomfort.models import wbgt
        >>> wbgt(twb=25, tg=32)
        27.1

        >>> # if the person is exposed to direct solar radiation
        >>> wbgt(twb=25, tg=32, tdb=20, with_solar_load=True)
        25.9
    """
    default_kwargs = {
        "round": True,
    }
    kwargs = {**default_kwargs, **kwargs}
    if with_solar_load:
        # Test against None (not truthiness): the original `if tdb:`
        # wrongly rejected a valid dry bulb temperature of 0 °C.
        if tdb is None:
            raise ValueError("Please enter the dry bulb air temperature")
        t_wbg = 0.7 * twb + 0.2 * tg + 0.1 * tdb
    else:
        t_wbg = 0.7 * twb + 0.3 * tg
    if kwargs["round"]:
        return round(t_wbg, 1)
    else:
        return t_wbg
def price_index(price_of_product_x, price_of_product_y):
    """Return the price index of product X over product Y.

    Args:
        price_of_product_x (float): Price of product X.
        price_of_product_y (float): Price of product Y.

    Returns:
        price_index (float): (Price of X / Price of Y) * 100
    """
    ratio = price_of_product_x / price_of_product_y
    return ratio * 100
import random
def get_framesets(cls, maximum=10, pattern=None):
    """ Gather FrameSet objects from either Frames or Bars.

    If `pattern` is set to a compiled regex pattern, return all FrameSets
    matching the pattern.  Otherwise, return up to `maximum` random
    FrameSets.
    """
    framenames = cls.names()
    if pattern is not None:
        return {
            cls.get_by_name(name)
            for name in framenames
            if pattern.search(name) is not None
        }
    # Walk a shuffled copy of the names instead of looping
    # `while len < maximum`: the original loop never terminated when
    # fewer than `maximum` distinct FrameSets exist.
    shuffled = list(framenames)
    random.shuffle(shuffled)
    frametypes = set()
    for name in shuffled:
        frametypes.add(cls.get_by_name(name))
        if len(frametypes) >= maximum:
            break
    return frametypes
def combination(n: int, r: int) -> int:
    """
    Returns the combination i.e nCr of given n and r.

    By convention of this API, nCr is 1 whenever n or r is 0.

    >>> combination(5,3)
    10
    """
    import math
    # If either n or r is 0, nCr = 1 (original contract, kept as-is).
    if n == 0 or r == 0:
        return 1
    # math.comb computes the exact integer binomial coefficient; it also
    # correctly returns 0 for r > n, where the hand-rolled factorial
    # version returned a meaningless n!.
    return math.comb(n, r)
def is_assumed_length(list, assumed_length: int) -> bool:
    """
    Report whether ``list`` contains exactly ``assumed_length`` items.

    Args:
        list (list or tuple): a sequence to be checked.
        assumed_length (int): the expected length.

    Returns:
        bool: True when the lengths agree.
    """
    actual = len(list)
    return actual == assumed_length
from typing import Sequence
from typing import Any
from typing import Optional
def binary_search_rec(seq: Sequence, value: Any) -> Optional[int]:
    """
    Recursive binary search.

    Notes
    -----
    Binary search works only on sorted sequences.

    Parameters
    ----------
    seq : Sequence
        where to search.
    value : Any
        what to search.

    Returns
    -------
    Optional[int]
        the index of value if it is in sequence or None.
    """
    def _search(low: int, high: int) -> Optional[int]:
        """Recursively search seq[low:high + 1]."""
        if low > high:
            return None
        mid = (low + high) // 2
        pivot = seq[mid]
        if pivot == value:
            return mid
        if pivot < value:
            return _search(mid + 1, high)
        return _search(low, mid - 1)

    return _search(0, len(seq) - 1)
def get_temp_disk_for_node_agent(node_agent: str) -> str:
    """Get temp disk location for node agent.

    :param node_agent: node agent sku id
    :return: temp disk location
    """
    # Fixed typo: the original tested 'batch.node.unbuntu', which never
    # matches the real 'batch.node.ubuntu ...' agent skus, silently
    # sending Ubuntu nodes to the default '/mnt/resource' path.
    if node_agent.startswith('batch.node.ubuntu'):
        return '/mnt'
    elif node_agent.startswith('batch.node.windows'):
        return 'D:\\batch'
    else:
        return '/mnt/resource'
from typing import Any
import math
def is_integer(value: Any) -> bool:
    """ Returns True if the input consists solely of digits and represents
    an integer rather than character data or a float.

       '3'     is True
       '-3'    is True
       3       is True
       -3      is True
       3.3     is False
       '-3.3'  is False
       '33.22' is False
       '4,333' is False
       '$3'    is False
       ''      is False
       'b'     is False
       None    is False
    """
    try:
        fract, _ = math.modf(float(value))
    except (ValueError, TypeError):
        # Not convertible to float at all (bad string, None, ...).
        return False
    # Compare against 0 rather than `> 0`: negative inputs have a
    # negative fractional part (e.g. modf(-3.3) -> -0.3), which the
    # original `fract > 0` test wrongly accepted as an integer.
    return fract == 0
def elem_match(values, filter_expr):
    """
    Element match filter: report whether any element of ``values``
    satisfies ``filter_expr``.

    :param values: list - values
    :param filter_expr: predicate function
    :return: bool
    """
    return any(filter_expr(item) for item in values)
def parse_package_status(release, package, status_text, filepath):
    """
    Parse an Ubuntu package status string of the format:
        <status code> (<version/notes>)
    :return: dict where
        'status' : '<not-applicable | unknown | vulnerable | fixed>',
        'fix-version' : '<version with issue fixed, if applicable>'
    """
    # Split into the status code and the optional parenthesised detail.
    pieces = status_text.strip().split(' ', 1)
    code = pieces[0].strip().lower()
    detail = pieces[1].strip('()') if len(pieces) > 1 else None
    fix_version = None
    if code == 'dne':
        status = 'not-applicable'
    elif code in ('ignored', 'pending', 'deferred', 'needed', 'needs-triage'):
        status = 'vulnerable'
    elif code == 'not-affected':
        status = 'not-vulnerable'
    elif code in ('released', 'released-esm'):
        if detail:
            status = 'fixed'
            fix_version = detail
        else:
            # if there isn't a release version, then just mark as
            # vulnerable to test for package existence
            status = 'vulnerable'
    else:
        status = 'unknown'
        print('Unsupported status "{0}" in {1}_{2} in "{3}". Setting to "unknown".'
              .format(code, release, package, filepath))
    result = {'status': status}
    if fix_version is not None:
        result['fix-version'] = fix_version
    return result
def extract_ini_from_doc(doc):
    """Extract the INI section: the text after the last "INI" marker in a
    doc string (the whole string when no marker is present)."""
    return doc.rpartition("INI")[2]
def isAESround(rnd, aes_rounds):
    """
    Return True if ``rnd`` is an AES round: round 0, or any round that is
    not the last of an (aes_rounds + 1)-round group.
    """
    if rnd == 0:
        return True
    return (rnd + 1) % (aes_rounds + 1) != 0
def days_of_year(year):
    """
    Return the total number of days in ``year``: 366 for leap years,
    365 otherwise.

    :param year: year, int (or anything convertible via int(), e.g. "2018")
    :return: days_sum, 366 or 365
    """
    import calendar
    year = int(year)
    # calendar.isleap implements the same Gregorian rule as the manual
    # (div-4 and not div-100) or div-400 check; the original's
    # `assert isinstance(year, int)` after int() was dead code and is gone.
    return 366 if calendar.isleap(year) else 365
import re
def toSentenceCase(text):
    """
    Convert camelCase/PascalCase text to sentence case.

    Args:
        text (string): Text to convert to sentence case.

    Returns:
        (string): Title-cased text with a space inserted before each
        interior capital letter.
    """
    spaced = re.sub(r"(?<=\w)([A-Z])", r" \1", text)
    return spaced.title()
from pathlib import Path
def make_absolute(path: Path) -> Path:
    """Resolve ``path`` against the current working directory unless it
    is already absolute."""
    if path.is_absolute():
        return path
    return Path.cwd() / path
def replace_repeated_characters(text):
    """Collapse every run of 3 or more identical characters down to
    exactly 2 repetitions."""
    if not text:
        return text
    out = text[0]
    previous = text[0]
    run_extra = 0  # repeats of the current character beyond the first
    for ch in text[1:]:
        if ch != previous:
            run_extra = 0
            out += ch
        else:
            run_extra += 1
            # keep only the second occurrence; drop the third onwards
            if run_extra < 2:
                out += ch
        previous = ch
    return out
def getKey(spc):
    """
    Return the species' label string, suitable for use as a dictionary key.
    """
    return spc.label
def is_string(data):
    """Report whether ``data`` is a ``str`` instance."""
    return isinstance(data, str)
def twoNumberSumBest(array, targetSum):
    """
    Check whether any two numbers in ``array`` sum to ``targetSum``.
    O(n) time and O(n) space.

    args
    ---------
    array: an array of numbers
    targetSum: a target number

    output
    ---------
    [num, complement] for the first satisfying pair found
    (current element first), or False when no such pair exists.
    """
    # The original used a dict with dummy True values as a membership
    # structure; a set expresses that intent directly.
    seen = set()
    for num in array:
        complement = targetSum - num
        if complement in seen:
            return [num, complement]
        seen.add(num)
    return False
def igb_is_trusted(request):
    """
    Report whether the request's headers indicate IGB trust
    (HTTP_EVE_TRUSTED header equal to 'Yes').
    """
    return request.META['HTTP_EVE_TRUSTED'] == 'Yes'
def compute_pwp(clay_val, oc_val, sand_val):
    """
    Calculate permanent wilting point based on Clay, Organic Matter and sand value
    :param clay_val: percentage of clay
    :param oc_val: percentage of organic carbon
    :param sand_val: percentage of sand
    :return: a float value representing PWP (theta_1500, rounded to 2 dp)
    """
    # Step #1 - convert OC to OM
    om_val = 2 * oc_val
    # NOTE(review): this halving immediately cancels the x2 conversion
    # above, and the stray '#1000' remnant suggests a different divisor
    # was once used -- confirm the intended OM scaling against the source
    # pedotransfer equation.
    om_val /= 2 #1000
    # convert percentages to fractions
    clay_val /= 100
    sand_val /= 100
    # Step #2 - compute theta_1500_t (intermediate estimate)
    theta_1500_t = 0.031 - (0.024 * sand_val) + (0.487 * clay_val) + (0.006 * om_val) \
        + (0.005 * sand_val * om_val) - (0.013 * clay_val * om_val) + (0.068 * sand_val * clay_val)
    # Step #3 - finally compute theta_1500 (linear correction of the estimate)
    theta_1500 = (1.14 * theta_1500_t) - 0.02
    return round(theta_1500, 2)
def underscorize(camelcased):
    """
    Takes a CamelCase string and returns a separated_with_underscores
    version of that name in all lower case. If the name is already all in
    lower case and/or separated with underscores, then the returned string
    is identical to the original. This function is used to take CStruct
    class names and determine the names of their handler methods.
    Here are some example conversions:
        underscorize("SomeStruct") == "some_struct"
        underscorize("SSNLookup") == "ssn_lookup"
        underscorize("RS485Adaptor") == "rs485_adaptor"
        underscorize("Rot13Encoded") == "rot13_encoded"
        underscorize("RequestQ") == "request_q"
        underscorize("John316") == "john316"
    """
    underscored, prev = "", ""
    for i,c in enumerate(camelcased):
        # Insert an underscore before c when it starts a new "word":
        # a non-lowercase, non-underscore character that either follows a
        # lowercase letter (and is not a digit, so "John316" stays intact),
        # or is a capital beginning a new word at the end of an acronym run
        # (the next character is lowercase, like the 'L' in "SSNLookup").
        if (prev and not c.islower() and c != "_"
            and (prev.islower() and not c.isdigit()
                 or c.isupper() and camelcased[i+1:i+2].islower())):
            underscored += "_"
        underscored += c.lower()
        prev = c
    return underscored
from typing import Mapping
from typing import Sequence
from typing import Any
def chop_mapping(aliases2val: Mapping[Sequence[str], Any]) -> Mapping[str, Any]:
    """
    Expand a dict whose keys are sequences of aliases into a dict in
    which each alias is a key by itself.
    Example: {('asterisk', 'star'): '*'} -> {'asterisk': '*', 'star': '*'}
    """
    return {
        alias: val
        for aliases, val in aliases2val.items()
        for alias in aliases
    }
def hgeo_to_hpot(h_geo, Re=6371000.0):
    """Calculate the geopotential altitude from the geometric altitude.

    Parameters
    ----------
    h_geo : float
        Geometric altitude.
    Re : float, optional
        Earth's radius (default is 6371000.0).

    Returns
    -------
    hpot : float
        Geopotential altitude.
    """
    # Same operation order as Re/(Re+h_geo)*h_geo, split for readability.
    scale = Re / (Re + h_geo)
    return scale * h_geo
def check_user(user, event):
    """
    Return True when the ``ReactionAddEvent`` / ``ReactionDeleteEvent``
    was produced by the given user (identity comparison).

    Parameters
    ----------
    user : ``ClientUserBase``
        The user who should be matched.
    event : ``ReactionAddEvent``, ``ReactionDeleteEvent``
        The reaction addition or deletion event.
    """
    return event.user is user
def memoryToString(kmem, unit=None):
    """Format an amount of memory (given in kilobytes) as a
    human-friendly string; ``unit`` forces 'K' or 'M' output."""
    kilo = 1024
    if unit == "K" or not unit and kmem < kilo:
        return "%dK" % kmem
    if unit == "M" or not unit and kmem < kilo ** 2:
        return "%dM" % (kmem // kilo)
    return "%.01fG" % (float(kmem) / kilo ** 2)
import torch
def adversarial_loss(prob, label):
    """Compute adversarial losses in GAN networks.

    Note:
        ``prob`` is either D(x) or D(G(z)) and ``label`` is either 0 (fake)
        or 1 (real).  Because the loss is ``BCEWithLogitsLoss``, ``prob``
        must be the discriminator's *raw logits* — a sigmoid is applied
        internally — not a probability already in [0, 1].  With
        s = sigmoid, this means:
        - l(D(x), 1) = -log(s(D(x))): "Real" discriminator loss
        - l(D(x), 0) = -log(1 - s(D(x))): "Fake" discriminator loss
        - l(D(G(z)), 1) = -log(s(D(G(z)))): Non-saturating generator loss

    Args:
        prob: Discriminator output logits (any real-valued tensor)
        label: Data label tensor, with fake = 0 and real = 1
    """
    # Doc fix only: the original docstring described BCELoss on
    # probabilities in [0, 1], contradicting the BCEWithLogitsLoss call.
    return torch.nn.BCEWithLogitsLoss()(prob, label)
def get_start_token(request):
    """Return the start token (``access_token`` parameter) from the request.
    """
    # TODO: check whether this token has been seen before.
    return request.REQUEST['access_token']
from pathlib import Path
def get_logfile_path(proj_dir: Path) -> str:
    """
    Helper returning the absolute path of ``pipeline.log`` inside a project.

    Args:
        proj_dir: path to proj_dir
    Returns:
        absolute path to ``logs/visualization/pipeline.log`` as a string
    """
    log_file = proj_dir / "logs" / "visualization" / "pipeline.log"
    return str(log_file.absolute())
def _shape_repr(shape):
"""Return a platform independent reprensentation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined | fc12559a86d1bdf56553208e3afbb623fea732c6 | 574,810 |
import hashlib
import pickle
def get_hash(u):
    """Return a SHA-1 hex digest identifying a url_dict.

    The digest covers the request URL, site, real site, cookies, request
    method, response code and response version.
    """
    fingerprint = [
        u["req_url"], u["site"], u["real_site"], u["cookies"],
        u["req_method"], u["resp_code"], u["resp_version"],
    ]
    return hashlib.sha1(pickle.dumps(fingerprint)).hexdigest()
def intercalate_sigma_into_peak_params(reduced_peak_params, sigmas):
    """
    Insert the sigmas back into reduced_peak_params to form peak_params.

    For N peaks:

    Input
    -----
    reduced_peak_params : vector of length 2*N; odd elements are
        amplitudes, even elements are centroids.
    sigmas : vector of length N; one sigma per peak.

    Returns
    -------
    peak_params : vector of length 3*N laid out as
        [amp_1, cen_1, sig_1, amp_2, cen_2, sig_2, ...].
    """
    assert len(reduced_peak_params)//2==len(sigmas), f"Expected to have Number of peaks = len(sigmas) = {len(sigmas)}. "
    assert len(reduced_peak_params)%2==0, "Exactly two arguments must be provided to describe each peak: amplitude and centroid."
    amplitudes = reduced_peak_params[::2]
    centroids = reduced_peak_params[1::2]
    full_params = []
    for amp, cen, sig in zip(amplitudes, centroids, sigmas):
        full_params.extend((amp, cen, sig))
    return full_params
def unfold(vc: set, folded_verts: list):
    """
    Compute the final vertex cover from a partial cover and the list of
    folded vertices (in original, unreversed order).

    :param vc: partial vertex cover as a set
    :param folded_verts: list of folded vertices as 3-tuples (u, v, w)
    :return: final vertex cover as a set
    """
    result = set(vc)
    # Undo the folds in reverse order of application.
    for u, v, w in reversed(folded_verts):
        if u in result:
            result.discard(u)
            result.update((v, w))
        else:
            result.add(u)
    return result
def search_iterator(lines, start=0):
    """Search for iterator operators in the code.

    Args:
        lines ([list]): A list with Javascript syntax as strings.
        start ([int, optional]): First line index to inspect. Defaults to 0.

    Returns:
        [list]: (index, operator) tuples, grouped by operator in the order
        "--", "++", "+=", "-=".
    """
    matches = []
    # Operator-major scan: all lines are checked for one operator before
    # moving to the next, so results are grouped by operator.
    for op in ("--", "++", "+=", "-="):
        for idx in range(start, len(lines)):
            if op in lines[idx]:
                matches.append((idx, op))
    return matches
def get_float_time(stamp):
    """
    Convert a stamp-style time (secs, nsecs) into a floating-point number
    of seconds.
    """
    # nsecs are integer nanoseconds; scale to seconds before summing.
    fractional = float(stamp.nsecs) * 0.000000001
    return float(stamp.secs) + fractional
def order(replay1, replay2):
    """
    Order two replays chronologically.

    Parameters
    ----------
    replay1: Replay
        The first replay to order.
    replay2: Replay
        The second replay to order.

    Returns
    -------
    (Replay, Replay)
        The earlier replay first, the later replay second.

    Raises
    ------
    ValueError
        If either replay lacks a timestamp.
    """
    if not replay1.timestamp or not replay2.timestamp:
        raise ValueError("Both replay1 and replay2 must provide a timestamp. "
                         "Replays without a timestamp cannot be ordered.")
    # Swap only when the second replay is actually earlier.
    if replay2.timestamp < replay1.timestamp:
        return (replay2, replay1)
    return (replay1, replay2)
def elementwise_greater_than(L, thresh):
    """Return a list with the same length as L, where the value at index i is
    True if L[i] is greater than thresh, and False otherwise.

    >>> elementwise_greater_than([1, 2, 3, 4], 2)
    [False, False, True, True]
    """
    # Comprehension instead of the manual append loop: same result,
    # idiomatic and faster.
    return [item > thresh for item in L]
import math
def gompertz(current_age, live_to, female=True):
    """
    Probability of surviving to age ``live_to`` conditional on being alive
    at ``current_age``; ``female`` is boolean.

    Formula from Blanchett, "Simple Formulas for Complex Withdrawal
    Strategies".  Parameters are fitted to the Annuity 2000 table, so they
    are biased towards healthy people with extra longevity.

    >>> gompertz(65, 70)
    0.9603531460340051
    >>> gompertz(65, 70, False)
    0.9332099343866791
    """
    if female:
        modal_age, dispersion = 91, 8.88
    else:
        modal_age, dispersion = 88, 10.65
    # Gompertz survival: exp(exp((x - m)/b) * (1 - exp((t - x)/b))).
    hazard_scale = math.exp((current_age - modal_age) / dispersion)
    horizon_term = 1 - math.exp((live_to - current_age) / dispersion)
    return math.exp(hazard_scale * horizon_term)
import operator
def _ext_proc_tpls_to_procs(xps):
"""List processors by each priority.
:param xps: A list of (file_extension, processor_cls)
:return: List of [processor_cls]
"""
return sorted((operator.itemgetter(1)(xp) for xp in xps),
key=operator.methodcaller("priority")) | ecc93f6bb6de68f72982329607e00f1f3f8a5274 | 644,783 |
import re
def _fasta_dict_from_file(file_object, header_search='specific'):
    """
    Lazily parse FASTA entries from an open file object, yielding one
    dict per record.

    With ``header_search='specific'`` a header such as
    ``>gi|12345|ref|NP_XXXXX| Description`` is parsed into
    ``{'gi': '12345', 'ref': 'NP_XXXXX', 'description': 'Description'}``.
    With ``header_search='generic'`` the whole whitespace-delimited header
    token is kept verbatim under the key ``'header'``.

    Every yielded dict also has a ``'sequence'`` key holding the record's
    sequence with all whitespace removed.

    :param file_object: iterable of lines (e.g. an open FASTA file)
    :param header_search: 'specific' to split the header on ``|`` pairs,
        'generic' to keep it as-is
    """
    current_id = dict()
    current_seq = ''
    current_header = None  # NOTE(review): assigned but never used below
    # Matches a header line: '>' then the id token, then optional description.
    pat = re.compile('>(\S+)\s*(.*)')
    # Matches 'key|value' pairs inside the id token (value may be empty).
    header_pat = re.compile(r'(\w+)\|(\w+\.?\w*)?')
    def parse_header(header, pairs=True):
        # Split a '>gi|12345|ref|NP_XXX|' style header into a key->value dict.
        # NOTE(review): the ``pairs`` parameter is accepted but never used.
        keys = header_pat.findall(header)
        header_data = dict()
        for key in keys:
            header_data[key[0]] = key[1]
        # gi -> ProteinGI #, ref -> NP_XXXX
        return header_data
    for line in file_object:
        line = line.rstrip()
        m = pat.search(line)
        if m:
            ## new residue line matched, purge the existing one, if not the first
            if current_id:
                ## remove all whitespace and save
                current_seq = ''.join(current_seq.split())
                current_id['sequence'] = current_seq
                yield current_id
            # current_id.clear() # this is actually bad for list comprehensions
            # as it returns empty dictionaries
            current_seq = ''
            header = m.group(1)
            if header_search == 'specific':
                current_id = parse_header(header)
            elif header_search == 'generic':
                current_id = dict(header = header)
            current_id['description'] = m.group(2)
        else:
            ## python 2.6+ makes string concatenation amortized O(n)
            ## http://stackoverflow.com/a/4435752/1368079
            current_seq += str(line)
    ## don't forget the last one
    # NOTE(review): on empty input this still yields one dict containing only
    # an empty 'sequence' — confirm callers expect that.
    current_seq = ''.join(current_seq.split())
    current_id['sequence'] = current_seq
    yield current_id
def get_novelty_smi(gen_smis, ref_smis, return_novelty=False):
    """
    Get novelty of generated SMILES relative to a training/reference set.

    para gen_smis: generated SMILES, in list format
    para ref_smis: training SMILES, in list format
    para return_novelty: if True, return the list of novel SMILES instead
        of the novelty ratio; default False
    """
    # Build a set once: membership tests drop from O(len(ref_smis)) per
    # item (list scan) to O(1).
    ref_set = set(ref_smis)
    nov_smis = [smi for smi in gen_smis if smi not in ref_set]
    if return_novelty:
        return nov_smis
    return len(nov_smis) / len(gen_smis)
import re
def escape_filename_component(fragment):
    """
    Escape a component of the filename as specified in PEP 427: every run
    of characters other than word characters and '.' becomes a single '_'.
    """
    # BUG FIX: the original passed re.UNICODE (== 32) as re.sub's positional
    # ``count`` argument, silently limiting replacement to the first 32
    # runs. Flags must go through the ``flags=`` keyword.
    return re.sub(r"[^\w\d.]+", "_", fragment, flags=re.UNICODE)
def my_find(what, where):
    """
    Locate a substring within a string.

    Parameters
    ----------
    what - substring to look for
    where - string to be searched

    Returns
    -------
    { start: index of the match start,
      end: index of the match end } - when the substring is found
    False - when the substring is not found
    """
    needle_len = len(what)
    # A needle longer than the haystack can never match.
    if needle_len > len(where):
        return False
    # Slide a window of len(what) characters across the haystack.
    for pos in range(len(where)):
        if where[pos:pos + needle_len] == what:
            return {
                "start": pos,
                "end": pos + needle_len}
    return False
def read_file(file):
    """
    Read all lines of text from a file.

    :param file: input file path
    :return: text - list of lines (newlines preserved) from the input file
    """
    with open(file, 'r') as handle:
        return handle.readlines()
def get_module_author(mod):
    """Find and return the module author.

    Args:
        mod (module): Python module

    Returns:
        (str): Author string or None if not found
    """
    author = getattr(mod, "__author__", None)
    return author
def get_name(port):
    """Return the port's package name (the ``pkgname`` attribute entry)."""
    attributes = port.attr
    return attributes["pkgname"]
def shorten(s):
    """Truncate a title to 240 characters plus an ellipsis when needed."""
    if len(s) >= 240:
        return s[:240] + "..."
    return s
def _pair(s: str, i: int, i1: int, j: int, j1: int) -> str:
"""Return a stack representation, a key for the NN maps
Args:
s: Sequence being folded
i: leftmost index
i1: index to right of i
j: rightmost index
j1: index to left of j
Returns:
str: string representation of the pair
"""
return (
(s[i] if i >= 0 else ".")
+ (s[i1] if i1 >= 0 else ".")
+ "/"
+ (s[j] if j >= 0 else ".")
+ (s[j1] if j1 >= 0 else ".")
) | b647c247c87834f86d48b4ad68a509be2869acaf | 618,685 |
def _span(xy):
"""Return the vertical span of the points.
"""
return xy[:, 1].max() - xy[:, 1].min() | b3201de078dbc60fbf2e8eaa7f56e708475eac61 | 546,394 |
def flatten_corner(corner_kick, game_id):
    """Flatten the schema of a corner kick.

    ``corner_kick`` is a (ck_id, data) pair where ``data`` holds the event
    time, the player id, and the start/end coordinates.
    """
    ck_id, data = corner_kick
    # Event time in minutes: whole minutes plus the seconds fraction.
    minutes = data['t']['m'] + (data['t']['s'] / 60)
    start, end = data['coord']['1'], data['coord']['2']
    return {'game_id': game_id,
            'ck_id': ck_id,
            'time_of_event(min)': minutes,
            'player_id': float(data['plyrId']),
            'ck_coord_x1': start['x'],
            'ck_coord_y1': start['y'],
            'ck_coord_z1': start['z'],
            'ck_coord_x2': end['x'],
            'ck_coord_y2': end['y'],
            'ck_coord_z2': end['z']}
from typing import Union
import pathlib
from datetime import datetime
def get_timestamp(file_path: Union[str, pathlib.Path]) -> str:
    """Return a textual timestamp from the modification date of a file.

    Args:
        file_path: Path to file.

    Returns:
        ISO-formatted modification timestamp, or the literal string
        "File does not exist" when the path cannot be stat'ed.
    """
    path = pathlib.Path(file_path)
    try:
        stamp = path.stat().st_mtime
    except FileNotFoundError:
        return "File does not exist"
    return datetime.fromtimestamp(stamp).isoformat()
def lorentz_ip(u, v=None):
    """
    Compute the Lorentz inner product of two vectors.

    For vectors `u` and `v`, the 3-dimensional case is defined as
        u[0]*v[0] + u[1]*v[1] - u[2]*v[2]
    and the 4-dimensional case as
        u[0]*v[0] + u[1]*v[1] + u[2]*v[2] - u[3]*v[3]

    Args
        u: vector with shape either (3,) or (4,)
        v: vector with same shape as u; if None (default), sets v = u

    Returns
        float: value of the Lorentz inner product

    Raises
        ValueError: if u and v are not both length 3 or both length 4.
    """
    if v is None:
        v = u
    if len(u) == 3 and len(v) == 3:
        signs = (1, 1, -1)
    elif len(u) == 4 and len(v) == 4:
        signs = (1, 1, 1, -1)
    else:
        # BUG FIX: the original *returned* a ValueError instance instead of
        # raising it, so callers never saw an exception.
        raise ValueError(f"length of u and v should be 3 or 4, was {len(u)}")
    return sum(ui * vi * c for ui, vi, c in zip(u, v, signs))
def websocket_method(fn):
    """Decorator marking *fn* as a valid websocket endpoint method."""
    setattr(fn, "is_endpoint_method", True)
    return fn
def attrs_as_dict(obj):
    """
    Try several strategies to obtain a dict representation of ``obj``.

    Order of preference: ``__getstate__()``, ``__dict__``, a
    ``__slots__``-derived name->value mapping, or the object itself when it
    is a dict.  Returns None when no strategy applies.

    NOTE: on Python 3.11+ every object inherits ``object.__getstate__``,
    so the first branch usually wins.
    """
    attrs = None
    if hasattr(obj, "__getstate__"):
        attrs = obj.__getstate__()
    elif hasattr(obj, "__dict__"):
        attrs = obj.__dict__
    elif hasattr(obj, "__slots__"):
        # BUG FIX: the original assigned this mapping to an unused local
        # (``attr_dict``), so the __slots__ branch always returned None.
        attrs = {n: getattr(obj, n) for n in obj.__slots__}
    elif isinstance(obj, dict):
        attrs = obj
    return attrs
import re
def validate_hash(value: str) -> bool:
    """Validate the ``algorithm:hexdigest`` format of a hash string.

    Returns True only when the *entire* string is an alphanumeric
    algorithm name, a colon, and one or more hex digits; False otherwise.
    """
    # fullmatch instead of match: the original anchored only the start, so
    # a valid prefix followed by garbage (e.g. "sha1:abc !!!") validated.
    m = re.fullmatch(r"[a-zA-Z0-9]+:[a-fA-F0-9]+", value)
    return m is not None
import yaml
def get_appname(file):
    """Parse given docker-compose file and returns dict for
    AppName:CertType from environment

    :param file: Full path of docker-compose file.
    :type file: String
    """
    apps = {}
    with open(file) as stream:
        for doc in yaml.load_all(stream, Loader=yaml.FullLoader):
            for top_key, services in doc.items():
                if top_key != "services":
                    continue
                for service in services.values():
                    for field, env in service.items():
                        if field != "environment":
                            continue
                        try:
                            apps.setdefault(env["AppName"], env["CertType"])
                        except KeyError as ke:
                            # AppName/CertType missing: report and continue.
                            print(ke)
    return apps
import itertools
def common_age(a, b):
    """
    Calculates the number of ages in common between two lists of ages.
    Allows for ages to be one year apart.

    Parameters
    ----------
    a: list
        list of age strings to be compared to b
    b: list
        list of age strings to be compared to a

    Raises
    ------
    TypeError
        if a or b are not lists

    Returns
    -------
    integer
        number of ages in common

    Example
    --------
    >>> common_age(['15', '20', '2'], ['15', '15', '20', '2', '99'])
    4
    """
    if not (isinstance(a, list) and isinstance(b, list)):
        raise TypeError('Both variables being compared must contain lists')
    # Single lazy pass over the cross product; the original materialized
    # the full product plus two intermediate lists.
    return sum(
        1 for x, y in itertools.product(a, b) if abs(int(x) - int(y)) <= 1
    )
def get_backbone_atoms_nucleicacids (chain):
    """ Return the nucleic-acid backbone atoms (C4') of the given chain.

    Only standard residues (hetero flag " ") that actually carry a C4'
    atom contribute to the result.
    """
    return [
        residue["C4'"]
        for residue in chain
        if residue.get_id()[0] == " " and residue.has_id("C4'")
    ]
def _get_cell_range(sheet_obj, start_row, start_col, end_row, end_col):
"""
Get cell range in xlrd module as two level nested list.
Arguments:
sheet_obj {xlrd worksheet object} -- xlrd worksheet instance
start_row {int} -- Number of start row
start_col {int} -- Number of start column
end_row {int} -- Number of last row
end_col {int} -- Number of last column
Returns:
list -- Cell range as two level nested list
"""
return [
sheet_obj.row_slice(row, start_colx=start_col, end_colx=end_col + 1)
for row in range(start_row, end_row + 1)
] | 59498bda8e0e446a5f508495d8e41aab24448f8c | 532,572 |
import re
def _name_from_project_path(path, project, template):
"""Validate a URI path and get the leaf object's name.
:type path: string
:param path: URI path containing the name.
:type project: string or NoneType
:param project: The project associated with the request. It is
included for validation purposes. If passed as None,
disables validation.
:type template: string
:param template: Template regex describing the expected form of the path.
The regex must have two named groups, 'project' and
'name'.
:rtype: string
:returns: Name parsed from ``path``.
:raises: :class:`ValueError` if the ``path`` is ill-formed or if
the project from the ``path`` does not agree with the
``project`` passed in.
"""
if isinstance(template, str):
template = re.compile(template)
match = template.match(path)
if not match:
raise ValueError('path "%s" did not match expected pattern "%s"' % (
path, template.pattern,))
if project is not None:
found_project = match.group('project')
if found_project != project:
raise ValueError(
'Project from client (%s) should agree with '
'project from resource(%s).' % (project, found_project))
return match.group('name') | 51b93e47c8d00bb3637a83806373cf2c36d9e0a9 | 305,612 |
def get_map_name(location):
    """
    Return the "map name" of a SQLLocation, defaulting to the location's
    name when no non-empty 'map_location_name' metadata entry exists.
    Returns None for a falsy location.
    """
    if not location:
        return None
    metadata = location.metadata
    if 'map_location_name' in metadata and metadata['map_location_name']:
        return metadata['map_location_name']
    return location.name
def _get_key(path, context):
"""
Retrieves key from path
:param string path: Path to analyze
:param Context context: Thumbor's context
:return: Extracted key
:rtype: string
"""
root_path = context.config.get('TC_AWS_LOADER_ROOT_PATH')
return '/'.join([root_path, path]) if root_path is not '' else path | a7090b8baabd5028325608ea76037756fd573152 | 165,360 |
def load_bounding_boxes(path, switch_order=True):
    """
    Parse a DeepFashion bounding-box list file.

    Returns a dict {filename: [y1, x1, y2, x2]} when ``switch_order`` is
    True (TensorFlow's height-first convention), otherwise
    {filename: [x1, y1, x2, y2]}.
    """
    bboxes = {}
    with open(path, 'r') as handle:
        # First line: declared number of boxes; second line: column headers.
        expected_count = int(handle.readline())
        handle.readline()
        for line in handle:
            imname, _clothes_type, _source_type, x1, y1, x2, y2 = line.strip().split()
            if switch_order:
                coords = (y1, x1, y2, x2)
            else:
                coords = (x1, y1, x2, y2)
            bboxes[imname] = [int(c) for c in coords]
    assert len(bboxes) == expected_count
    return bboxes
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.