content | sha1 | id
---|---|---|
def patch_id(options):
"""Returns the review ID or the GitHub pull request number."""
return options['review_id'] or options['github'] | 7623a25f4d74c4794d8c56c94c6b98f3db80a604 | 413,934 |
def add_http_parameters(url: str, params: dict) -> str:
"""
Adds HTTP parameters to url.
:param url: url address
:param params: http parameters
:return: url with added http parameters
"""
result: str = url + "?"
params_added: int = 0
for param in params:
result += param + "=" + params[param].replace(" ", "%20")
if params_added < len(params) - 1:
result += "&"
params_added += 1
return result | 0918c263d764efe8b1fc668f73a70dc953f2ba90 | 57,946 |
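For comparison, here is a minimal sketch (an addition, not part of the original entry) that builds the same kind of query string with the standard library's `urllib.parse.urlencode`, which percent-encodes all unsafe characters rather than only spaces:

```python
from urllib.parse import urlencode, quote

def add_http_parameters_stdlib(url: str, params: dict) -> str:
    # quote_via=quote encodes spaces as %20 (the default quote_plus would use '+')
    return url + "?" + urlencode(params, quote_via=quote)

print(add_http_parameters_stdlib("https://example.com/search",
                                 {"q": "hello world", "lang": "en"}))
# -> https://example.com/search?q=hello%20world&lang=en
```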
def get_adi_ids(metadata):
"""Returns a list of the adipose shell IDs for each sample
Parameters
----------
metadata : array_like
Array of metadata dicts for each sample
Returns
-------
adi_ids : array_like
Array of the adipose IDs for each sample in the metadata
"""
adi_ids = [md['phant_id'].split('F')[0] for md in metadata]
return adi_ids | b057d56e0e47bc2b604ccfa28bca1660ff711d5f | 126,307 |
import json
def has_value(json_str, value):
"""Check for the presence of a value in a JSON-serialized dict."""
return value in json.loads(json_str).values() | e05307b1b12debb702eda2c1d0c87baf6e0ebcb0 | 261,842 |
def bin_search_recursive(array, what_to_find, left=0, right=None):
"""
Finds element in a sorted array using recursion.
:param list array: A sorted list of values.
:param what_to_find: An item to find.
:returns: Index of the searchable item or -1 if not found.
"""
right = right if right is not None else len(array) - 1
if left > right:
return -1 # Searchable not found
middle_pos = (left + right) // 2
if array[middle_pos] == what_to_find:
return middle_pos
if what_to_find < array[middle_pos]:
return bin_search_recursive(array, what_to_find, left=left, right=middle_pos - 1)
return bin_search_recursive(array, what_to_find, left=middle_pos + 1, right=right) | 83ff4dbcd9cab179c5e83f73d5fdc7c5a6bca4d4 | 8,712 |
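A short runnable comparison (added here) showing the same lookup with the standard library's `bisect`, which performs an iterative binary search whose result can be checked for an exact match:

```python
from bisect import bisect_left

def bin_search_bisect(array, what_to_find):
    # bisect_left returns the insertion point; it is a hit only if the
    # element at that index equals the searched value
    i = bisect_left(array, what_to_find)
    if i < len(array) and array[i] == what_to_find:
        return i
    return -1

print(bin_search_bisect([1, 3, 5, 7, 9], 7))  # -> 3
print(bin_search_bisect([1, 3, 5, 7, 9], 4))  # -> -1
```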
import requests
def _get_access_token(key: str, secret: str) -> str:
"""
Requests an access token from Bitbucket.
:param key:
The OAuth consumer key
:param secret:
The OAuth consumer secret
:return:
The Bitbucket access token
"""
url = 'https://bitbucket.org/site/oauth2/access_token'
data = {
'grant_type': 'client_credentials'
}
response = requests.post(url, data, auth=(key, secret))
if response.status_code != requests.codes.get('ok'):
response.raise_for_status()
return response.json()['access_token'] | 78330eb368129a7d94ca8b790698e4a0a4fc39f5 | 230,575 |
def sequentialize_header_priorities(header_priority_pairs):
"""
In a case where an H3 or H4 succeeds an H1, due to the nature of the Table of Contents generator\
which adds the number of tabs corresponding to the header priority/strength, this will sequentialize\
the headers such that all headers have a priority of at most 1 more than their preceding header.
[('Header 1', 1), ('Header 3', 3), ('Header 4', 4)] -> [('Header 1', 1), ('Header 3', 2), ('Header 4', 3)]
"""
# Go through each header, and if we see a pair where the difference in priority is > 1, make them sequential
# Ex: (H1, H3) -> (H1, H2)
for i in range(len(header_priority_pairs) - 1):
header, priority = header_priority_pairs[i]
next_header, next_priority = header_priority_pairs[i + 1]
if (next_priority - priority > 1):
header_priority_pairs[i + 1] = (next_header, priority + 1)
return header_priority_pairs | ba90688adf7917d898b836f5913040ef2e1c4e6d | 363,258 |
import random
def _random_float(low, high):
"""
:param low: the inclusive minimum value
:param high: the inclusive maximum value
:return: a random float in the inclusive [low, high] range
"""
return (random.random() * (high - low)) + low | 87be42ad3185e1937709f2bcb80fce7afa9a2639 | 435,270 |
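The standard library already covers this case; a one-line comparison sketch (added here) using `random.uniform`:

```python
import random

# random.uniform(low, high) returns a float N with low <= N <= high,
# matching the helper above
value = random.uniform(1.5, 3.0)
print(1.5 <= value <= 3.0)  # -> True
```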
def is_path_prefix_of_path(resource_prefix, resource_path):
"""
Return True if the arborist resource path "resource_prefix" is a
prefix of the arborist resource path "resource_path".
"""
prefix_list = resource_prefix.rstrip("/").split("/")
path_list = resource_path.rstrip("/").split("/")
if len(prefix_list) > len(path_list):
return False
for i, prefix_item in enumerate(prefix_list):
if path_list[i] != prefix_item:
return False
return True | 49415f67cb7d28adf1a5f88bc421864872c267fb | 200,845 |
def _is_member(s, e):
"""Return true if `e` is in the set `s`.
Args:
s: The set to inspect.
e: The element to search for.
Result:
Bool, true if `e` is in `s`, false otherwise.
"""
return e in s._set_items | 810336bb16babcca3af8bc9c931da3d058b6f14f | 694,242 |
from functools import reduce
def a2bits(chars: str) -> str:
"""Converts a string to its bits representation as a string of 0's and 1's.
>>> a2bits("Hello World!")
'010010000110010101101100011011000110111100100000010101110110111101110010011011000110010000100001'
"""
return bin(reduce(lambda x, y: (x << 8) + y, (ord(c) for c in chars), 1))[
3:
] | c155a9ad2b0e704d1cc3a981d10d9be1ac2e905a | 668,333 |
def addr2str(addr):
"""
Converts a binary IPv4 address to a string (in x.y.z.w format)
"""
return '%d.%d.%d.%d' % tuple(int(x) for x in addr) | bbb48bc6d3c2abc929e08ba0396f30fe397fccc1 | 219,251 |
from typing import List
def listToCSV(lst: List) -> str:
"""
Changes a list to csv format
>>> listToCSV([1,2,3])
'1,2,3'
>>> listToCSV([1.0,2/4,.34])
'1.0,0.5,0.34'
"""
strings = ""
for a in lst:
strings += str(a) + ","
strings = strings[0:len(strings) - 1]
return strings | 89fc272c4b9fc0a3a406f67d7b655b2c72755d07 | 35,479 |
def default_hash(i: int, x: int, n: int):
"""Return a hash value for x.
Arguments:
i: hash function index within family
x: value to be hashed
n: domain
"""
return (x + i) % n | fb58c2acaad8c5c3dd3be7e14892ca18dfbdb2b8 | 516,587 |
import typing
def id_class_name(value: typing.Any) -> str:
"""Provide class name for test identifier."""
return str(value.__class__.__name__) | 9d7fae15e07dd994f865baf67d753b43031efd31 | 93,471 |
import re
def vertical_sep_in_line(line):
"""
Find the indices in a line which are potentially or probably
vertical separators (characters commonly used to indicate
the end of columns).
"""
VERTICAL_SEP = r"[+|\.`'╔╦╗╠╬╣╚╩╝┌┬┐╞╪╡├┼┤└┴┘]"
return [m.start() for m in re.finditer(VERTICAL_SEP, line)] | c8d99a5ae9eee8558e75183b27334afa28d3794d | 523,431 |
import torch
def convert_to_one_hots(a, num_classes: int, dtype=torch.int, device=None):
"""
Convert class index array (num_sample,) to an one hots array
(num_sample, num_classes)
Args:
a: index array
num_classes: number of classes
dtype: data type
Returns:
one hots array in shape of (a.shape[0], num_classes)
"""
one_hots = torch.zeros((len(a), num_classes), dtype=dtype, device=device)
one_hots[torch.arange(one_hots.shape[0]), a] = 1
return one_hots | 04a7465b03736704ba0eabb22e32194efbd4d7a6 | 221,918 |
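For reference, a small comparison sketch (added here, not part of the original entry) using PyTorch's built-in `torch.nn.functional.one_hot`; it returns int64 by default, so a cast reproduces the custom dtype:

```python
import torch
import torch.nn.functional as F

a = torch.tensor([0, 2, 1])
# F.one_hot produces shape (num_sample, num_classes) with dtype int64
one_hots = F.one_hot(a, num_classes=3).to(torch.int)
print(one_hots)
# tensor([[1, 0, 0],
#         [0, 0, 1],
#         [0, 1, 0]], dtype=torch.int32)
```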
import json
def GetValueInJsonFile(json_path, key, default_value=None):
"""Reads file containing JSON and returns value or default_value for key.
Args:
json_path: (str) File containing JSON.
key: (str) The desired key to lookup.
default_value: (default:None) The default value returned in case of missing
key.
"""
with open(json_path) as fd:
return json.load(fd).get(key, default_value) | 99e7d22b967b9f47ca3c49cd441e2aac364e0e14 | 201,079 |
import re
def words(string):
"""
Split a string into a list of words, which were delimited by one or more
whitespace characters.
"""
return re.split(r'\s+', string)
import torch
def quaternion_to_rotmat_jac(q):
"""
Converts batched quaternions q of shape (batch, 4) to the jacobian of the
corresponding rotation matrix w.r.t. q of shape (batch, 9, 4)
"""
qr = q[:, 0:1]
qi = q[:, 1:2]
qj = q[:, 2:3]
qk = q[:, 3:4]
z = torch.zeros_like(qk)
r1 = 2. * torch.cat((z, z, -2. * qj, -2. * qk), dim=1)
r2 = 2. * torch.cat((-qk, qj, qi, -qr), dim=1)
r3 = 2. * torch.cat((qj, qk, qr, qi), dim=1)
r4 = 2. * torch.cat((qk, qj, qi, qr), dim=1)
r5 = 2. * torch.cat((z, -2 * qi, z, -2 * qk), dim=1)
r6 = 2. * torch.cat((-qi, -qr, qk, qj), dim=1)
r7 = 2. * torch.cat((-qj, qk, -qr, qi), dim=1)
r8 = 2. * torch.cat((qi, qr, qk, qj), dim=1)
r9 = 2. * torch.cat((z, -2 * qi, -2 * qj, z), dim=1)
return torch.cat((r1.unsqueeze(1), r2.unsqueeze(1), r3.unsqueeze(1),
r4.unsqueeze(1), r5.unsqueeze(1), r6.unsqueeze(1),
r7.unsqueeze(1), r8.unsqueeze(1), r9.unsqueeze(1)), dim=1) | 0b97c5232b4b11f1feabf27df39e38db135443d7 | 58,868 |
def max_or_0(it):
"""
>>> max_or_0([])
0
>>> max_or_0(iter([]))
0
>>> max_or_0(iter([-10, -2, -11]))
-2
"""
lst = list(it)
return max(lst) if lst else 0 | d45222ae48af18be28d3625d950a61e9e58ec0b4 | 129,143 |
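Note that the built-in `max` accepts a `default` argument that handles the empty-iterator case directly; a tiny comparison sketch added here:

```python
# max(..., default=0) avoids materialising the iterator into a list first
print(max(iter([]), default=0))              # -> 0
print(max(iter([-10, -2, -11]), default=0))  # -> -2
```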
def _qualified_type_name(class_):
"""
Compute a descriptive string representing a class, including
a module name where relevant.
Example outputs are "RuntimeError" for the built-in RuntimeError
exception, or "struct.error" for the struct module exception class.
Parameters
----------
class_ : type
Returns
-------
class_name : str
"""
# We're being extra conservative here and allowing for the possibility that
# the class doesn't have __module__ and/or __qualname__ attributes. This
# function is called during exception handling, so we want to minimise the
# possibility that it raises a new exception.
class_module = getattr(class_, "__module__", "<unknown>")
class_qualname = getattr(class_, "__qualname__", "<unknown>")
if class_module == "builtins":
return f"{class_qualname}"
else:
return f"{class_module}.{class_qualname}" | e1000f66b7ab0e10553beadd3ad91214c31fa3ed | 156,507 |
def dec_to_bin(x):
"""Convert from decimal number to a binary string representation."""
return bin(x)[2:].zfill(4) | b3b2734c7b4fc0c028a27de142783b31dad48792 | 412,244 |
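An equivalent one-liner (added for illustration) using a format specification, which zero-pads to four binary digits in the same way:

```python
# '04b' means: binary, zero-padded to a minimum width of 4
print(format(5, '04b'))   # -> '0101'
print(format(12, '04b'))  # -> '1100'
```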
def read_segs(segfile):
"""Returns a dict with scid as the key and segments as value from a '.seg' file.
A '.seg' file consists of a set of lines with the following format:
scene_name[\t]segment_name[\t]start_time[\t]end_time[\n]
scene_name is the id of the Scene that this Segment belongs to,
segment_name is the id of the Segment,
and start_time and end_time determines the time interval for the Segment
Args:
segfile: A string containing the name of the '.seg' file
Returns:
a dict with scid as the key and segments as value
"""
scenes = {}
with open(segfile, 'r') as f:
seglines = f.readlines()
for l in seglines:
l = l.strip()
l = l.split('\t')
if l[0] in scenes:
scenes[l[0]].append((l[1], int(l[2]), int(l[3])))
else:
scenes[l[0]] = [(l[1], int(l[2]), int(l[3]))]
return scenes | 49cfeed5606aa8dc9b2636dc8aa5661427de4314 | 294,724 |
def search(list_data: list, key: int, left: int = 0, right: int = 0) -> int:
"""
Iterate through the array to find the index of key using recursion.
:param list_data: the list to be searched
:param key: the key to be searched
:param left: the index of first element
:param right: the index of last element
:return: the index of key value if found, -1 otherwise.
>>> search(list(range(0, 11)), 5)
5
>>> search([1, 2, 4, 5, 3], 4)
2
>>> search([1, 2, 4, 5, 3], 6)
-1
>>> search([5], 5)
0
>>> search([], 1)
-1
"""
right = right or len(list_data) - 1
if left > right:
return -1
elif list_data[left] == key:
return left
elif list_data[right] == key:
return right
else:
return search(list_data, key, left + 1, right - 1) | a567469caa72be724097594f08e971875f3bb527 | 611,230 |
def scol(string, i):
"""Get column number of ``string[i]`` in `string`.
:Returns: column, starting at 1 (but may be <1 if i<0)
:Note: This works for text-strings with ``\\n`` or ``\\r\\n``.
"""
return i - string.rfind('\n', 0, max(0, i)) | 2aa922fa81dbd700f2da2f3ca177a94351282660 | 387,791 |
def this_extra_edition_number(edicao):
"""
Gets a string 'edicao' that states which is this DOU's edition
(e.g. 132-B or 154) and returns the number (int) of extra editions up
to this current one. No extra editions (e.g. 154) is 0, first
extra edition (134-A) is 1, and so on.
"""
last_char = str(edicao)[-1]
if last_char.isdigit():
return 0
else:
return ord(last_char.lower()) - 96 | 21e313308e691b82539a4ed2d942422bc0c53f33 | 518,776 |
def format_relation(
relation_type: str, start_id: str, end_id: str, **properties) -> dict:
"""Formats a relation."""
relation = {
':TYPE': relation_type,
':START_ID': start_id,
':END_ID': end_id,
}
relation.update(properties)
return relation | 88ccbefcb530556673ee8d6b44a761da84044a8c | 262,604 |
def cal_triangle_area(p1, p2, p3):
"""
Calculate the area of triangle.
S = |(x2 - x1)(y3 - y1) - (x3 - x1)(y2 - y1)| / 2
:param p1: (x, y)
:param p2: (x, y)
:param p3: (x, y)
:return: The area of triangle.
"""
[x1, y1], [x2, y2], [x3, y3] = p1, p2, p3
return abs((x2 - x1) * (y3 - y1) - (x3 - x1) * (y2 - y1)) / 2 | 61d5f16c2573cf912ba7a3c21f3465eb68395971 | 369,362 |
def unescape_special(v):
""" Unescape string version of literals.
"""
if v == 'True': return True
elif v == 'False': return False
elif v == 'None': return None
else: return v | 8566509cffb2f5ee4f8382df9ff43da0da1ef4eb | 282,018 |
def trim_precision(value: float, precision: int = 4) -> float:
"""
Trim the precision of the given floating point value.
For example, if you have the value `170.10000000000002` but really only care about it being
``\u2248 170.1``:
.. code-block:: python
>>> trim_precision(170.10000000000002, 2)
170.1
>>> type(trim_precision(170.10000000000002, 2))
<class 'float'>
.. versionadded:: 2.0.0
:param value:
:param precision: The number of decimal places to leave in the output.
"""
return float(format(value, f"0.{precision}f")) | 20eab8f72e5e7a426523fac70eca4a46b9334ca1 | 203,266 |
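A quick check (added here) that the built-in `round` gives the same result for this example, although the helper above goes through a formatted string instead:

```python
value = 170.10000000000002
print(round(value, 2))               # -> 170.1
print(float(format(value, "0.2f")))  # -> 170.1
```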
def pop_filter(items, predicate=None):
"""Remove the first object from the list for which predicate returns True;
no predicate means no filtering.
"""
for i, item in enumerate(items):
if predicate is None or predicate(item):
items.pop(i)
return item | 9ebe00c41be32054e7f33600e3459488411d68e1 | 556,880 |
def speed_to_pace(x):
""" convert speed in m/s to pace in min/km """
if x == 0:
return 0
else:
p = 16.6666666 / x # 1 m/s --> min/km
return ':'.join([str(int(p)),
str(int((p * 60) % 60)).zfill(2)]) | 9f785fb5b704cb6040b8c468175093b196b6deab | 291,148 |
def num_to_int_str(num: float) -> str:
"""Takes a float and converts to int, and then returns a string version."""
return str(int(round(num))) | 33c5122ee51edd0d8be726ac6bdac9b7e4207be3 | 417,204 |
def milliseconds_to_seconds(milliseconds: int):
"""
Function that attempts to convert the passed in milliseconds into minutes and seconds
Parameters
----------
milliseconds: int
milliseconds for conversion
Returns
-------
int, int
first integer as minute and the second as seconds
"""
sec = int(milliseconds / 1000)
mini = int(sec / 60)
sec %= 60
return mini, sec | c4bea38ca58e737a2141d86ebfc815210081c5ff | 185,945 |
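The same split into minutes and seconds can be expressed with `divmod`; a short sketch added for comparison:

```python
milliseconds = 185000
# divmod returns (quotient, remainder) in one step
mini, sec = divmod(milliseconds // 1000, 60)
print(mini, sec)  # -> 3 5
```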
def lam2rgb(wav, gamma=0.8, output='norm'):
"""
Convert a wavelength [nm] to an RGB colour tuple in the visible spectrum.
This converts a given wavelength of light to an
approximate RGB colour value with edge attenuation.
The wavelength must be given in nanometres in the
range from 380 nm - 750 nm (789 THz - 400 THz).
Adapted from: http://www.noah.org/wiki/Wavelength_to_RGB_in_Python
Based on code by Dan Bruton
http://www.physics.sfasu.edu/astro/color/spectra.html
Parameters
----------
wav : int or float
Wavelength of light in nanometres [nm].
gamma : float, optional
Gamma correction exponent (default=0.8).
output : {'norm', 'hex'}, optional
Specify whether to return an RGB tuple or hexadecimal string and
RGB tuple (default='norm').
Returns
-------
tuple
If 'norm' output an RGB tuple with ints in [0, 255] or if 'hex', output
a hexadecimal string followed by an RGB tuple.
"""
# == A few notes about colour ==
# Color Wavelength(nm) Frequency(THz)
# Red 620-750 484-400
# Orange 590-620 508-484
# Yellow 570-590 526-508
# Green 495-570 606-526
# Blue 450-495 668-606
# Violet 380-450 789-668
# f is frequency (cycles per second)
# l (lambda) is wavelength (meters per cycle)
# e is energy (Joules)
# h (Plank's constant) = 6.6260695729 x 10^-34 Joule*seconds
# = 6.6260695729 x 10^-34 m^2*kg/seconds
# c = 299792458 meters per second
# f = c/l
# l = c/f
# e = h*f
# e = c*h/l
# List of peak frequency responses for each type of
# photoreceptor cell in the human eye:
# S cone: 437 nm
# M cone: 533 nm
# L cone: 564 nm
# rod: 550 nm in bright daylight, 498 nm when dark adapted.
# Rods adapt to low light by becoming more sensitive.
# Peak frequency response shifts to 498 nm.
wav = float(wav)
if 380 <= wav < 440:
attenuation = 0.3 + 0.7 * (wav - 380) / (440 - 380)
R = ((-(wav - 440) / (440 - 380)) * attenuation) ** gamma
G = 0.0
B = (1.0 * attenuation) ** gamma
elif 440 <= wav < 490:
R = 0.0
G = ((wav - 440) / (490 - 440)) ** gamma
B = 1.0
elif 490 <= wav < 510:
R = 0.0
G = 1.0
B = (-(wav - 510) / (510 - 490)) ** gamma
elif 510 <= wav < 580:
R = ((wav - 510) / (580 - 510)) ** gamma
G = 1.0
B = 0.0
elif 580 <= wav < 645:
R = 1.0
G = (-(wav - 645) / (645 - 580)) ** gamma
B = 0.0
elif 645 <= wav <= 750:
attenuation = 0.3 + 0.7 * (750 - wav) / (750 - 645)
R = (1.0 * attenuation) ** gamma
G = 0.0
B = 0.0
else: # Outside the visible spectrum
R = 0.0
G = 0.0
B = 0.0
if output == 'norm':
return (int(round(R * 255)), int(round(G * 255)), int(round(B * 255)))
elif output == 'hex':
R *= 255
R = int(max(0, min(round(R), 255)))
G *= 255
G = int(max(0, min(round(G), 255)))
B *= 255
B = int(max(0, min(round(B), 255)))
# return (int(R), int(G), int(B)) # int() truncates towards 0
return "#{0:02x}{1:02x}{2:02x}".format(R, G, B), (R, G, B) | ecd90f0a4f5a2765d755dac6723d4c43307d5773 | 170,716 |
def output_fn(prediction, response_content_type):
"""
Serialize and prepare the prediction output
"""
if response_content_type == "application/json":
response = str(prediction)
else:
response = str(prediction)
return response | cb0bd922011190bf36aa37b792ecd904a545c915 | 225,608 |
def rk4_backward(f, x, h, **kwargs):
"""Implements a backwards classic Runge-Kutta integration RK4.
Parameters
----------
f : callable
function to reverse integrate, must take x as the first argument and
arbitrary kwargs after
x : numpy array, or float
state needed by function
h : float
step size
Returns
-------
state : numpy array, or float
Reverse integrated state
"""
k1 = f(x, **kwargs)
k2 = f(x - 0.5 * h * k1, **kwargs)
k3 = f(x - 0.5 * h * k2, **kwargs)
k4 = f(x - h * k3, **kwargs)
return x - (h / 6) * (k1 + 2 * k2 + 2 * k3 + k4) | f1a017131b41303d82392d7b9283a4ee2955064b | 619,232 |
async def convert_to_json(response):
""" Converts the aiohttp ClientResponse object to JSON.
:param response: The ClientResponse object.
:raises: ValueError if the returned data was not of type application/json
:returns: The parsed json of the response
"""
if "Content-Type" in response.headers and "application/json" not in response.headers["Content-Type"]:
raise ValueError("The response from {} does not have application/json mimetype".format(response.url))
return await response.json() | 9644dc2d7b9b24a7be980dabb8faa57c81ef7ed1 | 419,708 |
def read_file(file_name, verbose=False):
"""Takes a file name and returns the lines from the file as a list. Optional verbosity param
:param file_name: path to file to be read
:param verbose: run with extra logging
:returns lines: list strings representing lines in the file
"""
if verbose:
print('Reading file: <' + file_name + '>')
lines = None
with open(file_name, 'r+') as infile:
lines = infile.readlines()
if verbose:
print('Lines read: <' + str(len(lines)) + '>')
return lines | 3f689a78d61b7d1d4eb35f0c232fa945ee123074 | 31,469 |
def getFloatFromUser(message):
"""
Obtains positive real number from user.
Arguments:
message -- the message that is printed for the user, describing what is expected
Returns:
var -- a positive real number that the user entered
"""
var = None # initialize the variable we return
# try to get input from user until it works
while var is None:
var = input(message)
try:
var = float(var)
if var < 0.0: raise ValueError('Must be positive!')
except: # if var is not float or positive, it comes here
print("\t Please, enter a positive real number...")
var = None
return var | 1cb7ecfcb34e89c2da2d3534a9b5eace4a81f14b | 423,801 |
import torch
def xyxy2xywh(xyxy):
"""Convert bounding box enconding from [x1, y1, x2, y2] to [x, y, w, h]."""
# the input tensor must have four components in the last dimension
xywh = torch.zeros_like(xyxy)
xywh[:, 0] = (xyxy[:, 0] + xyxy[:, 2]) / 2
xywh[:, 1] = (xyxy[:, 1] + xyxy[:, 3]) / 2
xywh[:, 2] = xyxy[:, 2] - xyxy[:, 0]
xywh[:, 3] = xyxy[:, 3] - xyxy[:, 1]
return xywh | 67e2a3db9a5194d88284a7c2180cf3c1292ae1ac | 590,917 |
def unk_counter(sentence, vocab_to_int):
"""Counts the number of time UNK appears in a sentence."""
unk_count = 0
for word in sentence:
if word == vocab_to_int["<UNK>"]:
unk_count += 1
return unk_count | c20717c11c0a7a6a64d005aa9cd800b5119ad8d6 | 438,948 |
def fb_match_metadata(pageSoup):
"""
Extracts general information (teams, managers, captains, date, time, venue, attendance, score, xG) for a given match
Parameters:
pageSoup (html document): bs4 object of a match
Returns:
dict: metadata information
dict: match officials
"""
# Extract url
url = pageSoup.find('meta', {'property' : "og:url"})['content']
url = url.replace('https://fbref.com', '')
# Find scorebox object
scorebox = pageSoup.find('div', {'class' : 'scorebox'})
teams = scorebox.find_all('div', {'itemprop' : 'performer'})
# extract team id and name
id_x = teams[0].find('a', href=True)['href'].split('/')[3]
id_y = teams[1].find('a', href=True)['href'].split('/')[3]
team_x = teams[0].find('a', href=True).text
team_y = teams[1].find('a', href=True).text
# extract scores
scores = pageSoup.find_all('div', {'class' : 'scores'})
score_x = scores[0].find('div', {'class' : 'score'}).text
# error handling for non-xG
try:
xg_x = scores[0].find('div', {'class' : 'score_xg'}).text
except AttributeError:
xg_x = None
score_y = scores[1].find('div', {'class' : 'score'}).text
# error handling for non-xG
try:
xg_y = scores[1].find('div', {'class' : 'score_xg'}).text
except AttributeError:
xg_y = None
# extract managers and captains
managers = pageSoup.find_all('div', {'class' : 'datapoint'})
manager_x = managers[0].text.replace('Manager: ', '')
manager_y = managers[2].text.replace('Manager: ', '')
captain_x = managers[1].find('a', href=True)['href']
captain_y = managers[3].find('a', href=True)['href']
# Find match information object
scorebox_meta = pageSoup.find('div', {'class' : 'scorebox_meta'})
# extract date and time information
datetime = scorebox_meta.find('span', {'class' : 'venuetime'})
date = datetime['data-venue-date']
kickoff = datetime['data-venue-time']
# extract attendance, venue, and official information
scorebox_meta_ = scorebox_meta.find_all('div')
if scorebox_meta_[4].text.startswith('Attendance:'):
attendance = scorebox_meta_[4].text
attendance = attendance.replace("Attendance: ", "")
attendance = attendance.replace(",", "")
venue = scorebox_meta_[5].text.replace('Venue: ', '')
officials = scorebox_meta_[6].find_all('small')[1].text.split('\xa0· ')
else:
attendance = 0
venue = scorebox_meta_[4].text.replace('Venue: ', '')
officials = scorebox_meta_[5].find_all('small')[1].text.split('\xa0· ')
# generate dictionary for general metadata
matadict = {'url' : url,
'match_id' : url.split('/')[3],
'id_x' : id_x,
'id_y' : id_y,
'team_x' : team_x,
'team_y' : team_y,
'score_x' : score_x,
'score_y' : score_y,
'xg_x' : xg_x,
'xg_y' : xg_y,
'manager_x' : manager_x,
'manager_y' : manager_y,
'captain_x' : captain_x,
'captain_id_x' : captain_x.split('/')[3],
'captain_y' : captain_y,
'captain_id_y' : captain_y.split('/')[3],
'date' : date,
'kickoff' : kickoff,
'attendance' : attendance,
'venue' : venue}
# extract officials
referee = officials[0].replace(" (Referee)", "")
ar1 = officials[1].replace(" (AR1)", "")
ar2 = officials[2].replace(" (AR2)", "")
fourth = officials[3].replace(" (4th)", "")
var = (officials[4].replace(" (VAR)", "") if len(officials) > 4 else None)
# generate dictionary for officials
officialsdict = {'referee' : referee,
'ar1' : ar1,
'ar2' : ar2,
'fourth' : fourth,
'var' :var}
return matadict, officialsdict | 95e1be1b828ab85ca14458d1318e233acbfbd7f2 | 330,754 |
def keyfr(frame=0, duration=1000, **kwargs):
"""
Returns a single keyframe with given parameters
:param frame: name or number of image for this keyframe
:param duration: time in milliseconds for this keyframe
:param angle: degrees of clockwise rotation around a center origin
:param flipx: set True to flip image horizontally
:param flipy: set True flip image vertically
:param color: (r,g,b) triplet to shift color values
:param alpha: alpha transparency value
:param scale: scaling multiplier where 1.0 is unchanged
:param pos: optional (x,y) pair or Vector2 to set sprite position
:param velocity: optional (x,y) or Vector2 for sprite to move
measured in pixels per second
:param rotation: optional degrees of clockwise rotation per second
:param scaling: optional amount to scale per second where 0 = None
:param fading: optional int to subtract from alpha value per second
:param coloring: (r,g,b) triplet to shift each color value per second
"""
kwargs.update(frame=frame, duration=duration)
return(kwargs) | e5d2970dbab84e5203bc55db5a8193010181a9c8 | 190,432 |
def is_palindrome(number: int) -> bool:
"""Test if a number is a palindrome."""
as_string = str(number)
return as_string == as_string[::-1] | c85456bcf921e60c3622c1a084162aa2159abc70 | 498,350 |
from pathlib import Path
def fixture_tmp_zipfile(tmp_path: Path) -> Path:
"""Path of the ZIP file in the temporary directory."""
return tmp_path.joinpath("zipfile_samples.zip") | d507d19ace236c2d4dd7ce1ffa823b99a16d018f | 385,478 |
def datetime_to_str(value, fmt="%Y-%m-%d %H:%M:%S"):
""" 时间类型转换为字符串。datetime to string.
:type value: datetime.datetime
:type fmt: str
:rtype: str
"""
return value.strftime(fmt) | 34b28de5fbfa8f73957ead7ba314b76f38a89f27 | 465,890 |
def line_side(start_vector, end_vector, position_vector):
"""
Find out what side a position_vector is on given a line defined by start_vector and end_vector.
Args:
start_vector (list): eg. [0,0,0] vector
end_vector (list): eg. [0,0,0] vector
position_vector (list): eg. [0,0,0] vector
Returns:
bool: True if position_vector is on one side of the line, False if it is on the other side.
"""
return ((end_vector.x - start_vector.x)*(position_vector.y - start_vector.y) - (end_vector.y - start_vector.y)*(position_vector.x - start_vector.x)) > 0 | 26ebb60f6f8779c8be7ef2068bfb5e4c255657c0 | 47,877 |
def DeleteTypeAbbr(suffix, type_abbr='B'):
"""Returns suffix with trailing type abbreviation deleted."""
if not suffix:
return suffix
s = suffix.upper()
i = len(s)
for c in reversed(type_abbr.upper()):
if not i:
break
if s[i - 1] == c:
i -= 1
return suffix[:i] | 6805befcd30bfca8340383570ff555743f764c5f | 356,109 |
def pad_word_chars(words):
"""
Pad the characters of the words in a sentence.
Input:
- list of lists of ints (list of words, a word being a list of char indexes)
Output:
- padded list of lists of ints
- padded list of lists of ints (where chars are reversed)
- list of ints corresponding to the index of the last character of each word
"""
max_length = max([len(word) for word in words])
char_for = []
char_rev = []
char_pos = []
for word in words:
padding = [0] * (max_length - len(word))
char_for.append(word + padding)
char_rev.append(word[::-1] + padding)
char_pos.append(len(word) - 1)
return char_for, char_rev, char_pos | 8b1d5dd5fc92bd95bce2888d2f0db150de1b2e66 | 560,796 |
def get_server_cloud(server_name):
"""Get the server cloud from the server name."""
if "." in server_name:
server_name, server_domain = server_name.split(".", 1)
return (server_name, server_domain)
else:
return (server_name, None) | 4c6485cacc456f8652ee57d5c42703fe1a4fc1d7 | 495,257 |
def _canonicalize_extension(ext):
"""Returns a transformed ext that has a uniform pattern.
Specifically, if ``ext`` has a leading . then it is simply returned.
If ``ext`` doesn't have a leading . then it is prepended.
Exceptions to this are if ``ext`` is ``None`` or "". If ``ext``
is "" then "" is return. If ``ext`` is None then None is returned.
:param ext: The extension to canonicalize.
:returns: The canonicalized extension.
"""
if ext is None or ext == "" or ext.startswith("."):
return ext
return "." + ext | 935e85fd9a0f1bcfadc68c2390446ecbc814a0bc | 695,381 |
def is_first_char(input_string, char):
""" INPUT:
input_string = string of any length, typically a line in a file being parsed
char = string of the character we want to compare with to determine a match
RETURN:
True ; if char == first character in input_string
False ; if char != first character in input_string, or if input_string is empty
"""
input_string = input_string.strip() # remove empty spaces around string
if len(input_string) == 0 :
return False
if ( input_string[0] == char ):
return True
return False | 308a27216e1c657e8cb762e67f1e6250bd205807 | 154,981 |
def post_list(db, usernick=None, limit=50):
"""Return a list of posts ordered by date
db is a database connection (as returned by COMP249Db())
if usernick is not None, return only posts by this user
return at most limit posts (default 50)
Returns a list of tuples (id, timestamp, usernick, avatar, content)
"""
cursor = db.cursor()
if usernick != None:
sql = """SELECT id, timestamp, usernick, avatar, content
FROM posts
JOIN users ON posts.usernick=users.nick
WHERE usernick=(?)
ORDER by timestamp DESC
LIMIT (?);"""
cursor.execute(sql,(usernick, limit))
else:
sql = """SELECT id, timestamp, usernick, avatar, content
FROM posts
JOIN users ON posts.usernick=users.nick
ORDER by timestamp DESC
LIMIT (?);"""
cursor.execute(sql, (limit,))
result = []
for row in cursor:
result.append(row)
return result | 6438f531446611b3de866a60b2fb146907628589 | 351,804 |
def getFirstColumn(df):
"""
:param pd.DataFrame df:
:return list-object: first column of df
"""
return df[df.columns[0]].tolist() | fc44f1a7fda04aa3ced5993646a663fd7acebc0a | 489,481 |
def lcm(a, b):
"""
Simple version of lcm, that does not have any dependencies.
:param a: Integer
:param b: Integer
:rtype: The lowest common multiple of integers a and b
"""
tmp_a = a
while (tmp_a % b) != 0:
tmp_a += a
return tmp_a | cf87f390267bda90631e9e349a9ea81ccac70fc9 | 550,669 |
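On Python 3.9+ the standard library provides `math.lcm`; a minimal comparison sketch added here (the helper above agrees with it for positive integers):

```python
import math

print(math.lcm(4, 6))   # -> 12
print(math.lcm(21, 6))  # -> 42
```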
def get_service_type(f):
"""Retrieves service type from function."""
return getattr(f, 'service_type', None) | fb4d98a4b4db0d10ab97d94d98ccfe21cea05fe9 | 701,472 |
def obj_version_from_env(env):
"""
Fetch an object version from a request environment dictionary.
This discards 'null' versions since they are not supported by the
oio backend.
"""
vers = env.get('oio.query', {}).get('version')
if isinstance(vers, str) and vers.lower() == 'null':
vers = None
return vers | 876304f082884f5f645cdad8a412bcbbb0027b83 | 434,815 |
import torch
def _sort_batch_by_length(tensor, sequence_lengths):
"""
Sorts input sequences by lengths. This is required by Pytorch
`pack_padded_sequence`. Note: `pack_padded_sequence` has an option to
sort sequences internally, but we do it by ourselves.
Args:
tensor: Input tensor to RNN [batch_size, len, dim].
sequence_lengths: Lengths of input sequences.
Returns:
sorted_tensor: Sorted input tensor ready for RNN [batch_size, len, dim].
sorted_sequence_lengths: Sorted lengths.
restoration_indices: Indices to recover the original order.
"""
# Sort sequence lengths
sorted_sequence_lengths, permutation_index = sequence_lengths.sort(0, descending=True)
# Sort sequences
sorted_tensor = tensor.index_select(0, permutation_index)
# Find indices to recover the original order
index_range = sequence_lengths.data.clone().copy_(torch.arange(0, len(sequence_lengths))).long()
_, reverse_mapping = permutation_index.sort(0, descending=False)
restoration_indices = index_range.index_select(0, reverse_mapping)
return sorted_tensor, sorted_sequence_lengths, restoration_indices | 0fd0df95135e82b02ce85f29c366e01c3e67409b | 76,724 |
def build_error_response(msg, code=-1, status="error"):
"""Make Error Response based on the message and code"""
return dict(status=status, success=False, result=dict(description=msg, code=code)) | 358b0de1d4cfdb3c858b9c6f988177ef1be6f4c3 | 436,043 |
import torch
def cat_arange(counts, dtype=torch.int32):
"""
Concatenate results of multiple arange calls
E.g.: cat_arange([2,1,3]) = [0, 1, 0, 0, 1, 2]
Credits: https://stackoverflow.com/a/20033438
:param torch.tensor counts: a 1D tensor
:return: equivalent to torch.cat([torch.arange(c) for c in counts])
"""
counts1 = counts[:-1].type(dtype)
reset_index = torch.cumsum(counts1, dim=0).type(torch.int64)
incr = torch.ones(counts.sum(), dtype=dtype, device=counts.device)
incr[0] = 0
incr[reset_index] = 1 - counts1
# Reuse the incr array for the final result.
return torch.cumsum(incr, dim=0) | 61baf564bf03e7c0713ea2ef1b3ff9f08ae8949d | 648,733 |
def _safe(key, dic):
"""Safe call to a dictionary. Returns value or None if key or dictionary does not exist"""
if dic is not None and key in dic:
return dic[key]
else:
return None | c11b7f236d35f42bef6839a1fc244de446593e56 | 412,679 |
from passlib.context import CryptContext
def make_passwordmanager(schemes=None):
"""
schemes contains a list of hash schemes; replace this list with the hash(es) you wish
to support.
this example sets pbkdf2_sha256 as the default,
with support for legacy bcrypt hashes.
:param schemes:
:return: CryptContext()
"""
if not schemes:
schemes = ["pbkdf2_sha256", "bcrypt"]
pwd_context = CryptContext(schemes=schemes, deprecated="auto")
return pwd_context | fe278c057e735aca56716ecf397f7664ff010a37 | 671,069 |
def get_rendered_content_with_filled_template(total_movies, movie_entries):
""" Given data extracted from input file, fill in the html template
Args:
total_movies: total # of movies
movie_entries: the html content representing the individual movies
Returns:
the final rendered html content
"""
content = None
with open("movie_html_template.html", "r") as template_file:
template = template_file.read()
content = template.format(
_MOVIE_COUNT = total_movies,
_MOVIE_ENTRIES = movie_entries)
return content | 2c49bb45e48973ae83a2e51faa6b30b24343ac1d | 547,664 |
def vecs_from_tensor(x): # shape (...)
"""Converts from tensor of shape (3,) to Vecs."""
# num_components = x.shape[-1]
# assert num_components == 3
return x[..., 0], x[..., 1], x[..., 2] | 5ce0a22daba590f054348ce9736126fc1a611d00 | 410,420 |
def create_udp_port(byte_array):
"""
Creates the UDP port out of the byte array
:param byte_array: The byte array from which to extract the port number
:return: Integer of the port number
"""
first_two_bytes = [int(no) for no in byte_array]
first_two_bytes = first_two_bytes[:2]
return int.from_bytes(bytes(first_two_bytes), "little") | 88f9d62ce4b173b10c519c3d347d3e8372f8e2d2 | 470,388 |
import torch
def mul_complex(t1, t2):
"""multiply two complex valued tensors element-wise. the two last dimensions are
assumed to be the real and imaginary part
complex multiplication: (a+bi)(c+di) = (ac-bd) + (bc+ad)i
"""
# real and imaginary parts of first tensor
a, b = t1.split(1, 4)
# real and imaginary parts of second tensor
c, d = t2.split(1, 4)
# multiply out
return torch.cat((a * c - b * d, b * c + a * d), 4) | 15b676321f9e5846a8e3da12eba17ffd052cb6ff | 55,617 |
import calendar
def to_unix_timestamp(timestamp):
"""
Convert datetime object to unix timestamp. Input is local time, result is a
UTC timestamp.
"""
if timestamp is not None:
return calendar.timegm(timestamp.utctimetuple()) | 95481b7c9d1a3a63e6a428f28e8a2fd87e49c623 | 660,015 |
from typing import Dict
from typing import Set
from typing import Any
def axis_keys(D: Dict, axis: int) -> Set[Any]:
"""Return set of keys at given axis.
Parameters
-------------------------------
D: Dict
Dictionary to determine keys of.
Axis:int
Depth of keys.
Returns
-------------------------------
The set of keys at given axis
"""
return set.union(*[
axis_keys(d, axis-1) for d in D.values()
]) if axis else set(D.keys()) | b44dac5a9265e917c76a6d60f26b6bbd4958d733 | 80,995 |
def mults_of_ns(mults=[1], limit=1000):
"""
returns the sum of all the values that are multiples of ns up to limit
"""
return sum(set([val for val in range(limit) for arg in mults if val%arg==0])) | a393a2b08b3a53b2136d0612e43fa26d334aa06c | 448,683 |
import math
def vect_len(a):
"""
Get the length of a vector
"""
return math.sqrt(a[0]**2 + a[1]**2 + a[2]**2) | 7d2bd0f2b7b499df60f0bcc35d0a320703710784 | 486,449 |
def pop_param(request_values, name, default=None):
""" Helper to pop one param from a key-value list
"""
for param_name, value in request_values:
if param_name.lower() == name:
request_values.remove((param_name, value))
return value
return default | e8d1fe8691c062fd49ceff323236668fbda98d3f | 318,033 |
def get_english_chapter_count(book):
"""
A helper function to return the number of chapters in a given book in the English version of the Bible.
:param book: Name of the book
:type book: str
:return: Number of chapters in the book. 0 usually means an invalid book or unsupported translation.
:rtype: int
>>> get_english_chapter_count('Ecclesiastes')
12
>>> get_english_chapter_count('Barnabas')
0
>>> get_english_chapter_count('Song of Solomon')
8
>>> get_english_chapter_count('Psalms')
150
>>> get_english_chapter_count('Philippians')
4
"""
# Standardise letter casing to help find the key easier
book_name = book.title()
if book_name == 'Song Of Solomon':
# Song Of Songs has an alternate name
book_name = 'Song Of Songs'
elif book_name == 'Psalms':
# Psalm and its plural variation are basically the same book, but prefer the singular variant
book_name = 'Psalm'
elif book_name == 'Philippians':
# Prefer the spelling variation with two L's, partly for backwards compatibility with previous versions
book_name = 'Phillippians'
# This is the default mapping of books to their chapter counts
chapter_count_mappings = {
'Genesis': 50,
'Exodus': 40,
'Leviticus': 27,
'Numbers': 36,
'Deuteronomy': 34,
'Joshua': 24,
'Judges': 21,
'Ruth': 4,
'1 Samuel': 31,
'2 Samuel': 24,
'1 Kings': 22,
'2 Kings': 25,
'1 Chronicles': 29,
'2 Chronicles': 36,
'Ezra': 10,
'Nehemiah': 13,
'Esther': 10,
'Job': 42,
'Psalm': 150,
'Proverbs': 31,
'Ecclesiastes': 12,
'Song Of Songs': 8,
'Isaiah': 66,
'Jeremiah': 52,
'Lamentations': 5,
'Ezekiel': 48,
'Daniel': 12,
'Hosea': 14,
'Joel': 3,
'Amos': 9,
'Obadiah': 1,
'Jonah': 4,
'Micah': 7,
'Nahum': 3,
'Habakkuk': 3,
'Zephaniah': 3,
'Haggai': 2,
'Zechariah': 14,
'Malachi': 4,
'Matthew': 28,
'Mark': 16,
'Luke': 24,
'John': 21,
'Acts': 28,
'Romans': 16,
'1 Corinthians': 16,
'2 Corinthians': 13,
'Galatians': 6,
'Ephesians': 6,
'Phillippians': 4,
'Colossians': 4,
'1 Thessalonians': 5,
'2 Thessalonians': 3,
'1 Timothy': 6,
'2 Timothy': 4,
'Titus': 3,
'Philemon': 1,
'Hebrews': 13,
'James': 5,
'1 Peter': 5,
'2 Peter': 3,
'1 John': 5,
'2 John': 1,
'3 John': 1,
'Jude': 1,
'Revelation': 22
}
if book_name not in chapter_count_mappings.keys():
return 0
return chapter_count_mappings[book_name] | e1ee5cb200550d9d34e1753968fb46c332d2c68d | 610,078 |
import socket
import time
def wait_for(ip, port, timeout, _type='tcp'):
"""Wait for service by attempting socket connection to a tuple addr pair.
:param ip: str. an IP address to test if it's up
:param port: int. an associated port to test if a server is up
:param timeout: int. timeout in number of seconds (*2), multiply by two
because the socket timeout is set to 2 seconds
:param _type: can be either tcp or udp
:returns: bool. True if a connection was made, False otherwise
"""
if _type == 'tcp':
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
elif _type == 'udp':
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
else:
raise AttributeError('Invalid socket type specified: {}'.format(_type))
sock.settimeout(2)
for counter in range(timeout):
try:
sock.connect((ip, int(port)))
sock.close()
return True
except socket.error as err:
pass
time.sleep(1)
return False | 507f046d0516ce862b547dcef74ab901bd38fdf6 | 110,582 |
def get_company_by_email(domains_index, email):
"""Get company based on email domain
Automatically maps email domain into company name. Prefers
subdomains to root domains.
:param domains_index: dict {domain -> company name}
:param email: valid email. may be empty
:return: company name or None if nothing matches
"""
if not email:
return None
name, at, domain = email.partition('@')
if domain:
parts = domain.split('.')
for i in range(len(parts), 1, -1):
m = '.'.join(parts[len(parts) - i:])
if m in domains_index:
return domains_index[m]
return None | a0cc14e9a0dafb76c5f8309c8aec22f447824f45 | 440,145 |
def any_to_any(container1, container2):
"""
Returns whether any value of `container1` is in `container2` as well.
Parameters
----------
container1 : `iterable-container`
Any iterable container.
container2 : `iterable-container`
Any iterable container.
Returns
-------
contains : `bool`
"""
for value in container1:
if value in container2:
return True
return False | 893842b4fa83434c396f8e484fbdf9ea5f20d247 | 154,965 |
def number_of_interactions(records, direction=None):
"""
The number of interactions.
Parameters
----------
direction : str, optional
Filters the records by their direction: ``None`` for all records,
``'in'`` for incoming, and ``'out'`` for outgoing.
"""
if direction is None:
return len(records)
else:
return len([r for r in records if r.direction == direction]) | c4262273a2a2bf02f2bd715198d765d9a93335e4 | 162,553 |
import typing
def _remove_keys(
parameters: typing.Dict[str, typing.Any], exclude_labels: typing.List[str]
) -> dict:
"""
Remove keys from a dictionary without changing the original.
Attempts to remove keys that don't exist in the dictionary are silently
ignored.
Args:
parameters: Dictionary to be adjusted.
Returns:
Modified dictionary.
"""
this_copy = parameters.copy()
for k in exclude_labels:
try:
del this_copy[k]
except KeyError:
# silently ignore missing keys by suppressing this exception
pass
return this_copy | 17fdefa40cf558bb3b293f647edf6445dc9b58fd | 87,800 |
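The same non-mutating removal can be written as a dictionary comprehension; a short sketch added for illustration:

```python
parameters = {"a": 1, "b": 2, "c": 3}
exclude_labels = ["b", "missing"]
# absent keys simply never match, so no exception handling is needed
filtered = {k: v for k, v in parameters.items() if k not in exclude_labels}
print(filtered)  # -> {'a': 1, 'c': 3}
```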
import math
def quaternion_from_matrix(M):
"""Returns the 4 quaternion coefficients from a rotation matrix.
Parameters
----------
M : list[list[float]]
The coefficients of the rotation matrix, row per row.
Returns
-------
[float, float, float, float]
The quaternion coefficients.
Examples
--------
>>> q1 = [0.945, -0.021, -0.125, 0.303]
>>> R = matrix_from_quaternion(q1)
>>> q2 = quaternion_from_matrix(R)
>>> allclose(q1, q2, tol=1e-03)
True
"""
sqrt = math.sqrt
qw, qx, qy, qz = 0, 0, 0, 0
trace = M[0][0] + M[1][1] + M[2][2]
if trace > 0.0:
s = 0.5 / sqrt(trace + 1.0)
qw = 0.25 / s
qx = (M[2][1] - M[1][2]) * s
qy = (M[0][2] - M[2][0]) * s
qz = (M[1][0] - M[0][1]) * s
elif (M[0][0] > M[1][1]) and (M[0][0] > M[2][2]):
s = 2.0 * sqrt(1.0 + M[0][0] - M[1][1] - M[2][2])
qw = (M[2][1] - M[1][2]) / s
qx = 0.25 * s
qy = (M[0][1] + M[1][0]) / s
qz = (M[0][2] + M[2][0]) / s
elif M[1][1] > M[2][2]:
s = 2.0 * sqrt(1.0 + M[1][1] - M[0][0] - M[2][2])
qw = (M[0][2] - M[2][0]) / s
qx = (M[0][1] + M[1][0]) / s
qy = 0.25 * s
qz = (M[1][2] + M[2][1]) / s
else:
s = 2.0 * sqrt(1.0 + M[2][2] - M[0][0] - M[1][1])
qw = (M[1][0] - M[0][1]) / s
qx = (M[0][2] + M[2][0]) / s
qy = (M[1][2] + M[2][1]) / s
qz = 0.25 * s
return [qw, qx, qy, qz] | 6318208e9186ddbe26cdd1dc6175898918f39969 | 628,124 |
def indent(amount: int, s: str) -> str:
"""Indents `s` with `amount` spaces."""
prefix = amount * " "
return "\n".join(prefix + line for line in s.splitlines()) | 201dd126cfb46748fd5b0b0cefb3fc01f5fdb5f4 | 423,455 |
def combine_convergence_data(data_A, data_B):
""" Combine dictionaries with potentially overlapping keys. """
data = {}
for key in data_A:
data[key] = data_A[key]
if key in data_B:
data[key].update(data_B[key])
for key in data_B:
if key not in data:
data[key] = data_B[key]
return data | 5117c498ff1b27356de096b5d15768e2005e3e47 | 609,568 |
def module_to_xapi_vm_power_state(power_state):
"""Maps module VM power states to XAPI VM power states."""
vm_power_state_map = {
"poweredon": "running",
"poweredoff": "halted",
"restarted": "running",
"suspended": "suspended",
"shutdownguest": "halted",
"rebootguest": "running",
}
return vm_power_state_map.get(power_state) | 6c02106f08078e7366056b67619f47698a322d22 | 507,368 |
def to_returns(prices):
"""
Calculates the simple arithmetic returns of a price series.
Formula is: (t1 / t0) - 1
Args:
* prices: Expects a price series
"""
return prices / prices.shift(1) - 1 | 4bbe3a23bfe5e95e8b861b9dbf1a1b48a2c82865 | 232,395 |
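With a pandas Series as input, `pct_change()` computes the same simple returns; a brief comparison sketch added here:

```python
import pandas as pd

prices = pd.Series([100.0, 105.0, 102.9])
# (t1 / t0) - 1, with NaN for the first observation in both cases
print((prices / prices.shift(1) - 1).round(4).tolist())  # [nan, 0.05, -0.02]
print(prices.pct_change().round(4).tolist())             # [nan, 0.05, -0.02]
```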
def unflatten(flat_config):
"""Transforms a flat configuration dictionary into a nested dictionary.
Example:
{
"a": 1,
"b.c": 2,
"b.d.e": 3,
"b.d.f": 4,
}
would be transformed to:
{
"a": 1,
"b": {
"c": 2,
"d": {
"e": 3,
"f": 4,
}
}
}
Args:
flat_config: A dictionary with strings as keys where nested configuration
parameters are represented with period-separated names.
Returns:
A dictionary nested according to the keys of the input dictionary.
"""
config = {}
for path, value in flat_config.items():
path = path.split(".")
final_key = path.pop()
nested_config = config
for key in path:
nested_config = nested_config.setdefault(key, {})
nested_config[final_key] = value
return config | f7788c41cabfd9e992349709db40364ac6e59fe5 | 221,028 |
def bubblesort(a):
"""Bubble Sort algorithm for sorting a sequence container of values.
"""
l = len(a)
swaps = 1
while swaps > 0:
swaps = 0
for i,j in zip(range(0,l-1),range(1,l)):
if a[i] > a[j]:
t = a[i]
a[i] = a[j]
a[j] = t
swaps += 1
return a | 47fe2888cba6f9654f014320038a38a6b24c0780 | 279,010 |
import re
def extract_page_nr(some_string):
""" extracts the page number from a string like `Seite 21`
:param some_string: e.g. `Seite 21`
:type some_string: str
:return: The page number e.g. `21`
:rtype: str
"""
page_nr = re.findall(r'\d+', some_string)
if len(page_nr) > 0:
return "-".join(page_nr)
else:
return some_string | 6d39314de89c8f4bf4d931f2dc329fe394a10091 | 9,404 |
def evaluate(conf_matrix, label_filter=None):
"""
Evaluate Precision, Recall and F1 based on a confusion matrix as produced by `create_confusion_matrix`.
Args:
conf_matrix: a confusion matrix in form of a dictionary from `(gold_label,guess_label)` pairs to counts.
label_filter: a set of gold labels to consider. If set to `None` all labels are considered.
Returns:
Precision, Recall, F1 triple.
"""
tp = 0
tn = 0
fp = 0
fn = 0
for (gold, guess), count in conf_matrix.items():
if label_filter is None or gold in label_filter or guess in label_filter:
if gold == 'None' and guess != gold:
fp += count
elif gold == 'None' and guess == gold:
tn += count
elif gold != 'None' and guess == gold:
tp += count
elif gold != 'None' and guess == 'None':
fn += count
else: # both gold and guess are not-None, but different
fp += count if label_filter is None or guess in label_filter else 0
fn += count if label_filter is None or gold in label_filter else 0
prec = tp / (tp + fp) if (tp + fp) > 0 else 0.0
recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0
f1 = 2 * prec * recall / (prec + recall) if prec * recall > 0 else 0.0
return prec, recall, f1 | 5c3284a1647cfa336ec7170b35f7d702d1538c21 | 661,314 |
import csv
def parseList(string):
"""
Decomposes strings like '"val1,val2",val3,"val4,val5"'
into a list of strings:
[ 'val1,val2' ,'val3', 'val4,val5' ]
"""
for line in csv.reader([string],delimiter=","):
values = line
#for i,value in enumerate(values):
# values[i] = value.replace(" ","")
return values | 66cd73e80b67621576f2571e99ac328672acc440 | 619,060 |
def get_cat_code_dict(df, col):
"""Returns a dict to pase a categorical column to another categorical but using integer codes in order to use it with shap functions.
Parameters
----------
df : pd.DataFrame
col : str
Column to transform
Returns
-------
dict
Original names as key and codes as values.
"""
d = dict(
zip(
df[col],
df[col].cat.codes.astype("category")
)
)
return d | 9b0cb51072133cf1896f1bd24313f9160c05fd37 | 76,222 |
import random
import string
def random_id(k=5):
"""Random id to use for AWS identifiers."""
# https://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits-in-python
return ''.join(random.choices(string.ascii_lowercase + string.digits, k=k)) | c90907ddd48386fcdf0008bea4426b203247e5dc | 261,801 |
import pickle
def load_obj(name, path):
"""
Parameters
----------
name : string
the named of the pkl file that will be loaded.
path : string
the full path of the intended file.
Returns
-------
type of the object within the file
the object in the specified file.
"""
with open(path + name + '.pkl', 'rb') as f:
return pickle.load(f) | 26e28c6bbc9352535a2ce61bc4b420ca574236ca | 608,231 |
def sqrt(number):
"""
Calculate the floored square root of a number
Args:
number(int): Number to find the floored squared root
Returns:
int: Floored Square Root
"""
# Negative numbers don't have real square roots since a square is either positive or 0.
# source : https://www.mathplanet.com/education/algebra-1/exploring-real-numbers/square-roots
if number is None:  # input is None
return None
if number <= 0:# return -1 if number is zero or negative
return -1
if number == 1:# return 1 if number is 1
return 1
# -----------Solution Test Start ----------
# checker = 0
# answer = number / 2 # n/2
# # Iterate until checker is equal to answer
# while(checker != answer):
# # store answer to new checker
# checker = answer
# # update new answer by using inital ((n / n/2) + n/2)/2
# answer = ((number / checker) + checker) //2
# return answer
# -----------Solution Test End -----------
# ----- Solution Binary Search
# to store floor of sqrt(number)
result = 0
# the square root of number cannot be more than n/2 for number > 1
start = 1
end = number // 2
while start <= end:
# find the mid between start and end
mid = (start + end) // 2
sqr = mid*mid
# return mid if sqrt and number is a perfect square
if sqr == number:
return mid
# if mid × mid is less than number
elif sqr < number:
# discard left search
start = mid + 1
# update result since we need a floor
result = mid
# if mid × mid is more than number
else:
# discard the right search
end = mid - 1
return result
# return result
# ----- Solution Binary Search | d54fe92f7c79e77916a13485091060ae11c35c6b | 290,860 |
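Python 3.8+ ships `math.isqrt`, which returns the floored integer square root directly; a minimal comparison sketch added here (the None/negative error codes above are specific to this exercise):

```python
import math

print(math.isqrt(16))  # -> 4
print(math.isqrt(27))  # -> 5 (floor of 5.196...)
```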
import random
import string
def _random_filename(filename, k=8):
"""Random Filename
:param filename: The original filename. Can be a path
:param k: Length of additional character
:returns: Original filename with additional random string of size k
"""
filename = filename.split(".", 1)
salt = "".join(random.choices(string.ascii_lowercase, k=k))
filename[0] = f"{filename[0]}_{salt}"
return ".".join(filename) | 7bdf31e39c85f29e8b7827443e8f98482f221e3f | 279,070 |
def slope(first_point, second_point):
"""
Returns the slope between 2 points
:param first_point: first point (x,y)
:param second_point: second point (x,y)
:return: the slope
"""
return 0. if first_point[0] == second_point[0] else\
((float(first_point[1]) - float(second_point[1])) / (float(first_point[0]) - float(second_point[0]))) | 8b8738af96279a112cc0cf01591d8d7bf5de429b | 671,541 |
def aumentar(valor=0, porc=0):
"""
Function that increases the value by a given percentage.
:param valor: value to be increased
:param porc: percentage
:return: the value increased by the percentage
"""
num = valor + (porc/100)*valor
return num | d8ff1bb0091a3e979168979aa35f1674161e1f45 | 475,412 |
import time
def timeit(func, *args, **kwargs):
"""
Time execution of function. Returns (res, seconds).
>>> res, timing = timeit(time.sleep, 1)
"""
start_time = time.time()
res = func(*args, **kwargs)
timing = time.time() - start_time
return res, timing | 5b3d01055ee06ff8a77a90514cb12070ac22cda1 | 97,440 |
import string
def normalize(token_list):
"""Removing punctuation, numbers and also lowercase conversion"""
# Building a translate table for punctuation and number removal
# token_list = [re.findall(r'[0-9]+|[a-z]+', s) for s in token_list]
# token_list = reduce(operator.add, token_list)
punctnum_table = str.maketrans({c: None for c in string.punctuation + string.digits})
rawCorpus_punctnum_rem = [token.translate(punctnum_table) for token in token_list]
token_ed = [token.lower() for token
in rawCorpus_punctnum_rem if token]
return token_ed | e000456b57be0fd44e6c03c0162b39165fc48aa5 | 59,685 |
def ensure_string(x):
"""Returns a string if x is not a string, and x if it already is."""
return str(x) | 06a67f31706810694f263db52fd8fd144f38a966 | 560,599 |
def get_height(root):
"""Extracts the ash plume height - only valid for ash alerts!
Values returned are in this order:
1. Maximum ash height (kilometers)
2. Maximum ash height (feet)
3. Tropopause height (kilometers)
4. Tropopause height (feet)"""
hgt_km = root.alert.max_height.attrib.get('value')
hgt_ft = root.alert.max_height_feet.attrib.get('value')
tropohgt_km = root.alert.tropo_height.attrib.get('value')
tropohgt_ft = root.alert.tropo_height_feet.attrib.get('value')
return hgt_km, hgt_ft, tropohgt_km, tropohgt_ft | 6203283f35a46cb6229a606b46e69cb78409624d | 394,558 |