content | sha1 | id |
---|---|---|
import requests
def url_is_valid(url):
"""
Very simple check if URL exists/is valid or not.
Parameters
----------
url : str
URL to validate.
Returns
-------
bool
"""
try:
r = requests.get(url=url)
if r.status_code != 200:
return False
return True
    except requests.RequestException:
return False | f3cccc907ec70e0a2257f0ce50e0fade9b4e1038 | 361,330 |
def docker_is_running(client):
"""Return true if Docker server is responsive.
Parameters
----------
client : docker.client.DockerClient
The Docker client. E.g., `client = docker.from_env()`.
Returns
-------
running : bool
True if Docker server is responsive.
"""
try:
client.ping()
return True
except Exception:
return False | ea13a45c37922509374f88e753dc082bc4f006dc | 257,673 |
import json
def load_bytes(b):
"""Converts bytes of JSON to an object."""
return json.loads(b.decode("ascii")) | 54b129e7ae14f49deffdb128f14a09df78ce96e4 | 152,062 |
def summarize_bitmap(bitmap: list) -> str:
"""Give a compact text summary for sparse bitmaps"""
nonzero_bits = []
for i, b in enumerate(bitmap):
if b != 0:
nonzero_bits.append(f'{i:x}:{b:02x}')
sorted_nonzero_bits = ', '.join(sorted(nonzero_bits))
summary = f'{len(nonzero_bits)}/{len(bitmap)}: {sorted_nonzero_bits}'
return summary | 9208fbf2156fef17526f433556f870fa4aa64340 | 609,411 |
def update_probability(prior, prob_true, prob_false):
"""Update probability using Bayes' rule."""
numerator = prob_true * prior
denominator = numerator + prob_false * (1 - prior)
probability = numerator / denominator
return probability | c91a58eba35927c7b498771131fc1eda19e78680 | 260,713 |
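A brief worked check of the Bayes update above, using illustrative numbers (a 1% prior, a 90% true-positive rate, and a 5% false-positive rate):
prior = 0.01        # P(hypothesis)
prob_true = 0.90    # P(evidence | hypothesis)
prob_false = 0.05   # P(evidence | not hypothesis)
posterior = update_probability(prior, prob_true, prob_false)
# 0.9 * 0.01 / (0.009 + 0.05 * 0.99) = 0.009 / 0.0585 ~= 0.1538
assert abs(posterior - 0.1538) < 1e-3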
def get_datastore_state(target, device):
"""Apply datastore rules according to device and desired datastore.
- If no target is passed in and device has candidate, choose candidate.
- If candidate is chosen, allow commit.
- If candidate is chosen and writable-running exists, allow lock on running
prior to commit.
- If running, allow lock, no commit.
- If startup, allow lock, no commit.
- If intent, no lock, no commit.
- If operational, no lock, no commit.
- Default: running
Args:
target (str): Target datastore for YANG interaction.
device (rpcverify.RpcVerify): Class containing runtime capabilities.
Returns:
(tuple): Target datastore (str): assigned according to capabilities
Datastore state (dict):
commit - can apply a commit to datastore
lock_ok - can apply a lock to datastore
lock_running - apply lock to running datastore prior to commit
"""
target_state = {}
for store in device.datastore:
if store == 'candidate':
if not target:
target = 'candidate'
target_state['candidate'] = ['commit', 'lock_ok']
if 'running' in target_state:
target_state['candidate'].append('lock_running')
continue
if store == 'running':
if 'candidate' in target_state:
target_state['candidate'].append('lock_running')
target_state['running'] = ['lock_ok']
continue
if store == 'startup':
target_state['startup'] = ['lock_ok']
continue
if store == 'intent':
# read only
target_state['intent'] = []
continue
if store == 'operational':
# read only
target_state['operational'] = []
continue
if not target:
target = 'running'
return target, target_state | e4956d5283f525c9b282dec1784622d5f30a4816 | 93,457 |
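A minimal usage sketch of the datastore-rule helper above; the `device` object here is a hypothetical stand-in for an rpcverify.RpcVerify instance, of which only the `datastore` attribute is used:
from types import SimpleNamespace

device = SimpleNamespace(datastore=['candidate', 'running', 'startup'])  # hypothetical stand-in
target, state = get_datastore_state(None, device)
# With no explicit target and a candidate datastore available, candidate is chosen
# and gains 'commit', 'lock_ok' and 'lock_running'.
assert target == 'candidate'
assert state['candidate'] == ['commit', 'lock_ok', 'lock_running']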
def mapping_to_list(mapping):
"""Convert a mapping to a list"""
output = []
for key, value in mapping.items():
output.extend([key, value])
return output | 5d5158f04b765166d8be335644bd300676a7ecb0 | 340,027 |
def check_dependents(full_name, import_list):
""" Check if we are parent of a loaded / recursed-to module file.
Notes:
Accept full_name if full_name.something is a recursed-to module
Args:
full_name: The full module name
import_list: List of recursed-to modules
Returns:
Bool
"""
search_name = full_name + "."
for item in import_list:
if item.startswith(search_name):
return True
return False | 5e7cfac9e19b3ac085163cbb40e402f4df1d99e9 | 492,009 |
def getFQDNFromHTTPURL(httpURL):
"""Returns the FQDN from an HTTP URL for a storage account"""
return httpURL.split("//")[1].split("/")[0] | 5bcacb6b35009264dd18cf1acdd3334c01502ec0 | 279,614 |
def falsey_to_none(var):
"""Certain parameters return invalid 0 when they should be False
This function takes a variable, checks if its 'falsey' and return it
with the value of None if it is.
:param var: The variable to check
:return: The variable that was given, with value of None if it was falsey
"""
if not var:
var = None
return var | c6e165ae63ae30e5baadcd3e0bd1683967cc2d9e | 150,094 |
def interleave_lists(*args):
"""Interleaves N lists of equal length."""
for l in args:
assert len(l) == len(args[0]) # all lists need to have equal length
return [val for tup in zip(*args) for val in tup] | 7062d1ea2707198c879a88d7f56e7cdfd755453b | 149,318 |
import re
def htmlize_paragraphs(text):
"""
Convert paragraphs delimited by blank lines into HTML text enclosed
in <p> tags.
"""
    paragraphs = re.split(r'\r?\n\s*\r?\n', text)
return '\n'.join('<p>%s</p>' % paragraph for paragraph in paragraphs) | 6bd6ad8d9108618f297f253e5f112a54b21e3362 | 438,301 |
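A quick usage check of the paragraph converter above (illustrative input): two blocks separated by a blank line become two <p> elements.
text = "First paragraph.\n\nSecond paragraph."
assert htmlize_paragraphs(text) == "<p>First paragraph.</p>\n<p>Second paragraph.</p>"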
def correct_text(spellcheck, input):
"""
Checks whether the input is correctly spelled and correct it otherwise
Returns the corrected input
"""
output = input
ok = spellcheck.spell(input)
if not ok:
suggestions = spellcheck.suggest(input)
if len(suggestions) > 0:
output = suggestions[0]
# print(f"{input} -> {output}")
return output | e6235b9dd06f1849df0070c28dff93041573f0d5 | 205,374 |
import itertools
def dictzip(*dicts):
""" Iterate over multiple dicts 'zipping' their elements with
    matching keys. If a dict is missing an entry for a key, its value
    will be None."""
keyset = set(itertools.chain(*dicts))
return ((k, *[d.get(k,None) for d in dicts]) for k in keyset) | 80da7dd8ca5a5e0c62a58ad97e3573f2ac06c1f6 | 486,211 |
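A small example of zipping two dicts with partially overlapping keys (illustrative values); missing entries come back as None:
a = {'x': 1, 'y': 2}
b = {'y': 20, 'z': 30}
rows = {k: rest for k, *rest in dictzip(a, b)}
assert rows == {'x': [1, None], 'y': [2, 20], 'z': [None, 30]}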
def polarity(num):
"""
Returns the polarity of the given number.
"""
if num > 0:
return 1
if num < 0:
return -1
return 0 | b04ce599d918610f3e39d6004803ed8d7574e478 | 490,895 |
import heapq
def _pop_unscanned_candidate(pqueue, scanned):
"""
Pop out the first unscanned candidate in the pqueue. Return a
tuple of Nones if no more unscanned candidates found.
"""
if not pqueue:
return None, None, None
while True:
cost_sofar, candidate, prev_candidate = heapq.heappop(pqueue)
if not pqueue or candidate.id not in scanned:
break
if candidate.id in scanned:
assert not pqueue
return None, None, None
return cost_sofar, candidate, prev_candidate | 7bdbe84bfb062b3f05d5f522b960f3ac256dbad6 | 342,629 |
def is_fitted(estimator):
"""
Checks if a scikit-learn estimator/transformer has already been fit.
Parameters
----------
estimator: scikit-learn estimator (e.g. RandomForestClassifier)
or transformer (e.g. MinMaxScaler) object
Returns
-------
Boolean that indicates if ``estimator`` has already been fit (True) or not (False).
"""
attrs = [v for v in vars(estimator) if v.endswith("_") and not v.startswith("__")]
return len(attrs) != 0 | 951c2e01c214ae98acb4606f2172055f152d3082 | 587,448 |
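A brief illustration of the fitted-check above, assuming scikit-learn is installed; a transformer only gains trailing-underscore attributes once it has been fit:
from sklearn.preprocessing import MinMaxScaler

scaler = MinMaxScaler()
assert not is_fitted(scaler)          # no fitted attributes yet
scaler.fit([[0.0], [1.0], [2.0]])
assert is_fitted(scaler)              # fit() added scale_, min_, etc.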
from pathlib import Path
def home_cwd(tmpdir):
""" create two temporary directories simulated cwd and home.
Return dict with contents. """
cwd = Path(tmpdir)
home = cwd / "home"
home.mkdir()
return dict(secondary_path=home, primary_path=cwd) | ff1d05e4c0ca6952ff41f66a01505735d867aa16 | 66,701 |
def name(function):
"""Returns the name of a function"""
return function.__name__ | 476d59de4891d56894e7ca8bb91b929439074188 | 103,828 |
def dup_copy(f):
"""
Create a new copy of a polynomial ``f`` in ``K[x]``.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.densebasic import dup_copy
>>> f = ZZ.map([1, 2, 3, 0])
>>> dup_copy([1, 2, 3, 0])
[1, 2, 3, 0]
"""
return list(f) | 0547d1a26d9f76107c6b1748abf3245032c78870 | 236,057 |
import pickle
def load_dubm(fpath):
"""Load diagonal UBM parameters.
Parameters
----------
fpath : Path
Path to pickled UBM model.
Returns
-------
m
iE
w
"""
with open(fpath, "rb") as f:
params = pickle.load(f)
m = params["<MEANS_INVVARS>"] / params["<INV_VARS>"]
iE = params["<INV_VARS>"]
w = params["<WEIGHTS>"]
return m, iE, w | 34ea59652481d1e9495d0a7ec5479164d88f625c | 537,126 |
def get_connected_component_vertices(g, s):
"""
Returns the list of vertices in the connected component of s.
Use a dfs and return all the visited vertices.
:type g: adjacency list of an unweighted graph
:param s: vertex
:type s: int
:return: list(int)
"""
V = [] # List of visited vertices
Q = []
Q.append(s) # Stack
while not Q == []:
v = Q.pop()
if v not in V:
V.append(v)
for neighbor in g[v]:
Q.append(neighbor)
return V | 1a657f50cabdb147ceadef93d0efa2efa75da73d | 201,585 |
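A small usage check of the DFS-based component search above, on an illustrative graph with two components:
g = {0: [1], 1: [0, 2], 2: [1], 3: [4], 4: [3]}
assert sorted(get_connected_component_vertices(g, 0)) == [0, 1, 2]
assert sorted(get_connected_component_vertices(g, 3)) == [3, 4]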
from typing import Any
from typing import cast
def _is_package_entry(entry: Any) -> bool:
"""Returns True if the html entry describes a package."""
href = entry["href"]
return cast(bool, (
href.endswith("gz")
and href == entry.string
and href != "PACKAGES.gz"
)) | 03baf1161a2a2c1290a633b05b98596a37c3160b | 586,735 |
import pickle
def pickle_load(path):
"""
Loads a serialized object with pickle.
"""
with open(path, "rb") as f:
return pickle.load(f) | c25faa83a267822b0487973e957c1cfaf4aadaa1 | 409,249 |
def vstack(grid: list[list[int]], n: int) -> list[list[int]]:
"""Vertically stack a list of lists."""
new_grid = []
for _ in range(n):
new_grid.extend([[el for el in row] for row in grid]) # 'deepcopy'
return new_grid | 20f24518138ce4d98f8a852ca3302c109aafa390 | 145,417 |
def _get_arg_config_file(args):
"""Return String yaml config file path."""
if not args.config_file:
raise ValueError("YAML Config File was not supplied.")
return args.config_file | 34e4570cee420035cbaeab3c852069c4abf6a3ae | 32,478 |
def any_id2key(record_id):
""" Creates a (real_id: int, thing: str) pair which allows ordering mixed
collections of real and virtual events.
The first item of the pair is the event's real id, the second one is
either an empty string (for real events) or the datestring (for virtual
ones)
:param record_id:
:type record_id: int | str
:rtype: (int, str)
"""
if isinstance(record_id, int):
return record_id, u''
(real_id, virtual_id) = record_id.split('-')
return int(real_id), virtual_id | bcee2625599a572f5be7ffd912976dbf4ab2f4aa | 389,450 |
from typing import List
def data_string_to_list(string_data: str) -> List:
"""Transforms a data string of the form key1.key2.key3 to list. Validation checks are applied here.
:param string_data: The data string
:return: The serialized data as a single list
"""
return string_data.split(".") | 871c03e4509955bb5818f6eec25ba7235b968865 | 581,047 |
def get_user_id(soup):
"""
Returns the user id via the scraper.
"""
follow = soup.find("meta", {"name": "octolytics-dimension-user_id"})["content"]
return int(follow) | fc9eef62f0cb66321252ba7ba983f601a72a1c74 | 465,390 |
def cunningham(lbm):
"""
The Cunningham equation for resting metabolic rate (RMR). This formula is similar to Katch-McArdle, but provides a slightly higher estimate.
"""
return 500 + (22 * lbm) | 0f0097865ffeceff1f04c0612dfc8b7ffeb9d0c2 | 453,616 |
def array_chunk(array, size):
"""
Given an array and chunk size, divide the array
into many subarrays, where each subarray is of
length size.
array_chunk([1,2,3,4], 2) --> [[1,2], [3,4]]
array_chunk([1,2,3,4,5], 2) --> [[1,2], [3,4], [5]]
"""
counter = 0
outer_list = []
inner_list = []
for item in array:
if counter > 0 and not counter % size:
outer_list.append(inner_list)
inner_list = [item]
else:
inner_list.append(item)
counter += 1
outer_list.append(inner_list)
return outer_list | 7ec1e40c8306dc256387292421474ee9e037568b | 670,466 |
def get_orientation(read1, read2):
"""
Return relative orientation of read pairs. Assumes read pairs have
been ordered such that read 1 is five prime of read 2.
"""
if read1.is_reverse:
if read2.is_reverse:
orientation = "Same-reverse"
else:
orientation = "Outward"
else:
if read2.is_reverse:
orientation = "Inward"
else:
orientation = "Same-forward"
return orientation | b7acfa3fd8001be107fb58004d5879754630890b | 205,300 |
def rmTrailChar(str,c):
"""
Remove all the trailing characters c from a string
@input str :: starting string
@input c :: trailing characters
@return :: string without the trailing character
"""
    while len(str) > 0 and str[-1] == c:
        str = str[:-1]
return str | 15ca0fbfb7e0315f249685a38a62aad11057caaf | 187,756 |
def sort_updates(update: dict) -> int:
"""Return the seconds to go to pass to the python .sort() function
Args:
update (dict): Pass in the update dictionary that is filled with the required information
Returns:
int: The time delta in seconds that is stored in the update dictionary
"""
return update["seconds_to_go"] | 28c363ac1a0d0df32884bc8b91f6c47427eef881 | 469,361 |
def get_interpret_as(self, elem) :
""" Returns a name of a rule based on the interpret_as element if such is valid
for this element. Otherwise returns None. """
cur = elem
while (cur is not None and cur.tag != "say_as") :
cur = cur.getparent()
if cur is None: return None
return cur.attrib.get("interpret_as") | dc61cf6f01f1f773d31a7df39e90e716484d763f | 404,838 |
def _extract_email(gh):
"""Get user email from github."""
return next(
(x.email for x in gh.emails() if x.verified and x.primary), None) | e3803c80e081cadca3e9e07e65a8a8ab57d1b79a | 239,992 |
import re
def binary_as_bytes(binary):
"""Reformat binary string as a sequence of bytes."""
if not binary:
return binary
bits = re.sub(r'\s', '', binary)
bites = re.findall('[01]{8}', bits)
if bits != ''.join(bites):
return binary
return ' '.join(bites) | b9e893153a0f57ab87d3b7d95e1f470e67361667 | 338,433 |
def latlon_decimaldegrees(nmealat, latchar, nmealon, lonchar):
"""
Converts the nmea lat & lon into decimal degrees
Note:
West (of prime meridian) longitude as negative
East (of prime meridian) longitude as positive
North (of the equator) latitude is positive
South (of the equator) latitude is negative
Args:
nmealat(str): latitude
latchar(str): N or S
nmealon(str): longitude
lonchar(str): E or W
Returns:
latdeg(float): the latitude in decimal degrees
londeg(float): the longitude in decimal degrees
"""
nmealon = float(nmealon)
nmealat = float(nmealat)
londegwhole = int(nmealon/100)
londecdeg = (nmealon - londegwhole * 100)/60
londeg = londegwhole + londecdeg
if lonchar == 'W':
londeg = (-1)*londeg
latdegwhole = int(nmealat/100)
latdecdeg = (nmealat - latdegwhole * 100)/60
latdeg = latdegwhole + latdecdeg
if latchar == 'S':
latdeg = (-1)*latdeg
return latdeg, londeg | 423bb3cb50e9eb4485adf564b66161167dc47496 | 83,053 |
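A worked conversion using the classic NMEA sample coordinates (48°07.038' N, 011°31.000' E), for illustration:
lat, lon = latlon_decimaldegrees('4807.038', 'N', '01131.000', 'E')
# 48 + 7.038/60 ~= 48.1173 and 11 + 31.000/60 ~= 11.5167
assert abs(lat - 48.1173) < 1e-4
assert abs(lon - 11.5167) < 1e-4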
def reserrorcalc(test_set, model):
"""
Calculates RSS error for the given test set
"""
# Extracting X
X = test_set[:,:-1]
# Extracting labels
Y = test_set[:,-1]
residual_err = sum((model.predict(X) - Y) ** 2)
return residual_err | f52bf2d98b31e2740e46de6bd0aea5cc8709fd01 | 244,619 |
def ma(df, ma_ranges=[10, 21, 50]):
"""
Simple Moving Average
Parameters
----------
df : pandas.DataFrame, must include columns ['Close']
Dataframe where the ma is extracted from
ma_ranges: list, default [10, 21, 50]
List of periods of Simple Moving Average to be extracted
Return
------
df : pandas.DataFrame
DataFrame with Simple Moving Average
"""
df = df.copy()
for period in ma_ranges:
df[f"MA{period}"] = df['Close'].rolling(window=period).mean()
return df | ddd88c75f17e0dcac6a9ce5991ee263ba92c6e83 | 653,105 |
def combineLists(lsts):
""" combine a list of lists into a single list """
new = []
for lst in lsts:
for i in lst:
new.append(i)
return new | bf26a553cbc8d97d71cc0bef0bc23a247c583c17 | 151,872 |
def first_symbol(pattern):
"""
return first symbol of pattern
"""
x = pattern[0]
return x | 3c56235ec4b95d2239b55f80c980ef1ce6b4ed14 | 261,788 |
def create_ipv6_nat_rule(chain, bridge, proto, host_port, container_ip, container_port):
"""return a iptables v6 nat rule for forwarding a host port to a container IP:port"""
return '-A {chain} ! -i {bridge} -p {proto} -m {proto}' \
' --dport {host_port} -j DNAT' \
' --to-destination [{container_ip}]:{container_port}'.format(chain=chain,
bridge=bridge,
proto=proto,
host_port=host_port,
container_ip=container_ip,
container_port=container_port) | cb432494ad293d38ced8b7c212d09fde1d6f2621 | 183,689 |
def dict_valkey(diction, val):
"""
Given passed dictionary reference, returns the first key found whose value is equal to val,
or None if no matching value.
:param diction: the dictionary whose key is to be looked for
:type diction: reference to dictionary object
    :param val: the value to search for
:returns: Reference to the dictionary's matching key object
"""
    for dkey, dval in diction.items():
if dval == val:
return dkey
return None | 382f9250dc1d8f07401432b2c6c45da0b3508581 | 440,864 |
def split_numeric(s):
""" Split a string into numeric and non-numeric parts """
num, alpha = [], []
for c in s:
if c.isdigit():
num.append(c)
else:
alpha.append(c)
return ''.join(num), ''.join(alpha) | 67c855a6dfaeec2135e5e25cb9b767e9c6ba58af | 556,835 |
def get_specific_label_dfs(raw_df, label_loc):
"""
Purpose: Split the instances of data in raw_df based on specific labels/classes
and load them to a dictionary structured -> label : Pandas Dataframe
Params: 1. raw_df (Pandas Dataframe):
- The df containing data
2. label_loc (String):
- The location where the output labels are stored in 1. raw_df
Returns: A dictionary structured -> label : Pandas Dataframe
"""
labels = list(raw_df[label_loc].unique())
# a list of dataframes storing only instances of data belonging to one specific class/label
label_dataframes = {}
for label in labels:
label_dataframes[label] = raw_df.loc[raw_df[label_loc] == label]
return label_dataframes | 756f03f845da64f6fd5534fb786966edb8610a13 | 708,298 |
def initial_fixation_duration(interest_area, fixation_sequence):
"""
Given an interest area and fixation sequence, return the duration of the
initial fixation on that interest area.
"""
for fixation in fixation_sequence.iter_without_discards():
if fixation in interest_area:
return fixation.duration
return 0 | 3fef15047500b8be8175a28d838feb10cd571f80 | 663,886 |
def get_missed_total(missed_and_total):
"""Parses a string in the format of ``m of n`` where m is the missed, n is the total.
:param missed_and_total: input string to parse
:return: a tuple of two elements, the first element is the missed count, the second element is the total count
"""
parts = missed_and_total.split(" of ")
if len(parts) == 2:
return parts[0], parts[1]
else:
return () | e9eed38b53d76e6e569e1e0d472137db42a07a16 | 429,663 |
def sort_tuple_list(l, tup_idx=0):
"""Return the list of tuples sorted by the index passed as argument."""
return sorted(l, key=lambda tup: tup[tup_idx]) | 76b41cfb1e46516e196a4e8e29d41aaa0f6a217a | 146,872 |
import base64
def encode_image(filename):
"""
    Encode an image file as a base64 data-URI string.
    :param filename: str, path to a local image file
    :return: str, the encoded string
eg:
src="data:image/gif;base64,R0lGODlhMwAxAIAAAAAAAP///
yH5BAAAAAAALAAAAAAzADEAAAK8jI+pBr0PowytzotTtbm/DTqQ6C3hGX
ElcraA9jIr66ozVpM3nseUvYP1UEHF0FUUHkNJxhLZfEJNvol06tzwrgd
LbXsFZYmSMPnHLB+zNJFbq15+SOf50+6rG7lKOjwV1ibGdhHYRVYVJ9Wn
k2HWtLdIWMSH9lfyODZoZTb4xdnpxQSEF9oyOWIqp6gaI9pI1Qo7BijbF
ZkoaAtEeiiLeKn72xM7vMZofJy8zJys2UxsCT3kO229LH1tXAAAOw=="
"""
    # 1. Read the file
ext = filename.split(".")[-1]
with open(filename, "rb") as f:
img = f.read()
    # 2. Base64-encode the bytes
data = base64.b64encode(img).decode()
    # 3. Assemble the encoded image string
src = "data:image/{ext};base64,{data}".format(ext=ext, data=data)
return src | d30c5ad11fe894157b5a26f850eb062f547de3a2 | 321,752 |
import random
import string
def _generate_content(cols, lines):
"""Generates a random file content string."""
content = ""
for _ in range(1, lines):
for _ in range(1, cols):
content += random.choice(string.ascii_letters)
content += '\n'
return content | a52a90c9f13b573fa3fd42f15f297f13a7df99a4 | 398,436 |
def yn_prompt(question: str, yes=None, no=None) -> bool:
"""Ask yes-no question.
Args:
question: Description of the prompt
yes: List of strings interpreted as yes
no: List of strings interpreted as no
Returns:
True if yes, False if no.
"""
if not yes:
yes = ["yes", "ye", "y"]
if not no:
no = ["no", "n"]
prompt = question
if not prompt.endswith(" "):
prompt += " "
prompt += "[{} or {}] ".format("/".join(yes), "/".join(no))
print(prompt, end="")
while True:
choice = input().lower().strip()
if choice in yes:
return True
elif choice in no:
return False
else:
print(
"Please respond with '{}' or '{}': ".format(
"/".join(yes), "/".join(no)
),
end="",
) | 3338493b42b118d9aacadff70dab3738643b538a | 27,341 |
import requests
from bs4 import BeautifulSoup
def get_hashtags_from_URL(URL):
"""Returns a list of hashtags from a given Tumblr-URL
Parameters:
URL (string): URL of meme-tagged page
Returns:
List of hashtags
"""
results = []
page = requests.get(URL)
imageList = BeautifulSoup(page.content, 'html.parser')
for link in imageList.findAll('a', {'class': 'post_tag'}):
hashtag = link.text
results.append(hashtag)
return results | 6cf20e03e641c0a9970c726794adcb55df622684 | 424,500 |
import math
def select_ghostdag_k(x, delta):
"""
    Selects the k parameter of the GHOSTDAG protocol such that anticones larger than k will be created
with probability less than 'delta' (follows eq. 1 from section 4.2 of the PHANTOM paper)
:param x: Expected to be 2Dλ where D is the maximal network delay and λ is the block mining rate
:param delta: An upper bound for the probability of anticones larger than k
:return: The minimal k such that the above conditions hold
"""
k_hat, sigma, fraction, exp = 0, 0, 1, math.e ** (-x)
while True:
sigma += exp * fraction
if 1 - sigma < delta:
return k_hat
k_hat += 1
fraction = fraction * (x / k_hat) | 9731b509e35db024e17d63fbc6ef46235207c3ee | 687,072 |
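A usage sketch with illustrative parameters: for a small expected value x, the anticone bound is met almost immediately.
k = select_ghostdag_k(x=0.1, delta=0.05)
# For a Poisson(0.1) distribution, P(anticone > 1) ~= 0.0047, which is below delta.
assert k == 1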
import math
def f_triangle (side_b, angle_a, angle_b):
"""Сторона треугольника по двум углам и другой стороне.
Теорема синусов:
# http://www-formula.ru/lengthpartiestriangle
a = (b * sin(α))/sin(β)
Где:
b - известная сторона
α - угол противолежащий от стороны a и прилежащий к стороне b.
β - угол противолежащий от стороны b и прилежащий к стороне a.
"""
# math.sin ждёт угла в радианах, поэтому преобразуем градусы с помощью math.radians
side_a = abs(side_b * math.sin(math.radians(angle_a))) / math.sin(math.radians(angle_b))
return side_a | c05b9b45e237a3e59a534080c167df24bd834335 | 165,995 |
import time
def format_time(datetime):
"""Format a datetime object standardly."""
format_string = '%m/%d/%y %I:%M %p'
if hasattr(datetime,'strftime'):
return datetime.strftime(format_string)
else:
return time.strftime(format_string,time.gmtime(datetime)) | c5fbad45c3882793050583857b70660352b04101 | 393,994 |
import base64
def parse_authorization_header(auth_header):
""" Parse auth header and return (login, password) """
auth_str = auth_header.split(' ')[1] # Remove 'Basic ' part
auth_str = base64.b64decode(auth_str).decode() # Decode from base64
auth_str = auth_str.split(':')
return auth_str[0], auth_str[1] | cc2db762ddf7b4ce0669a81f24bce9517785350a | 49,516 |
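A quick check of the header parser above; the token here is simply the Base64 encoding of the illustrative pair "user:pass":
header = 'Basic dXNlcjpwYXNz'   # 'dXNlcjpwYXNz' == base64("user:pass")
assert parse_authorization_header(header) == ('user', 'pass')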
def calc_lf_improvement(
param_lf_improved_cy,
loadfactor_yd_cy
):
"""Calculate load factor improvement
Arguments
---------
lf_improvement_ey : dict
Load factor improvement until end year provided as decimal (1 == 100%)
loadfactor_yd_cy : float
Yd Load factor of current year
Returns
-------
lf_improved_cy : str
Improved load factor of current year
peak_shift_crit : bool
True: Peak is shifted, False: Peak isn't shifed
"""
# Add load factor improvement to current year load factor
lf_improved_cy = loadfactor_yd_cy + param_lf_improved_cy
# Where load factor larger than zero, set to 1
lf_improved_cy[lf_improved_cy > 1] = 1
return lf_improved_cy | 084b12ca8ca9a80c0a8c018e3346001ea67e2962 | 490,906 |
import logging
def download_exps_file(dfu, exps_fp, exps_ref):
"""We download an experiments file
Args:
dfu: DataFileUtil class object
exps_fp: (str) Path to download exps file to
exps_ref: (str) Reference to file
"""
GetObjectsParams = {
'object_refs': [exps_ref]
}
# We get the handle id
expsFileObjectData = dfu.get_objects(GetObjectsParams)['data'][0]['data']
logging.info("DFU Experiment File Get objects results:")
logging.info(expsFileObjectData)
expsfile_handle = expsFileObjectData['expsfile']
# Set params for shock to file
ShockToFileParams = {
"handle_id": expsfile_handle,
"file_path": exps_fp,
"unpack": "uncompress"
}
ShockToFileOutput = dfu.shock_to_file(ShockToFileParams)
logging.info(ShockToFileOutput)
exps_fp = ShockToFileOutput['file_path']
# expsfile is at location "expsfile_path"
return exps_fp | 7ed0417605cfbf4a3cd55f8e3c2e3e5f584e231a | 330,576 |
def __parseAuthHeader(string):
"""
Returns the token given an input of 'Bearer <token>'
"""
components = string.split(' ')
if len(components) != 2:
raise ValueError('Invalid authorization header.')
return components[1] | df88b6831da39bbbcfd731a2d7253a12b8d36ed5 | 551,443 |
def int_or_none(n):
"""Returns input n cast as int, or None if input is none.
This is used in parsing sample information from image filenames.
Parameters
----------
n : any value castable as int, None
The input value.
Returns
-------
The input value cast an int, or None.
Examples
--------
>>> int_or_none(3.0)
3
>>> int_or_none(None)
"""
return int(n) if n is not None else None | f4bb9372d871718efe2f828d37abab0d4569e6f9 | 364,384 |
def final_output(image, backround, palette):
"""
Makes the final output Image based on image, backround, palette
"""
# Create an offset and paste original Image within.
offset = ((backround.width - image.width) // 2, (backround.height - image.height) // 8)
backround.paste(image, offset)
# Create an offset and paste palette below original Image.
offset = ((backround.width - palette.width) // 2, ((backround.height - palette.height) // 8) + image.height)
backround.paste(palette, offset)
return backround | 8489d89887f0e8f22489af6fd33b5074a1c72774 | 175,071 |
import types
def is_method_of_class(mthd, cls):
"""
Returns True if function 'mthd' is a method of class 'cls'
:type mthd: function
:type cls: class
:return: True if 'mthd' is a method of class 'cls', and False otherwise.
:rtype: bool
"""
try:
class_method = getattr(cls, mthd.__name__)
except AttributeError:
return False
if isinstance(mthd, types.MethodType):
return class_method == mthd.__func__
return class_method == mthd | 91a1e78b70d6607390f153f3f268ad9613118dd6 | 582,932 |
def connection_mock_factory(mocker):
"""Factory of DB connection mocks."""
def factory(vendor, fetch_one_result=None):
connection_mock = mocker.MagicMock(vendor=vendor)
cursor_mock = connection_mock.cursor.return_value
cursor_mock = cursor_mock.__enter__.return_value # noqa: WPS609
cursor_mock.fetchone.return_value = fetch_one_result
return connection_mock
return factory | ce559c55024e3a2db181ca50509a264888036b16 | 654,172 |
def dict_get(inp, *subfields):
"""Find the value of the provided sequence of keys in the dictionary,
if available.
Retrieve the value of the dictionary in a sequence of keys if it is
available. Otherwise it provides as default value the last item of the
sequence ``subfields``.
Args:
inp (dict): the top-level dictionary. Unchanged on exit.
subfields (str,object): keys, ordered by level, that have to be
retrieved from topmost level of ``inp``. The last item correspond
to the value to be set.
Returns:
The value provided by the sequence of subfields if available,
otherwise the default value given as the last item of the ``subfields``
sequence.
"""
if len(subfields) <= 1:
raise ValueError('invalid subfields, the sequence should be longer than one item as the last one is the value to be given')
tmp = inp
keys = subfields[:-1]
val = subfields[-1]
for key in keys:
tmp = tmp.get(key)
if tmp is None:
return val
return tmp | 30ee48214dae147e4b38aca870d05aed0285dba5 | 607,620 |
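Two illustrative lookups against a small nested dict; the last positional argument acts as the default:
cfg = {'db': {'host': 'localhost', 'port': 5432}}
assert dict_get(cfg, 'db', 'port', 3306) == 5432   # path exists
assert dict_get(cfg, 'cache', 'ttl', 60) == 60     # path missing, default returned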
def _b_min_ ( bp ) :
"""Get min values for bernstein polynomials
>>> p = ...
>>> mn = p.min()
The value is such that: mn <= p(x) for all x_min<=x<x_max
"""
b = bp.bernstein()
pars = b .pars()
return min ( pars ) | fdcd68ddb557ff3ee30afe392141e3134b6342d0 | 587,408 |
def quoted(s):
""" Given a string, return a quoted string as per RFC 3501, section 9."""
if isinstance(s, str):
return '"' + s.replace('\\', '\\\\').replace('"', '\\"') + '"'
else:
return b'"' + s.replace(b'\\', b'\\\\').replace(b'"', b'\\"') + b'"' | 198230e3810adc7c93cc4962b6ffea3b88a8c8ba | 117,397 |
import requests
def get_record(
airtable_key: str,
base_id: str,
table_name: str = "submissions",
record_id: str = "",
):
"""
Get record from Airtable with a given record ID `record_id`
"""
if record_id != "":
request_url = f"https://api.airtable.com/v0/{base_id}/{table_name}/{record_id}"
headers = {
"Authorization": f"Bearer {airtable_key}",
}
output = requests.get(request_url, headers=headers)
return output
else:
return None | 7b0312b7fe6759deb9d9492c15cace6d8d031387 | 83,750 |
def result(df):
"""Returns in-memory dataframe or series, getting result if necessary from dask
"""
if hasattr(df, 'compute'):
return df.compute()
elif hasattr(df, 'result'):
return df.result()
else:
return df | 8183586f937f6e704cb8471af784ad68e54b1dbb | 348,948 |
def parse_fasta_file(path: str) -> str:
"""Parses contents of an input text file in FASTA format.
Args:
path: Path to input text file.
Returns:
The sequence.
"""
file = open(path, "r")
contents = file.readlines()
text = "".join(contents[1:]).replace("\n", "")
file.close()
return text | 890e45d2cd732cd708f6f0dead9072492be0d384 | 595,185 |
import time
import calendar
def amazon2unixtime(amazon_timestr):
"""Given a string in Amazon-time format (i.e. ISOFORMAT),
return the Unix time in seconds (i.e. as returned by time(2))
"""
try:
tm_utc = time.strptime(amazon_timestr, "%Y-%m-%dT%H:%M:%S.%fZ")
except ValueError:
tm_utc = None
if tm_utc is None:
tm_utc = time.strptime(amazon_timestr, "%Y-%m-%dT%H:%M:%SZ")
time_sec = calendar.timegm(tm_utc)
return time_sec | 070142a7350ced30ed201772c20336e4373c2410 | 294,290 |
def lower_first_letter(name):
"""Return name with first letter lowercased."""
if not name:
return ''
return name[0].lower() + name[1:] | 52c7ece3e4ddd15f9641d794547b909c6397732b | 164,185 |
def first_translatable(store):
"""returns first translatable unit, skipping header if present"""
if store.units[0].isheader() and len(store.units) > 1:
return store.units[1]
else:
return store.units[0] | 483380882efe037e89098275cabf870f9302f61e | 626,074 |
def _rindex(seq, element):
"""Like list.index, but gives the index of the *last* occurrence."""
seq = list(reversed(seq))
reversed_index = seq.index(element)
return len(seq) - 1 - reversed_index | f9834a0860c5c2fa107a1af39be91a2c918cbf51 | 684,717 |
def angle_calculator(angle: int) -> float:
"""Calculate the duty cycle of the servo motor from an angle.
Parameters
----------
angle : int
The angle of the camera between 0° to 180°
If the value is out of range, a modulo of 180° will be apply
Returns
-------
`float`
The Duty cycle for the servo motor
"""
return 1.0 / 18.0 * (angle % 180) + 2 | 7a5da5b157d552e2fb33690772fbb7bf4680e0db | 254,874 |
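A couple of spot checks of the duty-cycle formula above (illustrative angles):
assert angle_calculator(0) == 2.0                       # 1/18 * 0 + 2
assert abs(angle_calculator(90) - 7.0) < 1e-9           # 1/18 * 90 + 2
assert angle_calculator(270) == angle_calculator(90)    # out-of-range angles wrap via modulo 180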
def find_named(ui, name):
"""
Go through the hierarchy of UI elements and find the elements that have the same name
"""
results = set()
if hasattr(ui, 'name'):
if ui.name == name:
results.add(ui)
if hasattr(ui, 'children'):
for c in ui.children:
            results_children = find_named(c, name)
            results.update(results_children)
    if hasattr(ui, 'tabs'):
        for c in ui.tabs:
            results_children = find_named(c, name)
            results.update(results_children)
    if hasattr(ui, 'child'):
        results_children = find_named(ui.child, name)
        results.update(results_children)
return results | 921a81f0b7053a5c54d08542f9eabd960db33370 | 156,671 |
def swap(alist: list, l:int, r:int, pivot)->tuple:
""" Prohodi prvky v `alist` tak, aby na zacatku byly mensi nez pivot a na konci vetsi nez pivot
Vraci, kdyz se indexy prohodi, tedy `i` je alespoň tak vpravo jako `j`.
"""
i = l
j = r
while i < j:
# Posouvame levou hranici doprava
while alist[i] < pivot:
i+=1
# Posouvame pravou hranici doleva
while alist[j] > pivot:
j-=1
# Pokud se indexy prekrizily, koncime
if i > j:
return i,j
# Jinak mame 2 kandidaty na prohozeni
# Prohodime je
tmp = alist[i]
alist[i] = alist[j]
alist[j] = tmp
# A posuneme indexy, protoze po prohozeni jsou hotovy
i+=1
j-=1
return i,j | 62525fc5a84bd8f3370930d1d0ad8dd608ec1bb7 | 335,617 |
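A small usage sketch of the partition step above (illustrative values): after the call, entries left of i are at most the pivot and entries right of j are at least the pivot.
nums = [3, 8, 1, 5, 2]
i, j = swap(nums, 0, len(nums) - 1, pivot=3)
assert all(x <= 3 for x in nums[:i])
assert all(x >= 3 for x in nums[j + 1:])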
def last(xs):
"""
Get the last element in a sequence
Example:
assert last([1,2,3]) == 3
"""
return xs[-1] | 19d5c59186d44d6ac2299e756f3e218de66e9abb | 488,924 |
from typing import Optional
from datetime import datetime
def datetime_equals(dt1: Optional[datetime], dt2: Optional[datetime]) -> bool:
"""Compare equality of two datetimes, ignoring microseconds."""
if not dt1 and not dt2:
return True
if not (dt1 and dt2):
return False
return dt1.replace(microsecond=0) == dt2.replace(microsecond=0) | ea84b9187c1c87e35a29656331ffd28a9d118103 | 512,929 |
def is_namedtuple(*objs) -> bool:
"""
Takes one or more objects as positional arguments, and returns ``True`` if ALL passed objects
are namedtuple instances
**Example usage**
First, create or obtain one or more NamedTuple objects::
>>> from collections import namedtuple
>>> Point, Person = namedtuple('Point', 'x y'), namedtuple('Person', 'first_name last_name')
>>> pt1, pt2 = Point(1.0, 5.0), Point(2.5, 1.5)
>>> john = Person('John', 'Doe')
We'll also create a ``tuple``, ``dict``, and ``str`` to show they're detected as invalid::
>>> normal_tuple, tst_dict, tst_str = (1, 2, 3,), dict(hello='world'), "hello world"
First we'll call :func:`.is_namedtuple` with our Person NamedTuple object ``john``::
>>> is_namedtuple(john)
True
As expected, the function shows ``john`` is in-fact a named tuple.
Now let's try it with our two Point named tuple's ``pt1`` and ``pt2``, plus our Person named tuple ``john``.
>>> is_namedtuple(pt1, john, pt2)
True
Since all three arguments were named tuples (even though pt1/pt2 and john are different types), the function
returns ``True``.
Now we'll test with a few objects that clearly aren't named tuple's::
>>> is_namedtuple(tst_str) # Strings aren't named tuples.
False
>>> is_namedtuple(normal_tuple) # A plain bracket tuple is not a named tuple.
False
>>> is_namedtuple(john, tst_dict) # ``john`` is a named tuple, but a dict isn't, thus False is returned.
False
Original source: https://stackoverflow.com/a/2166841
:param Any objs: The objects (as positional args) to check whether they are a NamedTuple
:return bool is_namedtuple: ``True`` if all passed ``objs`` are named tuples.
"""
if len(objs) == 0: raise AttributeError("is_namedtuple expects at least one argument")
for x in objs:
t = type(x)
b = t.__bases__
if tuple not in b: return False
f = getattr(t, '_fields', None)
if not isinstance(f, tuple): return False
if not all(type(n) == str for n in f): return False
return True | 3c29da4538833a42398b47afdba34c92289496c3 | 445,283 |
def sanitize_package_field(field):
""" Sanitize package field
Args:
field (str): package field.
Returns:
str: sanitized package field.
"""
return field.replace(" ", "") | 25fdb9e720bd6675801091933daaed0d54eee361 | 445,777 |
import pytz
def AdaptReadableDatetime(date_obj):
"""Adapts a datetime.datetime object to its ISO-8601 date/time notation."""
try:
date_obj = date_obj.astimezone(pytz.utc)
except ValueError:
pass # naive datetime object
return date_obj.isoformat() | feceafb58995001acdeb2285fa5782bec7cc756d | 10,569 |
import gzip
def _get_fh(path, mode='r'):
"""Return a file handle for given path and attempt to uncompress/compress
files ending in '.gz'"""
if path.endswith('.gz'):
fh = gzip.open(path, mode=mode)
else:
fh = open(path, mode=mode)
return fh | 10373709dfde716ddd0474f1c6fde6383a000fd9 | 163,583 |
def field_type(field):
"""
Retrieves the type of a given field.
:param DjangoField field: A reference to the given field.
:rtype: str
:returns: The type of the field.
"""
return field.get_internal_type() | b10b657f11a857224784f88e17f021333288a567 | 632,556 |
def toRGB(hex_color_str):
"""
    Transform a hex color string into a tuple of floats in [0, 1].
e.g. r,g,b = toRGB('0xFFFFFF')
"""
return int(hex_color_str[2:4],16)/255., int(hex_color_str[4:6],16)/255., int(hex_color_str[6:8],16)/255. | 5821d5f0d42d1a53982eb81739fe81e47d75fa23 | 37,855 |
def compute_figure_score(target, result):
""" Compute the score corresponding to the found result, knowing that
target was supposed to be found
"""
if target == result:
return 10
elif abs(target - result) == 1:
return 8
elif abs(target - result) == 2:
return 7
elif abs(target - result) == 3:
return 6
elif abs(target - result) == 4:
return 5
elif 5 <= abs(target - result) <= 6:
return 4
elif 7 <= abs(target - result) <= 8:
return 3
elif 9 <= abs(target - result) <= 10:
return 2
elif abs(target - result) <= 100:
return 1
else:
return 0 | 767c2ff57f304d759aeef3fda6443092bf5d8b1d | 378,674 |
def selection_sort(nums: list[float]) -> list[float]:
"""Sorts a list in-place using the Selection Sort approach.
Time complexity: O(n^2) for best, worst, and average.
Space complexity: total O(n) auxiliary O(1).
Args:
nums: A list of numbers.
Returns:
The sorted list.
"""
for pivot in range(0, len(nums) - 1):
smallest = pivot
# Find smallest value, then swap it with the pivot
for target in range(pivot + 1, len(nums)):
if nums[target] < nums[smallest]:
smallest = target
nums[pivot], nums[smallest] = nums[smallest], nums[pivot]
return nums | c96c12e7361e6b617528b9cc632b4003963ea8ab | 34,361 |
def snappi_api_serv_port(duthosts, rand_one_dut_hostname):
"""
This fixture returns the TCP Port of the Snappi API server.
Args:
duthost (pytest fixture): The duthost fixture.
Returns:
snappi API server port.
"""
duthost = duthosts[rand_one_dut_hostname]
return (duthost.host.options['variable_manager'].
_hostvars[duthost.hostname]['secret_group_vars']
['snappi_api_server']['rest_port']) | 42cd8a6016d470c57d4319c2a9508ad0df943b48 | 380,046 |
def jordan_cell_sizes(J):
"""Return a tuple of Jordan cell sizes from a matrix J in Jordan
normal form.
Examples:
>>> jordan_cell_sizes(matrix([[1,1,0,0],[0,1,0,0],[0,0,1,1],[0,0,0,1]]))
(2, 2)
>>> jordan_cell_sizes(matrix([[1,1,0,0],[0,1,1,0],[0,0,1,0],[0,0,0,1]]))
(3, 1)
>>> jordan_cell_sizes(zero_matrix(5,5))
(1, 1, 1, 1, 1)
"""
assert J.is_square()
sizes = []
n = 1
for i in range(J.nrows() - 1):
if J[i, i+1].is_zero():
sizes.append(n)
n = 1
else:
n += 1
sizes.append(n)
assert sum(sizes) == J.nrows()
return tuple(sizes) | a9442af8299f294126ec562a22adf19820df6c8f | 239,164 |
def ALL_ELEMENTS_TRUE(*expressions):
"""
Evaluates an array as a set and returns true if no element in the array is false. Otherwise, returns false.
An empty array returns true.
https://docs.mongodb.com/manual/reference/operator/aggregation/allElementsTrue/
for more details
:param expressions: The arrays (expressions)
:return: Aggregation operator
"""
return {'$allElementsTrue': list(expressions)} | 33c343c42bc8bcdf9dfdddf643c481fd85f4a784 | 43,670 |
def calc_sales_price(price):
"""
    Calculate the price after a 10% discount (90% of the original price).
    :param price: price before the discount
    :return: price after the discount
"""
if price < 0:
raise ValueError("price should not < 0!")
sales = 0.9 * price
return sales | 4b3b7244115e22ba4d02914167db565db1b3b71a | 619,834 |
import re
def is_valid_ecr_repo(repo_name: str) -> bool:
"""Returns true if repo_name is a ECR repository, it expectes
a domain part (*.amazonaws.com) and a repository part (/images/container-x/...)."""
rex = re.compile(
r"^[0-9]{10,}\.dkr\.ecr\.[a-z]{2}\-[a-z]+\-[0-9]+\.amazonaws\.com/.+"
)
return rex.match(repo_name) is not None | e4e2567afcb18db0aee48eac8c4ba37b8b25a617 | 427,049 |
def calc_corr_matrix(wallet_df):
"""Calculates the Pearson correlation coefficient between cryptocurrency pairs
Args:
wallet_df (DataFrame): Transformed DF containing historical price data for cryptocurrencies
"""
corrmat_df = wallet_df.corr()
return corrmat_df | 7d76f496783f129749888d7913a93919a5570273 | 28,722 |
def filter_dict_nulls(mapping: dict) -> dict:
"""Return a new dict instance whose values are not None."""
return {k: v for k, v in mapping.items() if v is not None} | 671e3749703e7d7db21939ff2906cdfd940acef4 | 666,727 |
import functools
import time
import random
def pause(_func=None, *, delay=1, rand=1):
"""Decorator that pauses for a random-ish interval before executing function
Used to delay the execution of functions that use Selenium by random-ish
intervals to 1. seem more human-like and 2. avoid being blocked a website
for too many requests within a set time frame.
Parameters
----------
_func : function, default None
Function to be wrapped by decorator.
delay : float, default 1
Fixed duration in seconds to pause before function call.
rand : float, default 1
Random time interval to add to fixed delay, generated using
random.uniform(0, rand).
"""
def decorator_pause(func):
@functools.wraps(func)
def wrapper_pause(*args, **kwargs):
time.sleep(delay + random.uniform(0, rand))
return func(*args, **kwargs)
return wrapper_pause
if _func is None:
return decorator_pause
else:
return decorator_pause(_func) | 406f4196a030d6fa7961dfdf9701c81be51ce9f7 | 204,710 |
def preBuildPage(page, context, data):
"""
Called prior to building a page.
:param page: The page about to be built
:param context: The context for this page (you can modify this, but you must return it)
:param data: The raw body for this page (you can modify this).
:returns: Modified (or not) context and data.
"""
return context, data | f44cd838bba8cbb77d2c009bb674e8126e2ab2cf | 245,682 |
import copy
def _prune_keys(in_dict, *keys):
"""remove key(s) and their values if they exist in the dict."""
dict_ = copy.deepcopy(in_dict)
for key in keys:
if dict_.get(key):
dict_.pop(key)
return dict_ | cd759f7aba8c3305c1d66dafbb448223e6a23835 | 81,286 |
def longest_common_substring(s1,s2):
"""Returns longest common substring of two sequences."""
# Make sure that s1 is the shorter one
if len(s1) > len(s2):
# If s1 was shorter, then switch sequence order
s1, s2 = s2, s1
# Start with the entire sequence and shorten
substr_len = len(s1) # Define substring of length of s1
# While substring is longer than zero, iterate.
while substr_len > 0:
# Try all substrings
for i in range(len(s1) - substr_len+1):
# Check if the whole string is in the sequence 2, if not ,then shorten
# and start from beginning
if s1[i:i+substr_len] in s2:
return s1[i:i+substr_len]
substr_len -= 1
# If we haven't returned, there is no common substring
return '' | 2d325a21848df67076825fdd6d396aaaa1339b0b | 315,896 |
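Two quick checks of the substring search above (illustrative strings):
assert longest_common_substring('GATTACA', 'ATTAC') == 'ATTAC'
assert longest_common_substring('abc', 'xyz') == ''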
def format_position(position):
"""formats an ir_pb2.Position to "line:column" form."""
return "{}:{}".format(position.line, position.column) | fd12a0c1e5219c1eba201450dd0189844f40dfc0 | 602,098 |
import sqlite3
def open_database(path):
"""Open SQLite database specified by path, configure connection and
create tables if they don't exist.
"""
db = sqlite3.connect(path, timeout = 600)
# massively improves commit performance, introduces risk of corrupt
# Database on machine/OS crash or power out. Application crashes still
# don't corrupt database.
db.execute('PRAGMA synchronous = OFF;')
#db.set_trace_callback(print)
db.execute("""CREATE TABLE IF NOT EXISTS kernels
-- Contains a unique id for each kernel launch of an application run.
-- 'tag' is the unique name of an application run.
-- One use case is to encode application parameters with it, e.g.
-- 'matmul-n-512' could be used as the tag for a matrix multiply with
-- a problem size of 512.
(id INTEGER PRIMARY KEY, tag TEXT, kernel TEXT, launch INTEGER,
UNIQUE(id, tag, kernel, launch));""")
db.execute("""CREATE TABLE IF NOT EXISTS ctas
-- Contains non-overlapping semi-closed intervals of addresses accessed
-- by a CTA during a kernel launch (identified by 'kernel').
-- E.g. the record 10, 2,3,4, 0, 500,700 indicates that during kernel 10,
-- the cta with index (2, 3, 4) read addresses 500 (inclusive) to
-- 700 (exclusive), or {500 .. 699}.
-- Kind: 0 = loads, 1 = stores, 2 = atomics
(kernel INTEGER, x INTEGER, y INTEGER, z INTEGER, kind INTEGER,
start INTEGER, stop INTEGER,
FOREIGN KEY(kernel) REFERENCES kernels(id) ON DELETE CASCADE);""")
return db | 6d99dc6933b7c9c6226b734a996901f3cb811e1d | 533,767 |