content (stringlengths 39–14.9k) | sha1 (stringlengths 40) | id (int64 0–710k)
---|---|---|
def nearest(last, unvisited, D):
    """Return the index of the node which is closest to 'last'."""
    near = unvisited[0]
    min_dist = D[last, near]
    for i in unvisited[1:]:
        if D[last, i] < min_dist:
            near = i
            min_dist = D[last, i]
    return near
|
8b9ad31fbcba52ee9b9bfbf7c2b0caa78959e6dc
| 695,865 |
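A minimal usage sketch, assuming a NumPy distance matrix (which supports the D[last, i] indexing used above); the matrix values are made-up test data:
import numpy as np

D = np.array([[0, 2, 9],
              [2, 0, 6],
              [9, 6, 0]])
print(nearest(0, [1, 2], D))  # -> 1, since D[0, 1] == 2 beats D[0, 2] == 9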
import hashlib
def md5_file_hash(file_path):
    """
    A method generating the MD5 hash of the provided file.
    :param file_path: path of the file (with extension) that will be opened for reading and hashing
    :return: hex representation of the MD5 hash
    """
    hash_md5 = hashlib.md5()
    with open(file_path, "rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            hash_md5.update(chunk)
    return hash_md5.hexdigest()
|
be8241fd0c254bbfc4d0e1e4330cf36228e0a040
| 695,868 |
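A usage sketch (the file name is hypothetical; the chunked read keeps memory flat even for large files):
with open("example.bin", "wb") as f:
    f.write(b"hello world")
print(md5_file_hash("example.bin"))  # -> 5eb63bbbe01eeed093cb22bb8f5acdc3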
def line_coeff_from_endpoints(p1, p2):
    """Given two points on a line, find the coefficients of the line
    equation ax + by + c = 0"""
    a = p1.y - p2.y
    b = p2.x - p1.x
    c = -a * p1.x - b * p1.y
    return a, b, c
|
fdbed65cbd3dbabd920817367005c8f05c7dadaf
| 695,869 |
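A worked example (Point is a hypothetical helper; any object with .x and .y attributes works): the line through (0, 0) and (2, 2) yields -2x + 2y + 0 = 0, i.e. y = x.
from collections import namedtuple

Point = namedtuple("Point", ["x", "y"])
a, b, c = line_coeff_from_endpoints(Point(0, 0), Point(2, 2))
print(a, b, c)  # -> -2 2 0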
import math
def butterfly(theta: float) -> float:
    """Butterfly function"""
    return math.e ** math.sin(theta) - 2 * math.cos(4 * theta)
|
5d48df3df396fd666770f61e841b3dee2e967ab9
| 695,870 |
from typing import List
from typing import Optional
from typing import Set
def create_exp_name(flags_and_values: List[str],
                    flag_skip_set: Optional[Set[str]] = None,
                    skip_paths: bool = False) -> str:
    """
    Creates an experiment name based on the command line arguments (besides dataset and paths).
    Example: "--dataset multinews --sinkhorn" --> "dataset=multinews_sinkhorn"
    :param flags_and_values: The command line flags and their values.
    :param flag_skip_set: A set of flags to skip.
    :param skip_paths: Whether to skip paths (i.e. any flag with a value with a "/" in it).
    :return: An experiment name created based on the command line arguments.
    """
    # Remove "-" from flags
    flag_skip_set = flag_skip_set or set()
    flag_skip_set = {flag.lstrip('-') for flag in flag_skip_set}
    # Extract flags and values, skipping where necessary
    args = {}
    current_flag = None
    for flag_or_value in flags_and_values:
        if flag_or_value.startswith('-'):
            flag = flag_or_value.lstrip('-')
            current_flag = flag if flag not in flag_skip_set else None
            if current_flag is not None:
                args[current_flag] = []
        elif current_flag is not None:
            args[current_flag].append(flag_or_value)
    # Handle paths
    if skip_paths:
        for key, values in list(args.items()):
            if any('/' in value for value in values):
                del args[key]
    # Handle boolean flags
    for key, values in args.items():
        if len(values) == 0:
            values.append('True')
    exp_name = '_'.join(f'{key}={"_".join(values)}' for key, values in args.items())
    if exp_name == '':
        exp_name = 'default'
    return exp_name
|
e8c6c114a62146a5a3200e88315b1921c28b258f
| 695,871 |
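A usage sketch tracing the code above (note that bare boolean flags render with "=True" appended, so the output differs slightly from the docstring example):
argv = ["--dataset", "multinews", "--sinkhorn"]
print(create_exp_name(argv))
# -> dataset=multinews_sinkhorn=True
print(create_exp_name(argv, flag_skip_set={"--dataset"}))
# -> sinkhorn=True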
def bisect_left(func, val, low, high):
    """
    Like bisect.bisect_left, but works on functions.
    Return the index where to insert item x in list a, assuming a is sorted.
    The return value i is such that all e in a[:i] have e < x, and all e in a[i:] have e >= x. So if x already appears
    in the list, a.insert(x) will insert just before the leftmost x already there. Optional args lo (default 0) and hi
    (default len(a)) bound the slice of a to be searched.
    >>> bisect_left([1,2,3,3,4].__getitem__, 3, 0, 4)
    2
    >>> bisect_left([1,2,3,3,4].__getitem__, 4, 0, 4)
    4
    >>> bisect_left([1,2,3,6,8].__getitem__, 4, 0, 4)
    3
    """
    a = low
    b = high
    while b > a:
        guess = (a + b) // 2
        if val > func(guess):
            a = guess + 1
        else:
            b = guess
    return a
|
de7b72585657c183176b4cd1c6b5301e0f837a01
| 695,873 |
def is_shared(resource):
    """Checks if a resource is shared
    """
    return resource['object'].get('shared', False)
|
dd54a631cff0e79b00942ca4b1e43b1fb2a70c04
| 695,874 |
def h1(curr_state, goal_dict):
    """
    Heuristic for calculating the distance of goal state using Manhattan distance
    Parameters:
        curr_state(np.ndarray): A 3x3 numpy array with each cell containing unique elements
        goal_dict(dict[int, tuple[int, int]]): A mapping of cell contents to a tuple
            containing its indices
    Returns:
        h(int): Heuristic value
    """
    h = 0
    for i in range(curr_state.shape[0]):
        for j in range(curr_state.shape[1]):
            value = curr_state[i][j]
            x = goal_dict[value][0]
            y = goal_dict[value][1]
            h += abs(i - x) + abs(j - y)
    return h
|
e7d353dcfe5dacee5319dc7b8c4fbae43294acd3
| 695,879 |
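A worked 8-puzzle sketch (state and goal positions are made-up test data; note this heuristic also counts the blank tile, 0, when it is displaced):
import numpy as np

state = np.array([[1, 2, 3],
                  [4, 5, 6],
                  [8, 7, 0]])
goal = {1: (0, 0), 2: (0, 1), 3: (0, 2),
        4: (1, 0), 5: (1, 1), 6: (1, 2),
        7: (2, 0), 8: (2, 1), 0: (2, 2)}
print(h1(state, goal))  # -> 2: tiles 7 and 8 are each one move from home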
import importlib
def plugin_import(plugin):
    """Import a plugin from string.
    :param plugin: Python import in dot notation.
    :type plugin: String
    :returns: Object
    """
    return importlib.import_module(plugin, package="directord")
|
147c1c053eda10935c1f597cbde1a2d71451f843
| 695,880 |
def Edges_Exist_Via(G, p, q):
    """Helper for del_gnfa_states
    ---
    If G has a direct edge p--edgelab-->q, return edgelab.
    Else return "NOEDGE". We maintain the invariant of
    at-most one edge such as edgelab for any p,q in the GNFA.
    """
    edges = [edge
             for ((x, edge), States) in G["Delta"].items()
             if x == p and q in States]
    if len(edges) == 0:
        return "NOEDGE"
    else:
        # The invariant guarantees at most one matching edge label
        return edges[0]
|
53af339eb5317321a8f125a289215bff89a95b5d
| 695,887 |
import csv
import requests
def get_words(min_length=5, max_length=5, capitalization='lower', use_file=''):
    """Gets a list of English words from Instructables of a desired length.
    Args:
        min_length (int, optional): Keep words of this length or longer. Defaults to 5.
        max_length (int, optional): Keep words of this length or shorter. Defaults to 5.
        capitalization (string, optional): Capitalization rules of the word list to return (lower, upper, title). Defaults to lower.
        use_file (string, optional): If non-empty, read a local copy from 'Docs/{max_length}Words.csv' instead of downloading.
    Returns:
        List: returns a list of words meeting the length and capitalization requirements.
    """
    WordList = []
    if len(use_file) > 0:
        with open(f'Docs/{max_length}Words.csv', newline='') as f:
            for row in csv.reader(f):
                WordList.append(row[0])
    else:
        InitialList = requests.get("https://content.instructables.com/ORIG/FLU/YE8L/H82UHPR8/FLUYE8LH82UHPR8.txt").text
        InitialList = str.splitlines(InitialList)
        for word in InitialList:
            if len(word) >= min_length and len(word) <= max_length:
                if capitalization.lower() == 'upper':
                    WordList.append(word.upper())
                elif capitalization.lower() == 'title':
                    WordList.append(word.title())
                else:
                    WordList.append(word.lower())
    return WordList
|
ceeacd7772ced20c86d3a66cb966cb25ea286d85
| 695,888 |
import hashlib
import binascii
def multipart_etag(digests):
    """
    Computes etag for multipart uploads
    :type digests: list of hex-encoded md5 sums (string)
    :param digests: The list of digests for each individual chunk.
    :rtype: string
    :returns: The etag computed from the individual chunks.
    """
    etag = hashlib.md5()
    count = 0
    for dig in digests:
        count += 1
        etag.update(binascii.a2b_hex(dig))
    return f"'{etag.hexdigest()}-{count}'"
|
1d6d13d3f28cdbae6a56fe903329bd8f91b53000
| 695,889 |
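A usage sketch: S3-style multipart etags hash the concatenation of the per-part MD5 digests and append the part count. The chunk data here is made up.
import hashlib

parts = [b"chunk-one", b"chunk-two"]
digests = [hashlib.md5(p).hexdigest() for p in parts]
print(multipart_etag(digests))  # e.g. '<32 hex chars>-2'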
def Tsorties_echangeur(Te1, Te2, mf1, mf2, Cp1, Cp2, eff):
    """
    Computes the outlet temperatures of a heat exchanger.
    Parameters
    ----------
    Te1 : inlet temperature of the hot fluid
    Te2 : inlet temperature of the cold fluid
    mf1 : mass flow rate of the hot fluid
    mf2 : mass flow rate of the cold fluid
    Cp1 : specific heat capacity of the hot fluid
    Cp2 : specific heat capacity of the cold fluid
    eff : effectiveness of the exchanger
    Returns
    -------
    Ts1 : outlet temperature of the hot fluid
    Ts2 : outlet temperature of the cold fluid
    """
    if (mf1 * Cp1) <= (mf2 * Cp2):
        Ts1 = Te1 - eff * (Te1 - Te2)
        Ts2 = Te2 + (mf1 * Cp1 / (mf2 * Cp2)) * (Te1 - Ts1)
    else:
        Ts2 = Te2 + eff * (Te1 - Te2)
        Ts1 = Te1 + (mf2 * Cp2 / (mf1 * Cp1)) * (Te2 - Ts2)
    return Ts1, Ts2
|
ebae4e1f99bc0eea1941e85dbd5087f825bb5105
| 695,890 |
import json
def load_schema(filename):
    """Load schema from a JSON file.
    Parameters
    ----------
    filename : str
        The path to your file.
    Returns
    -------
    schema : dict
        A dictionary containing the schema for your table.
    """
    with open(filename) as f:
        schema = json.load(f)
    return schema
|
55b475a4cc7bfb184c0f2db3e41d6e3408b888e6
| 695,891 |
import inspect
def get_linenumbers(functions, module, searchstr='def {}(image):\n'):
    """Returns a dictionary which maps function names to line numbers.
    Args:
        functions: a list of function names
        module: the module to look the functions up
        searchstr: the string to search for
    Returns:
        A dictionary with functions as keys and their line numbers as values.
    """
    lines = inspect.getsourcelines(module)[0]
    line_numbers = {}
    for function in functions:
        try:
            line_numbers[function] = lines.index(
                searchstr.format(function)) + 1
        except ValueError:
            print('Cannot find `{}`'.format(searchstr.format(function)))
            line_numbers[function] = 0
    return line_numbers
|
b2cc1bc104cdae6bbbfb3680ac940540396db08a
| 695,893 |
def get_lrs(optimizer):
    """Return the learning-rates in optimizer's parameter groups."""
    return [pg['lr'] for pg in optimizer.param_groups]
|
6e32c90e42321d070cc1f444a6f117b72ad59adb
| 695,895 |
def highlight_min(s):
    """
    highlight the minimum in a Pandas dataframe series yellow
    """
    is_min = s == s.min()
    return ["background-color: yellow" if v else "" for v in is_min]
|
ee74a19721fc7312744847b0c4d6de9255f312b0
| 695,896 |
import torch
def Rotz(t):
    """
    Rotation about the z-axis.
    np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])
    -- input t shape B x 1
    -- return B x 3 x 3
    """
    B = t.shape[0]
    Rz = torch.zeros((B, 9, 1), dtype=torch.float)
    # flatten to shape (B,) so the element assignments below broadcast correctly
    c = torch.cos(t).view(B)
    s = torch.sin(t).view(B)
    ones = torch.ones(B)
    Rz[:, 0, 0] = c
    Rz[:, 1, 0] = -s
    Rz[:, 3, 0] = s
    Rz[:, 4, 0] = c
    Rz[:, 8, 0] = ones
    Rz = Rz.reshape(B, 3, 3)
    return Rz
|
6a078e033f6ad6b497da05999433ef745c76d2ed
| 695,897 |
import tokenize
import codecs
def read_pyfile(filename):
    """Read and return the contents of a Python source file (as a
    string), taking into account the file encoding."""
    with open(filename, "rb") as pyfile:
        encoding = tokenize.detect_encoding(pyfile.readline)[0]
    with codecs.open(filename, "r", encoding=encoding) as pyfile:
        source = pyfile.read()
    return source
|
a6fce0f2acdb5156872ef572ab7e77ba6507873d
| 695,901 |
def parse_version_string(version):
    """
    Returns a tuple containing the major, minor, revision integers
    """
    nums = version.split(".")
    return int(nums[0]), int(nums[1]), int(nums[2])
|
a8d50804ebe82541e57c83a813843d255b3b6fbb
| 695,902 |
def get_building_coords(town):
    """
    Generates a dictionary of all (x,y) co-ordinates that are within buildings
    in the town, where the keys are the buildings' numbers (or "pub" for the
    pub) and the values are lists of co-ordinates associated with the building.
    Data must have 25 houses (numbered as multiples of 10 from 10 to 250) and
    1 pub.
    Parameters
    ----------
    town : list
        List (cols) of lists (rows) representing raster data of the town.
    Returns
    -------
    building_coords : dict
        Keys are the buildings' numbers (or "pub" for the pub) and the values
        are lists of all co-ordinates that are within the building.
    """
    # Create empty dictionary to collect building co-ordinates
    building_coords = {}
    # Create list of co-ordinates for each building in the town
    # Dictionary key is either "pub" or building number and value is list of
    # coords
    for n in [1, *range(10, 260, 10)]:
        if n == 1:
            building_name = "pub"
        else:
            building_name = n
        building_coords[building_name] = []
        for y in range(len(town)):
            for x in range(len(town[y])):
                if town[y][x] == n:
                    building_coords[building_name].append((x, y))
    return building_coords

# Unused snippet kept from the original source:
# Make pub clearer for plotting
# for i in range(len(town)):
#     for j in range(len(town[i])):
#         if town[i][j] == 1:
#             town[i][j] = -50
|
085c95d40d9d84569180155f5b0b150334dbc526
| 695,908 |
def promptConfirm(message: str) -> bool:
    """ Prompts confirming a message. Defaults to "no" (False).
    :param message: Message to prompt.
    :return: Whether the prompt was confirmed.
    """
    result = input(message + " (y/N): ").strip().lower()
    return result == "y" or result == "yes"
|
4d0ba40150231939571915676740a1e6b5857f0d
| 695,912 |
def _check_state(monomer, site, state):
    """ Check a monomer site allows the specified state """
    if state not in monomer.site_states[site]:
        args = state, monomer.name, site, monomer.site_states[site]
        template = "Invalid state choice '{}' in Monomer {}, site {}. Valid " \
                   "state choices: {}"
        raise ValueError(template.format(*args))
    return True
|
8f61c91ddae5af378503d98377401446f69c37db
| 695,914 |
import unicodedata
def unicode_to_ascii(s):
    """
    Takes in a unicode string, outputs ASCII equivalent
    :param s: String of unicode
    :return: String of ASCII
    """
    return ''.join(c for c in unicodedata.normalize('NFD', s) if unicodedata.category(c) != 'Mn')
|
03781e1286aef5a67ea389dad4fcf59cc9317f23
| 695,916 |
def image_layers(state):
    """Get all image layer names in the state
    Parameters
    ----------
    state : dict
        Neuroglancer state as a JSON dict
    Returns
    -------
    names : list
        List of layer names
    """
    return [l["name"] for l in state["layers"] if l["type"] == "image"]
|
6c843855e01957386871f90d54eee69800f08c37
| 695,923 |
import requests
def coordinate_finder(result: requests.Response) -> tuple:
    """
    One filter function for the send_get_request function. This filter function is used for
    getting a tuple (lat, long) from the wikipedia geoData api with type props=coordinate.
    More information found: https://www.mediawiki.org/wiki/Extension:GeoData
    """
    json_data = result.json()
    page_values = json_data['query']['pages'].values()
    coordinates = list(page_values)[0]['coordinates'][0]
    return (coordinates['lat'], coordinates['lon'])
|
3069e7c3782d292fc708f79a7d178a4b4f40ebb7
| 695,928 |
def filter_merge_clusters(clusters, max_block_size_multi=5, min_block_pop=50, buffer_amount=150):
    """
    The vectors created by create_clusters() are a single square for each raster pixel.
    This function does the following:
    - Remove overly large clusters, caused by defects in the input raster.
    - Remove clusters with population below a certain threshold.
    - Buffer the remaining clusters and merge those that overlap.
    Parameters
    ----------
    clusters: geopandas.GeoDataFrame
        The unprocessed clusters created by create_clusters()
    max_block_size_multi: int, optional
        Remove clusters that are more than this many times average size. Default 5.
    min_block_pop: int, optional
        Remove clusters with below this population. Default 50.
    buffer_amount: int, optional
        Distance in metres by which to buffer the clusters before merging. Default 150.
    Returns
    -------
    clusters: geopandas.GeoDataFrame
        The processed clusters.
    """
    # remove blocks that are too big (basically artifacts)
    clusters['area_m2'] = clusters.geometry.area
    clusters = clusters[clusters['area_m2'] < clusters['area_m2'].mean() * max_block_size_multi]
    # remove blocks with too few people
    clusters = clusters[clusters['raster_val'] > min_block_pop]
    # buffer outwards so that nearby blocks will overlap
    clusters['geometry'] = clusters.geometry.buffer(buffer_amount)
    # and dissolve the thousands of blocks into a single layer (with no attributes!)
    clusters['same'] = 1
    clusters = clusters.dissolve(by='same')
    # To get our attributes back, we convert the dissolved polygon into singleparts
    # This means each contiguous bubble becomes its own polygon and can store its own attributes
    crs = clusters.crs
    clusters = clusters.explode()
    clusters = clusters.reset_index()
    # no longer needed in GeoPandas >= 0.4.0
    # clusters['geometry'] = clusters[0]
    # clusters = gpd.GeoDataFrame(clusters)
    # clusters.crs = crs
    clusters = clusters.drop(columns=['same', 'level_1', 'raster_val'])  # raster_val is no longer meaningful
    # And then add the polygon's area back to its attributes
    clusters["area_m2"] = clusters['geometry'].area
    return clusters
|
8b6091baeb55e0c72c468aa6eb4300c4db40ecbd
| 695,931 |
import pickle
def load(path):
    """
    Load pickled pandas object (or any other pickled object) from the specified
    file path
    Parameters
    ----------
    path : string
        File path
    Returns
    -------
    unpickled : type of object stored in file
    """
    f = open(path, 'rb')
    try:
        return pickle.load(f)
    finally:
        f.close()
|
6ed0b0ae944fa8bdaacfd4cbd8cdb3864f1adb47
| 695,934 |
def get_y_indicator_variable_index(i, j, m, n):
    """
    Map the i,j indices to the sequential indicator variable index
    for the y_{ij} variable.
    This is basically the (2-dimensional) 'array equation' (as per
    row-major arrays in C for example).
    Note that for MiniSat+, the variables are just indexed sequentially
    and we are mapping the y_{ij} to y_r for 0 <= r < m*n variables.
    This function gets the sequential index for a y_{ij} variable.
    Parameters:
        i, j - indices for y indicator variable
        m - order of tableau a (0 <= i,k < m)
        n - order of tableau b (0 <= j,l < n)
    Return value:
        index r of indicator variable y_{r} corresponding to y_{ij}
    """
    return i * n + j
|
8c6dc999ebe3120084ae741403f15acdd900e783
| 695,936 |
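A quick check of the row-major mapping for a hypothetical 3x4 tableau: r = i*n + j enumerates the cells row by row.
m, n = 3, 4
for i in range(m):
    print([get_y_indicator_variable_index(i, j, m, n) for j in range(n)])
# [0, 1, 2, 3]
# [4, 5, 6, 7]
# [8, 9, 10, 11]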
def truncate_string(s, length):
    """ Truncate string to given length.
    """
    return s[0:min(length, len(s))]
|
9b44cf31c7905109497e485d0ffa707dada9d67b
| 695,937 |
import torch
def inverse_sigmoid(x, eps=1e-5):
    """Inverse function of sigmoid.
    Args:
        x (Tensor): The tensor to invert.
        eps (float): EPS to avoid numerical
            overflow. Defaults 1e-5.
    Returns:
        Tensor: The inverse of sigmoid applied to x,
            with the same shape as the input.
    """
    x = x.clamp(min=0, max=1)
    x1 = x.clamp(min=eps)
    x2 = (1 - x).clamp(min=eps)
    return torch.log(x1 / x2)
|
01d02c9f04d4a9318f0ec0d4bb8cf7301181c8f5
| 695,939 |
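A round-trip sketch, assuming PyTorch is available: composing with torch.sigmoid should recover the input (up to the eps clamping at the extremes).
import torch

x = torch.tensor([0.1, 0.5, 0.9])
y = inverse_sigmoid(x)
print(torch.sigmoid(y))  # -> tensor([0.1000, 0.5000, 0.9000])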
def get_best_sales_rep(df):
    """Return a tuple of the name of the sales rep and
    the total of his/her sales"""
    best_rep_df = df.groupby(['Rep'])['Total'].sum()
    return (best_rep_df.idxmax(), best_rep_df.loc[best_rep_df.idxmax()])
|
e4313cf517ecb513918944fdbb55caea3f20fb0c
| 695,942 |
def _solveX(L, U, b):
    """Use forward and backwards substitution to calculate the x vector to solve the linear system of equations.
    Parameters
    ----------
    L: numpy.array
        Lower triangular matrix
    U: numpy.array
        Upper triangular matrix
    b: numpy.array
        Column vector of constant terms
    Returns
    -------
    x: numpy.array
        Solution vector
    """
    m, n = L.shape
    # Forward substitution: solve L y = b
    y = list()
    y.insert(0, b[0] / L[0][0])
    for i in range(1, m):
        summ = 0
        for k in range(0, i):
            summ += L[i][k] * y[k]
        y.insert(i, (b[i] - summ) / (L[i][i]))
    # Backward substitution: solve U x = y
    x = [0] * m
    x[m - 1] = y[m - 1] / U[m - 1][m - 1]
    for i in range(m - 2, -1, -1):
        summ = 0
        for k in range(i + 1, n):
            summ += U[i][k] * x[k]
        x[i] = (y[i] - summ) / U[i][i]
    return x
|
997b472ea45796268a1d87c5ade3de4ab66115a0
| 695,944 |
def nearest_square(num):
    """
    Find the largest perfect square less than or equal to num
    """
    root = 0
    while (root + 1) ** 2 <= num:
        root += 1
    return root ** 2
|
53b899958a053c8bfe6383e240d3b8ddb7d291c1
| 695,945 |
import socket
def create_rawsock(iface):
    """Creates a new raw socket object.
    The socket sends/receives data at the link layer (TCP/IP model)/data-link
    layer (OSI model).
    Args:
        iface: A string specifying the name of the network interface to which
            the raw socket should be bound. For example "eth0".
    Returns:
        A socket object.
    """
    # Note: socket.SOCK_RAW == 3, which happens to coincide with ETH_P_ALL
    # (0x0003), the protocol number that captures all link-layer protocols.
    sock = socket.socket(socket.AF_PACKET,
                         socket.SOCK_RAW,
                         socket.htons(socket.SOCK_RAW))
    sock.bind((iface, socket.SOCK_RAW))
    return sock
|
ea56408403ada6b9750265547677028c197ae933
| 695,946 |
def vector_mul(k, a):
    """Multiplication of a vector by a scalar.
    >>> vector_mul(2, (1, 2))
    (2, 4)
    """
    return tuple(map(lambda x: k * x, a))
|
cdc289430ab87ac70e8387d4dd807fb4dfd1e1da
| 695,949 |
import math
def replace_invalid_values(row):
    """Replace float values that are not available in BigQuery.
    Args:
        row: List of values to insert into BigQuery.
    Returns:
        List, `row` with invalid values replaced with `None`.
    """
    # NaN compares unequal to itself, so a membership test against
    # [math.inf, -math.inf, math.nan] would miss NaNs coming from data;
    # check explicitly instead.
    def is_invalid(x):
        return isinstance(x, float) and (math.isnan(x) or math.isinf(x))
    return [None if is_invalid(x) else x for x in row]
|
c07c16780a52870f9d0954b3f8bba5a91baf6b58
| 695,954 |
def interp_from_u(idx, w, y):
    """
    compute Wy
    W.shape: (n, u)
    y.shape: (u,)
    """
    return (y[idx] * w).sum(axis=1)
|
b0312063699a16a75307774dbf54cb758082d678
| 695,959 |
def A004767(n: int) -> int:
    """Integers of the form a(n) = 4*n + 3."""
    return 4 * n + 3
|
5f97cccc4f540b46029e57c11d1ab718a59e227c
| 695,961 |
import random
def get_random_string(length=10):
    """
    Generates a random string of fixed length
    """
    chars = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
    return ''.join(random.choice(chars) for i in range(length))
|
043f7ef3b495c199825242ffd55fabf9e7c1cda7
| 695,971 |
def assemble_api_url(domain, operators, protocol='https'):
    """Assemble the requests api url."""
    return '{}://{}{}'.format(protocol, domain, operators)
|
105d541f2e9196583b2fd5eac1af75cf4c62669f
| 695,979 |
from datetime import datetime
import uuid
def generate_ami_name(prefix):
    """
    Generate AMI image name.
    """
    # current date and time
    now = datetime.now()
    timestamp = now.strftime("%Y%m%d%H%M%S")
    postfix = uuid.uuid4().hex[:10]
    ami_name = prefix + "-" + timestamp + "-" + postfix
    return ami_name
|
50cf4e6f8ab55b252503319429594242cea9a77e
| 695,980 |
def _cap_str_to_mln_float(cap: str):
    """If cap = 'n/a' return 0, else:
    - strip off leading '$',
    - if 'M' in cap value, strip it off and return value as float,
    - if 'B', strip it off, multiply by 1,000 and return
      value as float"""
    if cap == "n/a":
        return 0
    capital = cap.lstrip("$")
    if capital[-1] == "M":
        return float(capital.replace("M", ""))
    if capital[-1] == "B":
        return float(capital.replace("B", "")) * 1000
|
a4c984013ba7c1e06b3569f61d65fe69d98ae2da
| 695,981 |
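A usage sketch (note the function implicitly returns None for inputs without an 'M' or 'B' suffix, which callers may want to guard against):
print(_cap_str_to_mln_float("$5.2M"))  # -> 5.2
print(_cap_str_to_mln_float("$1.4B"))  # -> 1400.0 (millions)
print(_cap_str_to_mln_float("n/a"))    # -> 0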
def bed_get_region_id_scores(in_bed, no_float=False):
    """
    Read in .bed file, and store scores for each region in dictionary
    (unique column 4 ID and column 5 score have to be present).
    Return dictionary with mappings region ID -> region score
    >>> test_bed = "test_data/test5.bed"
    >>> bed_get_region_id_scores(test_bed)
    {'CLIP2': 2.57, 'CLIP1': 1.58, 'CLIP3': 3.11}
    """
    id2sc_dic = {}
    # Open input .bed file; the with-statement closes it automatically.
    with open(in_bed) as f:
        for line in f:
            cols = line.strip().split("\t")
            site_id = cols[3]
            site_sc = float(cols[4])
            if no_float:
                site_sc = cols[4]
            id2sc_dic[site_id] = site_sc
    assert id2sc_dic, "nothing read in for in_bed \"%s\"" % (in_bed)
    return id2sc_dic
|
cd4305388251ab9ff9d301ff4bf0409783d1bcfd
| 695,984 |
import json
def put_json(client, url, data, headers={}):
    """Send PUT request with JSON data to specified URL.
    :url: URL string
    :data: Data dict
    :headers: Optional headers argument (usually will be authorization)
    :returns: Flask response object
    """
    return client.put(url, data=json.dumps(data),
                      content_type='application/json', headers=headers)
|
e5fff7c1fdc9cf72e8b314854415c9426e5b261b
| 695,985 |
def sipi(b3, b4, b8):
    """
    Structure Intensive Pigment Index \
    (Peñuelas, Baret and Filella, 1995).
    .. math:: SIPI = b3/b8 - b4
    :param b3: Green.
    :type b3: numpy.ndarray or float
    :param b4: Red.
    :type b4: numpy.ndarray or float
    :param b8: NIR.
    :type b8: numpy.ndarray or float
    :returns SIPI: Index value
    .. Tip::
        Peñuelas, J., Baret, F., Filella, I. 1995. Semi-empirical \
        indices to assess carotenoids/chlorophyll-a ratio from leaf \
        spectral reflectance. Photosynthetica 31, 221-230.
    """
    SIPI = b3 / b8 - b4
    return SIPI
|
b0a7181970e9165d0e75ab7319646fd6bd1c6bbd
| 695,986 |
import random
def random_nucleotides(sample_size: int, seq_length: int, seed: int = 1789):
    """ Return a random list of DNA nucleotides sequences.
    Args:
        sample_size: generate N random sequences.
        seq_length: set sequence length.
        seed: random seed for reproducibility.
    Returns:
        list of generated sequences
    """
    random.seed(seed)  # set random seed
    alphabet = list("TAGC")  # define DNA nucleotides
    # generate sequences
    seq_list = [
        "".join(random.choices(alphabet, k=seq_length)) for i in range(0, sample_size)
    ]
    return seq_list
|
ce283d43495fa53be4276cf0ab3c6793e053a723
| 695,987 |
def populate_game_starting_lineups(gl, games, player_id):
    """ Populates all the starting lineups in a game by updating the "teams" dictionary in each Game
    object, with key = team_id, value = [list of player_ids on that team]
    Every "teams" dictionary has two keys because two teams play in a game
    Each key in the "teams" dictionary has a list of 5 players (starting lineup)
    :param gl: Dataframe of GameLineupNBA.csv
    :param games: Dictionary mapping game_id's to Game objects
    :param player_id: the unique id string of a player
    :return: updated games dictionary which maps game_id's to Game objects
    """
    # Filters the dataframe to find all rows with the specified player_id and
    # the starting lineup (denoted by Period == 0 in this data)
    df = gl[(gl["Person_id"] == player_id) & (gl["Period"] == 0)]
    # Loop through each row of the df
    for index, row in df.iterrows():
        game_id = row["Game_id"]
        team_id = row["Team_id"]
        if team_id in games[game_id].teams:
            # If the team_id already exists in the "teams" dictionary
            # then just append the current player_id to the list of players
            # on the team
            games[game_id].teams[team_id]["players"].append(player_id)
        else:
            # If the team_id does not exist yet in the "teams" dictionary
            # then create a new team_id key and set its value to be a new dict with
            # the first player id on the team
            games[game_id].teams[team_id] = {"players": [player_id]}
    # Returns the updated dictionary of games
    return games
|
9de9be892765581a4c74d060979d086a0bf6031c
| 695,989 |
def clean_venue_name(venue_name: str) -> str:
    """Clean the venue name, by removing or replacing symbols that are not allowed in a file name.
    Args:
        venue_name: Original venue name.
    Returns:
        Cleaned venue name.
    """
    return venue_name.replace("*", "").replace("/", "_").replace(" ", "_")
|
90b6f8b3787af17750c548bb816383bf8a5b07a4
| 695,990 |
def get_border(char, length):
    """Get a border consisting of a character repeated multiple times.
    :param char: The character to make up the border.
    :param length: The length of the border.
    :return: A string consisting of the character repeated for the given length.
    """
    border = ''
    for i in range(length):
        border += char
    return border
|
9cd73504dc450e1e31c75b398240a27184a130e4
| 695,996 |
from typing import Iterable
def hash_from_dict(dictionary):
    """
    Creates a hashable string from a dictionary
    that maps values to their assignments.
    Ex:
    dictionary={"A": 1, "B": 5, "C": 1}
    => "A=1,B=5,C=1"
    """
    hashstring = ""
    for i, key in enumerate(sorted(list(dictionary.keys()))):
        hashstring += str(key)
        if not isinstance(dictionary[key], Iterable) and dictionary[key] is not None:
            hashstring += "=" + str(dictionary[key])
        if i < len(dictionary.keys()) - 1:
            hashstring += ","
    return hashstring
|
1e906f178a6353e9bdac7bed929be1e0f16ae060
| 696,002 |
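A usage sketch; note that iterable values (e.g. strings or lists) and None are serialized as the bare key with no "=value" part:
print(hash_from_dict({"A": 1, "B": 5, "C": 1}))  # -> A=1,B=5,C=1
print(hash_from_dict({"A": 1, "B": None}))       # -> A=1,B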
def almost_equal_floats(value_1: float, value_2: float, *, delta: float = 1e-8) -> bool:
    """
    Return True if two floats are almost equal
    """
    return abs(value_1 - value_2) <= delta
|
ff1c29c57434a169824fe76e451053f3edc6e519
| 696,006 |
def stationObjectsByRiver(stations, rivers):
    """Returns a list of Monitoring Station objects which are on the rivers input"""
    stationObjectsByRiverOutput = []
    for river in rivers:
        for station in stations:
            if station.river == river:
                stationObjectsByRiverOutput.append(station)
    return stationObjectsByRiverOutput
|
882c42acf4ef1d9af2aec8a1c4090f8bca4322e7
| 696,007 |
from typing import Tuple
def _yymmdd2ymd(yymmdd: int) -> Tuple[int, int, int]:
    """yymmdd -> (year, month, day)
    Examples:
    >>> _yymmdd2ymd(321123)
    (32, 11, 23)
    >>> _yymmdd2ymd(320323)
    (32, 3, 23)
    """
    year, mmdd = divmod(yymmdd, 10000)
    month, day = divmod(mmdd, 100)
    return year, month, day
|
9e9d3fa20b4684b603a203c5cc8c8284a8f45dd7
| 696,008 |
def sqliteRowToDict(sqliteRow):
    """
    Unpacks a single sqlite row as returned by fetchone
    into a simple dict.
    :param sqliteRow: single row returned from fetchone DB call
    :return: dictionary corresponding to this row
    """
    return dict(zip(sqliteRow.keys(), sqliteRow))
|
979bb63142a797749937ee382d9b9eb1c26dd7bd
| 696,013 |
def _get_average_score(concept, _keywords):
    """Get average score of words in `concept`.
    Parameters
    ----------
    concept : str
        Input text.
    _keywords : dict
        Keywords as keys and their scores as values.
    Returns
    -------
    float
        Average score.
    """
    word_list = concept.split()
    word_counter = len(word_list)
    total = float(sum(_keywords[word] for word in word_list))
    return total / word_counter
|
ce930ae5871dfc218ae5f057f7dc76d64671a7f6
| 696,015 |
from datetime import datetime
def get_start_next_month(date):
    """
    Parameters
    ----------
    date: datetime.datetime
    Returns
    -------
    datetime.datetime
        date of the start of the next month
    """
    if date.month + 1 <= 12:
        return datetime(date.year, date.month + 1, 1)
    return datetime(date.year + 1, 1, 1)
|
737ace5854593007ff62d169c3b69f3118056be1
| 696,019 |
def computePoint(triangle):
    """
    Computes the last point D in an ABC square triangle
    where a and c are the diagonal - triangle = [a,b,c]
    D----C
    |  / |
    | /  |
    |/   |
    A----B
    """
    # get coordinates of each point a, b, c
    a, b, c = triangle
    xa, ya = a
    xb, yb = b
    xc, yc = c
    # due to subtriangle congruence: D = A + (C - B)
    xd = xc - (xb - xa)
    yd = (ya - yb) + yc
    d = (xd, yd)
    return d
|
e7b37852440eaf43844d5d6e1cd2c2921bc9e6b3
| 696,020 |
def get_metric_scores(ground_truth, simulation, measurement, metric, measurement_kwargs={}, metric_kwargs={}):
    """
    Function to combine measurement and metric computations
    :param ground_truth: pandas dataframe of ground truth
    :param simulation: pandas dataframe of simulation
    :param measurement: measurement function
    :param metric: metric function
    :return: metric computation for measurements calculated from gold and simulation
    """
    print("Calculating {} for {}".format(metric.__name__, measurement.__name__))
    measurement_on_gt = measurement(ground_truth, **measurement_kwargs)
    measurement_on_sim = measurement(simulation, **measurement_kwargs)
    return measurement_on_gt, measurement_on_sim, metric(measurement_on_gt, measurement_on_sim, **metric_kwargs)
|
4087e60ce0578d11756f449e776e01ee81b6e4ac
| 696,025 |
import json
def util_json_get_value(s_json, key):
    """Returns value for supplied key in JSON document"""
    try:
        t = json.loads(s_json, strict=False)
    except ValueError:
        return ''
    try:
        value = t[key]
    except KeyError:
        return ''
    return value
|
773ae165ac58f4ac20772d5c344aca241c74786c
| 696,034 |
import hashlib
import json
def treehash(var):
    """
    Returns the hash of any dict or list, by using a string conversion
    via the json library.
    """
    return hashlib.sha256(json.dumps(var, sort_keys=True).encode("utf-8")).hexdigest()
|
e196a8d601b59a893bf05bc903aa7e3af4927cef
| 696,038 |
def is_float(dtype):
    """Return True if datatype dtype is a float kind"""
    return ('float' in dtype.name) or dtype.name in ['single', 'double']
|
3c6301e6d89fb8d825ac4181ca02b5cf95028066
| 696,041 |
def _find_method(obj, string):
    """Find methods in object that start with `string`.
    """
    out = []
    for key in dir(obj):
        if key.startswith(string):
            out.append(getattr(obj, key))
    return out
|
1250a6dc39d2ac47ca4a5e561f55f4fb2b456c96
| 696,042 |
def split_list(ls, size):
    """
    Split list
    :param list ls: list
    :param int size: size
    :return list: result
    >>> split_list([1, 2, 3, 4], 3)
    [[1, 2, 3], [4]]
    """
    if size == 0:
        return ls
    return [ls[i:i + size] for i in range(0, len(ls), size)]
|
de28981d576122f99be34a57c94d73457b63c04b
| 696,046 |
def getColumnsEndingAt(columns, zLevel):
    """Returns columns w/ endPoint at zLevel"""
    columnGroup = {}
    # .items() (Python 3); the original used the Python 2-only .iteritems()
    for columnID, column in columns.inventory.items():
        diff = abs(zLevel - column.endJoint.z)
        if diff <= 0.001:
            columnGroup[column.uniqueName] = column
    return columnGroup
|
4f6b7aac922bd5985b6faeb509d26bf6aec98629
| 696,047 |
def hpo_job_describe(sm_client, tuning_job_name):
    """API call to describe a hyperparameter tuning job."""
    try:
        response = sm_client.describe_hyper_parameter_tuning_job(
            HyperParameterTuningJobName=tuning_job_name
        )
        return response
    except sm_client.exceptions.ResourceNotFound:
        raise Exception(f"Hyperparameter job not found: '{tuning_job_name}'")
|
98bb1ad03883e862a8730ec4740c3bb92b1a4830
| 696,050 |
import traceback
def tb_log_str(exception) -> str:
    """
    Format an exception as a full traceback.
    """
    return "".join(traceback.format_exception(None, exception, exception.__traceback__))
|
6776a7416cb512bf23e6557833e3a95779172bd4
| 696,051 |
import requests
def get_agol_token(username, password):
    """
    purpose:
        get a security token from ArcGIS Online
    arguments:
        username: string
        password: string
    return value: string
        token, None if error
    """
    try:
        url = "https://www.arcgis.com/sharing/rest/generateToken"
        params = {
            "username": username,
            "password": password,
            "referer": "something",
            "f": "json"}
        result = requests.post(url, params).json()
        return result.get("token")
    except Exception:
        return None
|
9086fbb7e199c7dd9410bfe4f89b65d34a2424be
| 696,055 |
def read_grammar(grammar_file):
    """
    Reads in the given grammar file and splits it into separate lists for each rule.
    :param grammar_file: the grammar file to read in.
    :return: the list of rules.
    """
    with open(grammar_file) as cfg:
        lines = cfg.readlines()
    return [x.replace("->", "").split() for x in lines]
|
c9320a4126ed6bc05a5df8b05c9997890c7f620a
| 696,059 |
from pathlib import Path
def list_files(dirpath: str, pattern: str = "*.csv") -> list:
    """
    List files in a directory
    """
    file_names = list(Path(dirpath).glob(pattern))
    return file_names
|
0c291d6818c38f6f9219f92b900e5fd8ed5960d6
| 696,060 |
def _plot(ax, coords, pos_columns, **plot_style):
    """ This function wraps Axes.plot to make its call signature the same for
    2D and 3D plotting. The y axis is inverted for 2D plots, but not for 3D
    plots.
    Parameters
    ----------
    ax : Axes object
        The axes object on which the plot will be called
    coords : DataFrame
        DataFrame of coordinates that will be plotted
    pos_columns : list of strings
        List of column names in x, y(, z) order.
    plot_style : keyword arguments
        Keyword arguments passed through to the `Axes.plot(...)` method
    Returns
    -------
    Axes object
    """
    if len(pos_columns) == 3:
        return ax.plot(coords[pos_columns[0]], coords[pos_columns[1]],
                       zs=coords[pos_columns[2]], **plot_style)
    elif len(pos_columns) == 2:
        return ax.plot(coords[pos_columns[0]], coords[pos_columns[1]],
                       **plot_style)
|
80cadfff00f864b9d38e51768e68674dfc981a06
| 696,062 |
def get_i_colour(axis_handle) -> int:
    """ Get index appropriate to colour value to plot on a figure (will be 0 if brand new figure) """
    if axis_handle is None:
        return 0
    else:
        if len(axis_handle.lines) == 0:
            return 0
        else:
            return len(axis_handle.lines) - 1
|
b5001da3325168e0f359596bfe65487708f59e3b
| 696,064 |
def read_all(port, chunk_size=200):
    """Read all characters on the serial port and return them."""
    if not port.timeout:
        raise TypeError('Port needs to have a timeout set!')
    read_buffer = b''
    while True:
        # Read in chunks. Each chunk will wait as long as specified by
        # timeout. Increase chunk_size to fail quicker
        byte_chunk = port.read(size=chunk_size)
        read_buffer += byte_chunk
        if not len(byte_chunk) == chunk_size:
            break
    return read_buffer
|
cf894c2449fa4eba763dc7bf4da86b0072a78a19
| 696,065 |
import base64
import pickle
def encode_store_data(store_data):
    """
    Encode store_data dict into a JSON serializable dict
    This is currently done by pickling store_data and converting to a base64 encoded
    string. If HoloViews supports JSON serialization in the future, this method could
    be updated to use this approach instead
    Args:
        store_data: dict potentially containing HoloViews objects
    Returns:
        dict that can be JSON serialized
    """
    return {"pickled": base64.b64encode(pickle.dumps(store_data)).decode("utf-8")}
|
0a576a8146c0657610b508ebc6338d3ed6790b70
| 696,066 |
def find_feature_by_gi(gid, record, ftype):
    """ Loops over the ftype features in the passed SeqRecord, checking
    db_xref qualifiers for a match to the passed gid.
    Returns the first feature identified, or None if no feature found.
    """
    for feature in [f for f in record.features if f.type == ftype]:
        try:
            if 'GI:%s' % gid in feature.qualifiers['db_xref']:
                return feature
        except KeyError:
            continue
    return None
|
050c5464a8d425f5db53440abd79c64b2938f81b
| 696,070 |
def loglik_nats(model, x):
    """Compute the negative log-likelihood in nats."""
    return -model.log_prob(x).mean()
|
f929be38cb70fe56b6bb1a0e5cc21cf02fead3b6
| 696,072 |
def _get_item(node):
    """
    Returns the item element of the specified node if the node is not None.
    :param node: The node to extract the item from.
    :return: The node's item, or None if the node is None.
    """
    return node.item if node is not None else None
|
42dff5ef2e98a0dd78b822ee29a75c72d737e23f
| 696,075 |
def change_action_status(action_type, new_status):
    """
    This function changes the status of an action type.
    """
    # replace the last part of a dot-separated string with the new_status
    return "%s.%s" % ('.'.join(action_type.split('.')[:-1]), new_status)
|
1032486b1f5b32a36806d397a68f42f549b6228c
| 696,077 |
def extract_x_positions(parameter, joining_string="X"):
    """
    find the positions within a string which are X and return as list, including the string length
    :param parameter: str
        the string for interrogation
    :param joining_string: str
        the string of interest whose character positions need to be found
    :return: list
        list of all the indices for where the X character occurs in the string, along with the total length of the string
    """
    return [loc for loc in range(len(parameter)) if parameter[loc] == joining_string] + [
        len(parameter)
    ]
|
5843d7a86823b960bb1c99379174f60697850378
| 696,080 |
def char2cid(char, char2id_dict, OOV="<oov>"):
    """
    Transform single character to character index.
    :param char: a character
    :param char2id_dict: a dict mapping characters to indexes
    :param OOV: a token that represents Out-of-Vocabulary characters
    :return: int index of the character
    """
    if char in char2id_dict:
        return char2id_dict[char]
    return char2id_dict[OOV]
|
4a872cb12f11ed8ba2f3369749a3a2f356b7b97e
| 696,081 |
import re
def extractCN(dn):
    """Given the dn on an object, this extracts the cn."""
    return re.findall('CN=(.*?),', dn)[0]
|
dad91c436b5035664dd6d463e0e626949b6cd838
| 696,086 |
def packRangeBits(bitSet):
    """Given a set of bit numbers, return the corresponding ulUnicodeRange1,
    ulUnicodeRange2, ulUnicodeRange3 and ulUnicodeRange4 for the OS/2 table.
    >>> packRangeBits(set([0]))
    (1, 0, 0, 0)
    >>> packRangeBits(set([32]))
    (0, 1, 0, 0)
    >>> packRangeBits(set([96]))
    (0, 0, 0, 1)
    >>> packRangeBits(set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 65, 98]))
    (4294967295, 1, 2, 4)
    >>> packRangeBits(set(range(128)))
    (4294967295, 4294967295, 4294967295, 4294967295)
    >>> 0xffffffff
    4294967295
    """
    bitNum = 0
    bitFields = []
    for i in range(4):
        bitField = 0
        for localBitNum in range(32):
            if bitNum in bitSet:
                mask = 1 << localBitNum
                bitField |= mask
            bitNum += 1
        bitFields.append(bitField)
    assert bitNum == 128
    ur1, ur2, ur3, ur4 = bitFields
    return ur1, ur2, ur3, ur4
|
a4484da8635efe9c1ddc5259563ff6db5b2b5ed4
| 696,087 |
def reward_min_waiting_time(state, *args):
    """Minimizing the waiting time.
    Params:
    ------
    * state: ilurl.state.State
        captures the delay experienced by phases.
    Returns:
    --------
    * ret: dict<str, float>
        keys: tls_ids, values: rewards
    """
    try:
        wait_times = state.feature_map(
            filter_by=('waiting_time',)
        )
    except AttributeError:
        wait_times = state
    ret = {}
    for tls_id, phase_obs in wait_times.items():
        ret[tls_id] = -sum([dly for obs in phase_obs for dly in obs])
    return ret
|
85e88f15e560d761ac59bdbd31bfb78cdfe6936f
| 696,088 |
from pathlib import Path
import yaml
def get_config(base_path: Path):
    """
    Get the config file from the base path.
    :param base_path: The base path to the .fsh-validator.yml File.
    :return: Configuration
    """
    config_file = base_path / ".fsh-validator.yml"
    if not config_file.exists():
        return dict()
    with open(config_file) as f:
        return yaml.safe_load(f)
|
694aad52afda7588d44db9f22cc31f05e64358ac
| 696,091 |
def _combine_ind_ranges(ind_ranges_to_merge):
    """
    Utility function for subdivide
    Function that combines overlapping integer ranges.
    Example
    [[1,2,3], [2,3], [3], [4,5], [5]] -> [[1,2,3], [4,5]]
    """
    ind_ranges_to_merge = sorted(ind_ranges_to_merge)
    stack = []
    result = []
    for curr in ind_ranges_to_merge:
        if len(stack) == 0:
            stack.append(curr)
        elif stack[-1][-1] >= curr[0]:
            prev = stack.pop()
            merged = sorted(list(set(prev + curr)))
            stack.append(merged)
        else:
            prev = stack.pop()
            result.append(prev)
            stack.append(curr)
    result += stack
    return result
|
fcece4c58a0d231863b0bfb22bd3ff20bcd5858e
| 696,092 |
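A usage sketch reproducing the docstring example (the first three ranges overlap and collapse into one; the stack holds the range currently being merged):
print(_combine_ind_ranges([[1, 2, 3], [2, 3], [3], [4, 5], [5]]))
# -> [[1, 2, 3], [4, 5]]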
import hashlib
def get_md5_hash(to_hash):
    """Calculate the md5 hash of a string
    Args:
        to_hash: str
            The string to hash
    Returns:
        md5_hash: str
            The hex value of the md5 hash
    """
    return hashlib.md5(to_hash.encode('utf-8')).hexdigest()
|
118b5b87500b22780f541fa46ad54361c7e7440e
| 696,095 |
def calc_tile_locations(tile_size, image_size):
    """
    Divide an image into tiles to help us cover classes that are spread out.
    tile_size: size of tile to distribute
    image_size: original image size
    return: locations of the tiles
    """
    image_size_y, image_size_x = image_size
    locations = []
    for y in range(image_size_y // tile_size):
        for x in range(image_size_x // tile_size):
            x_offs = x * tile_size
            y_offs = y * tile_size
            locations.append((x_offs, y_offs))
    return locations
|
fa898d2b5da4a6d6482d52238eecd1460bd0d167
| 696,096 |
def join(G, u, v, theta, alpha, metric):
    """Returns ``True`` if and only if the nodes whose attributes are
    ``du`` and ``dv`` should be joined, according to the threshold
    condition for geographical threshold graphs.
    ``G`` is an undirected NetworkX graph, and ``u`` and ``v`` are nodes
    in that graph. The nodes must have node attributes ``'pos'`` and
    ``'weight'``.
    ``metric`` is a distance metric.
    """
    du, dv = G.nodes[u], G.nodes[v]
    u_pos, v_pos = du['pos'], dv['pos']
    u_weight, v_weight = du['weight'], dv['weight']
    return theta * metric(u_pos, v_pos) ** alpha <= u_weight + v_weight
|
8968ea954be10cf3c3e2ed2c87748b00da0d850a
| 696,101 |
def epsg_for_UTM(zone, hemisphere):
    """
    Return EPSG code for given UTM zone and hemisphere using WGS84 datum.
    :param zone: UTM zone
    :param hemisphere: hemisphere either 'N' or 'S'
    :return: corresponding EPSG code
    """
    if hemisphere not in ['N', 'S']:
        raise Exception('Invalid hemisphere ("N" or "S").')
    if zone < 0 or zone > 60:
        raise Exception('UTM zone outside valid range.')
    if hemisphere == 'N':
        ns = 600
    else:
        ns = 700
    if zone == 0:
        zone = 61
    return int(32000 + ns + zone)
|
c448ffd7b18e605f938c7e8fa294a29218f74d36
| 696,102 |
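A quick sanity check: UTM zone 33 north maps to EPSG:32633 and zone 33 south to EPSG:32733, matching the WGS84 UTM code ranges.
print(epsg_for_UTM(33, 'N'))  # -> 32633
print(epsg_for_UTM(33, 'S'))  # -> 32733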
def estimate_flag(bflag, corr_moments, cr_moments, cr_ldr, bounds_unfiltered_moments):
    """retrieve the integer flag from the binary flag
    the available integer flags are:
    .. code-block:: python
        {0: 'not influenced', 1: 'hydromet only',
         2: 'plankton', 3: 'low snr',
         4: '', 5: 'melting layer'}
    Args:
        bflag (dict): binary flag dict
        corr_moments (list): list with the corrected moments
        cr_moments (list): list with the cloud radar moments
        cr_ldr (float): cloud radar ldr in dB
        bounds_unfiltered_moments (list): all peak boundaries
    Returns:
        add_to_binary_flag, flag, flag_doc
    """
    # do not overwrite bflag here! (this has to be done at top level)
    addbflag = {'low_snr': 0,
                'plankton': 0,
                'melting_layer': 0}
    flag_doc = {0: 'not influenced',
                1: 'hydromet only',
                2: 'plankton',
                3: 'low snr',
                4: '',
                5: 'melting layer'}
    bins_above_noise = sum(
        [b[1] - b[0] for b in bounds_unfiltered_moments])
    bounds_unfiltered_moments.sort()
    bin_max_span = bounds_unfiltered_moments[-1][-1] - bounds_unfiltered_moments[0][0]
    if bflag["particle_influence"] == 1:
        flag = 1
        if corr_moments[0].snr < 10:
            addbflag['low_snr'] = 1
            flag = 3
        if cr_ldr > -13:
            if cr_moments[0].Z < -3:
                addbflag['plankton'] = 1
                flag = 2
            else:
                addbflag['melting_layer'] = 1
                flag = 5
        else:
            if (len(corr_moments) > 4
                    or bins_above_noise > 140
                    or bin_max_span > 180
                    or bflag['hs_higher_noise'] == 1):
                addbflag['melting_layer'] = 1
                flag = 5
    else:
        flag = 0
    return addbflag, flag, flag_doc
|
8e600a4972de64cb0171239359b5609779723eab
| 696,103 |
import torch
def _safe_check_pinned(tensor: torch.Tensor) -> bool:
    """Check whether or not a tensor is pinned. If torch cannot initialize cuda, returns False instead of error."""
    try:
        return torch.cuda.is_available() and tensor.is_pinned()
    except RuntimeError:
        return False
|
6d023bf0554ac41834f421d07ea7959952dcc9e8
| 696,105 |
def sum_list(list_to_sum):
    """Function to sum the items in the input list."""
    return sum(list_to_sum)
|
e4a922888d9ed229b0c74b4e9006cae7ba02c976
| 696,110 |
import torch
def numpy_to_tensor(data):
    """Transform numpy arrays to torch tensors."""
    return torch.from_numpy(data)
|
06c2aee2081bbb017d9b33065c6925c589378df9
| 696,111 |
def asint(x):
    """Convert x to int without raising an exception, return 0 instead."""
    try:
        return int(x)
    except (ValueError, TypeError):
        return 0
|
b8ebcd4efc43c24726d35f7da80a5001b44b6f17
| 696,116 |
import typing
def sort(array: list) -> list:
    """Insertion sort implementation.
    """
    for j in range(1, len(array)):
        key: typing.Any = array[j]
        i: int = j - 1
        while i > -1 and key < array[i]:
            array[i + 1] = array[i]
            i = i - 1
        array[i + 1] = key
    return array
|
f3d36f95f3b7fc3e64593e23b9a21544fc880383
| 696,118 |
def line_width(segs):
    """
    Return the screen column width of one line of a text layout structure.
    This function ignores any existing shift applied to the line,
    represented by an (amount, None) tuple at the start of the line.
    """
    sc = 0
    seglist = segs
    if segs and len(segs[0]) == 2 and segs[0][1] is None:
        seglist = segs[1:]
    for s in seglist:
        sc += s[0]
    return sc
|
7f6585126a0ecdbab4d1d371e23ddc279dce5b75
| 696,119 |
def compute_opt_weight(env, t):
    """
    Computes the optimal weight of the risky asset for a given environment
    at a time t.
    Arguments
    ---------
    :param env : Environment instance
        Environment instance specifying the RL environment.
    :param t : int
        Period in episode for which the optimal weight of the risky asset
        should be computed.
    Returns
    -------
    :returns opt : float
        The optimal weight of the risky asset in the given environment
        in period t.
    """
    env.time = t
    # regime in time t:
    idxt = [t in v["periods"] for v in env.regimes.values()].index(True)
    rt = list(env.regimes.keys())[idxt]
    mu = env.regimes[rt]["mu"]
    sigma = env.regimes[rt]["sigma"]
    opt = (mu[1] - mu[0] + (sigma[1]**2) / 2) / (env.theta * sigma[1]**2)
    return opt
|
92a05ea1172871328ab88ac990ed760012634018
| 696,120 |
def is_palindrome(string: str) -> bool:
    """ Test if given string is a palindrome """
    return string == string[::-1]
|
1a94f7f2889d6d13080198729825347b939a0a68
| 696,124 |
def _format_s3_error_code(error_code: str):
    """Formats a message describing an S3 error code."""
    return f"S3 error with code: '{error_code}'"
|
9498a531392f0f18e99c9b8cb7a8364b0bff1f9a
| 696,126 |
import hashlib
def hash_ecfp_pair(ecfp_pair, size):
    """Returns an int < size representing that ECFP pair.
    Input must be a tuple of strings. This utility is primarily used for
    spatial contact featurizers. For example, if a protein and ligand
    have a close contact region, the first string could be the protein's
    fragment and the second the ligand's fragment. The pair could be
    hashed together to achieve one hash value for this contact region.
    Parameters
    ----------
    ecfp_pair: tuple
        Pair of ECFP fragment strings
    size: int
        Hash to an int in range [0, size)
    """
    ecfp = "%s,%s" % (ecfp_pair[0], ecfp_pair[1])
    ecfp = ecfp.encode('utf-8')
    md5 = hashlib.md5()
    md5.update(ecfp)
    digest = md5.hexdigest()
    ecfp_hash = int(digest, 16) % size
    return ecfp_hash
|
b2e4107ee59ce2c801d10b832e258d567875d987
| 696,128 |