content (string, lengths 39–14.9k) | sha1 (string, length 40) | id (int64, 0–710k)
---|---|---|
def sort_hsvs(hsv_list):
"""
Sort the list of HSV values
:param hsv_list: List of HSV tuples
:return: List of indexes, sorted by hue, then saturation, then value
"""
bars_with_indexes = []
for index, hsv_val in enumerate(hsv_list):
bars_with_indexes.append((index, hsv_val[0], hsv_val[1], hsv_val[2]))
bars_with_indexes.sort(key=lambda elem: (elem[1], elem[2], elem[3]))
return [item[0] for item in bars_with_indexes]
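# Illustrative usage sketch (added; the HSV tuples below are made up):
sort_hsvs([(30, 100, 200), (10, 50, 50), (10, 20, 90)])  # -> [2, 1, 0]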
|
49a0936a04156ef1d785dea7efc95d5fffb368e0
| 14,218 |
def GetCcIds(issue):
"""Get the Cc's of an issue, whether they are explicit or derived."""
return issue.cc_ids + issue.derived_cc_ids
|
c5d558c4bfac4da5e501bc834e6218a44c49fbd9
| 14,219 |
def _layout_to_matrix(layout):
"""Create the adjacency matrix for the tree specified by the
given layout (level sequence)."""
result = [[0] * len(layout) for i in range(len(layout))]
stack = []
for i in range(len(layout)):
i_level = layout[i]
if stack:
j = stack[-1]
j_level = layout[j]
while j_level >= i_level:
stack.pop()
j = stack[-1]
j_level = layout[j]
result[i][j] = result[j][i] = 1
stack.append(i)
return result
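# Illustrative usage sketch (added for clarity; the level sequence below is made up):
example_layout = [0, 1, 2, 2, 1]  # root 0 with children 1 and 4; node 1 has children 2 and 3
print(_layout_to_matrix(example_layout))
# expected:
# [[0, 1, 0, 0, 1], [1, 0, 1, 1, 0], [0, 1, 0, 0, 0], [0, 1, 0, 0, 0], [1, 0, 0, 0, 0]]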
|
cd27cad3a7ab6f34ea2be13e5617a34dbbba834f
| 14,228 |
def simplify_dataset_name(dataset_name):
"""In a couple of cases (BraTS and MURA) the dataset name is not quite correct
because of a mistake made earlier in the pipeline.
This function transforms the dataset names into a more readable format.
Args:
dataset_name (string): name of dataset to simplify
Returns:
string: simplified dataset name
"""
if "BraTS20" in dataset_name:
return "BraTS20"
elif "study" in dataset_name:
return "MURA"
else:
return dataset_name
|
af0e504df0af2f250c716fb4f797970f31cbb1e1
| 14,230 |
def get_video_name(image_name):
""" Extracts the video name from an image filename.
Args:
image_name: The name of the image file.
Returns:
The name of the video that the image belongs to. """
video_name = image_name.split("_")[0:3]
video_name = "%s_%s_%s" % (video_name[0], video_name[1], video_name[2])
return video_name
|
d2e5254dfaa650455b346cec43273cd50004b335
| 14,240 |
def estimate_reasonable_max(df, x):
"""Estimate a reasonable maximum value for the plot y axis."""
# Use 1.5 IQR of the largest group.
group = df.groupby(x)
q1 = group.Time.quantile(0.25)
q3 = group.Time.quantile(0.75)
iqr = (q3 - q1).max()
return q3.max() + 1.5*iqr
|
a5fd4be194ca938c7cebd1ad31128ccffcfd44bd
| 14,241 |
def get_points(file,img_shape,mirror=False):
"""
Read bounding boxes (wp) from a YOLO-style file: each box is returned as its upper-left and lower-right points
"""
points = []
img_shape = img_shape[:-1][::-1]
file = open(file).read().split('\n')[:-1]
for r in file:
r = r.split()
center = (float(r[1])*img_shape[0],float(r[2])*img_shape[1])
width = float(r[3])*img_shape[0]
height = float(r[4])*img_shape[1]
if mirror: # depends on orientation of rows
p1 = round(center[0]+width/2),round(center[1]-height/2)
p2 = round(center[0]-width/2),round(center[1]+height/2)
else:
p1 = round(center[0]-width/2),round(center[1]-height/2)
p2 = round(center[0]+width/2),round(center[1]+height/2)
points.append((p1,p2))
return points
|
9648edc2aa065aceadf2128c28b5ff71ded2cb92
| 14,243 |
def _is_url(filename):
"""Check if the file is a url link.
Args:
filename (str): the file name or url link.
Returns:
bool: is url or not.
"""
prefixes = ['http://', 'https://']
for p in prefixes:
if filename.startswith(p):
return True
return False
|
cfffb3db75de4f613097c081f950d3feff079f63
| 14,245 |
import types
def list_module_versions(glob, return_dict = False):
"""
Prints the versions of all loaded modules/packages in a script or notebook.
Parameters
----------
glob : dict
output of the globals() function call.
return_dict : bool, optional
Parameter to decide if function should return versions dict. The default is False.
Returns
-------
versions_dict : dict, optional
Dict with module names as keys and version numbers as values.
"""
versions_dict = {}
for name, val in glob.items():
if isinstance(val, types.ModuleType) and "__version__" in val.__dict__:
print(val.__name__, val.__version__)
versions_dict[val.__name__] = val.__version__
if return_dict is True:
return versions_dict
|
17a51568e5bf80f8eb8dac23f1996dc5dd609c50
| 14,246 |
def get_events(trace_collection, keys=None, syscall=True):
"""Return a generator of events. An event is a dict with the key the
arguement's name.
Args:
trace_collection (babeltrace.TraceCollection): Trace from which
to read the events.
keys (dict, optional): dict of the multiple ways of the arguments
to consider in addition to name and timestamp.
syscall (bool, optional): only syscall should be considered
Returns:
generator: a generator of events.
"""
return (
{
**{
'name': event.name,
'timestamp': event.timestamp
},
**{
keys[k]: event[k]
# scope 3 = Stream event context (procname, pid, tid)
for k in event.field_list_with_scope(3) if keys and k in keys
},
**{
keys[k]: event[k]
# scope 5 = Event fields (return value)
for k in event.field_list_with_scope(5) if keys and k in keys
}
} for event in trace_collection.events
if not syscall or "syscall" in event.name)
|
abc6e99c1e3cc64b45671ab6e054bb4bffcf2032
| 14,247 |
import torch
def softplus_inverse(x: torch.Tensor) -> torch.Tensor:
"""
Inverse of the softplus function. This is useful for initialization of
parameters that are constrained to be positive (via softplus).
"""
if not isinstance(x, torch.Tensor):
x = torch.tensor(x)
return x + torch.log(-torch.expm1(-x))
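# Illustrative check (added): the function should undo torch.nn.functional.softplus,
# since x + log(-expm1(-x)) simplifies to log(exp(x) - 1).
import torch.nn.functional as F
t = torch.tensor([0.1, 1.0, 5.0])
assert torch.allclose(softplus_inverse(F.softplus(t)), t, atol=1e-5)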
|
aa4649368b9e8372e7a22b4ab460bf8d26a38dad
| 14,248 |
def rosmac(y, t=0, r0=0.5, k=10, g0=0.4, h=2, l=0.15, e=0.6):
""" Rosenzweig-MacArthur predator prey model
"""
prey, cons = y
def r(x):
""" Growth rate """
return r0*(1 - x/k)
def g(x):
""" Grazing rate """
return g0/(x + h)
dydt = [r(prey)*prey -g(prey)*prey*cons,
-l*cons + e*g(prey)*prey*cons]
return dydt
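# Illustrative usage sketch (added): evaluate the derivatives at one state;
# the values below are approximate and only meant to show the call signature.
state = [5.0, 1.0]                 # prey = 5, consumers = 1
dprey_dt, dcons_dt = rosmac(state)  # dprey_dt ~ 0.96, dcons_dt ~ 0.02 with default parameters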
|
ec17e71cf5742b3db2b48123a63c14ae62101bee
| 14,249 |
import re
def shorten(CSTAG: str) -> str:
"""Convert long format of cs tag into short format
Args:
CSTAG (str): cs tag in the **long** format
Return:
str: cs tag in the **short** format
Example:
>>> import cstag
>>> cs = "cs:Z:=ACGT*ag=CGT"
>>> cstag.shorten(cs)
cs:Z::4*ag:3
"""
cstags = re.split(r"([-+*~=])", CSTAG.replace("cs:Z:", ""))[1:]
cstags = [i + j for i, j in zip(cstags[0::2], cstags[1::2])]
csshort = []
for cs in cstags:
if cs[0] == "=":
csshort.append(":" + str(len(cs) - 1))
continue
csshort.append(cs)
return "cs:Z:" + "".join(csshort)
|
653de1947a3cdf06103b01c1a7efc3dbc16ea4ab
| 14,250 |
def dbex_eval(db, expr):
"""Evaluate a database expression"""
return db.ex_eval(expr)
|
9739e2dd85fadd4d1569ddec20c6db57379a2de9
| 14,251 |
import math
def lognormal_stddev(m, stddev):
""" compute std. of log x with mean and std. of x
Args:
m: mean of x
stddev: standard deviation of x
Returns: std. of log x
"""
return math.sqrt(math.log((stddev * stddev) / (m * m) + 1))
|
cb3abebc2225e3b2d33bdc1b6ff12c950437a594
| 14,254 |
import aiohttp
async def get_new_bangumi_json() -> dict:
"""
Get new bangumi timeline JSON data from bilibili
Examples:
    data = await get_new_bangumi_json()
Returns:
    dict: data fetched from bilibili
"""
url = "https://bangumi.bilibili.com/web_api/timeline_global"
headers = {
"accept": "application/json, text/plain, */*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9",
"origin": "https://www.bilibili.com",
"referer": "https://www.bilibili.com/",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-site",
"user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36"
}
async with aiohttp.ClientSession() as session:
async with session.post(url=url, headers=headers) as resp:
result = await resp.json()
return result
|
02d39b7535a7db7b50dc81851725ffc67653cd17
| 14,255 |
def compare_math_formula(query, formula):
"""Compares two math tuples
Parameters:
query: the query math tuple (str)
formula: the formula math tuple (str)
Returns:
same: True if tuples are considered equal (boolean)
"""
if "'*'" in query:
# break on the wild card
query_parts = query.split("'*'")
index = 0
# make sure all the parts are in the formula
while index < len(query_parts) and query_parts[index] in formula:
index += 1
same = False
# if all parts of query are in formula then must be good to go
if index == len(query_parts):
same = True
else:
# if no wildcards then just a normal str comparison
same = query == formula
return same
|
736e9f564e4c834270fd25af7ed8252873d4c07d
| 14,256 |
def interpolate(df, key):
"""Interpolate subset of MulitIndex df."""
subset = df.xs(key, level=0, drop_level=False, axis=1)
return (subset.interpolate(axis=1)
.where(subset.fillna(method='bfill', axis=1).notnull()))
|
0f5fec0b891871747337194bf56a0814ee015fc0
| 14,257 |
def exponential(x, halflife):
""" Returns a decay factor based on the exponential function
.. math::
f(x) = 2^{-x / \mathrm{halflife}}
:param x: The function argument.
:param halflife: The half-life of the decay process.
"""
return 2 ** (-x / halflife)
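# Illustrative values (added): after one half-life the factor is 0.5, after two it is 0.25.
assert exponential(0, 10) == 1.0
assert exponential(10, 10) == 0.5
assert exponential(20, 10) == 0.25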
|
2561d42f87c2818e82c2cc065592acf9ac8202fe
| 14,260 |
def rm_strheader(images):
"""
Remove the header of the images base64 string from front-end
:param images: the input should be the base64 string of the image
:raises TypeError: if input is not a string
:returns: base64 string without data header
:rtype: string
"""
if type(images) is not str:
raise TypeError('Error: input should be a string')
index = images.find(',')
image_str = images[index + 1:]
return image_str
|
7466621dfa0a5bb31eb8735da89d68b885cb04ea
| 14,262 |
def box_to_rect(box, width, height):
"""
:param box: center_x, center_y, bbox_w, bbox_h
:param width: image width
:param height: image height
:return: x1, y1, x2, y2(in pixel)
"""
x, y, w, h = box
x1 = (x - w * 0.5) * width
y1 = (y - h * 0.5) * height
x2 = (x + w * 0.5) * width
y2 = (y + h * 0.5) * height
return [int(x1), int(y1), int(x2), int(y2)]
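# Illustrative usage sketch (added): a normalized YOLO-style box on a 100x200 image.
box_to_rect((0.5, 0.5, 0.2, 0.4), width=100, height=200)  # -> [40, 60, 60, 140]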
|
32413d14e242f2d790c5ae9c93418abb4152df46
| 14,263 |
def set_zenangle(profs, zen=None):
"""Set zenith angles to a new value. If zen is missing, angles cycle in
steps of 10 degrees between 0 and 70 over the profile set."""
if zen is not None:  # treat 0 as a valid zenith angle
zenlist = [zen]
else:
zenlist = range(0, 71, 10)
for p in range(profs['nprof']):
profs[p]['zenangle'] = zenlist[p % len(zenlist)]
return profs
|
41d7099e7eb7ac9da1532c28be00b1a574e84e6e
| 14,271 |
def check_intensifiers(text, INTENSIFIER_MAP):
"""
Utility function to check intensifiers of an emotion
:param text: text chunk with the emotion term
:return: boolean value and booster value for intensifiers
"""
# BOOSTER_MAP = {"B_INCR": 2,
# "B_DECR": 0.5}
intensity_word_list = INTENSIFIER_MAP
print(intensity_word_list)
has_intensity = False
booster = 'NULL'
for int_term in intensity_word_list:
intensifier = int_term.split(':')[0].strip()
# print(intensifier)
if intensifier in text:
# print('yes')
has_intensity = True
booster = float(int_term.split(':')[2].strip())
return has_intensity, booster
|
2c35b6b66395bc7105b9fb1b9cf7f04b5686cb8d
| 14,278 |
def get_next_code(last_code):
"""Generate next code based on the last_code."""
return (last_code * 252533) % 33554393
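# Illustrative usage sketch (added): starting from the Advent of Code 2015 day 25
# seed 20151125, the next code in the sequence is 31916031.
get_next_code(20151125)  # -> 31916031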
|
9fcf416df3448a8ca46a55073f66d12cd8fc2593
| 14,279 |
import base64
def decode(b64):
"""
Decode given attribute encoded by using Base64 encoding.
The result is returned as regular Python string. Note that TypeError might
be thrown when the input data are not encoded properly.
"""
barray = base64.b64decode(b64)
return barray.decode('ascii')
|
5c04c43247d1415ca8f1398c3b8206c50a7e0fa4
| 14,282 |
from typing import Counter
def session_referer_class(session):
"""Most common referer class in session (categorical)."""
referer_class = Counter([r['referer_class'] for r in session]).most_common(1)[0][0]
return referer_class
|
0b4047ea63063b7535bbaea7c67756de15561522
| 14,285 |
def _compute_max_name_width(contracts: dict) -> int:
"""Return the maximum width needed by the function name column."""
return max(
len(function) for contract, functions in contracts.items() for function in functions
)
|
bc57c408a49182cdfb26a1955a4a3943c060af86
| 14,288 |
import logging
def get_logger(name):
"""Get the logger with a given name
Args:
name: name of the logger to create
"""
return logging.getLogger(name)
|
0f02aba7f01c2aafbb7fafda0bb260d73fd240a1
| 14,290 |
def create_periodic_dosing(timeHigh, timeLow, highVal, lowVal=0):
"""Create a Periodic dosing profile
Create a dosing profile which oscillates between high and low values.
Remains high for timeHigh and low for timeLow each period
:param timeHigh: Time dosing remains at highVal (float)
:param timeLow: Time dosing remains at lowVal (float)
:param highVal: dosing amount during high levels (float)
:param lowVal: dosing level during low levels (float)
:returns: Dosing Profile
:rtype: Callable[[float], float]
"""
def inner(t: float) -> float:
phase = t%(timeHigh + timeLow)
return highVal if phase <= timeHigh else lowVal
return inner
|
358723ba0187ffddb91d9aa1d2c70a1f14a774b2
| 14,299 |
def match(first_list, second_list, attribute_name):
"""Compares two lists and returns true if in both there is at least one element which
has the same value for the attribute 'attribute_name' """
for i in first_list:
for j in second_list:
if i[attribute_name] == j[attribute_name]:
return True
return False
|
2b7c38ef3132c5cb9e693be2995691600ac76ec7
| 14,302 |
def LinearGainLine(x1,y1,x2,y2):
"""
returns tuple (m,b)
that satisfies the equation y=mx+b
uses two points in histogram space to determine slope
x : 0..nbins
y : 0..255
"""
if (x2 > x1):
    m = (y2 - y1) / (x2 - x1)
else:
    m = 0
# choose b so the line passes through (x1, y1), satisfying y = m*x + b
b = y1 - m * x1
return m, b
|
71d1d321c85636e15f05e3ab26c66a48a1cc5103
| 14,307 |
def mean(num_lst):
"""
Calculates the mean of a list of numbers
Parameters
----------
num_lst : list
List of numbers to calculate the average of
Returns
-------
The average/mean of num_lst
"""
return sum(num_lst) / len(num_lst)
|
4339a8f755ebb26328bbbc76b2b76034d21ea004
| 14,309 |
def find_cycle(start, graph):
"""Finds a path from `start` to itself in a directed graph.
Note that if the graph has other cycles (that don't have `start` as a hop),
they are ignored.
Args:
start: str name of the node to start.
graph: {str => iterable of str} is adjacency map that defines the graph.
Returns:
A list or str with nodes that form a cycle or None if there's no cycle.
When not None, the first and the last elements are always `start`.
Raises:
KeyError if there's a reference to a node that is not in the `graph` map.
"""
explored = set() # set of roots of totally explored subgraphs
visiting = [] # stack of nodes currently being traversed
def visit(node):
if node in explored:
return False # been there, no cycles there that have `start` in them
if node in visiting:
# Found a cycle that starts and ends with `node`. Return True if it is
# "start -> ... -> start" cycle or False if is some "inner" cycle. We are
# not interested in the latter.
return node == start
visiting.append(node)
for edge in graph[node]:
if visit(edge):
return True # found a cycle!
popped = visiting.pop()
assert popped == node
explored.add(node) # don't visit this subgraph ever again
return False
if not visit(start):
return None
visiting.append(start) # close the loop
return visiting
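# Illustrative usage sketch (added): a three-node loop and a cycle-free graph.
find_cycle('a', {'a': ['b'], 'b': ['c'], 'c': ['a']})  # -> ['a', 'b', 'c', 'a']
find_cycle('a', {'a': ['b'], 'b': []})                 # -> None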
|
0ce38b4813f6a0e55898ff3c17b54d35935ce85f
| 14,310 |
def has_connected_children(bone):
""" Returns true/false whether a bone has connected children or not.
"""
t = False
for b in bone.children:
t = t or b.use_connect
return t
|
b88106b3ceac26987253e2995cda851f7a622d2f
| 14,313 |
def create_user(first_name, last_name, email, phone_number, dob, address_id):
"""
Creates immutable user data structure
:param first_name: user's first name (string)
:param last_name: user's last name (string)
:param email: user's email (string)
:param phone_number: user's phone number (string)
:param dob: user's date of birth (string)
:param address_id: identifier of the user's address
:return: (tuple)
"""
return (first_name, last_name, email, phone_number, dob, address_id)
|
2c6f647ac7f85fd56f4870bade01bb7a951ff4d2
| 14,314 |
def dlookup_in(d, l):
"""Return key from a dict if l in val."""
for k, v in d.items():
try:
if l in v:
return k
except TypeError:
continue
return None
|
fa53a4e30607c783096b37eef3b3ca936de07097
| 14,317 |
def remove_duplicates_for_fetch(items: list, last_fetched_ids: list) -> list:
"""Remove items that were already sent in last fetch.
Args:
items (list): Items retrieved in this fetch.
last_fetched_ids (list): ID's of items from last fetch.
Returns:
(list) New items without items from last fetch.
"""
new_items = []
for item in items:
if item.get('id') and item.get('id') not in last_fetched_ids:
new_items.append(item)
return new_items
|
705e786563206015877798e227c89a978831f97f
| 14,319 |
def append_unique(the_list, new_item):
"""
append the new_item to the_list, only if it does not already exist
:param the_list:
:param new_item:
:return:
"""
exist = any(new_item == item for item in the_list)
if not exist:
the_list.append(new_item)
return the_list
|
2974643e1cbbc7c0cf8dccb7ddc1fd21368eb3f8
| 14,320 |
def get_playoff_bracket_string(league):
"""
Creates and returns a message of the league's playoff bracket.
:param league: Object league
:return: string message league's playoff bracket
"""
bracket = league.get_playoff_winners_bracket()
return bracket
|
0ac2e02493a99c830704d6651125e0c30c8f4c7c
| 14,325 |
def oxygen_abundance(Z):
"""
Set the oxygen abundance.
We assume Asplund et al 2009 abundance at Zsun and that Ao scales linearly with Z. Z in solar units
"""
Ao = 4.90e-4
return Ao*Z
|
1fe4ad34afbbe4c43fd883df434d9fcc7a83b9b7
| 14,327 |
def to_geojson(shapes, buildings):
"""Converts the shapes into geojson.
This function will combine the burn scar region and buildings into geojson.
Burn scar polygon in red, buildings polygon all in blue."""
#append burn scar region polygons to geojson
if type(shapes) == list:
results = ({
'type': 'Feature',
'properties': {'raster_val': v, 'color': 'red'},
'geometry': s.__geo_interface__}
for i, (s, v)
in enumerate(shapes))
else:
results = ({
'type': 'Feature',
'properties': {'raster_val': v, 'color': 'red'},
'geometry': s}
for i, (s, v)
in enumerate(shapes))
list_results = list(results)
# append the building footprints to geojson
results_buildings = ({
'type': 'Feature',
'properties': {'BuildingID': b['properties']['BuildingID'], 'color': 'blue'},
'geometry': b['geometry']}
for i, b
in enumerate(buildings['features']))
list_results_buildings = list(results_buildings)
collection = {
'type': 'FeatureCollection',
'features': list_results + list_results_buildings}
return collection
|
fb0d417ab3c049d4b89e14dea1f15a3f40f42803
| 14,334 |
def xf_screenname(name):
"""Insure user screen name is prefixed with '@'."""
return '@' + name if name[0] != '@' else name
|
6229a22907c4b1c11b75f5cd0218e056d46111eb
| 14,343 |
def expected(player1, player2):
"""
Calculate expected score of player1 vs player2 given elo rankings.
Args:
player1 (BaseAgent): Agent for whom the score is being calculated.
player2 (BaseAgent): Agent against whom player1 played.
Returns:
The expected score of the matchup.
"""
return 1 / (1 + 10 ** ((player2.elo - player1.elo) / 400))
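# Illustrative usage sketch (added): BaseAgent comes from the surrounding project,
# so any object exposing an `elo` attribute works for a quick check.
from types import SimpleNamespace
p1, p2 = SimpleNamespace(elo=1600), SimpleNamespace(elo=1400)
round(expected(p1, p2), 2)  # -> 0.76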
|
90551b04b15ce62a1d2c5d7022d9402b3efdba60
| 14,354 |
def corpus(request):
"""
A utility fixture to merely execute the actual fixture logic as necessary.
Args:
request: The pytest indirect request object, which has a param object
for the underlying fixture argument.
Returns:
The value of the execution of the corpus fixture.
"""
return request.param()
|
3c6986296a17145bc3b40cc84e2b62acb5c8f00c
| 14,359 |
from math import floor, log10
def findSizeInt(number):
"""
#EN-US:
→ Calculates the number of digits in a number.
:param number: the number to be calculated.
:return: the number of digits of the number entered.
#PT-BR:
→ Calcula a quantidade de dígitos em um número.
:param number: o número a ser calculado.
:return: a quantidade de dígitos do número informado.
"""
number = abs(int(number))
return 1 if number == 0 else floor(log10(number)) + 1
|
8b174183520337f31f17bfb4163d5ed5ff90e896
| 14,360 |
import operator
def _get_operator(comp_str):
"""Returns the operator function corresponding to the given comparison.
Args:
comp_str: str. One of: '<', '<=', '=', '>=', '>'.
Returns:
callable. The binary operator corresponding to the comparison.
Raises:
ValueError. The comparison is not supported.
"""
if comp_str == '<':
return operator.lt
elif comp_str == '<=':
return operator.le
elif comp_str == '=':
return operator.eq
elif comp_str == '>=':
return operator.ge
elif comp_str == '>':
return operator.gt
else:
raise ValueError('Unsupported comparison operator: %s' % comp_str)
|
123ca1e2be8abf81387fb5d2ffa559082116b959
| 14,361 |
import math
def knn(pnts, p, k):
"""
Calculates k nearest neighbours for a given point.
:param pnts: list of points
:param p: reference point
:param k: amount of neighbours
:return: list
"""
s = sorted(pnts,
key=lambda x: math.sqrt((x[0]-p[0])**2 + (x[1]-p[1])**2))[0:k]
return s
|
2da3d4481db78910548eee04e0532e701e4c4201
| 14,363 |
import requests
from bs4 import BeautifulSoup
def scrape(start, end, logging=True):
"""
Scrape all the reviews from dealerrater.com for the McKaig
Chevrolet Buick dealership.
Parameters:
start: the page of reviews to start scraping
end: the last page of reviews to scrape
Returns:
texts: a list of strings that are the reviews from the website
"""
PAGE_START = start
PAGE_END = end
texts = []
# Scrape the data from pages PAGE_START through PAGE_END
for page in range(PAGE_START, PAGE_END + 1):
if logging:
print("Scraping page"+str(page)+"...")
url = "https://www.dealerrater.com/dealer/McKaig-Chevrolet-Buick-A-Dealer-For-The-People-dealer-reviews-23685/page"+str(page)+"/?filter=ALL_REVIEWS#link"
res = requests.get(url)
soup = BeautifulSoup(res.content, "html.parser")
# Get the reviews on this page
for p in soup.select("p.review-content"):
texts.append(p.get_text())
return texts
|
03dfd059a82c4c56dec47d772ef2e6ade905fac7
| 14,364 |
import re
def multiple_replace(string, replacements):
"""
Given a string and a dictionary of replacements in the format:
{ <word_to_replace>: <replacement>, ... }
Make all the replacements and return the new string.
From: http://stackoverflow.com/questions/2400504/
"""
# escape the keys so that words containing regex metacharacters are matched literally
pattern = re.compile('|'.join(re.escape(k) for k in replacements.keys()))
return pattern.sub(lambda x: replacements[x.group()], string)
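# Illustrative usage sketch (added):
multiple_replace("the cat sat", {"cat": "dog", "sat": "ran"})  # -> "the dog ran"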
|
bdaf05f2f9c5de2c0742c12219e2984ed3e7699e
| 14,366 |
from typing import Dict
from pathlib import Path
from typing import Optional
import csv
def write_results(
result_dict: Dict[str, Dict[str, str]],
trans_dict: Dict[str, str],
input_filepath: Path,
output_filepath: Path,
write_csv: Optional[bool] = False,
) -> Dict[str, Dict[str, str]]:
"""
Returns processed output by combining results_dict (predicted values) and trans_dict (compound IDs).
Optionally writes results to a CSV file.
"""
# obtaining all possible column names
acd_columns = []
counter = 0
for key, value in result_dict.items():
for col, value1 in value.items():
if col not in acd_columns:
acd_columns.append(col)
counter += 1
if counter == 10 ** 4:
break
# filling in missing columns
for key, value in result_dict.items():
for col in acd_columns:
if col not in value:
result_dict[key][col] = "NaN"
# translating ID back to original IDs as provided in input file
trans_result_dict = {}
for cp_id, props in result_dict.items():
trans_result_dict[trans_dict[cp_id]] = props
# writing to csv
if write_csv is True:
acd_columns.append("compound_id")
with open(output_filepath, "w") as f:
w = csv.DictWriter(f, acd_columns)
w.writeheader()
for k in trans_result_dict:
w.writerow(
{col: trans_result_dict[k].get(col) or k for col in acd_columns}
)
return trans_result_dict
|
a791f47c9d16b451db14cd599cf2f15f04f0637c
| 14,371 |
def mass_function_abc(m, a, b, c):
"""The parametrized surpression function of the halo mass function"""
return (1 + (a / m)**b )**c
|
d4e0b1d39a67baa121a28fc644b75c9bfd714c5e
| 14,373 |
import json
def loadMetadata(fn):
"""
Load Metadata JSON File
Parameters:
-------------
fn : str - filename
Returns:
-------------
data : dict
"""
data = json.load(open(fn))
return data
|
4fa9aceed53cab0c076d4bdc474a08ebccae5ef6
| 14,377 |
def get_variation(variation_key, variations_dict, defaults_dict):
"""Convert a string to a tuple of integers.
If the passed variation_key doesn't follow this pattern '0 100', it will
return default values defined in defaults_dict.
This is currently used for defining the variation data of the A/B
experiment regarding the multi-steps form.
"""
try:
# We want to create a tuple of integers from a string containing
# integers. Anything else should throw.
rv = tuple(int(x) for x in variations_dict.get(variation_key)
.strip().split())
if (len(rv) != 2):
raise ValueError('The format is incorrect. Expected "{int} {int}"')
except Exception as e:
print('Something went wrong with AB test configuration: {0}'.format(e))
print('Falling back to default values.')
rv = defaults_dict.get(variation_key)
return rv
|
24e069fabcb9bd4dbfbd3c1ea17e859f27fdcceb
| 14,379 |
def requirements(filename):
"""Reads requirements from a file."""
with open(filename) as f:
return [x.strip() for x in f.readlines() if x.strip()]
|
81e5fa3d2a11b9152be6f55ab879b1875fd0b07d
| 14,385 |
def branch_uptodate(branch, true_on_missing_origin=True):
"""Return True is branch is up to date with origin, otherwise False,
also returns True if no remote defined"""
if branch['upstream']:
if branch['ahead'] or branch['behind']:
return False
return True
if true_on_missing_origin:
return True
return False
|
2c0db03ea469b6f75e94ab6a73ace360e73948a3
| 14,386 |
import six
import socket
import contextlib
def is_port_open(port_or_url, timeout=1):
"""Check if TCP port is open."""
if isinstance(port_or_url, six.string_types):
url = six.moves.urllib.parse.urlparse(port_or_url)
port = url.port
host = url.hostname
else:
port = port_or_url
host = "127.0.0.1"
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with contextlib.closing(sock):
sock.settimeout(timeout)
result = sock.connect_ex((host, port))
return result == 0
|
ad8d6e80cc2eaee6a5955f4f5bc0e69cac8f60b2
| 14,390 |
def get_feature_dimensions(parameters):
""" Returns dimensions (`int`s) of all node features.
"""
n_atom_types = len(parameters["atom_types"])
n_formal_charge = len(parameters["formal_charge"])
n_numh = int(
not parameters["use_explicit_H"]
and not parameters["ignore_H"]
) * len(parameters["imp_H"])
n_chirality = int(parameters["use_chirality"]) * len(parameters["chirality"])
return n_atom_types, n_formal_charge, n_numh, n_chirality
|
f1409d1dc2f4785bb2c2452b9bc08395a598f16d
| 14,394 |
def removeGenesWithExcessiveReplicationVariance(df_X, max_var=None):
"""
Removes Genes with excessive variation in the variance of their
trinary values. Assumes a single digit replication.
Parameters
----------
df_X: DataFrame
index: str <instance>.replication-digit
ex: T10.2
max_var: float
Returns
-------
DataFrame (Trinary features)
"""
df = df_X.copy()
if max_var is None:
return df
#
df.index = [i[0:-2] for i in df_X.index]
df = df.sort_index()
ser = df.groupby(df.index).std().sum()
ser = ser.sort_values()
ser_sub = ser[ser <= max_var]
columns = list(ser_sub.index)
return df_X[columns]
|
e6b6950e2694792e03940caadbeda85c8e32717c
| 14,395 |
def _py_lazy_and(cond, b):
"""Lazy-eval equivalent of "and" in Python."""
return cond and b()
|
872f382ac72d8253c61043dfe146d05775b4748d
| 14,397 |
def parse_id(hardware_id):
"""Parse Nuki ID."""
return hex(hardware_id).split("x")[-1].upper()
|
483fb1c7a864242335288f53653c461a50eab638
| 14,399 |
from typing import Dict
from typing import Any
from collections import OrderedDict  # use the concrete class; the typing alias is deprecated since Python 3.9
def sort_dict(item: dict) -> Dict[str, Any]:
"""
Sort nested dict
Input: {"b": 1, "a": {"c": 1,"b": 2}, "c": "c_st[ring]"}
Output: OrderedDict([
('a', OrderedDict([('b', 2), ('c', 1)])),
('b', 1),
('c', 'c_st[ring]')
])
"""
return OrderedDict(
(k, sort_dict(v) if isinstance(v, dict) else v)
for k, v in sorted(item.items())
)
|
0f4c042df4ea2f00dbda249f9d2c7c3488824559
| 14,400 |
import json
def load_dict(filename):
"""
Loads a dictionary stored in JSON format
:param filename:
:return:
"""
data = json.load(open(filename))
return data
|
88f286417bbdd43d4499e83750c92843a9f6231a
| 14,401 |
def nWaveRiseTime(pmax, patm=101e3, csnd=341, lamb=6.8e-8):
"""
Calculate N-wave rise time
Parameters
----------
pmax -- N-wave overpressure amplitude in Pa
patm -- atmospheric pressure in Pa
csnd -- speed of sound in m/s
lamb -- air molecular mean free path in m
Returns
-------
trise -- N-wave rise time in s
"""
trise = (lamb / csnd) * (patm / pmax)
return trise
|
cbcae9d8e3ddaeb6daab5a02a722cd7302ebf562
| 14,402 |
def find_address_of_type(host_elem, type_):
"""Return the host's address of the given type, or `None` if there
is no address element of that type.
"""
address_elem = host_elem.find('./address[@addrtype="{}"]'.format(type_))
if address_elem is not None:
return address_elem.get('addr')
|
ff821703d076865da3c6efe0d2d760c4fcb2c997
| 14,407 |
import re
def fuzzyfinder(user_input,collection):
"""
fuzzy matching, to obtain a fuzzy matched list.
>>>collection = [
"user_name",
"api_user",
"school",
"email"
]
>>>fuzzyfinder("user",collection)
["user_name","api_user"]
"""
suggestions = []
pattern = ".*?".join(user_input)
regex = re.compile(pattern)
for item in collection:
match = regex.search(item)
if match:
suggestions.append((len(match.group()),match.start(),item))
return [x for _, _, x in sorted(suggestions)]
|
db01f13f3caf5dc9a21cfb180776c22b589f1a28
| 14,409 |
def isChildUri(parentUri, childUri):
"""Return True, if childUri is a child of parentUri.
This function accounts for the fact that '/a/b/c' and 'a/b/c/' are
children of '/a/b' (and also of '/a/b/').
Note that '/a/b/cd' is NOT a child of 'a/b/c'.
"""
return parentUri and childUri and childUri.rstrip("/").startswith(parentUri.rstrip("/")+"/")
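# Illustrative usage sketch (added):
isChildUri('/a/b', '/a/b/c')     # -> True
isChildUri('/a/b/c', '/a/b/cd')  # -> False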
|
a28a386c38a4d6722f6b7c8bcbeafb8092dbbe77
| 14,419 |
def size(b):
"""
Returns the size in bytes of the first netstring in the provided bytes object.
WARNING: This function doesn't check for netstring validity.
THROWS:
ValueError if cannot determine size
"""
try:
slen = b[:b.find(b':')].decode('ascii')
return 2 + len(slen) + int(slen)
except:
raise ValueError
|
2b50a555e29c6d7cbdc419d3dfb336d60f907d0c
| 14,420 |
import torch
def get_output_size(model, input_shape=(1, 3, 224, 224), device="cpu", dtype='float32'):
"""
Returns the shape of the convolutional features in output to the model.
Parameters
----------
model pytorch model,
neural network model.
input_shape: tuple of int,
shape of the images in input to the model in the form (batch_size, channels, height, width).
Defaults to (1, 3, 224, 224).
device: string,
device for the gradient computation. Choose between "cpu" and "gpu:x", where x is the number of the GPU device.
dtype: string,
datatype for the model. Choose between 'float32' and 'float16'. Defaults to 'float32'.
Return
------
output_size : int,
shape of the flattened convolutional features in output to the model.
Note: It is not possible to run model(x) on CPU with float16. To avoid problems, the model is cast to float32 for this
computation and then it is converted back to float16.
"""
if dtype == "float16":
model.float()
dummy_input = torch.ones(input_shape).to(device)
if model.name[0:12] == "efficientnet":
output_size = model.extract_features(dummy_input).shape[1:].numel()
else:
output_size = model(dummy_input).shape[1:].numel()
if dtype == "float16":
model.half()
return output_size
|
f9c4e79e2c38a424c723cfed6fc73fc63ddc3142
| 14,421 |
def str_function(connectable) -> str:
"""__str__ function for OutputBase and InputBase."""
infos = []
if connectable.owner:
infos.append('owner: %s' % connectable.owner)
if connectable.connected:
infos.append('connected')
else:
infos.append('not connected')
return '%s(%s)' % (type(connectable).__name__, ', '.join(infos))
|
5ed2ba8575314c0fdf9046988f3977ab262ff0af
| 14,423 |
import re
def check_email(email):
"""
Checks an email format against the RFC 5322 specification.
"""
# RFC 5322 specification as a regex; the pieces are concatenated so that no
# stray newlines or indentation end up inside the pattern.
regex = ("(?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|\""
         "(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])"
         "*\")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:"
         "(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9]))\.){3}(?:(2(5[0-5]|[0-4][0-9])|1"
         "[0-9][0-9]|[1-9]?[0-9])|[a-z0-9-]*[a-z0-9]:(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a"
         "\x53-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\])")
if re.search(regex, email):
return True
return False
|
522547471730c6975246c301492f5b7676032c75
| 14,430 |
def calc_check_digit(number):
"""Calculate the check digit for the number."""
alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
cutoff = lambda x: x - 9 if x > 9 else x
s = sum(
cutoff(alphabet.index(n) * 2) if i % 2 == 0 else alphabet.index(n)
for i, n in enumerate(number[::-1]))
return str((10 - s) % 10)
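# Illustrative usage sketch (added): for a plain digit string this behaves like the
# Luhn scheme, e.g. the check digit for "7992739871" is "3".
calc_check_digit("7992739871")  # -> "3"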
|
967d9ac7eae3e45b42c6bb4e366345fd34207d0d
| 14,433 |
def left_child(node, new_node=None):
""" Set left child: left_child(node, new_left_child); Get left node: left_child(node). """
if new_node is not None:
node[1] = new_node
return node[1]
|
baabfbeb4acb7e8a2846b9a4a45b9d993619a1e4
| 14,437 |
from datetime import datetime
def prettify_timestamp(value: str) -> str:
"""
Returns a pretty version of a timestamp object.
Current format:
- %b short name of month like Mar, Jun
- %d day of the month from 1 to 31
- %Y year in 4 digit format
"""
return datetime.utcfromtimestamp(float(value)).strftime("%b %d %Y")
|
82faefee2f98db33665c81c309e531d6dacab099
| 14,438 |
def contains(text, pattern):
"""Return a boolean indicating whether pattern occurs in text."""
assert isinstance(text, str), 'text is not a string: {}'.format(text)
assert isinstance(pattern, str), 'pattern is not a string: {}'.format(pattern)
#funny storytime! initially did not pass because I falsely remembered a lack of explicitly written "else" statement resulting in a default return of "False". In truth, it defaults to "None"
#Runtime: O(n) because it must go through text to find the pattern. Best case would be O(1), like if the pattern/needle was at the very front of the text/haystack.
#Space: O(1) because it will always be simply true or false. Boolean, boom!
if pattern in text:
return True
else:
return False
|
4eff4e8d69e7843e2a4e4043df29139d92ded772
| 14,439 |
from typing import List
def one_minus(data: List[float]) -> List[float]:
"""
return 1 - each element in list
"""
return list(map(lambda elem: 1 - elem, data))
|
bb5c9a3d27866b408e519541778c7cdb8bd9ac13
| 14,441 |
def exercise_output(template):
"""Outputs the way specified in the exercise"""
result = """Marca: {0}
Modelo: {1}
Preco: {2}
Motor: {3}
Ano: {4}
Km: {5}
Combustivel: {6}
Cambio: {7}
Direcao: {8}
Cor: {9}
Ar-cond: {10}
Opcionais: {11}\n"""
return result.format(
template["Brand"],
template["Model"],
template["Price"],
template["Motor"],
template["Year"],
template["Odometer"],
template["Fuel"],
template["Gear"],
template["Steering"],
template["Color"],
template["Air-Conditioning"],
template["Optionals"]
)
|
0944b47d28c8b95c7eab32b799066af14b2b95ea
| 14,444 |
def _GetMetdataValue(metadata, key):
"""Finds a value corresponding to a given metadata key.
Args:
metadata: metadata object, i.e. a dict containing 'items'
- a list of key-value pairs.
key: name of the key.
Returns:
Corresponding value or None if it was not found.
"""
for item in metadata['items']:
if item['key'] == key:
return item['value']
return None
|
e4ec468fe1e79605d3d7199a703981bae02bfaa3
| 14,447 |
import re
def match_is_ipv4_address(value):
"""Match given value as a valid dotted-quad IPv4 address."""
# Apply the dotted-quad pattern to the string and detect a mismatch
try:
match = re.search(r'^(\d+)\.(\d+)\.(\d+)\.(\d+)$', value)
except TypeError:
return u'{0} must be a string in IPv4 dotted-quad notation'.format(
repr(value))
if not match:
return u'"{0}" must be in IPv4 dotted-quad notation'.format(
value)
# Validate the range of each octet
octets = [int(x) for x in match.groups()]
for idx, octet in enumerate(octets):
if octet > 255:
return '{0} octet of "{1}" exceeds 255'.format(
['1st', '2nd', '3rd', '4th'][idx], value)
return None
|
d4573d5919d1811b83c26928f3e403d070c41f37
| 14,449 |
def _parse_header(path):
"""Parses all GAMMA header file fields into a dictionary"""
with open(path) as f:
text = f.read().splitlines()
raw_segs = [line.split() for line in text if ':' in line]
# convert the content into a giant dict of all key, values
return dict((i[0][:-1], i[1:]) for i in raw_segs)
|
93e512388f3c5c5f8ee0e9f0365fa5328b0ea864
| 14,452 |
def parse_trousers_input_args(s):
"""Parses Trspi family's input arguments.
Given a string form of the input arguments of a trousers API, the input arguments
are parsed into tokens and then converted to tuples. For example:
"BYTE *s, unsigned *len"
->
[("BYTE *", "s"), ("unsigned *", "len")]
Args:
s: String representation of the input arguments of a certain Trspi function.
Returns:
A list of tuples in form of (data type, variable name).
"""
arr = s.split(',')
for i, p in enumerate(arr):
p = p.strip()
# The '*' sticks with the variable name, e.g., UINT64 *offset, so the
# separator could be the last ' ' or '*'.
pos = p.strip().rfind('*')
if pos == -1:
pos = p.rfind(' ')
if pos == -1:
pos = len(p)
var_type, var_name = p[:pos + 1].strip(), p[pos + 1:].strip()
arr[i] = (var_type, var_name)
return arr
|
366edb8d3b8f1369b8c4b095e037a2192b056e0d
| 14,453 |
from pathlib import Path
def make_unique_filename(name):
""" Creates filename that does not collide with existing files.
May add random stuff between name and extension to make filename unique.
Returns Path object.
May return original name when fails to construct a unique name.
"""
name = Path(name)
parent, stem, ext = name.parent, name.stem, name.suffix
result = name
for counter in range(0, 1000): # Very large number.
if not result.exists():
break
result = parent/(stem + '.{0}'.format(counter) + ext)
return result
|
6da56fc220ca7cea6f4dec9ab1c9fb760c174178
| 14,456 |
def columnsBySubmission(submissions, columns):
""" create map submissionName -> set(columnNames)
"""
columnsBySubm = {}
for submission in submissions.keys():
template = submissions[submission]
columnsBySubm[submission] = set(columns[template].keys())
return columnsBySubm
|
8364faef492941fb31008e5b31ee570bd2d29a04
| 14,457 |
def is_password_valid_with_old_rules(dataset):
""" Validate password according to the old rules """
letter_count = dataset['password'].count(dataset['letter'])
return int(dataset['first']) <= letter_count and letter_count <= int(dataset['last'])
|
99a63ece8e3b8520fd71028bd01bb3a1a7e677a7
| 14,462 |
def _f(x, t, s, cls):
"""Return True if x equals t,
or x is an instance of cls and x equals cls(t),
or x.lower().startswith(s)"""
return x == t or \
(isinstance(x, cls) and x == cls(t)) or \
(isinstance(x, str) and x.lower().startswith(s))
|
91d9c518b58528f01034ca67738c220a17f7f40d
| 14,465 |
import threading
import functools
def synchronized(wrapped):
"""Simple synchronization decorator.
Decorating a method like so:
.. code-block:: python
@synchronized
def foo(self, *args):
...
ensures that only one thread will execute the foo method at a time.
"""
lock = threading.RLock()
@functools.wraps(wrapped)
def wrapper(*args, **kwargs):
with lock:
return wrapped(*args, **kwargs)
return wrapper
|
85af7bbd8b7d72f13bfaecc5f1df459745358ab5
| 14,466 |
def counter(countables):
"""
Counter for counting the values inside a particular list or dict.
This is just a scratch/vanilla version of collections.Counter
Args:
countables: List of countables to be counted.
"""
counts = dict()
for k in countables:
if k not in counts:
counts[k] = 1
else:
counts[k] += 1
return counts
|
42bcf22f000de70e0453f9f9078ea8d7c5f74db2
| 14,468 |
def get_alignment_pdb_id(alignment):
"""
Returns a string of the four letter PDB ID found in alignment
:param alignment:
:return: 'pdb id'
"""
pdb_chain_id = alignment.hit_def.encode('ascii').split()[0]
pdb_id = pdb_chain_id.split('_')[0].lower()
return pdb_id
|
35a6abcd18e411328e5b7fb8c5a4f1dba8fe2391
| 14,470 |
import socket
def str_to_inet(ip: str) -> bytes:
"""
Converts a string representation of IP address to binary representation.
:param ip: IP like - "123.45.67.89"
:return: 32 bit representation of "123.45.67.89" like - '{-CY'
"""
try:
return socket.inet_pton(socket.AF_INET, ip)
except OSError:
return socket.inet_pton(socket.AF_INET6, ip)
|
3ab520701ec0271499d3512425a939803ac0adc9
| 14,473 |
def SumCoeffsOverSet(summing_set, A):
"""Returns the sum of coefficients corresponding to the summing set."""
return sum(A[i - 1] for i in summing_set)
|
2fa61493d4abd07cf24670a56d4a56d08b5a602b
| 14,479 |
def snake_to_camel(snake_case_string: str) -> str:
"""
Takes in a `snake_case` string and returns a `camelCase` string.
:params str snake_case_string: The snake_case string to be converted
into camelCase.
:returns: camelCase string
:rtype: str
"""
initial, *temp = snake_case_string.split("_")
return "".join([initial.lower(), *map(str.title, temp)])
|
4ef8fa72580739dbedfbac5bf9f95247f5ea69c3
| 14,481 |
def parse_int_list(text):
"""Parse a string into a list of integers
For example, the string "1,2,3,4" will be parsed to [1, 2, 3, 4].
Parameters
----------
text : str
String to parse
Returns
-------
List[int]
Parsed integer list
"""
result = [int(i) for i in text.split(',')]
return result
|
f83b21c9038e5ea8eb2c0d53ad94479cab9258f3
| 14,485 |
def cal_error(array, typeb = 0):
"""Calculate all errors of the input array.
Args:
array (numpy.array): The data measured directly for a single
physical datum. We use the mean value as the reliable
measurement and obtain the type-A error along the way.
typeb (float): The type-B error collected directly from the
instruments.
Returns:
mean (float): The mean value of the array.
delta_a (float): The type-A error of the array.
error (float): The merged error of type-A and type-B
"""
size = array.size
mean = array.mean()
std = array.std(ddof = 1)
params = {
3: 2.48,
4: 1.59,
5: 1.204,
6: 1.05,
7: 0.926,
8: 0.834,
9: 0.770,
10: 0.715
}
delta_a = std * params[size]
if typeb == 0:
return mean, delta_a
delta_b = typeb
error = (delta_a**2 + delta_b**2) ** 0.5
# return the merged error, as documented in the docstring
return mean, delta_a, error
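# Illustrative usage sketch (added; assumes numpy is available):
import numpy as np
arr = np.array([10.1, 10.3, 9.9, 10.0, 10.2])  # 5 samples -> Student-t factor 1.204
mean, delta_a = cal_error(arr)                 # mean = 10.1, delta_a ~ 0.19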
|
f0fc8832f730189e4f2a0597e1342f94ed2d8717
| 14,492 |
def device_mapper(os_type: str, proto: str = "netmiko"):
"""
map an os type to a netmiko device_type
:params os_type: type str
:params proto: type str, default "netmiko"
:returns: device_type string
"""
if proto == "netmiko":
device_types = {
"ios": "cisco_ios",
"iosxr": "cisco_xr",
"iosxe": "cisco_xe",
"nxos": "cisco_nxos",
"eos": "arista_eos",
}
try:
result = device_types[os_type]
except KeyError:
return os_type
elif proto == "netconf":
device_types = {
"csr": "csr",
"iosxr": "iosxr",
"iosxe": "iosxe",
"nxos": "nexus",
"junos": "junos",
}
try:
result = device_types[os_type]
except KeyError:
return "default"
else:
result = os_type
return result
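# Illustrative usage sketch (added):
device_mapper("ios")                    # -> "cisco_ios"
device_mapper("nxos", proto="netconf")  # -> "nexus"
device_mapper("junos")                  # -> "junos" (unknown netmiko key falls back to the input)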
|
81ad4c4dd86c7e6930cf0fb070681872783a5fb8
| 14,495 |
def strip_to_category(category):
"""Strip prefix and postfix from category link.
Parameters
----------
category : str
Returns
-------
stripped_category : str
String with stripped category
"""
if category.startswith('[[Kategori:'):
category = category[11:-2]
elif category.startswith('Kategori:'):
category = category[9:]
return category.split('|')[0]
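# Illustrative usage sketch (added):
strip_to_category('[[Kategori:Stockholm|huvudstad]]')  # -> 'Stockholm'
strip_to_category('Kategori:Stockholm')                # -> 'Stockholm'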
|
d4a274757aed9b3fbe2e9c5c187949a19b53e3ad
| 14,496 |
def is_hexstring(string):
"""
Determines if a string is a hexstring.
:param Union[ByteString, str] string: A string.
:return: Whether the string's length is even and all of its characters
are in [0-9a-fA-F].
"""
if isinstance(string, str):
string = string.encode()
return not len(string) % 2 and all(
0x30 <= c <= 0x39 or 0x61 <= c <= 0x66 for c in string.lower()
)
|
7263cbbb464d805b6e5f0142a2ff6772894c4837
| 14,504 |
def min_ind_of_anch(anchor_info):
""" Finds the index of min ID. """
anch_id = list(anchor_info.keys())
min_id_ind = anch_id.index(min(anch_id))
return min_id_ind
|
89e77679a21f36016174cf8a52ec4b210e1ad295
| 14,506 |
def name_from_id(id):
"""Hash the id into a run name
Construct the name of the run from the id dictionary
Args:
id (dict): id associated to the run
Returns:
str: name of the run associated to the dictionary ``id``
"""
keys = list(id.keys())
keys.sort()
name = ''
for k in keys:
name += k + ':' + str(id[k]) + ','
return name.rstrip(',')
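# Illustrative usage sketch (added): keys are sorted before joining.
name_from_id({"seed": 3, "lr": 0.1})  # -> "lr:0.1,seed:3"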
|
ad8eeed94e7f22e96c197753ed59652dcbfcda8e
| 14,508 |
def calculate_clinker(annual_production, clinker_cement_ratio):
"""
Calculate annual clinker production based on given cement/clinker ratio
:param annual_production: Reported annual production
:param clinker_cement_ratio: Amount of clinker produced per cement output
:return: Clinker per year
"""
return annual_production * clinker_cement_ratio
|
f08f4757ee27c2f4e54fd7acbbdaea398bee1a6e
| 14,509 |
def accuracy(task_preds, task_targets):
"""Computes the accuracy of a given task.
:param task_preds: Predicted labels.
:param task_targets: Ground-truth targets.
:return: a float metric between 0 and 1.
"""
assert task_preds.size > 0
assert task_targets.size > 0
assert task_targets.size == task_preds.size, f"{task_targets.size} vs {task_preds.size}"
metric = (task_preds == task_targets).mean()
assert 0. <= metric <= 1.0, metric
return metric
|
ed8bdef02253c952213b87ee39a86505315e4077
| 14,511 |
def strip(table, col):
"""Removes column col from the given table
Preconditions: table is a (non-ragged) 2d List,
col valid column"""
n_row = len(table)
n_col = len(table[0])
assert col < n_col, repr(col) + " : the column to delete exceeds the total number of columns!"
for row in range(n_row):
table[row] = table[row][:col]+table[row][col+1:]
# print(table)
return table
|
09171295b7ed46d12188eb8c882a60f5fce80647
| 14,516 |