content (string, 39–9.28k chars) | sha1 (string, 40 chars) | id (int64, 8–710k)
---|---|---|
from typing import Dict
from typing import Any
def remove_unused_flags(user_calc_params: Dict[str, Any]) -> Dict[str, Any]:
"""
Removes unused flags in the INCAR, like EDIFFG if you are doing NSW = 0.
Parameters
----------
user_calc_params
User-specified calculation parameters
Returns
-------
Dict
Adjusted user-specified calculation parameters
"""
# Turn off opt flags if NSW = 0
opt_flags = ("ediffg", "ibrion", "isif", "potim", "iopt")
if user_calc_params.get("nsw", 0) == 0:
for opt_flag in opt_flags:
user_calc_params.pop(opt_flag, None)
# Turn off +U flags if +U is not even used
ldau_flags = (
"ldau",
"ldauu",
"ldauj",
"ldaul",
"ldautype",
"ldauprint",
"ldau_luj",
)
if not user_calc_params.get("ldau", False) and not user_calc_params.get(
"ldau_luj", None
):
for ldau_flag in ldau_flags:
user_calc_params.pop(ldau_flag, None)
return user_calc_params | 6772212ab5ab398bc0db750219d98f6489dbcd08 | 649,121 |
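# Usage sketch (illustrative, not part of the dataset row above): with NSW = 0
# the relaxation flags are stripped while unrelated INCAR keys survive.
assert remove_unused_flags({"nsw": 0, "ediffg": -0.02, "encut": 520}) == {"nsw": 0, "encut": 520}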
import torch
def atomic(species, coordinates, model, AEVC, device=None):
"""
Compute atomic contributions.
Parameters
----------
    species: torch.Tensor
        Atomic species (mapped to indices)
    coordinates: torch.Tensor
        Atomic coordinates
model: torch.nn.Module
Trained model
AEVC: torchani.AEVComputer
AEV computer
device:
Computation device
Returns
-------
    torch.Tensor
Atomic contributions
"""
if device is None:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Move data to device and add batch dimension
species = species.to(device).unsqueeze(0)
coordinates = (
coordinates.clone().detach().requires_grad_(True).to(device).unsqueeze(0)
)
aevs = AEVC.forward((species, coordinates)).aevs
atomic_contributions = model._forward_atomic(species, aevs)
assert atomic_contributions.shape == species.shape
return atomic_contributions | 30121ef830514ed18022bb8adfb2eb0e862b8401 | 143,968 |
def has_duplicates(n):
""" Check if the given list has unique elements
Uses the property of sets:
If converted into a set it is guaranteed to have unique elements.
-> Compare number of elements to determine if the list contains duplicates
"""
if type(n) is not list:
raise TypeError("Given input is not a list")
if len(n) != len(set(n)):
return True
return False | 64db6a2da62432ce2fe9de3a29764a6c5f5b58a0 | 634,530 |
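# Usage sketch (illustrative): converting to a set drops repeats, so a length
# mismatch signals duplicates.
assert has_duplicates([1, 2, 2]) is True
assert has_duplicates([1, 2, 3]) is False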
def title_filter(entry, prefix):
"""This function return True if the entry's title starts with prefix."""
return entry['title'].startswith(prefix) | d91320578c563c2f510d522352927d13780a2816 | 436,550 |
def capitalize(match):
"""
capitalize a regex group
    :param match: regex match object
    :return: capitalized string
"""
return match.group(1).capitalize() | 64e7cf41e1a4acdf696673b219bf2c27faf99548 | 538,761 |
def fixture_mktmpfile(tmp_path_factory):
"""Fixture to create a temporary file with 'data' as content"""
def _mktmpfile(data):
"""Fixture to create a temporary file with 'data' as content"""
fn = tmp_path_factory.mktemp("mktmpfile")
fn_file = str(fn) + "/file"
with open(fn_file, "w") as fn_handle:
fn_handle.write(data)
return fn_file
return _mktmpfile | e94038e76c140604643702aaa04c6eb7bd70ff75 | 606,829 |
def _get_unique_index_values(idf, index_col, assert_all_same=True):
"""
Get unique values in index column from a dataframe
Parameters
----------
idf : :obj:`pd.DataFrame`
Dataframe to get index values from
index_col : str
Column in index to get the values for
assert_all_same : bool
Should we assert that all the values are the same before
returning? If True, only a single value is returned. If
False, a list is returned.
Returns
-------
str, list
Values found, either a string or a list depending on
``assert_all_same``.
Raises
------
AssertionError
``assert_all_same`` is True and there's more than one
unique value.
"""
out = idf.index.get_level_values(index_col).unique().tolist()
if assert_all_same:
if len(out) > 1:
raise AssertionError(out)
return out[0]
return out | 306a919a547a6d0056a4547daa50e6149d840910 | 698,766 |
def split_unescape(s, delim, escape='\\', unescape=True):
"""
>>> split_unescape('foo,bar', ',')
['foo', 'bar']
>>> split_unescape('foo$,bar', ',', '$')
['foo,bar']
>>> split_unescape('foo$$,bar', ',', '$', unescape=True)
['foo$', 'bar']
>>> split_unescape('foo$$,bar', ',', '$', unescape=False)
['foo$$', 'bar']
>>> split_unescape('foo$', ',', '$', unescape=True)
['foo$']
"""
ret = []
current = []
itr = iter(s)
for ch in itr:
if ch == escape:
try:
                # consume the next character and keep it literally; it was escaped
if not unescape:
current.append(escape)
current.append(next(itr))
except StopIteration:
if unescape:
current.append(escape)
elif ch == delim:
# split! (add current to the list and reset it)
ret.append(''.join(current))
current = []
else:
current.append(ch)
ret.append(''.join(current))
return ret | f19c3bed4a15dd35f1e9f1d7461916ee728cb2fb | 236,442 |
def getDomainFromFP(fp):
""" Returns domain number from file path """
path, fileInfo = fp.split("LSBU_") #split returns ["/user/.../data/subdomain_8/LSBU", "<timestep>_<subdomain>.vtu"]
timestep, domain = fileInfo.split("_")
return domain[:-4] | 9720dd8bf23bb76574eaa50f3ad92714e32b85df | 269,800 |
def group_concat(df, gr_cols, col_concat):
"""Concatenate multiple rows into one."""
df_out = (
df.groupby(gr_cols)[col_concat]
.apply(lambda x: " ".join(x))
.to_frame()
.reset_index()
)
return df_out | 2fd5e2293f09f780f5b8afa607a6cf6bd1254a3e | 445,032 |
def item_for_api(method, data_type, item):
"""Return a Bulk API item."""
return {
'method': method,
'data_type': data_type,
'data': item
} | a2f696c26666b98d309aa7118a0bea3d95b3468d | 294,740 |
def bind(fct, value):
"""
    Returns a callable representing the function 'fct' with its
    first argument bound to the value.
    If g = bind(f, 1) and f is a function of x, y, z,
    then g(y, z) will return f(1, y, z)
"""
def callback(*args, **kwargs):
return fct(value, *args, **kwargs)
return callback | 50e27c12c20dd135261b0a6798c50aa9b1cac1a4 | 300,133 |
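# Usage sketch (illustrative): bind the first positional argument.
def _f(x, y, z):
    return (x, y, z)
_g = bind(_f, 1)
assert _g(2, 3) == (1, 2, 3)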
def IsConfigUserInputValid(user_input, valid_el):
"""Sanity check for user input.
Args:
user_input: str User input.
valid_el: list List of valid elements.
Returns:
bool True if user input is valid, False otherwise.
"""
if not user_input: return False
    return str(user_input) in valid_el | 0f59802e7f2e7492d79cfa9cef3bba878d3d15fb | 143,105 |
def nested(l):
"""Returns a nested dictionary where keys are taken from a list."""
d = tmp = {}
for k in l:
tmp[k] = {}
tmp = tmp[k]
return d | aa0d665e41c518ae8b644b70cf3a4f75779d9397 | 401,881 |
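# Usage sketch (illustrative): each key becomes one nesting level.
assert nested(["a", "b", "c"]) == {"a": {"b": {"c": {}}}}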
def next_page(driver):
"""
Navigate to next page
:param driver: webdriver
:type driver: webdriver
:return: webdriver
"""
driver.find_element_by_id("page_next").click()
return driver | 73873364e9520b5b1a1bb319ec466601954b595d | 291,657 |
def distance_threshold(threshold):
"""
    A higher order function that returns a predicate which
    is only true if the distance is less than or equal to the threshold.
    """
    def predicate(distance):
        return distance <= threshold
return predicate | be1b4855cd10457813b193a5652efcb9a3274893 | 436,169 |
def rss_line_segment(params, ols_data):
"""
For the line model defined by:
y = v + m(x-u)
For data {x_i, y_i}, computes the RSS (residual sum of squares), defined
as
    Sum[ (y_i - v - m(x_i - u))^2 ]
For OLS, this is the complete RSS. For bkpt models, we can add these over
each linear segment.
"""
u, v, m = params
num_data = ols_data[0]
sum_x = ols_data[1]
sum_y = ols_data[2]
sum_xx = ols_data[3]
sum_yy = ols_data[4]
sum_xy = ols_data[5]
term = (v - m * u)
return (sum_yy - 2.0 * term * sum_y - 2.0 * m * sum_xy
+ m * m * sum_xx + 2.0 * m * term * sum_x + term * term * num_data) | dcfd8b8202e362efc3fba9c0d752ee2c2dacb6c0 | 215,157 |
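# Consistency sketch (illustrative): the expanded sums reproduce the direct RSS
# for y = v + m*(x - u) on a tiny dataset that the line fits exactly.
_xs, _ys = [0.0, 1.0, 2.0], [1.0, 3.0, 5.0]
_ols = (len(_xs), sum(_xs), sum(_ys),
        sum(x * x for x in _xs), sum(y * y for y in _ys),
        sum(x * y for x, y in zip(_xs, _ys)))
assert abs(rss_line_segment((0.0, 1.0, 2.0), _ols)) < 1e-12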
def midpoint(rooms):
"""
Helper function to help find the midpoint between the two rooms.
Args:
rooms: list of rooms
Returns:
        tuple: Midpoint coordinates (x, y)
"""
return rooms[0] + (rooms[0] + rooms[2]) // 2, rooms[1] + (rooms[1] + rooms[3]) // 2 | 60b3ba53fb15154ff97ab9c6fa3cf1b726bc2df1 | 705,603 |
import six
def _BuildErrorMessage(**kwargs):
"""Builds an error message with given kwargs."""
return ','.join('%s=%s' % item for item in six.iteritems(kwargs)) | 2e3f4af69a26ee1286eef54cb598742dbc29100b | 538,675 |
def prodigal_gene_start(rec_description: str) -> int:
"""Get a gene start index from a Prodigal FASTA header
Examples
--------
Given the following Prodigal FASTA output header, parse the gene start index (i.e. 197)
>>> prodigal_gene_start("k141_2229_1 # 197 # 379 # 1 # ID=4_1;partial=00;start_type=ATG;rbs_motif=AGGAGG;rbs_spacer=5-10bp;gc_cont=0.437")
197
Parameters
----------
rec_description : str
SeqRecord description of Prodigal FASTA header
Returns
-------
int
Gene start index
"""
return int(rec_description.split('#')[1].strip()) | d0aaa9d09d67dea75537f2f48c550a9df31bcf45 | 21,302 |
def decode_labels(y_encoded, labels):
"""
Convert autoincrementing integers back to strings.
"""
return [labels[i] for i in y_encoded] | 0668dce9d315a6b68a8517e8826249be11e73dfc | 114,514 |
def guess_name_from_uri(uri):
"""
Given a URI like host.tld/bla/fah/jah or host.tld/bla/fah/jah/, returns jah.
"""
split_uri = uri.split('/')
if split_uri[-1]:
return split_uri[-1] # no trailing slash
else:
return split_uri[-2] # has trailing slash | 3ae4233be09962caf85f45a6e2466cf266b3e81b | 215,994 |
import re
def extract_from_between_quotations(text):
"""Get everything that's in double quotes
"""
results = re.findall('"([^"]*)"', text)
return [i.strip() for i in results] | 9f8a34591546050385886431cdcae0a70ac6c7dd | 338,243 |
def _parse_coord(coord, coord_dir, positive_sign, negative_sign):
"""
Convert coordinate to single float value, replacing degree minutes with
decimal fraction and taking into consideration the direction specified
:param coord: coordinate string
:type coord: str
:param coord_dir: direction
:param positive_sign: direction when coordinate is positive
:param negative_sign: direction when coordinate is negative
:rtype: float
"""
dot = coord.index('.')
if coord_dir != positive_sign and coord_dir != negative_sign:
raise ValueError("Coordinate direction '{}' is neither '{}' nor '{}'"
.format(coord_dir, positive_sign, negative_sign))
sign = 1 if coord_dir == positive_sign else -1
return sign * (float(coord[:dot - 2]) + float(coord[dot - 2:]) / 60) | 0853ac851fb8bb8ecefb6af944975f7f7270b315 | 452,792 |
def normalize_dictionary(data_dict):
"""
Converts all the keys in "data_dict" to strings. The keys must be
convertible using str().
"""
    # Iterate over a snapshot: mutating a dict while iterating over it
    # raises a RuntimeError in Python 3.
    for key, value in list(data_dict.items()):
if not isinstance(key, str):
del data_dict[key]
data_dict[str(key)] = value
return data_dict | 50ffde831746285773c46e9c8dae4300bef0a14c | 531,639 |
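# Usage sketch (illustrative): non-string keys are re-inserted as strings.
assert normalize_dictionary({1: "a", "b": 2}) == {"1": "a", "b": 2}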
import yaml
def load(filePath):
"""
Parses YAML document using filepath, returns dict.
Input(s): filePath (str) \n
Output(s): yamlDict (dict)
"""
    with open(filePath, 'r') as stream:
        yamlDict = yaml.safe_load(stream)
return yamlDict | c24df33b3e5c6800031b66d9239f958d2216d033 | 546,901 |
def get_url_path(value):
"""Takes a gitlab repository url and returns its path component
Example:
>>> get_url_path("https://gitlab.com/thorgate-public/django-project-template")
>>> "thorgate-public/django-project-template"
"""
if not value or not value.strip():
return value
res = value.strip()
res = res.replace('http://', '')
res = res.replace('https://', '')
    _domain, rest = res.split('/', maxsplit=1)
    return rest | 66b07d40d6b934cf9bea45fc13e11d18c2ea5fe5 | 278,863 |
def split_list(l, n_parts=1):
"""Split a list in a number of parts
Args:
l (list): a list
        n_parts (int): the number of parts to split the list in
Returns:
list: a list of n_parts lists
"""
length = len(l)
return [l[i * length // n_parts: (i + 1) * length // n_parts]
for i in range(n_parts)] | 8829deaae2517856c7a98d98f07fb6163b811b64 | 595,535 |
def remove_duplicates(config):
"""Remove duplicate entries from a list of config options."""
tmp = {}
order = []
for item in config:
try:
key, value = item.split('=', 1)
except ValueError:
raise ValueError('Malformed config option: %s' % item)
tmp[key] = value
if key not in order:
order.append(key)
return [key + '=' + tmp[key] for key in order] | 2ac1a941c7cef1d9e88ddf08f1baf9244db54510 | 335,791 |
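# Usage sketch (illustrative): the last value for a key wins, first-seen order is kept.
assert remove_duplicates(["a=1", "b=2", "a=3"]) == ["a=3", "b=2"]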
import re
def _matches(name, patterns):
"""
If name matches any pattern in patterns return True
:param name: (str) Any string
:param patterns: (iter) Iterable that contains iterables of strings that define prefix and suffix patterns to
match to. Prefix or suffix may be empty.
:return: (bool)
"""
for p in patterns:
        # A single-element pattern is padded with an empty suffix.
        prefix, suffix = p + (len(p) == 1) * ['']
pattern = r'^({prefix}).*({suffix})$'.format(prefix=prefix, suffix=suffix)
matches = bool(re.search(pattern, name))
if matches:
return True
return False | 90dabcf84293d94ffc974baddf6c7110bada80a3 | 205,616 |
import datetime
def expired(dt_obj):
"""Return True if datetime.datetime obj is not before current date.
Return False if date occurs in past"""
dt = datetime.date.today()
dt = datetime.datetime(dt.year, dt.month, dt.day)
return dt_obj < dt | 29482d5dc5d76da76006811420319d2f4d643078 | 550,435 |
import json
def _GetBotWithFewestNamedCaches(bots):
"""Selects the bot that has the fewest named caches.
To break ties, the bot with the most available disk space is selected.
Args:
bots(list): A list of bot dicts as returned by the swarming.bots.list api
with a minimum length of 1.
Returns:
One bot from the list.
"""
# This list will contain a triplet (cache_count, -free_space, bot) for each
# bot.
candidates = []
for b in bots:
try:
caches_dimension = [
d['value'] for d in b['dimensions'] if d['key'] == 'caches'
][0]
# We only care about caches whose name starts with 'builder_' as that is
# the convention that we use in GetCacheName.
cache_count = len(
[cache for cache in caches_dimension if cache.startswith('builder_')])
bot_state = json.loads(b['state'])
free_space = sum(
          [disk['free_mb'] for _, disk in bot_state['disks'].items()])
except (KeyError, TypeError, ValueError):
# If we can't determine the values, we add the bot to the end of the list.
candidates.append((1000, 0, b))
else:
# We use negative free space in this triplet so that a single sort will
# put the one with the most free space first if there is a tie in cache
# count with a single sort.
candidates.append((cache_count, -free_space, b))
return sorted(candidates)[0][2] | 5a8911985a064781fcd608f01b3ceeba8cc4d7c5 | 97,080 |
import torch
def tensor_dict_diffs(d1, d2):
"""Compare two dictionaries of tensors. The two dicts must have the
same keys.
Parameters
----------
d1: dict[any: torch.Tensor]
d2: dict[any: torch.Tensor]
Returns
-------
list: Returns the keys where tensors differ for d1 and d2.
"""
assert d1.keys() == d2.keys()
res = []
for k, v in d1.items():
if not torch.eq(v, d2[k]).all():
res.append(k)
return res | 237966eb3b3d2fb55f22670883cb58121545936a | 443,178 |
def property_mapping_to_dict(cs_data):
"""Converts the property mapping in config strategy data from an
    array like [ {'source': 'string', 'target': 'string'}, ... ] to
    a dict with { 'source-string': ['target-string', ...], ... }.
"""
property_mapping_arr = cs_data['properties']['mapping']
property_mapping = {}
for mapping_obj in property_mapping_arr:
source = mapping_obj['source']
target = mapping_obj['target']
if source in property_mapping:
property_mapping[source].append(target)
else:
property_mapping[source] = [target]
return property_mapping | b6ce1d529e6129b916ba40c96a1501d5e49b034a | 196,447 |
def readString(fobj):
"""
    Read a newline-terminated string from a binary stream, dropping the newline.
"""
return fobj.readline()[:-1].decode() | ca0e407fa8f79285c8f2564d6c8552ab4c358a85 | 264,340 |
from typing import Union
def create_search_body(from_year: Union[str, None], to_year: Union[str, None], filter_fields: dict, size: int) -> dict:
"""Create a search body that is passed on to the elasticsearch 'search' method.
:param from_year: Refers to published year, add to 'range'. Include results where published year >= from_year
    :param to_year: Refers to published year, add to 'range'. Include results where published year < to_year
:param filter_fields: Add each field and their value in filter_fields as a filter term.
:param size: The returned size (number of hits)
:return: search body
"""
filter_list = []
for field in filter_fields:
# add if value is not None
if filter_fields[field]:
filter_list.append({"terms": {f"{field}.keyword": filter_fields[field]}})
if from_year or to_year:
range_dict = {"range": {"published_year": {"format": "yyyy-MM-dd"}}}
if from_year:
range_dict["range"]["published_year"]["gte"] = from_year
if to_year:
range_dict["range"]["published_year"]["lt"] = to_year
filter_list.append(range_dict)
query_body = {"bool": {"filter": filter_list}}
search_body = {"size": size, "query": query_body, "sort": ["_doc"]}
return search_body | 0601aa4505c0cc158b734c693dc9fea5d6c47e97 | 540,013 |
from typing import List
def join_paths(paths: List[List[str]], separator: str = "/") -> List[str]:
"""Collect rows of strings to string.
Args:
paths: list of stp paths defined as list of strings
separator: character to be used as separator
Returns:
list of joined stp paths
"""
    return [separator.join(path) for path in paths] | 1981a9d84b41fe50a2fb2761d6fb1109c1f207e2 | 201,316 |
def clean_text(text: str) -> str:
"""basic text cleaning of spaces and newlines
Args:
text (str): text to clean
Returns:
str: cleaned text
"""
character_list = list()
text = text.replace(' ', ' ')
text = text.replace('\n', ' ')
text = text.lower()
for character in text:
if character == ' ' \
or character.isalpha() \
or character.isdigit():
character_list.append(character)
return ''.join(character_list) | ef6ff6c1f2d1ec12a5d38eaea5518e67dde860cc | 506,766 |
from typing import List
from typing import Tuple
from typing import Any
def simple_producer(key, value) -> List[Tuple[Any, Any]]:
"""simple_producer A function that returns the key,value passed in for production via "KafkaProducerOperator"
:param key: the key for the message
:type key: Any
:param value: the value for the message
:type value: Any
:return: The Key / Value pair for production via the operator
:rtype: List[Tuple[Any, Any]]
"""
return [(key, value)] | 1c04821f7db789c73be4533f60206e5196ab98d4 | 629,335 |
def scrap_comments(sub, limit=10):
"""
    Function to scrape the top comments (top 10 by default, as was done in data collection for the training data)
    Params
    sub: submission instance
    limit: maximum number of comments to keep
    Returns: up to `limit` comments combined into a single string with spaces
    """
    # aggregate into a list
    comments_body = []
    if sub.num_comments > 0:
        for comment in sub.comments.list()[:limit]:
            # Some entries (e.g. "load more" placeholders) have no body
            try:
                comments_body.append(comment.body)
            except AttributeError:
                comments_body.append('')
return " ".join(comments_body) | e78186136b4b4f796578ecf074354b4302cfe8c8 | 598,627 |
def get_filter_arg_boolean(f, arg):
"""Convert boolean value to scheme boolean string representation."""
if arg == 1:
return '#t'
else:
return '#f' | f0d581c9515bee1c19d84994bc31cfc5b28396b5 | 266,908 |
def remove_prefix(text, prefix):
"""
Removes given prefix from text, returns stripped text.
Args:
text(str): text to remove prefix from
prefix(str): prefix to remove from text
Returns:
(str) stripped text
"""
if text.startswith(prefix):
return text[len(prefix):]
return text | 8b7285cfecce17d06a08d4d1f6b66858ffcdbb27 | 515,624 |
def get_tags_from_namespace(tag_keys_and_values, namespace):
"""Extract the tags from namespace using tag_keys_and_values.
Arguments:
tag_keys_and_values -- Dictionary mapping tag keys with their valid values.
        This dictionary is of the form:
{
'n': [0, 1, 2],
'foo': ['bar'],
}
Which says that the tag 'n' can have the possible values of 0, 1 and 2.
namespace -- A Namespace object containing the tags passed.
Returns:
A dictionary containing the tags extracted from the namespace object.
"""
tags = {}
for tag_key in tag_keys_and_values:
cli_tag_value = getattr(namespace, tag_key, None)
if cli_tag_value is None:
continue
if cli_tag_value not in tag_keys_and_values[tag_key]:
raise ValueError(
'Invalid value: {value} provided for the tag: {key}'.format(key=tag_key, value=cli_tag_value))
tags[tag_key] = cli_tag_value
return tags | fc72acfd29e3a572e9d30f2e766287052cc7660b | 348,507 |
def _loadFiles(dir_, ext=".js", files=None):
"""Creates a list of files by recursing a file director.
Files returned are filtered by the specified extension 'ext' (optional). The defaul
extension is '.js'. If 'dir_' is an ordiary file, it will be returned alone in the list.
"""
if files == None:
files = []
if dir_.is_dir():
for f in dir_.iterdir():
if f.suffix == ext and f.stem != "index":
files.append(f)
elif f.is_dir() and not f.name.startswith("_"):
_loadFiles(f, ext, files)
elif dir_.suffix == ext and dir_.stem != "index":
files.append(dir_)
return files | 3ee31cb582b2399069156b36cdfcf9c4f651910f | 445,355 |
def get_sql_name(text):
"""
Create valid SQL identifier as part of a feature storage table name
"""
# Normalize identifier
text = "".join(c.lower() if c.isalnum() else " " for c in text)
text = "_".join(text.split())
return text | 30e56ec32dbbe8b5819490a2107c6c3d26af34ba | 537,607 |
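# Usage sketch (illustrative): punctuation collapses to separators and case is folded.
assert get_sql_name("Average Price ($)") == "average_price"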
def grow(neigh, verts, exclude):
""" Grow the vertex set, also keeping track
of which vertices we can safely ignore for
the next iteration
"""
grown = set()
growSet = verts - exclude
for v in growSet:
grown.update(neigh[v])
newGrown = grown - exclude
newExclude = exclude | growSet
return newGrown, newExclude | 1579be114f1483bdaff92e012718f372e1f8a83c | 434,118 |
def get_requirements(requirements_file):
"""
Parse the specified requirements file and return a list of its non-empty,
non-comment lines. The returned lines are without any trailing newline
characters.
"""
with open(requirements_file, 'r') as fp:
lines = fp.readlines()
reqs = []
for line in lines:
line = line.strip('\n')
if not line.startswith('#') and line != '':
reqs.append(line)
return reqs | 4477ea5e376d5f1df8238eb2dcc9a76154861f17 | 437,347 |
def is_member(user):
"""
Determines if the logged in user is an authorized member since anyone
can "register" via the Google OAuth API - once registered, we need
some other way to give them access or not; namely by having them be a
part of the Member group.
"""
if user:
return user.groups.filter(name='Member').count() > 0
return False | 4f7491f69311707044f155ab58d1c91b5fad7cb2 | 161,454 |
def searchForInsert(sortedList:list, value:float)->int:
"""Search for where to insert the value for the list to remain sorted
Args:
sortedList (list): a sorted list
value (float): the value to insert into the sorted list
Returns:
int: the index where to insert the value
"""
    # Scan from the end; the first element <= value marks the insert point.
    for i in range(len(sortedList)-1, -1, -1):
        if sortedList[i] <= value:
            return i+1
    return 0 | 3974c8e7b58feb9d47aadb68f88a931ba3bd8048 | 44,165 |
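# Usage sketch (illustrative): returns the index that keeps the list sorted.
assert searchForInsert([1, 3, 5], 4) == 2
assert searchForInsert([1, 3, 5], 0) == 0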
def get_function_code(instruction_bytes):
"""
Returns the 6-bit MIPS function code from a 4 byte R-type instruction.
"""
fun = instruction_bytes & 0x3F
return fun | 8e0627d72aeca85ad66d8265aab56bec5c3b79fc | 491,857 |
def get_query(string, pos=1):
"""Get query parameter of a URL."""
try:
return string.split("?")[pos]
except IndexError:
if pos == 1:
return ""
else:
return string | 2c463a597f899306f3bda9ed9b6d16c981cd16e2 | 612,193 |
def match_content_type(filename: str) -> str:
"""
Match file extensions to content-type. A quick lightweight list.
This is needed so s3 vends the right MIME type when the file is downloaded
directly from the bucket.
"""
content_type_match = {
'json': 'application/json',
'js': 'application/javascript',
'css': 'text/css',
'map': 'application/json',
'svg': 'image/svg+xml',
'ttf': 'font/ttf',
'woff': 'font/woff',
'woff2': 'font/woff2',
'eot': 'application/vnd.ms-fontobject',
'txt': 'text/plain',
'png': 'image/png',
'jpg': 'image/jpeg',
'ico': 'image/x-icon',
'html': 'text/html',
'gif': 'image/gif'
}
try:
return content_type_match[filename.rsplit('.', 1)[1]]
except KeyError:
return 'application/octet-stream' | ff4ef5d752a96a39845baad8516e3057408c1b27 | 669,286 |
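# Usage sketch (illustrative): only the last extension counts, and unknown
# extensions fall back to the generic binary type.
assert match_content_type("app.min.js") == "application/javascript"
assert match_content_type("archive.tar.gz") == "application/octet-stream"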
import pickle
def read_pickle(filename):
"""Read object from pickle format.
Parameters
----------
filename : str
Input file.
Returns
-------
obj : object
Python object.
"""
with open(filename, "rb") as f:
return pickle.load(f) | 6852366add1352af3953279baf7c8d95085d6ea7 | 632,573 |
def build_extracted_list(input_list, subinterval):
""" A utility function to extract a number of elements from a list, leaving only a certain subset.
Generates a new list with just the subset. Creates the subset by specifying a sub-interval.
:param input_list: The list to be extracted
:type input_list: list
    :param subinterval: The sampling interval (for example, 10 means keep every 10th element).
:type subinterval: int
:return: The extracted list.
:rtype: list
"""
out = []
wait = subinterval
for i in input_list:
if wait == subinterval:
out.append(i)
            wait = 1
else:
wait += 1
return out | 8e9bf9c9ac407a7976d4ec3a16f1dd36aed8af48 | 644,400 |
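# Usage sketch (illustrative): keeps every `subinterval`-th element, starting with the first.
assert build_extracted_list(list(range(10)), 3) == [0, 3, 6, 9]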
async def async_create_cloudhook(cloud):
"""Create a cloudhook."""
websession = cloud.hass.helpers.aiohttp_client.async_get_clientsession()
return await websession.post(
cloud.cloudhook_create_url, headers={
'authorization': cloud.id_token
}) | b45a6896f8b56cfd2c123e7dbd30a91e0d973a39 | 88,729 |
def path_contains_data(bucket, root_path, min_file_size=0, file_extension=None):
"""Checks if there are any files under this path that contain files of size greater than 0
Args:
bucket (boto.s3.bucket.Bucket): bucket within which to check.
root_path (str): Should be the path relative to the bucket, does not support wildcards.
file_extension (str): optional filter for file type, e.g. setting this to '.gz' will only be True if there
are .gz files with in the path.
min_file_size (int): sometimes we may have empty gz files so set a minimum file size for returning True.
Files of exactly this size will be excluded.
Returns:
bool
"""
for key in bucket.list(root_path):
if file_extension and not key.name.endswith(file_extension):
continue
if key.size > min_file_size:
return True
return False | 0d917ede77a7a0959516ee1ab3519858c65c885d | 78,953 |
import yaml
def load_yaml(file_path):
"""Load yaml file located at file path, throws error if theres an issue
loading file.
"""
with open(file_path) as fin:
content = yaml.load(fin, Loader=yaml.FullLoader)
return content | 33baca8cb28a935d6a8d0dd643cd7cf716c191ac | 692,911 |
import struct
import socket
import binascii
def compact(ip, port, ascii=False):
"""
Compact IP address and port.
>>> compact('127.0.0.1', 6667, ascii=True)
'7f0000011a0b'
    >>> compact('127.0.0.1', 6667) == bytes.fromhex('7f0000011a0b')
True
"""
compacted = struct.pack('!4sH', socket.inet_aton(ip), port)
    return binascii.hexlify(compacted).decode() if ascii else compacted | 51aa9d2ece55fce558855763e7c4a965d4d800cb | 38,712 |
def get_from_decomposition(decomposition):
    """Returns a number from a prime decomposition"""
    result = 1
    for key in decomposition:
        # integer exponentiation keeps exact values; math.pow would return floats
        result *= key ** decomposition[key]
    return result | ed66dda787f22306643fda8e2ff497b4f2e820cb | 691,510 |
import yaml
def load_yaml(filename):
""" Load yaml from file.
Args:
filename (str): The filename to load.
Returns:
d (dict): The loaded yaml dictionary.
"""
with open(filename, 'r') as f:
d = yaml.safe_load(f)
return d | d0c9936ab4d66f51b4ccc8a55bc99f327acf0200 | 300,065 |
def __nuwalther(zed):
"""
Calculate the kinematic viscosity for the Walther equation (ASTM D341).
Parameters
----------
zed: scalar
The z-parameter of the Walther equation.
Returns
-------
kin: scalar
The kinematic viscosity.
"""
kin = (zed - 0.7) - 10 ** (-0.7487 - 3.295 * (zed - 0.7) +
0.6119 * (zed - 0.7) ** 2 - 0.3193 *
(zed - 0.7) ** 3)
return kin | ba7de580cc781fc726a193e10d3133cfb2ff412e | 299,864 |
def recvall(s, count):
"""Receive all of the data otherwise return none.
Args:
s: socket
count: number of bytes
Returns:
data received
None if no data is received
"""
all_data = []
while (count > 0):
data = s.recv(count)
if (len(data) == 0):
return None
count -= len(data)
all_data.append(data)
    result = b''.join(all_data)
return result | c2e80f7cde1630ab6058cf5d4c584f5618160fb7 | 234,561 |
from typing import Dict
def parse_node_id(node_id: str) -> Dict:
"""
    Parse a node ID.
    :param node_id: the node ID, e.g. "host|instance|host|123"
:return: {
"object_type": "HOST",
"node_type": "INSTANCE",
"type": "host",
"id": 123,
}
"""
object_type, node_type, _type, _id = node_id.split("|")
return {
"object_type": object_type.upper(),
"node_type": node_type.upper(),
"type": _type,
"id": _id,
} | bb2d34f715a363dbbb8ef188491898095e9c966e | 665,282 |
import json
def load_config(file):
"""
    Takes a file path as input and returns a configuration dict
    that contains information relevant to training the NN.
    :param file:
    :return:
    """
    # load the json file and convert it to a dictionary
    with open(file) as loaded_file:
        configuration = json.load(loaded_file)
    # return the configuration to the caller
    return configuration | ad9f9228da22b9ac70ae8e6adf5e29e74b0ab510 | 153,579 |
def chomp(text):
"""
If the text in an inline tag like b, a, or em contains a leading or trailing
    space, strip the string and return a space as suffix or prefix, if needed.
This function is used to prevent conversions like
<b> foo</b> => ** foo**
"""
prefix = ' ' if text and text[0] == ' ' else ''
suffix = ' ' if text and text[-1] == ' ' else ''
text = text.strip()
return (prefix, suffix, text) | e53d983eb597994f994ce8e84c87693a485a681f | 453,025 |
def lerp(origin, destination, progress):
""" Linear interpolation between origin and destination.
:param tuple origin: 2-tuple of x, y coordinates for origin
:param tuple destination: 2-tuple of x, y coordinates for destination
:param float progress: ratio of completion between origin and destination, [0-1]
e.g. 25% of the way through the trip from origin to destination would be .25
"""
origin_x, origin_y = origin
destination_x, destination_y = destination
x = origin_x + int(progress * float(destination_x - origin_x))
y = origin_y + int(progress * float(destination_y - origin_y))
return x, y | a18886d63c2193f84820b5ab017e71415e6cbd60 | 514,381 |
import hashlib
def _filehash(filepath, blocksize=4096):
""" Return the hash object for the file `filepath', processing the file
by chunk of `blocksize'.
:type filepath: str
:param filepath: Path to file
:type blocksize: int
:param blocksize: Size of the chunk when processing the file
"""
sha = hashlib.sha256()
with open(filepath, 'rb') as fp:
while 1:
data = fp.read(blocksize)
if data:
sha.update(data)
else:
break
return sha | 2a9ccbf3d40cdce7479775973411d789105c30bb | 161,187 |
def fake_message_dict(fake_chat_dict):
"""Return a fake, minimalist Telegram message as dict."""
return {
'message_id': 12345,
'date': 1445207090,
'chat': fake_chat_dict
} | f98316b44d4a5f3c12f293a2cd72a3ec3298cb34 | 507,995 |
def toString( x ):
"""
Return a string representing x. If x is a float convert it to scientific notation.
Arguments:
x: The value to convert to a string.
"""
if isinstance( x, float ):
return "{:.2e}".format( x )
else:
return str( x ) | a619aa9867ab807721812e19ae88df67189b27aa | 567,895 |
def generic_element(title, subtitle=None, image_url=None, buttons=None):
"""
Creates a dict to use with send_generic
:param title: Content for receiver title
:param subtitle: Content for receiver subtitle (optional)
:param image_url: Content for receiver image to show by url (optional)
    :param buttons: Content for receiver buttons shown (optional)
:return: dict
"""
element = {
"title": title,
"subtitle": subtitle,
"image_url": image_url,
"buttons": buttons
}
if not subtitle:
element.pop('subtitle')
if not image_url:
element.pop('image_url')
if not buttons:
element.pop('buttons')
return element | 29c28e43e7fb146c2d2764f2cc0519534436c6c7 | 185,645 |
def is_system_group(group) -> bool:
"""
>>> is_system_group('SYS:SOLO')
True
>>> is_system_group('sys:mushroom')
True
>>> is_system_group(None)
False
"""
return bool(group and group.upper().startswith('SYS:')) | 6f282d3150be671f76170e04ea814ed14ab00b48 | 293,632 |
from typing import List
def _all_lhc_arcs(beam: int) -> List[str]:
"""
INITIAL IMPLEMENTATION CREDITS GO TO JOSCHUA DILLY (@JoschD).
Names of all LHC arcs for a given beam.
Args:
beam (int): beam to get names for.
Returns:
The list of names.
"""
return [f"A{i+1}{(i+1)%8+1}B{beam:d}" for i in range(8)] | 286f34a7b80f92772bfe2c0052139d95e2e899db | 555,164 |
def arePermsEqualParity(perm0, perm1):
"""Check if 2 permutations are of equal parity.
Assume that both permutation lists are of equal length
and have the same elements. No need to check for these
conditions.
:param perm0: A list.
:param perm1: Another list with same elements.
:return: True if even parity, False if odd parity.
"""
perm1 = perm1[:] ## copy this list so we don't mutate the original
transCount = 0
for loc in range(len(perm0) - 1): # Do (len - 1) transpositions
p0 = perm0[loc]
p1 = perm1[loc]
if p0 != p1:
sloc = perm1[loc:].index(p0)+loc # Find position in perm1
perm1[loc], perm1[sloc] = p0, p1 # Swap in perm1
transCount += 1
# Even number of transpositions means equal parity
    return (transCount % 2) == 0 | 40e5bb65afaad68c5c188f3ff2b8551b2be4cacb | 573,702 |
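# Usage sketch (illustrative): one transposition flips parity, a 3-cycle is even.
assert arePermsEqualParity([1, 2, 3], [2, 1, 3]) is False
assert arePermsEqualParity([1, 2, 3], [3, 1, 2]) is True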
def hmsdms_to_deg(hmsdms):
"""
Convert HMS (hours, minutes, seconds) and DMS (degrees, minutes, seconds) to
RA, DEC in decimal degrees.
Example:
hmsdms_to_deg('06 45 08.91728 -16 42 58.0171')
Return:
        (101.28715533333333, -16.71611586111111)
"""
ls = hmsdms.split(' ')
ra_h = int(ls[0])
ra_m = int(ls[1])
ra_s = float(ls[2])
    # The sign must apply to the whole declination, not just the degrees;
    # naively adding positive minutes/seconds to a negative degree is wrong.
    dec_sign = -1 if ls[3].lstrip().startswith('-') else 1
    dec_d = abs(int(ls[3]))
    dec_m = int(ls[4])
    dec_s = float(ls[5])
    ra = 15*ra_h + 15*ra_m/60 + 15*ra_s/3600
    dec = dec_sign * (dec_d + dec_m/60 + dec_s/3600)
return ra, dec | d2fff84799b0cb49855ecad40a59823094ea3dd9 | 537,638 |
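# Usage sketch (illustrative): Sirius, with the sign applied to the whole declination.
_ra, _dec = hmsdms_to_deg('06 45 08.91728 -16 42 58.0171')
assert abs(_ra - 101.2871553) < 1e-6 and abs(_dec + 16.7161159) < 1e-6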
import base64
def image_uri(filename):
"""Return the base64 encoding of an image with the given filename.
"""
    with open(filename, "rb") as image_file:
        image_data = image_file.read()
    return "data:image/jpg;base64," + base64.b64encode(image_data).decode() | 5c9c437ef5e3336caad1232d9ca314a82bb7be38 | 216,935 |
def find_last_word(s):
"""Find the last word in a string."""
# Note: will break on \n, \r, etc.
alpha_only_sentence = "".join([c for c in s if (c.isalpha() or (c == " "))]).strip()
return alpha_only_sentence.split()[-1] | aaa163496a6c6ba4a5b8113b98e28390398fcbf9 | 635,460 |
from pathlib import Path
def load_header_file(file_path):
"""Load the first line of a local file"""
with open(Path(file_path)) as data_in:
lines = data_in.readlines()
return [line.strip() for line in lines] | b50f3b649699c207d36aae992cb59ae93c786429 | 78,627 |
def human_readable_bytes(value, digits=2, delim="", postfix=""):
"""
Return a human-readable bytes value as a string.
Args:
value (int): the bytes value.
digits (int): how many decimal digits to use.
delim (str): string to add between value and unit.
postfix (str): string to add at the end.
Returns:
str: the human-readable version of the bytes.
"""
chosen_unit = "B"
for unit in ("KiB", "MiB", "GiB", "TiB"):
if value > 1000:
value /= 1024
chosen_unit = unit
else:
break
return f"{value:.{digits}f}" + delim + chosen_unit + postfix | 8024efed6b98349ac94de76dbb55807a24d13648 | 220,489 |
from typing import Tuple
def aspect_ratio(width: int , height: int) -> Tuple[int, int]:
"""
Function to calculate aspect ratio for two given values.
Args:
width (int): width value
height (int): height value
Returns:
Tuple[int, int]: ratio of width to height
"""
def gcd(a, b):
return a if b == 0 else gcd(b, a % b)
if width == height:
return 1,1
if width > height:
divisor = gcd(width, height)
else:
divisor = gcd(height, width)
return int(width / divisor), int(height / divisor) | 3ee89d5630c91fc2144b4fcce0207c2413f5a5db | 571,568 |
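# Usage sketch (illustrative): the ratio is reduced by the greatest common divisor.
assert aspect_ratio(1920, 1080) == (16, 9)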
def update_parameters(parameters, grads, learning_rate):
"""
update parameters with gradients.
:param parameters: input parameters, dictionaries
:param grads: gradients, dictionaries
:param learning_rate: hyper-parameter alpha for deep learning, floats
:return: updated parameters, dictionaries
"""
num_of_layers = len(parameters) // 2 # number of layers in the neural network
# Update rule for each parameter. Use a for loop.
for l in range(num_of_layers):
parameters["W" + str(l + 1)] = parameters["W" + str(l + 1)] - learning_rate * grads["dW" + str(l + 1)]
parameters["b" + str(l + 1)] = parameters["b" + str(l + 1)] - learning_rate * grads["db" + str(l + 1)]
return parameters | d9a28839f8b3049829dfac9edf80448cba04b69c | 315,730 |
import json
from collections import defaultdict
def no_duplicates(file, attribute="Name"):
"""Assert whether or not dict has duplicated Names.
`attribute` can be another attribute name like "$id".
Args:
file (str or dict): Path of the json file or dict containing umi objects groups
attribute (str): Attribute to search for duplicates in json UMI structure.
eg. : "$id", "Name".
Returns:
bool: True if no duplicates.
Raises:
Exception if duplicates found.
"""
if isinstance(file, str):
data = json.loads(open(file).read())
else:
data = file
ids = {}
for key, value in data.items():
ids[key] = defaultdict(int)
for component in value:
try:
_id = component[attribute]
except KeyError:
pass # BuildingTemplate does not have an id
else:
ids[key][_id] += 1
    dups = {}
    for key, values in ids.items():
        duplicated = {k: v for k, v in values.items() if v > 1}
        if duplicated:
            dups[key] = duplicated
if any(dups.values()):
raise Exception(f"Duplicate {attribute} found: {dups}")
else:
return True | 5754347be89f9f254f6716fda771fd2c2d06f1c5 | 572,338 |
import math
def rotate(origin, point, angle):
"""Rotate a point counterclockwise by a given angle around a given origin.
Because in OpenCV the y-axis is inverted this function swaps the x and y axis.
Args:
origin: (x, y) tuple.
point: the point (x, y) to rotate.
        angle: the rotation angle, given in radians.
"""
oy, ox = origin
py, px = point
qx = ox + int(math.cos(angle) * (px - ox)) - int(math.sin(angle) * (py - oy))
qy = oy + int(math.sin(angle) * (px - ox)) + int(math.cos(angle) * (py - oy))
return int(qy), int(qx) | 33652c98165022f469a0d41f31d3e0ec047ad9b4 | 136,702 |
def create_response(json, code=200):
"""
Create a json response with a custom HTTP code.
:param json: result of jsonify
:param code: responsecode, 200 is the default one
:return: Response to the callee
"""
response = json
response.status_code = code
response.mimetype = "text/json"
return response | 6ec69630c70682bb6efb31c53d5fc6246b2046ee | 328,547 |
def mk_lst(values, unlst=False):
"""
    While iterating through several types of lists and items it is convenient to
    assume that we receive a list; use this function to accomplish this. It is
    also convenient to be able to return the original type after we're done
    with it.
"""
# Not a list, were not looking to unlist it. make it a list
if not isinstance(values, list) and not unlst:
values = [values]
# We want to unlist a list we get
if unlst:
# single item provided so we don't want it a list
if isinstance(values, list):
if len(values) == 1:
values = values[0]
return values | 670064588bed9f7dd22d3605412a657ddd5204fc | 673,237 |
def _CommonChecks(input_api, output_api):
"""Checks common to both upload and commit."""
# We should figure out what license checks we actually want to use.
license_header = r'.*'
results = []
results.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api, maxlen=800, license_header=license_header))
return results | 5d23933ce5b7253185879e7e80144ea594451d70 | 121,076 |
from typing import Any
from typing import Tuple
import operator
def _ensure_index_tuple(x: Any) -> Tuple[int, ...]:
"""Convert x to a tuple of indices."""
try:
return (operator.index(x),)
except TypeError:
return tuple(map(operator.index, x)) | 3c328971d7110f826f377aee008f85027caa28f8 | 632,213 |
def bytes_to_int(byte_data):
""" bytes_to_int utility.
"""
    if isinstance(byte_data, int):
return byte_data
else:
return int.from_bytes(byte_data, 'big') | 9b5bcefd4884b272aaa95d3db6fc2344017756f4 | 306,520 |
def EnumerateRingBond(mol, idxlist):
"""
Enumerate bonds in a cycle
Input:
mol: rdMol
idxlist: list (ring atoms)
Return:
ringbond: list of tuples [(atom_1, atom_2)]
"""
    size = len(idxlist)
    ringbond = [(idxlist[i % size], idxlist[(i+1) % size]) for i in range(size)]
return ringbond | 0cc7625842b1321751cdb10e22d51b51092c90b6 | 344,618 |
import math
def cyclical_lr(step_sz=2000, min_lr=0.001, max_lr=1, mode='triangular', scale_func=None, scale_md='cycles', gamma=1.):
"""implements a cyclical learning rate policy (CLR).
Notes: the learning rate of optimizer should be 1
Parameters:
----------
mode : str, optional
one of {triangular, triangular2, exp_range}.
scale_md : str, optional
{'cycles', 'iterations'}.
gamma : float, optional
constant in 'exp_range' scaling function: gamma**(cycle iterations)
Examples:
--------
>>> # the learning rate of optimizer should be 1
>>> optimizer = optim.SGD(model.parameters(), lr=1.)
>>> step_size = 2*len(train_loader)
>>> clr = cyclical_lr(step_size, min_lr=0.001, max_lr=0.005)
>>> scheduler = lr_scheduler.LambdaLR(optimizer, [clr])
>>> # some other operations
>>> scheduler.step()
>>> optimizer.step()
"""
    if scale_func is None:
if mode == 'triangular':
scale_fn = lambda x: 1.
scale_mode = 'cycles'
elif mode == 'triangular2':
scale_fn = lambda x: 1 / (2.**(x - 1))
scale_mode = 'cycles'
elif mode == 'exp_range':
scale_fn = lambda x: gamma**(x)
scale_mode = 'iterations'
else:
raise ValueError(f'The {mode} is not valid value!')
else:
scale_fn = scale_func
scale_mode = scale_md
lr_lambda = lambda iters: min_lr + (max_lr - min_lr) * rel_val(iters, step_sz, scale_mode)
def rel_val(iteration, stepsize, mode):
cycle = math.floor(1 + iteration / (2 * stepsize))
x = abs(iteration / stepsize - 2 * cycle + 1)
if mode == 'cycles':
return max(0, (1 - x)) * scale_fn(cycle)
elif mode == 'iterations':
return max(0, (1 - x)) * scale_fn(iteration)
else:
raise ValueError(f'The {scale_mode} is not valid value!')
return lr_lambda | e5a6abcdaeb21dcd4293ed1fb305f6f882d040d0 | 278,705 |
def _is_hex(c):
"""Ensures character is a hexadecimal digit."""
return c in '0123456789ABCDEFabcdef' | fa688b77d009039ae5c470f19ce07ed645ad6934 | 470,715 |
def getattr_unwrapped(env, attr):
"""Get attribute attr from env, or one of the nested environments.
Args:
- env(gym.Wrapper or gym.Env): a (possibly wrapped) environment.
- attr: name of the attribute
Returns:
env.attr, if present, otherwise env.unwrapped.attr and so on recursively.
"""
try:
return getattr(env, attr)
except AttributeError:
if env.env == env:
raise
else:
return getattr_unwrapped(env.env, attr) | 87942b5b7b35ed90c6f43ce5c3e22e595ff757ef | 576,661 |
def cell_neighbors(cell):
""" Returns direct neighbors of an hexagonal cell.
The elegant way to perform this operation would be to add
the directions to the given cell, but for speed reasons
the full precomputed set is used instead.
"""
return [
(cell[0]+1, cell[1]),
(cell[0]+1, cell[1]-1),
(cell[0], cell[1]-1),
(cell[0]-1, cell[1]),
(cell[0]-1, cell[1]+1),
(cell[0], cell[1]+1)
] | 43e6468772230a17facd38c9c93169735b9488ae | 136,480 |
def cli(ctx, role_id):
"""Display information on a single role
Output:
Details of the given role.
For example::
{"description": "Private Role for Foo",
"id": "f2db41e1fa331b3e",
"model_class": "Role",
"name": "Foo",
"type": "private",
"url": "/api/roles/f2db41e1fa331b3e"}
"""
return ctx.gi.roles.show_role(role_id) | 72e0b8dc4d06d736e67bf1c4b8f70bd030c160b3 | 19,886 |
def forge_block_header_data(protocol_data):
"""
Returns a binary encoding for a dict of the form
`{'block_header_data: string}`, as expected by the protocol.
This corresponds to the encoding given by
`data_encoding.(obj1 (req "block_header_data" string))`. See
`lib_data_encoding/data_encoding.mli` for the spec.
"""
assert len(protocol_data) == 1 and 'block_header_data' in protocol_data
string = protocol_data['block_header_data']
tag = '0000'
padded_hex_len = f'{len(string):#06x}'[2:]
return tag + padded_hex_len + bytes(string, 'utf-8').hex() | 936539f295d7fbb3351ef02a10653d0e5a262d6e | 183,212 |
def distance(coord1, coord2):
"""
Return Manhattan Distance between two coordinates
"""
return abs(coord1[0] - coord2[0]) + abs(coord1[1] - coord2[1]) | 09a0f91ef8a13e04d050074613ef41baaa107189 | 448,861 |
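# Usage sketch (illustrative): Manhattan distance is |dx| + |dy|.
assert distance((0, 0), (3, -4)) == 7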
import torch
def random_normal(*size):
"""Apply random values from a normal distribution."""
return torch.randn(*size) | c2554dae447557a39073d635cf34ed55434a1a17 | 146,364 |
def _obtain_input_shape(input_shape,
default_size,
min_size,
data_format,
include_top):
"""Internal utility to compute/validate an ImageNet model's input shape.
# Arguments
input_shape: either None (will return the default network input shape),
or a user-provided shape to be validated.
default_size: default input width/height for the model.
min_size: minimum input width/height accepted by the model.
data_format: image data format to use.
include_top: whether the model is expected to
be linked to a classifier via a Flatten layer.
# Returns
An integer shape tuple (may include None entries).
# Raises
ValueError: in case of invalid argument values.
"""
if data_format == 'channels_first':
default_shape = (3, default_size, default_size)
else:
default_shape = (default_size, default_size, 3)
if include_top:
if input_shape is not None:
if input_shape != default_shape:
raise ValueError('When setting`include_top=True`, '
'`input_shape` should be ' + str(default_shape) + '.')
input_shape = default_shape
else:
if data_format == 'channels_first':
if input_shape is not None:
if len(input_shape) != 3:
raise ValueError('`input_shape` must be a tuple of three integers.')
if input_shape[0] != 3:
raise ValueError('The input must have 3 channels; got '
'`input_shape=' + str(input_shape) + '`')
if ((input_shape[1] is not None and input_shape[1] < min_size) or
(input_shape[2] is not None and input_shape[2] < min_size)):
raise ValueError('Input size must be at least ' +
str(min_size) + 'x' + str(min_size) + ', got '
'`input_shape=' + str(input_shape) + '`')
else:
input_shape = (3, None, None)
else:
if input_shape is not None:
if len(input_shape) != 3:
raise ValueError('`input_shape` must be a tuple of three integers.')
if input_shape[-1] != 3:
raise ValueError('The input must have 3 channels; got '
'`input_shape=' + str(input_shape) + '`')
if ((input_shape[0] is not None and input_shape[0] < min_size) or
(input_shape[1] is not None and input_shape[1] < min_size)):
raise ValueError('Input size must be at least ' +
str(min_size) + 'x' + str(min_size) + ', got '
'`input_shape=' + str(input_shape) + '`')
else:
input_shape = (None, None, 3)
return input_shape | 4307680bd647dd3155d40436d4b8c33cb8b1aaaf | 163,886 |
def convert_rational_to_float(rational):
""" Convert a rational number in the form of a 2-tuple to a float
Args:
rational (2-sized list of int): The number to convert
Returns:
float: The conversion."""
assert len(rational) == 2
return rational[0] / rational[1] | 5713c24c461726d49abee8b9df020b501fcb0ac2 | 108,964 |
def get_class_index(d):
"""Get class name and index from the whole dictionary
E.g.,
'{'index': 'data/Deploy/KLAC/KLAC0570/KLAC0570_12.jpg', 'prediction': ..., 'label': ...}'
==> (str) 'KLAC0570'
"""
return d['index'].split('/')[-2] | 1954f7184d248ac181a727cac180190813f92c50 | 571,448 |
def is_link(test_string):
"""check if the string is a web link
Args:
test_string (str): input string
Returns:
bool: if string is an http link
"""
return test_string.startswith('http') | 5ddcd17d977309200e7071ff1db62fc6767f2375 | 225,152 |