content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---|
# kernel32, ffi and the FORMAT_MESSAGE_* constants are assumed to be module-level
# cffi bindings to the Windows API defined elsewhere in the project.
def get_last_error():
""" Get the last error value, then turn it into a nice string. Return the string. """
error_id = kernel32.GetLastError()
# No actual error
if error_id == 0:
return None
# Gonna need a string pointer
buf = ffi.new("LPWSTR")
chars = kernel32.FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, ffi.NULL, error_id , 0, buf, 0, ffi.NULL)
return ffi.string(ffi.cast("char **",buf)[0][0:chars]).decode('utf-8').strip("\r\n") | 424da4211cc5cb19c8143ffe5b4326b2ff440319 | 3,656,300 |
import pickle
def load_params_from_pkl(params_dump_file_path):
"""
Loads parameters from a pickle _dump file.
:param params_dump_file_path: self-explanatory
:return dict of param_name => param
"""
coll = {}
f = open(params_dump_file_path, 'rb')
while True:
try:
param_name, param_val = pickle.load(f)
coll[param_name] = param_val
        except (EOFError, pickle.UnpicklingError):
break
f.close()
return coll | e0205d3f4b3d1ac5859eb91424a041273fc23cb8 | 3,656,301 |
from pathlib import Path
import traceback
import sys
def _extract_filename_from_filepath(strFilePath=""):
"""
Function which extracts file name from the given filepath
"""
if strFilePath:
try:
strFileName = Path(strFilePath).name
strFileName = str(strFileName).split(".")[0]
return strFileName
except Exception as ex:
            self.crash_report(traceback.format_exception(*sys.exc_info(), limit=None, chain=True))  # 'self' assumes this helper originally lived on a class
print("Error in _extract_filename_from_filepath="+str(ex))
else:
print("Please enter the value="+str(strFilePath)) | 340b51cc6ac484cdf7a978890147fe453f335521 | 3,656,302 |
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import tissue_enrichment_analysis as tea  # assumed source of the 'tea' helper used below
def plot_by_term(term, df, kind='go', q=0.1, swarm=True,
x='genotype', y='b', gene='ens_gene'):
"""
Plot ontology terms by a given column.
Params:
term - term to look for in melted_df
df - a tidy dataframe with columns x and y
kind - the ontology to use
q - q-value for statistical significance. defaults to 0.1
swarm - if True, plots a swarmplot. Else, plots a violinplot.
x - column to plot on x axis
y - column to plot on y axis
gene - column in the given df where gene WBIDs are provided
Output:
ax - an axis object containing a graph
genes - a list of genes obtained from the melted df
"""
if type(kind) is not str:
raise ValueError('`kind` variable must be a string.')
if kind.lower() not in ['tissue', 'phenotype', 'go']:
raise ValueError('`kind` must be one of `tissue`, `phenotype` or `go`')
if type(term) is not str:
raise ValueError('`term` must be a string.')
if kind.lower() == 'tissue':
onto_df = tea.fetch_dictionary()
elif kind.lower() == 'phenotype':
onto_df = pd.read_csv('../input/phenotype_ontology.csv')
else:
onto_df = pd.read_csv('../input/go_dictionary.csv')
# melt the df:
melted_df = pd.melt(onto_df, id_vars='wbid', var_name='term',
value_name='expressed')
melted_df = melted_df[melted_df.expressed == 1]
# warnings and bells:
sel = melted_df.term.str.contains(term)
if len(melted_df[sel].term.unique()) > 1:
print('Warning: Provided term matches more than one ontological term.')
genes = melted_df[sel].wbid
if len(genes) == 0:
raise ValueError('Provided term is not in ontology dictionary')
ind = (df.qval < q) & (df[gene].isin(genes))
fig, ax = plt.subplots()
    if swarm:
        ax = sns.swarmplot(x=x, y=y, data=df[ind])
    else:
        ax = sns.violinplot(x=x, y=y, data=df[ind])
return ax, genes | 768b59ff479468902af429bdf8603455bad1eab3 | 3,656,303 |
def lab_equality(lab1, lab2):
"""
Check if two labs are identical
"""
    if lab1["ncolumns"] != lab2["ncolumns"] or lab1["nlines"] != lab2["nlines"]:
        return False
    return all(set(lab1[cell]) == set(lab2[cell]) for cell in lab1.keys() if not isinstance(cell, str)) | d5ffca9acfa6bc2cc324f1b6c5ed416541812c13 | 3,656,304 |
import numpy as np
import xarray as xr
# attrs, set_spec_attributes, uv_to_spddir and R2D are assumed to come from the
# wavespectra package (e.g. wavespectra.core.attributes), with R2D = 180 / pi.
def read_wwm(filename_or_fileglob, chunks={}, convert_wind_vectors=True):
    """Read Spectra from WWM native netCDF format.
Args:
- filename_or_fileglob (str): filename or fileglob specifying multiple
files to read.
- chunks (dict): chunk sizes for dimensions in dataset. By default
dataset is loaded using single chunk for all dimensions (see
xr.open_mfdataset documentation).
- convert_wind_vectors (bool): choose it to convert wind vectors into
speed / direction data arrays.
Returns:
        - dset (SpecDataset): spectra dataset object read from wwm file.
Note:
        - If file is too large to fit in memory, consider specifying chunks for
'time' and/or 'station' dims.
"""
dset = xr.open_mfdataset(filename_or_fileglob, chunks=chunks)
_units = dset.AC.attrs.get("units", "")
dset = dset.rename(
{
"nfreq": attrs.FREQNAME,
"ndir": attrs.DIRNAME,
"nbstation": attrs.SITENAME,
"AC": attrs.SPECNAME,
"lon": attrs.LONNAME,
"lat": attrs.LATNAME,
"DEP": attrs.DEPNAME,
"ocean_time": attrs.TIMENAME,
}
)
# Calculating wind speeds and directions
if convert_wind_vectors and "Uwind" in dset and "Vwind" in dset:
dset[attrs.WSPDNAME], dset[attrs.WDIRNAME] = uv_to_spddir(
dset["Uwind"], dset["Vwind"], coming_from=True
)
# Setting standard names and storing original file attributes
set_spec_attributes(dset)
dset[attrs.SPECNAME].attrs.update(
{"_units": _units, "_variable_name": attrs.SPECNAME}
)
# Assigning spectral coordinates
#import ipdb; ipdb.set_trace()
dset[attrs.FREQNAME] = dset.spsig / (2 * np.pi) # convert rad to Hz
dset[attrs.DIRNAME] = dset.spdir
# converting Action to Energy density and adjust density to Hz
dset[attrs.SPECNAME] = dset[attrs.SPECNAME] * dset.spsig * (2 * np.pi)
# Converting from radians
dset[attrs.DIRNAME] *= R2D
dset[attrs.SPECNAME] /= R2D
# Returns only selected variables, transposed
to_drop = [
dvar
for dvar in dset.data_vars
if dvar
not in [
attrs.SPECNAME,
attrs.WSPDNAME,
attrs.WDIRNAME,
attrs.DEPNAME,
attrs.LONNAME,
attrs.LATNAME,
]
]
dims = [d for d in ["time", "site", "freq", "dir"] if d in dset.efth.dims]
return dset.drop(to_drop).transpose(*dims) | 4623a31a0d3d780960d58743278a847377213555 | 3,656,305 |
def is_sorted(t):
"""Checks whether a list is sorted.
t: list
returns: boolean
"""
return t == sorted(t) | 442c5a4670c595f3dea45c8aac315eda5dae26d0 | 3,656,306 |
def create_container_port_mappings(container):
"""
Create the port mappings for the given container.
:param container: The container to create the mappings for.
"""
ports = []
image = None
if container.is_image_based():
image = container.image
elif container.is_clone() and container.clone_of.is_image_based():
image = container.clone_of.image
if image:
protected_port = image.protected_port
public_ports = image.public_ports
if protected_port:
mapping = PortMapping(
server=container.server,
container=container,
external_port=PortMapping.get_available_server_port(container.server),
internal_port=protected_port
)
mapping.save()
ports.append({
ContainerBackend.PORT_MAPPING_KEY_ADDRESS: mapping.server.internal_ip,
ContainerBackend.PORT_MAPPING_KEY_EXTERNAL: mapping.external_port,
ContainerBackend.PORT_MAPPING_KEY_INTERNAL: mapping.internal_port
})
if public_ports:
for port in public_ports.split(','):
mapping = PortMapping(
server=container.server,
container=container,
external_port=PortMapping.get_available_server_port(container.server),
internal_port=port
)
mapping.save()
ports.append({
ContainerBackend.PORT_MAPPING_KEY_ADDRESS: '0.0.0.0',
ContainerBackend.PORT_MAPPING_KEY_EXTERNAL: mapping.external_port,
ContainerBackend.PORT_MAPPING_KEY_INTERNAL: mapping.internal_port
})
return ports | 15a93e38ccb2c3d6ecab025a8d3c9226ebbf81d0 | 3,656,307 |
def _get_dep_for_package(package, platform):
"""
Convert arguments in the `package` parameter to actual deps.
"""
if is_list(package) or is_tuple(package):
package, _ = package
# TODO: ghc-8.4.4
if (package == "compact" and
_get_ghc_version(platform) == "8.4.4"):
package = "ghc-compact"
if package in _get_internal_ghc_packages(platform):
project = "ghc"
else:
project = "stackage-lts"
return target_utils.ThirdPartyRuleTarget(project, package) | e5e77c0bf0a48864e042ba8fcbdcf8c34f7918f2 | 3,656,308 |
from typing import Callable
from typing import Any
from typing import get_origin
from typing import Union
from typing import Dict
from typing import Sequence
from typing import Set
from datetime import date, time
from io import IOBase
# TypeHint, UnionType, TypeHintTuple and the *_caster helpers referenced below
# are assumed to be defined elsewhere in the enclosing module.
from typing import Pattern
from typing import IO
from typing import Literal
from enum import Enum
def get_caster(typehint: TypeHint) -> Callable[..., Any]:
"""Returns a conversion class most appropriate for the
supplied type hint. Potential matches are checked in
order from most to least specific to account for
overlapping types (e.g. ABCs).
"""
if typehint in (Any, None):
return untyped_caster
origin = get_origin(typehint)
if origin in (Union, UnionType):
return union_caster(typehint)
typecasters: Dict[TypeHintTuple, Callable[..., Any]] = {
(bytes,): str.encode,
(str,): str,
(dict,): json_caster(typehint),
(bool,): bool_caster,
(Sequence, Set): collection_caster(typehint),
(date, time): datetime_caster(typehint),
(Pattern,): pattern_caster(typehint),
(IO, IOBase): io_caster(typehint),
(Literal,): literal_caster(typehint),
(Enum,): enum_caster(typehint),
}
for cls, caster in typecasters.items():
if typehint in cls:
return caster
if origin in cls and origin is not None:
return caster
if issubtype(typehint, cls):
return caster
return generic_caster(typehint) | 7171e70c2870169a5394d3bafc4d114f4a950db0 | 3,656,309 |
def values(series):
"""Count the values and sort.
series: pd.Series
returns: series mapping from values to frequencies
"""
return series.value_counts(dropna=False).sort_index() | d4ef6b93b7f2790d8130ac045e9c315b8d57a245 | 3,656,310 |
def use_id(type):
"""Declare that this configuration option should point to an ID with the given type."""
def validator(value):
check_not_templatable(value)
if value is None:
return core.ID(None, is_declaration=False, type=type)
if (
isinstance(value, core.ID)
and value.is_declaration is False
and value.type is type
):
return value
return core.ID(validate_id_name(value), is_declaration=False, type=type)
return validator | 0087ad7119999932c9d4b882907019f60346491f | 3,656,311 |
def social_auth_user(backend, uid, user=None, *args, **kwargs):
"""Return UserSocialAuth account for backend/uid pair or None if it
doesn't exists.
Raise AuthAlreadyAssociated if UserSocialAuth entry belongs to another
user.
"""
social_user = UserSocialAuth.get_social_auth(backend.name, uid)
if social_user:
if user and social_user.user != user:
msg = ugettext('This %(provider)s account is already in use.')
raise AuthAlreadyAssociated(backend, msg % {
'provider': backend.name
})
elif not user:
user = social_user.user
return {'social_user': social_user, 'user': user} | 5a421e3f1f24cecbb4e6313bee8172585f9f3708 | 3,656,312 |
import numpy as np
def bbox_mask(t_arr, x_arr, limits):
"""
Just a wrapper for np.where
"""
#NOTE: t_arr is included but no longer used
mask = np.where(
(x_arr >= limits[0]) & \
(x_arr <= limits[1]))[0]
return mask | 90e1a92d76d1b3d406ab50300c6528973f610f0a | 3,656,313 |
import numpy as np
from numpy import array, isfinite, linspace, argsort, meshgrid
from matplotlib.pyplot import gca
# Bounded_2d_kde is assumed to be defined in the enclosing project.
def kdeplot_2d_clevels(xs, ys, levels=11, **kwargs):
""" Plot contours at specified credible levels.
Arguments
---------
xs: array
samples of the first variable.
ys: array
samples of the second variable, drawn jointly with `xs`.
levels: float, array
if float, interpreted as number of credible levels to be equally
spaced between (0, 1); if array, interpreted as list of credible
levels.
xlow: float
lower bound for abscissa passed to Bounded_2d_kde (optional).
    xhigh: float
upper bound for abscissa passed to Bounded_2d_kde (optional).
ylow: float
lower bound for ordinate passed to Bounded_2d_kde (optional).
yhigh: float
upper bound for ordinate passed to Bounded_2d_kde (optional).
ax: Axes
matplotlib axes on which to plot (optional).
kwargs:
additional arguments passed to plt.contour().
"""
try:
xs = xs.values.astype(float)
ys = ys.values.astype(float)
except AttributeError:
pass
if all(~isfinite(xs)) or all(~isfinite(ys)):
return None
try:
len(levels)
f = 1 - np.array(levels)
except TypeError:
f = linspace(0, 1, levels+2)[1:-1]
if kwargs.get('auto_bound', False):
kwargs['xlow'] = min(xs)
kwargs['xhigh'] = max(xs)
kwargs['ylow'] = min(ys)
kwargs['yhigh'] = max(ys)
kde_kws = {k: kwargs.pop(k, None) for k in ['xlow', 'xhigh', 'ylow', 'yhigh']}
k = Bounded_2d_kde(np.column_stack((xs, ys)), **kde_kws)
size = max(10*(len(f)+2), 500)
c = np.random.choice(len(xs), size=size)
p = k(np.column_stack((xs[c], ys[c])))
i = argsort(p)
l = array([p[i[int(round(ff*len(i)))]] for ff in f])
Dx = np.percentile(xs, 99) - np.percentile(xs, 1)
Dy = np.percentile(ys, 99) - np.percentile(ys, 1)
x = linspace(np.percentile(xs, 1)-0.1*Dx, np.percentile(xs, 99)+0.1*Dx, 128)
y = linspace(np.percentile(ys, 1)-0.1*Dy, np.percentile(ys, 99)+0.1*Dy, 128)
XS, YS = meshgrid(x, y, indexing='ij')
ZS = k(np.column_stack((XS.flatten(), YS.flatten()))).reshape(XS.shape)
ax = kwargs.pop('ax', gca())
kwargs['colors'] = kwargs.get('colors', [kwargs.pop('color', None),])
ax.contour(XS, YS, ZS, levels=l, **kwargs) | 806b4278a6bbac91fcdfd6354cb3fa5422fab1ee | 3,656,314 |
def normalization_reg_loss(input):
"""
input: [..., 3]
It computes the length of each vector and uses the L2 loss between the lengths and 1.
"""
lengths = (input ** 2).sum(dim=-1).sqrt()
loss_norm_reg = ((lengths - 1) ** 2).mean()
return loss_norm_reg | 3b9d999c90d8e9b3ce797d286bb2f0b215fa7ee5 | 3,656,315 |
def _get_window_size(offset, step_size, image_size):
"""
Calculate window width or height.
Usually same as block size, except when at the end of image and only a
fracture of block size remains
:param offset: start columns/ row
:param step_size: block width/ height
:param image_size: image width/ height
:return: window width/ height
"""
if offset + step_size > image_size:
return image_size - offset
else:
return step_size | 90d65229c54a5878fa9b2af8e30293e743679e42 | 3,656,316 |
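A quick illustration of the edge-of-image case described in the docstring; the pixel sizes below are made up for the example.
# Tiling a 1000-px-wide image in 512-px blocks: the first window is full size,
# the second is truncated to the 488 px that remain.
print(_get_window_size(0, 512, 1000))    # 512
print(_get_window_size(512, 512, 1000))  # 488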
import wx
def _ListCtrl_IsSelected(self, idx):
"""
Returns ``True`` if the item is selected.
"""
return (self.GetItemState(idx, wx.LIST_STATE_SELECTED) & wx.LIST_STATE_SELECTED) != 0 | 796916c4cf13e77ec7f21cae2210acbb6d250e14 | 3,656,317 |
def sturm_liouville_function(x, y, p, p_x, q, f, alpha=0, nonlinear_exp=2):
"""Second order Sturm-Liouville Function defining y'' for Lu=f.
This form is used because it is expected for Scipy's solve_ivp method.
Keyword arguments:
x -- independent variable
y -- dependent variable
p -- p(x) parameter
p_x -- derivative of p_x wrt x
q -- q(x) parameter
f -- forcing function f(x)
alpha -- nonlinear parameter
nonlinear_exp -- exponent of nonlinear term
"""
y_x = y[1]
y_xx = -1*(p_x/p)*y[1] + (q/p)*y[0] + (q/p)*alpha*y[0]**nonlinear_exp - f/p
return [y_x, y_xx] | 5c34cc622075c640fe2dec03b1ae302192d0f779 | 3,656,318 |
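As the docstring notes, the (x, y) call signature is what scipy.integrate.solve_ivp expects. A minimal sketch of wiring it up, assuming scipy is available; the constant coefficients (p=1, p_x=0, q=-1, f=0) are illustrative choices, not values from the source.
import numpy as np
from scipy.integrate import solve_ivp

def rhs(x, y):
    # constant-coefficient example: p(x)=1, p'(x)=0, q(x)=-1, f(x)=0  ->  y'' = -y
    return sturm_liouville_function(x, y, p=1.0, p_x=0.0, q=-1.0, f=0.0)

sol = solve_ivp(rhs, t_span=(0.0, np.pi / 2), y0=[0.0, 1.0])
print(sol.y[0, -1])  # close to sin(pi/2) = 1 for this choice of coefficients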
import logging
import sys
from functools import wraps
def logged(func):
"""Pipes exceptions through root logger"""
@wraps(func)
def deco(*args, **kwargs):
try:
result = func(*args, **kwargs)
except Exception as e:
logging.exception(f"{func.__name__}:\n{e}")
print("Exception logged to {LOGFILE}")
sys.exit(1)
return result
return deco | cbece89917a8329c03b9c4c24b503ff0c78edbd1 | 3,656,319 |
import numpy as np
def hamming_set(index: str, d: int = 1, include_N: bool = True):
"""Given an index of bases in {ACGTN}, generate all indexes within hamming
distance d of the input
:param index: string representing the index sequence
:param d: maximum distance to allow
:param include_N: include N when generating possible indexes
:return: set of indexes within hamming distance d
"""
base_d = {"A": 0, "C": 1, "G": 2, "T": 3, "N": 4}
new_base = [i * np.eye(len(index), dtype=np.uint8) for i in range(4 + include_N)]
other_bases = 1 - np.eye(len(index), dtype=np.uint8)
h_set = {tuple(base_d[c] for c in index)}
for _ in range(d):
for a in list(map(np.array, h_set)):
h_set.update(t for i in new_base for t in map(tuple, a * other_bases + i))
h_set = {"".join("ACGTN"[i] for i in h) for h in h_set}
return h_set | d8546bd2f7b04518d2d711488045670a60e449fe | 3,656,320 |
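A small usage sketch: with include_N=True there are five possible bases, so a length-4 index has 1 + 4 x 4 = 17 neighbours at Hamming distance 1.
neighbours = hamming_set("ACGT", d=1, include_N=True)
print(len(neighbours))        # 17: the index itself plus 4 substitutions at each of 4 positions
print("ACGT" in neighbours)   # True, distance 0 is included
print("ACGA" in neighbours)   # True, one substitution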
from mne import read_epochs
def _get_epochs_info(raw_fname):
"""Get epoch info."""
epochs = read_epochs(raw_fname)
return epochs.info | 5a7d20041e7b1de7b4dcf3540f7a9d01575fb8f9 | 3,656,321 |
def is_private(key):
"""
Returns whether or not an attribute is private.
A private attribute looks like: __private_attribute__.
:param key: The attribute key
:return: bool
"""
return key.startswith("__") and key.endswith("__") | 498e7522e95317dbb171961f0f5fe8350c29a69d | 3,656,322 |
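A couple of illustrative calls with made-up attribute names:
print(is_private("__doc__"))      # True
print(is_private("_protected"))   # False, only a single leading underscore
print(is_private("__mangled"))    # False, no trailing double underscore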
async def img(filename) -> Response:
"""Image static endpoint."""
return await send_from_directory("img", filename) | d255f0f11f3b380f332a3165f8917d2d2cb65a6b | 3,656,323 |
def ref_genome_info(info, config, dirs):
"""Retrieve reference genome information from configuration variables.
"""
genome_build = info.get("genome_build", None)
(_, sam_ref) = get_genome_ref(genome_build, config["algorithm"]["aligner"],
dirs["galaxy"])
return genome_build, sam_ref | 382d32bddef76bb1ba1ecd6a4b39c042909ac3ed | 3,656,324 |
import pandas as pd
def load_text(file_arg):
"""
General function used to load data from a text file
"""
file_handle = validate_file_for_reading(file_arg)
try:
df = pd.io.parsers.read_csv(file_handle,delim_whitespace=True,\
comment='#', skip_blank_lines=True, engine='c')
except:
raise SortSeqError(\
'Could not interpret text file %s as dataframe.'%repr(file_handle))
return df.dropna(axis=0, how='all') | f8681d1db1819f2036f0b6304a04fd1762ad31f8 | 3,656,325 |
def entropy_from_mnemonic(mnemonic: Mnemonic, lang: str = "en") -> BinStr:
"""Convert mnemonic sentence to Electrum versioned entropy."""
# verify that it is a valid Electrum mnemonic sentence
_ = version_from_mnemonic(mnemonic)
indexes = _indexes_from_mnemonic(mnemonic, lang)
entropy = _entropy_from_indexes(indexes, lang)
return entropy | adcdfe3f66150f77276af7b4689289fe7609a253 | 3,656,326 |
def delete_data_analysis(analysis_id: UUID, token: HTTPAuthorizationCredentials = Depends(auth)):
"""
Delete a data analysis record.
You may only delete records in your private space,
or that are associated with a collab of which you are an administrator.
"""
return delete_computation(omcmp.DataAnalysis, analysis_id, token) | bc8cc6ee1174017be1b0ca17f221784163975132 | 3,656,327 |
def get_current_blk_file(current_file_number) -> str:
"""
Returns the current blk file name with file format.
"""
return get_current_file_name(blk_file_format(current_file_number)) | 44c81a5977f42fe38426231421bc3c2b76c36717 | 3,656,328 |
def exec_cmd_status(ceph_installer, commands):
"""
Execute command
Args:
ceph_installer: installer object to exec cmd
commands: list of commands to be executed
Returns:
Boolean
"""
for cmd in commands:
out, err = ceph_installer.exec_command(sudo=True, cmd=cmd)
out, err = out.read().decode().strip(), err.read().decode().strip()
logger.info("Command Response : {} {}".format(out, err))
return True | b5deddf504e1ae0cbc67a5b937d75bb02984b224 | 3,656,329 |
import logging
def BuildIsAvailable(bucket_name, remote_path):
"""Checks whether a build is currently archived at some place."""
    logging.info('Checking existence: gs://%s/%s' % (bucket_name, remote_path))
try:
exists = cloud_storage.Exists(bucket_name, remote_path)
logging.info('Exists? %s' % exists)
return exists
except cloud_storage.CloudStorageError:
return False | c1947339a00a538c910e669179d19c986cab5b7e | 3,656,330 |
def _channel_name(row, prefix="", suffix=""):
"""Formats a usable name for the repeater."""
length = 16 - len(prefix)
name = prefix + " ".join((row["CALL"], row["CITY"]))[:length]
if suffix:
length = 16 - len(suffix)
name = ("{:%d.%d}" % (length, length)).format(name) + suffix
return name | 4452670e28b614249fb184dd78234e52ee241086 | 3,656,331 |
def wordsinunit(unit):
"""Counts the words in the unit's source and target, taking plurals into
account. The target words are only counted if the unit is translated."""
(sourcewords, targetwords) = (0, 0)
if isinstance(unit.source, multistring):
sourcestrings = unit.source.strings
else:
sourcestrings = [unit.source or ""]
for s in sourcestrings:
sourcewords += wordcount(s)
if not unit.istranslated():
return sourcewords, targetwords
if isinstance(unit.target, multistring):
targetstrings = unit.target.strings
else:
targetstrings = [unit.target or ""]
for s in targetstrings:
targetwords += wordcount(s)
return sourcewords, targetwords | 57f6be28eab17ee2bd2cd31783809bd8a413c09e | 3,656,332 |
def check_instance(arg, types, allow_none=False, message='Argument "%(string)s" is not of type %(expected)s, but of type %(actual)s', level=1):
"""
>>> check_instance(1, int)
1
>>> check_instance(3.5, float)
3.5
>>> check_instance('hello', str)
'hello'
>>> check_instance([1, 2, 3], list)
[1, 2, 3]
>>> check_instance(1, (int, float))
1
>>> check_instance(3.5, (int, float))
3.5
>>> check_instance('hello', (str, list))
'hello'
>>> check_instance([1, 2, 3], (str, list))
[1, 2, 3]
>>> check_instance(1, float)
Traceback (most recent call last):
...
AssertionError: Argument "1" is not of type <class 'float'>, but of type <class 'int'>
>>> check_instance(3.5, int)
Traceback (most recent call last):
...
AssertionError: Argument "3.5" is not of type <class 'int'>, but of type <class 'float'>
>>> check_instance('hello', list)
Traceback (most recent call last):
...
AssertionError: Argument "hello" is not of type <class 'list'>, but of type <class 'str'>
>>> check_instance([1, 2, 3], str)
Traceback (most recent call last):
...
AssertionError: Argument "[1, 2, 3]" is not of type <class 'str'>, but of type <class 'list'>
>>> check_instance(1, (list, str))
Traceback (most recent call last):
...
AssertionError: Argument "1" is not of type (<class 'list'>, <class 'str'>), but of type <class 'int'>
>>> check_instance(3.5, (list, str))
Traceback (most recent call last):
...
AssertionError: Argument "3.5" is not of type (<class 'list'>, <class 'str'>), but of type <class 'float'>
>>> check_instance('hello', (int, float))
Traceback (most recent call last):
...
AssertionError: Argument "hello" is not of type (<class 'int'>, <class 'float'>), but of type <class 'str'>
>>> check_instance([1, 2, 3], (int, float))
Traceback (most recent call last):
...
AssertionError: Argument "[1, 2, 3]" is not of type (<class 'int'>, <class 'float'>), but of type <class 'list'>
>>> check_instance(None, int)
Traceback (most recent call last):
...
AssertionError: Argument "None" is not of type <class 'int'>, but of type <class 'NoneType'>
>>> check_instance(None, float)
Traceback (most recent call last):
...
AssertionError: Argument "None" is not of type <class 'float'>, but of type <class 'NoneType'>
>>> check_instance(None, str)
Traceback (most recent call last):
...
AssertionError: Argument "None" is not of type <class 'str'>, but of type <class 'NoneType'>
>>> check_instance(None, list)
Traceback (most recent call last):
...
AssertionError: Argument "None" is not of type <class 'list'>, but of type <class 'NoneType'>
>>> check_instance(None, (int, float))
Traceback (most recent call last):
...
AssertionError: Argument "None" is not of type (<class 'int'>, <class 'float'>), but of type <class 'NoneType'>
>>> check_instance(None, (str, list))
Traceback (most recent call last):
...
AssertionError: Argument "None" is not of type (<class 'str'>, <class 'list'>), but of type <class 'NoneType'>
>>> check_instance(1, int, allow_none=True)
1
>>> check_instance(3.5, float, allow_none=True)
3.5
>>> check_instance('hello', str, allow_none=True)
'hello'
>>> check_instance([1, 2, 3], list, allow_none=True)
[1, 2, 3]
>>> check_instance(1, (int, float), allow_none=True)
1
>>> check_instance(3.5, (int, float), allow_none=True)
3.5
>>> check_instance('hello', (str, list), allow_none=True)
'hello'
>>> check_instance([1, 2, 3], (str, list), allow_none=True)
[1, 2, 3]
>>> check_instance(1, float, allow_none=True)
Traceback (most recent call last):
...
AssertionError: Argument "1" is not of type <class 'float'>, but of type <class 'int'>
>>> check_instance(3.5, int, allow_none=True)
Traceback (most recent call last):
...
AssertionError: Argument "3.5" is not of type <class 'int'>, but of type <class 'float'>
>>> check_instance('hello', list, allow_none=True)
Traceback (most recent call last):
...
AssertionError: Argument "hello" is not of type <class 'list'>, but of type <class 'str'>
>>> check_instance([1, 2, 3], str, allow_none=True)
Traceback (most recent call last):
...
AssertionError: Argument "[1, 2, 3]" is not of type <class 'str'>, but of type <class 'list'>
>>> check_instance(1, (list, str), allow_none=True)
Traceback (most recent call last):
...
AssertionError: Argument "1" is not of type (<class 'list'>, <class 'str'>), but of type <class 'int'>
>>> check_instance(3.5, (list, str), allow_none=True)
Traceback (most recent call last):
...
AssertionError: Argument "3.5" is not of type (<class 'list'>, <class 'str'>), but of type <class 'float'>
>>> check_instance('hello', (int, float), allow_none=True)
Traceback (most recent call last):
...
AssertionError: Argument "hello" is not of type (<class 'int'>, <class 'float'>), but of type <class 'str'>
>>> check_instance([1, 2, 3], (int, float), allow_none=True)
Traceback (most recent call last):
...
AssertionError: Argument "[1, 2, 3]" is not of type (<class 'int'>, <class 'float'>), but of type <class 'list'>
>>> check_instance(None, int, allow_none=True)
>>> check_instance(None, float, allow_none=True)
>>> check_instance(None, str, allow_none=True)
>>> check_instance(None, list, allow_none=True)
>>> check_instance(None, (int, float), allow_none=True)
>>> check_instance(None, (int, float), allow_none=True)
>>> check_instance(None, (str, list), allow_none=True)
>>> check_instance(None, (str, list), allow_none=True)
"""
check(is_instance(arg, types, allow_none), lambda: message % {'string': str(arg), 'actual': type(arg), 'expected': types}, level)
return arg | 362d6101e9f6b88077f8615043713989576c7713 | 3,656,333 |
def spec_lnlike(params, labels, grid_param_list, lbda_obs, spec_obs, err_obs,
dist, model_grid=None, model_reader=None, em_lines={},
em_grid={}, dlbda_obs=None, instru_corr=None,
instru_fwhm=None, instru_idx=None, filter_reader=None,
AV_bef_bb=False, units_obs='si', units_mod='si', interp_order=1):
""" Define the likelihood log-function.
Parameters
----------
params : tuple
Set of models parameters for which the model grid has to be
interpolated.
labels: Tuple of strings
Tuple of labels in the same order as initial_state, that is:
- first all parameters related to loaded models (e.g. 'Teff', 'logg')
- then the planet photometric radius 'R', in Jupiter radius
- (optionally) the flux of emission lines (labels should match those in
the em_lines dictionary), in units of the model spectrum (times mu)
- (optionally) the optical extinction 'Av', in mag
- (optionally) the ratio of total to selective optical extinction 'Rv'
- (optionally) 'Tbb1', 'Rbb1', 'Tbb2', 'Rbb2', etc. for each extra bb
contribution.
grid_param_list : list of 1d numpy arrays/lists OR None
- If list, should contain list/numpy 1d arrays with available grid of
model parameters.
- Set to None for a pure n-blackbody fit, n=1,2,...
- Note1: model grids should not contain grids on radius and Av, but
these should still be passed in initial_state (Av optional).
- Note2: for a combined grid model + black body, just provide
the grid parameter list here, and provide values for 'Tbbn' and 'Rbbn'
in initial_state, labels and bounds.
lbda_obs : numpy 1d ndarray or list
Wavelength of observed spectrum. If several instruments, should be
ordered per instrument, not necessarily as monotonically increasing
wavelength. Hereafter, n_ch = len(lbda_obs).
spec_obs : numpy 1d ndarray or list
Observed spectrum for each value of lbda_obs.
err_obs : numpy 1d/2d ndarray or list
Uncertainties on the observed spectrum. If 2d array, should be [2,n_ch]
where the first (resp. second) column corresponds to lower (upper)
uncertainty, and n_ch is the length of lbda_obs and spec_obs.
dist : float
Distance in parsec, used for flux scaling of the models.
model_grid : numpy N-d array, optional
If provided, should contain the grid of model spectra for each
free parameter of the given grid. I.e. for a grid of n_T values of Teff
and n_g values of Logg, the numpy array should be n_T x n_g x n_ch x 2,
where n_ch is the number of wavelengths for the observed spectrum,
and the last 2 dims are for wavelength and fluxes respectively.
If provided, takes precedence over model_name/model_reader.
model_reader : python routine, opt
External routine that reads a model file and returns a 2D numpy array,
where the first column corresponds to wavelengths, and the second
contains model values. See example routine in model_interpolation()
description.
em_lines: dictionary, opt
Dictionary of emission lines to be added on top of the model spectrum.
Each dict entry should be the name of the line, assigned to a tuple of
4 values:
1) the wavelength (in mu);
2) a string indicating whether line intensity is expressed in flux
('F'), luminosity ('L') or log(L/LSun) ("LogL");
3) the FWHM of the gaussian (or None if to be set automatically);
4) whether the FWHM is expressed in 'nm', 'mu' or 'km/s'.
The third and fourth can also be set to None. In that case, the FWHM of
the gaussian will automatically be set to the equivalent width of the
line, calculated from the flux to be injected and the continuum
level (measured in the grid model to which the line is injected).
Examples: em_lines = {'BrG':(2.1667,'F',None, None)};
em_lines = {'BrG':(2.1667,'LogL', 100, 'km/s')}
em_grid: dictionary pointing to lists, opt
Dictionary where each entry corresponds to an emission line and points
to a list of values to inject for emission line fluxes. For computation
efficiency, interpolation will be performed between the points of this
grid during the MCMC sampling. Dict entries should match labels and
em_lines.
dlbda_obs: numpy 1d ndarray or list, optional
Spectral channel width for the observed spectrum. It should be provided
IF one wants to weigh each point based on the spectral
resolution of the respective instruments (as in Olofsson et al. 2016).
instru_corr : numpy 2d ndarray or list, optional
Spectral correlation throughout post-processed images in which the
spectrum is measured. It is specific to the combination of instrument,
algorithm and radial separation of the companion from the central star.
Can be computed using distances.spectral_correlation(). In case of
a spectrum obtained with different instruments, build it with
distances.combine_corrs(). If not provided, it will consider the
uncertainties in each spectral channels are independent. See Greco &
Brandt (2017) for details.
instru_fwhm : float or list, optional
The instrumental spectral fwhm provided in nm. This is used to convolve
the model spectrum. If several instruments are used, provide a list of
instru_fwhm values, one for each instrument whose spectral resolution
is coarser than the model - including broad band
filter FWHM if relevant.
instru_idx: numpy 1d array, optional
1d array containing an index representing each instrument used
to obtain the spectrum, label them from 0 to n_instru. Zero for points
that don't correspond to any instru_fwhm provided above, and i in
[1,n_instru] for points associated to instru_fwhm[i-1]. This parameter
must be provided if the spectrum consists of points obtained with
different instruments.
filter_reader: python routine, optional
External routine that reads a filter file and returns a 2D numpy array,
where the first column corresponds to wavelengths, and the second
contains transmission values. Important: if not provided, but strings
are detected in instru_fwhm, the default format assumed for the files:
- first row containing header
- starting from 2nd row: 1st column: WL in mu, 2nd column: transmission
Note: files should all have the same format and wavelength units.
AV_bef_bb: bool, optional
If both extinction and an extra bb component are free parameters,
whether to apply extinction before adding the BB component (e.g.
extinction mostly from circumplanetary dust) or after the BB component
        (e.g. mostly interstellar extinction).
units_obs : str, opt {'si','cgs','jy'}
Units of observed spectrum. 'si' for W/m^2/mu; 'cgs' for ergs/s/cm^2/mu
or 'jy' for janskys.
units_mod: str, opt {'si','cgs','jy'}
Units of the model. 'si' for W/m^2/mu; 'cgs' for ergs/s/cm^2/mu or 'jy'
for janskys. If different to units_obs, the spectrum units will be
converted.
interp_order: int, opt, {-1,0,1}
Interpolation mode for model interpolation.
        -1: log interpolation (i.e. linear interpolation on log(Flux))
0: nearest neighbour model.
1: Order 1 spline interpolation.
Returns
-------
out: float
The log of the likelihood.
"""
if grid_param_list is not None:
if model_grid is None and model_reader is None:
msg = "model_name and model_reader must be provided"
raise TypeError(msg)
lbda_mod, spec_mod = make_model_from_params(params, labels, grid_param_list,
dist, lbda_obs, model_grid,
model_reader, em_lines, em_grid,
dlbda_obs, instru_fwhm,
instru_idx, filter_reader,
AV_bef_bb, units_obs, units_mod,
interp_order)
# evaluate the goodness of fit indicator
chi = goodness_of_fit(lbda_obs, spec_obs, err_obs, lbda_mod, spec_mod,
dlbda_obs=dlbda_obs, instru_corr=instru_corr,
instru_fwhm=instru_fwhm, instru_idx=instru_idx,
filter_reader=filter_reader, plot=False, outfile=None)
# log likelihood
lnlikelihood = -0.5 * chi
return lnlikelihood | 8411ec37268cd2c169b680b955678d13f0d10cbc | 3,656,334 |
def generic_list(request):
"""Returns a list of all of the document IDs in the matched DocStore."""
return umbrella_from_request(request).get_doc_ids() | 8c5f47c8816fca503c2c4fa93db1204b3b511157 | 3,656,335 |
def japan_results(request):
"""
    view function returns a template that displays Japan-specific photos
"""
images = Image.filter_images_by_location(location_id=12)
return render(request, "all_pictures/japan.html", {"images":images}) | d0fd80eac7529f5b9b5699439cabb0c92f82f007 | 3,656,336 |
class literal_block(str):
    """str subclass used to tag strings to dump as YAML literal blocks (assumed definition, normally provided alongside this helper)."""
def add_yaml_literal_block(yaml_object):
"""
Get a yaml literal block representer function to convert normal strings into yaml literals during yaml dumping
Convert string to yaml literal block
yaml docs: see "Block mappings" in https://pyyaml.org/wiki/PyYAMLDocumentation
"""
def literal_str_representer(dumper, data):
return dumper.represent_scalar("tag:yaml.org,2002:str", data, style="|")
return yaml_object.add_representer(literal_block, literal_str_representer) | 47b4295394a67e92bcbc5d7cb4c25a0a1ca220dc | 3,656,337 |
from typing import List
from typing import Dict
from typing import Set
from typing import Optional
import numpy as np
from spacy.tokens import Doc
def _spans_to_array(
doc: Doc,
sources: List[str],
label2idx: Dict[str, int],
labels_without_prefix: Set[str],
prefixes: Optional[Set[str]] = None,
warn_missing_labels: bool = False
) -> np.ndarray:
"""Convert the annotations of a spacy document into a 2D array.
Each row corresponds to a token, and each column to a labelling
source. In other words, the value at (i,j) represents the prediction
of source j for token i. This prediction is expressed as the
index of the label in the labels.
NB:
- Sources should be a list of labelling sources. If empty, all sources
are employed.
- If `prefixes` are provided (e.g., [I, B, L]), it is assumed that the
labels in `label2idx` contain the prefixes (e.g., I-PERSON,
B-PERSON).
- If `prefixes` are not provided, it is assumed that the labels in
`label2idx` do not contain prefixes (e.g, PERSON).
    - We also assume the O label is at position 0.
"""
if sources is None:
sources = list(doc.spans.keys())
if warn_missing_labels:
missing_labels = set()
# Creating the numpy array itself
data = np.zeros((len(doc), len(sources)), dtype=np.int16)
for source_index, source in enumerate(sources):
for span in doc.spans.get(source, []):
if span.label_ not in labels_without_prefix:
if warn_missing_labels:
missing_labels.add(span.label_)
continue
if prefixes is None:
# Do not use prefix labels (e.g., use PER instead of
# B-PER, I-PER, etc.)
data[span.start:span.end, source_index] = label2idx[
span.label_
]
else:
# If the span is a single token, we can use U
if "U" in prefixes and len(span) == 1:
data[span.start, source_index] = label2idx[
"U-%s" % span.label_
]
continue
# Otherwise, we use B, I and L
if "B" in prefixes:
data[span.start, source_index] = label2idx[
"B-%s" % span.label_
]
if "I" in prefixes:
start_i = (span.start+1) if "B" in prefixes else span.start
end_i = (span.end-1) if "L" in prefixes else span.end
data[start_i:end_i, source_index] = label2idx[
"I-%s" % span.label_
]
if "L" in prefixes:
data[span.end-1, source_index] = label2idx[
"L-%s" % span.label_
]
if warn_missing_labels:
print(
"WARNING: \
Span labels were found in the dataset that were not provided \
in `labels_without_prefices`: {}".format(missing_labels)
)
return data | ce9f22726877b713eb373dbee9ebb719ef655f4a | 3,656,338 |
def d_out_dist_cooler(P_mass, rho_dist_cool, w_drift):
"""
Calculates the tube's diameter of out distilliat from distilliat cooler to distilliat volume.
Parameters
----------
P_mass : float
The mass flow rate of distilliat, [kg/s]
rho_dist_cool : float
The density of liquid at cooling temperature, [kg/m**3]
w_drift :float
The speed of steam at the tube, [m/s]
Returns
-------
d_out_dist_cooler : float
The tube's diameter of out distilliat from distilliat cooler to distilliat volume, [m]
References
----------
&&&
"""
return P_mass/(0,785*rho_dist_cool*w_drift) | 8d6dfb85aa954ef88c821d2ee1d0bb787d409e96 | 3,656,339 |
import socket
def is_port_in_use(port):
"""
test if a port is being used or is free to use.
:param port:
:return:
"""
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
return s.connect_ex(('localhost', port)) == 0 | 5bbdd7b39c2380d2e07e85f483f3ea5072bb616b | 3,656,340 |
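A short usage sketch; the port number is arbitrary.
if is_port_in_use(8080):
    print("port 8080 is already taken, choose another one")
else:
    print("port 8080 is free")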
import tensorflow as tf
def create_variables_eagerly(getter, initial_value, **kwargs):
"""Attempts to force variable creation to be eager."""
eager_initial_value = None
if isinstance(initial_value, tf.Tensor):
if _is_eager_tensor(initial_value):
eager_initial_value = initial_value
else:
# Try to compute the static value (e.g. if the user used `tf.ones`).
eager_initial_value = tf.get_static_value(initial_value)
if eager_initial_value is not None:
# If we have an eager initial value we can create variables in eager mode.
with tf.init_scope():
return getter(initial_value=eager_initial_value, **kwargs)
else:
# Fall back to creating in whatever context we're in with user input.
return getter(initial_value=initial_value, **kwargs) | 832687547bd06aef61b8a1dca219564ef184dbb3 | 3,656,341 |
import time
def _Run(vm):
"""See base method.
Args:
vm: The vm to run the benchmark on.
Returns:
A list of sample.Sample objects.
"""
# Make changes e.g. compiler flags to spec config file.
if 'gcc' in FLAGS.runspec_config:
_OverwriteGccO3(vm)
# swap only if necessary; free local node memory and avoid remote memory;
# reset caches; set stack size to unlimited
# Also consider setting enable_transparent_hugepages flag to true
cmd = ('echo 1 | sudo tee /proc/sys/vm/swappiness && '
'echo 1 | sudo tee /proc/sys/vm/zone_reclaim_mode && '
'sync ; echo 3 | sudo tee /proc/sys/vm/drop_caches && '
'ulimit -s unlimited && ')
cmd += 'runcpu '
if FLAGS.spec17_build_only:
cmd += '--action build '
if FLAGS.spec17_rebuild:
cmd += '--rebuild '
version_specific_parameters = []
# rate runs require 2 GB minimum system main memory per copy,
# not including os overhead. Refer to:
# https://www.spec.org/cpu2017/Docs/system-requirements.html#memory
copies = min(vm.NumCpusForBenchmark(),
vm.total_free_memory_kb // (2 * KB_TO_GB_MULTIPLIER))
version_specific_parameters.append(' --copies=%s ' %
(FLAGS.spec17_copies or copies))
version_specific_parameters.append(
' --threads=%s ' % (FLAGS.spec17_threads or vm.NumCpusForBenchmark()))
if FLAGS.spec17_fdo:
version_specific_parameters.append('--feedback ')
vm.RemoteCommand('cd /scratch/cpu2017; mkdir fdo_profiles')
start_time = time.time()
stdout, _ = speccpu.Run(vm, cmd, ' '.join(FLAGS.spec17_subset),
version_specific_parameters)
if FLAGS.spec17_build_only:
if 'Error' in stdout and 'Please review this file' in stdout:
raise errors.Benchmarks.RunError('Error during SPEC compilation.')
return [
sample.Sample(
'compilation_time',
time.time() - start_time, 's', {
'spec17_subset': FLAGS.spec17_subset,
'gcc_version': build_tools.GetVersion(vm, 'gcc')
})
]
partial_results = True
# Do not allow partial results if any benchmark subset is a full suite.
for benchmark_subset in FLAGS.benchmark_subset:
if benchmark_subset in ['intspeed', 'fpspeed', 'intrate', 'fprate']:
partial_results = False
log_files = set()
for test in FLAGS.spec17_subset:
if test in LOG_FILENAME:
log_files.add(LOG_FILENAME[test])
else:
if test in INTSPEED_SUITE:
log_files.add(LOG_FILENAME['intspeed'])
elif test in INTRATE_SUITE:
log_files.add(LOG_FILENAME['intrate'])
elif test in FPSPEED_SUITE:
log_files.add(LOG_FILENAME['fpspeed'])
elif test in FPRATE_SUITE:
log_files.add(LOG_FILENAME['fprate'])
for log_file in log_files:
vm.RemoteCommand(
f'cp {vm.GetScratchDir()}/cpu2017/result/{log_file} ~/{log_file}.log')
vm.PullFile(vm_util.GetTempDir(), f'~/{log_file}.log')
samples = speccpu.ParseOutput(vm, log_files, partial_results, None)
for item in samples:
item.metadata['vm_name'] = vm.name
item.metadata['spec17_gcc_flags'] = FLAGS.spec17_gcc_flags
return samples | ba7c8575c45cdd7edccdf212d8ff29adb0d0fe1b | 3,656,342 |
def mixin_method(ufunc, rhs=None, transpose=True):
"""Decorator to register a mixin class method
Using this decorator ensures that derived classes that are declared
with the `mixin_class` decorator will also have the behaviors that this
class has.
ufunc : numpy.ufunc
A universal function (or NEP18 callable) that is hooked in awkward1,
i.e. it can be the first argument of a behavior
rhs : Set[type] or None
List of right-hand side argument types (leave None if unary function)
The left-hand side is expected to always be ``self`` of the parent class
If the function is not unary or binary, call for help :)
transpose : bool
        Automatically create a transpose signature (only makes sense for binary ufuncs)
"""
def register(method):
if not isinstance(rhs, (set, type(None))):
raise ValueError("Expected a set of right-hand-side argument types")
if transpose and rhs is not None:
def transposed(left, right):
return method(right, left)
method._awkward_mixin = (ufunc, rhs, transposed)
else:
method._awkward_mixin = (ufunc, rhs, None)
return method
return register | d1130740628eb947bd786bc3393343b8c283164d | 3,656,343 |
def set_def_quick_print(setting):
"""
Set the global default (henceforth) behavior whether to quick print
when stamping or stopping.
Args:
setting: Passed through bool().
Returns:
bool: Implemented setting value.
"""
setting = bool(setting)
SET['QP'] = setting
return setting | 835028c97fb03435de65df6f13c5c05fe61710f0 | 3,656,344 |
from datetime import datetime
def time_handler(start_time, start_fmt, elaps_fmt, today):
"""return StartTime, ElapsedTime tuple using
start/sub time string"""
start_time = datetime.strptime(start_time, '%Y-%m-%dT%H:%M:%S')
start_time = StartTime(start_time.year, start_time.month,
start_time.day, start_time.hour,
start_time.minute, start_time.second)
start_time.fmt = start_fmt
delta = today - start_time
delta = ElapsedTime(delta.days, delta.seconds, 0)
delta.fmt = elaps_fmt
return start_time, delta | 7f063a119947f90a24d76fd2f5ce7eba790a3df5 | 3,656,345 |
import numpy as np
import pandas as pd
def lgb_multi_weighted_logloss_exgal(y_preds, train_data):
"""
@author olivier https://www.kaggle.com/ogrellier
https://www.kaggle.com/ogrellier/plasticc-in-a-kernel-meta-and-data/code
multi logloss for PLAsTiCC challenge
"""
# class_weights taken from Giba's topic : https://www.kaggle.com/titericz
# https://www.kaggle.com/c/PLAsTiCC-2018/discussion/67194
# with Kyle Boone's post https://www.kaggle.com/kyleboone
y_true = train_data.get_label()
if len(np.unique(y_true)) > 14:
classes_exgal.append(99)
class_weight_exgal[99] = 2
y_p = y_preds.reshape(y_true.shape[0], len(classes_exgal), order='F')
# normalize
y_p /= y_p.sum(1)[:,None]
# Trasform y_true in dummies
y_ohe = pd.get_dummies(y_true)
# Normalize rows and limit y_preds to 1e-15, 1-1e-15
y_p = np.clip(a=y_p, a_min=1e-15, a_max=1 - 1e-15)
# Transform to log
y_p_log = np.log(y_p)
# Get the log for ones, .values is used to drop the index of DataFrames
# Exclude class 99 for now, since there is no class99 in the training set
# we gave a special process for that class
y_log_ones = np.sum(y_ohe.values * y_p_log, axis=0)
# Get the number of positives for each class
nb_pos = y_ohe.sum(axis=0).values.astype(float)
# Weight average and divide by the number of positives
class_arr = np.array([class_weight_exgal[k] for k in sorted(class_weight_exgal.keys())])
y_w = y_log_ones * class_arr / nb_pos
loss = - np.sum(y_w) / np.sum(class_arr)
return 'wloss', loss, False | 576e0263f9088341bd4d8b0e7e016de513da26ca | 3,656,346 |
from functools import wraps
# current_user and abort are assumed to come from the web framework in use
# (e.g. flask_login.current_user and flask.abort).
def api_owner_required(f):
"""
Authorization decorator for api requests that require the record's owner
Ensure a user is admin or the actual user who created the record,
if not send a 400 error.
:return: Function
"""
@wraps(f)
def decorated_function(*args, **kwargs):
if current_user.is_admin():
return f(*args, **kwargs)
else:
user_id = kwargs['user_id']
if current_user.id != user_id:
abort(400)
return f(*args, **kwargs)
return decorated_function | 4114abf4abc8afd1fd6d68388c17ed04e4029c13 | 3,656,347 |
import os
from PIL import Image
# division_array, image_concatenate and polarize are assumed to be helpers
# defined elsewhere in the project.
def save_prediction_image(stacked_img, im_name, epoch, save_folder_name="result_images", save_im=True):
"""save images to save_path
Args:
stacked_img (numpy): stacked cropped images
save_folder_name (str): saving folder name
"""
div_arr = division_array(388, 2, 2, 512, 512)
img_cont = image_concatenate(stacked_img.cpu().data.numpy(), 2, 2, 512, 512)
img_cont = polarize((img_cont)/div_arr)*255
img_cont_np = img_cont.astype('uint8')
img_cont = Image.fromarray(img_cont_np)
# organize images in every epoch
desired_path = save_folder_name + '/epoch_' + str(epoch) + '/'
# Create the path if it does not exist
if not os.path.exists(desired_path):
os.makedirs(desired_path)
# Save Image!
export_name = str(im_name) + '.png'
img_cont.save(desired_path + export_name)
return img_cont_np | 69bd429c74a3af66346d41f116495b772baea828 | 3,656,348 |
def flatten_probas_ori(probas, labels, ignore=None):
"""
Flattens predictions in the batch
"""
if probas.dim() == 3:
# assumes output of a sigmoid layer
B, H, W = probas.size()
probas = probas.view(B, 1, H, W)
B, C, H, W = probas.size()
probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C
labels = labels.view(-1)
if ignore is None:
return probas, labels
valid = (labels != ignore)
vprobas = probas[valid.nonzero().squeeze()]
vlabels = labels[valid]
return vprobas, vlabels | 95d0df96a7ea612546616cc96b8ea78f5bd52641 | 3,656,349 |
import os
def get_new_file_number(pat, destdir, startnum=1, endnum=10000):
"""Substitute the integers from startnum to endnum into pat and
return the first one that doesn't exist. The file name that is
searched for is os.path.join(destdir, pat % i)."""
for i in range(startnum, endnum):
temp = pat % i
if not os.path.exists(os.path.join(destdir, temp)):
return i | 33ffed09804692bd1cb3dbe94cbdfc36eed42270 | 3,656,350 |
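Usage sketch with a hypothetical pattern and directory:
# If /tmp/frames/frame_0001.png and frame_0002.png already exist, this returns 3
# (the first free slot); it returns None if no slot below endnum is free.
n = get_new_file_number("frame_%04d.png", "/tmp/frames")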
def VisionTransformer_small(pretrained=False,input_shape=(3,224,224),patch_size=16,num_classes=1000, depth=8,drop_rate=0.2,**kwargs):
""" My custom 'small' ViT model. Depth=8, heads=8= mlp_ratio=3."""
vit= VisionTransformer( patch_size=patch_size,num_classes=num_classes, depth=depth,
num_heads=12, mlp_ratio=3., qkv_bias=False, qk_scale=768 ** -0.5, representation_size=None,
drop_rate=drop_rate, attn_drop_rate=drop_rate, drop_path_rate=drop_rate, hybrid_backbone=None)
model=ImageClassificationModel(input_shape=input_shape,output=vit)
if pretrained:
# NOTE my scale was wrong for original weights, leaving this here until I have better ones for this model
vit.qk_scale=768 ** -0.5
return model | 0022e3371c5c9cc138a78f1927f807a91d077f3c | 3,656,351 |
import os
def downgrade_database(
alembic_config_filename: str,
destination_revision: str,
alembic_base_dir: str = None,
starting_revision: str = None,
version_table: str = DEFAULT_ALEMBIC_VERSION_TABLE,
as_sql: bool = False) -> None:
"""
Use Alembic to downgrade our database. USE WITH EXTREME CAUTION.
"revision" is the destination revision.
See http://alembic.readthedocs.org/en/latest/api/runtime.html
but also, in particular, ``site-packages/alembic/command.py``
Arguments:
alembic_config_filename:
config filename
alembic_base_dir:
directory to start in, so relative paths in the config file work
starting_revision:
revision to start at (typically ``None`` to ask the database)
destination_revision:
revision to aim for
version_table: table name for Alembic versions
as_sql:
run in "offline" mode: print the migration SQL, rather than
modifying the database. See
http://alembic.zzzcomputing.com/en/latest/offline.html
"""
if alembic_base_dir is None:
alembic_base_dir = os.path.dirname(alembic_config_filename)
os.chdir(alembic_base_dir) # so the directory in the config file works
config = Config(alembic_config_filename)
script = ScriptDirectory.from_config(config)
# noinspection PyUnusedLocal,PyProtectedMember
def downgrade(rev, context):
return script._downgrade_revs(destination_revision, rev)
log.info("Downgrading database to revision {!r} using Alembic",
destination_revision)
with EnvironmentContext(config,
script,
fn=downgrade,
as_sql=as_sql,
starting_rev=starting_revision,
destination_rev=destination_revision,
tag=None,
version_table=version_table):
script.run_env()
log.info("Database downgrade completed") | bea91f195eb27fcaf37706ba18d415b0ff743dd6 | 3,656,352 |
import sys
import six
def cast_env(env):
"""Encode all the environment values as the appropriate type for each Python version
This assumes that all the data is or can be represented as UTF8"""
env_type = six.ensure_binary if sys.version_info[0] < 3 else six.ensure_str
return {env_type(key): env_type(value) for key, value in six.iteritems(env)} | 885811983c6ca8732338a68f683e5c0f833820c2 | 3,656,353 |
def query_filter_choices(arg=None, fq=[]):
"""
Makes solr query and returns facets for tickets.
:param arg: solr query, string
"""
params = {
'short_timeout': True,
'fq': [
'project_id_s:%s' % c.project._id,
'mount_point_s:%s' % c.app.config.options.mount_point,
'type_s:Ticket',
] + fq,
'rows': 0,
}
params.update(FACET_PARAMS)
result = search(arg, **params)
return get_facets(result) | 226dd808a42981b6183c4425231107d0e7197b2b | 3,656,354 |
def has_no_duplicates(input_):
"""Check that a list contains no duplicates.
For example:
['aa', 'bb', 'cc'] is valid.
['aa', 'bb', 'aa'] is not valid. The word aa appears more than once.
"""
return len(input_) == len(set(input_)) | 6bc1b29b3509e4b17523408ea362591cace8d05d | 3,656,355 |
import numpy as np
# munsellkit and munsell_specification_to_uplab are assumed to be available in
# the enclosing module.
def uplab_to_renotation_specification(spec, lab):
"""Convert a color in the normalized UP LAB space to its equivalent Munsell color.
Parameters
----------
lab : np.ndarray of shape (3,) and dtype float
The `l', `a-star` and `b-star` values for the color, with `l` in the domain [0, 1],
and `a-star` and `b-star` each in the domain [-0.5, 0.5].
Returns
-------
np.ndarray of shape (4,) and dtype float
A Colorlab-compatible Munsell specification (`hue_shade`, `value`, `chroma`, `hue_index`),
with `hue_shade` one of [0, 2.5, 5, 7.5], `value` one of [0, 1, 2, ..., 10],
`chroma` one of [0, 2, 4, ..., 50] and `hue_index` one of [1, 2, 3, ..., 10].
Notes
-----
Measures the distance in the UP LAB a-b color plane at the given `l` (luminosity) value
between the given `a*` and `b*` values and those of 4 bracketing `a*` and `b*` value
pairs from the Munsell renotation (`hue_shade` of 2.5, 5, 7.5 and 10, and `chroma` one
of [0, 2, 4, ..., 50]). Selects the one with the closest cartesian distance to the
given target.
"""
hue_shade, value, chroma, hue_index = spec
v_ren = value
if v_ren < 1:
v_ren = 1
elif v_ren > 9 and v_ren < 9.9:
v_ren = 9
v_ren = round(v_ren)
if np.isnan(hue_shade):
# Grays
spec[1] = v_ren
return spec
# Colors
c0, _ = divmod(chroma, 2)
c0 = c0 * 2
c1 = c0 + 2
h0, _ = divmod(hue_shade, 2.5)
h0 = h0 * 2.5
h1 = h0 + 2.5
l, a_star, b_star = lab
closest_dist = None
closest = None
for ct in [c0, c1]:
for ht in [h0, h1]:
test_spec = munsellkit.normalized_color(
np.array([ht, value, ct, hue_index]),
rounding='renotation', out='spec')
lt, at, bt = munsell_specification_to_uplab(test_spec)
distance_sq = (at - a_star) * (at - a_star) + (bt - b_star) * (bt - b_star)
# print(f'test {test_spec}: distance is {distance_sq}')
if closest_dist is None or closest_dist > distance_sq:
closest_dist = distance_sq
closest = test_spec
closest[1] = v_ren
return closest | 7354ee53f9067f4720c438d1a8f743ca0b441c51 | 3,656,356 |
import argparse
def string_type_check(valid_strings, case_sensitive = True, metavar = None):
""" Creates an argparse type for a list of strings.
The passed argument is declared valid if it is a valid string which exists
in the passed list valid_strings. If case_sensitive is False, all input
strings and strings in valid_strings are processed as lowercase. Leading
and trailing whitespace is ignored in all strings.
Returns:
A function which can be passed as an argument type, when calling
add_argument on an ArgumentParser object
Raises:
ArgumentTypeError: Passed argument must be string within valid list.
"""
metavar = 'value' if metavar is None else metavar
valid_strings = [x.strip() for x in valid_strings]
if not case_sensitive:
valid_strings = [x.lower() for x in valid_strings]
def _type_checker(value):
value = str(value)
valid = True
if not case_sensitive:
value = value.lower()
if not value in valid_strings:
valid = False
case_msg = ' (case sensitive)' if case_sensitive else ''
msg = 'invalid choice: %s (valid settings for %s%s are: %s)' % (
value, metavar, case_msg, valid_strings.__str__()[1:-1])
if not valid:
raise argparse.ArgumentTypeError(msg)
return value
return _type_checker | dc2814638d479e2ec5182b0c03ffe95e7379e47c | 3,656,357 |
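A short argparse sketch using the checker; the option name and choices are illustrative.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--mode",
                    type=string_type_check(["fast", "slow"], case_sensitive=False, metavar="mode"))
args = parser.parse_args(["--mode", "FAST"])
print(args.mode)  # "fast": lower-cased because case_sensitive=False; an invalid value is rejected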
def _getBestSize(value):
"""
Give a size in bytes, convert it into a nice, human-readable value
with units.
"""
if value >= 1024.0**4:
value = value / 1024.0**4
unit = 'TB'
elif value >= 1024.0**3:
value = value / 1024.0**3
unit = 'GB'
elif value >= 1024.0**2:
value = value / 1024.0**2
unit = 'MB'
elif value >= 1024.0:
value = value / 1024.0
unit = 'kB'
else:
unit = 'B'
return value, unit | 6c1859c50edcbd5715443fbf30775eeee83d6a0c | 3,656,358 |
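A couple of illustrative conversions:
size, unit = _getBestSize(123456789)
print(f"{size:.1f} {unit}")   # "117.7 MB"
size, unit = _getBestSize(512)
print(f"{size:.0f} {unit}")   # "512 B"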
def enable_oeenclave_debug(oe_enclave_addr):
"""For a given OE enclave, load its symbol and enable debug flag for all its TCS"""
enclave = oe_debug_enclave_t(oe_enclave_addr)
# Check if magic matches
if not enclave.is_valid():
return False
# No version specific checks.
# The contract will be extended in backwards compatible manner.
# Debugger may use version to take specific actions in future.
# Check if debugging is enabled.
if enclave.debug == 0:
print ("oelldb: Debugging not enabled for enclave %s" % enclave.path)
return False
# Check if the enclave is loaded in simulation mode.
if enclave.simulate != 0:
print ("oelldb: Enclave %s loaded in simulation mode" % enclave.path)
# Load symbols for the enclave
if load_enclave_symbol(enclave.path, enclave.base_address) != 1:
return False
print("oelldb: Symbols loaded for enclave \n")
for tcs in enclave.tcs:
set_tcs_debug_flag(tcs)
print("oelldb: All tcs set to debug for enclave \n")
return True | 84de42a5e16adc7f4c40593b299a28b44293ac7d | 3,656,359 |
import os
def loadxrmcresult_xmimsim(xmimsimpath, outradix="out", convoluted=False):
"""XRMC result based on input files converted from XMIMSIM"""
xrmcoutpath = os.path.join(xmimsimpath, "xrmc", "output")
if convoluted:
suffix = "_convoluted"
else:
suffix = "_lines"
return loadxrmcresult(xrmcoutpath, outradix + suffix, ext=".dat") | dba17556f35415ee43105fcd25378eead32ad8f0 | 3,656,360 |
def create_line_segments(df, x="lon", y="lat", epsg=4269):
"""Creates a GeodataFrame of line segments from the
shapes dataframe (CRS is NAD83)
Params:
df (DataFrame): pandas DataFrame
x, y (str, optional) Default values x="lon", y="lat",
column names for x and y coordinates
epsg (int): Default value epsg=4269; EPSG value for x,y coordinate system
Returns:
gdf: (GeoDataFrame) Line GeoDataFrame in passed Coordinate System
"""
    if df[x].isna().sum() > 0 or df[y].isna().sum() > 0:
        raise ValueError(
            f"DataFrame contains Null coordinates; consider removing rows with Null {x,y} values"
        )
points = [Point(xy) for xy in zip(df[x], df[y])]
gdf = gpd.GeoDataFrame(df.copy(), geometry=points)
line_segments = (
gdf.groupby(["shape_id"])["geometry"]
.apply(lambda x: LineString(x.tolist()))
.reset_index()
)
gdf_out = gpd.GeoDataFrame(line_segments, geometry="geometry", crs=from_epsg(epsg))
return gdf_out | a0b23c165dc808cc2793f3a62ce002dbf5990562 | 3,656,361 |
import numpy as np
def population_correlation(data_matrix, x_index, y_index):
"""
data_matrix is a numpy multi-dimensional array (matrix)
x_index and y_index are the index for the first and second variables respectively
it returns the correlation between two variables in a data_matrix
"""
transposed_data = data_matrix.transpose()
x_population = transposed_data[x_index]
x_mean = np.mean(x_population)
x_std = np.std(x_population)
y_population = transposed_data[y_index]
y_mean = np.mean(y_population)
y_std = np.std(y_population)
# To calculate the expectation means to calculate the cov(x_population, y_population)
# This can also be done using numpy. For that use: np.cov(x_population, y_population, bias=True)
# bias=True indicates that we are calculating the population covariance
# np.cov returns a bxb matrix, where b is the amount of vectors passed as parameter, in our case b=2
expectation = np.mean((x_population - x_mean) * (y_population - y_mean))
std_product = x_std * y_std
return expectation/std_product | 5216a617b5afba9c784fa18cad8506fd57f64e61 | 3,656,362 |
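# A quick sanity check for population_correlation above: on a small matrix the
# result should match the off-diagonal entry of numpy's correlation matrix,
# since Pearson correlation is unaffected by the population/sample distinction.
import numpy as np

data = np.array([[1.0, 2.0, 10.0],
                 [2.0, 4.1, 9.0],
                 [3.0, 6.2, 8.5],
                 [4.0, 8.1, 7.0]])
print(population_correlation(data, 0, 1))
print(np.corrcoef(data.T[0], data.T[1])[0, 1])  # should agree with the line above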
from typing import Any
def upload(workspace: str, table: str) -> Any:
"""
Store a nested_json tree into the database in coordinated node and edge tables.
`workspace` - the target workspace.
`table` - the target table.
`data` - the nested_json data, passed in the request body.
"""
# Set up the parameters.
data = request.data.decode("utf8")
space = db.db(workspace)
edgetable_name = f"{table}_edges"
int_nodetable_name = f"{table}_internal_nodes"
leaf_nodetable_name = f"{table}_leaf_nodes"
# Set up the database targets.
if space.has_collection(edgetable_name):
edgetable = space.collection(edgetable_name)
else:
edgetable = space.create_collection(edgetable_name, edge=True)
if space.has_collection(int_nodetable_name):
int_nodetable = space.collection(int_nodetable_name)
else:
int_nodetable = space.create_collection(int_nodetable_name)
if space.has_collection(leaf_nodetable_name):
leaf_nodetable = space.collection(leaf_nodetable_name)
else:
leaf_nodetable = space.create_collection(leaf_nodetable_name)
# Analyze the nested_json data into a node and edge table.
(nodes, edges) = analyze_nested_json(data, int_nodetable_name, leaf_nodetable_name)
# Upload the data to the database.
edgetable.insert_many(edges)
int_nodetable.insert_many(nodes[0])
leaf_nodetable.insert_many(nodes[1])
return dict(
edgecount=len(edges), int_nodecount=len(nodes[0]), leaf_nodecount=len(nodes[1])
) | 7fb4d0c4c31f499944b263f1f1fedcff34667ea1 | 3,656,363 |
def validate_google_login(email):
"""
Validate a login completed via Google, returning the user id on success.
An ``ODPIdentityError`` is raised if the login cannot be permitted for any reason.
:param email: the Google email address
:raises ODPUserNotFound: if there is no user account for the given email address
:raises ODPAccountLocked: if the user account has been temporarily locked
:raises ODPAccountDisabled: if the user account has been deactivated
:raises ODPEmailNotVerified: if the email address has not been verified
"""
user = get_user_by_email(email)
if not user:
raise x.ODPUserNotFound
if is_account_locked(user.id):
raise x.ODPAccountLocked
if not user.active:
raise x.ODPAccountDisabled
return user.id | 36e095f58600b6b8a799c459ad6181afafcbcf93 | 3,656,364 |
def add_months(start_date, months, date_format=DATE_FORMAT):
"""
Return a date with an added desired number of business months
Example 31/1/2020 + 1 month = 29/2/2020 (one business month)
"""
new_date = start_date + relativedelta(months=+months)
return new_date.strftime(date_format) | 7f579dd33807f30fa83a95b554882d6f8bf18626 | 3,656,365 |
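# A usage sketch for add_months above, reproducing the docstring example.
# date_format is passed explicitly so the module-level DATE_FORMAT constant is
# not needed; dateutil's relativedelta is assumed imported as in the original module.
from datetime import date

print(add_months(date(2020, 1, 31), 1, date_format="%d/%m/%Y"))  # 29/02/2020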
def inVolts(mv):
""" Converts millivolts to volts... you know, to keep the API
consistent. """
return mv/1000.0 | 6c92195996be1aa2bd52aa0a95d247f7fdef5955 | 3,656,366 |
from typing import Mapping
from typing import Any
from typing import Tuple
import uuid
def extract_hit(
hit: Mapping[str, Any],
includes: Tuple[str] = (ID_FIELD,),
source: str = '_source'
) -> Mapping[str, Any]:
"""
Extract a document from a single search result hit.
:param hit: the search hit document
:param includes: the metadata keys to include in the return document
:param source: the key that contains the source document
:return:
"""
doc = {
**{
k: hit.get(k) for k in includes
},
**hit.get(source)
}
# If the document ID is included...
if ID_FIELD in doc:
# ...convert it to a UUID.
doc[ID_FIELD] = uuid.UUID(doc.get(ID_FIELD))
return doc | d67f68618bbfe0e86c1525845cf4af69be31a8df | 3,656,367 |
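# A hedged usage sketch for extract_hit above. ID_FIELD is a module constant
# not shown here; this example assumes it is "_id" and holds a UUID string.
hit = {
    "_id": "9f1c1c1e-8f7a-4c1e-9b1a-2d3e4f5a6b7c",
    "_source": {"title": "example document", "views": 3},
}
doc = extract_hit(hit)
print(doc)  # {'_id': UUID('9f1c1c1e-...'), 'title': 'example document', 'views': 3}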
import time
import random
from hashlib import md5
def generateUserIDToken(id):
"""Generates a unique user id token."""
t = int(time.time() * 1000)
r = int(random.random() * 100000000000000000)
data = "%s %s %s %s" % (ip, t, r, id)
return md5(data.encode('utf-8')).hexdigest() | bc523855df0b911868d802352c83bb99d4768cf3 | 3,656,368 |
from pytools import generate_nonnegative_integer_tuples_summing_to_at_most as gnitstam
def grad_simplex_monomial_basis(dims, n):
"""Return the gradients of the functions returned by
:func:`simplex_monomial_basis`.
:returns: a :class:`tuple` of functions, each of which
accepts arrays of shape *(dims, npts)*
and returns a :class:`tuple` of length *dims* containing
the derivatives along each axis as an array of size *npts*.
'Scalar' evaluation, by passing just one vector of length *dims*,
is also supported.
.. versionadded:: 2016.1
"""
warn("grad_simplex_monomial_basis_with_mode_ids is deprecated. "
"Use monomial_basis_for_space instead. "
"This function will go away in 2022.",
DeprecationWarning, stacklevel=2)
return tuple(partial(grad_monomial, order) for order in gnitstam(n, dims)) | 6be49780a984b9a8fb1b54073d845da683d06e36 | 3,656,369 |
from typing import Collection
def get_collection() -> Collection:
"""Коллекция для хранения моделей."""
return _COLLECTION | e82f93a14e1a6640fe9b7b02b062540073060acf | 3,656,370 |
def ask_for_flasherhwver():
"""
Ask for the flasher version, either 1 or 2 right now...
"""
#if FLASHER_SKIP_ON_VALID_DETECTION and FLASHER_VERSION != 1:
# return FLASHER_VERSION
FLASHER_VERSION = 1
flash_version = FLASHER_VERSION
if FLASHER_VERSION is None:
while True:
try:
flash_version = int(raw_input("--- Enter version of programmer hardware [Available Versions: Programmer V1 or Programmer V2]: ".format(FLASHVER=flash_version)))
except:
pass
if flash_version == 1 or flash_version == 2:
break
print("<<< USER REPORTED HARDWARE FLASHER REVISION AS VERSION", flash_version, ">>>")
return flash_version | 6d9cf88ce3fd6850e345431b85ee0ed1bcaffb84 | 3,656,371 |
import torch
def real_to_complex_channels(x, separate_real_imag=False):
""" Inverse of complex_as_real_channels: C*2 real channels (or 2*C if separate_real_imag) to C complex channels. """
if separate_real_imag:
channel_shape = (2, -1)
permute = (0, 2, 3, 4, 1)
else:
channel_shape = (-1, 2)
permute = (0, 1, 3, 4, 2)
return torch.view_as_complex(channel_reshape(x, channel_shape).permute(*permute).contiguous()) | 76692fb1597ae30d99672361e9a6db74f9d1dd86 | 3,656,372 |
def create_coffee_machine() -> CoffeeMachine:
"""Create CoffeeMachine object for testing"""
_coffee_machine = CoffeeMachine()
_coffee_machine.refill_water()
_coffee_machine.refill_milk()
_coffee_machine.refill_coffee_beans()
return _coffee_machine | 10c203249c681d8f13058227521532aefaeda478 | 3,656,373 |
def validate_mqtt_vacuum(value):
"""Validate MQTT vacuum schema."""
schemas = {LEGACY: PLATFORM_SCHEMA_LEGACY, STATE: PLATFORM_SCHEMA_STATE}
return schemas[value[CONF_SCHEMA]](value) | 6c850f7f72d61ef0b0b04363a31d2563bf316d33 | 3,656,374 |
def detect(iring, mode, axis=None, *args, **kwargs):
"""Apply square-law detection to create polarization products.
Args:
iring (Ring or Block): Input data source.
mode (string):
``'scalar': x -> real x.x*``
``'jones': x,y -> complex x.x* + 1j*y.y*, x.y*``
``'stokes': x,y -> real I, Q, U, V``
axis: Integer or string specifying the polarization axis. Defaults to
'pol'. Not used if mode = 'scalar'.
*args: Arguments to ``bifrost.pipeline.TransformBlock``.
**kwargs: Keyword Arguments to ``bifrost.pipeline.TransformBlock``.
**Tensor semantics**::
Input: [..., 'pol', ...], dtype = any complex, space = CUDA
Output: [..., 'pol', ...], dtype = real or complex, space = CUDA
Returns:
DetectBlock: A new block instance.
"""
return DetectBlock(iring, mode, axis, *args, **kwargs) | 42690bf325e0d21f5839ac5f87e3d0be7ca42029 | 3,656,375 |
def DiffedUpdateItem(
Table: TableResource, Key: ItemKey, before: InputItem, after: InputItem, **kwargs
) -> InputItem:
"""Safe top-level diff update that requires only 'before' and 'after' dicts.
By calling this you are trusting that we will make a choice about
whether or not you actually have an update to perform.
"""
item_diff = build_update_diff(before, after)
if item_diff:
logger.info(
f"Updating item {Key} because there was an item diff.",
extra=dict(json=dict(item_diff=item_diff)),
)
kwargs.pop("condition_exists", None)
set_and_remove = select_attributes_for_set_and_remove(item_diff)
return UpdateItem(
Table,
Key,
set_attrs=set_and_remove["set_attrs"],
remove_attrs=set_and_remove["remove_attrs"],
condition_exists=True,
**kwargs,
)
else:
logger.debug(
f"Not updating item {Key} because there was "
"no meaningful difference between the items",
extra=dict(json=dict(before=before, after=after)),
)
return before | e3f8fc9d6ec20a294c7fd04e20dc7b230290ab4a | 3,656,376 |
def is_watchdog_supported():
    """ Return ``True`` if watchdog is available."""
    try:
        import watchdog  # probe only; the module itself is not used here
except ImportError:
return False
return True | 8c777b9a6b29876d902087f2b719519771b5fc2a | 3,656,377 |
def set_bit(arg1, x, bit, y):
"""
set_bit(Int_ctx arg1, Int_net x, unsigned int bit, Int_net y) -> Int_net
Parameters
----------
arg1: Int_ctx
x: Int_net
bit: unsigned int
y: Int_net
"""
return _api.set_bit(arg1, x, bit, y) | c5e7062a9e7f8f46bb4935905b9a485487c0bfad | 3,656,378 |
def get_time_format(format='medium', locale=LC_TIME):
"""Return the time formatting patterns used by the locale for the specified
format.
>>> get_time_format(locale='en_US')
<DateTimePattern u'h:mm:ss a'>
>>> get_time_format('full', locale='de_DE')
<DateTimePattern u'HH:mm:ss zzzz'>
:param format: the format to use, one of "full", "long", "medium", or
"short"
:param locale: the `Locale` object, or a locale string
"""
return Locale.parse(locale).time_formats[format] | d774c14a27b263f4a9cadfd4d144cd9bd0ce1fd3 | 3,656,379 |
def resource_type_service(resource_type):
"""Gets the service name from a resource type.
:exc:`ValueError` is raised if the resource type is invalid, see
:func:`parse_resource_type`.
>>> resource_type_service('AWS::ECS::Instance')
'ECS'
"""
return parse_resource_type(resource_type)[1] | 04ff4ffa22e742dbd63a41cb8f9eec79628938f2 | 3,656,380 |
def loads(ss):
""" loads(ss)
Load a struct from the given string.
Parameters
----------
ss : (Unicode) string
A serialized struct (obtained using ssdf.saves()).
"""
# Check
if not isinstance(ss, basestring):
raise ValueError('ssdf.loads() expects a string.')
# Read
reader = _SSDFReader()
return reader.text_to_struct(ss) | ee07c433f9453b5a9f444cbcda2b80217243f0f0 | 3,656,381 |
from typing import IO
import mimetypes
def guess_mime_type(file_object: IO) -> str:
"""Guess mime type from file extension."""
mime_type, _encoding = mimetypes.guess_type(file_object.name)
if not mime_type:
mime_type = "application/octet-stream"
return mime_type | 12e6e6667b08eaaa24b822c37d56055c1487a801 | 3,656,382 |
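# A usage sketch for guess_mime_type above: only the file name/extension is
# inspected, so any object with a .name attribute works. Results can vary
# slightly per platform because mimetypes consults system type maps.
import tempfile

with tempfile.NamedTemporaryFile(suffix=".csv") as handle:
    print(guess_mime_type(handle))  # typically text/csv
with tempfile.NamedTemporaryFile(suffix=".unknownext") as handle:
    print(guess_mime_type(handle))  # falls back to application/octet-stream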
def postcount_test(metadict_friends):
"""Среднее число постов по выборке, чтобы выделить активных/неактивных неймфагов."""
all_postcount = 0
for namefag in metadict_friends.keys():
name_number = namefag[0]
name_postcount = cursor.execute("SELECT postcount FROM namefags WHERE number=?"\
,(name_number,)).fetchall()
all_postcount = all_postcount + int(name_postcount[0][0])
name_number = len(metadict_friends)
medial_postcount = all_postcount / name_number
return medial_postcount,all_postcount | dd9f717f8c1c81e6805a257e28e74124f156661f | 3,656,383 |
def extract_stack_name(fields):
"""_extract_stack_name(self, fields: list[str]) -> str
Extract a stack name from the fields
Examples:
ffffffff818244f2 [unknown] ([kernel.kallsyms]) -> [kernel.kallsyms]
        1094d __GI___libc_recvmsg (/lib/x86_64-linux-gnu/libpthread-2.23.so) -> __GI___libc_recvmsg
"""
if fields[1] == '[unknown]':
return to_module_name(fields[2][1:-1])
return fields[1] | 093a2397da50bac3ce299256a6d9640af33bf59f | 3,656,384 |
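# A usage sketch for extract_stack_name above, mirroring the docstring example.
# The '[unknown]' branch additionally relies on the to_module_name() helper,
# which is defined elsewhere in the original module.
fields = ["1094d", "__GI___libc_recvmsg", "(/lib/x86_64-linux-gnu/libpthread-2.23.so)"]
print(extract_stack_name(fields))  # __GI___libc_recvmsg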
def parse_args(argv):
"""Parse any command line arguments."""
# Set the default logging level to DEBUG
# log_level = logging.INFO
log_level = logging.DEBUG
# This is the dictionary of arguments.
arg_dict = {'start_date': DEFAULT_START_DATE,
'end_date': DEFAULT_END_DATE,
'type': DEFAULT_REQUEST_TYPE}
try:
opts, args = getopt.getopt(argv,
"hds:e:t:",
["help",
"debug",
"start=",
"end=",
"type="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
sys.exit()
elif opt in ("-d", "--debug"):
log_level = logging.DEBUG
print 'log level is at DEBUG'
elif opt in ("-s", "--start"):
arg_dict['start_date'] = arg
elif opt in ("-e", "--end"):
arg_dict['end_date'] = arg
elif opt in ("-t", "--type"):
arg_dict['type'] = arg
# If this file is running as main, do logging.
if __name__ == "__main__":
logging.basicConfig(filename="log_gpo_tutorial.txt",
level=log_level,
filemode="a")
logging.info('start: ' + strftime("%c"))
return arg_dict | bf11d7992c6962fe1e52dd82e68d921b9463d9e9 | 3,656,385 |
import logging
def get_pafy_stream_obj(url,format=None,only_video=False):
"""This function return stream object from pafy
Arguments:
url {string} -- The url of the video from youtube
Returns:
Stream_Obj -- This is a object of Stream class from pafy
"""
try:
obj = pafy.new(url)
# returning only the pafy obj if format is not given
if format == None:
return obj
stream_obj = None
# returning format specified in the parameter
if format == 'AUDIO':
logging.debug("Getting audio pafy stream_object")
stream_obj = obj.getbestaudio(preftype='m4a')
if format == 'VIDEO':
if only_video:
# get only video at 1080p
# stream_obj = obj.getbestvideo(preftype='mp4')
                ## iterating backwards since the best streams are at the end and
                ## selecting the best 1920x1080p mp4 stream
logging.debug("Getting HQ video pafy stream_object")
for stream in obj.videostreams[::-1]:
if stream.extension == 'mp4':
if stream.dimensions[0] == 1920 and stream.dimensions[1] == 1080:
stream_obj = stream
break
else:
                # getbest will return a stream with both audio and video, normally at 640p
logging.debug("Getting normal-video pafy stream_object")
stream_obj = obj.getbest(preftype='mp4')
return stream_obj
except OSError as e:
logging.debug("OSError in new pafy")
logging.debug(e)
raise OSError
except Exception as e:
logging.debug("Error occured in new pafy")
logging.debug(e)
return None | bf1b2d360538b4abc9043a87b34eddae65ec8591 | 3,656,386 |
def valid_shape(shape):
"""
@returns: True if given shape is a valid tetris shape
"""
return shape in SHAPES and len(shape) == 1 | e40fda46078a615b6d93438ea9d9e9d72800b25a | 3,656,387 |
def get_device(device_id):
"""
@api {get} /devices/:device_id Get Unique Device
@apiVersion 1.0.0
@apiName GetDevice
@apiGroup Device
@apiSuccess {Boolean} success Request status
@apiSuccess {Object} message Respond payload
@apiSuccess {Object} message.device Device object
"""
device_obj = Device.query.get(device_id)
if not device_obj:
return jsonify(success=False, message='not found'), 404
return jsonify(success=True, message={'device': device_obj.to_dict()}) | 0bad727a1a554d63db774179f45deacb1164ba18 | 3,656,388 |
import gzip
import numpy as np
def read_imgs(filename, num_images):
"""读入图片数据
:param filename:
:param num_images:
:return:
"""
with gzip.open(filename) as bytestream:
bytestream.read(16)
buf = bytestream.read(
28 * 28 * num_images * 1)
data = np.frombuffer(buf, dtype=np.uint8)
data = data.reshape(num_images, 28, 28, 1)
return data | a97602470c729211214b4d4a7acd0744beecdfae | 3,656,389 |
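# A self-contained check for read_imgs above: write a tiny gzip file holding a
# 16-byte header plus two zeroed 28x28 images, then read it back and inspect
# the shape. The temporary path is created on the fly.
import gzip
import os
import tempfile

num_images = 2
payload = b"\x00" * (16 + num_images * 28 * 28)  # header + zeroed pixel data
tmp_path = os.path.join(tempfile.mkdtemp(), "fake-images.gz")
with gzip.open(tmp_path, "wb") as fh:
    fh.write(payload)
print(read_imgs(tmp_path, num_images).shape)  # (2, 28, 28, 1)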
def all_faces(coord, connect):
""" Gets vertices of all faces of the mesh.
Args:
coord (:obj:`numpy.array`): Coordinates of the element.
connect (:obj:`numpy.array`): Element connectivity.
Returns:
Corresponding nodes.
"""
nodes_per_face = np.array([connect[:, [1,2,3,4]], connect[:, [5,6,7,8]], \
connect[:, [6,7,3,2]], connect[:, [7,8,4,3]], \
connect[:, [6,5,1,2]], connect[:, [5,8,4,1]]]).reshape(-1,4)
ind_faces = npi.indices(coord[:,0], nodes_per_face.flatten()).reshape(-1, 4)
return ind_faces | 9955260eae11bd6a32e76fb96468989922e856dc | 3,656,390 |
import os
def _resource_path_dev(relative_path):
"""
:return: Package relative path to resource
"""
base_path = os.path.dirname(os.path.abspath(__file__))
return os.path.join(base_path, relative_path) | 8cdf30f3fa62fb824dcdc70bf9d2627b74f66110 | 3,656,391 |
def edit_assignment(request_ctx, course_id, id, assignment_name=None, assignment_position=None, assignment_submission_types=None, assignment_allowed_extensions=None, assignment_turnitin_enabled=None, assignment_turnitin_settings=None, assignment_peer_reviews=None, assignment_automatic_peer_reviews=None, assignment_notify_of_update=None, assignment_group_category_id=None, assignment_grade_group_students_individually=None, assignment_external_tool_tag_attributes=None, assignment_points_possible=None, assignment_grading_type=None, assignment_due_at=None, assignment_lock_at=None, assignment_unlock_at=None, assignment_description=None, assignment_assignment_group_id=None, assignment_muted=None, assignment_assignment_overrides=None, assignment_only_visible_to_overrides=None, assignment_published=None, assignment_grading_standard_id=None, **request_kwargs):
"""
Modify an existing assignment.
If the assignment[assignment_overrides] key is absent, any existing
overrides are kept as is. If the assignment[assignment_overrides] key is
present, existing overrides are updated or deleted (and new ones created,
as necessary) to match the provided list.
NOTE: The assignment overrides feature is in beta.
:param request_ctx: The request context
:type request_ctx: :class:RequestContext
:param course_id: (required) ID
:type course_id: string
:param id: (required) ID
:type id: string
:param assignment_name: (optional) The assignment name.
:type assignment_name: string or None
:param assignment_position: (optional) The position of this assignment in the group when displaying assignment lists.
:type assignment_position: integer or None
:param assignment_submission_types: (optional) List of supported submission types for the assignment. Unless the assignment is allowing online submissions, the array should only have one element. If not allowing online submissions, your options are: "online_quiz" "none" "on_paper" "online_quiz" "discussion_topic" "external_tool" If you are allowing online submissions, you can have one or many allowed submission types: "online_upload" "online_text_entry" "online_url" "media_recording" (Only valid when the Kaltura plugin is enabled)
:type assignment_submission_types: string or None
:param assignment_allowed_extensions: (optional) Allowed extensions if submission_types includes "online_upload" Example: allowed_extensions: ["docx","ppt"]
:type assignment_allowed_extensions: string or None
:param assignment_turnitin_enabled: (optional) Only applies when the Turnitin plugin is enabled for a course and the submission_types array includes "online_upload". Toggles Turnitin submissions for the assignment. Will be ignored if Turnitin is not available for the course.
:type assignment_turnitin_enabled: boolean or None
:param assignment_turnitin_settings: (optional) Settings to send along to turnitin. See Assignment object definition for format.
:type assignment_turnitin_settings: string or None
:param assignment_peer_reviews: (optional) If submission_types does not include external_tool,discussion_topic, online_quiz, or on_paper, determines whether or not peer reviews will be turned on for the assignment.
:type assignment_peer_reviews: boolean or None
:param assignment_automatic_peer_reviews: (optional) Whether peer reviews will be assigned automatically by Canvas or if teachers must manually assign peer reviews. Does not apply if peer reviews are not enabled.
:type assignment_automatic_peer_reviews: boolean or None
:param assignment_notify_of_update: (optional) If true, Canvas will send a notification to students in the class notifying them that the content has changed.
:type assignment_notify_of_update: boolean or None
:param assignment_group_category_id: (optional) If present, the assignment will become a group assignment assigned to the group.
:type assignment_group_category_id: integer or None
:param assignment_grade_group_students_individually: (optional) If this is a group assignment, teachers have the options to grade students individually. If false, Canvas will apply the assignment's score to each member of the group. If true, the teacher can manually assign scores to each member of the group.
:type assignment_grade_group_students_individually: integer or None
:param assignment_external_tool_tag_attributes: (optional) Hash of attributes if submission_types is ["external_tool"] Example: external_tool_tag_attributes: { // url to the external tool url: "http://instructure.com", // create a new tab for the module, defaults to false. new_tab: false }
:type assignment_external_tool_tag_attributes: string or None
:param assignment_points_possible: (optional) The maximum points possible on the assignment.
:type assignment_points_possible: float or None
:param assignment_grading_type: (optional) The strategy used for grading the assignment. The assignment is ungraded if this field is omitted.
:type assignment_grading_type: string or None
:param assignment_due_at: (optional) The day/time the assignment is due. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z.
:type assignment_due_at: timestamp or None
:param assignment_lock_at: (optional) The day/time the assignment is locked after. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z.
:type assignment_lock_at: timestamp or None
:param assignment_unlock_at: (optional) The day/time the assignment is unlocked. Accepts times in ISO 8601 format, e.g. 2014-10-21T18:48:00Z.
:type assignment_unlock_at: timestamp or None
:param assignment_description: (optional) The assignment's description, supports HTML.
:type assignment_description: string or None
:param assignment_assignment_group_id: (optional) The assignment group id to put the assignment in. Defaults to the top assignment group in the course.
:type assignment_assignment_group_id: integer or None
:param assignment_muted: (optional) Whether this assignment is muted. A muted assignment does not send change notifications and hides grades from students. Defaults to false.
:type assignment_muted: boolean or None
:param assignment_assignment_overrides: (optional) List of overrides for the assignment. NOTE: The assignment overrides feature is in beta.
:type assignment_assignment_overrides: assignmentoverride or None
:param assignment_only_visible_to_overrides: (optional) Whether this assignment is only visible to overrides (Only useful if 'differentiated assignments' account setting is on)
:type assignment_only_visible_to_overrides: boolean or None
:param assignment_published: (optional) Whether this assignment is published. (Only useful if 'draft state' account setting is on) Unpublished assignments are not visible to students.
:type assignment_published: boolean or None
:param assignment_grading_standard_id: (optional) The grading standard id to set for the course. If no value is provided for this argument the current grading_standard will be un-set from this course. This will update the grading_type for the course to 'letter_grade' unless it is already 'gpa_scale'.
:type assignment_grading_standard_id: integer or None
:return: Edit an assignment
:rtype: requests.Response (with Assignment data)
"""
assignment_submission_types_types = ('online_quiz', 'none', 'on_paper', 'online_quiz', 'discussion_topic', 'external_tool', 'online_upload', 'online_text_entry', 'online_url', 'media_recording')
assignment_grading_type_types = ('pass_fail', 'percent', 'letter_grade', 'gpa_scale', 'points')
utils.validate_attr_is_acceptable(assignment_submission_types, assignment_submission_types_types)
utils.validate_attr_is_acceptable(assignment_grading_type, assignment_grading_type_types)
path = '/v1/courses/{course_id}/assignments/{id}'
payload = {
'assignment[name]' : assignment_name,
'assignment[position]' : assignment_position,
'assignment[submission_types][]' : assignment_submission_types,
'assignment[allowed_extensions]' : assignment_allowed_extensions,
'assignment[turnitin_enabled]' : assignment_turnitin_enabled,
'assignment[turnitin_settings]' : assignment_turnitin_settings,
'assignment[peer_reviews]' : assignment_peer_reviews,
'assignment[automatic_peer_reviews]' : assignment_automatic_peer_reviews,
'assignment[notify_of_update]' : assignment_notify_of_update,
'assignment[group_category_id]' : assignment_group_category_id,
'assignment[grade_group_students_individually]' : assignment_grade_group_students_individually,
'assignment[points_possible]' : assignment_points_possible,
'assignment[grading_type]' : assignment_grading_type,
'assignment[due_at]' : assignment_due_at,
'assignment[lock_at]' : assignment_lock_at,
'assignment[unlock_at]' : assignment_unlock_at,
'assignment[description]' : assignment_description,
'assignment[assignment_group_id]' : assignment_assignment_group_id,
'assignment[muted]' : assignment_muted,
'assignment[assignment_overrides]' : assignment_assignment_overrides,
'assignment[only_visible_to_overrides]' : assignment_only_visible_to_overrides,
'assignment[published]' : assignment_published,
'assignment[grading_standard_id]' : assignment_grading_standard_id,
}
for attribute, value in list((assignment_external_tool_tag_attributes or {}).items()):
payload['assignment[external_tool_tag_attributes][{}]'.format(attribute)] = value
url = request_ctx.base_api_url + path.format(course_id=course_id, id=id)
response = client.put(request_ctx, url, payload=payload, **request_kwargs)
return response | d83094e9d3e9ab66f5c6d4f5245dde80cf4579cc | 3,656,392 |
import sys
def alpha_097(code, end_date=None, fq="pre"):
"""
    Formula:
        STD(VOLUME, 10)
    Inputs:
        code: stock pool
        end_date: query date
    Outputs:
        value of the factor
"""
end_date = to_date_str(end_date)
func_name = sys._getframe().f_code.co_name
return JQDataClient.instance().get_alpha_191(**locals()) | 5f0a50233502c6162884be015edea2d2fd4bb417 | 3,656,393 |
def filter(p):
"""
把索引list转换为单词list
"""
result = []
for idx in p:
if idx == stop_tag:
break
if idx == padding_tag: continue
result.append(index_word[idx])
return result | ab79343d3d924bf1b69813d6a32b967bf45f39bd | 3,656,394 |
import sys
def get_sequin_annots(sequin_path, ref_contigs, quiet=False):
"""
Load all genes in the Sequin table as SeqRecords, fetching their sequence data from the reference.
ref_contigs is a dictionary of ref contig sequences created with BioPython's SeqIO.to_dict().
For documentation on the Sequin table format, see: https://www.ncbi.nlm.nih.gov/Sequin/table.html
Returns a dictionary with contig names as keys, and lists of (start, end, rev_strand, SeqRecord,
coding_blocks) tuples for each contig in ref_contigs.
"""
annots = defaultdict(list)
# We need a dummy class to hold the current state while parsing
# (otherwise the below private functions can't modify it; there's no "nonlocal" in python 2.x)
class _:
in_contig = None
in_feature = None
gene_name = None
desc = None
chrom_start = None
chrom_end = None
strand = None
feature_seq_str = ""
coding_blocks = []
def _save_sequin_feature():
# The only features we care about are the CDS features. Others get discarded during parsing.
if _.in_feature == "CDS":
if len(_.feature_seq_str) == 0:
if not quiet: sys.stderr.write("WARN: 0-length CDS in contig %s" % _.in_contig)
elif _.gene_name is None or _.strand is None or _.chrom_start is None or _.chrom_end is None:
if not quiet: sys.stderr.write("WARN: invalid CDS feature in contig %s" % _.in_contig)
else:
gene_seq = Seq(_.feature_seq_str, generic_dna)
if _.strand == '-':
gene_seq = gene_seq.reverse_complement()
gene_seq_record = SeqRecord(gene_seq, id=_.gene_name, name=_.gene_name, description=_.desc)
annot = Annot(_.chrom_start, _.chrom_end, _.strand == '-', gene_seq_record,
_.coding_blocks)
annots[contig_to_vcf_chrom(_.in_contig)].append(annot)
_.in_feature = _.gene_name = _.desc = _.chrom_start = _.chrom_end = _.strand = None
_.feature_seq_str = ""
_.coding_blocks = []
def _update_sequin_feature(fields):
if fields[0] != "" and fields[1] != "":
# If the first two fields are present, this specifies a sequence range
if not (fields[0].isdigit() and fields[1].isdigit()):
# We will only attempt to utilize *complete* CDS features
# (None of the start or end positions can be qualified by ">" or "<")
_.in_feature = "CDS-partial"
return
# Append the specified sequence to the `_.feature_seq_str`.
# Note: Sequin table coordinates, like GenBank, are 1-indexed, right-closed.
start = int(fields[0])
end = int(fields[1])
if _.strand is None:
_.strand = '+' if start <= end else '-'
elif _.strand != ('+' if start <= end else '-'):
sys.stderr.write("WARN: strand changed direction, invalid CDS")
_.in_feature = "CDS-partial"
return
if _.strand == '-':
start, end = end, start
start -= 1
ref_contig = ref_contigs[_.in_contig]
seg = str(ref_contig.seq)[start:end]
_.coding_blocks.append((start, end))
_.feature_seq_str = seg + _.feature_seq_str if _.strand == '-' else _.feature_seq_str + seg
_.chrom_start = min(start, _.chrom_start if _.chrom_start is not None else float('inf'))
_.chrom_end = max(end, _.chrom_end if _.chrom_end is not None else float('-inf'))
elif len(fields) >= 5:
# If the first three fields are blank, this specifies a qualifier key + value
if fields[3] == "gene":
_.gene_name = fields[4]
elif fields[3] == "product":
_.desc = fields[4]
with open(sequin_path) as f:
for line in f:
line = line.rstrip("\n")
fields = line.split("\t", 4)
if len(line.strip()) == 0:
# Whitespace-only lines signal the end of feature data for a contig.
# They may be followed by INFO: lines from the annotator, which we ignore.
_save_sequin_feature()
_.in_contig = None
elif _.in_contig is None and line[0] == '>':
# Lines that begin with ">Feature " signal the start of feature data for a contig
# Fields are separated by spaces; the second field is the full contig ID
_save_sequin_feature()
sp_fields = line[1:].split(' ')
if sp_fields[0] == 'Feature' and len(sp_fields) >= 2:
                if sp_fields[1] in ref_contigs:
_.in_contig = sp_fields[1]
elif not quiet:
sys.stderr.write("WARN: unknown contig in Sequin file: %s" % sp_fields[1])
elif _.in_contig is not None:
if len(fields) < 3:
if not quiet: sys.stderr.write("WARN: incomplete Sequin line: %s" % line)
                continue
in_new_feature = fields[2].strip() != ""
if _.in_feature is None or in_new_feature:
_save_sequin_feature()
_.in_feature = fields[2].strip()
if _.in_feature == "CDS":
_update_sequin_feature(fields)
elif _.in_feature == "CDS":
_update_sequin_feature(fields)
return annots | c984b1588ce5a33c3f2711ac14512050a543dd3f | 3,656,395 |
def transformer_decoder_layer(dec_input,
enc_output,
slf_attn_bias,
dec_enc_attn_bias,
n_head,
d_key,
d_value,
d_model,
d_inner_hid,
prepostprocess_dropout,
attention_dropout,
relu_dropout,
hidden_act,
preprocess_cmd,
postprocess_cmd,
cache=None,
gather_idx=None,
param_initializer=None,
name=''):
"""
The layer to be stacked in decoder part.
:param dec_input: (batch_size, tgt_len, emb_dim)
:param enc_output: (batch_size, n_tokens, emb_dim)
:param slf_attn_bias: (batch_size, n_head, tgt_len, tgt_len)
:param dec_enc_attn_bias: (batch_size, n_head, tgt_len, n_tokens)
"""
# (batch_size, tgt_len, emb_dim)
slf_attn_output = multi_head_attention(
queries=pre_process_layer(out=dec_input, # add layer normalization
process_cmd=preprocess_cmd,
dropout_rate=prepostprocess_dropout,
name=name + '_pre_slf_attn'),
keys=None,
values=None,
attn_bias=slf_attn_bias, # (batch_size, n_head, tgt_len, tgt_len)
d_key=d_key,
d_value=d_value,
d_model=d_model,
n_head=n_head,
dropout_rate=attention_dropout,
cache=cache,
gather_idx=gather_idx,
param_initializer=param_initializer,
name=name + '_slf_attn')
# add dropout and residual connection
# (batch_size, tgt_len, emb_dim)
slf_attn_output = post_process_layer(
prev_out=dec_input,
out=slf_attn_output,
process_cmd=postprocess_cmd,
dropout_rate=prepostprocess_dropout,
name=name + '_post_slf_attn')
# (batch_size, tgt_len, emb_dim)
context_attn_output = multi_head_attention(
queries=pre_process_layer(out=slf_attn_output, # add layer normalization
process_cmd=preprocess_cmd,
dropout_rate=prepostprocess_dropout,
name=name + '_pre_context_attn'),
keys=enc_output, # (batch_size, n_tokens, emb_dim)
values=enc_output, # (batch_size, n_tokens, emb_dim)
attn_bias=dec_enc_attn_bias, # (batch_size, n_head, tgt_len, n_tokens)
d_key=d_key,
d_value=d_value,
d_model=d_model,
n_head=n_head,
dropout_rate=attention_dropout,
cache=cache,
gather_idx=gather_idx,
static_kv=True,
param_initializer=param_initializer,
name=name + '_context_attn')
# add dropout and residual connection
context_attn_output = post_process_layer(
prev_out=slf_attn_output,
out=context_attn_output,
process_cmd=postprocess_cmd,
dropout_rate=prepostprocess_dropout,
name=name + '_post_context_attn')
ffd_output = positionwise_feed_forward(
x=pre_process_layer(out=context_attn_output, # add layer normalization
process_cmd=preprocess_cmd,
dropout_rate=prepostprocess_dropout,
name=name + '_pre_ffn'),
d_inner_hid=d_inner_hid,
d_hid=d_model,
dropout_rate=relu_dropout,
hidden_act=hidden_act,
param_initializer=param_initializer,
name=name + '_ffn')
# add dropout and residual connection
dec_output = post_process_layer(
prev_out=context_attn_output,
out=ffd_output,
process_cmd=postprocess_cmd,
dropout_rate=prepostprocess_dropout,
name=name + '_post_ffn')
return dec_output | 57367b4aa27da48a1cff5ca24bfcecb36a10c39b | 3,656,396 |
def replace_word_choice(sentence: str, old_word: str, new_word: str) -> str:
"""Replace a word in the string with another word.
:param sentence: str - a sentence to replace words in.
:param old_word: str - word to replace
:param new_word: str - replacement word
:return: str - input sentence with new words in place of old words
"""
return sentence.replace(old_word, new_word) | 27d0eae1aa12538c570fec3aa433d59c40556592 | 3,656,397 |
def append_slash(url):
"""Make sure we append a slash at the end of the URL otherwise we
have issues with urljoin Example:
>>> urlparse.urljoin('http://www.example.com/api/v3', 'user/1/')
'http://www.example.com/api/user/1/'
"""
if url and not url.endswith('/'):
url = '{0}/'.format(url)
return url | 3d8f009f0f7a2b93e2c9ed3fee593bbcf0f25c4f | 3,656,398 |
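# A demonstration for append_slash above, using Python 3's urllib.parse (the
# docstring shows the older urlparse name): without a trailing slash, urljoin
# silently drops the last path segment of the base URL.
from urllib.parse import urljoin

base = 'http://www.example.com/api/v3'
print(urljoin(base, 'user/1/'))                # http://www.example.com/api/user/1/
print(urljoin(append_slash(base), 'user/1/'))  # http://www.example.com/api/v3/user/1/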
def find_cards(thresh_image):
"""Finds all card-sized contours in a thresholded camera image.
Returns the number of cards, and a list of card contours sorted
from largest to smallest."""
# Find contours and sort their indices by contour size
dummy, cnts, hier = cv2.findContours(thresh_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
index_sort = sorted(range(len(cnts)), key=lambda i: cv2.contourArea(cnts[i]), reverse=True)
# If there are no contours, do nothing
if len(cnts) == 0:
return [], []
# Otherwise, initialize empty sorted contour and hierarchy lists
cnts_sort = []
hier_sort = []
cnt_is_card = np.zeros(len(cnts), dtype=int)
# Fill empty lists with sorted contour and sorted hierarchy. Now,
# the indices of the contour list still correspond with those of
# the hierarchy list. The hierarchy array can be used to check if
# the contours have parents or not.
for i in index_sort:
cnts_sort.append(cnts[i])
hier_sort.append(hier[0][i])
# Determine which of the contours are cards by applying the
# following criteria: 1) Smaller area than the maximum card size,
# 2), bigger area than the minimum card size, 3) have no parents,
# and 4) have four corners
for i in range(len(cnts_sort)):
size = cv2.contourArea(cnts_sort[i])
peri = cv2.arcLength(cnts_sort[i], True)
approx = cv2.approxPolyDP(cnts_sort[i], 0.01*peri, True)
if ((size < CARD_MAX_AREA) and (size > CARD_MIN_AREA) and (hier_sort[i][3] == -1) and (len(approx) == 4)):
cnt_is_card[i] = 1
return cnts_sort, cnt_is_card | 2bf8a0a0ea64de51b34c5826538d591865533353 | 3,656,399 |
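# A hedged usage sketch for find_cards above. CARD_MIN_AREA / CARD_MAX_AREA are
# module-level thresholds in the original project; the values below, and the
# input file name, are illustrative assumptions. The three-value return of
# cv2.findContours used in the function implies OpenCV 3.x.
import cv2

CARD_MIN_AREA = 25000
CARD_MAX_AREA = 120000
frame = cv2.imread("card_table.jpg")  # hypothetical camera frame on disk
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
_, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY)
cnts_sort, cnt_is_card = find_cards(thresh)
print(sum(cnt_is_card), "card-sized contours detected")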