content (string, lengths 35-762k) | sha1 (string, length 40) | id (int64, 0-3.66M) |
---|---|---|
def _entity_namespace_key(entity, key):
"""Return an entry from an entity_namespace.
Raises :class:`_exc.InvalidRequestError` rather than attribute error
on not found.
"""
ns = entity.entity_namespace
try:
return getattr(ns, key)
except AttributeError as err:
util.raise_(
exc.InvalidRequestError(
'Entity namespace for "%s" has no property "%s"'
% (entity, key)
),
replace_context=err,
) | ffd063523a8011a8ee2dd3700920a0523465d6cc | 3,654,508 |
def get_messages(mtype, read=False, uid=None):
""" Returns query for messages. If `read` is True it only queries for unread messages """
query = Message.select().where(Message.mtype << mtype)
query = query.where(Message.receivedby == current_user.uid if not uid else uid)
if read:
query = query.where(Message.read.is_null(True))
return query | 7959a0510d8f6794ff40d8467d09b0833279be10 | 3,654,509 |
import numpy
def coords_to_indices(coords, top, left, csx, csy, shape, preserve_out_of_bounds=False):
"""
Convert coordinates to array indices using the given specs.
    Coordinates outside of the shape are not returned unless ``preserve_out_of_bounds`` is True.
    :param coords: Tuple of coordinates in the form ([x...], [y...])
    :param top: Top coordinate of array
    :param left: Left coordinate of array
    :param csx: Cell size in the x-direction
    :param csy: Cell size in the y-direction
    :param shape: Shape of array (for bounds)
    :param preserve_out_of_bounds: If True, skip the bounds check and return all indices
    :return: tuple of indices in the form ([i...], [j...])
"""
x, y = numpy.asarray(coords[0]), numpy.asarray(coords[1])
i = numpy.int64((top - y) / csy)
j = numpy.int64((x - left) / csx)
if preserve_out_of_bounds:
return i, j
else:
m = (i >= 0) & (j >= 0) & (i < shape[0]) & (j < shape[1])
return i[m], j[m] | 89b99ffc159c56855792d0daeb8bdb5a5d04ad9f | 3,654,510 |
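A minimal usage sketch for coords_to_indices; the grid specs (100x100 cells, top-left corner at x=0, y=1000, 10-unit cells) are made up for illustration:

import numpy

coords = ([5.0, 250.0, 2000.0], [995.0, 400.0, 995.0])   # ([x...], [y...])
i, j = coords_to_indices(coords, top=1000, left=0, csx=10, csy=10, shape=(100, 100))
print(i, j)  # [0 60] [0 25] -- the third point falls outside the grid and is dropped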
def sanitize_vcf_file(vcf_file, out_file, snp_log_file, sample_log_file, logging, min_count=1, max_missing=0.25,
max_alt_states=4, disruptive_threshold=1,window_size=30,max_snps=2):
"""
Filter a user provided vcf and write a filtered vcf file
Parameters
----------
    vcf_file [str] : Path to vcf file
    out_file [str] : Path to write sanitized vcf file
    snp_log_file [str] : Path to write SNP log report
    sample_log_file [str]: Path to write sample report
    logging [logging obj] : logging object
    min_count [int] : Minimum number of variable samples, i.e. if set to 2, at least two samples must differ from the rest
    max_missing [float] : Fraction of samples which can be missing data for a position for that position to be valid
    max_alt_states [int] : Maximum number of bases which can be in the vcf for that position to be valid
    disruptive_threshold [int]: Identify sequences which are blunting resolution by looking at which ones are missing
                                positions where other sequences have them. By default, it looks for sequences which are
                                uniquely missing a position conserved in all of the other samples
    Returns
    -------
    [samples, snp_data] : list of sample IDs in the vcf and the dict of valid SNP data
    """
logging.info("Reading vcf file {}".format(vcf_file))
pandas_vcf_obj = pandas_vcf(vcf_file)
samples = pandas_vcf_obj.get_sample_list()
num_samples = len(samples)
snp_report = open(snp_log_file, 'w')
sample_report = open(sample_log_file, 'w')
sample_ambig_counts = {}
for sample in samples:
        sample_ambig_counts[sample] = {'total_ambig': 0, 'unique_ambig': 0, 'low_freq_variants': 0}
logging.info("VCF file contains #{} samples".format(len(samples)))
logging.info("VCF file samples: {}".format(samples))
count_postitions = 0
valid_position_count = 0
filtered_positions_count = 0
filtered_positions_ambig = 0
filtered_positions_alt = 0
ambiguous_positions = {}
snp_data = {}
positions = []
position_base_calls = pandas_vcf_obj.get_baselookup()
valid_positions = []
for index,row in pandas_vcf_obj.df.iterrows():
calls = get_vcf_row_calls(row, position_base_calls, samples, logging)
count_postitions += 1
position = list(calls.keys())[0]
positions.append(position)
chromosome = calls[position]['chrom']
bases = calls[position]['bases']
ref_base = calls[position]['ref_base']
if not chromosome in snp_data:
snp_data[chromosome] = {}
if not position in snp_data[chromosome]:
snp_data[chromosome][position] = {}
count_ref = 0
count_alt = 0
count_missing = 0
count_alt_bases = 0
alt_samples = []
valid_bases = []
for base in bases:
if base == ref_base:
count_ref = len(bases[base])
elif base == 'N':
count_missing = len(bases[base])
if not position in ambiguous_positions:
ambiguous_positions[position] = []
ambiguous_positions[position] = bases[base]
for sample_id in ambiguous_positions[position]:
sample_ambig_counts[sample_id]['total_ambig'] += 1
else:
count_alt_bases += 1
count_alt += len(bases[base])
alt_samples += bases[base]
valid_bases.append(base)
if (count_ref >= min_count) and \
(count_alt >= min_count) and \
(count_missing / num_samples <= max_missing) and \
count_alt_bases <= max_alt_states:
valid_position_count += 1
valid_positions.append(position)
for base in valid_bases:
snp_data[chromosome][position][base] = {'samples':bases[base],'out_bases':list(set(list(bases.keys())) - set([base]))}
else:
if count_ref < min_count and count_alt < min_count:
filtered_positions_count += 1
snp_report.write(
"Filtering due to minimum polymorphic sample count\tchrom: {}\tposition: {}\tcount_ref: {}\tcount_alt: {}\tcount_missing: {}\n".format(
chromosome, position, count_ref, count_alt, count_missing))
for sample_id in alt_samples:
sample_ambig_counts[sample_id]['low_freq_variants'] += 1
if count_missing / num_samples >= max_missing:
filtered_positions_ambig += 1
snp_report.write(
"Filtering due to maximum missing sample count\tchrom: {}\tposition: {}\tcount_ref: {}\tcount_alt: {}\tcount_missing: {}\n".format(
chromosome, position, count_ref, count_alt, count_missing))
if count_alt_bases > max_alt_states:
filtered_positions_alt += 1
snp_report.write(
"Filtering due to more than allowed alt base states\tchrom: {}\tposition: {}\tcount_ref: {}\tcount_alt: {}\tcount_missing: {}\n".format(
chromosome, position, count_ref, count_alt, count_missing))
filtered_df = pandas_vcf_obj.df[pandas_vcf_obj.df['POS'].isin(valid_positions)]
filtered_df.to_csv(out_file,sep="\t",header=True,index=False)
#for position in positions:
logging.info("Read {} positions in file {}".format(count_postitions, vcf_file))
logging.info("Filtered {} positions due to minimum polymorphic sample requirement".format(filtered_positions_count))
logging.info("Filtered {} positions due to missing in more than {}% of samples".format(filtered_positions_ambig,
max_missing))
logging.info("Filtered {} positions due to more than allowed alternative base states".format(filtered_positions_alt,
max_alt_states))
logging.info("{} positions are valid after filtering".format(valid_position_count))
# Identify for the user unusual sequences that they might want to remove from their analyses
disruptive_sequence_check = identify_disruptive_sequences(ambiguous_positions, disruptive_threshold)
for sample_id in disruptive_sequence_check:
sample_ambig_counts[sample_id]['unique_ambig'] = disruptive_sequence_check[sample_id]
# Get 95 percentile for each attribute
percentile_total_ambig = get_percentile(sample_ambig_counts, 'total_ambig', percentile=95)
    percentile_unique_ambig = get_percentile(sample_ambig_counts, 'unique_ambig', percentile=95)
percentile_low_freq_var = get_percentile(sample_ambig_counts, 'low_freq_variants', percentile=95)
logging.info(
"95% Percentile: Total Ambig={}\t95% Percentile: Unique Ambig={}\t95% Percentile: low frequency variants={}".format(
percentile_total_ambig, percentile_unique_ambig, percentile_low_freq_var))
for sample_id in disruptive_sequence_check:
status = 'PASS'
if sample_ambig_counts[sample_id]['total_ambig'] / count_postitions > max_missing:
status = 'FAIL'
elif sample_ambig_counts[sample_id]['unique_ambig'] > percentile_unique_ambig:
status = 'WARNING: Sample has unusually high number of unique missing core positions'
elif sample_ambig_counts[sample_id]['low_freq_variants'] > percentile_low_freq_var:
status = 'WARNING: Sample has unusually high number of low frequency variants'
elif sample_ambig_counts[sample_id]['total_ambig'] > percentile_total_ambig:
status = 'WARNING: Sample has unusually high number of missing core positions'
        sample_report.write("{}\tTOTAL_AMBIG={}\tUNIQUE_AMBIG={}\tLOW_FREQ_VARIANTS={}\tSTATUS={}\n".format(
            sample_id,
            sample_ambig_counts[sample_id]['total_ambig'],
            sample_ambig_counts[sample_id]['unique_ambig'],
            sample_ambig_counts[sample_id]['low_freq_variants'],
            status))
sample_report.close()
snp_report.close()
return [samples, snp_data] | b5d96e9224b5eddad1dff8dcf2caf558522376bc | 3,654,511 |
from typing import Optional
from typing import Any
def geq(column: str, value: Optional[Any]) -> str:
"""
>>> geq("col", None)
'1'
>>> geq("col", 1)
'col >= 1'
>>> geq("col", "1")
"col >= '1'"
"""
    # Only None means "no lower bound"; falsy values such as 0 are still valid thresholds
    if value is None:
return "1"
if isinstance(value, str):
return f"{column} >= '{value}'"
return f"{column} >= {value}" | 9216b8e2480232840ad37d8fe0e5c0f07b88873f | 3,654,512 |
from keras.layers import Conv2D, Dense
from palmnet.layers import Conv2DCustom
from palmnet.layers.sparse_facto_sparse_tensor_deprecated import SparseFactorisationDense
def count_model_param_and_flops(model, dct_layer_sparse_facto_op=None):
"""
    Return the number of params and the number of flops of the 2D convolutional and dense layers for both the base model and the compressed model.
:return:
"""
nb_param_base, nb_param_compressed, nb_flop_base, nb_flop_compressed = 0, 0, 0, 0
param_by_layer = {}
flop_by_layer = {}
for layer in model.layers:
logger.warning("Process layer {}".format(layer.name))
if isinstance(layer, Conv2D) or isinstance(layer, Conv2DCustom):
nb_param_layer, nb_param_compressed_layer = Palminizable.count_nb_param_layer(layer, dct_layer_sparse_facto_op)
nb_flop_layer, nb_flop_compressed_layer = Palminizable.count_nb_flop_conv_layer(layer, nb_param_layer, dct_layer_sparse_facto_op)
elif isinstance(layer, Dense) or isinstance(layer, SparseFactorisationDense):
nb_param_layer, nb_param_compressed_layer = Palminizable.count_nb_param_layer(layer, dct_layer_sparse_facto_op)
nb_flop_layer, nb_flop_compressed_layer = Palminizable.count_nb_flop_dense_layer(layer, nb_param_layer, dct_layer_sparse_facto_op)
else:
logger.warning("Layer {}, class {}, hasn't been compressed".format(layer.name, layer.__class__.__name__))
nb_param_compressed_layer, nb_param_layer, nb_flop_layer, nb_flop_compressed_layer = 0, 0, 0, 0
param_by_layer[layer.name] = nb_param_layer
flop_by_layer[layer.name] = nb_flop_layer
nb_param_base += nb_param_layer
nb_param_compressed += nb_param_compressed_layer
nb_flop_base += nb_flop_layer
nb_flop_compressed += nb_flop_compressed_layer
return nb_param_base, nb_param_compressed, nb_flop_base, nb_flop_compressed, param_by_layer, flop_by_layer | 142b04ad327f662d315d7c92322df8aef2ae9871 | 3,654,513 |
def longest_match(list1, list2):
"""
Find the length of the longest substring match between list1 and list2.
>>> longest_match([], [])
0
>>> longest_match('test', 'test')
4
>>> longest_match('test', 'toast')
2
>>> longest_match('supercalifragilisticexpialidocious', 'mystical californication')
5
"""
m = len(list1)
n = len(list2)
data = [[0 for col in range(n+1)] for row in range(m+1)]
for a in range(1, m+1):
for b in range(1, n+1):
if list1[a-1] == list2[b-1]:
data[a][b] = 1 + data[a-1][b-1]
else:
data[a][b] = 0
maxes = [max(row) for row in data]
return max(maxes) | 4a84dacbb0d59fc7f9c4b59e87e55c72416b8c80 | 3,654,514 |
def deserialize_config(data, **kwargs):
"""Create instance of a JobConfiguration from a dict.
Parameters
----------
data : dict
Dictionary loaded from a serialized config file.
Returns
-------
JobConfiguration
"""
registry = Registry()
config_module = data["configuration_module"]
config_class = data["configuration_class"]
for ext in registry.iter_extensions():
ext_cfg_class = ext[ExtensionClassType.CONFIGURATION]
if ext_cfg_class.__module__ == config_module and ext_cfg_class.__name__ == config_class:
return ext_cfg_class.deserialize(data, **kwargs)
raise InvalidParameter(f"Cannot deserialize {config_module}.{config_class}") | eff887d4e676935742b8169c62a9a581b5f239ce | 3,654,515 |
import numpy
def pmat06(date1, date2):
"""
Wrapper for ERFA function ``eraPmat06``.
Parameters
----------
date1 : double array
date2 : double array
Returns
-------
rbp : double array
Notes
-----
The ERFA documentation is below.
- - - - - - - - - -
e r a P m a t 0 6
- - - - - - - - - -
Precession matrix (including frame bias) from GCRS to a specified
date, IAU 2006 model.
Given:
date1,date2 double TT as a 2-part Julian Date (Note 1)
Returned:
rbp double[3][3] bias-precession matrix (Note 2)
Notes:
1) The TT date date1+date2 is a Julian Date, apportioned in any
convenient way between the two arguments. For example,
JD(TT)=2450123.7 could be expressed in any of these ways,
among others:
date1 date2
2450123.7 0.0 (JD method)
2451545.0 -1421.3 (J2000 method)
2400000.5 50123.2 (MJD method)
2450123.5 0.2 (date & time method)
The JD method is the most natural and convenient to use in
cases where the loss of several decimal digits of resolution
is acceptable. The J2000 method is best matched to the way
the argument is handled internally and will deliver the
optimum resolution. The MJD method and the date & time methods
are both good compromises between resolution and convenience.
2) The matrix operates in the sense V(date) = rbp * V(GCRS), where
the p-vector V(GCRS) is with respect to the Geocentric Celestial
Reference System (IAU, 2000) and the p-vector V(date) is with
respect to the mean equatorial triad of the given date.
Called:
eraPfw06 bias-precession F-W angles, IAU 2006
eraFw2m F-W angles to r-matrix
References:
Capitaine, N. & Wallace, P.T., 2006, Astron.Astrophys. 450, 855
Wallace, P.T. & Capitaine, N., 2006, Astron.Astrophys. 459, 981
Copyright (C) 2013-2017, NumFOCUS Foundation.
Derived, with permission, from the SOFA library. See notes at end of file.
"""
(date1, date2,), rbp = arrayify_inputs_and_create_d3_fix(
[date1, date2], core_dims=[0, 0], out_core_shape=(3, 3), out_dtype=numpy.double)
rbp = ufunc.pmat06(date1, date2, rbp)
return rbp | 69b38637701d804ca83733d7f55fca1fd57a5b72 | 3,654,516 |
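A usage sketch based on the two-part Julian Date examples in the notes above; it assumes the wrapper and its ufunc backend are importable from their module:

rbp = pmat06(2400000.5, 50123.2)   # JD(TT) = 2450123.7 via the MJD split
print(rbp.shape)                   # (3, 3) bias-precession matrix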
def _splitData(data):
"""Takes either a cursor or result set and returns result set and list of columns."""
if hasattr(data, 'fetchall'):
rows = data.fetchall()
cols = data.columns()
    elif isinstance(data, list):
        rows = data
        if hasattr(rows[0], '_fields'):
            cols = rows[0]._fields
        elif hasattr(rows[0], 'keys'):
            cols = list(rows[0].keys())
        else:
            raise TypeError('Can not determine the list of columns from the result set.')
    else:
        raise TypeError('Expected a cursor or a list of rows.')
return (rows, cols) | 9953be08f29fb457782e5401c3dfded8f780924b | 3,654,517 |
import multiprocessing
def get_cpu_count():
"""
Try and estimate the number of CPU on the host. First using multiprocessing
native function, other using content of /proc/cpuinfo. If none of those
methods did work, 4 is returned.
"""
    try:
        cpucount = multiprocessing.cpu_count()
    except Exception:
        try:
            # Fall back to parsing /proc/cpuinfo (last "processor" index + 1)
            with open("/proc/cpuinfo") as cpuinfo:
                s = cpuinfo.read()
            cpucount = int(s.split('processor')[-1].split(":")[1].split("\n")[0])
            cpucount += 1
        except Exception:
            cpucount = 4
return cpucount | db58112537c4a111ec1ef24eeab70227678d6d1e | 3,654,518 |
def get_relation_data(collection, relation_paths):
"""Prepare relations for usage inside extend_relations."""
out = []
for path in relation_paths:
promote = path.get("promote", False)
numpy_path = []
for step in path["steps"]:
if isinstance(step, str):
step_name, max_usage = step, 1
else:
step_name, max_usage = step
relation = collection.relation(step_name)
numpy_path.append((relation.offsets, relation.values, max_usage))
inv_relation = collection.get_inverted_relation(step_name).edges() > 0
out.append((numpy_path, inv_relation, promote))
return out | 8b4cd9145995aee5e3c9b880073dfd10320b24e5 | 3,654,519 |
import pygame
def generate_paddle_quads():
"""
This function builds a matrix of paddles, each row in the matrix
represents the paddle skin (four colors) and each column represents
the size.
"""
paddle_base_width = 32
paddle_height = 16
x = 0
y = paddle_height * 4
spritesheet = []
for _ in range(4):
spritesheet.append([
# The smallest paddle is in (0, y) and its dimensions are 32x16.
pygame.Rect(x, y, paddle_base_width, paddle_height),
# The next paddle is in (32, y) and its dimensions are 64x16.
pygame.Rect(
x + paddle_base_width, y,
paddle_base_width * 2, paddle_height
),
# The next paddle is in (96, y) and its dimensions are 96x16.
pygame.Rect(
x + paddle_base_width * 3, y,
paddle_base_width * 3, paddle_height
),
# The largest paddle is in (0, y + 16)
# and its dimensions are 128x16.
pygame.Rect(
x, y + paddle_height,
paddle_base_width * 4, paddle_height
)
])
# To go to the next color, increment y by 32.
y += paddle_height * 2
return spritesheet | e82259d5e203257574c5ae91ad4a5c3a625e5b5a | 3,654,520 |
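A quick usage sketch; indexing is [skin][size] per the construction above (which color each skin row corresponds to is not specified in the snippet):

paddle_quads = generate_paddle_quads()
smallest_first_skin = paddle_quads[0][0]   # pygame.Rect(0, 64, 32, 16)
largest_second_skin = paddle_quads[1][3]   # pygame.Rect(0, 112, 128, 16)
print(smallest_first_skin.size, largest_second_skin.size)  # (32, 16) (128, 16)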
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
# FFT helpers assumed to come from numpy.fft (fftn, fftshift, ifftn)
from numpy.fft import fftn, fftshift, ifftn
def cut(img):
"""
Applies central horizontal threshold in Fourier spectrum
"""
# Apply fourier transform and shift
img_fft = fftn(img)
img_fft_shift = fftshift(img_fft)
# Print spectrum before
plt.imshow(np.abs(img_fft_shift), cmap='gray', norm=LogNorm(vmin=5))
plt.show()
    # Filter image: keep only a narrow central cross, restricted to the middle third of the spectrum
img_fft_shift_filtered = np.copy(img_fft_shift)
for x in range(img.shape[0]):
for y in range(img.shape[1]):
if((x < img.shape[0]//2 - img.shape[0]//30 or \
x > img.shape[0]//2 + img.shape[0]//30) and\
(y < img.shape[1]//2 - img.shape[1]//30 or \
y > img.shape[1]//2 + img.shape[1]//30)):
img_fft_shift_filtered[x,y] = 0
if((x < img.shape[0]//3 or \
x > img.shape[0]*2//3) or \
(y < img.shape[1]//3 or \
y > img.shape[1]*2//3)):
img_fft_shift_filtered[x, y] = 0
# Print spectrum after
plt.imshow(np.abs(img_fft_shift_filtered), cmap='gray', norm=LogNorm(vmin=5))
plt.show()
# Return to space domain result image using inverse
return np.abs(ifftn(fftshift(img_fft_shift_filtered))) | 74ce6db709aaa91fec2321dc6cc70fc6d5a8c552 | 3,654,521 |
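A minimal driver for the filter above, using random data as a stand-in for a grayscale image (assumes the imports added at the top of the snippet):

rng = np.random.default_rng(0)
img = rng.random((256, 256))
filtered = cut(img)            # displays the spectrum before and after filtering
print(filtered.shape)          # (256, 256)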
def csrmm2(m, n, k, descrA, csrValA, csrRowPtrA, csrColIndA, B, handle=None,
C=None, nnz=None, transA=CUSPARSE_OPERATION_NON_TRANSPOSE,
transB=CUSPARSE_OPERATION_NON_TRANSPOSE, alpha=1.0, beta=0.0,
ldb=None, ldc=None, check_inputs=True):
""" multiply two sparse matrices: C = transA(A) * transB(B)
higher level wrapper to cusparse<t>csrmm2 routines.
"""
if check_inputs:
for item in [csrValA, csrRowPtrA, csrColIndA, B]:
if not isinstance(item, pycuda.gpuarray.GPUArray):
raise ValueError("csr*, B, must be pyCUDA gpuarrays")
if C is not None:
if not isinstance(C, pycuda.gpuarray.GPUArray):
raise ValueError("C must be a pyCUDA gpuarray or None")
# dense matrices must be in column-major order
if not B.flags.f_contiguous:
raise ValueError("Dense matrix B must be column-major order")
if transB == CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE:
raise ValueError("Conjugate transpose operation not supported "
"for dense matrix B")
if (transB == CUSPARSE_OPERATION_TRANSPOSE) and \
(transA != CUSPARSE_OPERATION_NON_TRANSPOSE):
raise ValueError("if B is transposed, only A non-transpose is "
"supported")
if handle is None:
handle = misc._global_cusparse_handle
dtype = csrValA.dtype
if C is None:
if transA == CUSPARSE_OPERATION_NON_TRANSPOSE:
ldc = m
else:
ldc = k
alloc = misc._global_cusparse_allocator
C = gpuarray.zeros((ldc, n), dtype=dtype, order='F',
allocator=alloc)
elif not C.flags.f_contiguous:
raise ValueError("Dense matrix C must be in column-major order")
if nnz is None:
nnz = csrValA.size
if ldb is None:
ldb = B.shape[0]
if ldc is None:
ldc = C.shape[0]
# perform some basic sanity checks
if check_inputs:
if csrValA.size != nnz:
raise ValueError("length of csrValA array must match nnz")
if (B.dtype != dtype) or (C.dtype != dtype):
raise ValueError("A, B, C must share a common dtype")
if ldb < B.shape[0]:
raise ValueError("ldb invalid for matrix B")
if transA == CUSPARSE_OPERATION_NON_TRANSPOSE:
ldOpA = m # leading dimension for op(A)
tdOpA = k # trailing dimension for op(A)
else:
ldOpA = k
tdOpA = m
if transB == CUSPARSE_OPERATION_NON_TRANSPOSE:
if B.shape[1] != n:
raise ValueError("B, n incompatible")
if (ldb < tdOpA):
raise ValueError("size of A incompatible with B")
else:
if ldb < n:
raise ValueError("B, n incompatible")
if (B.shape[1] != tdOpA):
raise ValueError("size of A incompatible with B")
if (C.shape[1] != n):
raise ValueError("bad shape for C")
if (ldc != ldOpA):
raise ValueError("size of A incompatible with C")
if csrRowPtrA.size != m+1:
raise ValueError("length of csrRowPtrA invalid")
if dtype == np.float32:
fn = cusparseScsrmm2
elif dtype == np.float64:
fn = cusparseDcsrmm2
elif dtype == np.complex64:
fn = cusparseCcsrmm2
elif dtype == np.complex128:
fn = cusparseZcsrmm2
else:
raise ValueError("unsupported sparse matrix dtype: %s" % dtype)
transa = transA
transb = transB
try:
fn(handle, transa, transb, m, n, k, nnz, alpha, descrA, csrValA,
csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc)
except CUSPARSE_STATUS_INVALID_VALUE as e:
print("m={}, n={}, k={}, nnz={}, ldb={}, ldc={}".format(
m, n, k, nnz, ldb, ldc))
raise(e)
return C | fffbecab90dfb831a4429aed759c0218b065aa4d | 3,654,522 |
import polib
def is_translated(path):
    """ Checks whether every file referenced in the translation has at least one translated entry.
    Arguments:
        path (str): path to po-file
    Returns: True if every file in the translation has at least one translated
        entry, otherwise False.
    """
po = polib.pofile(path)
files = []
for e in po:
files += [f[0] for f in e.occurrences]
all_files = sorted(set(files))
translated_entities = [e for e in po if e.translated()]
files = []
for e in translated_entities:
files += [f[0] for f in e.occurrences]
translated_files = sorted(set(files))
return translated_files == all_files | eeacbbc8ff068684e56d79e1aaa65d564b2e33ec | 3,654,523 |
def pylm_component(name):
"""Decorator for registering a class to lightmetrica"""
def pylm_component_(object):
# Get base class
base = object.__bases__[0]
base.reg(object, name)
return object
return pylm_component_ | 531c7e3f224b824b438011d4be348a76154b3444 | 3,654,524 |
import torch
def dice_score(input_mask, target_mask, eps=1e-5):
"""
input mask: (B * K, HW) #probabilities [0, 1]
target_mask: (B * K, HW) #binary
"""
dims = tuple(range(1, input_mask.ndimension()))
intersections = torch.sum(input_mask * target_mask, dims) #(B, N)
cardinalities = torch.sum(input_mask + target_mask, dims)
dice = ((2. * intersections + eps) / (cardinalities + eps))
return dice | 8fbe4b7aaec4a45d7dec4705e4c3feb348250b64 | 3,654,525 |
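A quick sanity-check sketch with random masks, following the shapes in the docstring:

import torch

torch.manual_seed(0)
pred = torch.rand(4, 1024)                     # probabilities in [0, 1]
target = (torch.rand(4, 1024) > 0.5).float()   # binary ground-truth masks
print(dice_score(pred, target))                # tensor of 4 per-mask Dice scores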
def append_write(filename="", text=""):
"""
appends a string at the end of a text file (UTF8)
and returns the number of characters added
"""
with open(filename, "a", encoding="utf-8") as f:
f.write(text)
return len(text) | 6767f61b6624b82d732e7277507d03c3f4daf04a | 3,654,526 |
import torch
def psnr(img1, img2):
"""
compute PSNR between two images
"""
MSE = torch.mean((img1-img2)**2)
return 10*torch.log10(1**2/MSE) | f216733631d224aa27f5c5a395c143c3768f8f28 | 3,654,527 |
def is_scalar(dims):
"""
Returns True if a dims specification is effectively
a scalar (has dimension 1).
"""
return np.prod(flatten(dims)) == 1 | d2f2f1a1f2dd66ec01d9e653315d37b4ee4990e1 | 3,654,528 |
def applyMinv(obj, inputs, shape_cache):
"""Simple wrapper around a component's applyMinv where we can reshape the
arrays for each input and expand any needed array elements into full arrays.
"""
inputkeys = sorted(inputs.keys())
for key in inputkeys:
pre_process_dicts(obj, key, inputs, shape_cache)
pre_inputs = inputs.copy()
inputs = obj.applyMinv(pre_inputs, inputs)
# Result vector needs to be flattened.
for key in reversed(inputkeys):
post_process_dicts(key, inputs)
    # Clean out any leftover keys we added (iterate over a copy so we can pop safely)
    for key in list(inputs.keys()):
        if key not in inputkeys:
            inputs.pop(key)
return inputs | 9fd805408bea659f26eec93b430e450ea9228145 | 3,654,529 |
import json
import requests
import time
def get_county_data():
"""Get the raw data from coronavirus-tracker-api.herokuapp.com."""
url = ('https://coronavirus-tracker-api.herokuapp.com/v2/locations?source=csbs')
raw_data = None
    while raw_data is None:
        try:
            raw_data = json.loads(requests.request('GET', url, verify=False).text)
        except Exception:
            print('API Get for county-data failed.')
            time.sleep(5)  # If HTTP Request fails, wait 5s and try again.
return raw_data | 33404a65e6242b7416304f7194dc2a5c7f073d5d | 3,654,531 |
import numpy as np
def r2lm(measured_y, estimated_y):
"""
r^2 based on the latest measured y-values (r2lm)
Calculate r^2 based on the latest measured y-values. Measured_y and estimated_y must be vectors.
Parameters
----------
measured_y: numpy.array or pandas.DataFrame
estimated_y: numpy.array or pandas.DataFrame
Returns
-------
r2lm : float
r^2 based on the latest measured y-values
"""
measured_y = np.array(measured_y).flatten()
estimated_y = np.array(estimated_y).flatten()
return float(1 - sum((measured_y - estimated_y) ** 2) / sum((measured_y[1:] - measured_y[:-1]) ** 2)) | f75c89ca3f99659a3e2e12555a3968745fad1007 | 3,654,532 |
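A short usage sketch with made-up measurements:

import numpy as np

measured = np.array([1.0, 1.2, 1.1, 1.5, 1.7])
estimated = np.array([1.05, 1.15, 1.2, 1.45, 1.65])
print(r2lm(measured, estimated))  # close to 1 means much better than a "repeat the last value" baseline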
import networkx as nx
def G_to_NX_sparse(X, Y):
    """Convert sparse (i, j, weight) edge lists to NetworkX DiGraphs."""
Gs = []
N = len(Y)
for n in range(N):
x = X[n]
G = nx.DiGraph()
for i,j,w in x:
G.add_edge(i,j, weight=w)
Gs.append(G)
return Gs, Y | 8113ede05a0015119cceaa9c817b8bf3d46003c0 | 3,654,533 |
def pmf(k, n, a, b, loc=0):
"""JAX implementation of scipy.stats.betabinom.pmf."""
return lax.exp(logpmf(k, n, a, b, loc)) | efba7202231dde7d0dec1e56df7a52dccf7135a0 | 3,654,534 |
def discrete_bottleneck(x,
hidden_size,
z_size,
filter_size,
name,
mode=None,
startup_steps=50000,
bottleneck_kind='dvq',
num_blocks=2,
reshape_method='slice',
projection_tensors=None,
means=None,
beta=0.25,
noise_dev=1.,
decay=0.999,
discrete_mix=0.5,
random_top_k=1,
soft_em=False,
inv_temp=1.0,
epsilon=1e-5,
softmax_k=0,
kl_warmup_steps=150000,
ema=True,
ema_count=None,
ema_means=None,
summary=True,
dp_strength=1.0,
dp_decay=1.0,
dp_alpha=0.5,
slo=False,
slo_alpha=10,
slo_beta=0.5,
c_logits=None):
"""Discretization bottleneck for latent variables.
Args:
x: Input to the discretization bottleneck.
hidden_size: Dimension of the latent state.
z_size: Number of bits used to produce discrete code; discrete codes range
from 1 to 2**z_size.
filter_size: Filter size to be used for the embedding function.
name: Name for the bottleneck scope.
mode: Mode represents whether we are training or testing for bottlenecks
that differ in behavior (Default: None).
startup_steps: Number of steps after which latent predictor is trained
(Default: 50000).
bottleneck_kind: Kind of discretization bottleneck to use; one of dvq,
semhash, gumbel-softmax (Default: dvq).
num_blocks: Number of blocks to use for decomposed vector quantization.
reshape_method: Method to reshape for DVQ (Default: slice).
projection_tensors: If the reshape method is project, then these are the
tensors used to project (Default: None).
means: The embedding table for dvq (Default: None).
beta: Beta factor for the DVQ loss (Default: 0.25).
noise_dev: Stddev for noise added for semhash (Default: 0).
decay: Decay factor for the exponential moving average (Default: 0.999).
discrete_mix: Factor for mixing discrete and non-discrete input for semhash
(Default: 0.5).
random_top_k: Noisy top-k for DVQ (Default: 1).
soft_em: If True then use soft EM rather than hard EM (Default: False).
inv_temp: Inverse temperature for soft EM (Default: 1.)
epsilon: Epsilon parameter for DVQ (Default: 1e-5).
softmax_k: If > 1 then do top-k softmax (Default: 0).
kl_warmup_steps: Number of steps for kl warmup (Default: 150000).
ema: If True update embeddings using exponential moving averages (Default:
True).
ema_count: Table of counts for each embedding corresponding to how many
examples in a batch it was the closest to (Default: None).
ema_means: Exponentially averaged version of the embeddings (Default: None).
summary: If True, then write summaries (Default: True).
dp_strength: Strength of Dirichlet Process loss prior (Default: 1.0).
dp_decay: Decay the dp_strength using an exponential decay using this
term (Default: 1.0).
dp_alpha: Alpha term (pseudo-count) in Dirichlet Process (Default: 0.5).
slo: Smoothed L0
slo_alpha: alpha for smoothed L0
slo_beta: beta for smoothed L0
c_logits: a [num_blocks, block_size] tensor of logits for
computing cluster probabilities.
Returns:
Embedding to pass to the decoder, discrete latent, loss, and the embedding
function.
Raises:
ValueError: If projection_tensors is None for reshape_method project, or
ema_count or ema_means is None if we are using ema, or unknown args.
"""
block_v_size = None
if bottleneck_kind == 'dvq':
# Define the dvq parameters
assert means is not None
# Check block dimensions add up
if hidden_size % num_blocks != 0:
raise ValueError('num_blocks does not divide hidden size')
if 2**z_size % num_blocks != 0:
raise ValueError('num_blocks does not divide embedding table size')
block_v_size = 2**(z_size / num_blocks)
block_v_size = int(block_v_size)
# Set the reshape method corresponding to projections or slices
if reshape_method == 'slice':
reshape_fn = partial(
slice_hidden, hidden_size=hidden_size, num_blocks=num_blocks)
elif reshape_method == 'project':
if projection_tensors is None:
raise ValueError(
'Projection tensors is None for reshape_method project')
reshape_fn = partial(
project_hidden,
projection_tensors=projection_tensors,
hidden_size=hidden_size,
num_blocks=num_blocks)
else:
raise ValueError('Unknown reshape_method')
# Check if the ema settings make sense
if ema:
if ema_count is None:
raise ValueError('ema_count is None but ema is True')
if ema_means is None:
raise ValueError('ema_means is None but ema is True')
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
l = tf.constant(0.0)
if bottleneck_kind == 'dense':
c = tf.layers.dense(x, z_size, name='vcc')
h1 = tf.layers.dense(c, filter_size, name='vch1')
elif bottleneck_kind == 'vae':
c, l, _, _ = vae(x, z_size, 'vae')
h1 = tf.layers.dense(c, filter_size, name='vch1')
elif bottleneck_kind == 'semhash':
c = tf.layers.dense(x, z_size, name='vcc')
y_clean = common_layers.saturating_sigmoid(c)
if summary:
tf.summary.histogram('y_clean', tf.reshape(y_clean, [-1]))
if noise_dev > 0 and mode == tf.estimator.ModeKeys.TRAIN:
noise = tf.truncated_normal(
common_layers.shape_list(c), mean=0.0, stddev=noise_dev)
y = common_layers.saturating_sigmoid(c + noise)
else:
y = y_clean
d = tf.to_float(tf.less(0.5, y))
y_discrete = tf.stop_gradient(d) + y - tf.stop_gradient(y)
pd = common_layers.inverse_exp_decay(startup_steps * 2)
pd *= discrete_mix
pd = pd if mode == tf.estimator.ModeKeys.TRAIN else 1.0
c = tf.where(
tf.less(tf.random_uniform([common_layers.shape_list(y)[0]]), pd),
y_discrete, y)
h1a = tf.layers.dense(c, filter_size, name='vch1a')
h1b = tf.layers.dense(1.0 - c, filter_size, name='vch1b')
h1 = h1a + h1b
dx = tf.to_int32(tf.stop_gradient(d))
c = bit_to_int(dx, z_size)
elif bottleneck_kind == 'gumbel-softmax':
_, hot, l = gumbel_softmax(x, name, z_size, mode, softmax_k,
kl_warmup_steps, summary)
c = tf.argmax(hot, axis=-1)
h1 = tf.layers.dense(hot, hidden_size, name='dae_dense')
elif bottleneck_kind == 'dvq':
c_probs = None
if c_logits is not None:
c_probs = tf.nn.softmax(c_logits, axis=-1)
x_reshaped = reshape_fn(x)
x_means_hot, x_means, q_loss, e_loss = embedding_lookup(
x_reshaped, means, num_blocks, block_v_size, random_top_k, soft_em,
inv_temp, ema_count, c_probs)
# Get the discrete latent represenation
x_means_idx = tf.argmax(x_means_hot, axis=-1)
# Get the binary representation
x_means_bits = int_to_bit(
x_means_idx, num_bits=int(z_size / num_blocks), base=2)
shape = common_layers.shape_list(x_means_bits)
new_shape = shape[:-1]
new_shape[-1] = z_size
x_means_bits = tf.reshape(x_means_bits, shape=new_shape)
c = bit_to_int(tf.to_int32(x_means_bits), num_bits=z_size, base=2)
# Adjust shape of c
shape_x = common_layers.shape_list(x)
new_shape = shape_x[:-1]
c = tf.reshape(c, new_shape)
# Update the ema variables
if ema:
tf.logging.info('Using EMA with beta = {}'.format(beta))
updated_ema_count = moving_averages.assign_moving_average(
ema_count,
tf.reduce_sum(
tf.reshape(x_means_hot, shape=[-1, num_blocks, block_v_size]),
axis=0),
decay,
zero_debias=False)
# Adding a term that puts a Dirichlet prior over cluster probabilities
# Hopefully it'll encourage rich get richer behaviors
dp_prior_loss = 0.
slo_loss = 0.
if dp_strength > 0.0:
# Decay dp_strength over time to make it less important
dp_strength = tf.train.exponential_decay(
dp_strength,
global_step=tf.to_int32(tf.train.get_global_step()),
decay_steps=20000,
decay_rate=dp_decay)
dp_count = ema_count + dp_alpha
p = dp_count / tf.reduce_sum(dp_count, 1, keepdims=True)
dp_prior_loss = tf.log(p)
dp_prior_loss = -1.0 * tf.reduce_sum(dp_prior_loss)
dp_prior_loss /= (num_blocks * block_v_size)
# if using smoothed L0
if slo:
# expected log likelihood
ell = tf.reduce_sum(ema_count * tf.log(c_probs))
# the prior component in the loss for MAP EM.
slo_prior = slo_alpha * tf.reduce_sum(tf.exp(-1.*c_probs/slo_beta))
slo_loss = -1. * (ell + slo_prior)/(num_blocks * block_v_size)
x_means_hot_flat = tf.reshape(
x_means_hot, shape=[-1, num_blocks, block_v_size])
dw = tf.matmul(
tf.transpose(x_means_hot_flat, perm=[1, 2, 0]),
tf.transpose(x_reshaped, perm=[1, 0, 2]))
updated_ema_means = moving_averages.assign_moving_average(
ema_means, dw, decay, zero_debias=False)
n = tf.reduce_sum(updated_ema_count, axis=-1, keep_dims=True)
updated_ema_count = ((updated_ema_count + epsilon) /
(n + 2**z_size * epsilon) * n)
updated_ema_means /= tf.expand_dims(updated_ema_count, axis=-1)
with tf.control_dependencies([e_loss]):
update_means = tf.assign(means, updated_ema_means)
with tf.control_dependencies([update_means]):
l = beta * e_loss + dp_strength * dp_prior_loss + slo_loss
else:
l = q_loss + beta * e_loss
x_means = tf.reshape(x_means, shape_x)
x_reshaped = tf.reshape(x_reshaped, shape_x)
h1 = x_reshaped + tf.stop_gradient(x_means - x_reshaped)
else:
raise ValueError('Unknown discretization method.')
h2 = tf.layers.dense(tf.nn.relu(h1), filter_size, name='vch2')
res = tf.layers.dense(tf.nn.relu(h2), hidden_size, name='vcfin')
embed_fn = partial(
embed,
hidden_size=hidden_size,
z_size=z_size,
filter_size=filter_size,
name=name,
bottleneck_kind=bottleneck_kind,
num_blocks=num_blocks,
block_v_size=block_v_size,
means=means)
return res, c, l, embed_fn | ec1576b2b6a19a03995ec6dfb9a67592b925a28c | 3,654,535 |
def binarize_categorical(x, ids):
""" replace categorical feature with multiple binary ones """
x_ = np.zeros((x.shape[0], 1))
for idx in ids:
x_ = np.hstack((x_, binarize_categorical_feature(x[:, idx:idx+1])))
x = np.delete(x, ids, axis=1)
x = np.hstack((x, x_[:, 1:]))
return x | 625b551b437297c6a0c48f5ebfe2796c3be84c89 | 3,654,536 |
def import_json_dataset(fileset):
"""Returns a list of imported raw JSON data for every file in the fileset.
"""
d = []
for f in fileset:
d.append(import_json_data(f))
return d | 043720f9400cf2734598f6fe476077e004b8ef69 | 3,654,537 |
import math
def angle_difference(angle1, angle2):
"""
Calculates the difference between the given angles in clockwise direction as radians.
:param angle1: float
:param angle2: float
:return: float; between 0 and 2*Pi
"""
if (angle1 > 0 and angle2 >= 0) and angle1 > angle2:
return abs(angle1 - angle2)
elif (angle1 >= 0 and angle2 > 0) and angle1 < angle2:
return 2 * math.pi + angle1 - angle2
elif (angle1 < 0 and angle2 <= 0) and angle1 < angle2:
return 2 * math.pi + angle1 + abs(angle2)
elif (angle1 <= 0 and angle2 < 0) and angle1 > angle2:
return abs(angle1 - angle2)
elif angle1 <= 0 < angle2:
return 2 * math.pi + angle1 - angle2
elif angle1 >= 0 >= angle2:
return angle1 + abs(angle2)
else:
return 0 | 377d1915e58a96b7f1526dceb31febf45c90567b | 3,654,538 |
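Two worked examples of the clockwise difference (angles in radians):

import math

print(angle_difference(math.pi / 2, 0))   # ~1.571, a quarter turn clockwise
print(angle_difference(0, math.pi / 2))   # ~4.712, three quarters of a turn clockwise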
def merge_nd(nd_cdp, nd_lldp):
""" Merge CDP and LLDP data into one structure """
neis = dict()
nd = list()
for n in nd_lldp:
neis[(n['local_device_id'], n['remote_device_id'], n['local_int'], n['remote_int'])] = n
for n in nd_cdp:
# Always prefer CDP, but grab description from LLDP if available
        if (n['local_device_id'], n['remote_device_id'], n['local_int'], n['remote_int']) in neis:
if 'description' in neis[(n['local_device_id'], n['remote_device_id'], n['local_int'], n['remote_int'])]:
n['description'] = neis[(n['local_device_id'], n['remote_device_id'], n['local_int'], n['remote_int'])]['description']
neis[(n['local_device_id'], n['remote_device_id'], n['local_int'], n['remote_int'])] = n
for n in neis:
nd.append(neis[n])
return nd | 90d55ffdabb6c28198ee4c59bc36fdcb6fa54e62 | 3,654,539 |
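A tiny sketch of the merge with two hand-made neighbor entries (the field values are illustrative only):

nd_cdp = [{'local_device_id': 'sw1', 'remote_device_id': 'sw2',
           'local_int': 'Gi0/1', 'remote_int': 'Gi0/2'}]
nd_lldp = [{'local_device_id': 'sw1', 'remote_device_id': 'sw2',
            'local_int': 'Gi0/1', 'remote_int': 'Gi0/2',
            'description': 'uplink to sw2'}]
merged = merge_nd(nd_cdp, nd_lldp)
print(merged[0]['description'])  # the CDP entry wins but keeps the LLDP description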
def combine_divisions(division):
"""Return the new pattern after the rules have been applied to every division"""
size = int(sqrt(len(division)))
matrix = []
for r in xrange(size):
matrix.append([])
for c in xrange(r * size, (r + 1) * size):
matrix[len(matrix) - 1].append(division[c])
return np.array((np.bmat(matrix))) | a112449421603a227e4ee470330aa1a1ece47762 | 3,654,540 |
import string
def modified_greedy(sentences,
tokenized,
model,
stopwords,
original_indices,
sent_representations,
objective_function,
min_sentence_length):
"""Implementation of the MMR summarizer as described in Lin & Bilmes (2010)."""
# Initialize stuff
# Ground set indices: all indices, stays constant throughout the function
all_indices = tuple(range(len(original_indices)))
# Candidate indices: all candidates (gets smaller every iteration)
candidate_indices = list(range(len(original_indices)))
# Summary indices: indices of represented sentences added to summary
summary_indices = []
# Scaling factor (r) is taken from original paper: r = 0.3
scaling_factor = .3
# Tf-idf clustering, as described in Lin & Bilmes (2011)
n_clusters = len(original_indices) // 5
k_means = KMeans(n_clusters=n_clusters, random_state=42)
clustering = k_means.fit_predict(sent_representations)
clustered_indices = [np.array(all_indices)[np.where(clustering == i)].tolist()
for i in range(n_clusters)]
# Make document vector (since w2v sentences are now sums, it is this easy):
document_vector = np.sum(sent_representations, axis=0)
# Pick the right sentences from sentence list (to match representation matrix)
sentences = [sentences[i] for i in original_indices]
tokenized = [tokenized[i] for i in original_indices]
# Construct bag of words from representable sentences
preprocessed = (sentence.lower().split(' ')
for i, sentence in enumerate(tokenized))
# POS-tag filtering, and punctuation removal
preprocessed = [[word.translate(str.maketrans('', '', string.punctuation))
for word in sentence] for sentence in preprocessed]
# Remove OOV words
sentence_words = [[word for word in sentence if word in model.model.vocab]
for sentence in preprocessed]
# Deduplicate & flatten
bag_of_words = list(set([word for sentence in sentence_words for word in sentence]))
# Look up in-vocabulary word vectors
vectorized = [(word, model.model[word]) for word in bag_of_words]
# Construct word similarity matrix for all words in article object
names, vectors = zip(*vectorized)
# word_distance_matrix = pairwise_distances(vectors, metric='euclidean')
word_distance_matrix = pairwise_distances(vectors, metric='cosine')
# Pandas workaround
name_index_tuples = list(zip(list(range(len(names))), names))
# Fill diagonal with nan, to make sure it's never the minimum
np.fill_diagonal(word_distance_matrix, np.nan)
# Compute sentence similarity matrix based on sentence representations
distance_matrix = pairwise_distances(sent_representations, metric='cosine')
similarity_matrix = np.subtract(1, distance_matrix)
np.fill_diagonal(similarity_matrix, np.nan)
# Compute sentence lengths
sentence_lengths = [len(s.split()) for s in sentences]
length_scaler = np.power(sentence_lengths, scaling_factor).tolist()
# Remove sentences that do not have similarity with other sentences from candidate set
similarity_sum_per_sentence = np.nansum(similarity_matrix, axis=0)
irrelevant_indices = np.where(similarity_sum_per_sentence == 0)[0].tolist()
candidate_indices = [index for index in candidate_indices
if index not in irrelevant_indices]
# Already save the best singleton summary, for comparison to iterative result later
singleton_scores = [objective_function(similarity_matrix,
sent_representations,
name_index_tuples,
sentence_words,
word_distance_matrix,
document_vector,
clustered_indices,
all_indices,
[i])
if sentence_lengths[i] <= 100
else np.nan for i in candidate_indices]
best_singleton_score = np.nanmax(singleton_scores)
# Note that the singleton index is directly translated to a sentence representation index
best_singleton_index = candidate_indices[np.nanargmax(singleton_scores)]
# Greedily add sentences to summary
summary_length = 0
for iteration in range(len(sentence_lengths)):
print("Iteration {}".format(iteration))
# Edge case: value of objective function when summary is empty.
if iteration == 0:
current_score = 0.
else:
current_score = objective_function(similarity_matrix,
sent_representations,
name_index_tuples,
sentence_words,
word_distance_matrix,
document_vector,
clustered_indices,
all_indices,
summary_indices)
# Compute all relevant new scores
new_scores = [objective_function(similarity_matrix,
sent_representations,
name_index_tuples,
sentence_words,
word_distance_matrix,
document_vector,
clustered_indices,
all_indices,
summary_indices+[i])
if sentence_lengths[i] > min_sentence_length
else np.nan
for i in candidate_indices]
# If there are no candidates left, break the loop
if all(np.isnan(score) for score in new_scores):
break
# Remove non-candidate elements from length scaler to fit arrays
current_length_scaler = [v for i, v in enumerate(length_scaler) if i in candidate_indices]
added_values = np.divide(np.subtract(new_scores, current_score), current_length_scaler)
best_index = np.nanargmax(added_values)
# Pass best index if the sentence does not increase MMR-score (+ empty summary edge case)
if not new_scores[best_index] - current_score >= 0 and summary_indices:
candidate_indices.pop(best_index)
else:
summary_indices.append(candidate_indices[best_index])
summary_length += sentence_lengths[candidate_indices[best_index]]
candidate_indices.pop(best_index)
if summary_length >= 100:
break
# Last step: compare singleton score with summary score, and pick best as summary
final_summary_score = objective_function(similarity_matrix,
sent_representations,
name_index_tuples,
sentence_words,
word_distance_matrix,
document_vector,
clustered_indices,
all_indices,
summary_indices)
if best_singleton_score >= final_summary_score:
ranked_sentences = [sentences[i] for i in [best_singleton_index]]
ranking = list(zip([best_singleton_index], ranked_sentences))
else:
ranked_sentences = [sentences[i] for i in summary_indices]
ranking = list(zip(summary_indices, ranked_sentences))
# Replace filtered indices with original ones
ranking = [(original_indices[i], s) for i, s in ranking]
return ranking | b542c025fe870e1e7d41d33349de10a395a17eb3 | 3,654,542 |
def noiseless(rho, unitary):
"""Returns the noiseless predictions."""
rhotilde = unitary @ rho @ unitary.conj().T
elt = rhotilde[0, 0]
if elt >= 0.49999999:
return 0, elt
return 1, elt | bfa265046361b159e7d264aa8312b75cd7a0df3f | 3,654,543 |
def __get_service_info_from_thrift(root_path, idl_service, need_test_methods):
"""从指定IDL_Service和request_config配置表中,获取测试方法和Request的映射表"""
customized_request_config = yaml.load(
open(os.path.join(root_path, 'test_red', 'request_config.yaml')))
method_request = collections.OrderedDict()
idl_method_request = dict([
(name[:-5], obj) for name, obj in inspect.getmembers(idl_service)
if inspect.isclass(obj) and '_args' in name])
for method_name in need_test_methods:
if customized_request_config and method_name in customized_request_config:
method_request[method_name] = customized_request_config[method_name]
elif method_name in idl_method_request:
try:
request_obj_name = idl_method_request[method_name].thrift_spec[2][3][0].__name__
method_request[method_name] = {__to_underscore(request_obj_name): request_obj_name}
except Exception:
print 'invalid method name: ' + method_name
return method_request | 0b736bb6b5411904bc28f887e6596c1242c324c9 | 3,654,544 |
def energy_calc(p, t):
"""
Calculates energy from power and time using the formula:
energy = power * time
Parameters
----------
p: Int or float
The power value of the equation.
t: Int or float
The time value of the equation (seconds).
Returns
-------
Int
p * t
Raises
------
ValueError
If p or t is not an integer or float.
Examples
--------
>>> school_algorithms.energy_calc(5, 2)
10
"""
_if_not_int_or_float_raise(p, t)
return p * t | 7df3180fdb56989e62a69305763455edbfa44ebc | 3,654,545 |
import logging
def api_images_list_json(version):
"""
Return Docker Image listing https://docs.docker.com/engine/api/v1.41/#tag/Image
:param version: Docker API version
:return: string of fake images associated with honeypot.
"""
logging.info("images-list - %s, %s, %s, %s, %s" % (
version, request.remote_addr, request.user_agent, request.data, request.url))
req_objs = util.save_request_obj(request)
customLog.info(req_objs)
return API_RESP_IMAGES_JSON_LIST | 083911840c02ddc79af5ed457c42a29a19f1c57f | 3,654,547 |
def _handle_eval_return(self, result, col, as_pyranges, subset):
"""Handle return from eval.
If col is set, add/update cols. If subset is True, use return series to subset PyRanges.
Otherwise return PyRanges or dict of data."""
if as_pyranges:
if not result:
return pr.PyRanges()
first_hit = list(result.values())[0]
if isinstance(first_hit, pd.Series):
if first_hit.dtype == bool and subset:
return self[result]
elif col:
self.__setattr__(col, result)
return self
else:
raise Exception(
"Cannot return PyRanges when function returns a Series! Use as_pyranges=False."
)
return pr.PyRanges(result)
else:
return result | 84698bcb3b1f1e961ac7f3c4e347d65ce0790066 | 3,654,548 |
def compute_sigma0_sparse(V, dX, W_sensors, W_points, W_observations, column_dict):
"""
Computes the resulting standard deviation of the residuals for the current state of the bundle network.
Parameters
----------
V : ndarray
An array of residuals of the difference between registered measure
and back projected ground points in image space.
dX : ndarray
The array of parameter updates ordered according to column_dict
W_sensors : scipy.sparse.matrix
The sensor weight matrix
W_points : dict
Dictionary that maps point IDs to their weight matrices.
W_observations : ndarray
The observation weight matrix (i.e.: measure weights)
column_dict : dict
Dictionary that maps serial numbers and point IDs to index ranges in dX
Returns
-------
: float64
Standard deviation of the residuals
"""
num_image_parameters = W_sensors.shape[0]
num_observations = W_observations.shape[0]
VTPV = V.dot(W_observations).dot(V)
VTPV += dX[:num_image_parameters].dot(W_sensors.dot(dX[:num_image_parameters]))
for point_id, W_p in W_points.items():
point_update = dX[column_dict[point_id][0]:column_dict[point_id][1]]
VTPV += point_update.dot(W_p.dot(point_update))
dof = num_observations - num_image_parameters - 3 * len(W_points)
return np.sqrt(VTPV/dof) | 05606efe21d61f67539eae627caea976a532f85f | 3,654,549 |
def fill(bitdef, value):
"""
Fill undefined bits with a value.
For example ``1..0100.1`` becomes ``111010011`` when filled with 1s.
Args:
bitdef (str): The bitdef to fill.
value (str): The value to fill with, "0" or "1".
Returns:
str: The filled bitdef.
"""
output = ""
for bit in bitdef:
if bit == ".":
output += value
else:
output += bit
return output | eef3ac59a2a7c4d1a25851a2ca14b3ffed6d1463 | 3,654,551 |
import requests
import json
def get_cman_info(state):
"""
    Fetch the tweeter data for a state's congress members from the API and return the relevant attributes
    :param state: state you are scraping
    :return: list of relevant scraped attributes, one entry per congress member
"""
cman_attrs = []
abbrev = states[state]
r = requests.get(url_base + abbrev)
d = json.loads(r.text)
for cman in d['tweeters']:
_id = cman.get('_id', '')
dateOfBirth = cman.get('dateOfBirth', '')
email = cman.get('email', '')
facebookUserName = cman.get('facebookUserName', '')
fullName = cman.get('fullName', '')
gender = cman.get('gender', '')
address = cman['office'].get('address', '')
chamber = cman['office'].get('chamber', '')
country = cman['office'].get('country', '')
district = cman['office'].get('district', '')
leadershipRole = cman['office'].get('leadershipRole', '')
party = cman['office'].get('party', '')
state = cman['office'].get('state', '')
termEnd = cman['office'].get('termEnd', '')
termStart = cman['office'].get('termStart', '')
title = cman['office'].get('title', '')
phone = cman.get('phone', '')
profileImageSmall = cman.get('profileImageSmall', '')
slug = cman.get('slug', '')
followersCount = cman['twitterProfile'].get('followersCount', '')
friendsCount = cman['twitterProfile'].get('friendsCount', '')
idStr = cman['twitterProfile'].get('idStr', '')
name = cman['twitterProfile'].get('name', '')
profileImageUrl = cman['twitterProfile'].get('profileImageUrl', '')
screenName = cman['twitterProfile'].get('screenName', '')
statusesCount = cman['twitterProfile'].get('statusesCount', '')
url = cman['twitterProfile'].get('url', '')
verified = cman['twitterProfile'].get('verified', '')
twitterUserName = cman.get('twitterUserName', '')
website = cman.get('website', '')
cman_attrs.append(
[_id, dateOfBirth, email, facebookUserName, fullName, gender, address, chamber, country, district,
leadershipRole, party, state, termEnd, termStart, title, phone, profileImageSmall, slug, followersCount,
friendsCount, idStr, name, profileImageUrl, screenName, statusesCount, url, verified, twitterUserName,
website])
return cman_attrs | afe180c4bbd930cfbfe42e28a769d07f2c4378cd | 3,654,552 |
import numpy as np
def concatenate_data(data, field='normalized_data'):
"""
Concatenate trial data in a list of dictionaries
:param data: nested dict, contains all trial infos
:param field: str, dict key in info dict in general data structure
:return:
"""
time_series = np.concatenate([info[field] for info in data],
axis=1)
assert time_series.shape[0] == 306
return time_series | 7f3dfb7aed2ffedf2124a9f57df0abf8491d1af6 | 3,654,553 |
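A minimal sketch with fake trials; the 306-row assertion in the function corresponds to a fixed channel count:

import numpy as np

trials = [{'normalized_data': np.zeros((306, 100))},
          {'normalized_data': np.zeros((306, 150))}]
print(concatenate_data(trials).shape)  # (306, 250)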
def _find_weight_ops(op, graph, weights):
""" Find the vars come from operators with weight.
"""
pre_ops = graph.pre_ops(op)
for pre_op in pre_ops:
### if depthwise conv is one of elementwise's input,
### add it into this same search space
if _is_depthwise(pre_op):
for inp in pre_op.all_inputs():
if inp._var.persistable:
weights.append(inp._var.name)
if pre_op.type() in WEIGHT_OP and not _is_depthwise(pre_op):
for inp in pre_op.all_inputs():
if inp._var.persistable:
weights.append(inp._var.name)
return weights
return _find_weight_ops(pre_op, graph, weights)
return weights | 04e4a21079a3857815e39be3fe00e15aeac2f3b3 | 3,654,554 |
def get_GUI_presets_dict():
"""Return a dictionary of all of the available potential functions."""
preset_dict = {'cosine_potential': np.array([3.14, -6.28, 12.57, 0.01, 0,
0, 0, 0]).astype(str),
'two_gaussian_potential': np.array([2.67, -4, 4, 0.01,
0, 0, 0,
0]).astype(str),
'pv_2D_potential': np.array([1.5, 0, 3.0, 0.01, 0.6, -2.0,
2.0, 0.01]).astype(str),
'muller_brown_potential': np.array([0, 0, 0, 0, 0, 0, 0,
0]).astype(str),
'C_Cl_potential': np.array([0, 0, 0, 0, 0, 0, 0,
0]).astype(str)
}
return preset_dict | 0034ecdbde2f27e1b8db25a82231fca9bc79485c | 3,654,555 |
def _escapeEnds(original):
"""Comment, function end.
Escape comment end, because non-greedy becomes greedy in context. Example:
blockCommentNonGreedy = '(\s*/\*[\s\S]+?\*/\s*){0,1}?'
"""
original = _escapeWildCard(original)
commentEscaped = original \
.replace(commentEndEscape, commentEndEscapeEscape) \
.replace(commentEnd, commentEndEscape)
return _escapeFunctionEnd(commentEscaped) | 5a0df98f42d2df2b424cd6bfa7c533e0016557fe | 3,654,556 |
def handle_bad_request(error: BadRequest) -> Response:
"""Render the base 400 error page."""
rendered = render_template("base/400.html", error=error,
pagetitle="400 Bad Request")
response: Response = make_response(rendered)
response.status_code = status.BAD_REQUEST
return response | 70c6c835ef31839ff7b637443c414abbb549bcb0 | 3,654,557 |
import torch
def top_k_top_p_filtering(
logits: torch.FloatTensor,
top_k: int = 0,
top_p: float = 1.0,
filter_value: float = -float("Inf"),
min_tokens_to_keep: int = 1,
) -> torch.FloatTensor:
"""
Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (batch size, vocabulary size)
top_k (`int`, *optional*, defaults to 0):
If > 0, only keep the top k tokens with highest probability (top-k filtering)
top_p (`float`, *optional*, defaults to 1.0):
If < 1.0, only keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus
filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
min_tokens_to_keep (`int`, *optional*, defaults to 1):
            Minimum number of tokens we keep per batch example in the output.
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
if top_k > 0:
logits = TopKLogitsWarper(top_k=top_k, filter_value=filter_value, min_tokens_to_keep=min_tokens_to_keep)(
None, logits
)
if 0 <= top_p <= 1.0:
logits = TopPLogitsWarper(top_p=top_p, min_tokens_to_keep=min_tokens_to_keep)(None, logits)
return logits | 0c2f8392dcc6ada2afb1dc33575465e194a52199 | 3,654,558 |
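A small usage sketch (assumes torch and the transformers logits warpers referenced above are available; the vocabulary size and sampling step are illustrative):

import torch

torch.manual_seed(0)
logits = torch.randn(2, 50257)                         # (batch, vocab) scores from a language model
filtered = top_k_top_p_filtering(logits, top_k=50, top_p=0.95)
probs = torch.softmax(filtered, dim=-1)                # filtered positions get ~zero probability
next_tokens = torch.multinomial(probs, num_samples=1)
print(next_tokens.shape)                               # torch.Size([2, 1])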
import re
def parseFimo(fimoFile, strand):
""" parse the fimo.txt file
Args:
the fimo.txt file
strand = single or double
Returns:
fimoDict: a dict between motif ID and a list of sequences it occurs in
"""
#dict to store for each motif list of seqs that it occurs in
fimoDict = {}
#read the fimo.txt file
with open(fimoFile, 'rb') as handler:
for line in handler:
line = line.strip()
#if re.search(r'#', line):
#continue
if re.search(r'stop', line):
continue
lineSplit = line.split()
motifName = lineSplit[0]
seqName = lineSplit[1]
start = int(lineSplit[2])
stop = int(lineSplit[3])
            # strand is assumed to be column 5 of this FIMO text layout (motif, sequence, start, stop, strand, score, p-value)
            inStrand = lineSplit[4]
            pval = float(lineSplit[6])
#check of this is on the negative strand hit
if strand == 'single' and inStrand == '-':
continue
if motifName not in fimoDict:
fimoDict[motifName] = []
if seqName not in fimoDict[motifName]:
fimoDict[motifName].append(seqName)
#check the motifs
print '\n\nfimo number of motifs:', len(fimoDict)
#for motifName, seqList in fimoDict.items():
#print motifName
#print '\t', seqList
return fimoDict | ea6e0765c474e367653571e9a88e6449fc947ff5 | 3,654,559 |
def pad_batch_dimension_for_multiple_chains(
observed_time_series, model, chain_batch_shape):
""""Expand the observed time series with extra batch dimension(s)."""
# Running with multiple chains introduces an extra batch dimension. In
# general we also need to pad the observed time series with a matching batch
# dimension.
#
# For example, suppose our model has batch shape [3, 4] and
# the observed time series has shape `concat([[5], [3, 4], [100])`,
# corresponding to `sample_shape`, `batch_shape`, and `num_timesteps`
# respectively. The model will produce distributions with batch shape
# `concat([chain_batch_shape, [3, 4]])`, so we pad `observed_time_series` to
# have matching shape `[5, 1, 3, 4, 100]`, where the added `1` dimension
# between the sample and batch shapes will broadcast to `chain_batch_shape`.
[ # Extract mask and guarantee `event_ndims=2`.
observed_time_series,
is_missing
] = canonicalize_observed_time_series_with_mask(observed_time_series)
event_ndims = 2 # event_shape = [num_timesteps, observation_size=1]
model_batch_ndims = (
model.batch_shape.ndims if model.batch_shape.ndims is not None else
tf.shape(model.batch_shape_tensor())[0])
# Compute ndims from chain_batch_shape.
chain_batch_shape = tf.convert_to_tensor(
value=chain_batch_shape, name='chain_batch_shape', dtype=tf.int32)
if not chain_batch_shape.shape.is_fully_defined():
raise ValueError('Batch shape must have static rank. (given: {})'.format(
chain_batch_shape))
if chain_batch_shape.shape.ndims == 0: # expand int `k` to `[k]`.
chain_batch_shape = chain_batch_shape[tf.newaxis]
chain_batch_ndims = tf.compat.dimension_value(chain_batch_shape.shape[0])
def do_padding(observed_time_series_tensor):
current_sample_shape = tf.shape(
observed_time_series_tensor)[:-(model_batch_ndims + event_ndims)]
current_batch_and_event_shape = tf.shape(
observed_time_series_tensor)[-(model_batch_ndims + event_ndims):]
return tf.reshape(
tensor=observed_time_series_tensor,
shape=tf.concat([
current_sample_shape,
tf.ones([chain_batch_ndims], dtype=tf.int32),
current_batch_and_event_shape], axis=0))
# Padding is only needed if the observed time series has sample shape.
observed_time_series = ps.cond(ps.rank(observed_time_series) >
model_batch_ndims + event_ndims,
lambda: do_padding(observed_time_series),
lambda: observed_time_series)
if is_missing is not None:
is_missing = ps.cond(ps.rank(is_missing) >
model_batch_ndims + event_ndims,
lambda: do_padding(is_missing),
lambda: is_missing)
return missing_values_util.MaskedTimeSeries(observed_time_series,
is_missing=is_missing) | ec072f3fa5318ee3f4c82dcc0d3697a5160b257f | 3,654,560 |
from typing import Union
import re
def get_bytes(size: Union[str, int]) -> int:
"""Converts string representation of bytes to a number of bytes.
If an integer is passed, it is returned as is (no conversion).
Args:
size (Union[str, int]): A string or integer representation of bytes to be converted.
(eg. "0.3 Gib", "3mb", "1024", 65536)
Returns:
int: A number of bytes represented by the input string or integer.
Exceptions:
ValueError: If the input string cannot be converted to an integer.
TypeError: If the input string is not a string or integer.
"""
if isinstance(size, int):
if size < 0:
raise ValueError("Negative size not allowed.")
return size
if not isinstance(size, str):
raise TypeError("Size must be a string or integer.")
m = re.match(r"^\s*(?P<size>(([1-9]\d+)|\d)(\.\d+)?)\s*(?P<unit>[a-z]{1,3})?\s*$", size, re.IGNORECASE)
if not m:
raise ValueError(f"Invalid size string ('{size}').")
parsed_size = float(m.group("size"))
unit_match = m.group("unit")
if unit_match:
parsed_unit = unit_match.lower()
else:
parsed_unit = "b" # default to bytes
if parsed_unit not in BYTES_UNIT:
raise ValueError(f"Invalid unit ('{parsed_unit}').")
return int(parsed_size * BYTES_UNIT[parsed_unit]) | 76cd67a0d581b79105a79bc84d66126d3201b07a | 3,654,561 |
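A minimal usage sketch for get_bytes. The row references a module-level BYTES_UNIT mapping that is not included above, so the mapping below (and therefore the exact outputs) is an assumption.

# Assumed unit table (not part of the original row): lowercase unit -> multiplier.
BYTES_UNIT = {"b": 1, "kb": 10**3, "mb": 10**6, "gb": 10**9,
              "kib": 2**10, "mib": 2**20, "gib": 2**30}

print(get_bytes(65536))      # 65536 -- integers pass through unchanged
print(get_bytes("3mb"))      # 3000000 under the assumed mapping
print(get_bytes("0.5 GiB"))  # 536870912 under the assumed mapping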
def port_translation_func(req: AdvancedDataTypeRequest) -> AdvancedDataTypeResponse:
"""
Convert a passed in AdvancedDataTypeRequest to a AdvancedDataTypeResponse
"""
resp: AdvancedDataTypeResponse = {
"values": [],
"error_message": "",
"display_value": "",
"valid_filter_operators": [
FilterStringOperators.EQUALS,
FilterStringOperators.GREATER_THAN_OR_EQUAL,
FilterStringOperators.GREATER_THAN,
FilterStringOperators.IN,
FilterStringOperators.LESS_THAN,
FilterStringOperators.LESS_THAN_OR_EQUAL,
],
}
if req["values"] == [""]:
resp["values"].append([""])
return resp
for val in req["values"]:
string_value = str(val)
try:
if string_value.isnumeric():
if not 1 <= int(string_value) <= 65535:
raise ValueError
resp["values"].append(
[int(string_value)]
if string_value.isnumeric()
else port_conversion_dict[string_value]
)
except (KeyError, ValueError):
resp["error_message"] = str(
f"'{string_value}' does not appear to be a port name or number"
)
break
else:
resp["display_value"] = ", ".join(
map(
lambda x: f"{x['start']} - {x['end']}"
if isinstance(x, dict)
else str(x),
resp["values"],
)
)
return resp | b8c41d8c3d3c2fa0a9e67b8ef9ff93422921e7e3 | 3,654,562 |
from random import random
def get_two_diff_order_index(start=0, stop=1, order=True, diff=True):
"""
    Return two integers from a range. They can be:
    put in order (default) or unordered,
    always different (default) or allowed to repeat.
    start - integer (default = 0)
    stop - integer (default = 1)
    order - boolean (default = True)
    diff - boolean (default = True)
"""
my_range = stop - start
first = int(my_range * random())+start
second = int(my_range * random())+start
#first = randint(start, stop)
#second = randint(start, stop)
if diff:
while first == second:
second = int( my_range * random()) + start
#second = randint(start, stop)
if order:
if first > second:
second, first = first, second
return first, second | 7bd0e17efb969ea59e7a30d8fdaae55d901a718e | 3,654,564 |
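A usage sketch for get_two_diff_order_index; the results are random, so only the call pattern is shown.

lo, hi = get_two_diff_order_index(0, 10)                         # two distinct ints from [0, 10), ascending
a, b = get_two_diff_order_index(0, 10, order=False, diff=False)  # values may repeat and are not reordered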
import math
import numpy as np
def GriewankRosenbrock(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB GriewankRosenbrock function."""
dim = len(arr)
r_x = np.matmul(_R(dim, seed, b"R"), arr)
# Slightly off BBOB documentation in order to center optima at origin.
# Should be: max(1.0, (dim**0.5) / 8.0) * r_x + 0.5 * np.ones((dim,)).
z_arr = max(1.0, (dim**0.5) / 8.0) * r_x + np.ones((dim,))
s_arr = np.zeros(dim)
for i in range(dim - 1):
s_arr[i] = 100.0 * (z_arr[i]**2 - z_arr[i + 1])**2 + (z_arr[i] - 1)**2
total = 0.0
for i in range(dim - 1):
total += (s_arr[i] / 4000.0 - math.cos(s_arr[i]))
return (10.0 * total) / (dim - 1) + 10 | 9a9ca4f043e60fb971c5212de33379c29aaade58 | 3,654,566 |
def listCurrentAuctionsByKeyword(username, keyword):
"""Listar os leilões que estão a decorrer"""
try:
valid = utils.validateTypes([keyword], [str])
if not valid:
return jsonify({'erro': 404})
auctions = db.listAuctions(keyword)
if auctions == "noResults":
db.connection.commit()
return jsonify({'Ups': 'Sem resultados para esta pesquisa!'})
db.connection.commit()
return jsonify(auctions)
except Exception as e:
db.connection.rollback()
print(e)
return jsonify({'erro': 401}) | c02c58a294b3d65821f36872dcf23e4f7abff49b | 3,654,567 |
from typing import Dict
def hash_dict(data: Dict) -> int:
"""
Hashes a Dictionary recursively.
List values are converted to Tuples.
WARNING: Hashing nested dictionaries is expensive.
"""
cleaned_dict: Dict = {}
def _clean_dict(data: Dict) -> Dict:
d: Dict = {}
for k, v in data.items():
if isinstance(v, list) or isinstance(v, set):
d[k] = tuple(v)
elif isinstance(v, dict):
d[k] = hash_dict(v)
else:
d[k] = v
return d
cleaned_dict = _clean_dict(data)
return hash(tuple(sorted(cleaned_dict.items()))) | 42b579151c90a42fadf2b53751978eec421ea03c | 3,654,569 |
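A small usage sketch for hash_dict, showing that lists and sets are normalised to tuples and that key order does not matter.

d1 = {"a": [1, 2], "b": {"c": [3]}}
d2 = {"b": {"c": (3,)}, "a": (1, 2)}   # same data once lists are converted to tuples
assert hash_dict(d1) == hash_dict(d2)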
def instrument_packages_ip_template(instrument, ip_version, template_name=None):
"""
Retrieves the specified instrument package template metadata
:param instrument: instrument used to make observation
:type instrument: str
    :param ip_version: instrument package version
    :type ip_version: float
    :param template_name: name of a single template to return; if omitted, metadata
        for all templates in the instrument package is returned
    :type template_name: str
:rtype: InstrumentPackage
"""
# if connexion.request.is_json:
# instrument = InstrumentEnum.from_dict(connexion.request.get_json())
if template_name:
return {template_name: get_template_metadata(template_name, ip_version)}
query = {"instrument": instrument.upper(), "version": ip_version}
fields = {"template_names": 1, "_id": 0}
templates = utils.get_fields_by_query(query, fields, 'ipCollect')
metadata = {}
for template_name in templates["template_names"]:
metadata[template_name] = get_template_metadata(template_name, ip_version)
return metadata | 46d3cd57e05a64c03411c31d2b18ca47f670036d | 3,654,571 |
from rdflib import Graph, Literal, URIRef
from rdflib.namespace import DC, RDF
def add_feature_metadata(id, description, type):
"""Generate RDF metadata for a feature
:param id: if used to identify the feature
:param description: feature description
:param type: feature type
:return: rdflib graph after loading the feature
"""
g = Graph()
feature_uri = URIRef(OPENPREDICT_NAMESPACE + 'feature/' + id)
g.add((feature_uri, RDF.type, MLS['Feature']))
g.add((feature_uri, DC.identifier, Literal(id)))
g.add((feature_uri, DC.description, Literal(description)))
g.add((feature_uri, OPENPREDICT['embedding_type'], Literal(type)))
insert_graph_in_sparql_endpoint(g)
return g | 0d4987807b3ed97baa50f8b14c588ef162b5c8ac | 3,654,572 |
import copy
def sink(input_flow_direction_raster):
"""
Creates a raster layer identifying all sinks or areas of internal drainage.
The value type for the Sink function output raster layer is floating point.
For more information, see
https://pro.arcgis.com/en/pro-app/help/data/imagery/sink-function.htm
Parameters
----------
:param input_flow_direction_raster: The input raster that shows the direction
of flow out of each cell.
The flow direction raster can be created by
running the Flow Direction function.
:return: output raster with function applied
"""
layer, input_flow_direction_raster, raster_ra = _raster_input(input_flow_direction_raster)
template_dict = {
"rasterFunction" : "GPAdapter",
"rasterFunctionArguments" : {
"toolName" : "Sink_sa",
"PrimaryInputParameterName" : "in_flow_direction_raster",
"OutputRasterParameterName" : "out_raster",
"in_flow_direction_raster" : input_flow_direction_raster
}
}
function_chain_ra = copy.deepcopy(template_dict)
function_chain_ra["rasterFunctionArguments"]["in_flow_direction_raster"] = raster_ra
return _gbl_clone_layer(layer, template_dict, function_chain_ra) | 6d1b22dacd48a0939b7822d62a4867b2b7574c42 | 3,654,573 |
def bad_multi_examples_per_input_estimator_out_of_range_input_refs(
export_path, eval_export_path):
"""Like the above (good) estimator, but the input_refs is out of range."""
estimator = tf.estimator.Estimator(model_fn=_model_fn)
estimator.train(input_fn=_train_input_fn, steps=1)
return util.export_model_and_eval_model(
estimator=estimator,
serving_input_receiver_fn=_serving_input_receiver_fn,
eval_input_receiver_fn=(
_bad_eval_input_receiver_fn_out_of_range_input_refs),
export_path=export_path,
eval_export_path=eval_export_path) | 539ec039451c53db72cb676881f48fbe45874dfa | 3,654,574 |
def vector_to_diagonal(v):
"""Converts a vector to a diagonal matrix with vector elements
as the diagonal elements of the matrix"""
diag_matrix = [[0 for i in range(len(v))] for j in range(len(v))]
for i in range(len(v)):
diag_matrix[i][i] = v[i]
return diag_matrix | 6cbaf54a083633a47af92acc7f69421ed68a1c0b | 3,654,575 |
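Usage sketch for vector_to_diagonal.

vector_to_diagonal([1, 2, 3])
# [[1, 0, 0], [0, 2, 0], [0, 0, 3]]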
from typing import Union
from pathlib import Path
from typing import List
def _get_filenames(path: Union[str, Path], media_type: MediaType) -> List[str]:
"""
Get filenames from a directory or a path to a file.
:param path: Path to the file or to the location that contains files.
:param media_type: Type of the media (image or video)
:example:
>>> path = "../images"
>>> _get_filenames(path, media_type=MediaType.image)
['images/4.jpeg', 'images/1.jpeg', 'images/5.jpeg', 'images/3.jpeg', 'images/2.jpeg']
"""
extensions = _get_extensions(media_type)
filenames: List[str] = []
if media_type == MediaType.camera:
raise ValueError(
"Cannot get filenames for camera. Only image and video files are supported."
)
if isinstance(path, str):
path = Path(path)
if path.is_file():
if _is_file_with_supported_extensions(path, extensions):
filenames = [path.as_posix()]
else:
raise ValueError("Extension not supported for media type")
if path.is_dir():
for filename in path.rglob("*"):
if _is_file_with_supported_extensions(filename, extensions):
filenames.append(filename.as_posix())
filenames = natsorted(filenames) # type: ignore[assignment]
if len(filenames) == 0:
raise FileNotFoundError(f"No {media_type.name} file found in {path}!")
return filenames | 953bcfce17c6db45772a8eac8890fa161c128322 | 3,654,576 |
from venusian import attach
def method(method_class):
"""Decorator to use to mark an API method.
When invoking L{Registry.scan} the classes marked with this decorator
will be added to the registry.
@param method_class: The L{Method} class to register.
"""
def callback(scanner, name, method_class):
if method_class.actions is not None:
actions = method_class.actions
else:
actions = [name]
if method_class.versions is not None:
versions = method_class.versions
else:
versions = [None]
for action in actions:
for version in versions:
scanner.registry.add(method_class,
action=action,
version=version)
attach(method_class, callback, category="method")
return method_class | 4e40d265a4a5767686f0e37b4d1adf681ce36722 | 3,654,577 |
def generic_validator(check, error_message):
"""
Validator factory
>>> v = generic_validator(is_int, "invalid int")
>>> v(6)
6
>>> v("g")
Traceback (most recent call last):
...
ValidationError: [u'invalid int']
"""
# Validator closure
def inner_validator(value, *args, **kwargs):
if not check(value):
raise ValidationError(error_message)
return value
return inner_validator | 21134ecee1d8c23b10e94181c0c1aa602ce4b76e | 3,654,578 |
def get_molec_shape(mol, conf, confId, vdwScale=1.0,
boxMargin=2.0, spacing=0.2):
"""
Get the shape of a conformer of a molecule as a grid
representation.
"""
box = Chem.ComputeConfBox(conf)
sideLen = (box[1].x-box[0].x + 2*boxMargin,
box[1].y-box[0].y + 2*boxMargin,
box[1].z-box[0].z + 2*boxMargin)
shape = rdGeometry.UniformGrid3D(2*sideLen[0],
2*sideLen[1],
2*sideLen[2],
spacing=spacing)
Chem.EncodeShape(
mol,
shape,
confId=confId,
ignoreHs=False,
vdwScale=vdwScale
)
return box, sideLen, shape | 6a7b404224a116a52d70f7ab14d4301215c1700f | 3,654,579 |
import math

import tensorflow as tf  # written against the TF1 graph API (tf.placeholder); under TF2 use tf.compat.v1
def autoencoder(dimensions=[784, 512, 256, 64]):
"""Build a deep denoising autoencoder w/ tied weights.
Parameters
----------
dimensions : list, optional
The number of neurons for each layer of the autoencoder.
Returns
-------
x : Tensor
Input placeholder to the network
z : Tensor
Inner-most latent representation
y : Tensor
Output reconstruction of the input
cost : Tensor
Overall cost to use for training
"""
# input to the network
x = tf.placeholder(tf.float32, [None, dimensions[0]], name='x')
# Probability that we will corrupt input.
# This is the essence of the denoising autoencoder, and is pretty
# basic. We'll feed forward a noisy input, allowing our network
# to generalize better, possibly, to occlusions of what we're
# really interested in. But to measure accuracy, we'll still
# enforce a training signal which measures the original image's
# reconstruction cost.
#
# We'll change this to 1 during training
# but when we're ready for testing/production ready environments,
# we'll put it back to 0.
corrupt_prob = tf.placeholder(tf.float32, [1])
current_input = corrupt(x) * corrupt_prob + x * (1 - corrupt_prob)
# Build the encoder
encoder = []
for layer_i, n_output in enumerate(dimensions[1:]):
n_input = int(current_input.get_shape()[1])
W = tf.Variable(
tf.random_uniform([n_input, n_output],
-1.0 / math.sqrt(n_input),
1.0 / math.sqrt(n_input)))
b = tf.Variable(tf.zeros([n_output]))
encoder.append(W)
output = tf.nn.tanh(tf.matmul(current_input, W) + b)
current_input = output
# latent representation
z = current_input
encoder.reverse()
# Build the decoder using the same weights
for layer_i, n_output in enumerate(dimensions[:-1][::-1]):
W = tf.transpose(encoder[layer_i])
b = tf.Variable(tf.zeros([n_output]))
output = tf.nn.tanh(tf.matmul(current_input, W) + b)
current_input = output
# now have the reconstruction through the network
y = current_input
# cost function measures pixel-wise difference
cost = tf.sqrt(tf.reduce_mean(tf.square(y - x)))
return {'x': x, 'z': z, 'y': y,
'corrupt_prob': corrupt_prob,
'cost': cost} | d9cc8b6f2c8e7df0bc4fb580e1de20dc57f93c7a | 3,654,580 |
def _asymptotic_expansion_of_normalized_black_call(h, t):
"""
Asymptotic expansion of
b = Φ(h+t)·exp(x/2) - Φ(h-t)·exp(-x/2)
with
h = x/s and t = s/2
which makes
b = Φ(h+t)·exp(h·t) - Φ(h-t)·exp(-h·t)
exp(-(h²+t²)/2)
= --------------- · [ Y(h+t) - Y(h-t) ]
√(2π)
with
Y(z) := Φ(z)/φ(z)
for large negative (t-|h|) by the aid of Abramowitz & Stegun (26.2.12) where Φ(z) = φ(z)/|z|·[1-1/z^2+...].
We define
r
A(h,t) := --- · [ Y(h+t) - Y(h-t) ]
t
with r := (h+t)·(h-t) and give an expansion for A(h,t) in q:=(h/r)² expressed in terms of e:=(t/h)² .
:param h:
:type h: float
:param t:
:type t: float
:return:
:rtype: float
"""
e = (t / h) * (t / h)
r = ((h + t) * (h - t))
q = (h / r) * (h / r)
# 17th order asymptotic expansion of A(h,t) in q, sufficient for Φ(h) [and thus y(h)] to have relative accuracy of 1.64E-16 for h <= η with η:=-10.
asymptotic_expansion_sum = (2.0 + q * (-6.0E0 - 2.0 * e + 3.0 * q * (1.0E1 + e * (2.0E1 + 2.0 * e) + 5.0 * q * (
-1.4E1 + e * (-7.0E1 + e * (-4.2E1 - 2.0 * e)) + 7.0 * q * (
1.8E1 + e * (1.68E2 + e * (2.52E2 + e * (7.2E1 + 2.0 * e))) + 9.0 * q * (
-2.2E1 + e * (-3.3E2 + e * (-9.24E2 + e * (-6.6E2 + e * (-1.1E2 - 2.0 * e)))) + 1.1E1 * q * (
2.6E1 + e * (5.72E2 + e * (
2.574E3 + e * (3.432E3 + e * (1.43E3 + e * (1.56E2 + 2.0 * e))))) + 1.3E1 * q * (
-3.0E1 + e * (-9.1E2 + e * (-6.006E3 + e * (-1.287E4 + e * (
-1.001E4 + e * (-2.73E3 + e * (-2.1E2 - 2.0 * e)))))) + 1.5E1 * q * (
3.4E1 + e * (1.36E3 + e * (1.2376E4 + e * (3.8896E4 + e * (
4.862E4 + e * (2.4752E4 + e * (
4.76E3 + e * (2.72E2 + 2.0 * e))))))) + 1.7E1 * q * (
-3.8E1 + e * (-1.938E3 + e * (-2.3256E4 + e * (
-1.00776E5 + e * (-1.84756E5 + e * (
-1.51164E5 + e * (-5.4264E4 + e * (
-7.752E3 + e * (
-3.42E2 - 2.0 * e)))))))) + 1.9E1 * q * (
4.2E1 + e * (2.66E3 + e * (4.0698E4 + e * (
2.3256E5 + e * (5.8786E5 + e * (
7.05432E5 + e * (4.0698E5 + e * (
1.08528E5 + e * (1.197E4 + e * (
4.2E2 + 2.0 * e))))))))) + 2.1E1 * q * (
-4.6E1 + e * (-3.542E3 + e * (
-6.7298E4 + e * (
-4.90314E5 + e * (
-1.63438E6 + e * (
-2.704156E6 + e * (
-2.288132E6 + e * (
-9.80628E5 + e * (
-2.01894E5 + e * (
-1.771E4 + e * (
-5.06E2 - 2.0 * e)))))))))) + 2.3E1 * q * (
5.0E1 + e * (
4.6E3 + e * (
1.0626E5 + e * (
9.614E5 + e * (
4.08595E6 + e * (
8.9148E6 + e * (
1.04006E7 + e * (
6.53752E6 + e * (
2.16315E6 + e * (
3.542E5 + e * (
2.53E4 + e * (
6.0E2 + 2.0 * e))))))))))) + 2.5E1 * q * (
-5.4E1 + e * (
-5.85E3 + e * (
-1.6146E5 + e * (
-1.77606E6 + e * (
-9.37365E6 + e * (
-2.607579E7 + e * (
-4.01166E7 + e * (
-3.476772E7 + e * (
-1.687257E7 + e * (
-4.44015E6 + e * (
-5.9202E5 + e * (
-3.51E4 + e * (
-7.02E2 - 2.0 * e)))))))))))) + 2.7E1 * q * (
5.8E1 + e * (
7.308E3 + e * (
2.3751E5 + e * (
3.12156E6 + e * (
2.003001E7 + e * (
6.919458E7 + e * (
1.3572783E8 + e * (
1.5511752E8 + e * (
1.0379187E8 + e * (
4.006002E7 + e * (
8.58429E6 + e * (
9.5004E5 + e * (
4.7502E4 + e * (
8.12E2 + 2.0 * e))))))))))))) + 2.9E1 * q * (
-6.2E1 + e * (
-8.99E3 + e * (
-3.39822E5 + e * (
-5.25915E6 + e * (
-4.032015E7 + e * (
-1.6934463E8 + e * (
-4.1250615E8 + e * (
-6.0108039E8 + e * (
-5.3036505E8 + e * (
-2.8224105E8 + e * (
-8.870433E7 + e * (
-1.577745E7 + e * (
-1.472562E6 + e * (
-6.293E4 + e * (
-9.3E2 - 2.0 * e)))))))))))))) + 3.1E1 * q * (
6.6E1 + e * (
1.0912E4 + e * (
4.74672E5 + e * (
8.544096E6 + e * (
7.71342E7 + e * (
3.8707344E8 + e * (
1.14633288E9 + e * (
2.07431664E9 + e * (
2.33360622E9 + e * (
1.6376184E9 + e * (
7.0963464E8 + e * (
1.8512208E8 + e * (
2.7768312E7 + e * (
2.215136E6 + e * (
8.184E4 + e * (
1.056E3 + 2.0 * e))))))))))))))) + 3.3E1 * (
-7.0E1 + e * (
-1.309E4 + e * (
-6.49264E5 + e * (
-1.344904E7 + e * (
-1.4121492E8 + e * (
-8.344518E8 + e * (
-2.9526756E9 + e * (
-6.49588632E9 + e * (
-9.0751353E9 + e * (
-8.1198579E9 + e * (
-4.6399188E9 + e * (
-1.6689036E9 + e * (
-3.67158792E8 + e * (
-4.707164E7 + e * (
-3.24632E6 + e * (
-1.0472E5 + e * (
-1.19E3 - 2.0 * e))))))))))))))))) * q)))))))))))))))))
b = ONE_OVER_SQRT_TWO_PI * np.exp((-0.5 * (h * h + t * t))) * (t / r) * asymptotic_expansion_sum
return np.abs(np.maximum(b, 0)) | 9985b36e7f0dec1877d275a23ae747d9a57c1163 | 3,654,581 |
def date_read(date_string, *, convert_to_current_timezone: bool = False):
"""Read the given date (if possible)."""
return date_parse(date_string, convert_to_current_timezone=convert_to_current_timezone) | 96f21f7fcae995a9a17f6008c8e5a4161ed971f2 | 3,654,582 |
from urllib.parse import quote
def encode_name(name):
"""
Encode a unicode as utf-8 and then url encode that
string. Use for entity titles in URLs.
"""
    return quote(name.encode('utf-8'), safe='') | 6e9d34516613ecdf0ce94fb9cfc594de7e76b72f | 3,654,583
def cmp_str(element1, element2):
    """
    Compare numbers in str format correctly.
    """
    def _cmp(a, b):
        # Python 3 replacement for the removed builtin cmp()
        return (a > b) - (a < b)
    try:
        return _cmp(int(element1), int(element2))
    except ValueError:
        return _cmp(element1, element2) | 7c8df75bc1b1ad3997db4a4d6f1b58a37c4e1dd7 | 3,654,584
def post(text, appid=2, touser=None, toparty=None):
"""
    Send a text message to users and/or parties via the WeChat Work (qyapi.weixin.qq.com) message API.
"""
    # print('=========', type(text))
    if isinstance(text, bytes):
        text = text.decode('utf8')
if not touser:
touser = []
if not toparty:
toparty = ['2']
url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token={access_token}'
url = url.format(access_token=get_access_token())
data = {"touser": "|".join(touser),
"toparty": "|".join(toparty),
"msgtype": "text",
"agentid": str(appid),
"text": {"content": text},
"safe": "0",
}
result = requests.post(url, data=json.dumps(data, ensure_ascii=False))
    print(result.text)
return result | b2a92a274007b0502431a856457a244c12b925a9 | 3,654,587 |
import six
import codecs
def hex_encrypt(msg):
"""Hex encrypts a message.
:param bytes msg: string message to be encrypted.
:return: string for encrypted version of msg in hex.
:rtype: bytes
"""
if not cipher:
return msg
if not isinstance(msg, six.binary_type):
raise ValueError('only bytes can be encrypted')
msg = cipher.encrypt(_pad(msg))
msg = codecs.encode(msg, 'hex')
return msg | c2d913d181b8ceb33b3e7d99fc5f21b025da58ea | 3,654,588 |
import regex
async def filter_by_game_stats(opsdroid, string, room, action):
"""Match incoming messages against the current games stats."""
if room not in STAT_REGEXES.keys():
gamestats = await get_stat_names(opsdroid, room)
if not gamestats:
return []
STAT_REGEXES[room] = {"set": regex.compile(f"(?:(?:{'|'.join(['!'+s for s in gamestats])}) {MODIFIER_REGEX})",
flags=regex.IGNORECASE),
"roll": regex.compile("|".join(gamestats), flags=regex.IGNORECASE)}
stats = STAT_REGEXES[room][action].findall(string)
return stats | 4971e5567c8a1b89aa47fdaab2e42e51620f475b | 3,654,590 |
def password_provider():
"""
    Provide the parameter tuples (values 0 through 4) for the full password check test.
"""
return [(n,) for n in range(5)] | afdb188844e4b0979528b290477130313679e4df | 3,654,591 |
def make_combiparameter(*args, **kwargs):
"""
Make a combined qcodes parameter.
Args:
*args : list of gates or parameters
(e.g. make_combiparameter("A1", "A3", station.gates.B1 ))
"""
station = qc.Station.default
parameters = []
for i in args:
if type(i) == str:
parameters.append(getattr(station.gates, i))
else:
parameters.append(i)
label = ""
for i in parameters:
label += i.label + " "
    name = kwargs.get('name', 'combi_par')
return combi_par(parameters, label, name) | 6482187dc463c67e322a281181ba827eb39eb28d | 3,654,592 |
def get_delta_fmt(delta):
"""arbitrary colour formatting of rank delta
more red for bigger losses, more green for bigger gains
"""
col = (0, 0, 0, 255)
n = abs(delta)
s = delta
if delta < 0:
sat = min(n/200 + 0.2, 1)
r, g, b = hsv_to_rgb(0, sat, 1)
col = (r, g, b, 1)
else:
s = "+"+str(n)
sat = min(n/100 + 0.2, 1)
r, g, b = hsv_to_rgb(1/3, sat, 1)
col = (r, g, b, 1)
return "(" + str(s) + ")", col | a7860df4f19632c9623c39c38ac70a76f405ae56 | 3,654,593 |
def fit_pk_parms_1d(p0, x, f, pktype='pvoigt'):
"""
Performs least squares fit to find parameters for 1d analytic functions fit
to diffraction data
Required Arguments:
p0 -- (m) ndarray containing initial guesses for parameters
for the input peaktype
x -- (n) ndarray of coordinate positions
f -- (n) ndarray of intensity measurements at coordinate positions x
pktype -- string, type of analytic function that will be used to
fit the data,
        current options are "gaussian", "lorentzian", "pvoigt" (pseudo-Voigt),
        "split_pvoigt" (split pseudo-Voigt), "tanh_stepdown", and "dcs_pinkbeam"
Outputs:
p -- (m) ndarray containing fit parameters for the input peaktype
(see peak function help for what each parameters corresponds to)
Notes:
1. Currently no checks are in place to make sure that the guess of
parameters has a consistent number of parameters with the requested
peak type
"""
weight = np.max(f)*10. # hard coded should be changed
fitArgs = (x, f, pktype)
if pktype == 'gaussian':
p, outflag = optimize.leastsq(
fit_pk_obj_1d, p0,
args=fitArgs, Dfun=eval_pk_deriv_1d,
ftol=ftol, xtol=xtol
)
elif pktype == 'lorentzian':
p, outflag = optimize.leastsq(
fit_pk_obj_1d, p0,
args=fitArgs, Dfun=eval_pk_deriv_1d,
ftol=ftol, xtol=xtol
)
elif pktype == 'pvoigt':
lb = [p0[0]*0.5, np.min(x), 0., 0., 0., None]
ub = [p0[0]*2.0, np.max(x), 4.*p0[2], 1., 2.*p0[4], None]
fitArgs = (x, f, pktype, weight, lb, ub)
p, outflag = optimize.leastsq(
fit_pk_obj_1d_bnded, p0,
args=fitArgs,
ftol=ftol, xtol=xtol
)
elif pktype == 'split_pvoigt':
lb = [p0[0]*0.5, np.min(x), 0., 0., 0., 0., 0., None]
ub = [p0[0]*2.0, np.max(x), 4.*p0[2], 4.*p0[2], 1., 1., 2.*p0[4], None]
fitArgs = (x, f, pktype, weight, lb, ub)
p, outflag = optimize.leastsq(
fit_pk_obj_1d_bnded, p0,
args=fitArgs,
ftol=ftol, xtol=xtol
)
elif pktype == 'tanh_stepdown':
p, outflag = optimize.leastsq(
fit_pk_obj_1d, p0,
args=fitArgs,
ftol=ftol, xtol=xtol)
elif pktype == 'dcs_pinkbeam':
lb = np.array([0.0, x.min(), -100., -100.,
-100., -100., 0., 0.,
-np.inf, -np.inf, -np.inf])
ub = np.array([np.inf, x.max(), 100., 100.,
100., 100., 10., 10.,
np.inf, np.inf, np.inf])
res = optimize.least_squares(
fit_pk_obj_1d, p0,
jac='2-point',
bounds=(lb, ub),
method='trf',
args=fitArgs,
ftol=ftol,
xtol=xtol)
p = res['x']
outflag = res['success']
else:
p = p0
print('non-valid option, returning guess')
if np.any(np.isnan(p)):
p = p0
print('failed fitting, returning guess')
return p | 52dbff47fd8ad6f7727b0241bba48d2b10393a18 | 3,654,596 |
def tract_segmentation_single_example_lap (kdt_T_A, prototypes_T_A,sid, num_NN,T_A ):
""" step 1: tract segmentation from a single example using Jonker-Volgenant algorithm (LAPJV)
"""
E_t_filename= 'data/example/'+ str(sid) +'_'+str(tract_name)+'.trk'
print("Loading Example tract: %s" % E_t_filename)
E_t, hdr= load(E_t_filename, threshold_short_streamlines=threshold_short_streamlines)
dm_E_t= dissimilarity(E_t, prototypes_T_A,bundles_distances_mam)
    #compute the NN of the example tract in order to construct the cost matrix
NN_E_t_NN_Idx= NN (kdt_T_A, dm_E_t,num_NN)
print("Computing the cost matrix with mam distance (%s x %s) for RLAP " % (len(E_t),
len( NN_E_t_NN_Idx)))
cost_matrix = bundles_distances_mam_smarter_faster(E_t,
T_A[NN_E_t_NN_Idx])
print("Computing optimal assignmnet with LAPJV")
assignment = LinearAssignment(cost_matrix).solution
min_cost_values= cost_matrix[np.arange(len(cost_matrix)), assignment]
return NN_E_t_NN_Idx[assignment], min_cost_values, len(E_t) | cc14e598f359fc9b92995bdc3a6a98192333b800 | 3,654,599 |
def url(method):
"""对于每一个URL的请求访问装饰器,在出错时返回对应的信息"""
@wraps(method)
def error_handler(*args, **kwargs):
try:
return success(method(*args, **kwargs))
except RequestError as r:
current_app.logger.exception(r)
            # Return the error string defined by the corresponding exception class
return failed(reason=r.err_num(), message=r.err_msg())
except Exception as e:
current_app.logger.exception(e)
return failed()
return error_handler | cb2c36981372738b6b708d4e28566d4bb8ffcd90 | 3,654,600 |
def is_abbreviation(sentence):
"""
Evaluate a word to be an abbreviation if the immediate word before the
period contains a capital letter and not a single word sentence.
"""
sentence_split = sentence.split(" ")
if len(sentence_split) == 1:
return False
elif len(sentence_split[-1]) <= 3 and \
any(x.isupper() for x in sentence_split[-1]):
return True
else:
return False | a6f6ceae5b3b9adb7817a913e80a6af86b6d27d5 | 3,654,601 |
def compose_redis_key(vim_name, identifier, identifier_type="vdu"):
"""Compose the key for redis given vim name and vdu uuid
Args:
vim_name (str): The VIM name
identifier (str): The VDU or VNF uuid (NFVI based)
identifier_type (str): the identifier type. Default type is vdu. Also vnf is supported.
Returns:
str: the key for redis
"""
if identifier_type == "vnf":
return "{}:vnf#{}".format(vim_name.lower(), identifier)
else:
return "{}:{}".format(vim_name.lower(), identifier) | e9a03cf9ff704fea8b9cdf75c59695568e366649 | 3,654,602 |
def calGridID(locs, id, SPLIT = 0.0005):
"""
    Recover the latitude/longitude of a grid cell centre from its city grid ID.
    :param locs: dict with the grid bounds ('west', 'east', 'south')
    :param id: grid cell index
    :param SPLIT: grid cell size in degrees (default 0.0005)
"""
centerincrement = SPLIT/2.0
LNGNUM = int((locs['east'] - locs['west']) / SPLIT + 1)
latind = int(id / LNGNUM)
lngind = id - latind * LNGNUM
lat = (locs['south'] + latind * SPLIT)
lng = (locs['west'] + lngind * SPLIT)
lngcen = (lng + centerincrement)
latcen = (lat + centerincrement)
return "%.3f,%.3f" % (latcen, lngcen)
# {
# 'lat': latcen,
# 'lng': lngcen
# } | 8df119ff82bc1d3c14dbdfe358af6d956d6a52a2 | 3,654,603 |
def linear(x, *p):
"""[summary]
Arguments:
x {[type]} -- [description]
Returns:
[type] -- [description]
"""
return p[0] * x + p[1] | 07ef5fc7c5e78148528cccd09fe14c37cad22ead | 3,654,604 |
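Usage sketch for linear; the variadic *p signature is the model-function form that scipy.optimize.curve_fit accepts when an explicit p0 is supplied.

linear(3.0, 2.0, 1.0)   # 7.0 (slope 2, intercept 1)
# popt, pcov = scipy.optimize.curve_fit(linear, xdata, ydata, p0=[1.0, 0.0])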
def convert_price_text(t):
"""
convert "$175/month' to 175
:param t:
:return: price, unit (i.e. 175, 'month')
"""
tok = t.split('$')[1]
if '/' in tok:
price, unit = tok.split('/')
else:
price = tok
unit = None
return float(price.strip().strip('$').replace(',', '')), unit | b42d26dcd4eb1b2c2f8c5a63ddc9d48469e30a52 | 3,654,605 |
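Usage sketch for convert_price_text.

convert_price_text("$175/month")   # (175.0, 'month')
convert_price_text("$1,200")       # (1200.0, None)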
async def async_setup(hass, config):
"""Set up the WWLLN component."""
if DOMAIN not in config:
return True
conf = config[DOMAIN]
latitude = conf.get(CONF_LATITUDE, hass.config.latitude)
longitude = conf.get(CONF_LONGITUDE, hass.config.longitude)
identifier = '{0}, {1}'.format(latitude, longitude)
if identifier in configured_instances(hass):
return True
if hass.config.units.name == CONF_UNIT_SYSTEM_IMPERIAL:
unit_system = CONF_UNIT_SYSTEM_IMPERIAL
else:
unit_system = CONF_UNIT_SYSTEM_METRIC
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={'source': SOURCE_IMPORT},
data={
CONF_LATITUDE: latitude,
CONF_LONGITUDE: longitude,
CONF_RADIUS: conf[CONF_RADIUS],
CONF_WINDOW: conf[CONF_WINDOW],
CONF_UNIT_SYSTEM: unit_system,
}))
return True | 3f0a4f5a017340780c8c1122425804e7862c3d0f | 3,654,606 |
from typing import Any
def __are_nearly_overlapped(
plane_predicted: NDArray[Any, np.int32],
plane_gt: NDArray[Any, np.int32],
required_overlap: np.float64,
) -> (bool, bool):
"""
Calculate if planes are overlapped enough (required_overlap %) to be used for PP-PR metric
    :param required_overlap: overlap threshold that is checked to decide whether the planes overlap
:param plane_predicted: predicted segmentation
:param plane_gt: ground truth segmentation
:return: true if planes are overlapping by required_overlap % or more, false otherwise
"""
intersection = np.intersect1d(plane_predicted, plane_gt)
return (
intersection.size / plane_predicted.size >= required_overlap
and intersection.size / plane_gt.size >= required_overlap,
intersection.size > 0,
) | 7b686e7bb4b18e4e2e116cdfd14878acbcc4c92d | 3,654,607 |
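A worked example for __are_nearly_overlapped with two planes that share two of their four point indices.

predicted = np.array([1, 2, 3, 4], dtype=np.int32)
gt = np.array([3, 4, 5, 6], dtype=np.int32)
__are_nearly_overlapped(predicted, gt, np.float64(0.5))   # (True, True): the shared points cover 50% of each plane
__are_nearly_overlapped(predicted, gt, np.float64(0.75))  # (False, True): the planes intersect, but below the threshold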
def _get_prob_k_given_L(B, N=None):
"""
Helper function.
"""
if N is None:
N = int(B[0, 1])
return B / N | be1d0848b148b3413aaee2c5549bd6063e1f2d33 | 3,654,608 |
from base64 import encodebytes
def base64_encode(s):
"""unicode-safe base64
base64 API only talks bytes
"""
if not isinstance(s, bytes):
s = s.encode('ascii', 'replace')
encoded = encodebytes(s)
return encoded.decode('ascii') | 6ef0722014aa56e22de102aa0ce8286416640f86 | 3,654,609 |
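Usage sketch for base64_encode; note that encodebytes appends a trailing newline.

base64_encode("hello")    # 'aGVsbG8=\n'
base64_encode(b"hello")   # same result; bytes are used as-is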
def _unpack_tableswitch(bc, offset):
"""
function for unpacking the tableswitch op arguments
"""
jump = (offset % 4)
if jump:
offset += (4 - jump)
(default, low, high), offset = _unpack(_struct_iii, bc, offset)
joffs = list()
for _index in xrange((high - low) + 1):
j, offset = _unpack(_struct_i, bc, offset)
joffs.append(j)
return (default, low, high, joffs), offset | af08ab85def5bf132227f20da8cb6032e2a9dff1 | 3,654,610 |
def force_orders(self, **kwargs):
"""User's Force Orders (USER_DATA)
GET /fapi/v1/forceOrders
https://binance-docs.github.io/apidocs/futures/en/#user-39-s-force-orders-user_data
Keyword Args:
symbol (str, optional)
autoCloseType (str, optional): "LIQUIDATION" for liquidation orders, "ADL" for ADL orders.
startTime (int, optional)
endTime (int, optional)
limit (int, optional): Default 50; max 100.
recvWindow (int, optional)
Notes:
If "autoCloseType" is not sent, orders with both of the types will be returned
If "startTime" is not sent, data within 7 days before "endTime" can be queried
"""
payload = {**kwargs}
url_path = "/fapi/v1/forceOrders"
return self.sign_request("GET", url_path, payload) | 6e848820e17e54df0f275ec4087d9c609d4e08fa | 3,654,611 |
def prosp_power_analysis_norm(d, sigma, pow_lev, alpha, direction):
"""
This function conducts pre-testing power analysis and
calculates the minimally required sample size for a normal sample.
@param d: difference between the mean differences under H1 and H0
@param sigma: standard deviation
@param pow_lev: power level
@param alpha: significance level
@param direction: direction of the test, two-sided or one-sided
@return: required minimal sample size
"""
# first calculates for a z test
n_z = np.ceil(z_test_sample_size(d, sigma, alpha, pow_lev, direction))
# first iteration for t test
n_t_1 = np.ceil(t_test_sample_size(d, sigma, n_z-1, alpha, pow_lev, direction))
# second iteration for t test
n_t_2 = np.ceil(t_test_sample_size(d, sigma, n_t_1-1, alpha, pow_lev, direction))
    return np.ceil(n_t_2) | 319daf6434b774dcf3bf3f6f936a566e1640c175 | 3,654,612
def decision_tree_construction(examples, target_attribute, attributes, depth):
"""
:param examples: The data we will use to train the tree(x)
:param target_attribute: The label we want to classify(y)
:param attributes: The number(index) of the labels/attributes of the data-set
:return: The tree corresponding to the given data
"""
# This is the first base condition of the algorithm. It is used if the attributes variable is empty, then we return
# the single-node tree Root, with label = most common value of target_attribute in examples
# The base condition for the recursion when we check if all the variables are same or not in the node and if they
# are same then we return that value as the node
if len(attributes) == 0 or len(np.unique(target_attribute)) == 1:
unique_value_of_attribute, counts_of_attribute = np.unique(target_attribute, return_counts=True)
try:
if counts_of_attribute[0] < counts_of_attribute[1]:
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
except IndexError:
i = 0
if unique_value_of_attribute[0] == 1:
# More positive values
return 1, depth
elif unique_value_of_attribute[0] == 0:
# More negative values
return 0, depth
# This is the recursion part of the algorithm in which we try to find the sub-tree's by using recursion and
# information gain
else:
Information_Gain = Information_Gain_Heuristic(examples, attributes, target_attribute)
best_attribute_number = attributes[np.argmax(Information_Gain)]
# Since we now have the best_attribute(A in algorithm) we will create the root node of the tree/sub-tree with
# that and name the root as the best attribute among all Here we make the tree as a dictionary for testing
# purposes
tree = dict([(best_attribute_number, dict())])
if isinstance(tree, int):
# If the given value is a int value then it's definitely a leaf node and if it's a dictionary then its a
# node
tree[best_attribute_number]["type_of_node"] = "leaf"
tree[best_attribute_number]["depth"] = depth
unique_value_of_attribute, counts_of_attribute = np.unique(target_attribute, return_counts=True)
try:
if counts_of_attribute[0] < counts_of_attribute[1]:
counts_of_attribute = np.flipud(counts_of_attribute)
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
except IndexError:
# Here we can have an index error since in some case it may happen that the array has only one type
# of value and thus accessing the index [1] is not possible
i = 0
tree[best_attribute_number]["majority_target_attribute"] = unique_value_of_attribute[0]
tree[best_attribute_number]["best_attribute_number"] = best_attribute_number
else:
tree[best_attribute_number]["type_of_node"] = "node"
tree[best_attribute_number]["depth"] = depth
unique_value_of_attribute, counts_of_attribute = np.unique(target_attribute, return_counts=True)
try:
if counts_of_attribute[0] < counts_of_attribute[1]:
counts_of_attribute = np.flipud(counts_of_attribute)
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
except IndexError:
# Here we can have an index error since in some case it may happen that the array has only one type
# of value and thus accessing the index [1] is not possible
i = 0
tree[best_attribute_number]["majority_target_attribute"] = unique_value_of_attribute[0]
tree[best_attribute_number]["best_attribute_number"] = best_attribute_number
attributes.remove(best_attribute_number)
# Now we do the recursive algorithm which will be used to create the tree after the root node.
depth_of_node = []
for each_unique_value in np.unique(examples[best_attribute_number]):
# We use those values for which the examples[best_attribute_number] == each_unique_value
class1 = each_unique_value
new_target_attribute = pd.DataFrame(target_attribute)
total_data = pd.concat([examples, new_target_attribute], axis=1, sort=False)
# WE do this step so that we can pick the values which belong to the best_attribute = [0,1], i.e. We now
# want to divide our data so that the values for the best_attribute is divided among the branches. And
# thus we will have 4 arrays now, two for the data and two for target attribute.
new_data_after_partition = total_data.loc[total_data[best_attribute_number] == class1]
new_target_attribute, new_examples_after_partition = get_attributes_and_labels(new_data_after_partition)
# This is also a condition for our algorithm in which we check if the number of examples after the
# partition are positive or not. If the values are less than 1 then we return the most frequent value in
# the node
if len(new_examples_after_partition) == 0:
unique_value_of_attribute, counts_of_attribute = np.unique(target_attribute, return_counts=True)
try:
if counts_of_attribute[0] < counts_of_attribute[1]:
counts_of_attribute = np.flipud(counts_of_attribute)
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
except IndexError:
i = 0
if unique_value_of_attribute[0] == 1:
# More positive values
return 1, depth
elif unique_value_of_attribute[0] == 0:
# More negative values
return 0, depth
# This is the recursion step, in which we make new decision trees till the case when any of the base
# cases are true
new_sub_tree_after_partition, deptha = decision_tree_construction(new_examples_after_partition,
new_target_attribute, attributes,
depth + 1)
depth_of_node.append(deptha)
# Here we are adding the depth of the node so that we can do the depth based pruning
tree[best_attribute_number][each_unique_value] = new_sub_tree_after_partition
if isinstance(new_sub_tree_after_partition, int):
tree[best_attribute_number]["type_of_node"] = "leaf"
tree[best_attribute_number]["depth"] = depth
unique_value_of_attribute, counts_of_attribute = np.unique(target_attribute, return_counts=True)
try:
if counts_of_attribute[0] < counts_of_attribute[1]:
counts_of_attribute = np.flipud(counts_of_attribute)
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
except IndexError:
i = 0
tree[best_attribute_number]["majority_target_attribute"] = unique_value_of_attribute[0]
tree[best_attribute_number]["best_attribute_number"] = best_attribute_number
else:
tree[best_attribute_number]["type_of_node"] = "node"
tree[best_attribute_number]["depth"] = depth
unique_value_of_attribute, counts_of_attribute = np.unique(target_attribute, return_counts=True)
try:
if counts_of_attribute[0] < counts_of_attribute[1]:
counts_of_attribute = np.flipud(counts_of_attribute)
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
except IndexError:
i = 0
tree[best_attribute_number]["majority_target_attribute"] = unique_value_of_attribute[0]
tree[best_attribute_number]["best_attribute_number"] = best_attribute_number
return tree, max(depth_of_node) | c9529deb71d3c0a89bbae053aae07e587d277255 | 3,654,613 |
import numpy
def mass_centered(geo):
""" mass-centered geometry
"""
geo = translate(geo, numpy.negative(center_of_mass(geo)))
return geo | 1081141d77383f857f986031fa03510fd2608741 | 3,654,614 |
def binaryMatrix(l, value=PAD_token):
"""
    :param l: list of token-id sequences
    :param value: padding token id (defaults to PAD_token)
    :return: binary mask with the same shape as l, e.g. the sequence
             [3, 4, 5, 0, 0] maps to the mask row [1, 1, 1, 0, 0]
"""
m = []
for i, seq in enumerate(l):
m.append([])
for token in seq:
            if token == value:
m[i].append(0)
else:
m[i].append(1)
return m | 3c123b1ce8531bcde7c6673f8ca8a91f1300f0bb | 3,654,615 |
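Usage sketch for binaryMatrix, assuming the module-level PAD_token is 0.

binaryMatrix([[3, 4, 5, 0, 0], [7, 0, 0, 0, 0]])
# [[1, 1, 1, 0, 0], [1, 0, 0, 0, 0]]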
def load_map(mappath):
""" Attempt to load map with known loaders
"""
data = None
shirtloader = lambda path: fio.load_map(path)[0][0:3]
maploaders = [load_pfire_map, shirtloader]
for loader in maploaders:
try:
data = loader(mappath)
except (ValueError, OSError):
pass
if data is not None:
break
if data is None:
raise RuntimeError("Failed to load map \"{}\"".format(mappath))
return data | 2ab5c46e0b1ec0ed2e613b42c0553a1d6bcede36 | 3,654,616 |
def ifttt_account_options_topup_source():
""" Option values for topup source account selection"""
return ifttt_account_options(False, "Internal") | 83a0082ccc829c06c12fca2bb588db31468f51ef | 3,654,617 |
from bs4 import BeautifulSoup
def strip_classes(soup:BeautifulSoup, *args:str):
"""
Strip class from given tags in a BeautifulSoup object.
Args:
soup (BeautifulSoup): soup to clean
args ([str]): A list of tags to be unclassed
Returns:
soup (BeautifulSoup)
Modules:
bs4 (BeautifulSoup)
"""
if not args:
args = ['em', 'strong', 'sup']
# delete classes associated with selected tags:
for arg in args:
for tag in soup.find_all(arg):
if tag.has_attr('class'):
del tag.attrs['class']
return(soup) | c2195cd0eaf2cb3f741247b75411d252c7a85e8c | 3,654,618 |
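Usage sketch for strip_classes with the default tag list (em, strong, sup).

soup = BeautifulSoup('<p><em class="hl">one</em> <strong class="b">two</strong></p>', 'html.parser')
strip_classes(soup)
print(soup)   # <p><em>one</em> <strong>two</strong></p>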
import numpy as np
def take_measurement(n_grid: np.int, n_rays: np.int, r_theta: np.float64) -> (
np.ndarray, np.ndarray, np.ndarray, np.ndarray):
"""
Take a measurement with the tomograph from direction r_theta.
Arguments:
n_grid: number of cells of grid in each direction
n_rays: number of parallel rays
r_theta: direction of rays (in radians)
Return:
intensities: measured intensities for all <n_rays> rays of the measurement. intensities[n] contains the intensity for the n-th ray
ray_indices: indices of rays that intersect a cell
isect_indices: indices of intersected cells
lengths: lengths of segments in intersected cells
The tuple (ray_indices[n], isect_indices[n], lengths[n]) stores which ray has intersected which cell with which length. n runs from 0 to the amount of ray/cell intersections (-1) of this measurement.
Raised Exceptions:
-
Side Effects:
-
"""
# compute ray direction in Cartesian coordinates
cs = np.cos(r_theta)
sn = np.sin(r_theta)
r_dir = np.array([-cs, -sn])
# compute start positions for rays
r_pos = np.zeros((n_rays, 2))
for i, g in enumerate(np.linspace(-0.99, 0.99, n_rays)):
r_pos[i] = np.array([cs - sn * g, sn + cs * g])
else:
r_pos[0] = np.array([cs, sn])
# compute measures intensities for each ray
intensities = np.zeros(n_rays)
for i, rs in enumerate(r_pos):
intensities[i] = trace(rs, r_dir)
# take exponential fall off into account
intensities = np.log(1.0 / intensities)
# compute traversal distance in each grid cell
ray_indices, isect_indices, lengths = grid_intersect(n_grid, r_pos, r_dir)
return intensities, ray_indices, isect_indices, lengths | f0ffac9da088402cff126bab9ee880ff33c460f1 | 3,654,619 |
def chrom_karyo_sort(chroms):
"""
    :param chroms: iterable of (chromosome name, size) tuples
    :return: list of (name, size) tuples in karyotype order (numbered chromosomes first,
             then special chromosomes, then the remainder sorted by size)
"""
ordered = []
unordered = []
for cname, size in chroms:
try:
ord = int(cname.lower().strip('chr'))
ordered.append((cname, size, ord * 10))
except ValueError:
ord = check_special_chroms(cname)
if ord > 0:
ordered.append((cname, size, ord))
else:
unordered.append((cname, size, -1))
unordered = sorted(unordered, key=lambda x: x[1], reverse=True)
ordered = sorted(ordered, key=lambda x: x[2])
ordered.extend(unordered)
return [(t[0], t[1]) for t in ordered] | 4531be10ad0c51e0257089aabda778357b2d7950 | 3,654,620 |
from typing import List
def calibrate_stereo(observations_left: List, observations_right: List, detector: FiducialCalibrationDetector,
num_radial: int = 4, tangential: bool = False, zero_skew: bool = True) -> (StereoParameters, List):
"""
Calibrates a stereo camera using a Brown camera model
    :param observations_left: List of {"points":(boofcv detections),"width":(image width),"height":(image height)} for the left camera
    :param observations_right: matching observation list for the right camera
    :param detector: fiducial calibration detector that provides the target layout
:param num_radial:
:param tangential:
:param zero_skew:
:return:
"""
jlayout = detector.java_obj.getLayout(0) # Hard coded for a single target
jcalib_planar = gateway.jvm.boofcv.abst.geo.calibration.CalibrateStereoPlanar(jlayout)
jcalib_planar.configure(zero_skew, int(num_radial), tangential)
for idx in range(len(observations_left)):
jobs_left = convert_into_boof_calibration_observations(observations_left[idx])
jobs_right = convert_into_boof_calibration_observations(observations_right[idx])
jcalib_planar.addPair(jobs_left, jobs_right)
stereo_parameters = StereoParameters(jcalib_planar.process())
errors = []
for jerror in jcalib_planar.computeErrors():
errors.append({"mean": jerror.getMeanError(),
"max_error": jerror.getMaxError(),
"bias_x": jerror.getBiasX(), "bias_y": jerror.getBiasY()})
return (stereo_parameters, errors) | bf9ee5b369f8614728db0023674c85a958a2559f | 3,654,621 |
from inspect import isclass
from typing import Type
def register_producer_class(cls: Type[C]) -> Type[C]:
"""Registers the producer class and returns it unmodified."""
if not cls.TYPES:
raise ProducerInterfaceError(
f"Invalid producer. When defining producer, make sure to specify at least 1 type in the TYPES class variable."
)
for artifact_type in cls.ARTIFACT_TYPES:
if not (
isclass(artifact_type) and issubclass(artifact_type, BaseArtifact)
):
raise ProducerInterfaceError(
f"Associated artifact type {artifact_type} for producer is not a class or is not a subclass of BaseArtifact."
)
artifact_types = cls.ARTIFACT_TYPES or (BaseArtifact,)
for t in cls.TYPES:
if not isclass(t):
raise ProducerInterfaceError(
f"Associated type {t} for producer is not a class."
)
producer_registry.register_producer(
t,
cls,
)
type_registry.register_artifact_type(
t,
artifact_types,
)
return cls | 7155ddb85077e2774fcc20c2d80345bd52ee86b1 | 3,654,622 |