content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M) |
---|---|---|
import struct
def test_eap_proto_otp_errors(dev, apdev):
"""EAP-OTP local error cases"""
def otp_handler2(ctx, req):
logger.info("otp_handler2 - RX " + req.encode("hex"))
if 'num' not in ctx:
ctx['num'] = 0
ctx['num'] = ctx['num'] + 1
if 'id' not in ctx:
ctx['id'] = 1
ctx['id'] = (ctx['id'] + 1) % 256
idx = 0
idx += 1
if ctx['num'] == idx:
logger.info("Test: Challenge included")
return struct.pack(">BBHBB", EAP_CODE_REQUEST, ctx['id'],
4 + 1 + 1,
EAP_TYPE_OTP,
ord('A'))
return struct.pack(">BBH", EAP_CODE_FAILURE, ctx['id'], 4)
srv = start_radius_server(otp_handler2)
try:
hapd = start_ap(apdev[0])
with alloc_fail(dev[0], 1, "eap_msg_alloc;eap_otp_process"):
dev[0].connect("eap-test", key_mgmt="WPA-EAP", scan_freq="2412",
eap="OTP", identity="user", password="password",
wait_connect=False)
wait_fail_trigger(dev[0], "GET_ALLOC_FAIL")
dev[0].request("REMOVE_NETWORK all")
dev[0].wait_disconnected()
finally:
stop_radius_server(srv) | c32797121b695ad30f3cb3013a79c0e309d88715 | 3,654,500 |
import pandas as pd
def pivot_proportions(df, groups, responses, weights=1):
"""
Pivot data to show the breakdown of responses for each group.
Parameters:
df: a pandas DataFrame with data to be aggregated
groups: the name of the column containing the groups to partition by
    responses: the name of the column that contains responses to aggregate into proportions
weights: the statistical weighting associated with each response
Returns:
a pandas DataFrame containing the proportion of responses within each group
"""
pivot_data = df[[groups, responses]].assign(weights=weights)
pivoted_counts = pd.pivot_table(
pivot_data,
columns=groups,
index=responses,
aggfunc='sum'
)
pivoted_counts = pivoted_counts['weights'].sort_index(axis=1)
return (pivoted_counts / pivoted_counts.sum()).fillna(0) | 7bf8cdc199fe800cb1bb280ceb2ffdb489f0d342 | 3,654,501 |
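A minimal usage sketch for pivot_proportions; the survey data below is made up for illustration, and the function from the row above is assumed to be in scope.
import pandas as pd

# Toy survey: proportions of each answer within each region (columns sum to 1).
survey = pd.DataFrame({
    "region": ["north", "north", "south", "south", "south"],
    "answer": ["yes", "no", "yes", "yes", "no"],
})
print(pivot_proportions(survey, groups="region", responses="answer"))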
import numpy as np
def row_stack(a1, a2):
"""
Stacks data from subsequent sweeps, while padding "empty" columns from
subsequent sweeps.
Inputs
------
a1: np.array
destination array
a2: np.array
array which is added onto the first array
Returns
-------
out: np.array
stacked destination and additional array, with uniform shape
"""
[N1, M1] = a1.shape
[N2, M2] = a2.shape
if M1 > M2:
a2 = np.pad(a2, ((0, 0), (0, M1-M2)), mode='constant',
constant_values=-9999999)
    elif M2 > M1:
        a1 = np.pad(a1, ((0, 0), (0, M2-M1)), mode='constant',
                    constant_values=-9999999)
out = np.vstack((a1, a2))
out[out == -9999999] = np.nan
return out | 4e8961351283a1702bc25349f2523c068cfb5424 | 3,654,502 |
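A minimal usage sketch for row_stack; the two toy sweeps below are made up, and the (fixed) function from the row above is assumed to be in scope.
import numpy as np

a = np.arange(6, dtype=float).reshape(2, 3)   # 2 x 3 sweep
b = np.arange(10, dtype=float).reshape(2, 5)  # 2 x 5 sweep (wider)
stacked = row_stack(a, b)
print(stacked.shape)  # (4, 5): the narrower sweep is padded with NaN columns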
def globalPrediction(vid, category_names, vid_probs, predicted_labels):
"""
Get a matrix of probabilities over the classes for the c3d features of
a video. Generate the top 3 predictions from the prob matrix
"""
anno_list = []
# Idea 1 : To form the hist over the categories, each bin has sum of probs
vprobs_sum = vid_probs.sum(axis=0)
top_n = vprobs_sum.sort_values(ascending = False)[:3]
#counter = collections.Counter(predicted_labels)
#top_n = counter.most_common(3) # list of tuples
#assert len(top_n)==3
labels = top_n.index.tolist()
scores = top_n.values.tolist()
for idx,score in enumerate(scores):
anno_list.append({'score': score, 'label':labels[idx]})
#for (idx,score) in top_n:
# anno_list.append({'score': score, 'label':category_names[idx]})
# Idea 2 : Detect temporal continuity of category predicted. Longer the better
# Idea 3 : Count the number of highest votes for top category. (Worse than 1)
# If equal votes for >1 category then use Idea 1
# finds the max val index among the columns for each row and the freq of the
# occurrence of the column names (in decreasing order)
# labels = vid_probs.idxmax(axis=1).value_counts()[:3].index.tolist()
# scores = probs_sum[labels].tolist()
# for idx,score in enumerate(scores):
# anno_list.append({'score': score, 'label':labels[idx]})
return anno_list, vprobs_sum | 51676499cbf719874c49b89557d960ed8a136243 | 3,654,503 |
def GetApexServerStatus(api_key):
"""
    Get the status of the Apex Legends servers.
:param api_key: The API key to use.
Warning
You must put either a clickable link to "https://apexlegendsstatus.com" OR have a message such as "Data from apexlegendsstatus.com" when displaying data coming from this API. Your key may be suspended otherwise.
"""
url = 'https://api.mozambiquehe.re/servers'
try:
res = get_request(url, {'Authorization': api_key})
response = res[0]
if response.status_code == 200:
r = response.json()
res = ApexTrackerPy.Apexclass.A_Server_Data(
row_json=r,
elapsed_time=res[1],
Origin_login_EU_West=r["Origin_login"]["EU-West"],
Origin_login_EU_East=r["Origin_login"]["EU-East"],
Origin_login_US_West=r["Origin_login"]["US-West"],
Origin_login_US_East=r["Origin_login"]["US-East"],
Origin_login_US_Central=r["Origin_login"]["US-Central"],
Origin_login_Asia=r["Origin_login"]["Asia"],
Origin_login_SouthAmerica=r["Origin_login"]["SouthAmerica"],
EA_novafusion_EU_West=r["EA_novafusion"]["EU-West"],
EA_novafusion_EU_East=r["EA_novafusion"]["EU-East"],
EA_novafusion_US_West=r["EA_novafusion"]["US-West"],
EA_novafusion_US_East=r["EA_novafusion"]["US-East"],
EA_novafusion_US_Central=r["EA_novafusion"]["US-Central"],
EA_novafusion_Asia=r["EA_novafusion"]["Asia"],
EA_novafusion_SouthAmerica=r["EA_novafusion"]["SouthAmerica"],
EA_accounts_EU_West=r["EA_accounts"]["EU-West"],
EA_accounts_EU_East=r["EA_accounts"]["EU-East"],
EA_accounts_US_West=r["EA_accounts"]["US-West"],
EA_accounts_US_East=r["EA_accounts"]["US-East"],
EA_accounts_US_Central=r["EA_accounts"]["US-Central"],
EA_accounts_Asia=r["EA_accounts"]["Asia"],
EA_accounts_SouthAmerica=r["EA_accounts"]["SouthAmerica"],
ApexOauth_Crossplay_EU_West=r["ApexOauth_Crossplay"]["EU-West"],
ApexOauth_Crossplay_EU_East=r["ApexOauth_Crossplay"]["EU-East"],
ApexOauth_Crossplay_US_West=r["ApexOauth_Crossplay"]["US-West"],
ApexOauth_Crossplay_US_East=r["ApexOauth_Crossplay"]["US-East"],
ApexOauth_Crossplay_US_Central=r["ApexOauth_Crossplay"]["US-Central"],
ApexOauth_Crossplay_Asia=r["ApexOauth_Crossplay"]["Asia"],
ApexOauth_Crossplay_SouthAmerica=r["ApexOauth_Crossplay"]["SouthAmerica"],
CSServer_Playstation_Network=r["otherPlatforms"]["Playstation-Network"],
CSServer_Xbox_Live=r["otherPlatforms"]["Xbox-Live"],
)
return res
else:
raise Exception('HttpError!:The API returned status code '+str(response.status_code))
except Exception as e:
raise Exception('HttpError!:An error has occurred during the API call.\n'+str(e)) | 362ca4e68ffbf395f56ccb6aad65cc9d13ab4545 | 3,654,504 |
def construct_mdx(cube_name, rows, columns, contexts=None, suppress=None):
""" Method to construct MDX Query from
:param cube_name: Name of the Cube
:param rows: Dictionary of Dimension Names and Selections
:param columns: Dictionary of Dimension Names and Selections (Dimension-MDX, List of Elementnames, Subset, or None)
:param contexts: Dictionary of Dimension Names and Selections
:param suppress: "Both", "Rows", "Columns" or None
    :return: Generated MDX Query
"""
# MDX Skeleton
mdx_template = 'SELECT {}{} ON ROWS, {}{} ON COLUMNS FROM [{}] {}'
# Suppression
mdx_rows_suppress = 'NON EMPTY ' if (suppress in ['Rows', 'Both'] and rows) else ''
mdx_columns_suppress = 'NON EMPTY ' if (suppress in ['Columns', 'Both'] and columns) else ''
# Rows and Columns
mdx_rows = construct_mdx_axis(rows)
mdx_columns = construct_mdx_axis(columns)
# Context filter (where statement)
mdx_where = ''
if contexts:
mdx_where_parts = ['[{}].[{}]'.format(dim, elem) for dim, elem in contexts.items()]
mdx_where += "WHERE (" + ','.join(mdx_where_parts) + ")"
# Return Full MDX
return mdx_template.format(mdx_rows_suppress, mdx_rows, mdx_columns_suppress, mdx_columns, cube_name, mdx_where) | 117d554b71fcb5c065664e51a9064b2edb504ed6 | 3,654,505 |
def mock_train_model(spark_context, testserver):
"""Pre-condition: worker.update_one is assumed to be working."""
inq = Queue()
outq = Queue()
job = get_job()
job['urls'] = [testserver.url]
db = get_fake_mongo_client().ophicleide
db.models.insert_one(job)
inq.put(job)
update_model(spark_context, inq, outq, db, 'http://testurl')
return db, job['_id'] | eb862f8f600a6aa64cb65685f122dd577a6e51df | 3,654,506 |
def calc_number_of_children(*args):
"""
calc_number_of_children(loc, tif, dont_deref_ptr=False) -> int
Calculate max number of lines of a formatted c data, when expanded (
'PTV_EXPAND' ).
@param loc: location of the data ( ALOC_STATIC or ALOC_CUSTOM )
(C++: const argloc_t &)
@param tif: type info (C++: const tinfo_t &)
@param dont_deref_ptr: consider 'ea' as the ptr value (C++: bool)
"""
return _ida_typeinf.calc_number_of_children(*args) | cfc7427ec5ff4d0fc78d87d315460c62d130cd3d | 3,654,507 |
def _entity_namespace_key(entity, key):
"""Return an entry from an entity_namespace.
Raises :class:`_exc.InvalidRequestError` rather than attribute error
on not found.
"""
ns = entity.entity_namespace
try:
return getattr(ns, key)
except AttributeError as err:
util.raise_(
exc.InvalidRequestError(
'Entity namespace for "%s" has no property "%s"'
% (entity, key)
),
replace_context=err,
) | ffd063523a8011a8ee2dd3700920a0523465d6cc | 3,654,508 |
def get_messages(mtype, read=False, uid=None):
""" Returns query for messages. If `read` is True it only queries for unread messages """
query = Message.select().where(Message.mtype << mtype)
query = query.where(Message.receivedby == current_user.uid if not uid else uid)
if read:
query = query.where(Message.read.is_null(True))
return query | 7959a0510d8f6794ff40d8467d09b0833279be10 | 3,654,509 |
import numpy
def coords_to_indices(coords, top, left, csx, csy, shape, preserve_out_of_bounds=False):
"""
Convert coordinates to array indices using the given specs.
Coordinates outside of the shape are not returned.
:param coords: Tuple of coordinates in the form ([x...], [y...])
:param top: Top coordinate of array
:param left: Left coordinate of array
:param csx: Cell size in the x-direction
:param csy: Cell size in the y-direction
:param shape: Shape of array (for bounds)
:return: tuple of indices in the form ([i...], [j...])
"""
x, y = numpy.asarray(coords[0]), numpy.asarray(coords[1])
i = numpy.int64((top - y) / csy)
j = numpy.int64((x - left) / csx)
if preserve_out_of_bounds:
return i, j
else:
m = (i >= 0) & (j >= 0) & (i < shape[0]) & (j < shape[1])
return i[m], j[m] | 89b99ffc159c56855792d0daeb8bdb5a5d04ad9f | 3,654,510 |
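A minimal usage sketch for coords_to_indices; the grid spec (a 10 x 10 raster with 1.0 cells and top-left corner at (0, 10)) and the points are made up, and the function from the row above is assumed to be in scope.
xs, ys = [0.5, 4.5, 99.0], [9.5, 5.5, 9.5]   # the last point falls outside the grid
i, j = coords_to_indices((xs, ys), top=10.0, left=0.0, csx=1.0, csy=1.0, shape=(10, 10))
print(i, j)  # indices for the first two points only; the out-of-bounds one is dropped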
def sanitize_vcf_file(vcf_file, out_file, snp_log_file, sample_log_file, logging, min_count=1, max_missing=0.25,
max_alt_states=4, disruptive_threshold=1,window_size=30,max_snps=2):
"""
Filter a user provided vcf and write a filtered vcf file
Parameters
----------
vcf_file [str] : Path to vcf file
    out_file [str] : Path to write sanitized vcf file
snp_log_file [str] : Path to write SNP log report
sample_log_file [str]: Path to write sample report
logging [logging obj] : logging object
min_count [int] : Minimum number of variable samples ie. if set to 2, at least two samples must differ from the rest
max_missing [float] : Percent of samples which can be missing data for a position for that position to be valid
max_alt_states [int] : Maxmimum number of bases which can be in the vcf for that position to be valid
disruptive_threshold [int]: Identify sequences which are blunting resolution by looking at which ones are missing
where other sequences have it. By default, it looks for sequences which are uniquely
missing a position conserved in all of the other samples
Returns
-------
List of sample_id's in vcf
"""
logging.info("Reading vcf file {}".format(vcf_file))
pandas_vcf_obj = pandas_vcf(vcf_file)
samples = pandas_vcf_obj.get_sample_list()
num_samples = len(samples)
snp_report = open(snp_log_file, 'w')
sample_report = open(sample_log_file, 'w')
sample_ambig_counts = {}
for sample in samples:
sample_ambig_counts[sample] = {'total_ambig': 0, 'uniq_ambig': 0, 'low_freq_variants': 0}
logging.info("VCF file contains #{} samples".format(len(samples)))
logging.info("VCF file samples: {}".format(samples))
count_postitions = 0
valid_position_count = 0
filtered_positions_count = 0
filtered_positions_ambig = 0
filtered_positions_alt = 0
ambiguous_positions = {}
snp_data = {}
positions = []
position_base_calls = pandas_vcf_obj.get_baselookup()
valid_positions = []
for index,row in pandas_vcf_obj.df.iterrows():
calls = get_vcf_row_calls(row, position_base_calls, samples, logging)
count_postitions += 1
position = list(calls.keys())[0]
positions.append(position)
chromosome = calls[position]['chrom']
bases = calls[position]['bases']
ref_base = calls[position]['ref_base']
if not chromosome in snp_data:
snp_data[chromosome] = {}
if not position in snp_data[chromosome]:
snp_data[chromosome][position] = {}
count_ref = 0
count_alt = 0
count_missing = 0
count_alt_bases = 0
alt_samples = []
valid_bases = []
for base in bases:
if base == ref_base:
count_ref = len(bases[base])
elif base == 'N':
count_missing = len(bases[base])
if not position in ambiguous_positions:
ambiguous_positions[position] = []
ambiguous_positions[position] = bases[base]
for sample_id in ambiguous_positions[position]:
sample_ambig_counts[sample_id]['total_ambig'] += 1
else:
count_alt_bases += 1
count_alt += len(bases[base])
alt_samples += bases[base]
valid_bases.append(base)
if (count_ref >= min_count) and \
(count_alt >= min_count) and \
(count_missing / num_samples <= max_missing) and \
count_alt_bases <= max_alt_states:
valid_position_count += 1
valid_positions.append(position)
for base in valid_bases:
snp_data[chromosome][position][base] = {'samples':bases[base],'out_bases':list(set(list(bases.keys())) - set([base]))}
else:
if count_ref < min_count and count_alt < min_count:
filtered_positions_count += 1
snp_report.write(
"Filtering due to minimum polymorphic sample count\tchrom: {}\tposition: {}\tcount_ref: {}\tcount_alt: {}\tcount_missing: {}\n".format(
chromosome, position, count_ref, count_alt, count_missing))
for sample_id in alt_samples:
sample_ambig_counts[sample_id]['low_freq_variants'] += 1
if count_missing / num_samples >= max_missing:
filtered_positions_ambig += 1
snp_report.write(
"Filtering due to maximum missing sample count\tchrom: {}\tposition: {}\tcount_ref: {}\tcount_alt: {}\tcount_missing: {}\n".format(
chromosome, position, count_ref, count_alt, count_missing))
if count_alt_bases > max_alt_states:
filtered_positions_alt += 1
snp_report.write(
"Filtering due to more than allowed alt base states\tchrom: {}\tposition: {}\tcount_ref: {}\tcount_alt: {}\tcount_missing: {}\n".format(
chromosome, position, count_ref, count_alt, count_missing))
filtered_df = pandas_vcf_obj.df[pandas_vcf_obj.df['POS'].isin(valid_positions)]
filtered_df.to_csv(out_file,sep="\t",header=True,index=False)
#for position in positions:
logging.info("Read {} positions in file {}".format(count_postitions, vcf_file))
logging.info("Filtered {} positions due to minimum polymorphic sample requirement".format(filtered_positions_count))
logging.info("Filtered {} positions due to missing in more than {}% of samples".format(filtered_positions_ambig,
max_missing))
logging.info("Filtered {} positions due to more than allowed alternative base states".format(filtered_positions_alt,
max_alt_states))
logging.info("{} positions are valid after filtering".format(valid_position_count))
# Identify for the user unusual sequences that they might want to remove from their analyses
disruptive_sequence_check = identify_disruptive_sequences(ambiguous_positions, disruptive_threshold)
for sample_id in disruptive_sequence_check:
        sample_ambig_counts[sample_id]['uniq_ambig'] = disruptive_sequence_check[sample_id]
# Get 95 percentile for each attribute
percentile_total_ambig = get_percentile(sample_ambig_counts, 'total_ambig', percentile=95)
percentile_unique_ambig = get_percentile(sample_ambig_counts, 'uniq_ambig', percentile=95)
percentile_low_freq_var = get_percentile(sample_ambig_counts, 'low_freq_variants', percentile=95)
logging.info(
"95% Percentile: Total Ambig={}\t95% Percentile: Unique Ambig={}\t95% Percentile: low frequency variants={}".format(
percentile_total_ambig, percentile_unique_ambig, percentile_low_freq_var))
for sample_id in disruptive_sequence_check:
status = 'PASS'
if sample_ambig_counts[sample_id]['total_ambig'] / count_postitions > max_missing:
status = 'FAIL'
        elif sample_ambig_counts[sample_id]['uniq_ambig'] > percentile_unique_ambig:
status = 'WARNING: Sample has unusually high number of unique missing core positions'
elif sample_ambig_counts[sample_id]['low_freq_variants'] > percentile_low_freq_var:
status = 'WARNING: Sample has unusually high number of low frequency variants'
elif sample_ambig_counts[sample_id]['total_ambig'] > percentile_total_ambig:
status = 'WARNING: Sample has unusually high number of missing core positions'
        sample_report.write("{}\tTOTAL_AMBIG={}\tUNIQUE_AMBIG={}\tLOW_FREQ_VARIANTS={}\tSTATUS={}\n".format(
            sample_id,
            sample_ambig_counts[sample_id]['total_ambig'],
            sample_ambig_counts[sample_id]['uniq_ambig'],
            sample_ambig_counts[sample_id]['low_freq_variants'],
            status))
sample_report.close()
snp_report.close()
return [samples, snp_data] | b5d96e9224b5eddad1dff8dcf2caf558522376bc | 3,654,511 |
from typing import Optional
from typing import Any
def geq(column: str, value: Optional[Any]) -> str:
"""
>>> geq("col", None)
'1'
>>> geq("col", 1)
'col >= 1'
>>> geq("col", "1")
"col >= '1'"
"""
if not value:
return "1"
if isinstance(value, str):
return f"{column} >= '{value}'"
return f"{column} >= {value}" | 9216b8e2480232840ad37d8fe0e5c0f07b88873f | 3,654,512 |
from keras.layers import Conv2D, Dense
from palmnet.layers import Conv2DCustom
from palmnet.layers.sparse_facto_sparse_tensor_deprecated import SparseFactorisationDense
def count_model_param_and_flops(model, dct_layer_sparse_facto_op=None):
"""
Return the number of params and the number of flops of 2DConvolutional Layers and Dense Layers for both the base model and the compressed model.
:return:
"""
nb_param_base, nb_param_compressed, nb_flop_base, nb_flop_compressed = 0, 0, 0, 0
param_by_layer = {}
flop_by_layer = {}
for layer in model.layers:
logger.warning("Process layer {}".format(layer.name))
if isinstance(layer, Conv2D) or isinstance(layer, Conv2DCustom):
nb_param_layer, nb_param_compressed_layer = Palminizable.count_nb_param_layer(layer, dct_layer_sparse_facto_op)
nb_flop_layer, nb_flop_compressed_layer = Palminizable.count_nb_flop_conv_layer(layer, nb_param_layer, dct_layer_sparse_facto_op)
elif isinstance(layer, Dense) or isinstance(layer, SparseFactorisationDense):
nb_param_layer, nb_param_compressed_layer = Palminizable.count_nb_param_layer(layer, dct_layer_sparse_facto_op)
nb_flop_layer, nb_flop_compressed_layer = Palminizable.count_nb_flop_dense_layer(layer, nb_param_layer, dct_layer_sparse_facto_op)
else:
logger.warning("Layer {}, class {}, hasn't been compressed".format(layer.name, layer.__class__.__name__))
nb_param_compressed_layer, nb_param_layer, nb_flop_layer, nb_flop_compressed_layer = 0, 0, 0, 0
param_by_layer[layer.name] = nb_param_layer
flop_by_layer[layer.name] = nb_flop_layer
nb_param_base += nb_param_layer
nb_param_compressed += nb_param_compressed_layer
nb_flop_base += nb_flop_layer
nb_flop_compressed += nb_flop_compressed_layer
return nb_param_base, nb_param_compressed, nb_flop_base, nb_flop_compressed, param_by_layer, flop_by_layer | 142b04ad327f662d315d7c92322df8aef2ae9871 | 3,654,513 |
def longest_match(list1, list2):
"""
Find the length of the longest substring match between list1 and list2.
>>> longest_match([], [])
0
>>> longest_match('test', 'test')
4
>>> longest_match('test', 'toast')
2
>>> longest_match('supercalifragilisticexpialidocious', 'mystical californication')
5
"""
m = len(list1)
n = len(list2)
data = [[0 for col in range(n+1)] for row in range(m+1)]
for a in range(1, m+1):
for b in range(1, n+1):
if list1[a-1] == list2[b-1]:
data[a][b] = 1 + data[a-1][b-1]
else:
data[a][b] = 0
maxes = [max(row) for row in data]
return max(maxes) | 4a84dacbb0d59fc7f9c4b59e87e55c72416b8c80 | 3,654,514 |
def deserialize_config(data, **kwargs):
"""Create instance of a JobConfiguration from a dict.
Parameters
----------
data : dict
Dictionary loaded from a serialized config file.
Returns
-------
JobConfiguration
"""
registry = Registry()
config_module = data["configuration_module"]
config_class = data["configuration_class"]
for ext in registry.iter_extensions():
ext_cfg_class = ext[ExtensionClassType.CONFIGURATION]
if ext_cfg_class.__module__ == config_module and ext_cfg_class.__name__ == config_class:
return ext_cfg_class.deserialize(data, **kwargs)
raise InvalidParameter(f"Cannot deserialize {config_module}.{config_class}") | eff887d4e676935742b8169c62a9a581b5f239ce | 3,654,515 |
import numpy
def pmat06(date1, date2):
"""
Wrapper for ERFA function ``eraPmat06``.
Parameters
----------
date1 : double array
date2 : double array
Returns
-------
rbp : double array
Notes
-----
The ERFA documentation is below.
- - - - - - - - - -
e r a P m a t 0 6
- - - - - - - - - -
Precession matrix (including frame bias) from GCRS to a specified
date, IAU 2006 model.
Given:
date1,date2 double TT as a 2-part Julian Date (Note 1)
Returned:
rbp double[3][3] bias-precession matrix (Note 2)
Notes:
1) The TT date date1+date2 is a Julian Date, apportioned in any
convenient way between the two arguments. For example,
JD(TT)=2450123.7 could be expressed in any of these ways,
among others:
date1 date2
2450123.7 0.0 (JD method)
2451545.0 -1421.3 (J2000 method)
2400000.5 50123.2 (MJD method)
2450123.5 0.2 (date & time method)
The JD method is the most natural and convenient to use in
cases where the loss of several decimal digits of resolution
is acceptable. The J2000 method is best matched to the way
the argument is handled internally and will deliver the
optimum resolution. The MJD method and the date & time methods
are both good compromises between resolution and convenience.
2) The matrix operates in the sense V(date) = rbp * V(GCRS), where
the p-vector V(GCRS) is with respect to the Geocentric Celestial
Reference System (IAU, 2000) and the p-vector V(date) is with
respect to the mean equatorial triad of the given date.
Called:
eraPfw06 bias-precession F-W angles, IAU 2006
eraFw2m F-W angles to r-matrix
References:
Capitaine, N. & Wallace, P.T., 2006, Astron.Astrophys. 450, 855
Wallace, P.T. & Capitaine, N., 2006, Astron.Astrophys. 459, 981
Copyright (C) 2013-2017, NumFOCUS Foundation.
Derived, with permission, from the SOFA library. See notes at end of file.
"""
(date1, date2,), rbp = arrayify_inputs_and_create_d3_fix(
[date1, date2], core_dims=[0, 0], out_core_shape=(3, 3), out_dtype=numpy.double)
rbp = ufunc.pmat06(date1, date2, rbp)
return rbp | 69b38637701d804ca83733d7f55fca1fd57a5b72 | 3,654,516 |
def _splitData(data):
"""Takes either a cursor or result set and returns result set and list of columns."""
if hasattr(data, 'fetchall'):
rows = data.fetchall()
cols = data.columns()
elif isinstance(data, list):
rows = data
if hasattr(rows[0], '_fields'):
cols = rows[0]._fields
elif hasattr(rows[0], 'keys'):
cols = list(rows[0].keys())
else:
raise TypeError('Can not determine the list of columns from the result set.')
return (rows, cols) | 9953be08f29fb457782e5401c3dfded8f780924b | 3,654,517 |
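A minimal usage sketch for _splitData with a plain list of namedtuples; the rows are made up, and the function from the row above is assumed to be in scope.
from collections import namedtuple

Row = namedtuple("Row", ["id", "name"])
rows, cols = _splitData([Row(1, "a"), Row(2, "b")])
print(cols)  # ('id', 'name')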
import multiprocessing
def get_cpu_count():
"""
    Try to estimate the number of CPUs on the host: first using the native
    multiprocessing function, then the content of /proc/cpuinfo. If neither
    method works, 4 is returned.
"""
try:
cpucount = multiprocessing.cpu_count()
except:
try:
s = open("/proc/cpuinfo").read()
cpucount = int(s.split('processor')[-1].split(":")[1].split("\n")[0])
cpucount += 1
except:
cpucount = 4
return cpucount | db58112537c4a111ec1ef24eeab70227678d6d1e | 3,654,518 |
def get_relation_data(collection, relation_paths):
"""Prepare relations for usage inside extend_relations."""
out = []
for path in relation_paths:
promote = path.get("promote", False)
numpy_path = []
for step in path["steps"]:
if isinstance(step, str):
step_name, max_usage = step, 1
else:
step_name, max_usage = step
relation = collection.relation(step_name)
numpy_path.append((relation.offsets, relation.values, max_usage))
inv_relation = collection.get_inverted_relation(step_name).edges() > 0
out.append((numpy_path, inv_relation, promote))
return out | 8b4cd9145995aee5e3c9b880073dfd10320b24e5 | 3,654,519 |
def generate_paddle_quads():
"""
This function builds a matrix of paddles, each row in the matrix
represents the paddle skin (four colors) and each column represents
the size.
"""
paddle_base_width = 32
paddle_height = 16
x = 0
y = paddle_height * 4
spritesheet = []
for _ in range(4):
spritesheet.append([
# The smallest paddle is in (0, y) and its dimensions are 32x16.
pygame.Rect(x, y, paddle_base_width, paddle_height),
# The next paddle is in (32, y) and its dimensions are 64x16.
pygame.Rect(
x + paddle_base_width, y,
paddle_base_width * 2, paddle_height
),
# The next paddle is in (96, y) and its dimensions are 96x16.
pygame.Rect(
x + paddle_base_width * 3, y,
paddle_base_width * 3, paddle_height
),
# The largest paddle is in (0, y + 16)
# and its dimensions are 128x16.
pygame.Rect(
x, y + paddle_height,
paddle_base_width * 4, paddle_height
)
])
# To go to the next color, increment y by 32.
y += paddle_height * 2
return spritesheet | e82259d5e203257574c5ae91ad4a5c3a625e5b5a | 3,654,520 |
def cut(img):
"""
Applies central horizontal threshold in Fourier spectrum
"""
# Apply fourier transform and shift
img_fft = fftn(img)
img_fft_shift = fftshift(img_fft)
# Print spectrum before
plt.imshow(np.abs(img_fft_shift), cmap='gray', norm=LogNorm(vmin=5))
plt.show()
# Filter image: remove upper and lower horizontal thirds (1/3)
img_fft_shift_filtered = np.copy(img_fft_shift)
for x in range(img.shape[0]):
for y in range(img.shape[1]):
if((x < img.shape[0]//2 - img.shape[0]//30 or \
x > img.shape[0]//2 + img.shape[0]//30) and\
(y < img.shape[1]//2 - img.shape[1]//30 or \
y > img.shape[1]//2 + img.shape[1]//30)):
img_fft_shift_filtered[x,y] = 0
if((x < img.shape[0]//3 or \
x > img.shape[0]*2//3) or \
(y < img.shape[1]//3 or \
y > img.shape[1]*2//3)):
img_fft_shift_filtered[x, y] = 0
# Print spectrum after
plt.imshow(np.abs(img_fft_shift_filtered), cmap='gray', norm=LogNorm(vmin=5))
plt.show()
# Return to space domain result image using inverse
return np.abs(ifftn(fftshift(img_fft_shift_filtered))) | 74ce6db709aaa91fec2321dc6cc70fc6d5a8c552 | 3,654,521 |
def csrmm2(m, n, k, descrA, csrValA, csrRowPtrA, csrColIndA, B, handle=None,
C=None, nnz=None, transA=CUSPARSE_OPERATION_NON_TRANSPOSE,
transB=CUSPARSE_OPERATION_NON_TRANSPOSE, alpha=1.0, beta=0.0,
ldb=None, ldc=None, check_inputs=True):
""" multiply two sparse matrices: C = transA(A) * transB(B)
higher level wrapper to cusparse<t>csrmm2 routines.
"""
if check_inputs:
for item in [csrValA, csrRowPtrA, csrColIndA, B]:
if not isinstance(item, pycuda.gpuarray.GPUArray):
raise ValueError("csr*, B, must be pyCUDA gpuarrays")
if C is not None:
if not isinstance(C, pycuda.gpuarray.GPUArray):
raise ValueError("C must be a pyCUDA gpuarray or None")
# dense matrices must be in column-major order
if not B.flags.f_contiguous:
raise ValueError("Dense matrix B must be column-major order")
if transB == CUSPARSE_OPERATION_CONJUGATE_TRANSPOSE:
raise ValueError("Conjugate transpose operation not supported "
"for dense matrix B")
if (transB == CUSPARSE_OPERATION_TRANSPOSE) and \
(transA != CUSPARSE_OPERATION_NON_TRANSPOSE):
raise ValueError("if B is transposed, only A non-transpose is "
"supported")
if handle is None:
handle = misc._global_cusparse_handle
dtype = csrValA.dtype
if C is None:
if transA == CUSPARSE_OPERATION_NON_TRANSPOSE:
ldc = m
else:
ldc = k
alloc = misc._global_cusparse_allocator
C = gpuarray.zeros((ldc, n), dtype=dtype, order='F',
allocator=alloc)
elif not C.flags.f_contiguous:
raise ValueError("Dense matrix C must be in column-major order")
if nnz is None:
nnz = csrValA.size
if ldb is None:
ldb = B.shape[0]
if ldc is None:
ldc = C.shape[0]
# perform some basic sanity checks
if check_inputs:
if csrValA.size != nnz:
raise ValueError("length of csrValA array must match nnz")
if (B.dtype != dtype) or (C.dtype != dtype):
raise ValueError("A, B, C must share a common dtype")
if ldb < B.shape[0]:
raise ValueError("ldb invalid for matrix B")
if transA == CUSPARSE_OPERATION_NON_TRANSPOSE:
ldOpA = m # leading dimension for op(A)
tdOpA = k # trailing dimension for op(A)
else:
ldOpA = k
tdOpA = m
if transB == CUSPARSE_OPERATION_NON_TRANSPOSE:
if B.shape[1] != n:
raise ValueError("B, n incompatible")
if (ldb < tdOpA):
raise ValueError("size of A incompatible with B")
else:
if ldb < n:
raise ValueError("B, n incompatible")
if (B.shape[1] != tdOpA):
raise ValueError("size of A incompatible with B")
if (C.shape[1] != n):
raise ValueError("bad shape for C")
if (ldc != ldOpA):
raise ValueError("size of A incompatible with C")
if csrRowPtrA.size != m+1:
raise ValueError("length of csrRowPtrA invalid")
if dtype == np.float32:
fn = cusparseScsrmm2
elif dtype == np.float64:
fn = cusparseDcsrmm2
elif dtype == np.complex64:
fn = cusparseCcsrmm2
elif dtype == np.complex128:
fn = cusparseZcsrmm2
else:
raise ValueError("unsupported sparse matrix dtype: %s" % dtype)
transa = transA
transb = transB
try:
fn(handle, transa, transb, m, n, k, nnz, alpha, descrA, csrValA,
csrRowPtrA, csrColIndA, B, ldb, beta, C, ldc)
except CUSPARSE_STATUS_INVALID_VALUE as e:
print("m={}, n={}, k={}, nnz={}, ldb={}, ldc={}".format(
m, n, k, nnz, ldb, ldc))
raise(e)
return C | fffbecab90dfb831a4429aed759c0218b065aa4d | 3,654,522 |
def is_translated(path):
""" Checks if all files in the translation has at least one translation.
Arguments:
path (str): path to po-file
    Returns: True if all files in the translation have at least one translation,
otherwise False.
"""
po = polib.pofile(path)
files = []
for e in po:
files += [f[0] for f in e.occurrences]
all_files = sorted(set(files))
translated_entities = [e for e in po if e.translated()]
files = []
for e in translated_entities:
files += [f[0] for f in e.occurrences]
translated_files = sorted(set(files))
return translated_files == all_files | eeacbbc8ff068684e56d79e1aaa65d564b2e33ec | 3,654,523 |
def pylm_component(name):
"""Decorator for registering a class to lightmetrica"""
def pylm_component_(object):
# Get base class
base = object.__bases__[0]
base.reg(object, name)
return object
return pylm_component_ | 531c7e3f224b824b438011d4be348a76154b3444 | 3,654,524 |
import torch
def dice_score(input_mask, target_mask, eps=1e-5):
"""
input mask: (B * K, HW) #probabilities [0, 1]
target_mask: (B * K, HW) #binary
"""
dims = tuple(range(1, input_mask.ndimension()))
intersections = torch.sum(input_mask * target_mask, dims) #(B, N)
cardinalities = torch.sum(input_mask + target_mask, dims)
dice = ((2. * intersections + eps) / (cardinalities + eps))
return dice | 8fbe4b7aaec4a45d7dec4705e4c3feb348250b64 | 3,654,525 |
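A minimal usage sketch for dice_score; the tensors below are toy masks, and the function from the row above is assumed to be in scope.
import torch

pred = torch.tensor([[0.9, 0.8, 0.1, 0.2],
                     [0.1, 0.9, 0.9, 0.1]])   # (B*K, HW) probabilities
target = torch.tensor([[1., 1., 0., 0.],
                       [0., 1., 1., 0.]])     # (B*K, HW) binary masks
print(dice_score(pred, target))  # one dice value per mask, close to 1 for good overlap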
def append_write(filename="", text=""):
"""
appends a string at the end of a text file (UTF8)
and returns the number of characters added
"""
with open(filename, "a", encoding="utf-8") as f:
f.write(text)
return len(text) | 6767f61b6624b82d732e7277507d03c3f4daf04a | 3,654,526 |
import torch
def psnr(img1, img2):
"""
compute PSNR between two images
"""
MSE = torch.mean((img1-img2)**2)
return 10*torch.log10(1**2/MSE) | f216733631d224aa27f5c5a395c143c3768f8f28 | 3,654,527 |
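A minimal usage sketch for psnr; the tensors are toy images in [0, 1] (matching the peak value of 1 in the formula), and the function from the row above is assumed to be in scope.
import torch

clean = torch.rand(1, 3, 32, 32)
noisy = (clean + 0.05 * torch.randn_like(clean)).clamp(0.0, 1.0)
print(psnr(clean, noisy).item())  # roughly 26 dB for this noise level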
def is_scalar(dims):
"""
Returns True if a dims specification is effectively
a scalar (has dimension 1).
"""
return np.prod(flatten(dims)) == 1 | d2f2f1a1f2dd66ec01d9e653315d37b4ee4990e1 | 3,654,528 |
def applyMinv(obj, inputs, shape_cache):
"""Simple wrapper around a component's applyMinv where we can reshape the
arrays for each input and expand any needed array elements into full arrays.
"""
inputkeys = sorted(inputs.keys())
for key in inputkeys:
pre_process_dicts(obj, key, inputs, shape_cache)
pre_inputs = inputs.copy()
inputs = obj.applyMinv(pre_inputs, inputs)
# Result vector needs to be flattened.
for key in reversed(inputkeys):
post_process_dicts(key, inputs)
# Clean out any leftover keys we added
for key in inputs.keys():
if key not in inputkeys:
inputs.pop(key)
return inputs | 9fd805408bea659f26eec93b430e450ea9228145 | 3,654,529 |
import os
def get_para_input(arg):
"""Get input directory parameter"""
input_dir = os.path.abspath(arg)
if str(input_dir).endswith('/'):
input_dir = input_dir[:-1]
input_dir = input_dir.replace('\\', '/')
return input_dir | 13ad4d14cac7c4b77e40d0e264e5197b2fbb459b | 3,654,530 |
import json
import requests
import time
def get_county_data():
"""Get the raw data from coronavirus-tracker-api.herokuapp.com."""
url = ('https://coronavirus-tracker-api.herokuapp.com/v2/locations?source=csbs')
raw_data = None
while raw_data is None:
try:
raw_data = json.loads(requests.request('GET', url, verify=False).text)
        except:
            print('API Get for county-data failed.')
            time.sleep(5)  # If the HTTP request fails, wait 5s and try again.
return raw_data | 33404a65e6242b7416304f7194dc2a5c7f073d5d | 3,654,531 |
import numpy as np
def r2lm(measured_y, estimated_y):
"""
r^2 based on the latest measured y-values (r2lm)
Calculate r^2 based on the latest measured y-values. Measured_y and estimated_y must be vectors.
Parameters
----------
measured_y: numpy.array or pandas.DataFrame
estimated_y: numpy.array or pandas.DataFrame
Returns
-------
r2lm : float
r^2 based on the latest measured y-values
"""
measured_y = np.array(measured_y).flatten()
estimated_y = np.array(estimated_y).flatten()
return float(1 - sum((measured_y - estimated_y) ** 2) / sum((measured_y[1:] - measured_y[:-1]) ** 2)) | f75c89ca3f99659a3e2e12555a3968745fad1007 | 3,654,532 |
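A minimal usage sketch for r2lm; the series are made up, and the function from the row above is assumed to be in scope. A naive one-step-behind forecast scores exactly 0 by construction.
import numpy as np

measured = np.array([1.0, 2.0, 4.0, 7.0, 11.0])
naive = np.array([1.0, 1.0, 2.0, 4.0, 7.0])   # always predict the previous measured value
print(r2lm(measured, naive))                  # 0.0
better = np.array([1.0, 2.1, 3.9, 7.2, 10.8])
print(r2lm(measured, better))                 # close to 1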
def G_to_NX_sparse(X, Y):
"""convert sparse adj matrix to NetworkX Graph"""
Gs = []
N = len(Y)
for n in range(N):
x = X[n]
G = nx.DiGraph()
for i,j,w in x:
G.add_edge(i,j, weight=w)
Gs.append(G)
return Gs, Y | 8113ede05a0015119cceaa9c817b8bf3d46003c0 | 3,654,533 |
def pmf(k, n, a, b, loc=0):
"""JAX implementation of scipy.stats.betabinom.pmf."""
return lax.exp(logpmf(k, n, a, b, loc)) | efba7202231dde7d0dec1e56df7a52dccf7135a0 | 3,654,534 |
def discrete_bottleneck(x,
hidden_size,
z_size,
filter_size,
name,
mode=None,
startup_steps=50000,
bottleneck_kind='dvq',
num_blocks=2,
reshape_method='slice',
projection_tensors=None,
means=None,
beta=0.25,
noise_dev=1.,
decay=0.999,
discrete_mix=0.5,
random_top_k=1,
soft_em=False,
inv_temp=1.0,
epsilon=1e-5,
softmax_k=0,
kl_warmup_steps=150000,
ema=True,
ema_count=None,
ema_means=None,
summary=True,
dp_strength=1.0,
dp_decay=1.0,
dp_alpha=0.5,
slo=False,
slo_alpha=10,
slo_beta=0.5,
c_logits=None):
"""Discretization bottleneck for latent variables.
Args:
x: Input to the discretization bottleneck.
hidden_size: Dimension of the latent state.
z_size: Number of bits used to produce discrete code; discrete codes range
from 1 to 2**z_size.
filter_size: Filter size to be used for the embedding function.
name: Name for the bottleneck scope.
mode: Mode represents whether we are training or testing for bottlenecks
that differ in behavior (Default: None).
startup_steps: Number of steps after which latent predictor is trained
(Default: 50000).
bottleneck_kind: Kind of discretization bottleneck to use; one of dvq,
semhash, gumbel-softmax (Default: dvq).
num_blocks: Number of blocks to use for decomposed vector quantization.
reshape_method: Method to reshape for DVQ (Default: slice).
projection_tensors: If the reshape method is project, then these are the
tensors used to project (Default: None).
means: The embedding table for dvq (Default: None).
beta: Beta factor for the DVQ loss (Default: 0.25).
noise_dev: Stddev for noise added for semhash (Default: 0).
decay: Decay factor for the exponential moving average (Default: 0.999).
discrete_mix: Factor for mixing discrete and non-discrete input for semhash
(Default: 0.5).
random_top_k: Noisy top-k for DVQ (Default: 1).
soft_em: If True then use soft EM rather than hard EM (Default: False).
inv_temp: Inverse temperature for soft EM (Default: 1.)
epsilon: Epsilon parameter for DVQ (Default: 1e-5).
softmax_k: If > 1 then do top-k softmax (Default: 0).
kl_warmup_steps: Number of steps for kl warmup (Default: 150000).
ema: If True update embeddings using exponential moving averages (Default:
True).
ema_count: Table of counts for each embedding corresponding to how many
examples in a batch it was the closest to (Default: None).
ema_means: Exponentially averaged version of the embeddings (Default: None).
summary: If True, then write summaries (Default: True).
dp_strength: Strength of Dirichlet Process loss prior (Default: 1.0).
dp_decay: Decay the dp_strength using an exponential decay using this
term (Default: 1.0).
dp_alpha: Alpha term (pseudo-count) in Dirichlet Process (Default: 0.5).
slo: Smoothed L0
slo_alpha: alpha for smoothed L0
slo_beta: beta for smoothed L0
c_logits: a [num_blocks, block_size] tensor of logits for
computing cluster probabilities.
Returns:
Embedding to pass to the decoder, discrete latent, loss, and the embedding
function.
Raises:
ValueError: If projection_tensors is None for reshape_method project, or
ema_count or ema_means is None if we are using ema, or unknown args.
"""
block_v_size = None
if bottleneck_kind == 'dvq':
# Define the dvq parameters
assert means is not None
# Check block dimensions add up
if hidden_size % num_blocks != 0:
raise ValueError('num_blocks does not divide hidden size')
if 2**z_size % num_blocks != 0:
raise ValueError('num_blocks does not divide embedding table size')
block_v_size = 2**(z_size / num_blocks)
block_v_size = int(block_v_size)
# Set the reshape method corresponding to projections or slices
if reshape_method == 'slice':
reshape_fn = partial(
slice_hidden, hidden_size=hidden_size, num_blocks=num_blocks)
elif reshape_method == 'project':
if projection_tensors is None:
raise ValueError(
'Projection tensors is None for reshape_method project')
reshape_fn = partial(
project_hidden,
projection_tensors=projection_tensors,
hidden_size=hidden_size,
num_blocks=num_blocks)
else:
raise ValueError('Unknown reshape_method')
# Check if the ema settings make sense
if ema:
if ema_count is None:
raise ValueError('ema_count is None but ema is True')
if ema_means is None:
raise ValueError('ema_means is None but ema is True')
with tf.variable_scope(name, reuse=tf.AUTO_REUSE):
l = tf.constant(0.0)
if bottleneck_kind == 'dense':
c = tf.layers.dense(x, z_size, name='vcc')
h1 = tf.layers.dense(c, filter_size, name='vch1')
elif bottleneck_kind == 'vae':
c, l, _, _ = vae(x, z_size, 'vae')
h1 = tf.layers.dense(c, filter_size, name='vch1')
elif bottleneck_kind == 'semhash':
c = tf.layers.dense(x, z_size, name='vcc')
y_clean = common_layers.saturating_sigmoid(c)
if summary:
tf.summary.histogram('y_clean', tf.reshape(y_clean, [-1]))
if noise_dev > 0 and mode == tf.estimator.ModeKeys.TRAIN:
noise = tf.truncated_normal(
common_layers.shape_list(c), mean=0.0, stddev=noise_dev)
y = common_layers.saturating_sigmoid(c + noise)
else:
y = y_clean
d = tf.to_float(tf.less(0.5, y))
y_discrete = tf.stop_gradient(d) + y - tf.stop_gradient(y)
pd = common_layers.inverse_exp_decay(startup_steps * 2)
pd *= discrete_mix
pd = pd if mode == tf.estimator.ModeKeys.TRAIN else 1.0
c = tf.where(
tf.less(tf.random_uniform([common_layers.shape_list(y)[0]]), pd),
y_discrete, y)
h1a = tf.layers.dense(c, filter_size, name='vch1a')
h1b = tf.layers.dense(1.0 - c, filter_size, name='vch1b')
h1 = h1a + h1b
dx = tf.to_int32(tf.stop_gradient(d))
c = bit_to_int(dx, z_size)
elif bottleneck_kind == 'gumbel-softmax':
_, hot, l = gumbel_softmax(x, name, z_size, mode, softmax_k,
kl_warmup_steps, summary)
c = tf.argmax(hot, axis=-1)
h1 = tf.layers.dense(hot, hidden_size, name='dae_dense')
elif bottleneck_kind == 'dvq':
c_probs = None
if c_logits is not None:
c_probs = tf.nn.softmax(c_logits, axis=-1)
x_reshaped = reshape_fn(x)
x_means_hot, x_means, q_loss, e_loss = embedding_lookup(
x_reshaped, means, num_blocks, block_v_size, random_top_k, soft_em,
inv_temp, ema_count, c_probs)
# Get the discrete latent represenation
x_means_idx = tf.argmax(x_means_hot, axis=-1)
# Get the binary representation
x_means_bits = int_to_bit(
x_means_idx, num_bits=int(z_size / num_blocks), base=2)
shape = common_layers.shape_list(x_means_bits)
new_shape = shape[:-1]
new_shape[-1] = z_size
x_means_bits = tf.reshape(x_means_bits, shape=new_shape)
c = bit_to_int(tf.to_int32(x_means_bits), num_bits=z_size, base=2)
# Adjust shape of c
shape_x = common_layers.shape_list(x)
new_shape = shape_x[:-1]
c = tf.reshape(c, new_shape)
# Update the ema variables
if ema:
tf.logging.info('Using EMA with beta = {}'.format(beta))
updated_ema_count = moving_averages.assign_moving_average(
ema_count,
tf.reduce_sum(
tf.reshape(x_means_hot, shape=[-1, num_blocks, block_v_size]),
axis=0),
decay,
zero_debias=False)
# Adding a term that puts a Dirichlet prior over cluster probabilities
# Hopefully it'll encourage rich get richer behaviors
dp_prior_loss = 0.
slo_loss = 0.
if dp_strength > 0.0:
# Decay dp_strength over time to make it less important
dp_strength = tf.train.exponential_decay(
dp_strength,
global_step=tf.to_int32(tf.train.get_global_step()),
decay_steps=20000,
decay_rate=dp_decay)
dp_count = ema_count + dp_alpha
p = dp_count / tf.reduce_sum(dp_count, 1, keepdims=True)
dp_prior_loss = tf.log(p)
dp_prior_loss = -1.0 * tf.reduce_sum(dp_prior_loss)
dp_prior_loss /= (num_blocks * block_v_size)
# if using smoothed L0
if slo:
# expected log likelihood
ell = tf.reduce_sum(ema_count * tf.log(c_probs))
# the prior component in the loss for MAP EM.
slo_prior = slo_alpha * tf.reduce_sum(tf.exp(-1.*c_probs/slo_beta))
slo_loss = -1. * (ell + slo_prior)/(num_blocks * block_v_size)
x_means_hot_flat = tf.reshape(
x_means_hot, shape=[-1, num_blocks, block_v_size])
dw = tf.matmul(
tf.transpose(x_means_hot_flat, perm=[1, 2, 0]),
tf.transpose(x_reshaped, perm=[1, 0, 2]))
updated_ema_means = moving_averages.assign_moving_average(
ema_means, dw, decay, zero_debias=False)
n = tf.reduce_sum(updated_ema_count, axis=-1, keep_dims=True)
updated_ema_count = ((updated_ema_count + epsilon) /
(n + 2**z_size * epsilon) * n)
updated_ema_means /= tf.expand_dims(updated_ema_count, axis=-1)
with tf.control_dependencies([e_loss]):
update_means = tf.assign(means, updated_ema_means)
with tf.control_dependencies([update_means]):
l = beta * e_loss + dp_strength * dp_prior_loss + slo_loss
else:
l = q_loss + beta * e_loss
x_means = tf.reshape(x_means, shape_x)
x_reshaped = tf.reshape(x_reshaped, shape_x)
h1 = x_reshaped + tf.stop_gradient(x_means - x_reshaped)
else:
raise ValueError('Unknown discretization method.')
h2 = tf.layers.dense(tf.nn.relu(h1), filter_size, name='vch2')
res = tf.layers.dense(tf.nn.relu(h2), hidden_size, name='vcfin')
embed_fn = partial(
embed,
hidden_size=hidden_size,
z_size=z_size,
filter_size=filter_size,
name=name,
bottleneck_kind=bottleneck_kind,
num_blocks=num_blocks,
block_v_size=block_v_size,
means=means)
return res, c, l, embed_fn | ec1576b2b6a19a03995ec6dfb9a67592b925a28c | 3,654,535 |
def binarize_categorical(x, ids):
""" replace categorical feature with multiple binary ones """
x_ = np.zeros((x.shape[0], 1))
for idx in ids:
x_ = np.hstack((x_, binarize_categorical_feature(x[:, idx:idx+1])))
x = np.delete(x, ids, axis=1)
x = np.hstack((x, x_[:, 1:]))
return x | 625b551b437297c6a0c48f5ebfe2796c3be84c89 | 3,654,536 |
def import_json_dataset(fileset):
"""Returns a list of imported raw JSON data for every file in the fileset.
"""
d = []
for f in fileset:
d.append(import_json_data(f))
return d | 043720f9400cf2734598f6fe476077e004b8ef69 | 3,654,537 |
import math
def angle_difference(angle1, angle2):
"""
Calculates the difference between the given angles in clockwise direction as radians.
:param angle1: float
:param angle2: float
:return: float; between 0 and 2*Pi
"""
if (angle1 > 0 and angle2 >= 0) and angle1 > angle2:
return abs(angle1 - angle2)
elif (angle1 >= 0 and angle2 > 0) and angle1 < angle2:
return 2 * math.pi + angle1 - angle2
elif (angle1 < 0 and angle2 <= 0) and angle1 < angle2:
return 2 * math.pi + angle1 + abs(angle2)
elif (angle1 <= 0 and angle2 < 0) and angle1 > angle2:
return abs(angle1 - angle2)
elif angle1 <= 0 < angle2:
return 2 * math.pi + angle1 - angle2
elif angle1 >= 0 >= angle2:
return angle1 + abs(angle2)
else:
return 0 | 377d1915e58a96b7f1526dceb31febf45c90567b | 3,654,538 |
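A minimal usage sketch for angle_difference; the function from the row above is assumed to be in scope.
import math

print(angle_difference(math.pi / 2, 0.0))  # pi/2: a quarter turn clockwise
print(angle_difference(0.0, math.pi / 2))  # 3*pi/2: clockwise the long way around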
def merge_nd(nd_cdp, nd_lldp):
""" Merge CDP and LLDP data into one structure """
neis = dict()
nd = list()
for n in nd_lldp:
neis[(n['local_device_id'], n['remote_device_id'], n['local_int'], n['remote_int'])] = n
for n in nd_cdp:
# Always prefer CDP, but grab description from LLDP if available
        if (n['local_device_id'], n['remote_device_id'], n['local_int'], n['remote_int']) in neis:
if 'description' in neis[(n['local_device_id'], n['remote_device_id'], n['local_int'], n['remote_int'])]:
n['description'] = neis[(n['local_device_id'], n['remote_device_id'], n['local_int'], n['remote_int'])]['description']
neis[(n['local_device_id'], n['remote_device_id'], n['local_int'], n['remote_int'])] = n
for n in neis:
nd.append(neis[n])
return nd | 90d55ffdabb6c28198ee4c59bc36fdcb6fa54e62 | 3,654,539 |
def combine_divisions(division):
"""Return the new pattern after the rules have been applied to every division"""
size = int(sqrt(len(division)))
matrix = []
for r in xrange(size):
matrix.append([])
for c in xrange(r * size, (r + 1) * size):
matrix[len(matrix) - 1].append(division[c])
return np.array((np.bmat(matrix))) | a112449421603a227e4ee470330aa1a1ece47762 | 3,654,540 |
def is_repair(expr):
"""
    Check whether the assignment expression contains a sanitizing (filter) function.
    If it has already been filtered, stop the taint backtracking and treat the vulnerability as fixed.
    :param expr: assignment expression
    :return:
    """
    is_re = False  # repaired flag; defaults to not repaired
global is_repair_functions
if expr in is_repair_functions:
logger.debug("[AST] function {} in is_repair_functions, The vulnerability does not exist ".format(expr))
is_re = True
return is_re | 4184cbedaa006b75d5f6171a5201f218f852820c | 3,654,541 |
import string
def modified_greedy(sentences,
tokenized,
model,
stopwords,
original_indices,
sent_representations,
objective_function,
min_sentence_length):
"""Implementation of the MMR summarizer as described in Lin & Bilmes (2010)."""
# Initialize stuff
# Ground set indices: all indices, stays constant throughout the function
all_indices = tuple(range(len(original_indices)))
# Candidate indices: all candidates (gets smaller every iteration)
candidate_indices = list(range(len(original_indices)))
# Summary indices: indices of represented sentences added to summary
summary_indices = []
# Scaling factor (r) is taken from original paper: r = 0.3
scaling_factor = .3
# Tf-idf clustering, as described in Lin & Bilmes (2011)
n_clusters = len(original_indices) // 5
k_means = KMeans(n_clusters=n_clusters, random_state=42)
clustering = k_means.fit_predict(sent_representations)
clustered_indices = [np.array(all_indices)[np.where(clustering == i)].tolist()
for i in range(n_clusters)]
# Make document vector (since w2v sentences are now sums, it is this easy):
document_vector = np.sum(sent_representations, axis=0)
# Pick the right sentences from sentence list (to match representation matrix)
sentences = [sentences[i] for i in original_indices]
tokenized = [tokenized[i] for i in original_indices]
# Construct bag of words from representable sentences
preprocessed = (sentence.lower().split(' ')
for i, sentence in enumerate(tokenized))
# POS-tag filtering, and punctuation removal
preprocessed = [[word.translate(str.maketrans('', '', string.punctuation))
for word in sentence] for sentence in preprocessed]
# Remove OOV words
sentence_words = [[word for word in sentence if word in model.model.vocab]
for sentence in preprocessed]
# Deduplicate & flatten
bag_of_words = list(set([word for sentence in sentence_words for word in sentence]))
# Look up in-vocabulary word vectors
vectorized = [(word, model.model[word]) for word in bag_of_words]
# Construct word similarity matrix for all words in article object
names, vectors = zip(*vectorized)
# word_distance_matrix = pairwise_distances(vectors, metric='euclidean')
word_distance_matrix = pairwise_distances(vectors, metric='cosine')
# Pandas workaround
name_index_tuples = list(zip(list(range(len(names))), names))
# Fill diagonal with nan, to make sure it's never the minimum
np.fill_diagonal(word_distance_matrix, np.nan)
# Compute sentence similarity matrix based on sentence representations
distance_matrix = pairwise_distances(sent_representations, metric='cosine')
similarity_matrix = np.subtract(1, distance_matrix)
np.fill_diagonal(similarity_matrix, np.nan)
# Compute sentence lengths
sentence_lengths = [len(s.split()) for s in sentences]
length_scaler = np.power(sentence_lengths, scaling_factor).tolist()
# Remove sentences that do not have similarity with other sentences from candidate set
similarity_sum_per_sentence = np.nansum(similarity_matrix, axis=0)
irrelevant_indices = np.where(similarity_sum_per_sentence == 0)[0].tolist()
candidate_indices = [index for index in candidate_indices
if index not in irrelevant_indices]
# Already save the best singleton summary, for comparison to iterative result later
singleton_scores = [objective_function(similarity_matrix,
sent_representations,
name_index_tuples,
sentence_words,
word_distance_matrix,
document_vector,
clustered_indices,
all_indices,
[i])
if sentence_lengths[i] <= 100
else np.nan for i in candidate_indices]
best_singleton_score = np.nanmax(singleton_scores)
# Note that the singleton index is directly translated to a sentence representation index
best_singleton_index = candidate_indices[np.nanargmax(singleton_scores)]
# Greedily add sentences to summary
summary_length = 0
for iteration in range(len(sentence_lengths)):
print("Iteration {}".format(iteration))
# Edge case: value of objective function when summary is empty.
if iteration == 0:
current_score = 0.
else:
current_score = objective_function(similarity_matrix,
sent_representations,
name_index_tuples,
sentence_words,
word_distance_matrix,
document_vector,
clustered_indices,
all_indices,
summary_indices)
# Compute all relevant new scores
new_scores = [objective_function(similarity_matrix,
sent_representations,
name_index_tuples,
sentence_words,
word_distance_matrix,
document_vector,
clustered_indices,
all_indices,
summary_indices+[i])
if sentence_lengths[i] > min_sentence_length
else np.nan
for i in candidate_indices]
# If there are no candidates left, break the loop
if all(np.isnan(score) for score in new_scores):
break
# Remove non-candidate elements from length scaler to fit arrays
current_length_scaler = [v for i, v in enumerate(length_scaler) if i in candidate_indices]
added_values = np.divide(np.subtract(new_scores, current_score), current_length_scaler)
best_index = np.nanargmax(added_values)
# Pass best index if the sentence does not increase MMR-score (+ empty summary edge case)
if not new_scores[best_index] - current_score >= 0 and summary_indices:
candidate_indices.pop(best_index)
else:
summary_indices.append(candidate_indices[best_index])
summary_length += sentence_lengths[candidate_indices[best_index]]
candidate_indices.pop(best_index)
if summary_length >= 100:
break
# Last step: compare singleton score with summary score, and pick best as summary
final_summary_score = objective_function(similarity_matrix,
sent_representations,
name_index_tuples,
sentence_words,
word_distance_matrix,
document_vector,
clustered_indices,
all_indices,
summary_indices)
if best_singleton_score >= final_summary_score:
ranked_sentences = [sentences[i] for i in [best_singleton_index]]
ranking = list(zip([best_singleton_index], ranked_sentences))
else:
ranked_sentences = [sentences[i] for i in summary_indices]
ranking = list(zip(summary_indices, ranked_sentences))
# Replace filtered indices with original ones
ranking = [(original_indices[i], s) for i, s in ranking]
return ranking | b542c025fe870e1e7d41d33349de10a395a17eb3 | 3,654,542 |
def noiseless(rho, unitary):
"""Returns the noiseless predictions."""
rhotilde = unitary @ rho @ unitary.conj().T
elt = rhotilde[0, 0]
if elt >= 0.49999999:
return 0, elt
return 1, elt | bfa265046361b159e7d264aa8312b75cd7a0df3f | 3,654,543 |
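A minimal usage sketch for noiseless; rho is the |0><0| density matrix and the unitary is a Pauli-X bit flip, both toy inputs, with the function from the row above assumed to be in scope.
import numpy as np

rho = np.array([[1.0, 0.0], [0.0, 0.0]])       # |0><0|
pauli_x = np.array([[0.0, 1.0], [1.0, 0.0]])   # bit flip
print(noiseless(rho, pauli_x))                 # (1, 0.0): no |0> population left after the flip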
def __get_service_info_from_thrift(root_path, idl_service, need_test_methods):
"""从指定IDL_Service和request_config配置表中,获取测试方法和Request的映射表"""
customized_request_config = yaml.load(
open(os.path.join(root_path, 'test_red', 'request_config.yaml')))
method_request = collections.OrderedDict()
idl_method_request = dict([
(name[:-5], obj) for name, obj in inspect.getmembers(idl_service)
if inspect.isclass(obj) and '_args' in name])
for method_name in need_test_methods:
if customized_request_config and method_name in customized_request_config:
method_request[method_name] = customized_request_config[method_name]
elif method_name in idl_method_request:
try:
request_obj_name = idl_method_request[method_name].thrift_spec[2][3][0].__name__
method_request[method_name] = {__to_underscore(request_obj_name): request_obj_name}
except Exception:
print 'invalid method name: ' + method_name
return method_request | 0b736bb6b5411904bc28f887e6596c1242c324c9 | 3,654,544 |
def energy_calc(p, t):
"""
Calculates energy from power and time using the formula:
energy = power * time
Parameters
----------
p: Int or float
The power value of the equation.
t: Int or float
The time value of the equation (seconds).
Returns
-------
Int
p * t
Raises
------
ValueError
If p or t is not an integer or float.
Examples
--------
>>> school_algorithms.energy_calc(5, 2)
10
"""
_if_not_int_or_float_raise(p, t)
return p * t | 7df3180fdb56989e62a69305763455edbfa44ebc | 3,654,545 |
import base64
import uuid
import os
def copy_data_to_device(device, data, destination, filename=None):
""" Copies data into a device and creates a file to store that data.
Args:
data ('str'): The data to be copied
destination ('str'): Folder of where to store file
filename ('str'): Name of the file created. If left none then a
random name will be generated
Raise:
Exception: Permission Denied, File Creation Failed
Returns:
Path (str): path of created file
"""
try:
device.execute('ls {}'.format(destination))
except Exception:
raise FileNotFoundError("Directory '{}' does not exist.".format(
destination))
# Data must end in new line
if len(data) > 0 and not data[-1] == "\n":
data += "\n"
# Transforms text data into base64 string
encoded = base64.b64encode(bytes(data, "utf-8")).decode("utf-8")
if filename is None:
id = uuid.uuid4().hex
filename = os.path.join(destination, id)
else:
filename = os.path.join(destination, filename)
# Decode base 64 data into file
device.execute("DATA=\"{}\"".format(encoded))
device_out = device.execute("echo $DATA | base64 -d > {}".format(filename))
if 'Permission denied' in device_out:
raise Exception("Permission denied while trying to create file. " + \
"Make sure {} has the correct permissions!".format(filename))
# Verify file has been successfully created
try:
device.execute("ls {}".format(filename))
except Exception:
raise Exception("Creating of file {} has failed. No file created."
.format(filename))
if int(device.execute('stat {} --printf="%s\\n"'.format(filename))) == 0:
raise Exception("Creating of file {} has failed. Created file has no content"
.format(filename))
return filename | 73fad19637363a31c19e55c59e42479f2b9b0c84 | 3,654,546 |
import logging
def api_images_list_json(version):
"""
Return Docker Image listing https://docs.docker.com/engine/api/v1.41/#tag/Image
:param version: Docker API version
:return: string of fake images associated with honeypot.
"""
logging.info("images-list - %s, %s, %s, %s, %s" % (
version, request.remote_addr, request.user_agent, request.data, request.url))
req_objs = util.save_request_obj(request)
customLog.info(req_objs)
return API_RESP_IMAGES_JSON_LIST | 083911840c02ddc79af5ed457c42a29a19f1c57f | 3,654,547 |
def _handle_eval_return(self, result, col, as_pyranges, subset):
"""Handle return from eval.
If col is set, add/update cols. If subset is True, use return series to subset PyRanges.
Otherwise return PyRanges or dict of data."""
if as_pyranges:
if not result:
return pr.PyRanges()
first_hit = list(result.values())[0]
if isinstance(first_hit, pd.Series):
if first_hit.dtype == bool and subset:
return self[result]
elif col:
self.__setattr__(col, result)
return self
else:
raise Exception(
"Cannot return PyRanges when function returns a Series! Use as_pyranges=False."
)
return pr.PyRanges(result)
else:
return result | 84698bcb3b1f1e961ac7f3c4e347d65ce0790066 | 3,654,548 |
def compute_sigma0_sparse(V, dX, W_sensors, W_points, W_observations, column_dict):
"""
Computes the resulting standard deviation of the residuals for the current state of the bundle network.
Parameters
----------
V : ndarray
An array of residuals of the difference between registered measure
and back projected ground points in image space.
dX : ndarray
The array of parameter updates ordered according to column_dict
W_sensors : scipy.sparse.matrix
The sensor weight matrix
W_points : dict
Dictionary that maps point IDs to their weight matrices.
W_observations : ndarray
The observation weight matrix (i.e.: measure weights)
column_dict : dict
Dictionary that maps serial numbers and point IDs to index ranges in dX
Returns
-------
: float64
Standard deviation of the residuals
"""
num_image_parameters = W_sensors.shape[0]
num_observations = W_observations.shape[0]
VTPV = V.dot(W_observations).dot(V)
VTPV += dX[:num_image_parameters].dot(W_sensors.dot(dX[:num_image_parameters]))
for point_id, W_p in W_points.items():
point_update = dX[column_dict[point_id][0]:column_dict[point_id][1]]
VTPV += point_update.dot(W_p.dot(point_update))
dof = num_observations - num_image_parameters - 3 * len(W_points)
return np.sqrt(VTPV/dof) | 05606efe21d61f67539eae627caea976a532f85f | 3,654,549 |
from typing import Any
from sys import version
def version_callback() -> Any:
    """Print and return the Python interpreter version."""
print(f"version: {version}")
return version | 987643727d133dc09163cebd6c4293f78b0b7f6a | 3,654,550 |
def fill(bitdef, value):
"""
Fill undefined bits with a value.
For example ``1..0100.1`` becomes ``111010011`` when filled with 1s.
Args:
bitdef (str): The bitdef to fill.
value (str): The value to fill with, "0" or "1".
Returns:
str: The filled bitdef.
"""
output = ""
for bit in bitdef:
if bit == ".":
output += value
else:
output += bit
return output | eef3ac59a2a7c4d1a25851a2ca14b3ffed6d1463 | 3,654,551 |
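# Usage sketch for the fill() helper above (illustrative values only):
assert fill("1..0100.1", "1") == "111010011"
assert fill("1..0100.1", "0") == "100010001"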
import requests
import json
def get_cman_info(state):
"""
    Fetch the congress members for a given state and return their relevant attributes.
:param state: state you are scraping
:return: list of relevant scraped attributes
"""
cman_attrs = []
abbrev = states[state]
r = requests.get(url_base + abbrev)
d = json.loads(r.text)
for cman in d['tweeters']:
_id = cman.get('_id', '')
dateOfBirth = cman.get('dateOfBirth', '')
email = cman.get('email', '')
facebookUserName = cman.get('facebookUserName', '')
fullName = cman.get('fullName', '')
gender = cman.get('gender', '')
address = cman['office'].get('address', '')
chamber = cman['office'].get('chamber', '')
country = cman['office'].get('country', '')
district = cman['office'].get('district', '')
leadershipRole = cman['office'].get('leadershipRole', '')
party = cman['office'].get('party', '')
state = cman['office'].get('state', '')
termEnd = cman['office'].get('termEnd', '')
termStart = cman['office'].get('termStart', '')
title = cman['office'].get('title', '')
phone = cman.get('phone', '')
profileImageSmall = cman.get('profileImageSmall', '')
slug = cman.get('slug', '')
followersCount = cman['twitterProfile'].get('followersCount', '')
friendsCount = cman['twitterProfile'].get('friendsCount', '')
idStr = cman['twitterProfile'].get('idStr', '')
name = cman['twitterProfile'].get('name', '')
profileImageUrl = cman['twitterProfile'].get('profileImageUrl', '')
screenName = cman['twitterProfile'].get('screenName', '')
statusesCount = cman['twitterProfile'].get('statusesCount', '')
url = cman['twitterProfile'].get('url', '')
verified = cman['twitterProfile'].get('verified', '')
twitterUserName = cman.get('twitterUserName', '')
website = cman.get('website', '')
cman_attrs.append(
[_id, dateOfBirth, email, facebookUserName, fullName, gender, address, chamber, country, district,
leadershipRole, party, state, termEnd, termStart, title, phone, profileImageSmall, slug, followersCount,
friendsCount, idStr, name, profileImageUrl, screenName, statusesCount, url, verified, twitterUserName,
website])
return cman_attrs | afe180c4bbd930cfbfe42e28a769d07f2c4378cd | 3,654,552 |
import numpy as np
def concatenate_data(data, field='normalized_data'):
"""
Concatenate trial data in a list of dictionaries
:param data: nested dict, contains all trial infos
:param field: str, dict key in info dict in general data structure
:return:
"""
time_series = np.concatenate([info[field] for info in data],
axis=1)
assert time_series.shape[0] == 306
return time_series | 7f3dfb7aed2ffedf2124a9f57df0abf8491d1af6 | 3,654,553 |
def _find_weight_ops(op, graph, weights):
""" Find the vars come from operators with weight.
"""
pre_ops = graph.pre_ops(op)
for pre_op in pre_ops:
### if depthwise conv is one of elementwise's input,
### add it into this same search space
if _is_depthwise(pre_op):
for inp in pre_op.all_inputs():
if inp._var.persistable:
weights.append(inp._var.name)
if pre_op.type() in WEIGHT_OP and not _is_depthwise(pre_op):
for inp in pre_op.all_inputs():
if inp._var.persistable:
weights.append(inp._var.name)
return weights
return _find_weight_ops(pre_op, graph, weights)
return weights | 04e4a21079a3857815e39be3fe00e15aeac2f3b3 | 3,654,554 |
import numpy as np
def get_GUI_presets_dict():
"""Return a dictionary of all of the available potential functions."""
preset_dict = {'cosine_potential': np.array([3.14, -6.28, 12.57, 0.01, 0,
0, 0, 0]).astype(str),
'two_gaussian_potential': np.array([2.67, -4, 4, 0.01,
0, 0, 0,
0]).astype(str),
'pv_2D_potential': np.array([1.5, 0, 3.0, 0.01, 0.6, -2.0,
2.0, 0.01]).astype(str),
'muller_brown_potential': np.array([0, 0, 0, 0, 0, 0, 0,
0]).astype(str),
'C_Cl_potential': np.array([0, 0, 0, 0, 0, 0, 0,
0]).astype(str)
}
return preset_dict | 0034ecdbde2f27e1b8db25a82231fca9bc79485c | 3,654,555 |
def _escapeEnds(original):
"""Comment, function end.
Escape comment end, because non-greedy becomes greedy in context. Example:
blockCommentNonGreedy = '(\s*/\*[\s\S]+?\*/\s*){0,1}?'
"""
original = _escapeWildCard(original)
commentEscaped = original \
.replace(commentEndEscape, commentEndEscapeEscape) \
.replace(commentEnd, commentEndEscape)
return _escapeFunctionEnd(commentEscaped) | 5a0df98f42d2df2b424cd6bfa7c533e0016557fe | 3,654,556 |
def handle_bad_request(error: BadRequest) -> Response:
"""Render the base 400 error page."""
rendered = render_template("base/400.html", error=error,
pagetitle="400 Bad Request")
response: Response = make_response(rendered)
response.status_code = status.BAD_REQUEST
return response | 70c6c835ef31839ff7b637443c414abbb549bcb0 | 3,654,557 |
import torch
from transformers import TopKLogitsWarper, TopPLogitsWarper
def top_k_top_p_filtering(
logits: torch.FloatTensor,
top_k: int = 0,
top_p: float = 1.0,
filter_value: float = -float("Inf"),
min_tokens_to_keep: int = 1,
) -> torch.FloatTensor:
"""
Filter a distribution of logits using top-k and/or nucleus (top-p) filtering
Args:
logits: logits distribution shape (batch size, vocabulary size)
top_k (`int`, *optional*, defaults to 0):
If > 0, only keep the top k tokens with highest probability (top-k filtering)
top_p (`float`, *optional*, defaults to 1.0):
If < 1.0, only keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus
filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751)
min_tokens_to_keep (`int`, *optional*, defaults to 1):
            Minimum number of tokens we keep per batch example in the output.
From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317
"""
if top_k > 0:
logits = TopKLogitsWarper(top_k=top_k, filter_value=filter_value, min_tokens_to_keep=min_tokens_to_keep)(
None, logits
)
if 0 <= top_p <= 1.0:
logits = TopPLogitsWarper(top_p=top_p, min_tokens_to_keep=min_tokens_to_keep)(None, logits)
return logits | 0c2f8392dcc6ada2afb1dc33575465e194a52199 | 3,654,558 |
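# Usage sketch for top_k_top_p_filtering() above (relies on the transformers warpers imported with it):
scores = torch.randn(4, 100)                           # (batch, vocab) logits
filtered = top_k_top_p_filtering(scores, top_k=10, top_p=0.9)
probs = torch.softmax(filtered, dim=-1)                # filtered-out entries get ~0 probability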
import re
def parseFimo(fimoFile, strand):
""" parse the fimo.txt file
Args:
the fimo.txt file
strand = single or double
Returns:
fimoDict: a dict between motif ID and a list of sequences it occurs in
"""
#dict to store for each motif list of seqs that it occurs in
fimoDict = {}
#read the fimo.txt file
with open(fimoFile, 'rb') as handler:
for line in handler:
line = line.strip()
#if re.search(r'#', line):
#continue
if re.search(r'stop', line):
continue
lineSplit = line.split()
motifName = lineSplit[0]
seqName = lineSplit[1]
start = int(lineSplit[2])
stop = int(lineSplit[3])
            pval = float(lineSplit[6])
            inStrand = lineSplit[4]  # strand column of the fimo.txt line (assumed column index)
            #check if this is a negative strand hit
            if strand == 'single' and inStrand == '-':
continue
if motifName not in fimoDict:
fimoDict[motifName] = []
if seqName not in fimoDict[motifName]:
fimoDict[motifName].append(seqName)
#check the motifs
print '\n\nfimo number of motifs:', len(fimoDict)
#for motifName, seqList in fimoDict.items():
#print motifName
#print '\t', seqList
return fimoDict | ea6e0765c474e367653571e9a88e6449fc947ff5 | 3,654,559 |
def pad_batch_dimension_for_multiple_chains(
    observed_time_series, model, chain_batch_shape):
  """Expand the observed time series with extra batch dimension(s)."""
# Running with multiple chains introduces an extra batch dimension. In
# general we also need to pad the observed time series with a matching batch
# dimension.
#
# For example, suppose our model has batch shape [3, 4] and
# the observed time series has shape `concat([[5], [3, 4], [100])`,
# corresponding to `sample_shape`, `batch_shape`, and `num_timesteps`
# respectively. The model will produce distributions with batch shape
# `concat([chain_batch_shape, [3, 4]])`, so we pad `observed_time_series` to
# have matching shape `[5, 1, 3, 4, 100]`, where the added `1` dimension
# between the sample and batch shapes will broadcast to `chain_batch_shape`.
[ # Extract mask and guarantee `event_ndims=2`.
observed_time_series,
is_missing
] = canonicalize_observed_time_series_with_mask(observed_time_series)
event_ndims = 2 # event_shape = [num_timesteps, observation_size=1]
model_batch_ndims = (
model.batch_shape.ndims if model.batch_shape.ndims is not None else
tf.shape(model.batch_shape_tensor())[0])
# Compute ndims from chain_batch_shape.
chain_batch_shape = tf.convert_to_tensor(
value=chain_batch_shape, name='chain_batch_shape', dtype=tf.int32)
if not chain_batch_shape.shape.is_fully_defined():
raise ValueError('Batch shape must have static rank. (given: {})'.format(
chain_batch_shape))
if chain_batch_shape.shape.ndims == 0: # expand int `k` to `[k]`.
chain_batch_shape = chain_batch_shape[tf.newaxis]
chain_batch_ndims = tf.compat.dimension_value(chain_batch_shape.shape[0])
def do_padding(observed_time_series_tensor):
current_sample_shape = tf.shape(
observed_time_series_tensor)[:-(model_batch_ndims + event_ndims)]
current_batch_and_event_shape = tf.shape(
observed_time_series_tensor)[-(model_batch_ndims + event_ndims):]
return tf.reshape(
tensor=observed_time_series_tensor,
shape=tf.concat([
current_sample_shape,
tf.ones([chain_batch_ndims], dtype=tf.int32),
current_batch_and_event_shape], axis=0))
# Padding is only needed if the observed time series has sample shape.
observed_time_series = ps.cond(ps.rank(observed_time_series) >
model_batch_ndims + event_ndims,
lambda: do_padding(observed_time_series),
lambda: observed_time_series)
if is_missing is not None:
is_missing = ps.cond(ps.rank(is_missing) >
model_batch_ndims + event_ndims,
lambda: do_padding(is_missing),
lambda: is_missing)
return missing_values_util.MaskedTimeSeries(observed_time_series,
is_missing=is_missing) | ec072f3fa5318ee3f4c82dcc0d3697a5160b257f | 3,654,560 |
from typing import Union
import re
def get_bytes(size: Union[str, int]) -> int:
"""Converts string representation of bytes to a number of bytes.
If an integer is passed, it is returned as is (no conversion).
Args:
size (Union[str, int]): A string or integer representation of bytes to be converted.
(eg. "0.3 Gib", "3mb", "1024", 65536)
Returns:
int: A number of bytes represented by the input string or integer.
Exceptions:
ValueError: If the input string cannot be converted to an integer.
        TypeError: If the input is not a string or integer.
"""
if isinstance(size, int):
if size < 0:
raise ValueError("Negative size not allowed.")
return size
if not isinstance(size, str):
raise TypeError("Size must be a string or integer.")
m = re.match(r"^\s*(?P<size>(([1-9]\d+)|\d)(\.\d+)?)\s*(?P<unit>[a-z]{1,3})?\s*$", size, re.IGNORECASE)
if not m:
raise ValueError(f"Invalid size string ('{size}').")
parsed_size = float(m.group("size"))
unit_match = m.group("unit")
if unit_match:
parsed_unit = unit_match.lower()
else:
parsed_unit = "b" # default to bytes
if parsed_unit not in BYTES_UNIT:
raise ValueError(f"Invalid unit ('{parsed_unit}').")
return int(parsed_size * BYTES_UNIT[parsed_unit]) | 76cd67a0d581b79105a79bc84d66126d3201b07a | 3,654,561 |
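# Usage sketch for get_bytes() above. BYTES_UNIT is an assumed mapping from unit
# names to byte multipliers (not part of the original snippet):
BYTES_UNIT = {"b": 1, "kb": 1024, "mb": 1024 ** 2, "gb": 1024 ** 3}
assert get_bytes("3mb") == 3 * 1024 ** 2
assert get_bytes(65536) == 65536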
def port_translation_func(req: AdvancedDataTypeRequest) -> AdvancedDataTypeResponse:
"""
Convert a passed in AdvancedDataTypeRequest to a AdvancedDataTypeResponse
"""
resp: AdvancedDataTypeResponse = {
"values": [],
"error_message": "",
"display_value": "",
"valid_filter_operators": [
FilterStringOperators.EQUALS,
FilterStringOperators.GREATER_THAN_OR_EQUAL,
FilterStringOperators.GREATER_THAN,
FilterStringOperators.IN,
FilterStringOperators.LESS_THAN,
FilterStringOperators.LESS_THAN_OR_EQUAL,
],
}
if req["values"] == [""]:
resp["values"].append([""])
return resp
for val in req["values"]:
string_value = str(val)
try:
if string_value.isnumeric():
if not 1 <= int(string_value) <= 65535:
raise ValueError
resp["values"].append(
[int(string_value)]
if string_value.isnumeric()
else port_conversion_dict[string_value]
)
except (KeyError, ValueError):
resp["error_message"] = str(
f"'{string_value}' does not appear to be a port name or number"
)
break
else:
resp["display_value"] = ", ".join(
map(
lambda x: f"{x['start']} - {x['end']}"
if isinstance(x, dict)
else str(x),
resp["values"],
)
)
return resp | b8c41d8c3d3c2fa0a9e67b8ef9ff93422921e7e3 | 3,654,562 |
import subprocess
def stop(cli):
"""Wrapper function for the relevant RPC function call.
Args:
cli (str): Full path to cli binary associated with coin.
Returns:
String: String containing the command output.
"""
command = DAEMON_STOP_COMMAND.format(cli)
return subprocess.check_output(command).decode(DEFAULT_DECODE) | 4b39c08cab60017b22ceecb9d97b626e890731d0 | 3,654,563 |
from random import random
def get_two_diff_order_index(start=0, stop=1, order=True, diff=True):
"""
Returns two integers from a range, they can be:
put in order (default) or unordered
always different(default) or can be repeated
start - integer (default = 0)
stop - integer (default= 1)
order - boolean ( default= True)
"""
my_range = stop - start
first = int(my_range * random())+start
second = int(my_range * random())+start
#first = randint(start, stop)
#second = randint(start, stop)
if diff:
while first == second:
second = int( my_range * random()) + start
#second = randint(start, stop)
if order:
if first > second:
second, first = first, second
return first, second | 7bd0e17efb969ea59e7a30d8fdaae55d901a718e | 3,654,564 |
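# Usage sketch for get_two_diff_order_index() above (illustrative call):
lo, hi = get_two_diff_order_index(0, 10)   # two distinct ints in [0, 10), ordered
assert lo < hi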
import os
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = config.get_cfg()
if args.gpus is not None:
gpus = args.gpus
else:
gpus = []
gpus_str = ""
for g in gpus:
gpus_str += str(g) + ","
gpus_str = gpus_str[:-1]
os.environ['CUDA_VISIBLE_DEVICES'] = gpus_str
print(f"Config file {args.config_file}")
config_path = get_config_file(args.config_file)
cfg.merge_from_file(config_path)
cfg.merge_from_list(args.opts)
cfg.log_dir = args.log_dir
cfg.ckpt_dir = args.ckpt_dir
return cfg | de22f24da45bc87d22fa0dd0937b11647996c50b | 3,654,565 |
import math
import numpy as np
def GriewankRosenbrock(arr: np.ndarray, seed: int = 0) -> float:
"""Implementation for BBOB GriewankRosenbrock function."""
dim = len(arr)
r_x = np.matmul(_R(dim, seed, b"R"), arr)
# Slightly off BBOB documentation in order to center optima at origin.
# Should be: max(1.0, (dim**0.5) / 8.0) * r_x + 0.5 * np.ones((dim,)).
z_arr = max(1.0, (dim**0.5) / 8.0) * r_x + np.ones((dim,))
s_arr = np.zeros(dim)
for i in range(dim - 1):
s_arr[i] = 100.0 * (z_arr[i]**2 - z_arr[i + 1])**2 + (z_arr[i] - 1)**2
total = 0.0
for i in range(dim - 1):
total += (s_arr[i] / 4000.0 - math.cos(s_arr[i]))
return (10.0 * total) / (dim - 1) + 10 | 9a9ca4f043e60fb971c5212de33379c29aaade58 | 3,654,566 |
def listCurrentAuctionsByKeyword(username, keyword):
"""Listar os leilões que estão a decorrer"""
try:
valid = utils.validateTypes([keyword], [str])
if not valid:
return jsonify({'erro': 404})
auctions = db.listAuctions(keyword)
if auctions == "noResults":
db.connection.commit()
return jsonify({'Ups': 'Sem resultados para esta pesquisa!'})
db.connection.commit()
return jsonify(auctions)
except Exception as e:
db.connection.rollback()
print(e)
return jsonify({'erro': 401}) | c02c58a294b3d65821f36872dcf23e4f7abff49b | 3,654,567 |
import os
def _get_relative_maddir(maddir, port):
""" Return a relative path version of maddir
GPDB and HAWQ installations have a symlink outside of GPHOME that
links to the current GPHOME. After a DB upgrade, this symlink is updated to
the new GPHOME.
'maddir_lib', which uses the absolute path of GPHOME, is hardcoded into each
madlib function definition. Replacing the GPHOME path with the equivalent
relative path makes it simpler to perform DB upgrades without breaking MADlib.
"""
if port not in ('greenplum', 'hawq'):
# do nothing for postgres
return maddir
# e.g. maddir_lib = $GPHOME/madlib/Versions/1.9/lib/libmadlib.so
# 'madlib' is supposed to be in this path, which is the default folder
# used by GPPKG to install madlib
try:
abs_gphome, tail = maddir.split('madlib/')
except ValueError:
return maddir
link_name = 'greenplum-db' if port == 'greenplum' else 'hawq'
# Check outside $GPHOME if there is a symlink to this absolute path
# os.pardir is equivalent to ..
# os.path.normpath removes the extraneous .. from that path
rel_gphome = os.path.normpath(os.path.join(abs_gphome, os.pardir, link_name))
if os.path.islink(rel_gphome) and os.path.realpath(rel_gphome) == os.path.realpath(abs_gphome):
# if the relative link exists and is pointing to current location
return os.path.join(rel_gphome, 'madlib', tail)
else:
return maddir | 7ad76b8d44f68ebd61813a851672b4f4aa18b77d | 3,654,568 |
from typing import Dict
def hash_dict(data: Dict) -> int:
"""
Hashes a Dictionary recursively.
List values are converted to Tuples.
WARNING: Hashing nested dictionaries is expensive.
"""
cleaned_dict: Dict = {}
def _clean_dict(data: Dict) -> Dict:
d: Dict = {}
for k, v in data.items():
if isinstance(v, list) or isinstance(v, set):
d[k] = tuple(v)
elif isinstance(v, dict):
d[k] = hash_dict(v)
else:
d[k] = v
return d
cleaned_dict = _clean_dict(data)
return hash(tuple(sorted(cleaned_dict.items()))) | 42b579151c90a42fadf2b53751978eec421ea03c | 3,654,569 |
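# Usage sketch for hash_dict() above: equal nested dicts hash equally regardless
# of key order or list/set/tuple container (illustrative values only):
a = {"x": [1, 2], "y": {"z": {3}}}
b = {"y": {"z": (3,)}, "x": (1, 2)}
assert hash_dict(a) == hash_dict(b)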
import logging
import os
import time
import configparser
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from PIL import Image
import pytesseract
def sele():
    """Fetch the JSESSIONID (JSID) used for authentication.
    Return:
        The JSID string if it is obtained successfully,
        otherwise an empty string ""
"""
logger = logging.getLogger("sele.py")
logger.info("Start sele")
try:
        # PhantomJS request header settings
dcap = dict(DesiredCapabilities.PHANTOMJS)
dcap["phantomjs.page.settings.userAgent"] = (
"Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36"
)
        # Suppress PhantomJS log output here; if you need the log, see the code below
        # driver = webdriver.PhantomJS(service_log_path='/yourpath/ghostdriver.log') # write the log to a given directory
        browser = webdriver.PhantomJS(
            desired_capabilities=dcap, service_log_path=os.path.devnull)  # suppress log output
browser.set_page_load_timeout(60)
browser.set_script_timeout(60)
        # URL used during testing
# browser.get("http://ids1.seu.edu.cn/amserver/UI/Login")
browser.get("http://zccx.seu.edu.cn")
browser.set_window_size(1200, 800)
browser.get_screenshot_as_file("screen.png")
element = browser.find_element_by_css_selector(
"body > table:nth-child(2) > tbody > tr:nth-child(2) > td > table:nth-child(1) > tbody > tr:nth-child(5) > td:nth-child(4) > img")
left = int(element.location['x'])
top = int(element.location['y'])
right = int(element.location['x'] + element.size['width'])
bottom = int(element.location['y'] + element.size['height'])
im = Image.open('screen.png')
im = im.crop((left, top, right, bottom))
        # Add the line below if you choose not to add tesseract to your PATH
        # pytesseract.pytesseract.tesseract_cmd = '<full_path_to_your_tesseract_executable>'
        # Example: pytesseract.pytesseract.tesseract_cmd = 'C:\\Program Files (x86)\\Tesseract-OCR\\tesseract'
code = pytesseract.image_to_string(im)
im.close()
elem1 = browser.find_element_by_id("IDToken1")
elem2 = browser.find_element_by_id("IDToken2")
elem3 = browser.find_element_by_name("inputCode")
time.sleep(3)
conf = configparser.ConfigParser()
conf.read("spider.cfg")
elem1.send_keys(conf["CARD_INFO"]["card_no"])
elem2.send_keys(conf["CARD_INFO"]["card_passwd"])
elem3.send_keys(code)
login = browser.find_element_by_css_selector(
"body > table:nth-child(2) > tbody > tr:nth-child(2) > td > table:nth-child(1) > tbody > tr:nth-child(2) > td:nth-child(5) > img")
login.click()
jsid = ""
for i in browser.get_cookies():
if i['name'] == 'JSESSIONID':
jsid = i['value']
break
time.sleep(3)
browser.quit()
logger.info("Success sele")
return jsid
except Exception as e:
logger.exception("A error happened in running sele.py")
browser.quit()
return "" | 71f89946b5811458450de54ef70354644a135866 | 3,654,570 |
def instrument_packages_ip_template(instrument, ip_version, template_name=None):
"""
Retrieves the specified instrument package template metadata
:param instrument: instrument used to make observation
:type instrument: str
:param ip_version: ip version description here
:type ip_version: float
:param template_name: template name description goes here
:type template_name: str
:rtype: InstrumentPackage
"""
# if connexion.request.is_json:
# instrument = InstrumentEnum.from_dict(connexion.request.get_json())
if template_name:
return {template_name: get_template_metadata(template_name, ip_version)}
query = {"instrument": instrument.upper(), "version": ip_version}
fields = {"template_names": 1, "_id": 0}
templates = utils.get_fields_by_query(query, fields, 'ipCollect')
metadata = {}
for template_name in templates["template_names"]:
metadata[template_name] = get_template_metadata(template_name, ip_version)
return metadata | 46d3cd57e05a64c03411c31d2b18ca47f670036d | 3,654,571 |
from rdflib import Graph, URIRef, Literal
from rdflib.namespace import RDF, DC
def add_feature_metadata(id, description, type):
"""Generate RDF metadata for a feature
:param id: if used to identify the feature
:param description: feature description
:param type: feature type
:return: rdflib graph after loading the feature
"""
g = Graph()
feature_uri = URIRef(OPENPREDICT_NAMESPACE + 'feature/' + id)
g.add((feature_uri, RDF.type, MLS['Feature']))
g.add((feature_uri, DC.identifier, Literal(id)))
g.add((feature_uri, DC.description, Literal(description)))
g.add((feature_uri, OPENPREDICT['embedding_type'], Literal(type)))
insert_graph_in_sparql_endpoint(g)
return g | 0d4987807b3ed97baa50f8b14c588ef162b5c8ac | 3,654,572 |
import copy
def sink(input_flow_direction_raster):
"""
Creates a raster layer identifying all sinks or areas of internal drainage.
The value type for the Sink function output raster layer is floating point.
For more information, see
https://pro.arcgis.com/en/pro-app/help/data/imagery/sink-function.htm
Parameters
----------
:param input_flow_direction_raster: The input raster that shows the direction
of flow out of each cell.
The flow direction raster can be created by
running the Flow Direction function.
:return: output raster with function applied
"""
layer, input_flow_direction_raster, raster_ra = _raster_input(input_flow_direction_raster)
template_dict = {
"rasterFunction" : "GPAdapter",
"rasterFunctionArguments" : {
"toolName" : "Sink_sa",
"PrimaryInputParameterName" : "in_flow_direction_raster",
"OutputRasterParameterName" : "out_raster",
"in_flow_direction_raster" : input_flow_direction_raster
}
}
function_chain_ra = copy.deepcopy(template_dict)
function_chain_ra["rasterFunctionArguments"]["in_flow_direction_raster"] = raster_ra
return _gbl_clone_layer(layer, template_dict, function_chain_ra) | 6d1b22dacd48a0939b7822d62a4867b2b7574c42 | 3,654,573 |
import tensorflow as tf
def bad_multi_examples_per_input_estimator_out_of_range_input_refs(
export_path, eval_export_path):
"""Like the above (good) estimator, but the input_refs is out of range."""
estimator = tf.estimator.Estimator(model_fn=_model_fn)
estimator.train(input_fn=_train_input_fn, steps=1)
return util.export_model_and_eval_model(
estimator=estimator,
serving_input_receiver_fn=_serving_input_receiver_fn,
eval_input_receiver_fn=(
_bad_eval_input_receiver_fn_out_of_range_input_refs),
export_path=export_path,
eval_export_path=eval_export_path) | 539ec039451c53db72cb676881f48fbe45874dfa | 3,654,574 |
def vector_to_diagonal(v):
"""Converts a vector to a diagonal matrix with vector elements
as the diagonal elements of the matrix"""
diag_matrix = [[0 for i in range(len(v))] for j in range(len(v))]
for i in range(len(v)):
diag_matrix[i][i] = v[i]
return diag_matrix | 6cbaf54a083633a47af92acc7f69421ed68a1c0b | 3,654,575 |
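# Usage sketch for vector_to_diagonal() above (illustrative values only):
assert vector_to_diagonal([1, 2, 3]) == [[1, 0, 0], [0, 2, 0], [0, 0, 3]]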
from typing import Union
from pathlib import Path
from typing import List
def _get_filenames(path: Union[str, Path], media_type: MediaType) -> List[str]:
"""
Get filenames from a directory or a path to a file.
:param path: Path to the file or to the location that contains files.
:param media_type: Type of the media (image or video)
:example:
>>> path = "../images"
>>> _get_filenames(path, media_type=MediaType.image)
['images/4.jpeg', 'images/1.jpeg', 'images/5.jpeg', 'images/3.jpeg', 'images/2.jpeg']
"""
extensions = _get_extensions(media_type)
filenames: List[str] = []
if media_type == MediaType.camera:
raise ValueError(
"Cannot get filenames for camera. Only image and video files are supported."
)
if isinstance(path, str):
path = Path(path)
if path.is_file():
if _is_file_with_supported_extensions(path, extensions):
filenames = [path.as_posix()]
else:
raise ValueError("Extension not supported for media type")
if path.is_dir():
for filename in path.rglob("*"):
if _is_file_with_supported_extensions(filename, extensions):
filenames.append(filename.as_posix())
filenames = natsorted(filenames) # type: ignore[assignment]
if len(filenames) == 0:
raise FileNotFoundError(f"No {media_type.name} file found in {path}!")
return filenames | 953bcfce17c6db45772a8eac8890fa161c128322 | 3,654,576 |
from venusian import attach
def method(method_class):
"""Decorator to use to mark an API method.
When invoking L{Registry.scan} the classes marked with this decorator
will be added to the registry.
@param method_class: The L{Method} class to register.
"""
def callback(scanner, name, method_class):
if method_class.actions is not None:
actions = method_class.actions
else:
actions = [name]
if method_class.versions is not None:
versions = method_class.versions
else:
versions = [None]
for action in actions:
for version in versions:
scanner.registry.add(method_class,
action=action,
version=version)
attach(method_class, callback, category="method")
return method_class | 4e40d265a4a5767686f0e37b4d1adf681ce36722 | 3,654,577 |
def generic_validator(check, error_message):
"""
Validator factory
>>> v = generic_validator(is_int, "invalid int")
>>> v(6)
6
>>> v("g")
Traceback (most recent call last):
...
ValidationError: [u'invalid int']
"""
# Validator closure
def inner_validator(value, *args, **kwargs):
if not check(value):
raise ValidationError(error_message)
return value
return inner_validator | 21134ecee1d8c23b10e94181c0c1aa602ce4b76e | 3,654,578 |
def get_molec_shape(mol, conf, confId, vdwScale=1.0,
boxMargin=2.0, spacing=0.2):
"""
Get the shape of a conformer of a molecule as a grid
representation.
"""
box = Chem.ComputeConfBox(conf)
sideLen = (box[1].x-box[0].x + 2*boxMargin,
box[1].y-box[0].y + 2*boxMargin,
box[1].z-box[0].z + 2*boxMargin)
shape = rdGeometry.UniformGrid3D(2*sideLen[0],
2*sideLen[1],
2*sideLen[2],
spacing=spacing)
Chem.EncodeShape(
mol,
shape,
confId=confId,
ignoreHs=False,
vdwScale=vdwScale
)
return box, sideLen, shape | 6a7b404224a116a52d70f7ab14d4301215c1700f | 3,654,579 |
import math
import tensorflow as tf
def autoencoder(dimensions=[784, 512, 256, 64]):
"""Build a deep denoising autoencoder w/ tied weights.
Parameters
----------
dimensions : list, optional
The number of neurons for each layer of the autoencoder.
Returns
-------
x : Tensor
Input placeholder to the network
z : Tensor
Inner-most latent representation
y : Tensor
Output reconstruction of the input
cost : Tensor
Overall cost to use for training
"""
# input to the network
x = tf.placeholder(tf.float32, [None, dimensions[0]], name='x')
# Probability that we will corrupt input.
# This is the essence of the denoising autoencoder, and is pretty
# basic. We'll feed forward a noisy input, allowing our network
# to generalize better, possibly, to occlusions of what we're
# really interested in. But to measure accuracy, we'll still
# enforce a training signal which measures the original image's
# reconstruction cost.
#
# We'll change this to 1 during training
# but when we're ready for testing/production ready environments,
# we'll put it back to 0.
corrupt_prob = tf.placeholder(tf.float32, [1])
current_input = corrupt(x) * corrupt_prob + x * (1 - corrupt_prob)
# Build the encoder
encoder = []
for layer_i, n_output in enumerate(dimensions[1:]):
n_input = int(current_input.get_shape()[1])
W = tf.Variable(
tf.random_uniform([n_input, n_output],
-1.0 / math.sqrt(n_input),
1.0 / math.sqrt(n_input)))
b = tf.Variable(tf.zeros([n_output]))
encoder.append(W)
output = tf.nn.tanh(tf.matmul(current_input, W) + b)
current_input = output
# latent representation
z = current_input
encoder.reverse()
# Build the decoder using the same weights
for layer_i, n_output in enumerate(dimensions[:-1][::-1]):
W = tf.transpose(encoder[layer_i])
b = tf.Variable(tf.zeros([n_output]))
output = tf.nn.tanh(tf.matmul(current_input, W) + b)
current_input = output
# now have the reconstruction through the network
y = current_input
# cost function measures pixel-wise difference
cost = tf.sqrt(tf.reduce_mean(tf.square(y - x)))
return {'x': x, 'z': z, 'y': y,
'corrupt_prob': corrupt_prob,
'cost': cost} | d9cc8b6f2c8e7df0bc4fb580e1de20dc57f93c7a | 3,654,580 |
import numpy as np
ONE_OVER_SQRT_TWO_PI = 1.0 / np.sqrt(2.0 * np.pi)  # module-level constant assumed by the function below
def _asymptotic_expansion_of_normalized_black_call(h, t):
"""
Asymptotic expansion of
b = Φ(h+t)·exp(x/2) - Φ(h-t)·exp(-x/2)
with
h = x/s and t = s/2
which makes
b = Φ(h+t)·exp(h·t) - Φ(h-t)·exp(-h·t)
exp(-(h²+t²)/2)
= --------------- · [ Y(h+t) - Y(h-t) ]
√(2π)
with
Y(z) := Φ(z)/φ(z)
for large negative (t-|h|) by the aid of Abramowitz & Stegun (26.2.12) where Φ(z) = φ(z)/|z|·[1-1/z^2+...].
We define
r
A(h,t) := --- · [ Y(h+t) - Y(h-t) ]
t
with r := (h+t)·(h-t) and give an expansion for A(h,t) in q:=(h/r)² expressed in terms of e:=(t/h)² .
:param h:
:type h: float
:param t:
:type t: float
:return:
:rtype: float
"""
e = (t / h) * (t / h)
r = ((h + t) * (h - t))
q = (h / r) * (h / r)
# 17th order asymptotic expansion of A(h,t) in q, sufficient for Φ(h) [and thus y(h)] to have relative accuracy of 1.64E-16 for h <= η with η:=-10.
asymptotic_expansion_sum = (2.0 + q * (-6.0E0 - 2.0 * e + 3.0 * q * (1.0E1 + e * (2.0E1 + 2.0 * e) + 5.0 * q * (
-1.4E1 + e * (-7.0E1 + e * (-4.2E1 - 2.0 * e)) + 7.0 * q * (
1.8E1 + e * (1.68E2 + e * (2.52E2 + e * (7.2E1 + 2.0 * e))) + 9.0 * q * (
-2.2E1 + e * (-3.3E2 + e * (-9.24E2 + e * (-6.6E2 + e * (-1.1E2 - 2.0 * e)))) + 1.1E1 * q * (
2.6E1 + e * (5.72E2 + e * (
2.574E3 + e * (3.432E3 + e * (1.43E3 + e * (1.56E2 + 2.0 * e))))) + 1.3E1 * q * (
-3.0E1 + e * (-9.1E2 + e * (-6.006E3 + e * (-1.287E4 + e * (
-1.001E4 + e * (-2.73E3 + e * (-2.1E2 - 2.0 * e)))))) + 1.5E1 * q * (
3.4E1 + e * (1.36E3 + e * (1.2376E4 + e * (3.8896E4 + e * (
4.862E4 + e * (2.4752E4 + e * (
4.76E3 + e * (2.72E2 + 2.0 * e))))))) + 1.7E1 * q * (
-3.8E1 + e * (-1.938E3 + e * (-2.3256E4 + e * (
-1.00776E5 + e * (-1.84756E5 + e * (
-1.51164E5 + e * (-5.4264E4 + e * (
-7.752E3 + e * (
-3.42E2 - 2.0 * e)))))))) + 1.9E1 * q * (
4.2E1 + e * (2.66E3 + e * (4.0698E4 + e * (
2.3256E5 + e * (5.8786E5 + e * (
7.05432E5 + e * (4.0698E5 + e * (
1.08528E5 + e * (1.197E4 + e * (
4.2E2 + 2.0 * e))))))))) + 2.1E1 * q * (
-4.6E1 + e * (-3.542E3 + e * (
-6.7298E4 + e * (
-4.90314E5 + e * (
-1.63438E6 + e * (
-2.704156E6 + e * (
-2.288132E6 + e * (
-9.80628E5 + e * (
-2.01894E5 + e * (
-1.771E4 + e * (
-5.06E2 - 2.0 * e)))))))))) + 2.3E1 * q * (
5.0E1 + e * (
4.6E3 + e * (
1.0626E5 + e * (
9.614E5 + e * (
4.08595E6 + e * (
8.9148E6 + e * (
1.04006E7 + e * (
6.53752E6 + e * (
2.16315E6 + e * (
3.542E5 + e * (
2.53E4 + e * (
6.0E2 + 2.0 * e))))))))))) + 2.5E1 * q * (
-5.4E1 + e * (
-5.85E3 + e * (
-1.6146E5 + e * (
-1.77606E6 + e * (
-9.37365E6 + e * (
-2.607579E7 + e * (
-4.01166E7 + e * (
-3.476772E7 + e * (
-1.687257E7 + e * (
-4.44015E6 + e * (
-5.9202E5 + e * (
-3.51E4 + e * (
-7.02E2 - 2.0 * e)))))))))))) + 2.7E1 * q * (
5.8E1 + e * (
7.308E3 + e * (
2.3751E5 + e * (
3.12156E6 + e * (
2.003001E7 + e * (
6.919458E7 + e * (
1.3572783E8 + e * (
1.5511752E8 + e * (
1.0379187E8 + e * (
4.006002E7 + e * (
8.58429E6 + e * (
9.5004E5 + e * (
4.7502E4 + e * (
8.12E2 + 2.0 * e))))))))))))) + 2.9E1 * q * (
-6.2E1 + e * (
-8.99E3 + e * (
-3.39822E5 + e * (
-5.25915E6 + e * (
-4.032015E7 + e * (
-1.6934463E8 + e * (
-4.1250615E8 + e * (
-6.0108039E8 + e * (
-5.3036505E8 + e * (
-2.8224105E8 + e * (
-8.870433E7 + e * (
-1.577745E7 + e * (
-1.472562E6 + e * (
-6.293E4 + e * (
-9.3E2 - 2.0 * e)))))))))))))) + 3.1E1 * q * (
6.6E1 + e * (
1.0912E4 + e * (
4.74672E5 + e * (
8.544096E6 + e * (
7.71342E7 + e * (
3.8707344E8 + e * (
1.14633288E9 + e * (
2.07431664E9 + e * (
2.33360622E9 + e * (
1.6376184E9 + e * (
7.0963464E8 + e * (
1.8512208E8 + e * (
2.7768312E7 + e * (
2.215136E6 + e * (
8.184E4 + e * (
1.056E3 + 2.0 * e))))))))))))))) + 3.3E1 * (
-7.0E1 + e * (
-1.309E4 + e * (
-6.49264E5 + e * (
-1.344904E7 + e * (
-1.4121492E8 + e * (
-8.344518E8 + e * (
-2.9526756E9 + e * (
-6.49588632E9 + e * (
-9.0751353E9 + e * (
-8.1198579E9 + e * (
-4.6399188E9 + e * (
-1.6689036E9 + e * (
-3.67158792E8 + e * (
-4.707164E7 + e * (
-3.24632E6 + e * (
-1.0472E5 + e * (
-1.19E3 - 2.0 * e))))))))))))))))) * q)))))))))))))))))
b = ONE_OVER_SQRT_TWO_PI * np.exp((-0.5 * (h * h + t * t))) * (t / r) * asymptotic_expansion_sum
return np.abs(np.maximum(b, 0)) | 9985b36e7f0dec1877d275a23ae747d9a57c1163 | 3,654,581 |
def date_read(date_string, *, convert_to_current_timezone: bool = False):
"""Read the given date (if possible)."""
return date_parse(date_string, convert_to_current_timezone=convert_to_current_timezone) | 96f21f7fcae995a9a17f6008c8e5a4161ed971f2 | 3,654,582 |
import urllib
def encode_name(name):
"""
Encode a unicode as utf-8 and then url encode that
string. Use for entity titles in URLs.
"""
return urllib.quote(name.encode('utf-8'), safe='') | 6e9d34516613ecdf0ce94fb9cfc594de7e76b72f | 3,654,583 |
def cmp_str(element1, element2):
"""
compare number in str format correctley
"""
try:
return cmp(int(element1), int(element2))
except ValueError:
return cmp(element1, element2) | 7c8df75bc1b1ad3997db4a4d6f1b58a37c4e1dd7 | 3,654,584 |
def parse_page_file(page_raw: str, type: str, file_name: str) -> Page:
"""
FIXME: add documentation
"""
page_id = extract_page_id(file_name)
title, fields = parse_md(page_raw)
return Page(
id=page_id,
type=type,
title=title,
fields=fields,
) | bf53026374c1720cf2cdcf785e256ce3374226ce | 3,654,585 |
from sys import path
import joblib
def fetch(name):
"""
Fetches an appropriate model to perform the prediction.
:param name: model's name
:return: a trained model
"""
K.clear_session()
try:
full_weights_path = path.join(path_prefix, *load_weights()[name])
if name == 'svm':
return SVMModel(joblib.load(full_weights_path))
elif name == 'cnn':
return CNNModel(load(full_weights_path))
elif name == 'mlp':
return MLPModel(load(full_weights_path))
except KeyError:
raise ModelNotFoundError(f'Model named {name} does not exist.') | fcb82ec61b984e07ece0822c0bcf78dff451eafa | 3,654,586 |
import json
import requests
def post(text, appid=2, touser=None, toparty=None):
"""
    Send a text message to the given users/parties via the WeChat Work (qyapi.weixin.qq.com) message API.
"""
#print '=========',type(text)
if type(text) is unicode:
text = text.encode('utf8')
if not touser:
touser = []
if not toparty:
toparty = ['2']
url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token={access_token}'
url = url.format(access_token=get_access_token())
data = {"touser": "|".join(touser),
"toparty": "|".join(toparty),
"msgtype": "text",
"agentid": str(appid),
"text": {"content": text},
"safe": "0",
}
result = requests.post(url, data=json.dumps(data, ensure_ascii=False))
print result.text
return result | b2a92a274007b0502431a856457a244c12b925a9 | 3,654,587 |
import six
import codecs
def hex_encrypt(msg):
"""Hex encrypts a message.
:param bytes msg: string message to be encrypted.
:return: string for encrypted version of msg in hex.
:rtype: bytes
"""
if not cipher:
return msg
if not isinstance(msg, six.binary_type):
raise ValueError('only bytes can be encrypted')
msg = cipher.encrypt(_pad(msg))
msg = codecs.encode(msg, 'hex')
return msg | c2d913d181b8ceb33b3e7d99fc5f21b025da58ea | 3,654,588 |
import requests
def http_request(source_id, endpoint_id, args, kwargs, # pylint: disable=too-many-arguments
service_addr, auth=None):
"""Call http endpoint"""
headers = {"content-type": "application/json"}
if auth is not None:
headers["Authorization"] = basic_auth_header(auth)
payload = _create_http_message(args, endpoint_id, kwargs, source_id)
url = service_addr
if not url.startswith("http"):
url = "http://" + url
LOGGER.debug("Url: %s", url)
response = requests.post(url, data=payload, headers=headers)
return_value = None
if response.status_code < 300:
return_value = json_tricks.loads(response.content.decode("utf-8"))
return return_value, response.status_code | e259508e78aaa7bf9c663a538a2c6b6471938f5e | 3,654,589 |
import regex
async def filter_by_game_stats(opsdroid, string, room, action):
"""Match incoming messages against the current games stats."""
if room not in STAT_REGEXES.keys():
gamestats = await get_stat_names(opsdroid, room)
if not gamestats:
return []
STAT_REGEXES[room] = {"set": regex.compile(f"(?:(?:{'|'.join(['!'+s for s in gamestats])}) {MODIFIER_REGEX})",
flags=regex.IGNORECASE),
"roll": regex.compile("|".join(gamestats), flags=regex.IGNORECASE)}
stats = STAT_REGEXES[room][action].findall(string)
return stats | 4971e5567c8a1b89aa47fdaab2e42e51620f475b | 3,654,590 |
def password_provider():
"""
Provides the full password check
"""
return [(n,) for n in range(5)] | afdb188844e4b0979528b290477130313679e4df | 3,654,591 |
import qcodes as qc
def make_combiparameter(*args, **kwargs):
"""
Make a combined qcodes parameter.
Args:
*args : list of gates or parameters
(e.g. make_combiparameter("A1", "A3", station.gates.B1 ))
"""
station = qc.Station.default
parameters = []
for i in args:
if type(i) == str:
parameters.append(getattr(station.gates, i))
else:
parameters.append(i)
label = ""
for i in parameters:
label += i.label + " "
    name = kwargs.get('name', 'combi_par')
return combi_par(parameters, label, name) | 6482187dc463c67e322a281181ba827eb39eb28d | 3,654,592 |
from colorsys import hsv_to_rgb
def get_delta_fmt(delta):
"""arbitrary colour formatting of rank delta
more red for bigger losses, more green for bigger gains
"""
col = (0, 0, 0, 255)
n = abs(delta)
s = delta
if delta < 0:
sat = min(n/200 + 0.2, 1)
r, g, b = hsv_to_rgb(0, sat, 1)
col = (r, g, b, 1)
else:
s = "+"+str(n)
sat = min(n/100 + 0.2, 1)
r, g, b = hsv_to_rgb(1/3, sat, 1)
col = (r, g, b, 1)
return "(" + str(s) + ")", col | a7860df4f19632c9623c39c38ac70a76f405ae56 | 3,654,593 |
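# Usage sketch for get_delta_fmt() above (illustrative values only):
text, colour = get_delta_fmt(25)    # -> ("(+25)", greenish RGBA tuple)
text, colour = get_delta_fmt(-150)  # -> ("(-150)", reddish RGBA tuple)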
import sys
import numpy as np
def calculate_wtv(sample_values, epoch_time_interval=WTV_EPOCH_TIME, relative_to_time=None):
"""
Calculate the Wear-Time Validation (30-minute epochs) for a given sample ndarray [[time_seconds, accel_x, accel_y, accel_z]].
Based on the method by van Hees et al in PLos ONE 2011 6(7),
"Estimation of Daily Energy Expenditure in Pregnant and Non-Pregnant Women Using a Wrist-Worn Tri-Axial Accelerometer".
Accelerometer non-wear time is estimated from the standard deviation and range of each accelerometer axis,
calculated for consecutive blocks of 30 minutes.
A block was classified as non-wear time if the standard deviation was less than 3.0 mg
(1 mg = 0.00981 m*s-2) for at least two out of the three axes,
or if the value range, for at least two out of three axes, was less than 50 mg.
:param epoch_time_interval: seconds per epoch (the algorithm is defined for 30 minutes)
:param relative_to_time: None=align epochs to start of data, 0=align epochs to natural time, other=custom alignment
:returns: ndarray of [time,worn], where worn is 0 (not worn), or 1 (worn)
"""
if epoch_time_interval != WTV_EPOCH_TIME:
print('WARNING: WTV algorithm is defined for %d seconds, but currently using %d seconds' % (WTV_EPOCH_TIME, epoch_time_interval), file=sys.stderr)
# Split samples into epochs
epochs = epoch.split_into_epochs(sample_values, epoch_time_interval, relative_to_time=relative_to_time)
# Calculate each epoch
num_epochs = len(epochs)
result = np.empty((num_epochs,2))
for epoch_index in range(num_epochs):
this_epoch = epochs[epoch_index]
# Epoch start time and sample data
epoch_time = this_epoch[0,0]
samples = this_epoch[:,1:4]
# Per-axis/sample standard deviation and range
stddev = np.std(samples, axis=0)
value_range = np.ptp(samples, axis=0)
# Count axes
count_stddev_low = np.sum(stddev < WTV_STD_CUTOFF)
count_range_low = np.sum(value_range < WTV_RANGE_CUTOFF)
# Determine if worn
if count_stddev_low >= WTV_STD_MIN_AXES or count_range_low >= WTV_RANGE_MIN_AXES:
epoch_value = 0
else:
epoch_value = 1
# Result
result[epoch_index,0] = epoch_time
result[epoch_index,1] = epoch_value
return result | c829825e9875d57cbce94a704ad162349a2143c7 | 3,654,594 |
import inner_imports
from re import X
def function(default=None):
"""Docstring comes first.
Possibly many lines.
"""
# FIXME: Some comment about why this function is crap but still in production.
if inner_imports.are_evil():
# Explains why we have this if.
# In great detail indeed.
x = X()
return x.method1() # type: ignore
# This return is also commented for some reason.
return default | 180e412bdeb275a3d7ca56fd0588a565b64778fd | 3,654,595 |
import numpy as np
from scipy import optimize
def fit_pk_parms_1d(p0, x, f, pktype='pvoigt'):
"""
Performs least squares fit to find parameters for 1d analytic functions fit
to diffraction data
Required Arguments:
p0 -- (m) ndarray containing initial guesses for parameters
for the input peaktype
x -- (n) ndarray of coordinate positions
f -- (n) ndarray of intensity measurements at coordinate positions x
pktype -- string, type of analytic function that will be used to
fit the data,
    current options are "gaussian", "lorentzian", "pvoigt" (pseudo-Voigt), and
    "split_pvoigt" (split pseudo-Voigt)
Outputs:
p -- (m) ndarray containing fit parameters for the input peaktype
(see peak function help for what each parameters corresponds to)
Notes:
1. Currently no checks are in place to make sure that the guess of
parameters has a consistent number of parameters with the requested
peak type
"""
weight = np.max(f)*10. # hard coded should be changed
fitArgs = (x, f, pktype)
if pktype == 'gaussian':
p, outflag = optimize.leastsq(
fit_pk_obj_1d, p0,
args=fitArgs, Dfun=eval_pk_deriv_1d,
ftol=ftol, xtol=xtol
)
elif pktype == 'lorentzian':
p, outflag = optimize.leastsq(
fit_pk_obj_1d, p0,
args=fitArgs, Dfun=eval_pk_deriv_1d,
ftol=ftol, xtol=xtol
)
elif pktype == 'pvoigt':
lb = [p0[0]*0.5, np.min(x), 0., 0., 0., None]
ub = [p0[0]*2.0, np.max(x), 4.*p0[2], 1., 2.*p0[4], None]
fitArgs = (x, f, pktype, weight, lb, ub)
p, outflag = optimize.leastsq(
fit_pk_obj_1d_bnded, p0,
args=fitArgs,
ftol=ftol, xtol=xtol
)
elif pktype == 'split_pvoigt':
lb = [p0[0]*0.5, np.min(x), 0., 0., 0., 0., 0., None]
ub = [p0[0]*2.0, np.max(x), 4.*p0[2], 4.*p0[2], 1., 1., 2.*p0[4], None]
fitArgs = (x, f, pktype, weight, lb, ub)
p, outflag = optimize.leastsq(
fit_pk_obj_1d_bnded, p0,
args=fitArgs,
ftol=ftol, xtol=xtol
)
elif pktype == 'tanh_stepdown':
p, outflag = optimize.leastsq(
fit_pk_obj_1d, p0,
args=fitArgs,
ftol=ftol, xtol=xtol)
elif pktype == 'dcs_pinkbeam':
lb = np.array([0.0, x.min(), -100., -100.,
-100., -100., 0., 0.,
-np.inf, -np.inf, -np.inf])
ub = np.array([np.inf, x.max(), 100., 100.,
100., 100., 10., 10.,
np.inf, np.inf, np.inf])
res = optimize.least_squares(
fit_pk_obj_1d, p0,
jac='2-point',
bounds=(lb, ub),
method='trf',
args=fitArgs,
ftol=ftol,
xtol=xtol)
p = res['x']
outflag = res['success']
else:
p = p0
print('non-valid option, returning guess')
if np.any(np.isnan(p)):
p = p0
print('failed fitting, returning guess')
return p | 52dbff47fd8ad6f7727b0241bba48d2b10393a18 | 3,654,596 |
from meerschaum.utils.debug import dprint
from typing import Union
def is_pipe_registered(
pipe : Union['meerschaum.Pipe', 'meerschaum.Pipe.MetaPipe'],
pipes : dict,
debug : bool = False
):
"""
Check if a Pipe or MetaPipe is inside the pipes dictionary.
"""
ck, mk, lk = pipe.connector_keys, pipe.metric_key, pipe.location_key
if debug:
dprint(f'{ck}, {mk}, {lk}')
dprint(f'{pipe}, {pipes}')
return ck in pipes and mk in pipes[ck] and lk in pipes[ck][mk] | b3630de8316858afe9272698593a7dec7f984762 | 3,654,597 |
import jinja2
def truncate(s, length=255, killwords=True, end='...'):
"""
Wrapper for jinja's truncate that checks if the object has a
__truncate__ attribute first.
Altering the jinja2 default of killwords=False because of
https://bugzilla.mozilla.org/show_bug.cgi?id=624642, which could occur
elsewhere.
"""
if s is None:
return ''
if hasattr(s, '__truncate__'):
return s.__truncate__(length, killwords, end)
return jinja2.filters.do_truncate(smart_unicode(s), length, killwords, end) | 70c154fbfa344bd24f685f5209e2121d8aac0057 | 3,654,598 |
def tract_segmentation_single_example_lap (kdt_T_A, prototypes_T_A,sid, num_NN,T_A ):
""" step 1: tract segmentation from a single example using Jonker-Volgenant algorithm (LAPJV)
"""
E_t_filename= 'data/example/'+ str(sid) +'_'+str(tract_name)+'.trk'
print("Loading Example tract: %s" % E_t_filename)
E_t, hdr= load(E_t_filename, threshold_short_streamlines=threshold_short_streamlines)
dm_E_t= dissimilarity(E_t, prototypes_T_A,bundles_distances_mam)
#compute the NN of the example tract in order to construcse the cost matrix
NN_E_t_NN_Idx= NN (kdt_T_A, dm_E_t,num_NN)
print("Computing the cost matrix with mam distance (%s x %s) for RLAP " % (len(E_t),
len( NN_E_t_NN_Idx)))
cost_matrix = bundles_distances_mam_smarter_faster(E_t,
T_A[NN_E_t_NN_Idx])
print("Computing optimal assignmnet with LAPJV")
assignment = LinearAssignment(cost_matrix).solution
min_cost_values= cost_matrix[np.arange(len(cost_matrix)), assignment]
return NN_E_t_NN_Idx[assignment], min_cost_values, len(E_t) | cc14e598f359fc9b92995bdc3a6a98192333b800 | 3,654,599 |