content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---|
import time
def confirm_channel(bitcoind, n1, n2):
"""
Confirm that a channel is open between two nodes
"""
assert n1.id() in [p.pub_key for p in n2.list_peers()]
assert n2.id() in [p.pub_key for p in n1.list_peers()]
for i in range(10):
time.sleep(0.5)
if n1.check_channel(n2) and n2.check_channel(n1):
return True
addr = bitcoind.rpc.getnewaddress("", "bech32")
bhash = bitcoind.rpc.generatetoaddress(1, addr)[0]
n1.block_sync(bhash)
n2.block_sync(bhash)
# Last ditch attempt
return n1.check_channel(n2) and n2.check_channel(n1) | bcbf895b286b446f7bb0ad2d7890a0fa902cdbd1 | 4,655 |
def has_permissions(**perms):
"""A :func:`check` that is added that checks if the member has any of
the permissions necessary.
The permissions passed in must be exactly like the properties shown under
:class:`discord.Permissions`.
Parameters
------------
perms
An argument list of permissions to check for.
Example
---------
.. code-block:: python
@bot.command()
@commands.has_permissions(manage_messages=True)
async def test():
await bot.say('You can manage messages.')
"""
def predicate(ctx):
msg = ctx.message
ch = msg.channel
permissions = ch.permissions_for(msg.author)
return all(getattr(permissions, perm, None) == value for perm, value in perms.items())
return check(predicate) | bf9432f136db8cd2643fe7d64807194c0479d3cd | 4,656 |
def extend_params(params, more_params):
"""Extends dictionary with new values.
Args:
params: A dictionary
more_params: A dictionary
Returns:
A dictionary which combines keys from both dictionaries.
Raises:
ValueError: if dicts have the same key.
"""
for yak in more_params:
if yak in params:
raise ValueError('Key "%s" is already in dict' % yak)
params.update(more_params)
return params | 626db0ae8d8a249b8c0b1721b7a2e0f1d4c084b8 | 4,657 |
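A minimal usage sketch for the entry above (illustrative only; the variable names are hypothetical):
params = {'learning_rate': 0.1}
extended = extend_params(params, {'batch_size': 32})
print(extended)  # {'learning_rate': 0.1, 'batch_size': 32}
# Calling extend_params(params, {'batch_size': 64}) now would raise ValueError,
# since the key 'batch_size' is already present.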
import logging
def __compute_libdeps(node):
"""
Computes the direct library dependencies for a given SCons library node.
The attribute that it uses is populated by the Libdeps.py script.
"""
if getattr(node.attributes, 'libdeps_exploring', False):
raise DependencyCycleError(node)
env = node.get_env()
deps = set()
node.attributes.libdeps_exploring = True
try:
try:
for child in env.Flatten(getattr(node.attributes, 'libdeps_direct',
[])):
if not child:
continue
deps.add(child)
except DependencyCycleError as e:
if len(e.cycle_nodes) == 1 or e.cycle_nodes[0] != e.cycle_nodes[
-1]:
e.cycle_nodes.insert(0, node)
logging.error("Found a dependency cycle" + str(e.cycle_nodes))
finally:
node.attributes.libdeps_exploring = False
return deps | 93e44b55bb187ae6123e22845bd4da69b260b107 | 4,658 |
def _AccumulatorResultToDict(partition, feature, grads, hessians):
"""Converts the inputs to a dictionary since the ordering changes."""
return {(partition[i], feature[i, 0], feature[i, 1]): (grads[i], hessians[i])
for i in range(len(partition))} | 20cc895cf936749a35c42a1158c9ea6645019e7d | 4,659 |
async def create(payload: ProductIn):
"""Create new product from sent data."""
product_id = await db.add_product(payload)
apm.capture_message(param_message={'message': 'Product with %s id created.', 'params': product_id})
return ProductOut(**payload.dict(), product_id=product_id) | 77f9ef1699cba57aa8e0cfd5a09550f6d03b8f72 | 4,661 |
def get_glove_info(glove_file_name):
"""Return the number of vectors and dimensions in a file in GloVe format."""
with smart_open(glove_file_name) as f:
num_lines = sum(1 for line in f)
with smart_open(glove_file_name) as f:
num_dims = len(f.readline().split()) - 1
return num_lines, num_dims | 4fde6a034197e51e3901b22c46d946330e2e213e | 4,662 |
from typing import Dict
from typing import List
def retrieve_database_inputs(db_session: Session) -> (
Dict[str, List[RevenueRate]], Dict[str, MergeAddress], List[Driver]):
"""
Retrieve the static inputs of the model from the database
:param db_session: SQLAlchemy Database connection session
:return: level of service mapped to List of RevenueRate objects, merge addresses mapped to MergeAddress objects,
List of driver objects
"""
revenue_table = load_revenue_table_from_db(db_session)
merge_details = load_merge_details_from_db(db_session)
drivers_table = load_drivers_from_db(db_session)
return revenue_table, merge_details, drivers_table | f5242680576d7e07b87fb8fd31e26efc1b0c30f0 | 4,663 |
def _evolve_cx(base_pauli, qctrl, qtrgt):
"""Update P -> CX.P.CX"""
base_pauli._x[:, qtrgt] ^= base_pauli._x[:, qctrl]
base_pauli._z[:, qctrl] ^= base_pauli._z[:, qtrgt]
return base_pauli | 5d0529bc4bfe74a122c24069eccb20fa2b69f153 | 4,664 |
def tp_pixel_num_cal(im, gt):
""" im is the prediction result;
gt is the ground truth labelled by biologists;"""
tp = np.logical_and(im, gt)
tp_pixel_num = tp.sum()
return tp_pixel_num | 197c1f64df3430cfbb6f45413b83360a1b9c44bf | 4,665 |
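A quick illustration of the entry above (assuming numpy is imported as np, as the function's use of np.logical_and implies):
im = np.array([[1, 0], [1, 1]])  # prediction mask
gt = np.array([[1, 1], [0, 1]])  # ground-truth mask
print(tp_pixel_num_cal(im, gt))  # 2 -> two true-positive pixels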
import time
def xsg_data(year=None, month=None,
retry_count=3, pause=0.001):
"""
Fetch data on restricted-share lock-up expirations (shares becoming tradable)
Parameters
--------
year: year to query, defaults to the current year
month: unlock month, defaults to the current month
retry_count : int, default 3
number of times to retry when network or similar problems occur
pause : int, default 0
seconds to pause between repeated requests, to avoid problems caused by requests sent too close together
Return
------
DataFrame
code: stock code
name: stock name
date: unlock date
count: number of shares unlocked (in units of 10,000 shares)
ratio: proportion of total shares outstanding
"""
year = du.get_year() if year is None else year
month = du.get_month() if month is None else month
for _ in range(retry_count):
time.sleep(pause)
try:
request = Request(rv.XSG_URL%(ct.P_TYPE['http'], ct.DOMAINS['em'],
ct.PAGES['emxsg'], year, month))
lines = urlopen(request, timeout = 10).read()
lines = lines.decode('utf-8') if ct.PY3 else lines
except Exception as e:
print(e)
else:
da = lines[3:len(lines)-3]
list = []
for row in da.split('","'):
list.append([data for data in row.split(',')])
df = pd.DataFrame(list)
df = df[[1, 3, 4, 5, 6]]
for col in [5, 6]:
df[col] = df[col].astype(float)
df[5] = df[5]/10000
df[6] = df[6]*100
df[5] = df[5].map(ct.FORMAT)
df[6] = df[6].map(ct.FORMAT)
df.columns = rv.XSG_COLS
return df
raise IOError(ct.NETWORK_URL_ERROR_MSG) | 0ca7070a63ec9ee58bb590b82d9bcdb8e4801d33 | 4,666 |
def crm_ybquery_v2():
"""
CRM: look up the subId by the user's mobile phone number
:return:
"""
resp = getJsonResponse()
try:
jsonStr = request.data
# invoke the business logic
resp = {"message":"","status":200,"timestamp":1534844188679,"body":{"password":"21232f297a57a5a743894a0e4a801fc3","username":"admin"},"result":{"id":"4291d7da9005377ec9aec4a71ea837f","name":"Ronald Thompson","username":"admin","password":"","avatar":"https://gw.alipayobjects.com/zos/rmsportal/jZUIxmJycoymBprLOUbT.png","status":1,"telephone":"","lastLoginIp":"127.0.0.1","lastLoginTime":1534837621348,"creatorId":"admin","createTime":1497160610259,"deleted":0,"roleId":"admin","token":"4291d7da9005377ec9aec4a71ea837f"}}
except BaseException as e:
current_app.logger.error("=========异常============")
current_app.logger.error(e)
current_app.logger.error("=========异常============")
resp = getJsonResponse(code="101", msg="系统异常" + str(e))
return jsonify(resp) | 02b7ff4e1f44643537b4549376aa637dcdbf5261 | 4,667 |
from typing import Dict
from typing import List
from typing import Union
from pathlib import Path
from typing import Iterable
from typing import Tuple
import tqdm
import logging
def get_split_file_ids_and_pieces(
data_dfs: Dict[str, pd.DataFrame] = None,
xml_and_csv_paths: Dict[str, List[Union[str, Path]]] = None,
splits: Iterable[float] = (0.8, 0.1, 0.1),
seed: int = None,
) -> Tuple[Iterable[Iterable[int]], Iterable[Piece]]:
"""
Get the file_ids that should go in each split of a split dataset.
Parameters
----------
data_dfs : Dict[str, pd.DataFrame]
If using dataframes, a mapping of 'files', 'measures', 'chords', and 'notes' dfs.
xml_and_csv_paths : Dict[str, List[Union[str, Path]]]
If using the MusicXML ('xmls') and label csvs ('csvs'), a list of paths of the
matching xml and csv files.
splits : Iterable[float]
An Iterable of floats representing the proportion of pieces which will go into each split.
This will be normalized to sum to 1.
seed : int
A numpy random seed, if given.
Returns
-------
split_ids : Iterable[Iterable[int]]
An iterable, the length of `splits` containing the file_ids for each data point in each
split.
pieces : Iterable[Iterable[Piece]]
The loaded Pieces of each split.
"""
assert sum(splits) != 0
splits = np.array(splits) / sum(splits)
if seed is not None:
np.random.seed(seed)
indexes = []
pieces = []
if data_dfs is not None:
for i in tqdm(data_dfs["files"].index):
file_name = (
f"{data_dfs['files'].loc[i].corpus_name}/{data_dfs['files'].loc[i].file_name}"
)
logging.info("Parsing %s (id %s)", file_name, i)
dfs = [data_dfs["chords"], data_dfs["measures"], data_dfs["notes"]]
names = ["chords", "measures", "notes"]
exists = [i in df.index.get_level_values(0) for df in dfs]
if not all(exists):
for exist, name in zip(exists, names):
if not exist:
logging.warning(
"%s_df does not contain %s data (id %s).", name, file_name, i
)
continue
try:
piece = get_score_piece_from_data_frames(
data_dfs["notes"].loc[i], data_dfs["chords"].loc[i], data_dfs["measures"].loc[i]
)
pieces.append(piece)
indexes.append(i)
except Exception:
logging.exception("Error parsing index %s", i)
continue
elif xml_and_csv_paths is not None:
for i, (xml_path, csv_path) in tqdm(
enumerate(zip(xml_and_csv_paths["xmls"], xml_and_csv_paths["csvs"])),
desc="Parsing MusicXML files",
total=len(xml_and_csv_paths["xmls"]),
):
piece = get_score_piece_from_music_xml(xml_path, csv_path)
pieces.append(piece)
indexes.append(i)
# Shuffle the pieces and the df_indexes the same way
shuffled_indexes = np.arange(len(indexes))
np.random.shuffle(shuffled_indexes)
pieces = np.array(pieces)[shuffled_indexes]
indexes = np.array(indexes)[shuffled_indexes]
split_pieces = []
split_indexes = []
prop = 0
for split_prop in splits:
start = int(round(prop * len(pieces)))
prop += split_prop
end = int(round(prop * len(pieces)))
length = end - start
if length == 0:
split_pieces.append([])
split_indexes.append([])
elif length == 1:
split_pieces.append([pieces[start]])
split_indexes.append([indexes[start]])
else:
split_pieces.append(pieces[start:end])
split_indexes.append(indexes[start:end])
return split_indexes, split_pieces | d01768fddcef9428e5dd3a22592dca8dd083fc9c | 4,668 |
def calc_full_dist(row, vert, hor, N, site_collection_SM):
"""
Calculates full distance matrix. Called once per row.
INPUTS:
:param row:
integer, index of the row being processed
:param vert:
integer, number of included rows
:param hor:
integer, number of columns within radius
:param N:
integer, number of points in row
:param site_collection_SM:
site collection object, for ShakeMap data
:returns:
dict, with following keys
grid_indices- indices of points included in distance matrix
distance_matrix- full distance matrix
"""
# gathers indices for full distance matrix for each row
grid_indices = [None]*(vert*(2*hor+1))
n_grid_indices = 0
for k in range(row-vert+1, row+1):
if k == row:
for j in range(0,hor+1):
grid_indices[n_grid_indices] = j + N*k
n_grid_indices += 1
else:
for j in range(0,2*hor+1):
grid_indices[n_grid_indices] = j + N*k
n_grid_indices += 1
del grid_indices[n_grid_indices:]
distance_matrix = np.zeros([np.size(grid_indices), np.size(grid_indices)])
# Create full distance matrix for row
for k in range(0, np.size(grid_indices)):
distance_matrix[k, k:] = geodetic_distance(
site_collection_SM.lons[grid_indices[k ]], site_collection_SM.lats[grid_indices[k]],
site_collection_SM.lons[grid_indices[k:]], site_collection_SM.lats[grid_indices[k:]]).flatten()
distance_matrix = distance_matrix + distance_matrix.T
return {'grid_indices':grid_indices, 'distance_matrix':distance_matrix} | e332b3b51cf4dadb764865f7c75eb361aa0cc100 | 4,669 |
def background_upload_do():
"""Handle the upload of a file."""
form = request.form
# Is the upload using Ajax, or a direct POST by the form?
is_ajax = False
if form.get("__ajax", None) == "true":
is_ajax = True
print form.items()
# Target folder for these uploads.
# target = os.sep.join(['app', 'static', 'photo_albums', 'Test', 'Dave'])
script_dir = os.path.dirname(os.path.abspath(__file__))
target = os.sep.join([script_dir, 'static', 'photo_albums', form.items()[0][1], form.items()[1][1]])
for upload in request.files.getlist("file"):
filename = upload.filename.rsplit(os.sep)[0]
if not os.path.exists(target):
print "Creating directory:", target
os.makedirs(target)
destination = os.sep.join([target, filename])
print "Accept incoming file:", filename
print "Save it to:", destination
upload.save(destination)
# if is_ajax:
return ajax_response(True, msg="DONE!")
# else:
# return redirect(url_for('upload')) | 267608fa9c93a75ca260eb742fed9023ec350b65 | 4,670 |
def load_dict_data(selected_entities=None, path_to_data_folder=None):
"""Loads up data from .pickle file for the selected entities.
Based on the selected entities, loads data from storage,
into memory, if respective files exists.
Args:
selected_entities: A list of string entity names to be loaded.
Default is to load all available entities.
path_to_data_folder: A string specifying the absolute path to
the data folder that contains the entity dataset files.
By default, uses the built-in entity datasets.
Returns:
A dictionary mapping entity type (key) to all entity values of
that type. Values are dictionary of dictionaries.
{
'genre': {
'comedy': {
'action': {1:1},
'drama': {1:1}
},
'action': {
'thriller': {1:1}
}
}
}
Always returns a dictionary. If .pickle files of the selected entities
are not found, or if no .pickle files are found, returns an empty
dictionary.
"""
return load_entities(
selected_entities=selected_entities, from_pickle=True,
path_to_data_folder=path_to_data_folder
) | 0236d69d6ed6c663c3bba5edabd59ced9755c546 | 4,673 |
def cart_del(request, pk):
""" remove an experiment from the analysis cart and return"""
pk=int(pk) # make integer for lookup within template
analyze_list = request.session.get('analyze_list', [])
if pk in analyze_list:
analyze_list.remove(pk)
request.session['analyze_list'] = analyze_list
return HttpResponseRedirect(request.META.get('HTTP_REFERER')) | 210a0fd58d9470aa365906420f3769b57815839a | 4,674 |
def get_block_devices(bdms=None):
"""
@type bdms: list
"""
ret = ""
if bdms:
for bdm in bdms:
ret += "{0}\n".format(bdm.get('DeviceName', '-'))
ebs = bdm.get('Ebs')
if ebs:
ret += " Status: {0}\n".format(ebs.get('Status', '-'))
ret += " Snapshot Id: {0}\n".format(ebs.get('SnapshotId', '-'))
ret += " Volume Size: {0}\n".format(ebs.get('VolumeSize', '-'))
ret += " Volume Type: {0}\n".format(ebs.get('VolumeType', '-'))
ret += " Encrypted: {0}\n".format(str(ebs.get('Encrypted', '-')))
ret += " Delete on Termination: {0}\n".format(ebs.get('DeleteOnTermination', '-'))
ret += " Attach Time: {0}\n".format(str(ebs.get('AttachTime', '-')))
return ret.rstrip()
else:
return ret | bd375f988b13d8fe5949ebdc994210136acc3405 | 4,675 |
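An illustrative call for the entry above; the sample mapping mirrors the keys the function reads and is not taken from real EC2 output:
bdms = [{'DeviceName': '/dev/sda1',
'Ebs': {'Status': 'attached', 'VolumeSize': 8, 'VolumeType': 'gp2', 'Encrypted': False}}]
print(get_block_devices(bdms))
# Prints '/dev/sda1' followed by one indented line per EBS field,
# with '-' substituted for any field that is missing.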
from scipy import stats # lazy import
from pandas import DataFrame
def outlier_test(model_results, method='bonf', alpha=.05, labels=None,
order=False, cutoff=None):
"""
Outlier Tests for RegressionResults instances.
Parameters
----------
model_results : RegressionResults instance
Linear model results
method : str
- `bonferroni` : one-step correction
- `sidak` : one-step correction
- `holm-sidak` :
- `holm` :
- `simes-hochberg` :
- `hommel` :
- `fdr_bh` : Benjamini/Hochberg
- `fdr_by` : Benjamini/Yekutieli
See `statsmodels.stats.multitest.multipletests` for details.
alpha : float
familywise error rate
labels : None or array_like
If `labels` is not None, then it will be used as index to the
returned pandas DataFrame. See also Returns below
order : bool
Whether or not to order the results by the absolute value of the
studentized residuals. If labels are provided they will also be sorted.
cutoff : None or float in [0, 1]
If cutoff is not None, then the return only includes observations with
multiple testing corrected p-values strictly below the cutoff. The
returned array or dataframe can be empty if there are no outlier
candidates at the specified cutoff.
Returns
-------
table : ndarray or DataFrame
Returns either an ndarray or a DataFrame if labels is not None.
Will attempt to get labels from model_results if available. The
columns are the Studentized residuals, the unadjusted p-value,
and the corrected p-value according to method.
Notes
-----
The unadjusted p-value is stats.t.sf(abs(resid), df) where
df = df_resid - 1.
"""
if labels is None:
labels = getattr(model_results.model.data, 'row_labels', None)
infl = getattr(model_results, 'get_influence', None)
if infl is None:
results = maybe_unwrap_results(model_results)
raise AttributeError("model_results object %s does not have a "
"get_influence method." % results.__class__.__name__)
resid = infl().resid_studentized_external
if order:
idx = np.abs(resid).argsort()[::-1]
resid = resid[idx]
if labels is not None:
labels = np.asarray(labels)[idx]
df = model_results.df_resid - 1
unadj_p = stats.t.sf(np.abs(resid), df) * 2
adj_p = multipletests(unadj_p, alpha=alpha, method=method)
data = np.c_[resid, unadj_p, adj_p[1]]
if cutoff is not None:
mask = data[:, -1] < cutoff
data = data[mask]
else:
mask = slice(None)
if labels is not None:
return DataFrame(data,
columns=['student_resid', 'unadj_p', method+"(p)"],
index=np.asarray(labels)[mask])
return data | 39219cf5ad86f91cf6da15ea66dc2d18f0a371af | 4,676 |
def move(request, content_type_id, obj_id, rank):
"""View to be used in the django admin for changing a :class:`RankedModel`
object's rank. See :func:`admin_link_move_up` and
:func:`admin_link_move_down` for helper functions to incorporate in your
admin models.
Upon completion this view sends the caller back to the referring page.
:param content_type_id:
``ContentType`` id of object being moved
:param obj_id:
ID of object being moved
:param rank:
New rank of the object
"""
content_type = ContentType.objects.get_for_id(content_type_id)
obj = get_object_or_404(content_type.model_class(), id=obj_id)
obj.rank = int(rank)
obj.save()
return HttpResponseRedirect(request.META['HTTP_REFERER']) | 0a8e73d83d7d7c575a8ed5abe43524b22d701a38 | 4,677 |
def test_second_playback_enforcement(mocker, tmp_path):
"""
Given:
- A mockable test
When:
- The mockable test fails on the second playback
Then:
- Ensure that it exists in the failed_playbooks set
- Ensure that it does not exists in the succeeded_playbooks list
"""
class RunIncidentTestMock:
call_count = 0
count_response_mapping = {
1: PB_Status.FAILED, # The first playback run
2: PB_Status.COMPLETED, # The record run
3: PB_Status.FAILED # The second playback run
}
@staticmethod
def run_incident_test(*_):
# First playback run
RunIncidentTestMock.call_count += 1
return RunIncidentTestMock.count_response_mapping[RunIncidentTestMock.call_count]
filtered_tests = ['mocked_playbook']
tests = [generate_test_configuration(playbook_id='mocked_playbook',
integrations=['mocked_integration'])]
integrations_configurations = [generate_integration_configuration('mocked_integration')]
secret_test_conf = generate_secret_conf_json(integrations_configurations)
content_conf_json = generate_content_conf_json(tests=tests)
build_context = get_mocked_build_context(mocker,
tmp_path,
secret_conf_json=secret_test_conf,
content_conf_json=content_conf_json,
filtered_tests_content=filtered_tests)
mocked_demisto_client = DemistoClientMock(integrations=['mocked_integration'])
server_context = generate_mocked_server_context(build_context, mocked_demisto_client, mocker)
mocker.patch('demisto_sdk.commands.test_content.TestContentClasses.TestContext._run_incident_test',
RunIncidentTestMock.run_incident_test)
server_context.execute_tests()
assert 'mocked_playbook (Second Playback)' in build_context.tests_data_keeper.failed_playbooks
assert 'mocked_playbook' not in build_context.tests_data_keeper.succeeded_playbooks | 314cbfb4f659b34adfdafb6b1c1153c8560249b0 | 4,678 |
import re
def decode_textfield_ncr(content):
"""
Decodes the contents for CIF textfield from Numeric Character Reference.
:param content: a string with contents
:return: decoded string
"""
def match2str(m):
return chr(int(m.group(1)))
return re.sub('&#(\d+);', match2str, content) | 28bf8017869d1ad47dce4362ec2b57131f587bba | 4,679 |
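A quick illustration of the entry above: numeric character references decode back to the characters they encode.
print(decode_textfield_ncr('&#72;&#105;'))  # -> 'Hi' (chr(72), chr(105))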
def reflect_or_create_tables(options):
"""
returns a dict of classes
make 'em if they don't exist
"tables" is {'wfdisc': mapped table class, ...}
"""
tables = {}
# this list should mirror the command line table options
for table in list(mapfns.keys()) + ['lastid']:
# if options.all_tables:
fulltabnm = getattr(options, table, None)
if fulltabnm:
try:
tables[table] = ps.get_tables(session.bind, [fulltabnm])[0]
except NoSuchTableError:
print("{0} doesn't exist. Adding it.".format(fulltabnm))
tables[table] = ps.make_table(fulltabnm, PROTOTYPES[table])
tables[table].__table__.create(session.bind, checkfirst=True)
return tables | 8974f6e6299240c69cf9deffdb3efb7ba9dc771f | 4,680 |
def config_section_data():
"""Produce the default configuration section for app.config,
when called by `resilient-circuits config [-c|-u]`
"""
config_data = u"""[fn_grpc_interface]
interface_dir=<<path to the parent directory of your Protocol Buffer (pb2) files>>
#<<package_name>>=<<communication_type>>, <<secure connection type>>, <<certificate_path or google API token>>
# 'package_name' is a CSV list of length 3, where each possible value is described in the documentation
# Note: to setup, in your interface_dir, create a sub-directory that has
# the same name as your package, and copy the Protocol Buffer pb2 files
# into that directory.
#
# If the package_name was 'helloworld', your app.config would look like:
# [fn_grpc_interface]
# interface_dir=/home/admin/integrations/grpc_interface_files
# helloworld=unary, None, None"""
return config_data | cb26012ff6ad1a2dbccbbcc5ef81c7a91def7906 | 4,681 |
def color_print(path: str, color = "white", attrs = []) -> None:
"""Prints colorized text on terminal"""
colored_text = colored(
text = read_warfle_text(path),
color = color,
attrs = attrs
)
print(colored_text)
return None | c3f587d929f350c86d166e809c9a63995063cf95 | 4,683 |
def create_cluster_spec(parameters_server: str, workers: str) -> tf.train.ClusterSpec:
"""
Creates a ClusterSpec object representing the cluster.
:param parameters_server: comma-separated list of hostname:port pairs to which the parameter servers are assigned
:param workers: comma-separated list of hostname:port pairs to which the workers are assigned
:return: a ClusterSpec object representing the cluster
"""
# extract the parameter servers and workers from the given strings
ps_hosts = parameters_server.split(",")
worker_hosts = workers.split(",")
# Create a cluster spec from the parameter server and worker hosts
cluster_spec = tf.train.ClusterSpec({"ps": ps_hosts, "worker": worker_hosts})
return cluster_spec | 2b4555b68821327451c48220e64bc92ecd5f3acc | 4,684 |
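A minimal usage sketch for the entry above (the host:port values are made up):
spec = create_cluster_spec("ps0:2222", "worker0:2223,worker1:2224")
print(sorted(spec.jobs))         # ['ps', 'worker']
print(spec.num_tasks("worker"))  # 2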
def bq_client(context):
"""
Initialize and return BigQueryClient()
"""
return BigQueryClient(
context.resource_config["dataset"],
) | 839a72d82b29e0e57f5973aee418360ef6b3e2fc | 4,685 |
def longascnode(x, y, z, u, v, w):
"""Compute value of longitude of ascending node, computed as
the angle between x-axis and the vector n = (-hy,hx,0), where hx, hy, are
respectively, the x and y components of specific angular momentum vector, h.
Args:
x (float): x-component of position
y (float): y-component of position
z (float): z-component of position
u (float): x-component of velocity
v (float): y-component of velocity
w (float): z-component of velocity
Returns:
float: longitude of ascending node
"""
res = np.arctan2(y*w-z*v, x*w-z*u) # remember atan2 is atan2(y/x)
if res >= 0.0:
return res
else:
return res+2.0*np.pi | d108847fa6835bc5e3ff70eb9673f6650ddf795a | 4,686 |
def convert_to_distance(primer_df, tm_opt, gc_opt, gc_clamp_opt=2):
"""
Convert tm, gc%, and gc_clamp to an absolute distance
(tm_dist, gc_dist, gc_clamp_dist)
away from optimum range. This makes it so that all features will need
to be minimized.
"""
primer_df['tm_dist'] = get_distance(
primer_df.tm.values, tm_opt, tm_opt)
primer_df['gc_dist'] = get_distance(
primer_df.gc.values, gc_opt['min'], gc_opt['max'])
primer_df['gc_clamp_dist'] = get_distance(
primer_df.gc_clamp.values, gc_clamp_opt, gc_clamp_opt)
# primer_df.drop(['tm', 'gc', 'gc_clamp'], axis=1, inplace=True)
return primer_df | 4d556fd79c2c21877b3cb59712a923d5645b5eba | 4,689 |
import copy
def _tmap_error_detect(tmap: TensorMap) -> TensorMap:
"""Modifies tm so it returns it's mean unless previous tensor from file fails"""
new_tm = copy.deepcopy(tmap)
new_tm.shape = (1,)
new_tm.interpretation = Interpretation.CONTINUOUS
new_tm.channel_map = None
def tff(_: TensorMap, hd5: h5py.File, dependents=None):
return tmap.tensor_from_file(tmap, hd5, dependents).mean()
new_tm.tensor_from_file = tff
return new_tm | 263a16a5cb92e0a9c3d42357280eeb6d15a59773 | 4,690 |
def generate_dataset(config, ahead=1, data_path=None):
"""
Generates the dataset for training, test and validation
:param ahead: number of steps ahead for prediction
:return:
"""
dataset = config['dataset']
datanames = config['datanames']
datasize = config['datasize']
testsize = config['testsize']
vars = config['vars']
lag = config['lag']
btc = {}
# Reads numpy arrays for all sites and keep only selected columns
btcdata = np.load(data_path + 'bitcoin_price_history.npz')
for d in datanames:
btc[d] = btcdata[d]
if vars is not None:
btc[d] = btc[d][:, vars]
if dataset == 0:
return _generate_dataset_one_var(btc[datanames[0]][:, WEIGHTED_PRICE_INDEX].reshape(-1, 1), datasize, testsize, lag=lag, ahead=ahead)
# Just add more options to generate datasets with more than one variable for predicting one value
# or a sequence of values
raise NameError('ERROR: No such dataset type') | 89136efffbbd6e115b1d0b887fe7a3c904405bda | 4,691 |
def search(isamAppliance, name, check_mode=False, force=False):
"""
Search UUID for named Web Service connection
"""
ret_obj = get_all(isamAppliance)
return_obj = isamAppliance.create_return_object()
return_obj["warnings"] = ret_obj["warnings"]
for obj in ret_obj['data']:
if obj['name'] == name:
logger.info("Found Web Service connection {0} id: {1}".format(name, obj['uuid']))
return_obj['data'] = obj['uuid']
return_obj['rc'] = 0
return return_obj | f642e9e62203b490a347c21899d45968f6258eba | 4,692 |
def flask_app(initialize_configuration) -> Flask:
"""
Fixture for making a Flask instance, to be able to access application context manager.
This is not possible with a FlaskClient, and we need the context manager for creating
JWT tokens when is required.
@return: A Flask instance.
"""
flask_application = vcf_handler_api('TESTING')
flask_application.config['TESTING'] = True
flask_application.config['PROPAGATE_EXCEPTIONS'] = False
return flask_application | 265c912833025d13d06c2470443e68110ce4f60f | 4,693 |
import requests
def http_request(method, url_suffix, params=None, data=None, headers=HEADERS, safe=False):
"""
A wrapper for requests lib to send our requests and handle requests and responses better.
:type method: ``str``
:param method: HTTP method for the request.
:type url_suffix: ``str``
:param url_suffix: The suffix of the URL (endpoint)
:type params: ``dict``
:param params: The URL params to be passed.
:type data: ``str``
:param data: The body data of the request.
:type headers: ``dict``
:param headers: Request headers
:type safe: ``bool``
:param safe: If set to true will return None in case of http error
:return: Returns the http request response json
:rtype: ``dict``
"""
headers['Authorization'] = get_token()
url = BASE_URL + url_suffix
try:
res = requests.request(method, url, verify=USE_SSL, params=params, data=data, headers=headers)
# Try to create a new token
if res.status_code == 401:
headers['Authorization'] = get_token(new_token=True)
res = requests.request(method, url, verify=USE_SSL, params=params, data=data, headers=headers)
except requests.exceptions.RequestException:
return_error('Error in connection to the server. Please make sure you entered the URL correctly.')
# Handle error responses gracefully
if res.status_code not in {200, 201, 202}:
try:
result_msg = res.json()
except ValueError:
result_msg = None
reason = result_msg if result_msg else res.reason
err_msg = f'Error in API call. code:{res.status_code}; reason: {reason}'
if safe:
return None
return_error(err_msg)
return res.json() | 9fbd5123e4f1a39f5fa10fbc6a8f41db7ed1775b | 4,694 |
def FP(target, prediction):
"""
False positives.
:param target: target value
:param prediction: prediction value
:return:
"""
return ((target == 0).float() * prediction.float().round()).sum() | 9c8b21ecbc4f48b737c92fbaf73ef820fe035218 | 4,696 |
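A small worked example for the entry above (assuming import torch; round() thresholds the predictions at 0.5):
target = torch.tensor([0., 1., 0., 0.])
prediction = torch.tensor([0.9, 0.2, 0.6, 0.1])
print(FP(target, prediction))  # tensor(2.) -> two false positives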
import math
def get_angle(A, B, C):
"""
Return the angle at C (in radians) for the triangle formed by A, B, C
a, b, c are lengths
        C
       / \
    b /   \ a
     /     \
    A-------B
        c
"""
(col_A, row_A) = A
(col_B, row_B) = B
(col_C, row_C) = C
a = pixel_distance(C, B)
b = pixel_distance(A, C)
c = pixel_distance(A, B)
try:
cos_angle = (math.pow(a, 2) + math.pow(b, 2) - math.pow(c, 2)) / (2 * a * b)
except ZeroDivisionError as e:
log.warning(
"get_angle: A %s, B %s, C %s, a %.3f, b %.3f, c %.3f" % (A, B, C, a, b, c)
)
raise e
# If CA and CB are very long and the angle at C very narrow we can get an
# invalid cos_angle which will cause math.acos() to raise a ValueError exception
if cos_angle > 1:
cos_angle = 1
elif cos_angle < -1:
cos_angle = -1
angle_ACB = math.acos(cos_angle)
# log.info("get_angle: A %s, B %s, C %s, a %.3f, b %.3f, c %.3f, cos_angle %s, angle_ACB %s" %
# (A, B, C, a, b, c, pformat(cos_angle), int(math.degrees(angle_ACB))))
return angle_ACB | 30e1681bf2c065c4094b2dd909322158a9968c3c | 4,697 |
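A worked example for the entry above (assuming pixel_distance is the Euclidean distance, which is defined outside this entry): with A=(0, 0), B=(3, 0), C=(0, 4), the side lengths are a = 5, b = 4, c = 3, so cos_angle = (25 + 16 - 9) / (2 * 5 * 4) = 0.8 and the function returns math.acos(0.8) ≈ 0.6435 rad (about 36.87°).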
def single_labels(interesting_class_id):
"""
:param interesting_class_id: integer in range [0,2] to specify class
:return: number of labels for the "interesting_class"
"""
def s_l(y_true, y_pred):
class_id_true = K.argmax(y_true, axis=-1)
accuracy_mask = K.cast(K.equal(class_id_true, interesting_class_id), 'int32')
return K.cast(K.maximum(K.sum(accuracy_mask), 1), 'int32')
return s_l | d137bbd4bba4bcb19e9bc296e4cecdbd7d8effe6 | 4,698 |
def get_rdf_lables(obj_list):
"""Get rdf:labels from a given list of objects."""
rdf_labels = []
for obj in obj_list:
rdf_labels.append(obj['rdf:label'])
return rdf_labels | 2bcf6a6e8922e622de602f5956747955ea39eeda | 4,700 |
import json
def _create_model_fn(pipeline_proto, is_chief=True):
"""Creates a callable that build the model.
Args:
pipeline_proto: an instance of pipeline_pb2.Pipeline.
Returns:
model_fn: a callable that takes [features, labels, mode, params] as inputs.
"""
if not isinstance(pipeline_proto, pipeline_pb2.Pipeline):
raise ValueError('pipeline_proto has to be an instance of Pipeline.')
def _model_fn(features, labels, mode, params):
"""
Args:
features: a dict mapping from names to tensors, denoting the features.
labels: a dict mapping from names to tensors, denoting the labels.
mode: mode parameter required by the estimator.
params: additional parameters used for creating the model.
Returns:
an instance of EstimatorSpec.
"""
is_training = (tf.estimator.ModeKeys.TRAIN == mode)
tf.logging.info("Current mode is %s, is_training=%s", mode, is_training)
model = builder.build(pipeline_proto.model, is_training)
predictions = model.build_prediction(features)
# Get scaffold and variables_to_train.
scaffold = model.get_scaffold()
variables_to_train = model.get_variables_to_train()
# Compute losses. Note: variables created in build_loss are not trainable.
losses = model.build_loss(predictions, examples=features)
for name, loss in losses.items():
tf.losses.add_loss(loss)
tf.summary.scalar('loss/' + name, loss)
for loss in tf.losses.get_regularization_losses():
tf.summary.scalar(
"loss/regularization/" + '/'.join(loss.op.name.split('/')[:2]), loss)
total_loss = tf.losses.get_total_loss(add_regularization_losses=True)
train_op = None
eval_metric_ops = None
training_hooks = []
if tf.estimator.ModeKeys.TRAIN == mode:
train_config = pipeline_proto.train_config
# Create the optimizer.
learning_rate = train_config.learning_rate
global_step = tf.train.get_or_create_global_step()
if train_config.HasField('learning_rate_decay'):
learning_rate = tf.train.exponential_decay(
learning_rate,
global_step,
train_config.learning_rate_decay.decay_steps,
train_config.learning_rate_decay.decay_rate,
staircase=train_config.learning_rate_decay.staircase)
tf.summary.scalar('loss/learning_rate', learning_rate)
optimizer = training_utils.build_optimizer(
train_config.optimizer, learning_rate=learning_rate)
# Setup the replicas_hook for the SyncReplicasOptimizer.
if train_config.sync_replicas:
optimizer = tf.train.SyncReplicasOptimizer(
optimizer, replicas_to_aggregate=4)
sync_replicas_hook = optimizer.make_session_run_hook(is_chief)
training_hooks.append(sync_replicas_hook)
# Enable MovingAverageOptimizer if specified.
if train_config.HasField('moving_average_decay'):
optimizer = tf.contrib.opt.MovingAverageOptimizer(
optimizer, average_decay=train_config.moving_average_decay)
# Apply gradient multipliers.
trainable_variables = []
gradient_multipliers = {}
for var in variables_to_train:
add_to_trainable_variables = True
for multiplier in train_config.gradient_multiplier:
if var.op.name.startswith(multiplier.scope):
if var.op.name in gradient_multipliers:
tf.logging.warn('Override gradient multiplier: %s', var.op.name)
gradient_multipliers[var.op.name] = multiplier.multiplier
if multiplier.multiplier > 0:
add_to_trainable_variables = True
else:
add_to_trainable_variables = False
# Add to trainable variables.
if add_to_trainable_variables:
trainable_variables.append(var)
tf.logging.info('Variable to train: %s, %s', var.op.name,
var.get_shape())
elif var.op.name in gradient_multipliers:
del gradient_multipliers[var.op.name]
tf.logging.info('Apply gradient multipliers: \n%s',
json.dumps(gradient_multipliers, indent=2))
def transform_grads_fn(grads):
if gradient_multipliers:
grads = tf.contrib.training.multiply_gradients(
grads, gradient_multipliers)
if train_config.HasField('max_gradient_norm'):
grads = tf.contrib.training.clip_gradient_norms(
grads, max_norm=train_config.max_gradient_norm)
return grads
# The train_op is required for mode `TRAIN`.
train_op = tf.contrib.training.create_train_op(
total_loss,
optimizer,
variables_to_train=trainable_variables,
transform_grads_fn=transform_grads_fn,
summarize_gradients=True)
if train_config.HasField('moving_average_decay'):
scaffold = tf.train.Scaffold(
saver=optimizer.swapping_saver(), copy_from_scaffold=scaffold)
elif tf.estimator.ModeKeys.EVAL == mode:
# The eval_metric_ops is optional for mode `EVAL`.
eval_metric_ops = model.build_evaluation(predictions, examples=features)
elif tf.estimator.ModeKeys.PREDICT == mode:
# The predictions is required for mode `PREDICT`.
predictions.update(features)
predictions.update({'summary': tf.summary.merge_all()})
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=total_loss,
train_op=train_op,
training_hooks=training_hooks,
eval_metric_ops=eval_metric_ops,
scaffold=scaffold)
return _model_fn | f29e86a0bc1355a7cf509e57ad0262bc5a9ca1e5 | 4,701 |
def boolean_automatic(meshes, operation, **kwargs):
"""
Automatically pick an engine for booleans based on availability.
Parameters
--------------
meshes : list of Trimesh
Meshes to be booleaned
operation : str
Type of boolean, i.e. 'union', 'intersection', 'difference'
Returns
---------------
result : trimesh.Trimesh
Result of boolean operation
"""
if interfaces.blender.exists:
result = interfaces.blender.boolean(meshes, operation, **kwargs)
elif interfaces.scad.exists:
result = interfaces.scad.boolean(meshes, operation, **kwargs)
else:
raise ValueError('No backends available for boolean operations!')
return result | 7e5b1a483862bb05bb4cd78d21ec22c835f218e6 | 4,702 |
from .workflow import WorkSpec
def get_context(work=None):
"""Get a concrete Context object.
Args:
work (gmx.workflow.WorkSpec): runnable work as a valid gmx.workflow.WorkSpec object
Returns:
An object implementing the :py:class:`gmx.context.Context` interface, if possible.
Raises:
gmx.exceptions.ValueError if an appropriate context for ``work`` could not be loaded.
If work is provided, return a Context object capable of running the provided work or produce an error.
The semantics for finding Context implementations needs more consideration, and a more informative exception
is likely possible.
A Context can run the provided work if
* the Context supports can resolve all operations specified in the elements
* the Context supports DAG topologies implied by the network of dependencies
* the Context supports features required by the elements with the specified parameters,
such as synchronous array jobs.
"""
# We need to define an interface for WorkSpec objects so that we don't need
# to rely on typing and inter-module dependencies.
workspec = None
if work is not None:
if isinstance(work, WorkSpec):
workspec = work
elif hasattr(work, 'workspec') and isinstance(work.workspec,
WorkSpec):
workspec = work.workspec
else:
raise exceptions.ValueError('work argument must provide a gmx.workflow.WorkSpec.')
if workspec is not None and \
hasattr(workspec, '_context') and \
workspec._context is not None:
context = workspec._context
else:
context = Context(work=workspec)
return context | 838de2ce25dbe44c058f5360a59e48a68fa7dc2a | 4,703 |
def test_data():
"""Get the `CIFAR-10` test data."""
global _MEAN # pylint: disable=global-statement
_np.random.seed(1)
view = _skdc10.view.OfficialImageClassificationTask()
permutation = _np.random.permutation(range(10000))
if _MEAN is None:
_MEAN = view.train.x.reshape((50000 * 32 * 32, 3)).mean(axis=0)
return ((view.test.x[:10000, :][permutation, :] - _MEAN).
transpose((0, 3, 1, 2)).astype('float32'),
view.test.y[:10000][permutation].reshape((10000, 1)).astype('float32')) | e20acfc0e46dba2441b03d0d1443fc193c500e62 | 4,704 |
def normalize_key_combo(key_combo):
"""Normalize key combination to make it easily comparable.
All aliases are converted and modifier orders are fixed to:
Control, Alt, Shift, Meta
Letters will always be read as upper-case.
Due to the native implementation of the key system, Shift pressed in
certain key combinations may yield inconsistent or unexpected results.
Therefore, it is not recommended to use Shift with non-letter keys. On OSX,
Control is swapped with Meta such that pressing Command reads as Control.
Parameters
----------
key_combo : str
Key combination.
Returns
-------
normalized_key_combo : str
Normalized key combination.
"""
key, modifiers = parse_key_combo(key_combo)
if len(key) != 1 and key not in SPECIAL_KEYS:
raise TypeError(f'invalid key {key}')
for modifier in modifiers:
if modifier not in MODIFIER_KEYS:
raise TypeError(f'invalid modifier key {modifier}')
return components_to_key_combo(key, modifiers) | e242c6d9177d31c60a534e9734917c6fdf2de9f7 | 4,705 |
def shape_to_np(shape, dtype="int"):
"""
Used to convert from a shape object returned by dlib to an np array
"""
return np.array([[shape.part(i).x, shape.part(i).y] for i in range(68)], dtype=dtype) | 6d3d0205a8ac90dc8fb17b844fd5e150e25bdde1 | 4,706 |
def inet_pton(space, address):
""" Converts a human readable IP
address to its packed in_addr representation"""
n = rsocket.inet_pton(rsocket.AF_INET, address)
return space.newstr(n) | d015f76ab252e8f1f9f8f764bb7a2131f9ca9b92 | 4,707 |
def delete_routing_segmentation_maps_from_source_segment(
self,
segment_id: int,
) -> bool:
"""Delete D-NAT policies for specific source segment
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - vrf
- DELETE
- /vrf/config/maps/{srcSegmentId}
:param segment_id: Numeric id of routing segment
:type segment_id: int
:return: Returns True/False based on successful call
:rtype: bool
"""
return self._delete(
"/vrf/config/maps/{}".format(segment_id),
expected_status=[204],
return_type="bool",
) | 32064ca159928ccc0802791e161a614f3303555f | 4,708 |
def _identifier(name):
"""
:param name: string
:return: name in lower case and with '_' instead of '-'
:rtype: string
"""
if name.isidentifier():
return name
return name.lower().lstrip('0123456789. ').replace('-', '_') | fbbbc9dd3f2bc5b6e43520c0685f63a10ee95f0a | 4,710 |
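A quick illustration of the entry above:
print(_identifier('My-Module'))   # 'my_module'
print(_identifier('valid_name'))  # 'valid_name' (already a valid identifier, returned unchanged)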
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like of shape(M,)
Rank-1 array of polynomial co-efficients.
Returns
-------
out : ndarray
An array containing the complex roots of the polynomial.
Raises
------
ValueError:
When `p` cannot be converted to a rank-1 array.
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> print np.roots(coeff)
[-0.3125+0.46351241j -0.3125-0.46351241j]
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if len(p.shape) != 1:
raise ValueError,"Input must be a rank-1 array."
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0, :] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots | 02e3f37a81c84aac9ac949662ec64b85e24432c9 | 4,711 |
from typing import List
def calculate_trade_from_swaps(
swaps: List[AMMSwap],
trade_index: int = 0,
) -> AMMTrade:
"""Given a list of 1 or more AMMSwap (swap) return an AMMTrade (trade).
The trade is calculated using the first swap token (QUOTE) and last swap
token (BASE). Be aware that any token data in between will be ignored for
calculating the trade.
Examples:
[USDC -> AMPL] BASE_QUOTE pair is AMPL_USDC.
[USDC -> AMPL, AMPL -> WETH] BASE_QUOTE pair is WETH_USDC.
[USDC -> AMPL, AMPL -> WETH, WETH -> USDC] BASE_QUOTE pair is USDC_USDC.
May raise DeserializationError
"""
assert len(swaps) != 0, "Swaps can't be an empty list here"
if swaps[0].amount0_in == ZERO:
# Prevent a division by zero error when creating the trade.
# Swaps with `tokenIn` amount (<AMMSwap>.amount0_in) equals to zero are
# not expected nor supported. The function `deserialize_swap` will raise
# a DeserializationError, preventing to store them in the DB. In case
# of having a zero amount it means the db data was corrupted.
log.error(
'Failed to deserialize swap from db. First swap amount0_in is zero',
swaps=swaps,
)
raise DeserializationError('First swap amount0_in is zero.')
amm_trade = AMMTrade(
trade_type=TradeType.BUY, # AMMTrade is always a buy
base_asset=swaps[-1].token1,
quote_asset=swaps[0].token0,
amount=swaps[-1].amount1_out,
rate=Price(swaps[0].amount0_in / swaps[-1].amount1_out),
swaps=swaps,
trade_index=trade_index,
)
return amm_trade | 55071041fd0cab3fd2c0cb89f24cd9267a4e164a | 4,712 |
def tokenize(s):
"""
Tokenize a string.
Args:
s: String to be tokenized.
Returns:
A list of words as the result of tokenization.
"""
#return s.split(" ")
return nltk.word_tokenize(s) | 8dcc01364b3442539dbcc979d3238492bb7904d1 | 4,713 |
from datetime import datetime
def evaluate(request):
"""Eval view that shows how many times each entry was tracked"""
# default filter
end_date = datetime.date.today()
start_date = datetime.date(year=end_date.year, month=end_date.month - 1, day=end_date.day)
num_entries = 5
# get custom filter values from form
if request.method == 'POST':
form = PlotForm(request.POST)
if form.is_valid():
start_date = form.cleaned_data['start_date']
end_date = form.cleaned_data['end_date']
num_entries = form.cleaned_data['num_entries']
# or load empty form
else:
form = PlotForm(initial={'start_date': start_date, 'end_date': end_date, 'num_entries': num_entries})
# prepare chart data
labels = []
chart_data = []
entry_counts = most_frequent_entries(request.user, start_date, end_date, number=num_entries)
for entry, count in entry_counts.items():
labels.append(entry)
chart_data.append(count)
context = {
'form': form,
# for chart.js
'labels': labels,
'chart_label': 'Num. Entries',
'chart_data': chart_data,
'chart_title': f'Top {num_entries} Most Common Entries',
}
return render(request, 'app/eval.html', context) | 44708b65846fd9e21ebc7baf1fe0377054ae2221 | 4,714 |
def significant_pc_test(adata, p_cutoff=0.1, update=True, obsm='X_pca', downsample=50000):
"""
Parameters
----------
adata
p_cutoff
update
obsm
downsample
Returns
-------
"""
pcs = adata.obsm[obsm]
if pcs.shape[0] > downsample:
print(f'Downsample PC matrix to {downsample} cells to calculate significant PC components')
use_pcs = pd.DataFrame(pcs).sample(downsample).values
else:
use_pcs = pcs
i = 0
for i in range(use_pcs.shape[1] - 1):
cur_pc = use_pcs[:, i]
next_pc = use_pcs[:, i + 1]
p = ks_2samp(cur_pc, next_pc).pvalue
if p > p_cutoff:
break
n_components = min(i + 1, use_pcs.shape[1])
print(f'{n_components} components passed P cutoff of {p_cutoff}.')
if update:
adata.obsm[obsm] = pcs[:, :n_components]
print(f"Changing adata.obsm['X_pca'] from shape {pcs.shape} to {adata.obsm[obsm].shape}")
return n_components | c8e367c53330bcb959fb7baba9649d090de91389 | 4,716 |
def unique_hurricanes(hurdat):
"""
Returns header info for each unique hurricanes in HURDAT2-formatted text
file hurdat.
"""
#split on returns if hurdat is not a list
if not isinstance(hurdat, list):
hurdat = hurdat.split('\n')
header_rows = [parse_header(
line, line_num
) for line_num, line in enumerate(hurdat) if parse_header(
line, line_num
)]
keys = [h.keys()[0] for h in header_rows]
values = [h.values()[0] for h in header_rows]
return {k: v for k, v in zip(keys, values)} | c87561b80f6c8b70c33d64834c4d289508a2c120 | 4,718 |
def delete_models_shares_groups(id, group_id, client=None):
"""Revoke the permissions a group has on this object
Use this function on both training and scoring jobs.
Parameters
----------
id : integer
The ID of the resource that is shared.
group_id : integer
The ID of the group.
client : :class:`civis.APIClient`, optional
If not provided, an :class:`civis.APIClient` object will be
created from the :envvar:`CIVIS_API_KEY`.
Returns
-------
None
Response code 204: success
"""
return _unshare_model(id, group_id, entity_type='groups', client=client) | 59f3391e6e92fe0bf2f4c204a9da7c55a8ac8c6c | 4,720 |
def step1ddiffusionanalytical(q, dt, alpha, beta, prng=np.random, **kwargs):
"""Analytical time stepping as proposed in Jenkins, Spano arXiv:1506.06998
Uses the asymptotic normality of the death process for small times
(see Griffiths, J. Math. Bio, 1984)
"""
theta = alpha+beta
beta_ = 0.5*(theta-1.0)*dt
if beta_ == 0.0:
eta = 1.0
sigma = (2.0/(3.0*dt))**.5
else:
eta = beta_/np.expm1(beta_)
# calculation can sometimes give negative numbers due to numerical precision
factor = max(0, 2.0*eta/dt *(1.0 + eta/(eta+beta_)-2.0*eta))
sigma = max((eta+beta_) * factor**.5 / beta_, 1e-16)
mu = 2.0*eta/dt
m = max(int(round(prng.normal(mu, sigma))), 0)
l = prng.binomial(m, q)
qnew = prng.beta(alpha+l, beta+m-l)
return qnew | ae1034488250a7a0afc184878496cd656b239016 | 4,721 |
def no_vtk():
""" Checks if VTK is installed and the python wrapper is functional """
global _vtk_version
return _vtk_version is None | 654dfd0f10a36bbfd3e46c5a93f84a9234e8c0ca | 4,722 |
def get_request_list(flow_list: list) -> list:
"""
Convert a flow list into a request list. In mitmproxy, "flow" is the umbrella term for a request and its response; this function extracts only the requests.
:param flow_list: list of flows
:return: list of requests
"""
req_list = []
for flow in flow_list:
request = flow.get("request")
req_list.append(request)
return req_list | a70e0120ef2be88bd0644b82317a2a0748352c6c | 4,723 |
from typing import Tuple
import logging
def query_total_production(start_date, end_date) -> Tuple[int]:
"""Total count of semi production on the given time interval"""
semi_count = None
fg_count = None
try:
with stSession() as s:
semi_count = (
s.query(ProductionScan)
.filter(
sa.and_(
ProductionScan.date >= start_date,
ProductionScan.date <= end_date,
)
)
.count()
)
fg_count = (
s.query(StorageScan)
.filter(
sa.and_(
StorageScan.date >= start_date,
StorageScan.date <= end_date,
)
)
.count()
)
except sa.exc.OperationalError as e:
logging.error(f"Operational error occured\n{e}")
return None
except Exception as e:
logging.error("Unknown Error", exc_info=True)
return None
finally:
s.close()
return (semi_count, fg_count) | 4ecf7b2e70feaa75456550deca6a5b8a326adc11 | 4,724 |
import pytz
def add_fields(_, level, event_dict):
""" Add custom fields to each record. """
now = dt.datetime.now()
event_dict['timestamp'] = TZ.localize(now, True).astimezone(pytz.utc).isoformat()
event_dict['level'] = level
if session:
event_dict['session_id'] = session.get('session_id')
if request:
try:
event_dict['ip_address'] = request.headers['X-Forwarded-For'].split(',')[0].strip()
except Exception:
event_dict['ip_address'] = 'unknown'
return event_dict | 3efbffc2808a048fde80a3655e28417c39f2ad04 | 4,725 |
def Smith_set(A,P,params,election_ID,printing_wanted=False):
"""
Compute and return a list of the candidates in the Smith set.
This is the smallest set of candidates such that every candidate in the
Smith set beats every candidate not in the Smith set in one-on-one contests.
In this implementation, "a beats b" if at least half the voters prefer a to b.
Thus, a beats b and vice versa if they are tied; this gives probably the most
reasonable notion for a Smith set when there are ties.
The algorithm uses the fact that the Smith set will be the *last*
strongly connected component discovered by the usual DFS SCC algorithm.
Here A = set of alternatives (candidates), and
P = profile (dict mapping ballots to counts).
"""
if printing_wanted:
print "%s: Computing Smith set."%election_ID
pref = pairwise_prefs(A,P,params) # pref[(i,j)] gives number preferring i to j
n = number_of_ballots_in_profile(P)
stack = []
in_stack = set()
index = 0 # DFS node counter
I = { } # gives indices of vertics
L = { } # gives lowlinks of vertices
for a in A:
if not I.has_key(a): # Start a DFS at each node we haven't seen yet
(index,scc)=Smith_aux(a,A,index,I,L,stack,in_stack,pref,n)
scc = sorted(scc)
if printing_wanted:
print indent+"Smith set is: "+string.join(scc)
return scc | eb71ee5ae402d732a3bea804aad5b39fe3bd92a2 | 4,726 |
from typing import Callable
from typing import Coroutine
from typing import Any
def run_async_from_thread(func: Callable[..., Coroutine[Any, Any, T_Retval]], *args) -> T_Retval:
"""
Call a coroutine function from a worker thread.
:param func: a coroutine function
:param args: positional arguments for the callable
:return: the return value of the coroutine function
"""
try:
asynclib = _local.current_async_module
except AttributeError:
raise RuntimeError('This function can only be run from an AnyIO worker thread')
return asynclib.run_async_from_thread(func, *args) | 829a9008e8aa058b66cb637db71f8f8eb8499374 | 4,727 |
def check_tensor_shape(tensor_tf, target_shape):
""" Return a Tensorflow boolean graph that indicates whether
sample[features_key] has the specified target shape. Only check
not None entries of target_shape.
:param tensor_tf: Tensor to check shape for.
:param target_shape: Target shape to compare tensor to.
:returns: True if shape is valid, False otherwise (as TF boolean).
"""
result = tf.constant(True)
for i, target_length in enumerate(target_shape):
if target_length:
result = tf.logical_and(
result,
tf.equal(tf.constant(target_length), tf.shape(tensor_tf)[i]))
return result | 8b9938c67f2e3655f9ff4dac08261fb6e5803af2 | 4,728 |
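A minimal usage sketch for the entry above (assuming TensorFlow 2.x eager execution; None entries in the target shape are skipped):
t = tf.zeros([2, 128, 3])
ok = check_tensor_shape(t, (None, 128, 3))
print(bool(ok))  # True -> the checked dimensions match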
def LabelAddressPlus(ea, name, force=False, append_once=False, unnamed=False, nousername=False, named=False, throw=False):
"""
Label an address with name (forced) or an alternative_01
:param ea: address
:param name: desired name
:param force: force name (displace existing name)
:param append_once: append `name` if not already ending with `name`
:param named: [str, callable(addr, name)] name for things with existing usernames
:return: success as bool
"""
def ThrowOnFailure(result):
if not result and throw:
raise RuntimeError("Couldn't label address {:x} with \"{}\"".format(ea, name))
return result
def MakeUniqueLabel(name, ea=idc.BADADDR):
fnLoc = idc.get_name_ea_simple(name)
if fnLoc == idc.BADADDR or fnLoc == ea:
return name
fmt = "%s_%%i" % name
for i in range(100000):
tmpName = fmt % i
fnLoc = idc.get_name_ea_simple(tmpName)
if fnLoc == idc.BADADDR or fnLoc == ea:
return tmpName
return ""
if nousername:
unnamed = nousername
if ea < idc.BADADDR:
if HasUserName(ea):
if named:
if callable(named):
_name = idc.get_name(ea)
_name = named(ea, _name, name)
else:
name = named
elif unnamed:
return
fnName = idc.get_name(ea)
if append_once:
if not fnName.endswith(name):
name += fnName
else:
return ThrowOnFailure(False)
fnLoc = idc.get_name_ea_simple(name)
if fnLoc == idc.BADADDR:
return ThrowOnFailure(idc.set_name(ea, name, idc.SN_NOWARN))
elif fnLoc == ea:
return ThrowOnFailure(True)
else:
if force:
idc.set_name(fnLoc, "", idc.SN_AUTO | idc.SN_NOWARN)
idc.Wait()
return ThrowOnFailure(idc.set_name(ea, name, idc.SN_NOWARN))
else:
name = MakeUniqueLabel(name, ea)
return ThrowOnFailure(idc.set_name(ea, name, idc.SN_NOWARN))
else:
print("0x0%0x: Couldn't label %s, BADADDR" % (ea, name))
return False | 4772fa25c482eb10abdfea6aa9542f50827c9346 | 4,729 |
def do_match(station1, station2, latitude, elevation, distance):
"""
Perform the match between two stations.
Do initial latitude check to speed up the test
(not longitude as this isn't a constant distance)
Return probabilities for elevation, separation and Jaccard Index
:param Station Class station1:
:param Station Class station2:
:returns:
list of 3 probabilities [elev, dist, jaccard]
"""
# latitude - pre check to make quicker
if np.abs(station1.lat - station2.lat) > LATITUDE_THRESHOLD:
return False
# elevation
height = np.abs(station1.elev - station2.elev)
if height < (ELEVATION_THRESHOLD*4):
height_Pr = np.exp(-1.0 * height / ELEVATION_THRESHOLD)
else:
height_Pr = 0
# latitude & longitude
distance, bearing = utils.get_dist_and_bearing([station1.lat, station1.lon],[station2.lat, station2.lon])
if distance < (DISTANCE_THRESHOLD*4):
dist_Pr = np.exp(-1.0 * distance / DISTANCE_THRESHOLD)
else:
dist_Pr = 0.
# Jaccard Index on name - remove all whitespace
jac_Pr = jaccard(station1.name.strip(), station2.name.strip())
# Jaccard Index on METAR call sign
if station1.call != "" and station2.call != "":
jac_Pr_metar = jaccard(station1.call, station2.call)
# name matching
return [height_Pr, dist_Pr, jac_Pr] | 078d04117363087a512449497713c487bc1180e4 | 4,730 |
def rotation_matrix_from_vectors(vec1, vec2):
""" Find the rotation matrix that aligns vec1 to vec2
Args
----
vec1 (numpy.ndarray): A 3d "source" vector
vec2 (numpy.ndarray): A 3d "destination" vector
Returns
-------
numpy.ndarray: A transform matrix (3x3) which when applied to vec1, aligns it with vec2.
"""
a, b = (vec1 / np.linalg.norm(vec1)).reshape(3), (vec2 / np.linalg.norm(vec2)).reshape(3)
v = np.cross(a, b)
c = np.dot(a, b)
s = np.linalg.norm(v)
kmat = np.array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]])
rotation_matrix = np.eye(3) + kmat + kmat.dot(kmat) * ((1 - c) / (s ** 2))
return rotation_matrix | 9568378e309c5da6e6dffee4788e07eb0c2ea189 | 4,731 |
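A quick check of the entry above (assuming numpy as np): rotating the x-axis onto the y-axis.
R = rotation_matrix_from_vectors(np.array([1.0, 0.0, 0.0]), np.array([0.0, 1.0, 0.0]))
print(np.allclose(R @ np.array([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0]))  # True
# Note: the formula divides by s**2, so it breaks down for parallel (or anti-parallel) inputs.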
def audio(src, type="audio/ogg", other_attr={}):
"""
add audio file
args:
src <str> : source file
type <str> : type of audio file
other_attr <dict> : other attributes
"""
return f"""
<audio {_parse_attr(other_attr)}>
<source src="{src}" type="{type}">
</audio>
""".strip() | 3ccd8aea6d7257c46336bb81184cf4b7f379624e | 4,733 |
def test_triangle(dim):
"""
Tests if dimensions can come from a triangle.
dim is a list or tuple of the three dimensions
"""
dim = [int(x) for x in dim]
dim.sort()
if dim[0] + dim[1] > dim[2]:
return True
else:
return False | fc5bc8f7d3830da0ae8692d7cf65a72bcfe2ba7d | 4,734 |
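A quick illustration of the entry above:
print(test_triangle((3, 4, 5)))   # True  (3 + 4 > 5)
print(test_triangle((1, 2, 10)))  # False (1 + 2 <= 10)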
from typing import List
def arg_parser(data: str):
"""parse "x[a1, a2, a3], y[k1=a1, a2, k3=a3], z"
nested [] are ignored.
"""
res: List[NameWithAttrs] = _ARG_WITH_ATTR_PARSER.parse(data)
return res | fa530584a96829944562d2c08bdfed34bfa3eec4 | 4,735 |
def _get_resource(span):
"""Get resource name for span"""
if "http.method" in span.attributes:
route = span.attributes.get("http.route")
return (
span.attributes["http.method"] + " " + route
if route
else span.attributes["http.method"]
)
return span.name | 71b4d2e568350ccfb436bbff6e7a2cff1f3cb251 | 4,736 |
def get_draw_title(kdata):
"""根据typ值,返回相应的标题,如 上证指数(日线)
参数:kdata: KData实例
返回:一个包含stock名称的字符串,可用作绘图时的标题
"""
if not kdata:
return ""
query = kdata.getQuery()
stock = kdata.getStock()
if stock.isNull():
return ""
s1 = ''
if query.kType == KQuery.KType.DAY:
s1 = u' (日线)'
elif query.kType == KQuery.KType.WEEK:
s1 = u' (周线)'
elif query.kType == KQuery.KType.MONTH:
s1 = u' (月线)'
elif query.kType == KQuery.KType.QUARTER:
s1 = u' (季线)'
elif query.kType == KQuery.KType.HALFYEAR:
s1 = u' (半年线)'
elif query.kType == KQuery.KType.YEAR:
s1 = u' (年线)'
elif query.kType == KQuery.KType.MIN:
s1 = u' (1分钟线)'
elif query.kType == KQuery.KType.MIN5:
s1 = u' (5分钟线)'
elif query.kType == KQuery.KType.MIN15:
s1 = u' (15分钟线)'
elif query.kType == KQuery.KType.MIN30:
s1 = u' (30分钟线)'
elif query.kType == KQuery.KType.MIN60:
s1 = u' (60分钟线)'
name = stock.name
if stock.code == "":
stitle = "Block(%s) %s" % (stock.id, name) + s1
else:
stitle = stock.market + stock.code + ' ' + name + s1
return stitle | 7c661b63cedb477224d7f5ea9d7c182108f801a5 | 4,737 |
def _B(slot):
"""Convert slot to Byte boundary"""
return slot*2 | 97f13e9fd99989a83e32f635193a0058656df68b | 4,738 |
import torch
def nll(perm, true):
"""
perm: (n, n) or (s, n, n)
true: (n)
"""
n = true.size(-1)
# i = torch.arange(n, device=perm.device)
# j = true.to(perm.device)
# print("perm.nll:", perm.size(), true.size())
elements = perm.cpu()[..., torch.arange(n), true]
# elements = perm.cpu()[torch.arange(n), true]
nll = -torch.sum(torch.log2(elements.to(perm.device)))
if perm.dim() == 3: # normalize by number samples
nll = nll / perm.size(0)
# print("nll", nll)
return nll | a63c95e814529539ecd964f4309ea96f78cfcbb1 | 4,739 |
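# A minimal usage sketch: a uniform 3x3 "soft permutation" assigns probability
# 1/3 to every true index, so the NLL is 3 * log2(3) ≈ 4.755 bits.
perm = torch.full((3, 3), 1.0 / 3.0)
true = torch.tensor([0, 1, 2])
print(nll(perm, true))  # ≈ tensor(4.7549)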
def _peaks_colors_from_points(points, colors=None, points_per_line=2):
"""
Returns a VTK scalar array containing colors information for each one of
the peaks according to the policy defined by the parameter colors.
Parameters
----------
points : (N, 3) array or ndarray
points coordinates array.
colors : None or string ('rgb_standard') or tuple (3D or 4D) or
array/ndarray (N, 3 or 4) or array/ndarray (K, 3 or 4) or
array/ndarray(N, ) or array/ndarray (K, )
If None a standard orientation colormap is used for every line.
If one tuple of color is used. Then all streamlines will have the same
color.
If an array (N, 3 or 4) is given, where N is equal to the number of
points. Then every point is colored with a different RGB(A) color.
If an array (K, 3 or 4) is given, where K is equal to the number of
lines. Then every line is colored with a different RGB(A) color.
If an array (N, ) is given, where N is the number of points then these
are considered as the values to be used by the colormap.
If an array (K,) is given, where K is the number of lines then these
are considered as the values to be used by the colormap.
points_per_line : int (1 or 2), optional
number of points per peak direction.
Returns
-------
color_array : vtkDataArray
vtk scalar array with name 'colors'.
colors_are_scalars : bool
indicates whether or not the colors are scalars to be interpreted by a
colormap.
global_opacity : float
returns 1 if the colors array doesn't contain opacity otherwise -1.
"""
num_pnts = len(points)
num_lines = num_pnts // points_per_line
colors_are_scalars = False
global_opacity = 1
if colors is None or colors == 'rgb_standard':
# Automatic RGB colors
colors = np.asarray((0, 0, 0))
color_array = numpy_to_vtk_colors(np.tile(255 * colors, (num_pnts, 1)))
elif type(colors) is tuple:
global_opacity = 1 if len(colors) == 3 else -1
colors = np.asarray(colors)
color_array = numpy_to_vtk_colors(np.tile(255 * colors, (num_pnts, 1)))
else:
colors = np.asarray(colors)
if len(colors) == num_lines:
pnts_colors = np.repeat(colors, points_per_line, axis=0)
if colors.ndim == 1: # Scalar per line
color_array = numpy_support.numpy_to_vtk(pnts_colors,
deep=True)
colors_are_scalars = True
elif colors.ndim == 2: # RGB(A) color per line
global_opacity = 1 if colors.shape[1] == 3 else -1
color_array = numpy_to_vtk_colors(255 * pnts_colors)
elif len(colors) == num_pnts:
if colors.ndim == 1: # Scalar per point
color_array = numpy_support.numpy_to_vtk(colors, deep=True)
colors_are_scalars = True
elif colors.ndim == 2: # RGB(A) color per point
global_opacity = 1 if colors.shape[1] == 3 else -1
color_array = numpy_to_vtk_colors(255 * colors)
color_array.SetName('colors')
return color_array, colors_are_scalars, global_opacity | 7abc5be4739164dc225081ec321d1cb591f74bae | 4,740 |
def epi_reg(epi, t1, t1brain, out='epi_reg', **kwargs):
"""Wrapper for the ``epi_reg`` command.
:arg epi: Input EPI image
:arg t1: Input wholehead T1 image
:arg t1brain: Input brain extracted T1 image
    :arg out: Output name
    All other keyword arguments are converted into ``epi_reg`` command-line
    options.
    """
asrt.assertIsNifti(epi)
asrt.assertIsNifti(t1)
asrt.assertIsNifti(t1brain)
valmap = {
'nofmapreg' : wutils.SHOW_IF_TRUE,
'noclean' : wutils.SHOW_IF_TRUE,
'v' : wutils.SHOW_IF_TRUE,
}
cmd = ['epi_reg', '--epi='+epi, '--t1='+t1, '--t1brain='+t1brain, '--out='+out]
cmd += wutils.applyArgStyle('--=',
valmap=valmap,
singlechar_args=True,
**kwargs)
return cmd | 1d19f0efcfb4fcfc7293f294978d11811861a06b | 4,741 |
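# A hedged usage sketch (assumes the three NIfTI files exist and pass the
# assertions above; the wrapper only builds the command list, it does not run it):
cmd = epi_reg('func.nii.gz', 't1.nii.gz', 't1_brain.nii.gz', out='func2struct')
# cmd begins with ['epi_reg', '--epi=func.nii.gz', '--t1=t1.nii.gz', ...]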
import pathlib
import json
def load_towns():
"""Sample of Wikipedia dataset that contains informations about Toulouse, Paris, Lyon and
Bordeaux.
Examples
--------
>>> from pprint import pprint as print
>>> from cherche import data
>>> towns = data.load_towns()
>>> print(towns[:3])
[{'article': 'Paris (French pronunciation: \u200b[paʁi] (listen)) is the '
'capital and most populous city of France, with an estimated '
'population of 2,175,601 residents as of 2018, in an area of more '
'than 105 square kilometres (41 square miles).',
'id': 0,
'title': 'Paris',
'url': 'https://en.wikipedia.org/wiki/Paris'},
{'article': "Since the 17th century, Paris has been one of Europe's major "
'centres of finance, diplomacy, commerce, fashion, gastronomy, '
'science, and arts.',
'id': 1,
'title': 'Paris',
'url': 'https://en.wikipedia.org/wiki/Paris'},
{'article': 'The City of Paris is the centre and seat of government of the '
'region and province of Île-de-France, or Paris Region, which has '
'an estimated population of 12,174,880, or about 18 percent of '
'the population of France as of 2017.',
'id': 2,
'title': 'Paris',
'url': 'https://en.wikipedia.org/wiki/Paris'}]
"""
with open(pathlib.Path(__file__).parent.joinpath("towns.json"), "r") as towns_json:
return json.load(towns_json) | 72aa393cfc40db5f254059d78679ea5615f494d2 | 4,742 |
def nonce_initialization(params: InitializeNonceParams) -> TransactionInstruction:
"""Generate an instruction to initialize a Nonce account.
Args:
params: The nonce initialization params.
Returns:
The instruction to initialize the nonce account.
"""
return TransactionInstruction.from_solders(ssp.initialize_nonce_account(params.to_solders())) | 99fc70fd7965443b508923013a988f96ecf7b222 | 4,743 |
import pandas as pd
def to_weeknr(date=''):
    """
    Transforms a date string YYYYMMDD into the corresponding ISO week number (e.g. 20200713 becomes w29)
"""
week_nr = pd.to_datetime(date).to_pydatetime().isocalendar()[1]
return f"w{week_nr}" | f9699e735be8d92e4340a23464ee54247c355ffd | 4,744 |
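# Example from the docstring:
print(to_weeknr('20200713'))  # -> 'w29'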
def build_logisticregression(X_loc, y_loc, args):
"""finds best parameters for logistic regression"""
Printer(colored('(training) ', 'green') +
'searching for best parameters for logistic regression')
# specify parameters and distributions to sample from
param_dist = {"C": np.logspace(-9, 3, 13),
"solver": ['newton-cg', 'lbfgs', 'liblinear', 'sag'],
"dual": [False],
"tol": np.logspace(-9, 3, 13)
}
clf = LogisticRegression(penalty='l2')
random_search = RandomizedSearchCV(clf, param_distributions=param_dist, scoring='accuracy', n_iter=int(args.iter), n_jobs=-1, refit=True, cv=3)
random_search.fit(X_loc, y_loc)
acc = random_search.cv_results_['mean_test_score']
filename = 'cv/logisticregression_' + str(np.mean(acc)) + '.pkl'
# save model
savemodel(random_search, filename)
# save best params
filename_param = 'cv/logisticregression_param_' + str(np.mean(acc)) + '.json'
saveparams(random_search.best_params_, filename_param)
return random_search | f63f67bc9debd2adccac39910b29ed705498dd4b | 4,745 |
import re
def load_data(experiments,
remove_outlier=True,
peptides=["A5cons",
"A6cons",
"phage_ctl_0",
"phage_ctl_1",
"phage_ctl_2",
"phage_ctl_4",
"phage_ctl_5",
"phage_ctl_6",
"phage_ctl_7",
"phage_ctl_8",
"phage_ctl_9"]):
"""
Convenience function that allows one to load a whole bunch of experiments,
with different peptides, into a single data frame.
experiments should be a list of dictionaries of the following form:
[{"protein":"hA6",
"name_in_file":"hA6_4.3",
"Kd":45,
"prot_conc":4.2,
"probe_conc":4.2,
"data_file":"13_main-collection.txt",
"plate_file":"13_plate-layout.xlsx"},...]
remove_outlier: whether or not to look for outlier points and remove them
when averaging technical reps
peptides: list of peptides. these are used to build regular expressions
to match peptides in each data file. It looks for an exact match
at the start of the string, allowing any trailing characters.
NOTE: this could lead to problems if you had peptides with names
like pep10, pep100.
"""
pep_patterns = [re.compile(f"{p}") for p in peptides]
proteins = set([e["protein"] for e in experiments])
times_pep_was_seen = dict([(protein,dict([(p,0) for p in peptides]))
for protein in proteins])
all_df = []
for expt in experiments:
df, _ = read_file(expt["data_file"],expt["plate_file"])
df = df[df.protein == expt["name_in_file"]]
peptide_Kd_scalar = get_peptide_Kd_scalar(Kd=expt["Kd"],
Mt=expt["prot_conc"],
Xt=expt["probe_conc"])
peps_in_df = np.unique(df.peptide)
for p in peps_in_df:
for pattern in pep_patterns:
if pattern.match(p):
pep_df = df[df.peptide == p]
plates = np.unique(pep_df.plate)
protein = expt["protein"]
peptide = pattern.pattern
for plate in plates:
times_pep_was_seen[protein][peptide] += 1
single_rep = pep_df[pep_df.plate == plate]
fit_df = average_tech_reps(single_rep,remove_outlier=remove_outlier)
fit_df["protein"] = protein
fit_df["peptide"] = peptide
fit_df["rep_number"] = times_pep_was_seen[protein][peptide]
fit_df["Kd_scalar"] = peptide_Kd_scalar
fit_df["plate_file"] = expt["plate_file"]
fit_df["data_file"] = expt["data_file"]
fit_df["name_in_file"] = expt["name_in_file"]
fit_df["plate_number"] = plate
all_df.append(fit_df)
break
return pd.concat(all_df) | b9d7c7be8e0bbe5f5aee785cc0b525d9a57acc8b | 4,746 |
def get_lines(clearance):
"""
Add lines per reference well interval between the closest points on the
reference well and the offset well and color them according to the
calculated Separation Factor (SF) between the two wells at these points.
Parameters
----------
clearance: welleng.clearance object
Returns
-------
lines: vedo.Lines object
A vedo.Lines object colored by the object's SF values.
"""
assert VEDO, "ImportError: try pip install welleng[easy]"
c = clearance.SF
start_points, end_points = clearance.get_lines()
lines = Lines(start_points, end_points).cmap('hot_r', c, on='cells')
lines.addScalarBar(title='SF')
return lines | 2ec0ef039647b9c72219989d00b3e92092a79c16 | 4,747 |
import hashlib
def generate_md5_hash(filepath):
"""Returns md5 hash of file.
Args:
filepath: str. Absolute path to the file.
Returns:
str. Hexadecimal hash of specified file.
"""
m = hashlib.md5()
with python_utils.open_file(filepath, 'rb', encoding=None) as f:
while True:
buf = f.read(HASH_BLOCK_SIZE)
if not buf:
break
m.update(buf)
return m.hexdigest() | d615d9ec14b79eac72168db616664f5878ca8e21 | 4,748 |
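# A minimal usage sketch (HASH_BLOCK_SIZE and python_utils.open_file are assumed
# to be defined elsewhere in the original module, as used above):
digest = generate_md5_hash('/tmp/example.txt')
# e.g. 'd41d8cd98f00b204e9800998ecf8427e' for an empty file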
def status():
"""Return status."""
return jsonify(STATUS) | de396fdf35e42a36ed40b294a26645efba29c27a | 4,749 |
from typing import Optional
def get_entitlement(account_id: Optional[str] = None,
customer_id: Optional[str] = None,
entitlement_id: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEntitlementResult:
"""
    Returns the requested Entitlement resource.

    Possible error codes:
    * PERMISSION_DENIED: The customer doesn't belong to the reseller.
    * INVALID_ARGUMENT: Required request parameters are missing or invalid.
    * NOT_FOUND: The customer entitlement was not found.

    Return value: The requested Entitlement resource.
"""
__args__ = dict()
__args__['accountId'] = account_id
__args__['customerId'] = customer_id
__args__['entitlementId'] = entitlement_id
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('google-native:cloudchannel/v1:getEntitlement', __args__, opts=opts, typ=GetEntitlementResult).value
return AwaitableGetEntitlementResult(
association_info=__ret__.association_info,
commitment_settings=__ret__.commitment_settings,
create_time=__ret__.create_time,
name=__ret__.name,
offer=__ret__.offer,
parameters=__ret__.parameters,
provisioned_service=__ret__.provisioned_service,
provisioning_state=__ret__.provisioning_state,
purchase_order_id=__ret__.purchase_order_id,
suspension_reasons=__ret__.suspension_reasons,
trial_settings=__ret__.trial_settings,
update_time=__ret__.update_time) | 8cc10901b90a05a4bc0089758ce297c54af48569 | 4,750 |
def skip_to_home(fxn):
""" Skips past page straight to home page if logged in
"""
@wraps(fxn)
def skipped_page_fxn(*arg, **kwargs):
if session.get('logged_in'):
return redirect(url_for('home'))
else:
return fxn(*arg, **kwargs)
return skipped_page_fxn | 9edbbc186caa93046d17c179610a9c1309f281db | 4,751 |
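# A hedged usage sketch (assumes a Flask app object `app`, a 'home' endpoint,
# and a 'login.html' template exist in the surrounding project):
@app.route('/login')
@skip_to_home
def login():
    return render_template('login.html')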
from pathlib import Path
from typing import List, Optional
def get_all_paths_from_directory(directory: Path, recursive: bool, paths: Optional[List[Path]] = None) -> List[Path]:
"""
Gets a list of file paths for all files in the given directory (and its subdirectories if recursive is true)
:param directory: The starting directory to get file paths from
:param recursive: Whether files in subdirectories should be included
:param paths: The list that file paths will be added to
:return: A list of file paths from the given directory (and subdirectories if recursive is true)
"""
    if paths is None:  # avoid the shared mutable-default pitfall
        paths = []
    directories = []
for file in directory.iterdir():
# If the file is a subdirectory and we are processing subdirectories, add it to the list for later processing
if file.is_dir():
if recursive:
directories.append(file)
else: # If the file is just a normal file then add it to the paths list
paths.append(file)
# If we are processing subdirectories then go through all the subdirectories and process them
if recursive:
for file in directories:
get_all_paths_from_directory(file, recursive, paths)
return paths | 95f26d94ff1656fa5e4c656ecf3e424bf29f21b0 | 4,752 |
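# A minimal usage sketch: collect every file path under the current directory.
all_files = get_all_paths_from_directory(Path('.'), recursive=True)
print(len(all_files))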
from collections import defaultdict
def check_contigs_for_dupes(matches):
"""check for contigs that match more than 1 UCE locus"""
node_dupes = defaultdict(list)
for node in matches:
node_dupes[node] = len(set(matches[node]))
dupe_set = set([node for node in node_dupes if node_dupes[node] > 1])
return dupe_set | f20ab684388e38b51e193567b14a2a610d87f227 | 4,753 |
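# A minimal usage sketch: contig1 hits two distinct UCE loci, so it is flagged.
matches = {"contig1": ["uce-1", "uce-2"], "contig2": ["uce-3", "uce-3"]}
print(check_contigs_for_dupes(matches))  # {'contig1'}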
def substitute(P, x0, x1, V=0):
"""
Substitute a variable in a polynomial array.
Args:
P (Poly) : Input data.
x0 (Poly, int) : The variable to substitute. Indicated with either unit
variable, e.g. `x`, `y`, `z`, etc. or through an integer
matching the unit variables dimension, e.g. `x==0`, `y==1`,
`z==2`, etc.
x1 (Poly) : Simple polynomial to substitute `x0` in `P`. If `x1` is an
polynomial array, an error will be raised.
Returns:
(Poly) : The resulting polynomial (array) where `x0` is replaced with
`x1`.
Examples:
>>> x,y = cp.variable(2)
>>> P = cp.Poly([y*y-1, y*x])
>>> print(cp.substitute(P, y, x+1))
[q0^2+2q0, q0^2+q0]
With multiple substitutions:
>>> print(cp.substitute(P, [x,y], [y,x]))
[q0^2-1, q0q1]
"""
x0,x1 = map(Poly, [x0,x1])
dim = np.max([p.dim for p in [P,x0,x1]])
dtype = chaospy.poly.typing.dtyping(P.dtype, x0.dtype, x1.dtype)
P, x0, x1 = [chaospy.poly.dimension.setdim(p, dim) for p in [P,x0,x1]]
if x0.shape:
x0 = [x for x in x0]
else:
x0 = [x0]
if x1.shape:
x1 = [x for x in x1]
else:
x1 = [x1]
# Check if substitution is needed.
valid = False
C = [x.keys[0].index(1) for x in x0]
for key in P.keys:
if np.any([key[c] for c in C]):
valid = True
break
if not valid:
return P
dims = [tuple(np.array(x.keys[0])!=0).index(True) for x in x0]
dec = is_decomposed(P)
if not dec:
P = decompose(P)
P = chaospy.poly.dimension.dimsplit(P)
shape = P.shape
P = [p for p in chaospy.poly.shaping.flatten(P)]
for i in range(len(P)):
for j in range(len(dims)):
if P[i].keys and P[i].keys[0][dims[j]]:
P[i] = x1[j].__pow__(P[i].keys[0][dims[j]])
break
P = Poly(P, dim, None, dtype)
P = chaospy.poly.shaping.reshape(P, shape)
P = chaospy.poly.collection.prod(P, 0)
if not dec:
P = chaospy.poly.collection.sum(P, 0)
return P | dd176877f8663e7efb3ae99babf29726dbda025b | 4,754 |
def munkres(costs):
"""
Entry method to solve the assignment problem.
costs: list of non-infinite values entries of the cost matrix
[(i,j,value)...]
"""
solver = Munkres(costs)
return solver.munkres() | 583dfc977c8f97fd5a3c4c82e21ae6626f4a763b | 4,755 |
import torch
def compute_mean_std(dataset):
"""
    Accumulate per-channel running mean and std of DCT coefficients over a dataset.
    On combining standard deviations, see
    https://stats.stackexchange.com/questions/25848/how-to-sum-a-standard-deviation
"""
# global_mean = np.zeros((3 * 64), dtype=np.float64)
# global_var = np.zeros((3 * 64), dtype=np.float64)
n_items = 0
s = RunningStatistics()
for image_fname in dataset:
dct_file = np.load(fs.change_extension(image_fname, ".npz"))
y = torch.from_numpy(dct_file["dct_y"])
cb = torch.from_numpy(dct_file["dct_cb"])
cr = torch.from_numpy(dct_file["dct_cr"])
dct = torch.stack([y, cb, cr], dim=0).unsqueeze(0).float()
dct = sd2(dct)[0]
s.update(dct)
# dct = to_numpy()
# global_mean += dct.mean(axis=(1, 2))
# global_var += dct.std(axis=(1, 2)) ** 2
# n_items += 1
return s.mean, s.std | 83f10fc58e83b41a542fbd088895304b0d0521b5 | 4,756 |
def test_clean_connections_p0(monkeypatch):
"""Add a connection, fake a closed thread and make sure it is removed."""
db_disconnect_all()
class mock_connection():
def __init__(self) -> None: self.value = _MOCK_VALUE_1
def close(self): self.value = None
def mock_connect(*args, **kwargs): return mock_connection()
monkeypatch.setattr(database, 'connect', mock_connect)
db_connect(_MOCK_DBNAME, _MOCK_CONFIG)
monkeypatch.setitem(database._connections, _MOCK_CONFIG['host'], {_MOCK_DBNAME: {1234: None}})
_clean_connections()
assert database._connections[_MOCK_CONFIG['host']][_MOCK_DBNAME].get(1234, None) is None | 9c8c7155566170a3598edcb8a9d7441630545522 | 4,757 |
def add(request):
"""
Add contact information.
**Templates:**
* ``rolodex/add.html``
**Template Variables:**
* form
* results: the list of similar names to allow user to check for dupes
* name: the new name that is submitted
"""
results = []
name = None
if request.method == 'POST':
form = NameForm(request.POST)
if form.is_valid():
request.session['post_data'] = request.POST
# search to see if contact already exists
name = form.cleaned_data['name']
results = Alias.objects.filter(name=name)
if not results:
return HttpResponseRedirect('../add-proceed/')
else:
form = NameForm()
return render_to_response('rolodex/add.html', {
'form': form,
'results': results,
'name': name},
RequestContext(request, {}),
) | b0fdb73f2362dc0a82d46529727cfb3b0093b8e0 | 4,758 |
def convert_total (letter1,number1, letter2, number2):
"""
Description
-----------
Converting the letter of a column and the number of a line from an exceldata to a range
Context
----------
is called in wrapp_ProcessUnits and wrapp_SystemData
Parameters
----------
letter1 : String, "A", "B" etc.
number1 : Integer
letter2 : String, "A", "B" etc.
number2 : Integer
    Returns
    -------
    tuple of two ranges (row indices, column indices) spanning the given cell block
"""
Range = range (convert_numbers(number1), convert_numbers(number2)+1), range(convert_letters(letter1)-1, convert_letters(letter2))
return(Range) | 51cf6480d92fa1d23841dd5605d024548837df5c | 4,759 |
def scale_facet_list(facet_list, scale):
"""
Scale list of facets by the given scaling factor
"""
new_facet_list = []
for facet in facet_list:
new_facet_list.append(scale_facet(facet, scale))
return new_facet_list | 1b1d34803db191b94fc082685718c08895e2ba28 | 4,760 |
def move_lines_to_index(uwline_index_to, lineno, uwlines, lines):
"""Method moves all lines in the list to the proper index of uwlines and
update lineno on these lines. This is useful when you want to change the
order of code lines. But note: it is not updating lineno on other lines
@:returns positions (indexes) from original source where
lines are taken from
"""
# saving positions of imports here, that will be used for restoring 'lineno'
lineno_where_line_was_taken_from = list()
for line in lines:
lineno_where_line_was_taken_from.append(line.lineno)
for token in line.tokens:
# here we will restore correct lineno for moved lines
token.node.lineno = lineno
# hack to remove newlines between imports that we moved to top
pytree_utils.SetNodeAnnotation(token.node,
pytree_utils.Annotation.NEWLINES, 0)
lineno += get_lineno_delta(token)
# need to update lineno on import lines to have consistency
lineno += 1
# filtering moved values and removing them from uwlines
uwlines[:] = [line for line in uwlines if line not in lines]
uwlines[uwline_index_to:uwline_index_to] = lines
return lineno_where_line_was_taken_from | e96f3b9da77468a31275e6255cd08ffa9309fc60 | 4,761 |
def birch(V, E0, B0, BP, V0):
"""
From Intermetallic compounds: Principles and Practice, Vol. I: Principles
Chapter 9 pages 195-210 by M. Mehl. B. Klein, D. Papaconstantopoulos paper downloaded from Web
case where n=0
"""
E = (E0
+ 9.0/8.0*B0*V0*((V0/V)**(2.0/3.0) - 1.0)**2
+ 9.0/16.0*B0*V0*(BP-4.)*((V0/V)**(2.0/3.0) - 1.0)**3)
return E | 6515e2b0b78dfcdc1d7743f3d5a7010fce920aea | 4,762 |
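# Sanity check: at V == V0 both correction terms vanish and the energy is E0.
assert birch(V=10.0, E0=-5.0, B0=0.5, BP=4.0, V0=10.0) == -5.0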
from typing import Set
from typing import Tuple
def debloat(edges: set, nodes: int, threshold: tuple = (0.95, 0.95)) -> Set[Tuple[str, str]]:
"""Remove nodes with inflow and/or ourflow > threshold"""
df = pd.DataFrame(list(edges), columns=["source", "target"])
checkpoint_shape = df.shape[0]
df_inflow = df.groupby("target").count().reset_index().rename(columns={"source": "inflow"})
df_outflow = df.groupby("source").count().reset_index().rename(columns={"target": "outflow"})
df = df.merge(df_inflow, on="target", how="left")
df = df.merge(df_outflow, on="source", how="left")
df["inflow_ratio"] = df["inflow"] / nodes
df["outflow_ratio"] = df["outflow"] / nodes
df = df[(df["inflow_ratio"] <= threshold[0]) & (df["outflow_ratio"] <= threshold[1])]
print(f"{checkpoint_shape - df.shape[0]} edges removed")
df.drop(["outflow", "inflow", "outflow_ratio", "inflow_ratio"], axis=1, inplace=True)
return set(tuple(i) for i in df.values.tolist()) | 5be2dec388086b10409a3de008f357540019c5cf | 4,763 |
def result(jid):
""" Displays a job result.
Args:
jid (str): The job id.
"""
job = q.fetch_job(jid)
statuses = {
'queued': 202,
'started': 202,
'finished': 200,
'failed': 500,
'job not found': 404,
}
if job:
job_status = job.get_status()
result = job.result
else:
job_status = 'job not found'
result = None
resp = {
'status': statuses[job_status],
'job_id': jid,
'job_status': job_status,
'result': result}
return jsonify(**resp) | 2919be693949dd4e873834530565fd28aefcf5d5 | 4,764 |
from typing import Callable
def fd_nabla_1(
x: np.ndarray,
fun: Callable,
delta_vec: np.ndarray,
) -> np.ndarray:
"""Calculate FD approximation to 1st order derivative (Jacobian/gradient).
Parameters
----------
x: Parameter vector, shape (n_par,).
fun: Function returning function values. Scalar- or vector-valued.
delta_vec: Step size vector, shape (n_par,).
Returns
-------
nabla_1:
The FD approximation to the 1st order derivatives.
Shape (n_par, ...) with ndim > 1 if `f_fval` is not scalar-valued.
"""
# parameter dimension
n_par = len(x)
nabla_1 = []
for ix in range(n_par):
delta_val = delta_vec[ix]
delta = delta_val * unit_vec(dim=n_par, ix=ix)
fp = fun(x + delta / 2)
fm = fun(x - delta / 2)
nabla_1.append((fp - fm) / delta_val)
return np.array(nabla_1) | 32363e04bbd22627c7e5c21e02b48154dbfc030a | 4,765 |
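# A minimal usage sketch (assumes numpy is imported as np and that the helper
# unit_vec(dim, ix) used above returns the ix-th standard basis vector of
# length dim). The gradient of f(x) = x0**2 + 3*x1 at [1, 2] is [2, 3].
grad = fd_nabla_1(np.array([1.0, 2.0]),
                  lambda x: x[0] ** 2 + 3 * x[1],
                  np.array([1e-5, 1e-5]))
print(np.round(grad, 4))  # approximately [2. 3.]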
def get_ref_len_from_bam(bam_path, target_contig):
"""
Fetch the length of a given reference sequence from a :py:class:`pysam.AlignmentFile`.
Parameters
----------
bam_path : str
Path to the BAM alignment
target_contig : str
The name of the contig for which to recover haplotypes.
Returns
-------
end_pos : int
The 1-indexed genomic position at which to stop considering variants.
"""
bam = pysam.AlignmentFile(bam_path)
end = bam.lengths[bam.get_tid(target_contig)]
bam.close()
return end | e80cb3c50f4408b2a614621ff3d688852931e75b | 4,766 |
import pandas as pd
def vstd(df, n=10):
    """
    Volume standard deviation over n bars, vstd(10).
    VSTD = STD(Volume, N) = [∑(Volume - MA(Volume, N))^2 / N]^0.5
"""
_vstd = pd.DataFrame()
_vstd['date'] = df.date
_vstd['vstd'] = df.volume.rolling(n).std(ddof=1)
return _vstd | 97b448d00bcbe89d17339f9ed1155786d9ccd0ab | 4,767 |
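# A minimal usage sketch (assumes a DataFrame with 'date' and 'volume' columns);
# the first n-1 rows are NaN until the rolling window fills.
df = pd.DataFrame({"date": pd.date_range("2021-01-01", periods=12),
                   "volume": [100, 120, 90, 110, 130, 95, 105, 115, 125, 100, 110, 120]})
print(vstd(df, n=10).tail(3))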