content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
---|---|---|
import uuid
def is_uuid_like(val):
"""Returns validation of a value as a UUID.
:param val: Value to verify
:type val: string
:returns: bool
.. versionchanged:: 1.1.1
Support non-lowercase UUIDs.
"""
try:
return str(uuid.UUID(val)).replace("-", "") == _format_uuid_string(val)
except (TypeError, ValueError, AttributeError):
return False | fc0b9618ede3068fe5946948dfbe655e64b27ba8 | 3,653,815 |
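A minimal sketch of the _format_uuid_string helper that the function above relies on (not included in this row; the exact normalisation steps are an assumption), plus a quick usage check:
def _format_uuid_string(string):
    # strip urn:/uuid: prefixes and braces, drop dashes, lowercase
    return (string.replace("urn:", "")
                  .replace("uuid:", "")
                  .strip("{}")
                  .replace("-", "")
                  .lower())

print(is_uuid_like(str(uuid.uuid4())))   # True
print(is_uuid_like("not-a-uuid"))        # False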
from typing import List
from typing import Tuple
def merge_overlapped_spans(spans: List[Tuple[int, int]]) -> List[Tuple[int, int]]:
"""
Merge overlapping spans.
Spans are half-open (start, end) intervals, assumed to be sorted by start position
(only the most recently merged span is checked for overlap).
Parameters
----------
spans: input list of spans
Returns
-------
merged spans
"""
span_sets = list()
for span in spans:
span_set = set(range(span[0], span[1]))
if not span_sets:
span_sets.append(span_set)
elif span_sets[-1] & span_set:
if span_set - span_sets[-1]:
span_sets[-1] = span_sets[-1] | span_set
else:
span_sets.append(span_set)
merged_spans = list()
for span_set in span_sets:
merged_spans.append((min(span_set), max(span_set) + 1))
return merged_spans | 0ea7f2a730274f7a98f25b8df22754ec79e8fce7 | 3,653,817 |
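A quick usage check, assuming the spans are half-open [start, end) intervals sorted by start position:
print(merge_overlapped_spans([(0, 3), (2, 5), (7, 9)]))   # [(0, 5), (7, 9)]
print(merge_overlapped_spans([(1, 4), (2, 3)]))           # [(1, 4)]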
import networkx as nx
import pandas as pd
def network(dataframe, author_col_name, target_col_name, source_col_name=None):
"""
This function runs a Network analysis on the dataset provided.
:param dataframe: DataFrame containing the data on which to conduct the activity analysis.
It must contain at least an *author*, a *target* and a *source* column.
:type dataframe: pandas.DataFrame
:param author_col_name: Name of the column containing the authors of the entries.
:type author_col_name: str
:param target_col_name: Name of the column containing the targets of the relationship that the network analysis is
supposed to be exploring.
:type target_col_name: str
:param source_col_name: Name of the column containing the sources of the relationships that the network analysis is
supposed to be exploring.
:type source_col_name: str
:return: Object of type network containing a *dataframe* field and a *graph* one.
"""
graph = _network_from_dataframe(dataframe, author_col_name, target_col_name, source_col_name)
no_edges = []
for u, v, weight in graph.edges.data("weight"):
if weight == 0:
no_edges.append((u, v))
graph.remove_edges_from(no_edges)
degrees = nx.degree_centrality(graph)
nodes = pd.DataFrame.from_records([degrees]).transpose()
nodes.columns = ["centrality"]
return Network(nodes, graph) | edb2942e1e92cad64609819994a4e10b1de85497 | 3,653,818 |
from tempfile import gettempdir
def get_cert_and_update_domain(
zappa_instance,
lambda_name,
api_stage,
domain=None,
manual=False,
):
"""
Main cert installer path.
"""
try:
create_domain_key()
create_domain_csr(domain)
get_cert(zappa_instance)
create_chained_certificate()
with open("{}/signed.crt".format(gettempdir())) as f:
certificate_body = f.read()
with open("{}/domain.key".format(gettempdir())) as f:
certificate_private_key = f.read()
with open("{}/intermediate.pem".format(gettempdir())) as f:
certificate_chain = f.read()
if not manual:
if domain:
if not zappa_instance.get_domain_name(domain):
zappa_instance.create_domain_name(
domain_name=domain,
certificate_name=domain + "-Zappa-LE-Cert",
certificate_body=certificate_body,
certificate_private_key=certificate_private_key,
certificate_chain=certificate_chain,
certificate_arn=None,
lambda_name=lambda_name,
stage=api_stage,
)
print(
"Created a new domain name. Please note that it can take up to 40 minutes for this domain to be created and propagated through AWS, but it requires no further work on your part."
)
else:
zappa_instance.update_domain_name(
domain_name=domain,
certificate_name=domain + "-Zappa-LE-Cert",
certificate_body=certificate_body,
certificate_private_key=certificate_private_key,
certificate_chain=certificate_chain,
certificate_arn=None,
lambda_name=lambda_name,
stage=api_stage,
)
else:
print("Cerificate body:\n")
print(certificate_body)
print("\nCerificate private key:\n")
print(certificate_private_key)
print("\nCerificate chain:\n")
print(certificate_chain)
except Exception as e:
print(e)
return False
return True | 8ce4d06af0d923165dbbe4c6cbb7617f8e20557f | 3,653,819 |
def _ww3_ounp_contents(run_date, run_type):
"""
:param str run_type:
:param run_date: :py:class:`arrow.Arrow`
:return: ww3_ounp.inp file contents
:rtype: str
"""
start_date = (
run_date.format("YYYYMMDD")
if run_type == "nowcast"
else run_date.shift(days=+1).format("YYYYMMDD")
)
run_hours = {"nowcast": 24, "forecast": 36, "forecast2": 30}
output_interval = 600 # seconds
output_count = int(run_hours[run_type] * 60 * 60 / output_interval)
contents = f"""$ WAVEWATCH III NETCDF Point output post-processing
$
$ First output time (YYYYMMDD HHmmss), output increment (s), number of output times
{start_date} 000000 {output_interval} {output_count}
$
$ All points defined in ww3_shel.inp
-1
$ File prefix
$ number of characters in date
$ netCDF4 output
$ one file, max number of points to process
$ tables of mean parameters
$ WW3 global attributes
$ time,station dimension order
$ WMO standard output
SoG_ww3_points_
8
4
T 100
2
0
T
6
"""
return contents | fda73d25c39c5bd46d791e6745fa72a0285edcdc | 3,653,820 |
import logging
def EMLP(rep_in,rep_out,group,ch=384,num_layers=3):
""" Equivariant MultiLayer Perceptron.
If the ch argument is an int, uses the hands-off uniform_rep heuristic.
If the ch argument is a representation, uses this representation for the hidden layers.
Individual layer representations can be set explicitly by using a list of ints or a list of
representations, rather than using the same one for each hidden layer.
Args:
rep_in (Rep): input representation
rep_out (Rep): output representation
group (Group): symmetry group
ch (int or list[int] or Rep or list[Rep]): number of channels in the hidden layers
num_layers (int): number of hidden layers
Returns:
Module: the EMLP objax module."""
logging.info("Initing EMLP (Haiku)")
rep_in =rep_in(group)
rep_out = rep_out(group)
# Parse ch as a single int, a sequence of ints, a single Rep, a sequence of Reps
if isinstance(ch,int): middle_layers = num_layers*[uniform_rep(ch,group)]
elif isinstance(ch,Rep): middle_layers = num_layers*[ch(group)]
else: middle_layers = [(c(group) if isinstance(c,Rep) else uniform_rep(c,group)) for c in ch]
# assert all((not rep.G is None) for rep in middle_layers[0].reps)
reps = [rep_in]+middle_layers
# logging.info(f"Reps: {reps}")
network = Sequential(
*[EMLPBlock(rin,rout) for rin,rout in zip(reps,reps[1:])],
Linear(reps[-1],rep_out)
)
return network | aa4a1b1286ac1c96bedfe82813d9d24f36aabe96 | 3,653,821 |
from gzip import GzipFile
from io import BytesIO
def decompress(data):
""" Decompress data in one shot.
"""
return GzipFile(fileobj=BytesIO(data), mode='rb').read() | db32cb2b9e2ddeb3a38901460d0882ceee9cab9e | 3,653,822 |
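A round-trip usage check; gzip.compress is from the standard library:
import gzip
payload = b"hello world" * 10
assert decompress(gzip.compress(payload)) == payload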
import re
def str_to_rgb(arg):
"""Convert an rgb string 'rgb(x,y,z)' to a list of ints [x,y,z]."""
return list(
map(int, re.match(r'rgb\((\d+),\s*(\d+),\s*(\d+)\)', arg).groups())
) | f8920373d5941fb231c1ae0d732fd04558615bc3 | 3,653,823 |
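A quick usage check; note the regex requires the exact 'rgb(r,g,b)' form and raises AttributeError on anything else:
print(str_to_rgb("rgb(255, 128, 0)"))   # [255, 128, 0]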
import paddle
def vshift(x, shifts=0):
"""shift batch of images vertically"""
return paddle.roll(x, int(shifts*x.shape[2]), axis=2) | cb00948cb58d3c2c13628d44cc36e6cd2ab487ee | 3,653,824 |
def index():
"""Shows book titles and descriptions"""
tagid = request.query.tagid
books = []
if tagid:
try:
tag = Tag.get(tagid)
books = tag.books.all()
except Tag.DoesNotExist: pass
if not books: books = Book.all().order_by("title")
return dict(books=books) | ae1fb3502f75a09577da489fe2488cbe78f699f7 | 3,653,825 |
def generate_file_storage_name(file_uri: str, suffix: str) -> str:
"""
Generate a filename using the hash of the file contents and some provided suffix.
Parameters
----------
file_uri: str
The URI to the file to hash.
suffix: str
The suffix to append to the hash as a part of the filename.
Returns
-------
dst: str
The name of the file as it should be on Google Cloud Storage.
"""
hash = hash_file_contents(file_uri)
return f"{hash}-{suffix}" | 08087e86e1f70e0820cf9e3263c7a419de13ffcc | 3,653,828 |
def mullerlyer_parameters(illusion_strength=0, difference=0, size_min=0.5, distance=1):
"""Compute Parameters for Müller-Lyer Illusion.
Parameters
----------
illusion_strength : float
The strength of the arrow shapes in biasing the perception of lines of unequal lengths. A positive sign
represents the bottom arrows pointing outwards and upper arrows pointing inwards.
A negative sign represents the bottom arrows pointing inwards and upper arrows pointing outwards.
difference : float
The objective length difference of the horizontal lines.
Specifically, the real difference of upper horizontal line relative to the lower horizontal line. E.g.,
if ``difference=1``, the upper line will be 100% longer, i.e., 2 times longer than
the lower line. A negative sign reflects the converse, where ``difference=-1``
will result in the lower line being 100% longer than the upper line.
size_min : float
Length of lower horizontal line.
distance : float
Distance between the upper and lower horizontal lines.
Returns
-------
dict
Dictionary of parameters of the Müller-Lyer illusion.
"""
parameters = _ponzo_parameters_topbottom(difference=difference, size_min=size_min, distance=distance)
length = size_min/2
if difference >= 0:
angle = {"Top": -illusion_strength, "Bottom": illusion_strength}
else:
angle = {"Top": illusion_strength, "Bottom": -illusion_strength}
for which in ["Top", "Bottom"]:
for side in ["Left", "Right"]:
if side == "Left":
coord, _, _ = _coord_line(x1=parameters[which + "_x1"], y1=parameters[which + "_y1"], length=length, angle=angle[which])
else:
coord, _, _ = _coord_line(x1=parameters[which + "_x2"], y1=parameters[which + "_y2"], length=length, angle=-angle[which])
x1, y1, x2, y2 = coord
for c in ["1", "2"]:
parameters["Distractor_" + which + side + c + "_x1"] = x1
parameters["Distractor_" + which + side + c + "_y1"] = y1
parameters["Distractor_" + which + side + c + "_x2"] = x2
if c == "1":
parameters["Distractor_" + which + side + c + "_y2"] = y2
else:
parameters["Distractor_" + which + side + c + "_y2"] = y2 - 2 * (y2 - y1)
parameters.update({"Illusion": "MullerLyer",
"Illusion_Strength": illusion_strength,
"Illusion_Type": "Congruent" if illusion_strength > 0 else "Incongruent",
"Distractor_Length": length})
return parameters | 61631be407aa25608e1321f7e87e030bca9fa90d | 3,653,829 |
def filter_for_corsi(pbp):
"""
Filters given dataframe for goal, shot, miss, and block events
:param pbp: a dataframe with column Event
:return: pbp, filtered for corsi events
"""
return filter_for_event_types(pbp, {'Goal', 'Shot', 'Missed Shot', 'Blocked Shot'}) | 9add922fe3aa4ded63b4032b8fe412bbc5611f3e | 3,653,830 |
from typing import Dict
from typing import Tuple
import json
import hashlib
def upload(msg: Dict, public_key: bytes,
ipns_keypair_name: str = '') -> Tuple[str, str]:
"""Upload encrypted string to IPFS.
This can be manifest files, results, or anything that's been already encrypted.
Optionally pins the file to IPNS. Pass in the IPNS key name
To get IPNS key name, see create_new_ipns_link
Args:
msg (Dict): The message to upload and encrypt.
public_key (bytes): The public_key to encrypt the file for.
ipns_keypair_name (str): If left blank, then don't pin to IPNS
Returns:
Tuple[str, str]: returns [sha1 hash, ipfs hash]
Raises:
Exception: if adding bytes with IPFS fails.
>>> credentials = {
... "gas_payer": "0x1413862C2B7054CDbfdc181B83962CB0FC11fD92",
... "gas_payer_priv": "28e516f1e2f99e96a48a23cea1f94ee5f073403a1c68e818263f0eb898f1c8e5"
... }
>>> pub_key = b"2dbc2c2c86052702e7c219339514b2e8bd4687ba1236c478ad41b43330b08488c12c8c1797aa181f3a4596a1bd8a0c18344ea44d6655f61fa73e56e743f79e0d"
>>> job = Job(credentials=credentials, escrow_manifest=manifest)
>>> (hash_, manifest_url) = upload(job.serialized_manifest, pub_key)
>>> manifest_dict = download(manifest_url, job.gas_payer_priv)
>>> manifest_dict == job.serialized_manifest
True
"""
try:
manifest_ = json.dumps(msg, sort_keys=True)
except Exception as e:
LOG.error("Can't extract the json from the dict")
raise e
hash_ = hashlib.sha1(manifest_.encode('utf-8')).hexdigest()
try:
ipfs_file_hash = IPFS_CLIENT.add_bytes(_encrypt(public_key, manifest_))
except Exception as e:
LOG.warning("Adding bytes with IPFS failed because of: {}".format(e))
raise e
if ipns_keypair_name != '':
try:
# publish ipns ... docs: https://ipfs.io/ipns/12D3KooWEqnTdgqHnkkwarSrJjeMP2ZJiADWLYADaNvUb6SQNyPF/docs/http_client_ref.html#ipfshttpclient.Client.name
IPFS_CLIENT.name.publish(
f'/ipfs/{ipfs_file_hash}',
key=ipns_keypair_name.lower(),
allow_offline=True)
except Exception as e:
LOG.warning("IPNS failed because of: {}".format(e))
raise e
return hash_, ipfs_file_hash | 3dc1b12e57ce0054a1bf5b534f92ed7130187a53 | 3,653,831 |
def test_sakai_auth_url(oauth_mock):
"""
Test auth url retrieval for Sakai.
Test that we can retrieve a formatted Oauth1 URL for Sakai
"""
def mock_fetch_token(mock_oauth_token, mock_oauth_token_secret):
def mock_token_getter(mock_url):
return {
'oauth_token': mock_oauth_token,
'oauth_token_secret': mock_oauth_token_secret,
}
return mock_token_getter
mock_authorize_url = 'http://host/oauth-tool/authorize/'
another_mock = MagicMock()
another_mock.fetch_request_token.side_effect = mock_fetch_token(
fixtures.oauth_creds_dict['HTTP_LMS_OAUTH_TOKEN'],
fixtures.oauth_creds_dict['HTTP_LMS_OAUTH_SECRET'],
)
oauth_mock.return_value = another_mock
data = {
'request_token_url': 'http://host/oauth-tool/request_tokén',
'authorize_url': mock_authorize_url,
'callback_url': "http://this.doesnt.ma/tter",
}
headers = fixtures.get_mocked_headers('http://somebaseurl')
del headers['HTTP_LMS_OAUTH_TOKEN']
del headers['HTTP_LMS_OAUTH_SECRET']
client = Client()
resp = client.get(
reverse('auth_url'),
content_type='application/json',
data=data,
**headers,
)
expected_auth_url = (
f'{mock_authorize_url}'
f'?oauth_token={fixtures.oauth_creds_dict["HTTP_LMS_OAUTH_TOKEN"]}'
)
assert resp.status_code == status.HTTP_200_OK
actual_resp_json = resp.json()
expected_resp_json = {
'auth_url': expected_auth_url,
'redirect_key': 'redirect_uri',
'oauth_token_secret': fixtures.oauth_creds_dict[
'HTTP_LMS_OAUTH_SECRET'
],
}
assert actual_resp_json == expected_resp_json | fc9321d5b88379fb08d40b8dadece1c3fb31b26a | 3,653,832 |
from typing import Tuple
from typing import List
from typing import Iterable
def nodes_and_groups(expr: Expression) -> Tuple[List[Expression], Iterable[List[int]]]:
"""
Returns a list of all sub-expressions, and an iterable of lists of indices to sub-expressions that are equivalent.
Example 1:
(let (x 3)
add (
(let (z 3) (add z (add x x)))
(let (z 5) (add z (add x x)))
)
)
Here, the two identical expressions '(add x x)' will be in one equivalence group (the closest binder for the
free variable 'x' is the same).
The four (single-node) sub-expressions 'x' will also be in one equivalence group.
Example 2:
In expression:
(foo
(let (x 3) (add x x)) # 1
(let (x 4) (add x x)) # 2
(let (y 3) (add y y)) # 3
)
- sub-expressions '(let (x 3) (add x x))' and '(let (y 3) (add y y))' are equivalent.
- The sub-expressions `(add x x)` on line #1 and `(add y y)` on line #3 will not be in equivalence group,
because they are in a different binding scope, even though they will evaluate to the same value.
- '(let (x 3) (add x x))' and '(let (x 4) (add x x))' are not equivalent, because 'x' is assigned a
different value.
Also, for each 'add' expression, the pair of identical variables within it will, of course, be in an
equivalence group.
Args:
expr: An expression
Returns:
A tuple of:
* a list of subtrees (nodes) of the Expression; the same as expr.nodes, but returned to avoid an extra traversal
(and more clearly corresponding to the second element as they are constructed by the same algorithm)
* an iterable of lists of indices, where each list contains indices of nodes which are equivalent
(compute the same value). Note that nodes that are not in
"""
nodes: List[Expression] = []
closest_binders: List[int] = []
def traverse(subexp: Expression, binder_stack: List[Tuple[str, int]]) -> None:
idx = len(nodes)
nodes.append(subexp)
# Calculate the closest binder of a free-variable - intuitively, the scope of the subexp,
# the highest point to which a let containing this subexp's value could be lifted.
# (That is - this subexp cannot be the same as any other subexp unless their closest binder's are the same)
closest_binder = -1 # Global
for skip, (bv_name, binder_idx) in enumerate(reversed(binder_stack)):
if bv_name in subexp.free_var_names:
closest_binder = binder_idx
if skip > 0 and len(subexp.children) > 0:
binder_stack = binder_stack[:-skip]
break
closest_binders.append(closest_binder)
if subexp.is_binder:
bound_stack = binder_stack + [(subexp.bound_var.name, idx)]
for i, c in enumerate(subexp.children):
traverse(
c,
bound_stack
if subexp.is_binder and subexp.binds_in_child(i)
else binder_stack,
)
traverse(expr, [])
assert len(nodes) == expr.num_nodes
assert len(closest_binders) == expr.num_nodes
def equiv_groups() -> Iterable[List[int]]:
# Group node indices by whether they have the same closest binder, same number of nodes, and are the same op.
for g in utils.group_by(
range(len(nodes)),
lambda idx: (closest_binders[idx], nodes[idx].num_nodes, nodes[idx].op),
).values():
# Skip obviously-singleton groups
if len(g) >= 2:
yield from utils.group_by(g, lambda idx: nodes[idx]).values()
return nodes, equiv_groups() | bf5087fa5c4dd36e614c5e9227fd3337960dc9c6 | 3,653,833 |
def masterxprv_from_electrummnemonic(mnemonic: Mnemonic,
passphrase: str = "",
network: str = 'mainnet') -> bytes:
"""Return BIP32 master extended private key from Electrum mnemonic.
Note that for a 'standard' mnemonic the derivation path is "m",
for a 'segwit' mnemonic it is "m/0h" instead.
"""
version, seed = electrum._seed_from_mnemonic(mnemonic, passphrase)
prefix = _NETWORKS.index(network)
if version == 'standard':
xversion = _XPRV_PREFIXES[prefix]
return rootxprv_from_seed(seed, xversion)
elif version == 'segwit':
xversion = _P2WPKH_PRV_PREFIXES[prefix]
rootxprv = rootxprv_from_seed(seed, xversion)
return derive(rootxprv, 0x80000000) # "m/0h"
else:
raise ValueError(f"Unmanaged electrum mnemonic version ({version})") | 6642aba45eb72b5f366c52862ce07ddbf05d80f8 | 3,653,834 |
def release_(ctx, version, branch, master_branch, release_branch, changelog_base, force):
"""
Release a branch.
Note that this differs from the create-release command:
1. Create a Github release with the version as its title.
2. Create a commit bumping the version of setup.py on top of the branch.
3. Generate and upload a changelog of the head of the branch, relative to the latest release.
4. Update the master branch to point to the release commit.
5. Close any related issues with a comment specifying the release title.
The version is calculated automatically according to the changelog. Note that the release tag
will point to the above mentioned commit.
The command is mainly intended to be executed automatically using CI systems (as described
below), and implements certain heuristics in order to perform properly.
Note, the release process will only take place if the following conditions hold:
1. The current build passes validation. (see validate-build)
2. The tip of the branch passes validation. (see validate-commit)
3. The release does not yet exist.
If any of these conditions is not satisfied, the command will be silently ignored and
complete successfully. This is useful so that your builds will not fail when running on
commits that shouldn't be released.
This command is idempotent, given that the tip of your branch hasn't changed between
executions. You can safely run this command in parallel, this is important when running
your CI process on multiple systems concurrently.
"""
ci_provider = ctx.obj.ci_provider
gh = ctx.obj.github
branch = branch or (ci_provider.branch if ci_provider else None)
release_branch = release_branch or gh.default_branch_name
sha = ci_provider.sha if ci_provider else branch
if not force:
try:
ctx.invoke(ci.validate_build, release_branch=release_branch)
ctx.invoke(validate_commit, sha=sha)
except TerminationException as e:
if isinstance(e.cause, exceptions.ReleaseValidationFailedException):
log.sub()
log.echo("Not releasing: {}".format(str(e)))
return
raise
log.echo("Releasing branch '{}'".format(branch), add=True)
changelog = _generate_changelog(gh=gh, sha=sha, base=changelog_base)
next_version = version or changelog.next_version
if not next_version:
err = ShellException('None of the commits in the changelog references an issue '
'labeled with a release label. Cannot determine what the '
'version number should be.')
err.cause = 'You probably only committed internal issues since the last release, ' \
'or forgot to reference the issue.'
err.possible_solutions = [
'Amend the message of one of the commits to reference a release issue',
'Push another commit that references a release issue',
'Use --version to specify a version manually'
]
raise err
release = _create_release(ctx=ctx,
changelog=changelog,
branch=branch,
master_branch=master_branch,
version=next_version,
sha=sha)
log.echo('Closing issues', add=True)
for issue in changelog.all_issues:
ctx.invoke(close_issue, number=issue.impl.number, release=release.title)
log.sub()
log.sub()
log.echo('Successfully released: {}'.format(release.url))
return release | e7a9de4c12f3eb3dfe3d6272ccb9254e351641b9 | 3,653,835 |
from collections import namedtuple
def get_namedtuple_from_paramnames(owner, parnames):
"""
Returns the namedtuple classname for parameter names
:param owner: Owner of the parameters, usually the spotpy setup
:param parnames: Sequence of parameter names
:return: Class
"""
# Get name of owner class
typename = type(owner).__name__
parnames = ["p" + x if x.isdigit() else x for x in list(parnames)]
return namedtuple('Par_' + typename, # Type name created from the setup name
parnames) | 4c0b2ca46e2d75d1e7a1281e58a3fa6402f42cf0 | 3,653,836 |
def readNotificationGap(alarmName):
"""
Returns the notificationGap of the specified alarm from the database
"""
cur = conn.cursor()
cur.execute('Select notificationGap FROM Alarms WHERE name is "%s"' % alarmName)
gapNotification = int(cur.fetchone()[0])
conn.commit()
return gapNotification | afa7bd0e510433e6a49ecd48937f2d743f8977e4 | 3,653,837 |
def vertical_line(p1, p2, p3):
"""
Line through p3 that is perpendicular to the line through p1 and p2.
Two perpendicular lines have slopes that are negative reciprocals of each other.
:param p1: [x,y]
:param p2: [x,y]
:param p3: [x,y]
:return: coefficients [na, nb, nc] of the new line
"""
line = fit_line(p1, p2)
a, b, c = line # ax+by+c=0; b is normally -1
# compute the coefficients na, nb, nc of the perpendicular line
if a == 0.: # original line is y = c; new line is x = -nc
na = 1.
nb = 0.
elif b == 0.: # original line is x = -c; new line is y = nc
na = 0.
nb = -1.
else: # slopes are negative reciprocals: a * na = -1
na = -1. / a
nb = -1.
# solve for nc from na*x + nb*y + nc = 0 at point p3
nc = -(na * p3[0] + nb * p3[1])
return [na, nb, nc] | e1644edf7702996f170b6f53828e1fc864151759 | 3,653,838 |
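A minimal sketch of the fit_line helper assumed above (returning [a, b, c] of ax + by + c = 0 with b = -1 for non-vertical lines, as the comments imply), plus a perpendicularity check:
def fit_line(p1, p2):
    if p1[0] == p2[0]:
        return [1., 0., -float(p1[0])]        # vertical line: x = p1[0]
    a = (p2[1] - p1[1]) / (p2[0] - p1[0])     # slope
    return [a, -1., p1[1] - a * p1[0]]        # y = a*x + c  ->  a*x - y + c = 0

na, nb, nc = vertical_line([0, 0], [2, 2], [1, 0])
print(na, nb, nc)   # -1.0 -1.0 1.0, i.e. the line y = -x + 1 through (1, 0)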
def _get_value(key, entry):
"""
:param key:
:param entry:
:return:
"""
if key in entry:
if entry[key] and str(entry[key]).lower() == "true":
return True
elif entry[key] and str(entry[key]).lower() == "false":
return False
return entry[key]
return None | 93820395e91323939c8fbee653b6eabb6fbfd8eb | 3,653,839 |
def calculate_bounded_area(x0, y0, x1, y1):
""" Calculate the area bounded by two potentially-nonmonotonic 2D data sets
This function is written to calculate the area between two arbitrary
piecewise-linear curves. The method was inspired by the arbitrary polygon
filling routines in vector software programs when the polygon
self-intersects.
Created: 2015 April 29, msswan
"""
# We start by taking the start of the first data set (pts0) and loop over
# each segment (starting with the closest) and check to see if the
# second data (pts1) set intersects. If there is an intersection, it joins
# all the points together to make a polygon (reversing pts1 so that the
# polygon integration calculation goes around in a single direction) and
# calculates the area from that. Now it removes the points that it used to
# create the polygon and adds the intersection point to pts0 (which is the
# new starting point) and starts the loop again.
# Turn the data into lists of tuples (x,y) coordinates
pts0 = list(zip(x0, y0))
pts1 = list(zip(x1, y1))
area = 0.0
while len(pts0) + len(pts1) > 0:
shouldbreak = False
for idx in range(0, len(pts0)-1):
for jdx in range(0, len(pts1)-1):
doesintersect, int_pt = line_intersect(pts0[idx], pts0[idx+1],
pts1[jdx], pts1[jdx+1])
if not doesintersect:
continue
polygon = list(reversed(pts1[:jdx])) + pts0[:idx] + [int_pt,]
area += get_area(polygon)
# Trim the processed points off of the datasets
pts0 = [int_pt,] + pts0[idx+1:]
pts1 = pts1[jdx+1:]
# Exit out of both for-loops
shouldbreak = True
break
if shouldbreak:
break
else:
# Make a polygon out of whatever points remain
polygon = list(reversed(pts1)) + pts0
area += get_area(polygon)
# exit the while loop
break
return area | 1cdf853a829e68f73254ac1073aadbc29abc4e2a | 3,653,840 |
import requests
def login():
""" Login to APIC-EM northbound APIs in shell.
Returns:
Client (NbClientManager) which is already logged in.
"""
try:
client = NbClientManager(
server=APIC,
username=APIC_USER,
password=APIC_PASSWORD,
connect=True)
return client
except requests.exceptions.HTTPError as exc_info:
if exc_info.response.status_code == 401:
print('Authentication Failed. Please provide valid username/password.')
else:
print('HTTP Status Code {code}. Reason: {reason}'.format(
code=exc_info.response.status_code,
reason=exc_info.response.reason))
exit(1)
except requests.exceptions.ConnectionError:
print('Connection aborted. Please check if the host {host} is available.'.format(host=APIC))
exit(1) | 8a4fd0122769b868dc06aeba17c15d1a2e0055a2 | 3,653,841 |
import numpy
import xraylib
def transfocator_compute_configuration(photon_energy_ev,s_target,\
symbol=["Be","Be","Be"], density=[1.845,1.845,1.845],\
nlenses_max = [15,3,1], nlenses_radii = [500e-4,1000e-4,1500e-4], lens_diameter=0.05, \
sigmaz=6.46e-4, alpha = 0.55, \
tf_p=5960, tf_q=3800, verbose=1 ):
"""
Computes the optimum transfocator configuration for a given photon energy and target image size.
All length units are cm
:param photon_energy_ev: the photon energy in eV
:param s_target: the target image size in cm.
:param symbol: the chemical symbol of the lens material of each type. Default symbol=["Be","Be","Be"]
:param density: the density of each type of lens. Default: density=[1.845,1.845,1.845]
:param nlenses_max: the maximum allowed number of lenses for each type of lens. Default: nlenses_max = [15,3,1]
:param nlenses_radii: the radii in cm of each type of lens. Default: nlenses_radii = [500e-4,1000e-4,1500e-4]
:param lens_diameter: the physical diameter (acceptance) in cm of the lenses. If different for each type of lens,
consider the smaller one. Default: lens_diameter=0.05
:param sigmaz: the sigma (standard deviation) of the source in cm
:param alpha: an adjustable parameter in [0,1](see doc). Default: 0.55 (it is 0.76 for pure Gaussian beams)
:param tf_p: the distance source-transfocator in cm
:param tf_q: the distance transfocator-image in cm
:param verbose: set to 1 for verbose text output
:return: a list with the number of lenses of each type.
"""
if s_target < 2.35*sigmaz*tf_q/tf_p:
print("Source size FWHM is: %f um"%(1e4*2.35*sigmaz))
print("Maximum Demagnifications is: %f um"%(tf_p/tf_q))
print("Minimum possible size is: %f um"%(1e4*2.35*sigmaz*tf_q/tf_p))
print("Error: redefine size")
return None
deltas = [(1.0 - xraylib.Refractive_Index_Re(symbol[i],photon_energy_ev*1e-3,density[i])) \
for i in range(len(symbol))]
focal_q_target = _tansfocator_guess_focal_position( s_target, p=tf_p, q=tf_q, sigmaz=sigmaz, alpha=alpha, \
lens_diameter=lens_diameter,method=2)
focal_f_target = 1.0 / (1.0/focal_q_target + 1.0/tf_p)
div_q_target = alpha * lens_diameter / focal_q_target
#corrections for extreme cases
source_demagnified = 2.35*sigmaz*focal_q_target/tf_p
if source_demagnified > lens_diameter: source_demagnified = lens_diameter
s_target_calc = numpy.sqrt( (div_q_target*(tf_q-focal_q_target))**2 + source_demagnified**2)
nlenses_target = _transfocator_guess_configuration(focal_f_target,deltas=deltas,\
nlenses_max=nlenses_max,radii=nlenses_radii, )
if verbose:
print("transfocator_compute_configuration: focal_f_target: %f"%(focal_f_target))
print("transfocator_compute_configuration: focal_q_target: %f cm"%(focal_q_target))
print("transfocator_compute_configuration: s_target: %f um"%(s_target_calc*1e4))
print("transfocator_compute_configuration: nlenses_target: ",nlenses_target)
return nlenses_target | 3c25d701117df8857114038f92ebe4a5dee4097f | 3,653,842 |
import logging
import xml.etree.ElementTree
import flickrapi
import http.client as httplib  # stdlib module, referenced below as httplib.HTTPException
def flickrapi_fn(fn_name,
fn_args, # format: ()
fn_kwargs, # format: dict()
attempts=3,
waittime=5,
randtime=False,
caughtcode='000'):
""" flickrapi_fn
Runs flickrapi fn_name function handing over **fn_kwargs.
It retries attempts, waittime, randtime with @retry
Checks results is_good and provides feedback accordingly.
Captures flicrkapi or BasicException error situations.
caughtcode to report on exception error.
Returns:
fn_success = True/False
fn_result = Actual flickrapi function call result
fn_errcode = error reported by flickrapi exception
"""
@rate_limited.retry(attempts=attempts,
waittime=waittime,
randtime=randtime)
def retry_flickrapi_fn(kwargs):
""" retry_flickrapi_fn
Decorator to retry calling a function
"""
return fn_name(**kwargs)
logging.info('fn:[%s] attempts:[%s] waittime:[%s] randtime:[%s]',
fn_name.__name__, attempts, waittime, randtime)
if logging.getLogger().getEffectiveLevel() <= logging.INFO:
for i, arg in enumerate(fn_args):
logging.info('fn:[%s] arg[%s]={%s}', fn_name.__name__, i, arg)
for name, value in fn_kwargs.items():
logging.info('fn:[%s] kwarg[%s]=[%s]',
fn_name.__name__, name, value)
fn_success = False
fn_result = None
fn_errcode = 0
try:
fn_result = retry_flickrapi_fn(fn_kwargs)
except flickrapi.exceptions.FlickrError as flickr_ex:
fn_errcode = flickr_ex.code
NPR.niceerror(caught=True,
caughtprefix='+++Api',
caughtcode=caughtcode,
caughtmsg='Flickrapi exception on [{!s}]'
.format(fn_name.__name__),
exceptuse=True,
exceptcode=flickr_ex.code,
exceptmsg=flickr_ex,
useniceprint=True,
exceptsysinfo=True)
except (IOError, httplib.HTTPException):
NPR.niceerror(caught=True,
caughtprefix='+++Api',
caughtcode=caughtcode,
caughtmsg='Caught IO/HTTP Error on [{!s}]'
.format(fn_name.__name__))
except Exception as exc:
NPR.niceerror(caught=True,
caughtprefix='+++Api',
caughtcode=caughtcode,
caughtmsg='Exception on [{!s}]'.format(fn_name.__name__),
exceptuse=True,
exceptmsg=exc,
useniceprint=True,
exceptsysinfo=True)
except BaseException:
NPR.niceerror(caught=True,
caughtprefix='+++Api',
caughtcode=caughtcode,
caughtmsg='BaseException on [{!s}]'
.format(fn_name.__name__),
exceptsysinfo=True)
finally:
pass
if is_good(fn_result):
fn_success = True
logging.info('fn:[%s] Output for fn_result:',
fn_name.__name__)
logging.info(xml.etree.ElementTree.tostring(
fn_result,
encoding='utf-8',
method='xml'))
else:
logging.error('fn:[%s] is_good(fn_result):[%s]',
fn_name.__name__,
'None'
if fn_result is None
else is_good(fn_result))
fn_result = None
logging.info('fn:[%s] success:[%s] result:[%s] errcode:[%s]',
fn_name.__name__, fn_success, fn_result, fn_errcode)
return fn_success, fn_result, fn_errcode | fcdb050824aa53ef88d0b879729e3e5444d221a7 | 3,653,843 |
import pandas as pd
def load_data(CWD):
""" loads the data from a parquet file specified below
input: CWD = current working directory path
output: df_raw = raw data from parquet file as pandas dataframe
"""
folderpath_processed_data = CWD + '/data_sample.parquet'
df_raw = pd.read_parquet(folderpath_processed_data)
return df_raw | 8ba8d77b81e61f90651ca57b186faf965ec51c73 | 3,653,844 |
from hypothesis import strategies
def http_body():
"""
Returns random binary body data.
"""
return strategies.binary(min_size=0, average_size=600, max_size=1500) | 5789dfc882db32eefb6c543f6fd494fe621b1b8e | 3,653,846 |
from statistics import median
def run(data_s: str) -> tuple[int, int]:
"""Solve the puzzles."""
results = [check(line) for line in data_s.splitlines()]
part1 = sum(result.error_score for result in results)
part2 = int(median(result.completion_score for result in results if result.ok))
return part1, part2 | e5870924769b23300b116ceacae3b8b73d4643f3 | 3,653,847 |
import functools
def _inject(*args, **kwargs):
"""Inject variables into the arguments of a function or method.
This is almost identical to decorating with functools.partial, except we also propagate the wrapped
function's __name__.
"""
def injector(f):
assert callable(f)
@functools.wraps(f)
def wrapper(*w_args, **w_kwargs):
return functools.partial(f, *args, **kwargs)(*w_args, **w_kwargs)
wrapper.args = args
wrapper.kwargs = kwargs
wrapper.function = f
return wrapper
return injector | 40ba8ecd01880ebff3997bc16feb775d6b45f711 | 3,653,849 |
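A quick usage check showing that the injected arguments are prepended to each call and the wrapped name is preserved:
@_inject(10)
def add(a, b):
    return a + b

print(add(5))          # 15
print(add.__name__)    # 'add'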
def frame_drop_correctors_ready():
"""
Checks to see if the frame drop correctors 'seq_and_image_corr' topics are all
being published. There should be a corrector topic for each camera.
"""
camera_assignment = get_camera_assignment()
number_of_cameras = len(camera_assignment)
number_of_correctors = get_number_of_corrector_topics()
return number_of_cameras == number_of_correctors | 85c991de9cecd87cd20f7578e1201340d1a7f23a | 3,653,850 |
from typing import List
from typing import Dict
from typing import Any
import yaml
def loads(content: str) -> List[Dict[str, Any]]:
"""
Load the given YAML string
"""
template = list(yaml.load_all(content, Loader=SafeLineLoader))
# Convert an empty file to an empty dict
if template is None:
template = {}
return template | a2c455b40a0b20c4e34af93e08e9e9ae1bb9ab7d | 3,653,851 |
import numpy as np
def get_ndim_horizontal_coords(easting, northing):
"""
Return the number of dimensions of the horizontal coordinates arrays
Also check if the two horizontal coordinates arrays same dimensions.
Parameters
----------
easting : nd-array
Array for the easting coordinates
northing : nd-array
Array for the northing coordinates
Returns
-------
ndim : int
Number of dimensions of the ``easting`` and ``northing`` arrays.
"""
ndim = np.ndim(easting)
if ndim != np.ndim(northing):
raise ValueError(
"Horizontal coordinates dimensions mismatch. "
+ f"The easting coordinate array has {easting.ndim} dimensions "
+ f"while the northing has {northing.ndim}."
)
return ndim | a35bf0064aff583c221e8b0c28d8c50cea0826aa | 3,653,852 |
async def info():
"""
API information endpoint
Returns:
[json] -- [description] app version, environment running in (dev/prd),
Doc/Redoc link, Lincense information, and support information
"""
if RELEASE_ENV.lower() == "dev":
main_url = "http://localhost:5000"
else:
main_url = HOST_DOMAIN
openapi_url = f"{main_url}/docs"
redoc_url = f"{main_url}/redoc"
result = {
"App Version": APP_VERSION,
"Environment": RELEASE_ENV,
"Docs": {"OpenAPI": openapi_url, "ReDoc": redoc_url},
"License": {"Type": LICENSE_TYPE, "License Link": LICENSE_LINK},
"Application_Information": {"Owner": OWNER, "Support Site": WEBSITE},
}
return result | 3404ac622711c369ae006bc0edba10f57e825f22 | 3,653,853 |
import torch
def hard_example_mining(dist_mat, labels, return_inds=False):
"""For each anchor, find the hardest positive and negative sample.
Args:
dist_mat: pytorch Variable, pair wise distance between samples, shape [N, N]
labels: pytorch LongTensor, with shape [N]
return_inds: whether to return the indices. Save time if `False`(?)
Returns:
dist_ap: pytorch Variable, distance(anchor, positive); shape [N]
dist_an: pytorch Variable, distance(anchor, negative); shape [N]
p_inds: pytorch LongTensor, with shape [N];
indices of selected hard positive samples; 0 <= p_inds[i] <= N - 1
n_inds: pytorch LongTensor, with shape [N];
indices of selected hard negative samples; 0 <= n_inds[i] <= N - 1
NOTE: Only consider the case in which all labels have same num of samples,
thus we can cope with all anchors in parallel.
"""
assert len(dist_mat.size()) == 2
assert dist_mat.size(0) == dist_mat.size(1)
N = dist_mat.size(0)
# shape [N, N]
is_pos = labels.expand(N, N).eq(labels.expand(N, N).t())
is_neg = labels.expand(N, N).ne(labels.expand(N, N).t())
# `dist_ap` means distance(anchor, positive)
# both `dist_ap` and `relative_p_inds` with shape [N, 1]
dist_ap, relative_p_inds = torch.max(
dist_mat[is_pos].contiguous().view(N, -1), 1, keepdim=True)
# `dist_an` means distance(anchor, negative)
# both `dist_an` and `relative_n_inds` with shape [N, 1]
dist_an, relative_n_inds = torch.min(
dist_mat[is_neg].contiguous().view(N, -1), 1, keepdim=True)
# shape [N]
dist_ap = dist_ap.squeeze(1)
dist_an = dist_an.squeeze(1)
if return_inds:
# shape [N, N]
ind = (labels.new().resize_as_(labels)
.copy_(torch.arange(0, N).long())
.unsqueeze(0).expand(N, N))
# shape [N, 1]
p_inds = torch.gather(
ind[is_pos].contiguous().view(N, -1), 1, relative_p_inds.data)
n_inds = torch.gather(
ind[is_neg].contiguous().view(N, -1), 1, relative_n_inds.data)
# shape [N]
p_inds = p_inds.squeeze(1)
n_inds = n_inds.squeeze(1)
return dist_ap, dist_an, p_inds, n_inds
return dist_ap, dist_an | 15fd533cf74e6cd98ac0fa2e8a83b2734861b9ca | 3,653,855 |
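A toy usage check with a hand-built distance matrix (two identities, two samples each; torch is already imported above):
labels = torch.tensor([0, 0, 1, 1])
feats = torch.tensor([[0.0], [1.0], [10.0], [12.0]])
dist_mat = torch.cdist(feats, feats)
dist_ap, dist_an = hard_example_mining(dist_mat, labels)
print(dist_ap)   # hardest positive distance per anchor: tensor([1., 1., 2., 2.])
print(dist_an)   # hardest negative distance per anchor: tensor([10., 9., 9., 11.])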
import datetime
def trace(func):
"""Trace and capture provenance info inside a method /function."""
setup_logging()
@wraps(func)
def wrapper(*args, **kwargs):
activity = func.__name__
activity_id = get_activity_id()
# class_instance = args[0]
class_instance = func
class_instance.args = args
class_instance.kwargs = kwargs
# OSA specific
# variables parsing
global session_name, session_tag
class_instance = parse_variables(class_instance)
if class_instance.__name__ in REDUCTION_TASKS:
session_tag = f"{activity}:{class_instance.ObservationRun}"
session_name = f"{class_instance.ObservationRun}"
else:
session_tag = (
f"{activity}:{class_instance.PedestalRun}-{class_instance.CalibrationRun}"
)
session_name = f"{class_instance.PedestalRun}-{class_instance.CalibrationRun}"
# OSA specific
# variables parsing
# provenance capture before execution
derivation_records = get_derivation_records(class_instance, activity)
parameter_records = get_parameters_records(class_instance, activity, activity_id)
usage_records = get_usage_records(class_instance, activity, activity_id)
# activity execution
start = datetime.datetime.now().isoformat()
result = func(*args, **kwargs)
end = datetime.datetime.now().isoformat()
# no provenance logging
if not log_is_active(class_instance, activity):
return result
# provenance logging only if activity ends properly
session_id = log_session(class_instance, start)
for log_record in derivation_records:
log_prov_info(log_record)
log_start_activity(activity, activity_id, session_id, start)
for log_record in parameter_records:
log_prov_info(log_record)
for log_record in usage_records:
log_prov_info(log_record)
log_generation(class_instance, activity, activity_id)
log_finish_activity(activity_id, end)
return result
return wrapper | 8d624ef70ea4278141f8da9989b3d6787ec003c7 | 3,653,856 |
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.metrics.pairwise import euclidean_distances
def get_rbf_gamma_based_in_median_heuristic(X: np.array, standardize: bool = False) -> float:
"""
Function implementing a heuristic to estimate the width of an RBF kernel (as defined in the Scikit-learn package)
from data.
:param X: array-like, shape = (n_samples, n_features), feature matrix
:param standardize: boolean, indicating whether the data should be normalized (z-transformation) before the gamma is
estimated.
:return: scalar, gamma (of the sklearn RBF kernel) estimated from the data
"""
# Z-transform the data if requested
if standardize:
X = StandardScaler(copy=True).fit_transform(X)
# Compute all pairwise euclidean distances
D = euclidean_distances(X)
# Get the median of the distances
sigma = np.median(D)
# Convert to sigma to gamma as defined in the sklearn package
gamma = 1 / (2 * sigma**2)
return gamma | 0a9238b4ba2c3e3cc4ad1f01c7855954b9286294 | 3,653,858 |
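A quick usage check on random data (uses the numpy and scikit-learn imports added above):
rng = np.random.RandomState(0)
X = rng.randn(100, 5)
gamma = get_rbf_gamma_based_in_median_heuristic(X, standardize=True)
print(gamma)   # a positive scalar, roughly 1 / (2 * median_pairwise_distance**2)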
import xarray
def winter_storm(
snd: xarray.DataArray, thresh: str = "25 cm", freq: str = "AS-JUL"
) -> xarray.DataArray:
"""Days with snowfall over threshold.
Number of days with snowfall accumulation greater or equal to threshold.
Parameters
----------
snd : xarray.DataArray
Surface snow depth.
thresh : str
Threshold on snowfall accumulation require to label an event a `winter storm`.
freq : str
Resampling frequency.
Returns
-------
xarray.DataArray
Number of days per period identified as winter storms.
Notes
-----
Snowfall accumulation is estimated by the change in snow depth.
"""
thresh = convert_units_to(thresh, snd)
# Compute daily accumulation
acc = snd.diff(dim="time")
# Winter storm condition
out = threshold_count(acc, ">=", thresh, freq)
out.attrs["units"] = to_agg_units(out, snd, "count")
return out | cef1fa5cf56053f74e70542250c64b398752bd75 | 3,653,859 |
def _check_whitelist_members(rule_members=None, policy_members=None):
"""Whitelist: Check that policy members ARE in rule members.
If a policy member is NOT found in the rule members, add it to
the violating members.
Args:
rule_members (list): IamPolicyMembers allowed in the rule.
policy_members (list): IamPolicyMembers in the policy.
Return:
list: Policy members NOT found in the whitelist (rule members).
"""
violating_members = []
for policy_member in policy_members:
# check if policy_member is found in rule_members
if not any(r.matches(policy_member) for r in rule_members):
violating_members.append(policy_member)
return violating_members | 47f2d6b42f2e1d57a09a2ae6d6c69697e13d03a7 | 3,653,860 |
import re
import uuid
def get_mac():
"""This function returns the first MAC address of the NIC of the PC
without colon"""
return ':'.join(re.findall('..', '%012x' % uuid.getnode())).replace(':', '') | 95ebb381c71741e26b6713638a7770e452d009f2 | 3,653,861 |
async def get_clusters(session, date):
"""
:param session:
:return:
"""
url = "%s/file/clusters" % BASE_URL
params = {'date': date}
return await get(session, url, params) | 8ef55ba14558a60096cc0a96b5b0bc2400f8dbff | 3,653,862 |
def extract_attributes_from_entity(json_object):
"""
returns the attributes from a json representation
Args:
@param json_object: JSON representation
"""
if 'attributes' in json_object:  # dict.has_key() is Python 2 only
items = json_object['attributes']
attributes = recursive_for_attribute_v2(items)
return attributes
else:
return None | d01886fac8d05e82fa8c0874bafc8860456ead0c | 3,653,863 |
def get_config_with_api_token(tempdir, get_config, api_auth_token):
"""
Get a ``_Config`` object.
:param TempDir tempdir: A temporary directory in which to create the
Tahoe-LAFS node associated with the configuration.
:param (bytes -> bytes -> _Config) get_config: A function which takes a
node directory and a Foolscap "portnum" filename and returns the
configuration object.
:param bytes api_auth_token: The HTTP API authorization token to write to
the node directory.
"""
FilePath(tempdir.join(b"tahoe", b"private")).makedirs()
config = get_config(tempdir.join(b"tahoe"), b"tub.port")
config.write_private_config(b"api_auth_token", api_auth_token)
return config | 682bd037944276c8a09bff46a96337571a605f0e | 3,653,864 |
import numpy as np
def calc_base_matrix_1qutrit_y_01() -> np.ndarray:
"""Return the base matrix corresponding to the y-axis w.r.t. levels 0 and 1."""
l = [[0, -1j, 0], [1j, 0, 0], [0, 0, 0]]
mat = np.array(l, dtype=np.complex128)
return mat | 7618021173464962c3e9366d6f159fad01674feb | 3,653,866 |
def get_feature_names_small(ionnumber):
"""
feature names for the fixed peptide length feature vectors
"""
names = []
names += ["pmz", "peplen"]
for c in ["bas", "heli", "hydro", "pI"]:
names.append("sum_" + c)
for c in ["mz", "bas", "heli", "hydro", "pI"]:
names.append("mean_" + c)
names.append("mz_ion")
names.append("mz_ion_other")
names.append("mean_mz_ion")
names.append("mean_mz_ion_other")
for c in ["bas", "heli", "hydro", "pI"]:
names.append("{}_ion".format(c))
names.append("{}_ion_other".format(c))
names.append("endK")
names.append("endR")
names.append("nextP")
names.append("nextK")
names.append("nextR")
for c in ["bas", "heli", "hydro", "pI", "mz"]:
for pos in ["i", "i-1", "i+1", "i+2"]:
names.append("loc_" + pos + "_" + c)
names.append("charge")
for i in range(ionnumber):
for c in ["bas", "heli", "hydro", "pI", "mz"]:
names.append("P_%i_%s"%(i, c))
names.append("P_%i_P"%i)
names.append("P_%i_K"%i)
names.append("P_%i_R"%i)
return names | fbffe98af0cffb05a6b11e06786c5a7076449146 | 3,653,867 |
def vectorproduct(a,b):
"""
Return vector cross product of input vectors a and b
"""
a1, a2, a3 = a
b1, b2, b3 = b
return [a2*b3 - a3*b2, a3*b1 - a1*b3, a1*b2 - a2*b1] | adb9e7c4b5150ab6231f2b852d6860cd0e5060a0 | 3,653,868 |
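A quick usage check; the result is orthogonal to both inputs (dot products with a and b are zero):
print(vectorproduct([1, 0, 0], [0, 1, 0]))   # [0, 0, 1]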
def f_test_probability(N, p1, Chi2_1, p2, Chi2_2):
"""Return F-Test probability that the simpler model is correct.
e.g. p1 = 5.; //number of PPM parameters
e.g. p2 = p1 + 7.; // number of PPM + orbital parameters
:param N: int
Number of data points
:param p1: int
Number of parameters of the simpler model
:param Chi2_1: float
chi^2 corresponding to the simpler model
:param p2: int
Number of parameters of the model with more parameters
p2 > p1
:param Chi2_2: float
chi^2 corresponding to the model with more parameters
:return:
prob: float
probability
"""
nu1 = p2 - p1
nu2 = N - p2 # degrees of freedom
if (Chi2_1 < Chi2_2):
raise RuntimeWarning('Solution better with less parameters')
# F test
F0 = nu2 / nu1 * (Chi2_1 - Chi2_2) / Chi2_2
# probability
prob = betai(0.5 * nu2, 0.5 * nu1, nu2 / (nu2 + F0 * nu1))
return prob | 21cf7c9eb455309131b6b4808c498927c3d6e485 | 3,653,870 |
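A minimal sketch of the betai helper assumed above; it appears to be the regularized incomplete beta function, which scipy exposes as betainc (an assumption about the original helper):
from scipy.special import betainc

def betai(a, b, x):
    # regularized incomplete beta function I_x(a, b)
    return betainc(a, b, x)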
def validate_user(headers):
"""Validate the user and return the results."""
user_id = headers.get("User", "")
token = headers.get("Authorization", "")
registered = False
if user_id:
valid_user_id = user_id_or_guest(user_id)
registered = valid_user_id > 1
else:
valid_user_id = 1
is_token_invalid = invalid_token(user_id, token)
return valid_user_id, registered, is_token_invalid | 331608df719d03afd57079d9baba3408b54e0efe | 3,653,871 |
def load_csv_to_journal(batch_info):
"""Take a dict of batch and csv info and load into journal table."""
# Create batch for testing
filename = batch_info['filename']
journal_batch_name = batch_info['journal_batch_name']
journal_batch_description = batch_info['journal_batch_description']
journal_batch_entity = batch_info['journal_batch_entity']
journal_batch_currency = batch_info['journal_batch_currency']
gl_post_reference = batch_info['gl_post_reference']
gl_batch_status = batch_info['gl_batch_status']
insert_new_batch_name(journal_batch_name,
journal_batch_description,
str(journal_batch_entity),
str(journal_batch_currency),
gl_post_reference,
str(gl_batch_status),
)
# Set up csv file to use
batch_row_id = get_journal_batch_row_id_by_name(journal_batch_name)
batch_row_id = batch_row_id[0][0][0]
# Load csv file to journal_loader
load_file = batch_load_je_file(filename, str(batch_row_id))
status_ = [0, batch_row_id] # [load_file status, batch_row_id]
if load_file == 'LOAD OKAY':
status_[0] = 0
else:
status_[0] = 99
raise Exception('Error posting csv file to Journal table')
# Compare csv totals loaded into pandas dataframe to journal
# table totals.
# Load batch in journal_loader to journal
if status_[0] == 0:
load_status_journal = batch_load_insert(batch_row_id)
print(f'load_status_journal: {load_status_journal}')
return status_
else:
print(f'Error loading to journal_loader: {status_}')
raise Exception('Error posting csv file to journal_loader')
return status_ | 285d1113cad16d2d0cc7216f59d089a8f94e908c | 3,653,872 |
def validate_guid(guid: str) -> str:
"""Validates that a guid is formatted properly and returns it unchanged."""
valid_chars = set('0123456789abcdef')
count = 0
for char in guid:
count += 1
if char not in valid_chars or count > 32:
raise ValueError('Invalid GUID format.')
if count != 32:
raise ValueError('Invalid GUID format.')
return guid | 75fff17ee0ef2c1c080e2ef2ffb0272fd71d2921 | 3,653,873 |
def load_key_string(string, callback=util.passphrase_callback):
# type: (AnyStr, Callable) -> RSA
"""
Load an RSA key pair from a string.
:param string: String containing RSA key pair in PEM format.
:param callback: A Python callable object that is invoked
to acquire a passphrase with which to unlock the
key. The default is util.passphrase_callback.
:return: M2Crypto.RSA.RSA object.
"""
bio = BIO.MemoryBuffer(string)
return load_key_bio(bio, callback) | 0ac6df63dd7ad42d8eaaa13df7e96caa311332d7 | 3,653,874 |
def generate_iface_status_html(iface=u'lo', status_txt="UNKNOWN"):
"""Generates the html for interface of given status. Status is UNKNOWN by default."""
status = "UNKNOWN"
valid_status = html_generator.HTML_LABEL_ROLES[0]
if status_txt is not None:
if (str(" DOWN") in str(status_txt)):
status = "DOWN"
valid_status = html_generator.HTML_LABEL_STATUS[u'CRITICAL']
elif (str(" UP") in str(status_txt)):
status = "UP"
valid_status = html_generator.HTML_LABEL_STATUS[u'OK']
return generate_iface_status_html_raw(iface, status, valid_status) | c3d459720b5675c9a7d53fa77bb1d7bb6d3988f2 | 3,653,876 |
def is_a(file_name):
"""
Tests whether a given file_name corresponds to a CRSD file. Returns a reader instance, if so.
Parameters
----------
file_name : str
the file_name to check
Returns
-------
CRSDReader1_0|None
Appropriate `CRSDReader` instance if CRSD file, `None` otherwise
"""
try:
crsd_details = CRSDDetails(file_name)
logger.info('File {} is determined to be a CRSD version {} file.'.format(file_name, crsd_details.crsd_version))
return CRSDReader(crsd_details)
except SarpyIOError:
# we don't want to catch parsing errors, for now?
return None | e083a54becdbb86bbefdb7c6504d5cd1d7f81458 | 3,653,877 |
def Closure(molecules):
"""
Returns the set of the closure of a given list of molecules
"""
newmol=set(molecules)
oldmol=set([])
while newmol:
gen=ReactSets(newmol,newmol)
gen|=ReactSets(newmol,oldmol)
gen|=ReactSets(oldmol,newmol)
oldmol|=newmol
newmol=gen-oldmol
return oldmol | 7546a528a43465127c889a93d03fbe1eb83a7d63 | 3,653,878 |
from celery import Celery
def get_celery_task():
"""get celery task, which takes user id as its sole argument"""
global _celery_app
global _celery_task
if _celery_task:
return _celery_task
load_all_fetcher()
_celery_app = Celery('ukfetcher', broker=ukconfig.celery_broker)
_celery_app.conf.update(
CELERY_ACCEPT_CONTENT=['pickle', 'json', 'msgpack', 'yaml'])
@_celery_app.task
def on_user_activated(user_id):
try:
user_fetcher = get_db_set(user_id, 'fetcher')
for i in user_fetcher:
fetcher = register_fetcher.fetcher_map.get(i)
if fetcher is None:
uklogger.log_err(
'fetcher {} not exist, requested by user {}'.format(
i, user_id))
else:
uklogger.log_info('run fetcher {} for user {}'.format(
i, user_id))
fetcher.run(user_id)
except Exception as ex:
uklogger.log_exc(ex)
if is_in_unittest():
_celery_task = on_user_activated
else:
_celery_task = on_user_activated.delay
return _celery_task | b1cf2aa6ccf462b8e391c8900ac9efaea0b62728 | 3,653,880 |
import matplotlib.pyplot as plt
import tensorflow as tf
def plot_keras_activations(activations):
"""Plot keras activation functions.
Args:
activations (list): List of Keras
activation functions
Returns:
[matplotlib figure]
[matplotlib axis]
"""
fig, axs = plt.subplots(1,len(activations),figsize=(3*len(activations),5),sharex=True,sharey=True,dpi=150)
x = tf.constant(tf.range(-3,3,0.1), dtype=tf.float32)
for i, activation in enumerate(activations):
axs[i].plot(x.numpy(), activation(x).numpy())
axs[i].set_title(activation.__name__)
axs[i].set_xlabel(r'$x$')
if i == 0:
axs[i].set_ylabel(r'$\phi(x)$')
despine(ax=axs[i])
fig.tight_layout()
return fig, axs | 3c10bd3a57531ef8a88b6b0d330c2ba7eaf0b35c | 3,653,881 |
from skimage import feature
def hog_feature(image, pixel_per_cell=8):
"""
Compute hog feature for a given image.
Important: use the hog function provided by skimage to generate both the
feature vector and the visualization image. **For block normalization, use L1.**
Args:
image: an image with object that we want to detect.
pixel_per_cell: number of pixels in each cell, an argument for hog descriptor.
Returns:
score: a vector of hog representation.
hogImage: an image representation of hog provided by skimage.
"""
### YOUR CODE HERE
(hogFeature, hogImage) = feature.hog(image, pixels_per_cell=(pixel_per_cell, pixel_per_cell), visualize=True);
#hogFeature = normalize(hogFeature.reshape(500,-1), 'l1', return_norm=False)
### END YOUR CODE
return (hogFeature, hogImage) | 6509a46dd161f6bde448588314535cb5aeef5e8a | 3,653,882 |
from argparse import ArgumentParser
def create_parser() -> ArgumentParser:
"""
Helper function parsing the command line options.
"""
parser = ArgumentParser(description="torchx CLI")
subparser = parser.add_subparsers(
title="sub-commands",
description=sub_parser_description,
)
subcmds = {
"describe": CmdDescribe(),
"log": CmdLog(),
"run": CmdRun(),
"builtins": CmdBuiltins(),
"runopts": CmdRunopts(),
"status": CmdStatus(),
}
for subcmd_name, cmd in subcmds.items():
cmd_parser = subparser.add_parser(subcmd_name)
cmd.add_arguments(cmd_parser)
cmd_parser.set_defaults(func=cmd.run)
return parser | 515309ad03907f5e22d32e5d13744a5fd24bfd40 | 3,653,883 |
import re
def process_word(word):
"""Remove all punctuation and stem words"""
word = re.sub(regex_punc, '', word)
return stemmer.stem(word) | bceb132e7afddaf0540b38c22e9cef7b63a27e8c | 3,653,884 |
from functools import wraps
def no_autoflush(fn):
"""Wrap the decorated function in a no-autoflush block."""
@wraps(fn)
def wrapper(*args, **kwargs):
with db.session.no_autoflush:
return fn(*args, **kwargs)
return wrapper | c211b05ea68074bc22254c584765ad001ed38f67 | 3,653,885 |
def int_to_ip(ip):
"""
Convert a 32-bit integer into IPv4 string format
:param ip: 32-bit integer
:return: IPv4 string equivalent to ip
"""
if type(ip) is str:
return ip
return '.'.join([str((ip >> i) & 0xff) for i in [24, 16, 8, 0]]) | 8ceb8b9912f10ba49b45510f4470b9cc34bf7a2f | 3,653,887 |
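A quick usage check:
print(int_to_ip(3232235777))   # '192.168.1.1'
print(int_to_ip('10.0.0.1'))   # strings are passed through unchanged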
def audit_work_timer_cancel(id):
"""
Cancel timer set.
:param id:
:return:
"""
work = Work.query.get(id)
celery.control.revoke(work.task_id, terminate=True)
work.task_id = None
work.timer = None
db.session.add(work)
db.session.commit()
return redirect(url_for('.audit_work_timer', id=id)) | d05d76dbf31faa4e6b8349af7f698b7021fba50f | 3,653,888 |
def team_pos_evolution(team_id):
"""
returns the evolution of position
for a team for the season
"""
pos_evo = []
for week in team_played_weeks(team_id):
try:
teams_pos = [x[0] for x in league_table_until_with_teamid(week)]
pos = teams_pos.index(int(team_id)) + 1
pos_evo.append(pos)
except:
pass
return pos_evo | 2b1d5378663eadf1f6ca1abb569e72866a58b0aa | 3,653,889 |
import numpy as np
def ifft_method(x, y, interpolate=True):
"""
Perfoms IFFT on data.
Parameters
----------
x: array-like
the x-axis data
y: array-like
the y-axis data
interpolate: bool
if True perform a linear interpolation on dataset before transforming
Returns
-------
xf: array-like
the transformed x data
yf: array-like
transformed y data
"""
N = len(x)
if interpolate:
x, y = _fourier_interpolate(x, y)
xf = np.fft.fftfreq(N, d=(x[1] - x[0]) / (2 * np.pi))
yf = np.fft.ifft(y)
return xf, yf | d13e1519cbcec635bbf2f17a0f0abdd44f41ae53 | 3,653,890 |
import sys
import getopt
def run(namespace=None, action_prefix='action_', args=None):
"""Run the script. Participating actions are looked up in the caller's
namespace if no namespace is given, otherwise in the dict provided.
Only items that start with action_prefix are processed as actions. If
you want to use all items in the namespace provided as actions set
action_prefix to an empty string.
:param namespace: An optional dict where the functions are looked up in.
By default the local namespace of the caller is used.
:param action_prefix: The prefix for the functions. Everything else
is ignored.
:param args: the arguments for the function. If not specified
:data:`sys.argv` without the first argument is used.
"""
if namespace is None:
namespace = sys._getframe(1).f_locals
actions = find_actions(namespace, action_prefix)
if args is None:
args = sys.argv[1:]
if not args or args[0] in ('-h', '--help'):
return print_usage(actions)
elif args[0] not in actions:
fail('Unknown action \'%s\'' % args[0])
arguments = {}
types = {}
key_to_arg = {}
long_options = []
formatstring = ''
func, doc, arg_def = actions[args.pop(0)]
for idx, (arg, shortcut, default, option_type) in enumerate(arg_def):
real_arg = arg.replace('-', '_')
if shortcut:
formatstring += shortcut
if not isinstance(default, bool):
formatstring += ':'
key_to_arg['-' + shortcut] = real_arg
long_options.append(isinstance(default, bool) and arg or arg + '=')
key_to_arg['--' + arg] = real_arg
key_to_arg[idx] = real_arg
types[real_arg] = option_type
arguments[real_arg] = default
try:
optlist, posargs = getopt.gnu_getopt(args, formatstring, long_options)
except getopt.GetoptError as e:
fail(str(e))
specified_arguments = set()
for key, value in enumerate(posargs):
try:
arg = key_to_arg[key]
except IndexError:
fail('Too many parameters')
specified_arguments.add(arg)
try:
arguments[arg] = converters[types[arg]](value)
except ValueError:
fail('Invalid value for argument %s (%s): %s' % (key, arg, value))
for key, value in optlist:
arg = key_to_arg[key]
if arg in specified_arguments:
fail('Argument \'%s\' is specified twice' % arg)
if types[arg] == 'boolean':
if arg.startswith('no_'):
value = 'no'
else:
value = 'yes'
try:
arguments[arg] = converters[types[arg]](value)
except ValueError:
fail('Invalid value for \'%s\': %s' % (key, value))
newargs = {}
for k, v in arguments.items():
newargs[k.startswith('no_') and k[3:] or k] = v
arguments = newargs
return func(**arguments) | 83a575f633088dc44e1cfcce65efadfb6fda84cc | 3,653,892 |
def PolyMod(f, g):
"""
return f (mod g)
"""
return f % g | 53b47e993e35c09e59e209b68a8a7656edf6b4ce | 3,653,894 |
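# Minimal illustration of PolyMod: it simply delegates to the % operator, so any objects
# supporting __mod__ work. Plain integers stand in for polynomials here.
print(PolyMod(17, 5))  # 2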
def policy_improvement(nS, nA, P, full_state_to_index, g=.75,t=0.05):
"""Iteratively evaluates and improves a policy until an optimal policy is found
or reaches threshold of iterations
Parameters:
nS: number of states
nA: number of actions
P: transitional tuples given state and action
full_state_to_index: dictionary of state to index Values
g: gamma which is discount factor
t: theta or stopping condition
Returns: tuple of policy and value of policy
"""
policy = np.ones([nS, nA]) / nA # random policy (equal chance all actions)
i=0
while True:
i+=1
if i%100==0:
print(i)
V = policy_eval(policy, nS, nA, P, full_state_to_index, gamma=g, theta=t) # eval current policy
is_policy_stable = True # true is no changes false if we make changes
for s in range(nS):
chosen_a = np.random.choice(np.argwhere(policy[s] == np.amax(policy[s])).flatten().tolist())
action_values = value(s, V, full_state_to_index, nA, P, gamma=g, theta=t)
best_a = np.random.choice(np.argwhere(action_values == np.amax(action_values)).flatten().tolist())
if chosen_a != best_a: # greedy update
is_policy_stable = False
policy[s] = np.eye(nA)[best_a]
if is_policy_stable or i==10000:
print(i, 'Iterations')
return policy, V | 84373843a179bb2afda20427e24795fbb524ae2c | 3,653,895 |
import torch
def get_train_val_test_data(args):
"""Load the data on rank zero and boradcast number of tokens to all GPUS."""
(train_data, val_data, test_data) = (None, None, None)
# Data loader only on rank 0 of each model parallel group.
if mpu.get_model_parallel_rank() == 0:
data_config = configure_data()
data_config.set_defaults(data_set_type='BERT', transpose=False)
(train_data, val_data, test_data), tokenizer = data_config.apply(args)
before = tokenizer.num_tokens
after = before
multiple = args.make_vocab_size_divisible_by * \
mpu.get_model_parallel_world_size()
while (after % multiple) != 0:
after += 1
print_rank_0('> padded vocab (size: {}) with {} dummy '
'tokens (new size: {})'.format(
before, after - before, after))
# Need to broadcast num_tokens and num_type_tokens.
token_counts = torch.cuda.LongTensor([after,
tokenizer.num_type_tokens,
int(args.do_train), int(args.do_valid), int(args.do_test)])
else:
token_counts = torch.cuda.LongTensor([0, 0, 0, 0, 0])
# Broadcast num tokens.
torch.distributed.broadcast(token_counts,
mpu.get_model_parallel_src_rank(),
group=mpu.get_model_parallel_group())
num_tokens = token_counts[0].item()
num_type_tokens = token_counts[1].item()
args.do_train = token_counts[2].item()
args.do_valid = token_counts[3].item()
args.do_test = token_counts[4].item()
return train_data, val_data, test_data, num_tokens, num_type_tokens | c729262e71bb40c016c6b7a65deaba65f4db951e | 3,653,896 |
def user_data_check(data_file):
"""
1 - Check user data file, and if necessary coerce to correct format.
2 - Check for fold calculation errors, and if correct, return data frame
for passing to later functions.
3 - If incorrect fold calculations detected, error message returned.
:param data_file: user data table.
:return orig_file_parsed: Dataframe (if error checks pass).
:return error_message: Text string (error message).
"""
# Read user_data and assign to dataframe variable.
orig_file = pd.read_table(data_file)
# Subset source df by the first 7 columns.
# Note: last index should be +1 bigger than number of fields.
# AZ20.tsv file has 86 total columns, 80 of which are empty cells.
# Necessary step to maintain indexing references at a later stage!
orig_file_subset = orig_file.iloc[:, 0:7]
# Coerce column 1 to object.
orig_file_subset.iloc[:, 0] = orig_file_subset.iloc[:, 0].astype(object)
# Coerce column 2-7 to float.
orig_file_subset.iloc[:, 1:7] = orig_file_subset.iloc[:, 1:7].astype(float)
# Subset data frame by checking if mean intensities in both columns,
# are greater than zero.
orig_file_subset = orig_file_subset[(orig_file_subset.iloc[:, 1] > 0) |\
(orig_file_subset.iloc[:, 2] > 0)]
# A data file that has been edited such that columns have been deleted,
# i.e. in excel, may introduce "phantom" columns in python environment.
# Such columns are coerced to "un-named" fields with nan entries.
# If cv columns present with values, original data frame unaffected.
# Code drops columns that contain all nan in columns.
orig_file_subset = orig_file_subset.dropna(axis=1, # Iterate by columns.
how="all") # Drop if all na
# in columns.
# Determine number of columns.
num_col = orig_file_subset.shape[1]
# Check if number of cols = 5 and append new columns with all entries
# = to 1 for cv calculations that are missing.
# If number of columns adhere to correct format, data frame unaffected.
if num_col == 5:
orig_file_subset["control_cv"] = 1
orig_file_subset["condition_cv"] = 1
# Add fold calculation column to df.
orig_file_subset["calc_fold_change"] = \
orig_file_subset.iloc[:, 2].divide(orig_file_subset.iloc[:,1])
# Define user and script calculated fold changes as series variables.
user_fold_calc = orig_file_subset.iloc[:, 3]
script_fold_calc = orig_file_subset.iloc[:, 7]
    # Determine if fold change calculations match to within an
    # absolute tolerance of 1e-3 (i.e. 3 decimal places).
    # Numpy "isclose()" function used to check closeness of match.
    # Boolean series returned to new column in data frame.
    orig_file_subset["check_fold_match"] = \
        np.isclose(user_fold_calc, script_fold_calc, atol=1e-3)
# Determine number of true matches for fold change calculations.
# Summing of boolean series carried out: True = 1, False = 0.
sum_matches = sum(orig_file_subset.iloc[:, 8] == 1)
    # Define error message if fold calculation matching detects
    # the existence of errors.
error_message = \
("Anomaly detected..PhosQuest will self-destruct in T minus 10 seconds"+
"...just kidding! Please check your fold change calculations, "+
"a discrepancy has been detected.")
# If "sum_matches" equal to length of data frame, then return data frame.
# If not, return error message.
# Note: if first logical test passes, this indicates that fold change
# calculations in original user data are correct (within tolerance),
# and filtered dataframe returned for further analysis.
if sum_matches == len(orig_file_subset):
orig_file_parsed = orig_file_subset.iloc[:, 0:7]
return orig_file_parsed
elif sum_matches != len(orig_file_subset):
return error_message | fc1b1d18a0e9a5a28674573cc2ab1c7cf9f08a03 | 3,653,897 |
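# Hedged example for user_data_check: a minimal 5-column table (identifier, control mean,
# condition mean, user fold change, p-value) with a consistent fold change, fed in through
# a StringIO buffer instead of a real file path. Assumes pandas/numpy are imported as
# pd/np in the surrounding module.
import io
tsv = "substrate\tcontrol_mean\tcondition_mean\tfold_change\tp_value\n" \
      "PEP1\t2.0\t4.0\t2.0\t0.05\n"
print(user_data_check(io.StringIO(tsv)))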
import requests
def get_modules(request: HttpRequest) -> JsonResponse:
"""Gets a list of modules for the provided course from the Canvas API based on current user
A module ID has to be provided in order to access the correct course
:param request: The current request as provided by django
:return: A JSONResponse containing either an error or the data provided by Canvas
"""
# Note: For functionality documentation, see get_courses, as much of it is the same
error = expire_checker(request)
url = request.user.canvas_oauth2_token.url
if error[0] is not None:
return error[0]
client = error[1]
header = {"Authorization": f"Bearer {request.user.canvas_oauth2_token.access_token}"}
course_id = request.GET.get("course_id", "")
    if not course_id:
        return error_generator("There was no provided course ID!", 404)  # No course ID supplied
modules = requests.get(
"{}/api/v1/courses/{}/modules?per_page=50".format(url, course_id),
headers=header, verify=False is client.dev)
return content_helper(modules) | d583779b075419dd67514bd50e709374fd4964bf | 3,653,898 |
def create_workflow(session, workflow_spec=dict(), result_schema=None):
"""Create a new workflow handle for a given workflow specification. Returns
the workflow identifier.
Parameters
----------
session: sqlalchemy.orm.session.Session
Database session.
workflow_spec: dict, default=dict()
Optional workflow specification.
result_schema: dict, default=None
Optional result schema.
Returns
-------
    string
        Unique identifier of the newly created workflow.
    """
workflow_id = util.get_unique_identifier()
workflow = WorkflowObject(
workflow_id=workflow_id,
name=workflow_id,
workflow_spec=workflow_spec,
result_schema=result_schema
)
session.add(workflow)
return workflow_id | 1c3843a15d543fb10427b52c7d654abd877b3342 | 3,653,899 |
from typing import Optional
def get_volume(name: Optional[str] = None,
namespace: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetVolumeResult:
"""
## Example Usage
```python
import pulumi
import pulumi_harvester as harvester
ubuntu20_dev_mount_disk = harvester.get_volume(name="ubuntu20-dev-mount-disk",
namespace="default")
```
:param str name: A unique name
"""
__args__ = dict()
__args__['name'] = name
__args__['namespace'] = namespace
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('harvester:index/getVolume:getVolume', __args__, opts=opts, typ=GetVolumeResult).value
return AwaitableGetVolumeResult(
access_mode=__ret__.access_mode,
attached_vm=__ret__.attached_vm,
description=__ret__.description,
id=__ret__.id,
image=__ret__.image,
name=__ret__.name,
namespace=__ret__.namespace,
phase=__ret__.phase,
size=__ret__.size,
state=__ret__.state,
storage_class_name=__ret__.storage_class_name,
tags=__ret__.tags,
volume_mode=__ret__.volume_mode) | 528dfb0432b30b40037b86a234e83c8327eb5206 | 3,653,901 |
def both_block_num_missing(record):
"""
Returns true of both block numbers are missing
:param record: dict - The record being evaluated
:return: bool
"""
rpt_block_num = record.get("rpt_block_num", "") or ""
rpt_sec_block_num = record.get("rpt_sec_block_num", "") or ""
# True, if neither address has a block number.
if rpt_block_num == "" and rpt_sec_block_num == "":
return True
return False | 63e2fdaef78dbc3c6560a4b015ed022583f30d05 | 3,653,902 |
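# Small illustration of both_block_num_missing with hypothetical records; the field names
# are exactly the ones the function itself reads.
print(both_block_num_missing({"rpt_block_num": "", "rpt_sec_block_num": None}))    # True
print(both_block_num_missing({"rpt_block_num": "0400", "rpt_sec_block_num": ""}))  # False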
def jsonize(v):
"""
Convert the discount configuration into a state in which it can be
stored inside the JSON field.
Some information is lost here; f.e. we only store the primary key
of model objects, so you have to remember yourself which objects
are meant by the primary key values.
"""
if isinstance(v, dict):
return dict((i1, jsonize(i2)) for i1, i2 in v.items())
if hasattr(v, "__iter__"):
return [jsonize(i) for i in v]
if isinstance(v, Model):
return v.pk
return v | 1aa7954c0089726b7707e0180b35a12d679c286b | 3,653,904 |
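# Hedged illustration of jsonize: without a Django Model instance at hand, this only
# exercises the dict / iterable / scalar branches.
print(jsonize({"discount": 25, "eligible_products": [3, 7, 9]}))
# -> {'discount': 25, 'eligible_products': [3, 7, 9]}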
def clean_kaggle_movies(movies_df):
"""
Clean the Kaggle movie data with the following steps:
1. Drop duplicate rows
2. Filter out adult videos and drop unnecessary columns
3. Recast columns to appropriate data types
Parameters
----------
movies_df : Pandas dataframe
Kaggle movie data
Returns
-------
Pandas dataframe
Clean Kaggle movie data
"""
# Drop duplicate rows
movies_df = udf_movies.drop_duplicates(movies_df)
# Filter out adult videos and drop unnecessary columns
movies_df = drop_cols(movies_df)
# Recast columns to appropriate data types
movies_df = recast_cols(movies_df)
return movies_df | 05d5a0eb965b26cdc04dcfb9f3a76690d272389c | 3,653,905 |
def make_shift_x0(shift, ndim):
"""
Returns a callable that calculates a shifted origin for each derivative
of an operation derivatives scheme (given by ndim) given a shift object
which can be a None, a float or a tuple with shape equal to ndim
"""
if shift is None:
return lambda s, d, i, j: None
elif isinstance(shift, float):
return lambda s, d, i, j: d + s * d.spacing
elif type(shift) is tuple and np.shape(shift) == ndim:
if len(ndim) == 1:
return lambda s, d, i, j: d + s[j] * d.spacing
elif len(ndim) == 2:
return lambda s, d, i, j: d + s[i][j] * d.spacing
else:
raise ValueError("ndim length must be equal to 1 or 2")
raise ValueError("shift parameter must be one of the following options: "
"None, float or tuple with shape equal to %s" % (ndim,)) | e6b01e43c8bf73ba21a9bdfcd27a93db9ccb7478 | 3,653,907 |
def one_zone_numerical(params, ref_coeff, num_molecules=1e-9):
"""Returns one zone reactor exit flow."""
time = np.array(params[0], dtype=float)
gradient = np.array(params[1], dtype=float)
gridpoints = int(params[2])
step_size, area = float(params[3]), float(params[4])
solu = odeint(
_one_zone_fd, np.zeros(int(gradient.size)), time,
args=(ref_coeff, gradient, gridpoints, step_size, area)
)
return solu[:, -2] * ref_coeff * area / (step_size * num_molecules) | 4eb17f9684d1d12175bf85d15bada4178074de8a | 3,653,909 |
import re
def get_all_event_history_links():
"""From ufcstat website finds all completed fights and saves
the http into the current working directory
"""
url = "http://www.ufcstats.com/statistics/events/completed?page=all"
href_collection = get_all_a_tags(url)
#Add all links to list that have event-details in them
links = []
for i in href_collection:
site_regex = re.search('event-details', i)
if site_regex is not None:
links.append(i)
links = list(dict.fromkeys(links))
return links | ab452c66460f18b5d55ce2be2e22877f07e959d5 | 3,653,910 |
def plot_book_wordbags(urn, wordbags, window=5000, pr = 100):
"""Generate a diagram of wordbags in book """
return plot_sammen_vekst(urn, wordbags, window=window, pr=pr) | 12a03c70316d3920419f85cd2e4af87c7a16f0f8 | 3,653,912 |
def map_line2citem(decompilation_text):
"""
Map decompilation line numbers to citems.
This function allows us to build a relationship between citems in the
ctree and specific lines in the hexrays decompilation text.
Output:
+- line2citem:
| a map keyed with line numbers, holding sets of citem indexes
|
| eg: { int(line_number): sets(citem_indexes), ... }
'
"""
line2citem = {}
#
# it turns out that citem indexes are actually stored inline with the
# decompilation text output, hidden behind COLOR_ADDR tokens.
#
# here we pass each line of raw decompilation text to our crappy lexer,
# extracting any COLOR_ADDR tokens as citem indexes
#
for line_number in range(decompilation_text.size()):
line_text = decompilation_text[line_number].line
line2citem[line_number] = lex_citem_indexes(line_text)
return line2citem | 86c8a24f769c7404560bb63c34f2b60ff3a097da | 3,653,913 |
def from_dict(params, filter_func=None, excludes=[], seeds=[], order=2,
random_seed=None):
"""Generates pair-wise cases from given parameter dictionary."""
if random_seed is None or isinstance(random_seed, int):
return _from_dict(params, filter_func, excludes, seeds, order, random_seed)
# Find the best (smallest) test suite by trying multiple seeds.
best = None
for rs in random_seed:
case = _from_dict(params, filter_func, excludes, seeds, order, rs)
if best is None or len(case) < len(best):
best = case
return best | d9ecd0528340adbe874afa70d3a9309e53ff87cc | 3,653,914 |
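# Hedged usage sketch for from_dict: the parameter space below is invented, and the call
# assumes the module's _from_dict helper yields an iterable suite of pairwise test cases.
params = {
    "os": ["linux", "windows"],
    "browser": ["chrome", "firefox", "safari"],
    "ipv6": [True, False],
}
for case in from_dict(params, random_seed=0):
    print(case)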
def ensure_min_topology(*args, **kwargs):
"""
    verifies if the current testbed topology satisfies the
    minimum topology required by the test script
:param spec: needed topology specification
:type spec: basestring
:return: True if current topology is good enough else False
:rtype: bool
"""
return getwa().ensure_min_topology(*args, **kwargs) | 364e7b3c166b725fd73846e1814bd3b7ab92ad96 | 3,653,915 |
def encode_mode(mode):
"""
JJ2 uses numbers instead of strings, but strings are easier for humans to work with
CANNOT use spaces here, as list server scripts may not expect spaces in modes in port 10057 response
:param mode: Mode number as sent by the client
:return: Mode string
"""
if mode == 16:
return "headhunters"
if mode == 15:
return "domination"
if mode == 14:
return "tlrs"
if mode == 13:
return "flagrun"
if mode == 12:
return "deathctf"
if mode == 11:
return "jailbreak"
if mode == 10:
return "teambattle"
if mode == 9:
return "pestilence"
if mode == 8:
return "xlrs"
if mode == 7:
return "lrs"
if mode == 6:
return "roasttag"
if mode == 5:
return "coop"
if mode == 4:
return "race"
if mode == 3:
return "ctf"
if mode == 2:
return "treasure"
if mode == 1:
return "battle"
return "unknown" | db83c419acb299284b7b5338331efc95051115a5 | 3,653,916 |
import random
def randclust(SC, k):
""" cluster using random """
# generate labels.
labels = np.array([random.randint(0,k-1) for x in range(SC.shape[1])])
# compute the average.
S, cats = avg_cat(labels, SC)
# return it.
return S, labels, cats | 42530495959977c1289fa6bdc2089747a246d210 | 3,653,918 |
def get_domains_by_name(kw, c, adgroup=False):
"""Searches for domains by a text fragment that matches the domain name (not the tld)"""
domains = []
existing = set()
if adgroup:
existing = set(c['adgroups'].find_one({'name': adgroup}, {'sites':1})['sites'])
for domain in c['domains'].find({}, {'domain': 1, 'alexa.rank.latest':1}):
try:
rank = domain['alexa']['rank']['latest']
domain_name = domain['domain'].replace('#', '.')
if kw in domain_name:
if domain_name not in existing:
domains.append({
"domain": domain_name,
"rank": rank
})
except KeyError:
pass
return domains[:50] | 6ecaf4ccf1ecac806fb621c02282bf46929459ce | 3,653,919 |
def read_bbgt(filename):
"""
Read ground truth from bbGt file.
See Piotr's Toolbox for details
"""
boxes = []
with open(filename,"r") as f:
signature = f.readline()
if not signature.startswith("% bbGt version=3"):
raise ValueError("Wrong file signature")
rects = []
ignore = []
labels = []
for line in f:
elms = line.strip().split()
assert len(elms) == 12, "Invalid file"
lbl = elms[0]
rect = tuple(map(float, elms[1:5]))
ign = int(elms[10])
rects.append(rect)
ignore.append(ign)
labels.append(lbl)
if not rects:
rects = np.empty((0,4),"f")
ignore = np.empty(0,"i")
labels = np.empty(0,"<U1")
boxes = bbox_list(np.array(rects,"f"),
format=RectFormat.XYWH,
ignore=np.array(ignore,"i"),
labels=np.array(labels))
return boxes | 25cfe28de9ed67ca0888da5bf27d01a803da8690 | 3,653,920 |
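# Hedged example for read_bbgt: writes a tiny bbGt v3 file with a single 12-field
# annotation line and parses it back; bbox_list and RectFormat are assumed to come from
# the same module as the function.
with open("tiny.bbgt", "w") as f:
    f.write("% bbGt version=3\n")
    f.write("person 10 20 30 40 0 0 0 0 0 0 0\n")
boxes = read_bbgt("tiny.bbgt")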
def measure(G, wire, get_cb_delay = False, meas_lut_access = False):
"""Calls HSPICE to obtain the delay of the wire.
Parameters
----------
G : nx.MultiDiGraph
The routing-resource graph.
wire : str
Wire type.
get_cb_delay : Optional[bool], default = False
Determines the position of the wire and the connection block and then calls
>>meas_local_wire.py<< to obtain the delay from the wire to a LUT input pin.
Returns
-------
float
Delay.
"""
#------------------------------------------------------------------------#
def run():
"""Runs HSPICE and parses the delay."""
with open(netlist_filename, "w") as outf:
outf.write(conv_nx_to_spice(net, meas_lut_access = meas_lut_access))
hspice_call = os.environ["HSPICE"] + " %s > %s" % (netlist_filename, hspice_dump)
os.system(hspice_call)
scale_dict = {'f' : 1e-15, 'p' : 1e-12, 'n' : 1e-9}
with open(hspice_dump, "r") as inf:
lines = inf.readlines()
#os.system("rm " + hspice_dump)
td_dict = {}
get_td = lambda l : round(float(l.split()[1][:-1]), 1) * scale_dict[l.split()[1][-1]]
get_tap = lambda l : wire + '_' + l.split('=', 1)[0].split('_', 1)[1]
for line in lines:
if "tfall=" in line:
tfall = get_td(line)
elif "trise=" in line:
trise = get_td(line)
elif meas_lut_access:
if "tfall_ble_mux" in line or "trise_ble_mux" in line:
td = get_td(line)
if td < 0:
print "Negative time!"
raise ValueError
try:
td_dict["ble_mux"] = 0.5 * (td_dict["ble_mux"] + td)
except:
td_dict.update({"ble_mux" : td})
elif wire[0] == 'V':
if "tfall_tap" in line or "trise_tap" in line:
tap = get_tap(line)
td = get_td(line)
if td < 0:
print "Negative time!"
raise ValueError
try:
td_dict[tap] = 0.5 * (td_dict[tap] + td)
except:
td_dict.update({tap : td})
if trise < 0 or tfall < 0:
print "Negative time!"
raise ValueError
if wire[0] == 'V':
td_dict.update({"whole" : 0.5 * (trise + tfall)})
if meas_lut_access:
td_dict.update({"lut_access" : 0.5 * (trise + tfall) - td_dict["ble_mux"]})
return td_dict
if wire[0] == 'V':
return td_dict
return 0.5 * (trise + tfall)
#------------------------------------------------------------------------#
netlist_filename = "sim_global_%s_%s.sp" % (args.arc_name, wire)
hspice_dump = "hspice_%s_%s.dump" % (args.arc_name, wire)
if meas_lut_access:
net = meas_lut_access_delay(G)
return run()
else:
pins, all_sizes = stack_muxes(G, get_pins = True)
source_dict = {}
for mux in pins:
if wire in mux and mux.startswith("ble_%d_" % NEUTRAL_BLE):
if ROBUSTNESS_LEVEL == 0:
source = mux
if get_cb_delay:
return get_netlist(G, wire, source, get_cb_delay = True)
net = get_netlist(G, wire, source)
return run()
key = mux.split("_tap")[0]
offset = pins[mux]['o'][0 if wire[0] == 'V' else 1]
deg = 0
for fanout in G:
if fanout.startswith(key):
deg += G.in_degree(fanout) + G.out_degree(fanout)
source_dict.update({key : {"mux" : mux, "deg" : deg, "offset" : offset}})
sorted_keys = sorted(source_dict, key = lambda s : source_dict[s]["deg"]\
* abs(source_dict[s]["offset"]))
if ROBUSTNESS_LEVEL == 1 or get_cb_delay:
#NOTE: Connection-block delays are very robust to changing the multiplexer as they usually
#assume only one or two columns, immediately next to the crossbar. Hence, the x-offset is
#less varialbe. Also, the load is within the cluster itself. If there is any variation in
#multiplexer sizes, that is more of an artifact of parametrized architecture generation.
#Median fanin should be a good representative in this case.
source = source_dict[sorted_keys[len(source_dict) / 2]]["mux"]
if get_cb_delay:
return get_netlist(G, wire, source, get_cb_delay = True)
net = get_netlist(G, wire, source)
return run()
td_dicts = []
for source_key in sorted_keys:
source = source_dict[source_key]["mux"]
net = get_netlist(G, wire, source)
td_dicts.append(run())
if ROBUSTNESS_LEVEL == 3:
potential_targets = [u for u, attrs in net.nodes(data = True) if attrs.get("potential_target", False)]
for i, u in enumerate(potential_targets):
relabeling_dict = {}
if u == 't':
continue
relabeling_dict.update({'t' : "prev_t_%d" % i})
relabeling_dict.update({u : 't'})
net = nx.relabel_nodes(net, relabeling_dict)
td_dicts.append(run())
if (wire[0] == 'H' and not meas_lut_access) or get_cb_delay:
return sum(td_dicts) / len(td_dicts)
for v in td_dicts[0]:
for td_dict in td_dicts[1:]:
td_dicts[0][v] += td_dict[v]
td_dicts[0][v] /= len(td_dicts)
return td_dicts[0] | 7db83ff5084798100a00d79c4df13a226a2e55a8 | 3,653,921 |
def live_ferc_db(request):
"""Use the live FERC DB or make a temporary one."""
return request.config.getoption("--live_ferc_db") | f0540c8e3383572c5f686ea89011d9e1ab0bf208 | 3,653,923 |
from typing import Optional
async def get_eth_hash(timestamp: int) -> Optional[str]:
"""Fetches next Ethereum blockhash after timestamp from API."""
try:
this_block = w3.eth.get_block("latest")
except Exception as e:
logger.error(f"Unable to retrieve latest block: {e}")
return None
if this_block["timestamp"] < timestamp:
logger.error(
f"Timestamp {timestamp} is older than current "
"block timestamp {this_block['timestamp']}"
)
return None
block_num = block_num_from_timestamp(timestamp)
if block_num is None:
logger.warning("Unable to retrieve block number from Etherscan API")
return None
try:
block = w3.eth.get_block(block_num)
except Exception as e:
logger.error(f"Unable to retrieve block {block_num}: {e}")
return None
return str(block["hash"].hex()) | f7f8cd70857d8bb84261685385f59e7cfd048f4c | 3,653,924 |
import urllib.request, urllib.parse, urllib.error
from bs4 import BeautifulSoup
import ssl
def extract_url_dataset(dataset,msg_flag=False):
"""
Given a dataset identifier this function extracts the URL for the page where the actual raw data resides.
"""
# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
dataset_dict={}
baseurl='https://archive.ics.uci.edu/ml/datasets/'
url = baseurl+dataset
try:
uh= urllib.request.urlopen(url, context=ctx)
html =uh.read().decode()
soup=BeautifulSoup(html,'html5lib')
if soup.text.find("does not appear to exist")!=-1:
if msg_flag:
print(f"{dataset} not found")
return None
else:
for link in soup.find_all('a'):
if link.attrs['href'].find('machine-learning-databases')!=-1:
a=link.attrs['href']
a=a[2:]
dataurl="https://archive.ics.uci.edu/ml/"+str(a)
#print(dataurl)
return str(dataurl)
#dataurls.append(dataurl)
# After finishing the for-loop with a-tags, the first dataurl is added to the dictionary
#dataset_dict['dataurl']=dataurls[0]
except:
#print("Could not retrieve")
return None | 06ec2dd6bea4c264fe9590663a28c7c92eed6a49 | 3,653,926 |
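# Network-dependent usage sketch: looks up the raw-data URL for the UCI "iris" dataset
# page; returns None if the page cannot be reached or has no data link.
print(extract_url_dataset("iris", msg_flag=True))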
def test_encrypt_and_decrypt_one(benchmark: BenchmarkFixture) -> None:
"""Benchmark encryption and decryption run together."""
primitives.encrypt = nacl.bindings.crypto_aead_xchacha20poly1305_ietf_encrypt
primitives.decrypt = nacl.bindings.crypto_aead_xchacha20poly1305_ietf_decrypt
def encrypt_and_decrypt() -> bytes:
token = version2.encrypt(MESSAGE, KEY, FOOTER)
return version2.decrypt(token, KEY, FOOTER)
plain_text = benchmark(encrypt_and_decrypt)
assert plain_text == MESSAGE | fdd15ca362b983e5f7e28632434c2cbe1ab983ac | 3,653,927 |
def MPI_ITOps(mintime = 5, maxtime = 20, cap = 60):
"""
Returns a costOfLaborValue object suitable to attach to a sim or other event
Time is in hours
"""
timeDist = LogNormalValue(maxtime, mintime, cap)
costDist = LogNormalValue(235, 115, 340)
team = costOfLaborValue("IT I&O Team", timeDist, costDist)
return team | 829c702d31a585fc18f81eea01c87f32c2458ea6 | 3,653,928 |
import json
def load_private_wallet(path):
"""
Load a json file with the given path as a private wallet.
"""
d = json.load(open(path))
blob = bytes.fromhex(d["key"])
return BLSPrivateHDKey.from_bytes(blob) | 9c98be3b3891eaab7b62eba32b426b78ae985880 | 3,653,929 |
import json
def format_parameters(parameters: str) -> str:
"""
    Receives a key:value string and returns a dictionary string ({"key":"value"}). In the process it strips trailing and
leading spaces.
:param parameters: The key-value-list
:return:
"""
if not parameters:
return '{}'
pairs = []
for item in parameters.split(','):
try:
key, value = item.split(':')
except ValueError:
raise ValueError(f"Got unexpected parameters {item}.")
pairs.append((key.strip(), value.strip()))
return json.dumps(dict(pairs)) | 95f115b9000d495db776798700cfdf35209cfbd4 | 3,653,930 |
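# Example round-trip for format_parameters; the key:value pairs are arbitrary.
print(format_parameters("retries: 3, region: us-east-1"))
# {"retries": "3", "region": "us-east-1"}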
def downvote_question(current_user, question_id):
"""Endpoint to downvote a question"""
error = ""
status = 200
response = {}
question = db.get_single_question(question_id)
if not question:
error = "That question does not exist!"
status = 404
elif db.downvote_question(current_user[0], question_id) is False:
error = "You have already downvoted!"
status = 400
else:
db.downvote_question(current_user[0], question_id)
votes = db.get_votes(question_id)
que_details = db.get_question_details(question_id)
data = {
"meetup": que_details[0],
"title": que_details[1].strip(),
"body": que_details[2].strip(),
"votes": votes[0]
}
status = 200
if error:
response.update({"status": status, "error": error})
return jsonify(response), status
response.update({"status": status, "data": data})
return jsonify(response), status | a7bba2a9608d25b3404f22ca2f283486f205f0ad | 3,653,931 |
def get_new_generation(generation: GEN, patterns: PATTERNS) -> GEN:
"""Mutate current generation and get the next one."""
new_generation: GEN = dict()
plant_ids = generation.keys()
min_plant_id = min(plant_ids)
max_plant_id = max(plant_ids)
for i in range(min_plant_id - 2, max_plant_id + 2):
pattern = get_pattern(generation, i)
if patterns.get(pattern, Pot.EMPTY) is Pot.PLANT:
new_generation[i] = Pot.PLANT
return new_generation | a0908c9c7570814ca86d3b447425e7b75cdbfde2 | 3,653,932 |
def ell2tm(latitude, longitude, longitude_CM, ellipsoid = 'GRS80'):
"""
    Convert ellipsoidal coordinates to 3-degree Transverse Mercator
    projection coordinates
Input:
latitude: latitude of a point in degrees
longitude: longitude of a point in degrees
longitude_CM: central meridian in degrees
ellipsoid: name of ellipsoid in string format
Output:
Easting, Northing [unit:meters]
"""
Phi = _np.deg2rad(latitude) # degree to radian
Lambda = _np.deg2rad(longitude) # degree to radian
Lambda_CM = _np.deg2rad(longitude_CM) # degree to radian
dlambda = Lambda - Lambda_CM
# -----------------------------------------------------------------------------
# Define Ellipsoid
ell = _ellipsoid(ellipsoid)
# -----------------------------------------------------------------------------
# Some parameters
N = ell.a/_np.sqrt(1-ell.e1**2*_np.sin(Phi)**2)
t = _np.tan(Phi)
n = ell.e2 * _np.cos(Phi)
# -----------------------------------------------------------------------------
# Easting Computation
easting = N*(dlambda*_np.cos(Phi)+((dlambda**3*_np.cos(Phi)**3)/6)*(1-t**2+n**2) +
((dlambda**5*_np.cos(Phi)**5)/120)*(5-18*t**2+t**4+14*n**2-58*t**2*n**2+13*n**4+4*n**6-64*n**4*t**2-24*n**6*t**2) +
((dlambda**7*_np.cos(Phi)**7)/5040)*(61-479*t**2+179*t**4-t**6))
easting += 500000 # false easting
# -----------------------------------------------------------------------------
    # Meridian Arc Computation
A0 = 1 - ell.e1**2/4 - (3/64)*ell.e1**4 - (5/256)*ell.e1**6 - (175/16384)*ell.e1**8
A2 = (3/8) * (ell.e1**2 + ell.e1**4/4 + (15/128)*ell.e1**6 - (455/4096)*ell.e1**8)
A4 = (15/256) * (ell.e1**4 + (3/4)*ell.e1**6 - (77/128)*ell.e1**8)
A6 = (35/3072) * (ell.e1**6 - (41/32)*ell.e1**8)
A8 = (-315/131072) * ell.e1**8
S_phi = ell.a * ( A0 * Phi - A2*_np.sin(2*Phi) + A4*_np.sin(4*Phi) - A6*_np.sin(6*Phi) + A8*_np.sin(8*Phi))
# -----------------------------------------------------------------------------
# Northing Computation
northing = S_phi + N * ( (dlambda**2/2) * _np.sin(Phi) * _np.cos(Phi) + (dlambda**4/24) * _np.sin(Phi) * _np.cos(Phi)**3 * (5 - t**2 + 9*n**2 + 4*n**4) +
(dlambda**6/720) * _np.sin(Phi) * _np.cos(Phi)**5 * (61 - 58*t**2 + t**4 + 270*n**2 - 330*t**2*n**2 + 445*n**4 + 324*n**6 - 680*n**4*t**2 + 88*n**8 -
600*n**6*t**2 - 192*n**8*t**2) + (dlambda**8/40320) * _np.sin(Phi) * _np.cos(Phi)**7 * (1385 - 311*t**2 + 543*t**4 - t**6))
return easting, northing | b6e1361df8b51e188bbc7a49557dbe8f14905df3 | 3,653,933 |
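# Hedged usage sketch for ell2tm: approximate coordinates for Ankara against a 33-degree
# central meridian; the exact output depends on the module's _ellipsoid GRS80 constants.
easting, northing = ell2tm(39.92, 32.85, 33.0)
print(round(easting, 2), round(northing, 2))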
def Format_Phone(Phone):
"""Function to Format a Phone Number into (999)-999 9999)"""
Phone = str(Phone)
return f"({Phone[0:3]}) {Phone[3:6]}-{Phone[6:10]}" | 8e46c35bca9d302d86909457c84785ad5d366c15 | 3,653,934 |
from sets import Set
def aStarSearch(problem, heuristic=nullHeuristic):
"""Search the node that has the lowest combined cost and heuristic first."""
"*** YOUR CODE HERE ***"
startState = problem.getStartState()
if problem.isGoalState(startState):
return []
# Each element in the fringe stores the state and the cost to reach it.
fringe = util.PriorityQueue()
fringe.push(startState, 0 + heuristic(startState, problem))
# Each pair in itemsInFringe stores a state and the list of actions
# required to reach it. States are added in itemsInFringe when they are
# added to the fringe. The states are removed from itemsInFringe when
# they get removed from the fringe.
itemsInFringe = {startState: []}
visitedStates = Set()
while not fringe.isEmpty():
currState = fringe.pop()
actionsToCurrState = itemsInFringe[currState]
del itemsInFringe[currState]
costOfActionsToCurrState = problem.getCostOfActions(actionsToCurrState)
if problem.isGoalState(currState):
return actionsToCurrState
visitedStates.add(currState)
for successor, action, stepCost in problem.getSuccessors(currState):
heuristicCostToSuccessor = heuristic(successor, problem)
newCostToSuccessor = costOfActionsToCurrState + stepCost + \
heuristicCostToSuccessor
newActionsToSuccessor = actionsToCurrState + [action]
if successor not in visitedStates:
fringe.update(successor, newCostToSuccessor)
if successor in itemsInFringe and \
problem.getCostOfActions(itemsInFringe[successor]) + \
heuristicCostToSuccessor <= newCostToSuccessor:
# If successor is already in itemsInFringe, only update the
# cost if the current cost is greater than the new cost.
continue
itemsInFringe[successor] = newActionsToSuccessor
# Goal not found, so no action.
return [] | 429c45bff701bbd2bb515be6d8a0f538183941d3 | 3,653,935 |
def _stack_add_equal_dataset_attributes(merged_dataset, datasets, a=None):
"""Helper function for vstack and hstack to find dataset
attributes common to a set of datasets, and at them to the output.
Note:by default this function does nothing because testing for equality
may be messy for certain types; to override a value should be assigned
to the add_keys argument.
Parameters
----------
merged_dataset: Dataset
the output dataset to which attributes are added
datasets: tuple of Dataset
Sequence of datasets to be stacked. Only attributes present
in all datasets and with identical values are put in
merged_dataset
a: {'unique','drop_nonunique','uniques','all'} or True or False or None (default: None).
Indicates which dataset attributes from datasets are stored
in merged_dataset. If an int k, then the dataset attributes from
datasets[k] are taken. If 'unique' then it is assumed that any
attribute common to more than one dataset in datasets is unique;
if not an exception is raised. If 'drop_nonunique' then as 'unique',
except that exceptions are not raised. If 'uniques' then, for each
attribute, any unique value across the datasets is stored in a tuple
in merged_datasets. If 'all' then each attribute present in any
dataset across datasets is stored as a tuple in merged_datasets;
missing values are replaced by None. If None (the default) then no
attributes are stored in merged_dataset. True is equivalent to
'drop_nonunique'. False is equivalent to None.
"""
if a is None or a is False:
# do nothing
return
elif a is True:
a = 'drop_nonunique'
if not datasets:
# empty - so nothing to do
return
if type(a) is int:
base_dataset = datasets[a]
for key in base_dataset.a.keys():
merged_dataset.a[key] = base_dataset.a[key].value
return
allowed_values = ['unique', 'uniques', 'drop_nonunique', 'all']
if not a in allowed_values:
raise ValueError("a should be an int or one of "
"%r" % allowed_values)
# consider all keys that are present in at least one dataset
all_keys = set.union(*[set(dataset.a.keys()) for dataset in datasets])
def _contains(xs, y, comparator=all_equal):
for x in xs:
if comparator(x, y):
return True
return False
for key in all_keys:
add_key = True
values = []
for i, dataset in enumerate(datasets):
if not key in dataset.a:
if a == 'all':
values.append(None)
continue
value = dataset.a[key].value
if a in ('drop_nonunique', 'unique'):
if not values:
values.append(value)
elif not _contains(values, value):
if a == 'unique':
raise DatasetError("Not unique dataset attribute value "
" for %s: %s and %s"
% (key, values[0], value))
else:
add_key = False
break
elif a == 'uniques':
if not _contains(values, value):
values.append(value)
elif a == 'all':
values.append(value)
else:
raise ValueError("this should not happen: %s" % a)
if add_key:
if a in ('drop_nonunique', 'unique'):
merged_dataset.a[key] = values[0]
else:
merged_dataset.a[key] = tuple(values) | acfeb1e7ca315aa7109731427ce6f058b2fceb6d | 3,653,936 |