content (stringlengths 35 to 762k) | sha1 (stringlengths 40 to 40) | id (int64 0 to 3.66M) |
---|---|---|
def get_data_upload_id(jwt: str) -> str:
"""Function to get a temporary upload ID from
DAFNI data upload API
Args:
jwt (str): Users JWT
Returns:
str: Temporary Upload ID
"""
url = f"{DATA_UPLOAD_API_URL}/nid/upload/"
data = {"cancelToken": {"promise": {}}}
return dafni_post_request(url, jwt, data, allow_redirect=True) | dcce05a8efda1c90e6a78a19757f57deffd0c247 | 3,653,400 |
def StationMagnitudeContribution_TypeInfo():
"""StationMagnitudeContribution_TypeInfo() -> RTTI"""
return _DataModel.StationMagnitudeContribution_TypeInfo() | d9af45a3bbe993de37c351b5791ba8b87aeeedc9 | 3,653,401 |
def _get_operations(rescale=0.003921, normalize_weight=0.48):
"""Get operations."""
operation_0 = {
'tensor_op_module': 'minddata.transforms.c_transforms',
'tensor_op_name': 'RandomCrop',
'weight': [32, 32, 4, 4, 4, 4],
'padding_mode': "constant",
'pad_if_needed': False,
'fill_value': 0
}
operation_1 = {
'tensor_op_module': 'minddata.transforms.c_transforms',
'tensor_op_name': 'Rescale',
'rescale': rescale,
'shift': 0,
'num_classes': 10
}
operation_2 = {
'tensor_op_module': 'minddata.transforms.c_transforms',
'tensor_op_name': 'Normalize',
'weights': [normalize_weight]
}
return [operation_0, operation_1, operation_2] | a3bab4147f1a2020fb87853fc30bede277f0f4bd | 3,653,402 |
def itm_command(
ticker: str = None,
):
"""Options ITM"""
# Check for argument
if ticker is None:
raise Exception("Stock ticker is required")
dates = yfinance_model.option_expirations(ticker)
if not dates:
raise Exception("Stock ticker is invalid")
current_price = yfinance_model.get_price(ticker)
df_date, df_cotm, df_citm, df_potm, df_pitm = [], [], [], [], []
for date in dates:
df_date.append(date)
options = yfinance_model.get_option_chain(ticker, date)
call_oi = options.calls.set_index("strike")["openInterest"].fillna(0)
put_oi = options.puts.set_index("strike")["openInterest"].fillna(0)
df_cotm.append(int(call_oi[call_oi.index >= current_price].sum()))
df_citm.append(int(call_oi[call_oi.index <= current_price].sum()))
df_pitm.append(int(put_oi[put_oi.index >= current_price].sum()))
df_potm.append(int(put_oi[put_oi.index <= current_price].sum()))
# Calculate the total per column
df_date.append("<b>Total</b>")
total = [df_citm, df_cotm, df_pitm, df_potm]
for x in total:
x.append(sum(x))
# Create the DataFrame
df = pd.DataFrame(
{
"Expiry": df_date,
"Calls ITM": df_citm,
"Calls OTM": df_cotm,
"Puts ITM": df_pitm,
"Puts OTM": df_potm,
}
)
formats = {
"Calls ITM": "{:,}",
"Calls OTM": "{:,}",
"Puts ITM": "{:,}",
"Puts OTM": "{:,}",
}
for col, value in formats.items():
df[col] = df[col].map(lambda x: value.format(x)) # pylint: disable=W0640
df.set_index("Expiry", inplace=True)
fig = imps.plot_df(
df,
fig_size=(600, (35 * len(df.index))),
col_width=[3, 2.5],
tbl_header=imps.PLT_TBL_HEADER,
tbl_cells=imps.PLT_TBL_CELLS,
font=imps.PLT_TBL_FONT,
row_fill_color=imps.PLT_TBL_ROW_COLORS,
paper_bgcolor="rgba(0, 0, 0, 0)",
)
fig.update_traces(
cells=dict(
align=["center", "right"],
font=dict(
color=["white"]
+ [imps.PLT_TBL_INCREASING] * 2
+ [imps.PLT_TBL_DECREASING] * 2
),
),
)
imagefile = imps.save_image("opt-itm.png", fig)
return {
"title": f"{ticker.upper()} Options: In The Money",
"imagefile": imagefile,
} | b2230ee5f8c520523f7ce844372a4f26d14fe53d | 3,653,403 |
def create_nx_suite(seed=0, rng=None):
"""
returns a dict of graphs generated by networkx for testing,
designed to be used in a pytest fixture
"""
if rng is None:
rng = np.random.RandomState(seed)
out_graphs = {}
for N in [1, 2, 4, 8, 16, 32, 64, 128]:
        for dtype in [np.bool_, np.int32, np.float32, np.complex64]:
basename = f"{N}_{str(dtype)[8:-2]}"
name = f"ladder_{basename}"
out_graphs[name] = [gen_ladder(N, dtype)]
SAMPLE_N = 5
# smp = [(4,.1),(4,.5),(4,.7),(7,.1),(7,.5),(16,.1),(16,.5),(32,.1),(100,.1)]
# for N, prob_edge in smp:
for N in [4,7,16,32,100]:
for prob_edge in [.1,.5,.7]:
            dtype = np.bool_
name = f"random_lobster_{prob_edge:.1f}_{str(dtype)[8:-2]}_{N}"
out_graphs[name] = []
for i in range(SAMPLE_N):
ng = nx.generators.random_graphs.random_lobster(N,prob_edge,\
prob_edge,rng)
if ng.number_of_nodes() == 0:
continue
t = from_nx(ng,adj_type=dtype)
out_graphs[name].append(t)
dtype = np.int32
name = f"random_lobster_{prob_edge:.1f}_{str(dtype)[8:-2]}_{N}"
out_graphs[name] = []
for i in range(SAMPLE_N):
edge_weights = rng.randint(1, rng.randint(2, max(N//2, 3)),
size=rng.randint(1, N//2))
ng = nx.generators.random_graphs.random_lobster(N,prob_edge,\
prob_edge,rng)
if ng.number_of_nodes() == 0:
continue
t = from_nx(ng,adj_type=dtype)
for e1, e2 in t.edges():
t[e1, e2] = rng.choice(edge_weights)
out_graphs[name].append(t)
dtype = np.float64
name = f"random_lobster_{prob_edge:.1f}_{str(dtype)[8:-2]}_{N}"
out_graphs[name] = []
for i in range(SAMPLE_N):
edge_weights = rng.rand(rng.randint(1, N//2)) + 0.5
ng = nx.generators.random_graphs.random_lobster(N,prob_edge,\
prob_edge,rng)
if ng.number_of_nodes() == 0:
continue
t = from_nx(ng,adj_type=dtype)
for e1, e2 in t.edges():
t[e1, e2] = rng.choice(edge_weights)
out_graphs[name].append(t)
return out_graphs | eb64688850d48b755dc526ed3d64876d04ba3914 | 3,653,404 |
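The docstring above notes the suite is meant to back a pytest fixture. A minimal sketch of that wiring, assuming `create_nx_suite` (and its `gen_ladder`/`from_nx` helpers) can be imported from a hypothetical `graph_testing` module:

```python
# Hypothetical pytest wiring for create_nx_suite; the module name `graph_testing`
# is an assumption made for this sketch.
import pytest
from graph_testing import create_nx_suite

@pytest.fixture(scope="session")
def nx_suite():
    # Fixed seed so every test session sees the same randomly generated graphs.
    return create_nx_suite(seed=0)

def test_suite_is_nonempty(nx_suite):
    assert nx_suite  # dict of name -> list of generated graphs
    for name, graphs in nx_suite.items():
        assert isinstance(name, str)
        assert isinstance(graphs, list)
```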
def _nearest_neighbor_features_per_object_in_chunks(
reference_embeddings_flat, query_embeddings_flat, reference_labels_flat,
ref_obj_ids, k_nearest_neighbors, n_chunks):
"""Calculates the nearest neighbor features per object in chunks to save mem.
Uses chunking to bound the memory use.
Args:
reference_embeddings_flat: Tensor of shape [n, embedding_dim],
the embedding vectors for the reference frame.
query_embeddings_flat: Tensor of shape [m, embedding_dim], the embedding
vectors for the query frames.
reference_labels_flat: Tensor of shape [n], the class labels of the
reference frame.
ref_obj_ids: int tensor of unique object ids in the reference labels.
k_nearest_neighbors: Integer, the number of nearest neighbors to use.
n_chunks: Integer, the number of chunks to use to save memory
(set to 1 for no chunking).
Returns:
nn_features: A float32 tensor of nearest neighbor features of shape
[m, n_objects, feature_dim].
"""
chunk_size = tf.cast(tf.ceil(tf.cast(tf.shape(query_embeddings_flat)[0],
tf.float32) / n_chunks), tf.int32)
wrong_label_mask = tf.not_equal(reference_labels_flat,
ref_obj_ids[:, tf.newaxis])
all_features = []
for n in range(n_chunks):
if n_chunks == 1:
query_embeddings_flat_chunk = query_embeddings_flat
else:
chunk_start = n * chunk_size
chunk_end = (n + 1) * chunk_size
query_embeddings_flat_chunk = query_embeddings_flat[chunk_start:chunk_end]
# Use control dependencies to make sure that the chunks are not processed
# in parallel which would prevent any peak memory savings.
with tf.control_dependencies(all_features):
features = _nn_features_per_object_for_chunk(
reference_embeddings_flat, query_embeddings_flat_chunk,
wrong_label_mask, k_nearest_neighbors
)
all_features.append(features)
if n_chunks == 1:
nn_features = all_features[0]
else:
nn_features = tf.concat(all_features, axis=0)
return nn_features | e9b7af295ddfab56f70748e42fb7b06f6192a3ac | 3,653,405 |
import heapq
def heap_pop(heap):
"""
Wrapper around heapq's heappop method to support updating priorities of
items in the queue.
Main difference here is that we toss out any queue entries that have been
updated since insertion.
"""
while len(heap) > 0:
pri_board_tup = heapq.heappop(heap)
board = pri_board_tup[1]
        if board is not None:
del ENTRY_FINDER[board]
return pri_board_tup
raise KeyError('Pop from empty queue :(') | c640fd178a399332fc10ccbb55085cb08d118865 | 3,653,406 |
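`heap_pop` is the pop half of the lazy-deletion pattern from the `heapq` docs; updating a priority requires a matching push that blanks out the stale entry. A sketch of that counterpart, assuming the same module-level `ENTRY_FINDER` dict; the name `heap_push` is an assumption, not taken from the source:

```python
# Hypothetical push-side counterpart to heap_pop: re-pushing a board with a new
# priority invalidates the old heap entry, which heap_pop then silently skips.
import heapq

ENTRY_FINDER = {}  # board -> [priority, board] entry currently considered valid

def heap_push(heap, priority, board):
    if board in ENTRY_FINDER:
        # Blank out the stale entry in place instead of removing it from the heap.
        ENTRY_FINDER[board][1] = None
    entry = [priority, board]
    ENTRY_FINDER[board] = entry
    heapq.heappush(heap, entry)
    # Note: if priorities can tie, a real implementation adds a tie-breaking counter
    # (as in the heapq docs) so that boards are never compared directly.
```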
def _variant_po_to_dict(tokens) -> CentralDogma:
"""Convert a PyParsing data dictionary to a central dogma abundance (i.e., Protein, RNA, miRNA, Gene).
:type tokens: ParseResult
"""
dsl = FUNC_TO_DSL.get(tokens[FUNCTION])
if dsl is None:
raise ValueError('invalid tokens: {}'.format(tokens))
concept = tokens[CONCEPT]
return dsl(
namespace=concept[NAMESPACE],
name=concept[NAME],
identifier=concept.get(IDENTIFIER),
xrefs=tokens.get(XREFS),
variants=[
_variant_to_dsl_helper(variant_tokens)
for variant_tokens in tokens[VARIANTS]
],
) | 28e989087a91accf793eaaada2e65a71ee145c32 | 3,653,407 |
def project(name, param):
"""a tilemill project description, including a basic countries-of-the-world layer."""
return {
"bounds": [-180, -85.05112877980659, 180, 85.05112877980659],
"center": [0, 0, 2],
"format": "png",
"interactivity": False,
"minzoom": 0,
"maxzoom": 22,
"srs": "+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 "
"+y_0=0.0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs +over",
"Stylesheet": ["style.mss"],
"Layer": [
{
"id": "countries",
"name": "countries",
"srs": "+proj=merc +a=6378137 +b=6378137 +lat_ts=0.0 +lon_0=0.0 +x_0=0.0 "
"+y_0=0.0 +k=1.0 +units=m +nadgrids=@null +wktext +no_defs +over",
"geometry": "polygon",
"Datasource": {
"file": "http://mapbox-geodata.s3.amazonaws.com/natural-earth-1.4.0/"
"cultural/10m-admin-0-countries.zip",
"type": "shape"
}
},
],
"scale": 1,
"metatile": 2,
"name": name,
"description": param['properties']['name'],
} | 9609c523cccc99168bbc0e7dbf10fe8624d399c2 | 3,653,408 |
def get_deepest():
"""Return tokens with largest liquidities.
Returns:
str: HTML-formatted message.
"""
url = config.URLS['deepest']
api_params = {'limit': 5,
'orderBy': 'usdLiquidity',
'direction': 'desc',
'key': POOLS_KEY
}
response = api_call(url, params=api_params)
formatted_response = ft.format_deepest(response['results'])
return formatted_response | c944f20f65dd68716b4f436b02ec5e373c04848f | 3,653,409 |
def _grompp_str(op_name, gro_name, checkpoint_file=None):
"""Helper function, returns grompp command string for operation."""
mdp_file = signac.get_project().fn('mdp_files/{op}.mdp'.format(op=op_name))
cmd = '{gmx} grompp -f {mdp_file} -c {gro_file} {checkpoint} -o {op}.tpr -p'.format(
gmx=gmx_exec, mdp_file=mdp_file, op=op_name, gro_file=gro_name,
checkpoint='' if checkpoint_file is None else ('-t ' + checkpoint_file))
return workspace_command(cmd) | 9201bd49fd09ce9faa268d8ea4d33482cea5d7ad | 3,653,410 |
import typing
import argparse
import sys
import os
import asyncio
import signal
def run(
master_cls: typing.Type[master.Master],
make_parser: typing.Callable[[options.Options], argparse.ArgumentParser],
arguments: typing.Sequence[str],
extra: typing.Callable[[typing.Any], dict] = None
) -> master.Master: # pragma: no cover
"""
extra: Extra argument processing callable which returns a dict of
options.
"""
debug.register_info_dumpers()
opts = options.Options()
master = master_cls(opts)
parser = make_parser(opts)
# To make migration from 2.x to 3.0 bearable.
if "-R" in sys.argv and sys.argv[sys.argv.index("-R") + 1].startswith("http"):
print("-R is used for specifying replacements.\n"
"To use mitmproxy in reverse mode please use --mode reverse:SPEC instead")
try:
args = parser.parse_args(arguments)
except SystemExit:
arg_check.check()
sys.exit(1)
try:
opts.set(*args.setoptions, defer=True)
optmanager.load_paths(
opts,
os.path.join(opts.confdir, OPTIONS_FILE_NAME),
)
pconf = process_options(parser, opts, args)
server: typing.Any = None
if pconf.options.server:
try:
server = proxy.server.ProxyServer(pconf)
except exceptions.ServerException as v:
print(str(v), file=sys.stderr)
sys.exit(1)
else:
server = proxy.server.DummyServer(pconf)
master.server = server
if args.options:
print(optmanager.dump_defaults(opts))
sys.exit(0)
if args.commands:
master.commands.dump()
sys.exit(0)
if extra:
opts.update(**extra(args))
loop = asyncio.get_event_loop()
for signame in ('SIGINT', 'SIGTERM'):
try:
loop.add_signal_handler(getattr(signal, signame), master.shutdown)
except NotImplementedError:
# Not supported on Windows
pass
# Make sure that we catch KeyboardInterrupts on Windows.
# https://stackoverflow.com/a/36925722/934719
if os.name == "nt":
async def wakeup():
while True:
await asyncio.sleep(0.2)
asyncio.ensure_future(wakeup())
master.run()
except exceptions.OptionsError as e:
print("%s: %s" % (sys.argv[0], e), file=sys.stderr)
sys.exit(1)
except (KeyboardInterrupt, RuntimeError):
pass
return master | d8e99b97df0cf69affe4fd8b757a40170dea8074 | 3,653,411 |
def get_role_with_name(role_name: str) -> Role:
"""Get role with given name."""
role = Role.query.filter(Role.name == role_name).one()
return role | e20858ef1bcbb54d2c1d09ba9d3a54bf15dfa658 | 3,653,412 |
def namespace_store_factory(
request, cld_mgr, mcg_obj_session, cloud_uls_factory_session, pvc_factory_session
):
"""
Create a NamespaceStore factory.
Calling this fixture lets the user create namespace stores.
Args:
request (object): Pytest built-in fixture
cld_mgr (CloudManager): Cloud Manager object containing all
connections to clouds
mcg_obj (MCG): MCG object containing data and utils
related to MCG
cloud_uls_factory: Factory for creation of underlying storage
Returns:
func: Factory method - allows the user to create namespace stores
"""
created_nss = []
cmdMap = {
"cli": cli_create_namespacestore,
"oc": oc_create_namespacestore,
}
def _create_nss(method, nss_dict):
"""
Tracks creation and cleanup of all the namespace stores that were created in the current scope
Args:
method (str): String for selecting method of namespace store creation (CLI/OC)
nss_dict (dict): Dictionary containing storage provider as key and a list of tuples
as value.
Namespace store dictionary examples - 'CloudName': [(amount, region), (amount, region)]
i.e. - 'aws': [(3, us-west-1),(2, eu-west-2)]
Returns:
list: A list of the NamespaceStore objects created by the factory in the current scope
"""
current_call_created_nss = []
for platform, nss_lst in nss_dict.items():
for nss_tup in nss_lst:
if platform.lower() == "nsfs":
uls_name = nss_tup[0] or create_unique_resource_name(
constants.PVC.lower(), platform
)
pvc_factory_session(
custom_data=template_pvc(uls_name, size=nss_tup[1])
)
else:
# Create the actual target bucket on the request service
uls_dict = cloud_uls_factory_session({platform: [(1, nss_tup[1])]})
uls_name = list(uls_dict[platform])[0]
nss_name = create_unique_resource_name(constants.MCG_NSS, platform)
# Create the actual namespace resource
cmdMap[method.lower()](
nss_name, platform, mcg_obj_session, uls_name, cld_mgr, nss_tup
)
nss_obj = NamespaceStore(
name=nss_name,
method=method.lower(),
mcg_obj=mcg_obj_session,
uls_name=uls_name,
)
created_nss.append(nss_obj)
current_call_created_nss.append(nss_obj)
nss_obj.verify_health()
return current_call_created_nss
def nss_cleanup():
for nss in created_nss:
nss.delete()
request.addfinalizer(nss_cleanup)
return _create_nss | cc2d090d8dc0f12d89331ada54e4054e117e544d | 3,653,413 |
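The `nss_dict` format described in `_create_nss`'s docstring is easiest to see from a caller. A hypothetical test using the fixture; the platform, counts, and regions below are illustrative only:

```python
# Hypothetical test usage of the namespace_store_factory fixture above; the
# nss_dict follows the documented 'CloudName': [(amount, region)] format.
def test_create_namespace_stores(namespace_store_factory):
    nss_dict = {
        "aws": [(3, "us-west-1"), (2, "eu-west-2")],
    }
    # Create the stores via the CLI path; cleanup is handled by the fixture finalizer.
    namespace_stores = namespace_store_factory("cli", nss_dict)
    assert all(nss.name for nss in namespace_stores)
```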
def get_user(request, username):
"""
Gets a user's information.
return:
{
status: HTTP status,
name: string,
gender: string,
marital_status: string,
first_name: string
}
"""
data = get_user_info(username)
if data:
return Response({'data': data}, status=200)
else:
return Response(status=404) | 9820b441718629780ff72ab00776fc2d4c95a63f | 3,653,414 |
def find_changes(d_before, d_after):
"""
Returns a dictionary of changes in the format:
{
<system id>: {
<changed key>: <Change type>,
...
},
...
}
The changes should describe the differences between d_before and d_after.
"""
changes = dict()
for k in d_after:
if k not in d_before:
changes[k] = Change.Addition
elif type(d_before[k]) is dict and type(d_after[k]) is dict:
nested = find_changes(d_before[k], d_after[k])
if len(nested) > 0:
changes[k] = nested
elif d_before[k] != d_after[k]:
changes[k] = Change.Edit
# Apply removals
for k in d_before:
if k not in d_after:
changes[k] = Change.Removal
return changes | 02e5eea5ac1264c593d542a8f745a8d3571d5fac | 3,653,415 |
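A small worked example makes the returned structure concrete (assuming the `Change` enum referenced in the function body is in scope):

```python
# Illustrative input/output for find_changes.
before = {"sys1": {"cpu": 2, "ram": 4}, "sys2": {"cpu": 1}}
after = {"sys1": {"cpu": 4, "ram": 4, "disk": 100}, "sys3": {"cpu": 8}}

changes = find_changes(before, after)
# Expected result:
# {
#     "sys1": {"cpu": Change.Edit, "disk": Change.Addition},
#     "sys2": Change.Removal,
#     "sys3": Change.Addition,
# }
```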
def check_vector_inbetween(v1, v2, point):
""" Checks if point lies inbetween two vectors v1, v2. Returns boolean. """
if (np.dot(np.cross(v1, point), np.cross(v1, v2))) >= 0 and (np.dot(np.cross(v2, point), np.cross(v2, v1))) >= 0:
return True
else:
return False | 6eeaa4a9e37ea345c3399c103dadbc45c306887c | 3,653,416 |
def accuracy(y_preds, y_test):
"""
Function to calculate the accuracy of algorithm
:param y_preds: predictions for test data
:param y_test: actual labels for test data
:return: accuracy in percentage
"""
return np.sum(np.where(y_preds == y_test, 1, 0)) * 100 / len(y_test) | f41522663ae9a35e976f4d848f14e42ef0993fd9 | 3,653,417 |
def get_all(factory='official', **kwargs):
"""Construct and return an list of Class `Event`.
hookを呼び出す.
Args:
factory: `Event` の取得用マネージャ 今のところ,京大公式HP用のみ.
EventFactoryMixin classを継承したクラスか 'official' に対応
date (:obj:`datetime`, optional): 欲しいイベントのdatetime.
`month` , `year` とどちらかを選択.両方指定した場合,こちらが優先される.
year (int, optional): イベントを取得する年.
両方指定した場合, `date` が優先される.
month (int, optional): イベントを取得する月.
両方指定した場合, `date` が優先される.
Returns:
generator of Events
"""
return kueventparser(factory=factory, method='get_all', **kwargs) | bc1fabe37fc8065ff5394259607caf32c6345b41 | 3,653,418 |
import docker
import json
import os
import time
from vent.helpers.meta import GpuUsage
def gpu_queue(options):
"""
Queued up containers waiting for GPU resources
"""
status = (False, None)
if (os.path.isfile('/root/.vent/vent.cfg') and os.path.isfile('/root/.vent/plugin_manifest.cfg')):
path_dir = '/root/.vent'
else:
path_dir = '/vent'
print('gpu queue', str(options))
print('gpu queue', str(GpuUsage(base_dir=path_dir+'/',
meta_dir=path_dir)))
options = json.loads(options)
configs = options['configs']
gpu_options = configs['gpu_options']
devices = []
options['auto_remove'] = True
# device specified, remove all other devices
if 'device' in gpu_options:
dev = '/dev/nvidia' + gpu_options['device'] + ':/dev/nvidia'
dev += gpu_options['device'] + ':rwm'
if 'devices' in configs:
d = list(configs['devices'])
for device in d:
if any(str.isdigit(str(char)) for char in device):
if dev == device:
devices.append(device)
else:
configs['devices'].remove(device)
else:
d = configs['devices']
for device in d:
if any(str.isdigit(str(char)) for char in device):
devices.append(device)
# check if devices is still an empty list
if not devices:
status = (False, 'no valid devices match the requested device')
print(str(status))
return status
mem_needed = 0
dedicated = False
# need a gpu to itself
if ('dedicated' in configs['gpu_options'] and
configs['gpu_options']['dedicated'] == 'yes'):
dedicated = True
if 'mem_mb' in configs['gpu_options']:
# TODO input error checking
mem_needed = int(configs['gpu_options']['mem_mb'])
print('mem_needed: ', mem_needed)
print('dedicated: ', dedicated)
device = None
while not device:
usage = GpuUsage(base_dir=path_dir+'/', meta_dir=path_dir)
if usage[0]:
usage = usage[1]
else:
return usage
print(usage)
# {"device": "0",
# "mem_mb": "1024",
# "dedicated": "yes",
# "enabled": "yes"}
for d in devices:
dev = str(d.split(':')[0].split('nvidia')[1])
print(dev)
# if the device is already dedicated, can't be used
dedicated_gpus = usage['vent_usage']['dedicated']
is_dedicated = False
for gpu in dedicated_gpus:
if dev in gpu:
is_dedicated = True
print('is_dedicated: ', is_dedicated)
if not is_dedicated:
ram_used = 0
if dev in usage['vent_usage']['mem_mb']:
ram_used = usage['vent_usage']['mem_mb'][dev]
# check for vent usage/processes running
if (dedicated and
dev not in usage['vent_usage']['mem_mb'] and
mem_needed <= usage[int(dev)]['global_memory'] and
not usage[int(dev)]['processes']):
device = dev
# check for ram constraints
elif mem_needed <= (usage[int(dev)]['global_memory'] - ram_used):
device = dev
# TODO make this sleep incremental up to a point, potentially kill
# after a set time configured from vent.cfg, outputting as it goes
time.sleep(1)
# lock jobs to a specific gpu (no shared GPUs for a single process) this is
# needed to calculate if memory requested (but not necessarily in use)
# would become oversubscribed
# store which device was mapped
options['labels']['vent.gpu.device'] = device
gpu_device = '/dev/nvidia' + device + ':/dev/nvidia' + device + ':rwm'
if 'devices' in configs:
d = configs['devices']
for dev in d:
if any(str.isdigit(str(char)) for char in dev):
if gpu_device != dev:
configs['devices'].remove(dev)
try:
d_client = docker.from_env()
del options['configs']
del configs['gpu_options']
params = options.copy()
params.update(configs)
container = d_client.containers.run(**params)
status = (True, None)
except Exception as e: # pragma: no cover
status = (False, str(e))
print(str(status))
return status | aedbff3959477d7f72a9e1d49d1d338cfade9c46 | 3,653,419 |
def nudupl(f):
"""Square(f) following Cohen, Alg. 5.4.8.
"""
L = int(((abs(f.discriminant))/4)**(1/4))
a, b, c = f[0], f[1], f[2]
# Step 1 Euclidean step
d1, u, v = extended_euclid_xgcd(b, a)
A = a//d1
B = b//d1
C = (-c*u) % A
C1 = A-C
if C1 < C:
C = -C1
# Step 2 Partial reduction
d, v, v2, v3, z = parteucl(A, C, L)
# Step 3 Special case
if z==0:
g = (B*v3+c)//d
a2 = d**2
c2 = v3**2
b2 = b + (d+v3)**2 - a2 - c2
c2 = c2 + g*d1
else:
# Step 4 Final computations
e = (c*v + B*d)//A
g = (e*v2 - B)//v
b2 = e*v2 + v*g
if d1>1:
b2 = d1*b2
v = d1*v
v2 = d1*v2
a2 = d**2
c2 = v3**2
b2 = b2 + (d+v3)**2 - a2 - c2
a2 = a2 + e*v
c2 = c2 + g*v2
f2 = type(f)((a2, b2, c2))
return f2 | 7f5a16c0fc2611a5f9dc0ebde00e3587c188f944 | 3,653,420 |
def remove_schema(name):
"""Removes a configuration schema from the database"""
schema = controller.ConfigurationSchema()
schema.remove(name)
return 0 | e45b415ea6eb57402790b04bbf1fa63749242a77 | 3,653,421 |
def get_unassigned_independent_hyperparameters(outputs):
"""Going backward from the outputs provided, gets all the independent
hyperparameters that are not set yet.
    Setting a hyperparameter may lead to the creation of additional hyperparameters,
    which will most likely not be set yet. Such behavior happens when dealing with,
    for example, hyperparameters associated with substitution
modules such as :func:`deep_architect.modules.siso_optional`,
:func:`deep_architect.modules.siso_or`, and :func:`deep_architect.modules.siso_repeat`.
Args:
outputs (dict[str, deep_architect.core.Output]): Dictionary of named
outputs to start the traversal at.
Returns:
OrderedSet[deep_architect.core.Hyperparameter]:
Ordered set of hyperparameters that are currently present in the
graph and not have been assigned a value yet.
"""
assert not is_specified(outputs)
unassigned_indep_hs = OrderedSet()
for h in get_all_hyperparameters(outputs):
if not isinstance(
h, DependentHyperparameter) and not h.has_value_assigned():
unassigned_indep_hs.add(h)
return unassigned_indep_hs | 6f28f3fcbaf4a875c3a42cdb4fc9ad715c99a093 | 3,653,422 |
def get_theta_benchmark_matrix(theta_type, theta_value, benchmarks, morpher=None):
"""Calculates vector A such that dsigma(theta) = A * dsigma_benchmarks"""
if theta_type == "benchmark":
n_benchmarks = len(benchmarks)
index = list(benchmarks).index(theta_value)
theta_matrix = np.zeros(n_benchmarks)
theta_matrix[index] = 1.0
elif theta_type == "morphing":
theta_matrix = morpher.calculate_morphing_weights(theta_value)
else:
raise ValueError("Unknown theta {}".format(theta_type))
return theta_matrix | aa84006b7a69faa8803ecea20ffd24e35f178185 | 3,653,423 |
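For the "benchmark" case the weight vector A is a one-hot selector over the benchmarks; a tiny illustration with made-up benchmark names (the "morphing" branch instead requires a morpher object):

```python
# One-hot weights for the "benchmark" branch; benchmark names are made up here.
import numpy as np

benchmarks = {"sm": {}, "w_plus": {}, "w_minus": {}}  # insertion order defines the index
theta_matrix = get_theta_benchmark_matrix("benchmark", "w_plus", benchmarks)
print(theta_matrix)  # -> [0. 1. 0.], so dsigma(theta) = dsigma of the "w_plus" benchmark
```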
import os
import re
import importlib
def get_plugins(plugin_dir=None):
"""Load plugins from PLUGIN_DIR and return a dict with the plugin name
hashed to the imported plugin.
PLUGIN_DIR is the name of the dir from which to load plugins. If
it is None, use the plugin dir in the dir that holds this func.
We load plugins and run them in asciibetical order. We ignore
plugins that begin with a character other than a digit or a
letter.
PLUGIN API:
run_p(text, meta, opt): predicate returns True if this
plugin thinks it should run in the pipeline.
run(text, meta, opt): runs the plugin, returns text, meta
after_p(pdf_fname, meta, opt): predicate returns True if this plugin
thinks it should run after the pdf is produced.
after(pdf_fname, meta, opt): runs the plugin, returns meta. May change the pdf
"""
if not plugin_dir:
plugin_dir = os.path.join(
os.path.dirname(
os.path.abspath(__file__)), "plugins")
plugins = {}
pat = {
"enabled": r"^[0-9a-zA-Z]",
"prefix": r"^[0-9]+_",
}
pat = {k: re.compile(v) for k, v in pat.items()}
for fname in sorted(os.listdir(plugin_dir)):
if (not fname.endswith(".py")
or fname.startswith('.')
or '#' in fname
or not pat['enabled'].match(fname)):
continue
spec = importlib.util.spec_from_file_location(
fname, os.path.join(plugin_dir, fname))
fname = pat["prefix"].sub("", fname)
plugins[fname] = importlib.util.module_from_spec(spec)
spec.loader.exec_module(plugins[fname])
return plugins | fd9861233b08141a522628a59081ae9f255dc958 | 3,653,424 |
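The docstring spells out the plugin API (`run_p`/`run`/`after_p`/`after`). A hypothetical plugin file satisfying it could look like this; the file name `10_example.py` just follows the numeric-prefix convention that the loader strips:

```python
# plugins/10_example.py -- hypothetical plugin (not shipped with the source).
# The numeric prefix keeps it early in the asciibetical run order and is stripped
# from the name it is registered under.

def run_p(text, meta, opt):
    # Only run when the document metadata carries a title.
    return "title" in meta

def run(text, meta, opt):
    # Prepend the title as a heading and hand both values back to the pipeline.
    return "# {}\n\n{}".format(meta["title"], text), meta

def after_p(pdf_fname, meta, opt):
    # Nothing to do once the PDF exists.
    return False

def after(pdf_fname, meta, opt):
    return meta
```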
def reorder_points(point_list):
"""
Reorder points of quadrangle.
(top-left, top-right, bottom right, bottom left).
:param point_list: List of point. Point is (x, y).
:return: Reorder points.
"""
# Find the first point which x is minimum.
ordered_point_list = sorted(point_list, key=lambda x: (x[0], x[1]))
first_point = ordered_point_list[0]
# Find the third point. The slope is middle.
slope_list = [[cal_slope(first_point, p), p] for p in ordered_point_list[1:]]
ordered_slope_point_list = sorted(slope_list, key=lambda x: x[0])
first_third_slope, third_point = ordered_slope_point_list[1]
# Find the second point which is above the line between the first point and the third point.
# All that's left is the fourth point.
if above_line(ordered_slope_point_list[0][1], third_point, first_third_slope):
second_point = ordered_slope_point_list[0][1]
fourth_point = ordered_slope_point_list[2][1]
reverse_flag = False
else:
second_point = ordered_slope_point_list[2][1]
fourth_point = ordered_slope_point_list[0][1]
reverse_flag = True
# Find the top left point.
second_fourth_slope = cal_slope(second_point, fourth_point)
if first_third_slope < second_fourth_slope:
if reverse_flag:
reorder_point_list = [fourth_point, first_point, second_point, third_point]
else:
reorder_point_list = [second_point, third_point, fourth_point, first_point]
else:
reorder_point_list = [first_point, second_point, third_point, fourth_point]
return reorder_point_list | 8ef3466616ecf003750cce7e1125d913d258cf15 | 3,653,425 |
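A quick usage sketch on a convex quadrilateral given in scrambled order, assuming the `cal_slope` and `above_line` helpers referenced above are available in the same module:

```python
# Usage sketch: corners supplied in arbitrary order come back starting from the
# top-left corner and proceeding around the quadrangle.
quad = [(8, 8), (1, 1), (2, 7), (9, 2)]
ordered = reorder_points(quad)
print(ordered)  # e.g. [(1, 1), (9, 2), (8, 8), (2, 7)]
```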
import torch
def ppg_acoustics_collate(batch):
"""Zero-pad the PPG and acoustic sequences in a mini-batch.
Also creates the stop token mini-batch.
Args:
batch: An array with B elements, each is a tuple (PPG, acoustic).
Consider this is the return value of [val for val in dataset], where
dataset is an instance of PPGSpeechLoader.
Returns:
ppg_padded: A (batch_size, feature_dim_1, num_frames_1) tensor.
input_lengths: A batch_size array, each containing the actual length
of the input sequence.
acoustic_padded: A (batch_size, feature_dim_2, num_frames_2) tensor.
gate_padded: A (batch_size, num_frames_2) tensor. If "1" means reaching
stop token. Currently assign "1" at the last frame and the padding.
output_lengths: A batch_size array, each containing the actual length
of the output sequence.
"""
# Right zero-pad all PPG sequences to max input length.
# x is (PPG, acoustic), x[0] is PPG, which is an (L(varied), D) tensor.
input_lengths, ids_sorted_decreasing = torch.sort(
torch.LongTensor([x[0].shape[0] for x in batch]), dim=0,
descending=True)
max_input_len = input_lengths[0]
ppg_dim = batch[0][0].shape[1]
ppg_padded = torch.FloatTensor(len(batch), max_input_len, ppg_dim)
ppg_padded.zero_()
for i in range(len(ids_sorted_decreasing)):
curr_ppg = batch[ids_sorted_decreasing[i]][0]
ppg_padded[i, :curr_ppg.shape[0], :] = curr_ppg
# Right zero-pad acoustic features.
feat_dim = batch[0][1].shape[1]
max_target_len = max([x[1].shape[0] for x in batch])
# Create acoustic padded and gate padded
acoustic_padded = torch.FloatTensor(len(batch), max_target_len, feat_dim)
acoustic_padded.zero_()
gate_padded = torch.FloatTensor(len(batch), max_target_len)
gate_padded.zero_()
output_lengths = torch.LongTensor(len(batch))
for i in range(len(ids_sorted_decreasing)):
curr_acoustic = batch[ids_sorted_decreasing[i]][1]
acoustic_padded[i, :curr_acoustic.shape[0], :] = curr_acoustic
gate_padded[i, curr_acoustic.shape[0] - 1:] = 1
output_lengths[i] = curr_acoustic.shape[0]
ppg_padded = ppg_padded.transpose(1, 2)
acoustic_padded = acoustic_padded.transpose(1, 2)
return ppg_padded, input_lengths, acoustic_padded, gate_padded,\
output_lengths | 1357a8a9fa901a9be4f79ea13fd5ae7c3810bbeb | 3,653,426 |
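The collate function is meant to be handed to a PyTorch `DataLoader`. A sketch with a toy dataset standing in for the `PPGSpeechLoader` mentioned in the docstring; the feature dimensions are illustrative:

```python
# Each toy item is a (ppg, acoustic) pair of (num_frames, feature_dim) tensors.
import torch
from torch.utils.data import DataLoader

toy_dataset = [
    (torch.randn(120, 40), torch.randn(300, 80)),
    (torch.randn(95, 40), torch.randn(260, 80)),
]
loader = DataLoader(toy_dataset, batch_size=2, collate_fn=ppg_acoustics_collate)
ppg, in_lens, acoustic, gate, out_lens = next(iter(loader))
print(ppg.shape, acoustic.shape)  # torch.Size([2, 40, 120]) torch.Size([2, 80, 300])
```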
def problem004():
"""
Find the largest palindrome made from the product of two 3-digit numbers.
"""
return largest_palindrome_from_product_of_two_n_digit_numbers(3) | 516c98d75ac2b4e286e58ea940d49c1d2bcd2dc7 | 3,653,427 |
import torch
def residual_l1_max(reconstruction: Tensor, original: Tensor) -> Tensor:
"""Construct l1 difference between original and reconstruction.
Note: Only positive values in the residual are considered, i.e. values below zero are clamped.
    That means only pixels that are brighter in the input than in the reconstruction (likely lesions) are kept."""
residual = original - reconstruction
return torch.where(residual > 0.0, residual, torch.zeros_like(residual)) | 8649b1947845c0e3f9e57c0ec2e68d7bed94be5d | 3,653,428 |
def build_url(path):
"""
Construct an absolute url by appending a path to a domain.
"""
return 'http://%s%s' % (DOMAIN, path) | fa9df465607082993571ca71c576d7b250f6cc76 | 3,653,429 |
def get_registration_url(request, event_id):
"""
Compute the absolute URL to create a booking on a given event
@param request: An HttpRequest used to discover the FQDN and path
@param event_id: the ID of the event to register to
"""
registration_url_rel = reverse(booking_create, kwargs={"event_id": event_id})
return request.build_absolute_uri(registration_url_rel) | d8b344c6574a120d365a718934f5dc0e78173a6f | 3,653,430 |
def create_no_args_decorator(decorator_function,
function_for_metadata=None,
):
"""
Utility method to create a decorator that has no arguments at all and is implemented by `decorator_function`, in
implementation-first mode or usage-first mode.
The created decorator is a function with var-args. When called it checks the length
(0=called with parenthesis, 1=called without, 2=error).
Note: we prefer to use this var-arg signature rather than a "(_=None)" signature, because it is more readable for
the decorator's help.
:param decorator_function:
:param function_for_metadata: an alternate function to use for the documentation and module metadata of the
generated function
:return:
"""
if function_for_metadata is None:
function_for_metadata = decorator_function
@with_signature(None,
func_name=function_for_metadata.__name__,
doc=function_for_metadata.__doc__,
module_name=function_for_metadata.__module__)
def new_decorator(*_):
"""
Code for your decorator, generated by decopatch to handle the case when it is called without parenthesis
"""
if len(_) == 0:
# called with no args BUT parenthesis: @foo_decorator().
return with_parenthesis_usage(decorator_function, *_)
elif len(_) == 1:
first_arg_value = _[0]
if can_arg_be_a_decorator_target(first_arg_value):
# called with no arg NOR parenthesis: @foo_decorator
return no_parenthesis_usage(decorator_function, first_arg_value)
# more than 1 argument or non-decorable argument: not possible
raise TypeError("Decorator function '%s' does not accept any argument."
"" % decorator_function.__name__)
return new_decorator | 7067974d7bd15c238968f78aa0057086458940bf | 3,653,431 |
import os
def load_electric_devices_segmentation():
"""Load the Electric Devices segmentation problem and returns X.
We group TS of the UCR Electric Devices dataset by class label and concatenate
all TS to create segments with repeating temporal patterns and
characteristics. The location at which different classes were
concatenated are marked as change points.
We resample the resulting TS to control the TS resolution.
The window sizes for these datasets are hand-selected to capture
temporal patterns but are approximate and limited to the values
[10,20,50,100] to avoid over-fitting.
-----------
Returns
-------
X : pd.Series
Single time series for segmentation
period_length : int
The annotated period length by a human expert
change_points : numpy array
The change points annotated within the dataset
-----------
"""
dir = "segmentation"
name = "ElectricDevices"
fname = name + ".csv"
period_length = int(10)
change_points = np.int32([1090, 4436, 5712, 7923])
path = os.path.join(MODULE, DIRNAME, dir, fname)
ts = pd.read_csv(path, index_col=0, header=None, squeeze=True)
return ts, period_length, change_points | 06533351c0a95e6de041389766dc8dbd1d0c70fd | 3,653,432 |
import torch
def compute_batch_jacobian(input, output, retain_graph=False):
"""
Compute the Jacobian matrix of a batch of outputs with respect to
some input (normally, the activations of a hidden layer).
Returned Jacobian has dimensions Batch x SizeOutput x SizeInput
Args:
input (list or torch.Tensor): Tensor or sequence of tensors
with the parameters to which the Jacobian should be
computed. Important: the requires_grad attribute of input needs to
be True while computing output in the forward pass.
output (torch.Tensor): Tensor with the values of which the Jacobian is
computed
Returns (torch.Tensor): 3D tensor containing the Jacobian of output with
respect to input: batch_size x output_size x input_size.
"""
batch_jacobian = torch.Tensor(output.shape[0], output.shape[1], input.shape[1])
assert output.shape[0] == input.shape[0], \
"Batch size needs to be the same for both input and output"
for batch_idx in range(output.shape[0]):
for i, output_elem in enumerate(output[batch_idx]):
            # Retain the graph for every gradient call except the very last one,
            # where the caller-supplied retain_graph flag is honored.
            if batch_idx == output.shape[0] - 1 and i == output.shape[1] - 1:
                rg = retain_graph
            else:
                rg = True
gradients = torch.autograd.grad(output_elem, input, retain_graph=rg)[0][batch_idx].detach()
batch_jacobian[batch_idx, i, :] = gradients
return batch_jacobian | c18f596a3500f2f82e2b4716e6f9892a01fb31c7 | 3,653,433 |
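A quick sanity check, assuming the function above is in scope: for a purely linear map the per-sample Jacobian is just the weight matrix.

```python
# For y = x @ W.T, every per-sample Jacobian equals W, so the batch Jacobian is W
# repeated along the batch dimension.
import torch

lin = torch.nn.Linear(4, 3, bias=False)
x = torch.randn(5, 4, requires_grad=True)  # requires_grad must be True (see docstring)
y = lin(x)
jac = compute_batch_jacobian(x, y)         # shape: (5, 3, 4)
assert torch.allclose(jac, lin.weight.detach().expand(5, -1, -1), atol=1e-6)
```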
def is_associative(value):
"""Checks if `value` is an associative object meaning that it can be
accessed via an index or key
Args:
value (mixed): Value to check.
Returns:
bool: Whether `value` is associative.
Example:
>>> is_associative([])
True
>>> is_associative({})
True
>>> is_associative(1)
False
>>> is_associative(True)
False
.. versionadded:: 2.0.0
"""
return hasattr(value, '__getitem__') | 5d2a9e0e69ad793a98657dc13b26f79900f29294 | 3,653,434 |
def join_audio(audio1, audio2):
"""
>>> join_audio(([1], [4]), ([2, 3], [5, 6]))
([1, 2, 3], [4, 5, 6])
"""
(left1, right1) = audio1
(left2, right2) = audio2
left = left1 + left2
right = right1 + right2
audio = (left, right)
return audio | 23348b746469d362fd66371d61142b4227814ff3 | 3,653,435 |
def csi_from_sr_and_pod(success_ratio_array, pod_array):
"""Computes CSI (critical success index) from success ratio and POD.
POD = probability of detection
:param success_ratio_array: np array (any shape) of success ratios.
:param pod_array: np array (same shape) of POD values.
:return: csi_array: np array (same shape) of CSI values.
"""
return (success_ratio_array ** -1 + pod_array ** -1 - 1.) ** -1 | 84952fe6f7c8bd780c64c53183342ab0d8f3f90f | 3,653,436 |
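A worked example: a success ratio of 0.8 and a POD of 0.6 give CSI = 1 / (1/0.8 + 1/0.6 - 1) ≈ 0.522.

```python
# Worked numbers for the CSI formula above.
import numpy as np

success_ratio = np.array([0.8, 0.5])
pod = np.array([0.6, 0.5])
print(csi_from_sr_and_pod(success_ratio, pod))  # -> [0.52173913 0.33333333]
```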
import sys
def portrait_plot(
data,
xaxis_labels,
yaxis_labels,
fig=None,
ax=None,
annotate=False,
annotate_data=None,
annotate_fontsize=15,
annotate_format="{x:.2f}",
figsize=(12, 10),
vrange=None,
xaxis_fontsize=15,
yaxis_fontsize=15,
cmap="RdBu_r",
cmap_bounds=None,
cbar_label=None,
cbar_label_fontsize=15,
cbar_tick_fontsize=12,
cbar_kw={},
colorbar_off=False,
missing_color="grey",
invert_yaxis=True,
box_as_square=False,
legend_on=False,
legend_labels=None,
legend_box_xy=None,
legend_box_size=None,
legend_lw=1,
legend_fontsize=14,
logo_rect=None,
logo_off=False,
debug=False,
):
"""
Parameters
----------
- `data`: 2d numpy array, a list of 2d numpy arrays, or a 3d numpy array (i.e. stacked 2d numpy arrays)
    - `xaxis_labels`: list of strings, labels for the x-axis. The number of elements must be consistent with the x-axis,
      or 0 (empty list) to turn off x-axis tick labels
    - `yaxis_labels`: list of strings, labels for the y-axis. The number of elements must be consistent with the y-axis,
      or 0 (empty list) to turn off y-axis tick labels
- `fig`: `matplotlib.figure` instance to which the portrait plot is plotted.
If not provided, use current axes or create a new one. Optional.
- `ax`: `matplotlib.axes.Axes` instance to which the portrait plot is plotted.
If not provided, use current axes or create a new one. Optional.
    - `annotate`: bool, default=False, add annotating text if True,
      but works only for heatmap-style maps (i.e., no triangles)
- `annotate_data`: 2d numpy array, default=None. If None, the image's data is used. Optional.
- `annotate_fontsize`: number (int/float), default=15. Font size for annotation
- `annotate_format`: format for annotate value, default="{x:.2f}"
- `figsize`: tuple of two numbers (width, height), default=(12, 10), figure size in inches
- `vrange`: tuple of two numbers, range of value for colorbar. Optional.
- `xaxis_fontsize`: number, default=15, font size for xaxis tick labels
- `yaxis_fontsize`: number, default=15, font size for yaxis tick labels
- `cmap`: string, default="RdBu_r", name of matplotlib colormap
- `cmap_bounds`: list of numbers. If given, discrete colors are applied. Optional.
- `cbar_label`: string, default=None, label for colorbar
- `cbar_label_fontsize`: number, default=15, font size for colorbar labels
- `cbar_tick_fontsize`: number, default=12, font size for colorbar tick labels
- `cbar_kw`: A dictionary with arguments to `matplotlib.Figure.colorbar`. Optional.
    - `colorbar_off`: Turn off colorbar if True. Optional.
- `missing_color`: color, default="grey", `matplotlib.axes.Axes.set_facecolor` parameter
- `invert_yaxis`: bool, default=True, place y=0 at top on the plot
- `box_as_square`: bool, default=False, make each box as square
- `legend_on`: bool, default=False, show legend (only for 2 or 4 triangles portrait plot)
    - `legend_labels`: list of strings, legend labels for triangles
- `legend_box_xy`: tuple of numbers, position of legend box's upper-left corner
(lower-left if `invert_yaxis=False`), in `axes` coordinate
- `legend_box_size`: number, size of legend box
- `legend_lw`: number, line width of legend, default=1
- `legend_fontsize`: number, font size for legend, default=14
    - `logo_rect`: sequence of float. The dimensions [left, bottom, width, height] of the PMP logo.
      All quantities are in fractions of figure width and height. Optional
- `logo_off`: bool, default=False, turn off PMP logo
    - `debug`: bool, default=False, if True, print more messages while running to help debugging
Return
------
- `fig`: matplotlib component for figure
- `ax`: matplotlib component for axis
- `cbar`: matplotlib component for colorbar (not returned if colorbar_off=True)
Author: Jiwoo Lee @ LLNL (2021. 7)
"""
# ----------------
# Prepare plotting
# ----------------
data, num_divide = prepare_data(data, xaxis_labels, yaxis_labels, debug)
if num_divide not in [1, 2, 4]:
sys.exit("Error: Number of (stacked) array is not 1, 2, or 4.")
if annotate:
if annotate_data is None:
annotate_data = data
num_divide_annotate = num_divide
else:
annotate_data, num_divide_annotate = prepare_data(
annotate_data, xaxis_labels, yaxis_labels, debug
)
if num_divide_annotate != num_divide:
sys.exit("Error: annotate_data does not have same size as data")
# ----------------
# Ready to plot!!
# ----------------
if fig is None and ax is None:
fig, ax = plt.subplots(figsize=figsize)
ax.set_facecolor(missing_color)
if vrange is None:
vmin = np.nanmin(data)
vmax = np.nanmax(data)
else:
vmin = min(vrange)
vmax = max(vrange)
# Normalize colorbar
if cmap_bounds is None:
norm = matplotlib.colors.Normalize(vmin=vmin, vmax=vmax)
else:
cmap = plt.get_cmap(cmap)
norm = matplotlib.colors.BoundaryNorm(cmap_bounds, cmap.N, **cbar_kw)
# [1] Heatmap-style portrait plot (no triangles)
if num_divide == 1:
ax, im = heatmap(
data,
yaxis_labels,
xaxis_labels,
ax=ax,
invert_yaxis=invert_yaxis,
cmap=cmap,
edgecolors="k",
linewidth=0.5,
norm=norm,
)
if annotate:
if annotate_data is not None:
if annotate_data.shape != data.shape:
sys.exit("Error: annotate_data has different size than data")
else:
annotate_data = data
annotate_heatmap(
im,
ax=ax,
data=data,
annotate_data=annotate_data,
valfmt=annotate_format,
threshold=(2, -2),
fontsize=annotate_fontsize,
)
# [2] Two triangle portrait plot
elif num_divide == 2:
# data order is upper, lower
upper = data[0]
lower = data[1]
ax, im = triamatrix_wrap_up(
upper,
lower,
ax,
xaxis_labels=xaxis_labels,
yaxis_labels=yaxis_labels,
cmap=cmap,
invert_yaxis=invert_yaxis,
norm=norm,
)
# [4] Four triangle portrait plot
elif num_divide == 4:
# data order is clockwise from top: top, right, bottom, left
top = data[0]
right = data[1]
bottom = data[2]
left = data[3]
ax, im = quatromatrix(
top,
right,
bottom,
left,
ax=ax,
tripcolorkw={
"cmap": cmap,
"norm": norm,
"edgecolors": "k",
"linewidth": 0.5,
},
xaxis_labels=xaxis_labels,
yaxis_labels=yaxis_labels,
invert_yaxis=invert_yaxis,
)
# Let the horizontal axes labeling appear on top.
ax.tick_params(top=True, bottom=False, labeltop=True, labelbottom=False)
# Rotate the tick labels and set their alignment.
plt.setp(
ax.get_xticklabels(),
fontsize=xaxis_fontsize,
rotation=-30,
ha="right",
rotation_mode="anchor",
)
# Set font size for yaxis tick labels
plt.setp(ax.get_yticklabels(), fontsize=yaxis_fontsize)
# Legend
if legend_on:
if legend_labels is None:
sys.exit("Error: legend_labels was not provided.")
else:
add_legend(
num_divide,
ax,
legend_box_xy,
legend_box_size,
labels=legend_labels,
lw=legend_lw,
fontsize=legend_fontsize,
)
if box_as_square:
ax.set_aspect("equal")
if not logo_off:
if logo_rect is None:
logo_rect = [0.9, 0.15, 0.15, 0.15]
fig, ax = add_logo(fig, ax, logo_rect)
if not colorbar_off:
# Create colorbar
cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
# Label for colorbar
if cbar_label is not None:
cbar.ax.set_ylabel(
cbar_label, rotation=-90, va="bottom", fontsize=cbar_label_fontsize
)
cbar.ax.tick_params(labelsize=cbar_tick_fontsize)
return fig, ax, cbar
else:
return fig, ax | 293897dd09644969ae00f61000b603a5fe2d06ae | 3,653,437 |
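A minimal call sketch, assuming `portrait_plot` and its plotting helpers are importable; the data values and labels below are random placeholders. Passing a single 2-D array gives the plain heatmap-style plot:

```python
# Minimal heatmap-style usage; labels and data are placeholders for this sketch.
import numpy as np

data = np.random.RandomState(0).randn(4, 6)
fig, ax, cbar = portrait_plot(
    data,
    xaxis_labels=["m1", "m2", "m3", "m4", "m5", "m6"],
    yaxis_labels=["var1", "var2", "var3", "var4"],
    cbar_label="normalized error",
    logo_off=True,
)
fig.savefig("portrait.png")
```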
def compute_secondary_observables(data):
"""Computes secondary observables and extends matrix of observables.
Argument
--------
data -- structured array
must contains following fields: length, width, fluo, area, time
Returns
-------
out -- structured array
new fields are added (check `out.dtype.names`)
"""
ell, w, fluo, area, time = map(np.array,
zip(*data[['length',
'width',
'fluo',
'area',
'time']])
)
if len(time) > 1:
delta_t = time[1]-time[0]
age = (time - time[0] + delta_t/2.)/(time[-1] - time[0] + delta_t)
else:
age = np.nan
volume = spherocylinder_volume(ell, w)
concentration = fluo/volume
density = fluo/area
ALratio = area/ell
out = append_fields(data,
['volume',
'concentration',
'density',
'ALratio',
'age'],
[volume,
concentration,
density,
ALratio,
age],
usemask=False, fill_value=np.nan)
return out | 7141d16a579e4b629e25fee3a33c9a844a08e48f | 3,653,438 |
def get_account_number(arn):
"""
Extract the account number from an arn.
:param arn: IAM SSL arn
:return: account number associated with ARN
"""
return arn.split(":")[4] | 3d0fe552691ae98cf0dc70bc2055297f01a5d800 | 3,653,439 |
def get_hashtags(tweet):
"""return hashtags from a given tweet
Args:
tweet (object): an object representing a tweet
Returns:
list: list of hastags in a tweet
"""
entities = tweet.get('entities', {})
hashtags = entities.get('hashtags', [])
return [get_text(tag) for tag in hashtags if get_text(
tag) not in ['rdc', 'drc', 'rdcongo', 'drcongo']] | ef222d64294c62d27e86a4c8520bb197701ed1af | 3,653,440 |
import logging
import asyncio
from typing import cast
from typing import Dict
from typing import List
async def get_organization_catalogs(filter: FilterEnum) -> OrganizationCatalogList:
"""Return all organization catalogs."""
logging.debug("Fetching all catalogs")
async with ClientSession() as session:
(
organizations,
datasets,
dataservices,
concepts,
informationmodels,
) = await asyncio.gather(
asyncio.ensure_future(fetch_all_organizations(session)),
asyncio.ensure_future(
query_all_datasets_ordered_by_publisher(filter, session)
),
asyncio.ensure_future(
query_all_dataservices_ordered_by_publisher(filter, session)
),
asyncio.ensure_future(
query_all_concepts_ordered_by_publisher(filter, session)
),
asyncio.ensure_future(
query_all_informationmodels_ordered_by_publisher(filter, session)
),
return_exceptions=True,
)
if isinstance(organizations, BaseException):
logging.warning("Unable to fetch all organizations")
organizations = {}
if isinstance(datasets, BaseException):
logging.warning("Unable to fetch datasets")
datasets = []
if isinstance(dataservices, BaseException):
logging.warning("Unable to fetch dataservices")
dataservices = []
if isinstance(concepts, BaseException):
logging.warning("Unable to fetch concepts")
concepts = []
if isinstance(informationmodels, BaseException):
logging.warning("Unable to fetch informationmodels")
informationmodels = []
return OrganizationCatalogList(
organizations=map_org_summaries(
organizations=cast(Dict, organizations),
datasets=cast(List, datasets),
dataservices=cast(List, dataservices),
concepts=cast(List, concepts),
informationmodels=cast(List, informationmodels),
)
) | 30245f4e20156b1b5e580e72ec8d5c0f926fcb2b | 3,653,441 |
def get_appliance_ospf_neighbors_state(
self,
ne_id: str,
) -> dict:
"""Get appliance OSPF neighbors state
.. list-table::
:header-rows: 1
* - Swagger Section
- Method
- Endpoint
* - ospf
- GET
- /ospf/state/neighbors/{neId}
:param ne_id: Appliance id in the format of integer.NE e.g. ``3.NE``
:type ne_id: str
:return: Returns dictionary of OSPF neighbors state
:rtype: dict
"""
return self._get("/ospf/state/interfaces/{}".format(ne_id)) | 25a985ccf8b00ee3f27ea43d2a8371eef2443963 | 3,653,442 |
import os
def filter_toolchain_files(dirname, files):
"""Callback for shutil.copytree. Return lists of files to skip."""
split = dirname.split(os.path.sep)
for ign in IGNORE_LIST:
if ign in split:
print('Ignoring dir %s' % dirname)
return files
return [] | 1b8f4a9ca0b0828cb76a2b5de27f0b69715cf7e0 | 3,653,443 |
def info(obj):
"""Return info on shape and dtype of a numpy array or TensorFlow tensor."""
if obj is None:
return 'None.'
elif isinstance(obj, list):
if obj:
return 'List of %d... %s' % (len(obj), info(obj[0]))
else:
return 'Empty list.'
elif isinstance(obj, tuple):
if obj:
return 'Tuple of %d... %s' % (len(obj), info(obj[0]))
else:
return 'Empty tuple.'
else:
if is_a_numpy_array(obj):
return 'Array with shape: %s, dtype: %s' % (obj.shape, obj.dtype)
else:
return str(obj) | 64ffab112b0f0397541f8661a861a958c8ccf26e | 3,653,444 |
def first_index_k_zeros_left(qstr, k, P):
"""
For a binary string qstr, return the first index of q with k (mod P) zeros to the left.
Return: index in [0, qstr.length]
"""
num_zeros_left = 0
for j in range(qstr.length+1):
if (num_zeros_left - k) % P == 0:
return j
if j == qstr.length:
raise Exception("No valid position found")
if qstr[j] == 0:
num_zeros_left += 1 | 62e505290fb32b43860deae3477dec718028e7af | 3,653,445 |
def transform_points(points, transf_matrix):
"""
Transform (3,N) or (4,N) points using transformation matrix.
"""
if points.shape[0] not in [3, 4]:
raise Exception("Points input should be (3,N) or (4,N) shape, received {}".format(points.shape))
return transf_matrix.dot(np.vstack((points[:3, :], np.ones(points.shape[1]))))[:3, :] | f478dfdfe41c694ada251deca33820336001d61e | 3,653,446 |
def get_lat_long(zip):
"""
This function takes a zip code and looks up the latitude and longitude using
the uszipcode package. Documentation: https://pypi.python.org/pypi/uszipcode
"""
search = ZipcodeSearchEngine()
zip_data = search.by_zipcode(zip)
lat = zip_data['Latitude']
long = zip_data['Longitude']
return lat, long | 4fa8dc583bba9a6068db58ab86c2cab5f310edc4 | 3,653,447 |
def propose_perturbation_requests(current_input, task_idx, perturbations):
"""Wraps requests for perturbations of one task in a EvaluationRequest PB.
Generates one request for each perturbation, given by adding the perturbation
to current_input.
Args:
current_input: the current policy weights
task_idx: The index of the task to evaluate.
perturbations: A list of perturbations.
Returns:
A list of requests, one for each perturbation.
"""
requests = []
for p_idx, p in enumerate(perturbations):
perturbed_input = current_input + p
requests.append(
first_order_pb2.TaskEvaluationRequest(
request_task_idx=task_idx,
input_idx=p_idx,
eval_order=TASK_VALUE_EVAL_ORDER,
current_input=perturbed_input.tolist()))
return requests | 279da36eb633005c8f8ee79e66b71b3bdf8783f3 | 3,653,448 |
import google
def id_token_call_credentials(credentials):
"""Constructs `grpc.CallCredentials` using
`google.auth.Credentials.id_token`.
Args:
credentials (google.auth.credentials.Credentials): The credentials to use.
Returns:
grpc.CallCredentials: The call credentials.
"""
request = google.auth.transport.requests.Request()
return grpc.metadata_call_credentials(
IdTokenAuthMetadataPlugin(credentials, request)
) | 433bb494d9f8de529a891f529a42f89af0b5ef77 | 3,653,449 |
import time
def test_analyze(request,hash,db_name):
"""
Get features of a sequence, using the sequence's sha-1 hash as the
identifier.
"""
db = blat.models.Feature_Database.objects.get(name=db_name)
sequence = blat.models.Sequence.objects.get(db=db,hash=hash)
ts = int(time.mktime(sequence.modified.timetuple()))
return render_to_response(
'test/analyze.html', { "hash" : hash, "mtime" : ts },
context_instance=RequestContext(request)
) | 173ebb356167558cb64a35265caa39e828a43bae | 3,653,450 |
from typing import List
def _collect_scaling_groups(owner: str) -> List:
"""Collect autoscaling groups that contain key `ES_role` and belong to the specified owner"""
client = boto3.client("autoscaling")
print("Collecting scaling groups")
resp = client.describe_auto_scaling_groups()
assert "NextToken" not in resp, "did not program to handle pagination"
groups = resp['AutoScalingGroups']
result = []
for group in groups:
if _get_tag_val(group['Tags'], 'Owner') == owner and \
any([tag['Key'] == ES_ROLE_KEY for tag in group['Tags']]):
result.append(group)
return result | f1f75e6158450aaef834a910f8c36bb8812b1ede | 3,653,451 |
def cross_entropy_loss(logits, labels, label_smoothing=0., dtype=jnp.float32):
"""Compute cross entropy for logits and labels w/ label smoothing
Args:
logits: [batch, length, num_classes] float array.
labels: categorical labels [batch, length] int array.
label_smoothing: label smoothing constant, used to determine the on and off values.
dtype: dtype to perform loss calcs in, including log_softmax
"""
num_classes = logits.shape[-1]
labels = jax.nn.one_hot(labels, num_classes, dtype=dtype)
if label_smoothing > 0:
labels = labels * (1 - label_smoothing) + label_smoothing / num_classes
logp = jax.nn.log_softmax(logits.astype(dtype))
return -jnp.mean(jnp.sum(logp * labels, axis=-1)) | 7b6ce3145bc85433e54cef0ac85570eeb0fe7230 | 3,653,452 |
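A small semantic check, assuming the function above is in scope: with confident, correct logits the loss is near zero, and label smoothing raises it.

```python
# Shapes follow the docstring: logits [batch, length, num_classes], labels [batch, length].
import jax.numpy as jnp

logits = jnp.array([[[10.0, 0.0, 0.0], [0.0, 10.0, 0.0]]])
labels = jnp.array([[0, 1]])
print(cross_entropy_loss(logits, labels))                       # close to 0
print(cross_entropy_loss(logits, labels, label_smoothing=0.1))  # noticeably larger
```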
import torch
def set_optimizer(name, model, learning_rate):
"""
Specify which optimizer to use during training.
Initialize a torch.optim optimizer for the given model based on the specified name and learning rate.
Parameters
----------
name : string or None, default = 'adam'
The name of the torch.optim optimizer to be used. The following
strings are accepted as arguments: 'adagrad', 'adam', 'adamax', 'adamw', 'rmsprop', or 'sgd'
model : utils.models.EncoderDecoder
The model which is to be optimized
learning_rate : float or None
The learning rate to be used by the optimizer. If set to None, the default value as defined in
torch.optim is used
Returns
-------
torch.optim optimizer class
A torch.optim optimizer that implements one of the following algorithms:
Adagrad, Adam, Adamax, AdamW, RMSprop, or SGD (stochastic gradient descent)
SGD is set to use a momentum of 0.5.
"""
if name == "adam":
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
if name == "sgd":
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.5)
if name == "adamw":
optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
if name == "adagrad":
optimizer = torch.optim.Adagrad(model.parameters(), lr=learning_rate)
if name == "adamax":
optimizer = torch.optim.Adamax(model.parameters(), lr=learning_rate)
if name == "rmsprop":
optimizer = torch.optim.RMSprop(model.parameters(), lr=learning_rate)
return optimizer | 5c1a5e836176b90506ca6344c01ce6828b43d917 | 3,653,453 |
import httplib
import urllib
def get_http_url(server_path, get_path):
"""
    A variant that uses httplib directly; no better than urllib2.
server_path = "example.com"
get_path = "/some_path"
"""
    # urllib is a higher-level library which, for http, uses httplib under the hood;
    # we use httplib directly for better error detection
direct_http = 1
if direct_http:
conn = httplib.HTTPConnection(server_path)
try:
conn.request("GET", get_path)
except:
raise RuntimeError("Cant connect to: " + server_path)
response = conn.getresponse()
if response.reason != 'OK':
raise RuntimeError("Error getting data from: " + get_path)
#print response.status, response.reason, response.msg
return response
else:
f = urllib.urlopen("http://" + server_path + get_path)
#print f.info()
return f | d759609b1c48af28e678fa75bd9ff102f7eaafae | 3,653,454 |
def not_found_view(request):
"""Not Found view.
"""
model = request.context
return render_main_template(model, request, contenttile='not_found') | 0fe250d09f8fc007ffb07f848e59e779da9aefb0 | 3,653,455 |
import torch
def top_filtering(
logits, top_k=0, top_p=0.0, threshold=-float("Inf"), filter_value=-float("Inf")
):
""" Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering
Args:
logits: logits distribution shape (vocabulary size)
top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.
top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset
whose total probability mass is greater than or equal to the threshold top_p.
In practice, we select the highest probability tokens whose cumulative probability mass exceeds
the threshold top_p.
threshold: a minimal threshold to keep logits
Taken from `interact.py`
"""
assert (
logits.dim() == 1
) # Only work for batch size 1 for now - could update but it would obfuscate a bit the code
top_k = min(top_k, logits.size(-1))
if top_k > 0:
# Remove all tokens with a probability less than the last token in the top-k tokens
indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
logits[indices_to_remove] = filter_value
if top_p > 0.0:
# Compute cumulative probabilities of sorted tokens
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cumulative_probabilities = torch.cumsum(
F.softmax(sorted_logits, dim=-1), dim=-1
)
# Remove tokens with cumulative probability above the threshold
sorted_indices_to_remove = cumulative_probabilities > top_p
# Shift the indices to the right to keep also the first token above the threshold
sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
sorted_indices_to_remove[..., 0] = 0
# Back to unsorted indices and set them to -infinity
indices_to_remove = sorted_indices[sorted_indices_to_remove]
logits[indices_to_remove] = filter_value
indices_to_remove = logits < threshold
logits[indices_to_remove] = filter_value
return logits | 7b230fc959e0078f1cfc5b2f2f991c79e0f4fd86 | 3,653,456 |
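A sketch of the sampling step this filter was written for (cf. the interact.py it was taken from); the vocabulary size below is a placeholder:

```python
# Filter the next-token logits, renormalize, and sample one token id.
import torch
import torch.nn.functional as F

logits = torch.randn(50257)                 # placeholder vocabulary size
filtered = top_filtering(logits.clone(), top_k=50, top_p=0.9)
probs = F.softmax(filtered, dim=-1)         # filtered (-inf) logits get probability 0
next_token = torch.multinomial(probs, num_samples=1)
print(next_token.item())
```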
def get_physical_type(obj):
"""
Return the physical type that corresponds to a unit (or another
physical type representation).
Parameters
----------
obj : quantity-like or `~astropy.units.PhysicalType`-like
An object that (implicitly or explicitly) has a corresponding
physical type. This object may be a unit, a
`~astropy.units.Quantity`, an object that can be converted to a
`~astropy.units.Quantity` (such as a number or array), a string
that contains a name of a physical type, or a
`~astropy.units.PhysicalType` instance.
Returns
-------
`~astropy.units.PhysicalType`
A representation of the physical type(s) of the unit.
Examples
--------
The physical type may be retrieved from a unit or a
`~astropy.units.Quantity`.
>>> import astropy.units as u
>>> u.get_physical_type(u.meter ** -2)
PhysicalType('column density')
>>> u.get_physical_type(0.62 * u.barn * u.Mpc)
PhysicalType('volume')
The physical type may also be retrieved by providing a `str` that
contains the name of a physical type.
>>> u.get_physical_type("energy")
PhysicalType({'energy', 'torque', 'work'})
Numbers and arrays of numbers correspond to a dimensionless physical
type.
>>> u.get_physical_type(1)
PhysicalType('dimensionless')
"""
if isinstance(obj, PhysicalType):
return obj
if isinstance(obj, str):
return _physical_type_from_str(obj)
try:
unit = obj if isinstance(obj, core.UnitBase) else quantity.Quantity(obj, copy=False).unit
except TypeError as exc:
raise TypeError(f"{obj} does not correspond to a physical type.") from exc
unit = _replace_temperatures_with_kelvin(unit)
physical_type_id = unit._get_physical_type_id()
unit_has_known_physical_type = physical_type_id in _physical_unit_mapping
if unit_has_known_physical_type:
return _physical_unit_mapping[physical_type_id]
else:
return PhysicalType(unit, "unknown") | 03d28bdb9a507939e52bc0021dae3c539b4954a5 | 3,653,457 |
def reverse(list):
"""Returns a new list or string with the elements or characters in reverse
order"""
if isinstance(list, str):
return "".join(reversed(list))
return _list(reversed(list)) | ba74d9e4e54782114f534fb4c888c681ab708b67 | 3,653,458 |
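# Usage sketch for reverse() above (assumes the function and its `_list` helper,
# the library's list constructor, are in scope):
print(reverse("hello"))    # 'olleh'
print(reverse([1, 2, 3]))  # the same elements in reverse order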
from typing import Dict
def PubMedDiabetes(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/linqs",
version: str = "latest",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the PubMedDiabetes graph.
    The graph is automatically retrieved from the LINQS repository. The PubMed Diabetes dataset consists of 19717 scientific publications from
    the PubMed database pertaining to diabetes, classified into one of three classes.
The citation network consists of 44338 links. Each publication in the dataset
is described by a TF/IDF weighted word vector from a dictionary which consists
of 500 unique words.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "latest"
The version of the graph to retrieve.
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of PubMedDiabetes graph.
References
---------------------
Please cite the following if you use the data:
```bib
@inproceedings{namata2012query,
title={Query-driven active surveying for collective classification},
author={Namata, Galileo and London, Ben and Getoor, Lise and Huang, Bert and EDU, UMD},
booktitle={10th International Workshop on Mining and Learning with Graphs},
volume={8},
year={2012}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="PubMedDiabetes",
repository="linqs",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs,
callbacks=[
parse_linqs_pubmed_incidence_matrix
],
callbacks_arguments=[
{
"cites_path": "Pubmed-Diabetes/Pubmed-Diabetes/data/Pubmed-Diabetes.DIRECTED.cites.tab",
"content_path": "Pubmed-Diabetes/Pubmed-Diabetes/data/Pubmed-Diabetes.NODE.paper.tab",
"node_path": "nodes.tsv",
"edge_path": "edges.tsv"
}
]
)() | ddac0cfb8a525c42fe5a8d6c1a70677ab57451e0 | 3,653,459 |
def find_neighbor_indices(atoms, probe, k):
"""
Returns list of indices of atoms within probe distance to atom k.
"""
neighbor_indices = []
atom_k = atoms[k]
radius = atom_k.radius + probe + probe
indices = list(range(k))
indices = indices + list(range(k+1, len(atoms)))
for i in indices:
atom_i = atoms[i]
dist = pos_distance(atom_k.pos, atom_i.pos)
if dist < radius + atom_i.radius:
neighbor_indices.append(i)
return neighbor_indices | 05c3218357d660d6b66c3d614bfcb0d78431d32e | 3,653,460 |
def genDir(EAs):
"""
Generate the projection direction given the euler angles. Since the image
is in the x-y plane, the projection direction is given by R(EA)*z where
z = (0,0,1)
"""
dir_vec = np.array([rotmat3D_EA(*EA)[:, 2] for EA in EAs])
return dir_vec | 0753fad9638ca8b0ac4e899ad103dc08266a208b | 3,653,461 |
def plainica(x, reducedim=0.99, backend=None, random_state=None):
""" Source decomposition with ICA.
Apply ICA to the data x, with optional PCA dimensionality reduction.
Parameters
----------
x : array, shape (n_trials, n_channels, n_samples) or (n_channels, n_samples)
data set
reducedim : {int, float, 'no_pca'}, optional
        A number less than 1 is interpreted as the fraction of variance that should remain in the data. All
        components that describe in total less than `1-reducedim` of the variance are removed by the PCA step.
        An integer number of 1 or greater is interpreted as the number of components to keep after applying the PCA.
If set to 'no_pca' the PCA step is skipped.
backend : dict-like, optional
Specify backend to use. When set to None the backend configured in config.backend is used.
Returns
-------
result : ResultICA
Source decomposition
"""
x = atleast_3d(x)
t, m, l = np.shape(x)
if backend is None:
backend = scotbackend
# pre-transform the data with PCA
    if reducedim == 'no_pca':
c = np.eye(m)
d = np.eye(m)
xpca = x
else:
c, d, xpca = backend['pca'](x, reducedim)
    # run ICA on the (optionally PCA-reduced) data
    mx, ux = backend['ica'](cat_trials(xpca), random_state=random_state)
    # correct (un)mixing matrix estimates
mx = mx.dot(d)
ux = c.dot(ux)
class Result:
unmixing = ux
mixing = mx
return Result | 7ffe9ebc78220898c84459fed61fc0f32fe05e69 | 3,653,462 |
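# Usage sketch for plainica() above (a hedged example; it assumes the SCoT-style
# package providing `scotbackend`, `atleast_3d` and `cat_trials` is installed and
# that the function is in scope):
import numpy as np

x = np.random.randn(10, 8, 256)   # 10 trials, 8 channels, 256 samples
result = plainica(x, reducedim=0.99, random_state=42)
print(result.mixing.shape, result.unmixing.shape)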
import json
import math
import copy
import os
import itertools
def make_spectrum_layout(obj, spectra, user, device, width, smoothing, smooth_number):
"""
Helper function that takes the object, spectra and user info,
as well as the total width of the figure,
and produces one layout for a spectrum plot.
This can be used once for each tab on the spectrum plot,
if using different spectrum types.
Parameters
----------
obj : dict
The underlying object that is associated with all these spectra.
spectra : dict
The different spectra to be plotted. This can be a subset of
e.g., all the spectra of one type.
user : dict
info about the user, used to get the individual user plot preferences.
device: string
name of the device used ("browser", "mobile", "mobile_portrait", "tablet", etc).
width: int
width of the external frame of the plot, including the buttons/sliders.
smoothing: bool
        choose whether to start the display with the smoothed plot or the full-resolution spectrum.
smooth_number: int
number of data points to use in the moving average when displaying the smoothed spectrum.
Returns
-------
dict
Bokeh JSON embedding of one layout that can be tabbed or
used as the plot specifications on its own.
"""
rainbow = cm.get_cmap('rainbow', len(spectra))
palette = list(map(rgb2hex, rainbow(range(len(spectra)))))
color_map = dict(zip([s.id for s in spectra], palette))
data = []
for i, s in enumerate(spectra):
# normalize spectra to a median flux of 1 for easy comparison
normfac = np.nanmedian(np.abs(s.fluxes))
normfac = normfac if normfac != 0.0 else 1e-20
altdata = json.dumps(s.altdata) if s.altdata is not None else ""
annotations = (
AnnotationOnSpectrum.query_records_accessible_by(user)
.filter(AnnotationOnSpectrum.spectrum_id == s.id)
.all()
)
annotations = (
json.dumps([{a.origin: a.data} for a in annotations])
if len(annotations)
else ""
)
df = pd.DataFrame(
{
'wavelength': s.wavelengths,
'flux': s.fluxes / normfac,
'flux_original': s.fluxes / normfac,
'id': s.id,
'telescope': s.instrument.telescope.name,
'instrument': s.instrument.name,
'date_observed': s.observed_at.isoformat(sep=' ', timespec='seconds'),
'pi': (
s.assignment.run.pi
if s.assignment is not None
else (
s.followup_request.allocation.pi
if s.followup_request is not None
else ""
)
),
'origin': s.origin,
'altdata': altdata[:20] + "..." if len(altdata) > 20 else altdata,
'annotations': annotations,
}
)
data.append(df)
data = pd.concat(data)
data.sort_values(by=['date_observed', 'wavelength'], inplace=True)
split = data.groupby('id', sort=False)
(
frame_width,
aspect_ratio,
legend_row_height,
legend_items_per_row,
) = get_dimensions_by_device(device, width)
plot_height = (
math.floor(width / aspect_ratio)
if device == "browser"
else math.floor(width / aspect_ratio)
+ legend_row_height * int(len(split) / legend_items_per_row)
+ 30 # 30 is the height of the toolbar
)
# Add some height for the checkboxes and sliders
if device == "mobile_portrait":
height = plot_height + 440
elif device == "mobile_landscape":
height = plot_height + 370
else:
height = plot_height + 220
# check browser plot_height for legend overflow
if device == "browser":
plot_height_of_legend = (
legend_row_height * int(len(split) / legend_items_per_row)
+ 90 # 90 is height of toolbar plus legend offset
)
if plot_height_of_legend > plot_height:
plot_height = plot_height_of_legend
hover = HoverTool(
tooltips=[
('wavelength', '@wavelength{0,0.000}'),
('flux', '@flux'),
            ('telescope', '@telescope'),
('instrument', '@instrument'),
('UTC date observed', '@date_observed'),
('PI', '@pi'),
('origin', '@origin'),
('altdata', '@altdata{safe}'),
('annotations', '@annotations{safe}'),
],
)
flux_max = np.max(data['flux'])
flux_min = np.min(data['flux'])
ymax = flux_max * 1.05
ymin = flux_min - 0.05 * (flux_max - flux_min)
xmin = np.min(data['wavelength']) - 100
xmax = np.max(data['wavelength']) + 100
if obj.redshift is not None and obj.redshift > 0:
xmin_rest = xmin / (1.0 + obj.redshift)
xmax_rest = xmax / (1.0 + obj.redshift)
active_drag = None if "mobile" in device or "tablet" in device else "box_zoom"
tools = (
"box_zoom, pan, reset"
if "mobile" in device or "tablet" in device
else "box_zoom,wheel_zoom,pan,reset"
)
plot = figure(
frame_width=frame_width,
height=plot_height,
y_range=(ymin, ymax),
x_range=(xmin, xmax),
tools=tools,
toolbar_location="above",
active_drag=active_drag,
)
model_dict = {}
legend_items = []
for i, (key, df) in enumerate(split):
renderers = []
s = next(spec for spec in spectra if spec.id == key)
if s.label is not None and len(s.label) > 0:
label = s.label
else:
label = f'{s.instrument.name} ({s.observed_at.date().strftime("%m/%d/%y")})'
model_dict['s' + str(i)] = plot.step(
x='wavelength',
y='flux',
color=color_map[key],
source=ColumnDataSource(df),
)
renderers.append(model_dict[f's{i}'])
# this starts out the same as the previous plot, but can be binned/smoothed later in JS
dfs = copy.deepcopy(df)
if smoothing:
dfs['flux'] = smoothing_function(dfs['flux_original'], smooth_number)
model_dict[f'bin{i}'] = plot.step(
x='wavelength', y='flux', color=color_map[key], source=ColumnDataSource(dfs)
)
renderers.append(model_dict[f'bin{i}'])
# add this line plot to be able to show tooltip at hover
model_dict['l' + str(i)] = plot.line(
x='wavelength',
y='flux',
color=color_map[key],
source=ColumnDataSource(df),
line_alpha=0.0,
)
renderers.append(model_dict[f'l{i}'])
legend_items.append(LegendItem(label=label, renderers=renderers))
plot.xaxis.axis_label = 'Wavelength (Å)'
plot.yaxis.axis_label = 'Flux'
plot.toolbar.logo = None
if obj.redshift is not None and obj.redshift > 0:
plot.extra_x_ranges = {"rest_wave": Range1d(start=xmin_rest, end=xmax_rest)}
plot.add_layout(
LinearAxis(x_range_name="rest_wave", axis_label="Rest Wavelength (Å)"),
'above',
)
# TODO how to choose a good default?
plot.y_range = Range1d(0, 1.03 * data.flux.max())
legend_loc = "below" if "mobile" in device or "tablet" in device else "right"
legend_orientation = (
"vertical" if device in ["browser", "mobile_portrait"] else "horizontal"
)
add_plot_legend(plot, legend_items, width, legend_orientation, legend_loc)
# only show this tooltip for spectra, not elemental lines
hover.renderers = list(model_dict.values())
plot.add_tools(hover)
smooth_checkbox = CheckboxGroup(
labels=["smoothing"],
active=[0] if smoothing else [],
)
smooth_slider = Slider(
start=0.0,
end=100.0,
value=0.0,
step=1.0,
show_value=False,
max_width=350,
# margin=(4, 10, 0, 10),
)
smooth_input = NumericInput(value=smooth_number)
smooth_callback = CustomJS(
args=dict(
model_dict=model_dict,
n_labels=len(split),
checkbox=smooth_checkbox,
input=smooth_input,
slider=smooth_slider,
),
code=open(
os.path.join(
os.path.dirname(__file__), '../static/js/plotjs', 'smooth_spectra.js'
)
).read(),
)
smooth_checkbox.js_on_click(smooth_callback)
smooth_input.js_on_change('value', smooth_callback)
smooth_slider.js_on_change(
'value',
CustomJS(
args={'slider': smooth_slider, 'input': smooth_input},
code="""
input.value = slider.value;
input.change.emit();
""",
),
)
smooth_column = column(
smooth_checkbox,
smooth_slider,
smooth_input,
width=width if "mobile" in device else int(width * 1 / 5) - 20,
margin=(4, 10, 0, 10),
)
# 20 is for padding
slider_width = width if "mobile" in device else int(width * 2 / 5) - 20
z_title = Div(text="Redshift (<i>z</i>): ")
z_slider = Slider(
value=obj.redshift if obj.redshift is not None else 0.0,
start=0.0,
end=3.0,
step=0.00001,
show_value=False,
format="0[.]0000",
)
z_input = NumericInput(
value=obj.redshift if obj.redshift is not None else 0.0,
mode='float',
)
z_slider.js_on_change(
'value',
CustomJS(
args={'slider': z_slider, 'input': z_input},
code="""
input.value = slider.value;
input.change.emit();
""",
),
)
z = column(
z_title,
z_slider,
z_input,
width=slider_width,
margin=(4, 10, 0, 10),
)
v_title = Div(text="<i>V</i><sub>expansion</sub> (km/s): ")
v_exp_slider = Slider(
value=0.0,
start=0.0,
end=3e4,
step=10.0,
show_value=False,
)
v_exp_input = NumericInput(value=0, mode='int')
v_exp_slider.js_on_change(
'value',
CustomJS(
args={'slider': v_exp_slider, 'input': v_exp_input},
code="""
input.value = slider.value;
input.change.emit();
""",
),
)
v_exp = column(
v_title,
v_exp_slider,
v_exp_input,
width=slider_width,
margin=(0, 10, 0, 10),
)
# Track elements that need to be shifted with change in z / v
shifting_elements = []
renderers = []
obj_redshift = 0 if obj.redshift is None else obj.redshift
for i, (name, (wavelengths, color)) in enumerate(SPEC_LINES.items()):
if name in ('Tellurics-1', 'Tellurics-2'):
el_data = pd.DataFrame(
{
'name': name,
'wavelength': [(wavelengths[0] + wavelengths[1]) / 2],
'bandwidth': [wavelengths[1] - wavelengths[0]],
}
)
new_line = plot.vbar(
x='wavelength',
width='bandwidth',
top=ymax,
color=color,
source=ColumnDataSource(el_data),
alpha=0.3,
)
else:
flux_values = list(np.linspace(ymin, ymax, 100))
flux_values[-1] = np.nan
wavelength_values = [
w for w in wavelengths for _ in flux_values
] # repeat each wavelength 100 times
el_data = pd.DataFrame(
{
'name': name,
'x': wavelength_values,
'wavelength': wavelength_values,
'flux': [f for _ in wavelengths for f in flux_values],
}
)
if name != 'Sky Lines':
el_data['x'] = el_data['wavelength'] * (1.0 + obj_redshift)
new_line = plot.line(
x='x',
y='flux',
color=color,
line_alpha=0.3,
source=ColumnDataSource(el_data),
)
new_line.visible = False
model_dict[f'element_{i}'] = new_line
renderers.append(new_line)
if name not in ('Sky Lines', 'Tellurics-1', 'Tellurics-2'):
shifting_elements.append(new_line)
new_line.glyph.line_alpha = 1.0
# add the elemental lines to hover tool
hover_lines = HoverTool(
tooltips=[
('name', '@name'),
('wavelength', '@wavelength{0,0}'),
],
renderers=renderers,
)
plot.add_tools(hover_lines)
# Split spectral line legend into columns
if device == "mobile_portrait":
columns = 3
elif device == "mobile_landscape":
columns = 5
else:
columns = 7
# Create columns from a list.
#
# `list(zip_longest(a, b, c, ...))` returns a tuple where the i-th
# element comes from the i-th iterable argument.
#
# The trick here is to pass in the same iterable `column` times.
# This gives us rows.
rows = itertools.zip_longest(*[iter(SPEC_LINES.items())] * columns)
# To form columns from the rows, zip the rows together.
element_dicts = zip(*rows)
all_column_checkboxes = []
for column_idx, element_dict in enumerate(element_dicts):
element_dict = [e for e in element_dict if e is not None]
labels = [name for name, _ in element_dict]
colors = [color for name, (wavelengths, color) in element_dict]
column_checkboxes = CheckboxWithLegendGroup(
labels=labels, active=[], colors=colors, width=width // (columns + 1)
)
all_column_checkboxes.append(column_checkboxes)
callback_toggle_lines = CustomJS(
args={'column_checkboxes': column_checkboxes, **model_dict},
code=f"""
for (let i = 0; i < {len(labels)}; i = i + 1) {{
let el_idx = i * {columns} + {column_idx};
let el = eval("element_" + el_idx);
el.visible = (column_checkboxes.active.includes(i))
}}
""",
)
column_checkboxes.js_on_click(callback_toggle_lines)
# Move spectral lines when redshift or velocity changes
speclines = {f'specline_{i}': line for i, line in enumerate(shifting_elements)}
callback_zvs = CustomJS(
args={'z': z_input, 'v_exp': v_exp_input, **speclines},
code=f"""
const c = 299792.458; // speed of light in km / s
for (let i = 0; i < {len(speclines)}; i = i + 1) {{
let el = eval("specline_" + i);
el.data_source.data.x = el.data_source.data.wavelength.map(
x_i => ( x_i * (1 + z.value) /
(1 + v_exp.value / c) )
);
el.data_source.change.emit();
}}
""",
)
# Hook up callback that shifts spectral lines when z or v changes
z_input.js_on_change('value', callback_zvs)
v_exp_input.js_on_change('value', callback_zvs)
z_input.js_on_change(
'value',
CustomJS(
args={'z': z_input, 'slider': z_slider},
code="""
// Update slider value to match text input
slider.value = z.value;
""",
),
)
v_exp_input.js_on_change(
'value',
CustomJS(
args={'slider': v_exp_slider, 'v_exp': v_exp_input},
code="""
// Update slider value to match text input
slider.value = v_exp.value;
""",
),
)
row2 = row(all_column_checkboxes)
row3 = (
column(z, v_exp, smooth_column)
if "mobile" in device
else row(z, v_exp, smooth_column)
)
return column(
plot,
row2,
row3,
sizing_mode='stretch_width',
width=width,
height=height,
) | d99ec2027eaea46283d0581e7f48942c61bf22c0 | 3,653,463 |
def all_equal(values: list):
"""Check that all values in given list are equal"""
return all(values[0] == v for v in values) | 8ed08f63959367f3327554adc11b1286291963d8 | 3,653,464 |
def _tester(func, *args):
"""
    Tests function ``func`` on the arguments and returns the first argument
    for which ``func`` returns a truthy value, or None if there is none.
    >>> _tester(lambda x: x%3 == 0, 1, 2, 3, 4, 5, 6)
    3
    >>> _tester(lambda x: x%3 == 0, 1, 2) is None
    True
:param func: function(arg)->boolean
:param args: other arguments
:return: something or none
"""
for arg in args:
if arg is not None and func(arg):
return arg
return None | 035c8bf68b4ff7e4fbdb7ed1b2601f04110287d8 | 3,653,465 |
from datetime import datetime
def new_revision(partno):
"""
Presents the form to add a new revision, and creates it upon POST submit
"""
_load_if_released(partno) # ensures the component exists and is released
form = RevisionForm(request.form)
if request.method == 'POST' and form.validate_on_submit():
now = datetime.now()
result = current_app.mongo.db.components.update_one(
filter={'_id': partno},
update={
'$set': {
'released': False # a new revision is not already released
},
'$push': {
'revisions': {
'date': now,
'comment': form.comment.data
},
'history': {
'date': now,
'user': current_user.id,
'message': 'new revision created'
}
}
}
)
if result.modified_count == 1:
flash('new revision created', 'success')
else:
# should not happen.
flash('no data modified, please contact the administrator', 'error')
return redirect(url_for('components.details', partno=partno))
extract_errors(form)
return render_template('components/revision_form.html', form=form, partno=partno) | 722a3860e9daeb4bd5d9339f7dcaf5245c51b5de | 3,653,466 |
def fresnel_parameter(rays, diffraction_points):
""" returns the fresnel diffraction parameter (always as a positive)
Parameters
----------
rays : [n] list of shapely LineString (3d)
diffraction_points: [n] list of Points (3d)
diffraction point which the ray is rounding
Returns
-------
fresnel diffraction parameters: [n,] float array
"""
wavelength = 0.1903 # GPS L1 signal frequency of 1575.42 MHz
distances = np.array([r.project(d)
for r, d in zip(rays, diffraction_points)])
nearest_points = (r.interpolate(d) for r, d in zip(rays, distances))
diffraction_distances = np.array(
[d.z-p.z for p, d in zip(nearest_points, diffraction_points)])
v = np.where(distances == 0, -np.inf, diffraction_distances *
(2 / (wavelength * distances))**0.5)
return v | cd398797161f1e9e66805cd09162359ed6e89330 | 3,653,467 |
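# Usage sketch for fresnel_parameter() above (assumes the function is in scope):
# a single receiver-to-satellite ray passing near a rooftop corner.
from shapely.geometry import LineString, Point

ray = LineString([(0.0, 0.0, 1.5), (100.0, 0.0, 60.0)])
corner = Point(40.0, 0.0, 30.0)      # candidate diffraction point
v = fresnel_parameter([ray], [corner])
print(v)                             # positive here because the corner sits above the direct path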
def validate(net, val_data, ctx, eval_metric):
"""Test on validation dataset."""
eval_metric.reset()
# set nms threshold and topk constraint
net.set_nms(nms_thresh=0.45, nms_topk=400)
net.hybridize()
for batch in val_data:
data = gluon.utils.split_and_load(batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
label = gluon.utils.split_and_load(batch[1], ctx_list=ctx, batch_axis=0, even_split=False)
det_bboxes = []
det_ids = []
det_scores = []
gt_bboxes = []
gt_ids = []
gt_difficults = []
for x, y in zip(data, label):
# get prediction results
ids, scores, bboxes = net(x)
det_ids.append(ids)
det_scores.append(scores)
# clip to image size
det_bboxes.append(bboxes.clip(0, batch[0].shape[2]))
# split ground truths
gt_ids.append(y.slice_axis(axis=-1, begin=4, end=5))
gt_bboxes.append(y.slice_axis(axis=-1, begin=0, end=4))
gt_difficults.append(y.slice_axis(axis=-1, begin=5, end=6) if y.shape[-1] > 5 else None)
# update metric
eval_metric.update(det_bboxes, det_ids, det_scores, gt_bboxes, gt_ids, gt_difficults)
return eval_metric.get() | 79dcd9b0d1920952b5badd4aa9f3f234776f6e06 | 3,653,468 |
from re import S
def add_unique_geom_id(point_gdf: gpd.GeoDataFrame, log: Logger=None) -> gpd.GeoDataFrame:
"""Adds an unique identifier (string) to GeoDataFrame of points based on point locations (x/y).
"""
point_gdf[S.xy_id] = [f'{str(round(geom.x, 1))}_{str(round(geom.y, 1))}' for geom in point_gdf[S.geometry]]
unique_count = point_gdf[S.xy_id].nunique()
unique_share = round(100 * unique_count/len(point_gdf.index), 2)
log.info(f'found {unique_count} unique sampling points ({unique_share} %)')
return point_gdf | 0663e24b217c2911083d68146a5d8ff25c4fd8bd | 3,653,469 |
def get_data_parallel_rank():
"""Return my rank for the data parallel group."""
return _TOPOLOGY.get_data_parallel_rank() | a1da062793f6798e2e56809b3076c811f786a82b | 3,653,470 |
import math
def entropy(data):
"""
Compute the Shannon entropy, a measure of uncertainty.
"""
if len(data) == 0:
return None
n = sum(data)
_op = lambda f: f * math.log(f)
return - sum(_op(float(i) / n) for i in data) | ebfd9a84885a95ec6e4e7b2d88a0fb69fbbfaea1 | 3,653,471 |
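# Usage sketch for entropy() above (natural-log Shannon entropy of count data;
# assumes the function is in scope):
print(entropy([25, 25, 25, 25]))  # ~1.386 (= ln 4), maximal uncertainty over 4 outcomes
print(entropy([97, 1, 1, 1]))     # ~0.168, the distribution is nearly certain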
def transform_mtl_to_stac(metadata: dict) -> Item:
"""
Handle USGS MTL as a dict and return a STAC item.
NOT IMPLEMENTED
Issues include:
- There's no reference to UTM Zone or any other CRS info in the MTL
- There's no absolute file path or reference to a URI to find data.
"""
LANDSAT_METADATA = metadata["LANDSAT_METADATA_FILE"]
product = LANDSAT_METADATA["PRODUCT_CONTENTS"]
projection = LANDSAT_METADATA["PROJECTION_ATTRIBUTES"]
image = LANDSAT_METADATA["IMAGE_ATTRIBUTES"]
    processing_record = LANDSAT_METADATA["LEVEL2_PROCESSING_RECORD"]
scene_id = product["LANDSAT_PRODUCT_ID"]
xmin, xmax = float(projection["CORNER_LL_LON_PRODUCT"]), float(
projection["CORNER_UR_LON_PRODUCT"])
ymin, ymax = float(projection["CORNER_LL_LAT_PRODUCT"]), float(
projection["CORNER_UR_LAT_PRODUCT"])
geom = mapping(box(xmin, ymin, xmax, ymax))
bounds = shape(geom).bounds
# Like: "2020-01-01" for date and "23:08:52.6773140Z" for time
acquired_date = _parse_date(
f"{image['DATE_ACQUIRED']}T{image['SCENE_CENTER_TIME']}")
    created = _parse_date(processing_record["DATE_PRODUCT_GENERATED"])
item = Item(id=scene_id,
geometry=geom,
bbox=bounds,
datetime=acquired_date,
properties={})
# Common metadata
item.common_metadata.created = created
item.common_metadata.platform = image["SPACECRAFT_ID"]
item.common_metadata.instruments = [
i.lower() for i in image["SENSOR_ID"].split("_")
]
# TODO: implement these three extensions
EOExtension.add_to(item)
ViewExtension.add_to(item)
ProjectionExtension.add_to(item)
return item | 6e215de93da9e20451b999a963fe2d42f1ad3548 | 3,653,472 |
import torch
def alexnet(pretrained=False):
"""AlexNet model architecture from the
`"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = AlexNet()
if pretrained:
model_path = './model/alexnet.pth.tar'
pretrained_model = torch.load(model_path)
model.load_state_dict(pretrained_model['state_dict'])
return model | a42df7c926472b88501001eefd691959e6acb3ac | 3,653,473 |
import torch
def generate_random_targets(labels: Tensor, num_classes: int) -> Tensor:
"""
Generates one random target in (num_classes - 1) possibilities for each label that is different from the original
label.
Parameters
----------
labels: Tensor
Original labels. Generated targets will be different from labels.
num_classes: int
Number of classes to generate the random targets from.
Returns
-------
targets: Tensor
Random target for each label. Has the same shape as labels.
"""
random = torch.rand(len(labels), num_classes, device=labels.device, dtype=torch.float)
random.scatter_(1, labels.unsqueeze(-1), 0)
return random.argmax(1) | c0740c5ddc7c1f866b4c3cb2986f45a672d22e49 | 3,653,474 |
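# Usage sketch for generate_random_targets() above (assumes the function is in
# scope): draw a random wrong class for each label, e.g. for a targeted attack.
import torch

labels = torch.tensor([0, 2, 1, 2])
targets = generate_random_targets(labels, num_classes=3)
assert (targets != labels).all()   # each target differs from its true label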
def recall_k(sent_im_dist, im_labels, ks=(1, 5, 10)):
"""
Compute recall at given ks.
"""
im_labels = tf.cast(im_labels, tf.bool)
def retrieval_recall(dist, labels, k):
# Use negative distance to find the index of
# the smallest k elements in each row.
pred = tf.nn.top_k(-dist, k=k)[1]
# Create a boolean mask for each column (k value) in pred,
# s.t. mask[i][j] is 1 iff pred[i][k] = j.
pred_k_mask = lambda topk_idx: tf.one_hot(topk_idx, tf.shape(labels)[1],
on_value=True, off_value=False,
dtype=tf.bool)
# Create a boolean mask for the predicted indices
# by taking logical or of boolean masks for each column,
# s.t. mask[i][j] is 1 iff j is in pred[i].
pred_mask = tf.reduce_any(tf.map_fn(
pred_k_mask, tf.transpose(pred), dtype=tf.bool), axis=0)
# pred_mask = tf.map_fn(create_pred_mask, pred)
# Entry (i, j) is matched iff pred_mask[i][j] and labels[i][j] are 1.
matched = tf.cast(tf.logical_and(pred_mask, labels), dtype=tf.float32)
return tf.reduce_mean(tf.reduce_max(matched, axis=1))
img_sent_recall = [retrieval_recall(tf.transpose(sent_im_dist),
tf.transpose(im_labels), k) for k in ks]
sent_img_recall = [retrieval_recall(sent_im_dist, im_labels, k) for k in ks]
return img_sent_recall + sent_img_recall | 188f2cb4c3581f9c565253fbb17797a408ce3d74 | 3,653,475 |
def get_suggestion(project_slug, lang_slug, version_slug, pagename, user):
"""
| # | project | version | language | What to show |
| 1 | 0 | 0 | 0 | Error message |
| 2 | 0 | 0 | 1 | Error message (Can't happen) |
| 3 | 0 | 1 | 0 | Error message (Can't happen) |
| 4 | 0 | 1 | 1 | Error message (Can't happen) |
| 5 | 1 | 0 | 0 | A link to top-level page of default version |
| 6 | 1 | 0 | 1 | Available versions on the translation project |
| 7 | 1 | 1 | 0 | Available translations of requested version |
| 8 | 1 | 1 | 1 | A link to top-level page of requested version |
"""
suggestion = {}
if project_slug:
try:
proj = Project.objects.get(slug=project_slug)
if not lang_slug:
lang_slug = proj.language
try:
ver = Version.objects.get(
project__slug=project_slug, slug=version_slug)
except Version.DoesNotExist:
ver = None
if ver: # if requested version is available on main project
if lang_slug != proj.language:
try:
translations = proj.translations.filter(
language=lang_slug)
if translations:
ver = Version.objects.get(
project__slug=translations[0].slug, slug=version_slug)
else:
ver = None
except Version.DoesNotExist:
ver = None
# if requested version is available on translation project too
if ver:
# Case #8: Show a link to top-level page of the version
suggestion['type'] = 'top'
suggestion['message'] = "What are you looking for?"
suggestion['href'] = proj.get_docs_url(ver.slug, lang_slug)
# requested version is available but not in requested language
else:
# Case #7: Show available translations of the version
suggestion['type'] = 'list'
suggestion['message'] = (
"Requested page seems not to be translated in "
"requested language. But it's available in these "
"languages.")
suggestion['list'] = []
suggestion['list'].append({
'label': proj.language,
'project': proj,
'version_slug': version_slug,
'pagename': pagename
})
for t in proj.translations.all():
try:
Version.objects.get(
project__slug=t.slug, slug=version_slug)
suggestion['list'].append({
'label': t.language,
'project': t,
'version_slug': version_slug,
'pagename': pagename
})
except Version.DoesNotExist:
pass
else: # requested version does not exist on main project
if lang_slug == proj.language:
trans = proj
else:
translations = proj.translations.filter(language=lang_slug)
trans = translations[0] if translations else None
if trans: # requested language is available
# Case #6: Show available versions of the translation
suggestion['type'] = 'list'
suggestion['message'] = (
"Requested version seems not to have been built yet. "
"But these versions are available.")
suggestion['list'] = []
for v in Version.objects.public(user, trans, True):
suggestion['list'].append({
'label': v.slug,
'project': trans,
'version_slug': v.slug,
'pagename': pagename
})
# requested project exists but requested version and language
# are not available.
else:
# Case #5: Show a link to top-level page of default version
# of main project
suggestion['type'] = 'top'
suggestion['message'] = 'What are you looking for??'
suggestion['href'] = proj.get_docs_url()
except Project.DoesNotExist:
            # Case #1-4: Show error message
suggestion['type'] = 'none'
suggestion[
'message'] = "We're sorry, we don't know what you're looking for"
else:
suggestion['type'] = 'none'
suggestion[
'message'] = "We're sorry, we don't know what you're looking for"
return suggestion | 66ddf3e44f006fcd1339b0483c3219c429643353 | 3,653,476 |
def resample_data_or_seg(data, new_shape, is_seg, axis=None, order=3, do_separate_z=False, cval=0, order_z=0):
"""
separate_z=True will resample with order 0 along z
:param data:
:param new_shape:
:param is_seg:
:param axis:
:param order:
:param do_separate_z:
:param cval:
:param order_z: only applies if do_separate_z is True
:return:
"""
assert len(data.shape) == 4, "data must be (c, x, y, z)"
assert not is_seg, "do not use this patch for resampling segmentations"
print("running patched resample_data_or_seg function")
dtype_data = data.dtype
shape = np.array(data[0].shape)
new_shape = np.array(new_shape)
if np.all(shape == new_shape):
print("no resampling necessary")
return data
data = data.astype(float)
resize_fn = resize
kwargs = {'mode': 'edge', 'anti_aliasing': False}
if do_separate_z:
print("separate z, order in z is", order_z, "order inplane is", order)
assert len(axis) == 1, "only one anisotropic axis supported"
axis = axis[0]
if axis == 0:
new_shape_2d = new_shape[1:]
elif axis == 1:
new_shape_2d = new_shape[[0, 2]]
else:
new_shape_2d = new_shape[:-1]
reshaped_final_data = np.empty(shape=(data.shape[0], new_shape[0], new_shape[1], new_shape[2]), dtype=dtype_data)
do_z = shape[axis] != new_shape[axis]
if do_z:
if axis == 0:
buffer = np.empty(shape=(shape[axis], new_shape_2d[0], new_shape_2d[1]), dtype=float)
elif axis == 1:
buffer = np.empty(shape=(new_shape_2d[0], shape[axis], new_shape_2d[1]), dtype=float)
else:
buffer = np.empty(shape=(new_shape_2d[0], new_shape_2d[1], shape[axis]), dtype=float)
else:
buffer = None
for c in range(data.shape[0]):
if do_z:
reshaped_data = buffer
else:
reshaped_data = reshaped_final_data[c]
for slice_id in range(shape[axis]):
if axis == 0:
reshaped_data[slice_id, :, :] = resize_fn(data[c, slice_id], new_shape_2d, order, cval=cval, **kwargs)
elif axis == 1:
reshaped_data[:, slice_id, :] = resize_fn(data[c, :, slice_id], new_shape_2d, order, cval=cval, **kwargs)
else:
reshaped_data[:, :, slice_id] = resize_fn(data[c, :, :, slice_id], new_shape_2d, order, cval=cval, **kwargs)
if do_z:
# The following few lines are blatantly copied and modified from sklearn's resize()
rows, cols, dim = new_shape[0], new_shape[1], new_shape[2]
orig_rows, orig_cols, orig_dim = reshaped_data.shape
row_scale = float(orig_rows) / rows
col_scale = float(orig_cols) / cols
dim_scale = float(orig_dim) / dim
reshaped_final_data[c] = zoom(reshaped_data, (1 / row_scale, 1 / col_scale, 1 / dim_scale), order=order_z, cval=cval, mode='nearest')
else:
print("no separate z, order", order)
reshaped_final_data = np.empty(shape=(data.shape[0], new_shape[0], new_shape[1], new_shape[2]), dtype=dtype_data)
for c in range(data.shape[0]):
reshaped_final_data[c] = resize_fn(data[c], new_shape, order, cval=cval, **kwargs)
return reshaped_final_data | ab7aa7ab1db40ec605d7069ccf3b1bc8751c3855 | 3,653,477 |
def get_eval(appdir, config):
"""Get an Evaluation object given the configured `GlobalConfig`.
"""
return core.Evaluation(appdir, config.client, config.reps,
config.test_reps, config.simulate) | 89b1a7bbbbbf936b622c90635a54ab6517b7bc65 | 3,653,478 |
def load_queue_from_disk(filename):
"""
Load the old queue from disk when started. Old messages that weren't
posted yet are read from the queue and processed.
"""
if os.path.exists(filename):
log.msg("Loading queue from %s" % filename)
try:
with closing(open(filename, 'r')) as fp:
data = pickle.load(fp)
return data
        except IOError:
log.err()
backup_filename = "%s.%s" % (
filename,
datetime.utcnow().strftime("%Y%m%d_%H%M%S")
)
shutil.copyfile(filename, backup_filename)
log.err("Couldn't load queue from %s, backed it up to %s" % (
filename, backup_filename
))
# return an empty queue, start from scratch.
return [] | b2641c7c4ad58e683b856d82825f7bd71ec00f91 | 3,653,479 |
def ask_ok(title="Confirm", message=""):
"""Ask the user to confirm something via an ok-cancel question.
Parameters:
title (str): the text to show as the window title.
message (str): the message to show in the body of the dialog.
Returns:
bool: Whether the user selected "OK".
"""
if not isinstance(title, string_types):
raise TypeError("ask_ok() title must be a string.")
if not isinstance(message, string_types):
raise TypeError("ask_ok() message must be a string.")
return _get_app().ask_ok(title, message) | 43e88f56219715a4f292ab6021d08d1e1fbc44de | 3,653,480 |
def indexate(points):
"""
Create an array of unique points and indexes into this array.
Arguments:
points: A sequence of 3-tuples
Returns:
An array of indices and a sequence of unique 3-tuples.
"""
pd = {}
indices = tuple(pd.setdefault(tuple(p), len(pd)) for p in points)
pt = sorted([(v, k) for k, v in pd.items()], key=lambda x: x[0])
unique = tuple(i[1] for i in pt)
return indices, unique | f78ef40ea9bf6cfe427d366026b633fbb67016a2 | 3,653,481 |
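# Usage sketch for indexate() above (assumes the function is in scope):
# repeated vertices collapse to a single entry in the unique tuple.
points = [(0, 0, 0), (1, 0, 0), (0, 0, 0), (1, 1, 0)]
indices, unique = indexate(points)
print(indices)  # (0, 1, 0, 2)
print(unique)   # ((0, 0, 0), (1, 0, 0), (1, 1, 0))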
import ray
def get_handle(endpoint_name,
relative_slo_ms=None,
absolute_slo_ms=None,
missing_ok=False):
"""Retrieve RayServeHandle for service endpoint to invoke it from Python.
Args:
endpoint_name (str): A registered service endpoint.
relative_slo_ms(float): Specify relative deadline in milliseconds for
queries fired using this handle. (Default: None)
absolute_slo_ms(float): Specify absolute deadline in milliseconds for
queries fired using this handle. (Default: None)
missing_ok (bool): If true, skip the check for the endpoint existence.
It can be useful when the endpoint has not been registered.
Returns:
RayServeHandle
"""
if not missing_ok:
assert endpoint_name in ray.get(
master_actor.get_all_endpoints.remote())
return RayServeHandle(
ray.get(master_actor.get_router.remote())[0],
endpoint_name,
relative_slo_ms,
absolute_slo_ms,
) | 9db603fb9f0069a328f3fce86c2b56eec719dd21 | 3,653,482 |
def create_symbolic_controller(states, inputs):
""""Returns a dictionary with keys that are the joint torque inputs and
the values are the controller expressions. This can be used to convert
the symbolic equations of motion from 0 = f(x', x, u, t) to a closed
loop form 0 = f(x', x, t).
Parameters
----------
states : sequence of len 2 * (n + 1)
The SymPy time dependent functions for the system states where n are
the number of links.
inputs : sequence of len n
        The SymPy time dependent functions for the system joint torque
inputs (should not include the lateral force).
Returns
-------
controller_dict : dictionary
Maps joint torques to control expressions.
gain_symbols : list of SymPy Symbols
The symbols used in the gain matrix.
xeq : list of SymPy Symbols
The symbols for the equilibrium point.
"""
num_states = len(states)
num_inputs = len(inputs)
xeq = sym.Matrix([x.__class__.__name__ + '_eq' for x in states])
K = sym.Matrix(num_inputs, num_states, lambda i, j:
sym.Symbol('k_{}{}'.format(i, j)))
x = sym.Matrix(states)
T = sym.Matrix(inputs)
gain_symbols = [k for k in K]
# T = K * (xeq - x) -> 0 = T - K * (xeq - x)
controller_dict = sym.solve(T - K * (xeq - x), inputs)
return controller_dict, gain_symbols, xeq | 98d8cc545e6b70dce6161ef6c14d8bc12e0dfe77 | 3,653,483 |
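# Usage sketch for create_symbolic_controller() above (a minimal two-state,
# one-input example; assumes the function is in scope):
from sympy.physics.mechanics import dynamicsymbols

theta, omega = dynamicsymbols('theta omega')
T1 = dynamicsymbols('T1')
controller_dict, gains, xeq = create_symbolic_controller([theta, omega], [T1])
# controller_dict[T1] has the form k_00*(theta_eq - theta(t)) + k_01*(omega_eq - omega(t))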
def is_gene_name(instance):
"""This SHOULD check a webservice at HGNC/MGI for validation, but for now this just returns True always.."""
ignored(instance)
return True | a8a5b4047e8d0d8e70280f54365adf7a5eec20ee | 3,653,484 |
import re
def install_package_family(pkg):
"""
:param: pkg ie asr900rsp2-universal.03.13.03.S.154-3.S3-ext.bin
:return: device_type of the installed image ie asr900
"""
img_dev = None
m = re.search(r'(asr\d+)\w*', pkg)
if m:
img_dev = m.group(1)
return img_dev | b344d51ae426e167dbd2397ab93cbf8707b01496 | 3,653,485 |
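# Usage sketch for install_package_family() above (assumes the function is in scope):
print(install_package_family('asr900rsp2-universal.03.13.03.S.154-3.S3-ext.bin'))  # -> asr900
print(install_package_family('unknown-image.bin'))                                 # -> None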
def get_dendritic_mask_path_from_sessid(maindir, sessid, runtype="prod",
check=True):
"""
get_dendritic_mask_path_from_sessid(maindir, sessid)
Returns path to dendritic mask file for the specified session.
Required args:
- maindir (str): main directory
- sessid (int) : session ID
Optional args:
- runtype (str) : "prod" (production) or "pilot" data
default: "prod"
- check (bool) : if True, checks whether the files in the output
dictionary exist
default: True
Returns:
- maskfile (str): full path name of the extract masks hdf5 file
"""
sessdir, mouse_dir = get_sess_dir_path(maindir, sessid, runtype)
mouseid = get_mouseid(sessdir, mouse_dir)
expid = get_expid(sessdir)
maskfile = get_dendritic_mask_path(
maindir, sessid, expid, mouseid, runtype, mouse_dir, check)
return maskfile | 3dafdc661f933f93fdfdfa9d7279649ce0d08b01 | 3,653,486 |
def abbn_min_vol():
"""
Real Name: b'"Ab-bn min vol"'
Original Eqn: b'25.6'
Units: b''
Limits: (None, None)
Type: constant
b''
"""
return 25.6 | 9fdde32cf832354b9bda9fe23ab000da66205d60 | 3,653,487 |
def clear(self: Client, player: str = None, item_name: str = None,
data: int = None, max_count: int = None) -> str:
"""Clears items from player inventory, including
items being dragged by the player.
Bedrock Edition implementation.
"""
return self.run('clear', player, item_name, data, max_count) | 3b7975b80f08c1f44c1a49b0a973586859f949bf | 3,653,488 |
def load_glove_embeddings(dim, vocab):
"""
Load GloVe embedding vectors for all words in our vocabulary.
https://machinelearningmastery.com/use-word-embedding-layers-deep-learning-keras/
Parameters
----------
dim : int
Dimension of GloVe embeddings. Can be 50, 100, 200 and 300.
vocab : dict
Dictionary mapping words to index.
Returns
-------
embeddings_index : dict
A dictionary that maps word to embedding vector.
"""
embeddings_index = dict()
lower_dict = [word.lower() for word in vocab.keys()]
with open('glove.6B/glove.6B.'+str(dim)+'d.txt', 'r', encoding="utf-8") as f:
for line in f:
values = line.split()
word = values[0]
coefs = asarray(values[1:], dtype='float32')
            # use only lower case? GloVe seems to use only lower case, but what about NER?
if word in vocab:
embeddings_index[vocab[word]] = coefs
            # maybe 'Word' gets the same embedding as 'word'?
elif word in lower_dict:
try:
embeddings_index[vocab[word.title()]] = coefs
except KeyError:
continue
return embeddings_index | 63bf52b86efbb20ade43d144fd674bebd8111901 | 3,653,489 |
def check_vat_number(vat_number, country_code=None):
"""Check if a VAT number is valid.
If possible, the VAT number will be checked against available registries.
:param vat_number: VAT number to validate.
:param country_code:
Optional country code. Should be supplied if known, as there is no
guarantee that naively entered VAT numbers contain the correct alpha-2
country code prefix for EU countries just as not all non-EU countries
have a reliable country code prefix. Default ``None`` prompting
detection.
:returns:
a :class:`VatNumberCheckResult` instance containing the result for
the full VAT number check.
"""
# Decompose the VAT number.
vat_number, country_code = decompose_vat_number(vat_number, country_code)
if not vat_number or not country_code:
return VatNumberCheckResult(False, [
'> Unable to decompose VAT number, resulted in %r and %r' %
(vat_number, country_code)
])
# Test the VAT number format.
format_result = is_vat_number_format_valid(vat_number, country_code)
if format_result is not True:
return VatNumberCheckResult(format_result, [
'> VAT number validation failed: %r' % (format_result)
])
# Attempt to check the VAT number against a registry.
if country_code not in VAT_REGISTRIES:
return VatNumberCheckResult()
return VAT_REGISTRIES[country_code].check_vat_number(vat_number,
country_code) | 142a2dce1def90beed2a222b67f47e9458f97ea0 | 3,653,490 |
def argextrema(y, separate=True):
"""
    Deprecated in favor of argrel{min|max} in scipy.signal, which get separate
    extrema in about the same CPU time.
    If you need a list of
    all relative extrema in order, using this with separate=False takes about
    half the time of combining the scipy
    functions with searchsorted.
Returns the indices of the local extrema of a series. When consecutive
points at an extreme have the same value, the index of the first is
returned.
"""
delta = y[1:] - y[:-1]
pos_neg = np.zeros(len(delta), np.int8)
pos_neg[delta > 0] = 1
pos_neg[delta < 0] = -1
curve_sign = pos_neg[1:] - pos_neg[:-1]
if separate:
argmax = np.nonzero(curve_sign < 0)[0] + 1
argmin = np.nonzero(curve_sign > 0)[0] + 1
return argmin,argmax
else:
argext = np.nonzero(curve_sign != 0)[0] + 1
return argext | 709c045d608c35c3af5ca29131da8629716a07d5 | 3,653,491 |
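# Usage sketch for argextrema() above (assumes the function is in scope):
import numpy as np

y = np.array([0.0, 1.0, 0.0, -1.0, 0.0, 2.0, 0.0])
argmin, argmax = argextrema(y)        # separate=True (the default)
print(argmin, argmax)                 # [3] [1 5]
print(argextrema(y, separate=False))  # [1 3 5] -- all relative extrema in order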
from typing import Union
def examine_normal_mode(r_mol: RDKitMol,
p_mol: RDKitMol,
ts_xyz: np.array,
disp: np.array,
amplitude: Union[float, list] = 0.25,
weights: Union[bool, np.array] = True,
verbose: bool = True,
as_factors: bool = True):
"""
Examine a TS's imaginary frequency given a known reactant complex and a
    product complex. The function checks whether the bond changes correspond
    to the most significant changes in the normal mode. The reactant and product
    complexes need to be atom mapped.
Args:
r_mol ('RDKitMol'): the reactant complex.
p_mol ('RDKitMol'): the product complex.
ts_xyz (np.array): The xyz coordinates of the transition state. It should have a
size of N x 3.
disp (np.array): The displacement of the normal mode. It should have a size of
N x 3.
amplitude (float): The amplitude of the motion. Defaults to 0.25.
        weights (bool or np.array): If ``True``, use sqrt(atom mass) as a scaling factor for the displacement.
                                    If ``False``, use identity weights. If an N x 1 ``np.array`` is provided, it is
                                    used as the scaling factor directly. The concern is that light atoms
                                    (e.g., H) tend to have larger motions than heavier atoms.
        verbose (bool): Whether to print detailed information. Defaults to ``True``.
        as_factors (bool): Whether to return the factor values instead of a pass/fail judgment.
                           Defaults to ``True``.
Returns:
- bool: ``True`` for pass the examination, ``False`` otherwise.
- list: If `as_factors == True`, two factors will be returned.
"""
# Analyze connectivity
broken, formed, changed = get_all_changing_bonds(r_mol, p_mol)
reacting_bonds = broken + formed + changed
# Generate weights
if isinstance(weights, bool) and weights:
atom_masses = np.array(r_mol.GetAtomMasses()).reshape(-1, 1)
weights = np.sqrt(atom_masses)
elif isinstance(weights, bool) and not weights:
weights = np.ones((ts_xyz.shape[0], 1))
# Generate conformer instance according to the displacement
xyzs = ts_xyz - amplitude * disp * weights, ts_xyz + amplitude * disp * weights
    r_copy = r_mol.Copy()
    r_copy.SetPositions(xyzs[0])
    p_copy = p_mol.Copy()
    p_copy.SetPositions(xyzs[1])
r_conf, p_conf = r_copy.GetConformer(), p_copy.GetConformer()
# Calculate bond distance change
formed_and_broken_diff = [abs(r_conf.GetBondLength(bond) - p_conf.GetBondLength(bond))
for bond in broken + formed]
changed_diff = [abs(r_conf.GetBondLength(bond) - p_conf.GetBondLength(bond))
for bond in changed]
other_bonds_diff = [abs(r_conf.GetBondLength(bond) - p_conf.GetBondLength(bond))
for bond in r_copy.GetBondsAsTuples() if bond not in reacting_bonds]
    # We expect bonds that are formed or broken in the reaction
    # to have relatively large changes, while bonds that only change their
    # bond order may have a smaller factor.
# In this function, we only use the larger factor as a check.
# The smaller factor is less deterministic, considering the change in
# other bonds due to the change of atom hybridization or bond conjugation.
baseline = np.max(other_bonds_diff)
std = np.std(other_bonds_diff)
larger_factor = (np.min(formed_and_broken_diff) - baseline) / std
if changed_diff:
# There might be no bond that only changes its order
smaller_factor = (np.min(changed_diff) - baseline) / std
else:
smaller_factor = 0
if verbose:
print(f'The min. bond distance change for bonds that are broken or formed'
f' is {np.min(formed_and_broken_diff)} A and is {larger_factor:.1f} STD off the baseline.')
if changed_diff:
print(f'The min. bond distance change for bonds that are changed'
f' is {np.min(changed_diff)} A and is {smaller_factor:.1f} STD off the baseline.')
if as_factors:
return larger_factor, smaller_factor
if larger_factor > 3:
return True
return False | 96fc2f4153dd231756a88e46ee608a0f54d6dabc | 3,653,492 |
def generate_sprites(factor_dist, num_sprites=1):
"""Create callable that samples sprites from a factor distribution.
Args:
factor_dist: The factor distribution from which to sample. Should be an
instance of factor_distributions.AbstractDistribution.
num_sprites: Int or callable returning int. Number of sprites to generate
per call.
Returns:
_generate: Callable that returns a list of Sprites.
"""
def _generate():
n = num_sprites() if callable(num_sprites) else num_sprites
sprites = [sprite.Sprite(**factor_dist.sample()) for _ in range(n)]
return sprites
return _generate | 8c09b3fe9916d0d8bc4094d62de3910de800f835 | 3,653,493 |
import warnings
def recode_from_index_mapper(meta, series, index_mapper, append):
"""
Convert a {value: logic} map to a {value: index} map.
This function takes a mapper of {key: logic} entries and resolves
the logic statements using the given meta/data to return a mapper
of {key: index}. The indexes returned can be used on data to isolate
the cases described by arbitrarily complex logical statements.
Parameters
----------
meta : dict
Quantipy meta document.
series : pandas.Series
The series in which the recoded data will be stored and
returned.
index_mapper : dict
A mapper of {key: index}
append : bool
        Should the newly recoded data be appended to items already found
in series? If False, data from series (where found) will
overwrite whatever was found for that item in ds1 instead.
Returns
-------
series : pandas.Series
The series in which the recoded data will be stored and
returned.
"""
qtype = meta['columns'][series.name]['type']
if qtype in ['delimited set']:
if series.dtype in ['int64', 'float64']:
not_null = series.notnull()
if len(not_null) > 0:
series.loc[not_null] = series.loc[not_null].map(str) + ';'
if index_mapper:
cols = [str(c) for c in sorted(index_mapper.keys())]
else:
vals = meta['columns'][series.name]['values']
codes = [c['value'] for c in vals]
cols = [str(c) for c in codes]
ds = pd.DataFrame(0, index=series.index, columns=cols)
for key, idx in index_mapper.iteritems():
ds[str(key)].loc[idx] = 1
ds2 = condense_dichotomous_set(ds)
org_name = series.name
series = join_delimited_set_series(series, ds2, append)
## Remove potential duplicate values
if series.dropna().empty:
warn_msg = 'Could not recode {}, found empty data column dependency!'.format(org_name)
warnings.warn(warn_msg)
return series
ds = series.str.get_dummies(';')
# Make sure columns are in numeric order
ds.columns = [int(float(c)) for c in ds.columns]
cols = sorted(ds.columns.tolist())
ds = ds[cols]
ds.columns = [str(i) for i in ds.columns]
# Reconstruct the dichotomous set
series = condense_dichotomous_set(ds)
elif qtype in ['single', 'int', 'float']:
for key, idx in index_mapper.iteritems():
series.loc[idx] = key
else:
raise TypeError(
"Can't recode '{col}'. Recoding for '{typ}' columns is not"
" yet supported.".format(col=series.name, typ=qtype)
)
return series | e8d2afc8536f552e2af277b60af47f8b8c07d961 | 3,653,494 |
def get_variables():
"""Loads ODAHU config as Robot variable
"""
return {'CONFIG': {var: getattr(config, var) for var in config.ALL_VARIABLES}} | 78ae110fdbe2837df00b06e47132b0ceda3648dd | 3,653,495 |
import string
def is_number(char: Text) -> bool:
"""Checks if char is number. Returns Boolean."""
return char in string.digits | 4bec510537057c8f6a48f35c6d0b6d9f300c00b7 | 3,653,496 |
def sliceData(data, slicebox=[None,None,None,None]):
"""
Sum 2d data along both axes and return 1d datasets
**Inputs**
data (sans2d) : data in
slicebox (range?:xy): region over which to integrate (in data coordinates)
**Returns**
xout (sans1d) : xslice
yout (sans1d) : yslice
2018-04-20 Brian Maranville
"""
if slicebox is None:
slicebox = [None, None, None, None]
xmin, xmax, ymin, ymax = slicebox
res = data.copy()
if data.qx is None or data.qy is None:
# then use pixels
xslice = slice(int(np.ceil(xmin)) if xmin is not None else None, int(np.floor(xmax)) if xmax is not None else None)
yslice = slice(int(np.ceil(ymin)) if ymin is not None else None, int(np.floor(ymax)) if ymax is not None else None)
x_in = np.arange(data.data.x.shape[0])
y_in = np.arange(data.data.x.shape[1])
x_out = x_in[xslice]
y_out = y_in[yslice]
dx = np.zeros_like(x_out)
dy = np.zeros_like(y_out)
else:
# then use q-values
qxmin = data.qx_min if data.qx_min is not None else data.qx.min()
qxmax = data.qx_max if data.qx_max is not None else data.qx.max()
qx_in = np.linspace(qxmin, qxmax, data.data.x.shape[0])
qymin = data.qy_min if data.qy_min is not None else data.qy.min()
qymax = data.qy_max if data.qy_max is not None else data.qy.max()
qy_in = np.linspace(qymin, qymax, data.data.x.shape[1])
xslice = slice(get_index(qx_in, xmin), get_index(qx_in, xmax))
yslice = slice(get_index(qy_in, ymin), get_index(qy_in, ymax))
x_out = qx_in[xslice]
y_out = qy_in[yslice]
dx = np.zeros_like(x_out)
dy = np.zeros_like(y_out)
dataslice = (xslice, yslice)
x_sum = uncertainty.sum(data.data[dataslice], axis=1)
y_sum = uncertainty.sum(data.data[dataslice], axis=0)
x_output = Sans1dData(x_out, x_sum.x, dx=dx, dv=x_sum.variance, xlabel=data.xlabel, vlabel="I",
xunits="", vunits="neutrons", metadata=data.metadata)
y_output = Sans1dData(y_out, y_sum.x, dx=dy, dv=y_sum.variance, xlabel=data.ylabel, vlabel="I",
xunits="", vunits="neutrons", metadata=data.metadata)
return x_output, y_output | 1d30a500a29c1803eb6982bb7442f9e328e3f245 | 3,653,497 |
def GetChangeUrl(host, change):
"""Given a Gerrit host name and change ID, returns a URL for the change."""
return '%s://%s/a/changes/%s' % (GERRIT_PROTOCOL, host, change) | 61ff03daa28b22ca88ab2b2f67ec18ab9617c691 | 3,653,498 |
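# Usage sketch for GetChangeUrl() above (assumes the function is in scope and that
# the module-level constant GERRIT_PROTOCOL, not shown here, is 'https'):
print(GetChangeUrl('chromium-review.googlesource.com', '456789'))
# -> https://chromium-review.googlesource.com/a/changes/456789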
from typing import List
import json
import os
def _ignored_jenkins_node_names() -> List[str]:
"""
Ignore nodes with these names
:return: Config list
"""
return json.loads(os.environ['IGNORED_JENKINS_NODE_NAMES']) | cec9685517cb1344bbf1ec7a6352e6727d7e80e2 | 3,653,499 |