content (stringlengths 35-762k) | sha1 (stringlengths 40) | id (int64 0-3.66M) |
---|---|---|
import os
import logging
def _GetSharedLibraryInHost(soname, sosize, dirs):
"""Find a shared library by name in a list of directories.
Args:
soname: library name (e.g. libfoo.so)
sosize: library file size to match.
dirs: list of directories to look for the corresponding file.
Returns:
host library path if found, or None
"""
for d in dirs:
host_so_file = os.path.join(d, os.path.basename(soname))
if not os.path.isfile(host_so_file):
continue
if os.path.getsize(host_so_file) != sosize:
continue
        logging.debug("%s matches the library in the APK", host_so_file)
return host_so_file | 6ed492053f78fd76fdbb1deab5bd557d13e650de | 3,653,300 |
import os
def check_n_jobs(n_jobs: int) -> int:
"""Check `n_jobs` parameter according to the scikit-learn convention.
Parameters
----------
n_jobs : int, positive or -1
The number of jobs for parallelization.
Returns
-------
n_jobs : int
Checked number of jobs.
"""
# scikit-learn convention
# https://scikit-learn.org/stable/glossary.html#term-n-jobs
if n_jobs is None:
return 1
elif not is_int(n_jobs):
raise ValueError(f"`n_jobs` must be None or an integer, but found: {n_jobs}")
elif n_jobs < 0:
        # scikit-learn convention: for n_jobs < 0, (n_cpus + 1 + n_jobs) workers are used
        return os.cpu_count() + n_jobs + 1
else:
return n_jobs | a7eca51f4431eda45c574844ea6e4576de13b1fe | 3,653,301 |
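For reference, a minimal sketch of what the negative-value branch should yield under the scikit-learn convention (n_jobs=-1 means all CPUs, -2 all but one); the CPU count is whatever os.cpu_count() reports:
import os

# Expected worker counts for negative n_jobs: n_cpus + 1 + n_jobs
for n_jobs in (-1, -2, -4):
    print(n_jobs, "->", os.cpu_count() + n_jobs + 1)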
def validateFilename(value):
"""
Validate filename.
"""
if 0 == len(value):
raise ValueError("Name of SimpleGridDB file must be specified.")
return value | b8b3c23772437c1ddca597c44c66b239955a26fb | 3,653,302 |
from numpy import fromstring, reshape, uint8
def readPNM(fd):
"""Reads the PNM file from the filehandle"""
t = noncomment(fd)
s = noncomment(fd)
m = noncomment(fd) if not (t.startswith('P1') or t.startswith('P4')) else '1'
data = fd.read()
ls = len(s.split())
if ls != 2 :
name = "<pipe>" if fd.name=="<fdopen>" else "Filename = {0}".format(fd.name)
raise IOError("Expected 2 elements from parsing PNM file, got {0}: {1}".format(ls, name))
xs, ys = s.split()
width = int(xs)
height = int(ys)
m = int(m)
    if m != 255:
        print("Just want 8 bit pgms for now!")
d = fromstring(data,dtype=uint8)
d = reshape(d, (height,width) )
return (m,width,height, d) | c03633069b2b8f3302a8f28e03f4476ac7478055 | 3,653,303 |
def gdxfile(rawgdx):
"""A gdx.File fixture."""
return gdx.File(rawgdx) | 6138077fa959cecd4a7402fe3c7b6b7dee5d99f9 | 3,653,304 |
import typing
from typing import Union
from typing import Dict
from typing import Any
def AppBar(
absolute: bool = None,
app: bool = None,
attributes: dict = {},
bottom: bool = None,
children: list = [],
class_: str = None,
clipped_left: bool = None,
clipped_right: bool = None,
collapse: bool = None,
collapse_on_scroll: bool = None,
color: str = None,
dark: bool = None,
dense: bool = None,
elevate_on_scroll: bool = None,
elevation: typing.Union[float, str] = None,
extended: bool = None,
extension_height: typing.Union[float, str] = None,
fade_img_on_scroll: bool = None,
fixed: bool = None,
flat: bool = None,
floating: bool = None,
height: typing.Union[float, str] = None,
hide_on_scroll: bool = None,
inverted_scroll: bool = None,
layout: Union[Dict[str, Any], Element[ipywidgets.widgets.widget_layout.Layout]] = {},
light: bool = None,
max_height: typing.Union[float, str] = None,
max_width: typing.Union[float, str] = None,
min_height: typing.Union[float, str] = None,
min_width: typing.Union[float, str] = None,
prominent: bool = None,
scroll_off_screen: bool = None,
scroll_target: str = None,
scroll_threshold: typing.Union[str, float] = None,
short: bool = None,
shrink_on_scroll: bool = None,
slot: str = None,
src: typing.Union[str, dict] = None,
style_: str = None,
tag: str = None,
tile: bool = None,
v_model: Any = "!!disabled!!",
v_on: str = None,
v_slots: list = [],
value: bool = None,
width: typing.Union[float, str] = None,
on_absolute: typing.Callable[[bool], Any] = None,
on_app: typing.Callable[[bool], Any] = None,
on_attributes: typing.Callable[[dict], Any] = None,
on_bottom: typing.Callable[[bool], Any] = None,
on_children: typing.Callable[[list], Any] = None,
on_class_: typing.Callable[[str], Any] = None,
on_clipped_left: typing.Callable[[bool], Any] = None,
on_clipped_right: typing.Callable[[bool], Any] = None,
on_collapse: typing.Callable[[bool], Any] = None,
on_collapse_on_scroll: typing.Callable[[bool], Any] = None,
on_color: typing.Callable[[str], Any] = None,
on_dark: typing.Callable[[bool], Any] = None,
on_dense: typing.Callable[[bool], Any] = None,
on_elevate_on_scroll: typing.Callable[[bool], Any] = None,
on_elevation: typing.Callable[[typing.Union[float, str]], Any] = None,
on_extended: typing.Callable[[bool], Any] = None,
on_extension_height: typing.Callable[[typing.Union[float, str]], Any] = None,
on_fade_img_on_scroll: typing.Callable[[bool], Any] = None,
on_fixed: typing.Callable[[bool], Any] = None,
on_flat: typing.Callable[[bool], Any] = None,
on_floating: typing.Callable[[bool], Any] = None,
on_height: typing.Callable[[typing.Union[float, str]], Any] = None,
on_hide_on_scroll: typing.Callable[[bool], Any] = None,
on_inverted_scroll: typing.Callable[[bool], Any] = None,
on_layout: typing.Callable[[Union[Dict[str, Any], Element[ipywidgets.widgets.widget_layout.Layout]]], Any] = None,
on_light: typing.Callable[[bool], Any] = None,
on_max_height: typing.Callable[[typing.Union[float, str]], Any] = None,
on_max_width: typing.Callable[[typing.Union[float, str]], Any] = None,
on_min_height: typing.Callable[[typing.Union[float, str]], Any] = None,
on_min_width: typing.Callable[[typing.Union[float, str]], Any] = None,
on_prominent: typing.Callable[[bool], Any] = None,
on_scroll_off_screen: typing.Callable[[bool], Any] = None,
on_scroll_target: typing.Callable[[str], Any] = None,
on_scroll_threshold: typing.Callable[[typing.Union[str, float]], Any] = None,
on_short: typing.Callable[[bool], Any] = None,
on_shrink_on_scroll: typing.Callable[[bool], Any] = None,
on_slot: typing.Callable[[str], Any] = None,
on_src: typing.Callable[[typing.Union[str, dict]], Any] = None,
on_style_: typing.Callable[[str], Any] = None,
on_tag: typing.Callable[[str], Any] = None,
on_tile: typing.Callable[[bool], Any] = None,
on_v_model: typing.Callable[[Any], Any] = None,
on_v_on: typing.Callable[[str], Any] = None,
on_v_slots: typing.Callable[[list], Any] = None,
on_value: typing.Callable[[bool], Any] = None,
on_width: typing.Callable[[typing.Union[float, str]], Any] = None,
) -> Element[ipyvuetify.generated.AppBar]:
""" """
kwargs: Dict[Any, Any] = without_default(AppBar, locals())
if isinstance(kwargs.get("layout"), dict):
kwargs["layout"] = w.Layout(**kwargs["layout"])
widget_cls = ipyvuetify.generated.AppBar
comp = react.core.ComponentWidget(widget=widget_cls)
return Element(comp, **kwargs) | 51de728b0d2935161bd040248d94b3d15aba5d16 | 3,653,305 |
import os
def check_file(filename):
"""Check if "filename" exists and is a file.
Returns:
True if file exists and is a file.
False if filename==None or is not a file.
"""
file_ok = True
error_mssg = ""
if(filename == None):
error_mssg = "Error: file is missing."
file_ok = False
else:
if not os.path.isfile(filename):
error_mssg = "Error: '"+str(filename)+"' is not a file."
file_ok = False
return file_ok, error_mssg | 4a9e9284648c5d6222a44f1156b198f6e64dd409 | 3,653,306 |
def conjoin(*funcs):
"""
Creates a function that composes multiple predicate functions into a single predicate that tests
whether **all** elements of an object pass each predicate.
Args:
*funcs (callable): Function(s) to conjoin.
Returns:
Conjoin: Function(s) wrapped in a :class:`Conjoin` context.
Example:
>>> conjoiner = conjoin(lambda x: isinstance(x, int), lambda x: x > 3)
>>> conjoiner([1, 2, 3])
False
>>> conjoiner([1.0, 2, 1])
False
>>> conjoiner([4.0, 5, 6])
False
>>> conjoiner([4, 5, 6])
True
.. versionadded:: 2.0.0
"""
return Conjoin(*funcs) | 835c2962bcc3a2c3dcf0bf19649221aebb73b63b | 3,653,307 |
import hashlib
def calculate_file_sha256(file_path):
"""calculate file sha256 hash code."""
with open(file_path, 'rb') as fp:
sha256_cal = hashlib.sha256()
sha256_cal.update(fp.read())
return sha256_cal.hexdigest() | bfa7a43516e51a80ccd63ea3ace6be6e5e9dd2c0 | 3,653,308 |
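For large files it can be preferable to hash in chunks instead of reading the whole file into memory; a minimal alternative sketch (not part of the original snippet):
import hashlib

def calculate_file_sha256_chunked(file_path, chunk_size=1 << 20):
    """Calculate a file's SHA-256 hash without loading it fully into memory."""
    sha256_cal = hashlib.sha256()
    with open(file_path, 'rb') as fp:
        for chunk in iter(lambda: fp.read(chunk_size), b''):
            sha256_cal.update(chunk)
    return sha256_cal.hexdigest()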
from collections import OrderedDict
import warnings
import numpy as np
def select_columns_by_feature_type(df, unique_value_to_total_value_ratio_threshold=.05, text_unique_threshold=.9,
exclude_strings = True, return_dict = False, return_type='categoric'):
""" Determine if a column fits into one of the following types: numeric, categoric, datetime, text.
set return_type to one of these return_types to return a list of the column names associated.
Determination is made based on if a column in the dataframe is continous based on a ratio
between the number of unique values in a column and the total number of values
Low cardinality values will get cut off if above the specified ratio.
Optionally specify return_dict to return a dictionary where values are column names
and values are boolean True if categoric and false if continouous
Default ratio threshold is .05
'exclude_strings' is True by default (i.e. if a column has string values it will be marked
as a categoric column). If looking for columns that may be numeric/continuous but
first need to be processed, this can be set to False.
Parameters
----------
df : Pandas DataFrame
A DataFrame to search columns within
    unique_value_to_total_value_ratio_threshold : float
        The maximum ratio of unique values in a column / total observations. Akin to a cardinality ratio.
        Default is .05, meaning that anything with more than 5% of its values being unique will be
        considered non-categoric.
exclude_strings : Boolean
Flag to include all columns with any string values as categoric columns. Default is True.
return_dict: Boolean
Flag to return a dictionary of the form {column: Categoric_Boolean} where the value is True if a column
is categoric. Default is False
    text_unique_threshold: float
        The minimum ratio of unique values / total observations for a column to be considered text.
        Default is .9
    return_type: str
        Which list of column names to return: one of 'categoric', 'numeric', 'text' or 'datetime'.
        Default is 'categoric'
Returns
-------
Dict/List
A list of the column names that are categoric/continuous OR a dictionary with keys of column names and
values True if categoric
"""
if return_type not in ['categoric', 'numeric', 'text', 'datetime']:
warnings.warn("'return_type' must be one of: ['categoric', 'numeric', 'text', 'datetime']")
likely_categoric = OrderedDict()
for column in df.columns:
likely_categoric[column] = 1.*df[column].nunique()/df[column].count() < unique_value_to_total_value_ratio_threshold
# Check if any of the values in the column are strings.
if exclude_strings:
# If so, its value should be true to indicate it is categoric
if df[column].apply(type).eq(str).any():
likely_categoric[column] = True
likely_text = OrderedDict()
for column in df.columns:
# Check for unique pct above threshold and value is string
likely_text[column] = (1.*df[column].nunique()/df[column].count() > text_unique_threshold) #& isinstance(df[column].values[0], str)
likely_datetime = []
for dtype in [np.datetime64, 'datetime', 'datetime64', np.timedelta64, 'timedelta', 'timedelta64', 'datetimetz']:
# Add any datetime columns found to likely_datetime collection
time_cols = df.select_dtypes(include=dtype).columns.values.tolist()
# Append if not empty
if time_cols:
likely_datetime.append(time_cols)
likely_datetime = np.array(likely_datetime).flatten().tolist()
if return_dict:
return likely_categoric
if return_type == 'numeric':
numeric_cols = [col for col, value in likely_categoric.items() if (not value) & (col not in likely_datetime)]
return numeric_cols
elif return_type == 'categoric':
categoric_cols = [col for col, value in likely_categoric.items() if value]
return categoric_cols
elif return_type == 'text':
text_cols = [col for col, value in likely_text.items() if value]
return text_cols
elif return_type == 'datetime':
return likely_datetime
else:
print('Please specify valid return option') | 6335152405bc175805e8484ff23f58d4f6ce6f6a | 3,653,309 |
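A small usage sketch (it assumes select_columns_by_feature_type from the snippet above is in scope; the data is made up):
import numpy as np
import pandas as pd

df = pd.DataFrame({
    "price": np.random.rand(1000) * 100,                      # continuous -> numeric
    "category": np.random.choice(list("abc"), 1000),          # low cardinality -> categoric
    "comment": [f"free text {i}" for i in range(1000)],       # mostly unique strings -> text
    "created": pd.date_range("2020-01-01", periods=1000, freq="h"),  # -> datetime
})
print(select_columns_by_feature_type(df, return_type="numeric"))    # ['price'] expected
print(select_columns_by_feature_type(df, return_type="categoric"))  # includes 'category'
print(select_columns_by_feature_type(df, return_type="datetime"))   # ['created'] expected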
def _Counter_random(self, filter=None):
"""Return a single random elements from the Counter collection, weighted by count."""
return _Counter_randoms(self, 1, filter=filter)[0] | 95dc2ab7857b27a831b273af7dba143b8b791b27 | 3,653,310 |
def EnsureAndroidSdkPackagesInstalled(abi):
"""Return true if at least one package was not already installed."""
abiPackageList = SdkPackagesForAbi(abi)
installedSomething = False
packages = AndroidListSdk()
for package in abiPackageList:
installedSomething |= EnsureSdkPackageInstalled(packages, package)
return installedSomething | b43ee6094dc4cd8f71ec1319dbd5bd32d272b55a | 3,653,311 |
import pandas as pd
def dataframe_like(value, name, optional=False, strict=False):
"""
Convert to dataframe or raise if not dataframe_like
Parameters
----------
value : object
Value to verify
name : str
Variable name for exceptions
optional : bool
Flag indicating whether None is allowed
strict : bool
If True, then only allow dataframe. If False, allow types that support
casting to dataframe.
Returns
-------
converted : dataframe
value converted to a dataframe
"""
if optional and value is None:
return None
    if not isinstance(value, (dict, pd.DataFrame)) or (
        strict and not isinstance(value, pd.DataFrame)
    ):
extra_text = "If not None, " if optional else ""
strict_text = " or dataframe_like " if strict else ""
msg = "{0}{1} must be a dict{2}".format(extra_text, name, strict_text)
raise TypeError(msg)
return pd.DataFrame(value) | 016ffcda7050ac639d04522a666526753eb52a84 | 3,653,312 |
import pandas as pd
from sklearn.decomposition import PCA
def pcaFunc(z, n_components=100):
    """
    PCA dimensionality reduction: returns the full transform and a DataFrame of the first three components.
    """
    pca = PCA(n_components=n_components)
pca_result = pca.fit_transform(z)
re = pd.DataFrame()
re['pca-one'] = pca_result[:, 0]
re['pca-two'] = pca_result[:, 1]
re['pca-three'] = pca_result[:, 2]
# Not print Now
# print('Explained variation per principal component: {}'.format(pca.explained_variance_ratio_))
return pca_result, re | 1dda1542a045eab69aab5488be2c754bde555311 | 3,653,313 |
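A usage sketch with random data (it assumes pcaFunc above, with n_components passed through to PCA):
import numpy as np

z = np.random.rand(500, 20)            # 500 samples, 20 features
pca_result, re_df = pcaFunc(z, n_components=3)
print(pca_result.shape)                # (500, 3)
print(re_df.columns.tolist())          # ['pca-one', 'pca-two', 'pca-three']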
import torch.optim as optim
def choose_optimizer(discriminator, generator, netD, netG, lr_d=2e-4, lr_g=2e-3):
"""
Set optimizers for discriminator and generator
:param discriminator: str, name
:param generator: str, name
:param netD:
:param netG:
:param lr_d:
:param lr_g:
:return: optimizerD, optimizerG
"""
if discriminator == 'Adam':
optimizerD = optim.Adam(netD.parameters(), lr=lr_d, betas=(0.5, 0.999))
elif discriminator == 'RMSprop':
optimizerD = optim.RMSprop(netD.parameters(), lr=lr_d)
elif discriminator == 'SGD':
optimizerD = optim.SGD(netD.parameters(), lr=lr_d, momentum=0.9)
elif discriminator == 'zoVIA':
optimizerD = zoVIA(netD, lr=lr_d)
elif discriminator == 'zoESVIA':
optimizerD = zoESVIA(netD, lr=lr_d)
elif discriminator == 'zoscESVIA':
optimizerD = zoscESVIA(netD, lr=lr_d)
if generator == 'Adam':
optimizerG = optim.Adam(netG.parameters(), lr=lr_g, betas=(0.5, 0.999))
elif generator == 'RMSprop':
optimizerG = optim.RMSprop(netG.parameters(), lr=lr_g)
elif generator == 'SGD':
optimizerG = optim.SGD(netG.parameters(), lr=lr_g, momentum=0.9)
elif generator == 'zoVIA':
optimizerG = zoVIA(netG, lr=lr_g)
elif generator == 'zoESVIA':
optimizerG = zoESVIA(netG, lr=lr_g)
elif generator == 'zoscESVIA':
optimizerG = zoscESVIA(netG, lr=lr_g)
print('Discriminator optimizer: {}, lr={}'.format(discriminator, lr_d))
print('Generator optimizer: {}, lr={}'.format(generator, lr_g))
return optimizerD, optimizerG | b3784d98c1743c10e3d1e9bca76288bd45c9c99e | 3,653,314 |
def prod(*args: int) -> int:
"""
This function is wrapped and documented in `_polymorphic.prod()`.
"""
prod_ = 1
for arg in args:
prod_ *= arg
return prod_ | eec30bf6339280173e0e2fa517558e6a452b9c37 | 3,653,315 |
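On Python 3.8+ the standard library provides the same computation; a quick equivalence check (assumes prod from the snippet above):
import math

assert prod(2, 3, 4) == math.prod((2, 3, 4)) == 24
assert prod() == math.prod(()) == 1   # the empty product is 1 by convention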
def field_value(field):
"""
Returns the value for this BoundField, as rendered in widgets.
"""
if field.form.is_bound:
if isinstance(field.field, FileField) and field.data is None:
val = field.form.initial.get(field.name, field.field.initial)
else:
val = field.data
else:
val = field.form.initial.get(field.name, field.field.initial)
if callable(val):
val = val()
if val is None:
val = ''
return val | 5dc3792e0d6cd2cb6173c2479a024881f80a6d2b | 3,653,316 |
import numpy as np
def distances(p):
"""Compute lengths of shortest paths between all nodes in Pharmacophore.
Args:
p (Pharmacophore): model to analyse
Returns:
dist (numpy array): array with distances between all nodes
"""
if not isinstance(p, Pharmacophore):
raise TypeError("Expected Pharmacophore, got %s instead" %
type(p).__name__)
dist = np.array(p.edges)
for i in range(p.numnodes):
for j in range(i):
if dist[i][j] == 0:
dist[i][j] = dist[j][i] = float("inf")
for i in range(len(dist)):
compute = False
for j in range(i):
if dist[i][j] == float("inf"):
compute = True
break
if compute:
queue = [k for k in range(p.numnodes)]
while queue:
queue.sort(key=lambda x: dist[i, x])
u = queue[0]
del queue[0]
for v in np.where(p.edges[u] > 0)[0]:
if v in queue:
alt = dist[i, u] + p.edges[u, v]
if alt < dist[i, v]:
dist[i, v] = dist[v, i] = alt
return dist | 40e77672ad9447ed4c7b69b14aadbc2f125cb499 | 3,653,317 |
def initial_data(logged_on_user, users_fixture, streams_fixture):
"""
Response from /register API request.
"""
return {
'full_name': logged_on_user['full_name'],
'email': logged_on_user['email'],
'user_id': logged_on_user['user_id'],
'realm_name': 'Test Organization Name',
'unsubscribed': [{
'audible_notifications': False,
'description': 'announce',
'stream_id': 7,
'is_old_stream': True,
'desktop_notifications': False,
'pin_to_top': False,
'stream_weekly_traffic': 0,
'invite_only': False,
'name': 'announce',
'push_notifications': False,
'email_address': '',
'color': '#bfd56f',
'in_home_view': True
}],
'result': 'success',
'queue_id': '1522420755:786',
'realm_users': users_fixture,
'cross_realm_bots': [{
'full_name': 'Notification Bot',
'timezone': '',
'is_bot': True,
'date_joined': '2015-12-28T19:58:29.035543+00:00',
'email': '[email protected]',
'user_id': 5,
'is_admin': False,
'avatar_url': 'dummy_avatar_url'
}, {
'full_name': 'Email Gateway',
'timezone': '',
'is_bot': True,
'date_joined': '2015-12-28T19:58:29.037658+00:00',
'email': '[email protected]',
'user_id': 6,
'is_admin': False,
'avatar_url': 'dummy_avatar_url'
}, {
'full_name': 'Welcome Bot',
'timezone': '',
'is_bot': True,
'date_joined': '2015-12-28T19:58:29.033231+00:00',
'email': '[email protected]',
'user_id': 4,
'is_admin': False,
'avatar_url': 'dummy_avatar_url'
}, {
'full_name': 'Zulip Feedback Bot',
'timezone': '',
'is_bot': True,
'date_joined': '2015-12-28T19:58:28.972281+00:00',
'email': '[email protected]',
'user_id': 1,
'is_admin': False,
'avatar_url': 'dummy_avatar_url'
}],
'subscriptions': streams_fixture,
'msg': '',
'max_message_id': 552761,
'never_subscribed': [{
'invite_only': False,
'description': 'Announcements from the Zulip GCI Mentors',
'stream_id': 87,
'name': 'GCI announce',
'is_old_stream': True,
'stream_weekly_traffic': 0
}, {
'invite_only': False,
'description': 'General discussion',
'stream_id': 74,
'name': 'GCI general',
'is_old_stream': True,
'stream_weekly_traffic': 0
}],
'unread_msgs': {
'pms': [{
'sender_id': 1,
'unread_message_ids': [1, 2]
}, {
'sender_id': 2,
'unread_message_ids': [3]
}],
'count': 0,
'mentions': [],
'streams': [{
'stream_id': 1000,
'topic': 'Some general unread topic',
'unread_message_ids': [4, 5, 6],
'sender_ids': [1, 2]
}, {
'stream_id': 99,
'topic': 'Some private unread topic',
'unread_message_ids': [7],
'sender_ids': [1, 2]
}],
'huddles': [{
'user_ids_string': '1001,11,12',
'unread_message_ids': [11, 12, 13]
}, {
'user_ids_string': '1001,11,12,13',
'unread_message_ids': [101, 102],
}]
},
'presences': {
'[email protected]': {
'ZulipElectron': {
'pushable': False,
'client': 'ZulipElectron',
'status': 'idle',
'timestamp': 1522484059
},
'ZulipMobile': {
'pushable': False,
'client': 'ZulipMobile',
'status': 'idle',
'timestamp': 1522384165
},
'aggregated': {
'timestamp': 1522484059,
'client': 'ZulipElectron',
'status': 'idle'
}
},
logged_on_user['email']: {
'website': {
'pushable': True,
'client': 'website',
'status': 'active',
'timestamp': 1522458138
},
'ZulipMobile': {
'pushable': True,
'client': 'ZulipMobile',
'status': 'active',
'timestamp': 1522480103
},
'aggregated': {
'timestamp': 1522480103,
'client': 'ZulipMobile',
'status': 'active'
}
}
},
'twenty_four_hour_time': True,
'last_event_id': -1,
'muted_topics': [],
'realm_user_groups': [],
# Deliberately use hard-coded zulip version and feature level to avoid
# adding extra tests unnecessarily.
'zulip_version': MINIMUM_SUPPORTED_SERVER_VERSION[0],
'zulip_feature_level': MINIMUM_SUPPORTED_SERVER_VERSION[1],
} | b85eafbf359a6c34decc866f4d1fbb494ac907f8 | 3,653,318 |
def affine_relu_backward(dout, cache):
"""
Backward pass for the affine-relu convenience layer
"""
fc_cache, relu_cache = cache
da = relu_backward(dout, relu_cache)
dx, dw, db = affine_backward(da, fc_cache)
return dx, dw, db | 201f37d4d6ac9e170a52766f41d892527681a3d1 | 3,653,319 |
from typing import List
def create_initial_population() -> List[Image]:
"""
Create population at step 0
"""
return [random_image() for _ in range(POP_SIZE)] | 895632869962014695382e34961f6e6636619fbe | 3,653,320 |
from typing import Any
def adapt(value: Any, pg_type: str) -> Any:
"""
Coerces a value with a PG type into its Python equivalent.
:param value: Value
:param pg_type: Postgres datatype
:return: Coerced value.
"""
if value is None:
return None
if pg_type in _TYPE_MAP:
return _TYPE_MAP[pg_type](value)
return value | f040cd6fbf5aa8a396efa36879b83e13b5d89da7 | 3,653,321 |
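_TYPE_MAP is referenced but not shown in the snippet; a hypothetical minimal version illustrating the intended shape (the type names and coverage here are assumptions):
import json
from datetime import date, datetime
from typing import Any, Callable, Dict

_TYPE_MAP: Dict[str, Callable[[Any], Any]] = {
    "integer": int,
    "bigint": int,
    "numeric": float,
    "boolean": lambda v: v in (True, "t", "true", "True"),
    "json": json.loads,
    "jsonb": json.loads,
    "date": lambda v: v if isinstance(v, date) else date.fromisoformat(v),
    "timestamp": lambda v: v if isinstance(v, datetime) else datetime.fromisoformat(v),
}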
import uuid
from datetime import datetime
from lxml import etree
def createPREMISEventXML(eventType, agentIdentifier, eventDetail, eventOutcome,
outcomeDetail=None, eventIdentifier=None,
linkObjectList=[], eventDate=None):
"""
Actually create our PREMIS Event XML
"""
eventXML = etree.Element(PREMIS + "event", nsmap=PREMIS_NSMAP)
eventIDXML = etree.SubElement(eventXML, PREMIS + "eventIdentifier")
eventTypeXML = etree.SubElement(eventXML, PREMIS + "eventType")
eventTypeXML.text = eventType
eventIDTypeXML = etree.SubElement(
eventIDXML, PREMIS + "eventIdentifierType"
)
eventIDTypeXML.text = \
"http://purl.org/net/untl/vocabularies/identifier-qualifiers/#UUID"
eventIDValueXML = etree.SubElement(
eventIDXML, PREMIS + "eventIdentifierValue"
)
if eventIdentifier:
eventIDValueXML.text = eventIdentifier
else:
eventIDValueXML.text = uuid.uuid4().hex
eventDateTimeXML = etree.SubElement(eventXML, PREMIS + "eventDateTime")
if eventDate is None:
eventDateTimeXML.text = xsDateTime_format(datetime.utcnow())
else:
eventDateTimeXML.text = xsDateTime_format(eventDate)
eventDetailXML = etree.SubElement(eventXML, PREMIS + "eventDetail")
eventDetailXML.text = eventDetail
eventOutcomeInfoXML = etree.SubElement(
eventXML, PREMIS + "eventOutcomeInformation"
)
eventOutcomeXML = etree.SubElement(
eventOutcomeInfoXML, PREMIS + "eventOutcome"
)
eventOutcomeXML.text = eventOutcome
if outcomeDetail:
eventOutcomeDetailXML = etree.SubElement(
eventOutcomeInfoXML, PREMIS + "eventOutcomeDetail"
)
eventOutcomeDetailNoteXML = etree.SubElement(
eventOutcomeDetailXML, PREMIS + "eventOutcomeDetailNote"
)
eventOutcomeDetailNoteXML.text = outcomeDetail
# Assuming it's a list of 3-item tuples here [ ( identifier, type, role) ]
linkAgentIDXML = etree.SubElement(
eventXML, PREMIS + "linkingAgentIdentifier")
linkAgentIDTypeXML = etree.SubElement(
linkAgentIDXML, PREMIS + "linkingAgentIdentifierType"
)
linkAgentIDTypeXML.text = \
"http://purl.org/net/untl/vocabularies/identifier-qualifiers/#URL"
linkAgentIDValueXML = etree.SubElement(
linkAgentIDXML, PREMIS + "linkingAgentIdentifierValue"
)
linkAgentIDValueXML.text = agentIdentifier
linkAgentIDRoleXML = etree.SubElement(
linkAgentIDXML, PREMIS + "linkingAgentRole"
)
linkAgentIDRoleXML.text = \
"http://purl.org/net/untl/vocabularies/linkingAgentRoles/#executingProgram"
for linkObject in linkObjectList:
linkObjectIDXML = etree.SubElement(
eventXML, PREMIS + "linkingObjectIdentifier"
)
linkObjectIDTypeXML = etree.SubElement(
linkObjectIDXML, PREMIS + "linkingObjectIdentifierType"
)
linkObjectIDTypeXML.text = linkObject[1]
linkObjectIDValueXML = etree.SubElement(
linkObjectIDXML, PREMIS + "linkingObjectIdentifierValue"
)
linkObjectIDValueXML.text = linkObject[0]
if linkObject[2]:
linkObjectRoleXML = etree.SubElement(
linkObjectIDXML, PREMIS + "linkingObjectRole"
)
linkObjectRoleXML.text = linkObject[2]
return eventXML | 25836d6cd4b40ad672ca3438ba3583cd147a52bb | 3,653,322 |
def get_primary_key(conn, table, columns):
""" attempts to reverse lookup the primary key by querying the table using the first column
and iteratively adding the columns that comes after it until the query returns a
unique row in the table.
:param
conn: an SQLite connection object
table: a string denoting the table name to query
columns: a list containing column names of the table
:return: the list of columns which makes up the primary key
"""
select_row_query = "SELECT * FROM `{}`".format(table)
count_row_query = "SELECT COUNT(*) FROM `{}` WHERE `{}`"
primary_key = []
row = conn.execute(select_row_query).fetchone()
if row is not None:
for i, column in enumerate(columns):
if i == 0:
count_row_query = count_row_query.format(table, column)
else:
count_row_query += " AND `{}`".format(column)
count_row_query += append_eql_condition(row[i])
primary_key.append(column)
count = conn.execute(count_row_query).fetchone()
if count[0] == 1:
return primary_key
# if no primary key was found then the primary key is made up of all columns
return columns | 3b74f85214e89af322fd1da1e6c8de1eba4f4ca7 | 3,653,323 |
def redirect_to_docs():
"""Redirect to API docs when at site root"""
return RedirectResponse('/redoc') | f284167e238845651eedaf3bcc1b85e64979df6a | 3,653,324 |
def init_neighbours(key):
"""
Sets then neighbouring nodes and initializes the edge count to the neighbours to 1
:param key: str - key of node to which we are searching the neighbours
:return: dictionary of neighbours with corresponding edge count
"""
neighbours = {}
neighbouring_nodes = graph[key]
for node in neighbouring_nodes:
if neighbouring_nodes[node] == {}:
neighbours[node] = 1
else:
neighbours[node] = neighbouring_nodes[node]
return neighbours | 6fa49ffa75051eeca9bd1714ec3e4817ef429bad | 3,653,325 |
import numpy as np
def computeNumericalGradient(J, theta):
""" Compute numgrad = computeNumericalGradient(J, theta)
theta: a matrix of parameters
J: a function that outputs a real-number and the gradient.
Calling y = J(theta)[0] will return the function value at theta.
"""
# Initialize numgrad with zeros
numgrad = np.zeros(theta.shape)
## ---------- YOUR CODE HERE --------------------------------------
# Instructions:
# Implement numerical gradient checking, and return the result in numgrad.
# You should write code so that numgrad[i][j] is (the numerical approximation to) the
# partial derivative of J with respect to theta[i][j], evaluated at theta.
# I.e., numgrad[i][j] should be the (approximately) partial derivative of J with
# respect to theta[i][j].
#
# Hint: You will probably want to compute the elements of numgrad one at a time.
# Set Epsilon
epsilon = 0.0001
# Outer for loop to check across the x-axis
for i in range(theta.shape[0]):
# Inner for loop to check across the y-axis
for j in range(theta.shape[1]):
# Copy current theta value to min
theta_min = theta.copy()
# Subtract min point by epsilon and store
theta_min[i,j] = theta_min[i,j] - epsilon
# Not sure
cost_min, dW, db = J(theta_min)
# Copy current theta for max
theta_max = theta.copy()
# Add max point by epsilon and store
theta_max[i,j] = theta_max[i,j] + epsilon
# ?
cost_max, dW, db = J(theta_max)
# Final Result for gradient k
numgrad[i][j] = (cost_max - cost_min) / (2 * epsilon)
## ---------------------------------------------------------------
return numgrad | 2d4e4ed190bbb0c5507ecb896c13d33fcd7aa1b5 | 3,653,326 |
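A typical gradient-check usage (a sketch; it assumes computeNumericalGradient above and a cost function that returns a (cost, dW, db) tuple, as the implementation expects):
import numpy as np

def J(theta):
    """Simple quadratic cost sum(theta**2); the analytic gradient is 2*theta."""
    cost = np.sum(theta ** 2)
    dW = 2 * theta
    db = None  # placeholder so the return matches the (cost, dW, db) shape
    return cost, dW, db

theta = np.random.randn(3, 4)
numgrad = computeNumericalGradient(J, theta)
print(np.max(np.abs(numgrad - 2 * theta)))  # should be tiny (near machine precision for a quadratic)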
from ctypes import c_char_p
def get_error_msg(handle):
"""
Get the latest and greatest DTrace error.
"""
txt = LIBRARY.dtrace_errmsg(handle, LIBRARY.dtrace_errno(handle))
return c_char_p(txt).value | 73d945367e3003beb29505852004f0c71b205873 | 3,653,327 |
def sigma_hat(frequency, sigma, epsilon=epsilon_0, quasistatic=False):
"""
conductivity with displacement current contribution
.. math::
\hat{\sigma} = \sigma + i \omega \\varepsilon
**Required**
:param (float, numpy.array) frequency: frequency (Hz)
:param float sigma: electrical conductivity (S/m)
**Optional**
:param float epsilon: dielectric permittivity. Default :math:`\\varepsilon_0`
:param bool quasistatic: use the quasi-static assumption? Default: False
"""
if quasistatic is True:
return sigma
return sigma + 1j*omega(frequency)*epsilon | 17aee0f33ba8786934d750e37e1afd0617e8aa1d | 3,653,328 |
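The helpers omega and epsilon_0 are not part of the snippet; a minimal sketch of the definitions assumed here (angular frequency and vacuum permittivity):
import numpy as np

epsilon_0 = 8.8541878128e-12  # vacuum permittivity [F/m]

def omega(frequency):
    """Angular frequency [rad/s] from frequency [Hz]."""
    return 2 * np.pi * frequency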
def encode_list(key, list_):
# type: (str, Iterable) -> Dict[str, str]
"""
Converts a list into a space-separated string and puts it in a dictionary
:param key: Dictionary key to store the list
:param list_: A list of objects
:return: A dictionary key->string or an empty dictionary
"""
if not list_:
return {}
return {key: " ".join(str(i) for i in list_)} | 6cde65017d20e777e27ac86d7f8eb1d025d04947 | 3,653,329 |
async def delete_relationship(request: web.Request):
"""
Remove relationships of resource.
Uses the :meth:`~aiohttp_json_api.schema.BaseSchema.delete_relationship`
method of the schema to update the relationship.
:seealso: http://jsonapi.org/format/#crud-updating-relationships
"""
relation_name = request.match_info['relation']
ctx = JSONAPIContext(request)
relation_field = ctx.schema.get_relationship_field(relation_name,
source_parameter='URI')
resource_id = request.match_info.get('id')
validate_uri_resource_id(ctx.schema, resource_id)
pagination = None
if relation_field.relation is Relation.TO_MANY:
pagination_type = relation_field.pagination
if pagination_type:
pagination = pagination_type(request)
data = await request.json()
sp = JSONPointer('')
field = ctx.schema.get_relationship_field(relation_name)
if field.relation is not Relation.TO_MANY:
raise RuntimeError('Wrong relationship field.'
'Relation to-many is required.')
await ctx.schema.pre_validate_field(field, data, sp)
deserialized_data = field.deserialize(ctx.schema, data, sp)
resource = await ctx.controller.fetch_resource(resource_id)
old_resource, new_resource = \
await ctx.controller.remove_relationship(field, resource,
deserialized_data, sp)
if old_resource == new_resource:
return web.HTTPNoContent()
result = ctx.schema.serialize_relationship(relation_name, new_resource,
pagination=pagination)
return jsonapi_response(result) | 6397ebab365b9339dca7692b4188945401d54779 | 3,653,330 |
def cost_efficiency(radius, height, cost):
"""Compute and return the cost efficiency of a steel can size.
The cost efficiency is the volume of the can divided by its cost.
Parameters
radius: the radius of the steel can
height: the height of the steel can
cost: the cost of the steel can
Return: the cost efficiency of the steel can
"""
volume = cylinder_volume(radius, height)
efficiency = volume / cost
return efficiency | e21f767676d5a1e9e5d97ba8bd8f943ecaad5060 | 3,653,331 |
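cylinder_volume is assumed by the snippet; a minimal sketch plus an illustrative call (the numbers are made up):
import math

def cylinder_volume(radius, height):
    """Volume of a cylinder: pi * r^2 * h."""
    return math.pi * radius ** 2 * height

# a can with radius 3.1 cm and height 12 cm costing 0.12 dollars
print(cost_efficiency(3.1, 12, 0.12))  # volume (cm^3) per dollar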
def process_response():
"""
Outer scope for processing the response to a request via the '/response' endpoint. Ensure all data is present,
request exists in Pending table and then change case status and notify app about the response via webhook.
:return: status code, message
TODO set up
TODO Place response data into a Response object, call the Response Handler
"""
print('---\nRESPONSE')
for key, value in request.args.items():
print(key + ':', value, type(value))
print('---')
return jsonify({'status': 200}) | c2cef76031b2f7396d504eb1c10769bb37b145e0 | 3,653,332 |
def cb_xmlrpc_register(args):
"""
Register as a pyblosxom XML-RPC plugin
"""
args['methods'].update({'pingback.ping': pingback})
return args | e9f5cdde32d1a7b3145918d4fadfc80f4de7301f | 3,653,333 |
def try_except(method):
"""
A decorator method to catch Exceptions
    :param method: the function to wrap and call with exception handling
"""
def wrapped(self, *args, **kwargs):
try:
return method(self, *args, **kwargs)
except self.error as error:
log_error(error, self.logger, self.error_message)
if hasattr(self, 'close'):
self.close()
return wrapped | 069c5abd6a2f2dcab8424c829f1dae27e8a294b8 | 3,653,334 |
import numpy as np
def sosfilter_double_c(signal, sos, states=None):
"""Second order section filter function using cffi, double precision.
signal_out, states = sosfilter_c(signal_in, sos, states=None)
Parameters
----------
signal : ndarray
Signal array of shape (N x 0).
sos : ndarray
Second order section coefficients array of shape (K*6 x 0).
One biquad -> 6 coefficients:
``[b00, b01, b02, a00, a01, a02, ..., b10, bK1 ... , aK2]``
states : ndarray
Filter states, initial value can be None.
Returns
-------
signal :
Filtered signal array of shape (N x 0).
states : ndarray
Filter states, initial value can be None.
"""
signal_c = ffi.new(
'char[]', np.array(signal, dtype=np.double).flatten().tostring())
sos_c = ffi.new(
'char[]', np.array(sos, dtype=np.double).flatten().tostring())
nsamp = int(len(signal))
ksos = int(sos.size/6)
if isinstance(states, type(None)):
states = np.zeros(ksos*2).astype(np.double)
states_c = ffi.new(
'char[]', np.array(states, dtype=np.double).flatten().tostring())
_c.sosfilter_double(ffi.cast("double*", signal_c),
nsamp,
ffi.cast("double*", sos_c),
ksos,
ffi.cast("double*", states_c))
out = np.fromstring(
ffi.buffer(signal_c),
dtype=np.double,
count=nsamp)
states = np.fromstring(
ffi.buffer(states_c),
dtype=np.double,
count=len(states))
return out, states | 387d921f86ec6bc9c814d0ca757b36f803d122af | 3,653,335 |
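The coefficient layout matches scipy.signal.sosfilt, which can serve as a reference when checking results; a sketch (the cffi objects ffi and _c from the snippet only exist in the original package):
import numpy as np
from scipy import signal

sos = signal.butter(4, 0.2, output='sos')   # shape (2, 6): [b0, b1, b2, a0, a1, a2] per section
x = np.random.randn(1024)
y_ref = signal.sosfilt(sos, x)              # pure scipy reference
# y_c, states = sosfilter_double_c(x, sos)  # cffi version from the snippet, if compiled
# np.allclose(y_ref, y_c) would be the expected check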
import logging
import logging.config
import yaml
import sys
import os
import coloredlogs
def setup_logging(name, default_path='graphy/logging.yaml', default_level=logging.INFO):
""" Setup logging configuration """
path = files.get_absolute_path(default_path, from_project=True)
try:
with open(path, 'r') as f:
config = yaml.safe_load(f.read())
logging.config.dictConfig(config)
coloredlogs.install()
except Exception:
exc_type, exc_obj, exc_tb = sys.exc_info()
file_name = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
print(exc_type, file_name, exc_tb.tb_lineno)
logging.basicConfig(level=default_level)
coloredlogs.install(level=default_level)
return logging.getLogger(name) | c6114284982244e29792866bbff52591d0787597 | 3,653,336 |
import logging
def node_exporter_check():
"""
Checks existence & health of node exporter pods
"""
kube = kube_api()
namespaces = kube.list_namespace()
ns_names = []
for nspace in namespaces.items:
ns_names.append(nspace.metadata.name)
result = {'category': 'observability',
'case_name': 'node_exporter_check',
'criteria': 'pass',
'details': []
}
status = []
flag = False
logger = logging.getLogger(__name__)
if 'monitoring' in ns_names:
pod_list = kube.list_namespaced_pod('monitoring', watch=False)
pods = pod_list.items
for pod in pods:
if 'node-exporter' in pod.metadata.name:
pod_stats = pod_status(logger, pod)
if pod_stats['criteria'] == 'fail':
pod_stats['logs'] = get_logs(kube, pod)
result['criteria'] = 'fail'
status.append(pod.metadata.name)
status.append(pod_stats)
flag = True
else:
for nspace in namespaces.items:
pod_list = kube.list_namespaced_pod(nspace.metadata.name, watch=False)
pods = pod_list.items
for pod in pods:
if 'node-exporter' in pod.metadata.name:
pod_stats = pod_status(logger, pod)
if pod_stats['criteria'] == 'fail':
pod_stats['logs'] = get_logs(kube, pod)
result['criteria'] = 'fail'
status.append(pod.metadata.name)
status.append(pod_stats)
flag = True
if flag is False:
result['criteria'] = 'fail'
result['details'].append(status)
store_result(logger, result)
return result | 25a1c23107654a6b561d54ffce08aa6025ae1d2e | 3,653,337 |
import time
import os
import json
def create(cmd, resource_group_name=None, workspace_name=None, location=None, storage_account=None, skip_role_assignment=False, provider_sku_list=None):
"""
Create a new Azure Quantum workspace.
"""
client = cf_workspaces(cmd.cli_ctx)
if not workspace_name:
raise RequiredArgumentMissingError("An explicit workspace name is required for this command.")
if not storage_account:
raise RequiredArgumentMissingError("A quantum workspace requires a valid storage account.")
if not location:
raise RequiredArgumentMissingError("A location for the new quantum workspace is required.")
if provider_sku_list is None:
raise RequiredArgumentMissingError("A list of Azure Quantum providers and SKUs is required.")
info = WorkspaceInfo(cmd, resource_group_name, workspace_name, location)
if not info.resource_group:
raise ResourceNotFoundError("Please run 'az quantum workspace set' first to select a default resource group.")
quantum_workspace = _get_basic_quantum_workspace(location, info, storage_account)
# Until the "--skip-role-assignment" parameter is deprecated, use the old non-ARM code to create a workspace without doing a role assignment
if skip_role_assignment:
_add_quantum_providers(cmd, quantum_workspace, provider_sku_list)
poller = client.begin_create_or_update(info.resource_group, info.name, quantum_workspace, polling=False)
while not poller.done():
time.sleep(POLLING_TIME_DURATION)
quantum_workspace = poller.result()
return quantum_workspace
# ARM-template-based code to create an Azure Quantum workspace and make it a "Contributor" to the storage account
template_path = os.path.join(os.path.dirname(
__file__), 'templates', 'create-workspace-and-assign-role.json')
with open(template_path, 'r', encoding='utf8') as template_file_fd:
template = json.load(template_file_fd)
_add_quantum_providers(cmd, quantum_workspace, provider_sku_list)
validated_providers = []
for provider in quantum_workspace.providers:
validated_providers.append({"providerId": provider.provider_id, "providerSku": provider.provider_sku})
# Set default storage account parameters in case the storage account does not exist yet
storage_account_sku = DEFAULT_STORAGE_SKU
storage_account_sku_tier = DEFAULT_STORAGE_SKU_TIER
storage_account_kind = DEFAULT_STORAGE_KIND
storage_account_location = location
# Look for info on existing storage account
storage_account_list = list_storage_accounts(cmd, resource_group_name)
if storage_account_list:
for storage_account_info in storage_account_list:
if storage_account_info.name == storage_account:
storage_account_sku = storage_account_info.sku.name
storage_account_sku_tier = storage_account_info.sku.tier
storage_account_kind = storage_account_info.kind
storage_account_location = storage_account_info.location
break
# Validate the storage account SKU tier and kind
_validate_storage_account('tier', storage_account_sku_tier, SUPPORTED_STORAGE_SKU_TIERS)
_validate_storage_account('kind', storage_account_kind, SUPPORTED_STORAGE_KINDS)
parameters = {
'quantumWorkspaceName': workspace_name,
'location': location,
'tags': {},
'providers': validated_providers,
'storageAccountName': storage_account,
'storageAccountId': _get_storage_account_path(info, storage_account),
'storageAccountLocation': storage_account_location,
'storageAccountSku': storage_account_sku,
'storageAccountKind': storage_account_kind,
'storageAccountDeploymentName': "Microsoft.StorageAccount-" + time.strftime("%d-%b-%Y-%H-%M-%S", time.gmtime())
}
parameters = {k: {'value': v} for k, v in parameters.items()}
deployment_properties = {
'mode': DeploymentMode.incremental,
'template': template,
'parameters': parameters
}
credentials = _get_data_credentials(cmd.cli_ctx, info.subscription)
arm_client = ResourceManagementClient(credentials, info.subscription)
# Show the first progress indicator dot before starting ARM template deployment
print('.', end='', flush=True)
deployment_async_operation = arm_client.deployments.begin_create_or_update(
info.resource_group,
workspace_name, # Note: This is actually specifying a the deployment name, but workspace_name is used here in test_quantum_workspace.py
{'properties': deployment_properties}
)
# Show progress indicator dots
polling_cycles = 0
while not deployment_async_operation.done():
polling_cycles += 1
if polling_cycles > MAX_POLLS_CREATE_WORKSPACE:
print()
raise AzureInternalError("Create quantum workspace operation timed out.")
print('.', end='', flush=True)
time.sleep(POLLING_TIME_DURATION)
print()
quantum_workspace = deployment_async_operation.result()
return quantum_workspace | deace233f9f357137c83e3b58eec8316abceafd2 | 3,653,338 |
def functional_domain_min(braf_gene_descr_min,
location_descriptor_braf_domain):
"""Create functional domain test fixture."""
params = {
"status": "preserved",
"name": "Serine-threonine/tyrosine-protein kinase, catalytic domain",
"id": "interpro:IPR001245",
"gene_descriptor": braf_gene_descr_min,
"location_descriptor": location_descriptor_braf_domain
}
return FunctionalDomain(**params) | 905e6b3dc4c1507c57d71879b582794cd66cdd8e | 3,653,339 |
from base64 import b64encode
from Crypto.Cipher import PKCS1_v1_5
from Crypto.PublicKey import RSA
def rsa_encrypt(rsa_key, data):
    """
    Encrypt the login password with RSA.
    rsa_key: the RSA key (PEM string)
    """
data = bytes(data, encoding="utf8")
encrypt = PKCS1_v1_5.new(RSA.importKey(rsa_key))
Sencrypt = b64encode(encrypt.encrypt(data))
return Sencrypt.decode("utf-8") | 07384216eff4d0f109e9a0b3bf45c0c1ab108b26 | 3,653,340 |
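A usage sketch with a freshly generated key pair (assumes PyCryptodome and rsa_encrypt above):
from Crypto.PublicKey import RSA

key_pair = RSA.generate(2048)
public_pem = key_pair.publickey().export_key().decode("utf-8")
token = rsa_encrypt(public_pem, "my-login-password")
print(token)  # base64-encoded ciphertext, different on every call (PKCS#1 v1.5 padding is randomized)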
import numpy
def shuffle_and_split_data(data_frame):
"""
Shuffle and split the data into 2 sets: training and validation.
Args:
data_frame (pandas.DataFrame): the data to shuffle and split
Returns:
2 numpy.ndarray objects -> (train_indices, validation_indices)
Each hold the index positions for data in the pandas.DataFrame
"""
shuffled_indices = numpy.random.permutation(len(data_frame))
train_up_to = int(len(data_frame) * 0.7)
train_indices = shuffled_indices[:train_up_to]
validation_indices = shuffled_indices[train_up_to:]
return train_indices, validation_indices | dfcad7edb9ec17b81057e00816fe3d5bdadc39be | 3,653,341 |
import numpy as np
def parse_array_from_string(list_str, dtype=int):
""" Create a 1D array from text in string.
Args:
list_str: input string holding the array elements.
Array elements should be contained in brackets [] and seperated
by comma.
dtype: data type of the array elements. Default is "int"
Returns:
1D numpy array
"""
list_str = list_str.lstrip().rstrip()
if not (list_str.startswith('[') and list_str.endswith(']')):
msg = 'list_str should start with "[" and end with "]".'
raise (SyntaxError(msg))
return np.array(list_str[1:-1].split(','), dtype=dtype) | b05204a1c6d516a4f4eed298819bda97c5637f37 | 3,653,342 |
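Example inputs (assuming parse_array_from_string above):
print(parse_array_from_string("[1,2,3]"))                 # array([1, 2, 3])
print(parse_array_from_string("[0.5,1.5]", dtype=float))  # array([0.5, 1.5])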
def Maj(x, y, z):
""" Majority function: False when majority are False
Maj(x, y, z) = (x ∧ y) ⊕ (x ∧ z) ⊕ (y ∧ z)
"""
return (x & y) ^ (x & z) ^ (y & z) | 7d4013dfc109b4fc39fd3b0bd3f2f5947d207ff0 | 3,653,343 |
import pickle
def get_package_data():
"""Load services and conn_states data into memory"""
with open(DATA_PKL_FILE, "rb") as f:
services, conn_states = pickle.load(f)
return services, conn_states | 8bff214f2256f98e43599f4e5ce73d53232e9a7a | 3,653,344 |
import os
def is_module(module):
"""Check if a given string is an existing module contained in the
``MODULES_FOLDER`` constant."""
if (os.path.isdir(os.path.join(MODULES_FOLDER, module)) and
not module.startswith('_')):
return True
return False | ac3ab55cbc2c359207aaa9d4c83d2aba5e0de895 | 3,653,345 |
import os
def _finalize_sv(solution_file, data):
"""Add output files from TitanCNA calling optional solution.
"""
out = {"variantcaller": "titancna"}
with open(solution_file) as in_handle:
solution = dict(zip(in_handle.readline().strip("\r\n").split("\t"),
in_handle.readline().strip("\r\n").split("\t")))
if solution.get("path"):
out["purity"] = solution["purity"]
out["ploidy"] = solution["ploidy"]
out["cellular_prevalence"] = [x.strip() for x in solution["cellPrev"].split(",")]
base = os.path.basename(solution["path"])
out["plot"] = dict([(n, solution["path"] + ext) for (n, ext) in [("rplots", ".Rplots.pdf"),
("cf", "/%s_CF.pdf" % base),
("cna", "/%s_CNA.pdf" % base),
("loh", "/%s_LOH.pdf" % base)]
if os.path.exists(solution["path"] + ext)])
out["subclones"] = "%s.segs.txt" % solution["path"]
out["hetsummary"] = solution_file
out["vrn_file"] = to_vcf(out["subclones"], "TitanCNA", _get_header, _seg_to_vcf, data)
out["lohsummary"] = loh.summary_status(out, data)
return out | 922642e8450f4345082383d9b80509198912747a | 3,653,346 |
def reload_county():
""" Return bird species, totals, location to map """
# receive data from drop-down menu ajax request
bird = request.args.get("bird")
county = request.args.get("county")
# get the zoom level of the new chosen county
zoomLevel = get_zoom(county)
# reset session data from the ajax request
session["bird_name"] = bird
session["county_name"] = county
session["zoom_level"] = zoomLevel
# CENTER map; get_county returns long, lat tuple.
long_lat = get_county(county)
longitude, latitude = long_lat
birding_locations = create_geoFeature(bird, county)
# send all this information to website using json
bird_data = {
"longitude": longitude,
"latitude": latitude,
"mapbox_api_key": mapbox_api_key,
"birding_locations": birding_locations,
"bird": bird,
"county": county,
"zoomLevel": zoomLevel}
return jsonify(bird_data) | 6c3ad39e12483579d0c9031b5c9a56babcac3823 | 3,653,347 |
import re
def get_conv2d_out_channels(kernel_shape, kernel_layout):
"""Get conv2d output channels"""
kernel_shape = get_const_tuple(kernel_shape)
if len(kernel_shape) == 4:
idx = kernel_layout.find("O")
assert idx >= 0, "Invalid conv2d kernel layout {}".format(kernel_layout)
return kernel_shape[idx]
if re.match(r"OIHW\d*i\d*o", kernel_layout):
return kernel_shape[0] * kernel_shape[5]
if re.match(r"OIHW\d*o", kernel_layout):
return kernel_shape[0] * kernel_shape[4]
raise ValueError("Unknown conv2d kernel layout {}".format(kernel_layout)) | 4b26979b873f36b79f5e29d0c814417a4c21eb32 | 3,653,348 |
def bindparam(key, value=None, type_=None, unique=False, required=False, callable_=None):
"""Create a bind parameter clause with the given key.
:param key:
the key for this bind param. Will be used in the generated
SQL statement for dialects that use named parameters. This
value may be modified when part of a compilation operation,
if other :class:`_BindParamClause` objects exist with the same
key, or if its length is too long and truncation is
required.
:param value:
Initial value for this bind param. This value may be
overridden by the dictionary of parameters sent to statement
compilation/execution.
:param callable\_:
A callable function that takes the place of "value". The function
will be called at statement execution time to determine the
ultimate value. Used for scenarios where the actual bind
value cannot be determined at the point at which the clause
construct is created, but embedded bind values are still desirable.
:param type\_:
A ``TypeEngine`` object that will be used to pre-process the
value corresponding to this :class:`_BindParamClause` at
execution time.
:param unique:
if True, the key name of this BindParamClause will be
modified if another :class:`_BindParamClause` of the same name
already has been located within the containing
:class:`.ClauseElement`.
:param required:
a value is required at execution time.
"""
if isinstance(key, ColumnClause):
return _BindParamClause(key.name, value, type_=key.type,
callable_=callable_,
unique=unique, required=required)
else:
return _BindParamClause(key, value, type_=type_,
callable_=callable_,
unique=unique, required=required) | 5dc1b311d0dfae04b31d1e869015dbaef9fc2f42 | 3,653,349 |
def create_dictionary(timestamp, original_sentence, sequence_switched, err_message, suggestion_list):
"""Create Dictionary Function
Generates and exports a dictionary object with relevant data for website interaction to take place.
"""
if len(suggestion_list) != 0:
err_message_str = "Possible error: " + err_message + "\n \n"
new_dictionary = {
"timestamp": timestamp,
"original_sentence": original_sentence,
"masked_sentence": sequence_switched,
"err_message": err_message,
"possible_corrections": suggestion_list
}
return new_dictionary
else:
return {} | 057d407089a7bb4e445bd0db2632dfcb9f291ed6 | 3,653,350 |
import pandas as pd
import os
def benefits(path):
"""Unemployment of Blue Collar Workers
a cross-section from 1972
*number of observations* : 4877
*observation* : individuals
*country* : United States
A time serie containing :
stateur
state unemployment rate (in %)
statemb
state maximum benefit level
state
state of residence code
age
age in years
tenure
years of tenure in job lost
joblost
a factor with levels
(slack\\\_work,position\\\_abolished,seasonal\\\_job\\\_ended,other)
nwhite
non-white ?
school12
more than 12 years of school ?
sex
a factor with levels (male,female)
bluecol
blue collar worker ?
smsa
lives is smsa ?
married
married ?
dkids
has kids ?
dykids
has young kids (0-5 yrs) ?
yrdispl
year of job displacement (1982=1,..., 1991=10)
rr
replacement rate
head
is head of household ?
ui
applied for (and received) UI benefits ?
McCall, B.P. (1995) “The impact of unemployment insurance benefit levels
on recipiency”, *Journal of Business and Economic Statistics*, **13**,
189–198.
Args:
path: str.
Path to directory which either stores file or otherwise file will
be downloaded and extracted there.
Filename is `benefits.csv`.
Returns:
Tuple of np.ndarray `x_train` with 4877 rows and 18 columns and
dictionary `metadata` of column headers (feature names).
"""
path = os.path.expanduser(path)
filename = 'benefits.csv'
if not os.path.exists(os.path.join(path, filename)):
url = 'http://dustintran.com/data/r/Ecdat/Benefits.csv'
maybe_download_and_extract(path, url,
save_file_name='benefits.csv',
resume=False)
data = pd.read_csv(os.path.join(path, filename), index_col=0,
parse_dates=True)
x_train = data.values
metadata = {'columns': data.columns}
return x_train, metadata | 651b3b1798f340d668a55241dd4ba6b54ec22881 | 3,653,351 |
import numpy as np
def get_L_BB_b2_d_t(L_BB_b2_d, L_dashdash_b2_d_t):
    """
    Args:
        L_BB_b2_d: daily values (one per day)
        L_dashdash_b2_d_t: hourly values (24 * 365)
    Returns:
        L_BB_b2_d_t: hourly values (24 * 365)
    """
L_BB_b2_d_t = np.zeros(24 * 365)
L_BB_b2_d = np.repeat(L_BB_b2_d, 24)
L_dashdash_b2_d = np.repeat(get_L_dashdash_b2_d(L_dashdash_b2_d_t), 24)
f = L_dashdash_b2_d > 0
L_BB_b2_d_t[f] = L_BB_b2_d[f] * L_dashdash_b2_d_t[f] / L_dashdash_b2_d[f]
return L_BB_b2_d_t | 51b3551e68e9bbccbe756156d0d623b32a47c23f | 3,653,352 |
def _get_tab_counts(business_id_filter, conversation_tab, ru_ref_filter, survey_id):
"""gets the thread count for either the current conversation tab, or, if the ru_ref_filter is active it returns
the current conversation tab and all other tabs. i.e the value for the 'current' tab is always populated.
Calls two different secure message endpoints depending on if ru_ref_filter is set
as the get all is more expensive"""
if ru_ref_filter:
return message_controllers.get_all_conversation_type_counts(survey_id=survey_id,
conversation_tab=conversation_tab,
business_id=business_id_filter)
thread_count = message_controllers.get_conversation_count(survey_id=survey_id,
business_id=business_id_filter,
conversation_tab=conversation_tab)
return {'current': thread_count} | 9e79d7d692661496a49db93754716e10644bccf2 | 3,653,353 |
def IsInverseTime(*args):
"""Time delay is inversely adjsuted, proportinal to the amount of voltage outside the regulating band."""
# Getter
if len(args) == 0:
return lib.RegControls_Get_IsInverseTime() != 0
# Setter
Value, = args
lib.RegControls_Set_IsInverseTime(Value) | e0c1b3fef4d3c8b6a822a2946703503628a3f775 | 3,653,354 |
def create_userinfo(fname, lname, keypass):
"""
function to create new user
"""
new_userinfo = Userinfo(fname, lname, keypass)
return new_userinfo | ec7ae9a8cf79482498218571d04bee11ab767d98 | 3,653,355 |
from typing import Dict
def get_networks() -> Dict[str, SpikingNetwork]:
"""Get a set of spiking networks to train."""
somatic_spike_fn = get_spike_fn(threshold=15)
dendritic_nl_fn = get_default_dendritic_fn(
threshold=2, sensitivity=10, gain=1
)
neuron_params = RecurrentNeuronParameters(
tau_mem=10e-3,
tau_syn=5e-3,
backprop_gain=0.5,
feedback_strength=15,
somatic_spike_fn=somatic_spike_fn,
dendritic_spike_fn=dendritic_nl_fn,
)
parallel_params = PRCNeuronParameters(
tau_mem=10e-3,
tau_syn=5e-3,
backprop_gain=0.05,
feedback_strength=15,
somatic_spike_fn=somatic_spike_fn,
dend_na_fn=dendritic_nl_fn,
dend_ca_fn=get_sigmoid_fn(threshold=4, sensitivity=10, gain=1),
dend_nmda_fn=dendritic_nl_fn,
tau_dend_na=5e-3,
tau_dend_ca=40e-3,
tau_dend_nmda=80e-3,
)
simple_network_architecture = deepcopy(NETWORK_ARCHITECTURE)
simple_network_architecture.weight_scale_by_layer = (3, 7)
two_compartment_network_architecture = deepcopy(NETWORK_ARCHITECTURE)
two_compartment_network_architecture.weight_scale_by_layer = (0.5, 7)
parallel_network_architecture = deepcopy(NETWORK_ARCHITECTURE)
parallel_network_architecture.weight_scale_by_layer = (0.02, 7)
nets = {
'One compartment': SpikingNetwork(
neuron_params, simple_network_architecture
),
'No BAP': TwoCompartmentSpikingNetwork(
neuron_params, two_compartment_network_architecture
),
'BAP': RecurrentSpikingNetwork(
neuron_params, two_compartment_network_architecture
),
'Parallel subunits, no BAP': ParallelSpikingNetwork(
parallel_params, parallel_network_architecture
),
'Parallel subunits + BAP (full PRC model)': PRCSpikingNetwork(
parallel_params, parallel_network_architecture
),
}
return nets | d20f93eb849134c5104c22e9724bcadf09a4a141 | 3,653,356 |
import os
from tqdm import tqdm
def process_files(pair_path):
"""
Process all protein (pdb) and ligand (sdf) files in input directory.
Args
pair_path dir (str): directory containing PDBBind data
Returns
structure_dict (dict): dictionary containing each structure, keyed by PDB code. Each PDB is a dict containing protein in Biopython format and ligand in RDKit Mol format
"""
structure_dict = {}
pose_path = os.path.join(pair_path, 'ligand_poses')
# get starting protein structure
pdb_files = fi.find_files(pair_path, 'pdb')
for f in tqdm(pdb_files, desc='pdb files'):
prot = dt.read_any(f)
structure_dict['protein'] = prot
# get ligand pose structures
lig_files = fi.find_files(pose_path, 'sdf')
for f in tqdm(lig_files, desc='ligand files'):
structure_dict[fi.get_pdb_name(f)] = get_ligand(f)
return structure_dict | 1932e4507b4a1cefca3085940de32488814256d4 | 3,653,357 |
import collections
import pandas as pd
def metric_group_max(df, metric_names=None):
"""Find the step which achieves the highest mean value for a group of metrics."""
# Use METRIC_NAMES defined at the top as default
metric_names = metric_names or METRIC_NAMES
group_to_metrics = collections.defaultdict(set)
for metric in metric_names.values():
group_to_metrics[metric.group].add(metric.name)
group_df = pd.DataFrame()
for group, metrics in group_to_metrics.items():
if not all(m in df for m in metrics):
continue
group_df[group] = df[metrics].mean(axis=1)
# Need to replace nan with large negative value for idxmax
group_max_step = group_df.fillna(-1e9).idxmax(axis=0)
metric_max = pd.Series()
metric_max_step = pd.Series()
    for group_name, max_step in group_max_step.items():
for metric in group_to_metrics[group_name]:
metric_max[metric] = df[metric][max_step]
metric_max_step[metric] = max_step
metric_max = metric_max.reindex(df.columns)
metric_max_step = metric_max_step.reindex(df.columns)
return metric_max, metric_max_step | 6f58e9f3a18f6185c1956a994b47f9f4fb9936ea | 3,653,358 |
def get_settings_value(definitions: Definitions, setting_name: str):
"""Get a Mathics Settings` value with name "setting_name" from definitions. If setting_name is not defined return None"""
settings_value = definitions.get_ownvalue(setting_name)
if settings_value is None:
return None
return settings_value.replace.to_python(string_quotes=False) | 3d05b234f85a13746b47ca97f3db578d3c7d6856 | 3,653,359 |
def show_clusterhost(clusterhost_id):
"""Get clusterhost."""
data = _get_request_args()
return utils.make_json_response(
200,
_reformat_host(cluster_api.get_clusterhost(
clusterhost_id, user=current_user, **data
))
) | a49a0027b8f7ab1ce20e762f960b6d8285d8850c | 3,653,360 |
import math
import numpy as np
def resize3d_cubic(data_in, scale, coordinate_transformation_mode):
"""Tricubic 3d scaling using python"""
dtype = data_in.dtype
d, h, w = data_in.shape
new_d, new_h, new_w = [int(round(i * s)) for i, s in zip(data_in.shape, scale)]
data_out = np.ones((new_d, new_h, new_w))
def _cubic_spline_weights(t, alpha=-0.5):
"""create cubic spline weights in 1D"""
t2 = t * t
t3 = t * t * t
w1 = alpha * (t3 - 2 * t2 + t)
w2 = (alpha + 2) * t3 - (3 + alpha) * t2 + 1
w3 = -(alpha + 2) * t3 + (3 + 2 * alpha) * t2 - alpha * t
w4 = -alpha * t3 + alpha * t2
return np.array([w1, w2, w3, w4])
indexes = np.mgrid[-1:3, -1:3, -1:3]
def _get_patch(zint, yint, xint):
# Get the surrounding values
indices = indexes.copy()
indices[0] = np.maximum(np.minimum(indexes[0] + zint, d - 1), 0)
indices[1] = np.maximum(np.minimum(indexes[1] + yint, h - 1), 0)
indices[2] = np.maximum(np.minimum(indexes[2] + xint, w - 1), 0)
p = data_in[indices[0], indices[1], indices[2]]
return p
for m in range(new_d):
for j in range(new_h):
for k in range(new_w):
in_z = get_inx(m, d, new_d, coordinate_transformation_mode)
in_y = get_inx(j, h, new_h, coordinate_transformation_mode)
in_x = get_inx(k, w, new_w, coordinate_transformation_mode)
zint = math.floor(in_z)
zfract = in_z - math.floor(in_z)
yint = math.floor(in_y)
yfract = in_y - math.floor(in_y)
xint = math.floor(in_x)
xfract = in_x - math.floor(in_x)
wz = _cubic_spline_weights(zfract)
wy = _cubic_spline_weights(yfract)
wx = _cubic_spline_weights(xfract)
p = _get_patch(zint, yint, xint)
l = np.sum(p * wx, axis=-1)
col = np.sum(l * wy, axis=-1)
data_out[m, j, k] = np.sum(col * wz)
return data_out | 42f1a14e5c1133c7ce53b5770d62001e1dacbc6d | 3,653,361 |
def seasurface_skintemp_correct(*args):
"""
Description:
Wrapper function which by OOI default applies both of the METBK seasurface
skin temperature correction algorithms (warmlayer, coolskin in coare35vn).
This behavior is set by the global switches JWARMFL=1 and JCOOLFL=1. The
switch construction is retained for generality.
Most of the METBK L2 data products and 2 of the metadata products require
the skin corrections to be applied before their values can be calculated.
Warmlayer corrections dsea are added.
Coolskin corrections dter and dqer are subtracted.
Implemented by:
2014-09-01: Russell Desiderio. Initial code.
Usage (command line spaced out for clarity):
(usr, tsr, qsr, ut, dter, dqer, tkt, L, zou, zot, zoq, # coare35vn output
dt_wrm, tk_pwp, dsea) = # warmlayer output
seasurface_skintemp_correct
(rain_rate, timestamp, lon, ztmpwat, tC_sea, wnd, zwindsp,
tC_air, ztmpair, relhum, zhumair, pr_air, Rshort_down,
Rlong_down, lat, zinvpbl, jcool, jwarm)
where
OUTPUTS (documentation from coare35vn matlab code):
        usr = friction velocity that includes gustiness [m/s]
tsr = temperature scaling parameter [K]
qsr = specific humidity scaling parameter [g/g, I changed this from Edson code]
ut = not an output of the original code
dter = coolskin temperature depression [degC]
dqer = coolskin humidity depression [kg/kg]
tkt = coolskin thickness [m]
L = Obukhov length scale [m]
zou = wind roughness length [m]
zot = thermal roughness length [m]
zoq = moisture roughness length [m]
OUTPUTS (documentation from coare35vnWarm matlab code):
dt_wrm = warming across entire warmlayer [degC]
tk_pwp = warmlayer thickness [m]
dsea = additive warmlayer temperature correction [degC];
(this is warmlayer's key output)
INPUTS:
rain_rate = rainfall [mm/hr]
timestamp = seconds since 01-01-1900
lon = longitude [deg]
ztmpwat = depth of bulk sea temperature measurement [m]
tC_sea = bulk sea surface temperature [degC]
wnd = windspeed relative to current [m/s]
zwindsp = height of windspeed measurement[m]
tC_air = air temperature [degC]
ztmpair = height of air temperature measurement [m]
relhum = relative humidity [%]
zhumair = height of air humidity measurement [m]
pr_air = air pressure [mb]
Rshort_down = downwelling shortwave irradiation [W/m^2]
Rlong_down = downwelling longwave irradiation [W/m^2]
lat = latitude [deg]
zinvpbl = inversion height; default is 600m [m]
jcool = switch to activate coolskin algorithm (hardwired to 1 = true)
        jwarm = switch to activate warmlayer algorithm (hardwired to 1 = true)
References:
Fairall, C.W., E.F. Bradley, J.S. Godfrey, G.A. Wick, J.B. Edson, and G.S. Young
(1996) Cool-skin and warm-layer effects on sea surface temperature. JGR, Vol. 101,
No. C1, 1295-1308, 1996.
OOI (2014). Data Product Specification for L2 BULKFLX Data Products.
Document Control Number 1341-00370.
https://alfresco.oceanobservatories.org/ (See: Company Home >>
OOI >> Controlled >> 1000 System Level >>
1341-00370_Data_Product_Spec_BULKFLX_OOI.pdf)
OOI (2014). 1341-00370_BULKFLX Artifacts. https://alfresco.oceanobservatories.org/
(See: Company Home >> OOI >> REFERENCE >> Data Product Specification Artifacts
>> 1341-00370_BULKFLX (Original matlab code).
Notes:
(1) the jwarm switch selects whether or not the warmlayer code is run.
the jcool 'switch' is itself a variable within the (original)
coare35vn code; it was used as a multiplicative factor when
calculating coolskin corrections, so that when jcool=0, the
corrections are set to 0.
(2) for OOI jwarm and jcool are always 1, because all of the OOI
sea temperature measurements are bulk, not skin, measurements.
(3) in the more general case, jwarm = jcool always, because:
(a) jcool = 1 indicates that the input sea temperature values are
bulk measurements, not surface skin measurements made with
an infrared thermometer. in this bulk measurement case, both
coolskin and warmlayer corrections to the bulk temperature are
required to model the skin temperature (jcool = jwarm = 1).
(b) jcool = 0 indicates that the input sea temperature values are
surface skin temperatures directly measured with an infrared
thermometer, and therefore both the coolskin and warmlayer
corrections are not to be applied (jcool = jwarm = 0).
(4) however, both switches are retained for generality in case this
open source code is appropriated and adapted. (plus, the DPS
specified archiving the jwarm and jcool switches as metadata).
(5) the OOI cyberinfrastructure model originally required that each data
product be specifically calculated by one function. This is the main
reason that the wrapper function construct is used. In addition, I've
chosen to explicitly write out its output tuple arguments for each
data product call, so that the dependence of the various data products
on these tuple arguments is obvious (underscores are used as placeholders
for those arguments not used in any particular function call). In
particular, this construct specifically identifies when coolskin and
warmlayer temperature corrections have been applied to various variables
in the original code. (For example - the latent heat of vaporization for
water depends on water temperature, but only the warmlayer correction is
used calculate it).
"""
jwarm = args[-1] # jwarm (and jcool) are scalars
if jwarm:
(dt_wrm, tk_pwp, dsea) = warmlayer(*args[0:-1]) # does not pass jwarm
else:
# the tk_pwp parameter is often used as a divisor in warmlayer calculations to
# compare the warmlayer depth with the depth of the bulk temperature sensor.
# when the warmlayer code is not run, the desired results will be obtained if
# dt_warm and dsea are set to 0 where tk_pwp is nonzero so that a divide by
# zero error does not result. the value chosen is the default value specified
# in the warmlayer code itself.
(dt_wrm, tk_pwp, dsea) = (0.0, 19.0, 0.0)
# construct tuple containing coolskin input arguments;
# add the warmlayer temperature correction to the msrd bulk sea temp.
coolskin_args = (args[4]+dsea,) + args[5:-1] # does not pass jwarm
# append results of warmlayer calculation to output,
# as is also done in original coare35vn warmlayer matlab code.
return coare35vn(*coolskin_args) + (dt_wrm, tk_pwp, dsea) | 80ccf63dcf961a4fa488a89023c2516e69862f86 | 3,653,362 |
import random
import os
def run_experiment_here(
experiment_function,
variant=None,
exp_id=0,
seed=0,
use_gpu=True,
gpu_id=0,
# Logger params:
exp_name="default",
snapshot_mode='last',
snapshot_gap=1,
git_infos=None,
script_name=None,
trial_dir_suffix=None,
randomize_seed=False,
**setup_logger_kwargs
):
"""
Run an experiment locally without any serialization.
:param experiment_function: Function. `variant` will be passed in as its
only argument.
:param exp_name: Experiment prefix for the save file.
:param variant: Dictionary passed in to `experiment_function`.
:param exp_id: Experiment ID. Should be unique across all
    experiments. Note that one experiment may correspond to multiple seeds.
:param seed: Seed used for this experiment.
    :param use_gpu: Run with GPU. By default True.
:param script_name: Name of the running script
:param log_dir: If set, set the log directory to this. Otherwise,
the directory will be auto-generated based on the exp_name.
:return:
"""
if variant is None:
variant = {}
variant['exp_id'] = str(exp_id)
if randomize_seed or (seed is None and 'seed' not in variant):
seed = random.randint(0, 100000)
variant['seed'] = seed
reset_execution_environment()
actual_log_dir = setup_logger(
exp_name=exp_name,
variant=variant,
exp_id=exp_id,
seed=seed,
snapshot_mode=snapshot_mode,
snapshot_gap=snapshot_gap,
git_infos=git_infos,
script_name=script_name,
trial_dir_suffix=trial_dir_suffix,
**setup_logger_kwargs
)
set_seed(seed)
os.environ['gpu_id'] = str(gpu_id)
run_experiment_here_kwargs = dict(
variant=variant,
exp_id=exp_id,
seed=seed,
use_gpu=use_gpu,
exp_name=exp_name,
snapshot_mode=snapshot_mode,
snapshot_gap=snapshot_gap,
git_infos=git_infos,
script_name=script_name,
**setup_logger_kwargs
)
save_experiment_data(
dict(
run_experiment_here_kwargs=run_experiment_here_kwargs
),
actual_log_dir
)
return experiment_function(variant) | f48cda086feef7fefb96c7b0412471bc66f2d206 | 3,653,363 |
def extract_character_pairs(letter_case, reverse_letter_case):
"""
    Extract character pairs. Only keep pairs whose two Unicode values are case mappings of each other (i.e. the conversion is bidirectional).
:param letter_case: case mappings dictionary which contains the conversions.
:param reverse_letter_case: Comparable case mapping table which contains the return direction of the conversion.
:return: A table with character pairs.
"""
character_pairs = []
for letter_id in sorted(letter_case.keys()):
if is_bidirectional_conversion(letter_id, letter_case, reverse_letter_case):
mapped_value = letter_case[letter_id]
character_pairs.extend([letter_id, ord(mapped_value)])
# Remove character pairs from case mapping tables
del letter_case[letter_id]
del reverse_letter_case[ord(mapped_value)]
return character_pairs | 29e5415afc4e4a3bff5cd74c1fa14f78cf715384 | 3,653,364 |
def after_timestep(simulation, is_steady, force_steady=False):
"""
Move u -> up, up -> upp and prepare for the next time step
"""
# Stopping criteria for steady state simulations
vel_diff = None
if is_steady:
vel_diff = 0
for d in range(simulation.ndim):
u_new = simulation.data['u%d' % d]
up = simulation.data['up%d' % d]
diff = abs(u_new.vector().get_local() - up.vector().get_local()).max()
vel_diff = max(vel_diff, diff)
shift_fields(simulation, ['u%d', 'up%d', 'upp%d'])
shift_fields(simulation, ['u_conv%d', 'up_conv%d', 'upp_conv%d'])
if force_steady:
simulation.data['time_coeffs'].assign(dolfin.Constant([0.0, 0.0, 0.0]))
else:
# Change time coefficient to second order
simulation.data['time_coeffs'].assign(dolfin.Constant([3 / 2, -2, 1 / 2]))
# Extrapolate the convecting velocity to the next step
update_convection(simulation, force_steady=force_steady)
return vel_diff | 7aa3436ba8bcc4ec395ba6f030b83e6fc3cb4bf3 | 3,653,365 |
import pandas as pd
from statsmodels.stats.weightstats import DescrStatsW
def get_summary_indices(df, on='NOSC'):
""" Get the summary stats for the indices: median, mean, std, weighted mean and weighted std """
samples = get_list_samples(df)
samples.append(on)
t = df[samples]
t = t.melt(id_vars=[on], var_name='SampleID', value_name='NormIntensity')
t = t[t['NormIntensity'] > 0].reset_index(drop=True)
t_agg = t.groupby(['SampleID']).agg({on: ['median', 'mean', 'std']})
t_agg.columns = t_agg.columns.map('_'.join)
t_agg = t_agg.reset_index()
t_agg[[on + '_w_mean', on + '_w_std']] = ''
for sample in t['SampleID'].unique():
# print(sample)
temp = t[t['SampleID'] == sample]
wdf = DescrStatsW(temp[on], weights=temp['NormIntensity'])
t_agg.loc[t_agg['SampleID'] == sample, on + '_w_mean'] = wdf.mean
t_agg.loc[t_agg['SampleID'] == sample, on + '_w_std'] = wdf.std
return t_agg | 1c430a9ad377e3d550e292b381af072d4adc78f0 | 3,653,366 |
def view_evidence(evidence_id: int):
"""View a single Evidence model."""
evidence = manager.get_evidence_by_id_or_404(evidence_id)
return render_template(
'evidence/evidence.html',
evidence=evidence,
manager=manager,
) | 8a51a3c6279a1501c26fb2de09c4450660546bf3 | 3,653,367 |
import os
def get_filenames(split, mode, data_dir):
"""Returns a list of filenames."""
if not split:
data_dir = os.path.join(data_dir, 'cifar-10-batches-bin')
assert os.path.exists(data_dir), (
'Run cifar10_download_and_extract.py first to download and extract the '
'CIFAR-10 data.')
if split:
if mode == 'train':
return [
os.path.join(data_dir, 'train_batch_%d.bin' % i)
for i in range(1, _NUM_DATA_FILES + 1)]
elif mode == 'valid':
return [os.path.join(data_dir, 'valid_batch.bin')]
else:
return [os.path.join(data_dir, 'test_batch.bin')]
else:
if mode == 'train':
return [
os.path.join(data_dir, 'data_batch_%d.bin' % i)
for i in range(1, _NUM_DATA_FILES + 1)
]
else:
return [os.path.join(data_dir, 'test_batch.bin')] | a1303f80acc21a3b32fda3b915565c26b6ea9fa6 | 3,653,368 |
def rigidBlades(blds, hub=None, r_O=[0,0,0]):
""" return a rigid body for the three blades
All bodies should be in a similar frame
"""
blades = blds[0].toRigidBody()
for B in blds[1:]:
B_rigid = B.toRigidBody()
blades = blades.combine(B_rigid, r_O=r_O)
blades.name='blades'
return blades | 89b48ba43f748fa4b2db7ee768eabe9e79e9a453 | 3,653,369 |
import numpy as np
def mea_slow(posterior_matrix, shortest_ref_per_event, return_all=False):
"""Computes the maximum expected accuracy alignment along a reference with given events and probabilities.
Computes a very slow but thorough search through the matrix
:param posterior_matrix: matrix of posterior probabilities with reference along x axis and events along y
:param shortest_ref_per_event: shortest ref position per event
:param return_all: return all forward edges
"""
ref_len = len(posterior_matrix[0])
events_len = len(posterior_matrix)
initialize = True
forward_edges = list()
new_edges = list()
# step through all events
for event_index in range(events_len):
max_prob = 0
if initialize:
ref_index = 0
while ref_index < ref_len:
# intitialize forward edges with first event alignments
# if type(posterior_matrix[ref_index][event_index]) is not int:
posterior = posterior_matrix[event_index][ref_index]
event_data = [ref_index, event_index, posterior, posterior, None]
if 0 < posterior >= max_prob:
# print("True", posterior, max_prob)
new_edges.append(event_data)
max_prob = posterior
ref_index += 1
# print("INITIALIZE", new_edges, max_prob)
if len(new_edges) != 0:
forward_edges = new_edges
new_edges = list()
initialize = False
else:
# print(forward_edges)
ref_index = 0
top_edge = []
while ref_index < ref_len:
posterior = posterior_matrix[event_index][ref_index]
if posterior >= max_prob:
# no possible connecting edges and is needed for other other events create a new one
if ref_index < shortest_ref_per_event[event_index]:
top_edge.append([ref_index, event_index, posterior, posterior, None])
max_prob = posterior
ref_index += 1
# add top edge if needed
if top_edge:
new_edges.append(top_edge[-1])
ref_index = 0
while ref_index < ref_len:
inxs = []
probs = []
posterior = posterior_matrix[event_index][ref_index]
for j, forward_edge in enumerate(forward_edges):
if forward_edge[0] < ref_index:
# track which probabilities with prev edge
inxs.append(j)
probs.append(posterior + forward_edge[3])
# if needed, keep edges aligned to ref positions previous than the current ref position
elif forward_edge[0] == ref_index:
# stay at reference position
# add probability of event if we want to promote sideways movement
inxs.append(j)
probs.append(forward_edge[3])
# add new edge
inxs = inxs[::-1]
probs = probs[::-1]
if len(probs) != 0:
if max(probs) > max_prob:
connecting_edge = forward_edges[inxs[int(np.argmax(probs))]]
new_edges.append([ref_index, event_index, posterior, max(probs), connecting_edge])
max_prob = max(probs)
else:
if forward_edges[0][0] > ref_index and posterior > max_prob:
new_edges.append([ref_index, event_index, posterior, posterior, None])
max_prob = posterior
ref_index += 1
# print("END_NEW_EDGES", new_edges)
forward_edges = new_edges
new_edges = list()
# grab and return the highest probability edge
if return_all:
return forward_edges
else:
highest_prob = 0
best_forward_edge = 0
for x in forward_edges:
if x[3] > highest_prob:
highest_prob = x[3]
best_forward_edge = x
return best_forward_edge | 4b7165a0145d2e1ad2d0550910e03de5a775733c | 3,653,370 |
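# Illustrative usage sketch for mea_slow with a tiny synthetic 3-event x
# 4-position posterior matrix; the values and the shortest-ref list are
# invented for the example.
import numpy as np
example_posteriors = np.array([
    [0.9, 0.1, 0.0, 0.0],
    [0.1, 0.8, 0.1, 0.0],
    [0.0, 0.1, 0.2, 0.7],
])
example_shortest_ref = [0, 0, 1]  # shortest reference position per event
best_edge = mea_slow(example_posteriors, example_shortest_ref)
# best_edge = [ref_index, event_index, posterior, cumulative_prob, prev_edge]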
# NOTE: `trace` used below is expected to be a CART tree-traversal helper
# defined elsewhere in the module (the stdlib `trace` module is not callable).
def predict(cart_tree, feature_set, data_set):
"""Predict the quality."""
feature_dict = {}
for index, feature in enumerate(feature_set):
feature_dict[feature] = index
results = []
for element in data_set:
# Append a tuple.
results.append((trace(cart_tree, feature_dict, element), element[-1]))
return results | c7f50557202c4320194ecc5264059c1701e0de73 | 3,653,371 |
def test_incorporate_getitem_through_switch(tag):
""" test_incorporate_getitem_through_switch """
fns = FnDict()
scalar_gt = Primitive('scalar_gt')
@fns
def before(x, y):
def f1(x, y):
return x, y
def f2(x, y):
return y, x
return tuple_getitem(
switch(scalar_gt(x, 0), f1, f2)(x, y),
0)
@fns
def after(x, y):
def f1(x, y):
return x
def f2(x, y):
return y
return switch(scalar_gt(x, 0), f1, f2)(x, y)
return fns[tag] | df128faf55c48ba698340d06b3c232ebc0140511 | 3,653,372 |
def response_json(status, message, response):
"""
    Helper method that assembles the given data into a JSON-serializable dict
    :param status: status of the API call, either True or False
    :param message: user-friendly message
    :param response: data returned by the API
    :return: dict with status, message, and response keys
"""
data = {
"status": status,
"message": message,
"response": response,
}
return data | 9c7e30e81c5412998bc8523b0e45a353c82b5a41 | 3,653,373 |
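# Illustrative call to response_json; the payload values are invented for the
# example.
example_payload = response_json(
    status=True,
    message="User fetched successfully",
    response={"id": 42, "name": "example"},
)
# -> {"status": True, "message": "User fetched successfully",
#     "response": {"id": 42, "name": "example"}}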
from . import conf
def settings(request):
    """
    Template context processor exposing the theme configuration as ``bs3_conf``.
    """
    # Rebinding the module name `conf` inside the function would raise
    # UnboundLocalError, so copy its attributes into a separate local dict.
    theme_conf = dict(vars(conf))
    # theme_conf.update(ThemeSite.objects.get_theme_conf(request=request, fail=False))
    data = request.session.get('cms_bs3_theme_conf', {})
    theme_conf.update(data)
    return {'bs3_conf': theme_conf} | 1230171ce1263083aabbd0fb79928c9236af31a9 | 3,653,374
def NDVI(R, NIR):
""" Compute the NDVI
INPUT : R (np.array) -> the Red band images as a numpy array of float
NIR (np.array) -> the Near Infrared images as a numpy array of float
OUTPUT : NDVI (np.array) -> the NDVI
"""
NDVI = (NIR - R) / (NIR + R + 1e-12)
return NDVI | aa1789c80720c09aa464b3ae67da7de821e2ba97 | 3,653,375 |
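# Illustrative NDVI computation on small synthetic reflectance arrays (values
# invented for the example).
import numpy as np
red = np.array([0.10, 0.20, 0.30])
nir = np.array([0.60, 0.50, 0.35])
print(NDVI(red, nir))  # values in (-1, 1); higher indicates denser vegetation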
from typing import Union
from typing import Optional
from datetime import datetime
import numpy as np
import pandas as pd
def get_nearby_stations_by_number(
latitude: float,
longitude: float,
num_stations_nearby: int,
parameter: Union[Parameter, str],
time_resolution: Union[TimeResolution, str],
period_type: Union[PeriodType, str],
minimal_available_date: Optional[Union[datetime, str]] = None,
maximal_available_date: Optional[Union[datetime, str]] = None,
) -> pd.DataFrame:
"""
Provides a list of weather station ids for the requested data
:param latitude: Latitude of location to search for nearest
weather station
:param longitude: Longitude of location to search for nearest
weather station
:param minimal_available_date: Start date of timespan where measurements
should be available
:param maximal_available_date: End date of timespan where measurements
should be available
:param parameter: Observation measure
:param time_resolution: Frequency/granularity of measurement interval
:param period_type: Recent or historical files
:param num_stations_nearby: Number of stations that should be nearby
:return: DataFrames with valid stations in radius per
requested location
"""
if num_stations_nearby <= 0:
raise ValueError("'num_stations_nearby' has to be at least 1.")
parameter = parse_enumeration_from_template(parameter, Parameter)
time_resolution = parse_enumeration_from_template(time_resolution, TimeResolution)
period_type = parse_enumeration_from_template(period_type, PeriodType)
if not check_parameters(parameter, time_resolution, period_type):
raise InvalidParameterCombination(
f"The combination of {parameter.value}, {time_resolution.value}, "
f"{period_type.value} is invalid."
)
minimal_available_date = (
minimal_available_date
if not minimal_available_date or isinstance(minimal_available_date, datetime)
else parse_datetime(minimal_available_date)
)
maximal_available_date = (
maximal_available_date
        if not maximal_available_date or isinstance(maximal_available_date, datetime)
else parse_datetime(maximal_available_date)
)
if minimal_available_date and maximal_available_date:
if minimal_available_date > maximal_available_date:
raise ValueError(
"'minimal_available_date' has to be before " "'maximal_available_date'"
)
coords = Coordinates(np.array(latitude), np.array(longitude))
metadata = metadata_for_climate_observations(
parameter, time_resolution, period_type
)
# Filter only for stations that have a file
metadata = metadata[metadata[DWDMetaColumns.HAS_FILE.value].values]
if minimal_available_date:
metadata = metadata[
metadata[DWDMetaColumns.FROM_DATE.value] <= minimal_available_date
]
if maximal_available_date:
metadata = metadata[
metadata[DWDMetaColumns.TO_DATE.value] >= maximal_available_date
]
metadata = metadata.reset_index(drop=True)
distances, indices_nearest_neighbours = _derive_nearest_neighbours(
metadata.LAT.values, metadata.LON.values, coords, num_stations_nearby
)
distances = pd.Series(distances)
indices_nearest_neighbours = pd.Series(indices_nearest_neighbours)
# If num_stations_nearby is higher then the actual amount of stations
# further indices and distances are added which have to be filtered out
distances = distances[: min(metadata.shape[0], num_stations_nearby)]
indices_nearest_neighbours = indices_nearest_neighbours[
: min(metadata.shape[0], num_stations_nearby)
]
distances_km = np.array(distances * KM_EARTH_RADIUS)
metadata_location = metadata.iloc[indices_nearest_neighbours, :].reset_index(
drop=True
)
metadata_location[DWDMetaColumns.DISTANCE_TO_LOCATION.value] = distances_km
if metadata_location.empty:
logger.warning(
f"No weather stations were found for coordinate "
f"{latitude}°N and {longitude}°E "
)
return metadata_location | e53896ea4644bcce6351671ec950fe8165a2cb12 | 3,653,376 |
import numpy as np
import scipy.linalg
def get_state(tau, i=None, h=None, delta=None, state_0=None, a_matrix=None):
"""
Compute the magnetization state.
r(τ) = e^(Aτ)r(0) eq (11) at[1]
"""
if a_matrix is not None:
# get state from a known A matrix
# A matrix can be shared and it takes time to build
return np.matmul(scipy.linalg.expm(tau*a_matrix), state_0)
return np.matmul(scipy.linalg.expm(
tau*generate_A(i, h, delta, state_0.size-1)), state_0) | a4ae277d41b64c9caf49758d62767030db0b244b | 3,653,377 |
import os
import re
def get_version():
"""Returns version number, without module import (which can lead to ImportError
    if some dependencies are unavailable before install)."""
contents = read_file(os.path.join('webscaff', '__init__.py'))
version = re.search('VERSION = \(([^)]+)\)', contents)
version = version.group(1).replace(', ', '.').strip()
return version | c945b404376f071d1a9f43f6865aea6d677f946f | 3,653,378 |
import datetime as dt
import logging
import numpy as np
import aacgmv2._aacgmv2 as c_aacgmv2
def convert_latlon_arr(in_lat, in_lon, height, dtime, code="G2A"):
"""Converts between geomagnetic coordinates and AACGM coordinates.
Parameters
------------
in_lat : (np.ndarray or list or float)
Input latitude in degrees N (code specifies type of latitude)
in_lon : (np.ndarray or list or float)
Input longitude in degrees E (code specifies type of longitude)
height : (np.ndarray or list or float)
Altitude above the surface of the earth in km
dtime : (datetime)
Single datetime object for magnetic field
code : (int or str)
Bit code or string denoting which type(s) of conversion to perform
G2A - geographic (geodetic) to AACGM-v2
A2G - AACGM-v2 to geographic (geodetic)
TRACE - use field-line tracing, not coefficients
ALLOWTRACE - use trace only above 2000 km
BADIDEA - use coefficients above 2000 km
GEOCENTRIC - assume inputs are geocentric w/ RE=6371.2
(default = "G2A")
Returns
-------
out_lat : (np.ndarray)
Output latitudes in degrees N
out_lon : (np.ndarray)
Output longitudes in degrees E
out_r : (np.ndarray)
Geocentric radial distance (R_Earth) or altitude above the surface of
the Earth (km)
Notes
-------
At least one of in_lat, in_lon, and height must be a list or array.
"""
# If a list was entered instead of a numpy array, recast it here
if isinstance(in_lat, list):
in_lat = np.array(in_lat)
if isinstance(in_lon, list):
in_lon = np.array(in_lon)
if isinstance(height, list):
height = np.array(height)
# If one or two of these elements is a float or int, create an array
test_array = np.array([hasattr(in_lat, "shape"), hasattr(in_lon, "shape"),
hasattr(height, "shape")])
if not test_array.all():
if test_array.any():
arr_shape = in_lat.shape if test_array.argmax() == 0 else \
(in_lon.shape if test_array.argmax() == 1 else
height.shape)
if not test_array[0]:
in_lat = np.ones(shape=arr_shape, dtype=float) * in_lat
if not test_array[1]:
in_lon = np.ones(shape=arr_shape, dtype=float) * in_lon
if not test_array[2]:
height = np.ones(shape=arr_shape, dtype=float) * height
else:
logging.info("for a single location, consider using convert_latlon")
in_lat = np.array([in_lat])
in_lon = np.array([in_lon])
height = np.array([height])
# Ensure that lat, lon, and height are the same length or if the lengths
# differ that the different ones contain only a single value
if not (in_lat.shape == in_lon.shape and in_lat.shape == height.shape):
ulen = np.unique([in_lat.shape, in_lon.shape, height.shape])
if ulen.min() != (1,):
logging.error("mismatched input arrays")
return None, None, None
# Test time
if isinstance(dtime, dt.date):
dtime = dt.datetime.combine(dtime, dt.time(0))
assert isinstance(dtime, dt.datetime), \
logging.error('time must be specified as datetime object')
# Test height
if np.min(height) < 0:
        logging.warning('conversion not intended for altitudes < 0 km')
# Initialise output
lat_out = np.empty(shape=in_lat.shape, dtype=float) * np.nan
lon_out = np.empty(shape=in_lon.shape, dtype=float) * np.nan
r_out = np.empty(shape=height.shape, dtype=float) * np.nan
# Test code
try:
code = code.upper()
if(np.nanmax(height) > 2000 and code.find("TRACE") < 0 and
code.find("ALLOWTRACE") < 0 and code.find("BADIDEA") < 0):
estr = 'coefficients are not valid for altitudes above 2000 km. You'
estr += ' must either use field-line tracing (trace=True '
estr += 'or allowtrace=True) or indicate you know this '
estr += 'is a bad idea'
logging.error(estr)
return lat_out, lon_out, r_out
# make flag
bit_code = convert_str_to_bit(code)
except AttributeError:
bit_code = code
assert isinstance(bit_code, int), \
logging.error("unknown code {:}".format(bit_code))
# Test latitude range
if np.abs(in_lat).max() > 90.0:
assert np.abs(in_lat).max() <= 90.1, \
logging.error('unrealistic latitude')
in_lat = np.clip(in_lat, -90.0, 90.0)
# Constrain longitudes between -180 and 180
in_lon = ((in_lon + 180.0) % 360.0) - 180.0
# Set current date and time
try:
c_aacgmv2.set_datetime(dtime.year, dtime.month, dtime.day, dtime.hour,
dtime.minute, dtime.second)
except:
raise RuntimeError("unable to set time for {:}".format(dtime))
# Vectorise the AACGM code
convert_vectorised = np.vectorize(c_aacgmv2.convert)
# convert
try:
lat_out, lon_out, r_out = convert_vectorised(in_lat, in_lon, height,
bit_code)
except:
pass
return lat_out, lon_out, r_out | d9efc4d58925ef9cd63e7c800258b99c91e14f7a | 3,653,379 |
import datetime
import os
import pandas as pd
def get_entsoe_renewable_data(file=None, version=None):
"""
Load the default file for re time series or a specific file.
Returns
-------
Examples
--------
>>> my_re=get_entsoe_renewable_data()
>>> int(my_re['DE_solar_generation_actual'].sum())
188160676
"""
if version is None:
version = cfg.get("entsoe", "timeseries_version")
path_pattern = os.path.join(cfg.get("paths", "entsoe"), "{0}")
if file is None:
fn = path_pattern.format(
cfg.get("entsoe", "renewables_file_csv").format(version=version)
)
else:
fn = file.format(version=version)
if not os.path.isfile(fn):
if file is None:
renewables = split_timeseries_file(version=version).renewables
renewables.to_csv(fn)
re = pd.read_csv(
fn,
index_col=[0],
parse_dates=True,
date_parser=lambda x: datetime.datetime.strptime(
x.split("+")[0], "%Y-%m-%d %H:%M:%S"
),
)
return re | 854e97ef3159ac1839145e833bed1708de01c607 | 3,653,380 |
import re
def getPredictedAnchor(title: str) -> str:
"""Return predicted anchor for given title, usually first letter."""
title = title.lower()
if title.startswith('npj '):
return 'npj series'
title = re.sub(r'^(the|a|an|der|die|das|den|dem|le|la|les|el|il)\s+', '',
title)
return title[0].upper() | 972eaa495078bc3929967a052f031c50d439fbdc | 3,653,381 |
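# Illustrative calls to getPredictedAnchor; the titles are chosen for the
# example only.
assert getPredictedAnchor("The Astrophysical Journal") == "A"
assert getPredictedAnchor("npj Quantum Materials") == "npj series"
assert getPredictedAnchor("Die Naturwissenschaften") == "N"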
from typing import Optional
from typing import Mapping
def get_contact_flow(contact_flow_id: Optional[str] = None,
instance_id: Optional[str] = None,
name: Optional[str] = None,
tags: Optional[Mapping[str, str]] = None,
type: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetContactFlowResult:
"""
Provides details about a specific Amazon Connect Contact Flow.
## Example Usage
By name
```python
import pulumi
import pulumi_aws as aws
test = aws.connect.get_contact_flow(instance_id="aaaaaaaa-bbbb-cccc-dddd-111111111111",
name="Test")
```
By contact_flow_id
```python
import pulumi
import pulumi_aws as aws
test = aws.connect.get_contact_flow(contact_flow_id="cccccccc-bbbb-cccc-dddd-111111111111",
instance_id="aaaaaaaa-bbbb-cccc-dddd-111111111111")
```
:param str contact_flow_id: Returns information on a specific Contact Flow by contact flow id
:param str instance_id: Reference to the hosting Amazon Connect Instance
:param str name: Returns information on a specific Contact Flow by name
:param Mapping[str, str] tags: A the map of tags to assign to the Contact Flow.
:param str type: Specifies the type of Contact Flow.
"""
__args__ = dict()
__args__['contactFlowId'] = contact_flow_id
__args__['instanceId'] = instance_id
__args__['name'] = name
__args__['tags'] = tags
__args__['type'] = type
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('aws:connect/getContactFlow:getContactFlow', __args__, opts=opts, typ=GetContactFlowResult).value
return AwaitableGetContactFlowResult(
arn=__ret__.arn,
contact_flow_id=__ret__.contact_flow_id,
content=__ret__.content,
description=__ret__.description,
id=__ret__.id,
instance_id=__ret__.instance_id,
name=__ret__.name,
tags=__ret__.tags,
type=__ret__.type) | ed57d1b17c19f66c38e67613e49653b11c13f699 | 3,653,382 |
import statsmodels.api as sm
def jensen_alpha_beta(risk_returns, benchmark_returns, Rebalancement_frequency):
"""
Compute the Beta and alpha of the investment under the CAPM
Parameters
----------
risk_returns : np.ndarray
benchmark_returns : np.ndarray
Rebalancement_frequency : np.float64
Returns
----------
np.float64,Beta,np.float64,Alpha
"""
benchmark_returns = sm.add_constant(benchmark_returns)
model = sm.OLS(risk_returns,benchmark_returns).fit()
alpha,beta = model.params[0] * Rebalancement_frequency , model.params[1]
return beta,alpha | ac9d1cf638e2ce67219ed16dbbffc652ff47c541 | 3,653,383 |
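# Illustrative usage sketch for jensen_alpha_beta with synthetic daily returns
# and an assumed rebalancing frequency of 252 trading days; the series below
# are generated for the example only.
import numpy as np
rng = np.random.default_rng(0)
benchmark = rng.normal(0.0004, 0.01, size=252)
asset = 0.0002 + 1.2 * benchmark + rng.normal(0.0, 0.005, size=252)
beta, alpha = jensen_alpha_beta(asset, benchmark, 252)
# beta should come out close to 1.2; alpha is the annualised intercept.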
def cycles_run() -> int:
"""Number of cycles run so far"""
return lib.m68k_cycles_run() | 145dc9a154a0ec4c2e46fecdeb7106134307cf10 | 3,653,384 |
def loop_and_return_fabric(lines):
"""
loops lines like:
#1196 @ 349,741: 17x17
"""
fabric = {}
for line in lines:
[x, y, x_length, y_length] = parse_line(line)
i_x, i_y = 0, 0
while i_y < y_length:
i_x = 0
while i_x < x_length:
this_coords = (x + i_x, y - i_y)
if fabric.get(this_coords, None) != None:
fabric[this_coords] += 1
else:
fabric[this_coords] = 1
i_x += 1
i_y += 1
return fabric | d5fd18c5b90c0e6576767a77c954b3546cbaef1a | 3,653,385 |
def get_sample(id):
"""Returns sample possessing id."""
for sample in samples_global:
if sample.id == id:
return sample
raise Exception(f'sample "{id}" could not be found') | 524305fe77ef5cc03ba51af3eb61301b697b9c1f | 3,653,386 |
def transcriptIterator(transcriptsBedStream, transcriptDetailsBedStream):
""" Iterates over the transcripts detailed in the two streams, producing
Transcript objects. Streams are any iterator that returns bedlines or empty
strings.
"""
transcriptsAnnotations = {}
for tokens in tokenizeBedStream(transcriptDetailsBedStream):
assert (len(tokens) == 4 or len(tokens) == 9) # 9 if it has color data.
tA = TranscriptAnnotation(
ChromosomeInterval(tokens[0], tokens[1], tokens[2], None),
tokens[3].split('/')[-1], tokens[3].split('/')[:-1])
# normalizeAnnotation(tA) # removed this to improve xml
key = (tA.name, tA.chromosomeInterval.chromosome)
if key not in transcriptsAnnotations:
transcriptsAnnotations[key] = []
transcriptsAnnotations[key].append(tA)
for tokens in tokenizeBedStream(transcriptsBedStream):
assert len(tokens) == 12
# Transcript
name = tokens[3]
# Get the chromosome interval
assert tokens[5] in ['+', '-']
cI = ChromosomeInterval(tokens[0], tokens[1], tokens[2], tokens[5] == '+')
# Get the exons
def getExons(exonNumber, blockSizes, blockStarts):
assert exonNumber == len(blockSizes)
assert exonNumber == len(blockStarts)
return [ChromosomeInterval(
cI.chromosome, cI.start + int(blockStarts[i]),
cI.start + int(blockStarts[i]) + int(blockSizes[i]), cI.strand)
for i in range(exonNumber)]
exons = getExons(int(tokens[9]),
tokens[10].split(','), tokens[11].split(','))
# Get the name annotations
annotations = []
key = (name, cI.chromosome)
if key in transcriptsAnnotations:
annotations = transcriptsAnnotations[key]
filteredAnnotations = []
for tA in annotations:
if cI.contains(tA.chromosomeInterval):
tA.chromosomeInterval.strand = cI.strand
filteredAnnotations.append(tA)
yield Transcript(
cI, name, exons, filteredAnnotations,
int(tokens[4]), int(tokens[6]),
int(tokens[7]), tokens[8]) | 2be2bbca915667be89220d92c42b8a8dce905cc4 | 3,653,387 |
import re
def convert_check_filter(tok):
"""Convert an input string into a filter function.
The filter function accepts a qualified python identifier string
and returns a bool.
The input can be a regexp or a simple string. A simple string must
match a component of the qualified name exactly. A regexp is
matched against the entire qualified name.
Matches are case-insensitive.
Examples::
convert_check_filter('foo')('a.foo.b') == True
convert_check_filter('foo')('a.foobar') == False
convert_check_filter('foo.*')('a.foobar') == False
convert_check_filter('foo.*')('foobar') == True
"""
tok = tok.lower()
if '+' in tok or '*' in tok:
return re.compile(tok, re.I).match
else:
toklist = tok.split('.')
def func(name):
chunks = name.lower().split('.')
if len(toklist) > len(chunks):
return False
for i in range(len(chunks)):
if chunks[i:i + len(toklist)] == toklist:
return True
return False
return func | 9d1aaa9a5007371e4f33ce3b4fbc86edd15875c6 | 3,653,388 |
import numpy as np
import scipy.stats as scistat
from munch import Munch
def region_stats(x, r_start, r_end):
"""
Generate basic stats on each region. Return a dict for easy insertion into a DataFrame.
"""
stats = Munch()
stats["start"] = r_start
stats["end"] = r_end
stats["l"] = r_end - r_start
stats["min"] = np.min(x[r_start:r_end])
stats["max"] = np.max(x[r_start:r_end])
stats["rng"] = stats["max"] - stats["min"]
stats["mean"] = np.mean(x[r_start:r_end])
stats["std"] = np.std(x[r_start:r_end])
stats["var"] = np.var(x[r_start:r_end])
stats["med"] = np.median(x[r_start:r_end])
stats["mad"] = scistat.median_abs_deviation(x[r_start:r_end])
return stats | cb52f6320952be13f9715cb2259b32996bdbb0da | 3,653,389 |
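# Illustrative usage of region_stats on a slice of a synthetic signal (the
# signal and region bounds are invented for the example).
import numpy as np
signal = np.sin(np.linspace(0.0, 4.0 * np.pi, 200)) + 0.1
stats = region_stats(signal, r_start=50, r_end=120)
print(stats["mean"], stats["std"], stats["mad"])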
import tensorflow.compat.v1 as tf  # assumed: the code targets the TF1-style tf.layers API
def resnet_v1_generator(block_fn, layers, num_classes,
data_format='channels_first', dropblock_keep_probs=None,
dropblock_size=None):
"""Generator for ResNet v1 models.
Args:
block_fn: `function` for the block to use within the model. Either
`residual_block` or `bottleneck_block`.
layers: list of 4 `int`s denoting the number of blocks to include in each
of the 4 block groups. Each group consists of blocks that take inputs of
the same resolution.
num_classes: `int` number of possible classes for image classification.
data_format: `str` either "channels_first" for `[batch, channels, height,
width]` or "channels_last for `[batch, height, width, channels]`.
dropblock_keep_probs: `list` of 4 elements denoting keep_prob of DropBlock
for each block group. None indicates no DropBlock for the corresponding
block group.
dropblock_size: `int`: size parameter of DropBlock.
Returns:
Model `function` that takes in `inputs` and `is_training` and returns the
output `Tensor` of the ResNet model.
Raises:
    ValueError: if dropblock_keep_probs is not None or a list of length 4.
"""
if dropblock_keep_probs is None:
dropblock_keep_probs = [None] * 4
if not isinstance(dropblock_keep_probs,
list) or len(dropblock_keep_probs) != 4:
raise ValueError('dropblock_keep_probs is not valid:', dropblock_keep_probs)
def model(inputs, is_training):
"""Creation of the model graph."""
inputs = conv2d_fixed_padding(
inputs=inputs, filters=64, kernel_size=7, strides=2,
data_format=data_format)
inputs = tf.identity(inputs, 'initial_conv')
inputs = batch_norm_relu(inputs, is_training, data_format=data_format)
inputs = tf.layers.max_pooling2d(
inputs=inputs, pool_size=3, strides=2, padding='SAME',
data_format=data_format)
inputs = tf.identity(inputs, 'initial_max_pool')
inputs = block_group(
inputs=inputs, filters=64, block_fn=block_fn, blocks=layers[0],
strides=1, is_training=is_training, name='block_group1',
data_format=data_format, dropblock_keep_prob=dropblock_keep_probs[0],
dropblock_size=dropblock_size)
inputs = block_group(
inputs=inputs, filters=128, block_fn=block_fn, blocks=layers[1],
strides=2, is_training=is_training, name='block_group2',
data_format=data_format, dropblock_keep_prob=dropblock_keep_probs[1],
dropblock_size=dropblock_size)
inputs = block_group(
inputs=inputs, filters=256, block_fn=block_fn, blocks=layers[2],
strides=2, is_training=is_training, name='block_group3',
data_format=data_format, dropblock_keep_prob=dropblock_keep_probs[2],
dropblock_size=dropblock_size)
inputs = block_group(
inputs=inputs, filters=512, block_fn=block_fn, blocks=layers[3],
strides=2, is_training=is_training, name='block_group4',
data_format=data_format, dropblock_keep_prob=dropblock_keep_probs[3],
dropblock_size=dropblock_size)
# The activation is 7x7 so this is a global average pool.
# TODO(huangyp): reduce_mean will be faster.
pool_size = (inputs.shape[1], inputs.shape[2])
inputs = tf.layers.average_pooling2d(
inputs=inputs, pool_size=pool_size, strides=1, padding='VALID',
data_format=data_format)
inputs = tf.identity(inputs, 'final_avg_pool')
inputs = tf.reshape(
inputs, [-1, 2048 if block_fn is bottleneck_block else 512])
inputs = tf.layers.dense(
inputs=inputs,
units=num_classes,
kernel_initializer=tf.random_normal_initializer(stddev=.01))
inputs = tf.identity(inputs, 'final_dense')
return inputs
model.default_image_size = 224
return model | 54e0d2eca651c50075916bac783ed871156469e7 | 3,653,390 |
import sys
from numpy import array  # assumed: `array(nu), array(coef)` below expects numpy's array, not the stdlib `array` module
def read_hotw(filename):
"""
Read cross-section file fetched from HITRAN-on-the-Web.
The format of the file line must be as follows:
nu, coef
Other lines are omitted.
"""
f = open(filename,'r')
nu = []
coef = []
for line in f:
pars = line.split()
try:
nu.append(float(pars[0]))
coef.append(float(pars[1]))
except:
if False:
print(sys.exc_info())
else:
pass
return array(nu),array(coef) | bda8d2419e48cbe6503e1c5af0da5fd265041995 | 3,653,391 |
def _sql_type(ptype):
"""Convert python type to SQL type"""
if "Union" in ptype.__class__.__name__:
assert len(ptype.__args__) == 2, "Cannot create sql column with more than one type."
assert type(None) in ptype.__args__, "Cannot create sql column with more than one type."
return f"{ptype.__args__[0].__name__} NULL"
elif ptype in SQLTypes.__dict__.values() and hasattr(ptype, "__name__"):
return f"{ptype.__name__} NOT NULL"
else:
raise ValueError(f"Cannot parse type {ptype}.") | 331734ce050ca261d2d78876ebd78540a088597b | 3,653,392 |
import typing as t
import numpy as np
def rescale_data(data: np.ndarray,
option: str,
args: t.Optional[t.Dict[str, t.Any]] = None) -> np.ndarray:
"""Rescale numeric fitted data accordingly to user select option.
Args:
data (:obj:`np.ndarray`): data to rescale.
option (:obj:`str`): rescaling strategy. Must be one in ``VALID_RESCA-
LE`` attribute.
args (:obj:`dict`, optional): additional arguments for the scaler. All
scaler used are from ``sklearn`` package, so you should consult
their documentation for a complete list of available arguments to
user customization. The used scalers for each available ``option``
are:
- ``min-max``: ``sklearn.preprocessing.MinMaxScaler``
- ``standard``: ``sklearn.preprocessing.StandardScale``
- ``robust``: ``sklearn.preprocessing.RobustScaler``
Returns:
np.ndarray: scaled ``data`` based in ``option`` correspondent strategy.
Raises:
ValueError: if ``option`` is not in ``VALID_RESCALE``.
Any exception caused by arguments from ``args`` into the
scaler model is also raised by this function.
"""
if option not in VALID_RESCALE:
raise ValueError("Unknown data rescaling option '{0}'. Please choose "
"one value among {1}".format(option, VALID_RESCALE))
if not args:
args = {}
scaler_model = _RESCALE_SCALERS.get(option, "min-max")(**args)
return scaler_model.fit_transform(data.astype(float)) | 5f885233c262fb2d766417e64f783f807212355e | 3,653,393 |
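# Hedged sketch of the module-level lookup tables rescale_data relies on,
# reconstructed from its docstring; the exact names and contents in the source
# module are assumptions. Defining them after the function is fine, since
# Python resolves module globals at call time.
import sklearn.preprocessing as skl_preprocessing
VALID_RESCALE = frozenset(("min-max", "standard", "robust"))
_RESCALE_SCALERS = {
    "min-max": skl_preprocessing.MinMaxScaler,
    "standard": skl_preprocessing.StandardScaler,
    "robust": skl_preprocessing.RobustScaler,
}
# Example: rescale_data(np.array([[1.0], [2.0], [3.0]]), "min-max")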
def extract_labels(text, spacy_model):
"""Extract entities using libratom.
Returns: core.Label list
"""
try:
document = spacy_model(text)
except ValueError:
logger.exception(f"spaCy error")
raise
labels = set()
for entity in document.ents:
label, _ = Label.objects.get_or_create(type=Label.IMPORTER, name=entity.label_)
labels.add(label)
return list(labels) | 782fdcb4bdd817b55a38c5efe03db676f0e00eed | 3,653,394 |
from typing import Callable
import click
def variant_option(command: Callable[..., None]) -> Callable[..., None]:
"""
An option decorator for a DC/OS variant.
"""
function = click.option(
'--variant',
type=click.Choice(['auto', 'oss', 'enterprise']),
default='auto',
help=(
'Choose the DC/OS variant. '
'If the variant does not match the variant of the given '
'installer, an error will occur. '
'Using "auto" finds the variant from the installer. '
'Finding the variant from the installer takes some time and so '
'using another option is a performance optimization.'
),
)(command) # type: Callable[..., None]
return function | 4c89dc15b46c9d147445ef458b721c7ce835cbe7 | 3,653,395 |
def GetSegByName(name):
"""
@return Address of the first byte in the Segment
with the provided name, or BADADDR
"""
for Segment in ida.Segments():
if ida.SegName(Segment) == name:
return Segment
return ida.BADADDR | 4b0353da187735095805b5a80bb0e23a2ce6491b | 3,653,396 |
import numpy as np
def sample_points_from_plateaus(all_plateaus, mode, stack_size=10, n_samples=1):
"""
Samples points from each plateau in each video
:param all_plateaus: dictionary containing all plateaus, keys are plateaus's ids, values are the plateau objects
:param mode: either `flow` or `rgb`
:param stack_size: optical flow stack size
:param n_samples: number of samples you want to draw from each plateau
:return: sampled_points, dictionary whose keys are video ids and whose values are dictionary containing the sampled
points as values as the plateaus ids as keys
"""
sampled_points = {}
h_stack_c = np.ceil(stack_size / 2)
for g_id, g in all_plateaus.items():
if mode == 'flow':
x_range = np.arange(h_stack_c+1, g.n - h_stack_c, dtype=np.int32)
else:
x_range = None # will take the whole x later for sampling
if g.video not in sampled_points:
sampled_points[g.video] = {}
sampled_points[g.video][g_id] = g.sample_points(n_samples, x_range=x_range)
return sampled_points | 1dd12721acc9b126d244902016e939792b220d1e | 3,653,397 |
def mobile_user_meeting_list(request):
"""
    Return the list of meetings for a user.
:param request:
:return:
"""
dbs = request.dbsession
user_id = request.POST.get('user_id', '')
start_date = request.POST.get('start_date', '')
end_date = request.POST.get('end_date', '')
error_msg = ''
    if not user_id:
        error_msg = 'User ID cannot be empty!'
    elif not start_date:
        error_msg = 'Start date cannot be empty!'
    elif not end_date:
        error_msg = 'End date cannot be empty!'
else:
meetings = mob_find_user_meetings(dbs, user_id, start_date, end_date)
if error_msg:
json_str = {
'status': False,
'meeting': '',
'error_msg': error_msg
}
else:
json_str = {
'status': True,
'meeting': meetings,
'error_msg':error_msg
}
resp = Response()
resp.headers['Access-Control-Allow-Origin'] = '*'
resp.json = json_str
return resp | 55e9a61a755ef957f4b6bf504b3efe721b13cfd7 | 3,653,398 |
import ctypes
def get_current_thread_cpu_time():
"""
<Purpose>
Gets the total CPU time for the currently executing thread.
<Exceptions>
An AssertionError will be raised if the underlying system call fails.
<Returns>
A floating amount of time in seconds.
"""
# Get the current thread handle
current_thread = _mach_thread_self()
# Allocate a structure
thread_info = thread_basic_info()
# Structure size
struct_size = ctypes.c_uint(THREAD_BASIC_INFO_SIZE)
# Make the system call
result = _thread_info(current_thread, THREAD_BASIC_INFO,ctypes.byref(thread_info), ctypes.byref(struct_size))
# Sum up the CPU usage
cpu_time = thread_info.user_time.seconds + thread_info.user_time.microseconds / 1000000.0
cpu_time += thread_info.system_time.seconds + thread_info.system_time.microseconds / 1000000.0
# Safety check, result should be 0
# Do the safety check after we free the memory to avoid leaks
assert(result == 0)
# Return the structure
return cpu_time | 6d83314e8ceee0336b6c0ed7f71fa49e89b24ca8 | 3,653,399 |