content (string, 35 to 762k chars) | sha1 (string, 40 chars) | id (int64, 0 to 3.66M)
---|---|---
import cuml
import cupy as cp
from cuml.metrics import confusion_matrix, roc_auc_score
# cupy_precision_score, convergence_tol, iterations and C are assumed to be defined elsewhere in the original module.
def build_and_predict_model(ml_input_df):
"""
Create a standardized feature matrix X and target array y,
fit a logistic regression, and return a dictionary of accuracy statistics.
"""
feature_names = ["college_education", "male"] + [
"clicks_in_%d" % i for i in range(1, 8)
]
X = ml_input_df[feature_names]
# Standardize input matrix
X = (X - X.mean()) / X.std()
y = ml_input_df["clicks_in_category"]
model = cuml.LogisticRegression(
tol=convergence_tol,
penalty="none",
solver="qn",
fit_intercept=True,
max_iter=iterations,
C=C,
)
model.fit(X, y)
#
# Predict and evaluate accuracy
# (Should be 1.0) at SF-1
#
results_dict = {}
y_pred = model.predict(X)
results_dict["auc"] = roc_auc_score(y.to_array(), y_pred.to_array())
results_dict["precision"] = cupy_precision_score(cp.asarray(y), cp.asarray(y_pred))
results_dict["confusion_matrix"] = confusion_matrix(
cp.asarray(y, dtype="int32"), cp.asarray(y_pred, dtype="int32")
)
results_dict["output_type"] = "supervised"
return results_dict | 2d3e192986c680d910a401c9a4da295595fe236e | 3,652,400 |
def codes_new_from_file(fileobj, product_kind, headers_only=False):
"""
@brief Load in memory a message from a file for a given product.
The message can be accessed through its id and will be available\n
until @ref grib_release is called.\n
\b Examples: \ref get_product_kind.py "get_product_kind.py"
@param fileobj python file object
@param product_kind one of CODES_PRODUCT_GRIB, CODES_PRODUCT_BUFR, CODES_PRODUCT_METAR, CODES_PRODUCT_GTS or CODES_PRODUCT_ANY
@param headers_only whether or not to load the message with the headers only
@return id of the message loaded in memory
@exception GribInternalError
"""
if product_kind == CODES_PRODUCT_GRIB:
return grib_new_from_file(fileobj, headers_only)
if product_kind == CODES_PRODUCT_BUFR:
return bufr_new_from_file(fileobj, headers_only)
if product_kind == CODES_PRODUCT_METAR:
return metar_new_from_file(fileobj, headers_only)
if product_kind == CODES_PRODUCT_GTS:
return gts_new_from_file(fileobj, headers_only)
if product_kind == CODES_PRODUCT_ANY:
return any_new_from_file(fileobj, headers_only)
raise Exception("Invalid product kind: " + str(product_kind)) | 47ed09dbf5bf59160dcab4c36dfd202ae7b190a5 | 3,652,401 |
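A minimal usage sketch for the dispatcher above, assuming the ecCodes-style constants and the `codes_release` helper mentioned in the docstring are in scope, and a hypothetical input file `sample.grib`; the underlying `*_new_from_file` helpers are assumed to return None at end of file, as in the ecCodes Python API.

    # Iterate over all GRIB messages in a file and release each handle when done.
    with open("sample.grib", "rb") as f:          # hypothetical input file
        while True:
            msgid = codes_new_from_file(f, CODES_PRODUCT_GRIB)
            if msgid is None:                     # end of file
                break
            try:
                pass                              # inspect the message via its id here
            finally:
                codes_release(msgid)              # free the message, as the docstring notes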
from datetime import datetime
def list_resources(path, long_format=None, relations=False):
"""List resources in a given DMF workspace.
Args:
path (str): Path to the workspace
long_format (bool): List in long format flag
relations (bool): Show relationships, in long format
Returns:
None
"""
t = ColorTerm()
d = DMF(path)
if long_format:
resources = list(d.find())
uuid_pfx = _uuid_prefix([r.uuid for r in resources])
fields = ("uuid", "name", "type", "modified", "created")
widths = (uuid_pfx, 30, 20, 19, 19)
colors = (t.green, t.white, t.yellow, t.white, t.white)
fmts = [f"{{:{w}s}}" for w in widths]
left_gutter = "| " if relations else ""
# table header
print(
" " * len(left_gutter)
+ t.bold
+ " ".join([f.format(v) for f, v in zip(fmts, fields)])
+ t.reset
)
def datestr(t):
return datetime.isoformat(datetime.fromtimestamp(t))
# table body
for r in resources:
values = list(getattr(r, k) for k in fields[:-2])
values.append(datestr(r.modified))
values.append(datestr(r.created))
if not values[1] and r.desc:
values[1] = r.desc[: widths[1]]
else:
values[1] = values[1][: widths[1]]
if uuid_pfx < 32:
values[0] = values[0][:uuid_pfx]
print(
left_gutter
+ " ".join([c + f.format(v) for c, f, v in zip(colors, fmts, values)])
+ t.reset
)
if relations and len(r.relations) > 0:
relitems = []
for rel in r.relations:
if rel.subject == r.uuid:
fmt = f"{t.white}{{p}}->{t.blue}{{o}}"
else:
fmt = f"{t.blue}{{s}}->{t.white}{{p}}"
item = fmt.format(
s=rel.subject[:uuid_pfx],
p=rel.predicate,
o=rel.object[:uuid_pfx],
)
relitems.append(item)
print(f"+-- {' / '.join(relitems)}")
else:
items = []
for r in d.find():
name_color = "w"
if r.name:
name = r.name
elif r.desc:
name = r.desc[:40]
name_color = t.blue
else:
name = r.uuid
name_color = t.green
item = f"{name_color}{name}{t.yellow}:{r.type}"
items.append(item)
if items:
columnized = _display_in_columns(items, max_line=t.width)
print(columnized + t.reset) | 904f04a008efb8add2d7744bfe1dc71009faff17 | 3,652,402 |
import numpy as np
def calc_streamtemp(tas):
""" Global standard regression equation from Punzet et al. (2012)
Calculates grid cell stream temperature based on air temperature
Both input and output temperature are in K"""
# global constants, taken from Punzet et al., 2012
c0 = 32; c1 = -0.13; c2 = 1.94
tas_C = tas - 273.15
streamtemp_C = c0/(1+np.exp(c1*tas_C+c2))
streamtemp = streamtemp_C + 273.15
return streamtemp | 493d1ca3b3543db9bfabc8c0e2a4f013da794028 | 3,652,403 |
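A quick numeric check of the regression above (a sketch, not part of the original source): an air temperature of 293.15 K (20 degC) maps to roughly 294.25 K of stream temperature (about 21.1 degC).

    import numpy as np

    # Same regression as calc_streamtemp above, evaluated at tas = 293.15 K (20 degC).
    tas_C = 293.15 - 273.15
    streamtemp_C = 32 / (1 + np.exp(-0.13 * tas_C + 1.94))   # exp(-0.66) ~= 0.517 -> ~21.1 degC
    print(streamtemp_C + 273.15)                             # ~294.25 K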
def _process(proc_data):
"""
Final processing to conform to the schema.
Parameters:
proc_data: (List of Dictionaries) raw structured data to process
Returns:
List of Dictionaries. Structured data to conform to the schema.
"""
# nothing more to process
return proc_data | 7585a8810667f39d6d6a787f2617590aee1ec8cf | 3,652,404 |
import requests
def get_station_info(my_token, station_id):
"""
This function gets all the information on the station
----------
Input:
my_token (str)
token generated from "token request page"
station_id (str)
----------
Output:
dictionary of station information
"""
station_url = '{}stations/{}'.format(base_url, station_id)
return requests.get(station_url, headers = {'token': my_token}).json() | 9abcb5b74cb0be45396c1b182845467e3fc0829c | 3,652,405 |
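The `base_url` global is not defined in the snippet above; a usage sketch assuming the NOAA Climate Data Online v2 endpoint as one plausible value, with a placeholder token and a hypothetical station id.

    # Assumed module-level constant referenced by get_station_info above.
    base_url = 'https://www.ncdc.noaa.gov/cdo-web/api/v2/'

    # Hypothetical token and station id:
    # info = get_station_info('YOUR_TOKEN', 'GHCND:USW00014733')
    # print(info.get('name'))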
import sys
import traceback
def json_response(f, *args, **kwargs):
"""Wrap a view in JSON.
This decorator runs the given function and looks out for ajax.AJAXError's,
which it encodes into a proper HttpResponse object. If an unknown error
is thrown it's encoded as a 500.
All errors are then packaged up with an appropriate Content-Type and a JSON
body that you can inspect in JavaScript on the client. They look like:
{
"message": "Error message here.",
"code": 500
}
Please keep in mind that raw exception messages could very well be exposed
to the client if a non-AJAXError is thrown.
"""
try:
result = f(*args, **kwargs)
except AJAXError as e:
result = e.get_response()
except Exception as e:
type, message, trace = sys.exc_info()
if settings.DEBUG:
tb = [{'file': l[0], 'line': l[1], 'in': l[2], 'code': l[3]} for l in traceback.extract_tb(trace)]
result = AJAXError(500, message, traceback=tb).get_response()
else:
result = AJAXError(500, message).get_response()
result['Content-Type'] = 'application/json'
return result | fd80b5e3d259b9ef69f5278936d8d0dbccae6636 | 3,652,406 |
def check_fit_input(coordinates, data, weights, unpack=True):
"""
Validate the inputs to the fit method of gridders.
Checks that the coordinates, data, and weights (if given) all have the same
shape. Weights arrays are raveled.
Parameters
----------
coordinates : tuple of arrays
Arrays with the coordinates of each data point. Should be in the
following order: (easting, northing, vertical, ...).
data : array or tuple of arrays
The data values of each data point. Data can have more than one
component. In such cases, data should be a tuple of arrays.
weights : None or array
If not None, then the weights assigned to each data point.
Typically, this should be 1 over the data uncertainty squared.
If the data has multiple components, the weights have the same number
of components.
unpack : bool
If False, data and weights will be tuples always. If they are single
arrays, then they will be returned as a 1-element tuple. If True, will
unpack the tuples if there is only 1 array in each.
Returns
-------
validated_inputs
The validated inputs in the same order. If weights are given, will
ravel the array before returning.
"""
data = check_data(data)
weights = check_data(weights)
coordinates = check_coordinates(coordinates)
if any(i.shape != coordinates[0].shape for i in data):
raise ValueError(
"Data arrays must have the same shape {} as coordinates. Data shapes: {}.".format(
coordinates[0].shape, [i.shape for i in data]
)
)
if any(w is not None for w in weights):
if len(weights) != len(data):
raise ValueError(
"Number of data '{}' and weights '{}' must be equal.".format(
len(data), len(weights)
)
)
if any(i.size != j.size for i in weights for j in data):
raise ValueError("Weights must have the same size as the data array.")
weights = tuple(i.ravel() for i in weights)
else:
weights = tuple([None] * len(data))
if unpack:
if len(weights) == 1:
weights = weights[0]
if len(data) == 1:
data = data[0]
return coordinates, data, weights | eef22bb026aad657f096ac4de00a6d2a5a6fa0f8 | 3,652,407 |
import logging
import sys
def get_logger(name, handler=logging.StreamHandler(sys.stderr), level=logging.DEBUG):
"""
encapsulate get logger operation
:param name: logger name
:param handler: logger handler, default is stderr
:param level: logger level, default is debug
:return: logger
"""
logger = logging.getLogger(name)
handler.setFormatter(logging.Formatter('[%(asctime)s] [{}] %(message)s'.format(name)))
logger.addHandler(handler)
logger.setLevel(level)
return logger | 576827fa36ee4e2dae1073653612c1604b953200 | 3,652,408 |
from pandas import DataFrame
def get_local_beneficiaries(map_lat: float, map_lng: float, map_zoom: int) -> DataFrame:
"""Return only projects that are fairly close to the map's centre."""
return beneficiaries[
(map_lat - 100 / map_zoom < beneficiaries.lat) &
(beneficiaries.lat < map_lat + 100 / map_zoom) &
(map_lng - 100 / map_zoom < beneficiaries.lng) &
(beneficiaries.lng < map_lng + 100 / map_zoom)
][:500] | 30e0d7255a70c15a25f499e867974c711ae3f750 | 3,652,409 |
import argparse
def parse_args(args):
"""
Parse command line parameters
:param args: command line parameters as list of strings
:return: command line parameters as :obj:`argparse.Namespace`
"""
parser = argparse.ArgumentParser(
description="Build html reveal.js slides from markdown in docs/ dir")
parser.add_argument(
'-v',
'--verbose',
help='Whether to show progress messages on stdout, including HTML',
action='store_true')
parser.add_argument(
'--version',
help='print twip package version and exit.',
action='version',
version='twip {ver}'.format(ver=__version__))
parser.add_argument(
'-b',
'--blog_path',
help='Path to source markdown files. Must contain an `images` subdir',
default=BLOG_PATH)
parser.add_argument(
'-s',
'--slide_path',
help='Path to dir for output slides (HTML). An images subdir will be added. A slides subdir should already exist.',
default=DOCS_PATH)
parser.add_argument(
'-p',
'--presentation',
help='Source markdown base file name (without .md extension). The HTML slides will share the same basename.',
default='2015-10-27-Hacking-Oregon-Hidden-Political-Connections')
return parser.parse_args(args) | ce1d6f0132876263bd7af5456c8a3596a3a49bdf | 3,652,410 |
import numpy as np
def create_atoms(atoms, atom_dict):
"""Transform the atom types in a molecule (e.g., H, C, and O)
into the indices (e.g., H=0, C=1, and O=2).
"""
atoms = [atom_dict[a] for a in atoms]
return np.array(atoms) | 10f0e0d0669c2148db7cdcd487a6b72ade2e2f06 | 3,652,411 |
def uint2float(A,bits,x_min,x_max=None):
"""
Converts uint[bits] to the corresponding floating point value in the range [x_min,x_max].
"""
if x_max is None:
x_min,x_max = x_range(x_min)
return x_min + (x_max - x_min) * A / ((1 << bits) - 1) | 242b72824309a7e0724c42f29e259e52b11a90d2 | 3,652,412 |
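A small worked example of the dequantisation above, assuming the function is in scope (x_range is only needed when x_max is omitted): with bits=8, code 0 maps to x_min and code 255 maps to x_max.

    # uint2float(A, bits, x_min, x_max) = x_min + (x_max - x_min) * A / (2**bits - 1)
    assert uint2float(0,   8, -1.0, 1.0) == -1.0
    assert uint2float(255, 8, -1.0, 1.0) ==  1.0
    print(uint2float(128, 8, -1.0, 1.0))   # ~0.0039, just above the midpoint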
def partCmp(verA: str, verB: str) -> int:
"""Compare parts of a semver.
Args:
verA (str): lhs part to compare
verB (str): rhs part to compare
Returns:
int: 0 if equal, 1 if verA > verB and -1 if verA < verB
"""
if verA == verB or verA == "*" or verB == "*":
return 0
if int(verA) > int(verB):
return 1
return -1 | d9417ce482bf0c2332175412ba3125435f884336 | 3,652,413 |
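A usage sketch for the comparator above, assuming it is in scope; wildcards compare equal to anything and the comparison is numeric, not lexicographic.

    assert partCmp("1", "2") == -1    # 1 < 2
    assert partCmp("10", "2") == 1    # numeric comparison, not string comparison
    assert partCmp("*", "7") == 0     # wildcard matches any part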
import io
from urllib.parse import unquote_plus
from pyspark.sql.functions import col
def OrigPosLemConcordancer(sentences, annots, textMnt, wordType="word", nrows=10):
"""Output HTML for the text (including lemma and pos tags) identified by the AQAnnotation (typically a sentence annotation).
Below the sentence (in successive rows) output the original terms, parts of speech, and lemma terms for the text identified by the AQAnnotation.
Args:
sentences: Sentence annotations that you would like to display.
annots: The Dataframe of AQAnnotations that will contain the the AQAnnotations (orig, lemma, pos) for the above sentences
textMnt: Path for the str files. The sentence annotations must be for documents contained in these str files.
wordType: The annotType that identies the AQAnnotation in the above annotations.
nrows: Number of sentences to display
Returns:
HTML
"""
def _buildOrigPosLemmaRow(entryType, entry):
return ("<tr>" +
"<td>" + entryType + "</td>" +
"<td bgcolor='grey'/>" +
"<td bgcolor='grey'/>" +
entry +
"</tr>")
sentenceAnnots = sentences.sort("docId","startOffset").limit(nrows).collect()
tmpStr = ""
docId = ""
docText = ""
text= ""
lastDoc = ""
curDoc = ""
# Get the TextAnnotations (for the specified annotType) for each sentence
for sentence in sentenceAnnots:
textAnnots = annots.filter((col("docId") == sentence.docId) &
(col("annotType") == wordType) &
(col("startOffset") >= sentence.startOffset) &
(col("endOffset") <= sentence.endOffset)) \
.sort("startOffset") \
.collect()
# Get the raw text for the sentence annotation
if docId != sentence.docId:
docId = sentence.docId
try:
with io.open(textMnt + sentence.docId,'r',encoding='utf-8') as f:
docText = f.read()
except Exception as ex:
print(ex)
docText = ""
if docText != "":
text = docText[sentence.startOffset:sentence.endOffset]
else:
text = ""
tmpStr += "<table border='1' style='font-family: monospace;table-layout: fixed;'><tr>"
tmpStr += ("<td>" + sentence.docId + "</td>")
tmpStr += ("<td>" + str(sentence.startOffset) + "</td>")
tmpStr += ("<td>" + str(sentence.endOffset) + "</td>")
tmpStr += ("<td colspan='" + str(len(textAnnots)) + "'>" + text + "</td>")
tmpStr += "</tr>"
# Get original row
origEntry = ""
for annot in textAnnots:
if (annot.properties != None) and ('orig' in annot.properties) and (len(annot.properties['orig']) > 0):
origEntry += ("<td>" + unquote_plus(annot.properties['orig']) + "</td>")
else:
origEntry += ("<td> </td>")
tmpStr += _buildOrigPosLemmaRow('orig',origEntry)
# Get pos row
posEntry = ""
for annot in textAnnots:
if (annot.properties != None) and ('pos' in annot.properties) and (len(annot.properties['pos']) > 0):
posEntry += ("<td>" + unquote_plus(annot.properties['pos']) + "</td>")
else:
posEntry += ("<td> </td>")
tmpStr += _buildOrigPosLemmaRow('pos',posEntry)
# Get lemma row
lemmaEntry = ""
for annot in textAnnots:
if (annot.properties != None) and ('lemma' in annot.properties) and (len(annot.properties['lemma']) > 0):
lemmaEntry += ("<td>" + unquote_plus(annot.properties['lemma']) + "</td>")
else:
lemmaEntry += ("<td> </td>")
tmpStr += _buildOrigPosLemmaRow('lemma',lemmaEntry)
tmpStr += "</table><p/><p/><p/>"
return "<html><body>" + tmpStr + "</body></html>" | f11f0240cbee58954901bd57e728cd54ab51b6dd | 3,652,414 |
import argparse
def get_args(**kwargs):
"""Generate cli args
Arguments:
kwargs[dict]: Pair value in which key is the arg and value a tuple with the help message and default value
Returns:
Namespace: Args namespace object
"""
parser = argparse.ArgumentParser()
for key, (help, default) in kwargs.items():
parser.add_argument("--{}".format(key), help=help, default=default)
return parser.parse_args() | f2cade1e0ec3b5a1fccd7ea94090c719de2849b6 | 3,652,415 |
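A hypothetical invocation of the helper above: each keyword becomes a `--flag` with the given help string and default, and the values come back on the returned namespace (parse_args reads sys.argv, so run this in a script).

    args = get_args(
        epochs=("number of training epochs", 10),
        lr=("learning rate", 0.001),
    )
    print(args.epochs, args.lr)   # e.g. "10 0.001" when no flags are passed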
def show_hidden_article(request, id):
"""
展示隐藏的文章
"""
db = connect_mongodb_database(request)
article = db.articles.find_one({
'Id':int(id), 'IsPublic': False
})
if article is None:
return HttpResponse(404)
return render_admin_and_back(request, 'show-hidden-article.html', {
'page': u'隐私文章 - '+ article['Title'],
'article': article,
}) | 5f9f9c3bc21ed267d8c13226d51a7f44877af976 | 3,652,416 |
def MakeNormalPmf(mu, sigma, num_sigmas, n=201):
"""Makes a PMF discrete approx to a Normal distribution.
mu: float mean
sigma: float standard deviation
num_sigmas: how many sigmas to extend in each direction
n: number of values in the Pmf
returns: normalized Pmf
"""
pmf = Pmf()
low = mu - num_sigmas * sigma
high = mu + num_sigmas * sigma
for x in np.linspace(low, high, n):
p = EvalNormalPdf(x, mu, sigma)
pmf.Set(x, p)
pmf.Normalize()
return pmf | 9bca22cfd3b3b94fa233be9f78696361d9e36726 | 3,652,417 |
import os
def _val_from_env(env, attr):
"""Transforms env-strings to python."""
val = os.environ[env]
if attr == 'rules':
val = _rules_from_env(val)
elif attr == 'wait_command':
val = int(val)
elif attr in ('require_confirmation', 'no_colors'):
val = val.lower() == 'true'
return val | 023dc8eea2c9ed034f19c6ba89d57a815bc52903 | 3,652,418 |
import numpy as np
def histogram(a, bins, ranges):
"""
Examples
--------
>>> x = np.random.uniform(0., 1., 100)
>>> H, xedges = np.histogram(x, bins=5, range=[0., 1.])
>>> Hn = histogram(x, bins=5, ranges=[0., 1.])
>>> assert np.all(H == Hn)
"""
hist_arr = np.zeros((bins,), dtype=a.dtype)
return _hist1d_numba_seq(hist_arr, a, bins, np.asarray(ranges)) | a53a8204180c8f9d2a3fb36595d14c07da262cbd | 3,652,419 |
import os
import test
def testfile(path, shell='/bin/sh', indent=2, env=None, cleanenv=True,
debug=False, testname=None):
"""Run test at path and return input, output, and diff.
This returns a 3-tuple containing the following:
(list of lines in test, same list with actual output, diff)
diff is a generator that yields the diff between the two lists.
If a test exits with return code 80, the actual output is set to
None and diff is set to [].
Note that the TESTDIR, TESTFILE, and TESTSHELL environment
variables are available to use in the test.
:param path: Path to test file
:type path: bytes or str
:param shell: Shell to run test in
:type shell: bytes or str or list[bytes] or list[str]
:param indent: Amount of indentation to use for shell commands
:type indent: int
:param env: Optional environment variables for the test shell
:type env: dict or None
:param cleanenv: Whether or not to sanitize the environment
:type cleanenv: bool
:param debug: Whether or not to run in debug mode (don't capture stdout)
:type debug: bool
:param testname: Optional test file name (used in diff output)
:type testname: bytes or None
:return: Input, output, and diff iterables
:rtype: (list[bytes], list[bytes], collections.Iterable[bytes])
"""
f = open(path, 'rb')
try:
abspath = os.path.abspath(path)
env = env or os.environ.copy()
env['TESTDIR'] = envencode(os.path.dirname(abspath))
env['TESTFILE'] = envencode(os.path.basename(abspath))
if testname is None: # pragma: nocover
testname = os.path.basename(abspath)
return test(f, shell, indent=indent, testname=testname, env=env,
cleanenv=cleanenv, debug=debug)
finally:
f.close() | 05a397c09228fddfa61aba554f6d89cc62c5c59b | 3,652,420 |
from ast import parse as ast_parse
def is_valid_python_code(src_string: str):
"""True if, and only if, ``src_string`` is valid python.
Valid python is defined as 'ast.parse(src_string)` doesn't raise a ``SyntaxError``'
"""
try:
ast_parse(src_string)
return True
except SyntaxError:
return False | 03ee9d915797ba1cfcbcaf8630d38df529744f1b | 3,652,421 |
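A trivial usage sketch, assuming the function above is in scope.

    assert is_valid_python_code("x = 1 + 2")
    assert not is_valid_python_code("def f(:")   # SyntaxError -> False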
def rss_format_export_post():
"""
:return:
"""
try:
payload = request.get_json(force=True) # post data in json
except:
payload = dict(request.form) # post data in form encoding
if 'link' in payload:
link = read_value_list_or_not(payload, 'link')
else:
link = ''
results, status = export_post(request, 'RSS')
if status == 200:
return return_rss_format_export(solr_data=results, link=link)
return return_response(results, status) | 2ddc5b814cabec3fd84f024d44cb04b0063890c5 | 3,652,422 |
from typing import Union
from typing import Callable
from typing import Iterable
import asyncio
def on_command(name: Union[str, CommandName_T],
*,
logger: Logger,
checkfunc: Callable[[CommandSession], bool] = None,
wait_for: Callable[[], bool] = None,
cooldown: int = 0,
use_default_infolog: bool = True,
aliases: Union[Iterable[str], str] = (),
permission: int = nonebot.permission.EVERYBODY,
only_to_me: bool = True,
privileged: bool = False,
shell_like: bool = False,
**kwargs) -> Callable:
"""on_command装饰器。被装饰的函数应当接受两个参数session及bot。
参数:
name:命令名称。
logger:日志器。
checkfunc:检查是否应该工作的函数。函数执行返回True则执行功能,否则退出。
wait_for: 等待函数。函数执行返回为True后再执行功能,否则等待1秒直到返回为True。
cooldown:命令运行后的冷却时间。冷却时间内无法再次运行。
use_default_infolog:是否使用默认info级别的日志记录。
aliases:命令别名。
permission:命令所需权限。
only_to_me:是否仅响应私聊或者at机器人的指令。
privileged:是否允许复数次执行。
shell_like:是否是类shell指令。
"""
def deco(func) -> Callable:
@wraps(func)
async def wrapper(session: CommandSession):
if session.event['user_id'] in BLACKLIST['user']:
return
if session.event['message_type'] == 'group' and session.event['group_id'] in BLACKLIST['group']:
return
if checkfunc is not None:
if not ((await checkfunc(session) if asyncio.iscoroutinefunction(checkfunc) else checkfunc(session))):
return
if wait_for is not None:
count = 0
while not ((await wait_for()) if asyncio.iscoroutinefunction(wait_for) else wait_for()):
await asyncio.sleep(1)
count += 1
if count >= _wait_for_maxtime:
raise WaitForTimeoutError
funcname = func.__module__ + '.' + func.__name__
if funcname in _cooldown_functions[session.self_id].keys():
return
try:
await func(session, bot)
if use_default_infolog:
if session.event['message_type'] == 'group':
logger.info(f'<Command> Group {session.event["group_id"]} user {session.event["user_id"]} call {funcname} successfully')
else:
logger.info(f'<Command> Private user {session.event["user_id"]} call {funcname} successfully')
except (_PauseException, _FinishException, SwitchException) as e:
raise e
except Warning as w:
logger.warning(f'<Command> Warning {type(w)} occured while {funcname} is running.')
except (ApiNotAvailable, RetryExhaustedError) as a:
logger.error(f'<Command> Error {type(a)} occured while {funcname} is running.')
except ActionFailed as a:
logger.error(f'<Command> Error {type(a)} occured while {funcname} is running, retcode = {a.retcode}.')
except Exception as e:
logger.exception(f'<Command> Error {type(e)} occured while {funcname} is running.')
if cooldown > 0:
if funcname not in _cooldown_functions[session.self_id]:
_cooldown_functions[session.self_id][funcname] = cooldown
return nonebot.on_command(
name,
aliases=aliases,
permission=permission,
only_to_me=only_to_me,
privileged=privileged,
shell_like=shell_like,
)(debuglog(logger)(wrapper))
return deco | 5550188f41a3a8fcfee8ca0fc541f05222d6454e | 3,652,423 |
import os
def SetUp(filename, rel_path=RELATIVE_TEST_PATH):
""" SetUp returns a parsed C Program."""
if not os.path.exists(PICKLE_FILE):
KVStore.CreateNewStore(PICKLE_FILE, redhawk.GetVersion())
return G.GetLanguageSpecificTree(os.path.join(rel_path, filename),
PICKLE_FILE, language='c') | 259d7f2e8abe61a409c14359b155b25170e70762 | 3,652,424 |
def setup_agents(model, initial_locations):
"""Load the simulated initial locations and return a list
that holds all agents.
"""
initial_locations = initial_locations.reshape(2, model["n_types"], 30000)
agents = []
for typ in range(model["n_types"]):
for i in range(model["n_agents_by_type"][typ]):
agents.append(
Agent(
typ=typ,
initial_location=initial_locations[typ, :, i],
n_neighbours=model["n_neighbours"],
require_same_type=model["require_same_type"],
max_moves=model["max_moves"],
)
)
return agents | 40368819f0968b3fcaed6a1953f7e4fec453f471 | 3,652,425 |
async def get_active_infraction(
ctx: Context,
user: MemberOrUser,
infr_type: str,
send_msg: bool = True
) -> t.Optional[dict]:
"""
Retrieves an active infraction of the given type for the user.
If `send_msg` is True and the user has an active infraction matching the `infr_type` parameter,
then a message for the moderator will be sent to the context channel letting them know.
Otherwise, no message will be sent.
"""
log.trace(f"Checking if {user} has active infractions of type {infr_type}.")
active_infractions = await ctx.bot.api_client.get(
'bot/infractions',
params={
'active': 'true',
'type': infr_type,
'user__id': str(user.id)
}
)
if active_infractions:
# Checks to see if the moderator should be told there is an active infraction
if send_msg:
log.trace(f"{user} has active infractions of type {infr_type}.")
await send_active_infraction_message(ctx, active_infractions[0])
return active_infractions[0]
else:
log.trace(f"{user} does not have active infractions of type {infr_type}.") | 63361319b75e072489544e2956d21ff5cbe08590 | 3,652,426 |
import numpy as np
def __compute_optical_function_vs_s(transporter, particles, optical_function_name):
# todo Adjust
"""
Compute values of an optical function vs s for one particle, whose coordinates are x_min, theta_x_min, ...
or x_mean - delta_x, ...
:param transporter: transport function
:param particles: BunchConfiguration object
:param optical_function_name: String, name of optical function, as specified in matrix_indexes
:return: matrix with rows: s and optical function
"""
particles = transporter(particles)
result = np.append(particles["end"].T[tmi.ptc_twiss[Parameters.S]].reshape((-1, 1)),
particles["end"].T[tmi.ptc_twiss[optical_function_name]].reshape((-1, 1)),
axis=1)
return result | f4a68dad10184cc8c1c6185e49ef2e488a831fbb | 3,652,427 |
def plot_arb_images(label, data, label_string):
"""
Neatly displays arbitrary numbers of images from the camera
returns fig
Parameters:
-----------
label: array of values that each image is labeled by, e.g. time
data: array of arrays of image data
label_string: string describing label, e.g. 's'
"""
length = len(data)
columns = 10
if length % columns != 0:
rows = length // columns + 1
else:
rows = length // columns
fig = _p.figure()
fig.set_figheight(rows * 5)
fig.set_figwidth(10)
for i in range(length):
ax = fig.add_subplot(rows, columns, i + 1)
ax.matshow(data[i], vmin=_n.min(data), vmax=_n.max(data))
ax.set_title('%s\n%.1f%s' % (i, label[i], label_string))
if i % 10 == 0:
ax.set_xticks([])
ax.set_ylabel('pixels')
else:
ax.set_xticks([])
ax.set_yticks([])
fig.tight_layout()
return fig | 56e22d9f7a0651d56e93873b95c2228162f4a602 | 3,652,428 |
def listGslbServer(**kargs):
""" List the Servers of KT Cloud GSLB.
* Args:
- zone(String, Required) : [KR-CA, KR-CB, KR-M, KR-M2]
* Examples: print(gslb.listGslbServer(zone='KR-M'))
"""
my_apikey, my_secretkey = c.read_config()
if not 'zone' in kargs:
return c.printZoneHelp()
ZoneName = kargs['zone']
del kargs['zone']
kargs['zoneid'] = c.getzoneidbyhname(ZoneName)
M2Bool = c.IsM2(ZoneName)
baseurl = c.geturl(ctype='gslb', m2=M2Bool)
kargs['command'] = 'listGslbServer'
kargs['response'] = 'json'
kargs['apikey'] = my_apikey
return c.makerequest(kargs, baseurl, my_secretkey) | 18d8f0e6699b7cb3080eef5a3d040420ea45329d | 3,652,429 |
import cv2
import numpy
def imencode(image, pix_fmt=IM_RGB, quality=DEFAULT_QUALITY):
"""
Encode image into jpeg codec
Adapt convert image pixel color with pix_fmt
Parameters
----------
image: source
pix_fmt: format of pixel color. Default: RGB
quality: JPEG quality image.
Returns
-------
Buffer of image encoded
"""
check_type("image", image, numpy.ndarray)
if pix_fmt == IM_RGB:
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
_, buf = cv2.imencode('.jpeg', image, params=[cv2.IMWRITE_JPEG_QUALITY, int(quality)])
return buf | 65750d176275e56da55c0660370a56f44baa6e48 | 3,652,430 |
import numpy as np
def split_train_test_data(X, Y, train_rate):
"""
Split the dataset into a training set and a test set.
:param X: features of the dataset
:param Y: labels of the dataset
:param train_rate: proportion of samples assigned to the training set
:return: training features; training labels; test features; test labels
"""
number = len(X)
number_train = int(number * train_rate)
number_test = number - number_train
train_X = []
train_Y = []
test_X = []
test_Y = []
for i in range(number):
if number_test > 0:
if number_train == 0 or np.random.randint(2) == 0:
number_test -= 1
test_X.append(X[i])
test_Y.append(Y[i])
else:
number_train -= 1
train_X.append(X[i])
train_Y.append(Y[i])
else:
number_train -= 1
train_X.append(X[i])
train_Y.append(Y[i])
return np.array(train_X), np.array(train_Y), np.array(test_X), np.array(test_Y) | 7943278bc662968f8e019368ed8744d9e2a23929 | 3,652,431 |
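A usage sketch, assuming the function above is in scope; which rows land in which split is random, but the split sizes are exact.

    import numpy as np

    X = np.arange(20).reshape(10, 2)        # 10 samples, 2 features
    Y = np.arange(10)
    train_X, train_Y, test_X, test_Y = split_train_test_data(X, Y, train_rate=0.8)
    print(train_X.shape, test_X.shape)      # (8, 2) (2, 2)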
import numpy as np
def nonsingular_concat(X, vector):
"""Appends vector to matrix X iff the resulting matrix is nonsingular.
Args:
X (np.array): NxM Matrix to be appended to
vector (np.array): Nx1 vector to be appended to X
Returns:
new_X (np.array): Nx(M+1) Matrix or None
"""
# Cast vector to matrix
vector = np.atleast_2d(vector)
# Append vector as new row at bottom of matrix
new_X = np.concatenate((X, vector), axis=0)
# Check if matrix is still non-singular
if new_X.shape[0] == np.linalg.matrix_rank(new_X):
return new_X
else:
return None | 68a1f6f8b0ea5e14fbbcacc618f2d19b07814813 | 3,652,432 |
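A usage sketch, assuming the function above is in scope: the vector is appended as a new row only if the result keeps full row rank.

    import numpy as np

    X = np.array([[1.0, 0.0]])                            # 1 x 2 matrix
    print(nonsingular_concat(X, np.array([0.0, 1.0])))    # appended: result has rank 2
    print(nonsingular_concat(X, np.array([2.0, 0.0])))    # None: new row is linearly dependent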
from typing import Union
def get_oxidation_state(element: Union[str, Element]) -> int:
"""Get a typical oxidation state
If it doesn't exist in the database, 0 is returned.
Args:
element (str/ Element): Input element
Return:
Oxidation state of the element.
"""
try:
return oxidation_state_dict[str(element)]
except KeyError:
logger.warning(f"Oxidation state: {element} is unavailable. Set 0.")
return 0 | 24187bd8eb5c6d5794f1e287c676b0f16c170d55 | 3,652,433 |
from typing import Callable
def __quality_indexes(
graph: nx.Graph,
communities: object,
scoring_function: Callable[[object, object], float],
summary: bool = True,
) -> object:
"""
:param graph: NetworkX/igraph graph
:param communities: NodeClustering object
:param summary: boolean. If **True** it is returned an aggregated score for the partition is returned, otherwise individual-communitys ones. Default **True**.
:return: If **summary==True** a FitnessResult object, otherwise a list of floats.
"""
graph = convert_graph_formats(graph, nx.Graph)
values = []
for com in communities.communities:
community = nx.subgraph(graph, com)
values.append(scoring_function(graph, community))
if summary:
return FitnessResult(
min=min(values), max=max(values), score=np.mean(values), std=np.std(values)
)
return values | a328ec08bef43248c6e8fd7a0f11901801b0e2a5 | 3,652,434 |
from re import DEBUG
import os
def serve_scripts(scripts):
""" Combines one or more script files into one and embeds them in the
small autoCSP JavaScript framework. """
debug = DEBUG and not LOCKED_MODE
views_path = os.path.expanduser(PATHS['VIEWS'])
with open(views_path + 'static/sha256.js', 'r') as f:
sha256js = f.read()
template = lib.webinterface.render_template('framework.js', debug=debug,
scripts=scripts,
sha256js=sha256js)
return wrap_static(template, '.js') | 27c4faedd7de71c1943250706c8ffca2e30251b3 | 3,652,435 |
def readFromDB_DSC_authorityKey(authorityKey: bytes, connection: Connection) -> DocumentSignerCertificate:
"""Reading from database"""
try:
logger.info("Reading DSC object from database with authority key.")
return connection.getSession().query(DocumentSignerCertificateStorage).filter(DocumentSignerCertificateStorage.authorityKey == authorityKey).all()
except Exception:
raise DocumentSignerCertificateStorageError("Problem with reading the object") | 34cff097201af92d337568094b8d48577f7e440f | 3,652,436 |
def history_report(history, config=None, html=True):
"""
Test a model and save a history report.
Parameters
----------
history : memote.HistoryManager
The manager grants access to previous results.
config : dict, optional
The final test report configuration.
html : bool, optional
Whether to render the report as full HTML or JSON (default True).
"""
if config is None:
config = ReportConfiguration.load()
report = HistoryReport(history=history, configuration=config)
if html:
return report.render_html()
else:
return report.render_json() | 8ba956c959b72f37b570b91ea4f01287eb8783c6 | 3,652,437 |
def derive_from_dem(dem):
"""derive slope and flow direction from a DEM.
Results are returned in a dictionary that contains references to
ArcPy Raster objects stored in the "in_memory" (temporary) workspace
"""
# set the snap raster for subsequent operations
env.snapRaster = dem
# calculate flow direction for the whole DEM
flowdir = FlowDirection(in_surface_raster=dem, force_flow="NORMAL")
flow_direction_raster = so("flowdir","random","in_memory")
flowdir.save(flow_direction_raster)
# calculate slope for the whole DEM
slope = Slope(in_raster=dem, output_measurement="PERCENT_RISE", method="PLANAR")
slope_raster = so("slope","random","in_memory")
slope.save(slope_raster)
return {
"flow_direction_raster": Raster(flow_direction_raster),
"slope_raster": Raster(slope_raster),
} | 4563e4ccd6695865c05c7a945dcc6244fb8af012 | 3,652,438 |
from typing import Optional
def from_error_details(error: str, message: str, stacktrace: Optional[str]) -> BidiException:
"""Create specific WebDriver BiDi exception class from error details.
Defaults to ``UnknownErrorException`` if `error` is unknown.
"""
cls = get(error)
return cls(error, message, stacktrace) | 238566bcf1092b685deebcadcf60c1905e585cb9 | 3,652,439 |
def tangential_proj(u, n):
"""
See for instance:
https://link.springer.com/content/pdf/10.1023/A:1022235512626.pdf
"""
return (ufl.Identity(u.ufl_shape[0]) - ufl.outer(n, n)) * u | 92c8eafa222418221b2fb0e1b242dbd76696407d | 3,652,440 |
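For reference (not in the original snippet), the operator built above is the usual tangential projector; for a unit outward normal n it can be written as:

    P_t\,u = \left(I - n\,n^{\mathsf{T}}\right) u, \qquad n \cdot (P_t\,u) = 0 .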
def _RemoveEdges(tris, match):
"""tris is list of triangles.
match is as returned from _MaxMatch or _GreedyMatch.
Return list of (A,D,B,C) resulting from deleting edge (A,B) causing a merge
of two triangles; append to that list the remaining unmatched triangles."""
ans = []
triset = set(tris)
while len(match) > 0:
(_, e, tl, tr) = match.pop()
(a, b) = e
if tl in triset:
triset.remove(tl)
if tr in triset:
triset.remove(tr)
c = _OtherVert(tl, a, b)
d = _OtherVert(tr, a, b)
if c is None or d is None:
continue
ans.append((a, d, b, c))
return ans + list(triset) | d2415f7275652254ca87a7621e483a29816a8083 | 3,652,441 |
def get_course_authoring_url(course_locator):
"""
Gets course authoring microfrontend URL
"""
return configuration_helpers.get_value_for_org(
course_locator.org,
'COURSE_AUTHORING_MICROFRONTEND_URL',
settings.COURSE_AUTHORING_MICROFRONTEND_URL
) | cea917ca211be1fdd1b4cf028652101392fd80ab | 3,652,442 |
def sumDwellStateSub(params):
"""Threaded, sums dwell times with 1 day seeing no change & accounting for fractional days"""
(dfIn,dwellTime,weight) = params
dfOut = dfIn.copy(deep=True)
while dwellTime > 1:
if dwellTime > 2:
increment = 1
else:
increment = dwellTime-1
dfOut += dfShift(dfIn,1) * increment
dwellTime += -1
return dfOut * weight | 47ab530bfad9a321bf349e7542f279aae0958a9b | 3,652,443 |
def launch_attacker_view():
"""
Accepts a JSON payload with the following structure:
{
"target": "nlb-something.fqdn.com",
"attacker": "1.2.3.4"
}
If the payload parses correctly, then launch a reverse shell listener using pexpect.spawn
then spawn the auto-sploit.sh tool and enter the target and attacker info again using pexpect
:return: Simple String response for now
"""
managed_instances = get_managed_instances()
if request.method == 'GET':
return render_template('routing/attacker_view.html', log_group=log_group, attacker_ip=attacker_ip,
managed_instances=managed_instances, gd_events_of_interest=gd_events_of_interest,
target_ip=target_ip)
if request.method == 'POST':
logger.info('Attacker is {} and Victim is {}'.format(attacker_ip, target_ip))
print('Attacker is {} and Victim is {}'.format(attacker_ip, target_ip))
if target_ip == "" or attacker_ip == "":
logger.info('Incorrect Json format!')
print(request.payload)
res = make_response(jsonify(
{
"result": "error",
"message": "ERROR - Incorrect Json format"
}), 200)
res.headers['Content-type'] = 'application/json'
return res
# Run auto_sploit.sh
_launch_listener()
logger.info('launching listener process')
#
# Create the payload from the attacker source ip input
create_payload()
# Run the exploit
jenkins_cli_url = 'http://' + target_ip + ':80/cli'
#
# Get an initial session id with download
session = exploit_get_sessionid(jenkins_cli_url)
#
if session:
# Try and upload payload
if upload_chunked(jenkins_cli_url, session, "asdf"):
logger.info('Exploit_launched_ok')
res = make_response(jsonify(
{
"result": "success",
"message": "SUCCESS - auto-sploit launched!"
}), 200)
res.headers['Content-type'] = 'application/json'
return res
else:
logger.info('Failed to launch exploit')
res = make_response(jsonify(
{
"result": "error",
"message": "ERROR - Unable to run exploit"
}), 200)
res.headers['Content-type'] = 'application/json'
return res | 2c987a2b552fa5cfc6e85240c71d496fe43785c3 | 3,652,444 |
import copy
def stretch(alignment, factor):
"""Time-stretch the alignment by a constant factor"""
# Get phoneme durations
durations = [factor * p.duration() for p in alignment.phonemes()]
alignment = copy.deepcopy(alignment)
alignment.update(durations=durations)
return alignment | 1ea58c32509365d503379df616edd00718cfca19 | 3,652,445 |
import time
import torch
import sys
def train(epoch, train_loader, model, contrast, criterion_l, criterion_ab, optimizer, opt):
"""
one epoch training
"""
model.train()
contrast.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
l_loss_meter = AverageMeter()
ab_loss_meter = AverageMeter()
l_prob_meter = AverageMeter()
ab_prob_meter = AverageMeter()
end = time.time()
for idx, (inputs, index) in enumerate(train_loader):
data_time.update(time.time() - end)
# l, ab = inputs['rgb'], inputs['rgb']
l, ab = inputs['rgb'], inputs['dep']
label = inputs['label']
bsz = l.size(0)
l = l.float()
ab = ab.float()
if torch.cuda.is_available():
index = index.cuda()
l = l.cuda()
ab = ab.cuda()
# ===================forward=====================
feat_l, feat_ab = model(l, ab) # [bs, 128]
# print (feat_l.size())
# print (feat_ab.size())
out_l, out_ab = contrast(feat_l, feat_ab, index)
l_loss = criterion_l(out_l)
ab_loss = criterion_ab(out_ab)
l_prob = out_l[:, 0].mean()
ab_prob = out_ab[:, 0].mean()
loss = l_loss + ab_loss
# ===================backward=====================
optimizer.zero_grad()
if opt.amp:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
# torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1e-4, norm_type=2.0)
optimizer.step()
# ===================meters=====================
losses.update(loss.item(), bsz)
l_loss_meter.update(l_loss.item(), bsz)
l_prob_meter.update(l_prob.item(), bsz)
ab_loss_meter.update(ab_loss.item(), bsz)
ab_prob_meter.update(ab_prob.item(), bsz)
torch.cuda.synchronize()
batch_time.update(time.time() - end)
end = time.time()
# print info
if (idx + 1) % opt.print_freq == 0:
print('Train: [{0}][{1}/{2}]\t'
# 'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
# 'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
'loss {loss.val:.3f} ({loss.avg:.3f})\t'
'l_p {lprobs.val:.3f} ({lprobs.avg:.3f})\t'
'ab_p {abprobs.val:.3f} ({abprobs.avg:.3f})'.format(
epoch, idx + 1, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, lprobs=l_prob_meter,
abprobs=ab_prob_meter))
# print(out_l.shape)
sys.stdout.flush()
return l_loss_meter.avg, l_prob_meter.avg, ab_loss_meter.avg, ab_prob_meter.avg | 8c1b5bc514f4013939b6403fe5c9860288492a8b | 3,652,446 |
import itertools
import sys
import networkx as nx
def create_issue_digraph(epics_stories):
"""Return a graph representation of all issues.
Blocking dependencies are modelled as graph edges.
"""
log.info('creating graph...')
graph = nx.DiGraph()
for epic, stories in epics_stories.items():
for issue in stories:
graph.add_node(issue.id, issue=issue, epic=epic)
for issue in itertools.chain(*epics_stories.values()):
for target_issue_id in get_blocked_keys(issue):
if target_issue_id not in graph.nodes:
log.warning(
f'issue {issue.key} blocks unknown issue {target_issue_id}')
continue
graph.add_edge(issue.id, target_issue_id)
if not nx.is_directed_acyclic_graph(graph):
log.error('graph has at least one cycle: {}'.
format(nx.find_cycle(graph)))
sys.exit(1)
return graph | f497341846a030e4e9be3235191f812593518c73 | 3,652,447 |
import os
def loadmesh(basedirMesh, ptcode=None, meshname=None, invertZ=True, fname=None):
""" Load Mesh object, flip z and return Mesh
meshname includes ctcode
"""
if fname is None:
try:
mesh = vv.meshRead(os.path.join(basedirMesh, ptcode, meshname))
except FileNotFoundError:
mesh = vv.meshRead(os.path.join(basedirMesh, meshname))
else:
try:
mesh = vv.meshRead(os.path.join(basedirMesh, ptcode, fname))
except FileNotFoundError:
mesh = vv.meshRead(os.path.join(basedirMesh, fname))
if invertZ == True:
# z is negative, must be flipped to match dicom orientation CT data
mesh._vertices[:, -1] *= -1
return mesh | f1940a07f8cb8949eb7cbbed822a4a57f400146d | 3,652,448 |
from thrift_sasl import TSaslClientTransport
from impala.sasl_compat import PureSASLClient
import getpass
def get_transport(socket, host, kerberos_service_name, auth_mechanism='NOSASL',
user=None, password=None):
"""
Creates a new Thrift Transport using the specified auth_mechanism.
Supported auth_mechanisms are:
- None or 'NOSASL' - returns simple buffered transport (default)
- 'PLAIN' - returns a SASL transport with the PLAIN mechanism
- 'GSSAPI' - returns a SASL transport with the GSSAPI mechanism
"""
log.debug('get_transport: socket=%s host=%s kerberos_service_name=%s '
'auth_mechanism=%s user=%s password=fuggetaboutit', socket, host,
kerberos_service_name, auth_mechanism, user)
if auth_mechanism == 'NOSASL':
return TBufferedTransport(socket)
# Set defaults for PLAIN SASL / LDAP connections.
if auth_mechanism in ['LDAP', 'PLAIN']:
if user is None:
user = getpass.getuser()
log.debug('get_transport: user=%s', user)
if password is None:
if auth_mechanism == 'LDAP':
password = ''
else:
# PLAIN always requires a password for HS2.
password = 'password'
log.debug('get_transport: password=%s', password)
auth_mechanism = 'PLAIN' # sasl doesn't know mechanism LDAP
# Initializes a sasl client
try:
import sasl # pylint: disable=import-error
def sasl_factory():
sasl_client = sasl.Client()
sasl_client.setAttr('host', host)
sasl_client.setAttr('service', kerberos_service_name)
if auth_mechanism.upper() in ['PLAIN', 'LDAP']:
sasl_client.setAttr('username', user)
sasl_client.setAttr('password', password)
sasl_client.init()
return sasl_client
except ImportError:
log.warn("Unable to import 'sasl'. Fallback to 'puresasl'.")
def sasl_factory():
return PureSASLClient(host, username=user, password=password,
service=kerberos_service_name)
return TSaslClientTransport(sasl_factory, auth_mechanism, socket) | 71e7763066eaf5234b767101930505077954939f | 3,652,449 |
def _read_node( data, pos, md_total ):
"""
2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2
The quantity of child nodes.
The quantity of metadata entries.
Zero or more child nodes (as specified in the header).
"""
child_count = data[ pos ]
pos += 1
md_count = data[ pos ]
pos += 1
for i in range( child_count ):
pos, md_total = _read_node( data, pos, md_total )
for m in range( md_count ):
md_total += data[ pos ]
pos += 1
return ( pos, md_total ) | 768036031ab75b532d667769477f8d3144129ac8 | 3,652,450 |
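A usage sketch on the example header from the docstring (the Advent of Code day-8 sample), assuming the function above is in scope; all 16 numbers are consumed and the metadata entries sum to 138.

    data = [2, 3, 0, 3, 10, 11, 12, 1, 1, 0, 1, 99, 2, 1, 1, 2]
    pos, md_total = _read_node(data, 0, 0)
    print(pos, md_total)   # 16 138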
def _large_flag_fit(x, yDat, yFit, initz, speciesDict, minSize, errBound):
"""
Attempts to more robustly fit saturated lyman alpha regions that have
not converged to satisfactory fits using the standard tools.
Uses a preselected sample of a wide range of initial parameter guesses
designed to fit saturated lines (see get_test_lines).
**Parameters**
:x: (N) ndarray
array of wavelength
:ydat: (N) ndarray
array of desired flux profile to be fitted for the wavelength
space given by x. Same size as x.
:yFit: (N) ndarray
array of flux profile fitted for the wavelength
space given by x already. Same size as x.
:initz: float
redshift to try putting first line at
(maximum absorption for region)
:speciesDict: dictionary
dictionary containing all relevant parameters needed
to create an absorption line of a given species (f,Gamma,lambda0)
as well as max and min values for parameters to be fit
:minsize: float
minimum absorption allowed for a line to still count as a line
given in normalized flux (ie: for minSize=.9, only lines with minimum
flux less than .9 will be fitted)
:errbound: float
maximum total error allowed for an acceptable fit
**Returns**
:bestP: (3,) ndarray
array of best parameters if a good enough fit is found in
the form [[N1,b1,z1], [N2,b2,z2],...]
"""
#Set up some initial line guesses
lineTests = _get_test_lines(initz)
#Keep track of the lowest achieved error
bestError = 1000
#Iterate through test line guesses
for initLines in lineTests:
if initLines[1,0]==0:
initLines = np.delete(initLines,1,axis=0)
#Do fitting with initLines as first guess
linesP,flag=_complex_fit(x,yDat,yFit,initz,
minSize,errBound,speciesDict,initP=initLines)
#Find error of last fit
yNewFit=_gen_flux_lines(x,linesP,speciesDict)
dif = yFit*yNewFit-yDat
errSq=sum(dif**2)
#If error lower, keep track of the lines used to make that fit
if errSq < bestError:
bestError = errSq
bestP = linesP
if bestError>10*errBound*len(x):
return []
else:
return bestP | c065e9e1500977dfa1a522ebd238d2e71e188c6a | 3,652,451 |
from typing import Type
import numpy as np
def mse_large_arrays_masked(dataA: 'LargeArray', dataB: 'LargeArray', mask: 'LargeArray',
dtype: Type, batchSizeFlat=1e8):
"""
Compute MSE between two HDF datasets, considering elements where the mask is set to true (one).
Computation is performed in batches to decrease memory requirements.
"""
if dataA.shape != dataB.shape or dataA.shape != mask.shape:
raise ValueError("Arguments should have equal shapes, {}, {} and {} given."
.format(dataA.shape, dataB.shape, mask.shape))
sum = 0.0
count = 0
for batchStart, batchEnd in get_batch_indices(dataA.shape, dtype, batchSizeFlat):
batchMask = mask[batchStart:batchEnd]
diff = batchMask * (dataA[batchStart:batchEnd].astype(dtype) - dataB[batchStart:batchEnd].astype(dtype))
square = np.square(diff)
nonzeroNumber = np.count_nonzero(batchMask)
sum += np.sum(square)
count += nonzeroNumber
return sum / count if count > 0 else float('nan') | 27636fc32d208d544b0b2f9790015e6f3d86a69d | 3,652,452 |
import copy
import lxml.etree
from lxml import etree
def xml_elem_or_str_to_text(elem_or_xmlstr, default_return=""):
"""
Return string with all tags stripped out from either etree element or xml marked up string
If string is empty or None, return the default_return
>>> root = etree.fromstring(test_xml)
>>> xml_elem_or_str_to_text(test_xml, None)[0:100]
'this is just authoring test stuff\\n whatever is in the abstract\\n \\n '
>>> xml_elem_or_str_to_text(root, None)[0:100]
'this is just authoring test stuff\\n whatever is in the abstract\\n \\n '
>>> root = etree.fromstring("<myxml>this <b>is <i>really</i><empty/></b> xml.</myxml>", None) #mixed content element
>>> xml_elem_or_str_to_text(root, None)
'this is really xml.'
>>> isinstance(xml_elem_or_str_to_text(root, None), str) # make sure it's string
True
>>> xml_elem_or_str_to_text(xml_xpath_return_textsingleton(root, "pxxx", ""), None)
"""
ret_val = default_return
if elem_or_xmlstr is None or elem_or_xmlstr == "":
ret_val = default_return
elif isinstance(elem_or_xmlstr, lxml.etree._ElementUnicodeResult):
ret_val = "%s" % elem_or_xmlstr # convert to string
# just in case the caller sent a string.
else:
try:
if isinstance(elem_or_xmlstr, str):
parser = lxml.etree.XMLParser(encoding='utf-8', recover=True)
elem = etree.fromstring(elem_or_xmlstr.encode("utf8"), parser)
else:
elem = copy.copy(elem_or_xmlstr) # etree will otherwise change calling parm elem_or_xmlstr when stripping
except Exception as err:
logger.error(err)
ret_val = default_return
try:
etree.strip_tags(elem, '*')
inner_text = elem.text
if inner_text:
ret_val = inner_text.strip()
else:
ret_val = default_return
except Exception as err:
logger.error("xmlElemOrStrToText: ", err)
ret_val = default_return
if ret_val == "":
ret_val = default_return
return ret_val | 1e13c74d3d7d69fdd1ce8011384e1ee564f366f1 | 3,652,453 |
def sequence_equals(sequence1, sequence2):
"""
Inspired by django's self.assertSequenceEquals
Useful for comparing lists with querysets and similar situations where
simple == fails because of different type.
"""
assert len(sequence1) == len(sequence2), (len(sequence1), len(sequence2))
for item_from_s1, item_from_s2 in zip(sequence1, sequence2):
assert item_from_s1 == item_from_s2, (item_from_s1, item_from_s2)
return True | 38016a347caf79458bb2a872d0fd80d6b813ba33 | 3,652,454 |
def statistical_features(ds, exclude_col_names: list = [],
feature_names=['mean', 'median', 'stddev', 'variance', 'max', 'min', 'skew',
'kurt', 'sqr']):
"""
Compute statistical features.
Args:
ds (DataStream): Windowed/grouped DataStream object
exclude_col_names list(str): name of the columns on which features should not be computed
feature_names list(str): names of the features. Supported features are ['mean', 'median', 'stddev', 'variance', 'max', 'min', 'skew',
'kurt', 'sqr', 'zero_cross_rate'
Returns:
DataStream object with all the existing data columns and FFT features
"""
exclude_col_names.extend(["timestamp", "localtime", "user", "version"])
data = ds._data._df.drop(*exclude_col_names)
df_column_names = data.columns
basic_schema = StructType([
StructField("timestamp", TimestampType()),
StructField("localtime", TimestampType()),
StructField("user", StringType()),
StructField("version", IntegerType()),
StructField("start_time", TimestampType()),
StructField("end_time", TimestampType())
])
features_list = []
for cn in df_column_names:
for sf in feature_names:
features_list.append(StructField(cn + "_" + sf, FloatType(), True))
features_schema = StructType(basic_schema.fields + features_list)
def calculate_zero_cross_rate(series):
"""
How often the signal changes sign (+/-)
"""
series_mean = np.mean(series)
series = [v - series_mean for v in series]
zero_cross_count = (np.diff(np.sign(series)) != 0).sum()
return zero_cross_count / len(series)
def get_sqr(series):
sqr = np.mean([v * v for v in series])
return sqr
@pandas_udf(features_schema, PandasUDFType.GROUPED_MAP)
def get_stats_features_udf(df):
results = []
timestamp = df['timestamp'].iloc[0]
localtime = df['localtime'].iloc[0]
user = df['user'].iloc[0]
version = df['version'].iloc[0]
start_time = timestamp
end_time = df['timestamp'].iloc[-1]
df.drop(exclude_col_names, axis=1, inplace=True)
if "mean" in feature_names:
df_mean = df.mean()
df_mean.index += '_mean'
results.append(df_mean)
if "median" in feature_names:
df_median = df.median()
df_median.index += '_median'
results.append(df_median)
if "stddev" in feature_names:
df_stddev = df.std()
df_stddev.index += '_stddev'
results.append(df_stddev)
if "variance" in feature_names:
df_var = df.var()
df_var.index += '_variance'
results.append(df_var)
if "max" in feature_names:
df_max = df.max()
df_max.index += '_max'
results.append(df_max)
if "min" in feature_names:
df_min = df.min()
df_min.index += '_min'
results.append(df_min)
if "skew" in feature_names:
df_skew = df.skew()
df_skew.index += '_skew'
results.append(df_skew)
if "kurt" in feature_names:
df_kurt = df.kurt()
df_kurt.index += '_kurt'
results.append(df_kurt)
if "sqr" in feature_names:
df_sqr = df.apply(get_sqr)
df_sqr.index += '_sqr'
results.append(df_sqr)
output = pd.DataFrame(pd.concat(results)).T
basic_df = pd.DataFrame([[timestamp, localtime, user, int(version), start_time, end_time]],
columns=['timestamp', 'localtime', 'user', 'version', 'start_time', 'end_time'])
return basic_df.assign(**output)
# check if datastream object contains grouped type of DataFrame
if not isinstance(ds._data, GroupedData):
raise Exception(
"DataStream object is not grouped data type. Please use 'window' operation on datastream object before running this algorithm")
data = ds._data.apply(get_stats_features_udf)
return DataStream(data=data, metadata=Metadata()) | 544b32b3f909c8f98ae18ae43006719105627a85 | 3,652,455 |
def second_order_difference(t, y):
""" Calculate the second order difference.
Args:
t: ndarray, the list of the three independent variables
y: ndarray, three values of the function at every t
Returns:
double: the second order difference of given points
"""
# claculate the first order difference
first_order_difference = (y[1:] - y[:-1]) / (t[1:] - t[:-1])
return (first_order_difference[1] - first_order_difference[0]) / (t[2] - t[0]) | 40e37d2b34104772966afc34e41c1ebc742c9adf | 3,652,456 |
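A worked example for the divided difference above, assuming the function is in scope, on samples of y = x**2.

    import numpy as np

    t = np.array([0.0, 1.0, 2.0])
    y = np.array([0.0, 1.0, 4.0])
    # first-order differences: (1-0)/(1-0) = 1 and (4-1)/(2-1) = 3
    print(second_order_difference(t, y))   # (3 - 1) / (2 - 0) = 1.0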
def timeDelay( gpsTime, rightAscension, declination, unit, det1, det2 ):
"""
timeDelay( gpsTime, rightAscension, declination, unit, det1, det2 )
Calculates the time delay in seconds between the detectors
'det1' and 'det2' (e.g. 'H1') for a sky location at (rightAscension
and declination) which must be given in certain units
('radians' or 'degree'). The time is passes as GPS time.
A positive time delay means the GW arrives first at 'det2', then at 'det1'.
Example:
antenna.timeDelay( 877320548.000, 355.084,31.757, 'degree','H1','L1')
0.0011604683260994519
Given these values, the signal arrives first at detector L1,
and 1.16 ms later at H2
"""
# check the input arguments
if unit =='radians':
ra_rad = rightAscension
de_rad = declination
elif unit =='degree':
ra_rad = rightAscension/180.0*pi
de_rad = declination/180.0*pi
else:
raise ValueError("Unknown unit %s" % unit)
# check input values
if ra_rad<0.0 or ra_rad> 2*pi:
raise ValueError( "ERROR. right ascension=%f "\
"not within reasonable range."\
% (rightAscension))
if de_rad<-pi or de_rad> pi:
raise ValueError( "ERROR. declination=%f not within reasonable range."\
% (declination))
if det1 == det2:
return 0.0
gps = lal.LIGOTimeGPS( gpsTime )
x1 = lalsimulation.DetectorPrefixToLALDetector(det1).location
x2 = lalsimulation.DetectorPrefixToLALDetector(det2).location
timedelay = lal.ArrivalTimeDiff(list(x1), list(x2), ra_rad, de_rad, gps)
return timedelay | eb0f444ad2a2be0cf10d62fdbe8b41c8d924c798 | 3,652,457 |
def RngBinStr(n):
"""
Takes a int which represents the length of the final binary number.
Returns a string which represents a number in binary where each char was randomly generated and has lenght n.
"""
num = ""
for i in range(n):
if rng.random() < 0.5:
num += "0"
else:
num += "1"
return num | cf063532425b51243f3ba95f90df892bda121363 | 3,652,458 |
import numpy as np
from scipy import ndimage
def get_bdbox_from_heatmap(heatmap, threshold=0.2, smooth_radius=20):
"""
Function to extract bounding boxes of objects in heatmap
Input :
Heatmap : matrix extracted with GradCAM.
threshold : value defining the values we consider , increasing it increases the size of bounding boxes.
smooth_radius : radius on which each pixel is blurred.
Output :
returned_objects : List of bounding boxes, N_objects * [ xmin, xmax, ymin, ymax, width, height ]
"""
# If heatmap is all zeros i initialize a default bounding box which wraps entire image
xmin = 0
xmax = heatmap.shape[1]
ymin = 0
ymax = heatmap.shape[0]
width = xmax-xmin
height = ymax-ymin
returned_objects = []
# Count if there is any "hot" value on the heatmap
count = (heatmap > threshold).sum()
# Blur the image to have continuous regions
heatmap = ndimage.uniform_filter(heatmap, smooth_radius)
# Threshold the heatmap with 1 for values > threshold and 0 else
thresholded = np.where(heatmap > threshold, 1, 0)
# Apply morphological filter to fill potential holes in the heatmap
thresholded = ndimage.morphology.binary_fill_holes(thresholded)
# Detect all independant objects in the image
labeled_image, num_features = ndimage.label(thresholded)
objects = ndimage.measurements.find_objects(labeled_image)
# We loop in each object ( if any is detected ) and append it to a global list
if count > 0:
for obj in objects:
x = obj[1]
y = obj[0]
xmin = x.start
xmax = x.stop
ymin = y.start
ymax = y.stop
width = xmax-xmin
height = ymax-ymin
returned_objects.append([xmin, xmax, ymin, ymax, width, height])
else:
returned_objects.append([xmin, xmax, ymin, ymax, width, height])
return returned_objects | 0a7397263cf2b8b238679f3cd54b8bcb67553387 | 3,652,459 |
def get_request(request_id, to_json=False, session=None):
"""
Get a request or raise a NoObject exception.
:param request_id: The id of the request.
:param to_json: return json format.
:param session: The database session in use.
:raises NoObject: If no request is founded.
:returns: Request.
"""
try:
query = session.query(models.Request).with_hint(models.Request, "INDEX(REQUESTS REQUESTS_SCOPE_NAME_IDX)", 'oracle')\
.filter(models.Request.request_id == request_id)
ret = query.first()
if not ret:
return None
else:
if to_json:
return ret.to_dict_json()
else:
return ret.to_dict()
except sqlalchemy.orm.exc.NoResultFound as error:
raise exceptions.NoObject('request request_id: %s cannot be found: %s' % (request_id, error)) | 41d34057b859a88818866a03392ec6f96d2b4983 | 3,652,460 |
def gen_multi_correlated(N, n, c_mat, p_arr, use_zscc=False, verify=False, test_sat=False, pack_output=True, print_stat=False):
"""Generate a set of bitstreams that are correlated according to the supplied correlation matrix"""
#Test if the desired parameters are satisfiable
sat_result = corr_sat(N, n, c_mat, p_arr, for_gen=True, print_stat=print_stat, use_zscc=use_zscc)
if not sat_result:
if print_stat:
print("SCC MATRIX NOT SATISFIABLE")
return test_sat #Don't fail the test if we were intending to check correlation satisfiability
sat = sat_result[0]
if not test_sat and not sat:
if print_stat:
print("SCC MATRIX NOT SATISFIABLE")
return False
Dij = sat_result[1]
N_arr = sat_result[2]
if print_stat:
print(c_mat)
print(p_arr)
#Perform the generation
bs_arr = np.zeros((n,N), dtype=np.uint8)
def gmc_rec(i):
"""Recursive portion of gen_multi_correlated"""
nonlocal N, n, N_arr, Dij, bs_arr
if i == n-1:
sentinel = 's'
last_cand = next(next_cand(N, N_arr[i], Dij, bs_arr, i), sentinel)
if last_cand is not sentinel:
bs_arr[i, :] = last_cand
return True
else:
return False
else:
for cand in next_cand(N, N_arr[i], Dij, bs_arr, i):
bs_arr[i, :] = cand
if gmc_rec(i+1):
return True
return False
gmc_result = gmc_rec(0)
if not test_sat and not gmc_result:
if print_stat:
print("GEN_MULTI_CORRELATED FAILED: Couldn't find a valid solution")
return False
if test_sat:
if gmc_result != sat:
print("Generation result: '{}' did not match scc sat result: '{}'. Corr mat: \n{}. p arr: {}" \
.format(gmc_result, sat, c_mat, p_arr))
return False
else:
print("SCC SAT TEST PASS. Corr mat: \n{}. p arr: {}".format(c_mat, p_arr))
#Verify the generation
if print_stat:
print(bs_arr)
if verify and gmc_result:
cmat_actual = bs.get_corr_mat(bs_arr, bs_len=N, use_zscc=use_zscc)
if np.any(np.abs(cmat_actual - c_mat) > 1e-3):
if print_stat:
print("GEN_MULTI_CORRELATED FAILED: Resulting SCC Matrix doesn't match: \n {} \n should be \n {}"
.format(cmat_actual, c_mat))
return False
for idx, bs_i in enumerate(bs_arr):
p_actual = bs.bs_mean(np.packbits(bs_i), bs_len=N)
if np.any(np.abs(p_actual - p_arr[idx]) > 1e-3):
if print_stat:
print("GEN_MULTI_CORRELATED FAILED: Resulting probability is incorrect: {} (should be {})".format(p_actual, p_arr[idx]))
return False
if print_stat:
print("GEN_MULTI_CORRELATED PASS")
if pack_output:
return True, np.packbits(bs_arr, axis=1)
else:
return True, bs_arr | 0b1cf206e92363910877b0202b9fb94d377358a3 | 3,652,461 |
def rxzero_traj_eval_grad(parms, t_idx):
"""
Analytical gradient for evaluated trajectory with respect to the log-normal parameters
It is expected to boost the optimization performance when the parameters are high-dimensional...
"""
v_amp_array = np.array([rxzero_vel_amp_eval(parm, t_idx) for parm in parms])
phi_array = np.array([rxzero_normal_Phi_eval(parm, t_idx) for parm in parms])
v_amp_grad_array = np.array([np.vstack([rxzero_vel_amp_eval_grad(parm[0:4], t_idx).T, np.zeros((2, len(t_idx)))]).T for parm in parms])
phi_grad_array = np.array([rxzero_normal_Phi_eval_grad(parm, t_idx) for parm in parms])
v_x_grad = np.concatenate([(v_amp_grad_array[parm_idx].T * np.cos(phi_array[parm_idx]) - v_amp_array[parm_idx] * np.sin(phi_array[parm_idx]) * phi_grad_array[parm_idx].T).T for parm_idx in range(len(parms))], axis=1)
v_y_grad = np.concatenate([(v_amp_grad_array[parm_idx].T * np.sin(phi_array[parm_idx]) + v_amp_array[parm_idx] * np.cos(phi_array[parm_idx]) * phi_grad_array[parm_idx].T).T for parm_idx in range(len(parms))], axis=1)
dt = t_idx[1] - t_idx[0]
pos_x_grad = np.cumsum(v_x_grad, axis=0) * dt
pos_y_grad = np.cumsum(v_y_grad, axis=0) * dt
return np.array([pos_x_grad, pos_y_grad]), np.array([v_x_grad, v_y_grad]) | 47aa04aa2096f472dd0f5612c95903fd638cb1d0 | 3,652,462 |
import traceback
def exec_geoprocessing_model():
"""算法模型试运行测试
根据算法模型的guid标识,算法模型的输入参数,运行算法模型
---
tags:
- system_manage_api/geoprocessing_model
parameters:
- in: string
name: guid
type: string
required: true
        description: guid of the geoprocessing model
- in: array
name: param
type: array
required: true
        description: initialization parameters of the algorithm model
responses:
200:
        description: results of running the algorithm model, as an array
schema:
properties:
geoprocessing_model_result:
type: object
              description: result array, [{"function_name":"","value":""},{},...]
500:
        description: service runtime error, with exception information
schema:
properties:
errMessage:
type: string
              description: exception message, including the exception type
traceMessage:
type: string
              description: more detailed exception information, including where the exception occurred
"""
try:
# exe_functinons_param = {}
# exe_functinons_already = {}
# exe_functinons_result = {}
# param_dic = {x["guid"]: x["default_value"] for x in list(request.json.get('param', []))}
        # #Using the algorithm model's guid, fetch all function information from the database,
        # #including module names, function names, parameter names, etc.
# pg_helper = PgHelper()
# records = pg_helper.query_datatable(
# '''select module_name,function_name,parameter_name,guid,
# from_module_name,from_function_name,from_name
# from gy_geoprocessing_model_node
# where geoprocessing_model_guid=%s''', (request.json.get('guid', None),))
# for x in records:
# if not (x["module_name"], x["function_name"]) in exe_functinons_param:
# exe_functinons_param[(x["module_name"], x["function_name"])] = {}
# exe_functinons_already[(x["module_name"], x["function_name"])] = False
# if x["guid"] in param_dic:
# exe_functinons_param[(x["module_name"], x["function_name"])][x["parameter_name"]] = param_dic[x["guid"]]
# else:
# exe_functinons_param[(x["module_name"], x["function_name"])][x["parameter_name"]] = None
# exe_functinons_result[(x["from_module_name"], x["from_function_name"], x["from_name"])] = (x["module_name"], x["function_name"],
# x["parameter_name"])
# flag_loop = True
# latest_result = {}
# while flag_loop:
# flag_loop = False
        #     #loop over every function
# for key_f in exe_functinons_param:
        #         #the function has already been executed
# if exe_functinons_already[key_f]:
# continue
        #         #if none of the function's parameter values are None, run the function
# func_exeable = True
# for key_p in exe_functinons_param[key_f]:
# if exe_functinons_param[key_f][key_p] is None:
# func_exeable = False
# flag_loop = True
# break
        #         #run the function
# if func_exeable:
# latest_result = {}
# exe_functinons_already[key_f] = True
# temp_result = geoprocessing_algorithm.__dict__[key_f[0]].__dict__[key_f[1]](**exe_functinons_param[key_f])
        #             #assign the results to the corresponding parameters
# for key_re in temp_result:
# if key_f + (key_re,) in exe_functinons_result:
# exe_functinons_param[exe_functinons_result[key_f +
# (key_re,)][:-1]][exe_functinons_result[key_f +
# (key_re,)][-1]] = temp_result[key_re]
# latest_result[key_f] = temp_result
        #     #parse the results of the latest run and return them to the front end
# ret_string = ""
# for key_f in latest_result:
# for x in geoprocessing_algorithm.__dict__[key_f[0]].__dict__[key_f[1]].__annotations__["return"]:
# if x["name_en"] in latest_result[key_f]:
# ret_string = ret_string + x["name_zh_cn"] + ":" + str(latest_result[key_f][x["name_en"]]) + "\n"
# return jsonify({"geoprocessing_model_result": ret_string}), 200
return jsonify({}), 200
except Exception as exception:
return jsonify({"errMessage": repr(exception), "traceMessage": traceback.format_exc()}), 500 | 8cfcc56117747c78d8b2c4fc10dc29fa8115aa67 | 3,652,463 |
import requests
def perform_extra_url_query(url):
"""Performs a request to the URL supplied
Arguments:
url {string} -- A URL directing to another page of results from the NASA API
Returns:
Response object -- The response received from the NASA API
"""
response = requests.request("GET", url)
check_query_was_successful(response)
return response | 7d5fe2d6467d90e1f7e85d2fc51187a36f62305d | 3,652,464 |
from typing import Any, Callable, List, Type
from org.optaplanner.optapy import PythonWrapperGenerator  # noqa
def problem_fact_property(fact_type: Type) -> Callable[[Callable[[], List]],
Callable[[], List]]:
"""Specifies that a property on a @planning_solution class is a problem fact.
A problem fact must not change during solving (except through a ProblemFactChange event). The constraints in a
ConstraintProvider rely on problem facts for ConstraintFactory.from(Class).
Do not annotate planning entities as problem facts: they are automatically available as facts for
ConstraintFactory.from(Class).
"""
def problem_fact_property_function_mapper(getter_function: Callable[[], Any]):
ensure_init()
        from org.optaplanner.core.api.domain.solution import \
            ProblemFactProperty as JavaProblemFactProperty
getter_function.__optapy_return = get_class(fact_type)
getter_function.__optaplannerPlanningEntityCollectionProperty = {
'annotationType': JavaProblemFactProperty
}
return getter_function
return problem_fact_property_function_mapper | 068cdbc1a8dab95b5a742740195b4fdaf595de2a | 3,652,465 |
def _load_method_arguments(name, argtypes, args):
"""Preload argument values to avoid freeing any intermediate data."""
if not argtypes:
return args
if len(args) != len(argtypes):
raise ValueError(f"{name}: Arguments length does not match argtypes length")
return [
arg if hasattr(argtype, "_type_") else argtype.from_param(arg)
for (arg, argtype) in zip(args, argtypes)
] | 0eb6a16c2e4c1cd46a114923f81e93af331c3d6e | 3,652,466 |
import json
def crash_document_add(key=None):
"""
POST: api/vX/crash/<application_key>
add a crash document by web service
"""
if 'Content-Type' not in request.headers or request.headers['Content-Type'].find('multipart/form-data') < 0:
return jsonify({ 'success': False, 'message': 'input error' })
reports = request.files.getlist('reports')
if reports:
ds = DocumentService()
for report in reports:
documents = json.loads(report.read())
if not isinstance(documents, list): documents = [documents]
for document in documents:
result, msg = ds.add_document(key, document, DocumentModel.crash)
if not result:
# error
return abort(417, {'message': msg})
# success
return jsonify({'success': True, 'message': None})
# no reports
return abort(400, {'message': 'input error'}) | 669c30141d5fb50128b3c60577433938daec5a2a | 3,652,467 |
def log_data(model, action, before, after, instance):
"""Logs mutation signals for Favourite and Category models
Args:
model(str): the target class of the audit-log: favourite or category
action(str): the type of mutation to be logged: create, update, delete
before(dict): the previous value of the data mutated
after(dict): the new value of the data mutated
instance(object): the favourite or category instance being mutated
Returns:
object: instance of AuditLog created for the mutation
"""
log = {
'model': model,
'action': action,
'date': timezone.now(),
'before': before,
'after': after,
'resource_id': instance.id
}
return AuditLog.objects.create(**log) | f23ef8d2a759130ac55d3dc55f4497099776414f | 3,652,468 |
import requests
def download(url, local_filename, chunk_size=1024 * 10):
"""Download `url` into `local_filename'.
:param url: The URL to download from.
:type url: str
:param local_filename: The local filename to save into.
:type local_filename: str
:param chunk_size: The size to download chunks in bytes (10Kb by default).
:type chunk_size: int
:rtype: str
:returns: The path saved to.
"""
    # stream so large files are written to disk chunk by chunk rather than held in memory
    response = requests.get(url, stream=True)
with open(local_filename, 'wb') as fp:
for chunk in response.iter_content(chunk_size=chunk_size):
if chunk:
fp.write(chunk)
return fp.name | 0a86b8600e72e349a4e1344d2ce1ad2bb00b889d | 3,652,469 |
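# Editor's usage sketch (URL and filename below are made-up placeholders):
# saved_path = download("https://example.com/archive.zip", "archive.zip", chunk_size=1024 * 64)
# print(saved_path)  # -> "archive.zip"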
from argparse import ArgumentParser
from glob import glob
import os
def main(args=None):
"""Command line interface.
:param list args: command line options (defaults to sys.argv)
:returns: exit code
:rtype: int
"""
parser = ArgumentParser(
prog='baseline',
description='Overwrite script with baseline update.')
parser.add_argument(
'path', nargs='*',
help='module or directory path')
parser.add_argument(
'-w', '--walk', action='store_true',
help='recursively walk directories')
args = parser.parse_args(args)
paths = args.path or ['.']
paths = [path for pattern in paths for path in glob(pattern)]
if args.walk:
for dirpath in (p for p in paths if os.path.isdir(p)):
for root, _dirs, files in os.walk(dirpath):
paths += (os.path.join(root, filename) for filename in files)
else:
for dirpath in (p for p in paths if os.path.isdir(p)):
paths += (os.path.join(dirpath, pth) for pth in os.listdir(dirpath))
update_paths = [
os.path.abspath(p) for p in paths if p.lower().endswith(UPDATE_EXT)]
if update_paths:
script_paths = [pth[:-len(UPDATE_EXT)] + '.py' for pth in update_paths]
print('Found updates for:')
for path in script_paths:
print(' ' + os.path.relpath(path))
print()
try:
input('Hit [ENTER] to update, [Ctrl-C] to cancel ')
except KeyboardInterrupt:
print()
print('Update canceled.')
else:
print()
for script_path, update_path in zip(script_paths, update_paths):
with open(update_path) as update:
new_content = update.read()
with open(script_path, 'w') as script:
script.write(new_content)
os.remove(update_path)
print(
os.path.relpath(update_path) +
' -> ' +
os.path.relpath(script_path))
return 0 | 8af697724070557fbf0df669ad9b324e51e19a39 | 3,652,470 |
import sacrebleu
import sacremoses
def tokenize(data, tok="space", lang="en"):
"""Tokenize text data.
There are 5 tokenizers supported:
- "space": split along whitespaces
- "char": split in characters
- "13a": Official WMT tokenization
- "zh": Chinese tokenization (See ``sacrebleu`` doc)
- "moses": Moses tokenizer (you can specify lthe language).
Uses the `sacremoses <https://github.com/alvations/sacremoses>`_
Args:
data (list, str): String or list (of lists...) of strings.
tok (str, optional): Tokenization. Defaults to "space".
lang (str, optional): Language (only useful for the moses tokenizer).
Defaults to "en".
Returns:
list, str: Tokenized data
"""
if tok is "space":
def tokenizer(x): return x.split()
elif tok is "char":
def tokenizer(x): return list(x)
elif tok is "13a":
def tokenizer(x): return sacrebleu.tokenize_13a(x).split(" ")
elif tok is "zh":
def tokenizer(x): return sacrebleu.tokenize_zh(x).split(" ")
elif tok is "moses":
moses_tok = sacremoses.MosesTokenizer(lang=lang)
def tokenizer(x): return moses_tok.tokenize(x)
else:
raise ValueError(f"Unknown tokenizer {tok}")
return _tokenize(data, tokenizer) | 0974edc3a4d66b90add101002fbcc1486c21e5ce | 3,652,471 |
import numpy as np
def ift2(x, dim=(-2, -1)):
"""
    Compute the inverse 2D fast Fourier transform, applying fftshift before and after so the
    result is consistent with the ftAxis conventions
    Parameters
    ----------
    x: (ndarray) the array on which the inverse FFT should be done
    dim: the axis (or a tuple of axes) over which the FFT is done (default is the last two axes of the array)
    Returns
    -------
    out: (ndarray) the inverse Fourier transformed (and shifted) array
    See Also
    --------
    ftAxis, ftAxis_time, ift, ft2
"""
assert isinstance(x, np.ndarray)
if hasattr(dim, '__iter__'):
for d in dim:
if not isinstance(d, int):
raise TypeError(
'elements in dim should be an integer specifying the array dimension over which to do the calculation')
assert d <= len(x.shape)
else:
if not isinstance(dim, int):
raise TypeError(
'elements in dim should be an integer specifying the array dimension over which to do the calculation')
assert dim <= len(x.shape)
out = np.fft.fftshift(np.fft.ifft2(np.fft.fftshift(x, axes=dim)), axes=dim)
return out | 50377bb81fa17c152f8b8053cdae1502dbc791ad | 3,652,472 |
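# Editor's round-trip sketch (not from the original module): applying ift2 to the
# matching shifted forward transform recovers the input for even-sized arrays.
_x = np.random.rand(8, 8)
_X = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(_x)))  # shifted forward 2D FFT
assert np.allclose(ift2(_X), _x)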
def chi2_test_independence(prediction_files: list, confidence_level: float):
"""Given a list of prediction files and a required confidence level,
return whether the sentiment probability is independent on which prediction
file it comes from.
Returns True if the sentiment probability is independent of source."""
df = generate_sentiment_counts_multiple_files(prediction_files)
observed = df[:-1].drop(columns='row_sum')
expected = np.outer(df['row_sum'][:-1],
df.loc['col_sum'][:-1]) / df.loc['col_sum']['row_sum']
expected = pd.DataFrame(expected)
expected.columns = df.columns[:-1]
expected.index = df.index[:-1]
chi2_stats = ((observed - expected)**2 / expected).sum().sum()
    # degrees of freedom for an r x c test of independence: (r - 1) * (c - 1)
    degs_of_freedom = (len(observed) - 1) * (len(observed.columns) - 1)
critical_value = chi2.ppf(q=confidence_level, df=degs_of_freedom)
p_value = 1 - chi2.cdf(x=chi2_stats, df=degs_of_freedom)
LOGGER.info(
f"chi2_stats = {chi2_stats}, critical_value = {critical_value}, p_value = {p_value:.10f}"
)
return p_value > (1 - confidence_level) | 870a91afa202b19398c620756492bd5297c4eb69 | 3,652,473 |
import os
import yaml
import shutil
def fetch_all_device_paths():
"""
Return all device paths inside worker nodes
Returns:
list : List containing all device paths
"""
path = os.path.join(constants.EXTERNAL_DIR, "device-by-id-ocp")
clone_repo(constants.OCP_QE_DEVICEPATH_REPO, path)
os.chdir(path)
logger.info("Running script to fetch device paths...")
run_cmd("ansible-playbook devices_by_id.yml")
with open("local-storage-block.yaml") as local_storage_block:
local_block = yaml.load(local_storage_block, Loader=yaml.FullLoader)
dev_paths = local_block["spec"]["storageClassDevices"][0]["devicePaths"]
logger.info(f"All devices are {dev_paths}")
os.chdir(constants.TOP_DIR)
shutil.rmtree(path)
return dev_paths | ff2086b2b17aa41af5d775b6007757bdab32ad3c | 3,652,474 |
import os
def align(fastq_file, pair_file, index_dir, names, align_dir, data):
"""Perform piped alignment of fastq input files, generating sorted, deduplicated BAM.
"""
umi_ext = "-cumi" if "umi_bam" in data else ""
out_file = os.path.join(align_dir, "{0}-sort{1}.bam".format(dd.get_sample_name(data), umi_ext))
num_cores = data["config"]["algorithm"].get("num_cores", 1)
rg_info = "rgid={rg} rgpl={pl} rgpu={pu} rgsm={sample}".format(**names)
pair_file = pair_file if pair_file else ""
final_file = None
if data.get("align_split"):
# BBMap does not accept input fastq streams
raise ValueError("bbmap is not compatible with alignment splitting, set `align_split: false`")
pair_arg = "in2=%s" % pair_file if pair_file else ""
if not utils.file_exists(out_file) and (final_file is None or not utils.file_exists(final_file)):
with postalign.tobam_cl(data, out_file, pair_file != "") as (tobam_cl, tx_out_file):
if index_dir.endswith(("/ref", "/ref/")):
index_dir = os.path.dirname(index_dir)
# sam=1.3 required for compatibility with strelka2
cmd = ("bbmap.sh sam=1.3 mdtag=t {rg_info} path={index_dir} in1={fastq_file} "
"{pair_arg} out=stdout.sam | ")
do.run(cmd.format(**locals()) + tobam_cl, "bbmap alignment: %s" % dd.get_sample_name(data))
data["work_bam"] = out_file
return data | 0725dc73016e6b58af6948f7ca8611ab1c9819dd | 3,652,475 |
import json
async def insert(cls:"PhaazeDatabase", WebRequest:Request, DBReq:DBRequest) -> Response:
""" Used to insert a new entry into a existing container """
# prepare request for a valid insert
try:
DBInsertRequest:InsertRequest = InsertRequest(DBReq)
return await performInsert(cls, DBInsertRequest)
except (MissingIntoField, InvalidContent, ContainerNotFound, ContainerBroken, SysLoadError, SysStoreError) as e:
res = dict(
code = e.code,
status = e.status,
msg = e.msg()
)
return cls.response(status=e.code, body=json.dumps(res))
except Exception as ex:
return await cls.criticalError(ex) | 20772f847137422a1da227da38946c9b1a01106a | 3,652,476 |
import numpy as np
def eulerAngleXYZ(t123, unit=np.pi/180., dtype=np.float32):
"""
::
In [14]: eulerAngleXYZ([45,0,0])
Out[14]:
array([[ 1. , 0. , 0. , 0. ],
[-0. , 0.7071, 0.7071, 0. ],
[ 0. , -0.7071, 0.7071, 0. ],
[ 0. , 0. , 0. , 1. ]], dtype=float32)
In [15]: eulerAngleXYZ([0,45,0])
Out[15]:
array([[ 0.7071, 0. , -0.7071, 0. ],
[-0. , 1. , 0. , 0. ],
[ 0.7071, -0. , 0.7071, 0. ],
[ 0. , 0. , 0. , 1. ]], dtype=float32)
In [16]: eulerAngleXYZ([0,0,45])
Out[16]:
array([[ 0.7071, 0.7071, 0. , 0. ],
[-0.7071, 0.7071, 0. , 0. ],
[ 0. , -0. , 1. , 0. ],
[ 0. , 0. , 0. , 1. ]], dtype=float32)
In [11]: extractEulerAnglesXYZ(eulerAngleXYZ([45,0,0]))
Out[11]: array([ 45., 0., 0.], dtype=float32)
In [12]: extractEulerAnglesXYZ(eulerAngleXYZ([0,45,0]))
Out[12]: array([ 0., 45., -0.], dtype=float32)
In [13]: extractEulerAnglesXYZ(eulerAngleXYZ([0,0,45]))
Out[13]: array([ 0., 0., 45.], dtype=float32)
https://github.com/g-truc/glm/blob/master/glm/gtx/euler_angles.inl
::
template<typename T>
GLM_FUNC_QUALIFIER mat<4, 4, T, defaultp> eulerAngleXYZ
(
T const & t1,
T const & t2,
T const & t3
)
{
T c1 = glm::cos(-t1);
T c2 = glm::cos(-t2);
T c3 = glm::cos(-t3);
T s1 = glm::sin(-t1);
T s2 = glm::sin(-t2);
T s3 = glm::sin(-t3);
mat<4, 4, T, defaultp> Result;
Result[0][0] = c2 * c3;
Result[0][1] =-c1 * s3 + s1 * s2 * c3;
Result[0][2] = s1 * s3 + c1 * s2 * c3;
Result[0][3] = static_cast<T>(0);
Result[1][0] = c2 * s3;
Result[1][1] = c1 * c3 + s1 * s2 * s3;
Result[1][2] =-s1 * c3 + c1 * s2 * s3;
Result[1][3] = static_cast<T>(0);
Result[2][0] =-s2;
Result[2][1] = s1 * c2;
Result[2][2] = c1 * c2;
Result[2][3] = static_cast<T>(0);
Result[3][0] = static_cast<T>(0);
Result[3][1] = static_cast<T>(0);
Result[3][2] = static_cast<T>(0);
Result[3][3] = static_cast<T>(1);
return Result;
}
"""
    a = np.array(t123, dtype=dtype)  # copy so the caller's array is not modified in place
    a *= unit
t1 = a[0]
t2 = a[1]
t3 = a[2]
    c1 = np.cos(-t1)
    c2 = np.cos(-t2)
    c3 = np.cos(-t3)
    s1 = np.sin(-t1)
    s2 = np.sin(-t2)
    s3 = np.sin(-t3)
    Result = np.eye(4, dtype=dtype)
    Result[0][0] = c2 * c3
    Result[0][1] = -c1 * s3 + s1 * s2 * c3
    Result[0][2] = s1 * s3 + c1 * s2 * c3
    Result[0][3] = 0
    Result[1][0] = c2 * s3
    Result[1][1] = c1 * c3 + s1 * s2 * s3
    Result[1][2] = -s1 * c3 + c1 * s2 * s3
    Result[1][3] = 0
    Result[2][0] = -s2
    Result[2][1] = s1 * c2
    Result[2][2] = c1 * c2
    Result[2][3] = 0
    Result[3][0] = 0
    Result[3][1] = 0
    Result[3][2] = 0
    Result[3][3] = 1
    return Result
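# Editor's sanity-check sketch (mine, not from the original source): the returned
# 4x4 matrix is a homogeneous rotation, so it is orthogonal.
_R = eulerAngleXYZ([30.0, 45.0, 60.0])
assert np.allclose(_R @ _R.T, np.eye(4), atol=1e-5)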
def is_on_cooldown(data):
""" Checks to see if user is on cooldown. Based on Castorr91's Gamble"""
# check if command is on cooldown
cooldown = Parent.IsOnCooldown(ScriptName, CGSettings.Command)
user_cool_down = Parent.IsOnUserCooldown(ScriptName, CGSettings.Command, data.User)
caster = Parent.HasPermission(data.User, "Caster", "")
if (cooldown or user_cool_down) and caster is False and not CGSettings.CasterCD:
if CGSettings.UseCD:
cooldownDuration = Parent.GetCooldownDuration(ScriptName, CGSettings.Command)
userCDD = Parent.GetUserCooldownDuration(ScriptName, CGSettings.Command, data.User)
if cooldownDuration > userCDD:
m_CooldownRemaining = cooldownDuration
message = CGSettings.OnCoolDown.format(data.UserName, m_CooldownRemaining)
SendResp(data, CGSettings.Usage, message)
else:
m_CooldownRemaining = userCDD
message = CGSettings.OnUserCoolDown.format(data.UserName, m_CooldownRemaining)
SendResp(data, CGSettings.Usage, message)
return True
elif (cooldown or user_cool_down) and CGSettings.CasterCD:
if CGSettings.UseCD:
cooldownDuration = Parent.GetCooldownDuration(ScriptName, CGSettings.Command)
userCDD = Parent.GetUserCooldownDuration(ScriptName, CGSettings.Command, data.User)
if cooldownDuration > userCDD:
m_CooldownRemaining = cooldownDuration
message = CGSettings.OnCoolDown.format(data.UserName, m_CooldownRemaining)
SendResp(data, CGSettings.Usage, message)
else:
m_CooldownRemaining = userCDD
message = CGSettings.OnUserCoolDown.format(data.UserName, m_CooldownRemaining)
SendResp(data, CGSettings.Usage, message)
return True
return False | b15180c7e890298cc1949ddfb199b42156ee66d9 | 3,652,478 |
def human_readable_size(num):
"""
To show size as 100K, 100M, 10G instead of
showing in bytes.
"""
for s in reversed(SYMBOLS):
power = SYMBOLS.index(s)+1
if num >= 1024**power:
value = float(num) / (1024**power)
return '%.1f%s' % (value, s)
# if size less than 1024 or human readable not required
return '%s' % num | 3c4ad148bc717b7058e90b3abf5efd67f6d92651 | 3,652,479 |
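# Editor's usage sketch under an assumed symbol table; the real SYMBOLS constant is
# defined elsewhere in the original module, so this is illustration only.
SYMBOLS = ('K', 'M', 'G', 'T', 'P')
print(human_readable_size(2048))         # -> 2.0K
print(human_readable_size(3 * 1024**3))  # -> 3.0G
print(human_readable_size(512))          # -> 512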
def sum_2_level_dict(two_level_dict):
"""Sum all entries in a two level dict
Parameters
----------
two_level_dict : dict
Nested dict
Returns
-------
tot_sum : float
Number of all entries in nested dict
"""
'''tot_sum = 0
for i in two_level_dict:
for j in two_level_dict[i]:
tot_sum += two_level_dict[i][j]
'''
tot_sum = 0
for _, j in two_level_dict.items():
tot_sum += sum(j.values())
return tot_sum | 6b5be015fb84fa20006c11e9a3e0f094a6761e74 | 3,652,480 |
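# Editor's usage sketch (my own example):
_demo = {"residential": {"gas": 10.0, "electricity": 5.0}, "industrial": {"gas": 2.5}}
assert sum_2_level_dict(_demo) == 17.5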
import os
def file_ref(name):
"""Helper function for getting paths to testing spectra."""
file = os.path.join(os.path.dirname(test_analyzer.__file__),
"test_analyzer", name)
return file | 60d6ef96ada77c2f0fe7b1d9e9012fec9f7f415c | 3,652,481 |
def q_values_from_q_func(q_func, num_grid_cells, state_bounds, action_n):
"""Computes q value tensor from a q value function
Args:
q_func (funct): function from state to q value
num_grid_cells (int): number of grid_cells for resulting q value tensor
state_bounds (list of tuples): state bounds for resulting q value
tensor
action_n (int): number of actions in action space
Returns:
np.ndarray: q value tensor
"""
q_values = np.zeros(num_grid_cells + (action_n,))
it = np.nditer(q_values, flags=['multi_index'])
while not it.finished:
qs = q_func(
index_to_state(
num_grid_cells, state_bounds=state_bounds,
discrete=it.multi_index[:-1]
)
)
q_values[it.multi_index] = qs[0]
it.iternext()
return q_values | 2378f2021e16678b75622a23c9e57ba6b2f6d1d7 | 3,652,482 |
import requests
import sys
def list_keypairs(k5token, project_id, region):
"""Summary - list K5 project keypairs
Args:
k5token (TYPE): valid regional domain scoped token
project_id (TYPE): Description
region (TYPE): K5 region
Returns:
TYPE: http response object
Deleted Parameters:
userid(TYPE): K5 user id
"""
try:
serverURL = 'https://compute.' + region + \
'.cloud.global.fujitsu.com/v2/' + project_id + '/os-keypairs'
response = requests.get(serverURL,
headers={
'X-Auth-Token': k5token,
'Content-Type': 'application/json',
'Accept': 'application/json'})
return response
    except Exception:
        return ("\nUnexpected error:", sys.exc_info())
import re
def check_ip(ip):
"""
Check whether the IP is valid or not.
Args:
IP (str): IP to check
Raises:
None
Returns:
bool: True if valid, else False
"""
ip = ip.strip()
if re.match(r'^(?:(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9][0-9]|[0-9])'
                r'(\.(?!$)|$)){4}$', ip):
return True
else:
return False | 2ff9a9262e46546fcb8854edee4b3b18ae1e2cc4 | 3,652,484 |
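# Editor's usage sketch (my own examples):
assert check_ip("192.168.1.1") is True
assert check_ip("256.0.0.1") is False   # octet out of range
assert check_ip("10.0.0") is False      # too few octets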
from typing import Iterator
from typing import Optional
def _stream_lines(blob: bytes) -> Iterator[bytes]:
"""
Split bytes into lines (newline (\\n) character) on demand.
>>> iter = _stream_lines(b"foo\\nbar\\n")
>>> next(iter)
b'foo'
>>> next(iter)
b'bar'
>>> next(iter)
Traceback (most recent call last):
...
StopIteration
>>> iter = _stream_lines(b"\\x00")
>>> next(iter)
b'\\x00'
:param blob: the bytes to split.
:return: a generated list of lines.
"""
start = 0
def _index(needle: bytes) -> Optional[int]:
try:
return blob.index(needle, start)
except ValueError:
return None
line_index = _index(b"\n")
while line_index is not None:
yield blob[start:line_index]
start = line_index + 1
line_index = _index(b"\n")
# Deal with blobs that do not end in a newline.
if start < len(blob):
yield blob[start:] | 8a166af1f765ca9eb70728d4c4bb21c00d7ddbf8 | 3,652,485 |
from typing import Dict
async def fetch_all_organizations(session: ClientSession) -> Dict:
"""Fetch all organizations from organization-catalog."""
url = f"{Config.org_cat_uri()}/organizations"
org_list = await fetch_json_data(url, None, session)
return {org["organizationId"]: org for org in org_list} if org_list else dict() | bf033ed85671214d9282acba3361fbdc1e6d4f6e | 3,652,486 |
from typing import Optional
import numpy as np
import pandas as pd
from tqdm import tqdm
def create_splits_random(df: pd.DataFrame, val_frac: float,
test_frac: float = 0.,
test_split: Optional[set[tuple[str, str]]] = None,
) -> dict[str, list[tuple[str, str]]]:
"""
Args:
df: pd.DataFrame, contains columns ['dataset', 'location', 'label']
each row is a single image
assumes each image is assigned exactly 1 label
val_frac: float, desired fraction of dataset to use for val set
test_frac: float, desired fraction of dataset to use for test set,
must be 0 if test_split is given
test_split: optional set of (dataset, location) tuples to use as test
split
Returns: dict, keys are ['train', 'val', 'test'], values are lists of locs,
where each loc is a tuple (dataset, location)
"""
if test_split is not None:
assert test_frac == 0
train_frac = 1. - val_frac - test_frac
targets = {'train': train_frac, 'val': val_frac, 'test': test_frac}
# merge dataset and location into a single string '<dataset>/<location>'
df['dataset_location'] = df['dataset'] + '/' + df['location']
# create DataFrame of counts. rows = locations, columns = labels
loc_label_counts = (df.groupby(['label', 'dataset_location']).size()
.unstack('label', fill_value=0))
num_locs = len(loc_label_counts)
# label_count: label => number of examples
# loc_count: label => number of locs containing that label
label_count = loc_label_counts.sum()
loc_count = (loc_label_counts > 0).sum()
best_score = np.inf # lower is better
best_splits = None
for _ in tqdm(range(10_000)):
# generate a new split
num_train = int(num_locs * (train_frac + np.random.uniform(-.03, .03)))
if test_frac > 0:
num_val = int(num_locs * (val_frac + np.random.uniform(-.03, .03)))
else:
num_val = num_locs - num_train
permuted_locs = loc_label_counts.index[np.random.permutation(num_locs)]
split_to_locs = {'train': permuted_locs[:num_train],
'val': permuted_locs[num_train:num_train + num_val]}
if test_frac > 0:
split_to_locs['test'] = permuted_locs[num_train + num_val:]
# score the split
score = 0.
for split, locs in split_to_locs.items():
split_df = loc_label_counts.loc[locs]
target = targets[split]
# SSE for # of images per label (with 2x weight)
crop_frac = split_df.sum() / label_count
score += 2 * ((crop_frac - target) ** 2).sum()
# SSE for # of locs per label
loc_frac = (split_df > 0).sum() / loc_count
score += ((loc_frac - target) ** 2).sum()
if score < best_score:
tqdm.write(f'New lowest score: {score}')
best_score = score
best_splits = split_to_locs
assert best_splits is not None
split_to_locs = {
s: sorted(locs.map(lambda x: tuple(x.split('/', maxsplit=1))))
for s, locs in best_splits.items()
}
if test_split is not None:
split_to_locs['test'] = test_split
return split_to_locs | b8410d8672d11c8133b7d6d8dcdead46e668b3aa | 3,652,487 |
def ha_close(close,high,low,open, n=2, fillna=False):
"""Relative Strength Index (RSI)
Compares the magnitude of recent gains and losses over a specified time
period to measure speed and change of price movements of a security. It is
primarily used to attempt to identify overbought or oversold conditions in
the trading of an asset.
https://www.investopedia.com/terms/r/rsi.asp
Args:
close(pandas.Series): dataset 'Close' column.
n(int): n period.
fillna(bool): if True, fill nan values.
Returns:
pandas.Series: New feature generated.
"""
    indicator = Heikin_Ashi(close=df[close], high=df[high], low=df[low], open=df[open], n=n, fillna=fillna)
return indicator.ha_close() | 655ce9be20f56a22cbe32ed0eaf7615d2b891577 | 3,652,488 |
def PLUGIN_ENTRY():
"""
Required plugin entry point for IDAPython Plugins.
"""
return funcref_t() | 5c669321d8fc890b8b352e4041dc75773d191664 | 3,652,489 |
def chao1_var_no_doubletons(singles, chao1):
"""Calculates chao1 variance in absence of doubletons.
From EstimateS manual, equation 7.
chao1 is the estimate of the mean of Chao1 from the same dataset.
"""
s = float(singles)
return s*(s-1)/2 + s*(2*s-1)**2/4 - s**4/(4*chao1) | 6b93743a35c70c9ed5b9f3fc9bece1e9363c5802 | 3,652,490 |
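# Editor's worked example (mine): with 5 singletons and a Chao1 estimate of 20,
# the variance is 5*4/2 + 5*(2*5-1)**2/4 - 5**4/(4*20) = 10 + 101.25 - 7.8125 = 103.4375
assert abs(chao1_var_no_doubletons(5, 20) - 103.4375) < 1e-9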
def inBarrel(chain, index):
"""
Establish if the outer hit of a muon is in the barrel region.
"""
    return abs(chain.muon_outerPositionz[index]) < 108
import logging
import numpy as np
def load_pretrained_embeddings(pretrained_fname: str) -> np.array:
"""
Load float matrix from one file
"""
logging.log(logging.INFO, "Loading pre-trained embedding file: %s" % pretrained_fname)
# TODO: np.loadtxt refuses to work for some reason
# pretrained_embeddings = np.loadtxt(self.args.word_embedding_file, usecols=range(1, word_embedding_size+1))
pretrained_embeddings = []
with open(pretrained_fname, 'r') as f:
for line in f:
embedding = [float(s) for s in line.split()[1:]]
pretrained_embeddings.append(embedding)
pretrained_embeddings = np.array(pretrained_embeddings)
pretrained_embeddings /= np.std(pretrained_embeddings)
return pretrained_embeddings | 7d64851b9e602a7f588abec6fd88908e695f0c5c | 3,652,492 |
from typing import Union
from typing import Callable
from typing import Optional
from typing import Any
def text(message: Text,
default: Text = "",
validate: Union[Validator,
Callable[[Text], bool],
None] = None, # noqa
qmark: Text = DEFAULT_QUESTION_PREFIX,
style: Optional[Style] = None,
path_autocomplete=False,
exec_autocomplete=False,
custom_autocomplete=None,
** kwargs: Any) -> Question:
"""Prompt the user to enter a free text message.
This question type can be used to prompt the user for some text input.
Args:
message: Question text
default: Default value will be returned if the user just hits
enter.
validate: Require the entered value to pass a validation. The
                  value cannot be submitted until the validator accepts
it (e.g. to check minimum password length).
This can either be a function accepting the input and
                  returning a boolean, or a class reference to a
subclass of the prompt toolkit Validator class.
qmark: Question prefix displayed in front of the question.
By default this is a `?`
style: A custom color and style for the question parts. You can
configure colors as well as font types for different elements.
Returns:
Question: Question instance, ready to be prompted (using `.ask()`).
"""
merged_style = merge_styles([DEFAULT_STYLE, style])
validator = build_validator(validate)
def get_prompt_tokens():
return [("class:qmark", qmark),
("class:question", ' {} '.format(message))]
promptArgs = dict({
'style': merged_style,
'validator': validator,
'complete_style': CompleteStyle.READLINE_LIKE,
})
if path_autocomplete:
promptArgs['completer'] = PathCompleter(
expanduser=True, delimiters=' \t\n;,')
elif exec_autocomplete:
promptArgs['completer'] = ExecutableCompleter(delimiters=' \t\n;,')
elif custom_autocomplete is not None and len(custom_autocomplete):
promptArgs['completer'] = WordCompleter(
custom_autocomplete, ignore_case=True, sentence=True)
p = PromptSession(get_prompt_tokens,
**promptArgs,
**kwargs)
p.default_buffer.reset(Document(default))
return Question(p.app) | 74a79a0ce10503cf10841e8370de870c7e42f8e9 | 3,652,493 |
def nav_bar(context):
"""
Define an active tab for the navigation bar
"""
home_active = ''
about_active = ''
detail_active = ''
list_active = ''
logout_active = ''
signup_active = ''
login_active = ''
friends_active = ''
snippets_active = ''
request = context['request']
url_name = resolve(request.path_info).url_name
if url_name == 'home':
home_active = 'active'
elif url_name == 'about':
about_active = 'active'
elif url_name == 'detail':
detail_active = 'active'
elif url_name == 'list':
list_active = 'active'
elif url_name == 'friends':
friends_active = 'active'
elif url_name == 'account_logout':
logout_active = 'active'
elif url_name == 'account_signup':
signup_active = 'active'
elif url_name == 'account_login':
login_active = 'active'
elif url_name == 'snippets' or url_name == 'snippet':
snippets_active = 'active'
return {
'request': request,
'home_active': home_active,
'about_active': about_active,
'detail_active': detail_active,
'list_active': list_active,
'friends_active': friends_active,
'logout_active': logout_active,
'signup_active': signup_active,
'login_active': login_active,
'snippets_active': snippets_active,
} | 77b5a8bb367228cc31a0f2454e494d97a5e2b411 | 3,652,494 |
def setup_models(basedir, name, lc=True):
"""
Setup model container for simulation
Parameters
----------
basedir : string
Base directory
name : string
Name of source component
Returns
-------
models : `~gammalib.GModels()`
Model container
"""
# Initialise model container
models = gammalib.GModels()
# Extract binary component
binaries = gammalib.GModels(basedir+'/1dc/models/model_galactic_binaries.xml')
binary = binaries[name]
# Optionally remove lightcurve
if not lc:
binary.temporal(gammalib.GModelTemporalConst())
# Append binary to model container
models.append(binary)
# Append background model to container
models.extend(gammalib.GModels(basedir+'/1dc/models/model_bkg.xml'))
# Return model container
return models | 8b8db045d5c7b669f579a8f3b74abe204c82c285 | 3,652,495 |
def create_csm(image):
"""
Given an image file create a Community Sensor Model.
Parameters
----------
image : str
The image filename to create a CSM for
Returns
-------
model : object
A CSM sensor model (or None if no associated model is available.)
"""
isd = csmapi.Isd(image)
plugins = csmapi.Plugin.getList()
for plugin in plugins:
num_models = plugin.getNumModels()
for model_index in range(num_models):
model_name = plugin.getModelName(model_index)
if plugin.canModelBeConstructedFromISD(isd, model_name):
return plugin.constructModelFromISD(isd, model_name) | 681c3b5886346e793b26d2e7c801b924ca82b546 | 3,652,496 |
def walk(obj, path='', skiphidden=True):
"""Returns a recursive iterator over all Nodes starting from
findnode(obj, path).
If skiphidden is True (the default) then structure branches starting with
an underscore will be ignored.
"""
node = findnode(obj, path)
return walknode(node, skiphidden) | efd3e10329d7e8832fa33c9425974ea2cd80938c | 3,652,497 |
import os
import requests
def setThermalMode(host, args, session):
"""
Set thermal control mode
@param host: string, the hostname or IP address of the bmc
@param args: contains additional arguments used for setting the thermal
control mode
@param session: the active session to use
@param args.zone: the zone to set the mode on
@param args.mode: the mode to enable
@return: Session object
"""
url = "https://" + host + "/xyz/openbmc_project/control/thermal/" + \
args.zone + "/attr/Current"
# Check args.mode against supported modes using `getThermalMode` output
modes = getThermalMode(host, args, session)
modes = os.linesep.join([m for m in modes.splitlines() if m])
modes = modes.replace("\n", ";").strip()
modesDict = dict(m.split(': ') for m in modes.split(';'))
sModes = ''.join(s for s in modesDict['Supported Modes'] if s not in '[ ]')
if args.mode.casefold() not in \
(m.casefold() for m in sModes.split(',')) or not args.mode:
result = ("Unsupported mode('" + args.mode + "') given, " +
"select a supported mode: \n" +
getThermalMode(host, args, session))
return result
data = '{"data":"' + args.mode + '"}'
try:
res = session.get(url, headers=jsonHeader, verify=False, timeout=30)
except(requests.exceptions.Timeout):
return(connectionErrHandler(args.json, "Timeout", None))
except(requests.exceptions.ConnectionError) as err:
return connectionErrHandler(args.json, "ConnectionError", err)
except(requests.exceptions.RequestException) as err:
return connectionErrHandler(args.json, "RequestException", err)
if (data and res.status_code != 404):
try:
res = session.put(url, headers=jsonHeader,
data=data, verify=False,
timeout=30)
except(requests.exceptions.Timeout):
return(connectionErrHandler(args.json, "Timeout", None))
except(requests.exceptions.ConnectionError) as err:
return connectionErrHandler(args.json, "ConnectionError", err)
except(requests.exceptions.RequestException) as err:
return connectionErrHandler(args.json, "RequestException", err)
if res.status_code == 403:
return "The specified thermal control zone(" + args.zone + ")" + \
" does not exist"
return res.text
else:
return "Setting thermal control mode(" + args.mode + ")" + \
" not supported or operation not available" | 198b5c112e1f217665c78d6b9a8b87551844e073 | 3,652,498 |
def to_string(class_name):
"""
Magic method that is used by the Metaclass created for Itop object.
"""
string = "%s : { " % type(class_name)
    for attribute, value in class_name.__dict__.items():
string += "%s : %s, " % (attribute, value)
string += "}"
return string | a7e155c92c4e62c1f070a474905a7e0c654f45ff | 3,652,499 |