content (string, 35–762k chars) | sha1 (string, 40 chars) | id (int64, 0–3.66M)
---|---|---
import os
import platform
def get_os():
"""
if called in powershell returns "powershell"
if called in cygwin returns "cygwin"
if called in darwin/osx returns "osx"
for linux returns "linux"
"""
env = os.environ
p = platform.system().lower()
terminal = p
operating_system = p
if p == 'windows':
terminal = "powershell"
if 'TERM' in env:
terminal = env['TERM']
if p == 'darwin':
terminal = 'osx'
return terminal | 843dc64f40b50e7adc45f1f4c092550c578cddd3 | 3,655,400 |
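A minimal usage sketch:

print(get_os())  # e.g. "linux" on a Linux box, "powershell" on Windows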
def setup_model_and_optimizer(args):
"""Setup model and optimizer."""
print ("setting up model...")
model = get_model(args)
print ("setting up optimizer...")
optimizer = get_optimizer(model, args)
print ("setting up lr scheduler...")
lr_scheduler = get_learning_rate_scheduler(optimizer, args)
if DEEPSPEED_WRAP and args.deepspeed:
print_rank_0("DeepSpeed is enabled.")
print ("Calling deepspeed.initialize with our model, optimizer and scheduler")
model, optimizer, _, lr_scheduler = DEEPSPEED_WRAP.deepspeed.initialize(
model=model,
optimizer=optimizer,
args=args,
lr_scheduler=lr_scheduler,
mpu=mpu,
dist_init_required=False
)
print ("We've wrapped our model, optimizer and scheduler in DeepSpeed")
if args.load is not None:
print_rank_0("Load checkpoint from " + args.load)
args.iteration = load_checkpoint(model, optimizer, lr_scheduler, args, deepspeed=DEEPSPEED_WRAP and args.deepspeed)
print_rank_0("Checkpoint loaded")
else:
args.iteration = 0
print ("returning our model, optimizer and scheduler")
return model, optimizer, lr_scheduler | 9283ec825b55ff6619ac2ee2f7ac7cce9e4bced7 | 3,655,401 |
def ensure_str(origin, decode=None):
"""
Ensure is string, for display and completion.
Then add double quotes
Note: this method do not handle nil, make sure check (nil)
out of this method.
"""
if origin is None:
return None
if isinstance(origin, str):
return origin
if isinstance(origin, int):
return str(origin)
elif isinstance(origin, list):
return [ensure_str(b) for b in origin]
elif isinstance(origin, bytes):
if decode:
return origin.decode(decode)
return _literal_bytes(origin)
else:
raise Exception(f"Unknown type: {type(origin)}, origin: {origin}") | 0409bc75856b012cf3063d9ed2530c2d7d5bf3e4 | 3,655,402 |
from typing import Union
from typing import Dict
from typing import Any
from typing import Optional
def restore(
collection: str, id: Union[str, int, Dict[str, Any]]
) -> Optional[Dict[str, Any]]:
"""Restrieve cached data from database.
:param collection: The collection to be retrieved. Same name as API commands.
:type collection: str
:param id: The unique identifier for a particular collection. This varies by command.
:type id: Union[str, int]
:return: The retrieved data if exists, else None.
:rtype: Optional[Dict[str, Any]]
"""
db = _get_connection()
if not db:
return None
if not isinstance(id, dict):
id = dict(_id=id)
return db[collection].find_one(id, dict(_id=0)) | a0b2ccb995661b5c3286dee3b3f5f250cb728011 | 3,655,403 |
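A minimal usage sketch; the collection name and id are hypothetical, and _get_connection() is assumed to return a pymongo-style database handle:

cached = restore("block", "000000000019d668")  # hypothetical collection/id
print(cached if cached is not None else "not cached")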
def encode_bits(data, number):
"""Turn bits into n bytes of modulation patterns"""
# 0000 00BA gets encoded as:
# 128 64 32 16 8 4 2 1
# 1 B B 0 1 A A 0
# i.e. a 0 is a short pulse, a 1 is a long pulse
#print("modulate_bits %s (%s)" % (ashex(data), str(number)))
shift = number-2
encoded = []
for i in range(int(number/2)):
bits = (data >> shift) & 0x03
#print(" shift %d bits %d" % (shift, bits))
encoded.append(ENCODER[bits])
shift -= 2
#print(" returns:%s" % ashex(encoded))
return encoded | 0299a30c4835af81e97e518e116a51fa08006999 | 3,655,404 |
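A worked sketch of the pairing described in the comments above; this ENCODER table is an assumption reconstructed from the 1BB01AA0 pattern, not necessarily the module's actual table:

ENCODER = {0b00: 0x88, 0b01: 0x8E, 0b10: 0xE8, 0b11: 0xEE}
# encode_bits(0b00011011, 8) consumes the byte two bits at a time
# (00, 01, 10, 11) and returns [0x88, 0x8E, 0xE8, 0xEE]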
from avalon import io  # io.find_one below is avalon's database API, not the stdlib io module
import os
def compute_session_changes(session, task=None, asset=None, app=None):
"""Compute the changes for a Session object on asset, task or app switch
This does *NOT* update the Session object, but returns the changes
required for a valid update of the Session.
Args:
session (dict): The initial session to compute changes to.
This is required for computing the full Work Directory, as that
also depends on the values that haven't changed.
task (str, Optional): Name of task to switch to.
asset (str or dict, Optional): Name of asset to switch to.
You can also directly provide the Asset dictionary as returned
from the database to avoid an additional query. (optimization)
app (str, Optional): Name of app to switch to.
Returns:
dict: The required changes in the Session dictionary.
"""
changes = dict()
# If no changes, return directly
if not any([task, asset, app]):
return changes
# Get asset document and asset
asset_document = None
if asset:
if isinstance(asset, dict):
# Assume asset database document
asset_document = asset
asset = asset["name"]
else:
# Assume asset name
asset_document = io.find_one({"name": asset,
"type": "asset"})
assert asset_document, "Asset must exist"
    # Detect any changes compared to the session
mapping = {
"AVALON_ASSET": asset,
"AVALON_TASK": task,
"AVALON_APP": app,
}
changes = {key: value for key, value in mapping.items()
if value and value != session.get(key)}
if not changes:
return changes
# Update silo and hierarchy when asset changed
if "AVALON_ASSET" in changes:
# Update silo
changes["AVALON_SILO"] = asset_document.get("silo") or ""
# Update hierarchy
parents = asset_document['data'].get('parents', [])
hierarchy = ""
if len(parents) > 0:
hierarchy = os.path.sep.join(parents)
changes['AVALON_HIERARCHY'] = hierarchy
# Compute work directory (with the temporary changed session so far)
project = io.find_one({"type": "project"})
_session = session.copy()
_session.update(changes)
anatomy = Anatomy(project["name"])
template_data = template_data_from_session(_session)
anatomy_filled = anatomy.format(template_data)
changes["AVALON_WORKDIR"] = anatomy_filled["work"]["folder"]
return changes | 2705a9ef6fe0afbfa375f03c28915c6f84c2211b | 3,655,405 |
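A minimal usage sketch with hypothetical values; the session keys follow the AVALON_* convention used above:

session = {"AVALON_ASSET": "hero", "AVALON_TASK": "modeling", "AVALON_APP": "maya"}
changes = compute_session_changes(session, task="rigging")
# changes now holds {"AVALON_TASK": "rigging"} plus a recomputed "AVALON_WORKDIR"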
from typing import Tuple
import tensorflow as tf
def _tf_get_negs(
all_embed: "tf.Tensor", all_raw: "tf.Tensor", raw_pos: "tf.Tensor", num_neg: int
) -> Tuple["tf.Tensor", "tf.Tensor"]:
"""Get negative examples from given tensor."""
if len(raw_pos.shape) == 3:
batch_size = tf.shape(raw_pos)[0]
seq_length = tf.shape(raw_pos)[1]
else: # len(raw_pos.shape) == 2
batch_size = tf.shape(raw_pos)[0]
seq_length = 1
raw_flat = _tf_make_flat(raw_pos)
total_candidates = tf.shape(all_embed)[0]
all_indices = tf.tile(
tf.expand_dims(tf.range(0, total_candidates, 1), 0),
(batch_size * seq_length, 1),
)
shuffled_indices = tf.transpose(
tf.random.shuffle(tf.transpose(all_indices, (1, 0))), (1, 0)
)
neg_ids = shuffled_indices[:, :num_neg]
bad_negs = _tf_get_bad_mask(raw_flat, all_raw, neg_ids)
if len(raw_pos.shape) == 3:
bad_negs = tf.reshape(bad_negs, (batch_size, seq_length, -1))
neg_embed = _tf_sample_neg(batch_size * seq_length, all_embed, neg_ids)
if len(raw_pos.shape) == 3:
neg_embed = tf.reshape(
neg_embed, (batch_size, seq_length, -1, all_embed.shape[-1])
)
return neg_embed, bad_negs | 9cef1cf3fc869108d400704f8cd90d432382ac2e | 3,655,406 |
import os
def remove(store_config, shardid): # FIXME require config instead
"""Remove a shard from the store.
Args:
store_config: Dict of storage paths to optional attributes.
limit: The dir size limit in bytes, 0 for no limit.
use_folder_tree: Files organized in a folder tree
(always on for fat partitions).
shardid: Id of the shard to be removed.
Raises:
AssertionError: If input not valid.
Example:
import storjlib
id = "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"
store_config = {"path/alpha": None, "path/beta": None}
storjlib.store.manager.remove(store_config, id)
"""
shard_path = find(store_config, shardid)
if shard_path is not None:
_log.info("Removing shard {0} from store.".format(shardid))
return os.remove(shard_path) | a02f686f34707b0cfd940a2990f0a561ba7e33d3 | 3,655,407 |
from typing import Union
from typing import Optional
import sqlite3
import os
def init_db(path: Union[str, 'os.PathLike'] = db_constants.DB_PATH) -> Optional[sqlite3.dbapi2.Connection]:
"""Initialises the DB. Returns a sqlite3 connection,
which will be passed to the db thread.
"""
# TODO: change saving version from float to string
def db_layout(cursor: sqlite3.dbapi2.Cursor) -> None:
c = cursor
# version
c.execute("""
CREATE TABLE IF NOT EXISTS version(version REAL)
""")
c.execute("""INSERT INTO version(version) VALUES(?)""", (db_constants.CURRENT_DB_VERSION,))
log_i("Constructing database layout")
log_d("Database Layout:\n\t{}".format(STRUCTURE_SCRIPT))
c.executescript(STRUCTURE_SCRIPT)
def new_db(p: Union[str, 'os.PathLike'], new: bool = False) -> sqlite3.dbapi2.Connection:
connection = sqlite3.connect(p, check_same_thread=False)
connection.row_factory = sqlite3.Row
if new:
c = connection.cursor()
db_layout(c)
connection.commit()
return connection
if os.path.isfile(path):
conn = new_db(path)
if path == db_constants.DB_PATH and not check_db_version(conn):
return None
else:
create_db_path()
conn = new_db(path, True)
conn.isolation_level = None
conn.execute("PRAGMA foreign_keys = on")
return conn | 5faced34ef7ee8698ed188801efe90c26f05ae16 | 3,655,408 |
import numpy as np
from biopandas.pdb import PandasPdb
def load_pdb(path):
"""
Loads all of the atomic positioning/type arrays from a pdb file.
The arrays can then be transformed into density (or "field") tensors before
being sent through the neural network.
Parameters:
path (str, required): The full path to the pdb file being voxelized.
Returns:
dictionary: A dictionary containing the following arrays from
the pdb file: num_atoms, atom_types, positions, atom_type_set,
xcoords, ycoords, zcoords, residues, residue_set
"""
pdb = PandasPdb().read_pdb(path)
# This just creates a dataframe from the pdb file using biopandas
#print('This is vars',vars(pdb))
pdf = pdb.df['ATOM']
# atomic coordinates
x_coords = pdf['x_coord'].values
y_coords = pdf['y_coord'].values
z_coords = pdf['z_coord'].values
    # stack x, y, z into a (num_atoms, 3) array of coordinates
    positions = np.column_stack((x_coords, y_coords, z_coords))
# names of all the atoms contained in the protein
atom_types = pdf['atom_name'].values
num_atoms = len(atom_types)
atom_type_set = np.unique(atom_types)
num_atom_types = len(atom_type_set)
# residue names
residue_names = pdf['residue_name'].values
residue_set = np.unique(residue_names)
protein_dict = {'x_coords':x_coords, 'y_coords':y_coords, 'z_coords':z_coords,
'positions':positions, 'atom_types':atom_types,
'num_atoms':num_atoms, 'atom_type_set':atom_type_set,
'num_atom_types':num_atom_types, 'residues':residue_names,
'residue_set':residue_set}
# add a value to the dictionary, which is all of the atomic coordinates just
# shifted to the origin
protein_dict = shift_coords(protein_dict)
return protein_dict | aa7fe0f338119b03f00a2acb727608afcd5c1e0d | 3,655,409 |
from typing import List
import os
def validate_workspace(
workspace_option: str, available_paths: List[str] = list(WORKSPACE_PATHS.values())
) -> str:
"""Validate and return workspace.
:param workspace_option: A string of the workspace to validate.
:type workspace_option: string
:param available_paths: A list of the available workspaces.
:type available_paths: list
:returns: A string of the validated workspace.
"""
if workspace_option:
available = any(
os.path.join(os.path.abspath(workspace_option), "").startswith(
os.path.join(os.path.abspath(path), "")
)
for path in available_paths
)
if not available:
raise REANAValidationError(
f'Desired workspace "{workspace_option}" is not valid.\n'
f'Available workspace prefix values are: {", ".join(available_paths)}',
)
return workspace_option | d84e60aa5073333503b62361d77c32c4823a9da8 | 3,655,410 |
def __filter_handler(query_set, model, params):
"""
Handle user-provided filtering requests.
Args:
query_set: SQLAlchemy query set to be filtered.
model: Data model from which given query set is generated.
params: User-provided filter params, with format {"query": [...], ...}.
For query format see "__build_filter_exp" function.
Returns:
A query set with user-provided filters applied.
"""
query = params.get("query")
if query:
filter_exp = __build_filter_exp(query, model)
return query_set.filter(filter_exp)
else:
return query_set | c91b7d55795399f453106d0dda52e80a0c998075 | 3,655,411 |
def split_data_set(data_set, axis, value):
"""
按照给定特征划分数据集,筛选某个特征为指定特征值的数据
(然后因为是按该特征进行划分了,该特征在以后的划分中就不用再出现,所以把该特征在新的列表中移除)
:param data_set: 待划分的数据集,格式如下,每一行是一个list,list最后一个元素就是标签,其他元素是特征
:param axis: 划分数据集的特征(特征的序号)
:param value: 需要返回的特征的值(筛选特征的值要等于此值)
:return:
>>>myDat = [[1, 1, 'yes'], [1, 1, 'yes'], [1, 0, 'no'], [0, 1, 'no'], [0, 1, 'no']]
>>>split_data_set(myDat,0,1)
[[1, 'yes'], [1, 'yes'], [0, 'no']]
>>>split_data_set(myDat,0,0)
[[1, 'no'], [1, 'no']]
"""
# 创建新的list对象
ret_data_set = []
for feature_vec in data_set:
if feature_vec[axis] == value:
# 抽取, 把指定特征从列表中去掉,组成一个新的特征+标签的列表
reduced_feature_vec = feature_vec[:axis]
reduced_feature_vec.extend(feature_vec[axis + 1:])
ret_data_set.append(reduced_feature_vec)
return ret_data_set | f90fdffee3bbee4b4477e371a9ed43094051126a | 3,655,412 |
import ast
import six
import types
import inspect
import textwrap
def get_ast(target_func_or_module):
"""
See :func:``bettertimeit`` for acceptable types.
:returns: an AST for ``target_func_or_module``
"""
if isinstance(target_func_or_module, ast.AST):
return target_func_or_module
if not isinstance(target_func_or_module,
(six.string_types, six.binary_type)):
handled_types = (
types.ModuleType,
types.FunctionType,
getattr(types, "UnboundMethodType", types.MethodType),
types.MethodType,
)
if not isinstance(target_func_or_module, handled_types):
            raise TypeError("Don't know how to handle objects of type '%s'"
                            % type(target_func_or_module))
target_func_or_module = inspect.getsource(target_func_or_module)
target_func_or_module = textwrap.dedent(target_func_or_module)
return ast.parse(target_func_or_module) | 929a8f1b915850c25369edf0dcf0dc8bc2fe16e9 | 3,655,413 |
from typing import List
from typing import TypeVar
T = TypeVar("T")  # generic token type; the stray "from re import T" was an extraction artifact
from typing import Callable
from typing import Tuple
def enumerate_spans(sentence: List[T],
offset: int = 0,
max_span_width: int = None,
min_span_width: int = 1,
filter_function: Callable[[List[T]], bool] = None) -> List[Tuple[int, int]]:
"""
Given a sentence, return all token spans within the sentence. Spans are `inclusive`.
Additionally, you can provide a maximum and minimum span width, which will be used
to exclude spans outside of this range.
Finally, you can provide a function mapping ``List[T] -> bool``, which will
be applied to every span to decide whether that span should be included. This
allows filtering by length, regex matches, pos tags or any Spacy ``Token``
attributes, for example.
Parameters
----------
sentence : ``List[T]``, required.
The sentence to generate spans for. The type is generic, as this function
can be used with strings, or Spacy ``Tokens`` or other sequences.
offset : ``int``, optional (default = 0)
A numeric offset to add to all span start and end indices. This is helpful
if the sentence is part of a larger structure, such as a document, which
the indices need to respect.
max_span_width : ``int``, optional (default = None)
The maximum length of spans which should be included. Defaults to len(sentence).
min_span_width : ``int``, optional (default = 1)
The minimum length of spans which should be included. Defaults to 1.
filter_function : ``Callable[[List[T]], bool]``, optional (default = None)
A function mapping sequences of the passed type T to a boolean value.
If ``True``, the span is included in the returned spans from the
sentence, otherwise it is excluded..
"""
max_span_width = max_span_width or len(sentence)
filter_function = filter_function or (lambda x: True)
spans: List[Tuple[int, int]] = []
for start_index in range(len(sentence)):
last_end_index = min(start_index + max_span_width, len(sentence))
first_end_index = min(start_index + min_span_width - 1, len(sentence))
for end_index in range(first_end_index, last_end_index):
start = offset + start_index
end = offset + end_index
# add 1 to end index because span indices are inclusive.
if filter_function(sentence[slice(start_index, end_index + 1)]):
spans.append((start, end))
return spans | 68df595e4fd55d2b36645660df6fa9198a8d28ef | 3,655,414 |
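A quick usage sketch on a plain token list:

tokens = ["The", "quick", "brown", "fox"]
enumerate_spans(tokens, max_span_width=2)
# -> [(0, 0), (0, 1), (1, 1), (1, 2), (2, 2), (2, 3), (3, 3)]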
import numpy as np
def fixed_mu(mu, data, qty, comp='muAI', beads_2_M=1):
    """Wrapper around fixed_conc that passes the same chemical potential `mu` for every key in `data`."""
    return fixed_conc(mu*np.ones([len(data.keys())]), data, qty, comp=comp,
beads_2_M=beads_2_M) | f7241e8b1534d5d537a1817bce4f019e5a4081a7 | 3,655,415 |
import numpy as np
def error_nrmse(y_true, y_pred, time_axis=0):
    """ Computes the Normalized Root Mean Square Error (NRMSE).
    The NRMSE index is computed separately on each channel.
    Parameters
    ----------
    y_true : np.array
        Array of true values. It must be at least 2D.
    y_pred : np.array
        Array of predicted values. It must be compatible with y_true.
    time_axis : int
        Time axis. All other axes define separate channels.
    Returns
    -------
    NRMSE : np.array
        Array of NRMSE values, one per channel.
    """
SSE = np.mean((y_pred - y_true)**2, axis=time_axis)
RMSE = np.sqrt(SSE)
NRMSE = RMSE/np.std(y_true, axis=time_axis)
return NRMSE | 39461cdf0337c9d681d4247168ba32d7a1cbd364 | 3,655,416 |
import shutil
def rmdir_empty(f):
"""Returns a count of the number of directories it has deleted"""
if not f.is_dir():
return 0
removable = True
result = 0
for i in f.iterdir():
if i.is_dir():
result += rmdir_empty(i)
removable = removable and not i.exists()
else:
removable = removable and (i.name == '.DS_Store')
if removable:
items = list(f.iterdir())
assert not items or items[0].name == '.DS_Store'
print(f)
shutil.rmtree(f)
result += 1
return result | f2dba5bb7e87c395886574ca5f3844a8bab609d9 | 3,655,417 |
from flask import jsonify, make_response
def generic_exception_json_response(code):
    """
    Turns an unhandled exception into a JSON payload to respond to a service call
    """
    payload = {
        "error": "TechnicalException",
        "message": "An unknown error occurred",
        "code": code
    }
resp = make_response(jsonify(payload), code)
resp.headers["Content-type"] = "application/json"
return resp | fc2f0edfc774a56e6b6ccfc8a746b37ad19f6536 | 3,655,418 |
def UnN(X, Z, N, sampling_type, kernel="prod"):
"""Computes block-wise complete U-statistic."""
def fun_block(x, z):
return Un(x, z, kernel=kernel)
return UN(X, Z, N, fun_block, sampling_type=sampling_type) | 962788706d3b4d71a0f213f925e89fd78f220791 | 3,655,419 |
def delete_card(request):
"""Delete card"""
return delete_container_element(request) | 128b521ed89077ebae019942147fc3b4af1a5cdf | 3,655,420 |
import os
import tensorflow as tf
def build_estimator(tf_transform_dir, config, hidden_units=None):
"""Build an estimator for predicting the tipping behavior of taxi riders.
Args:
tf_transform_dir: directory in which the tf-transform model was written
during the preprocessing step.
config: tf.contrib.learn.RunConfig defining the runtime environment for the
estimator (including model_dir).
hidden_units: [int], the layer sizes of the DNN (input layer first)
    Returns:
      Resulting DNNLinearCombinedRegressor.
    """
metadata_dir = os.path.join(tf_transform_dir,
transform_fn_io.TRANSFORMED_METADATA_DIR)
transformed_metadata = metadata_io.read_metadata(metadata_dir)
transformed_feature_spec = transformed_metadata.schema.as_feature_spec()
transformed_feature_spec.pop(transformed_name(LABEL_KEY))
real_valued_columns = [
tf.feature_column.numeric_column(key, shape=())
for key in transformed_names(DENSE_FLOAT_FEATURE_KEYS)
]
categorical_columns = [
tf.feature_column.categorical_column_with_identity(
key, num_buckets=VOCAB_SIZE + OOV_SIZE, default_value=0)
for key in transformed_names(VOCAB_FEATURE_KEYS)
]
categorical_columns += [
tf.feature_column.categorical_column_with_identity(
key, num_buckets=FEATURE_BUCKET_COUNT, default_value=0)
for key in transformed_names(BUCKET_FEATURE_KEYS)
]
categorical_columns += [
tf.feature_column.categorical_column_with_identity(
key, num_buckets=num_buckets, default_value=0)
for key, num_buckets in zip(
transformed_names(CATEGORICAL_FEATURE_KEYS), #
MAX_CATEGORICAL_FEATURE_VALUES)
]
#return tf.estimator.DNNLinearCombinedClassifier(
return tf.estimator.DNNLinearCombinedRegressor(
config=config,
linear_feature_columns=categorical_columns,
dnn_feature_columns=real_valued_columns,
dnn_hidden_units=hidden_units or [100, 70, 50, 25]) | 474aeffb6c44b50a9e81c8db7b340489230a9216 | 3,655,421 |
from numbers import Number
import numpy as np
def get_plot_grid_size(num_plots, fewer_rows=True):
"""
Returns the number of rows and columns ideal for visualizing multiple (identical) plots within a single figure
Parameters
----------
num_plots : uint
Number of identical subplots within a figure
fewer_rows : bool, optional. Default = True
Set to True if the grid should be short and wide or False for tall and narrow
Returns
-------
nrows : uint
Number of rows
ncols : uint
Number of columns
"""
assert isinstance(num_plots, Number), 'num_plots must be a number'
# force integer:
num_plots = int(num_plots)
if num_plots < 1:
raise ValueError('num_plots was less than 0')
if fewer_rows:
nrows = int(np.floor(np.sqrt(num_plots)))
ncols = int(np.ceil(num_plots / nrows))
else:
ncols = int(np.floor(np.sqrt(num_plots)))
nrows = int(np.ceil(num_plots / ncols))
return nrows, ncols | e83f14db347cd679e9e7b0761d928cd563444712 | 3,655,422 |
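A quick usage sketch:

get_plot_grid_size(10)                    # -> (3, 4): short and wide
get_plot_grid_size(10, fewer_rows=False)  # -> (4, 3): tall and narrow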
def check_source(module):
"""
Check that module doesn't have any globals.
Example::
def test_no_global(self):
result, line = check_source(self.module)
self.assertTrue(result, "Make sure no code is outside functions.\\nRow: " + line)
"""
try:
source = module.__file__
except Exception:
        raise Exception('Make sure the code runs successfully')
allowed = [
"import ",
"from ",
"def ",
"class ",
" ",
"\t",
"#",
"if __name__",
"@",
]
with open(source) as file:
for line in file.readlines():
if line.strip() == "":
continue
for prefix in allowed:
if line.startswith(prefix):
break
else:
return (False, line)
return (True, "") | 6bc012892d6ec7bb6788f20a565acac0f6d1c662 | 3,655,423 |
import numpy as np
from scipy.linalg import eigh  # assumption: eigh could equally come from numpy.linalg
def pre_processing(X):
""" Center and sphere data."""
eps = 1e-18
n = X.shape[0]
cX = X - np.mean(X, axis=0) # centering
cov_mat = 1.0/n * np.dot(cX.T, cX)
eigvals, eigvecs = eigh(cov_mat)
D = np.diag(1./np.sqrt(eigvals+eps))
W = np.dot(np.dot(eigvecs, D), eigvecs.T) # whitening matrix
wcX = np.dot(cX, W)
return wcX | 802ce958c6616dcf03de5842249be8480e6a5a7c | 3,655,424 |
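A small sanity sketch: after centering and whitening, the sample covariance of the output is close to the identity:

X = np.random.randn(1000, 3) @ np.array([[2.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.5, 0.0, 0.5]])
wcX = pre_processing(X)
print(np.round(np.cov(wcX, rowvar=False), 2))  # ~ 3x3 identity matrix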
def get_as_tags(bundle_name, extension=None, config="DEFAULT", attrs=""):
"""
Get a list of formatted <script> & <link> tags for the assets in the
named bundle.
:param bundle_name: The name of the bundle
    :param extension: (optional) filter by extension, e.g. "js" or "css"
:param config: (optional) the name of the configuration
:param attrs: (optional) further attributes on the tags
:return: a list of formatted tags as strings
"""
bundle = _get_bundle(bundle_name, extension, config)
return _render_tags(bundle, attrs) | ec54184ff2b13bd4f37de8395276685191535948 | 3,655,425 |
def psql(statement, timeout=30):
"""Execute a statement using the psql client."""
LOG.debug('Sending to local db: {0}'.format(statement))
return execute('psql', '-c', statement, timeout=timeout) | 86eb9775bf4e3da3c18b23ebdf397747947914c9 | 3,655,426 |
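A minimal usage sketch; execute() is assumed to be the module's own subprocess helper:

psql("SELECT datname FROM pg_database;")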
def devpiserver_get_credentials(request):
"""Search request for X-Remote-User header.
Returns a tuple with (X-Remote-User, '') if credentials could be
extracted, or None if no credentials were found.
The first plugin to return credentials is used, the order of plugin
calls is undefined.
"""
if 'X-Remote-User' in request.headers:
remote_user = request.headers['X-Remote-User']
threadlog.info("Found X-Remote-User in request: %s", remote_user)
return remote_user, '' | bc6ccaa52b719c25d14784c758d6b78efeae104d | 3,655,427 |
def vader_sentiment(
full_dataframe,
grading_column_name,
vader_columns=COLUMN_NAMES,
logger=config.LOGGER
):
"""apply vader_sentiment analysis to dataframe
Args:
full_dataframe (:obj:`pandas.DataFrame`): parent dataframe to apply analysis to
grading_column_name (str): column with the data to grade
vader_columns (:obj:`list`. optional): names to map vader results to ['neu', 'pos', 'compound', 'neg']
logger (:obj:`logging.logger`, optional): logging handle
    Returns:
(:obj:`pandas.DataFrame`): updated dataframe with vader sentiment
"""
logger.info('applying vader sentiment analysis to `%s`', grading_column_name)
logger.info('--applying vader_lexicon')
vader_df = map_vader_sentiment(
full_dataframe[grading_column_name],
column_names=vader_columns
)
logger.info('--merging results into original dataframe')
joined_df = full_dataframe.merge(
vader_df,
how='left',
on=grading_column_name
)
return joined_df | 43572857ecc382f800b243ee12e6f3fe3b1f5d5a | 3,655,428 |
import numpy as np
import numpy.linalg as lin
import simtk.openmm as mm  # assumption: mm and unit come from the OpenMM toolkit
from simtk import unit
def _overlayPoints(points1, points2):
"""Given two sets of points, determine the translation and rotation that matches them as closely as possible.
Parameters
----------
points1 (numpy array of simtk.unit.Quantity with units compatible with distance) - reference set of coordinates
points2 (numpy array of simtk.unit.Quantity with units compatible with distance) - set of coordinates to be rotated
Returns
-------
translate2 - vector to translate points2 by in order to center it
rotate - rotation matrix to apply to centered points2 to map it on to points1
center1 - center of points1
Notes
-----
This is based on W. Kabsch, Acta Cryst., A34, pp. 828-829 (1978).
"""
if len(points1) == 0:
return (mm.Vec3(0, 0, 0), np.identity(3), mm.Vec3(0, 0, 0))
if len(points1) == 1:
return (points1[0], np.identity(3), -1*points2[0])
# Compute centroids.
center1 = unit.sum(points1)/float(len(points1))
center2 = unit.sum(points2)/float(len(points2))
# Compute R matrix.
R = np.zeros((3, 3))
for p1, p2 in zip(points1, points2):
x = p1-center1
y = p2-center2
for i in range(3):
for j in range(3):
R[i][j] += y[i]*x[j]
# Use an SVD to compute the rotation matrix.
(u, s, v) = lin.svd(R)
return (-1*center2, np.dot(u, v).transpose(), center1) | c3d1df9569705bcee33e112596e8ab2a332e947e | 3,655,429 |
def return_request(data):
"""
    Arguments:
        data
    Returned when detection is called: list[dist1, dist2, ...] where
        dist = {
            "confidence_score": predicted probability,
            "class": face,
            "bounding_box": [xmin, ymin, xmax, ymax],
            "keypoints": {'left_eye': (x,y), 'right_eye': (x,y), 'nose': (x,y), 'mouth_left': (x,y), 'mouth_right': (x,y)}
        }
    Returned when feature extraction is called: list[dist1, dist2, ...] where
        dist = {
            "feature": feature
        }
    """
contents = []
    try:
        boxs = data['predictions']
        print(type(boxs))
        print(boxs)
        # for box in boxs:
        #     contents.append({
        #         "confidence_score": box[4],
        #         "class": 'face',
        #         "bounding_box": [box[0], box[1], box[2], box[3]]
        #     })
    except KeyError:
        pass
    try:
        features = data['features']
        for feature in features:
            contents.append({
                "feature": feature
            })
    except KeyError:
        pass
return contents | 11887921c89a846ee89bc3cbb79fb385382262fa | 3,655,430 |
def get_recent_messages_simple(e: TextMessageEventObject):
"""
Command to get the most recent messages with default count.
This command has a cooldown of ``Bot.RecentActivity.CooldownSeconds`` seconds.
    This command will get the most recent ``Bot.RecentActivity.DefaultLimitCountLink`` messages without the message
    that called this command.
:param e: message event that called this command
:return: default count of most recent messages with a link to the recent activity page
"""
return get_recent_messages(e, Bot.RecentActivity.DefaultLimitCountLink) | 58d03a4b34254dc532ad6aa53747ea730446cd31 | 3,655,431 |
def parse_systemctl_units(stdout:str, stderr:str, exitcode:int) -> dict:
"""
UNIT LOAD ACTIVE SUB DESCRIPTION
mono-xsp4.service loaded active running LSB: Mono XSP4
motd-news.service loaded inactive dead Message of the Day
● mountkernfs.service masked inactive dead mountkernfs.service
systemd-machine-id-commit.service loaded inactive dead Commit a transient machine-id on disk
● systemd-modules-load.service loaded failed failed Load Kernel Modules
systemd-networkd-resolvconf-update.service loaded inactive dead Update resolvconf for networkd DNS
sysinit.target loaded active active System Initialization
● syslog.target not-found inactive dead syslog.target
time-sync.target loaded active active System Time Synchronized
LOAD = Reflects whether the unit definition was properly loaded.
ACTIVE = The high-level unit activation state, i.e. generalization of SUB.
SUB = The low-level unit activation state, values depend on unit type.
354 loaded units listed.
To show all installed unit files use 'systemctl list-unit-files'.
"""
    if exitcode != 0:
        raise Exception("systemctl failed with exit code {}".format(exitcode))
# split into list of lines
lines = LineList(stdout)
assert isinstance(lines, LineList)
# now we must separate a trailing description.
lineNumbers = lines.getLineNumbersOfEmptyLines()
assert lineNumbers
assert lineNumbers[0] > 0
del lines[lineNumbers[0]:]
# get column split positions
wordPos = [ 0 ] + getPositionsOfWords(lines[0])
table = lines.createDataTableFromColumns(wordPos, bLStrip=True, bRStrip=True, bFirstLineIsHeader=True, columnDefs=[
ColumnDef("MARK", _parseMark),
ColumnDef("UNIT"),
ColumnDef("LOAD"),
ColumnDef("ACTIVE"),
ColumnDef("SUB"),
ColumnDef("DESCRIPTION"),
])
# build output matrix: use service names as keys
ret = {}
for record in table:
key = record[1]
pos = key.rfind(".")
category = key[pos+1:] + "s" # pluralize the category
key = key[:pos]
if category not in ret:
ret[category] = {}
ret[category][key] = record
return ret | d9e7e4c71f418311799345c7dacfb9655912475f | 3,655,432 |
import tensorflow as tf
def resnet_model_fn(is_training, feature, label, data_format, params):
"""Build computation tower (Resnet).
Args:
is_training: true if is training graph.
feature: a Tensor.
label: a Tensor.
data_format: channels_last (NHWC) or channels_first (NCHW).
params: params for the model to consider
Returns:
A tuple with the loss for the tower, the gradients and parameters, and
predictions.
"""
num_layers = params.num_layers
batch_norm_decay = params.batch_norm_decay
batch_norm_epsilon = params.batch_norm_epsilon
weight_decay = params.weight_decay
model = cifar10_with_resnet_model.ResNetCifar10(
num_layers,
batch_norm_decay=batch_norm_decay,
batch_norm_epsilon=batch_norm_epsilon,
is_training=is_training,
data_format=data_format)
logits = model.forward_pass(feature, input_data_format='channels_last')
predictions = {
'classes': tf.argmax(input=logits, axis=1),
'probabilities': tf.nn.softmax(logits)
}
loss = tf.losses.sparse_softmax_cross_entropy(
logits=logits, labels=label)
loss = tf.reduce_mean(loss)
model_params = tf.trainable_variables()
loss += weight_decay * tf.add_n(
[tf.nn.l2_loss(v) for v in model_params])
gradients = tf.gradients(loss, model_params)
return loss, zip(gradients, model_params), predictions | 70f30b4c5b4485ed1c4f362cc7b383cb192c57c4 | 3,655,433 |
import numpy as np
def permutate_touched_latent_class(untouched_classes, class_info_np, gran_lvl_info):
    """Keep the latent classes of the given classes untouched and permute the rest (preserves H(Y))."""
    # collect indices of instances whose class should stay untouched
    untouched_instance_index = []
    for i in untouched_classes:
        index = np.where(class_info_np == i)[0]
        untouched_instance_index.append(index)
    untouched_instance_index_np = np.concatenate(untouched_instance_index)
    # permute the latent classes of the remaining (touched) instances
    my_gran_lvl_info = gran_lvl_info * np.ones(gran_lvl_info.shape)  # copy gran_lvl_info
    touched_index = np.delete(np.arange(my_gran_lvl_info.shape[0]), untouched_instance_index_np, 0)  # exclude untouched indices
    touched_latent_class = my_gran_lvl_info[touched_index]
    my_gran_lvl_info[touched_index] = np.random.permutation(touched_latent_class)
return my_gran_lvl_info.astype(np.int32) | ebae2213508260474a9e9c581f6b42fd81006a22 | 3,655,434 |
import datetime  # the code below uses datetime.datetime, so import the module, not the class
import numpy as np
import pandas as pd
import pytz
def get_interarrival_times(arrival_times, period_start):
"""
Given a list of report dates, it returns the list corresponding to the interrival times.
:param arrival_times: List of arrival times.
:return: List of inter-arrival times.
"""
interarrival_times = []
for position, created_date in enumerate(arrival_times):
if position > 0:
distance = created_date - arrival_times[position - 1]
interarrival_times.append(get_distance_in_hours(distance))
else:
if isinstance(created_date, np.datetime64):
created_date = datetime.datetime.utcfromtimestamp(created_date.tolist() / 1e9)
created_date = pytz.utc.localize(created_date)
distance = get_distance_in_hours(created_date - period_start)
if distance > 0:
interarrival_times.append(distance)
return pd.Series(data=interarrival_times) | b6d345ff73e16d8c7502509ddd36e2a1d7f12252 | 3,655,435 |
def _indexOp(opname):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def wrapper(self, other):
func = getattr(self.view(np.ndarray), opname)
return func(other)
return wrapper | a9bdccca9d0bc1ffa2334132c6cfd4b965b95878 | 3,655,436 |
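A sketch of how such a wrapper is typically bound as class-level operators; the class name is illustrative and numpy is assumed to be imported as np:

class MyIndex(np.ndarray):
    __eq__ = _indexOp('__eq__')
    __lt__ = _indexOp('__lt__')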
import numpy as np
def safe_log(a):
    """
    Return the element-wise log of an array, checking for negative
    array elements and avoiding divide-by-zero errors.
    """
    if np.any(a < 0):
raise ValueError('array contains negative components')
return np.log(a + 1e-12) | 7ac5f01272f4c90110c4949aba8dfb9f783c82b9 | 3,655,437 |
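A quick sketch: zero entries stay finite instead of producing -inf:

safe_log(np.array([0.0, 1.0]))  # -> approx. [-27.63, 0.0], since log(1e-12) ≈ -27.63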
import os
import sys
def parse_treepath(k=23):
"""Parse treepath results"""
results = {}
treepath_file = "treepath.k{}.csv".format(k)
if not os.path.isfile(treepath_file):
print("No treepath results found", file=sys.stderr)
return
    with open(treepath_file, 'r') as f:  # text mode: the lines are processed as str, not bytes
f.readline() # skip header
for line in f:
temp = line.strip().split(",")
sample = temp[0]
strains = []
for strain in temp[5].split(" "):
strains.append(":".join(strain.split(":")[:-1]))
results[sample] = strains
return results | e43ed769ee13813953b92dfe093f169107d6ec72 | 3,655,438 |
from typing import List
from typing import Dict
from functools import reduce
from pprint import pformat
def deduplicate(
timeseries: List[TimeseriesEntry], margins: Dict[str, float] = {},
) -> List[TimeseriesEntry]:
""" Remove duplicates from the supplied `timeseries`.
Currently the deduplication relies on `timemseries` being formatted
according to how data is stored in `Weather.series.values()`. The function
removes duplicates if the start and stop timestamps of consecutive segments
are equal and the values are either equal or, if they are numeric, if their
differences are smaller than a certain margin of error.
Parameters
----------
    timeseries : List[TimeseriesEntry]
        The timeseries to deduplicate.
margins : Dict[str, float]
The margins of error. Can contain one or both of the strings
:code:`"absolute"` and :code:`"relative"` as keys with the numbers
stored under these keys having the following meaning:
- for :code:`absolute` value of the difference between the two
values has to be smaller than or equal to this while
- for :code:`relative` this difference has to be smaller than or
equal to this when interpreted as a fraction of the maximum of
the absolute values of the two compared values.
By default these limits are set to be infinitely big.
Returns
-------
timeseries : List[TimeseriesEntry]
A copy of the input data with duplicate values removed.
Raises
------
ValueError
If the data contains duplicates outside of the allowed margins.
"""
# TODO: Fix the data. If possible add a constraint preventing this from
# happending again alongside the fix.
# This is just here because there's duplicate data (that we know)
# at the end of 2017. The last timestamp of 2017 is duplicated in
# the first timespan of 2018. And unfortunately it's not exactly
# duplicated. The timestamps are equal, but the values are only
# equal within a certain margin.
# TODO: Use [`unique_iter`][0] for unsafe removal, i.e. if both margins
# are infinite. Or find an alternative in [`more-itertools`][1].
# [0]: https://boltons.readthedocs.io/en/latest/iterutils.html
# #boltons.iterutils.unique_iter
# [1]: https://pypi.org/project/more-itertools/
margins = {
**{"absolute": float("inf"), "relative": float("inf")},
**margins,
}
multiples = [
run
for run in reduce(runs, enumerate(timeseries), [[]])
if len(run) > 1
]
compressed = [compress(m, margins) for m in multiples]
errors = [c for c in compressed if len(c) > 1]
if errors:
raise ValueError(
"Found duplicate timestamps while retrieving data:\n{}".format(
pformat(errors)
)
)
compressed.reverse()
result = timeseries.copy()
for c in compressed:
result[c[0][0]] = (c[0][1],)
return result | de61b31250c5fd4317becfb4bca1d582d4b7e465 | 3,655,439 |
import os
import textwrap
import time
def build_windows_subsystem(profile, make_program):
""" The AutotoolsDeps can be used also in pure Makefiles, if the makefiles follow
the Autotools conventions
"""
# FIXME: cygwin in CI (my local machine works) seems broken for path with spaces
client = TestClient(path_with_spaces=False)
client.run("new hello/0.1 --template=cmake_lib")
# TODO: Test Windows subsystems in CMake, at least msys is broken
os.rename(os.path.join(client.current_folder, "test_package"),
os.path.join(client.current_folder, "test_package2"))
client.save({"profile": profile})
client.run("create . --profile=profile")
main = gen_function_cpp(name="main", includes=["hello"], calls=["hello"])
makefile = gen_makefile(apps=["app"])
conanfile = textwrap.dedent("""
from conans import ConanFile
from conan.tools.gnu import AutotoolsToolchain, Autotools, AutotoolsDeps
class TestConan(ConanFile):
requires = "hello/0.1"
settings = "os", "compiler", "arch", "build_type"
exports_sources = "Makefile"
generators = "AutotoolsDeps", "AutotoolsToolchain"
def build(self):
autotools = Autotools(self)
autotools.make()
""")
client.save({"app.cpp": main,
"Makefile": makefile,
"conanfile.py": conanfile,
"profile": profile}, clean_first=True)
client.run("install . --profile=profile")
cmd = environment_wrap_command(["conanbuildenv",
"conanautotoolstoolchain",
"conanautotoolsdeps"], make_program, cwd=client.current_folder)
client.run_command(cmd)
client.run_command("app")
# TODO: fill compiler version when ready
check_exe_run(client.out, "main", "gcc", None, "Release", "x86_64", None)
assert "hello/0.1: Hello World Release!" in client.out
client.save({"app.cpp": gen_function_cpp(name="main", msg="main2",
includes=["hello"], calls=["hello"])})
# Make sure it is newer
t = time.time() + 1
touch(os.path.join(client.current_folder, "app.cpp"), (t, t))
client.run("build .")
client.run_command("app")
# TODO: fill compiler version when ready
check_exe_run(client.out, "main2", "gcc", None, "Release", "x86_64", None, cxx11_abi=0)
assert "hello/0.1: Hello World Release!" in client.out
return client.out | 0e8b1902c8eff0d902af991b4299b954518b75d5 | 3,655,440 |
def make_batch_keys(args, extras=None):
"""depending on the args, different data are used by the listener."""
batch_keys = ['objects', 'tokens', 'target_pos'] # all models use these
if extras is not None:
batch_keys += extras
if args.obj_cls_alpha > 0:
batch_keys.append('class_labels')
if args.lang_cls_alpha > 0:
batch_keys.append('target_class')
return batch_keys | a86c2a5cff58f811a67cbdd5eed322c86aa3e0e0 | 3,655,441 |
import collections
DEBUG = False  # module-level debug flag; the stray "from re import DEBUG" was an extraction artifact
def init_dic_OP(universe_woH, dic_atname2genericname, resname):
"""Initialize the dictionary of result (`dic_op`).
Initialize also the dictionary of correspondance
between residue number (resid) and its index in dic_OP (`dic_corresp_numres_index_dic_OP`).
To calculate the error, we need to first average over the
trajectory, then over residues.
Thus in dic_OP, we want for each key a list of lists, for example:
OrderedDict([
(('C1', 'H11'), [[], [], ..., [], []]),
(('C1', 'H12'), [[], ..., []]),
...
])
Thus each sublist will contain OPs for one residue.
e.g. ('C1', 'H11'), [[OP res 1 frame1, OP res1 frame2, ...],
[OP res 2 frame1, OP res2 frame2, ...], ...]
Parameters
----------
universe_woH : MDAnalysis universe instance
This is the universe *without* hydrogen.
dic_atname2genericname: ordered dictionary
dict of correspondance between generic H names and PDB names.
resname: str
The name of the lipid.
Returns
-------
ordered dictionary
Each key of this dict is a couple carbon/H, and at the beginning it
contains an empty list.
dictionary
contains the correspondance between the residue number and
its index in dic_op
"""
dic_OP = collections.OrderedDict()
# Get list of residue id from the lipid name
    all_resids = universe_woH.select_atoms(f"resname {resname}").residues.resids
nb_residus = len(all_resids)
# Each key contain a list which contains a number of list equals to
# the number of residus
for key in dic_atname2genericname:
dic_OP[key] = [[] for _ in range(nb_residus)]
# We also need the correspondance between residue number (resid) and
# its index in dic_OP.
# the index will always start at 0 and goes to the number of residus = range(nb_residus)
dic_corresp_numres_index_dic_OP = dict(zip(all_resids, range(nb_residus)))
if DEBUG:
print("Initial dic_OP:", dic_OP)
print("dic_corresp_numres_index_dic_OP:", dic_corresp_numres_index_dic_OP)
return dic_OP, dic_corresp_numres_index_dic_OP | ce981df06717387286171b172c0400e42747dbfc | 3,655,442 |
from typing import List
def magic_split(value: str, sep=",", open="(<", close=")>"):
"""Split the value according to the given separator, but keeps together elements
within the given separator. Useful to split C++ signature function since type names
can contain special characters...
Examples:
- magic_split("a,b,c", sep=",") -> ["a", "b", "c"]
- magic_split("a<b,c>,d(e,<k,c>),p) -> ["a<b,c>", "d(e,<k,c>)", "p"]
Args:
value: String to split.
sep: Separator to use.
open: List of opening characters.
close: List of closing characters. Order must match open.
Returns: The list of split parts from value.
"""
i, j = 0, 0
s: List[str] = []
r = []
while i < len(value):
j = i + 1
while j < len(value):
c = value[j]
# Separator found and the stack is empty:
if c == sep and not s:
break
# Check close/open:
if c in open:
s.append(open.index(c))
elif c in close:
# The stack might be empty if the separator is also an opening element:
if not s and sep in open and j + 1 == len(value):
pass
else:
t = s.pop()
if t != close.index(c):
raise ValueError(
"Found closing element {} for opening element {}.".format(
c, open[t]
)
)
j += 1
r.append(value[i:j])
i = j + 1
return r | 9f152c9cfa82778dcf5277e3342b5cab25818a55 | 3,655,443 |
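A quick sketch on a C++-style parameter list:

magic_split("std::map<int,std::string>,float,std::pair<int,int>")
# -> ['std::map<int,std::string>', 'float', 'std::pair<int,int>']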
def update_group_annotation(name=None, annotation_name=None, x_pos=None, y_pos=None, angle=None, opacity=None,
canvas=None, z_order=None, network=None, base_url=DEFAULT_BASE_URL):
"""Update Group Annotation
Updates a group annotation, changing the given properties.
Args:
name (UUID or str): Single UUID or str naming group object
annotation_name (UUID or str): Name of annotation by UUID or name
x_pos (int): X position in pixels from left; default is center of current view
y_pos (int): Y position in pixels from top; default is center of current view
angle (float): Angle of text orientation; default is 0.0 (horizontal)
canvas (str): Canvas to display annotation, i.e., foreground (default) or background
z_order (int): Arrangement order specified by number (larger values are in front of smaller values); default is 0
network (SUID or str or None): Name or SUID of the network. Default is the "current" network active in Cytoscape.
base_url (str): Ignore unless you need to specify a custom domain,
port or version to connect to the CyREST API. Default is http://localhost:1234
and the latest version of the CyREST API supported by this version of py4cytoscape.
Returns:
dict: A named list of annotation properties, including UUID
Raises:
CyError: if invalid name
requests.exceptions.HTTPError: if can't connect to Cytoscape or Cytoscape returns an error
Examples:
>>> update_group_annotation(annotation_name='Group 1', angle=180)
{'canvas': 'foreground', 'rotation': '180.0', 'name': 'Group 1', 'x': '2450.0', 'y': '1883.0', 'z': '0', 'type': 'org.cytoscape.view.presentation.annotations.GroupAnnotation', 'uuid': 'b9bf3184-3c5a-4e8b-9651-4bc4403af158', 'memberUUIDs': 'bb3061c5-d8d5-4fca-ac5c-9b7bf8fb9fd0,32f89c1d-e987-4867-9b8a-787aaac6e165,ec73aad8-b00b-4f4d-9361-a4b93f70c8f8'}
>>> update_group_annotation(name='2c0a77f8-a6d0-450d-b6ee-1bfe3c8f8aea', annotation_name=group_uuid, x_pos=101, y_pos=201, angle=180, canvas='foreground')
{'canvas': 'foreground', 'rotation': '180.0', 'name': 'Group 1', 'x': '101.0', 'y': '201.0', 'z': '0', 'type': 'org.cytoscape.view.presentation.annotations.GroupAnnotation', 'uuid': '2c0a77f8-a6d0-450d-b6ee-1bfe3c8f8aea', 'memberUUIDs': '8872c2f6-42ad-4b6a-8fb9-1d1b13da504d,2c830227-7f6a-4e58-bbef-2070f1b5a603,8d04e34d-86b8-486f-9927-581184cbe03e'}
"""
cmd_string, net_suid = _build_base_cmd_string('annotation update group', network, base_url) # a good start
cmd_string += _get_annotation_name_cmd_string(annotation_name, 'Must provide the UUID or name of group')
# x and y position
cmd_string += _get_x_y_pos_cmd_string(x_pos, y_pos, net_suid, base_url)
# optional params
cmd_string += _get_angle_cmd_string(angle)
cmd_string += _get_name_cmd_string(name, network, base_url)
cmd_string += _get_canvas_cmd_string(canvas)
cmd_string += _get_z_order_cmd_string(z_order)
# execute command
res = commands.commands_post(cmd_string, base_url=base_url)
return res | fd515728ee3a3dece381bb65e2c6816b9c96b41e | 3,655,444 |
import urllib
def _extract_properties(properties_str):
"""Return a dictionary of properties from a string in the format
${key1}={value1}&${key2}={value2}...&${keyn}={valuen}
"""
d = {}
kv_pairs = properties_str.split("&")
for entry in kv_pairs:
pair = entry.split("=")
key = urllib.parse.unquote(pair[0]).lstrip("$")
value = urllib.parse.unquote(pair[1])
d[key] = value
return d | 4f22cae8cbc2dd5b73e6498d5f8e6d10e184f91c | 3,655,445 |
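A quick sketch with a URL-encoded key/value string:

_extract_properties("%24key1=value%201&%24key2=value2")
# -> {'key1': 'value 1', 'key2': 'value2'}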
from typing import Any
def first_fail_second_succeed(_: Any, context: Any) -> str:
""" Simulate Etherscan saying for the first time 'wait', but for the second time 'success'. """
context.status_code = 200
try:
if first_fail_second_succeed.called: # type: ignore
return '{ "status": "1", "result" : "Pass - Verified", "message" : "" }'
except AttributeError: # first time
pass
first_fail_second_succeed.called = True # type: ignore
return '{ "status": "0", "result" : "wait for a moment", "message" : "" }' | 5feb3188bdee2d0d758584709df13dc876c37391 | 3,655,446 |
def get_ipsw_url(device, ios_version, build):
"""Get URL of IPSW by specifying device and iOS version."""
json_data = fw_utils.get_json_data(device, "ipsw")
if build is None:
build = fw_utils.get_build_id(json_data, ios_version, "ipsw")
fw_url = fw_utils.get_firmware_url(json_data, build)
if fw_url is None:
print("[w] could not get IPSW url, exiting...")
return fw_url | 75b9d85d93b03b1ebda681aeb51ac1c9b0a30474 | 3,655,447 |
from typing import Any
def escape_parameter(value: Any) -> str:
"""
Escape a query parameter.
"""
if value == "*":
return value
if isinstance(value, str):
value = value.replace("'", "''")
return f"'{value}'"
if isinstance(value, bytes):
value = value.decode("utf-8")
return f"'{value}'"
if isinstance(value, bool):
return "TRUE" if value else "FALSE"
if isinstance(value, (int, float)):
return str(value)
return f"'{value}'" | 00b706681b002a3226874f04e74acbb67d54d12e | 3,655,448 |
def Get_Query(Fq):
""" Get_Query
"""
Q = ""
EoF = False
Ok = False
while True:
l = Fq.readline()
if ("--" in l) :
# skip line
continue
elif l=="":
EoF=True
break
else:
Q += l
if ";" in Q:
Ok = True
break
return EoF, Ok, Q | a1850799f7c35e13a5b61ba8ebbed5d49afc08df | 3,655,449 |
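A typical read-loop sketch; the file name is hypothetical:

with open("queries.sql") as fq:
    while True:
        eof, ok, q = Get_Query(fq)
        if ok:
            print(q)
        if eof:
            break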
from urllib.parse import unquote_plus, urlparse
def get_path_segments(url):
"""
Return a list of path segments from a `url` string. This list may be empty.
"""
path = unquote_plus(urlparse(url).path)
segments = [seg for seg in path.split("/") if seg]
if len(segments) <= 1:
segments = []
return segments | fe8daff2269d617516a22f7a2fddc54bd76c5025 | 3,655,450 |
import typing
from os import getenv
from pathlib import Path
def get_config_file(c: typing.Union[str, ConfigFile, None]) -> typing.Optional[ConfigFile]:
"""
Checks if the given argument is a file or a configFile and returns a loaded configFile else returns None
"""
if c is None:
# See if there's a config file in the current directory where Python is being run from
current_location_config = Path("flytekit.config")
if current_location_config.exists():
logger.info(f"Using configuration from Python process root {current_location_config.absolute()}")
return ConfigFile(str(current_location_config.absolute()))
# If not, see if there's a config in the user's home directory
home_dir_config = Path(Path.home(), ".flyte", "config") # _default_config_file_name in main.py
if home_dir_config.exists():
logger.info(f"Using configuration from home directory {home_dir_config.absolute()}")
return ConfigFile(str(home_dir_config.absolute()))
# If not, see if the env var that flytectl sandbox tells the user to set is set,
# or see if there's something in the default home directory location
flytectl_path = Path(Path.home(), ".flyte", "config.yaml")
flytectl_path_from_env = getenv(FLYTECTL_CONFIG_ENV_VAR, None)
if flytectl_path_from_env:
flytectl_path = Path(flytectl_path_from_env)
if flytectl_path.exists():
logger.info(f"Using flytectl/YAML config {flytectl_path.absolute()}")
return ConfigFile(str(flytectl_path.absolute()))
# If not, then return None and let caller handle
return None
if isinstance(c, str):
return ConfigFile(c)
return c | 6e176167a81bccaa0e0f4570c918fcb32a406edb | 3,655,451 |
import logging
from logging import Logger
from logging.handlers import TimedRotatingFileHandler
"""
Produces logger with given message tag
which will write logs to the console and file stored in LOG_FILE_PATH directory
:param tag: tag for messages of the logger
:return: logger object
"""
logger = logging.getLogger(tag)
logger.setLevel(logging.DEBUG)
# create console handler which logs even debug messages
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.DEBUG)
# create file handler which logs info messages
file_handler = TimedRotatingFileHandler(LOG_FILE_PATH,
when='midnight', interval=1, backupCount=1)
file_handler.setLevel(logging.INFO)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(levelname)s - ' + tag + ': %(message)s')
console_handler.setFormatter(formatter)
file_handler.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(console_handler)
logger.addHandler(file_handler)
return logger | baebbd8b6724a00c52a730897a22d7df3092e94d | 3,655,452 |
from flask import redirect
def get_index():
"""Redirects the index to /form """
return redirect("/form") | e52323397156a5a112e1d6b5d619136ad0fea3f0 | 3,655,453 |
def _register_network(network_id: str, chain_name: str):
"""Register a network.
"""
network = factory.create_network(network_id, chain_name)
cache.infra.set_network(network)
# Inform.
utils.log(f"registered {network.name_raw} - metadata")
return network | 8e9b84670057974a724df1387f4a8cf9fc886a56 | 3,655,454 |
from pathlib import Path
def file(base_path, other_path):
"""
Returns a single file
"""
return [[Path(base_path), Path(other_path)]] | 3482041757b38929a58d7173731e84a915225809 | 3,655,455 |
import io
def get_md_resource(file_path):
"""Read the file and parse into an XML tree.
Parameters
----------
file_path : str
Path of the file to read.
Returns
-------
etree.ElementTree
XML tree of the resource on disk.
"""
namespaces = Namespaces().get_namespaces(keys=('gmd', 'gmi'))
with io.open(file_path, mode='r', encoding='utf-8') as f:
data = f.read().encode('utf-8')
data = etree.fromstring(data)
mdelem = data.find('.//' + util.nspath_eval(
'gmd:MD_Metadata', namespaces))
if mdelem is None:
mdelem = data.find(
'.//' + util.nspath_eval('gmi:MI_Metadata', namespaces))
if mdelem is None and data.tag in ['{http://www.isotc211.org/2005/gmd}MD_Metadata',
'{http://www.isotc211.org/2005/gmi}MI_Metadata']:
mdelem = data
return mdelem | 809ea7cef3c9191db9589e0eacbba4016c2e9893 | 3,655,456 |
def fmin_style(sfmin):
"""convert sfmin to style"""
return Struct(
is_valid=good(sfmin.is_valid, True),
has_valid_parameters=good(sfmin.has_valid_parameters, True),
has_accurate_covar=good(sfmin.has_accurate_covar, True),
has_posdef_covar=good(sfmin.has_posdef_covar, True),
has_made_posdef_covar=good(sfmin.has_made_posdef_covar, False),
hesse_failed=good(sfmin.hesse_failed, False),
has_covariance=good(sfmin.has_covariance, True),
is_above_max_edm=good(sfmin.is_above_max_edm, False),
has_reached_call_limit=caution(sfmin.has_reached_call_limit, False),
) | 44ecba0a25c38a5a61cdba7750a6b8ad53d78c3d | 3,655,457 |
def xirr(cashflows,guess=0.1):
"""
Calculate the Internal Rate of Return of a series of cashflows at irregular intervals.
Arguments
---------
* cashflows: a list object in which each element is a tuple of the form (date, amount), where date is a python datetime.date object and amount is an integer or floating point number. Cash outflows (investments) are represented with negative amounts, and cash inflows (returns) are positive amounts.
* guess (optional, default = 0.1): a guess at the solution to be used as a starting point for the numerical solution.
Returns
--------
* Returns the IRR as a single value
Notes
----------------
* The Internal Rate of Return (IRR) is the discount rate at which the Net Present Value (NPV) of a series of cash flows is equal to zero. The NPV of the series of cash flows is determined using the xnpv function in this module. The discount rate at which NPV equals zero is found using the secant method of numerical solution.
* This function is equivalent to the Microsoft Excel function of the same name.
* For users that do not have the scipy module installed, there is an alternate version (commented out) that uses the secant_method function defined in the module rather than the scipy.optimize module's numerical solver. Both use the same method of calculation so there should be no difference in performance, but the secant_method function does not fail gracefully in cases where there is no solution, so the scipy.optimize.newton version is preferred.
_irr = xirr( [ (date(2010, 12, 29), -10000),
(date(2012, 1, 25), 20),
(date(2012, 3, 8), 10100)] )
"""
val = -666
try:
val = optimize.newton(lambda r: xnpv(r,cashflows),guess)
except:
print("Failed to converge after, returning: -666")
return val | a6adbd091fd5a742c7b27f0816021c2e8499c42f | 3,655,458 |
def check_rt_druid_fields(rt_table_columns, druid_columns):
"""
对比rt的字段,和druid物理表字段的区别
:param rt_table_columns: rt的字段转换为druid中字段后的字段信息
:param druid_columns: druid物理表字段
:return: (append_fields, bad_fields),需变更增加的字段 和 有类型修改的字段
"""
append_fields, bad_fields = [], []
for key, value in rt_table_columns.items():
col_name, col_type = key.lower(), value.lower()
        if druid_columns.get(col_name):  # .get avoids a KeyError for columns missing from druid
# 再对比类型
druid_col_type = druid_columns[col_name]
ok = (
(col_type == druid_col_type)
or (col_type == STRING and druid_col_type == VARCHAR)
or (col_type == LONG and druid_col_type == BIGINT)
)
if not ok:
bad_fields.append({col_name: f"difference between rt and druid({col_type} != {druid_col_type})"})
else:
append_fields.append({FIELD_NAME: col_name, FIELD_TYPE: col_type})
return append_fields, bad_fields | 1c60f49e4316cf78f1396689a55d9a1c71123fe8 | 3,655,459 |
import numexpr as ne  # ne.evaluate below is numexpr's API; "from operator import ne" was an extraction artifact
def is_stuck(a, b, eta):
""" Check if the ricci flow is stuck. """
return ne.evaluate("a-b<eta/50").all() | 53f4bb934cc48d2890289fda3fdb3574d5f6aa4c | 3,655,460 |
def make_map(mapping):
"""
Takes a config.yml mapping, and returns a dict of mappers.
"""
# TODO: Is this the best place for this? Should it be a @staticmethod,
# or even part of its own class?
fieldmap = {}
for field, config in mapping.items():
        if isinstance(config, str):
# Default case: Map directly from spreadsheet
fieldmap[field] = Map({"field": mapping[field]})
else:
# Complex case!
classname = map_type[config.get("type", "map")]
fieldmap[field] = classname(mapping[field])
return fieldmap | e388bb3789690e443fd17814071e4c24faa12d90 | 3,655,461 |
from typing import Sized
from itertools import chain
import numpy as np
import pandas as pd
def stable_seasoal_filter(time_series: Sized, freq: int):
"""
Стабильный сезонный фильтр для ряда.
:param time_series: временной ряд
:param freq: частота расчета среднего значения
:return: значения сезонной составляющей
"""
length = len(time_series)
if length < freq:
raise ValueError(f'Length of time series is less than freq ({length} < {freq}')
if not isinstance(freq, int):
        raise TypeError('freq must be an integer')
if freq < 1:
raise ValueError(f'freq must be greater than zero (actually is {freq})')
values = time_series.values if isinstance(time_series, pd.DataFrame) else time_series
seasonal = list()
for i in range(freq):
seasonal_values = [values[i + j * freq] for j in range(length) if i + j * freq < length]
seasonal.append(np.mean(seasonal_values))
seasonals = [seasonal for i in range(length)]
return pd.DataFrame([i for i in chain(*seasonals)][:length]) | fb4997b637d5229ee7f7a645ce19bbd5fcbab0bc | 3,655,462 |
import os
import tensorflow as tf
def load_dataset(dataset, batch_size=512):
"""Load dataset with given dataset name.
Args:
dataset (str): name of the dataset, it has to be amazoncat-13k, amazoncat-14k,
eurlex-4.3k or rcv1-2k
batch_size (int): batch size of tf dataset
Returns:
(tf.dataset, tf.dataset, int, int, int, int): training dataset, testing dataset,
number of training data, number of testing data,
number of features, number of labels
"""
if dataset not in ['amazoncat-13k', 'amazoncat-14k', 'eurlex-4.3k', 'rcv1-2k']:
        raise ValueError(
            'dataset has to be amazoncat-13k, amazoncat-14k, eurlex-4.3k or rcv1-2k')
# Download dataset
downloader.dataset(task='extreme')
path_to_train = None
path_to_test = None
if dataset == 'amazoncat-13k':
path_to_train = os.path.join('dataset', 'AmazonCat-13K', 'train.txt')
path_to_test = os.path.join('dataset', 'AmazonCat-13K', 'test.txt')
elif dataset == 'amazoncat-14k':
path_to_train = os.path.join('dataset', 'AmazonCat-14K', 'train.txt')
path_to_test = os.path.join('dataset', 'AmazonCat-14K', 'test.txt')
elif dataset == 'eurlex-4.3k':
path_to_train = os.path.join('dataset', 'EURLex-4.3K', 'train.txt')
path_to_test = os.path.join('dataset', 'EURLex-4.3K', 'test.txt')
elif dataset == 'rcv1-2k':
path_to_train = os.path.join('dataset', 'RCV1-2K', 'train.txt')
path_to_test = os.path.join('dataset', 'RCV1-2K', 'test.txt')
assert path_to_train is not None and path_to_test is not None
num_train, num_test, num_features, num_labels = obtain_dataset_info(
path_to_train, path_to_test)
ds_train = tf.data.TextLineDataset(path_to_train)
ds_train = ds_train.skip(1).map(lambda x: tf_function(x, num_features=num_features, num_labels=num_labels)).batch(batch_size)
ds_test = tf.data.TextLineDataset(path_to_test)
ds_test = ds_test.skip(1).map(lambda x: tf_function(x, num_features=num_features, num_labels=num_labels)).batch(batch_size)
return ds_train, ds_test, num_train, num_test, num_features, num_labels | f62f7bd4681811e86e075fa9c9439352bb5bd56e | 3,655,463 |
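# Hedged usage sketch: load the RCV1-2K benchmark and report its shape (the
# downloader helper and dataset layout come from the surrounding module).
ds_train, ds_test, n_train, n_test, n_feat, n_lab = load_dataset(
    'rcv1-2k', batch_size=256)
print(f'{n_train} train / {n_test} test rows, {n_feat} features, {n_lab} labels')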
def make_str_lst_unc_val(id, luv):
"""
make_str_lst_unc_val(id, luv)
Make a formatted string from an ID string and a list of uncertain values.
Input
-----
id A number or a string that will be output as a string.
luv A list of DTSA-II UncertainValue2 items. These will be printed
as comma-delimited pairs with 6 digits following the decimal.
Return
------
A string with comma-delimited values with the ID and mean and uncertainty
for each item in the list. This is suitable for writing output to a .csv
file.
Example:
--------
import dtsa2.jmGen as jmg
import gov.nist.microanalysis.Utility as epu
nmZnO1 = 40.1
uvOKa1 = epu.UncertainValue2(0.269157,0.000126)
uvZnLa1 = epu.UncertainValue2(0.259251,9.4e-05)
uvSiKa1 = epu.UncertainValue2(0.654561,8.4e-05)
l_uvals = [uvOKa1, uvZnLa1, uvSiKa1]
    out = jmg.make_str_lst_unc_val(nmZnO1, l_uvals)
print(out)
1> 40.1, 0.269157, 0.000126, 0.259251, 0.000094, 0.654561, 0.000084
"""
lv = len(luv)
i = 0
rv = "%s, " % (id)
for uv in luv:
rc = round(uv.doubleValue(), 6)
uc = round(uv.uncertainty(), 6)
if i == lv-1:
rv += "%g, %.6f" % (rc, uc)
else:
rv += "%g, %.6f, " % (rc, uc)
i += 1
return(rv) | c65b9bb0c6539e21746a06f7a864acebc2bade03 | 3,655,464 |
import numpy as np
from matplotlib.patches import Polygon
def plot_faces(ax, coordinates, meta, st):
"""plot the faces"""
for s in st.faces:
# check that this face isnt in the cut region
def t_param_difference(v1, v2):
return abs(meta["t"][v1] - meta["t"][v2])
if all(all(t_param_difference(v1, v2) < 2 for v2 in s) for v1 in s):
pts = np.array([coordinates[v] for v in s])
pts = np.array([nearest(np.max(pts, 0), p) for p in pts])
center = np.mean(pts, 0)
pts = (pts - center) / 1.8 + center
color = (0, 0, 1, .5)
if meta["s_type"][s] == (2, 1):
color = (1, 0, 0, .5)
p = Polygon(pts, closed=False, color=color)
ax.add_patch(p) | a7eef2d209f7c15d8ba232b25d20e4c751075013 | 3,655,465 |
import typing
def translate_null_strings_to_blanks(d: typing.Dict) -> typing.Dict:
"""Map over a dict and translate any null string values into ' '.
Leave everything else as is. This is needed because you cannot add TableCell
objects with only a null string or the client crashes.
:param Dict d: dict of item values.
    :rtype: Dict
"""
# Beware: locally defined function.
def translate_nulls(s):
if s == "":
return " "
return s
new_d = {k: translate_nulls(v) for k, v in d.items()}
return new_d | 1a6cfe2f8449d042eb01774054cddde08ba56f8c | 3,655,466 |
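# Minimal usage sketch: empty strings become single blanks, everything else
# passes through unchanged.
row = {"name": "widget", "notes": "", "qty": 3}
print(translate_null_strings_to_blanks(row))
# -> {'name': 'widget', 'notes': ' ', 'qty': 3}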
import json
def HttpResponseRest(request, data):
"""
Return an Http response into the correct output format (JSON, XML or HTML),
according of the request.format parameters.
Format is automatically added when using the
:class:`igdectk.rest.restmiddleware.IGdecTkRestMiddleware` and views decorators.
"""
if request.format == Format.JSON:
encoded = json.dumps(data, cls=ComplexEncoder)
return HttpResponse(encoded, content_type=Format.JSON.content_type)
elif request.format == Format.HTML:
return HttpResponse(data)
elif request.format == Format.XML:
encoded = igdectk.xmlio.dumps(data)
return HttpResponse(encoded, content_type=Format.XML.content_type)
elif request.format == Format.TEXT:
return HttpResponse(data, content_type=Format.TEXT.content_type)
else:
return None | 56682e808dcb9778ea47218d48fb74612ac44b5d | 3,655,467 |
def build_server_update_fn(model_fn, server_optimizer_fn, server_state_type,
model_weights_type):
"""Builds a `tff.tf_computation` that updates `ServerState`.
Args:
model_fn: A no-arg function that returns a `tff.learning.TrainableModel`.
server_optimizer_fn: A no-arg function that returns a
`tf.keras.optimizers.Optimizer`.
server_state_type: type_signature of server state.
model_weights_type: type_signature of model weights.
Returns:
A `tff.tf_computation` that updates `ServerState`.
"""
@tff.tf_computation(server_state_type, model_weights_type.trainable)
def server_update_tf(server_state, model_delta):
"""Updates the `server_state`.
Args:
server_state: The `ServerState`.
model_delta: The model difference from clients.
Returns:
The updated `ServerState`.
"""
model = model_fn()
server_optimizer = server_optimizer_fn()
# Create optimizer variables so we have a place to assign the optimizer's
# state.
server_optimizer_vars = _create_optimizer_vars(model, server_optimizer)
return server_update(model, server_optimizer, server_optimizer_vars,
server_state, model_delta)
return server_update_tf | c0b8285a5d12c40157172d3b48a49cc5306a567b | 3,655,468 |
def madgraph_tarball_filename(physics):
"""Returns the basename of a MadGraph tarball for the given physics"""
# Madgraph tarball filenames do not have a part number associated with them; overwrite it
return svj_filename("step0_GRIDPACK", Physics(physics, part=None)).replace(
".root", ".tar.xz"
) | a0a8bacb5aed0317b5c0fd8fb3de5382c98e267d | 3,655,469 |
def _mk_cmd(verb, code, payload, dest_id, **kwargs) -> Command:
"""A convenience function, to cope with a change to the Command class."""
return Command.from_attrs(verb, dest_id, code, payload, **kwargs) | afd5804937a55d235fef45358ee12088755f9dc9 | 3,655,470 |
def getobjname(item):
"""return obj name or blank """
try:
objname = item.Name
    except BadEPFieldError:
objname = ' '
return objname | f8875b6e9c9ed2b76affe39db583c091257865d8 | 3,655,471 |
def process_fire_data(filename=None, fire=None, and_save=False, timezone='Asia/Bangkok', to_drop=True):
""" Add datetime, drop duplicate data and remove uncessary columns.
"""
if filename:
fire = pd.read_csv(filename)
# add datetime
fire = add_datetime_fire(fire, timezone)
# drop duplicate data
print('before drop', fire.shape)
# sort values by brightness
try:
# for MODIS file
fire = fire.sort_values(
['datetime', 'lat_km', 'long_km', 'brightness'], ascending=False)
except BaseException:
# for VIIRS
fire = fire.sort_values(
['datetime', 'lat_km', 'long_km', 'bright_ti4'], ascending=False)
if to_drop:
fire = fire.drop_duplicates(['datetime', 'lat_km', 'long_km'])
# drop unncessary columns
try:
columns_to_drop = [
'acq_date',
'satellite',
'instrument',
'version',
'daynight',
'bright_t31',
'type']
columns_to_drop = [s for s in columns_to_drop if s in fire.columns]
fire = fire.drop(columns_to_drop, axis=1)
except BaseException:
columns_to_drop = [
'acq_date',
'satellite',
'instrument',
'version',
'daynight',
'bright_ti5',
'type']
columns_to_drop = [s for s in columns_to_drop if s in fire.columns]
fire = fire.drop(columns_to_drop, axis=1)
fire = fire.sort_values('datetime')
fire = fire.set_index('datetime')
# remove the data before '2002-07-04' because there is only one satellite
fire = fire.loc['2002-07-04':]
print('after drop', fire.shape)
if and_save:
fire.to_csv(filename, index=False)
else:
return fire | 767bb77db2b3815a5646f185b72727aec74ee8d8 | 3,655,472 |
def create_controllable_source(source, control, loop, sleep):
"""Makes an observable controllable to handle backpressure
    This function takes an observable as input and makes it controllable by
    executing it in a dedicated worker thread. This allows the emission of
    the items to be regulated independently of the asyncio event loop.
    Args:
        - source: An observable emitting the source items.
        - control: [Optional] The control observable emitting delay items in seconds.
        - sleep: the sleep function to use. Needed only for testing.
Returns:
An observable similar to the source observable, with emission being
controlled by the control observable.
"""
if control is not None:
typed_control = control.pipe(
ops.observe_on(NewThreadScheduler()),
ops.map(ControlItem),
)
scheduled_source = source.pipe(
ops.subscribe_on(NewThreadScheduler()),
ops.merge(typed_control),
ops.map(lambda i: control_sync(i, sleep)),
ops.filter(lambda i: i is not ControlItem),
ops.observe_on(AsyncIOThreadSafeScheduler(loop)),
)
else:
scheduled_source = source.pipe(
ops.subscribe_on(NewThreadScheduler()),
ops.observe_on(AsyncIOThreadSafeScheduler(loop)),
)
return scheduled_source | 2092de1aaeace275b2fea2945e8d30f529309874 | 3,655,473 |
def getE5():
"""
Returns the e5
Args:
"""
return E5.get() | 35526332b957628a6aa3fd90f7104731749e10ed | 3,655,474 |
def triangulate(pts_subset):
"""
This function encapsulates the whole triangulation algorithm into four
steps. The function takes as input a list of points. Each point is of the
form [x, y], where x and y are the coordinates of the point.
Step 1) The list of points is split into groups. Each group has exactly
two or three points.
Step 2) For each group of two point, a single edge is generated. For each
group of three points, three edges forming a triangle are
generated. These are the 'primitive' triangulations.
Step 3) The primitive triangulations are paired into groups.
Step 4) The groups are then recursively merged until there is only a
single triangulation of all points remaining.
Parameters
----------
pts_subset : list
A list of points with the form [ [x1, y1], [x2, y2], ..., [xn, yn] ]
The first element of each list represents the x-coordinate, the second
entry the y-coordinate.
Returns
-------
out : list
List with a single element. The TriangulationEdges class object with
        the completed Delaunay triangulation of the input points.
See TriangulationEdges docstring for further info.
"""
split_pts = split_list.groups_of_3(pts_subset)
primitives = make_primitives(split_pts)
groups = [primitives[i:i+2] for i in range(0, len(primitives), 2)]
groups = recursive_group_merge(groups)
return groups[0][0] | 55e145a44303409a4e1ede7f14e4193c06efd769 | 3,655,475 |
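# Hedged usage sketch: triangulate the four corners of a unit square
# (split_list and the other helpers come from the surrounding package).
pts = [[0, 0], [1, 0], [0, 1], [1, 1]]
edges = triangulate(pts)
# edges is a TriangulationEdges object holding the Delaunay triangulation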
def get_session(region, default_bucket):
"""Gets the sagemaker session based on the region.
Args:
region: the aws region to start the session
default_bucket: the bucket to use for storing the artifacts
Returns:
        `sagemaker.session.Session` instance
"""
boto_session = boto3.Session(region_name=region)
sagemaker_client = boto_session.client("sagemaker")
runtime_client = boto_session.client("sagemaker-runtime")
return sagemaker.session.Session(
boto_session=boto_session,
sagemaker_client=sagemaker_client,
sagemaker_runtime_client=runtime_client,
default_bucket=default_bucket,
) | 1bfbea7aeb30f33772c1b748580f0776463203a4 | 3,655,476 |
import vtk
def intp_sc(x, points):
"""
SCurve spline based interpolation
args:
x (list) : t coordinate list
points (list) : xyz coordinate input points
returns:
x (relative coordinate point list)
o (xyz coordinate points list, resplined)
"""
sc = vtk.vtkSCurveSpline()
for i in points:
sc.AddPoint(i[0], i[1])
o = []
for i in x:
o.append(sc.Evaluate(i))
return x, o | 13df2e19c91ff0469ce68467e9e4df36c0e4831b | 3,655,477 |
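# Hedged usage sketch: resample three control points onto a finer grid via
# VTK's S-curve spline.
xs = [0.0, 0.25, 0.5, 0.75, 1.0]
_, ys = intp_sc(xs, [(0.0, 0.0), (0.5, 1.0), (1.0, 0.0)])
# ys holds the spline evaluated at every t in xs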
def backend():
"""Publicly accessible method
for determining the current backend.
# Returns
String, the name of the backend PyEddl is currently using.
# Example
```python
>>> eddl.backend.backend()
'eddl'
```
"""
return _BACKEND | b811dd6a760006e572aa02fc246fdf72ac7e608c | 3,655,478 |
import json
def query_collection_mycollections():
"""
Query Content API Collection with access token.
"""
access_token = request.args.get("access_token", None)
if access_token is not None and access_token != '':
# Construct an Authorization header with the value of 'Bearer <access token>'
headers = {
"Accept": "application/json",
"Authorization": "Bearer " + access_token
}
url = APP_CONFIG['CONTENTAPI_COLLECTIONS_URL'] + 'mycollections'
r = s.get(url, headers=headers, verify=(app.config['SSLVERIFY'] == 'True'))
if r.status_code in (400,500):
# Handle known errors
result = r.json()
return jsonify(result)
elif r.status_code == 200:
result = r.json()
params = {
'access_token': access_token,
'endpoint_path': '/mycollections',
'mycollections_results': json.dumps(result, indent=2),
'mycollections_results_obj': result
}
return render_template('mycollections.html', **params)
else:
# Handle unknown error
return (r.text, r.status_code, r.headers.items())
else:
return "access_token not specified" | 98b8a75ea515255fde327d11c959f5e9b6d9ea43 | 3,655,479 |
def xmlbuildmanual() -> __xml_etree:
"""
    Returns an empty xml ElementTree obj to build/work with xml data
    Assign the output to a variable
This is using the native xml library via etree shipped with the python standard library.
For more information on the xml.etree api, visit: https://docs.python.org/3/library/xml.etree.elementtree.html#module-xml.etree.ElementTree
"""
return __xml_etree | e08d83aca4b140c2e289b5173e8877e3c3e5fee1 | 3,655,480 |
def graclus_cluster(row, col, weight=None, num_nodes=None):
"""A greedy clustering algorithm of picking an unmarked vertex and matching
it with one its unmarked neighbors (that maximizes its edge weight).
Args:
row (LongTensor): Source nodes.
col (LongTensor): Target nodes.
weight (Tensor, optional): Edge weights. (default: :obj:`None`)
num_nodes (int, optional): The number of nodes. (default: :obj:`None`)
Examples::
>>> row = torch.LongTensor([0, 1, 1, 2])
>>> col = torch.LongTensor([1, 0, 2, 1])
>>> weight = torch.Tensor([1, 1, 1, 1])
>>> cluster = graclus_cluster(row, col, weight)
"""
num_nodes = row.max().item() + 1 if num_nodes is None else num_nodes
if row.is_cuda: # pragma: no cover
row, col = sort_row(row, col)
else:
row, col = randperm(row, col)
row, col = randperm_sort_row(row, col, num_nodes)
row, col = remove_self_loops(row, col)
cluster = row.new_empty((num_nodes, ))
graclus(cluster, row, col, weight)
return cluster | c586ffd325697302a2413e613a75fe4302741af6 | 3,655,481 |
def _get_serve_tf_examples_fn(model, tf_transform_output):
"""Returns a function that parses a serialized tf.Example."""
model.tft_layer = tf_transform_output.transform_features_layer()
@tf.function
def serve_tf_examples_fn(serialized_tf_examples):
"""Returns the output to be used in the serving signature."""
feature_spec = tf_transform_output.raw_feature_spec()
feature_spec.pop(_LABEL_KEY)
parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)
transformed_features = model.tft_layer(parsed_features)
return model(transformed_features)
return serve_tf_examples_fn | 801bfde2b72823b2ba7b8329f080774ca9aa536f | 3,655,482 |
import decimal
def get_profitable_change(day_candle):
""" Get the potential daily profitable price change in pips.
If prices rise enough, we have: close_bid - open_ask (> 0), buy.
If prices fall enough, we have: close_ask - open_bid (< 0), sell.
if prices stay relatively still, we don't buy or sell. It's 0.
Args:
day_candle: candles.Candle object representing a daily candle.
Returns:
profitable_change: Decimal. The profitable rate change described
above, in two decimal places.
"""
multiplier = day_candle.instrument.multiplier
change = 0
if day_candle.close_bid > day_candle.open_ask:
change = multiplier * (day_candle.close_bid - day_candle.open_ask)
elif day_candle.close_ask < day_candle.open_bid:
change = multiplier * (day_candle.close_ask - day_candle.open_bid)
return decimal.Decimal(change).quantize(TWO_PLACES) | 94adc63d984e7797590a2dd3eb33c8d98b09c76e | 3,655,483 |
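# Hedged usage sketch with stand-in candle objects; assumes the module
# constant TWO_PLACES = decimal.Decimal('0.01').
from types import SimpleNamespace
candle = SimpleNamespace(
    instrument=SimpleNamespace(multiplier=10000),
    open_ask=1.1005, close_bid=1.1025, open_bid=1.1003, close_ask=1.1027)
print(get_profitable_change(candle))  # Decimal('20.00'): prices rose, a buy day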
import os
from pathlib import Path
import glob
import re
def load_all_functions(path, tool, factorize=True, agents_quantities=False, rewards_only=False, f_only=False):
""" Loads all results of parameter synthesis from *path* folder into two maps - f list of rational functions for each property, and rewards list of rational functions for each reward
Args:
path (string): file name regex
factorize (bool): if true it will factorise polynomial results
rewards_only (bool): if true it parse only rewards
f_only (bool): if true it will parse only standard properties
agents_quantities (list of numbers or False): of population sizes to be used, if False, the whole path used
        tool (string): the tool that produced the output (PRISM/STORM)
Returns:
(f,reward), where
f: dictionary N -> list of rational functions for each property
rewards: dictionary N -> list of rational functions for each reward
"""
## Setting the current directory
default_directory = os.getcwd()
if not Path(path).is_absolute():
if tool.lower().startswith("p"):
os.chdir(prism_results)
elif tool.lower().startswith("s"):
os.chdir(storm_results)
else:
print("Selected tool unsupported.")
return ({}, {})
f = {}
rewards = {}
# print(str(path))
new_dir = os.getcwd()
if not glob.glob(str(path)):
if not Path(path).is_absolute():
os.chdir(default_directory)
print("No files match the pattern " + os.path.join(new_dir, path))
return ({}, {})
no_files = True
## Choosing files with the given pattern
for functions_file in glob.glob(str(path)):
try:
population_size = int(re.findall(r'\d+', functions_file)[0])
except IndexError:
population_size = 0
## Parsing only selected agents quantities
if agents_quantities:
if population_size not in agents_quantities:
continue
else:
no_files = False
print("parsing ", os.path.join(os.getcwd(), functions_file))
# print(os.getcwd(), file)
with open(functions_file, "r") as file:
i = -1
here = ""
f[population_size] = []
rewards[population_size] = []
## PARSING PRISM/STORM OUTPUT
            line_index = 0
            for line in file:
                ## detect the tool from the first line of the output if not given
                if tool == "unknown" and line_index == 0:
                    if line.lower().startswith("prism"):
                        tool = "prism"
                    elif line.lower().startswith("storm"):
                        tool = "storm"
                    else:
                        print("Tool not recognised!!")
                if line.startswith('Parametric model checking:') or line.startswith('Model checking property'):
i = i + 1
here = ""
## STORM check if rewards
if "R[exp]" in line:
here = "r"
## PRISM check if rewards
if line.startswith('Parametric model checking: R'):
here = "r"
if i >= 0 and line.startswith('Result'):
                    ## PARSE THE EXPRESSION
# print("line:", line)
if tool.lower().startswith("p"):
line = line.split(":")[2]
elif tool.lower().startswith("s"):
line = line.split(":")[1]
## CONVERT THE EXPRESSION TO PYTHON FORMAT
line = line.replace("{", "")
line = line.replace("}", "")
## PUTS "* " BEFORE EVERY WORD (VARIABLE)
line = re.sub(r'([a-z|A-Z]+)', r'* \1', line)
# line = line.replace("p", "* p")
# line = line.replace("q", "* q")
line = line.replace("**", "*")
line = line.replace("* *", "*")
line = line.replace("* *", "*")
line = line.replace("+ *", "+")
line = line.replace("^", "**")
line = line.replace(" ", "")
line = line.replace("*|", "|")
line = line.replace("|*", "|")
line = line.replace("|", "/")
line = line.replace("(*", "(")
line = line.replace("+*", "+")
line = line.replace("-*", "-")
if line.startswith('*'):
line = line[1:]
if line[-1] == "\n":
line = line[:-1]
if here == "r" and not f_only:
# print(f"pop: {N}, formula: {i+1}", line)
if factorize:
try:
rewards[population_size].append(str(factor(line)))
except TypeError:
print("Error while factorising rewards, used not factorised instead")
rewards[population_size].append(line)
# os.chdir(cwd)
else:
rewards[population_size].append(line)
elif not here == "r" and not rewards_only:
# print(f"pop: {N}, formula: {i+1}", line[:-1])
if factorize:
try:
f[population_size].append(str(factor(line)))
except TypeError:
print(f"Error while factorising polynomial f[{population_size}][{i + 1}], used not factorised instead")
f[population_size].append(line)
# os.chdir(cwd)
else:
f[population_size].append(line)
line_index = line_index + 1
os.chdir(default_directory)
if no_files and agents_quantities:
print("No files match the pattern " + os.path.join(new_dir, path) + " and restriction " + str(agents_quantities))
return (f, rewards) | 262fdd5ad796889028e61541cc0347b3263407c8 | 3,655,484 |
import numpy as np
def run_epoch():
"""Runs one epoch and returns reward averaged over test episodes"""
rewards = []
for _ in range(NUM_EPIS_TRAIN):
run_episode(for_training=True)
for _ in range(NUM_EPIS_TEST):
rewards.append(run_episode(for_training=False))
return np.mean(np.array(rewards)) | d9f5e33e00eaeedfdff7ebb4d3a7731d679beff1 | 3,655,485 |
import torch.nn as nn
def _max_pool(heat, kernel=3):
"""
NCHW
do max pooling operation
"""
# print("heat.shape: ", heat.shape) # default: torch.Size([1, 1, 152, 272])
pad = (kernel - 1) // 2
h_max = nn.functional.max_pool2d(heat, (kernel, kernel), stride=1, padding=pad)
# print("h_max.shape: ", h_max.shape) # default: torch.Size([1, 1, 152, 272])
    keep = (h_max == heat).float()  # convert the boolean Tensor to a float Tensor
# print("keep.shape: ", keep.shape, "keep:\n", keep)
return heat * keep | 68edc979d78ee2fc11f94efd2dfb5e9140762a0e | 3,655,486 |
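# Hedged usage sketch: keypoint-style non-maximum suppression on a toy
# heatmap; only local maxima within each 3x3 window keep their value.
import torch
heat = torch.rand(1, 1, 8, 8)
peaks = _max_pool(heat)  # same shape, non-peaks zeroed out
assert peaks.shape == heat.shape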
def getBool(string):
"""
Stub function, set PshellServer.py softlink to PshellServer-full.py for full functionality
"""
return (True) | de7f6a4b124b6a1f6e1b878daf01cc14e5d5eb08 | 3,655,487 |
from collections import Counter

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
def multi_dists(
continuous,
categorical,
count_cutoff,
summary_type,
ax=None,
stripplot=False,
order="ascending",
newline_counts=False,
xtick_rotation=45,
xtick_ha="right",
seaborn_kwargs={},
stripplot_kwargs={},
):
"""
Compare the distributions of a continuous variable when grouped
by a categorical one.
Parameters
----------
continuous : Series
continuous values to plot
categorical : Series
categorical values (groups) to plot
    count_cutoff : int
        minimum number of samples per group to include
summary_type : string, "box" or "violin"
type of summary plot to make
ax : MatPlotLib axis
axis to plot in (will create new one if not provided)
stripplot : boolean
whether or not to plot the raw values
order : "ascending", "descending", or list of categories
how to sort categories in the plot
newline_counts : boolean
whether to add category counts as a separate line
in the axis labels
xtick_rotation : float
how much to rotate the xtick labels by (in degree)
xtick_ha : string
horizontal alignment of the xtick labels
seaborn_kwargs : dictionary
additional arguments to pass to Seaborn boxplot/violinplot
stripplot_kwargs : dictionary
additional arguments to pass to Seaborn stripplot (if stripplot=True)
Returns
-------
ax : MatPlotLib axis
axis with plot data
"""
if ax is None:
ax = plt.subplot(111)
# remove NaNs and convert continuous
continuous = pd.Series(continuous).dropna()
categorical = pd.Series(categorical).dropna().astype(str)
# series names
continuous_name = str(continuous.name)
categorical_name = str(categorical.name)
# handle cases where series names are missing or identical
if continuous_name is None:
continuous_name = "continuous"
if categorical_name is None:
categorical_name = "categorical"
if continuous_name == categorical_name:
continuous_name += "_continuous"
categorical_name += "_categorical"
merged = pd.concat([continuous, categorical], axis=1, join="inner")
merged.columns = [continuous_name, categorical_name]
# counts per category, with cutoff
categorical_counts = Counter(merged[categorical_name])
merged["count"] = merged[categorical_name].apply(categorical_counts.get)
merged = merged[merged["count"] >= count_cutoff]
merged_sorted = (
merged.groupby([categorical_name])[continuous_name]
.aggregate(np.median)
.reset_index()
)
# sort categories by mean
if order == "ascending":
merged_sorted = merged_sorted.sort_values(
continuous_name, ascending=True
)
order = merged_sorted[continuous_name]
elif order == "descending":
merged_sorted = merged_sorted.sort_values(
continuous_name, ascending=False
)
order = merged_sorted[continuous_name]
else:
merged_sorted["continuous_idx"] = merged_sorted[
categorical_name
].apply(order.index)
merged_sorted = merged_sorted.sort_values(
"continuous_idx", ascending=True
)
# recompute category counts after applying cutoff
counts = merged_sorted[categorical_name].apply(categorical_counts.get)
counts = counts.astype(str)
# x-axis labels with counts
if newline_counts:
x_labels = merged_sorted[categorical_name] + "\n(" + counts + ")"
else:
x_labels = merged_sorted[categorical_name] + " (" + counts + ")"
if summary_type == "violin":
sns.violinplot(
x=categorical_name,
y=continuous_name,
data=merged,
order=merged_sorted[categorical_name],
inner=None,
ax=ax,
**seaborn_kwargs,
)
elif summary_type == "box":
sns.boxplot(
x=categorical_name,
y=continuous_name,
data=merged,
order=merged_sorted[categorical_name],
notch=True,
ax=ax,
**seaborn_kwargs,
)
if stripplot:
sns.stripplot(
x=categorical_name,
y=continuous_name,
data=merged,
order=merged_sorted[categorical_name],
size=2,
alpha=0.5,
linewidth=1,
jitter=0.1,
edgecolor="black",
ax=ax,
**stripplot_kwargs,
)
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.set_xticklabels(x_labels, rotation=xtick_rotation, ha=xtick_ha)
return ax | 84302dfc359c62c67ef0757ea7c0841e5292b19d | 3,655,488 |
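# Hedged usage sketch with toy data: three groups, box summaries, raw
# points overlaid.
rng = np.random.default_rng(0)
values = pd.Series(rng.normal(size=90), name="score")
groups = pd.Series(["a", "b", "c"] * 30, name="group")
ax = multi_dists(values, groups, count_cutoff=5, summary_type="box",
                 stripplot=True)
plt.show()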
from pathlib import PurePath

import xdg
def expand_xdg(xdg_var: str, path: str) -> PurePath:
"""Return the value of an XDG variable prepended to path.
This function expands an XDG variable, and then concatenates to it the
given path. The XDG variable name can be passed both uppercase or
lowercase, and either with or without the 'XDG_' prefix.
"""
xdg_var = xdg_var if xdg_var.startswith('XDG_') else 'XDG_' + xdg_var
return getattr(xdg, xdg_var.upper()) / path | 13b8885a08c384d29636c5f5070a86e05c30b43a | 3,655,489 |
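# Hedged usage sketch: both spellings resolve against the same XDG base
# directory (the resulting paths depend on the environment).
expand_xdg("cache_home", "myapp/state.json")      # $XDG_CACHE_HOME/myapp/state.json
expand_xdg("XDG_CONFIG_HOME", "myapp/settings")   # $XDG_CONFIG_HOME/myapp/settings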
def follow_index(request):
"""Просмотр подписок"""
users = request.user.follower.all()
paginator = Paginator(users, 3)
page_number = request.GET.get('page')
page = paginator.get_page(page_number)
return render(request, 'recipes/follow_index.html',
{'page': page, 'paginator': paginator}) | 00851ccffde0e76544e5732d0f651f6e00bc45d4 | 3,655,490 |
def test_drawcounties_cornbelt():
"""draw counties on the map"""
mp = MapPlot(sector="cornbelt", title="Counties", nocaption=True)
mp.drawcounties()
return mp.fig | 8bba5c33c374bf3f4e892bc984b72de22c97e38d | 3,655,491 |
import torch
def atomic_degrees(mol: IndigoObject) -> dict:
"""Get the number of atoms direct neighbors (except implicit hydrogens) in a molecule.
Args:
IndigoObject: molecule object
Returns:
dict: key - feature name, value - torch.tensor of atomic degrees
"""
degrees = []
for atom in mol.iterateAtoms():
degrees.append(atom.degree())
return {"degrees": torch.tensor(degrees).unsqueeze(1)} | 114431622574985bd016276a7a809560c896e1bc | 3,655,492 |
import numpy as np
from xarray import DataArray
def hotspots(raster, kernel, x='x', y='y'):
"""Identify statistically significant hot spots and cold spots in an input
raster. To be a statistically significant hot spot, a feature will have a
high value and be surrounded by other features with high values as well.
Neighborhood of a feature defined by the input kernel, which currently
support a shape of circle, annulus, or custom kernel.
The result should be a raster with the following 7 values:
90 for 90% confidence high value cluster
95 for 95% confidence high value cluster
99 for 99% confidence high value cluster
-90 for 90% confidence low value cluster
-95 for 95% confidence low value cluster
-99 for 99% confidence low value cluster
0 for no significance
Parameters
----------
raster: xarray.DataArray
Input raster image with shape=(height, width)
kernel: Kernel
Returns
-------
hotspots: xarray.DataArray
"""
# validate raster
if not isinstance(raster, DataArray):
raise TypeError("`raster` must be instance of DataArray")
if raster.ndim != 2:
raise ValueError("`raster` must be 2D")
if not (issubclass(raster.values.dtype.type, np.integer) or
issubclass(raster.values.dtype.type, np.floating)):
raise ValueError(
"`raster` must be an array of integers or float")
raster_dims = raster.dims
if raster_dims != (y, x):
raise ValueError("raster.coords should be named as coordinates:"
"(%s, %s)".format(y, x))
# apply kernel to raster values
mean_array = convolve_2d(raster.values, kernel / kernel.sum(), pad=True)
# calculate z-scores
global_mean = np.nanmean(raster.values)
global_std = np.nanstd(raster.values)
if global_std == 0:
raise ZeroDivisionError("Standard deviation "
"of the input raster values is 0.")
z_array = (mean_array - global_mean) / global_std
out = _hotspots(z_array)
result = DataArray(out,
coords=raster.coords,
dims=raster.dims,
attrs=raster.attrs)
return result | ab091924bb36576e338c38d752df3f856de331cb | 3,655,493 |
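# Hedged usage sketch: a flat raster with one bright block should surface as
# a high-confidence hot spot under a small all-ones kernel (convolve_2d and
# _hotspots come from the surrounding module).
data = np.zeros((20, 20))
data[8:12, 8:12] = 100.0
spots = hotspots(DataArray(data, dims=("y", "x")), np.ones((3, 3)))
# spots holds values from {0, +/-90, +/-95, +/-99}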
import os
def create_from_image(input_path, output_path=None,
fitimage="FITDEF",
compress="NORMAL",
zoom=0, # %; 0=100%
size=Point(0, 0), # Point (in mm), int or str; 1,2..10=A3R,A3..B5
align=("CENTER", "CENTER"), # LEFT/CENTER/RIGHT, TOP/CENTER/BOTTOM
maxpapersize="DEFAULT",
):
"""XDW generator from image file.
fitimage 'FITDEF' | 'FIT' | 'FITDEF_DIVIDEBMP' |
'USERDEF' | 'USERDEF_FIT'
compress 'NORMAL' | 'LOSSLESS' | 'NOCOMPRESS' |
'HIGHQUALITY' | 'HIGHCOMPRESS' |
'JPEG' | 'JPEG_TTN2' | 'PACKBITS' | 'G4' |
'MRC_NORMAL' | 'MRC_HIGHQUALITY' | 'MRC_HIGHCOMPRESS'
zoom (float) in percent; 0 means 100%. < 1/1000 is ignored.
size (Point) in mm; for fitimange 'userdef' or 'userdef_fit'
(int) 1=A3R, 2=A3, 3=A4R, 4=A4, 5=A5R, 6=A5,
7=B4R, 8=B4, 9=B5R, 10=B5
align (horiz, vert) where:
horiz 'CENTER' | 'LEFT' | 'RIGHT'
vert 'CENTER' | 'TOP' | 'BOTTOM'
maxpapersize 'DEFAULT' | 'A3' | '2A0'
Returns actual pathname of generated document, which may be different
from `output_path' argument.
"""
input_path = adjust_path(input_path)
root, ext = os.path.splitext(input_path)
output_path = adjust_path(output_path or root, ext=".xdw")
output_path = derivative_path(output_path)
opt = XDW_CREATE_OPTION_EX2()
opt.nFitImage = XDW_CREATE_FITIMAGE.normalize(fitimage)
opt.nCompress = XDW_COMPRESS.normalize(compress)
#opt.nZoom = int(zoom)
opt.nZoomDetail = int(zoom * 1000) # .3f
# NB. Width and height are valid only for XDW_CREATE_USERDEF(_FIT).
if not isinstance(size, Point):
size = XDW_SIZE.normalize(size)
size = XDW_SIZE_MM[size or 3] # default=A4R
size = Point(*size)
opt.nWidth = int(size.x * 100) # .2f
opt.nHeight = int(size.y * 100) # .2f;
opt.nHorPos = XDW_CREATE_HPOS.normalize(align[0])
opt.nVerPos = XDW_CREATE_VPOS.normalize(align[1])
opt.nMaxPaperSize = XDW_CREATE_MAXPAPERSIZE.normalize(maxpapersize)
if XDWVER < 8:
XDW_CreateXdwFromImageFile(cp(input_path), cp(output_path), opt)
else:
XDW_CreateXdwFromImageFileW(input_path, output_path, opt)
return output_path | 68328baeaae8fa804f2182592158b9e1de7e8cba | 3,655,494 |
import configparser
import logging
def _read_config(filename):
"""Reads configuration file.
Returns DysonLinkCredentials or None on error.
"""
config = configparser.ConfigParser()
logging.info('Reading "%s"', filename)
try:
config.read(filename)
except configparser.Error as ex:
logging.critical('Could not read "%s": %s', filename, ex)
return None
try:
username = config['Dyson Link']['username']
password = config['Dyson Link']['password']
country = config['Dyson Link']['country']
return DysonLinkCredentials(username, password, country)
except KeyError as ex:
logging.critical('Required key missing in "%s": %s', filename, ex)
return None | bb1282e94500c026072a0e8d76fdf3dcd68e9062 | 3,655,495 |
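# Hedged usage sketch: the INI layout this reader expects (placeholder values).
#
#   [Dyson Link]
#   username = you@example.com
#   password = hunter2
#   country = GB
creds = _read_config("config.ini")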
def view_share_link(request, token):
"""
    Translate a given share link to a proposal detail page.
    :param request:
    :param token: share link token, which includes the pk of the proposal
:return: proposal detail render
"""
try:
pk = signing.loads(token, max_age=settings.MAXAGESHARELINK)
except signing.SignatureExpired:
return render(request, "base.html", {
"Message": "Share link has expired!"
})
except signing.BadSignature:
return render(request, "base.html", {
"Message": "Invalid token in share link!"
})
obj = get_object_or_404(Proposal, pk=pk)
return render(request, "proposals/detail_project.html", {
"proposal": obj,
"project": obj
}) | 9d88375f1f3c9c0b94ad2beab4f47ce74ea2464e | 3,655,496 |
from sklearn.pipeline import Pipeline
def create(pdef):
"""Scikit-learn Pipelines objects creation (deprecated).
This function creates a list of sklearn Pipeline objects starting from the
list of list of tuples given in input that could be created using the
adenine.core.define_pipeline module.
Parameters
-----------
pdef : list of list of tuples
        This argument contains the specification needed by sklearn in order
to create a working Pipeline object.
Returns
-----------
pipes : list of sklearn.pipeline.Pipeline objects
        The list of Pipelines; each of them can be fitted and transformed
with some data.
"""
return [Pipeline(p) for p in pdef] | 552014c652d7de236ba917592108315acfd9c694 | 3,655,497 |
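# Hedged usage sketch: one pipeline definition entry yields one ready-to-fit
# Pipeline, mirroring what adenine.core.define_pipeline would emit.
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
pdef = [[("scale", StandardScaler()), ("pca", PCA(n_components=2))]]
pipes = create(pdef)  # [Pipeline(steps=[('scale', ...), ('pca', ...)])]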
def pressure_differentiable(altitude):
"""
Computes the pressure at a given altitude with a differentiable model.
Args:
altitude: Geopotential altitude [m]
Returns: Pressure [Pa]
"""
return np.exp(interpolated_log_pressure(altitude)) | a6a9e7dcc38ac855f3ba5c9a117506a87a981217 | 3,655,498 |
def create_optimizer(hparams, global_step, use_tpu=False):
"""Creates a TensorFlow Optimizer.
Args:
hparams: ConfigDict containing the optimizer configuration.
global_step: The global step Tensor.
use_tpu: If True, the returned optimizer is wrapped in a
CrossShardOptimizer.
Returns:
A TensorFlow optimizer.
Raises:
ValueError: If hparams.optimizer is unrecognized.
"""
optimizer_name = hparams.optimizer.lower()
optimizer_params = {}
if optimizer_name == "momentum":
optimizer_class = tf.train.MomentumOptimizer
optimizer_params["momentum"] = hparams.get("momentum", 0.9)
optimizer_params["use_nesterov"] = hparams.get("use_nesterov", False)
elif optimizer_name == "sgd":
optimizer_class = tf.train.GradientDescentOptimizer
elif optimizer_name == "adagrad":
optimizer_class = tf.train.AdagradOptimizer
elif optimizer_name == "adam":
optimizer_class = tf.train.AdamOptimizer
elif optimizer_name == "rmsprop":
    optimizer_class = tf.train.RMSPropOptimizer
else:
raise ValueError("Unknown optimizer: {}".format(hparams.optimizer))
# Apply weight decay wrapper.
optimizer_class = (
tf.contrib.opt.extend_with_decoupled_weight_decay(optimizer_class))
# Create optimizer.
learning_rate, weight_decay = create_learning_rate_and_weight_decay(
hparams, global_step)
optimizer = optimizer_class(
weight_decay=weight_decay,
learning_rate=learning_rate,
**optimizer_params)
if use_tpu:
optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
return optimizer | 9ff18644fe513e3d01b297e30538c396546746ab | 3,655,499 |
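# Hedged usage sketch: a dict-backed stand-in for the ConfigDict interface
# this function expects (attribute access plus .get with defaults); the real
# call also needs the learning-rate/weight-decay keys, so it is left sketched.
class _HParams(dict):
    __getattr__ = dict.__getitem__
hparams = _HParams(optimizer="momentum", momentum=0.9, use_nesterov=False)
# opt = create_optimizer(hparams, tf.train.get_or_create_global_step())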