content (string, 35-762k chars) | sha1 (string, 40 chars) | id (int64, 0-3.66M)
---|---|---
def actor_path(data, actor_id_1, goal_test_function):
"""
Creates the shortest possible path from the given actor ID to
any actor that satisfies the goal test function.
    Returns a list containing actor IDs.
If no actors satisfy the goal condition, returns None.
"""
agenda = {actor_id_1,}
seen = {actor_id_1,}
relations = {}
map_of_actors = mapped_actors(data)
while agenda:
# Get the children of the parent
next_agenda = set()
for i in agenda:
for j in map_of_actors[i]:
if j not in seen and j not in agenda:
next_agenda.add(j)
# Map child to parent
relations[j] = i
# If actor satisfies function condition, return constructed path
for id_ in agenda:
if goal_test_function(id_):
final_path = construct_path(relations, id_, actor_id_1)
return final_path
for next_ in agenda:
if next_ not in seen:
seen.add(next_)
# Update agenda to next bacon number/layer
agenda = next_agenda
# No path exists
return None | 8e41d7075b3ade8f75481959f9aa376a096aaa1c | 3,651,700 |
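# The BFS above relies on two helpers defined elsewhere: `mapped_actors` (actor ID -> set of
# co-star IDs) and `construct_path`. A minimal sketch of `construct_path` is shown below,
# assuming `relations` maps each discovered actor to its BFS parent, as built in the loop above.
def construct_path(relations, goal_id, start_id):
    """Walk the child -> parent map backwards from goal_id to start_id, then reverse."""
    path = [goal_id]
    while path[-1] != start_id:
        path.append(relations[path[-1]])
    path.reverse()
    return path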
from pathlib import Path
from typing import List, Optional, Union
import sys

from azureml.core import ScriptRunConfig

# Type alias assumed for readability; the original project defines PathOrString elsewhere.
PathOrString = Union[Path, str]
def create_script_run(snapshot_root_directory: Optional[Path] = None,
entry_script: Optional[PathOrString] = None,
script_params: Optional[List[str]] = None) -> ScriptRunConfig:
"""
Creates an AzureML ScriptRunConfig object, that holds the information about the snapshot, the entry script, and
its arguments.
:param entry_script: The script that should be run in AzureML.
:param snapshot_root_directory: The directory that contains all code that should be packaged and sent to AzureML.
All Python code that the script uses must be copied over.
    :param script_params: A list of parameters to pass on to the script as it runs in AzureML. If empty (or None, the
        default) these will be copied over from sys.argv, omitting the --azureml flag.
    :return: A ScriptRunConfig object describing the snapshot directory, entry script, and script arguments.
"""
if snapshot_root_directory is None:
print("No snapshot root directory given. All files in the current working directory will be copied to AzureML.")
snapshot_root_directory = Path.cwd()
else:
print(f"All files in this folder will be copied to AzureML: {snapshot_root_directory}")
if entry_script is None:
entry_script = Path(sys.argv[0])
print("No entry script given. The current main Python file will be executed in AzureML.")
elif isinstance(entry_script, str):
entry_script = Path(entry_script)
if entry_script.is_absolute():
try:
# The entry script always needs to use Linux path separators, even when submitting from Windows
entry_script_relative = entry_script.relative_to(snapshot_root_directory).as_posix()
except ValueError:
raise ValueError("The entry script must be inside of the snapshot root directory. "
f"Snapshot root: {snapshot_root_directory}, entry script: {entry_script}")
else:
entry_script_relative = str(entry_script)
script_params = _get_script_params(script_params)
print(f"This command will be run in AzureML: {entry_script_relative} {' '.join(script_params)}")
return ScriptRunConfig(
source_directory=str(snapshot_root_directory),
script=entry_script_relative,
arguments=script_params) | 1c2aaaae087dfd8eb583d2c4a641c585ffda3be4 | 3,651,701 |
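# `_get_script_params` is referenced above but not shown. Based on the docstring ("copied over
# from sys.argv, omitting the --azureml flag"), a minimal sketch could look like this; the real
# implementation in the source project may differ.
import sys
from typing import List, Optional


def _get_script_params(script_params: Optional[List[str]] = None) -> List[str]:
    """Return the given parameters, or fall back to sys.argv[1:] with any --azureml flag removed."""
    if script_params:
        return script_params
    return [p for p in sys.argv[1:] if p != "--azureml"]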
from astropy import units as u
from scipy import optimize


def M_to_E(M, ecc):
"""Eccentric anomaly from mean anomaly.
.. versionadded:: 0.4.0
Parameters
----------
M : float
Mean anomaly (rad).
ecc : float
Eccentricity.
Returns
-------
E : float
Eccentric anomaly.
"""
with u.set_enabled_equivalencies(u.dimensionless_angles()):
E = optimize.newton(_kepler_equation, M, _kepler_equation_prime,
args=(M, ecc))
return E | 071f33a294edf6627ad77caa256de48e94afad76 | 3,651,702 |
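# `_kepler_equation` and `_kepler_equation_prime` are module-level helpers that are not part of
# this snippet. A sketch based on the standard Kepler equation E - ecc*sin(E) - M = 0 is shown
# below; the names and argument order are inferred from the optimize.newton call above.
import numpy as np


def _kepler_equation(E, M, ecc):
    """Residual of Kepler's equation; zero when E solves E - ecc*sin(E) = M."""
    return E - ecc * np.sin(E) - M


def _kepler_equation_prime(E, M, ecc):
    """Derivative of the residual with respect to E."""
    return 1 - ecc * np.cos(E)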
import os

import pandas as pd
def load_beijing():
"""Load and return the Beijing air quality dataset."""
module_path = os.path.dirname(__file__)
data = pd.read_csv(
os.path.join(module_path, 'data', 'beijing_air_quality.csv'))
return data | cd33c1a7034e5b0a7f397ff644631c8d1aaa7a0c | 3,651,703 |
def encrypt(plaintext, a, b):
"""
加密函数:E(x) = (ax + b)(mod m) m为编码系统中的字母数,一般为26
:param plaintext:
:param a:
:param b:
:return:
"""
cipher = ""
for i in plaintext:
if not i.isalpha():
cipher += i
else:
n = "A" if i.isupper() else "a"
cipher += chr((a * (ord(i) - ord(n)) + b) % 26 + ord(n))
return cipher | 0cbb57250d8d7a18740e19875f79127b8057ab06 | 3,651,704 |
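# For reference, the matching affine-cipher decryption is D(x) = a_inv * (x - b) mod 26, where
# a_inv is the modular inverse of a modulo 26. This companion sketch is illustrative and not part
# of the original snippet; pow(a, -1, 26) requires Python 3.8+ and gcd(a, 26) == 1.
def decrypt(cipher, a, b):
    a_inv = pow(a, -1, 26)  # modular inverse of a modulo 26
    plaintext = ""
    for i in cipher:
        if not i.isalpha():
            plaintext += i
        else:
            n = "A" if i.isupper() else "a"
            plaintext += chr(a_inv * ((ord(i) - ord(n)) - b) % 26 + ord(n))
    return plaintext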
import pathlib
import shlex
import os
def construct_gn_command(output_path, gn_flags, python2_command=None, shell=False):
"""
Constructs and returns the GN command
If shell is True, then a single string with shell-escaped arguments is returned
If shell is False, then a list containing the command and arguments is returned
"""
gn_args_string = " ".join(
[flag + "=" + value for flag, value in gn_flags.items()])
command_list = [str(pathlib.Path("tools", "gn", "bootstrap", "bootstrap.py")),
"-v", "-s", "-o", str(output_path),
"--gn-gen-args=" + gn_args_string]
if python2_command:
command_list.insert(0, python2_command)
if shell:
command_string = " ".join([shlex.quote(x) for x in command_list])
if python2_command:
return command_string
else:
return os.path.join(".", command_string)
else:
return command_list | 2177ea4436305733268a427e0c4b006785e41b2d | 3,651,705 |
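# Illustrative usage of construct_gn_command (flag names and values below are placeholders,
# not taken from the original project):
example_cmd = construct_gn_command(
    pathlib.Path("out", "Default"),
    {"is_debug": "false", "is_official_build": "true"},
    python2_command="python2",
    shell=True)
print(example_cmd)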
import subprocess
def reads_in_file(file_path):
""" Find the number of reads in a file.
Count number of lines with bash wc -l and divide by 4 if fastq, otherwise by 2 (fasta) """
return round(int(subprocess.check_output(["wc", "-l", file_path]).split()[0]) /
(4 if bin_classify.format == "fastq" else 2)) | 2a1bbee200564fb8e439b9af5910d75ee1a275ab | 3,651,706 |
def _url_as_filename(url: str) -> str:
"""Return a version of the url optimized for local development.
If the url is a `file://` url, it will return the remaining part
of the url so it can be used as a local file path. For example,
'file:///logs/example.txt' will be converted to
'/logs/example.txt'.
Parameters
----------
    url: str
        The url to check and optimize.
Returns
-------
str: The url converted to a filename.
"""
return url.replace('file://', '') | d1aef7a08221c7788f8a7f77351ccb6e6af9416b | 3,651,707 |
import logging
from typing import Dict

import numpy as np

logger = logging.getLogger(__name__)

def hard_max(node: NodeWrapper,
params: Dict[str, np.ndarray],
xmap: Dict[str, XLayer]):
""" ONNX Hardmax to XLayer AnyOp conversion function
Input tensor shape: N dims
Output tensor shape: 2D
"""
logger.info("ONNX Hardmax -> XLayer AnyOp")
assert len(node.get_outputs()) == 1
name = node.get_outputs()[0]
bottoms = node.get_inputs()
node_attrs = node.get_attributes()
iX = xmap[bottoms[0]]
d = len(iX.shapes)
axis = int(node_attrs['axis']) if 'axis' in node_attrs else 1
if axis < 0:
axis = d + axis
in_shape = iX.shapes.tolist()
dim_0 = int(np.prod(in_shape[:axis]))
dim_1 = int(np.prod(in_shape[axis:]))
X = px.ops.any_op(
op_name=px.stringify(name),
in_xlayers=[iX],
any_shape=[dim_0, dim_1],
onnx_id=name
)
return [X] | 5f412e98836cd377d40a759ab0487aa81cc4f3dc | 3,651,708 |
from typing import AnyStr
from typing import List
def sol_files_by_directory(target_path: AnyStr) -> List:
"""Gathers all the .sol files inside the target path
including sub-directories and returns them as a List.
Non .sol files are ignored.
:param target_path: The directory to look for .sol files
    :return: A list of the .sol file paths found under target_path
"""
return files_by_directory(target_path, ".sol") | e41ad3da26ffa1d3c528f34362ac1aeeadeb2b3c | 3,651,709 |
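# `files_by_directory` is defined elsewhere in the package. A plausible sketch, assuming it walks
# target_path recursively and keeps files ending with the given extension (the signature is
# inferred from the call above):
import os
from typing import AnyStr, List


def files_by_directory(target_path: AnyStr, extension: str) -> List[str]:
    """Recursively collect file paths under target_path that end with `extension`."""
    matches: List[str] = []
    for root, _dirs, files in os.walk(target_path):
        for name in files:
            if name.endswith(extension):
                matches.append(os.path.join(root, name))
    return matches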
def _call(sig, *inputs, **kwargs):
"""Adds a node calling a function.
This adds a `call` op to the default graph that calls the function
of signature `sig`, passing the tensors in `inputs` as arguments.
It returns the outputs of the call, which are one or more tensors.
  `sig` is the `OpDef` signature of a `_DefinedFunction` object.
You can pass an optional keyword parameter `name=string` to name the
added operation.
You can pass an optional keyword parameter `noinline=True|False` to
instruct the runtime not to inline the function body into the call
site.
Args:
sig: OpDefArg. The signature of the function.
*inputs: arguments to the function.
**kwargs: Optional keyword arguments. Can only contain 'name' or
'noinline'.
Returns:
A 2-element tuple. First element: a Tensor if the function returns a single
  value; a list of Tensors if the function returns multiple values; the
Operation if the function returns no values. Second element: the Operation.
Raises:
ValueError: if the arguments are invalid.
"""
if len(inputs) != len(sig.input_arg):
raise ValueError("Expected number of arguments: %d, received: %d" % (len(
sig.input_arg), len(inputs)))
name = kwargs.pop("name", None)
g = ops.get_default_graph()
func_name = sig.name
if name is None:
name = func_name
attrs = _parse_kwargs_as_attrs(func_name, **kwargs)
output_types = [dtypes.DType(x.type) for x in sig.output_arg]
op = g._create_op_internal( # pylint: disable=protected-access
func_name, list(inputs), output_types, name=name, attrs=attrs, op_def=sig)
if op.outputs:
if len(op.outputs) == 1:
ret = op.outputs[0]
else:
ret = tuple(op.outputs)
else:
ret = op
return ret, op | 6fd65281118e33bbcd9d567a7c528d85976e75e7 | 3,651,710 |
def api_url_for(view_name, _absolute=False, _xml=False, *args, **kwargs):
"""Reverse URL lookup for API routes (that use the JSONRenderer or XMLRenderer).
Takes the same arguments as Flask's url_for, with the addition of
`_absolute`, which will make an absolute URL with the correct HTTP scheme
based on whether the app is in debug mode. The _xml flag sets the renderer to use.
"""
renderer = 'XMLRenderer' if _xml else 'JSONRenderer'
url = url_for('{0}__{1}'.format(renderer, view_name), *args, **kwargs)
if _absolute:
# We do NOT use the url_for's _external kwarg because app.config['SERVER_NAME'] alters
# behavior in an unknown way (currently breaks tests). /sloria /jspies
return urlparse.urljoin(website_settings.DOMAIN, url)
return url | 6efcfbe15003652fd95294e941426ece07b37e9d | 3,651,711 |
import torch
def cov(x, rowvar=False, bias=False, ddof=None, aweights=None):
"""Estimates covariance matrix like numpy.cov"""
# ensure at least 2D
if x.dim() == 1:
x = x.view(-1, 1)
# treat each column as a data point, each row as a variable
if rowvar and x.shape[0] != 1:
x = x.t()
if ddof is None:
if bias == 0:
ddof = 1
else:
ddof = 0
w = aweights
if w is not None:
if not torch.is_tensor(w):
w = torch.tensor(w, dtype=torch.float)
w_sum = torch.sum(w)
avg = torch.sum(x * (w/w_sum)[:,None], 0)
else:
avg = torch.mean(x, 0)
# Determine the normalization
if w is None:
fact = x.shape[0] - ddof
elif ddof == 0:
fact = w_sum
elif aweights is None:
fact = w_sum - ddof
else:
fact = w_sum - ddof * torch.sum(w * w) / w_sum
xm = x.sub(avg.expand_as(x))
if w is None:
X_T = xm.t()
else:
X_T = torch.mm(torch.diag(w), xm).t()
c = torch.mm(X_T, xm)
c = c / fact
return c.squeeze() | 6b5666a3e7fa6fe0c0e115286e10d2e756ba8ee9 | 3,651,712 |
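# Quick sanity check (illustrative, assumes numpy is available alongside torch): the estimator
# above should agree with numpy.cov for the default unbiased, unweighted case.
import numpy as np
import torch

x_np = np.random.randn(100, 3)
c_torch = cov(torch.from_numpy(x_np)).numpy()
c_np = np.cov(x_np, rowvar=False)
assert np.allclose(c_torch, c_np)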
def threadsafe_generator(f):
"""A decorator that takes a generator function and makes it thread-safe.
Args:
f(function): Generator function
Returns:
        function: A thread-safe version of the generator function.
"""
def g(*args, **kwargs):
"""
Args:
            *args(list): List of non-keyworded, variable-length arguments.
            **kwargs(dict): List of keyworded, variable-length arguments.
Returns:
function: The thread-safe function.
"""
return threadsafe_iter_3(f(*args, **kwargs))
return g | 6a3e53984c85c951e5ffefa2ed238af86d8fc3e3 | 3,651,713 |
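# `threadsafe_iter_3` is not shown in this snippet. The usual pattern wraps the generator with a
# lock so that concurrent next() calls are serialized; a minimal sketch under that assumption:
import threading


class threadsafe_iter_3:
    """Wrap an iterator/generator so that calls to next() are serialized by a lock."""

    def __init__(self, it):
        self.it = it
        self.lock = threading.Lock()

    def __iter__(self):
        return self

    def __next__(self):
        with self.lock:
            return next(self.it)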
from zipfile import ZipFile


def load_many_problems(file, collection):
"""Given a ZIP file containing several ZIP files (each one a problem),
insert the problems into collection"""
problems = list()
try:
with ZipFile(file) as zfile:
for filename in zfile.infolist():
with zfile.open(filename) as curr_file:
problem = load_problem_from_file(curr_file)
problem.collection = collection
problem.author = collection.author
problems.append(problem)
except ZipFileParsingException as excp:
raise ZipFileParsingException('{}: {}'.format(filename.filename, excp)) from excp
except Exception as excp:
raise ZipFileParsingException("{}: {}".format(type(excp), excp)) from excp
return problems | 08d60f5c7905397254715f80e74019f3496d84e5 | 3,651,714 |
def CheckStructuralModelsValid(rootGroup, xyzGridSize=None, verbose=False):
"""
    **CheckStructuralModelsValid** - Checks for valid structural model group data
given a netCDF root node
Parameters
----------
rootGroup: netCDF4.Group
The root group node of a Loop Project File
xyzGridSize: [int,int,int] or None
The 3D grid shape to test data in this node to adhere to
verbose: bool
A flag to indicate a higher level of console logging (more if True)
Returns
-------
bool
True if valid structural model data in project file, False otherwise.
"""
valid = True
if "StructuralModels" in rootGroup.groups:
if verbose: print(" Structural Models Group Present")
smGroup = rootGroup.groups.get("StructuralModels")
# if verbose: print(smGroup)
if "easting" in smGroup.ncattrs() and "northing" in smGroup.ncattrs() and "depth" in smGroup.ncattrs():
            if xyzGridSize is not None:
# Check gridSize from extents matches models sizes
smGridSize = [smGroup.dimensions["easting"].size,smGroup.dimensions["northing"].size,smGroup.dimensions["depth"].size]
if smGridSize != xyzGridSize:
print("(INVALID) Extents grid size and Structural Models Grid Size do NOT match")
print("(INVALID) Extents Grid Size : ", xyzGridSize)
print("(INVALID) Structural Models Grid Size : ", smGridSize)
valid = False
else:
if verbose: print(" Structural Models grid size adheres to extents")
else:
if verbose: print("No structural models extents in project file")
else:
if verbose: print("No Structural Models Group Present")
return valid | d11ce42b041b8be7516f827883a37b40f6f98477 | 3,651,715 |
def get_load_balancers():
"""
Return all load balancers.
:return: List of load balancers.
:rtype: list
"""
return elbv2_client.describe_load_balancers()["LoadBalancers"] | b535f47ce94106a4c7ebe3d84ccfba7c57f22ba9 | 3,651,716 |
from glob import glob
import os
def get_datasets(folder):
"""
Returns a dictionary of dataset-ID: dataset directory paths
"""
paths = glob(f"{folder}/*")
return {os.path.split(p)[-1]: p for p in paths if os.path.isdir(p)} | 1d26bddaa82624c5edb7f0e2fe0b11d5287f6f61 | 3,651,717 |
def file_preview(request):
"""
Live preview of restructuredtext payload - currently not wired up
"""
f = File(
heading=request.POST['heading'],
content=request.POST['content'],
)
rendered_base = render_to_string('projects/doc_file.rst.html', {'file': f})
rendered = restructuredtext(rendered_base)
json_response = simplejson.dumps({'payload': rendered})
return HttpResponse(json_response, mimetype='text/javascript') | e83570b7b31b4a2d526f1699f8b65c5623d6f7ee | 3,651,718 |
def makeMask(n):
"""
    return a mask of n bits as an integer
    """
    return (2 << n - 1) - 1
def link_name_to_index(model):
""" Generate a dictionary for link names and their indicies in the
model. """
return {
link.name : index for index, link in enumerate(model.links)
} | ba0e768b1160218908b6ecf3b186a73c75a69894 | 3,651,720 |
import os

import pandas as pd
from audioread import NoBackendError  # raised by librosa when no audio backend can decode a file
from tqdm import tqdm
def create_audio_dataset(
dataset_path: str, dataset_len=100, **kwargs
) -> pd.DataFrame:
""" Creates audio dataset from file structure.
    Args:
        dataset_path: Path to the root directory of audio files.
        dataset_len (optional): Maximum number of audio files to include.
Returns:
df: Compiled dataframe representing this dataset.
"""
# dir_dict = _crawl_dir(dataset_path)
num_songs = 0
song_names = []
songs = []
break_outer = False
for root, dirs, files in (
dir_iterator := tqdm(os.walk(dataset_path), leave=False)
):
if break_outer:
dir_iterator.close()
break
rel_root = root.replace(dataset_path, "", 1)
for file in tqdm(files, leave=False):
if num_songs >= dataset_len:
break_outer = True
continue
song_name = file
if rel_root != "":
song_name = f"{rel_root}/{song_name}"
song_names.append(song_name)
try:
songs.append(
create_audio_datum(f"{root}/{file}", file, **kwargs)
)
except NoBackendError:
song_names.pop()
continue
num_songs += 1
data = {
"index": song_names,
"columns": ["sampling_rate", "time_signal", "stft"],
"data": songs,
"index_names": ["songs"],
"column_names": ["audio components"],
}
return pd.DataFrame.from_dict(data, orient="tight")
# make into correct df?
# for file in os.listdir(dataset_path):
# if os.path.isdir(file):
# _create_audio_dataset(
# file, dataset_len=dataset_len-file_count, **kwargs
# )
# else:
# create_audio_datum(file, **kwargs)
# file_count += 1
# songs = os.listdir(playlist_dir)[:dataset_len]
# if ".DS_Store" in songs:
# songs.remove(".DS_Store")
# songs = [song for song in songs if ".json" not in song]
# df_structure = dict(zip(songs, [None] * len(songs)))
# for song_name in df_structure.keys():
# song_path = playlist_dir / song_name
# if os.path.isdir(song_path):
# components = os.listdir(song_path)
# else:
# components = [song_name]
# df_structure[song_name] = {}
# df_structure[song_name]["time_signals"] = dict(zip(components, [None] * len(components)))
# df_structure[song_name]["stfts"] = dict(zip(components, [None] * len(components)))
# df_structure[song_name]["sampling_rate"] = None
# df = pd.DataFrame(df_structure)
# for song_name, song_data in tqdm(df_structure.items()):
# sr = None
# song_path = playlist_dir / song_name
# for component in song_data["time_signals"]:
# if component == song_name:
# filepath = song_path
# else:
# filepath = song_path / component
# df[song_name]["time_signals"][component], sr_tmp = librosa.load(
# filepath, sr=None
# )
# # assumes all songs at same sampling rate
# assert(not sr or sr == sr_tmp)
# sr = sr_tmp
# df[song_name]["sampling_rate"] = sr
# # calculate STFTs
# for key in tqdm(songs):
# song = df[key]
# for component, data in tqdm(song["time_signals"].items(), leave=False):
# X = librosa.stft(data, **kwargs)
# song["stfts"][component] = X
# return df | 054b2ee756beeeade248ce75f9369e9224e093f4 | 3,651,721 |
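# `create_audio_datum` is referenced above but not defined in this snippet. Based on the dataframe
# columns ["sampling_rate", "time_signal", "stft"] and the commented-out legacy code, a plausible
# sketch using librosa is given below; the signature mirrors the call above and extra kwargs are
# forwarded to librosa.stft.
import librosa


def create_audio_datum(file_path: str, file_name: str, **kwargs) -> list:
    """Load one audio file and return [sampling_rate, time_signal, stft]."""
    # file_name is accepted to match the call site above; it is not needed for loading.
    time_signal, sampling_rate = librosa.load(file_path, sr=None)
    stft = librosa.stft(time_signal, **kwargs)
    return [sampling_rate, time_signal, stft]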
import json
def photos_page():
"""
Example view demonstrating rendering a simple HTML page.
"""
context = make_context()
with open('data/featured.json') as f:
context['featured'] = json.load(f)
return make_response(render_template('photos.html', **context)) | dfb172e01f659be163c7dffdb13cc5cbaa28ab10 | 3,651,722 |
import json
def get_user_by_id(current_user, uid):
""" Получение одного пользователя по id в json"""
try:
user_schema = CmsUsersSchema(exclude=['password'])
user = CmsUsers.query.get(uid)
udata = user_schema.dump(user)
response = Response(
response=json.dumps(udata.data),
status=200,
mimetype='application/json'
)
except Exception:
response = server_error(request.args.get("dbg"))
return response | 9f91319020fb0b386d506b4365c2912af3ed5874 | 3,651,723 |
import numpy as np


def update_bond_lists_mpi(bond_matrix, comm, size, rank):
"""
update_bond_lists(bond_matrix)
    Return atom indices of angular terms
"""
N = bond_matrix.shape[0]
"Get indicies of bonded beads"
bond_index_full = np.argwhere(bond_matrix)
"Create index lists for referring to in 2D arrays"
indices_full = create_index(bond_index_full)
angle_indices = []
angle_bond_indices = []
"Count number of unique bonds"
count = np.unique(bond_index_full.T[0]).shape[0]
"""
"Find indicies of ends of fibrils"
fib_end_check = np.argwhere(np.sum(bond_matrix, axis=1) <= 1)
n_fib_end = fib_end_check.shape[0]
fib_end_check_ind = np.tile(fib_end_check, n_fib_end)
fib_end_check_ind = np.stack((fib_end_check_ind, fib_end_check_ind.T), axis=2)
fib_end_check_ind = create_index(fib_end_check_ind[np.where(~np.eye(n_fib_end,dtype=bool))])
fib_end = np.zeros(bond_matrix.shape)
fib_end[fib_end_check_ind] += 1
"""
for n in range(N):
slice_full = np.argwhere(bond_index_full.T[0] == n)
if slice_full.shape[0] > 1:
angle_indices.append(np.unique(bond_index_full[slice_full].flatten()))
angle_bond_indices.append(bond_index_full[slice_full][::-1])
bond_indices = np.nonzero(np.array_split(bond_matrix, size)[rank])
angle_indices = np.array_split(angle_indices, size)[rank]
angle_bond_indices = create_index(np.array_split(angle_bond_indices, size)[rank].reshape((2 * len(angle_indices), 2)))
return bond_indices, angle_indices, angle_bond_indices | 60fd4e5ee7418d182f0c29b0d69e0f148a5a40ee | 3,651,724 |
# Note: `flags` below is the Cloud SDK's Cloud Run flags module (assumed to be
# googlecloudsdk.command_lib.run.flags), not sys.flags, which has no GetRegion.
from googlecloudsdk.command_lib.run import flags
def RepoRegion(args, cluster_location=None):
"""Returns the region for the Artifact Registry repo.
The intended behavior is platform-specific:
* managed: Same region as the service (run/region or --region)
* gke: Appropriate region based on cluster zone (cluster_location arg)
* kubernetes: The run/region config value will be used or an exception
raised when unset.
Args:
args: Namespace, the args namespace.
cluster_location: The zone which a Cloud Run for Anthos cluster resides.
When specified, this will result in the region for this zone being
returned.
Returns:
The appropriate region for the repository.
"""
if cluster_location:
return _RegionFromZone(cluster_location)
region = flags.GetRegion(args, prompt=False)
if region:
return region
raise exceptions.ArgumentError(
'To deploy from source with this platform, you must set run/region via '
'"gcloud config set run/region REGION".') | 8a0e16ebbdedd82490a2ca8cc358c74386c963d2 | 3,651,725 |
from ibis.omniscidb.compiler import to_sql
def compile(expr: ibis.Expr, params=None):
"""Compile a given expression.
Note you can also call expr.compile().
Parameters
----------
expr : ibis.Expr
params : dict
Returns
-------
compiled : string
"""
return to_sql(expr, dialect.make_context(params=params)) | 01bfe1be13b9a78adba04ca37a08aadbf551c827 | 3,651,726 |
def get_border(border, size):
"""
Get border
"""
i = 1
    while size - border // i <= border // i:  # loop until size > 2 * (border // i)
i *= 2
return border // i | 45233f53cdf6f0edb5b4a9262b61f2a70ac42661 | 3,651,727 |
import scanpy as sc
from anndata import AnnData, read_h5ad


def load_normalized_data(file_path, log1p=True):
"""load normalized data
1. Load filtered data for both FACS and droplet
2. Size factor normalization to counts per 10 thousand
3. log(x+1) transform
4. Combine the data
Args:
file_path (str): file path.
Returns:
adata_combine (AnnData): Combined data for FACS and droplet
"""
# Load filtered data
# adata_facs = read_h5ad(f'{file_path}/facs_filtered.h5ad')
adata_facs = read_h5ad(f'{file_path}/facs_filtered_reannotated-except-for-marrow-lung-kidney.h5ad')
adata_droplet = read_h5ad(f'{file_path}/droplet_filtered.h5ad')
# Size factor normalization
sc.pp.normalize_per_cell(adata_facs, counts_per_cell_after=1e4)
sc.pp.normalize_per_cell(adata_droplet, counts_per_cell_after=1e4)
# log(x+1) transform
if log1p:
sc.pp.log1p(adata_facs)
sc.pp.log1p(adata_droplet)
# Combine the data
ind_select = adata_facs.obs['age'].isin(['3m', '18m', '24m'])
adata_facs = adata_facs[ind_select,]
adata_combine = AnnData.concatenate(adata_facs, adata_droplet, batch_key='b_method',
batch_categories = ['facs','droplet'])
return adata_combine | 3c180c1f2ba1e118678331795eb42b7132686ed6 | 3,651,728 |
import cobra
import pandas as pd


def from_copy_number(
model: cobra.Model,
index: pd.Series,
cell_copies: pd.Series,
stdev: pd.Series,
vol: float,
dens: float,
water: float,
) -> cobra.Model:
"""Convert `cell_copies` to mmol/gDW and apply them to `model`.
Parameters
----------
model: cobra.Model
cobra or geckopy Model (will be converted to geckopy.Model). It is NOT
modified inplace.
index: pd.Series
uniprot IDs
cell_copies: pd.Series
cell copies/ cell per proteins
stdev: pd.Series
standard deviation of the cell copies
vol: float
cell volume
dens: float
cell density
water: float
water content fraction (0-1)
Returns
-------
geckopy.Model
with the proteomics constraints applied
"""
df = pd.DataFrame({"cell_copies": cell_copies, "CV": stdev})
# from molecules/cell to mmol/gDW
df["copies_upper"] = df["cell_copies"] + 0.5 * df["CV"] / 100 * df["cell_copies"]
df["mmol_per_cell"] = df["copies_upper"] / 6.022e21
proteomics = df["mmol_per_cell"] / (vol * dens * water)
proteomics.index = index
return from_mmol_gDW(model, proteomics) | 858d563ad0f4ae16e83b36db3908895671809431 | 3,651,729 |
from subprocess import CalledProcessError, check_output, STDOUT


def getstatusoutput(cmd):
"""Return (exitcode, output) of executing cmd in a shell.
Execute the string 'cmd' in a shell with 'check_output' and
return a 2-tuple (status, output). The locale encoding is used
to decode the output and process newlines.
A trailing newline is stripped from the output.
The exit status for the command can be interpreted
according to the rules for the function 'wait'. Example:
>>> import subprocess
>>> subprocess.getstatusoutput('ls /bin/ls')
(0, '/bin/ls')
>>> subprocess.getstatusoutput('cat /bin/junk')
(1, 'cat: /bin/junk: No such file or directory')
>>> subprocess.getstatusoutput('/bin/junk')
(127, 'sh: /bin/junk: not found')
>>> subprocess.getstatusoutput('/bin/kill $$')
(-15, '')
"""
try:
data = check_output(cmd, shell=True, text=True, stderr=STDOUT)
exitcode = 0
except CalledProcessError as ex:
data = ex.output
exitcode = ex.returncode
if data[-1:] == '\n':
data = data[:-1]
return exitcode, data | 9a243e5e138731fe6d2c6525fea3c5c36a1d1119 | 3,651,730 |
import re
def _get_values(attribute, text):
"""Match attribute in text and return all matches.
:returns: List of matches.
"""
    regex = r'{}\s+=\s+"(.*)";'.format(attribute)
regex = re.compile(regex)
values = regex.findall(text)
return values | 59a0fdb7a39221e5f728f512ba0aa814506bbc37 | 3,651,731 |
import numpy as np


def time_axis(tpp=20e-9, length=20_000) -> np.ndarray:
"""Return the time axis used in experiments.
"""
ts = tpp * np.arange(length)
ten_percent_point = np.floor(length / 10) * tpp
ts -= ten_percent_point
ts *= 1e6 # convert from seconds to microseconds
return ts | 6cd18bcbfa6949fe98e720312b07cfa20fde940a | 3,651,732 |
from cyder.core.ctnr.models import CtnrUser
def _has_perm(user, ctnr, action, obj=None, obj_class=None):
"""
Checks whether a user (``request.user``) has permission to act on a
given object (``obj``) within the current session CTNR. Permissions will
depend on whether the object is within the user's current CTNR and
the user's permissions level within that CTNR. Plebs are people that don't
have any permissions except for dynamic registrations.
Guests of a CTNR have view access to all objects within the current CTNR.
Users have full access to objects within the current CTNR, except
for exceptional types of objects (domains, SOAs) and the CTNR itself.
CTNR admins are like users except they can modify the CTNR itself
and assign permissions to other users.
Cyder admins are CTNR admins to every CTNR. Though the object has to
be within the CURRENT CTNR for permissions to be granted, for purposes
of encapsulation.
Superusers (Uber-admins/Elders) have complete access to everything
including the ability to create top-level domains, SOAs, and global DHCP
objects.
Plebs are not assigned to any CTNR.
CTNR Guests have level 0 to a CTNR.
CTNR Users have level 1 to a CTNR.
CTNR Admins have level 2 to a CTNR.
Cyder Admins have level 2 to the 'global' CTNR (``pk=1``).
Superusers are Django superusers.
:param request: A django request object.
:type request: :class:`request`
:param obj: The object being tested for permission.
:type obj: :class:`object`
:param action: ``0`` (view), ``1`` (create), ``2`` (update), ``3`` (delete)
:type action: :class: `int`
An example of checking whether a user has 'create' permission on a
:class:`Domain` object.
>>> perm = request.user.get_profile().has_perm(request, \'create\',
... obj_class=Domain)
>>> perm = request.user.get_profile().has_perm(request, \'update\',
... obj=domain)
"""
user_level = None
if user.is_superuser:
return True
ctnr_level = -1
assert LEVEL_ADMIN > LEVEL_USER > LEVEL_GUEST > ctnr_level
if obj:
ctnr = None
ctnrs = None
if hasattr(obj, "get_ctnrs"):
try:
ctnrs = obj.get_ctnrs()
except TypeError:
pass
if ctnrs is not None:
for c in ctnrs:
try:
level = CtnrUser.objects.get(ctnr=c, user=user).level
except CtnrUser.DoesNotExist:
continue
if level > ctnr_level:
ctnr_level = level
ctnr = c
if ctnr_level == LEVEL_ADMIN:
break
elif ctnr and user and not obj:
try:
ctnr_level = CtnrUser.objects.get(ctnr=ctnr, user=user).level
except CtnrUser.DoesNotExist:
pass
if obj and ctnr and not ctnr.check_contains_obj(obj):
return False
# Get user level.
is_ctnr_admin = ctnr_level == LEVEL_ADMIN
is_ctnr_user = ctnr_level == LEVEL_USER
is_ctnr_guest = ctnr_level == LEVEL_GUEST
try:
cyder_level = CtnrUser.objects.get(ctnr=1, user=user).level
except CtnrUser.DoesNotExist:
cyder_level = -1
is_cyder_admin = cyder_level == LEVEL_ADMIN
is_cyder_guest = CtnrUser.objects.filter(user=user).exists()
if is_cyder_admin:
user_level = 'cyder_admin'
elif is_ctnr_admin:
user_level = 'ctnr_admin'
elif is_ctnr_user:
user_level = 'ctnr_user'
elif is_ctnr_guest:
user_level = 'ctnr_guest'
elif is_cyder_guest:
user_level = 'cyder_guest'
else:
user_level = 'pleb'
# Dispatch to appropriate permissions handler.
if obj:
obj_type = obj.__class__.__name__
elif obj_class:
if isinstance(obj_class, basestring):
obj_type = str(obj_class)
else:
obj_type = obj_class.__name__
else:
return False
if (obj_type and obj_type.endswith('AV')
and obj_type != 'WorkgroupAV'):
obj_type = obj_type[:-len('AV')]
handling_functions = {
# Administrative.
'Ctnr': has_administrative_perm,
'User': has_administrative_perm,
'UserProfile': has_administrative_perm,
'CtnrUser': has_ctnr_user_perm,
'CtnrObject': has_ctnr_object_perm,
'SOA': has_soa_perm,
'Domain': has_domain_perm,
# Domain records.
'AddressRecord': has_domain_record_perm,
'CNAME': has_domain_record_perm,
'MX': has_domain_record_perm,
'Nameserver': has_name_server_perm,
'SRV': has_domain_record_perm,
'SSHFP': has_domain_record_perm,
'TXT': has_domain_record_perm,
'PTR': has_reverse_domain_record_perm,
# DHCP.
'Network': has_network_perm,
'Range': has_range_perm,
'Site': has_site_perm,
'System': has_system_perm,
'Vlan': has_vlan_perm,
'Vrf': has_vrf_perm,
'Workgroup': has_workgroup_perm,
'StaticInterface': has_static_registration_perm,
'DynamicInterface': has_dynamic_registration_perm,
'Supernet': has_supernet_perm,
'WorkgroupAV': has_workgroupav_perm,
'Token': has_token_perm
}
handling_function = handling_functions.get(obj_type, None)
if not handling_function:
if '_' in obj_type:
obj_type = obj_type.replace('_', '')
if 'Intr' in obj_type:
obj_type = obj_type.replace('Intr', 'interface')
for key in handling_functions.keys():
if obj_type.lower() == key.lower():
handling_function = handling_functions[key]
if handling_function:
return handling_function(user_level, obj, ctnr, action)
else:
raise Exception('No handling function for {0}'.format(obj_type)) | 998119c3aa9b50fcdd9fdec1f734374f04fe51c6 | 3,651,733 |
def read_chunk(file: File, size: int=400) -> bytes:
""" Reads first [size] chunks from file, size defaults to 400 """
file = _path.join(file.root, file.name) # get full path of file
with open(file, 'rb') as file:
# read chunk size
chunk = file.read(size)
return chunk | dfa1fd576fe14c5551470fb76a674dccd136e200 | 3,651,734 |
def parse_input(file_path):
"""
Turn an input file of newline-separate bitrate samples into
input and label arrays. An input file line should look like this:
4983 1008073 1591538 704983 1008073 1008073 704983
Adjacent duplicate entries will be removed and lines with less than
two samples will be filtered out.
@return a tuple of the x, x sequence length, and y arrays parsed
from the input file
"""
bitrate_inputs = []
inputs_length = []
bitrate_labels = []
with open(file_path, 'r') as file:
for line in file:
            samples = [[float(x) * bps_to_MBps] for x in line.strip().split(' ')][0:MAX_SAMPLES + 1]
if (len(samples) < 2):
# skip lines without enough samples
continue
bitrate_labels.append(samples.pop())
inputs_length.append(len(samples))
samples += [[-1] for i in range(MAX_SAMPLES - len(samples))]
bitrate_inputs += [samples]
return bitrate_inputs, inputs_length, bitrate_labels | 1e1aada5b8da01d362f7deb0b2145209bb55bcc0 | 3,651,735 |
import os
def read(rel_path):
""" Docstring """
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, rel_path), 'r') as fp:
return fp.read() | ec84c8ccf878e7f9ad8ccfb0239e7d82c7ba7f99 | 3,651,736 |
import sys
def main(argv=None):
"""Main command line interface."""
if argv is None:
argv = sys.argv[1:]
cli = CommandLineTool()
try:
return cli.run(argv)
except KeyboardInterrupt:
print('Canceled')
return 3 | a4a5dec5c09c6f7ee7a354aea34b98899841ed0f | 3,651,737 |
from typing import Any, Dict

import torch
from torch import nn
def extract_attrs_for_lowering(mod: nn.Module) -> Dict[str, Any]:
"""If `mod` is in `module_fetch_book`, fetch the mod's attributes that in the `module_fetch_book`
after checking module's version is compatible with the `module_fetch_book`.
"""
attrs_for_lowering: Dict[str, Any] = {}
attrs_for_lowering["name"] = torch.typename(mod)
if type(mod) in module_fetch_book:
version, param_to_fetch, matching_method = module_fetch_book[type(mod)]
if version < mod._version:
raise RuntimeError(f"Fetcher version {version} try to fetch {torch.typename(mod)} version {mod._version}, "
"please upgrade the module_fetch_book, open an issue and @842974287 "
"or report a bug to AIACC team directly.")
for attr in param_to_fetch:
attrs_for_lowering[attr] = getattr(mod, matching_method(attr, mod._version))
else:
raise RuntimeError(f"{torch.typename(mod)} is not in the module_fetch_book yet, "
"please add it to the module_fetch_book, open an issue and @842974287 "
"or report a bug to AIACC team directly.")
return attrs_for_lowering | ec2ff68f2164eabdc0aae9acdcc7ff51b83a77dd | 3,651,738 |
from typing import Set
import requests
def get_filter_fields(target: str, data_registry_url: str, token: str) -> Set[str]:
"""
Returns a list of filterable fields from a target end point by calling OPTIONS
:param target: target end point of the data registry
:param data_registry_url: the url of the data registry
:param token: personal access token
:return: the set of filterable fields on this target end point
"""
end_point = get_end_point(data_registry_url, target)
result = requests.options(end_point, headers=get_headers(token))
result.raise_for_status()
options = result.json()
return set(options.get("filter_fields", [])) | 34b6cccc2f8529391357ab70b212f1fddbd9e37d | 3,651,739 |
import numpy as np


def azimuthalAverage(image, center=None):
"""
Calculate the azimuthally averaged radial profile.
image - The 2D image
center - The [x,y] pixel coordinates used as the center. The default is
None, which then uses the center of the image (including
             fractional pixels).
http://www.astrobetter.com/blog/2010/03/03/fourier-transforms-of-images-in-python/
v0.1
"""
# Calculate the indices from the image
y, x = np.indices(image.shape)
if not center:
center = np.array([(x.max()-x.min())/2.0, (y.max()-y.min())/2.0])
r = np.hypot(x - center[0], y - center[1])
# Get sorted radii
ind = np.argsort(r.flat)
r_sorted = r.flat[ind]
i_sorted = image.flat[ind]
# Get the integer part of the radii (bin size = 1)
r_int = r_sorted.astype(int)
# Find all pixels that fall within each radial bin.
deltar = r_int[1:] - r_int[:-1] # Assumes all radii represented
rind = np.where(deltar)[0] # location of changed radius
nr = rind[1:] - rind[:-1] # number of radius bin
# Cumulative sum to figure out sums for each radius bin
csim = np.cumsum(i_sorted, dtype=float)
tbin = csim[rind[1:]] - csim[rind[:-1]]
radial_prof = tbin / nr
return radial_prof | f086d0868bd56b01de976f346e2a66f5e0d7a10b | 3,651,740 |
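# Usage example (illustrative): the radial profile of an isotropic 2D Gaussian should decrease
# monotonically with radius.
import numpy as np

yy, xx = np.indices((129, 129))
r2 = (xx - 64.0) ** 2 + (yy - 64.0) ** 2
gaussian_image = np.exp(-r2 / (2.0 * 20.0 ** 2))
profile = azimuthalAverage(gaussian_image)
assert np.all(np.diff(profile) <= 0)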
def train_transforms_fisheye(sample, image_shape, jittering):
"""
Training data augmentation transformations
Parameters
----------
sample : dict
Sample to be augmented
image_shape : tuple (height, width)
Image dimension to reshape
jittering : tuple (brightness, contrast, saturation, hue)
Color jittering parameters
Returns
-------
sample : dict
Augmented sample
"""
if len(image_shape) > 0:
sample = resize_sample_fisheye(sample, image_shape)
sample = duplicate_sample(sample)
if len(jittering) > 0:
sample = colorjitter_sample(sample, jittering)
sample = to_tensor_sample(sample)
return sample | c815d28a5e9e62234544adc4f2ba816e9f1c366a | 3,651,741 |
from typing import Any
def build_json_output_request(**kwargs: Any) -> HttpRequest:
"""A Swagger with XML that has one operation that returns JSON. ID number 42.
See https://aka.ms/azsdk/python/protocol/quickstart for how to incorporate this request builder
into your code flow.
:return: Returns an :class:`~azure.core.rest.HttpRequest` that you will pass to the client's
`send_request` method. See https://aka.ms/azsdk/python/protocol/quickstart for how to
incorporate this response into your code flow.
:rtype: ~azure.core.rest.HttpRequest
Example:
.. code-block:: python
# response body for status code(s): 200
response.json() == {
"id": 0 # Optional.
}
"""
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", "/xml/jsonoutput")
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters["Accept"] = _SERIALIZER.header("accept", accept, "str")
return HttpRequest(method="GET", url=url, headers=header_parameters, **kwargs) | b920f0955378f0db1fdddadefc4037a33bedecad | 3,651,742 |
def merge_extended(args_container: args._ArgumentContainer, hold: bool, identificator: str) -> int:
"""
Merge the args_container into the internal, like merge_named, but hold specifies if the internal container should not be cleared.
:param args_container: The argument container with the data to merge
:param hold: When True, does not clear the internal data.
:param identificator: The identificator to pass to the MERGE_END event
:raises TypeError: if the arguments passed are not the expected type
"""
if (
not isinstance(args_container, args._ArgumentContainer)
or not isinstance(hold, int) # noqa W503
or not isinstance(identificator, str) # noqa W503
):
raise TypeError("The given parameters do not match the types required.")
return _grm.grm_merge_extended(args_container.ptr, c_int(1 if hold else 0), _encode_str_to_char_p(identificator)) | 111765aa0a62a1050387670472d3718aca8a015f | 3,651,743 |
def video_path_file_name(instance, filename):
""" Callback for video node field to get path file name
:param instance: the image field
:param filename: the file name
:return: the path file name
"""
return path_file_name(instance, 'video', filename) | b34b96961ad5f9275cd89809828b8dd0aed3dafb | 3,651,744 |
import numpy as np


def radiative_processes_mono(flux_euv, flux_fuv,
average_euv_photon_wavelength=242.0,
average_fuv_photon_wavelength=2348.0):
"""
Calculate the photoionization rate of helium at null optical depth based
on the EUV spectrum arriving at the planet.
Parameters
----------
flux_euv (``float``):
Monochromatic extreme-ultraviolet (0 - 504 Angstrom) flux arriving at
the planet in units of erg / s / cm ** 2. Attention: notice that this
``flux_euv`` is different from the one used for hydrogen, since helium
ionization happens at a shorter wavelength.
flux_fuv (``float``):
Monochromatic far- to middle-ultraviolet (911 - 2593 Angstrom) flux
arriving at the planet in units of erg / s / cm ** 2.
average_euv_photon_wavelength (``float``):
Average wavelength of EUV photons ionizing the He singlet state, in unit
of Angstrom. Default value is 242 Angstrom. The default value is based
on a flux-weighted average of the solar spectrum between 0 and 504
Angstrom.
average_fuv_photon_wavelength (``float``):
Average wavelength of FUV-NUV photons ionizing the He triplet state, in
unit of Angstrom. Default value is 2348 Angstrom. The default value is
based on a flux-weighted average of the solar spectrum between 911 and
2593 Angstrom.
Returns
-------
phi_1 (``float``):
Ionization rate of helium singlet at null optical depth in unit of
1 / s.
phi_3 (``float``):
Ionization rate of helium triplet at null optical depth in unit of
1 / s.
a_1 (``float``):
Flux-averaged photoionization cross-section of helium singlet in unit of
cm ** 2.
a_3 (``float``):
Flux-averaged photoionization cross-section of helium triplet in unit of
cm ** 2.
a_h_1 (``float``):
Flux-averaged photoionization cross-section of hydrogen in the range
absorbed by helium singlet in unit of cm ** 2.
a_h_3 (``float``):
Flux-averaged photoionization cross-section of hydrogen in the range
absorbed by helium triplet in unit of cm ** 2.
"""
# Average cross-section to ionize helium singlet
a_1 = microphysics.helium_singlet_cross_section(average_euv_photon_wavelength)
# The photoionization cross-section of He triplet
wavelength_3, a_lambda_3 = microphysics.helium_triplet_cross_section()
# # Average cross-section to ionize helium triplet
a_3 = np.interp(average_fuv_photon_wavelength, wavelength_3, a_lambda_3)
# The flux-averaged photoionization cross-section of H is also going to be
# needed because it adds to the optical depth that the He atoms see.
# Contribution to the optical depth seen by He singlet atoms:
# Hydrogen cross-section within the range important to helium singlet
a_h_1 = 6.3E-18 * (average_euv_photon_wavelength / 13.6) ** (-3)
# Unit 1 / cm ** 2.
# Contribution to the optical depth seen by He triplet atoms:
if average_fuv_photon_wavelength < 911.0:
a_h_3 = microphysics.hydrogen_cross_section(
wavelength=average_fuv_photon_wavelength)
else:
a_h_3 = 0.0
# Convert the fluxes from erg to eV and calculate the photoionization rates
energy_1 = 12398.419843320025 / average_euv_photon_wavelength
energy_3 = 12398.419843320025 / average_fuv_photon_wavelength
phi_1 = flux_euv * 6.24150907e+11 * a_1 / energy_1
phi_3 = flux_fuv * 6.24150907e+11 * a_3 / energy_3
return phi_1, phi_3, a_1, a_3, a_h_1, a_h_3 | b555fe1af2bdcdab4ac8f78ed2e64bef35a2cdab | 3,651,745 |
import typing
import datetime
def date_yyyymmdd(now: typing.Union[datetime.datetime, None] = None, day_delta: int = 0, month_delta: int = 0) -> str:
"""
:param day_delta:
:param month_delta:
:return: today + day_delta + month_delta -> str YYYY-MM-DD
"""
return date_delta(now, day_delta, month_delta).strftime("%Y-%m-%d") | 8a3ff535964aba6e3eeaa30dc6b98bfcab1b5794 | 3,651,746 |
from .geocoder import description_for_number as real_fn
def description_for_number(*args, **kwargs):
"""Return a text description of a PhoneNumber object for the given language.
The description might consist of the name of the country where the phone
number is from and/or the name of the geographical area the phone number
is from. This function explicitly checks the validity of the number passed in
Arguments:
numobj -- The PhoneNumber object for which we want to get a text description.
lang -- A 2-letter lowercase ISO 639-1 language code for the language in
which the description should be returned (e.g. "en")
script -- A 4-letter titlecase (first letter uppercase, rest lowercase)
ISO script code as defined in ISO 15924, separated by an
underscore (e.g. "Hant")
region -- A 2-letter uppercase ISO 3166-1 country code (e.g. "GB")
Returns a text description in the given language code, for the given phone
number, or an empty string if no description is available."""
return real_fn(*args, **kwargs) | c60423fb26d892a43db6017a08cce3d589481cb6 | 3,651,747 |
def get_pathway_nodes(pathway):
"""Return single nodes in pathway.
:param pathme_viewer.models.Pathway pathway: pathway entry
:return: BaseAbundance nodes
:rtype: list[pybel.dsl.BaseAbundance]
"""
# Loads the BELGraph
graph = from_bytes(pathway.blob)
collapse_to_genes(graph)
# Return BaseAbundace BEL nodes
return {
node.as_bel()
for node in graph
if isinstance(node, BaseAbundance)
} | 5ec451d9e9192b7d07230b05b2e3493df7ab3b4d | 3,651,748 |
def check_currrent_user_privilege():
"""
Check if our user has interesting tokens
"""
# Interesting Windows Privileges
# - SeDebug
# - SeRestore
# - SeBackup
# - SeTakeOwnership
# - SeTcb
# - SeCreateToken
# - SeLoadDriver
# - SeImpersonate
# - SeAssignPrimaryToken
interesting_priv = (
u'SeDebug', u'SeRestore', u'SeBackup', u'SeTakeOwnership',
u'SeTcb', u'SeCreateToken', u'SeLoadDriver', u'SeImpersonate',
u'SeAssignPrimaryToken'
)
privs = get_currents_privs()
priv = []
for (privilege, enabled) in privs:
if enabled:
string = privilege
for p in interesting_priv:
if p in privilege:
string += ' => Could be used to elevate our privilege'
break
priv.append(string)
return priv | 7780717dfbf887f80ff90151ba2f00e49b810e2e | 3,651,749 |
import copy
def handle_domain_addition_commands(client: Client, demisto_args: dict) -> CommandResults:
"""
Adds the domains to the inbound blacklisted list.
:type client: ``Client``
:param client: Client to use.
:type demisto_args: ``dict``
:param demisto_args: The demisto arguments.
:return: The command results which contains the added domains to the inbound blacklisted list.
:rtype: ``CommandResults``
"""
demisto_args = handle_args(demisto_args)
domain = demisto_args.get('domain')
if not domain:
raise DemistoException(
'A domain must be provided in order to add it to the inbound blacklisted list.')
demisto_args['domain'] = ','.join(argToList(domain))
raw_result = client.inbound_blacklisted_domain_add_command(demisto_args)
domains_list = copy.deepcopy(raw_result.get('domains', [raw_result]))
msg = 'Domains were successfully added to the inbound blacklisted list\n'
objects_time_to_readable_time(domains_list, 'updateTime')
readable_output = msg + tableToMarkdown('Added Domains', domains_list,
headers=['domain', 'pgid', 'cid', 'update_time', 'annotation'],
headerTransform=string_to_table_header, removeNull=True)
return CommandResults(
outputs_prefix='NetscoutAED.InboundBlacklistDomain',
outputs_key_field='domain',
outputs=domains_list,
raw_response=raw_result,
readable_output=readable_output,
) | b5b281e3254a433c9431e77631001cb2be4e37e3 | 3,651,750 |
import torch
def _tc4(dom: AbsDom):
""" Validate that my AcasNet module can be optimized at the inputs. """
mse = nn.MSELoss()
max_retries = 100
max_iters = 30 # at each retry, train at most 100 iterations
def _loss(outputs_lb):
lows = outputs_lb[..., 0]
distances = 0 - lows
distances = F.relu(distances)
prop = torch.zeros_like(distances)
return mse(distances, prop)
retried = 0
while retried < max_retries:
# it is possible to get inputs optimized to some local area, thus retry multiple times
net = AcasNet(dom, 2, 2, [2]).to(device)
inputs = torch.randn(2, 2, 2, device=device)
inputs_lb, _ = torch.min(inputs, dim=-1)
inputs_ub, _ = torch.max(inputs, dim=-1)
inputs_lb = inputs_lb.requires_grad_()
inputs_ub = inputs_ub.requires_grad_()
ins = dom.Ele.by_intvl(inputs_lb, inputs_ub)
with torch.no_grad():
outputs_lb, outputs_ub = net(ins).gamma()
if _loss(outputs_lb) <= 0:
# found something to optimize
continue
retried += 1
# Now the network has something to optimize
print(f'\n===== TC4: ({retried}th try) =====')
print('Using inputs LB:', inputs_lb)
print('Using inputs UB:', inputs_ub)
print('Before any optimization, the approximated output is:')
print('Outputs LB:', outputs_lb)
print('Outputs UB:', outputs_ub)
# This sometimes work and sometimes doesn't. It may stuck on a fixed loss and never decrease anymore.
orig_inputs_lb = inputs_lb.clone()
orig_inputs_ub = inputs_ub.clone()
opti = torch.optim.Adam([inputs_lb, inputs_ub], lr=0.1)
iters = 0
while iters < max_iters:
iters += 1
# after optimization, lb ≤ ub may be violated
_inputs_lbub = torch.stack((inputs_lb, inputs_ub), dim=-1)
_inputs_lb, _ = torch.min(_inputs_lbub, dim=-1)
_inputs_ub, _ = torch.max(_inputs_lbub, dim=-1)
ins = dom.Ele.by_intvl(_inputs_lb, _inputs_ub)
opti.zero_grad()
outputs_lb, outputs_ub = net(ins).gamma()
loss = _loss(outputs_lb)
if loss <= 0:
# until the final output's 1st element is >= 0
break
loss.backward()
opti.step()
print(f'Iter {iters} - loss {loss.item()}')
if iters < max_iters:
# successfully trained
break
assert retried < max_retries
with torch.no_grad():
print(f'At {retried} retry, all optimized after {iters} iterations. ' +
f'Now the outputs 1st element should be >= 0 given the latest input.')
outputs_lb, outputs_ub = net(ins).gamma()
print('Outputs LB:', outputs_lb)
print('Outputs UB:', outputs_ub)
print('Original inputs LB:', orig_inputs_lb)
print('Optimized inputs LB:', inputs_lb)
print('Original inputs UB:', orig_inputs_ub)
print('Optimized inputs UB:', inputs_ub)
assert (outputs_lb[:, 0] >= 0.).all()
return | f008fd4bff6e1986f2354ef9338a3990e947656c | 3,651,751 |
def skip_url(url):
"""
Skip naked username mentions and subreddit links.
"""
return REDDIT_PATTERN.match(url) and SUBREDDIT_OR_USER.search(url) | 60c54b69916ad0bce971df06c5915cfbde10018c | 3,651,752 |
def registry():
"""
Return a dictionary of problems of the form:
```{
"problem name": {
"params": ...
},
...
}```
where `flexs.landscapes.AdditiveAAVPackaging(**problem["params"])` instantiates the
additive AAV packaging landscape for the given set of parameters.
Returns:
dict: Problems in the registry.
"""
problems = {
"heart": {"params": {"phenotype": "heart", "start": 450, "end": 540}},
"lung": {"params": {"phenotype": "lung", "start": 450, "end": 540}},
"kidney": {"params": {"phenotype": "kidney", "start": 450, "end": 540}},
"liver": {"params": {"phenotype": "liver", "start": 450, "end": 540}},
"blood": {"params": {"phenotype": "blood", "start": 450, "end": 540}},
"spleen": {"params": {"phenotype": "spleen", "start": 450, "end": 540}},
}
return problems | 5dd2e4e17640e0831daf02d0a2a9b9f90305a1c4 | 3,651,753 |
import time
import random
from math import gcd

import numpy as np
def ecm(n, rounds, b1, b2, wheel=2310, output=True):
"""Elliptic Curve Factorization Method. In each round, the following steps are performed:
0. Generate random point and curve.
1. Repeatedly multiply the current point by small primes raised to some power, determined
by b1.
2. Standard continuation from b1 to b2 with Brent-Suyama's Extension and Polyeval.
Returns when a non-trivial factor is found.
Args:
n (int): Number to be factorized. n >= 12.
rounds (int): Number of random curves to try.
b1 (int): Bound for primes used in step 1.
b2 (int): Bound for primes searched for in step 2. b1 < b2.
wheel (int, optional): Wheel, where only numbers coprime to wheel will be considered in
step 2. Defaults to 2310.
output (bool, optional): Whether to print progress to stdout. Defaults to True.
Raises:
ValueError: Thrown when n < 12.
Returns:
int: Non-trivial factor if found, otherwise returns None.
"""
if n < 12:
raise ValueError
j_list = [j for j in range(1, wheel // 2) if gcd(j, wheel) == 1]
block_size = 1 << (len(j_list) - 1).bit_length() - 1
for round_i in range(rounds):
if output:
st = time.time()
print("Round {}...".format(round_i))
count = 0
success = False
while not success and count < 20:
try:
count += 1
sigma = random.randint(6, n - 6)
mnt_pt, mnt_curve = mnt.get_curve_suyama(sigma, n)
success = True
except InverseNotFound as e:
res = gcd(e.x, n)
if 1 < res < n:
return res
except CurveInitFail:
pass
if not success:
print(" - Curve Init Failed.")
break
try:
# Step 1
if output:
print("{:>5.2f}: Step 1".format(time.time() - st))
for p in PRIME_GEN(b1):
for _ in range(int(np.log(b1) / np.log(p))):
mnt_pt = mnt.mul_pt_exn(mnt_pt, mnt_curve, p)
# Step 2
if output:
print("{:>5.2f}: Step 2".format(time.time() - st))
polynomial = (2, 0, 9, 0, 6, 0, 1) # f(x) = x^6 + 6x^4 + 9x^2 + 2
q, wst_curve = mnt.to_weierstrass(mnt_pt, mnt_curve)
c1 = b1 // wheel
c2 = b2 // wheel + 2
c = 0
k_ls = [
apply_polynomial(polynomial, j) for j in j_list
] + get_difference_seq(polynomial, c1 * wheel, wheel)
mul_res = wst.mul_pt_multi(q, wst_curve, k_ls)
xj_list = []
for i in range(len(j_list)):
xj_list.append(mul_res[i][0])
cq_list = mul_res[len(j_list) :]
f_tree = product_tree([Polynomial([n - xj, 1], n) for xj in xj_list], n)
f_recip_tree = recip_tree(f_tree)
H = Polynomial([1], n)
g_poly_list = []
while c < c2 - c1:
for _ in range(min(block_size, c2 - c1 - c)):
g_poly_list.append(Polynomial([n - cq_list[0][0], 1], n))
step_difference_seq_exn(cq_list, wst_curve)
c += 1
G = product_tree(g_poly_list, n)[0]
H = (H * G).mod_with_recip(f_tree[0], f_recip_tree[0])
g_poly_list.clear()
rem_tree = remainder_tree(H, f_tree, f_recip_tree, n)
res = gcd(rem_tree[0], n)
if 1 < res < n:
return res
elif res == n:
for rem in rem_tree[len(rem_tree) // 2 :]:
res = gcd(rem, n)
if 1 < res < n:
return res
assert False
if output:
print("{:>5.2f}: End".format(time.time() - st))
except InverseNotFound as e:
res = gcd(e.x, n)
if 1 < res < n:
return res
return None | 9490e6ac4308aed9835e85b3093a1c2b18877fd1 | 3,651,754 |
from typing import Optional
import re
import datetime
import logging
def dc_mode_option(update: Update, contex: CallbackContext) -> Optional[int]:
"""Get don't care response mode option"""
ndc = contex.user_data[0]
if ndc.response_mode == DoesntCare.ResponseMode.TIME:
if not re.match(r"[0-9]+:[0-9]+:[0-9]+", update.effective_message.text):
update.effective_message.reply_text(
'Invalid time format, please send in this format: Hours:Minutes:Seconds')
return None
hms = update.effective_message.text.split(':')
ndc.response_mode_option = \
datetime.timedelta(hours=int(hms[0]), minutes=int(hms[1]), seconds=int(hms[2])).total_seconds()
else:
if ((not update.effective_message.text.isdigit()) or
(not (int(update.effective_message.text) > 1))):
update.effective_message.reply_text('Invalid number. Please send a positive integer more than 1.')
return None
ndc.response_mode_option = float(update.effective_message.text)
if ndc.add():
update.effective_message.reply_text("Added user to your don't care list!")
logging.info(
"Add: DCU: \"{}\", NIU: \"{}\", Chat: \"{}\", RM: \"{}\", RMO: \"{}\""
.format(ndc.doesnt_care_id, ndc.not_important_id, ndc.chat_id, ndc.response_mode,
ndc.response_mode_option)
)
else:
update.effective_message.reply_text("Sorry, an error occurred! Please try again later.")
logging.error(
"Add, DCU: \"{}\", NIU: \"{}\", Chat: \"{}\""
.format(ndc.doesnt_care_id, ndc.not_important_id, ndc.chat_id)
)
return ConversationHandler.END | accf998e660898d9de2d17d45e18b6d49ba90f4c | 3,651,755 |
def is_in_period(datetime_, start, end):
"""指定した日時がstartからendまでの期間に含まれるか判定する"""
return start <= datetime_ < end | 3b830cb8d9e74934a09430c9cd6c0940cf36cf2e | 3,651,756 |
def create_experiment_summary():
"""Returns a summary proto buffer holding this experiment"""
    # Convert TEMPERATURE_LIST to google.protobuf.ListValue
    # (ListValue.extend returns None, so build the list value first and then extend it)
    temperature_list = struct_pb2.ListValue()
    temperature_list.extend(TEMPERATURE_LIST)
return summary.experiment_pb(
hparam_infos=[
api_pb2.HParamInfo(name="initial_temperature",
display_name="initial temperature",
type=api_pb2.DATA_TYPE_FLOAT64,
domain_discrete=temperature_list),
api_pb2.HParamInfo(name="ambient_temperature",
display_name="ambient temperature",
type=api_pb2.DATA_TYPE_FLOAT64,
domain_discrete=temperature_list),
api_pb2.HParamInfo(name="heat_coefficient",
display_name="heat coefficient",
type=api_pb2.DATA_TYPE_FLOAT64,
domain_discrete=temperature_list)
],
metric_infos=[
api_pb2.MetricInfo(
name=api_pb2.MetricName(
tag="temparature/current/scalar_summary"),
display_name="Current Temp."),
api_pb2.MetricInfo(
name=api_pb2.MetricName(
tag="temparature/difference_to_ambient/scalar_summary"),
display_name="Difference To Ambient Temp."),
api_pb2.MetricInfo(
name=api_pb2.MetricName(
tag="delta/scalar_summary"),
display_name="Delta T")
]
) | 678a9f1b004f4c5a60784ccf814082731eace826 | 3,651,757 |
import requests
def get_session(token, custom_session=None):
"""Get requests session with authorization headers
Args:
token (str): Top secret GitHub access token
custom_session: e.g. betamax's session
Returns:
:class:`requests.sessions.Session`: Session
"""
session = custom_session or requests.Session()
session.headers = {
"Authorization": "token " + token,
"User-Agent": "testapp"
}
return session | 88bf566144a55cf36daa46d3f9a9886d3257d767 | 3,651,758 |
def mass_to_tbint_to_energy_map(dpath, filterfn=lambda x: True,
fpath_list=None):
"""Given a directory, creates a mapping
mass number -> ( a, b, c, d, j -> energy )
using the files in the directory
:param fpath_list:
:param dpath: the directory which is a direct parent to the files from
which to generate the map
:param filterfn: the filter function to apply to the files before
constructing the map
"""
mida_map = _mass_tbme_data_map(
dpath, filterfn, fpath_list)
for k in mida_map.keys():
v = mida_map[k]
nextv = dict()
for row in v:
tup = tuple(row[0:6])
energy = float(row[6])
nextv[tup] = energy
mida_map[k] = nextv
return mida_map | a13caba5ff41e2958d7f4e6104eb809de1cda1c1 | 3,651,759 |
import unicodedata
def strip_accents(text):
"""
Strip accents from input String.
:param text: The input string.
:type text: String.
:returns: The processed String.
:rtype: String.
"""
text = unicodedata.normalize('NFD', text)
text = text.encode('ascii', 'ignore')
text = text.decode("utf-8")
return str(text) | 4a6e11e0a72438a7e604e90e44a7220b1426df69 | 3,651,760 |
import json
def json_formatter(result, _verbose):
"""Format result as json."""
if isinstance(result, list) and "data" in result[0]:
res = [json.dumps(record) for record in result[0]["data"]]
output = "\n".join(res)
else:
output = json.dumps(result, indent=4, sort_keys=True)
return output | 68aae87577370d3acf584014651af21c7cbfa309 | 3,651,761 |
def show_all_companies():
"""Show all companies a user has interest in."""
# redirect if user is not logged in
if not session:
return redirect('/')
else:
# get user_id from session
user_id = session['user_id']
user = User.query.filter(User.user_id == user_id).one()
user_companies = user.companies
companies = {}
for company in user_companies:
count = Job.query.filter(Job.company_id == company.company_id).count()
companies[company] = count
return render_template('companies.html', companies=companies) | 7f2d7215627747ff44caff4f58324dce2e3aa749 | 3,651,762 |
import numpy as np
from math import sqrt
def ll_combined_grad(x, item_ids, judge_ids, pairwise=[], individual=[]):
"""
This function computes the _negative_ gradient of the loglikelihood for
each parameter in x, for both the individual and pairwise data.
Keyword arguments:
x -- the current parameter estimates.
item_ids -- the ids of the items being evaluated
    judge_ids -- the ids of the judges being evaluated
pairwise -- an iterator for the pairwise ratings
individual -- an iterator for the individual ratings
>>> ll_combined_grad([0,0,1,1,3,1], [0,1], [0], [], [])
array([-0. , -0. , -0. , -1.33333333, 2. , -0. ])
"""
item_val = {i:idx for idx, i in enumerate(item_ids)}
discrim = {i:idx + len(item_val) for idx, i in enumerate(judge_ids)}
bias = {i:idx + len(item_val) + len(judge_ids) for idx, i in enumerate(judge_ids)}
precision = {i:idx + len(item_val) + 2*len(judge_ids) for idx, i in enumerate(judge_ids)}
likert_mean = x[-1]
likert_prec = x[-2]
grad = np.zeros(len(x))
#grad = np.array([0.0 for v in x])
for r in pairwise:
left = x[item_val[r.left.id]]
right = x[item_val[r.right.id]]
d = x[discrim[r.judge.id]]
y = r.value
z = d * (left - right)
#z = (left - right)
p = invlogit(z)
g = y - p
#grad[item_val[r.left.id]] += g
#grad[item_val[r.right.id]] += -1 * g
grad[item_val[r.left.id]] += d * g
grad[item_val[r.right.id]] += -1 * d * g
grad[discrim[r.judge.id]] += (left - right) * g
for l in individual:
u = x[item_val[l.item.id]]
b = x[bias[l.judge.id]]
prec = x[precision[l.judge.id]]
#n = sqrt(1/prec)
p0 = likert_prec
s = 1 / sqrt(p0)
error = (l.value - likert_mean - s * (b + u))
grad[item_val[l.item.id]] += prec * p0 * error * s
grad[bias[l.judge.id]] += prec * p0 * error * s
grad[-1] += prec * p0 * error
grad[precision[l.judge.id]] += (1 / (2 * prec)) - (p0 / 2) * (error * error)
grad[-2] += (1 / (2 * p0)) - (prec / 2) * ((b + u) * s * error + error * error)
#error = (l.value - likert_mean - b - u)
#grad[item_val[l.item.id]] += prec * error
#grad[bias[l.judge.id]] += prec * error
#grad[-1] += prec * error # likert mean
#grad[precision[l.judge.id]] += (1 / (2 * prec)) - (error * error)/2
# Regularization
# Normal prior on means
item_reg = np.array([0.0 for v in x])
for i in item_val:
item_reg[item_val[i]] += (x[item_val[i]] - item_mean)
item_reg = -1 * item_prec * item_reg
#item_reg = (-1.0 / (item_std * item_std)) * item_reg
# Normal prior on discriminations
judge_reg = np.array([0.0 for v in x])
for i in discrim:
judge_reg[discrim[i]] += (x[discrim[i]] - discrim_mean)
judge_reg = -1 * discrim_prec * judge_reg
#judge_reg = (-1.0 / (discrim_std * discrim_std)) * judge_reg
# Normal prior on bias
bias_reg = np.array([0.0 for v in x])
for i in bias:
bias_reg[bias[i]] += (x[bias[i]] - bias_mean)
bias_reg = (-1.0 / (bias_std * bias_std)) * bias_reg
# Normal prior on noise
prec_reg = np.array([0.0 for v in x])
for i in precision:
prec_reg[precision[i]] += (x[precision[i]] - prec_mean)
prec_reg = (-1.0 / (prec_std * prec_std)) * prec_reg
return -1 * (grad + item_reg + judge_reg + bias_reg + prec_reg) | 54936fe9b0e9b7a17acb7455c606bf754532a8b8 | 3,651,763 |
import numpy as np
def relu(inp):  # ReLU function as activation function
    """
    ReLU neural network activation function
    :param inp: Node value before activation
    :return: Node value after activation
    """
    return np.maximum(inp, 0) | fbe6caf2246684a62d00956e38579fab3dff3418 | 3,651,764
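# Small numeric check for relu() above (uses the element-wise np.maximum form).
x = np.array([-2.0, -0.5, 0.0, 1.5])
print(relu(x))  # -> [0.  0.  0.  1.5]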
from typing import List
from typing import Tuple
import logging
def augment_sentence(tokens: List[str], augmentations: List[Tuple[List[tuple], int, int]], begin_entity_token: str,
sep_token: str, relation_sep_token: str, end_entity_token: str) -> str:
"""
Augment a sentence by adding tags in the specified positions.
Args:
tokens: Tokens of the sentence to augment.
augmentations: List of tuples (tags, start, end).
begin_entity_token: Beginning token for an entity, e.g. '['
sep_token: Separator token, e.g. '|'
relation_sep_token: Separator token for relations, e.g. '='
end_entity_token: End token for an entity e.g. ']'
An example follows.
tokens:
['Tolkien', 'was', 'born', 'here']
augmentations:
[
([('person',), ('born in', 'here')], 0, 1),
([('location',)], 3, 4),
]
output augmented sentence:
[ Tolkien | person | born in = here ] was born [ here | location ]
"""
# sort entities by start position, longer entities first
augmentations = list(sorted(augmentations, key=lambda z: (z[1], -z[2])))
# check that the entities have a tree structure (if two entities overlap, then one is contained in
# the other), and build the entity tree
root = -1 # each node is represented by its position in the list of augmentations, except that the root is -1
entity_tree = {root: []} # list of children of each node
current_stack = [root] # where we are in the tree
for j, x in enumerate(augmentations):
tags, start, end = x
if any(augmentations[k][1] < start < augmentations[k][2] < end for k in current_stack):
# tree structure is not satisfied!
logging.warning(f'Tree structure is not satisfied! Dropping annotation {x}')
continue
while current_stack[-1] >= 0 and \
not (augmentations[current_stack[-1]][1] <= start <= end <= augmentations[current_stack[-1]][2]):
current_stack.pop()
# add as a child of its father
entity_tree[current_stack[-1]].append(j)
# update stack
current_stack.append(j)
# create empty list of children for this new node
entity_tree[j] = []
return ' '.join(expand_tokens(
tokens, augmentations, entity_tree, root, begin_entity_token, sep_token, relation_sep_token, end_entity_token
)) | 916745727dd6ce19e67a28bdadb2bd74b54075a3 | 3,651,765 |
import multiprocessing
def evaluate_model_recall_precision(mat, num_items, testRatings, K_recall, K_precision, num_thread):
"""
    Evaluate the performance (Recall, Precision) of top-K recommendation.
    Return: recall and precision scores for each test rating.
"""
global _mat
global _testRatings
global _K_recall
global _K_precision
global _K_max
global _num_items
_mat = mat
_testRatings = testRatings
_K_recall = K_recall
_K_precision = K_precision
_K_max = max(_K_precision,_K_recall)
_num_items = num_items
recalls, precisions = [], []
if (num_thread > 1): # Multi-thread
pool = multiprocessing.Pool(processes=num_thread)
res = pool.map(eval_recall_precision, range(len(_testRatings)))
pool.close()
pool.join()
recalls = [r[0] for r in res]
precisions = [r[1] for r in res]
return (recalls, precisions)
# Single thread
for idx in range(len(_testRatings)):
(recall, precision) = eval_recall_precision(idx)
recalls.append(recall)
precisions.append(precision)
return (recalls, precisions) | bb504053937faf6e3017f8d79fee6a4a4e864b15 | 3,651,766 |
def pipe_hoop_stress(P, D, t):
"""Calculate the hoop (circumferential) stress in a pipe
using Barlow's formula.
Refs: https://en.wikipedia.org/wiki/Barlow%27s_formula
https://en.wikipedia.org/wiki/Cylinder_stress
:param P: the internal pressure in the pipe.
:type P: float
:param D: the outer diameter of the pipe.
:type D: float
:param t: the pipe wall thickness.
:type t: float
:returns: the hoop stress in the pipe.
:rtype: float
"""
return P * D / 2 / t | 9985d35c2c55e697ce21a880bb2234c160178f33 | 3,651,767 |
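# Worked example with hypothetical numbers: a pipe with 0.5 m outer diameter and a
# 20 mm wall under 10 MPa internal pressure gives 10e6 * 0.5 / (2 * 0.02) = 125 MPa.
print(pipe_hoop_stress(P=10e6, D=0.5, t=0.02))  # -> 125000000.0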
import maya.cmds
def node_constraints(node):
"""
Returns all constraints a node is linked to
:param node: str
:return: list(str)
"""
return maya.cmds.listRelatives(node, type='constraint') | 85c619f4c1b6ec24feb8c3dac3e73b92f8fdf7fc | 3,651,768 |
import os
def save_data_file(sourceFile, destination = None, subdirectory = None, user = None, verbose = True):
""" Function used to save (i.e copy) a data file into a directory of choice after an experimental session
Parameters: sourceFile - the path of the file that was generated by the experimental session and that resides
in the local file system.
destination - An optional destination path where to save the file. File name may be included
or not at the end of the path.
subdirectory - An optional subdirectory, i.e folder, to add to the destination path. For example,
if the destination path is a folder called "experiments", the subdirectory can be
a child folder of "experiments", named after the experiment type ("behaviour"
for instance).
user - An optional parameter to indicate which user is conducting the experiments.
If supplied, and if no destination is passed, a configuration file is looked
up to retrieve the folder into which the user is usually copying data files.
If no destination and no user is provided, a default directory is looked up
in the configuration file as the default destination of the file to be copied.
Either way, a save as dialog box will appear and the user will have final say.
"""
# Validate file parameter passed. Also check to see if the path provided is lacking the default .h5 extension
if not os.path.exists(sourceFile):
if not os.path.exists(sourceFile+".h5"):
# Error message if the source file path could not be found in the system
error(None,"Woah there!\n\n1. Couldn't find the file that you want to copy.\
\n2. Check to see if it exists in the file system and the path provided is correct"\
, "File Finding Police report")
return
else:
# File exists but has an extension and one was not provided in the path given.
# Add it to file path descriptor
sourceFile += ".h5"
# information(None, "the filename of source provided lacked the \".h5\" extension.\
# \n\nA file with the extension was found and presumed to be the source meant"\
# ,"Path Police report")
# Get file extension
fileExtension = os.path.splitext(sourceFile)[-1]
# Get the destination file name from the path provided
destinationFile = os.path.split(sourceFile)[-1]
destinationFolder = ""
# If file has no extension, add the default .h5 extension to destination file name
if fileExtension == "":
warning(None, "The file you are trying to save has no extension\n\nAdding \".h5\" to the name of destination file"\
, ".h5 Extension Police")
        destinationFile = destinationFile + ".h5"
# The file provided has different extension. Display a warning but do nothing.
elif fileExtension != ".h5":
warning(None, "Your file to be copied does not have an \".h5\" extension\n\nNo action taken."\
, "h5 Extension Police")
# Display confirmation dialog for copying the file
dlg = ConfirmationDialog(title = "You there!", yes_label = "Yes Please!",
no_label = "Nah...", message = "Would you like to copy the data file generated after the session?\
\n\nIf you say Nah... and change your mind, you'll have to copy it manually later")
# Open the dialog GUI
dlg.open()
# User provided a destination path
if destination:
# Check to see if destination is a file name with an extension.
destinationExtension = os.path.splitext(destination)[-1]
if destinationExtension:
# Is it .h5? If not, warn but don't override.
if destinationExtension != ".h5":
warning(None, "Your destination filename does not have an \".h5\" extension\n\nNo action taken."\
, "h5 Extension Police")
destinationFolder, destinationFile = os.path.split(destination)
# Assume destination is directory since there is no extension.
else:
destinationFolder = destination
# Look up a default destination from the config file since no <destination> parameter was provided.
else:
configFile = os.environ.get("Voyeur_config")
config = ConfigObj(configFile)
# A user specific folder was provided.
if user:
destinationFolder = config['server']['folder']['data']['user']
# Use default data folder as read from the config file.
else:
destinationFolder = config['server']['folder']['data']['default']
# User provided a subdirectory, i.e subfolder, into which to place the file.
if subdirectory:
# The subdirectory provided has common path with the directory provided. Display warning but do nothing.
if os.path.commonprefix((destination,subdirectory)):
warning(None, "Friendly warning!\n<subdirectory> parameter provided has a common path with the <destination>\
path parameter\n\n1. No action taken.\n2. Check your final destination path to make sure it is what you want"\
, "Path Police report")
destinationFolder = os.path.join(destinationFolder,subdirectory)
# Path of the destination of file to be copied.
destinationPath = os.path.join(destinationFolder,destinationFile)
if dlg.return_code == YES:
# A file with same name exists.
if os.path.isfile(destinationPath):
warning(None, "A file with given path already exists!\n\n1. No action taken\
\n2. Make sure to either rename file or choose different folder", "Path Police report")
# Provided folder does not exist. Make one and inform the user.
elif not os.path.isdir(destinationFolder):
information(None, "Making a new folder to put the file into...", "Information Transparency report")
# TODO: What if this results in an exception? Catch and do something?
# TODO: Keep track of made directories so we may delete them later
os.makedirs(os.path.abspath(destinationFolder))
# The save as dialog box.
# TODO: change wildcard to current extension wildcard
dialog = FileDialog(action="save as", title = "Select directory into which the data file will be copied",\
wildcard = "*.*", default_directory = destinationFolder, default_filename = destinationFile) #*.h5|||
    elif dlg.return_code == NO:
        if verbose:
            information(None, "No file was copied.\n\nIf you change your mind, you will have to transfer the data file manually."\
                , "Information Transparency report")
        return
dialog.open()
# User clicked Save and successful input received.
if dialog.return_code == OK:
# The actual copying of the file. TODO: See if the copy2 function throws an exception
copy2(sourceFile, dialog.path)
# The user clicked Cancel.
elif dialog.return_code == CANCEL:
information(None, "No file was copied.\n\nIf you change your mind, you will have to transfer the data file manually."\
, "Information Transparency report")
#TODO: update the Voyeur config file after asking user
return dialog.path | 1950fd776ccee91d0b2500297b2b3f94cb734415 | 3,651,769 |
import os
def parse_file_name(filename):
"""
Parse the file name of a DUD mol2 file to get the target name and the y label
:param filename: the filename string
:return: protein target name, y_label string (ligand or decoy)
"""
bname = os.path.basename(filename)
splitted_bname = bname.split('_')
if len(splitted_bname) == 3:
target_name = splitted_bname[0]
y_label_str = splitted_bname[1]
elif len(splitted_bname) == 4:
target_name = '_'.join([splitted_bname[0], splitted_bname[1]])
y_label_str = splitted_bname[2]
else:
raise ValueError('File name has not expected format. Can not parse file name.')
if y_label_str == 'decoys':
y_label = 0
elif y_label_str == 'ligands':
y_label = 1
else:
raise ValueError('File name has not expected format. Can not parse file name.')
return target_name, y_label | 8f9de132e622feffc513453be36b80f386b36c9c | 3,651,770 |
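# Hedged usage sketch with made-up DUD-style file names (not real dataset paths):
# three underscore-separated parts -> single-word target, four parts -> two-word target.
print(parse_file_name("/data/ace_ligands_prepared.mol2"))     # -> ('ace', 1)
print(parse_file_name("/data/hivrt_w_decoys_prepared.mol2"))  # -> ('hivrt_w', 0)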
def load_opencv_stereo_calibration(path):
"""
Load stereo calibration information from xml file
@type path: str
    @param path: path to the xml file
@return stereo calibration: loaded from the given xml file
@rtype calib.data.StereoRig
"""
tree = etree.parse(path)
stereo_calib_elem = tree.find("Rig")
return rig.Rig.from_xml(stereo_calib_elem) | 07ace05e8d377ba1fdcef632e5afa1d9ea309185 | 3,651,771 |
def _IsSingleElementTuple(token):
"""Check if it's a single-element tuple."""
close = token.matching_bracket
token = token.next_token
num_commas = 0
while token != close:
if token.value == ',':
num_commas += 1
if token.OpensScope():
token = token.matching_bracket
else:
token = token.next_token
return num_commas == 1 | 8d675bcee737ddb106817db79e2b989509d2efaa | 3,651,772 |
def exportBufferView(gltf: GLTF2, primaryBufferIndex: int, byteOffset: int, byteLength: int) -> GLTFIndex:
"""Creates a glTF bufferView with the specified offset and length, referencing the default glB buffer.
Args:
gltf: Gltf object to append new buffer onto.
primaryBufferIndex: Index of the primary glb buffer.
byteOffset: Index of the starting byte in the referenced buffer.
byteLength: Length in bytes of the bufferView.
Returns: The index of the exported bufferView in the glTF bufferViews list.
"""
bufferView = BufferView()
bufferView.buffer = primaryBufferIndex # index of the default glB buffer.
bufferView.byteOffset = byteOffset
bufferView.byteLength = byteLength
return appendGetIndex(gltf.bufferViews, bufferView) | 6905f3544470860a125b0d28f5f422a39bc7b91f | 3,651,773 |
import numpy
def ReadCan(filename):
"""Reads the candump in filename and returns the 4 fields."""
trigger = []
trigger_velocity = []
trigger_torque = []
trigger_current = []
wheel = []
wheel_velocity = []
wheel_torque = []
wheel_current = []
trigger_request_time = [0.0]
trigger_request_current = [0.0]
wheel_request_time = [0.0]
wheel_request_current = [0.0]
with open(filename, 'r') as fd:
for line in fd:
data = line.split()
can_id = int(data[1], 16)
if can_id == 0:
data = [int(d, 16) for d in data[3:]]
trigger.append(((data[0] + (data[1] << 8)) - 32768) / 32768.0)
trigger_velocity.append(
((data[2] + (data[3] << 8)) - 32768) / 32768.0)
trigger_torque.append(
((data[4] + (data[5] << 8)) - 32768) / 32768.0)
trigger_current.append(
((data[6] + ((data[7] & 0x3f) << 8)) - 8192) / 8192.0)
elif can_id == 1:
data = [int(d, 16) for d in data[3:]]
wheel.append(((data[0] + (data[1] << 8)) - 32768) / 32768.0)
wheel_velocity.append(
((data[2] + (data[3] << 8)) - 32768) / 32768.0)
wheel_torque.append(
((data[4] + (data[5] << 8)) - 32768) / 32768.0)
wheel_current.append(
((data[6] + ((data[7] & 0x3f) << 8)) - 8192) / 8192.0)
elif can_id == 2:
data = [int(d, 16) for d in data[3:]]
trigger_request_current.append(
((data[4] + (data[5] << 8)) - 32768) / 32768.0)
trigger_request_time.append(len(trigger) * 0.001)
elif can_id == 3:
data = [int(d, 16) for d in data[3:]]
wheel_request_current.append(
((data[4] + (data[5] << 8)) - 32768) / 32768.0)
wheel_request_time.append(len(wheel) * 0.001)
trigger_data_time = numpy.arange(0, len(trigger)) * 0.001
wheel_data_time = numpy.arange(0, len(wheel)) * 0.001
# Extend out the data in the interpolation table.
trigger_request_time.append(trigger_data_time[-1])
trigger_request_current.append(trigger_request_current[-1])
wheel_request_time.append(wheel_data_time[-1])
wheel_request_current.append(wheel_request_current[-1])
return (trigger_data_time, wheel_data_time, trigger, wheel,
trigger_velocity, wheel_velocity, trigger_torque, wheel_torque,
trigger_current, wheel_current, trigger_request_time,
trigger_request_current, wheel_request_time, wheel_request_current) | 773657474462aa3a129ea7459c72ea0b0dc0cefa | 3,651,774 |
def retrieve(func):
"""
Decorator for Zotero read API methods; calls _retrieve_data() and passes
the result to the correct processor, based on a lookup
"""
def wrapped_f(self, *args, **kwargs):
"""
Returns result of _retrieve_data()
func's return value is part of a URI, and it's this
which is intercepted and passed to _retrieve_data:
'/users/123/items?key=abc123'
the atom doc returned by _retrieve_data is then
passed to _etags in order to extract the etag attributes
from each entry, then to feedparser, then to the correct processor
"""
if kwargs:
self.add_parameters(**kwargs)
retrieved = self._retrieve_data(func(self, *args))
# determine content and format, based on url params
content = self.content.search(
self.request.get_full_url()) and \
self.content.search(
self.request.get_full_url()).group(0) or 'bib'
fmt = self.fmt.search(
self.request.get_full_url()) and \
self.fmt.search(
self.request.get_full_url()).group(0) or 'atom'
# step 1: process atom if it's atom-formatted
if fmt == 'atom':
parsed = feedparser.parse(retrieved)
processor = self.processors.get(content)
# step 2: if the content is JSON, extract its etags
if processor == self._json_processor:
self.etags = etags(retrieved)
# extract next, previous, first, last links
self.links = self._extract_links(parsed)
return processor(parsed)
# otherwise, just return the unparsed content as is
else:
return retrieved
return wrapped_f | 442f18f4c00a13b5eb68285202088b009f9f351b | 3,651,775 |
from typing import Dict
async def health() -> Dict[str, str]:
"""Health check function
:return: Health check dict
:rtype: Dict[str, str]
"""
health_response = schemas.Health(name=settings.PROJECT_NAME,
api_version=__version__)
return health_response.dict() | 8c2841cea1fb9118cbc063d9352d375188025614 | 3,651,776 |
def detail(video_id):
""" return value is
[
{
'video_path' : s
},
{
'person_id': n,
'person_info_list' : [
{
'frame' : n
'millisec' : n
'age' : n
'gender' : s
'img_person' : s
'top_color' : n
'bottom_color' : n
},
{
...
}
]
},
{
'person_id' : n,
...
},
...
]
"""
video = VideoList.query.get_or_404(video_id)
tableName = videoNameToTable(video.video_name)
VideoTable = getVideoTable(tableName)
returnJson = list()
returnJson.append({'video_name' : tableName + '.mp4' })
people = db.session.query(VideoTable.person_id.distinct()).all()
for person in people:
personDict = dict()
person_id = person[0]
personDict['person_id'] = person_id
personDict['person_info_list'] = list()
personInfoList = VideoTable.query.filter(VideoTable.person_id == person_id).all()
for personInfo in personInfoList:
# change 'personInfo.img_person' from abs path to relative path
index = personInfo.img_person.find('images')
img_person = personInfo.img_person[index + 7:]
personDict['person_info_list'].append(
{
'frame' : personInfo.frame,
'millisec' : personInfo.millisec,
'age' : personInfo.age,
'gender' : personInfo.gender,
'img_person' : img_person,
'top_color' : personInfo.top_color,
'bottom_color' : personInfo.bottom_color
}
)
returnJson.append(personDict)
return jsonify(returnJson), 200 | 7447f5ea45ab6fa1c6d10f97ac7d57add68fdf40 | 3,651,777 |
import os
def list_terminologies():
""" Get the list of available Amazon Translate Terminologies for this region
Returns:
        This is a proxy for boto3 list_terminologies and returns the output from that SDK method.
        See `the boto3 documentation for details <https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/translate.html#Translate.Client.list_terminologies>`_
Raises:
See the boto3 documentation for details
500: Internal server error
"""
# This function returns a list of saved terminologies
print('list_terminologies request: '+app.current_request.raw_body.decode())
translate_client = boto3.client('translate', region_name=os.environ['AWS_REGION'])
response = translate_client.list_terminologies(MaxResults=100)
terminologies = response['TerminologyPropertiesList']
while ('NextToken' in response):
response = translate_client.list_terminologies(MaxResults=100, NextToken=response['NextToken'])
terminologies = terminologies + response['TerminologyPropertiesList']
# Convert time field to a format that is JSON serializable
for item in terminologies:
item['CreatedAt'] = item['CreatedAt'].isoformat()
item['LastUpdatedAt'] = item['LastUpdatedAt'].isoformat()
return response | 7eeaa65fa5d20d508d12d010fcf0d4410cc8b45d | 3,651,778 |
import logging
def RunLinters(prefix, name, data, settings=None):
"""Run linters starting with |prefix| against |data|."""
ret = []
if settings is None:
settings = ParseOptions([])
ret += settings.errors
linters = [x for x in FindLinters(prefix) if x not in settings.skip]
for linter in linters:
functor = globals().get(linter)
for result in functor(data):
ret.append(LintResult(linter, name, result, logging.ERROR))
return ret | 9b8c780fe3684405d17e59897bee11118dff5590 | 3,651,779 |
def element_norm_spatial_exoao(processes,
comp_sol,
test_time,
test_var_list,
exact_solution,
subel_ints = 1,
zfill=None,
exact_time=None,
block_ids=[]):
"""
This is element_norm_spatial but input solution types are limited. An
exodus.ExodusFile object is expected for the computed solution, and an
analytic solution object is expected for the exact solution.
if exact_time is not given, the exact_solution is evaluated at test_time
"""
# Accept an exodus object as the computed solution.
if not isinstance(comp_sol, exodus.ExodusFile):
# Unrecognized type
print "Computed solution is not a recognized type."
print "It should be either an exodus.ExodusFile object."
sys.exit(1)
# Get the (1-based) index of the time for the computed solution
comp_t_idx1 = find_time_index(comp_sol, test_time)
# The (0-based) index of the variable in the computed solution
comp_var_idx0 = comp_sol.findVar(exodus.EX_ELEM_BLOCK,
test_var_list[0])
# Add error checking for test_var_list?
# If no list of block ids is given, generate a list including all blocks
if block_ids == []:
for block_idx0 in range(comp_sol.getNumber(exodus.EX_ELEM_BLOCK)):
block_ids.append(comp_sol.getId(exodus.EX_ELEM_BLOCK, block_idx0) )
# Accept a solution object as the exact solution
if hasattr(exact_solution, test_var_list[1]):
exact_sol = exact_solution
# If not overridden by exact_time argument, ensure the
# analytic solution time matches the simulation data time
if exact_time == None:
exact_time = comp_sol.getTimes()[comp_t_idx1 - 1]
# Refer directly to the attribute (method) we want
func_direct = getattr(exact_sol, test_var_list[1])
# Get nodal coords here rather than over and over for each element block
# for subel_ints == 1 restructure after computing center coordinates,
# which happens in the block loop
current_coordinates = get_current_coordinates(comp_sol, comp_t_idx1)
if subel_ints > 1:
restructured_coords = restructure_coordinates(current_coordinates)
else:
# Unrecognized type
print "Exact solution is not a recognized type."
print "It should be an analytic solution object."
sys.exit(1)
# Initialize
varET = WeightedErrorTally()
######## The work proper ########
for block_id in block_ids:
element_volumes = get_element_volumes(comp_sol,
block_id,
comp_t_idx1)
comp_var = comp_sol.readVar(comp_t_idx1,
exodus.EX_ELEM_BLOCK,
block_id,
comp_var_idx0)
exact_var = array.array('d')
# exact solution will be calculated from a function
if subel_ints == 1:
# Evaluate the exact solution at the center of the element
ctr_coords = comp_sol.computeCenters(exodus.EX_ELEM_BLOCK,
block_id,
current_coordinates)
# Have to add the fill here because computeCenters knows
# the true number of dimensions
if comp_sol.getDimension()==2 and not zfill==None:
x2_fill = array.array(comp_sol.storageType())
for i in range(len(ctr_coords[0])):
x2_fill.append(zfill)
ctr_coords.append(x2_fill)
r_coords = restructure_coordinates(ctr_coords)
len_r_coords = len(r_coords)
if processes <= 2:
# No point in parallelizing for 2 processes, since only 1 child process would be created.
exact_var = map_func(func_direct, 0, len_r_coords, r_coords, exact_time)
else:
child_processes = processes - 1
exact_var = [None for i in range(len_r_coords)]
pipes = [(None, None) for i in range(child_processes)]
process_list = [None for i in range(child_processes)]
for process_number in range(child_processes):
idx_start = (process_number * len_r_coords) / child_processes
idx_end = ((process_number+1) * len_r_coords) / child_processes
pipes[process_number] = multiprocessing.Pipe(False)
p = multiprocessing.Process(target=map_func_parallel, args=(pipes[process_number][1], func_direct, idx_start, idx_end, r_coords, exact_time,))
process_list[process_number] = p
p.start()
for process_number in range(child_processes):
p = process_list[process_number]
idx_start = (process_number * len_r_coords) / child_processes
idx_end = ((process_number+1) * len_r_coords) / child_processes
conn_obj = pipes[process_number][0]
exact_var_local = conn_obj.recv()
for idx in range(idx_start, idx_end):
exact_var[idx] = exact_var_local[idx - idx_start]
conn_obj.close()
p.join()
else:
avg_evar_on_block(processes,
comp_sol,
block_id,
comp_t_idx1,
restructured_coords,
func_direct,
subel_ints,
zfill,
evar_array = exact_var)
varET.w_accumulate(exact_var, comp_var, element_volumes)
return varET | 323fe13213a5ae8ad980d760943bc5cf1fc46074 | 3,651,780 |
from typing import Iterator
def generate_close_coordinates(
draw: st.DrawFn, prev_coord: Coordinates[str, np.float64]
) -> Coordinates[str, np.float64]:
"""Create coordinates using Hypothesis."""
diff = [
draw(hynp.from_dtype(np.dtype(np.float64), min_value=0.1, max_value=1.0)),
draw(hynp.from_dtype(np.dtype(np.float64), min_value=0.1, max_value=1.0)),
draw(hynp.from_dtype(np.dtype(np.float64), min_value=0.1, max_value=1.0)),
draw(hynp.from_dtype(np.dtype(np.float64), min_value=0.1, max_value=1.0)),
draw(hynp.from_dtype(np.dtype(np.float64), min_value=0.1, max_value=1.0)),
draw(hynp.from_dtype(np.dtype(np.float64), min_value=0.1, max_value=1.0)),
]
coord = vectorize(prev_coord) + diff
formatted: Iterator[np.float64] = (np.float64(i) for i in coord)
return dict(zip(SIXAXES, formatted)) | 8b207d5989f59a30e0c99eebd4654b609a03be93 | 3,651,781 |
from typing import Union
from shapely.geometry import LineString, MultiLineString
def redistribute_vertices(
geom: Union[LineString, MultiLineString],
distance: float
) -> Union[LineString, MultiLineString]:
"""Redistribute the vertices of input line strings
Parameters
----------
geom : LineString or MultiLineString
Input line strings whose vertices is to be redistributed.
distance : float
The distance to be used for redistribution.
Returns
-------
LineString or MultiLineString
The resulting line strings with redistributed vertices.
Raises
------
ValueError
If input geometry is not LineString or MultiLineString.
"""
if geom.geom_type == 'LineString': # pylint: disable=R1705
num_vert = int(round(geom.length / distance))
if num_vert == 0:
num_vert = 1
return LineString(
[geom.interpolate(float(n) / num_vert, normalized=True)
for n in range(num_vert + 1)])
elif geom.geom_type == 'MultiLineString':
parts = [redistribute_vertices(part, distance)
for part in geom]
return type(geom)([p for p in parts if not p.is_empty])
raise ValueError(f'unhandled geometry {geom.geom_type}') | 1a5f0c3f409d5f3de46831bfa8456a734985d2b8 | 3,651,782 |
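# Hedged usage sketch (assumes shapely is available): a 10-unit segment redistributed
# at distance 3.0 is split into round(10 / 3) = 3 pieces, i.e. 4 evenly spaced vertices.
line = LineString([(0.0, 0.0), (10.0, 0.0)])
dense = redistribute_vertices(line, 3.0)
print(len(dense.coords), list(dense.coords)[-1])  # -> 4 (10.0, 0.0)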
def get_boolean_value(value):
"""Get the boolean value of the ParameterValue."""
if value.type == ParameterType.PARAMETER_BOOL:
return value.bool_value
else:
raise ValueError('Expected boolean value.') | fc5452a45983d16f30433ffe54b8883c24c1eb94 | 3,651,783 |
import numpy as np
import torch
from torch.autograd import Variable
def eval_bayesian_optimization(net: torch.nn.Module, input_picture: DATA,\
    label_picture: DATA, ) -> float:
    """ Compute classification accuracy on the provided dataset, used to find the
    optimized hyperparameter settings.
    Args:
        net: trained neural network
        input_picture: the input images
        label_picture: the labels for the respective images
    Returns:
        float: classification accuracy """
# Define the data
x_valid = input_picture
y_valid = label_picture
# Pre-locating memory
correct = 0
# Get the number of samples and batches before testing the network
num_samples = x_valid.shape[0]
num_batches = int(np.ceil(num_samples / float(BATCH_SIZE)))
net.eval()
with torch.no_grad():
for i in range(num_batches):
idx = range(i*BATCH_SIZE, np.minimum((i+1) * BATCH_SIZE, num_samples))
x_batch_val = get_variable(Variable(torch.from_numpy(x_valid[idx])))
y_batch_val = get_variable(Variable(torch.from_numpy(y_valid[idx]).long()))
output, _ = net(x_batch_val)
_, predicted = torch.max(output.data, 1)
correct += (predicted == y_batch_val).float().mean()
# Calculating the accuracy
return float(correct/num_batches) | 4833627f5239f7c713f11a1ab9f97e6898a303b1 | 3,651,784 |
import urllib
def parse(url):
"""
URL-parsing function that checks that
- port is an integer 0-65535
- host is a valid IDNA-encoded hostname with no null-bytes
- path is valid ASCII
Args:
A URL (as bytes or as unicode)
Returns:
A (scheme, host, port, path) tuple
Raises:
ValueError, if the URL is not properly formatted.
"""
parsed = urllib.parse.urlparse(url)
if not parsed.hostname:
raise ValueError("No hostname given")
if isinstance(url, bytes):
host = parsed.hostname
# this should not raise a ValueError,
# but we try to be very forgiving here and accept just everything.
# decode_parse_result(parsed, "ascii")
else:
host = parsed.hostname.encode("idna")
parsed = encode_parse_result(parsed, "ascii")
port = parsed.port
if not port:
port = 443 if parsed.scheme == b"https" else 80
full_path = urllib.parse.urlunparse(
(b"", b"", parsed.path, parsed.params, parsed.query, parsed.fragment)
)
if not full_path.startswith(b"/"):
full_path = b"/" + full_path
if not check.is_valid_host(host):
raise ValueError("Invalid Host")
if not check.is_valid_port(port):
raise ValueError("Invalid Port")
return parsed.scheme, host, port, full_path | d1af42d9ee5b9c786cae9a6a16da89a545d27e33 | 3,651,785 |
def is_amicable(num: int) -> bool:
""" Returns whether the number is part of an amicable number pair """
friend = sum(divisors(num)) - num
# Only those in pairs are amicable numbers. If the sum is the number itself, it's a perfect number
return friend != num and sum(divisors(friend)) - friend == num | e5fc62d4f390a95f6d54d57979c4e39b9d4e4316 | 3,651,786 |
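# Minimal sketch of the elided divisors() helper this function relies on; it is assumed
# here to return every divisor of n including n itself, since the code above subtracts
# num to obtain the proper-divisor sum. 220/284 is the classic amicable pair.
def divisors(n):
    return [d for d in range(1, n + 1) if n % d == 0]

print(is_amicable(220))  # -> True  (proper divisors of 220 sum to 284, and vice versa)
print(is_amicable(6))    # -> False (6 is perfect: its proper divisors sum to itself)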
import html
def no_data_info():
"""Returns information about not having enough information yet to display"""
return html.Div(children=[dcc.Markdown('''
# Please wait a little bit...
The MongoDB database was probably just initialized and is currently empty. You will need to wait a bit (~30 min) for it to populate with initial data before using the application.
''', className='eleven columns', style={'paddingLeft': '5%'})], className="row") | 59ce4a2a0e2b18298006746be31f30b8c2cb4a6a | 3,651,787 |
def delta_t(soil_type):
    """ Displacement at Tu
    """
    delta_ts = {
        "dense sand": 0.003,
        "loose sand": 0.005,
        "stiff clay": 0.008,
        "soft clay": 0.01,
    }
    try:
        return delta_ts[soil_type]
    except KeyError:
        raise ValueError("Unknown soil type.") | c542adb7c302bc1f50eb4c49bf9da70932758814 | 3,651,788
from math import asin, pi
from cv2 import getRectSubPix, getRotationMatrix2D, warpAffine
def extractPlate(imgOriginal, listOfMatchingChars, PlateWidthPaddingFactor, PlateHeightPaddingFactor):
""" Extract license-plate in the provided image, based on given contours group that corresponds for matching characters """
# Sort characters from left to right based on x position:
listOfMatchingChars.sort(key=lambda matchingChar_: matchingChar_.intCenterX)
    # Calculate the plate centroid (average of leftmost and rightmost characters):
fltPlateCenterX = (listOfMatchingChars[0].intCenterX + listOfMatchingChars[len(listOfMatchingChars) - 1].intCenterX) / 2.0
fltPlateCenterY = (listOfMatchingChars[0].intCenterY + listOfMatchingChars[len(listOfMatchingChars) - 1].intCenterY) / 2.0
ptPlateCenter = fltPlateCenterX, fltPlateCenterY
# Calculate plate width (rightmost - leftmost characters):
intPlateWidth = int(PlateWidthPaddingFactor * (listOfMatchingChars[len(listOfMatchingChars) - 1].intBoundingRectX +
listOfMatchingChars[len(listOfMatchingChars) - 1].intBoundingRectWidth -
listOfMatchingChars[0].intBoundingRectX))
# Calculate plate height (average over all characters):
intTotalOfCharHeights = 0
for matchingChar in listOfMatchingChars:
intTotalOfCharHeights = intTotalOfCharHeights + matchingChar.intBoundingRectHeight
fltAverageCharHeight = intTotalOfCharHeights / len(listOfMatchingChars)
intPlateHeight = int(fltAverageCharHeight * PlateHeightPaddingFactor)
# Calculate correction angle of plate region (simple geometry calculation):
fltOpposite = listOfMatchingChars[len(listOfMatchingChars) - 1].intCenterY - listOfMatchingChars[0].intCenterY
fltHypotenuse = (listOfMatchingChars[0] - listOfMatchingChars[len(listOfMatchingChars) - 1])
fltCorrectionAngleInRad = asin(fltOpposite / fltHypotenuse)
fltCorrectionAngleInDeg = fltCorrectionAngleInRad * (180.0 / pi)
# Rotate the entire image (affine warp), for compensating the angle of the plate region:
rotationMatrix = getRotationMatrix2D(tuple(ptPlateCenter), fltCorrectionAngleInDeg, 1.0)
height, width, _ = imgOriginal.shape
imgRotated = warpAffine(imgOriginal, rotationMatrix, (width, height))
# Crop the plate from the image:
imgCropped = getRectSubPix(imgRotated, (intPlateWidth, intPlateHeight), tuple(ptPlateCenter))
# Create and return possiblePlate object, which packs most the above information:
possiblePlate = PossiblePlate()
possiblePlate.rrLocationOfPlateInScene = (tuple(ptPlateCenter), (intPlateWidth, intPlateHeight), fltCorrectionAngleInDeg)
possiblePlate.imgPlate = imgCropped
return possiblePlate | f6d726727762b752003ae16c3cf9d286a0ebe990 | 3,651,789 |
def create_stratified_name(stem, stratification_name, stratum_name):
"""
generate a standardised stratified compartment name
:param stem: str
the previous stem to the compartment or parameter name that needs to be extended
:param stratification_name: str
the "stratification" or rationale for implementing the current stratification process
:param stratum_name: str
name of the stratum within the stratification
:return: str
the composite name with the standardised stratification name added on to the old stem
"""
return stem + create_stratum_name(stratification_name, stratum_name) | 2677dec386dfd235e7fb5d088c5481987acf4beb | 3,651,790 |
import inspect
import typing
def bind_args_kwargs(sig: inspect.Signature, *args: typing.Any, **kwargs: typing.Any) -> typing.List[BoundParameter]:
"""Bind *args and **kwargs to signature and get Bound Parameters.
:param sig: source signature
:type sig: inspect.Signature
:param args: not keyworded arguments
:type args: typing.Any
:param kwargs: keyworded arguments
:type kwargs: typing.Any
:return: Iterator for bound parameters with all information about it
:rtype: typing.List[BoundParameter]
.. versionadded:: 3.3.0
.. versionchanged:: 5.3.1 return list
"""
result: typing.List[BoundParameter] = []
bound: typing.MutableMapping[str, inspect.Parameter] = sig.bind(*args, **kwargs).arguments
for param in sig.parameters.values():
result.append(BoundParameter(parameter=param, value=bound.get(param.name, param.default)))
return result | 3fc8b16449981e920998ff84839a71cbbfc26d28 | 3,651,791 |
def user(user_type):
"""
:return: instance of a User
"""
return user_type() | a8c8cd4ef57915c555864f6fc09dce63c2a1c6fb | 3,651,792 |
def true_or_false(item):
    """This function is used to assist in getting appropriate
    values set with the PythonOption directive
    """
    try:
        item = item.lower()
    except AttributeError:
        pass
    if item in ['yes', 'true', '1', 1, True]:
        return True
    elif item in ['no', 'false', '0', 0, None, False]:
        return False
    else:
        raise Exception("Cannot interpret %r as a boolean value" % item) | 3e7c0cee07f6796c6134b182572a7d5ff95cf42d | 3,651,793
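# Quick usage check for true_or_false() above.
print(true_or_false("Yes"))  # -> True  (case-insensitive match)
print(true_or_false(0))      # -> False
print(true_or_false(None))   # -> False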
import copy
def validate_task(task, variables, config=None):
""" Validate that a simulation can be executed with OpenCOR
Args:
task (:obj:`Task`): request simulation task
variables (:obj:`list` of :obj:`Variable`): variables that should be recorded
config (:obj:`Config`, optional): BioSimulators common configuration
Returns:
:obj:`tuple:`:
* :obj:`Task`: possibly alternate task that OpenCOR should execute
* :obj:`lxml.etree._ElementTree`: element tree for model
* :obj:`dict`: dictionary that maps the id of each SED variable to the name that OpenCOR uses to reference it
"""
config = config or get_config()
model = task.model
sim = task.simulation
if config.VALIDATE_SEDML:
raise_errors_warnings(validation.validate_task(task),
error_summary='Task `{}` is invalid.'.format(task.id))
raise_errors_warnings(validation.validate_model_language(model.language, ModelLanguage.CellML),
error_summary='Language for model `{}` is not supported.'.format(model.id))
raise_errors_warnings(validation.validate_model_change_types(model.changes, (ModelAttributeChange,)),
error_summary='Changes for model `{}` are not supported.'.format(model.id))
raise_errors_warnings(*validation.validate_model_changes(model),
error_summary='Changes for model `{}` are invalid.'.format(model.id))
raise_errors_warnings(validation.validate_simulation_type(sim, (UniformTimeCourseSimulation, )),
error_summary='{} `{}` is not supported.'.format(sim.__class__.__name__, sim.id))
raise_errors_warnings(*validation.validate_simulation(sim),
error_summary='Simulation `{}` is invalid.'.format(sim.id))
raise_errors_warnings(*validation.validate_data_generator_variables(variables),
error_summary='Data generator variables for task `{}` are invalid.'.format(task.id))
# read model; TODO: support imports
model_etree = lxml.etree.parse(model.source)
# validate variables
opencor_variable_names = validate_variable_xpaths(variables, model_etree)
# validate simulation
opencor_simulation = validate_simulation(task.simulation)
# check that OpenCOR can execute the request algorithm (or a similar one)
opencor_algorithm = get_opencor_algorithm(task.simulation.algorithm, config=config)
# create new task to manage configuration for OpenCOR
opencor_task = copy.deepcopy(task)
opencor_task.simulation = opencor_simulation
opencor_task.simulation.algorithm = opencor_algorithm
return opencor_task, model_etree, opencor_variable_names | d1b65ead34f3fa1c83bc451ac297411d02c33978 | 3,651,794 |
import time
def time_ms():
"""currently pypy only has Python 3.5.3, so we are missing Python 3.7's time.time_ns() with better precision
see https://www.python.org/dev/peps/pep-0564/
    the function here is a convenience; use `time.time_ns() // 1_000_000` on Python >= 3.7
"""
return int(time.time() * 1e3) | 1bff241db79007314d7a876ddd007af137ba7306 | 3,651,795 |
import numpy as np
def _calculate_mk(tp, fp, tn, fn):
    """Calculate the markedness (mk) numerator and denominator."""
    ppv = np.where((tp + fp) > 0, tp / (tp + fp), np.array(float("nan")))
    npv = np.where((tn + fn) > 0, tn / (tn + fn), np.array(float("nan")))
numerator = ppv + npv - 1.0
denominator = 1.0
return numerator, denominator | d777db3abd9296b2a67e038396d29e8ef8529a74 | 3,651,796 |
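# Worked check with scalar counts (tp, fp, tn, fn) = (3, 1, 4, 2):
# ppv = 3/4 = 0.75, npv = 4/6 ~= 0.667, so the numerator is ~0.417 and the denominator is 1.0.
num, den = _calculate_mk(np.array(3.0), np.array(1.0), np.array(4.0), np.array(2.0))
print(float(num), den)  # -> ~0.4167 1.0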
import numpy as np
def geometric_progression_for_stepsize(
x, update, dist, decision_function, current_iteration
):
"""Geometric progression to search for stepsize.
Keep decreasing stepsize by half until reaching
the desired side of the boundary.
"""
epsilon = dist / np.sqrt(current_iteration)
while True:
updated = x + epsilon * update
success = decision_function(updated)[0]
if success:
break
else:
epsilon = epsilon / 2.0
return epsilon | d5a043f434efa68e827ff89f6f469eab37a79383 | 3,651,797 |
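# Toy illustration with a stand-in decision_function (an assumption, not part of the
# original attack code): "success" means the candidate stays inside a radius-1.5 ball.
# The starting epsilon is dist / sqrt(iteration) = 1.0 and halves until the test passes.
decision_function = lambda candidate: [np.linalg.norm(candidate) < 1.5]
eps = geometric_progression_for_stepsize(
    np.array([1.0, 0.0]), np.array([1.0, 0.0]), dist=2.0,
    decision_function=decision_function, current_iteration=4)
print(eps)  # -> 0.25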
def absorption_two_linear_known(freq_list, interaction_strength, decay_rate):
""" The absorption is half the imaginary part of the susecptibility. """
return susceptibility_two_linear_known(freq_list, interaction_strength,
decay_rate).imag/2.0 | 9d4819715150ce63753f4e356c406685852fc761 | 3,651,798 |
def mad(x, mask, base_size=(11, 3), mad_size=(21, 21), debug=False, sigma=True):
"""Calculate the MAD of freq-time data.
Parameters
----------
x : np.ndarray
Data to filter.
mask : np.ndarray
Initial mask.
base_size : tuple
Size of the window to use in (freq, time) when
estimating the baseline.
mad_size : tuple
Size of the window to use in (freq, time) when
estimating the MAD.
    debug : bool, optional
        If True, also return the absolute deviation and the MAD estimate.
    sigma : bool, optional
        Rescale the output into units of Gaussian sigmas.
Returns
-------
mad : np.ndarray
Size of deviation at each point in MAD units.
"""
xs = medfilt(x, mask, size=base_size)
dev = np.abs(x - xs)
mad = medfilt(dev, mask, size=mad_size)
if sigma:
mad *= 1.4826 # apply the conversion from MAD->sigma
if debug:
return dev / mad, dev, mad
return dev / mad | 7c62ed0af54bcab2e32a12d98580c989cdfd42ef | 3,651,799 |