content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
---|---|---|
from typing import List
from typing import Set
def knapsack_with_budget(vals: List[float], weights: List[int], budget: int,
cap: int) -> Set[int]:
"""
Solves the knapsack problem (with budget) for the items with the given values
and weights, subject to the given budget and capacity, in a bottom-up way.
:param vals: list[float]
:param weights: list[int]
:param budget: int
:param cap: int
:return: set{int}
"""
# Check whether the input arrays are None or empty
if not vals:
return set()
# Check whether the input budget is non-negative
if budget < 0:
return set()
# Check whether the input capacity is non-negative
if cap < 0:
return set()
n = len(vals)
# Initialization
subproblems = [
[[0.0] * (cap + 1) for _ in range(budget + 1)]
for _ in range(n)
]
for b in range(budget + 1):
for x in range(cap + 1):
if b >= 1 and weights[0] <= x:
subproblems[0][b][x] = vals[0]
# Bottom-up calculation
for item in range(1, n):
for b in range(budget + 1):
for x in range(cap + 1):
if b <= 0 or weights[item] > x:
subproblems[item][b][x] = subproblems[item - 1][b][x]
else:
result_without_curr = subproblems[item - 1][b][x]
result_with_curr = \
subproblems[item - 1][b - 1][x - weights[item]] + \
vals[item]
subproblems[item][b][x] = max(result_without_curr,
result_with_curr)
return _reconstruct(vals, weights, budget, cap, subproblems)
# Overall running time complexity: O(n*k*W), where k is the budget and W is
# the knapsack capacity | 3d91f18f8be7b82f17ebcda9dbfa419eadeec0ea | 3,655,800 |
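A minimal, self-contained sketch of the same budgeted-knapsack recurrence, returning only the optimal value (it skips the `_reconstruct` step, which is defined elsewhere); the example inputs are made up for illustration:

```python
def best_value(vals, weights, budget, cap):
    # dp[i][b][x]: best value using items 0..i with at most b picks and capacity x
    n = len(vals)
    dp = [[[0.0] * (cap + 1) for _ in range(budget + 1)] for _ in range(n)]
    for b in range(budget + 1):
        for x in range(cap + 1):
            if b >= 1 and weights[0] <= x:
                dp[0][b][x] = vals[0]
    for i in range(1, n):
        for b in range(budget + 1):
            for x in range(cap + 1):
                if b == 0 or weights[i] > x:
                    dp[i][b][x] = dp[i - 1][b][x]
                else:
                    dp[i][b][x] = max(dp[i - 1][b][x],
                                      dp[i - 1][b - 1][x - weights[i]] + vals[i])
    return dp[n - 1][budget][cap]

# At most 2 items within capacity 5: items 1 and 2 give 100 + 120 = 220
print(best_value([60.0, 100.0, 120.0], [1, 2, 3], budget=2, cap=5))  # 220.0
```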
import functools
def _basemap_redirect(func):
"""
Decorator that calls the basemap version of the function of the
same name. This must be applied as the innermost decorator.
"""
name = func.__name__
@functools.wraps(func)
def wrapper(self, *args, **kwargs):
if getattr(self, 'name', '') == 'basemap':
return getattr(self.projection, name)(*args, ax=self, **kwargs)
else:
return func(self, *args, **kwargs)
wrapper.__doc__ = None
return wrapper | f3cee9113a6f8044255d3013e357742e231ea98e | 3,655,801 |
def embedding_lookup(input_ids,
vocab_size,
embedding_size=128,
initializer_range=0.02,
word_embedding_name="word_embeddings"):
"""Looks up words embeddings for id tensor.
Args:
input_ids: int32 Tensor of shape [batch_size, seq_length] containing word
ids.
vocab_size: int. Size of the embedding vocabulary.
embedding_size: int. Width of the word embeddings.
initializer_range: float. Embedding initialization range.
word_embedding_name: string. Name of the embedding table.
Returns:
float Tensor of shape [batch_size, seq_length, embedding_size].
"""
# This function assumes that the input is of shape [batch_size, seq_length,
# num_inputs].
#
# If the input is a 2D tensor of shape [batch_size, seq_length], we
# reshape to [batch_size, seq_length, 1].
if input_ids.shape.ndims == 2:
input_ids = tf.expand_dims(input_ids, axis=[-1])
embedding_table = tf.get_variable(
name=word_embedding_name,
shape=[vocab_size, embedding_size],
initializer=create_initializer(initializer_range))
output = tf.nn.embedding_lookup(embedding_table, input_ids)
input_shape = get_shape_list(input_ids)
output = tf.reshape(output,
input_shape[0:-1] + [input_shape[-1] * embedding_size])
return output, embedding_table | 2f66d05ab70f4fb38d990e66ec5829cb62fdc934 | 3,655,802 |
import re
def find_version(infile):
"""
Given an open file (or some other iterator of lines) holding a
configure.ac file, find the current version line.
"""
for line in infile:
m = re.search(r'AC_INIT\(\[tor\],\s*\[([^\]]*)\]\)', line)
if m:
return m.group(1)
return None | 35ac18757ee1156f046bbd9ffa68ed4898bc317a | 3,655,803 |
import math
def linear_warmup_decay(warmup_steps, total_steps, cosine=True, linear=False):
"""
Linear warmup for warmup_steps, optionally with cosine annealing or
linear decay to 0 at total_steps
"""
# check if both decays are not True at the same time
assert not (linear and cosine)
def fn(step):
if step < warmup_steps:
return float(step) / float(max(1, warmup_steps))
if not (cosine or linear):
# no decay
return 1.0
progress = float(step - warmup_steps) / float(
max(1, total_steps - warmup_steps)
)
if cosine:
# cosine decay
return 0.5 * (1.0 + math.cos(math.pi * progress))
# linear decay
return 1.0 - progress
return fn | 9326622a07be677cb82744a30850674ca3c5f789 | 3,655,804 |
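A quick check of the schedule factory above (re-using the function just defined; warmup_steps=10 and total_steps=100 are illustrative):

```python
fn = linear_warmup_decay(warmup_steps=10, total_steps=100, cosine=True)

print(fn(0))    # 0.0  -- start of warmup
print(fn(5))    # 0.5  -- halfway through warmup
print(fn(10))   # 1.0  -- cosine decay starts at full scale
print(fn(55))   # 0.5  -- halfway through decay: 0.5 * (1 + cos(pi / 2))
print(fn(100))  # 0.0  -- end of the schedule
```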
def query_anumbers(bbox,bbox2,bounds2):
"""
Queries the anumbers of the reports within the defined region
Args:
`bbox`: bounds of the defined region
`bbox2`: the same bounds as a comma-separated string for the WFS request
`bounds2`: bounds tuple passed to the PostgreSQL fallback query
Returns:
`anumberscode`: list of anumbers
"""
try:
collars_file='http://geo.loop-gis.org/geoserver/loop/wfs?service=WFS&version=1.0.0&request=GetFeature&typeName=loop:collar_4326&bbox='+bbox2+'&srs=EPSG:4326'
collars = gpd.read_file(collars_file, bbox=bbox)
print("Connected to Loop Server")
anumbers=gpd.GeoDataFrame(collars, columns=["anumber"])
anumbers = pd.DataFrame(anumbers.drop_duplicates(subset=["anumber"]))
except HTTPError as err:
if err.code == 404 or err.code == 500 or err.code == 503:
query="""SELECT DISTINCT (collar.anumber)
FROM public.collar
WHERE(longitude BETWEEN %s AND %s) AND
(latitude BETWEEN %s AND %s)
ORDER BY collar.anumber ASC"""
conn = psycopg2.connect(host="130.95.198.59", port = 5432,
database="gswa_dh", user="postgres", password="loopie123pgpw")
cur = conn.cursor()
cur.execute(query, bounds2)
anumbers=pd.DataFrame(cur, columns=["anumber"])
print("Connected to PostgreSQL Server")
else:
raise
#collars_file='http://geo.loop-gis.org/geoserver/loop/wfs?service=WFS&version=1.0.0&request=GetFeature&typeName=loop:collar_4326&bbox='+bbox2+'&srs=EPSG:4326'
#collars = gpd.read_file(collars_file, bbox=bbox)
#anumbers=gpd.GeoDataFrame(collars, columns=["anumber"])
#anumbers = pd.DataFrame(anumbers.drop_duplicates(subset=["anumber"]))
#print(anumbers)
anumbers['anumberlength']=anumbers['anumber'].astype(str).map(len)
anumberscode=[]
for index, row in anumbers.iterrows():
if (int(row[1])==5):
text=str("a0"+ str(row[0]))
text2=str("a"+ str(row[0]))
elif (int(row[1])==4):
text=str("a00"+ str(row[0]))
text2=str("a"+ str(row[0]))
elif (int(row[1])==3):
text=str("a000"+ str(row[0]))
text2=str("a"+ str(row[0]))
elif (int(row[1])==2):
text=str("a0000"+ str(row[0]))
text2=str("a"+ str(row[0]))
elif (int(row[1])==1):
text=str("a00000"+ str(row[0]))
text2=str("a"+ str(row[0]))
else:
text= str("a"+ str(row[0]))
anumberscode.append(text)
anumberscode.append(text2)
print("Report Numbers:", anumberscode)
return anumberscode | 91a31ba05df1a88f1c665f7d4dbb1c2d26bb2cc9 | 3,655,805 |
def Parse(spec_name, arg_r):
# type: (str, args.Reader) -> args._Attributes
"""Parse argv using a given FlagSpec."""
spec = FLAG_SPEC[spec_name]
return args.Parse(spec, arg_r) | 9dc2de95e8f9001eff82f16de6e14f51f768306f | 3,655,806 |
def get_path_url(path: PathOrString) -> str:
"""Covert local path to URL
Arguments:
path {str} -- path to file
Returns:
str -- URL to file
"""
path_obj, path_str = get_path_forms(path)
if is_supported_scheme(path_str):
return build_request(path_str)
return path_obj.absolute().as_uri() | 812471da77d59cc0f331b5a031282abb5847f054 | 3,655,807 |
def process_keyqueue(codes, more_available):
"""
codes -- list of key codes
more_available -- if True then raise MoreInputRequired when in the
middle of a character sequence (escape/utf8/wide) and caller
will attempt to send more key codes on the next call.
returns (list of input, list of remaining key codes).
"""
code = codes[0]
if code >= 32 and code <= 126:
key = chr(code)
return [key], codes[1:]
if code in _keyconv:
return [_keyconv[code]], codes[1:]
if code >0 and code <27:
return ["ctrl %s" % chr(ord('a')+code-1)], codes[1:]
if code >27 and code <32:
return ["ctrl %s" % chr(ord('A')+code-1)], codes[1:]
em = str_util.get_byte_encoding()
if (em == 'wide' and code < 256 and
within_double_byte(chr(code),0,0)):
if not codes[1:]:
if more_available:
raise MoreInputRequired()
if codes[1:] and codes[1] < 256:
db = chr(code)+chr(codes[1])
if within_double_byte(db, 0, 1):
return [db], codes[2:]
if em == 'utf8' and code>127 and code<256:
if code & 0xe0 == 0xc0: # 2-byte form
need_more = 1
elif code & 0xf0 == 0xe0: # 3-byte form
need_more = 2
elif code & 0xf8 == 0xf0: # 4-byte form
need_more = 3
else:
return ["<%d>"%code], codes[1:]
for i in range(need_more):
if len(codes)-1 <= i:
if more_available:
raise MoreInputRequired()
else:
return ["<%d>"%code], codes[1:]
k = codes[i+1]
if k>256 or k&0xc0 != 0x80:
return ["<%d>"%code], codes[1:]
s = bytes3(codes[:need_more+1])
assert isinstance(s, bytes)
try:
return [s.decode("utf-8")], codes[need_more+1:]
except UnicodeDecodeError:
return ["<%d>"%code], codes[1:]
if code >127 and code <256:
key = chr(code)
return [key], codes[1:]
if code != 27:
return ["<%d>"%code], codes[1:]
result = input_trie.get(codes[1:], more_available)
if result is not None:
result, remaining_codes = result
return [result], remaining_codes
if codes[1:]:
# Meta keys -- ESC+Key form
run, remaining_codes = process_keyqueue(codes[1:],
more_available)
if urwid.util.is_mouse_event(run[0]):
return ['esc'] + run, remaining_codes
if run[0] == "esc" or run[0].find("meta ") >= 0:
return ['esc']+run, remaining_codes
return ['meta '+run[0]]+run[1:], remaining_codes
return ['esc'], codes[1:] | 8a49f55ca760853176c319487936c8e93911535e | 3,655,808 |
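The UTF-8 branch above decides how many continuation bytes to wait for from the lead byte's high bits; a standalone sketch of just that check:

```python
def utf8_continuation_bytes(lead_byte):
    """Return how many continuation bytes follow a UTF-8 lead byte, or None."""
    if lead_byte & 0xe0 == 0xc0:   # 110xxxxx -> 2-byte sequence
        return 1
    if lead_byte & 0xf0 == 0xe0:   # 1110xxxx -> 3-byte sequence
        return 2
    if lead_byte & 0xf8 == 0xf0:   # 11110xxx -> 4-byte sequence
        return 3
    return None                    # not a multi-byte lead byte

print(utf8_continuation_bytes(0xc3))  # 1 (e.g. first byte of 'é')
print(utf8_continuation_bytes(0xe2))  # 2 (e.g. first byte of '€')
print(utf8_continuation_bytes(0xf0))  # 3 (e.g. first byte of most emoji)
```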
from typing import Dict
from typing import List
from typing import Tuple
def allowed_transitions(constraint_type: str, labels: Dict[int, str]) -> List[Tuple[int, int]]:
"""
Given labels and a constraint type, returns the allowed transitions. It will
additionally include transitions for the start and end states, which are used
by the conditional random field.
# Parameters
constraint_type : `str`, required
Indicates which constraint to apply. Current choices are
"BIO", "IOB1", "BIOUL", and "BMES".
labels : `Dict[int, str]`, required
A mapping {label_id -> label}.
# Returns
`List[Tuple[int, int]]`
The allowed transitions (from_label_id, to_label_id).
"""
num_labels = len(labels)
start_tag = num_labels
end_tag = num_labels + 1
labels_with_boundaries = list(labels.items()) + [(start_tag, "START"), (end_tag, "END")]
allowed = []
for from_label_index, from_label in labels_with_boundaries:
if from_label in ("START", "END"):
from_tag = from_label
from_entity = ""
else:
from_tag = from_label[0]
from_entity = from_label[1:]
for to_label_index, to_label in labels_with_boundaries:
if to_label in ("START", "END"):
to_tag = to_label
to_entity = ""
else:
to_tag = to_label[0]
to_entity = to_label[1:]
if is_transition_allowed(constraint_type, from_tag, from_entity, to_tag, to_entity):
allowed.append((from_label_index, to_label_index))
return allowed | 173dd26c17156ecd73ba1181022183b68f158331 | 3,655,809 |
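A hedged usage sketch; it assumes `is_transition_allowed` from the same module is in scope and implements the standard BIO constraints:

```python
# Assumes is_transition_allowed (same module as allowed_transitions) is in scope.
labels = {0: "B-PER", 1: "I-PER", 2: "O"}
transitions = allowed_transitions("BIO", labels)

# Ids 3 and 4 are the implicit START and END states added by the function.
print((0, 1) in transitions)  # True:  B-PER -> I-PER is allowed
print((2, 1) in transitions)  # False: O -> I-PER is not allowed under BIO
```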
import argparse
def get_args(**kwargs):
"""
"""
cfg = deepcopy(kwargs)
parser = argparse.ArgumentParser(
description="Train the Model on CINC2019",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# parser.add_argument(
# "-l", "--learning-rate",
# metavar="LR", type=float, nargs="?", default=0.001,
# help="Learning rate",
# dest="learning_rate")
parser.add_argument(
"-b", "--batch-size",
type=int, default=128,
help="the batch size for training",
dest="batch_size")
parser.add_argument(
"-m", "--model-name",
type=str, default="crnn",
help="name of the model to train, `cnn` or `crnn`",
dest="model_name")
parser.add_argument(
"-c", "--cnn-name",
type=str, default="multi_scopic",
help="choice of cnn feature extractor",
dest="cnn_name")
parser.add_argument(
"-r", "--rnn-name",
type=str, default="lstm",
help="choice of rnn structures",
dest="rnn_name")
parser.add_argument(
"-a", "--attn-name",
type=str, default="se",
help="choice of attention block",
dest="attn_name")
parser.add_argument(
"--keep-checkpoint-max", type=int, default=50,
help="maximum number of checkpoints to keep. If set 0, all checkpoints will be kept",
dest="keep_checkpoint_max")
parser.add_argument(
"--optimizer", type=str, default="adam",
help="training optimizer",
dest="train_optimizer")
parser.add_argument(
"--debug", type=str2bool, default=False,
help="train with more debugging information",
dest="debug")
args = vars(parser.parse_args())
cfg.update(args)
return CFG(cfg) | 07411949f0219d4c20351c8cefbdc161cafc6ed4 | 3,655,810 |
import time
from datetime import datetime
import os
def initialize(cfg, args):
""" purpose: load information and add to config """
if cfg.sessionId in (None, '') or cfg.useSessionTimestamp is True:
cfg.useSessionTimestamp = True
cfg.sessionId = utils.dateStr30(time.localtime())
else:
cfg.useSessionTimestamp = False
# MERGE WITH PARAMS
if args.runs != '' and args.scans != '':
# use the run and scan numbers passed in as parameters
cfg.runNum = [int(x) for x in args.runs.split(',')]
cfg.scanNum = [int(x) for x in args.scans.split(',')]
else: # when you're not specifying on the command line it's already in a list
cfg.runNum = [int(x) for x in cfg.runNum]
cfg.scanNum = [int(x) for x in cfg.scanNum]
# GET DICOM DIRECTORY
if cfg.buildImgPath:
imgDirDate = datetime.now()
dateStr = cfg.date.lower()
if dateStr != 'now' and dateStr != 'today':
try:
imgDirDate = parser.parse(cfg.date)
except ValueError as err:
raise RequestError('Unable to parse date string {} {}'.format(cfg.date, err))
datestr = imgDirDate.strftime("%Y%m%d")
imgDirName = "{}.{}.{}".format(datestr, cfg.subjectName, cfg.subjectName)
cfg.dicomDir = os.path.join(cfg.local.dicomDir,imgDirName)
else:
cfg.dicomDir = cfg.local.dicomDir # then the whole path was supplied
########
cfg.bids_id = 'sub-{0:03d}'.format(cfg.subjectNum)
cfg.ses_id = 'ses-{0:02d}'.format(cfg.subjectDay)
# specify local directories
cfg.local.codeDir = os.path.join(cfg.local.rtcloudDir, 'projects', cfg.projectName)
cfg.local.dataDir = os.path.join(cfg.local.codeDir, 'data')
cfg.local.subject_full_day_path = os.path.join(cfg.local.dataDir, cfg.bids_id, cfg.ses_id)
cfg.local.subject_reg_dir = os.path.join(cfg.local.subject_full_day_path, 'registration_outputs')
cfg.local.wf_dir = os.path.join(cfg.local.dataDir, cfg.bids_id, 'ses-01', 'registration')
cfg.local.maskDir = os.path.join(cfg.local.codeDir, 'ROI')
cfg.subject_reg_dir = cfg.local.subject_reg_dir
cfg.wf_dir = cfg.local.wf_dir
cfg.n_masks = len(cfg.MASK)
if args.filesremote: # here we will need to specify separate paths for processing
cfg.server.codeDir = os.path.join(cfg.server.rtcloudDir, 'projects', cfg.projectName)
cfg.server.dataDir = os.path.join(cfg.server.codeDir, cfg.server.serverDataDir)
cfg.server.subject_full_day_path = os.path.join(cfg.server.dataDir, cfg.bids_id, cfg.ses_id)
cfg.server.subject_reg_dir = os.path.join(cfg.server.subject_full_day_path, 'registration_outputs')
cfg.server.wf_dir = os.path.join(cfg.server.dataDir, cfg.bids_id, 'ses-01', 'registration')
cfg.server.maskDir = os.path.join(cfg.server.codeDir, 'ROI')
cfg.subject_reg_dir = cfg.server.subject_reg_dir
cfg.wf_dir = cfg.server.wf_dir
cfg.ref_BOLD = os.path.join(cfg.wf_dir,'ref_image.nii.gz')
cfg.MNI_ref_filename = os.path.join(cfg.wf_dir, cfg.MNI_ref_BOLD)
cfg.T1_to_BOLD = os.path.join(cfg.wf_dir, 'affine.txt')
cfg.MNI_to_T1 = os.path.join(cfg.wf_dir, 'ants_t1_to_mniInverseComposite.h5')
cfg.MASK_transformed = [''] * cfg.n_masks
cfg.local_MASK_transformed = [''] * cfg.n_masks
for m in np.arange(cfg.n_masks):
mask_name = cfg.MASK[m].split('.')[0] + '_space-native.nii.gz'
cfg.MASK_transformed[m] = os.path.join(cfg.subject_reg_dir, mask_name)
cfg.local_MASK_transformed[m] = os.path.join(cfg.local.subject_reg_dir, mask_name)
# get conversion to flip dicom to nifti files
cfg.axesTransform = getTransform(('L', 'A', 'S'),('P', 'L', 'S'))
return cfg | dac199c20c02a5467ba240d592085a7ac8df40c0 | 3,655,811 |
def simple_linear(parent = None, element_count=16, element_pitch=7e-3):
"""1D line of elements, starting at xyz=0, along y, with given element_pitch
Parameters
----------
parent : handybeam.world.World
the world to give to this array as parent
element_count : int
count of elements.
element_pitch : float
distance between elements
"""
this = TxArray(parent)
this.name = 'a line of elements, starting at xyz=0, along y, spaced by {:0.1f}mm'.format(element_pitch*1e3)
this.tx_array_element_descriptor = np.zeros((element_count, 16), dtype=np.float32)
half_length = (element_count*element_pitch)/2
for array_element_iy in range(element_count):
# add an element at that indexed location
element_idx = array_element_iy
loc_x = 0
loc_y = (array_element_iy-(element_pitch/2)+0.5) * element_pitch - half_length
this.tx_array_element_descriptor[element_idx, :] = \
this.generate_tx_array_element(x=loc_x, y=loc_y, amplitude_ratio_setting=1.0)
return this | 7cb7a2f5de6ea4ecbe0a67ca8f383bae2bd0f5b0 | 3,655,812 |
def for_all_methods(decorator, exclude_methods=None):
"""
Class decorator
"""
if exclude_methods is None:
exclude_methods = []
def decorate(cls):
for attr in cls.__dict__:
if (
callable(getattr(cls, attr))
and attr not in DO_NOT_DECORATE_METHODS
and attr not in exclude_methods
):
setattr(cls, attr, decorator(getattr(cls, attr)))
return cls
return decorate | 6a24961ebd512a20f3b0cad9c3657fa6ff5997ea | 3,655,813 |
import os
def download_if_not_there(file, url, path, force=False, local_file=None):
"""Downloads a file from the given url if and only if the file doesn't
already exist in the provided path or ``force=True``
Args:
file (str): File name
url (str): Url where the file can be found (without the filename)
path (str): Path to the local folder where the file should be stored
force (bool, optional): Force the file download (useful if you suspect
that the file might have changed)
local_file (str, optional): File name for the local file (defaults to ``file``)
"""
# Path to local file
abs_path = os.path.abspath(path)
local_file = local_file or file
local_file_path = os.path.join(abs_path, local_file)
# Create dir if it doesn't exist
if not os.path.isdir(abs_path):
os.mkdir(abs_path)
# Download if needed
if force or not os.path.isfile(local_file_path):
print(f"Downloading file {local_file} to folder {abs_path} from {url}")
file_url = urljoin(url, file)
return urlretrieve(file_url, local_file_path)
return None | 4b470aeb3fb45e4f10784c2074d14209026dbdbd | 3,655,814 |
def _kuramoto_sivashinsky_old(dimensions, system_size, dt, time_steps):
""" This function INCORRECTLY simulates the Kuramoto–Sivashinsky PDE
It is kept here only for historical reasons.
DO NOT USE UNLESS YOU WANT INCORRECT RESULTS
Even though it doesn't use the RK4 algorithm, it is bundled with the other
simulation functions in simulate_trajectory() for consistency.
Reference for the numerical integration:
"fourth order time stepping for stiff pde-kassam trefethen 2005" at
https://people.maths.ox.ac.uk/trefethen/publication/PDF/2005_111.pdf
Python implementation at: https://github.com/E-Renshaw/kuramoto-sivashinsky
Args:
dimensions (int): nr. of dimensions of the system grid
system_size (int): physical size of the system
dt (float): time step size
time_steps (int): nr. of time steps to simulate
Returns:
(np.ndarray): simulated trajectory of shape (time_steps, dimensions)
"""
n = dimensions # No. of grid points in real space (and hence dimensionality of the output)
size = system_size #
# Define initial conditions and Fourier Transform them
x = np.transpose(np.conj(np.arange(1, n + 1))) / n
u = np.cos(2 * np.pi * x / size) * (1 + np.sin(2 * np.pi * x / size))
v = np.fft.fft(u)
h = dt # time step
nmax = time_steps # No. of time steps to simulate
# Wave numbers
k = np.transpose(
np.conj(np.concatenate((np.arange(0, n / 2), np.array([0]), np.arange(-n / 2 + 1, 0))))) * 2 * np.pi / size
# Just copied from the paper, it works
L = k ** 2 - k ** 4
E = np.exp(h * L)
E_2 = np.exp(h * L / 2)
M = 16
# M = (size * np.pi) //2
r = np.exp(1j * np.pi * (np.arange(1, M + 1) - 0.5) / M)
LR = h * np.transpose(np.repeat([L], M, axis=0)) + np.repeat([r], n, axis=0)
Q = h * np.real(np.mean((np.exp(LR / 2) - 1) / LR, axis=1))
f1 = h * np.real(np.mean((-4 - LR + np.exp(LR) * (4 - 3 * LR + LR ** 2)) / LR ** 3, axis=1))
f2 = h * np.real(np.mean((2 + LR + np.exp(LR) * (-2 + LR)) / LR ** 3, axis=1))
f3 = h * np.real(np.mean((-4 - 3 * LR - LR ** 2 + np.exp(LR) * (4 - LR)) / LR ** 3, axis=1))
uu = [np.array(u)] # List of Real space solutions, later converted to a np.array
g = -0.5j * k
# See paper for details
for n in range(1, nmax + 1):
Nv = g * np.fft.fft(np.real(np.fft.ifft(v)) ** 2)
a = E_2 * v + Q * Nv
Na = g * np.fft.fft(np.real(np.fft.ifft(a)) ** 2)
b = E_2 * v + Q * Na
Nb = g * np.fft.fft(np.real(np.fft.ifft(b)) ** 2)
c = E_2 * a + Q * (2 * Nb - Nv)
Nc = g * np.fft.fft(np.real(np.fft.ifft(c)) ** 2)
v = E * v + Nv * f1 + 2 * (Na + Nb) * f2 + Nc * f3
u = np.real(np.fft.ifft(v))
uu.append(np.array(u))
uu = np.array(uu)
# print("PDE simulation finished")
return uu | 3c0158946b1220e0fa56bea201e2ee31d6df51e5 | 3,655,815 |
def get_geneids_of_user_entity_ids(cursor, unification_table, user_entity_ids):
"""
Get the Entrez Gene IDs of targets using their BIANA user entity ids
"""
query_geneid = ("""SELECT G.value, G.type
FROM externalEntityGeneID G, {} U
WHERE U.externalEntityID = G.externalEntityID AND U.userEntityID = %s
""".format(unification_table))
print('\nRETRIEVING GENE IDS ASSOCIATED TO USER ENTITY IDS...\n')
ueid_to_geneid_to_types = {}
for ueid in user_entity_ids:
cursor.execute(query_geneid, (ueid,))
for row in cursor:
geneid, geneid_type = row
#print(ueid, geneid, geneid_type)
ueid_to_geneid_to_types.setdefault(ueid, {})
ueid_to_geneid_to_types[ueid].setdefault(str(geneid), set()).add(geneid_type.lower())
print('NUMBER OF USER ENTITIES ASSOCIATED WITH GENE IDS: {}'.format(len(ueid_to_geneid_to_types)))
return ueid_to_geneid_to_types | bf192c192352da64716ecab6b4523b50fea5cd0f | 3,655,816 |
import sys
def alpha_015(enddate, index='all'):
"""
Inputs:
enddate: required; the date for which to compute the factor
index: optional; a stock index, defaults to all stocks ('all')
Outputs:
Series: index holds the constituent stock codes, values hold the corresponding factor values
Formula:
(-1 * sum(rank(correlation(rank(high), rank(volume), 3)), 3))
"""
enddate = to_date_str(enddate)
func_name = sys._getframe().f_code.co_name
return JQDataClient.instance().get_alpha_101(**locals()) | 2c1c56b9184c46c04b2f05d2ce317e4129a92e07 | 3,655,817 |
import os
def get_path(root, path):
"""
Shortcut for ``os.path.join(os.path.dirname(root), path)``.
:param root: root path
:param path: path to file or folder
:returns: path to file or folder relative to root
"""
return os.path.join(os.path.dirname(root), path) | 73974d6d54210615b51d3765d1e5dd0d715080f1 | 3,655,818 |
def int_array_to_hex(iv_array):
"""
Converts an integer array to a hex string.
"""
iv_hex = ''
for b in iv_array:
iv_hex += '{:02x}'.format(b)
return iv_hex | f3332b7672a266ad9cae9fc52bc8e1152bcee58b | 3,655,819 |
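A quick usage example, the bytes [0, 255, 16] rendered as lowercase hex:

```python
print(int_array_to_hex([0, 255, 16]))  # '00ff10'
```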
from io import StringIO
import logging
import tempfile
def minimal_sphinx_app(
configuration=None, sourcedir=None, with_builder=False, raise_on_warning=False
):
"""Create a minimal Sphinx environment; loading sphinx roles, directives, etc."""
class MockSphinx(Sphinx):
"""Minimal sphinx init to load roles and directives."""
def __init__(self, confoverrides=None, srcdir=None, raise_on_warning=False):
self.extensions = {}
self.registry = SphinxComponentRegistry()
self.html_themes = {}
self.events = EventManager(self)
# logging
self.verbosity = 0
self._warncount = 0
self.warningiserror = raise_on_warning
self._status = StringIO()
self._warning = StringIO()
logging.setup(self, self._status, self._warning)
self.tags = Tags([])
self.config = Config({}, confoverrides or {})
self.config.pre_init_values()
self._init_i18n()
for extension in builtin_extensions:
self.registry.load_extension(self, extension)
# fresh env
self.doctreedir = ""
self.srcdir = srcdir
self.confdir = None
self.outdir = ""
self.project = Project(srcdir=srcdir, source_suffix={".md": "markdown"})
self.project.docnames = {"mock_docname"}
self.env = BuildEnvironment()
self.env.setup(self)
self.env.temp_data["docname"] = "mock_docname"
# Ignore type checkers because we disrespect superclass typing here
self.builder = None # type: ignore[assignment]
if not with_builder:
return
# this code is only required for more complex parsing with extensions
for extension in self.config.extensions:
self.setup_extension(extension)
buildername = "dummy"
self.preload_builder(buildername)
self.config.init_values()
self.events.emit("config-inited", self.config)
with tempfile.TemporaryDirectory() as tempdir:
# creating a builder attempts to make the doctreedir
self.doctreedir = tempdir
self.builder = self.create_builder(buildername)
self.doctreedir = ""
app = MockSphinx(
confoverrides=configuration, srcdir=sourcedir, raise_on_warning=raise_on_warning
)
return app | 55c911a16748e61ff3461833e82661314c5ffdca | 3,655,820 |
import numpy as np
def calc_Mo_from_M(M, C=C):
"""
Calculate seismic moment (Mo) from
moment magnitude (M) given a scaling law.
C is a scaling constant; should be set at 6,
but is defined elsewhere in the module so
that all functions using it share a value.
"""
term1 = 3/2. * C * (np.log(2) + np.log(5) )
term2 = 3/2. * M * (np.log(2) + np.log(5) )
Mo = np.exp( term1 + term2)
return Mo | f72033100829126a353d7682f449d0ff4cd3efa8 | 3,655,821 |
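Since log(2) + log(5) = log(10), the two terms above collapse to the familiar power-of-ten form Mo = 10 ** (1.5 * (M + C)); a quick equivalence check (the magnitude value is illustrative):

```python
import numpy as np

def calc_Mo_closed_form(M, C=6):
    # exp(1.5 * (C + M) * ln(10)) == 10 ** (1.5 * (M + C))
    return 10.0 ** (1.5 * (M + C))

M = 6.5
print(np.isclose(calc_Mo_from_M(M, C=6), calc_Mo_closed_form(M, C=6)))  # True
```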
import sys
from sys import path
def resource_path(*args):
""" Get absolute path to resource, works for dev and for PyInstaller """
base_path = getattr(sys, '_MEIPASS', path.dirname(path.abspath(__file__)))
return path.join(base_path, *args) | b6094b28f6a0cb1be5f4e4c05349971dbd559863 | 3,655,822 |
import pathlib
def _file_format_from_filename(filename):
"""Determine file format from its name."""
filename = pathlib.Path(filename).name
return _file_formats[filename] if filename in _file_formats else "" | 25f90333696491ddd7b522ca2ac24c84a09e8d07 | 3,655,823 |
def r2k(value):
"""
converts temperature in R (degrees Rankine) to K (kelvins)
:param value: temperature in R (degrees Rankine)
:return: temperature in K (kelvins)
"""
return const.convert_temperature(value, 'R', 'K') | 93c3a7ead8b6b15fc141cd6339acedc044dd2c61 | 3,655,824 |
import select
def add_version(project, publication_id):
"""
Takes "title", "filename", "published", "sort_order", "type" as JSON data
"type" denotes version type, 1=base text, 2=other variant
Returns "msg" and "version_id" on success, otherwise 40x
"""
request_data = request.get_json()
if not request_data:
return jsonify({"msg": "No data provided."}), 400
title = request_data.get("title", None)
filename = request_data.get("filename", None)
published = request_data.get("published", None)
sort_order = request_data.get("sort_order", None)
version_type = request_data.get("type", None)
publications = get_table("publication")
versions = get_table("publication_version")
query = select([publications]).where(publications.c.id == int_or_none(publication_id))
connection = db_engine.connect()
result = connection.execute(query).fetchone()
if result is None:
connection.close()
return jsonify("No such publication exists."), 404
values = {"publication_id": int(publication_id)}
if title is not None:
values["name"] = title
if filename is not None:
values["original_filename"] = filename
if published is not None:
values["published"] = published
if sort_order is not None:
values["sort_order"] = sort_order
if version_type is not None:
values["type"] = version_type
insert = versions.insert().values(**values)
result = connection.execute(insert)
return jsonify({
"msg": "Created new version object.",
"version_id": int(result.inserted_primary_key[0])
}), 201 | b6887e5d09e54827ed4f5ad50f1c3e404d55e821 | 3,655,825 |
import functools
def to_decorator(wrapped_func):
"""
Encapsulates the decorator logic for most common use cases.
Expects a wrapped function with compatible type signature to:
wrapped_func(func, args, kwargs, *outer_args, **outer_kwargs)
Example:
@to_decorator
def foo(func, args, kwargs):
print(func)
return func(*args, **kwargs)
@foo()
def bar():
print(42)
"""
@functools.wraps(wrapped_func)
def arg_wrapper(*outer_args, **outer_kwargs):
def decorator(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
return wrapped_func(func,
args,
kwargs,
*outer_args,
**outer_kwargs)
return wrapped
return decorator
return arg_wrapper | d7c9d0e759e59c26b7c5f7b098e15b78314c8860 | 3,655,826 |
def _get_unit(my_str):
""" Get unit label from suffix """
#
matches = [my_str.endswith(suffix) for suffix in _known_units]
# check to see if unit makes sense
if not any(matches):
raise KeyError('Unit not recognized <{}>!'.format(my_str))
# pick unit that matches, with prefix
matched_unit = [unit for unit,match in zip(_known_units,matches) if match][0]
unit_dict = _unit_dict(matched_unit)
return matched_unit,unit_dict[my_str] | 86cbb00dbd95025fde265461963e45d457d68470 | 3,655,827 |
import random
import numpy as np
def spec_augment(spectrogram, time_mask_para=70, freq_mask_para=20, time_mask_num=2, freq_mask_num=2):
"""
Provides Augmentation for audio
Args: spectrogram, time_mask_para, freq_mask_para, time_mask_num, freq_mask_num
spectrogram (torch.Tensor): spectrum
time_mask_para (int): Hyper Parameter for Time Masking to limit time masking length
freq_mask_para (int): Hyper Parameter for Freq Masking to limit freq masking length
time_mask_num (int): how many time-masked area to make
freq_mask_num (int): how many freq-masked area to make
Returns: feat
- **feat**: Augmented feature
Reference:
「SpecAugment: A Simple Data Augmentation Method for Automatic Speech Recognition」Google Brain Team. 2019.
https://github.com/DemisEom/SpecAugment/blob/master/SpecAugment/spec_augment_pytorch.py
Examples::
Generate spec augmentation from a feature
>>> spec_augment(spectrogram, time_mask_para=70, freq_mask_para=20, time_mask_num=2, freq_mask_num=2)
Tensor([[ -5.229e+02, 0, ..., -5.229e+02, -5.229e+02],
[ 7.105e-15, 0, ..., -7.105e-15, -7.105e-15],
...,
[ 0, 0, ..., 0, 0],
[ 3.109e-14, 0, ..., 2.931e-14, 2.931e-14]])
"""
length = spectrogram.size(0)
n_mels = spectrogram.size(1)
# time mask
for _ in range(time_mask_num):
t = np.random.uniform(low=0.0, high=time_mask_para)
t = int(t)
if length - t > 0:
t0 = random.randint(0, length - t)
spectrogram[t0: t0 + t, :] = 0
# freq mask
for _ in range(freq_mask_num):
f = np.random.uniform(low=0.0, high=freq_mask_para)
f = int(f)
f0 = random.randint(0, n_mels - f)
spectrogram[:, f0: f0 + f] = 0
return spectrogram | a2f1c669253250a581a555a531db79fb756b91bb | 3,655,828 |
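A hedged usage sketch; it assumes torch and numpy (as np) are importable, as the snippet's np.random calls and torch.Tensor input imply:

```python
import torch

spectrogram = torch.randn(400, 80)             # (time, n_mels)
augmented = spec_augment(spectrogram.clone())  # clone: masking happens in place

print(augmented.shape)                  # torch.Size([400, 80])
print((augmented == 0).any().item())    # True with overwhelming probability
```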
def scale(a: tuple, scalar: float) -> tuple:
"""Scales the point."""
return a[0] * scalar, a[1] * scalar | 9638b8cfbd792c2deb35da304c5c375e0402404e | 3,655,829 |
def parse_env(env):
"""Parse the given environment and return useful information about it,
such as whether it is continuous or not and the size of the action space.
"""
# Determine whether input is continuous or discrete. Generally, for
# discrete actions, we will take the softmax of the output
# probabilities and for the continuous we will use the linear output,
# rescaled to the action space.
action_is_continuous = False
action_low = None
action_high = None
if isinstance(env.action_space, gym.spaces.Discrete):
action_size = env.action_space.n
else:
action_is_continuous = True
action_low = env.action_space.low
action_high = env.action_space.high
action_size = env.action_space.low.shape[0]
return action_is_continuous, action_size, action_low, action_high | 4f5c97e71b7c1e8a319c28c4c1c26a1b758c731b | 3,655,830 |
def encode_dataset(dataset, tester, mode="gate"):
"""
dataset: object from the `word-embeddings-benchmarks` repo
dataset.X: a list of lists of pairs of word
dataset.y: similarity between these pairs
tester: tester implemented in my `tester.py`"""
words_1 = [x[0] for x in dataset["X"]]
encoded_words_1 = encode_words(
words_1, tester, mode=mode
)
encoded_words_2 = encode_words(
[x[1] for x in dataset["X"]], tester, mode=mode
)
return encoded_words_1, encoded_words_2 | 726aed93e3cef49f014d44f62e5ff73eae47da43 | 3,655,831 |
import io
import os
def _RunSetupTools(package_root, setup_py_path, output_dir):
"""Executes the setuptools `sdist` command.
Specifically, runs `python setup.py sdist` (with the full path to `setup.py`
given by setup_py_path) with arguments to put the final output in output_dir
and all possible temporary files in a temporary directory. package_root is
used as the working directory.
May attempt to run setup.py multiple times with different
environments/commands if any execution fails:
1. Using the Cloud SDK Python environment, with a full setuptools invocation
(`egg_info`, `build`, and `sdist`).
2. Using the system Python environment, with a full setuptools invocation
(`egg_info`, `build`, and `sdist`).
3. Using the Cloud SDK Python environment, with an intermediate setuptools
invocation (`build` and `sdist`).
4. Using the system Python environment, with an intermediate setuptools
invocation (`build` and `sdist`).
5. Using the Cloud SDK Python environment, with a simple setuptools
invocation which will also work for plain distutils-based setup.py (just
`sdist`).
6. Using the system Python environment, with a simple setuptools
invocation which will also work for plain distutils-based setup.py (just
`sdist`).
The reason for this order is that it prefers first the setup.py invocations
which leave the fewest files on disk. Then, we prefer the Cloud SDK execution
environment as it will be the most stable.
package_root must be writable, or setuptools will fail (there are
temporary files from setuptools that get put in the CWD).
Args:
package_root: str, the directory containing the package (that is, the
*parent* of the package itself).
setup_py_path: str, the path to the `setup.py` file to execute.
output_dir: str, path to a directory in which the built packages should be
created.
Returns:
list of str, the full paths to the generated packages.
Raises:
SysExecutableMissingError: if sys.executable is None
RuntimeError: if the execution of setuptools exited non-zero.
"""
# Unfortunately, there doesn't seem to be any easy way to move *all*
# temporary files out of the current directory, so we'll fail here if we
# can't write to it.
with _TempDirOrBackup(package_root) as working_dir:
# Simpler, but more messy (leaves artifacts on disk) command. This will work
# for both distutils- and setuputils-based setup.py files.
sdist_args = ['sdist', '--dist-dir', output_dir]
# The 'build' and 'egg_info commands (which are invoked anyways as a
# subcommands of 'sdist') are included to ensure that the fewest possible
# artifacts are left on disk.
build_args = [
'build', '--build-base', working_dir, '--build-temp', working_dir]
# Some setuptools versions don't support directly running the egg_info
# command
egg_info_args = ['egg_info', '--egg-base', working_dir]
setup_py_arg_sets = (
egg_info_args + build_args + sdist_args,
build_args + sdist_args,
sdist_args)
# See docstring for the reasoning behind this order.
setup_py_commands = []
for setup_py_args in setup_py_arg_sets:
setup_py_commands.append(_CloudSdkPythonSetupPyCommand(
setup_py_path, setup_py_args, package_root))
setup_py_commands.append(_SystemPythonSetupPyCommand(
setup_py_path, setup_py_args, package_root))
for setup_py_command in setup_py_commands:
out = io.StringIO()
return_code = setup_py_command.Execute(out)
if not return_code:
break
else:
raise RuntimeError(out.getvalue())
local_paths = [os.path.join(output_dir, rel_file)
for rel_file in os.listdir(output_dir)]
log.debug('Python packaging resulted in [%s]', ', '.join(local_paths))
return local_paths | 6638cc2f08f6e588cb469c37ea7e31b40d8cddf8 | 3,655,832 |
from metadataStore.userapi.commands import search
def default_search_func(search_dict):
"""
Defaults to calling the data broker's search function
Parameters
----------
search_dict : dict
The search_dict gets unpacked into the databroker's search function
Returns
-------
search_results: list
The results from the data broker's search function
Raises
------
ImportError
Raised if the metadatastore cannot be found
ValueError
Raised if the search dictionary is empty
"""
logger.info("default_search_func() in broker_query_example.py")
print(search_dict)
# check to see if the dictionary is empty
if len(search_dict) == 0:
logger.error("search_dict has no keys. Raising a value error")
raise ValueError("The search_dict input parameter has no keys")
print(search_dict)
logger.info("search_dict")
try:
logger.info("Search command from metadataStore.userapi.commands "
"imported successfully")
except ImportError:
#todo add logging statement about import error
logger.info("The data broker cannot be found, returning an empty "
"search")
return _defaults["empty_search"]
result=search(**search_dict)
print(result)
return result | b6eefe27a757051d9bbb40b21164a6f2702eeffb | 3,655,833 |
import csv
def readData(filename):
"""
Read in our data from a CSV file and create a dictionary of records,
where the key is a unique record ID and each value is a dict
"""
data_d = {}
with open(filename) as f:
reader = csv.DictReader(f)
for row in reader:
clean_row = [(k, preProcess(v)) for (k, v) in row.items()]
row_id = str(int(row[fieldNameFileNo])) + '.' + str(int(row[fieldNameIdCol]))
data_d[row_id] = dict(clean_row)
return data_d | 193901c98966f4c0bd2b0e326711b962197ef4da | 3,655,834 |
from datetime import datetime
import os
def share(request, token):
"""
Serve a shared file.
This view does not require login, but requires a token.
"""
share = get_object_or_404(
Share,
Q(expires__isnull=True) | Q(expires__gt=datetime.datetime.now()),
token=token)
# Increment number of views.
share.views += 1
share.save()
return sendfile(
request, os.path.join(settings.MEDIA_ROOT, share.media_file.path),
mimetype=share.media_file.mime_type) | 15d75300784104a165aa31f7b71c0c9099b46622 | 3,655,835 |
import numpy as np
def update_roi_mask(roi_mask1, roi_mask2):
"""Y.G. Dec 31, 2016
Update qval_dict1 with qval_dict2
Input:
roi_mask1, 2d-array, label array, same shape as xpcs frame,
roi_mask2, 2d-array, label array, same shape as xpcs frame,
Output:
roi_mask, 2d-array, label array, same shape as xpcs frame, update roi_mask1 with roi_mask2
"""
roi_mask = roi_mask1.copy()
w = np.where(roi_mask2)
roi_mask[w] = roi_mask2[w] + np.max(roi_mask)
return roi_mask | 211d6db69438866ff64c1944fa513ab847d9e641 | 3,655,836 |
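A small worked example of the relabelling: non-zero labels from the second mask are shifted above the first mask's maximum label:

```python
import numpy as np

roi_mask1 = np.array([[1, 1, 0],
                      [0, 2, 0]])
roi_mask2 = np.array([[0, 0, 1],
                      [0, 0, 1]])

print(update_roi_mask(roi_mask1, roi_mask2))
# [[1 1 3]
#  [0 2 3]]
```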
import os
def get_latest_recipes(recipe_folder, config, package="*"):
"""
Generator of recipes.
Finds (possibly nested) directories containing a `meta.yaml` file and returns
the latest version of each recipe.
Parameters
----------
recipe_folder : str
Top-level dir of the recipes
config : dict or filename
package : str or iterable
Pattern or patterns to restrict the results.
"""
def toplevel(x):
return x.replace(
recipe_folder, '').strip(os.path.sep).split(os.path.sep)[0]
config = load_config(config)
recipes = sorted(get_recipes(recipe_folder, package), key=toplevel)
for package, group in groupby(recipes, key=toplevel):
group = list(group)
if len(group) == 1:
yield group[0]
else:
def get_version(p):
meta_path = os.path.join(p, 'meta.yaml')
meta = load_first_metadata(meta_path, finalize=False)
version = meta.get_value('package/version')
return VersionOrder(version)
sorted_versions = sorted(group, key=get_version)
if sorted_versions:
yield sorted_versions[-1] | 9441f0fcfce93f094ca50a2b510e4e5c2afb6c1b | 3,655,837 |
from typing import List
from typing import Tuple
from typing import Dict
from typing import Any
def training_loop(
train_sequences: List[Tuple[pd.DataFrame, float]],
val_sequences: List[Tuple[pd.DataFrame, float]],
test_sequences: List[Tuple[pd.DataFrame, float]],
parameters: Dict[str, Any],
dir_path: str,
):
"""
Training loop for the LSTM model.
Parameters
----------
train_sequences: List[Tuple[pd.DataFrame, float]]
List of training sequences.
val_sequences: List[Tuple[pd.DataFrame, float]]
List of validation sequences.
test_sequences: List[Tuple[pd.DataFrame, float]]
List of test sequences.
parameters: Dict[str, Any]
Hyperparameters for the model.
dir_path: str
Path to the directory where the model will be saved.
"""
seed_everything(42, workers=True)
logger = WandbLogger(project=parameters["wandb_project"])
gpu_value = 1 if parameters["run_on_gpu"] is True else 0
model = PricePredictor(
batch_size=parameters["train_batch_size"],
dropout_rate=parameters["dropout_rate"],
hidden_size=parameters["hidden_size"],
learning_rate=parameters["learning_rate"],
number_of_features=parameters["number_of_features"],
number_of_layers=parameters["number_of_layers"],
run_on_gpu=parameters["run_on_gpu"],
)
data_module = LSTMDataLoader(
train_sequences=train_sequences,
val_sequences=val_sequences,
test_sequences=test_sequences,
train_batch_size=parameters["train_batch_size"],
val_batch_size=parameters["val_batch_size"],
train_workers=parameters["train_workers"],
val_workers=parameters["val_workers"],
)
checkpoint_callback = callbacks.ModelCheckpoint(
dirpath=dir_path,
save_top_k=1,
verbose=True,
monitor="valid/loss",
mode="min",
)
early_stopping_callback = callbacks.EarlyStopping(
monitor="valid/loss",
patience=2,
verbose=True,
mode="min",
)
trainer = Trainer(
max_epochs=parameters["max_epochs"],
logger=logger,
callbacks=[checkpoint_callback, early_stopping_callback],
gpus=gpu_value,
log_every_n_steps=parameters["log_n_steps"],
progress_bar_refresh_rate=10,
deterministic=True,
)
trainer.fit(model, data_module)
trainer.test(model, data_module)
return {"training_done": True} | 27b02630173d972a83c82140e0d2c6c957266fa4 | 3,655,838 |
def nmi(X, y):
"""
Normalized mutual information between X and y.
:param X: feature matrix of shape (n_samples, n_features)
:param y: target vector of shape (n_samples,)
"""
mi = mutual_info_regression(X, y)
return mi / mi.max() | 5da09b9395883f9b197b2c2add7850d0e1870c44 | 3,655,839 |
from typing import Optional
import re
def attribute_as_str(path: str, name: str) -> Optional[str]:
"""Return the two numbers found behind --[A-Z] in path.
If several matches are found, the last one is returned.
Parameters
----------
path : string
String with path of file/folder to get attribute from.
name : string
Name of attribute to get. Should be A-Z or a-z (implicitly converted to
uppercase).
Returns
-------
string
Returns two digit number found in path behind --name.
"""
matches = re.findall("--" + name.upper() + "([0-9]{2})", path)
if matches:
return str(matches[-1])
return None | 257fec03ca911c703e5e06994477cf0b3b75a2ae | 3,655,840 |
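A quick usage example (the last match wins when the attribute appears more than once in the path):

```python
print(attribute_as_str("exp--A01--B02--A07.czi", "a"))  # '07'
print(attribute_as_str("exp--B02.czi", "a"))            # None
```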
def racket_module(value):
""" a very minimal racket -> python interpreter """
_awaiting = object()
_provide_expect = object()
def car(tup): return tup[0]
def cdr(tup): return tup[1:]
def eval(env, tup):
last = None
for value in tup:
if isinstance(value, tuple):
if car(value) == 'module-unexp':
_, file, lang, rest = value
return eval(None, rest)
elif car(value) == 'module-begin':
env = {_provide_expect: set()}
eval(env, cdr(value))
provide_expect = env.pop(_provide_expect)
out = {name:value for name, value in env.items() if name in provide_expect}
missing = provide_expect - set(out)
if missing:
raise ValueError(f'not provided {missing}')
return out
elif car(value) == 'provide':
for v in cdr(value): # FIXME too simple
env[_provide_expect].add(v)
last = None
elif car(value) == 'define':
if isinstance(value[2], tuple) and len(value[2:]) > 1:
raise NotImplementedError('havent implemented functions yet')
env[value[1]] = eval(env, value[2:])
elif car(value) == 'quote':
rest = value[1]
return rest
last = None
else:
last = value
return last
return eval(None, value) | c7fc0937b70bb71143af630cb93699632081ece8 | 3,655,841 |
def validate_inputs(input_data: pd.DataFrame) -> pd.DataFrame:
"""Check model for unprocessable values."""
valudated_data = input_data.copy()
# check for numerical variables with NA not seen during training
return validated_data | 65087650e9a5e85c3a362a5e27f82bf5f27a1f59 | 3,655,842 |
def index():
"""
Check if the user is authenticated and render the index page,
or redirect to the login page.
"""
if current_user.is_authenticated:
user_id = current_user._uid
return render_template('index.html', score=get_score(user_id), username=get_username(user_id))
else:
return redirect(url_for('login')) | edb8ad552ab34640fc030250659ebd05027712fa | 3,655,843 |
from typing import Iterable
from typing import Callable
from typing import Optional
def pick(
seq: Iterable[_T], func: Callable[[_T], float], maxobj: Optional[_T] = None
) -> Optional[_T]:
"""Picks the object obj where func(obj) has the highest value."""
maxscore = None
for obj in seq:
score = func(obj)
if maxscore is None or maxscore < score:
(maxscore, maxobj) = (score, obj)
return maxobj | 7f29c3aef5086957a1b1bd97f086a6ba6fb22cfd | 3,655,844 |
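For example, picking the longest string:

```python
print(pick(["a", "bbb", "cc"], len))  # 'bbb'
print(pick([], len))                  # None for an empty sequence
```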
def rsync_public_key(server_list):
"""
Push the public key to the given servers.
:return: Only the servers pushed successfully are returned; failures are written directly to the error log.
"""
# server_list = [('47.100.231.147', 22, 'root', '-----BEGIN RSA PRIVATE KEYxxxxxEND RSA PRIVATE KEY-----', 'false')]
ins_log.read_log('info', 'rsync public key to server')
rsync_error_list = []
rsync_sucess_list = []
sync_key_obj = RsyncPublicKey()
check = sync_key_obj.check_rsa()
if check:
res_data = start_rsync(server_list)
if not res_data.get('status'):
rsync_error_list.append(res_data)
else:
rsync_sucess_list.append(res_data)
if rsync_error_list:
write_error_log(rsync_error_list)
return rsync_sucess_list | 94c9941e3f63caf15b0df8c19dc91ee54d002316 | 3,655,845 |
import re
from bs4 import BeautifulSoup
def create_one(url, alias=None):
"""
Shortens a URL using the TinyURL API.
"""
if url != '' and url is not None:
regex = re.compile(pattern)
searchres = regex.search(url)
if searchres is not None:
if alias is not None:
if alias != '':
payload = {
'url': url,
'submit': 'Make TinyURL!',
'alias': alias
}
data = parse_helper.urlencode(payload)
full_url = API_CREATE_LIST[1] + data
ret = request_helper.urlopen(full_url)
soup = BeautifulSoup(ret, 'html.parser')
check_error = soup.p.b.string
if 'The custom alias' in check_error:
raise errors.AliasUsed(
"The given Alias you have provided is already"
" being used.")
else:
return soup.find_all(
'div', {'class': 'indent'}
)[1].b.string
else:
raise errors.InvalidAlias(
"The given Alias cannot be 'empty'.")
else:
url_data = parse_helper.urlencode(dict(url=url))
byte_data = str.encode(url_data)
ret = request_helper.urlopen(
API_CREATE_LIST[0], data=byte_data).read()
# urlopen().read() returns bytes; decode it instead of stripping the bytes repr
result = ret.decode('utf-8')
return result
else:
raise errors.InvalidURL("The given URL is invalid.")
else:
raise errors.URLError("The given URL Cannot be 'empty'.") | a543c23bc694fe09bae3bb4d59802fa6a5c3897d | 3,655,846 |
import tensorflow as tf
def duplicate_each_element(vector: tf.Tensor, repeat: int):
"""This method takes a vector and duplicates each element the number of times supplied."""
height = tf.shape(vector)[0]
exp_vector = tf.expand_dims(vector, 1)
tiled_states = tf.tile(exp_vector, [1, repeat])
mod_vector = tf.reshape(tiled_states, [repeat * height])
return mod_vector | 5b8ea4307d5779929def59805bc5210d8e948a4d | 3,655,847 |
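A hedged usage sketch, assuming TensorFlow 2.x eager execution:

```python
import tensorflow as tf

vector = tf.constant([1, 2, 3])
print(duplicate_each_element(vector, repeat=2).numpy())  # [1 1 2 2 3 3]
```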
def apk(actual, predicted, k=3):
"""
Computes the average precision at k.
This function computes the average precision at k for single predictions.
Parameters
----------
actual : int
The true label
predicted : list
A list of predicted elements (order does matter)
k : int, optional
The maximum number of predicted elements
Returns
-------
score : double
The average precision at k over the input lists
"""
if len(predicted) > k:
predicted = predicted[:k]
score = 0.0
num_hits = 0.0
for i, p in enumerate(predicted):
if p == actual and p not in predicted[:i]:
num_hits += 1.0
score += num_hits / (i+1.0)
return score | 27c8d1d03f5fe571f89378d1beb60cde9d82f27e | 3,655,848 |
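A worked example: with the true label in second place, the single hit contributes a precision of 1/2; in first place it contributes 1; with no hit the score is 0:

```python
print(apk(actual=1, predicted=[0, 1, 2], k=3))  # 0.5
print(apk(actual=1, predicted=[1, 0, 2], k=3))  # 1.0
print(apk(actual=1, predicted=[0, 2, 3], k=3))  # 0.0
```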
def make_predictor(model):
"""
Factory to build predictor based on model type provided
Args:
model (DeployedModel): model to use when instantiating a predictor
Returns:
BasePredictor Child: instantiated predictor object
"""
verify = False if model.example == '' else True
if model.model_type == ModelType.vw:
return VWPredictor(model=model, verify_on_load=verify)
elif model.model_type == ModelType.sklearn:
return SKLearnPredictor(model=model, sep=app.config.get('SKLEARN_SEPARATOR', None), verify_on_load=verify)
else:
raise ApiException(name='Invalid Input', message='unknown model type: {type}'.format(type=model.model_type)) | 87a89d179c28e971a3c29946e94105542686510e | 3,655,849 |
def get(identifier: str) -> RewardScheme:
"""Gets the `RewardScheme` that matches with the identifier.
Arguments:
identifier: The identifier for the `RewardScheme`
Raises:
KeyError: if identifier is not associated with any `RewardScheme`
"""
if identifier not in _registry.keys():
raise KeyError(
'Identifier {} is not associated with any `RewardScheme`.'.format(identifier))
return _registry[identifier]() | 574126cab1a1c1bd10ca2ada1fe626ba66910b11 | 3,655,850 |
def add_query_params(url: str, query_params: dict) -> str:
"""Add query params dict to a given url (which can already contain some query parameters)."""
path_result = parse.urlsplit(url)
base_url = path_result.path
# parse existing query parameters if any
existing_query_params = dict(parse.parse_qsl(path_result.query))
all_query_params = {**existing_query_params, **query_params}
# add query parameters to url if any
if all_query_params:
base_url += "?" + parse.urlencode(all_query_params)
return base_url | 8ea28c2492343e0f7af3bac5d44751827dd6b7aa | 3,655,851 |
import ast
import torch
def tokenize_lexicon_str(vocab, lexicon_str, pad_max_length, device):
"""
Tokenize each row of lexicon phrases into vocabulary ids.
:param lexicon_str: iterable of stringified lists of phrases
:return: a tensor of vocabulary ids and a padding mask, each of shape (batch_size, pad_max_length)
"""
out_tensor = []
out_mask = []
for row in lexicon_str:
lexicon_words = []
row_lst = ast.literal_eval(row)
for phrase in row_lst:
# handle phrase = False
if not phrase:
continue
tokenized_words = en_tokenizer(phrase)
lexicon_words.extend(tokenized_words)
# remove duplicates
lexicon_words = list(dict.fromkeys(lexicon_words))
lexicon_words.extend(get_specials_program())
# Pad and create a mask
padded = ['<pad>'] * pad_max_length
mask = [0] * pad_max_length
data_len = min(len(lexicon_words), pad_max_length)
padded[:data_len] = lexicon_words[:data_len]
mask[:data_len] = [1] * data_len
tensor = torch.tensor([vocab[token] for token in padded], dtype=torch.long)
mask = torch.tensor(mask, dtype=torch.long)
# Add to list
out_tensor.append(tensor)
out_mask.append(mask)
# Stack
out_tensor = torch.stack(out_tensor).to(device)
out_mask = torch.stack(out_mask).to(device)
return out_tensor, out_mask | a316b4cba9f9ca729de4a49c17dc718ad004f56c | 3,655,852 |
def try_get_mark(obj, mark_name):
"""Tries getting a specific mark by name from an object, returning None if no such mark is found
"""
marks = get_marks(obj)
if marks is None:
return None
return marks.get(mark_name, None) | 1dd8b9635d836bbce16e795900d7ea9d154e5876 | 3,655,853 |
def timedelta_to_seconds(ts):
""" Convert the TimedeltaIndex of a pandas.Series into a numpy
array of seconds. """
seconds = ts.index.values.astype(float)
seconds -= seconds[-1]
seconds /= 1e9
return seconds | 4565d7a691e8ac004d9d529568db0d032a56d088 | 3,655,854 |
def parse_gage(s):
"""Parse a streamgage key-value pair.
Parse a streamgage key-value pair, separated by '='; that's the reverse of ShellArgs.
On the command line (argparse) a declaration will typically look like::
foo=hello or foo="hello world"
:param s: str
:rtype: tuple(key, value)
"""
# Adapted from: https://gist.github.com/fralau/061a4f6c13251367ef1d9a9a99fb3e8d
items = s.split('=')
key = items[0].strip() # we remove blanks around keys, as is logical
value = ''
if len(items) > 1:
# rejoin the rest:
value = '='.join(items[1:])
return key, value | 299b47f3a4757c924620bdc05e74f195a4cb7967 | 3,655,855 |
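For example:

```python
print(parse_gage("foo=hello world"))  # ('foo', 'hello world')
print(parse_gage("a=b=c"))            # ('a', 'b=c')
print(parse_gage("flag"))             # ('flag', '')
```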
from typing import Mapping
def get_attribute(instance, attrs):
"""
Similar to Python's built in `getattr(instance, attr)`,
but takes a list of nested attributes, instead of a single attribute.
Also accepts either attribute lookup on objects or dictionary lookups.
"""
for attr in attrs:
try:
# pylint: disable=isinstance-second-argument-not-valid-type
if isinstance(instance, Mapping):
instance = instance[attr]
else:
instance = getattr(instance, attr)
except ObjectDoesNotExist:
return None
return instance | 121ef8d4b0b6b69fda1591e2f372a4cf9ec60129 | 3,655,856 |
import traceback
def pull_cv_project(request, project_id):
"""pull_cv_project.
Delete the local project, parts and images. Pull the
remote project from Custom Vision.
Args:
request:
project_id:
"""
# FIXME: open a Thread/Task
logger.info("Pulling CustomVision Project")
# Check Customvision Project id
customvision_project_id = request.query_params.get(
"customvision_project_id")
logger.info("customvision_project_id: %s", {customvision_project_id})
# Check Partial
try:
is_partial = bool(strtobool(request.query_params.get("partial")))
except Exception:
is_partial = True
logger.info("Loading Project in Partial Mode: %s", is_partial)
try:
pull_cv_project_helper(project_id=project_id,
customvision_project_id=customvision_project_id,
is_partial=is_partial)
return Response({"status": "ok"}, status=status.HTTP_200_OK)
except Exception:
err_msg = traceback.format_exc()
return Response(
{
"status": "failed",
"log":
str(err_msg) # Change line plz...
},
status=status.HTTP_400_BAD_REQUEST) | c21ad6a15c9eafba723cd20b1bafbcc4a36dfc38 | 3,655,857 |
from datetime import datetime
def get_log_line_components(s_line):
"""
Given a log line, returns a tuple (message, datetime, log_level):
the message with its prefix stripped, the date/time as a datetime
object, and the log level as a single character (the first character
of the level's name, capitalized).
"""
try:
dtime = datetime.strptime(s_line[0:19], "%Y-%m-%d %H:%M:%S")
except ValueError:
raise LogUtilsError("Not a proper date/time at start of log line!")
if dtime is None:
raise LogUtilsError("Not a proper date/time at start of log line!")
log_level = s_line[24]
if log_level == "D":
s_line = s_line[30:]
elif log_level == "I":
s_line = s_line[29:]
elif log_level == "W":
s_line = s_line[32:]
elif log_level == "E":
s_line = s_line[30:]
elif log_level == "C":
s_line = s_line[33:]
else:
raise LogUtilsError("log-level not in log line!")
return s_line, dtime, log_level | 3ec7e5418f39a579ce8b71f3c51a8e1356cb5291 | 3,655,858 |
def is_eligible_for_bulletpoint_vote(recipient, voter):
"""
Returns True if the recipient is eligible to receive an award.
Checks to ensure recipient is not also the voter.
"""
if voter is None:
return True
return (recipient != voter) and is_eligible_user(recipient) | 20d34076c92b7fd9474a7cf4edf7ca38ad3ffba5 | 3,655,859 |
def _get_in_collection_filter_directive(input_filter_name):
"""Create a @filter directive with in_collecion operation and the desired variable name."""
return DirectiveNode(
name=NameNode(value=FilterDirective.name),
arguments=[
ArgumentNode(
name=NameNode(value="op_name"),
value=StringValueNode(value="in_collection"),
),
ArgumentNode(
name=NameNode(value="value"),
value=ListValueNode(
values=[
StringValueNode(value="$" + input_filter_name),
],
),
),
],
) | 3c8b18314aa415d6dbec14b63a956e5fdf73aa9d | 3,655,860 |
def load_labelmap(path):
"""Loads label map proto.
Args:
path: path to StringIntLabelMap proto text file.
Returns:
a StringIntLabelMapProto
"""
with tf.gfile.GFile(path, 'r') as fid:
label_map_string = fid.read()
label_map = string_int_label_map_pb2.StringIntLabelMap()
try:
text_format.Merge(label_map_string, label_map)
except text_format.ParseError:
label_map.ParseFromString(label_map_string)
_validate_label_map(label_map)
return label_map | 3ec29d2dc8fc4bacde5f0dfa49465676f5e8c44c | 3,655,861 |
def calc_internal_hours(entries):
"""
Calculates internal utilizable hours from an array of entry dictionaries
"""
internal_hours = 0.0
for entry in entries:
if entry['project_name'][:22] == "TTS Acq / Internal Acq" and not entry['billable']:
internal_hours = internal_hours + float(entry['hours_spent'])
return internal_hours | 0962ee49f60ac296668294e6d2f075ce981cbc55 | 3,655,862 |
def format_str_strip(form_data, key):
"""
"""
if key not in form_data:
return ''
return form_data[key].strip() | 44c5aaf8c5e11bfee05971d2961e5dcaf4cd8d9f | 3,655,863 |
def get_element(element_path: str):
"""
For base extensions to get the main window's widgets, events and functions.
Note: you must be sure the element's path is correct.
Syntax: element path (parent>attribute>attribute...), e.g. UI_WIDGETS>textViewer
"""
try:
listed_element_path = element_path.split('>')
attribute = getattr(top, listed_element_path[0])
for nowAttributeName in listed_element_path[1:]:
attribute = getattr(attribute, nowAttributeName)
return attribute
except Exception as msg:
print(msg)
return None | 041ec89a700018ce5a6883a80a1998d7179c7041 | 3,655,864 |
import urllib
def gravatar_for_email(email, size=None, rating=None):
"""
Generates a Gravatar URL for the given email address.
Syntax::
{% gravatar_for_email <email> [size] [rating] %}
Example::
{% gravatar_for_email [email protected] 48 pg %}
"""
gravatar_url = "%savatar/%s" % (GRAVATAR_URL_PREFIX,
_get_gravatar_id(email))
parameters = [p for p in (
('d', GRAVATAR_DEFAULT_IMAGE),
('s', size or GRAVATAR_DEFAULT_SIZE),
('r', rating or GRAVATAR_DEFAULT_RATING),
) if p[1]]
if parameters:
gravatar_url += '?' + urllib.urlencode(parameters, doseq=True)
return gravatar_url | 73f3eed5ea073cd4bf6e4a978983c4ed12cedcd6 | 3,655,865 |
import numpy as np
def decmin_to_decdeg(pos, decimals=4):
"""Convert degrees and decimal minutes into decimal degrees."""
pos = float(pos)
output = np.floor(pos / 100.) + (pos % 100) / 60.
return round_value(output, nr_decimals=decimals) | de6490ce5278090b90f87adab57fe8b912307e2c | 3,655,866 |
def get_selector(selector_list, identifiers, specified_workflow=None):
"""
Determine the correct workflow selector from a list of selectors, series of identifiers and user specified workflow if defined.
Parameters
----------
    selector_list : list
        List of dictionaries, where the values of all dictionaries are workflow selectors.
    identifiers : list
        List of identifiers, specified in order of precedence, that are to be looked up in selector_list.
    specified_workflow : str
        User-specified workflow for the build.
Returns
-------
selector(BasicWorkflowSelector)
selector object which can specify a workflow configuration that can be passed to `aws-lambda-builders`
"""
# Create a combined view of all the selectors
all_selectors = {}
for selector in selector_list:
all_selectors = {**all_selectors, **selector}
# Check for specified workflow being supported at all and if it's not, raise an UnsupportedBuilderException.
if specified_workflow and specified_workflow not in all_selectors:
raise UnsupportedBuilderException("'{}' does not have a supported builder".format(specified_workflow))
    # Loop through all identifiers to gather a list of selectors with potential matches.
    selectors = [all_selectors.get(identifier, None) for identifier in identifiers]
    # Initialize a `None` selector.
selector = None
try:
# Find first non-None selector.
# Return the first selector with a match.
selector = next(_selector for _selector in selectors if _selector)
except StopIteration:
pass
return selector | f458a82d2d0e81070eefabd490127567a1b67bbb | 3,655,867 |
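A minimal usage sketch for get_selector (not from the source): the identifier strings and the string stand-ins for selector objects below are hypothetical — any non-None value works as a selector here.
selector_list = [{"requirements.txt": "python-pip-selector"}, {"package.json": "nodejs-npm-selector"}]
identifiers = ["setup.py", "requirements.txt"]  # checked in order of precedence; first match wins
assert get_selector(selector_list, identifiers) == "python-pip-selector"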
import functools
import inspect
import re
def register_pth_hook(fname, func=None):
"""
::
# Add a pth hook.
@setup.register_pth_hook("hook_name.pth")
def _hook():
'''hook contents.'''
"""
if func is None:
return functools.partial(register_pth_hook, fname)
source = inspect.getsource(func)
if not re.match(
rf"@setup\.register_pth_hook.*\ndef {re.escape(func.__name__)}\(",
source):
raise SyntaxError("register_pth_hook must be used as a toplevel "
"decorator to a function")
_, source = source.split("\n", 1)
_pth_hook_mixin._pth_hooks.append((fname, func.__name__, source)) | 1090d4601e0d51ec4c7761bb070318f906c23f87 | 3,655,868 |
def callable_or_raise(obj):
"""Check that an object is callable, else raise a :exc:`ValueError`.
"""
if not callable(obj):
raise ValueError('Object {0!r} is not callable.'.format(obj))
return obj | cb6dd8c03ea41bb94a8357553b3f3998ffcc0d65 | 3,655,869 |
def extractLandMarks(fm_results):
"""TODO: add preprocessing/normalization step here"""
x = []
y = []
z = []
for i in range(468):
x.append(fm_results.multi_face_landmarks[0].landmark[i].x)
y.append(fm_results.multi_face_landmarks[0].landmark[i].y)
z.append(fm_results.multi_face_landmarks[0].landmark[i].z)
return x + y + z | 04370c6e0a3a8a4a68b914e0b7c002b829d0a042 | 3,655,870 |
def conv_coef(posture="standing", va=0.1, ta=28.8, tsk=34.0,):
"""
Calculate convective heat transfer coefficient (hc) [W/K.m2]
Parameters
----------
posture : str, optional
Select posture from standing, sitting or lying.
The default is "standing".
va : float or iter, optional
Air velocity [m/s]. If iter is input, its length should be 17.
The default is 0.1.
ta : float or iter, optional
Air temperature [oC]. If iter is input, its length should be 17.
The default is 28.8.
tsk : float or iter, optional
Skin temperature [oC]. If iter is input, its length should be 17.
The default is 34.0.
Returns
-------
hc : numpy.ndarray
Convective heat transfer coefficient (hc) [W/K.m2].
"""
# Natural convection
if posture.lower() == "standing":
# Ichihara et al., 1997, https://doi.org/10.3130/aija.62.45_5
hc_natural = np.array([
4.48, 4.48, 2.97, 2.91, 2.85,
3.61, 3.55, 3.67, 3.61, 3.55, 3.67,
2.80, 2.04, 2.04, 2.80, 2.04, 2.04,])
elif posture.lower() in ["sitting", "sedentary"]:
# Ichihara et al., 1997, https://doi.org/10.3130/aija.62.45_5
hc_natural = np.array([
4.75, 4.75, 3.12, 2.48, 1.84,
3.76, 3.62, 2.06, 3.76, 3.62, 2.06,
2.98, 2.98, 2.62, 2.98, 2.98, 2.62,])
elif posture.lower() in ["lying", "supine"]:
# Kurazumi et al., 2008, https://doi.org/10.20718/jjpa.13.1_17
# The values are applied under cold environment.
hc_a = np.array([
1.105, 1.105, 1.211, 1.211, 1.211,
0.913, 2.081, 2.178, 0.913, 2.081, 2.178,
0.945, 0.385, 0.200, 0.945, 0.385, 0.200,])
hc_b = np.array([
0.345, 0.345, 0.046, 0.046, 0.046,
0.373, 0.850, 0.297, 0.373, 0.850, 0.297,
0.447, 0.580, 0.966, 0.447, 0.580, 0.966,])
hc_natural = hc_a * (abs(ta - tsk) ** hc_b)
# Forced convection
# Ichihara et al., 1997, https://doi.org/10.3130/aija.62.45_5
hc_a = np.array([
15.0, 15.0, 11.0, 17.0, 13.0,
17.0, 17.0, 20.0, 17.0, 17.0, 20.0,
14.0, 15.8, 15.1, 14.0, 15.8, 15.1,])
hc_b = np.array([
0.62, 0.62, 0.67, 0.49, 0.60,
0.59, 0.61, 0.60, 0.59, 0.61, 0.60,
0.61, 0.74, 0.62, 0.61, 0.74, 0.62,])
hc_forced = hc_a * (va ** hc_b)
    # Select natural or forced hc.
    # If the local va is under 0.2 m/s, the natural-convection hc value is used.
    hc = np.where(va<0.2, hc_natural, hc_forced)  # hc [W/K.m2]
return hc | d351b82d2ffb81396b4e0ce2f05b429cb79ac28c | 3,655,871 |
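A minimal usage sketch for conv_coef, assuming numpy is imported as np as in the original module; with local air velocity below 0.2 m/s the natural-convection coefficients are returned for all 17 body segments.
hc_sitting = conv_coef(posture="sitting", va=0.05, ta=26.0, tsk=34.0)
assert hc_sitting.shape == (17,)                   # one coefficient per body segment
hc_standing = conv_coef(posture="standing", va=1.0)  # va >= 0.2 m/s selects the forced-convection model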
def _one_formula(lex, fmt, varname, nvars):
"""Return one DIMACS SAT formula."""
f = _sat_formula(lex, fmt, varname, nvars)
_expect_token(lex, {RPAREN})
return f | 166c73c6214a0f6e3e6267804d2dd5c16b43a652 | 3,655,872 |
def _split_variables(variables):
"""Split variables into always passed (std) and specified (file).
We always pass some variables to each step but need to
explicitly define file and algorithm variables so they can
be linked in as needed.
"""
file_vs = []
std_vs = []
for v in variables:
cur_type = v["type"]
while isinstance(cur_type, dict):
if "items" in cur_type:
cur_type = cur_type["items"]
else:
cur_type = cur_type["type"]
if (cur_type in ["File", "null", "record"] or
(isinstance(cur_type, (list, tuple)) and
("File" in cur_type or {'items': 'File', 'type': 'array'} in cur_type))):
file_vs.append(v)
elif v["id"] in ALWAYS_AVAILABLE:
std_vs.append(v)
else:
file_vs.append(v)
return file_vs, std_vs | 2b297bf99153256769d42c3669f3f8f29da95b70 | 3,655,873 |
import numpy
def percentiles_fn(data, columns, values=[0.0, 0.25, 0.5, 0.75, 1.0], remove_missing=False):
"""
    Task: Get the data values corresponding to each percentile chosen in
    the "values" (array of percentiles) after sorting the data.
    Returns -1 if no data was found.
:param data: data structure for partitioning
:type data: numpy.ndarray
:param columns: columns or variable names of the data to be used
:type columns: str array
:param values: percentile values to be processed
:type values: float array
:param remove_missing: flag to remove missing values
:type remove_missing: boolean
"""
result = -1
n_elements = data[columns[0]].shape[0]
if n_elements <= 0:
return result
if remove_missing:
data = nomi(data, columns)
n_elements = data[columns[0]].shape[0]
values = numpy.array(values)
if max(values) > 1.0:
values = values * 0.01
#### Get an array of indices of the sorted data
sorted_index_arr = numpy.argsort(data[columns[0]])
ind = None
#### Iterate through each percentile and get the corresponding
#### value at that percentile of the sorted data
for i in range(len(values)):
if (values[i] < 0.0) or (values[i] > 1.0):
return -1
#### Setting ind to the percentile wanted
if values[i] <= 0.5:
ind = int(values[i] * n_elements)
else:
ind = int(values[i] * (n_elements + 1))
if ind >= n_elements:
ind = n_elements - int(1)
if i == 0:
result = data[columns[0]][sorted_index_arr[ind]]
else:
result = numpy.append(result, data[columns[0]][sorted_index_arr[ind]])
return result | cfacd575e3e1f8183b1e82512859198a973a1f85 | 3,655,874 |
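A minimal usage sketch for percentiles_fn, assuming numpy is imported as in the original module and that data is a structured array addressable by column name.
data = numpy.array([(1.0,), (5.0,), (3.0,), (9.0,), (7.0,)], dtype=[("speed", "f8")])
quartiles = percentiles_fn(data, ["speed"], values=[0.0, 0.5, 1.0])
# -> array([1., 5., 9.]): minimum, median and maximum of the "speed" column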
def base_checkout_total(
subtotal: TaxedMoney,
shipping_price: TaxedMoney,
discount: Money,
currency: str,
) -> TaxedMoney:
"""Return the total cost of the checkout."""
zero = zero_taxed_money(currency)
total = subtotal + shipping_price - discount
# Discount is subtracted from both gross and net values, which may cause negative
# net value if we are having a discount that covers whole price.
# Comparing TaxedMoney objects works only on gross values. That is why we are
# explicitly returning zero_taxed_money if total.gross is less or equal zero.
if total.gross <= zero.gross:
return zero
return total | 04017f67249b2415779b8a7bbfa854653ec6c285 | 3,655,875 |
def if_statement(lhs='x', op='is', rhs=0, _then=None, _else=None):
"""Celery Script if statement.
Kind:
_if
Arguments:
lhs (left-hand side)
op (operator)
rhs (right-hand side)
_then (id of sequence to execute on `then`)
_else (id of sequence to execute on `else`)
"""
args = {}
args['lhs'] = lhs
args['op'] = op
args['rhs'] = rhs
if _then is None:
_then_kind = 'nothing'
_then_args = {}
else:
_then_kind = 'execute'
_then_args = {"sequence_id": _then}
if _else is None:
_else_kind = 'nothing'
_else_args = {}
else:
_else_kind = 'execute'
_else_args = {"sequence_id": _else}
args['_then'] = create_node(kind=_then_kind, args=_then_args)
args['_else'] = create_node(kind=_else_kind, args=_else_args)
_if_statement = create_node(kind='_if', args=args)
return _if_statement | c42baa0933be08e89049894acfd3c003832331db | 3,655,876 |
def add_next_open(df, col='next_open'):
"""
找出下根K线的开盘价
"""
df[col] = df[CANDLE_OPEN_COLUMN].shift(-1)
df[col].fillna(value=df[CANDLE_CLOSE_COLUMN], inplace=True)
return df | 185fdd87b437546be63548506adef7bb56c4aa5d | 3,655,877 |
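A hedged usage sketch for add_next_open; pandas imported as pd and the candle column constants (CANDLE_OPEN_COLUMN / CANDLE_CLOSE_COLUMN) are assumed to be defined in the original module.
df = pd.DataFrame({CANDLE_OPEN_COLUMN: [1.0, 2.0, 3.0], CANDLE_CLOSE_COLUMN: [1.5, 2.5, 3.5]})
add_next_open(df)
# df["next_open"] -> [2.0, 3.0, 3.5]; the last row falls back to its own close price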
def seasons_used(parameters):
"""
Get a list of the seasons used for this set of parameters.
"""
seasons_used = set([s for p in parameters for s in p.seasons])
# Make sure this list is ordered by SEASONS.
return [season for season in SEASONS if season in seasons_used] | 641e0b4dd01bd30bf9129a9302ad5935a614588f | 3,655,878 |
def get_polyphyletic(cons):
"""get polyphyletic groups and a representative tip"""
tips, taxonstrings = unzip(cons.items())
tree, lookup = make_consensus_tree(taxonstrings, False, tips=tips)
cache_tipnames(tree)
names = {}
for n in tree.non_tips():
if n.name is None:
continue
if (n.name, n.Rank) not in names:
names[(n.name, n.Rank)] = {}
if n.parent is not None:
names[(n.name, n.Rank)][n.parent.name] = n.tip_names[0]
return names | b53a50170b3546f8228aa82013545148918155b7 | 3,655,879 |
from typing import Tuple
from typing import cast
def find_closest_integer_in_ref_arr(query_int: int, ref_arr: NDArrayInt) -> Tuple[int, int]:
"""Find the closest integer to any integer inside a reference array, and the corresponding difference.
In our use case, the query integer represents a nanosecond-discretized timestamp, and the
reference array represents a numpy array of nanosecond-discretized timestamps.
Instead of sorting the whole array of timestamp differences, we just
take the minimum value (to speed up this function).
Args:
query_int: query integer,
ref_arr: Numpy array of integers
Returns:
integer, representing the closest integer found in a reference array to a query
integer, representing the integer difference between the match and query integers
"""
closest_ind = np.argmin(np.absolute(ref_arr - query_int))
closest_int = cast(int, ref_arr[closest_ind]) # mypy does not understand numpy arrays
int_diff = np.absolute(query_int - closest_int)
return closest_int, int_diff | 9d0e43d869b94008fb51b1281041538a85d48d7e | 3,655,880 |
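A minimal usage sketch: match a query timestamp against a reference array of nanosecond-discretized timestamps (small integers are used here for brevity; np is numpy as in the original module).
ref_timestamps = np.array([100, 200, 300])
closest, diff = find_closest_integer_in_ref_arr(230, ref_timestamps)
assert (closest, diff) == (200, 30)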
def saver_for_file(filename):
"""
Returns a Saver that can load the specified file, based on the file extension. None if failed to determine.
:param filename: the filename to get the saver for
:type filename: str
:return: the associated saver instance or None if none found
:rtype: Saver
"""
saver = javabridge.static_call(
"weka/core/converters/ConverterUtils", "getSaverForFile",
"(Ljava/lang/String;)Lweka/core/converters/AbstractFileSaver;", filename)
if saver is None:
return None
else:
return Saver(jobject=saver) | 0838a46be5a282849fdf48584e9a8e971b7ef966 | 3,655,881 |
def make(context, name):
"""Create an object in a registered table class.
This function will be stored in that object, so that the new table object
is able to create new table objects in its class.
!!! hint
This is needed when the user wants to insert new records in the table.
Parameters
----------
context: object
The context singleton in which this very function will be stored
under attribute `mkTable`.
name: string
The registered name of the derived table class.
"""
tableObj = factory(name)(context, name)
tableObj.mkTable = make
return tableObj | 2b87aa461f97c1d1e1c6ff9a8c6d4128d8eccbb3 | 3,655,882 |
def cofilter(function, iterator):
"""
Return items in iterator for which `function(item)` returns True.
"""
results = []
def checkFilter(notfiltered, item):
if notfiltered == True:
results.append(item)
def dofilter(item):
d = maybeDeferred(function, item)
d.addCallback(checkFilter, item)
return d
d = _CoFunCaller(resultCollector=dofilter).coiterate(iterator)
d.addCallback(lambda _: results)
return d | 0c14ce3310e1f1a2984b1faf5be21c552ca65b43 | 3,655,883 |
def download_dataset(dataset_name='mnist'):
"""
Load MNIST dataset using keras convenience function
    Args:
        dataset_name (str): which dataset to download ('mnist' or 'binarised_mnist')
    Returns tuple[np.array[float]]:
        (train images, train labels), (test images, test labels)
"""
if dataset_name == 'mnist':
return tf.keras.datasets.mnist.load_data()
elif dataset_name == 'binarised_mnist':
return load_binarised_mnist_data() | c4bda5981acaf1907d46724f217012bf9349e9da | 3,655,884 |
from typing import Callable
from datetime import datetime
def __listen_for_requests_events(node_id, success, measurement: str = 'locust_requests') -> Callable:
"""
Persist request information to influxdb.
:param node_id: The id of the node reporting the event.
:param measurement: The measurement where to save this point.
    :param success: Flag the info as a successful request or not
"""
def event_handler(request_type=None, name=None, response_time=None, response_length=None, exception=None,
**_) -> None:
time = datetime.utcnow()
tags = {
'node_id': node_id,
'request_type': request_type,
'name': name,
'success': success,
'exception': repr(exception),
}
fields = {
'response_time': response_time,
'response_length': response_length,
'counter': 1, # TODO: Review the need of this field
}
point = __make_data_point(measurement, tags, fields, time)
cache.append(point)
return event_handler | de7aef12f2caf77242a863076a2cd8241e611b94 | 3,655,885 |
from typing import Type
from textwrap import dedent
def create_trigger_function_sql(
*,
audit_logged_model: Type[Model],
context_model: Type[Model],
log_entry_model: Type[Model],
) -> str:
"""
    Generate the SQL that creates the trigger function used to log changes.
"""
trigger_function_name = f"{ audit_logged_model._meta.db_table }_log_change"
context_table_name = context_model._meta.db_table # noqa
context_fields = ", ".join(
field.column
for field in context_model._meta.get_fields() # noqa
if isinstance(field, Field) and not isinstance(field, AutoField)
)
log_entry_table_name = log_entry_model._meta.db_table
return dedent(
f"""
CREATE FUNCTION { trigger_function_name }()
RETURNS TRIGGER AS $$
DECLARE
-- Id of the inserted row, used to ensure exactly one row is inserted
entry_id int;
content_type_id int;
BEGIN
SELECT id INTO STRICT content_type_id
FROM django_content_type WHERE
app_label = '{ audit_logged_model._meta.app_label }'
AND model = '{ audit_logged_model._meta.model_name }';
IF (TG_OP = 'INSERT') THEN
INSERT INTO { log_entry_table_name } (
{ context_fields },
action,
at,
changes,
content_type_id,
object_id
) SELECT
{ context_fields },
TG_OP as action,
now() as at,
to_jsonb(NEW.*) as changes,
content_type_id,
NEW.id as object_id
                -- We rely on this table being created by our Django middleware
FROM { context_table_name }
-- We return the id into the variable to make postgresql check
-- that exactly one row is inserted.
RETURNING id INTO STRICT entry_id;
RETURN NEW;
ELSIF (TG_OP = 'UPDATE') THEN
INSERT INTO { log_entry_table_name } (
{ context_fields },
action,
at,
changes,
content_type_id,
object_id
) SELECT
{ context_fields },
TG_OP as action,
now() as at,
(
SELECT
-- Aggregate back to a single jsonb object, with
-- column name as key and the two values in an array.
jsonb_object_agg(
COALESCE(old_row.key, new_row.key),
ARRAY[old_row.value, new_row.value]
)
FROM
-- Select key value pairs from the old and the new
                            -- row, and then join them on the key. This gives
-- us rows with the same key and values from both
-- the old row and the new row.
jsonb_each(to_jsonb(OLD.*)) old_row
FULL OUTER JOIN
jsonb_each(to_jsonb(NEW.*)) new_row
ON old_row.key = new_row.key
WHERE
-- Only select rows that have actually changed
old_row.* IS DISTINCT FROM new_row.*
) as changes,
content_type_id,
NEW.id as object_id
                -- We rely on this table being created by our Django middleware
FROM { context_table_name }
-- We return the id into the variable to make postgresql check
-- that exactly one row is inserted.
RETURNING id INTO STRICT entry_id;
RETURN NEW;
ELSIF (TG_OP = 'DELETE') THEN
INSERT INTO { log_entry_table_name } (
{ context_fields },
action,
at,
changes,
content_type_id,
object_id
) SELECT
{ context_fields },
TG_OP as action,
now() as at,
to_jsonb(OLD.*) as changes,
content_type_id,
OLD.id as object_id
                -- We rely on this table being created by our Django middleware
FROM { context_table_name }
-- We return the id into the variable to make postgresql check
-- that exactly one row is inserted.
RETURNING id INTO STRICT entry_id;
RETURN NEW;
END IF;
END;
$$ language 'plpgsql';
"""
) | 696443cee7752b74542d259d4a223f419462d18f | 3,655,886 |
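A hedged usage sketch: the generated SQL would typically be run from a Django migration; the three model classes named below are hypothetical stand-ins for the registered audit models, not names from the source.
trigger_sql = create_trigger_function_sql(
    audit_logged_model=Order,        # hypothetical audit-logged model
    context_model=AuditContext,      # hypothetical per-request context model
    log_entry_model=AuditLogEntry,   # hypothetical log entry model
)
migrations.RunSQL(trigger_sql)       # e.g. inside a migration's operations list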
def reorder_by_first(*arrays):
"""
Applies the same permutation to all passed arrays,
permutation sorts the first passed array
"""
arrays = check_arrays(*arrays)
order = np.argsort(arrays[0])
return [arr[order] for arr in arrays] | bd9e60cadba4644b06ae55396c7dcae33f1fa1d0 | 3,655,887 |
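A minimal usage sketch for reorder_by_first; it relies on the module's own check_arrays helper to coerce the inputs into equal-length numpy arrays.
ages = np.array([30, 10, 20])
names = np.array(["c", "a", "b"])
sorted_ages, sorted_names = reorder_by_first(ages, names)
# sorted_ages -> array([10, 20, 30]); sorted_names follows the same permutation -> array(['a', 'b', 'c'])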
def embedding_weights(mesh,
vocab_dim,
output_dim,
variable_dtype,
name="embedding",
ensemble_dim=None,
initializer=None):
"""Embedding weights."""
if not ensemble_dim:
ensemble_dim = []
elif not isinstance(ensemble_dim, list):
ensemble_dim = [ensemble_dim]
shape = mtf.Shape(ensemble_dim) + [vocab_dim, output_dim]
if initializer is None:
initializer = tf.random_normal_initializer()
ret = mtf.get_variable(
mesh, name, shape, dtype=variable_dtype, initializer=initializer)
return ret | b89d5a411757d704c57baff6e4a74b7a5807c381 | 3,655,888 |
def generiraj_emso(zenska):
"""Funkcija generira emso stevilko"""
rojstvo = random_date_generator(julijana_zakrajsek)
    # Drop the first digit of the year
emso_stevke = rojstvo[:4] + rojstvo[5:]
if zenska:
        # A somewhat crude simplification of the last three digits: duplicates can occur, and serials with a leading zero are ignored,...
return (emso_stevke + '505' + str(np.random.randint(100, 999)))
else:
return (emso_stevke + '500' + str(np.random.randint(100, 999))) | 89734021fd0d6f863a309b5c23c0a4ee6d385edf | 3,655,889 |
def pdf_markov2(x, y, y_offset=1, nlevels=3):
"""
Compute the empirical joint PDF for two processes of Markov order 2. This
version is a bit quicker than the more general pdf() function.
See the docstring for pdf for more info.
"""
y_offset = np.bool(y_offset)
# out = np.ones((nlevels,)*6, np.uint32)
out = np.zeros((nlevels,) * 5, np.float64)
n = x.size
for tt in xrange(2, x.size):
# out[x[tt], x[tt - 1], x[tt - 2], y[tt], y[tt - 1], y[tt - 2]] += 1
# offset signal y by +1 if we want to allow same-timebin interactions
out[x[tt], x[tt - 1], x[tt - 2],
y[tt - 1 + y_offset], y[tt - 2 + y_offset]] += 1
return out / (n - 2.) | 6d789d1ef9ff88c27f610e9904bdbc27fbe10e5b | 3,655,890 |
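A minimal usage sketch: the empirical joint PDF of two 3-level symbol sequences under the order-2 Markov assumption (an older NumPy/Python 2 environment is assumed, matching the xrange and np.bool calls above).
x_sym = np.array([0, 1, 2, 1, 0, 2, 2, 1])
y_sym = np.array([1, 1, 0, 2, 2, 0, 1, 2])
joint = pdf_markov2(x_sym, y_sym, y_offset=1, nlevels=3)
# joint.shape == (3, 3, 3, 3, 3) and joint.sum() == 1.0 (counts normalised by n - 2)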
import typing
from typing import Union
def cvt_dotted_as_name(node: pytree.Base, ctx: Ctx) -> ast_cooked.Base:
"""dotted_as_name: dotted_name ['as' NAME]"""
assert ctx.is_REF, [node]
dotted_name = xcast(ast_cooked.DottedNameNode, cvt(node.children[0], ctx.to_BARE()))
if len(node.children) == 1:
# `import os.path` creates a binding for `os`.
return ast_cooked.ImportDottedAsNameNode(dotted_name=dotted_name, as_name=None)
else:
as_name = typing.cast(Union[ast_cooked.NameBindsNode, ast_cooked.NameBindsGlobalNode],
cvt(node.children[2], ctx.to_BINDING()))
assert isinstance(
as_name,
(ast_cooked.NameBindsNode, ast_cooked.NameBindsGlobalNode)) # TODO: delete
return ast_cooked.ImportDottedAsNameNode(dotted_name=dotted_name, as_name=as_name) | 5edf3f7c17db85ca43dcf263e30b127fba7f37fc | 3,655,891 |
import os
def train(
model,
data,
epochs=10,
batch_size=100,
lr=0.001,
lr_decay_mul=0.9,
lam_recon=0.392,
save_dir=None,
weights_save_dir=None,
save_freq=100,
):
"""Train a given Capsule Network model.
Args:
model: The CapsuleNet model to train.
data: The dataset that you want to train: ((x_train, y_train), (x_test, y_test)).
epochs: Number of epochs for the training.
batch_size: Size of the batch used for the training.
lr: Initial learning rate value.
lr_decay_mul: The value multiplied by lr at each epoch. Set a larger value for larger epochs.
lam_recon: The coefficient for the loss of decoder (if present).
save_dir: Directory that will contain the logs of the training. `None` if you don't want to save the logs.
weights_save_dir: Directory that will contain the weights saved. `None` if you don't want to save the weights.
save_freq: The number of batches after which weights are saved.
Returns:
The trained model.
"""
# Unpack data
(x_train, y_train), (x_test, y_test) = data
# Understand if the model uses the decoder or not
n_output = len(model.outputs)
# Compile the model
model.compile(
optimizer=optimizers.Adam(lr=lr),
loss=[margin_loss, "mse"] if n_output == 2 else [margin_loss],
loss_weights=[1.0, lam_recon] if n_output == 2 else [1.0],
metrics=["accuracy"],
)
# Define a callback to reduce learning rate
cbacks = [
callbacks.LearningRateScheduler(
schedule=lambda epoch: lr * (lr_decay_mul ** epoch)
)
]
# Define a callback to save training datas
if save_dir:
cbacks.append(callbacks.CSVLogger(os.path.join(save_dir, "training.csv")))
# Define a callback to save weights during the training
if weights_save_dir:
cbacks.append(WeightsSaver(weights_save_dir, save_freq))
# Simple training without data augmentation
model.fit(
x=(x_train, y_train) if n_output == 2 else x_train,
y=(y_train, x_train) if n_output == 2 else y_train,
batch_size=batch_size,
epochs=epochs,
validation_data=((x_test, y_test), (y_test, x_test))
if n_output == 2
else (x_test, y_test),
callbacks=cbacks,
)
# Save final weights at the end of the training
if weights_save_dir:
model.save_weights(os.path.join(weights_save_dir, "trained.h5"))
return model | 4cb8b8555c3fd1c62d2cf5741471cbf757029054 | 3,655,892 |
def check_if_recipe_skippable(recipe, channels, repodata_dict, actualname_to_idname):
"""
check_if_recipe_skippable
=========================
Method used to check if a recipe should be skipped or not.
Skip criteria include:
- If the version of the recipe in the channel repodata is greater than or equal to the query recipe.
- If the query recipe's version and build are equal to or less than the recipe in the repodata
    Non-skip criteria include:
- Opposite of skip criteria
- If the recipe is not in any channel
Parameters:
-----------
1) recipe: (str) The directory path to the query recipe
2) channels: (list) A list of channels to check against
3) repodata_dict: (dict) A dictionary of repodata by channel (From get_repodata() method)
    4) actualname_to_idname: (dict) Dict mapping recipe names (keys) to their id names in the repodata_dict. (From get_repodata() method)
Returns:
++++++++
- Return True if recipe building is skippable
- Return False if recipe building cannot be skipped
"""
platform, metas = load_platform_metas(recipe, finalize=False)
# The recipe likely defined skip: True
if not metas:
return True
## Get each packages name, version, and build number
packages = set(
(meta.name(), float(meta.version()), float(meta.build_number() or 0))
for meta in metas
)
for name, version, build_num in packages:
present = False
for c in channels:
## Check for the recipe in one of the channel's repodata
if name in actualname_to_idname[c].keys():
## Find the newest/highest versioned and build package
present = True
cur_version = -1.0
cur_build = -1.0
for pkg_tar in actualname_to_idname[c][name]:
repo_version = float(repodata_dict[c][pkg_tar]["version"])
repo_build_number = float(repodata_dict[c][pkg_tar]["build_number"])
                    ## If the version is greater than the previous version, reset values with this package
                    if repo_version > cur_version:
                        cur_version = repo_version
                        cur_build = repo_build_number
                    ## If the version is the same but the build number is greater, reset values with this package
                    elif repo_version == cur_version and repo_build_number > cur_build:
cur_build = repo_build_number
                ## Check if the query package is newer than what is reported in the repodata or not
## If the query package's version is greater than the best in the repodata, update recipe
if cur_version < version:
return False
                ## If the query package has the same version but a greater build number than the best in the repodata, update the recipe
elif cur_version == version and cur_build < build_num:
return False
## If package not already in the repodata
if not present:
return False
print(
":ggd:build recipes: FILTER: not building recipe {} because the version and/or build number match what is already in the channel and not forced".format(
recipe
)
)
return True | 604fdcf86ec45826f53fd837d165b234e9d11d91 | 3,655,893 |
import types
def ensure_csv_detections_file(
folder: types.GirderModel, detection_item: Item, user: types.GirderUserModel
) -> types.GirderModel:
"""
Ensures that the detection item has a file which is a csv.
Attach the newly created .csv to the existing detection_item.
:returns: the file document.
TODO: move this to the training job code instead of keeping it
in the request thread
"""
filename, gen = crud.get_annotation_csv_generator(folder, user, excludeBelowThreshold=True)
filename = slugify(filename)
csv_bytes = ("".join([line for line in gen()])).encode()
new_file = File().createFile(
user,
detection_item,
filename,
len(csv_bytes),
Assetstore().getCurrent(),
reuseExisting=True,
)
upload = Upload().createUploadToFile(new_file, user, len(csv_bytes))
new_file = Upload().handleChunk(upload, csv_bytes)
return new_file | 1b9b9a3dff8eedd51193db023b71948118e0fd79 | 3,655,894 |
def hello(name=None):
"""Assuming that name is a String and it checks for user typos to return a name with a first capital letter (Xxxx).
Args:
name (str): A persons name.
Returns:
str: "Hello, Name!" to a given name, or says Hello, World! if name is not given (or passed as an empty String).
"""
return "Hello, World!" if name is None or not name else "Hello, {}!".format(name.title()) | f1aafbebd49507fd5417d8752f98ae7d0af8ec33 | 3,655,895 |
def computePCA(inputMatrix, n_components=None):
"""Compute Principle Component Analysis (PCA) on feature space. n_components specifies the number of dimensions in the transformed basis to keep."""
pca_ = PCA(n_components)
pca_.fit(inputMatrix)
return pca_ | 4061f998bfca9ed294b312ae746a63ea0eef8438 | 3,655,896 |
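A minimal usage sketch for computePCA, assuming scikit-learn's PCA is the class imported in the original module and numpy is available as np.
features = np.random.rand(100, 10)
pca_model = computePCA(features, n_components=3)
reduced = pca_model.transform(features)   # shape (100, 3), ordered by explained variance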
def tag(repo, subset, x):
"""The specified tag by name, or all tagged revisions if no name is given.
Pattern matching is supported for `name`. See
:hg:`help revisions.patterns`.
"""
# i18n: "tag" is a keyword
args = getargs(x, 0, 1, _("tag takes one or no arguments"))
cl = repo.changelog
if args:
pattern = getstring(args[0],
# i18n: "tag" is a keyword
_('the argument to tag must be a string'))
kind, pattern, matcher = stringutil.stringmatcher(pattern)
if kind == 'literal':
# avoid resolving all tags
tn = repo._tagscache.tags.get(pattern, None)
if tn is None:
raise error.RepoLookupError(_("tag '%s' does not exist")
% pattern)
s = {repo[tn].rev()}
else:
s = {cl.rev(n) for t, n in repo.tagslist() if matcher(t)}
else:
s = {cl.rev(n) for t, n in repo.tagslist() if t != 'tip'}
return subset & s | d4ceadb7ef03ae6ed950c60c7bbf06b4d26f8671 | 3,655,897 |
def _embed_json(service, targetid):
"""
Returns oEmbed JSON for a given URL and service
"""
return d.http_get(_OEMBED_MAP[service] % (urlquote(targetid),)).json() | 347d38e2b4f69c853e8085308e334b7cc778d4ad | 3,655,898 |
import re
def is_blank(s):
"""Returns True if string contains only space characters."""
return re.search(reNonSpace, s) is None | 40b4ec62a2882d100b80fd951c6b9e4d31220581 | 3,655,899 |