from flask import flash, redirect, request, url_for
from flask_login import login_user
def home():
"""Home page."""
form = LoginForm(request.form)
# Handle logging in
if request.method == 'POST':
if form.validate_on_submit():
login_user(form.user)
flash('You are logged in.', 'success')
redirect_url = request.args.get('next') or url_for('user.members')
return redirect(redirect_url)
else:
flash_errors(form)
return dict(form=form) | 4bed46f095b31a61746c382460b6f477a4aa215e | 3,654,900 |
def countingsort(A):
"""
Sort the list A. A has to be a list of integers.
Every element of the list A has to be non-negative.
@param A: the list that should get sorted
@return the sorted list
"""
if len(A) == 0:
return []
C = [0] * (max(A)+1)
B = [""] * len(A)
# Count the number of elements
for el in A:
C[el] += 1
# Now C[i] contains how often i is in A
for index in range(1, len(C)):
C[index] += C[index-1]
for el in A[::-1]:
B[C[el]-1] = el
C[el] -= 1
return B | ebdaac4580f910873f77878978b57e193334a4ea | 3,654,901 |
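A minimal usage sketch of the counting-sort record above (assuming countingsort is in scope):
data = [4, 2, 2, 8, 3, 3, 1]
print(countingsort(data))  # [1, 2, 2, 3, 3, 4, 8]
print(countingsort([]))    # []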
import math
def calc_obstacle_map(ox, oy, resolution, vr):
"""
Build obstacle map according to the distance of a
certain grid to obstacles. Treat the area near the
obstacle within the turning radius of the vehicle
as the obstacle blocking area and mark it as TRUE.
"""
min_x = round(min(ox))
min_y = round(min(oy))
max_x = round(max(ox))
max_y = round(max(oy))
x_width = round(max_x - min_x)
y_width = round(max_y - min_y)
# obstacle map generation
obstacle_map = [[False for _ in range(y_width)] for _ in range(x_width)]
for ix in range(x_width):
x = ix + min_x
for iy in range(y_width):
y = iy + min_y
# print(x, y)
for iox, ioy in zip(ox, oy):
d = math.sqrt((iox - x)**2 + (ioy - y)**2)
if d * resolution <= vr:
obstacle_map[ix][iy] = True
break
return obstacle_map, min_x, min_y, max_x, max_y, x_width, y_width | 87d44c5eb799bf3b2ea64ac0717b8d7f260a4a37 | 3,654,902 |
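A hypothetical call to calc_obstacle_map with illustrative values only: two obstacle points, a 1 m grid resolution, and a 1 m blocking radius.
ox, oy = [0.0, 5.0], [0.0, 5.0]
omap, min_x, min_y, max_x, max_y, xw, yw = calc_obstacle_map(ox, oy, resolution=1.0, vr=1.0)
print(xw, yw)      # 5 5 (grid dimensions)
print(omap[0][0])  # True: cell (0, 0) lies within vr of the obstacle at (0, 0)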
import itertools
import numpy as np
import polytope
from polytope import extreme, qhull
def dilate(poly, eps):
"""
The function dilates a polytope.
For a given polytope a polytopic over-approximation of the eps-dilated set is computed.
An eps-dilated set Pe of P is defined as:
Pe = {x+n | x in P and n in Ball(eps)}
where Ball(eps) is the epsilon neighborhood with norm |n| < eps.
The current implementation is quite crude: hyper-boxes are placed over the original vertices
and the returned polytope is a qhull of these new vertices.
:param poly: original polytope
:param eps: positive scalar value with which the polytope is dilated
:return: polytope
"""
if isinstance(poly,polytope.Region):
dil_reg = []
for pol in poly.list_poly:
assert isinstance(pol,polytope.Polytope)
dil_reg += [dilate(pol, eps)]
return polytope.Region(dil_reg)
vertices = extreme(poly)
dim = len(vertices[0]) # this is the dimensionality of the space
dil_eps = dim * [[-eps,eps]]
dil_eps_v = [np.array(n) for n in itertools.product(*dil_eps)] # vectors with (+- eps,+- eps, +- eps,...)
new_vertices = []
for v,d in itertools.product(vertices,dil_eps_v):
new_vertices += [[np.array(v).flatten() + np.array(d).flatten()]]
# make box
# print("add vertices part:", np.array(v).flatten() + np.array(d).flatten())
VV = np.concatenate(new_vertices)
# print("V", VV)
return qhull(VV) | 0ae4d8ea9cb6977939e4d3bed6454ed55e8855cf | 3,654,903 |
import dask
import pandas as pd
def read_files_to_vardf(map_df, df_dict, gridclimname, dataset, metadata,
file_start_date, file_end_date, file_delimiter,
file_time_step, file_colnames,
subset_start_date, subset_end_date, min_elev, max_elev, variable_list=None):
"""
Reads in the files to generate variable dataframes.
map_df: (dataframe) the mappingfile clipped to the subset that will be read-in
df_dict: (dict) an existing dictionary where new computations will be stored
gridclimname: (str) the suffix for the dataset to be named; if None provided, default to dataset name
dataset: (str) the name of the dataset catalogged into map_df
metadata: (str) the dictionary that contains the metadata explanations; default is None
file_start_date: (date) the start date of the files that will be read-in; default is None
file_end_date: (date) the end date for the files that will be read in; default is None
file_delimiter: (str) a file parsing character to be used for file reading
file_time_step: (str) the timedelta code for the difference between time points; default is 'D' (daily)
file_colnames: (list) the list of shorthand variables; default is None
subset_start_date: (date) the start date of a date range of interest
subset_end_date: (date) the end date of a date range of interest
min_elev: (float) minimum elevation permitted
max_elev: (float) maximum elevation permitted
"""
# start time
starttime = pd.Timestamp.now()
# date range from ogh_meta file
met_daily_dates=pd.date_range(file_start_date, file_end_date, freq=file_time_step)
met_daily_subdates=pd.date_range(subset_start_date, subset_end_date, freq=file_time_step)
# omit null entries or missing data file
map_df = map_df.loc[pd.notnull(map_df[dataset]), :]
print('Number of data files within elevation range ({0}-{1} m): {2}'.format(min_elev, max_elev, len(map_df)))
# establish default list of variables
if variable_list is None:
variable_list = metadata[dataset]['variable_list']
# iterate through each data file
for eachvar in metadata[dataset]['variable_list']:
# exclude YEAR, MONTH, and DAY
if eachvar not in ['YEAR', 'MONTH', 'DAY'] and eachvar in variable_list:
# identify the variable column index
usecols = [metadata[dataset]['variable_list'].index(eachvar)]
# initiate df as a list
df_list=[]
# loop through each file
for ind, row in map_df.iterrows():
# consider rewriting the params to just select one column by index at a time
var_series = dask.delayed(pd.read_table)(filepath_or_buffer=row[dataset],
delimiter=file_delimiter,header=None,usecols=usecols,
names=[tuple(row[['FID', 'LAT', 'LONG_']])])
# append the series into the list of series
df_list.append(var_series)
# concatenate list of series (axis=1 is column-wise) into a dataframe
df1 = dask.delayed(pd.concat)(df_list, axis=1)
# set and subset date_range index
df2 = df1.set_index(met_daily_dates, inplace=False).loc[met_daily_subdates]
# assign dataframe to dictionary object
df_dict['_'.join([eachvar, gridclimname])] = dask.compute(df2)[0]
print(eachvar + ' dataframe reading complete: ' + str(pd.Timestamp.now() - starttime))
return df_dict | 31bc460eb0035d3bbd51f266c96a53f537495a53 | 3,654,904 |
import pickle
def read_file(pickle_file_name):
"""Reads composite or non-composite class-activation maps from Pickle file.
:param pickle_file_name: Path to input file (created by
`write_standard_file` or `write_pmm_file`).
:return: gradcam_dict: Has the following keys if not a composite...
gradcam_dict['denorm_predictor_matrices']: See doc for
`write_standard_file`.
gradcam_dict['cam_matrices']: Same.
gradcam_dict['guided_cam_matrices']: Same.
gradcam_dict['full_storm_id_strings']: Same.
gradcam_dict['storm_times_unix_sec']: Same.
gradcam_dict['model_file_name']: Same.
gradcam_dict['target_class']: Same.
gradcam_dict['target_layer_name']: Same.
gradcam_dict['sounding_pressure_matrix_pa']: Same.
...or the following keys if composite...
gradcam_dict['mean_denorm_predictor_matrices']: See doc for
`write_pmm_file`.
gradcam_dict['mean_cam_matrices']: Same.
gradcam_dict['mean_guided_cam_matrices']: Same.
gradcam_dict['model_file_name']: Same.
gradcam_dict['non_pmm_file_name']: Same.
gradcam_dict['pmm_max_percentile_level']: Same.
gradcam_dict['mean_sounding_pressures_pa']: Same.
:return: pmm_flag: Boolean flag. True if `gradcam_dict` contains
composite, False otherwise.
:raises: ValueError: if dictionary does not contain expected keys.
"""
pickle_file_handle = open(pickle_file_name, 'rb')
gradcam_dict = pickle.load(pickle_file_handle)
pickle_file_handle.close()
pmm_flag = MEAN_PREDICTOR_MATRICES_KEY in gradcam_dict
if pmm_flag:
missing_keys = list(
set(PMM_FILE_KEYS) - set(gradcam_dict.keys())
)
else:
missing_keys = list(
set(STANDARD_FILE_KEYS) - set(gradcam_dict.keys())
)
if len(missing_keys) == 0:
return gradcam_dict, pmm_flag
error_string = (
'\n{0:s}\nKeys listed above were expected, but not found, in file '
'"{1:s}".'
).format(str(missing_keys), pickle_file_name)
raise ValueError(error_string) | 3f2f7fb1a5a904f494e64f840f6a8d6ae207c900 | 3,654,905 |
def tacodev(val=None):
"""a valid taco device"""
if val in ('', None):
return ''
val = str(val)
if not tacodev_re.match(val):
raise ValueError('%r is not a valid Taco device name' % val)
return val | 4cffd52f9e7673ad45e697aadfbb3515ecd3d209 | 3,654,906 |
import torch
import os
def VioNet_densenet(config, home_path):
"""
Load DENSENET model
config.device
config.pretrained_model
config.sample_size
config.sample_duration
"""
device = config.device
ft_begin_idx = config.ft_begin_idx
sample_size = config.sample_size[0]
sample_duration = config.sample_duration
model = densenet121(num_classes=2,
sample_size=sample_size,
sample_duration=sample_duration).to(device)
# state_dict = torch.load(g_path +'/VioNet/'+ 'weights/DenseNet_Kinetics.pth')
state_dict = torch.load(os.path.join(home_path, VIONET_WEIGHTS, 'DenseNet_Kinetics.pth'))
model.load_state_dict(state_dict)
params = dn.get_fine_tuning_params(model, ft_begin_idx)
return model, params | bdaa386b68d41190b4bf711ece571df14bb8012d | 3,654,907 |
import tensorflow as tf
def decode_layout_example(example, input_range=None):
"""Given an instance and raw labels, creates <inputs, label> pair.
Decoding includes:
1. Converting images from uint8 [0, 255] to [0, 1.] float32.
2. Mean subtraction and standardization using hard-coded mean and std.
3. Convert boxes from yxyx [0-1] to xyxy un-normalized.
4. Add 1 to all labels to account for background/padding object at label 0.
5. Shuffling dictionary keys to be consistent with the rest of the code.
Args:
example: dict; Input image and raw labels.
input_range: tuple; Range of input. By default we use Mean and StdDev
normalization.
Returns:
A dictionary of {'inputs': input image, 'labels': task label}.
"""
image = tf.image.convert_image_dtype(example['image'], dtype=tf.float32)
# Normalize.
if input_range:
image = image * (input_range[1] - input_range[0]) + input_range[0]
else:
mean_rgb = tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=tf.float32)
std_rgb = tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=tf.float32)
image = (image - mean_rgb) / std_rgb
boxes = example['objects']['boxes']
target = {
'boxes': boxes,
'labels': example['objects']['label'] + 1, # 0'th class is padding.
'binary_labels': example['objects']['binary_label'] + 1,
'desc_id': example['objects']['desc_id'],
'resource_id': example['objects']['resource_id'],
'name_id': example['objects']['name_id'],
'obj_mask': example['objects']['obj_mask'],
}
# Filters objects to exclude degenerate boxes.
valid_bbx = tf.logical_and(boxes[:, 2] > boxes[:, 0],
boxes[:, 3] > boxes[:, 1])
# -1 is ROOT node, remove it for training & eval.
valid_node = tf.greater(example['objects']['label'], -1)
keep = tf.where(tf.logical_and(valid_bbx, valid_node))[:, 0]
target_kept = {k: tf.gather(v, keep) for k, v in target.items()}
target_kept['orig_size'] = tf.cast(tf.shape(image)[0:2], dtype=tf.int32)
target_kept['size'] = tf.identity(target_kept['orig_size'])
return {
'inputs': image,
'label': target_kept,
} | a54b26a8b4d82a6a9e5bc093f9f59b7a74450916 | 3,654,908 |
import plotly.figure_factory as ff
def bact_plot(samples, bacteroidetes, healthiest_sample):
"""
Returns a graph of the distribution of the sample data.
Parameters
==========
samples : pandas.DataFrame
The sample data frame. Must contain columns `Bacteroidetes` and
`Firmicutes` that contain the percentage of those phyla.
Returns
=======
plotly graph
"""
hist_data = [samples["Bacteroidetes"]]
group_labels = ["Bacteroidetes"]
bact = ff.create_distplot(hist_data, group_labels, show_hist=False)
bact["layout"].update(title="Bacteroidetes Sample Distribution ")
bact["layout"].update(
showlegend=False,
annotations=[
dict(
x=bacteroidetes,
y=0,
xref="x",
yref="y",
text="You are here!",
showarrow=True,
arrowhead=2,
arrowsize=1,
arrowwidth=2,
arrowcolor="#0e0f36",
ax=70,
ay=-30,
bordercolor="#06a300",
borderwidth=2,
borderpad=4,
bgcolor="#69f564",
opacity=0.8,
),
dict(
x=healthiest_sample["Bacteroidetes"],
y=0,
xref="x",
yref="y",
text="Healthiest Sample",
showarrow=True,
arrowhead=2,
arrowsize=1,
arrowwidth=2,
arrowcolor="#0e0f36",
ax=70,
ay=30,
bordercolor="#4c0acf",
borderwidth=2,
borderpad=4,
bgcolor="#b977f2",
opacity=0.8,
),
],
)
return bact | d21bb3bd534f92cc6eed3bb467fe355abcf1afd2 | 3,654,909 |
import os
def dynamic_upload_to(instance, filename):
"""
Decide the storage directory according to the link type.
"""
file_dir = (LogoImgRelatedDirEnum.APP.value
if instance.link_type == LinkTypeEnum.LIGHT_APP.value else LogoImgRelatedDirEnum.ICON.value)
return os.path.join(file_dir, filename) | ce8ca6083a8014d7613b7ffa09a1c0c8f0cb0323 | 3,654,910 |
def xdraw_lines(lines, **kwargs):
"""Draw lines and optionally set individual name, color, arrow, layer, and
width properties.
"""
guids = []
for l in lines:
sp = l['start']
ep = l['end']
name = l.get('name', '')
color = l.get('color')
arrow = l.get('arrow')
layer = l.get('layer')
width = l.get('width')
guid = add_line(Point3d(*sp), Point3d(*ep))
if not guid:
continue
obj = find_object(guid)
if not obj:
continue
attr = obj.Attributes
if color:
attr.ObjectColor = FromArgb(*color)
attr.ColorSource = ColorFromObject
else:
attr.ColorSource = ColorFromLayer
if arrow == 'end':
attr.ObjectDecoration = EndArrowhead
if arrow == 'start':
attr.ObjectDecoration = StartArrowhead
if layer and find_layer_by_fullpath:
index = find_layer_by_fullpath(layer, True)
if index >= 0:
attr.LayerIndex = index
if width:
attr.PlotWeight = width
attr.PlotWeightSource = PlotWeightFromObject
attr.Name = name
obj.CommitChanges()
guids.append(guid)
return guids | bebeb2d400ed8c779281b67f01007e953f15460f | 3,654,911 |
def _emit_params_file_action(ctx, path, mnemonic, cmds):
"""Helper function that writes a potentially long command list to a file.
Args:
ctx (struct): The ctx object.
path (string): the file path where the params file should be written.
mnemonic (string): the action mnemonic.
cmds (list<string>): the command list.
Returns:
(File): an executable file that runs the command set.
"""
filename = "%s.%sFile.params" % (path, mnemonic)
f = ctx.new_file(ctx.configuration.bin_dir, filename)
ctx.file_action(output = f,
content = "\n".join(["set -e"] + cmds),
executable = True)
return f | adafb75e24b2023ad2926e4248e8b2e1e6966b8e | 3,654,912 |
import gettext
def annotate_validation_results(results, parsed_data):
"""Annotate validation results with potential add-on restrictions like
denied origins."""
if waffle.switch_is_active('record-install-origins'):
denied_origins = sorted(
DeniedInstallOrigin.find_denied_origins(parsed_data['install_origins'])
)
for origin in denied_origins:
insert_validation_message(
results,
message=gettext(
'The install origin {origin} is not permitted.'.format(
origin=origin
)
),
)
return results | 659ec92f98c2678de2ee8f2552da77c5394047c5 | 3,654,913 |
import sys
import subprocess
import threading
from io import StringIO
from queue import Empty, Queue
import time
def RunCommand(command,
input=None,
pollFn=None,
outStream=None,
errStream=None,
killOnEarlyReturn=True,
verbose=False,
debug=False,
printErrorInfo=False):
"""
Run a command, with optional input and polling function.
Args:
command: list of the command and its arguments.
input: optional string of input to feed to the command, it should be
short enough to fit in an i/o pipe buffer.
pollFn: if present will be called occasionally to check if the command
should be finished early. If pollFn() returns true then the command
will finish early.
outStream: if present, the stdout output of the command will be written to
outStream.
errStream: if present, the stderr output of the command will be written to
errStream.
killOnEarlyReturn: if true and pollFn returns true, then the subprocess will
be killed, otherwise the subprocess will be detached.
verbose: if true, the command is echoed to stderr.
debug: if true, prints debugging information to stderr.
printErrorInfo: if true, prints error information when the subprocess
returns a non-zero exit code.
Returns: the output of the subprocess.
Exceptions:
Raises Error if the subprocess returns an error code.
Raises ValueError if called with invalid arguments.
"""
if verbose:
sys.stderr.write("command %s\n" % command)
stdin = None
if input:
stdin = subprocess.PIPE
try:
process = subprocess.Popen(
args=command,
stdin=stdin,
bufsize=1,
universal_newlines=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except OSError as e:
if not isinstance(command, str):
command = ' '.join(command)
if printErrorInfo:
sys.stderr.write("Command failed: '%s'\n" % command)
raise Error(e)
def StartThread(out):
queue = Queue()
def EnqueueOutput(out, queue):
for line in iter(out.readline, ''):
queue.put(line)
out.close()
thread = threading.Thread(target=EnqueueOutput, args=(out, queue))
thread.daemon = True
thread.start()
return queue
outQueue = StartThread(process.stdout)
errQueue = StartThread(process.stderr)
def ReadQueue(queue, out, out2):
try:
while True:
line = queue.get(False)
out.write(line)
if out2 is not None:
out2.write(line)
except Empty:
pass
outBuf = StringIO()
errorBuf = StringIO()
if input:
process.stdin.write(input)
while True:
returncode = process.poll()
if returncode is not None:
break
ReadQueue(errQueue, errorBuf, errStream)
ReadQueue(outQueue, outBuf, outStream)
if pollFn is not None and pollFn():
returncode = 0
if killOnEarlyReturn:
process.kill()
break
time.sleep(0.1)
# Drain queue
ReadQueue(errQueue, errorBuf, errStream)
ReadQueue(outQueue, outBuf, outStream)
out = outBuf.getvalue()
error = errorBuf.getvalue()
if returncode:
if not isinstance(command, str):
command = ' '.join(command)
if printErrorInfo:
sys.stderr.write("Command failed: '%s'\n" % command)
sys.stderr.write(" stdout: '%s'\n" % out)
sys.stderr.write(" stderr: '%s'\n" % error)
sys.stderr.write(" returncode: %d\n" % returncode)
raise Error("Command failed: %s" % command)
if debug:
sys.stderr.write("output: %s\n" % out)
return out | 4ad8a94f6079c33bfbd64e4d975b8c50317e03e9 | 3,654,914 |
import textwrap
def ignore_firstline_dedent(text: str) -> str:
"""Like textwrap.dedent(), but ignore first empty lines
Args:
text: The text the be dedented
Returns:
The dedented text
"""
out = []
started = False
for line in text.splitlines():
if not started and not line.strip():
continue
if not started:
started = True
out.append(line)
return textwrap.dedent("\n".join(out)) | 04bde49e72e07552f2f88e9112546d00b85a2879 | 3,654,915 |
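A quick check of ignore_firstline_dedent: the leading blank line is dropped before the common indentation is removed.
text = """
    first
    second
"""
print(repr(ignore_firstline_dedent(text)))  # 'first\nsecond'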
def read_file(filename):
"""
Read a file and return its binary content.
@param filename : filename as string.
@return data as bytes
"""
with open(filename, mode='rb') as file:
file_content = file.read()
return file_content | 2417aa5cfa0d43303f9f6103e8b1fee9e8d652e2 | 3,654,916 |
def getdictkeys(value):
"""
Returns the ordered keys of a dict
"""
if isinstance(value, dict):
keys = list(value.keys())
keys.sort(key=toint)
return keys
return [] | adf49dbfa46f5174aa1435756c6e099b08b7c6c9 | 3,654,917 |
def exp_lr_scheduler(optimizer, epoch, init_lr=5e-3, lr_decay_epoch=40):
"""Decay learning rate by a factor of 0.1 every lr_decay_epoch epochs."""
lr = init_lr * (0.1**(epoch // lr_decay_epoch))
if epoch % lr_decay_epoch == 0:
print('LR is set to {}'.format(lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return optimizer | 520a7960ee589e033920cf182d75ea896cc8b8b7 | 3,654,918 |
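A worked example of the step-decay schedule above: with init_lr=5e-3 and lr_decay_epoch=40, the rate is init_lr * 0.1 ** (epoch // 40).
for epoch in (0, 39, 40, 80):
    print(epoch, 5e-3 * 0.1 ** (epoch // 40))
# 0 0.005 / 39 0.005 / 40 0.0005 / 80 5e-05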
import datetime
import os
def _check_draw_graph(graph_file):
"""Check whether the specified graph file should be redrawn.
Currently we use the following heuristic: (1) if graph is older than N
minutes, redraw it; (2) if admin has active session(s), redraw on every
cron run (we detect this by ajax active timestamp).
We could also redraw if an interesting parameter has changed (user or s2s
count, license limits, timezones, etc). But because these entail RRD accesses,
we just use the simpler heuristic above.
"""
now = datetime.datetime.utcnow()
# consider ajax "off-line" if timestamp older than this (or negative)
ajax_limit = datetime.timedelta(0, 5*60, 0) # XXX: constants?
# redraw graphs if graph age below zero or over limit below
graph_zero = datetime.timedelta(0, 0, 0)
graph_maxage = datetime.timedelta(0, 15*60, 0) # XXX: constants?
if helpers.check_marker_file(constants.WEBUI_ADMIN_ACTIVE_TIMESTAMP):
dt = helpers.read_datetime_marker_file(constants.WEBUI_ADMIN_ACTIVE_TIMESTAMP)
diff = now - dt
if diff < ajax_limit:
# ajax active, draw
_log.info('ajax active, redraw graph %s' % graph_file)
return True
else:
# fall through, check graph file
pass
if os.path.exists(graph_file):
mtime = datetime.datetime.utcfromtimestamp(os.stat(graph_file).st_mtime)
diff = now - mtime
if (diff < graph_zero) or (diff > graph_maxage):
# bogus or too old, redraw
_log.info('graph too old, redraw graph %s' % graph_file)
return True
else:
_log.info('graph not too old, skipping redraw for %s' % graph_file)
return False
# no graph file, redraw always
_log.info('graph does not exist, redraw graph %s' % graph_file)
return True | 8a1b48f3e38c4cd6e52a6515e11088cb1c35eb0f | 3,654,919 |
import cv2
import numpy as np
def random_shadow(image):
"""
Function to add shadow in images randomly at random places, Random shadows meant to make the Convolution model learn
Lanes and lane curvature patterns effectively in dissimilar places.
"""
if np.random.rand() < 0.5:
# (x1, y1) and (x2, y2) forms a line
# xm, ym gives all the locations of the image
x1, y1 = image.shape[1] * np.random.rand(), 0
x2, y2 = image.shape[1] * np.random.rand(), image.shape[0]
xm, ym = np.mgrid[0:image.shape[0], 0:image.shape[1]]
mask = np.zeros_like(image[:, :, 1])
mask[(ym - y1) * (x2 - x1) - (y2 - y1) * (xm - x1) > 0] = 1
# choose which side should have shadow and adjust saturation
cond = mask == np.random.randint(2)
s_ratio = np.random.uniform(low=0.2, high=0.5)
# adjust Saturation in HLS(Hue, Light, Saturation)
hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
hls[:, :, 1][cond] = hls[:, :, 1][cond] * s_ratio
return cv2.cvtColor(hls, cv2.COLOR_HLS2RGB)
else:
return image | 118fbffa04bbd3551eff3f4298ba14235b00b7c3 | 3,654,920 |
def build_channel_header(type, tx_id, channel_id,
timestamp, epoch=0, extension=None,
tls_cert_hash=None):
"""Build channel header.
Args:
type (common_pb2.HeaderType): type
tx_id (str): transaction id
channel_id (str): channel id
timestamp (grpc.timestamp): timestamp
epoch (int): epoch
extension: extension
tls_cert_hash (bytes): tls certificate hash
Returns:
common_proto.Header instance
"""
channel_header = common_pb2.ChannelHeader()
channel_header.type = type
channel_header.version = 1
channel_header.channel_id = proto_str(channel_id)
channel_header.tx_id = proto_str(tx_id)
channel_header.epoch = epoch
channel_header.timestamp.CopyFrom(timestamp)
if tls_cert_hash:
channel_header.tls_cert_hash = tls_cert_hash
if extension:
channel_header.extension = extension
return channel_header | cfd7524de77a61fe75d3b3be58e2ebde4d743393 | 3,654,921 |
def get_character(data, index):
"""Return one byte from data as a signed char.
Args:
data (list): raw data from sensor
index (int): index entry from which to read data
Returns:
int: extracted signed char value
"""
result = data[index]
if result > 127:
result -= 256
return result | 5a08102cb9dc8ae7e2adcab9b5653b77ee2c6ae3 | 3,654,922 |
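The two's-complement conversion performed by get_character, shown on two sample bytes; values above 127 wrap to negative signed chars.
data = [0x10, 0xF0]
print(get_character(data, 0))  # 16
print(get_character(data, 1))  # -16 (0xF0 = 240 -> 240 - 256)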
import numpy as np
def df_to_embed(df, img_folder):
""" Extract image embeddings, sentence embeddings and concatenated embeddings from dataset and image folders
:param df: dataset file to use
:param img_folder: folder where the corresponding images are stored
:return: tuple containing sentence embeddings, image embeddings, concatenated embeddings
"""
sent_embed = extract_all_sentences(df)
img_embed = extract_all_images("xception", img_folder)
concat = np.concatenate((sent_embed, img_embed), axis=1)
return sent_embed, img_embed, concat | cda55f06a74c1b0475bc6a9e35e657b4f3ce0392 | 3,654,923 |
from os import path
import pickle
def load_release_data():
"""
Load the release data. This always prints a warning if the release data
contains any release data.
:return:
"""
filen = path.join(PATH_ROOT, PATH_RELEASE_DATA)
try:
with open(filen, "rb") as in_file:
data = pickle.load(in_file)
if data:
print_warning("You are continuing an existing release. If this "
"an error, delete the release data file and try "
"again. "
"Filename = %s" % filen)
return data
except Exception:
return {} | 9ccc7d95638f2af8702065d58efd52016da27dc7 | 3,654,924 |
import random
def generate_player_attributes():
"""
Return a list of 53 dicts with player attributes
that map to Player model fields.
"""
# Get player position distribution
position_dist = get_position_distribution()
# Get player attribute distribution
attr_dist = get_attribute_distribution()
# Get player names from CSV
player_names = read_player_names_from_csv()
player_list = []
# Generate 53 players per team
for roster_spot in range(0, 53):
player = {}
# Set player names from parsed CSV data
player['first_name'] = player_names[roster_spot][0]
player['last_name'] = player_names[roster_spot][1]
# Only assign player a position that isn't filled on the roster
for pos, dist in position_dist.items():
if dist[0] < dist[1]:
player['position'] = pos
# Pick a random prototype based on position
player['prototype'] = random.choice(list(attr_dist[pos]))
dist[0] += 1
break
else:
continue
# Assign player ages based on normal distribution
player['age'] = int(random.gauss(1, 0.1) * random.randint(25, 35))
default_rookie_age = 22
player['experience'] = player['age'] - default_rookie_age
if player['age'] < 22:
player['experience'] = 0
# Generate ratings based on weights and normal distribution
base_rating = int(random.gauss(70, 20))
position, prototype = player['position'], player['prototype']
pos_weights = attr_dist[position][prototype]
# Apply position and prototype weights
after_pos_weights = []
for pw in range(len(pos_weights)):
after_pos_weights.append(pos_weights[pw] + base_rating)
# Sigmas for standard deviation
sigmas = [20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20]
final_ratings = list(map(random.gauss, after_pos_weights, sigmas))
i = 0
calc_overall = []
# Assign final ratings to player key
for attribute in ('potential', 'confidence', 'iq',
'speed', 'strength', 'agility',
'awareness', 'stamina', 'injury',
'run_off', 'pass_off', 'special_off',
'run_def', 'pass_def', 'special_def'):
rating = int(final_ratings[i])
if rating > 99:
rating = 99
elif rating < 0:
rating = 0
player[attribute] = rating
calc_overall.append(rating)
i += 1
# Calculate overall rating and add player to list
player['overall_rating'] = int(sum(calc_overall) / len(calc_overall))
player_list.append(player)
return player_list | 57c16d998348b9db1384dc98412bd69e62d0c73d | 3,654,925 |
def colour_from_loadings(loadings, maxLoading=None, baseColor="#FF0000"):
"""Computes colors given loading values.
Given an array of loading values (loadings), returns an array of
colors that graphviz can understand that can be used to colour the
nodes. The node with the greatest loading uses baseColor, and a node
with zero loading uses white (#FFFFFF).
This is achieved through clever sneaky use of the alpha channel."""
if maxLoading is None:
maxLoading = max(loadings)
return [baseColor + format(int(loading / maxLoading * 255), '02x')
for loading in loadings] | 8bd65e5b4aa54558d3710a8518bbbe6400559046 | 3,654,926 |
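A small sketch of the alpha-channel trick used by colour_from_loadings: each loading is scaled to 0-255 and appended as a two-digit hex alpha value.
print(colour_from_loadings([1.0, 0.5, 0.0]))
# ['#FF0000ff', '#FF00007f', '#FF000000']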
def determineDocument(pdf):
""" Scans the pdf document for certain text lines and determines the type of investment vehicle traded"""
if 'turbop' in pdf or 'turboc' in pdf:
return 'certificate'
elif 'minil' in pdf:
return 'certificate'
elif 'call' in pdf or 'put' in pdf:
return 'warrant'
else:
return 'stock' | e6c5adc10168321fd6a534dd8e9fbf2e8ccb1615 | 3,654,927 |
import argparse
def setup_parser() -> argparse.ArgumentParser:
"""Set default values and handle arg parser"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="wlanpi-core provides backend services for the WLAN Pi. Read the manual with: man wlanpi-core",
)
parser.add_argument(
"--reload", dest="livereload", action="store_true", default=False
)
parser.add_argument(
"--version", "-V", "-v", action="version", version=f"{__version__}"
)
return parser | 7144ee5d790fbf703e0809d43122e1e94cd68378 | 3,654,928 |
from typing import Any
import pickle
def deserialize_api_types(class_name: str, d: dict) -> Any:
"""
Deserializes an API type. Allowed classes are defined in:
* :mod:`maestral.core`
* :mod:`maestral.model`
* :mod:`maestral.exceptions`
:param class_name: Name of class to deserialize.
:param d: Dictionary of serialized class.
:returns: Deserialized object.
"""
bytes_message = serpent.tobytes(d["object"])
check_signature(d["signature"], bytes_message)
return pickle.loads(bytes_message) | f9c3962a1c18bd6dfb385af37e90b5062d1e0eef | 3,654,929 |
from click.testing import CliRunner
def runner() -> CliRunner:
"""Fixture for invoking command-line interfaces."""
return CliRunner() | 39f241b8192a3c06750e850c8c953822e4db5634 | 3,654,930 |
import torch
import os
def cal_quant_model_accuracy(model, gpu_index, val_loader, args, config_file, record_file):
"""Save the quantized model and infer the accuracy of the quantized model."""
torch.save({'state_dict': model.state_dict()}, os.path.join(TMP, 'model_best.pth.tar'))
print('==> AMCT step3: save_quant_retrain_model..')
quantized_pb_path = os.path.join(OUTPUTS, 'ResNet101')
amct.save_quant_retrain_model(
config_file, model, record_file, quantized_pb_path, get_input_data([(1, 3, SIZE, SIZE)], model),
input_names=['input'], output_names=['output'],
dynamic_axes={'input': {0: 'batch_size'}, 'output': {0: 'batch_size'}})
print("=> validating fake quant model")
quant_top1, quant_top5 = validate_onnx(
val_loader, ''.join([quantized_pb_path, '_fake_quant_model.onnx']), args.print_freq)
return quant_top1, quant_top5 | e3bbc5e26a6824cd7a9d494348441a611a9f6b9d | 3,654,931 |
import math
import matplotlib as mpl
from matplotlib import cm
def give_color_to_direction_dynamic(dir):
"""
Assigns a color to the direction (dynamic-defined colors)
Parameters
--------------
dir
Direction
Returns
--------------
col
Color
"""
dir = 0.5 + 0.5 * dir
norm = mpl.colors.Normalize(vmin=0, vmax=1)
nodes = [0.0, 0.01, 0.25, 0.4, 0.45, 0.55, 0.75, 0.99, 1.0]
colors = ["deepskyblue", "skyblue", "lightcyan", "lightgray", "gray", "lightgray", "mistyrose", "salmon", "tomato"]
cmap = mpl.colors.LinearSegmentedColormap.from_list("mycmap2", list(zip(nodes, colors)))
#cmap = cm.plasma
m = cm.ScalarMappable(norm=norm, cmap=cmap)
rgba = m.to_rgba(dir)
r = get_string_from_int_below_255(math.ceil(rgba[0] * 255.0))
g = get_string_from_int_below_255(math.ceil(rgba[1] * 255.0))
b = get_string_from_int_below_255(math.ceil(rgba[2] * 255.0))
return "#" + r + g + b | ece62af230cda4870df099eae50a26b72848b2de | 3,654,932 |
def pfilter(plugins, plugin_type=Analyser, **kwargs):
""" Filter plugins by different criteria """
if isinstance(plugins, models.Plugins):
plugins = plugins.plugins
elif isinstance(plugins, dict):
plugins = plugins.values()
logger.debug('#' * 100)
logger.debug('plugin_type {}'.format(plugin_type))
if plugin_type:
if isinstance(plugin_type, PluginMeta):
plugin_type = plugin_type.__name__
try:
plugin_type = plugin_type[0].upper() + plugin_type[1:]
pclass = globals()[plugin_type]
logger.debug('Class: {}'.format(pclass))
candidates = filter(lambda x: isinstance(x, pclass), plugins)
except KeyError:
raise models.Error('{} is not a valid type'.format(plugin_type))
else:
candidates = plugins
if 'name' in kwargs:
kwargs['name'] = kwargs['name'].lower()
logger.debug(candidates)
def matches(plug):
res = all(getattr(plug, k, None) == v for (k, v) in kwargs.items())
logger.debug("matching {} with {}: {}".format(plug.name, kwargs, res))
return res
if kwargs:
candidates = filter(matches, candidates)
return candidates | 8a291743e410ca98716c8f71a87346ca1e86115f | 3,654,933 |
import subprocess
def tmux_session_detection(session_name: str) -> bool:
"""
Function checks if session already exists.
"""
cmd = ['tmux', 'has-session', '-t', session_name]
result = subprocess.call(cmd, stderr=subprocess.DEVNULL)
return result == 0 | 275d85e087fa271c76fe44f2a67ea4c719e0c031 | 3,654,934 |
import copy
import numpy
def prune_arms(active_arms, sample_arms, verbose=False):
"""Remove all arms from ``active_arms`` that have an allocation less than two standard deviations below the current best arm.
:param active_arms: list of coordinate-tuples corresponding to arms/cohorts currently being sampled
:type active_arms: list of tuple
:param sample_arms: all arms from prev and current cohorts, keyed by coordinate-tuples
Arm refers specifically to a :class:`moe.bandit.data_containers.SampleArm`
:type sample_arms: dict
:param verbose: whether to print status messages to stdout
:type verbose: bool
:return: list of coordinate-tuples that are the *well-performing* members of ``active_arms``
length is at least 1 and at most ``len(active_arms)``
:rtype: list of tuple
"""
# Find all active sample arms
active_sample_arms = {}
for active_arm in active_arms:
active_sample_arms[active_arm] = sample_arms[active_arm]
# Find the best arm
# Our objective is a relative CTR, so status_quo is 0.0; we
# know that the best arm cannot be worse than status_quo
best_arm_val = 0.0
for sample_arm_point, sample_arm in active_sample_arms.items():
arm_value, arm_variance = objective_function(
sample_arm,
sample_arms[tuple(STATUS_QUO_PARAMETER)],
)
if arm_value > best_arm_val:
best_arm_val = arm_value
# Remove all arms that are more than two standard deviations worse than the best arm
pruned_arms = copy.copy(active_arms)
for sample_arm_point, sample_arm in active_sample_arms.items():
arm_value, arm_variance = objective_function(
sample_arm,
sample_arms[tuple(STATUS_QUO_PARAMETER)],
)
if sample_arm.total > 0 and arm_value + 2.0 * numpy.sqrt(arm_variance) < best_arm_val:
if verbose:
print("Removing underperforming arm: {0}".format(sample_arm_point))
pruned_arms.remove(sample_arm_point)
return pruned_arms | bd82f77503a9f0fa6a49b9f24ce9846849544b00 | 3,654,935 |
def prepare_string(dist, digits=None, exact=False, tol=1e-9,
show_mask=False, str_outcomes=False):
"""
Prepares a distribution for a string representation.
Parameters
----------
dist : distribution
The distribution to be stringified.
digits : int or None
The probabilities will be rounded to the specified number of
digits, using NumPy's around function. If `None`, then no rounding
is performed. Note, if the number of digits is greater than the
precision of the floats, then the resultant number of digits will
match that smaller precision.
exact : bool
If `True`, then linear probabilities will be displayed, even if
the underlying pmf contains log probabilities. The closest
rational fraction within a tolerance specified by `tol` is used
as the display value.
tol : float
If `exact` is `True`, then the probabilities will be displayed
as the closest rational fraction within `tol`.
show_mask : bool
If `True`, show the mask for marginal distributions.
str_outcomes : bool
If `True`, then attempt to convert outcomes which are tuples to just
strings. This is only a dislplay technique.
Returns
-------
pmf : sequence
The formatted pmf. This could be a NumPy array (possibly rounded)
or a list of Fraction instances.
outcomes : sequence
The formated outcomes.
base : str or float
The base of the formatted pmf.
colsep : str
The column separation for printing.
max_length : int
The length of the largest outcome, as a string.
pstr : str
A informative string representing the probability of an outcome.
This will be 'p(x)' xor 'log p(x)'.
"""
colsep = ' '
# Create outcomes with wildcards, if desired and possible.
if show_mask:
if not dist.is_joint():
msg = '`show_mask` can be `True` only for joint distributions'
raise ditException(msg)
if show_mask not in [True, False]:
# The user is specifying what the mask should look like.
wc = show_mask
else:
wc = '*'
ctor = dist._outcome_ctor
def outcome_wc(outcome):
"""
Builds the wildcarded outcome.
"""
i = 0
e = []
for is_masked in dist._mask:
if is_masked:
symbol = wc
else:
symbol = outcome[i]
i += 1
e.append(symbol)
e = ctor(e)
return e
outcomes = map(outcome_wc, dist.outcomes)
else:
outcomes = dist.outcomes
# Convert outcomes to strings, if desired and possible.
if str_outcomes:
if not dist.is_joint():
msg = '`str_outcomes` can be `True` only for joint distributions'
raise ditException(msg)
try:
# First, convert the elements of the outcome to strings.
outcomes_ = [map(str, outcome) for outcome in outcomes]
# Now convert the entire outcome to a string
outcomes_ = map(lambda o: ''.join(o), outcomes_)
# Force the iterators to expand in case there are exceptions.
outcomes = list(outcomes_)
except Exception:
outcomes = map(str, outcomes)
else:
outcomes = map(str, outcomes)
outcomes = list(outcomes)
if len(outcomes):
max_length = max(map(len, outcomes))
else:
max_length = 0
# 1) Convert to linear probabilities, if necessary.
if exact:
# Copy to avoid precision loss
d = dist.copy(base='linear')
else:
d = dist
# 2) Round, if necessary, possibly after converting to linear probabilities.
if digits is not None and digits is not False:
pmf = d.pmf.round(digits)
else:
pmf = d.pmf
# 3) Construct fractions, if necessary.
if exact:
pmf = [approximate_fraction(x, tol) for x in pmf]
if d.is_log():
pstr = 'log p(x)'
else:
pstr = 'p(x)'
base = d.get_base()
return pmf, outcomes, base, colsep, max_length, pstr | 09abba1e5027049b9a43cb83e8de6f95daf5b431 | 3,654,936 |
def verifier(func):
"""
Creates a `Verifier` by given specifier.
Parameters
----------
func: callable, [callable], (str, callable), [(str, callable)]
The specifier of `Verifier` which can take various forms and determines the attributes and behaviors of `Verifier`.
When it is declared as a list having a specifier,
the `Verifier` deals with an input as iterable object and tries to apply inner verifying function to each value.
If a tuple of string and callable is given, the string is used as the name of the `Verifier`.
Otherwise, its name is determined by `__name__` attribute of the callable object.
The callable should be a function taking an input and returns boolean value representing the result of the verification.
Returns
-------
Verifier
Created `Verifier`.
"""
func, is_iter = (func[0], True) if isinstance(func, list) else (func, False)
if isinstance(func, Verifier):
return func
elif isinstance(func, Variable):
return func._verifier
elif isinstance(func, partial):
ff, n, t_in, t_out, args, kwargs = analyze_specifier(func, (), {})
return Verifier(n, func, is_iter, *args, **kwargs)
elif callable(func):
return Verifier(func.__name__, func, is_iter)
elif isinstance(func, tuple):
ff, n, t_in, t_out, args, kwargs = analyze_specifier(func[1], (), {})
return Verifier(func[0], func[1], is_iter, *args, **kwargs)
else:
raise TypeError("Given value is not valid Verifier specifier.") | 665bc9cf5039e568fb2325a1cf0a25f72311eab8 | 3,654,937 |
import os
import pandas as pd
def obtain_celeba_images(n_people: int) -> pd.DataFrame:
"""
Unique labels: 10,177
It is expected for the structure to be as following:
<CELEBA_PATH>/
├─ identity_CelebA.txt
├─ img_align_celeba/
├─<images>
* 'identity_CelebA.txt' is the downloaded identity text annotations without
change from the dataset.
* 'img_align_celeba' is the folder with all the downloaded images.
@returns a pandas DataFrame of a n size sample with the following cols:
- path: path to the location of the image
- label: name of the person within the image
"""
df = pd.read_csv(
os.path.join(CELEBA_PATH, "identity_CelebA.txt"),
names = ["path", "label"],
sep=' '
)
# Extract according to unique number of people
df = extract_people_images(df, n_people)
root = os.path.join(CELEBA_PATH, "img_align_celeba/")
df["path"] = root + df["path"]
return df | dcaa72c6ef32939dd55ba62a05bcbfba35236402 | 3,654,938 |
from git import Repo
def get_add_diff_file_list(git_folder):
"""List of new files.
"""
repo = Repo(str(git_folder))
repo.git.add("sdk")
output = repo.git.diff("HEAD", "--name-only")
return output.splitlines() | af6ff7ffb076fb382aaa946e11e473f2f45bad0e | 3,654,939 |
from PIL import Image
import os
import qrcode
def _save_qr_code(qr_code: str, filepath: str = qr_path, filename: str = qr_name) -> str:
"""Use it for save QrCode from web.whatsapp.com (copied as string)
to PNG file to your path and your filename.
:param qr_code: QrCode string from web.whatsapp.com.
:param filepath: Your path for saving file.
:param filename: Your name for file.
:return: Absolute path to saved file.
"""
path = os.path.join(filepath, filename)
background = Image.new('RGB', (background_width, background_height), color='white')
img = qrcode.make(qr_code)
img_w, img_h = img.size
bg_w, bg_h = background.size
offset = ((bg_w - img_w) // 2, (bg_h - img_h) // 2)
background.paste(img, offset)
background.save(path)
return path | fa5118f00b3174235fe455c678e08ee5d460fe2d | 3,654,940 |
def has_read_perm(user, group, is_member, is_private):
""" Return True if the user has permission to *read*
Articles, False otherwise.
"""
if (group is None) or (is_member is None) or is_member(user, group):
return True
if (is_private is not None) and is_private(group):
return False
return True | 6c1bc51abd50a5af76e16e7723957c758822c988 | 3,654,941 |
import tempfile
import os
import shutil
def _create_docker_build_ctx(
launch_project: LaunchProject,
dockerfile_contents: str,
) -> str:
"""Creates build context temp dir containing Dockerfile and project code, returning path to temp dir."""
directory = tempfile.mkdtemp()
dst_path = os.path.join(directory, "src")
assert launch_project.project_dir is not None
shutil.copytree(
src=launch_project.project_dir,
dst=dst_path,
symlinks=True,
)
shutil.copy(
os.path.join(os.path.dirname(__file__), "templates", "_wandb_bootstrap.py"),
os.path.join(directory),
)
if launch_project.python_version:
runtime_path = os.path.join(dst_path, "runtime.txt")
with open(runtime_path, "w") as fp:
fp.write(f"python-{launch_project.python_version}")
# TODO: we likely don't need to pass the whole git repo into the container
# with open(os.path.join(directory, ".dockerignore"), "w") as f:
# f.write("**/.git")
with open(os.path.join(directory, _GENERATED_DOCKERFILE_NAME), "w") as handle:
handle.write(dockerfile_contents)
return directory | 3c5fc48d025494c2f92b07a5700b827382945d29 | 3,654,942 |
import numpy as np
def normalize_df(dataframe, columns):
"""
normalized all columns passed to zero mean and unit variance, returns a full data set
:param dataframe: the dataframe to normalize
:param columns: all columns in the df that should be normalized
:return: the data, centered around 0 and divided by it's standard deviation
"""
for column in columns:
data = dataframe.loc[:, column].values
sd = np.std(data)
mean = np.mean(data)
dataframe.loc[:, column] = (data - mean) / sd
return dataframe | 39b23a6f11794323f1d732396021d669410c7de1 | 3,654,943 |
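A toy demonstration of normalize_df; after the call each listed column has zero mean and unit population variance (np.std defaults to ddof=0).
import pandas as pd
df = pd.DataFrame({'a': [1.0, 2.0, 3.0], 'b': [10.0, 20.0, 30.0]})
df = normalize_df(df, ['a', 'b'])
print(df['a'].mean(), round(df['a'].std(ddof=0), 6))  # 0.0 1.0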
import json
def PeekTrybotImage(chromeos_root, buildbucket_id):
"""Get the artifact URL of a given tryjob.
Args:
buildbucket_id: buildbucket-id
chromeos_root: root dir of chrome os checkout
Returns:
(status, url) where status can be 'pass', 'fail', 'running',
and url looks like:
gs://chromeos-image-archive/trybot-elm-release-tryjob/R67-10468.0.0-b20789
"""
command = (
'cros buildresult --report json --buildbucket-id %s' % buildbucket_id)
rc, out, _ = RunCommandInPath(chromeos_root, command)
# Current implementation of cros buildresult returns fail when a job is still
# running.
if rc != 0:
return ('running', None)
results = json.loads(out)[buildbucket_id]
return (results['status'], results['artifacts_url'].rstrip('/')) | c74b7c5a120d3d489e6990bd03e74bb0d22fea27 | 3,654,944 |
def frozenset_code_repr(value: frozenset) -> CodeRepresentation:
"""
Gets the code representation for a frozenset.
:param value: The frozenset.
:return: It's code representation.
"""
return container_code_repr("frozenset({",
"})",
((el,) for el in value),
lambda el: el) | b4a3b283c7d21d0ae888c588471f9dea650215fb | 3,654,945 |
import numpy as np
import pandas as pd
from tqsdk import tafunc
def SRMI(df, n):
"""
MI修正指标
Args:
df (pandas.DataFrame): Dataframe格式的K线序列
n (int): 参数n
Returns:
pandas.DataFrame: 返回的DataFrame包含2列, 是"a", "mi", 分别代表A值和MI值
Example::
# 获取 CFFEX.IF1903 合约的MI修正指标
from tqsdk import TqApi, TqSim
from tqsdk.ta import SRMI
api = TqApi(TqSim())
klines = api.get_kline_serial("CFFEX.IF1903", 24 * 60 * 60)
srmi = SRMI(klines, 9)
print(list(srmi["a"]))
print(list(srmi["mi"]))
# 预计的输出是这样的:
[..., 0.10362397961836425, 0.07062591892459567, -0.03341929372138309, ...]
[..., 0.07583104758041452, 0.0752526999519902, 0.06317803398828206, ...]
"""
new_df = pd.DataFrame()
new_df["a"] = np.where(df["close"] < df["close"].shift(n),
(df["close"] - df["close"].shift(n)) / df["close"].shift(n),
np.where(df["close"] == df["close"].shift(n), 0,
(df["close"] - df["close"].shift(n)) / df["close"]))
new_df["mi"] = tafunc.sma(new_df["a"], n, 1)
return new_df | 29726385da068446cd3dd3ee13f8d95b88c36245 | 3,654,946 |
def get_purchase_rows(*args, **kwargs):
"""
Get a list of purchase rows.
:param args:
:param kwargs:
:return:
"""
return db_instance.get_rows(Purchase, *args, **kwargs) | 505ace358b619a736bc7a71139e307110cd7c27d | 3,654,947 |
def depart_delete(request):
""" 删除部门 """
nid = request.GET.get('nid')
models.Department.objects.filter(id=nid).delete()
return redirect("/depart/list/") | 753c01771ad59b789f324a0cb95e94dcf9e48e9d | 3,654,948 |
def create_condor_scheduler(name, host, username=None, password=None, private_key_path=None, private_key_pass=None):
"""
Creates a new condor scheduler
Args:
name (str): The name of the scheduler
host (str): The hostname or IP address of the scheduler
username (str, optional): The username to use when connecting to the scheduler
password (str, optional): The password for the username
private_key_path (str, optional): The path to the location of the SSH private key file
private_key_pass (str, optional): The passphrase for the private key
Returns:
The newly created condor scheduler
Note:
The newly created condor scheduler object is not committed to the database.
"""
condor_scheduler = CondorScheduler(name, host, username=username, password=password,
private_key_path=private_key_path, private_key_pass=private_key_pass)
return condor_scheduler | d47c8c69fea249139698564b52520d95fbb1a75f | 3,654,949 |
from meerschaum.config._edit import general_edit_config
from typing import Optional
from typing import List
def edit_stack(
action : Optional[List[str]] = None,
debug : bool = False,
**kw
):
"""
Open docker-compose.yaml or .env for editing
"""
if action is None:
action = []
files = {
'compose' : STACK_COMPOSE_PATH,
'docker-compose' : STACK_COMPOSE_PATH,
'docker-compose.yaml' : STACK_COMPOSE_PATH,
}
return general_edit_config(action=action, files=files, default='compose', debug=debug) | b9d969040daf30924f72fafca71980ed3ec153b6 | 3,654,950 |
def dot_to_underscore(instring):
"""Replace dots with underscores"""
return instring.replace(".", "_") | cf9441702ffb128678a031eabb4fa48be881cae5 | 3,654,951 |
def get_birthday_weekday(current_weekday: int, current_day: int,
birthday_day: int) -> int:
"""Return the day of the week it will be on birthday_day,
given that the day of the week is current_weekday and the
day of the year is current_day.
current_weekday is the current day of the week and is in
the range 1-7, indicating whether today is Sunday (1),
Monday (2), ..., Saturday (7).
current_day and birthday_day are both in the range 1-365.
>>> get_birthday_weekday(5, 3, 4)
6
>>> get_birthday_weekday(5, 3, 116)
6
>>> get_birthday_weekday(6, 116, 3)
5
"""
days_diff = days_difference(current_day, birthday_day)
return get_weekday(current_weekday, days_diff) | 5b4ba9f2a0efcdb9f150b421c21bb689604fbb11 | 3,654,952 |
import matlab.engine
import os
def matlab_kcit(X: np.ndarray, Y: np.ndarray, Z: np.ndarray, seed: int = None, matlab_engine_instance=None, installed_at=None):
"""Python-wrapper for original implementation of KCIT by Zhang et al. (2011)
References
----------
Zhang, K., Peters, J., Janzing, D., & Schölkopf, B. (2011). Kernel-based Conditional Independence Test and Application in Causal Discovery.
In Proceedings of the 27th Conference on Uncertainty in Artificial Intelligence (pp. 804–813). Corvallis, Oregon: AUAI Press.
"""
not_given = matlab_engine_instance is None
try:
if not_given:
matlab_engine_instance = matlab.engine.start_matlab()
dir_at = os.path.expanduser(installed_at)
matlab_engine_instance.addpath(matlab_engine_instance.genpath(dir_at))
if seed is not None:
matlab_engine_instance.RandStream.setGlobalStream(matlab_engine_instance.RandStream('mcg16807', 'Seed', seed))
statistic, v2, boot_p_value, v3, appr_p_value = matlab_engine_instance.CInd_test_new_withGP(np2matlab(X), np2matlab(Y), np2matlab(Z), 0.01, 0, nargout=5)
return statistic, v2, boot_p_value, v3, appr_p_value
finally:
if not_given and matlab_engine_instance is not None:
matlab_engine_instance.quit() | 0b941742ad4c8c31009064c29bfc003767aad098 | 3,654,953 |
def _check(err, msg=""):
"""Raise error for non-zero error codes."""
if err < 0:
msg += ': ' if msg else ''
if err == _lib.paUnanticipatedHostError:
info = _lib.Pa_GetLastHostErrorInfo()
hostapi = _lib.Pa_HostApiTypeIdToHostApiIndex(info.hostApiType)
msg += 'Unanticipated host API {0} error {1}: {2!r}'.format(
hostapi, info.errorCode, _ffi.string(info.errorText).decode())
else:
msg += _ffi.string(_lib.Pa_GetErrorText(err)).decode()
raise PortAudioError(msg)
return err | 2f0b2ccd055bbad814e48b451eb72c60e62f9273 | 3,654,954 |
def RunPackage(output_dir, target, package_path, package_name,
package_deps, package_args, args):
"""Copies the Fuchsia package at |package_path| to the target,
executes it with |package_args|, and symbolizes its output.
output_dir: The path containing the build output files.
target: The deployment Target object that will run the package.
package_path: The path to the .far package file.
package_name: The name of app specified by package metadata.
package_args: The arguments which will be passed to the Fuchsia process.
args: Structure of arguments to configure how the package will be run.
Returns the exit code of the remote package process."""
system_logger = (
_AttachKernelLogReader(target) if args.system_logging else None)
try:
if system_logger:
# Spin up a thread to asynchronously dump the system log to stdout
# for easier diagnoses of early, pre-execution failures.
log_output_quit_event = multiprocessing.Event()
log_output_thread = threading.Thread(
target=lambda: _DrainStreamToStdout(system_logger.stdout,
log_output_quit_event))
log_output_thread.daemon = True
log_output_thread.start()
tuf_root = tempfile.mkdtemp()
pm_serve_task = None
# Publish all packages to the serving TUF repository under |tuf_root|.
subprocess.check_call([PM, 'newrepo', '-repo', tuf_root])
all_packages = [package_path] + package_deps
for next_package_path in all_packages:
PublishPackage(tuf_root, next_package_path)
# Serve the |tuf_root| using 'pm serve' and configure the target to pull
# from it.
# TODO(kmarshall): Use -q to suppress pm serve output once blob push
# is confirmed to be running stably on bots.
serve_port = common.GetAvailableTcpPort()
pm_serve_task = subprocess.Popen(
[PM, 'serve', '-d', os.path.join(tuf_root, 'repository'), '-l',
':%d' % serve_port, '-q'])
remote_port = common.ConnectPortForwardingTask(target, serve_port, 0)
_RegisterAmberRepository(target, tuf_root, remote_port)
# Install all packages.
for next_package_path in all_packages:
install_package_name, package_version = GetPackageInfo(next_package_path)
logging.info('Installing %s version %s.' %
(install_package_name, package_version))
return_code = target.RunCommand(['amber_ctl', 'get_up', '-n',
install_package_name, '-v',
package_version],
timeout_secs=_INSTALL_TIMEOUT_SECS)
if return_code != 0:
raise Exception('Error while installing %s.' % install_package_name)
if system_logger:
log_output_quit_event.set()
log_output_thread.join(timeout=_JOIN_TIMEOUT_SECS)
if args.install_only:
logging.info('Installation complete.')
return
logging.info('Running application.')
command = ['run', _GetComponentUri(package_name)] + package_args
process = target.RunCommandPiped(command,
stdin=open(os.devnull, 'r'),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
if system_logger:
output_fd = MergedInputStream([process.stdout,
system_logger.stdout]).Start()
else:
output_fd = process.stdout.fileno()
# Run the log data through the symbolizer process.
build_ids_path = os.path.join(os.path.dirname(package_path), 'ids.txt')
output_stream = SymbolizerFilter(output_fd, build_ids_path)
for next_line in output_stream:
print(next_line.rstrip())
process.wait()
if process.returncode == 0:
logging.info('Process exited normally with status code 0.')
else:
# The test runner returns an error status code if *any* tests fail,
# so we should proceed anyway.
logging.warning('Process exited with status code %d.' %
process.returncode)
finally:
if system_logger:
logging.info('Terminating kernel log reader.')
log_output_quit_event.set()
log_output_thread.join()
system_logger.kill()
_UnregisterAmberRepository(target)
if pm_serve_task:
pm_serve_task.kill()
shutil.rmtree(tuf_root)
return process.returncode | f5251fdbef0a209aa4bfa97a99085cae91acf3e3 | 3,654,955 |
from keras.layers import Conv3D, Input, Lambda, concatenate
from keras.models import Model
def make_flood_fill_unet(input_fov_shape, output_fov_shape, network_config):
"""Construct a U-net flood filling network.
"""
image_input = Input(shape=tuple(input_fov_shape) + (1,), dtype='float32', name='image_input')
if network_config.rescale_image:
ffn = Lambda(lambda x: (x - 0.5) * 2.0)(image_input)
else:
ffn = image_input
mask_input = Input(shape=tuple(input_fov_shape) + (1,), dtype='float32', name='mask_input')
ffn = concatenate([ffn, mask_input])
# Note that since the Keras 2 upgrade strangely models with depth > 3 are
# rejected by TF.
ffn = add_unet_layer(ffn, network_config, network_config.unet_depth - 1, output_fov_shape,
n_channels=network_config.convolution_filters)
mask_output = Conv3D(
1,
(1, 1, 1),
kernel_initializer=network_config.initialization,
padding=network_config.convolution_padding,
name='mask_output',
activation=network_config.output_activation)(ffn)
ffn = Model(inputs=[image_input, mask_input], outputs=[mask_output])
return ffn | ff8c90b3eecc26384b33fd64afa0a2c4dd44b82d | 3,654,956 |
def FRAC(total):
"""Returns a function that shows the average percentage of the values from
the total given."""
def realFrac(values, unit):
r = toString(sum(values) / len(values) / total * 100)
r += '%'
if max(values) > min(values):
r += ' avg'
return [r]
return realFrac | 41946163d5c185d1188f71d615a67d72e6eaee4f | 3,654,957 |
def get_min_max(ints):
"""
Return a tuple(min, max) out of list of unsorted integers.
Args:
ints(list): list of integers containing one or more integers
"""
if len(ints) == 0: return (None, None)
low = ints[0]
high = ints[0]
for i in ints:
if i < low:
low = i
elif i > high:
high = i
return (low, high) | 14c7d4cc73947c8de38bb598e295d9a1b4b7e5f6 | 3,654,958 |
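A simple check of get_min_max, which finds both extremes in a single pass:
print(get_min_max([3, 9, -4, 100, 7]))  # (-4, 100)
print(get_min_max([]))                  # (None, None)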
import numpy as np
def gen_sweep_pts(start: float=None, stop: float=None,
center: float=0, span: float=None,
num: int=None, step: float=None, endpoint=True):
"""
Generates an array of sweep points based on different types of input
arguments.
Boundaries of the array can be specified using either start/stop or
using center/span. The points can be specified using either num or step.
Args:
start (float) : start of the array
stop (float) : end of the array
center (float) : center of the array
N.B. 0 is chosen as a sensible default for the span.
it is argued that no such sensible default exists
for the other types of input.
span (float) : span the total range of values to span
num (int) : number of points in the array
step (float) : the stepsize between points in the array
endpoint (bool): whether to include the endpoint
"""
if (start is not None) and (stop is not None):
if num is not None:
return np.linspace(start, stop, num, endpoint=endpoint)
elif step is not None:
# numpy arange does not natively support endpoint
return np.arange(start, stop + endpoint*step/100, step)
else:
raise ValueError('Either "num" or "step" must be specified')
elif (center is not None) and (span is not None):
if num is not None:
return span_num(center, span, num, endpoint=endpoint)
elif step is not None:
return span_step(center, span, step, endpoint=endpoint)
else:
raise ValueError('Either "num" or "step" must be specified')
else:
raise ValueError('Either ("start" and "stop") or '
'("center" and "span") must be specified') | fb67623acfea433331babf7b7e1217cfa4e9e7ae | 3,654,959 |
def set_lang_owner(cursor, lang, owner):
"""Set language owner.
Args:
cursor (cursor): psycopg2 cursor object.
lang (str): language name.
owner (str): name of new owner.
"""
query = "ALTER LANGUAGE \"%s\" OWNER TO \"%s\"" % (lang, owner)
executed_queries.append(query)
cursor.execute(query)
return True | 07cf4a33ca766a8ccf468f59d33318bab88c4529 | 3,654,960 |
def rstrip_tuple(t: tuple):
"""Remove trailing zeroes in `t`."""
if not t or t[-1]:
return t
right = len(t) - 1
while right > 0 and t[right - 1] == 0:
right -= 1
return t[:right] | a10e74ea4a305d588fbd1555f32dda1d4b95266e | 3,654,961 |
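rstrip_tuple on a few inputs; note that an all-zero tuple collapses to the empty tuple:
print(rstrip_tuple((1, 2, 0, 0)))  # (1, 2)
print(rstrip_tuple((1, 0)))        # (1,)
print(rstrip_tuple((0, 0)))        # ()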
def _calc_active_face_flux_divergence_at_node(grid, unit_flux_at_faces, out=None):
"""Calculate divergence of face-based fluxes at nodes (active faces only).
Given a flux per unit width across each face in the grid, calculate the net
outflux (or influx, if negative) divided by cell area, at each node that
lies within a cell.
Construction::
_calc_active_face_flux_divergence_at_node(grid, unit_flux_at_faces,
out=None)
Parameters
----------
grid : ModelGrid
A ModelGrid.
unit_flux_at_faces : ndarray or field name (x number of faces)
Flux per unit width associated with faces.
out : ndarray (x number of nodes), optional
Buffer to hold the result.
Returns
-------
ndarray (x number of nodes)
Flux divergence at nodes.
Examples
--------
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> rg = RasterModelGrid(3, 4, 10.0)
>>> z = rg.add_zeros('node', 'topographic__elevation')
>>> z[5] = 50.0
>>> z[6] = 36.0
>>> fg = rg.calc_grad_at_link(z)[rg.link_at_face] # there are 7 faces
>>> fg
array([ 5. , 3.6, 5. , -1.4, -3.6, -5. , -3.6])
>>> _calc_active_face_flux_divergence_at_node(rg, -fg)
array([ 0. , 0. , 0. , 0. , 0. , 1.64, 0.94, 0. , 0. ,
0. , 0. , 0. ])
>>> rg.set_status_at_node_on_edges(right=CLOSED_BOUNDARY)
>>> rg.set_status_at_node_on_edges(top=CLOSED_BOUNDARY)
>>> _calc_active_face_flux_divergence_at_node(rg, -fg)
array([ 0. , 0. , 0. , 0. , 0. , 1.14, 0.22, 0. , 0. ,
0. , 0. , 0. ])
Notes
-----
Performs a numerical flux divergence operation on cells, and returns the
result in an array of length equal to the number of nodes. Nodes without
cells (those on the grid perimeter) are not affected (i.e., their value
is either zero, or if `out` is given, whatever the prior value in `out`
was).
"""
if out is None:
out = grid.zeros(at='node')
out[grid.node_at_cell] = \
_calc_net_active_face_flux_at_cell(grid, unit_flux_at_faces) \
/ grid.area_of_cell
return out | 82c485935a3190c07ab12f7c838d52f5fecb78d0 | 3,654,962 |
def get_EL(overlaps):
"""
a) 1 +++++++++|---|---
2 --|---|++++++++++
b) 1 ---|---|+++++++++++++
2 ++++++++++|---|---
"""
EL1a = overlaps['query_start']
EL2a = overlaps['target_len'] - overlaps['target_end'] - 1
EL1b = overlaps['query_len'] - overlaps['query_end'] - 1
EL2b = overlaps['target_start']
final_EL = []
for i in range(overlaps.shape[0]):
if extend_right(overlaps['query_end'][i], overlaps['query_len'][i], overlaps['target_end'][i], overlaps['target_len'][i]):
final_EL.append([EL1b[i], EL2b[i]])
elif extend_left(overlaps['query_start'][i], overlaps['target_start'][i]):
final_EL.append([EL1a[i], EL2a[i]])
else:
            # TODO: filter out overlaps that do not extend anything on either side
continue
final_EL = np.array(final_EL).reshape(-1,2)
return np.split(final_EL, 2, axis=1) | c0416091ecbfe40110fcda75cf12aae411cc4eba | 3,654,963 |
from typing import NoReturn
def get_line(prompt: str = '') -> Effect[HasConsole, NoReturn, str]:
"""
Get an `Effect` that reads a `str` from stdin
Example:
>>> class Env:
... console = Console()
>>> greeting = lambda name: f'Hello {name}!'
>>> get_line('What is your name? ').map(greeting).run(Env())
    What is your name?  # input e.g. 'John Doe'
'Hello John Doe!'
Args:
prompt: prompt to display in console
Return:
an `Effect` that produces a `str` read from stdin
"""
return depend(HasConsole).and_then(lambda env: env.console.input(prompt)) | 47c58bb6ab794fdf789f0812dc1dc6d977106b60 | 3,654,964 |
def reconstruct_wave(*args: ndarray, kwargs_istft, n_sample=-1) -> ndarray:
"""
construct time-domain wave from complex spectrogram
Args:
        *args: either the complex spectrogram, or the magnitude and phase as two real arrays.
        kwargs_istft: keyword arguments passed to the inverse STFT.
        n_sample: expected audio length (-1 leaves it unspecified).
    Returns:
        the reconstructed time-domain signal as a numpy array
"""
if len(args) == 1:
spec = args[0].squeeze()
mag = None
phase = None
assert np.iscomplexobj(spec)
elif len(args) == 2:
spec = None
mag = args[0].squeeze()
phase = args[1].squeeze()
assert np.isrealobj(mag) and np.isrealobj(phase)
else:
raise ValueError
kwarg_len = dict(length=n_sample) if n_sample != -1 else dict()
if spec is None:
spec = mag * np.exp(1j * phase)
wave = librosa.istft(spec, **kwargs_istft, **kwarg_len)
return wave | 8624602efe1ab90304da05c602fb46ac52ec86e0 | 3,654,965 |
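A minimal round-trip sketch for reconstruct_wave, assuming librosa and numpy are available (the signal and parameter values are illustrative only):
import librosa
import numpy as np
y = np.random.randn(22050).astype(np.float32)  # any mono signal works here
spec = librosa.stft(y, hop_length=128)
wave = reconstruct_wave(spec, kwargs_istft=dict(hop_length=128), n_sample=len(y))
assert wave.shape == y.shape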
def perfect_score(student_info):
"""
:param student_info: list of [<student name>, <score>] lists
:return: first `[<student name>, 100]` or `[]` if no student score of 100 is found.
"""
    for student in student_info:
        if int(student[1]) == 100:
            return student
    return [] | ac7580cce134627e08764031ef2812e1b70ba00f | 3,654,966
import argparse
def parse_args():
"""Parses command line arguments."""
parser = argparse.ArgumentParser()
parser.add_argument(
"--cl_kernel_dir",
type=str,
default="./mace/ops/opencl/cl/",
help="The cl kernels directory.")
parser.add_argument(
"--output_path",
type=str,
default="./mace/examples/codegen/opencl/opencl_encrypted_program.cc",
help="The path of encrypted opencl kernels.")
return parser.parse_known_args() | 86b45bfeb0ebfbc4e3e4864b55736b6c5bb42954 | 3,654,967 |
from collections import defaultdict
def get_composite_component(current_example_row, cache, model_config):
"""
maps component_id to dict of {cpu_id: False, ...}
:param current_example_row:
:param cache:
:param model_config:
:return: nested mapping_dict = { #there can be multiple components
component_id = { #components can be deployed on multiple servers
cpu_id: False,
...
},
...
}
"""
mapping_dict = defaultdict(lambda: {})
    for column_name in model_config["components"]:
allocation_name = column_name.replace("AllocationDegreeImpl:", "")
context = get_element_by_identifier(element_tree=cache.get_xml_tree("allocation"),
search_string=allocation_name,
attribute="entityName")
system_id = get_linkage_id(identifier="assemblyContext_AllocationContext", element_tree=context)
assembly_context = get_by_id(element=cache.get_xml_tree("system"), element_id=system_id)
component = assembly_context.find("./encapsulatedComponent__AssemblyContext")
if component.get(get_xml_schema_type()) == "repository:CompositeComponent":
repo_id = get_linkage_id(element_tree=assembly_context, identifier="encapsulatedComponent__AssemblyContext")
composite_component = get_by_id(element=cache.get_xml_tree("repository"), element_id=repo_id)
for composed_structure in composite_component.findall("./assemblyContexts__ComposedStructure"):
component_id = composed_structure.get("encapsulatedComponent__AssemblyContext")
# check if column (with name of component) of current test data is allocated to existing server
if current_example_row[column_name] in model_config["server"].keys():
# if component is allocated to existing server append allocation to list
for server_id in model_config["server"]:
# if component is part of composite
if current_example_row[column_name] == server_id:
temp_server_id = model_config["server"][current_example_row[column_name]]
mapping_dict[component_id].update({temp_server_id: False})
return mapping_dict | 201db2016ea59cbf4a20ce081813bfd60d58bf67 | 3,654,968 |
def presigned_url_both(filename, email):
"""
    Return presigned URLs for both the original image and its thumbnail.
:param filename:
:param email:
:return:
"""
prefix = "photos/{0}/".format(email_normalize(email))
prefix_thumb = "photos/{0}/thumbnails/".format(email_normalize(email))
key_thumb = "{0}{1}".format(prefix_thumb, filename)
key_origin = "{0}{1}".format(prefix, filename)
try:
s3_client = boto3.client('s3')
thumb_url = s3_client.generate_presigned_url(
'get_object',
Params={'Bucket': conf['S3_PHOTO_BUCKET'], 'Key': key_thumb},
ExpiresIn=conf['S3_PRESIGNED_EXP'])
origin_url = s3_client.generate_presigned_url(
'get_object',
Params={'Bucket': conf['S3_PHOTO_BUCKET'], 'Key': key_origin},
ExpiresIn=conf['S3_PRESIGNED_EXP'])
except Exception as e:
raise ChaliceViewError(e)
return thumb_url, origin_url | 7f37cf388ef944d740f2db49c5125435b819e0e8 | 3,654,969 |
def check_if_event_exists(service, new_summary):
"""
Description: checks if the event summary exists using a naive approach
"""
event_exists = False
page_token = None
calendarId = gcalendarId
while True:
events = (
service.events().list(calendarId=calendarId, pageToken=page_token).execute()
)
for event in events["items"]:
            # check whether the new summary already appears in an existing event
if new_summary in event["summary"]:
event_exists = True
break
page_token = events.get("nextPageToken")
if not page_token:
break
return event_exists | c6cc8bd3e4548cda11f9eaad6fd2d3da7a5c7e20 | 3,654,970 |
def retry(func, *args, **kwargs):
"""
You can use the kwargs to override the 'retries' (default: 5) and
'use_account' (default: 1).
"""
global url, token, parsed, conn
retries = kwargs.get('retries', 5)
use_account = 1
if 'use_account' in kwargs:
use_account = kwargs['use_account']
del kwargs['use_account']
use_account -= 1
attempts = 0
backoff = 1
while attempts <= retries:
attempts += 1
try:
if not url[use_account] or not token[use_account]:
url[use_account], token[use_account] = \
get_auth(swift_test_auth, swift_test_user[use_account],
swift_test_key[use_account])
parsed[use_account] = conn[use_account] = None
if not parsed[use_account] or not conn[use_account]:
parsed[use_account], conn[use_account] = \
http_connection(url[use_account])
return func(url[use_account], token[use_account],
parsed[use_account], conn[use_account], *args, **kwargs)
except (socket.error, HTTPException):
if attempts > retries:
raise
parsed[use_account] = conn[use_account] = None
        except AuthError as err:
url[use_account] = token[use_account] = None
continue
        except InternalServerError as err:
pass
if attempts <= retries:
sleep(backoff)
backoff *= 2
raise Exception('No result after %s retries.' % retries) | 7749fcd63f8d795692097b0257adde4147ecb569 | 3,654,971 |
def eval_f(angles, data=None):
"""
function to minimize
"""
x1, x2, d, zt, z, alpha, beta, mask, b1, b2 = data
thetaxm, thetaym, thetazm, thetaxp, thetayp, thetazp = angles
rm = rotation(thetaxm, thetaym, thetazm)
rp = rotation(thetaxp, thetayp, thetazp)
x1r = rm.dot(x1.T).T
x2r = rp.dot(x2.T).T + d
obj = poisson_complete_ll(x1r, x2r, zt, z, alpha, beta, mask, b1, b2)
return obj | 622c18d21224ab40d597a165bff3e0493db4cdcc | 3,654,972 |
def clamp(min_v, max_v, value):
"""
Clamps a value between a min and max value
Args:
min_v: Minimum value
max_v: Maximum value
value: Value to be clamped
Returns:
Returns the clamped value
"""
return min_v if value < min_v else max_v if value > max_v else value | 1a9aaf3790b233f535fb864215444b0426c17ad8 | 3,654,973 |
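Quick examples for clamp:
assert clamp(0, 10, -5) == 0
assert clamp(0, 10, 5) == 5
assert clamp(0, 10, 15) == 10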
def collatz(n):
"""Sequence generation."""
l = []
while n > 1:
l.append(n)
if n % 2 == 0:
            n = n // 2  # integer division keeps the sequence in ints on Python 3
else:
n = (3 * n) + 1
l.append(n)
return l | 69d993147604889fe6b03770efbfa6fb7f034258 | 3,654,974 |
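Example for collatz:
assert collatz(6) == [6, 3, 10, 5, 16, 8, 4, 2, 1]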
import os
import transformers
def _load_tokenizer(path, **kwargs):
"""TODO: add docstring."""
if not os.path.isdir(path):
raise ValueError(
"transformers.AutoTokenizer.from_pretrained"
" should be called with a path to a model directory."
)
return transformers.AutoTokenizer.from_pretrained(path, **kwargs) | 07b061448017960bca4e146a628c860e25ff4a19 | 3,654,975 |
def generate_anchors(base_size=16, ratios=[0.5, 1, 2],
scales=2 ** np.arange(3, 6), stride=16):
"""
Generate anchor (reference) windows by enumerating aspect ratios X
scales wrt a reference (0, 0, 15, 15) window.
"""
base_anchor = np.array([1, 1, base_size, base_size]) - 1
ratio_anchors = _ratio_enum(base_anchor, ratios)
anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales)
for i in range(ratio_anchors.shape[0])])
return anchors | 083bfad62fac67e0f7fb02251bc3db7904629bd5 | 3,654,976 |
import re
def number_format(number_string, fill=2):
"""
add padding zeros to make alinged numbers
ex.
>>> number_format('2')
'02'
>>> number_format('1-2')
'01-02'
"""
output = []
digits_spliter = r'(?P<digit>\d+)|(?P<nondigit>.)'
for token in [m.groups() for m in re.finditer(digits_spliter, number_string)]:
if token[0] is None:
output.append(token[1])
else:
            output.append(token[0].zfill(fill))
return ''.join(output) | ee44167b4597fbe7c9f01fa5b26e02d7608c3677 | 3,654,977 |
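With the fill parameter now honored, two further examples (hypothetical inputs):
assert number_format('7', fill=3) == '007'
assert number_format('1-2', fill=3) == '001-002'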
def box_postp2use(pred_boxes, nms_iou_thr=0.7, conf_thr=0.5):
"""Postprocess prediction boxes to use
* Non-Maximum Suppression
* Filter boxes with Confidence Score
Args:
pred_boxes (np.ndarray dtype=np.float32): pred boxes postprocessed by yolo_output2boxes. shape: [cfg.cell_size * cfg.cell_size *cfg.boxes_per_cell, 6]
nms_iou_thr (float): Non-Maximum Suppression IoU Threshold
conf_thr (float): Confidence Score Threshold
Returns:
np.ndarray (dtype=np.float32)
"""
boxes_nms = nms(pred_boxes=pred_boxes, iou_thr=nms_iou_thr)
boxes_conf_filtered = boxes_nms[boxes_nms[:, 4] >= conf_thr]
return boxes_conf_filtered | 07be8b953b82dbbcc27daab0afa71713db96efc1 | 3,654,978 |
from functools import reduce
def many_hsvs_to_rgb(hsvs):
"""Combine list of hsvs otf [[(h, s, v), ...], ...] and return RGB list."""
num_strips = len(hsvs[0])
num_leds = len(hsvs[0][0])
res = [[[0, 0, 0] for ll in range(num_leds)] for ss in range(num_strips)]
for strip in range(num_strips):
for led in range(num_leds):
# for some reason the conversion screws this up?
#
# import bibliopixel as bp
# c1 = bp.colors.conversions.hsv2rgb((0, 0, 0))
# c2 = bp.colors.conversions.hsv2rgb((0, 0, 0))
# c3 = bp.colors.conversions.hsv2rgb((0, 0, 0))
# bp.colors.arithmetic.color_blend(
# bp.colors.arithmetic.color_blend(c1, c2),
# c3)
#
# = (2, 2, 2)
if all(hsv[strip][led][2] == 0 for hsv in hsvs):
rgb = (0, 0, 0)
else:
rgbs = [bp.colors.conversions.hsv2rgb(hsv[strip][led])
for hsv in hsvs]
rgb = reduce(bp.colors.arithmetic.color_blend, rgbs)
res[strip][led] = rgb
return res | 0842ecb4a42560fb6dae32a91ae12588152db621 | 3,654,979 |
def _convert_paths_to_flask(transmute_paths):
"""flask has it's own route syntax, so we convert it."""
paths = []
for p in transmute_paths:
paths.append(p.replace("{", "<").replace("}", ">"))
return paths | f8ea95e66c68481f0eb5a6d83cf61d098806f6be | 3,654,980 |
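Example of the conversion (hypothetical route):
assert _convert_paths_to_flask(['/users/{user_id}/posts/{post_id}']) == ['/users/<user_id>/posts/<post_id>']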
def check_isup(k, return_client=None):
"""
    Checks ping and returns the status.
    Used with a concurrent decorator for parallel checks.
    :param k: hostname to ping
    :param return_client: if set, changes the return format to {k: {'comments': comments}}
    :return: 'ping ok' if the host responds, ' - ' otherwise (a dict if return_client is set)
"""
if is_up(k):
comments = 'ping ok'
else:
comments = ' - '
if return_client:
comments = {k: {'comments': comments}}
return comments | 8ebb346eb74cb54aa978b4fff7cd310b344ece50 | 3,654,981 |
def percent_uppercase(text):
"""Calculates percentage of alphabetical characters that are uppercase, out of total alphabetical characters.
Based on findings from spam.csv that spam texts have higher uppercase alphabetical characters
(see: avg_uppercase_letters())"""
alpha_count = 0
uppercase_count = 0
for char in text:
if char.isalpha():
alpha_count += 1
if char.isupper():
uppercase_count += 1
# calculate percentage - make sure not to divide by 0
try:
perc_uppercase = float(uppercase_count) / float(alpha_count)
return str(perc_uppercase)
except ZeroDivisionError:
return "0" | 61ccf42d06ffbae846e98d1d68a48de21f52c299 | 3,654,982 |
def get_move() -> tuple:
"""
Utility function to get the player's move.
:return: tuple of the move
"""
return get_tuple('What move to make?') | ef35ab6fcb9cdfd60ecc91ff044b39600659c80f | 3,654,983 |
def set_value(parent, type, name, value) :
"""
Sets a value in the format Mitsuba Renderer expects
"""
curr_elem = etree.SubElement(parent, type)
curr_elem.set("name", name)
curr_elem.set("id" if type in ["ref", "shapegroup"] else "value", value) # The can be an id
return curr_elem | 6ace9a4a5858f3fb80bbe585580345369d7f6e63 | 3,654,984 |
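A minimal sketch for set_value, assuming the etree in scope is lxml.etree (element names are illustrative):
from lxml import etree
scene = etree.Element('scene')
set_value(scene, 'float', 'fov', '45')          # <float name="fov" value="45"/>
set_value(scene, 'ref', 'bsdf', 'my-material')  # <ref name="bsdf" id="my-material"/>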
def create_bst(nodes) -> BST:
"""Creates a BST from a specified nodes."""
root = BST(nodes[0])
for i in range(1, len(nodes)):
root.insert(nodes[i])
return root | d93164dcc94f36ea9d5643467cf157d0f387c149 | 3,654,985 |
from typing import Generator
import math
def get_dec_arch(gen: Generator) -> nn.Sequential:
"""
Get decoder architecture associated with given generator.
Args:
gen (Generator): Generator associated with the decoder.
Returns:
nn.Sequential: Decoder architecture.
"""
# As defined in the paper.
len_z = len(gen.latent_space_mean())
h_size = math.floor(len_z / 2)
decoder = nn.Sequential(
nn.Linear(len_z, h_size),
nn.ReLU(inplace=True),
nn.Linear(h_size, len_z),
)
return decoder | dbadcb662e0c9a3089a4f152675d91b0d3e8898d | 3,654,986 |
import xml
def add_page_to_xml(alto_xml, alto_xml_page, page_number=0):
"""
Add new page to end of alto_xml or replace old page.
"""
# If book empty
    if alto_xml is None:
page_dom = xml.dom.minidom.parseString(alto_xml_page)
page_dom.getElementsByTagName("Page")[0].setAttribute("ID", 'page_1')
alto_xml_page = page_dom.toxml(encoding="utf-8")
return(alto_xml_page)
# If not
book_dom = xml.dom.minidom.parseString(alto_xml)
page_dom = xml.dom.minidom.parseString(alto_xml_page)
page = page_dom.getElementsByTagName("Page")[0]
    if page_number == 0:
# Find last page
page_number = book_dom.getElementsByTagName("Page").length
# and add page to end
book_dom.getElementsByTagName("Layout")[0].appendChild(page)
page.setAttribute("ID", 'page_%d' % (page_number+1))
# If new page is not last page
else:
old_page = book_dom.getElementsByTagName("Page")[page_number-1]
book_dom.getElementsByTagName("Layout")[0].replaceChild(page, old_page)
page.setAttribute("ID", 'page_%d' % page_number)
return(book_dom.toxml(encoding="utf-8")) | d9303db2e6ce9c672d0548f81ddece36b1e59be3 | 3,654,987 |
def calculate_performance(all_data):
"""
Calculates the performance metrics as found in "benchmarks" folder of
scikit-optimize and prints them in console.
Parameters
----------
* `all_data`: dict
Traces data collected during run of algorithms. For more details, see
'evaluate_optimizer' function.
"""
sorted_traces = defaultdict(list)
for model in all_data:
for dataset in all_data[model]:
for algorithm in all_data[model][dataset]:
data = all_data[model][dataset][algorithm]
# leave only best objective values at particular iteration
best = [[v[-1] for v in d] for d in data]
supervised_learning_type = "Regression" if ("Regressor" in model) else "Classification"
# for every item in sorted_traces it is 2d array, where first dimension corresponds to
# particular repeat of experiment, and second dimension corresponds to index
# of optimization step during optimization
key = (algorithm, supervised_learning_type)
sorted_traces[key].append(best)
# calculate averages
for key in sorted_traces:
# the meta objective: average over multiple tasks
mean_obj_vals = np.mean(sorted_traces[key], axis=0)
minimums = np.min(mean_obj_vals, axis=1)
f_calls = np.argmin(mean_obj_vals, axis=1)
min_mean = np.mean(minimums)
min_stdd = np.std(minimums)
min_best = np.min(minimums)
f_mean = np.mean(f_calls)
f_stdd = np.std(f_calls)
f_best = np.min(f_calls)
def fmt(float_value):
return ("%.3f" % float_value)
output = str(key[0]) + " | " + " | ".join(
[fmt(min_mean) + " +/- " + fmt(min_stdd)] + [fmt(v) for v in [min_best, f_mean, f_stdd, f_best]])
result = table_template + output
print("")
print(key[1])
print(result) | 0b2d185c2cafdddc632ee5b94a6e37c7450f096b | 3,654,988 |
def connected_components(num_nodes, Ap, Aj, components):
"""connected_components(int const num_nodes, int const [] Ap, int const [] Aj, int [] components) -> int"""
return _amg_core.connected_components(num_nodes, Ap, Aj, components) | a3306dd4357b6db91bdbb3033b53effe8f5e376d | 3,654,989 |
def get_drm_version():
"""
Return DRM library version.
Returns:
str: DRM library version.
"""
path = _join(PROJECT_DIR, "CMakeLists.txt")
with open(path, "rt") as cmakelists:
for line in cmakelists:
if line.startswith("set(ACCELIZEDRM_VERSION "):
version = f"v{line.split(' ')[1].strip().strip(')')}"
print(f"Detected DRM library version: {version}")
return version
raise ValueError(f'ACCELIZEDRM_VERSION not found in "{path}"') | b94da9049be428fc38992c34c829e202e98cb69d | 3,654,990 |
def pmx(p1, p2):
"""Perform Partially Mapped Crossover on p1 and p2."""
return pmx_1(p1, p2), pmx_1(p2, p1) | 60ac365efe3fd66eea24859afd9cfa470c061de2 | 3,654,991 |
import os
def get_met_rxn_names(raw_data_dir: str, model_name: str) -> tuple:
"""
Gets the names of metabolites and reactions in the model.
Args:
raw_data_dir: path to folder with the raw data.
        model_name: name of the model.
Returns:
A list with the metabolite names and another with the reaction names.
"""
file_met_names = os.path.join(raw_data_dir, f'{model_name}_metsActive.dat')
met_names = pd.read_csv(file_met_names, sep='\n').values
met_names = list(met_names.transpose()[0])
met_names = [met_name.replace('m_m_', '') for met_name in met_names]
# get reaction names
file_rxn_names = os.path.join(raw_data_dir, f'{model_name}_rxnsActive.dat')
rxn_names = pd.read_csv(file_rxn_names, sep='\n').values
rxn_names = list(rxn_names.transpose()[0])
rxn_names = [rxn_name.replace('r_', '') for rxn_name in rxn_names]
return met_names, rxn_names | 8036037b83b92756d2d5e703387bcb6a25fb1436 | 3,654,992 |
def meshparameterspace(shape=(20, 20), psi_limits=(None, None),
eta_limits=(None, None),
psi_spacing="linear",
eta_spacing="linear",
user_spacing=(None, None)):
"""Builds curvilinear mesh inside parameter space.
:param psi_spacing and eta_spacing:
- 'linear': uniform spacing on interior of the surface
- 'cosine': cosine spacing
- 'uniform': spacing matches the spacing along edge
- 'user': user spacing that is passed in through user_spacing
    :param psi_limits and eta_limits: only define these if spacing is 'uniform'.
        Should be the points where the intersection is located.
"""
if psi_spacing == "cosine":
x_spacing = cosine_spacing()
elif psi_spacing == "linear":
x_spacing = np.linspace
elif psi_spacing == "uniform":
x_spacing = _uniform_spacing(eta_limits, 0)
elif psi_spacing == "user":
if user_spacing[0] is not None:
x_spacing = user_spacing[0]
else:
raise RuntimeError("must provide user_spacing w/ psi_spacing=user")
else:
raise RuntimeError("specified spacing not recognized")
if eta_spacing == "cosine":
y_spacing = cosine_spacing()
elif eta_spacing == "linear":
y_spacing = np.linspace
elif eta_spacing == "uniform":
y_spacing = _uniform_spacing(psi_limits, 1)
elif eta_spacing == "user":
if user_spacing[1] is not None:
y_spacing = user_spacing[1]
else:
raise RuntimeError("must provide user_spacing w/ psi_spacing=user")
else:
raise RuntimeError("specified spacing not recognized")
n_psi, n_eta = shape
psi_lower, psi_upper = psi_limits
eta_lower, eta_upper = eta_limits
# if limits aren't specified, set lower to 0 and upper to 1
if psi_lower is None:
psi_lower = np.full((n_eta, 2), 0.)
eta_min = eta_lower[0, 1] if eta_lower is not None else 0.
eta_max = eta_upper[0, 1] if eta_upper is not None else 1.
psi_lower[:, 1] = y_spacing(eta_min, eta_max, n_eta)
if psi_upper is None:
psi_upper = np.full((n_eta, 2), 1.)
eta_min = eta_lower[-1, 1] if eta_lower is not None else 0.
eta_max = eta_upper[-1, 1] if eta_upper is not None else 1.
psi_upper[:, 1] = y_spacing(eta_min, eta_max, n_eta)
if eta_lower is None:
eta_lower = np.full((n_psi, 2), 0.)
psi_min = psi_lower[0, 0] if psi_lower is not None else 0.
psi_max = psi_upper[0, 0] if psi_upper is not None else 1.
eta_lower[:, 0] = x_spacing(psi_min, psi_max, n_psi)
if eta_upper is None:
eta_upper = np.full((n_psi, 2), 1.)
psi_min = psi_lower[-1, 0] if psi_lower is not None else 0.
psi_max = psi_upper[-1, 0] if psi_upper is not None else 1.
eta_upper[:, 0] = x_spacing(psi_min, psi_max, n_psi)
grid = mesh_curvilinear(psi_lower, psi_upper, eta_lower, eta_upper,
x_spacing, y_spacing)
# TODO: the following probably belongs outside the scope of this class
# if flip:
# grid = np.flipud(grid)
return grid[:, :, 0], grid[:, :, 1] | 87c928bb598bfd86f29c9ecf83534c6994a11441 | 3,654,993 |
def get_key_information(index, harness_result: HarnessResult, testbed_parser, esapi_instance: ESAPI):
"""
    1. key_exception_dic is a dict keyed by engine name. If error information can be
       extracted, the value is the engine's key error message; if no engine produced
       any error information, the value is the engine's full output.
    Returns [double_output_id, engine_name, key_exception_dic, api_name, filter_type].
    There are three filter types: type 1 means the suspicious output contains error
    information; type 2 means the suspicious output contains no error information;
    type 3 means no engine reported an error at all (i.e. the inconsistency is caused
    by differing execution results). filter_type takes a value in [1, 2, 3],
    corresponding to [type 1, type 2, type 3].
    """
"""
suspicious_output = None
for output in harness_result.outputs:
if output.id == index:
suspicious_output = output
if suspicious_output is None:
raise Exception("Harness result does not contain special index")
key_exception = list_normalized_essential_exception_message(suspicious_output.stderr + suspicious_output.stdout)
key_exception_dic = {}
double_output_id = index
engine_name = testbed_parser.parse_engine_name(suspicious_output.testbed)
no_exception_info_engine_counter = 0
es_api_node_ast_in_testcase = None
    # the inconsistent differential-testing result contains error information: type 1
if key_exception != "":
filter_type = FilerType.TYPE1.value
[api_name, es_api_node_ast_in_testcase] = getExecptionStatementApi.get_exception_statement_api(
esapi_instance,
harness_result.testcase,
suspicious_output.stderr + suspicious_output.stdout,
es_api_node_ast_in_testcase)
if api_name is None:
api_name = "NoApi"
key_exception_dic = {engine_name: key_exception}
    # the inconsistent differential-testing result contains no error information: type 2
else:
filter_type = FilerType.TYPE2.value
        no_exception_info_engine_counter += 1  # no error information could be extracted from this result
api_list = []
for output in harness_result.outputs:
if output.id != index:
exception_engine_name = testbed_parser.parse_engine_name(output.testbed)
exception_info = list_normalized_essential_exception_message(output.stderr + output.stdout)
if exception_info == "":
no_exception_info_engine_counter += 1
key_exception_dic.update({exception_engine_name: exception_info})
[api, es_api_node_ast_in_testcase] = getExecptionStatementApi.get_exception_statement_api(
esapi_instance,
harness_result.testcase,
output.stderr + output.stdout,
es_api_node_ast_in_testcase)
api = "NoApi" if api is None else api
api_list.append(api)
most_frequent_api, most_frequent_count = get_highest_frequency(api_list)
if most_frequent_count < len(api_list) * 1 / 2:
api_name = "NoApi"
else:
api_name = most_frequent_api
    # no engine reported an error and only the outputs differ: type 3
if no_exception_info_engine_counter == len(harness_result.outputs):
# return None
filter_type = FilerType.TYPE3.value
for output in harness_result.outputs:
exception_engine_name = testbed_parser.parse_engine_name(output.testbed)
output = output.stderr + output.stdout
key_exception_dic.update({exception_engine_name: output})
api_name = "NoApi"
return [double_output_id, engine_name, key_exception_dic, api_name, filter_type] | 734838a08760e5203701bc2bb21ff38c6e579873 | 3,654,994 |
from io import BytesIO
def open_image(asset):
"""Opens the image represented by the given asset."""
try:
asset_path = asset.get_path()
except NotImplementedError:
        return Image.open(BytesIO(asset.get_contents()))
else:
return Image.open(asset_path) | 11e2ca552ab898801dba4dea9e8776b93532ac11 | 3,654,995 |
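A minimal sketch of calling open_image with a hypothetical in-memory asset (assumes the Image in scope is PIL's):
class InMemoryAsset:
    def __init__(self, data):
        self.data = data
    def get_path(self):
        raise NotImplementedError  # force the BytesIO fallback
    def get_contents(self):
        return self.data
img = open_image(InMemoryAsset(some_jpeg_bytes))  # some_jpeg_bytes is hypothetical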
def gather_audio_video_eavesdropping(x) :
"""
    @param x : an Analysis instance
    @rtype : a list of strings for the concerned category, for example [ 'This application makes phone calls', "This application sends an SMS message 'Premium SMS' to the '12345' phone number" ]
"""
result = []
result.extend ( detect_MediaRecorder_Voice_record(x) )
result.extend ( detect_MediaRecorder_Video_capture(x) )
return result | 01a4cc13865e8810a26d8d08ec3cf684d4a1c8a1 | 3,654,996 |
def vdw_radius_single(element):
"""
Get the Van-der-Waals radius of an atom from the given element. [1]_
Parameters
----------
element : str
The chemical element of the atoms.
Returns
-------
The Van-der-Waals radius of the atom.
If the radius is unknown for the element, `None` is returned.
See also
--------
vdw_radius_protor
References
----------
.. [1] A Bondi,
"Van der Waals volumes and radii."
       J Phys Chem, 68, 441-451 (1964).
Examples
--------
>>> print(vdw_radius_single("C"))
1.7
"""
return _SINGLE_RADII.get(element.upper()) | 6c705ce2309b470c3b6d8445701e831df35853ec | 3,654,997 |
import typing
def evaluate_ins_to_proto(ins: typing.EvaluateIns) -> ServerMessage.EvaluateIns:
"""Serialize flower.EvaluateIns to ProtoBuf message."""
parameters_proto = parameters_to_proto(ins.parameters)
config_msg = metrics_to_proto(ins.config)
return ServerMessage.EvaluateIns(parameters=parameters_proto, config=config_msg) | e7cbbf7d78f2ac37b6248d61fe7e797b151bba31 | 3,654,998 |
def avatar_synth_df(dir, batch_size, num_threads):
"""
Get data for training and evaluating the AvatarSynthModel.
:param dir: The data directory.
:param batch_size: The minibatch size.
:param num_threads: The number of threads to read and process data.
:return: A dataflow for parameter to bitmoji data
"""
df = AvatarSynthDataFlow(dir)
df = process_avatar_synth_data(df, batch_size, num_threads)
return df | 1fcecb5769d7c38c84bcd02cff8159381e113861 | 3,654,999 |