content (string, lengths 35 to 762k) | sha1 (string, length 40) | id (int64, 0 to 3.66M)
---|---|---|
def get_default_accept_image_formats():
"""With default bentoML config, this returns:
['.jpg', '.png', '.jpeg', '.tiff', '.webp', '.bmp']
"""
return [
extension.strip()
for extension in config("apiserver")
.get("default_image_handler_accept_file_extensions")
.split(",")
] | 9f2e8514ed1dcc4d533be0e3f2e501a9a9784abb | 3,654,860 |
import copy
from django.utils.safestring import mark_safe
def cells_handler(results, cl):
"""
Changes result cell attributes based on object instance and field name
"""
suit_cell_attributes = getattr(cl.model_admin, 'suit_cell_attributes', None)
if not suit_cell_attributes:
return results
class_pattern = 'class="'
td_pattern = '<td'
th_pattern = '<th'
for row, result in enumerate(results):
instance = cl.result_list[row]
for col, item in enumerate(result):
field_name = cl.list_display[col]
            attrs = copy.copy(suit_cell_attributes(instance, field_name))
if not attrs:
continue
# Validate
if not isinstance(attrs, dict):
raise TypeError('"suit_cell_attributes" method must return dict. '
'Got: %s: %s' % (
attrs.__class__.__name__, attrs))
# Merge 'class' attribute
if class_pattern in item.split('>')[0] and 'class' in attrs:
css_class = attrs.pop('class')
replacement = '%s%s ' % (class_pattern, css_class)
result[col] = mark_safe(
item.replace(class_pattern, replacement))
# Add rest of attributes if any left
if attrs:
cell_pattern = td_pattern if item.startswith(
td_pattern) else th_pattern
result[col] = mark_safe(
result[col].replace(cell_pattern,
td_pattern + dict_to_attrs(attrs)))
return results | c49bdb89597e191d0c6b65df1b58a80ac6bd5f9e | 3,654,862 |
def dynamic_import(import_string):
"""
Dynamically import a module or object.
"""
# Use rfind rather than rsplit for Python 2.3 compatibility.
lastdot = import_string.rfind('.')
if lastdot == -1:
return __import__(import_string, {}, {}, [])
module_name, attr = import_string[:lastdot], import_string[lastdot + 1:]
parent_module = __import__(module_name, {}, {}, [attr])
return getattr(parent_module, attr) | f6418ff17f3d480b22abac1146d946a5f990cb3c | 3,654,863 |
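A minimal usage sketch for dynamic_import; the dotted paths below are illustrative only.
# Usage sketch: import a module, or an attribute of a module, from a string.
json_module = dynamic_import('json')       # returns the json module itself
dumps_func = dynamic_import('json.dumps')  # returns the json.dumps function
print(dumps_func({'a': 1}))                # '{"a": 1}'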
from typing import Union
from typing import List
def _split_on_parenthesis(text_in: Union[str, List[str]]) -> List[str]:
"""Splits text up into a list of strings based on parenthesis locations."""
if isinstance(text_in, list):
if None in text_in:
return text_in
text_list = text_in
elif isinstance(text_in, str):
text_list = [text_in]
else:
return text_in
for i, text in enumerate(text_list):
if isinstance(text, str) and "(" in text:
text_inside = text[text.find("(")+1:text.rfind(")")]
out_add = _split_list(text, text_inside)
out_add[0] = out_add[0][:-1] # remove (
out_add[2] = out_add[2][1:] # remove )
out_add[1] = _get_unit(text_inside)
out_add = [text for text in out_add if text != ""]
            out_add = [text for text in out_add if text is not None]
text_list[i] = out_add
return _flatten_list(text_list) | 7c7994590838c0293869786841eb7f97c60b16e8 | 3,654,864 |
import requests
def getExternalIP():
""" Returns external ip of system """
ip = requests.get("http://ipv4.myexternalip.com/raw").text.strip()
    if not ip:
ip = requests.get("http://ipv4.icanhazip.com").text.strip()
return ip | 77847063a2da7c6484dd6e569786a012b3a0a62f | 3,654,866 |
def intersection_indices(a, b):
"""
:param list a, b: two lists of variables from different factors.
returns a tuple of
(indices in a of the variables that are in both a and b,
indices of those same variables within the list b)
For example, intersection_indices([1,2,5,4,6],[3,5,1,2]) returns
([0, 1, 2], [2, 3, 1]).
"""
bind = {}
for i, elt in enumerate(b):
if elt not in bind:
bind[elt] = i
mapA = []
mapB = []
for i, itm in enumerate(a):
if itm in bind:
mapA.append(i)
mapB.append(bind.get(itm))
return mapA, mapB | 55264faaa4fd5e6dc5365b675ebd3b7f6a1e1280 | 3,654,867 |
def test_extract_requested_slot_from_entity_with_intent():
"""Test extraction of a slot value from entity with the different name
and certain intent
"""
# noinspection PyAbstractClass
class CustomFormAction(FormAction):
def slot_mappings(self):
return {"some_slot": self.from_entity(entity="some_entity",
intent="some_intent")}
form = CustomFormAction()
tracker = Tracker('default', {'requested_slot': 'some_slot'},
{'intent': {'name': 'some_intent', 'confidence': 1.0},
'entities': [{'entity': 'some_entity',
'value': 'some_value'}]},
[], False, None, {}, 'action_listen')
slot_values = form.extract_requested_slot(CollectingDispatcher(),
tracker, {})
# check that the value was extracted for correct intent
assert slot_values == {'some_slot': 'some_value'}
tracker = Tracker('default', {'requested_slot': 'some_slot'},
{'intent': {'name': 'some_other_intent',
'confidence': 1.0},
'entities': [{'entity': 'some_entity',
'value': 'some_value'}]},
[], False, None, {}, 'action_listen')
slot_values = form.extract_requested_slot(CollectingDispatcher(),
tracker, {})
# check that the value was not extracted for incorrect intent
assert slot_values == {} | 0b457700781183f275a8512e16bac53aa058d762 | 3,654,868 |
def graph_cases(selenium, host):
"""
Factory method that allows to draw preconfigured graphs and manipulate them
with a series of helpful methods.
:type selenium: selenium.webdriver.remote.webdriver.WebDriver
:type host: qmxgraph.server.Host
:rtype: GraphCaseFactory
:return: Factory able to create cases.
"""
return GraphCaseFactory(selenium=selenium, host=host) | 2df048d35a337e8d335844b7a1bb98db77816e5d | 3,654,869 |
def figure_8():
"""
Notes
-----
Colors from Bang Wong's color-blind friendly colormap. Available at:
https://www.nature.com/articles/nmeth.1618
Wong's map acquired from David Nichols page. Available at:
https://davidmathlogic.com/colorblind/.
"""
# choosing test sample and network.
sample = const.SAMPLE_232p3_wet
network_folder = const.FOLDER_PRED_UNET
# we will return a 10 x 10 matthews matrix; each for a crop
matthews_coefs = np.ones((10, 10))
worst_indexes = np.zeros((10, 10))
# a variable to obtain inlay data.
inlay_data = []
# reading input data.
is_registered = sample['registered_path'] is not None
data_pred, data_gs = _pred_and_goldstd(sample,
folder_prediction=network_folder,
is_registered=is_registered,
is_binary=True)
data_pred = data_pred[slice(*sample['segmentation_interval'])]
# comp_color starts as gray (background).
comp_color = np.ones(
(*data_pred[0].shape, 3)
) * (np.asarray((238, 238, 238)) / 255)
for idx, (img_pred, img_gs) in enumerate(zip(data_pred, data_gs)):
# crop images in 100 (256, 256) pieces.
crop_pred = util.view_as_blocks(img_pred,
block_shape=(256, 256))
crop_gs = util.view_as_blocks(img_gs,
block_shape=(256, 256))
for i, _ in enumerate(crop_pred):
for j, _ in enumerate(crop_pred[i]):
# calculate the Matthews coefficient for each crop.
aux_conf = _confusion_matrix(crop_gs[i, j],
crop_pred[i, j])
aux_matthews = _measure_matthews(aux_conf)
# if smaller than previously, save results.
# restricting aux_matthews > 0.1 due to errors in all-TN regions
if (0.1 < aux_matthews < matthews_coefs[i, j]):
matthews_coefs[i, j] = aux_matthews
worst_indexes[i, j] = idx
aux_comp = _comparison_color(crop_gs[i, j], crop_pred[i, j])
comp_color[i*256:(i+1)*256, j*256:(j+1)*256] = aux_comp
# grab inlay data from crops we want to highlight.
for i, j in [(2, 2), (8, 7)]:
inlay_data.append(comp_color[i*256:(i+1)*256, j*256:(j+1)*256])
# Figure 8(a).
plt.figure(figsize=FIGURE_SIZE)
plt.imshow(comp_color)
for idx in np.arange(start=0, stop=2560, step=256): # according to image
plt.axvline(idx, color='white')
plt.axhline(idx, color='white')
matthews_coefs = np.round(matthews_coefs * 100, decimals=2)
for i, j in product(range(10), repeat=2):
facecolor, textcolor = _label_color(matthews_coefs[j, i])
plt.text(x=i*256 + 30, y=j*256 + 50,
s=str(matthews_coefs[j, i]),
fontsize=8,
color=textcolor,
bbox=dict(facecolor=facecolor, alpha=0.9))
_check_if_folder_exists(folder='./figures')
plt.savefig('figures/Fig_08a' + SAVE_FIG_FORMAT, bbox_inches='tight')
plt.close()
# Figures 8(b, c).
indexes = {0: 'b', 1: 'c'}
for idx in indexes.keys():
plt.figure(figsize=FIGURE_SIZE)
plt.imshow(inlay_data[idx])
_check_if_folder_exists(folder='./figures')
plt.savefig(f'figures/Fig_08{indexes[idx]}' + SAVE_FIG_FORMAT,
bbox_inches='tight')
plt.close()
return None | 2a72f24673b96b577fc4f4a23a1869740e90c3ec | 3,654,870 |
import re
def check_threats(message):
"""Return list of threats found in message"""
threats = []
for threat_check in get_threat_checks():
for expression in threat_check["expressions"]:
if re.search(expression, message, re.I | re.U):
del threat_check["expressions"]
threats += [threat_check]
break
return threats | 091d370e4a2e6cbdf674d6dde73bf616b994498b | 3,654,871 |
def data_processing_max(data, column):
"""Compute the max of a column."""
return costly_compute_cached(data, column).max() | 299075ea3e1953abe0ffbd71afb42525c6270c49 | 3,654,872 |
from typing import Sequence
def type_of_target(y):
"""Determine the type of data indicated by the target.
Note that this type is the most specific type that can be inferred.
For example:
* ``binary`` is more specific but compatible with ``multiclass``.
* ``multiclass`` of integers is more specific but compatible with
``continuous``.
* ``multilabel-indicator`` is more specific but compatible with
``multiclass-multioutput``.
Parameters
----------
y : array-like
Returns
-------
target_type : string
One of:
* 'continuous': `y` is an array-like of floats that are not all
integers, and is 1d or a column vector.
* 'continuous-multioutput': `y` is a 2d array of floats that are
not all integers, and both dimensions are of size > 1.
* 'binary': `y` contains <= 2 discrete values and is 1d or a column
vector.
* 'multiclass': `y` contains more than two discrete values, is not a
sequence of sequences, and is 1d or a column vector.
* 'multiclass-multioutput': `y` is a 2d array that contains more
than two discrete values, is not a sequence of sequences, and both
dimensions are of size > 1.
* 'multilabel-indicator': `y` is a label indicator matrix, an array
of two dimensions with at least two columns, and at most 2 unique
values.
* 'unknown': `y` is array-like but none of the above, such as a 3d
array, sequence of sequences, or an array of non-sequence objects.
Examples
--------
>>> import numpy as np
>>> type_of_target([0.1, 0.6])
'continuous'
>>> type_of_target([1, -1, -1, 1])
'binary'
>>> type_of_target(['a', 'b', 'a'])
'binary'
>>> type_of_target([1.0, 2.0])
'binary'
>>> type_of_target([1, 0, 2])
'multiclass'
>>> type_of_target([1.0, 0.0, 3.0])
'multiclass'
>>> type_of_target(['a', 'b', 'c'])
'multiclass'
>>> type_of_target(np.array([[1, 2], [3, 1]]))
'multiclass-multioutput'
>>> type_of_target([[1, 2]])
'multilabel-indicator'
>>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))
'continuous-multioutput'
>>> type_of_target(np.array([[0, 1], [1, 1]]))
'multilabel-indicator'
"""
valid = ((isinstance(y, (Sequence, spmatrix)) or hasattr(y, '__array__'))
and not isinstance(y, str))
if not valid:
raise ValueError('Expected array-like (array or non-string sequence), '
'got %r' % y)
sparse_pandas = (y.__class__.__name__ in ['SparseSeries', 'SparseArray'])
if sparse_pandas:
raise ValueError("y cannot be class 'SparseSeries' or 'SparseArray'")
if is_multilabel(y):
return 'multilabel-indicator'
try:
y = np.asarray(y)
except ValueError:
# Known to fail in numpy 1.3 for array of arrays
return 'unknown'
# The old sequence of sequences format
try:
if (not hasattr(y[0], '__array__') and isinstance(y[0], Sequence)
and not isinstance(y[0], str)):
raise ValueError('You appear to be using a legacy multi-label data'
' representation. Sequence of sequences are no'
' longer supported; use a binary array or sparse'
' matrix instead - the MultiLabelBinarizer'
' transformer can convert to this format.')
except IndexError:
pass
# Invalid inputs
if y.ndim > 2 or (y.dtype == object and len(y) and
not isinstance(y.flat[0], str)):
return 'unknown' # [[[1, 2]]] or [obj_1] and not ["label_1"]
if y.ndim == 2 and y.shape[1] == 0:
return 'unknown' # [[]]
if y.ndim == 2 and y.shape[1] > 1:
suffix = "-multioutput" # [[1, 2], [1, 2]]
else:
suffix = "" # [1, 2, 3] or [[1], [2], [3]]
# check float and contains non-integer float values
if y.dtype.kind == 'f' and np.any(y != y.astype(int)):
# [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]
_assert_all_finite(y)
return 'continuous' + suffix
if (len(np.unique(y)) > 2) or (y.ndim >= 2 and len(y[0]) > 1):
return 'multiclass' + suffix # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]
else:
return 'binary' | 2c721ab04cdba3209794a21b2b25fe10485be106 | 3,654,873 |
from typing import List
def extract_data_from_csv_stream(client: Client, alert_id: str,
attachment_id: str, delimiter: bytes = b'\r\n') -> List[dict]:
"""
Call the attachment download API and parse required fields.
Args:
client (Client): Cyberint API client.
alert_id (str): ID of the alert the attachment belongs to.
attachment_id (str): ID of the attachment itself.
        delimiter (bytes): Delimiter for the CSV file.
Returns:
list(dict): List of all the data found using the wanted fields.
"""
first_line = True
field_indexes = {} # {wanted_field_name: wanted_field_index...}
information_found = []
for csv_line in client.get_csv_file(alert_id, attachment_id, delimiter):
csv_line_separated = csv_line.split(',')
if first_line:
for field in CSV_FIELDS_TO_EXTRACT:
try:
field_indexes[field] = csv_line_separated.index(field)
except ValueError:
pass
first_line = False
else:
try:
extracted_field_data = {field_name.lower(): csv_line_separated[field_index]
for field_name, field_index in field_indexes.items()}
if extracted_field_data:
information_found.append(extracted_field_data)
except IndexError:
pass
return information_found | 992679004ae94da2731b04eaf41918a755d8306a | 3,654,874 |
import re
def validate_password(password, password_repeat=None):
"""
Validate user password.
:param password: password as string
:param password_repeat: repeat password
:return: False - valid password
"""
if password_repeat:
if password != password_repeat:
return "Passwords did not match."
flag = False
if len(password) < 8:
flag = True
elif not re.search("[a-z]", password):
flag = True
elif not re.search("[A-Z]", password):
flag = True
elif not re.search("[0-9]", password):
flag = True
elif re.search("\s", password):
flag = True
if flag:
return (
"Password must contain at least a lower case, an upper case, a number, no spaces "
"and be at least 9 characters."
)
return False | 2987a1bec151e173156ab6a72345864c84dcb61c | 3,654,875 |
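A brief usage sketch for validate_password; the example passwords are arbitrary.
# Usage sketch: the function returns False when the password is valid,
# or an explanatory string when validation fails.
print(validate_password("Weakpass"))               # no digit -> error string
print(validate_password("Str0ngPass"))             # passes all checks -> False
print(validate_password("Str0ngPass", "Other1Aa")) # mismatch -> "Passwords did not match."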
def get_large_circuit(backend: IBMBackend) -> QuantumCircuit:
"""Return a slightly larger circuit that would run a bit longer.
Args:
backend: Backend on which the circuit will run.
Returns:
A larger circuit.
"""
n_qubits = min(backend.configuration().n_qubits, 20)
circuit = QuantumCircuit(n_qubits, n_qubits)
for qubit in range(n_qubits - 1):
circuit.h(qubit)
circuit.cx(qubit, qubit + 1)
circuit.measure(list(range(n_qubits)), list(range(n_qubits)))
return circuit | a35a9ee67d6268911f49936095a703b4fd227a56 | 3,654,876 |
import numpy as np
import torch
def top_k(loc_pred, loc_true, topk):
"""
count the hit numbers of loc_true in topK of loc_pred, used to calculate Precision, Recall and F1-score,
calculate the reciprocal rank, used to calcualte MRR,
calculate the sum of DCG@K of the batch, used to calculate NDCG
Args:
loc_pred: (batch_size * output_dim)
loc_true: (batch_size * 1)
topk:
Returns:
tuple: tuple contains:
hit (int): the hit numbers \n
rank (float): the sum of the reciprocal rank of input batch \n
dcg (float): dcg
"""
    assert topk > 0, "top-k ACC evaluation: k must be at least 1"
loc_pred = torch.FloatTensor(loc_pred)
val, index = torch.topk(loc_pred, topk, 1)
index = index.numpy()
hit = 0
rank = 0.0
dcg = 0.0
for i, p in enumerate(index):
target = loc_true[i]
if target in p:
hit += 1
rank_list = list(p)
rank_index = rank_list.index(target)
# rank_index is start from 0, so need plus 1
rank += 1.0 / (rank_index + 1)
dcg += 1.0 / np.log2(rank_index + 2)
return hit, rank, dcg | 8796312e1fa4d43fb992c0dd7903070a9e061e1b | 3,654,878 |
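A small usage sketch for top_k above, using made-up scores for two samples.
# Usage sketch: 2 samples, 4 candidate locations, hits counted within the top 2.
loc_pred = [[0.1, 0.5, 0.2, 0.2],   # true location 1 is ranked 1st
            [0.4, 0.1, 0.3, 0.2]]   # true location 2 is ranked 2nd
loc_true = [1, 2]
hit, rank, dcg = top_k(loc_pred, loc_true, topk=2)
print(hit, rank, dcg)  # 2 hits, reciprocal-rank sum 1.0 + 0.5, DCG sum 1.0 + 1/log2(3)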
def enviar_contacto(request):
"""
    Send an email with the contact form
    to technical support
"""
formulario = ContactoForm()
if request.method == 'POST':
formulario = ContactoForm(request.POST)
if formulario.is_valid():
mail = EmailMessage(subject='HPC Contacto',
from_email=formulario.cleaned_data['email'],
to=EMAIL_TO)
mail.body = 'El usuario %s ha comentado: %s' \
% (formulario.cleaned_data['nombre'], formulario.cleaned_data['mensaje'])
mail.send()
messages.success(request, "El personal de soporte técnico ha recibido su consulta, "
"pronto nos pondremos en contacto.")
return HttpResponseRedirect('/')
ctx = {'form': formulario}
return render_to_response('contacto/enviar_contacto.html', ctx, context_instance=RequestContext(request)) | 2f17e0cd0fbd5c5df345484c5fe08a420272785a | 3,654,879 |
def dict_depth(d):
"""
    Recursively compute the depth of a dict.
d = {'a':1, 'b': {'c':{}}} --> depth(d) == 3
"""
if isinstance(d, dict):
return 1 + (max(map(dict_depth, d.values())) if d else 0)
return 0 | 16f4164fdea08af9d5846a5866428c81848726b9 | 3,654,880 |
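A quick usage sketch for dict_depth.
# Usage sketch: depth of nested dicts; non-dict values have depth 0.
print(dict_depth({}))                        # 1
print(dict_depth({'a': 1, 'b': {'c': {}}}))  # 3
print(dict_depth('not a dict'))              # 0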
def apercorr(psf,image,objects,psfobj,verbose=False):
"""
Calculate aperture correction.
Parameters
----------
psf : PSF object
The best-fitting PSF model.
image : string or CCDData object
The input image to fit. This can be the filename or CCDData object.
objects : table
The output table of best-fit PSF values for all of the sources.
psfobj : table
The table of PSF objects.
verbose : boolean, optional
Verbose output to the screen. Default is False.
Returns
-------
objects : table
The output table with an "apcorr" column inserted and the aperture correction
applied to "psfmag".
apcor : float
The aperture correction in mag.
cgrow : numpy array
The cumulative aperture correction array.
Example
-------
apcor = apercorr(psf,image,objects,psfobj)
"""
# Get model of all stars except the PSF stars
ind1,ind2 = dln.match(objects['id'],psfobj['id'])
left = np.delete(np.arange(len(objects)),ind1)
neiobj = objects[left]
neimodel = image.copy()
neimodel.data *= 0
neimodel.error[:] = 1
neimodelim = psf.add(neimodel,neiobj)
neimodel.data = neimodelim
# Subtract everything except the PSF stars from the image
resid = image.copy()
if image.mask is not None:
resid.data[~resid.mask] -= neimodel.data[~resid.mask]
else:
        resid.data -= neimodel.data
residim = np.maximum(resid.data-resid.sky,0)
resid.data = residim
resid.sky[:] = 0.0
# Do aperture photometry with lots of apertures on the PSF
# stars
# rk = (20/3.)**(1/11.) * rk-1 for k=2,..,12
rseeing = psf.fwhm()*0.5
apers = np.cumprod(np.hstack((3.0,np.ones(11,float)*(20/3.)**(1/11.))))
#apers = np.array([3.0,3.7965,4.8046,6.0803,7.6947,9.7377,12.3232,
# 15.5952,19.7360,24.9762,31.6077,40.0000])
apercat = aperphot(resid,psfobj,apers)
# Fit curve of growth
# use magnitude differences between successive apertures.
apars, agrow, derr = fitgrowth(apercat,apers,rseeing=psf.fwhm()*0.5)
# Get magnitude difference errors
nstars = len(apercat)
napers = len(apers)
derr = np.zeros((nstars,napers-1),float)
for i in range(len(apers)-1):
err1 = apercat['magerr_aper'+str(i+1)]
err2 = apercat['magerr_aper'+str(i+2)]
derr[:,i] = np.sqrt(err1**2+err2**2)
wt = 1/derr**2
# THE CURVE TURNS OVER AT LARGE RADIUS!!!!???
# It shouldn't EVER do that.
# Calculate empirical growth curve
egrow,egrowerr = empgrowth(apercat,apers)
# Get "adopted" growth curve by taking the weighted average
# of the analytical and empirical growth curves
# with the empirical weighted higher at small r and
# the analytical weighted higher at large r
gwt = np.mean(wt,axis=0) # mean weights over the stars
adopgrow = (egrow*gwt + agrow*(1/(0.1*agrow))**2) / (gwt+(1/(0.1*agrow))**2)
adopgrowerr = 1 / (gwt+(1/(0.1*agrow))**2)
# Adopted cumulative growth curve
# sum from the outside in, with an outer tail given by
# extrapolation of the analytic model to 2*outer aperture
cadopgrow = np.cumsum(adopgrow[::-1])[::-1]
    # add extrapolation from rlast to 2*rlast
tail = diffprofile([2*apers[-1],apers[-1]],*apars)
cadopgrow += tail
cadopgrow = np.hstack((cadopgrow,tail)) # add value for outer aperture
cadopgrowerr = np.hstack((adopgrowerr,0.0))
# Calculate "total" magnitude for the PSF stars
totmag,toterr = totphot(apercat,apers,cadopgrow,cadopgrowerr)
# Calculate median offset between total and PSF magnitude
# psf - total
ind1,ind2 = dln.match(objects['id'],psfobj['id'])
diffmag = objects['psfmag'][ind1] - totmag[ind2]
apcor = np.median(diffmag) # positive value
# Apply aperture correction to the data
# add apcorr column and keep initial mags in instmag
objects['apcorr'] = apcor
objects['inst_psfmag'] = objects['psfmag']
objects['psfmag'] -= apcor # make brighter
if verbose:
print('Aperture correction = %.3f mag' % apcor)
return objects, apcor, cadopgrow | bc4bb936801fe06a55648ed9a11545eacb24fd7d | 3,654,881 |
from typing import Dict
from typing import Tuple
def product_loading_factor_single_discount(skus: str, product_list: Dict[str, object], product: Dict[str, int], product_name: str, rules: list) -> Tuple[int, str]:
"""
Single product loading factor for calculating discounts with one rule
Parameters
----------
skus: str
        String containing individual product skus
product_list: Dict[str, object]
Product discount list used for applying discounts
product: Dict[str, int]
Product list used for returning the current products price
product_name: str
The name of the product
rules: List
List of discount rules names to apply
Returns
-------
Tuple:
price: int
Calculated price
skus: str
Updated skus list
"""
number_of_products = skus.count(product_name)
product_price = product[product_name]
product_discount_data_object = product_list[product_name][rules[0]]
discount_threshold = product_discount_data_object['discount_threshold']
while number_of_products > 0:
if number_of_products > 0 and number_of_products % discount_threshold == 0:
product_discount_data_object['count'] += 1
number_of_products -= discount_threshold
else:
number_of_products -= 1
applied_discount = product_discount_data_object['count']
remainder_product_count = skus.count(product_name) - (applied_discount * discount_threshold)
discount_to_apply = product_discount_data_object['discount']
apply_discount = (applied_discount * product_price * discount_threshold) - (applied_discount * discount_to_apply)
price = apply_discount + (remainder_product_count * product_price)
return price, skus | 44e12d02be7c8b54d1ea64ef2dc3cbec29a870bc | 3,654,882 |
import re
def clean_text(page):
"""Return the clean-ish running text parts of a page."""
return re.sub(_UNWANTED, "", _unescape_entities(page)) | 8042cc5049b2d8b6646c10655b84c5552e315274 | 3,654,883 |
def calculate_recall(tp, n):
"""
:param tp: int
Number of True Positives
:param n: int
Number of total instances
:return: float
Recall
"""
if n == 0:
return 0
return tp / n | b8a36488af59e036acdb50821716ae34287e6b8f | 3,654,884 |
def authenticate_user_password(password : 'bytes', encryption_dict : 'dict', id_array : 'list'):
"""
Authenticate the user password.
Parameters
----------
password : bytes
The password to be authenticated as user password.
encryption_dict : dict
The dictionary containing all the information about the encryption procedure.
id_array : list
The two elements array ID, contained in the trailer dictionary.
Returns
-------
The encryption key if the user password is valid, None otherwise.
"""
R = encryption_dict["R"]
U = encryption_dict["U"]
U = U.value if isinstance(U, PDFLiteralString) else unhexlify(U.value)
encryption_key = compute_encryption_key(password, encryption_dict, id_array)
if R == 2:
cipher = rc4(PASSWORD_PADDING, encryption_key)
else:
input_to_md5 = bytearray()
input_to_md5.extend(PASSWORD_PADDING)
input_to_md5.extend(id_array[0])
computed_hash = md5(input_to_md5).digest()
cipher = rc4(computed_hash, encryption_key)
for counter in range(1, 20):
cipher = rc4(cipher, bytes(x ^ counter for x in encryption_key))
correct_password = (U[:16] == cipher[:16]) if R >= 3 else (U == cipher)
return encryption_key if correct_password else None | b608a921fb02cedf9da9d8ea8e0d8f8139a6a9bd | 3,654,885 |
def date_to_num(date):
"""Convert datetime to days since 1901"""
num = (date.year - 1901) * 365.25
num += [
0, 31, 59.25, 90.25, 120.25,
151.25, 181.25, 212.25, 243.25,
273.25, 304.25, 334.25
][date.month - 1]
num += date.day
return int(num) | 88e342e0fc80a5998df8e5f1ab0002e0f7fe808e | 3,654,886 |
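A quick usage sketch for date_to_num; the dates are arbitrary.
# Usage sketch: days since 1901 for a couple of dates.
from datetime import date
print(date_to_num(date(1901, 1, 1)))  # 1
print(date_to_num(date(2000, 3, 1)))  # 36220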
from typing import Tuple
def load_world(filename: str, size: Tuple[int, int], resolution: int) -> np.array:
"""Load a preconstructred track to initialize world.
Args:
filename: Full path to the track file (png).
size: Width and height of the map
resolution: Resolution of the grid map (i.e. into how many cells)
one meter is divided into.
Returns:
An initialized gridmap based on the preconstructed track as
an n x m dimensional numpy array, where n is the width (num cells)
and m the height (num cells) - (after applying resolution).
"""
width_in_cells, height_in_cells = np.multiply(size, resolution)
world = np.array(png_to_ogm(
filename, normalized=True, origin='lower'))
# If the image is already in our desired shape, no need to rescale it
if world.shape == (height_in_cells, width_in_cells):
return world
    # Otherwise, scale the image to our desired size (rows = height, cols = width).
    resized_world = resize(world, (height_in_cells, width_in_cells))
return resized_world | 8ccf97efb83b3c365fb95a2732d0737100d5f254 | 3,654,887 |
import torch
def generate_image(model, img_size, n_flow, n_block, n_sample, temp=0.7, ctx=None, label=None):
"""Generate a single image from a Glow model."""
# Determine sizes of each layer
z_sample = []
z_shapes = calc_z_shapes(3, img_size, n_flow, n_block)
for z in z_shapes:
z_new = torch.randn(n_sample, *z) * temp
z_sample.append(z_new.to(device))
assert ctx is None or label is None # can either insert label or context
if label is not None:
return model.reverse(z_sample, label=label)
else:
# handles both cases where only context is provided or no label or context is provided
return model.reverse(z_sample, ctx=ctx) | bee9c45cbbd028351e580729da51092604f87288 | 3,654,888 |
def quote_spaces(arg):
"""Generic function for putting double quotes around any string that
has white space in it."""
if ' ' in arg or '\t' in arg:
return '"%s"' % arg
else:
return str(arg) | e0171c3b0eee18c7fcc44cbdfe007949feabba9a | 3,654,889 |
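A quick usage sketch for quote_spaces.
# Usage sketch: only arguments containing whitespace get quoted.
print(quote_spaces('simple'))      # simple
print(quote_spaces('with space'))  # "with space"
print(quote_spaces('has\ttab'))    # quoted, because it contains a tab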
from pathlib import Path
from typing import Iterator
from urllib.parse import unquote, urlparse
import backoff
import requests
import shutil
def download_file(url) -> Iterator[Path]:
"""Better download"""
name = Path(urlparse(unquote(url)).path).name
with mktempdir() as tmpdir:
@backoff.on_exception(backoff.expo, requests.exceptions.RequestException, max_time=30)
def get():
with requests.get(url, stream=True) as r:
save_path = tmpdir.joinpath(name)
with open(save_path, "wb") as f:
shutil.copyfileobj(r.raw, f, length=16 * 1024 * 1024)
return save_path
yield get() | 5ff6c05e5e1eb3379918c65d945d57af7e8d56be | 3,654,891 |
def creategui(handlerfunctions):
"""Initializes and returns the gui."""
gui = GUI(handlerfunctions)
# root.title('DBF Utility')
return gui | 17be3bae6eb105aca770327898a01027271e6f9c | 3,654,892 |
import numpy as np
import torch
import torch.nn.functional as F
def rank_src_trgs(enc_dec_gen, src_list, trg_list):
"""
"""
batch_size = len(trg_list)
x, y = enc_dec_gen.encode_inputs(src_list,
trg_list,
add_bos=True,
add_eos=True)
y_len = torch.sum(y.ne(enc_dec_gen.model.PAD), -1)
with torch.no_grad():
y_target = y[:, 1:]
y = y[:, :-1]
enc_self_attn_mask = enc_dec_gen.model.get_attn_mask(x, x)
enc_outputs = enc_dec_gen.model.encoder(x,
enc_self_attn_mask)
enc_output = enc_outputs[0]
n = y.size(0)//x.size(0)
x = x.repeat([1,n]).view(y.size(0), -1)
enc_output = enc_output.repeat([1, n, 1]).view(x.size(0), x.size(1), -1)
dec_self_attn_mask = enc_dec_gen.model.get_subsequent_mask(y)
dec_self_attn_mask = dec_self_attn_mask | enc_dec_gen.model.get_attn_mask(y, y)
dec_enc_attn_mask = enc_dec_gen.model.get_attn_mask(y, x)
trg_embedding = None
if enc_dec_gen.model.share_src_trg_emb == True:
trg_embedding = enc_dec_gen.model.encoder.src_embedding
dec_outputs = enc_dec_gen.model.decoder(y,
enc_output,
dec_self_attn_mask,
dec_enc_attn_mask,
trg_embedding=trg_embedding)
logits = dec_outputs[0]
logits = logits.view(-1, enc_dec_gen.trg_vocab_size)
log_probs = -F.nll_loss(F.log_softmax(logits, -1),
y_target.contiguous().view(-1),
ignore_index=enc_dec_gen.model.PAD,
reduction='none')
log_probs = torch.sum(log_probs.view(batch_size, -1), -1)
norm = 1
if enc_dec_gen.normalize == "gnmt":
norm = torch.pow(5. + y_len, enc_dec_gen.gamma) / np.power(6., enc_dec_gen.gamma)
elif enc_dec_gen.normalize == "linear":
norm = y_len
log_probs = log_probs / norm
log_probs = log_probs.cpu().numpy()
return log_probs | f5472889489676e21a7bec032e13ef99c850f2da | 3,654,893 |
def plugin_poll(handle):
""" Extracts data from the sensor and returns it in a JSON document as a Python dict.
Available for poll mode only.
Args:
handle: handle returned by the plugin initialisation call
Returns:
returns a sensor reading in a JSON document, as a Python dict, if it is available
None - If no reading is available
Raises:
Exception
"""
try:
time_stamp = utils.local_timestamp()
data = {'asset': handle['assetName']['value'],
'timestamp': time_stamp,
'readings': {"random": next(generate_data())}}
except (Exception, RuntimeError) as ex:
_LOGGER.exception("Exception is {}".format(str(ex)))
raise ex
else:
return data | c3d7b32b6816c81d244f689ce4185d1dcd9a16fe | 3,654,894 |
import torch
def ltria2skew(L):
"""
assume L has already passed the assertion check
:param L: lower triangle matrix, shape [N, 3]
:return: skew sym A [N, 3, 3]
"""
if len(L.shape) == 2:
N = L.shape[0]
# construct the skew-sym matrix
A = torch.zeros(N, 3, 3).cuda() # [N, 3, 3]
A[:, 1, 0] = L[:, 0]
A[:, 2, 0] = L[:, 1]
A[:, 2, 1] = L[:, 2]
A[:, 0, 1] = -L[:, 0]
A[:, 0, 2] = -L[:, 1]
A[:, 1, 2] = -L[:, 2]
return A
elif len(L.shape) == 1:
A = torch.zeros(3, 3).cuda()
A[1, 0] = L[0]
A[2, 0] = L[1]
A[2, 1] = L[2]
A[0, 1] = -L[0]
A[0, 2] = -L[1]
A[1, 2] = -L[2]
return A
else:
raise NotImplementedError | 6e74c181fc8efcdc28ba35578f31fb6f2a7fa1bb | 3,654,896 |
def gamma_contrast(data_sample, num_patches=324, num_channel=2, shape_data=None,
gamma_range=(0.5, 1.7), invert_image=False, per_channel=False,
retain_stats=False):
"""Performs gamma contrast transformation"""
epsilon = 1e-7
data_sample_patch = []
gamma_range_tensor = tf.convert_to_tensor(gamma_range)
for patch in range(num_patches):
if invert_image:
data_sample = - data_sample
if not per_channel:
# if np.random.random() < 0.5 and gamma_range[0] < 1:
# gamma = np.random.uniform(gamma_range[0], 1)
# else:
# gamma = np.random.uniform(max(gamma_range[0], 1), gamma_range[1])
def true_fn():
gamma_fn = tf.random.uniform(shape=(), minval=gamma_range[0], maxval=1, seed=1)
return gamma_fn
def false_fn():
gamma_fn = tf.random.uniform(shape=(), minval=tf.math.maximum(gamma_range[0], 1),
maxval=gamma_range[1], seed=1)
return gamma_fn
cond = tf.math.logical_and(tf.math.less(tf.random.uniform(shape=(), minval=0, maxval=0.99, seed=1), 0.5),
tf.math.less(gamma_range_tensor[0], 1))
gamma = tf.cond(cond, true_fn, false_fn)
min_val_ten = tf.math.reduce_min(data_sample[patch, ...])
range_tensor = tf.math.reduce_max(data_sample[patch, ...]) - min_val_ten
data_sample_norm = tf.math.divide(tf.math.subtract(data_sample[patch, ...], min_val_ten),
tf.math.add(range_tensor, epsilon))
data_img = tf.image.adjust_gamma(image=data_sample_norm, gamma=gamma,
gain=tf.math.add(range_tensor, epsilon))
data_img = tf.math.add(data_img, min_val_ten)
data_sample_patch.append(data_img)
else:
data_sample_per_channel = []
for c in range(num_channel):
                def true_fn():
                    gamma_fn = tf.random.uniform(shape=(), minval=gamma_range[0], maxval=1, seed=1)
                    return gamma_fn
                def false_fn():
                    gamma_fn = tf.random.uniform(shape=(), minval=tf.math.maximum(gamma_range[0], 1),
                                                 maxval=gamma_range[1], seed=1)
                    return gamma_fn
cond = tf.math.logical_and(tf.math.less(tf.random.uniform(shape=(), minval=0, maxval=0.99, seed=1), 0.5),
tf.math.less(gamma_range_tensor[0], 1))
gamma = tf.cond(cond, true_fn, false_fn)
min_val_ten = tf.math.reduce_min(data_sample[patch, :, :, :, c])
                range_tensor = tf.math.reduce_max(data_sample[patch, :, :, :, c]) - min_val_ten
data_sample_norm = tf.math.divide(tf.math.subtract(data_sample[patch, ..., c], min_val_ten),
tf.math.add(range_tensor, epsilon))
data_img = tf.image.adjust_gamma(image=data_sample_norm, gamma=gamma,
gain=tf.math.add(range_tensor, epsilon))
data_img = tf.math.add(data_img, min_val_ten)
data_sample_per_channel.append(data_img)
data_sample_channel = tf.stack(data_sample_per_channel)
data_sample_channel = tf.transpose(data_sample_channel, perm=[1, 2, 3, 0])
data_sample_patch.append(data_sample_channel)
data_sample_return = tf.stack(data_sample_patch)
# data_sample_return = tf.transpose(data_sample_return, perm=[1, 2, 3, 4, 0])
return data_sample_return | 373f3f7e602de69c1cbce328ec3ff1322a44d013 | 3,654,897 |
def _converge(helper, rcs, group):
"""
Function to be passed to :func:`_oob_disable_then` as the ``then``
parameter that triggers convergence.
"""
return group.trigger_convergence(rcs) | 8aab701dc7e29d83d6c8ab8b71c37837feb72847 | 3,654,898 |
def HybridClientFactory(jid, password):
"""
Client factory for XMPP 1.0.
This is similar to L{client.XMPPClientFactory} but also tries non-SASL
    authentication.
"""
a = HybridAuthenticator(jid, password)
return xmlstream.XmlStreamFactory(a) | 283d9182c0e7bce254bc9f04cd42c15b9e3aed46 | 3,654,899 |
def home():
"""Home page."""
form = LoginForm(request.form)
# Handle logging in
if request.method == 'POST':
if form.validate_on_submit():
login_user(form.user)
flash('You are logged in.', 'success')
redirect_url = request.args.get('next') or url_for('user.members')
return redirect(redirect_url)
else:
flash_errors(form)
return dict(form=form) | 4bed46f095b31a61746c382460b6f477a4aa215e | 3,654,900 |
def countingsort(A):
"""
Sort the list A. A has to be a list of integers.
Every element of the list A has to be non-negative.
@param A: the list that should get sorted
@return the sorted list
"""
if len(A) == 0:
return []
C = [0] * (max(A)+1)
B = [""] * len(A)
# Count the number of elements
for el in A:
C[el] += 1
# Now C[i] contains how often i is in A
    for index in range(1, len(C)):
C[index] += C[index-1]
for el in A[::-1]:
B[C[el]-1] = el
C[el] -= 1
return B | ebdaac4580f910873f77878978b57e193334a4ea | 3,654,901 |
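A quick usage sketch for countingsort.
# Usage sketch: sort a small list of non-negative integers.
print(countingsort([4, 1, 3, 1, 0]))  # [0, 1, 1, 3, 4]
print(countingsort([]))               # []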
import math
def calc_obstacle_map(ox, oy, resolution, vr):
"""
Build obstacle map according to the distance of a
certain grid to obstacles. Treat the area near the
obstacle within the turning radius of the vehicle
as the obstacle blocking area and mark it as TRUE.
"""
min_x = round(min(ox))
min_y = round(min(oy))
max_x = round(max(ox))
max_y = round(max(oy))
x_width = round(max_x - min_x)
y_width = round(max_y - min_y)
# obstacle map generation
obstacle_map = [[False for _ in range(y_width)] for _ in range(x_width)]
for ix in range(x_width):
x = ix + min_x
for iy in range(y_width):
y = iy + min_y
# print(x, y)
for iox, ioy in zip(ox, oy):
d = math.sqrt((iox - x)**2 + (ioy - y)**2)
if d * resolution <= vr:
obstacle_map[ix][iy] = True
break
return obstacle_map, min_x, min_y, max_x, max_y, x_width, y_width | 87d44c5eb799bf3b2ea64ac0717b8d7f260a4a37 | 3,654,902 |
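A tiny usage sketch for calc_obstacle_map; the obstacle layout, resolution, and clearance radius are made up.
# Usage sketch: four obstacle points, 1 m grid resolution, 1.5 m clearance radius.
ox = [0.0, 5.0, 5.0, 0.0]
oy = [0.0, 0.0, 5.0, 5.0]
omap, min_x, min_y, max_x, max_y, x_w, y_w = calc_obstacle_map(ox, oy, resolution=1.0, vr=1.5)
print(x_w, y_w)    # 5 5
print(omap[0][0])  # True: the (0, 0) cell lies within 1.5 m of an obstacle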
import itertools
def dilate(poly,eps):
"""
The function dilates a polytope.
    For a given polytope, a polytopic over-approximation of the $eps$-dilated set is computed.
An e-dilated Pe set of P is defined as:
Pe = {x+n|x in P ^ n in Ball(e)}
where Ball(e) is the epsilon neighborhood with norm |n|<e
The current implementation is quite crude, hyper-boxes are placed over the original vertices
and the returned polytope is a qhull of these new vertices.
:param poly: original polytope
:param eps: positive scalar value with which the polytope is dilated
:return: polytope
"""
if isinstance(poly,polytope.Region):
dil_reg = []
for pol in poly.list_poly:
assert isinstance(pol,polytope.Polytope)
dil_reg += [dilate(pol, eps)]
return polytope.Region(dil_reg)
vertices = extreme(poly)
dim = len(vertices[0]) # this is the dimensionality of the space
dil_eps = dim * [[-eps,eps]]
dil_eps_v = [np.array(n) for n in itertools.product(*dil_eps)] # vectors with (+- eps,+- eps, +- eps,...)
new_vertices = []
for v,d in itertools.product(vertices,dil_eps_v):
new_vertices += [[np.array(v).flatten() + np.array(d).flatten()]]
# make box
# print("add vertices part:", np.array(v).flatten() + np.array(d).flatten())
VV = np.concatenate(new_vertices)
# print("V", VV)
return qhull(VV) | 0ae4d8ea9cb6977939e4d3bed6454ed55e8855cf | 3,654,903 |
def read_files_to_vardf(map_df, df_dict, gridclimname, dataset, metadata,
file_start_date, file_end_date, file_delimiter,
file_time_step, file_colnames,
subset_start_date, subset_end_date, min_elev, max_elev, variable_list=None):
"""
# reads in the files to generate variables dataframes
map_df: (dataframe) the mappingfile clipped to the subset that will be read-in
df_dict: (dict) an existing dictionary where new computations will be stored
gridclimname: (str) the suffix for the dataset to be named; if None provided, default to dataset name
dataset: (str) the name of the dataset catalogged into map_df
metadata: (str) the dictionary that contains the metadata explanations; default is None
file_start_date: (date) the start date of the files that will be read-in; default is None
file_end_date: (date) the end date for the files that will be read in; default is None
file_delimiter: (str) a file parsing character to be used for file reading
file_time_step: (str) the timedelta code for the difference between time points; default is 'D' (daily)
file_colnames: (list) the list of shorthand variables; default is None
subset_start_date: (date) the start date of a date range of interest
subset_end_date: (date) the end date of a date range of interest
min_elev: (float) minimum elevation permitted
max_elev: (float) maximum elevation permitted
"""
# start time
starttime = pd.datetime.now()
# date range from ogh_meta file
met_daily_dates=pd.date_range(file_start_date, file_end_date, freq=file_time_step)
met_daily_subdates=pd.date_range(subset_start_date, subset_end_date, freq=file_time_step)
# omit null entries or missing data file
map_df = map_df.loc[pd.notnull(map_df[dataset]), :]
print('Number of data files within elevation range ({0}-{1} m): {2}'.format(min_elev, max_elev, len(map_df)))
# establish default list of variables
if isinstance(variable_list, type(None)):
variable_list = metadata[dataset]['variable_list']
# iterate through each data file
for eachvar in metadata[dataset]['variable_list']:
# exclude YEAR, MONTH, and DAY
if eachvar not in ['YEAR', 'MONTH', 'DAY'] and eachvar in variable_list:
# identify the variable column index
usecols = [metadata[dataset]['variable_list'].index(eachvar)]
# initiate df as a list
df_list=[]
# loop through each file
for ind, row in map_df.iterrows():
# consider rewriting the params to just select one column by index at a time
var_series = dask.delayed(pd.read_table)(filepath_or_buffer=row[dataset],
delimiter=file_delimiter,header=None,usecols=usecols,
names=[tuple(row[['FID', 'LAT', 'LONG_']])])
# append the series into the list of series
df_list.append(var_series)
# concatenate list of series (axis=1 is column-wise) into a dataframe
df1 = dask.delayed(pd.concat)(df_list, axis=1)
# set and subset date_range index
df2 = df1.set_index(met_daily_dates, inplace=False).loc[met_daily_subdates]
# assign dataframe to dictionary object
df_dict['_'.join([eachvar, gridclimname])] = dask.compute(df2)[0]
print(eachvar+ ' dataframe reading complete:' + str(pd.datetime.now()-starttime))
return(df_dict) | 31bc460eb0035d3bbd51f266c96a53f537495a53 | 3,654,904 |
import pickle
def read_file(pickle_file_name):
"""Reads composite or non-composite class-activation maps from Pickle file.
:param pickle_file_name: Path to input file (created by
`write_standard_file` or `write_pmm_file`).
:return: gradcam_dict: Has the following keys if not a composite...
gradcam_dict['denorm_predictor_matrices']: See doc for
`write_standard_file`.
gradcam_dict['cam_matrices']: Same.
gradcam_dict['guided_cam_matrices']: Same.
gradcam_dict['full_storm_id_strings']: Same.
gradcam_dict['storm_times_unix_sec']: Same.
gradcam_dict['model_file_name']: Same.
gradcam_dict['target_class']: Same.
gradcam_dict['target_layer_name']: Same.
gradcam_dict['sounding_pressure_matrix_pa']: Same.
...or the following keys if composite...
gradcam_dict['mean_denorm_predictor_matrices']: See doc for
`write_pmm_file`.
gradcam_dict['mean_cam_matrices']: Same.
gradcam_dict['mean_guided_cam_matrices']: Same.
gradcam_dict['model_file_name']: Same.
gradcam_dict['non_pmm_file_name']: Same.
gradcam_dict['pmm_max_percentile_level']: Same.
gradcam_dict['mean_sounding_pressures_pa']: Same.
:return: pmm_flag: Boolean flag. True if `gradcam_dict` contains
composite, False otherwise.
:raises: ValueError: if dictionary does not contain expected keys.
"""
pickle_file_handle = open(pickle_file_name, 'rb')
gradcam_dict = pickle.load(pickle_file_handle)
pickle_file_handle.close()
pmm_flag = MEAN_PREDICTOR_MATRICES_KEY in gradcam_dict
if pmm_flag:
missing_keys = list(
set(PMM_FILE_KEYS) - set(gradcam_dict.keys())
)
else:
missing_keys = list(
set(STANDARD_FILE_KEYS) - set(gradcam_dict.keys())
)
if len(missing_keys) == 0:
return gradcam_dict, pmm_flag
error_string = (
'\n{0:s}\nKeys listed above were expected, but not found, in file '
'"{1:s}".'
).format(str(missing_keys), pickle_file_name)
raise ValueError(error_string) | 3f2f7fb1a5a904f494e64f840f6a8d6ae207c900 | 3,654,905 |
import string
def tacodev(val=None):
"""a valid taco device"""
if val in ('', None):
return ''
    val = str(val)
if not tacodev_re.match(val):
raise ValueError('%r is not a valid Taco device name' % val)
return val | 4cffd52f9e7673ad45e697aadfbb3515ecd3d209 | 3,654,906 |
def decode_layout_example(example, input_range=None):
"""Given an instance and raw labels, creates <inputs, label> pair.
Decoding includes.
1. Converting images from uint8 [0, 255] to [0, 1.] float32.
2. Mean subtraction and standardization using hard-coded mean and std.
3. Convert boxes from yxyx [0-1] to xyxy un-normalized.
4. Add 1 to all labels to account for background/padding object at label 0.
5. Shuffling dictionary keys to be consistent with the rest of the code.
Args:
example: dict; Input image and raw labels.
input_range: tuple; Range of input. By default we use Mean and StdDev
normalization.
Returns:
A dictionary of {'inputs': input image, 'labels': task label}.
"""
image = tf.image.convert_image_dtype(example['image'], dtype=tf.float32)
# Normalize.
if input_range:
image = image * (input_range[1] - input_range[0]) + input_range[0]
else:
mean_rgb = tf.constant(MEAN_RGB, shape=[1, 1, 3], dtype=tf.float32)
std_rgb = tf.constant(STDDEV_RGB, shape=[1, 1, 3], dtype=tf.float32)
image = (image - mean_rgb) / std_rgb
boxes = example['objects']['boxes']
target = {
'boxes': boxes,
'labels': example['objects']['label'] + 1, # 0'th class is padding.
'binary_labels': example['objects']['binary_label'] + 1,
'desc_id': example['objects']['desc_id'],
'resource_id': example['objects']['resource_id'],
'name_id': example['objects']['name_id'],
'obj_mask': example['objects']['obj_mask'],
}
# Filters objects to exclude degenerate boxes.
valid_bbx = tf.logical_and(boxes[:, 2] > boxes[:, 0],
boxes[:, 3] > boxes[:, 1])
# -1 is ROOT node, remove it for training & eval.
valid_node = tf.greater(example['objects']['label'], -1)
keep = tf.where(tf.logical_and(valid_bbx, valid_node))[:, 0]
target_kept = {k: tf.gather(v, keep) for k, v in target.items()}
target_kept['orig_size'] = tf.cast(tf.shape(image)[0:2], dtype=tf.int32)
target_kept['size'] = tf.identity(target_kept['orig_size'])
return {
'inputs': image,
'label': target_kept,
} | a54b26a8b4d82a6a9e5bc093f9f59b7a74450916 | 3,654,908 |
import plotly.figure_factory as ff
def bact_plot(samples, bacteroidetes, healthiest_sample):
"""
    Returns a graph of the distribution of the data.
    Parameters
    ==========
samples : pandas.DataFrame
The sample data frame. Must contain column `Bacteroidetes` and
`Firmicutes` that contain the percentage of those phyla.
Returns
=======
plotly graph
"""
hist_data = [samples["Bacteroidetes"]]
group_labels = ["Bacteroidetes"]
bact = ff.create_distplot(hist_data, group_labels, show_hist=False)
bact["layout"].update(title="Bacteroidetes Sample Distribution ")
bact["layout"].update(
showlegend=False,
annotations=[
dict(
x=bacteroidetes,
y=0,
xref="x",
yref="y",
text="You are here!",
showarrow=True,
arrowhead=2,
arrowsize=1,
arrowwidth=2,
arrowcolor="#0e0f36",
ax=70,
ay=-30,
bordercolor="#06a300",
borderwidth=2,
borderpad=4,
bgcolor="#69f564",
opacity=0.8,
),
dict(
x=healthiest_sample["Bacteroidetes"],
y=0,
xref="x",
yref="y",
text="Healthiest Sample",
showarrow=True,
arrowhead=2,
arrowsize=1,
arrowwidth=2,
arrowcolor="#0e0f36",
ax=70,
ay=30,
bordercolor="#4c0acf",
borderwidth=2,
borderpad=4,
bgcolor="#b977f2",
opacity=0.8,
),
],
)
return bact | d21bb3bd534f92cc6eed3bb467fe355abcf1afd2 | 3,654,909 |
def xdraw_lines(lines, **kwargs):
"""Draw lines and optionally set individual name, color, arrow, layer, and
width properties.
"""
guids = []
for l in iter(lines):
sp = l['start']
ep = l['end']
name = l.get('name', '')
color = l.get('color')
arrow = l.get('arrow')
layer = l.get('layer')
width = l.get('width')
guid = add_line(Point3d(*sp), Point3d(*ep))
if not guid:
continue
obj = find_object(guid)
if not obj:
continue
attr = obj.Attributes
if color:
attr.ObjectColor = FromArgb(*color)
attr.ColorSource = ColorFromObject
else:
attr.ColorSource = ColorFromLayer
if arrow == 'end':
attr.ObjectDecoration = EndArrowhead
if arrow == 'start':
attr.ObjectDecoration = StartArrowhead
if layer and find_layer_by_fullpath:
index = find_layer_by_fullpath(layer, True)
if index >= 0:
attr.LayerIndex = index
if width:
attr.PlotWeight = width
attr.PlotWeightSource = PlotWeightFromObject
attr.Name = name
obj.CommitChanges()
guids.append(guid)
return guids | bebeb2d400ed8c779281b67f01007e953f15460f | 3,654,911 |
def _emit_params_file_action(ctx, path, mnemonic, cmds):
"""Helper function that writes a potentially long command list to a file.
Args:
ctx (struct): The ctx object.
path (string): the file path where the params file should be written.
      mnemonic (string): the action mnemonic.
cmds (list<string>): the command list.
Returns:
(File): an executable file that runs the command set.
"""
filename = "%s.%sFile.params" % (path, mnemonic)
f = ctx.new_file(ctx.configuration.bin_dir, filename)
ctx.file_action(output = f,
content = "\n".join(["set -e"] + cmds),
executable = True)
return f | adafb75e24b2023ad2926e4248e8b2e1e6966b8e | 3,654,912 |
import gettext
def annotate_validation_results(results, parsed_data):
"""Annotate validation results with potential add-on restrictions like
denied origins."""
if waffle.switch_is_active('record-install-origins'):
denied_origins = sorted(
DeniedInstallOrigin.find_denied_origins(parsed_data['install_origins'])
)
for origin in denied_origins:
insert_validation_message(
results,
message=gettext(
'The install origin {origin} is not permitted.'.format(
origin=origin
)
),
)
return results | 659ec92f98c2678de2ee8f2552da77c5394047c5 | 3,654,913 |
import textwrap
def ignore_firstline_dedent(text: str) -> str:
"""Like textwrap.dedent(), but ignore first empty lines
Args:
text: The text the be dedented
Returns:
The dedented text
"""
out = []
started = False
for line in text.splitlines():
if not started and not line.strip():
continue
if not started:
started = True
out.append(line)
return textwrap.dedent("\n".join(out)) | 04bde49e72e07552f2f88e9112546d00b85a2879 | 3,654,915 |
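A quick usage sketch for ignore_firstline_dedent.
# Usage sketch: leading blank lines are dropped before dedenting.
text = """
    first line
        indented line
"""
print(ignore_firstline_dedent(text))
# first line
#     indented line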
def read_file(filename):
"""
Read a file and return its binary content. \n
@param filename : filename as string. \n
@return data as bytes
"""
with open(filename, mode='rb') as file:
file_content = file.read()
return file_content | 2417aa5cfa0d43303f9f6103e8b1fee9e8d652e2 | 3,654,916 |
def getdictkeys(value):
"""
Returns the ordered keys of a dict
"""
if type(value) == dict:
keys = list(value.keys())
keys.sort(key=toint)
return keys
return [] | adf49dbfa46f5174aa1435756c6e099b08b7c6c9 | 3,654,917 |
def exp_lr_scheduler(optimizer, epoch, init_lr=5e-3, lr_decay_epoch=40):
"""Decay learning rate by a factor of 0.1 every lr_decay_epoch epochs."""
lr = init_lr * (0.1**(epoch // lr_decay_epoch))
if epoch % lr_decay_epoch == 0:
print('LR is set to {}'.format(lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return optimizer | 520a7960ee589e033920cf182d75ea896cc8b8b7 | 3,654,918 |
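A brief usage sketch for exp_lr_scheduler, assuming a standard PyTorch optimizer; the model and hyper-parameters below are placeholders.
# Usage sketch: decay the learning rate of an SGD optimizer during training.
model = torch.nn.Linear(4, 2)  # placeholder model
optimizer = torch.optim.SGD(model.parameters(), lr=5e-3)
for epoch in range(0, 120, 40):
    optimizer = exp_lr_scheduler(optimizer, epoch, init_lr=5e-3, lr_decay_epoch=40)
    print(epoch, optimizer.param_groups[0]['lr'])  # 5e-3, 5e-4, 5e-5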
def random_shadow(image):
"""
    Add a shadow to the image at a random location. Random shadows are meant to help the
    convolutional model learn lane and lane-curvature patterns effectively in dissimilar places.
"""
if np.random.rand() < 0.5:
# (x1, y1) and (x2, y2) forms a line
# xm, ym gives all the locations of the image
x1, y1 = image.shape[1] * np.random.rand(), 0
x2, y2 = image.shape[1] * np.random.rand(), image.shape[0]
xm, ym = np.mgrid[0:image.shape[0], 0:image.shape[1]]
mask = np.zeros_like(image[:, :, 1])
mask[(ym - y1) * (x2 - x1) - (y2 - y1) * (xm - x1) > 0] = 1
# choose which side should have shadow and adjust saturation
cond = mask == np.random.randint(2)
s_ratio = np.random.uniform(low=0.2, high=0.5)
# adjust Saturation in HLS(Hue, Light, Saturation)
hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
hls[:, :, 1][cond] = hls[:, :, 1][cond] * s_ratio
return cv2.cvtColor(hls, cv2.COLOR_HLS2RGB)
else:
return image | 118fbffa04bbd3551eff3f4298ba14235b00b7c3 | 3,654,920 |
def build_channel_header(type, tx_id, channel_id,
timestamp, epoch=0, extension=None,
tls_cert_hash=None):
"""Build channel header.
Args:
type (common_pb2.HeaderType): type
tx_id (str): transaction id
channel_id (str): channel id
timestamp (grpc.timestamp): timestamp
epoch (int): epoch
extension: extension
Returns:
common_proto.Header instance
"""
channel_header = common_pb2.ChannelHeader()
channel_header.type = type
channel_header.version = 1
channel_header.channel_id = proto_str(channel_id)
channel_header.tx_id = proto_str(tx_id)
channel_header.epoch = epoch
channel_header.timestamp.CopyFrom(timestamp)
if tls_cert_hash:
channel_header.tls_cert_hash = tls_cert_hash
if extension:
channel_header.extension = extension
return channel_header | cfd7524de77a61fe75d3b3be58e2ebde4d743393 | 3,654,921 |
def get_character(data, index):
"""Return one byte from data as a signed char.
Args:
data (list): raw data from sensor
index (int): index entry from which to read data
Returns:
int: extracted signed char value
"""
result = data[index]
if result > 127:
result -= 256
return result | 5a08102cb9dc8ae7e2adcab9b5653b77ee2c6ae3 | 3,654,922 |
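A quick usage sketch for get_character.
# Usage sketch: raw byte values above 127 are interpreted as negative.
raw = [0, 127, 128, 255]
print(get_character(raw, 1))  # 127
print(get_character(raw, 2))  # -128
print(get_character(raw, 3))  # -1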
def df_to_embed(df, img_folder):
""" Extract image embeddings, sentence embeddings and concatenated embeddings from dataset and image folders
:param df: dataset file to use
:param img_folder: folder where the corresponding images are stored
:return: tuple containing sentence embeddings, image embeddings, concatenated embeddings
"""
sent_embed = extract_all_sentences(df)
img_embed = extract_all_images("xception", img_folder)
concat = np.concatenate((sent_embed, img_embed), axis=1)
return sent_embed, img_embed, concat | cda55f06a74c1b0475bc6a9e35e657b4f3ce0392 | 3,654,923 |
import random
def generate_player_attributes():
"""
Return a list of 53 dicts with player attributes
that map to Player model fields.
"""
# Get player position distribution
position_dist = get_position_distribution()
# Get player attribute distribution
attr_dist = get_attribute_distribution()
# Get player names from CSV
player_names = read_player_names_from_csv()
player_list = []
# Generate 53 players per team
for roster_spot in range(0, 53):
player = {}
# Set player names from parsed CSV data
player['first_name'] = player_names[roster_spot][0]
player['last_name'] = player_names[roster_spot][1]
# Only assign player a position that isn't filled on the roster
for pos, dist in position_dist.items():
if dist[0] < dist[1]:
player['position'] = pos
# Pick a random prototype based on position
player['prototype'] = random.choice(list(attr_dist[pos]))
dist[0] += 1
break
else:
continue
# Assign player ages based on normal distribution
player['age'] = int(random.gauss(1, 0.1) * random.randint(25, 35))
default_rookie_age = 22
player['experience'] = player['age'] - default_rookie_age
if player['age'] < 22:
player['experience'] = 0
# Generate ratings based on weights and normal distribution
base_rating = int(random.gauss(70, 20))
position, prototype = player['position'], player['prototype']
pos_weights = attr_dist[position][prototype]
# Apply position and prototype weights
after_pos_weights = []
for pw in range(len(pos_weights)):
after_pos_weights.append(pos_weights[pw] + base_rating)
# Sigmas for standard deviation
sigmas = [20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20, 20]
final_ratings = list(map(random.gauss, after_pos_weights, sigmas))
i = 0
calc_overall = []
# Assign final ratings to player key
for attribute in ('potential', 'confidence', 'iq',
'speed', 'strength', 'agility',
'awareness', 'stamina', 'injury',
'run_off', 'pass_off', 'special_off',
'run_def', 'pass_def', 'special_def'):
rating = int(final_ratings[i])
if rating > 99:
rating = 99
elif rating < 0:
rating = 0
player[attribute] = rating
calc_overall.append(rating)
i += 1
# Calculate overall rating and add player to list
player['overall_rating'] = int(sum(calc_overall) / len(calc_overall))
player_list.append(player)
return player_list | 57c16d998348b9db1384dc98412bd69e62d0c73d | 3,654,925 |
def colour_from_loadings(loadings, maxLoading=None, baseColor="#FF0000"):
"""Computes colors given loading values.
Given an array of loading values (loadings), returns an array of
colors that graphviz can understand that can be used to colour the
nodes. The node with the greatest loading uses baseColor, and a node
with zero loading uses white (#FFFFFF).
This is achieved through clever sneaky use of the alpha channel."""
if maxLoading is None:
maxLoading = max(loadings)
    return [baseColor + format(int(loading / maxLoading * 255), '02x')
for loading in loadings] | 8bd65e5b4aa54558d3710a8518bbbe6400559046 | 3,654,926 |
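A quick usage sketch for colour_from_loadings; the loading values are arbitrary.
# Usage sketch: loadings map to the alpha channel appended to the base colour.
print(colour_from_loadings([1.0, 0.5, 0.0]))
# ['#FF0000ff', '#FF00007f', '#FF000000']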
def determineDocument(pdf):
""" Scans the pdf document for certain text lines and determines the type of investment vehicle traded"""
if 'turbop' in pdf or 'turboc' in pdf:
return 'certificate'
elif 'minil' in pdf:
return 'certificate'
elif 'call' in pdf or 'put' in pdf:
return 'warrant'
else:
return 'stock' | e6c5adc10168321fd6a534dd8e9fbf2e8ccb1615 | 3,654,927 |
from typing import Any
import pickle
def deserialize_api_types(class_name: str, d: dict) -> Any:
"""
Deserializes an API type. Allowed classes are defined in:
* :mod:`maestral.core`
* :mod:`maestral.model`
* :mod:`maestral.exceptions`
:param class_name: Name of class to deserialize.
:param d: Dictionary of serialized class.
:returns: Deserialized object.
"""
bytes_message = serpent.tobytes(d["object"])
check_signature(d["signature"], bytes_message)
return pickle.loads(bytes_message) | f9c3962a1c18bd6dfb385af37e90b5062d1e0eef | 3,654,929 |
from click import testing
from click.testing import CliRunner
def runner() -> CliRunner:
"""Fixture for invoking command-line interfaces."""
return testing.CliRunner() | 39f241b8192a3c06750e850c8c953822e4db5634 | 3,654,930 |
import math
def give_color_to_direction_dynamic(dir):
"""
Assigns a color to the direction (dynamic-defined colors)
Parameters
--------------
dir
Direction
Returns
--------------
col
Color
"""
dir = 0.5 + 0.5 * dir
norm = mpl.colors.Normalize(vmin=0, vmax=1)
nodes = [0.0, 0.01, 0.25, 0.4, 0.45, 0.55, 0.75, 0.99, 1.0]
colors = ["deepskyblue", "skyblue", "lightcyan", "lightgray", "gray", "lightgray", "mistyrose", "salmon", "tomato"]
cmap = mpl.colors.LinearSegmentedColormap.from_list("mycmap2", list(zip(nodes, colors)))
#cmap = cm.plasma
m = cm.ScalarMappable(norm=norm, cmap=cmap)
rgba = m.to_rgba(dir)
r = get_string_from_int_below_255(math.ceil(rgba[0] * 255.0))
g = get_string_from_int_below_255(math.ceil(rgba[1] * 255.0))
b = get_string_from_int_below_255(math.ceil(rgba[2] * 255.0))
return "#" + r + g + b | ece62af230cda4870df099eae50a26b72848b2de | 3,654,932 |
def prune_arms(active_arms, sample_arms, verbose=False):
"""Remove all arms from ``active_arms`` that have an allocation less than two standard deviations below the current best arm.
:param active_arms: list of coordinate-tuples corresponding to arms/cohorts currently being sampled
:type active_arms: list of tuple
:param sample_arms: all arms from prev and current cohorts, keyed by coordinate-tuples
Arm refers specifically to a :class:`moe.bandit.data_containers.SampleArm`
:type sample_arms: dict
:param verbose: whether to print status messages to stdout
:type verbose: bool
:return: list of coordinate-tuples that are the *well-performing* members of ``active_arms``
length is at least 1 and at most ``len(active_arms)``
:rtype: list of tuple
"""
# Find all active sample arms
active_sample_arms = {}
for active_arm in active_arms:
active_sample_arms[active_arm] = sample_arms[active_arm]
# Find the best arm
# Our objective is a relative CTR, so status_quo is 0.0; we
# know that the best arm cannot be worse than status_quo
best_arm_val = 0.0
    for sample_arm_point, sample_arm in active_sample_arms.items():
arm_value, arm_variance = objective_function(
sample_arm,
sample_arms[tuple(STATUS_QUO_PARAMETER)],
)
if arm_value > best_arm_val:
best_arm_val = arm_value
# Remove all arms that are more than two standard deviations worse than the best arm
pruned_arms = copy.copy(active_arms)
    for sample_arm_point, sample_arm in active_sample_arms.items():
arm_value, arm_variance = objective_function(
sample_arm,
sample_arms[tuple(STATUS_QUO_PARAMETER)],
)
if sample_arm.total > 0 and arm_value + 2.0 * numpy.sqrt(arm_variance) < best_arm_val:
if verbose:
print "Removing underperforming arm: {0}".format(sample_arm_point)
pruned_arms.remove(sample_arm_point)
return pruned_arms | bd82f77503a9f0fa6a49b9f24ce9846849544b00 | 3,654,935 |
def prepare_string(dist, digits=None, exact=False, tol=1e-9,
show_mask=False, str_outcomes=False):
"""
Prepares a distribution for a string representation.
Parameters
----------
dist : distribution
The distribution to be stringified.
digits : int or None
The probabilities will be rounded to the specified number of
digits, using NumPy's around function. If `None`, then no rounding
is performed. Note, if the number of digits is greater than the
precision of the floats, then the resultant number of digits will
match that smaller precision.
exact : bool
If `True`, then linear probabilities will be displayed, even if
the underlying pmf contains log probabilities. The closest
rational fraction within a tolerance specified by `tol` is used
as the display value.
tol : float
If `exact` is `True`, then the probabilities will be displayed
as the closest rational fraction within `tol`.
show_mask : bool
If `True`, show the mask for marginal distributions.
str_outcomes : bool
If `True`, then attempt to convert outcomes which are tuples to just
        strings. This is only a display technique.
Returns
-------
pmf : sequence
The formatted pmf. This could be a NumPy array (possibly rounded)
or a list of Fraction instances.
outcomes : sequence
        The formatted outcomes.
base : str or float
The base of the formatted pmf.
colsep : str
The column separation for printing.
max_length : int
The length of the largest outcome, as a string.
pstr : str
        An informative string representing the probability of an outcome.
This will be 'p(x)' xor 'log p(x)'.
"""
colsep = ' '
# Create outcomes with wildcards, if desired and possible.
if show_mask:
if not dist.is_joint():
msg = '`show_mask` can be `True` only for joint distributions'
raise ditException(msg)
if show_mask not in [True, False]:
# The user is specifying what the mask should look like.
wc = show_mask
else:
wc = '*'
ctor = dist._outcome_ctor
def outcome_wc(outcome):
"""
Builds the wildcarded outcome.
"""
i = 0
e = []
for is_masked in dist._mask:
if is_masked:
symbol = wc
else:
symbol = outcome[i]
i += 1
e.append(symbol)
e = ctor(e)
return e
outcomes = map(outcome_wc, dist.outcomes)
else:
outcomes = dist.outcomes
# Convert outcomes to strings, if desired and possible.
if str_outcomes:
if not dist.is_joint():
msg = '`str_outcomes` can be `True` only for joint distributions'
raise ditException(msg)
try:
# First, convert the elements of the outcome to strings.
outcomes_ = [map(str, outcome) for outcome in outcomes]
# Now convert the entire outcome to a string
outcomes_ = map(lambda o: ''.join(o), outcomes_)
# Force the iterators to expand in case there are exceptions.
outcomes = list(outcomes_)
except:
outcomes = map(str, outcomes)
else:
outcomes = map(str, outcomes)
outcomes = list(outcomes)
if len(outcomes):
max_length = max(map(len, outcomes))
else:
max_length = 0
# 1) Convert to linear probabilities, if necessary.
if exact:
# Copy to avoid precision loss
d = dist.copy(base='linear')
else:
d = dist
# 2) Round, if necessary, possibly after converting to linear probabilities.
if digits is not None and digits is not False:
pmf = d.pmf.round(digits)
else:
pmf = d.pmf
# 3) Construct fractions, if necessary.
if exact:
pmf = [approximate_fraction(x, tol) for x in pmf]
if d.is_log():
pstr = 'log p(x)'
else:
pstr = 'p(x)'
base = d.get_base()
return pmf, outcomes, base, colsep, max_length, pstr | 09abba1e5027049b9a43cb83e8de6f95daf5b431 | 3,654,936 |
from functools import partial
def verifier(func):
"""
Creates a `Verifier` by given specifier.
Parameters
----------
func: callable, [callable], (str, callable), [(str, callable)]
The specifier of `Verifier` which can take various forms and determines the attributes and behaviors of `Verifier`.
When it is declared as a list having a specifier,
the `Verifier` deals with an input as iterable object and tries to apply inner verifying function to each value.
If a tuple of string and callable is given, the string is used as the name of the `Verifier`.
Otherwise, its name is determined by `__name__` attribute of the callable object.
The callable should be a function taking an input and returns boolean value representing the result of the verification.
Returns
-------
Verifier
Created `Verifier`.
"""
func, is_iter = (func[0], True) if isinstance(func, list) else (func, False)
if isinstance(func, Verifier):
return func
elif isinstance(func, Variable):
return func._verifier
elif isinstance(func, partial):
ff, n, t_in, t_out, args, kwargs = analyze_specifier(func, (), {})
return Verifier(n, func, is_iter, *args, **kwargs)
elif callable(func):
return Verifier(func.__name__, func, is_iter)
elif isinstance(func, tuple):
ff, n, t_in, t_out, args, kwargs = analyze_specifier(func[1], (), {})
return Verifier(func[0], func[1], is_iter, *args, **kwargs)
else:
raise TypeError("Given value is not valid Verifier specifier.") | 665bc9cf5039e568fb2325a1cf0a25f72311eab8 | 3,654,937 |
from git import Repo
def get_add_diff_file_list(git_folder):
"""List of new files.
"""
repo = Repo(str(git_folder))
repo.git.add("sdk")
output = repo.git.diff("HEAD", "--name-only")
return output.splitlines() | af6ff7ffb076fb382aaa946e11e473f2f45bad0e | 3,654,939 |
def has_read_perm(user, group, is_member, is_private):
""" Return True if the user has permission to *read*
Articles, False otherwise.
"""
if (group is None) or (is_member is None) or is_member(user, group):
return True
if (is_private is not None) and is_private(group):
return False
return True | 6c1bc51abd50a5af76e16e7723957c758822c988 | 3,654,941 |
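A minimal sketch of calling has_read_perm; the group dict and the two callbacks are invented for illustration and stand in for whatever the surrounding project supplies:
is_member = lambda user, group: user in group["members"]
is_private = lambda group: group["private"]
print(has_read_perm("alice", {"members": ["alice"], "private": True}, is_member, is_private))  # True: members may read
print(has_read_perm("bob", {"members": ["alice"], "private": True}, is_member, is_private))    # False: private group, non-member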
import numpy as np
def normalize_df(dataframe, columns):
"""
normalized all columns passed to zero mean and unit variance, returns a full data set
:param dataframe: the dataframe to normalize
:param columns: all columns in the df that should be normalized
:return: the data, centered around 0 and divided by it's standard deviation
"""
for column in columns:
data = dataframe.loc[:, column].values
sd = np.std(data)
mean = np.mean(data)
dataframe.loc[:, column] = (data - mean) / sd
return dataframe | 39b23a6f11794323f1d732396021d669410c7de1 | 3,654,943 |
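A small self-contained example of normalize_df; the data is made up, and note the function uses numpy's population standard deviation (ddof=0):
import pandas as pd
df = pd.DataFrame({"a": [1.0, 2.0, 3.0], "b": [10.0, 10.0, 10.0]})
print(normalize_df(df, ["a"]))  # column "a" becomes roughly [-1.2247, 0.0, 1.2247]; column "b" is left untouched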
import json
def PeekTrybotImage(chromeos_root, buildbucket_id):
"""Get the artifact URL of a given tryjob.
Args:
buildbucket_id: buildbucket-id
chromeos_root: root dir of chrome os checkout
Returns:
(status, url) where status can be 'pass', 'fail', 'running',
and url looks like:
gs://chromeos-image-archive/trybot-elm-release-tryjob/R67-10468.0.0-b20789
"""
command = (
'cros buildresult --report json --buildbucket-id %s' % buildbucket_id)
rc, out, _ = RunCommandInPath(chromeos_root, command)
# Current implementation of cros buildresult returns fail when a job is still
# running.
if rc != 0:
return ('running', None)
results = json.loads(out)[buildbucket_id]
return (results['status'], results['artifacts_url'].rstrip('/')) | c74b7c5a120d3d489e6990bd03e74bb0d22fea27 | 3,654,944 |
def frozenset_code_repr(value: frozenset) -> CodeRepresentation:
"""
Gets the code representation for a frozenset.
:param value: The frozenset.
:return: It's code representation.
"""
return container_code_repr("frozenset({",
"})",
((el,) for el in value),
lambda el: el) | b4a3b283c7d21d0ae888c588471f9dea650215fb | 3,654,945 |
import numpy as np
import pandas as pd
from tqsdk import tafunc
def SRMI(df, n):
    """
    Modified MI indicator
    Args:
        df (pandas.DataFrame): K-line series as a pandas DataFrame
        n (int): parameter n
    Returns:
        pandas.DataFrame: the returned DataFrame contains two columns, "a" and "mi", holding the A value and the MI value respectively
    Example::
        # Compute the modified MI indicator for the CFFEX.IF1903 contract
        from tqsdk import TqApi, TqSim
        from tqsdk.ta import SRMI
        api = TqApi(TqSim())
        klines = api.get_kline_serial("CFFEX.IF1903", 24 * 60 * 60)
        srmi = SRMI(klines, 9)
        print(list(srmi["a"]))
        print(list(srmi["mi"]))
        # The expected output looks like:
        [..., 0.10362397961836425, 0.07062591892459567, -0.03341929372138309, ...]
        [..., 0.07583104758041452, 0.0752526999519902, 0.06317803398828206, ...]
    """
new_df = pd.DataFrame()
new_df["a"] = np.where(df["close"] < df["close"].shift(n),
(df["close"] - df["close"].shift(n)) / df["close"].shift(n),
np.where(df["close"] == df["close"].shift(n), 0,
(df["close"] - df["close"].shift(n)) / df["close"]))
new_df["mi"] = tafunc.sma(new_df["a"], n, 1)
return new_df | 29726385da068446cd3dd3ee13f8d95b88c36245 | 3,654,946 |
def get_purchase_rows(*args, **kwargs):
"""
    Get a list of Purchase rows
:param args:
:param kwargs:
:return:
"""
return db_instance.get_rows(Purchase, *args, **kwargs) | 505ace358b619a736bc7a71139e307110cd7c27d | 3,654,947 |
def depart_delete(request):
""" 删除部门 """
nid = request.GET.get('nid')
models.Department.objects.filter(id=nid).delete()
return redirect("/depart/list/") | 753c01771ad59b789f324a0cb95e94dcf9e48e9d | 3,654,948 |
def create_condor_scheduler(name, host, username=None, password=None, private_key_path=None, private_key_pass=None):
"""
Creates a new condor scheduler
Args:
name (str): The name of the scheduler
host (str): The hostname or IP address of the scheduler
username (str, optional): The username to use when connecting to the scheduler
password (str, optional): The password for the username
private_key_path (str, optional): The path to the location of the SSH private key file
private_key_pass (str, optional): The passphrase for the private key
Returns:
The newly created condor scheduler
Note:
The newly created condor scheduler object is not committed to the database.
"""
condor_scheduler = CondorScheduler(name, host, username=username, password=password,
private_key_path=private_key_path, private_key_pass=private_key_pass)
return condor_scheduler | d47c8c69fea249139698564b52520d95fbb1a75f | 3,654,949 |
def dot_to_underscore(instring):
"""Replace dots with underscores"""
return instring.replace(".", "_") | cf9441702ffb128678a031eabb4fa48be881cae5 | 3,654,951 |
def get_birthday_weekday(current_weekday: int, current_day: int,
birthday_day: int) -> int:
"""Return the day of the week it will be on birthday_day,
given that the day of the week is current_weekday and the
day of the year is current_day.
current_weekday is the current day of the week and is in
the range 1-7, indicating whether today is Sunday (1),
Monday (2), ..., Saturday (7).
current_day and birthday_day are both in the range 1-365.
>>> get_birthday_weekday(5, 3, 4)
6
>>> get_birthday_weekday(5, 3, 116)
6
>>> get_birthday_weekday(6, 116, 3)
5
"""
days_diff = days_difference(current_day, birthday_day)
return get_weekday(current_weekday, days_diff) | 5b4ba9f2a0efcdb9f150b421c21bb689604fbb11 | 3,654,952 |
def _check(err, msg=""):
"""Raise error for non-zero error codes."""
if err < 0:
msg += ': ' if msg else ''
if err == _lib.paUnanticipatedHostError:
info = _lib.Pa_GetLastHostErrorInfo()
hostapi = _lib.Pa_HostApiTypeIdToHostApiIndex(info.hostApiType)
msg += 'Unanticipated host API {0} error {1}: {2!r}'.format(
hostapi, info.errorCode, _ffi.string(info.errorText).decode())
else:
msg += _ffi.string(_lib.Pa_GetErrorText(err)).decode()
raise PortAudioError(msg)
return err | 2f0b2ccd055bbad814e48b451eb72c60e62f9273 | 3,654,954 |
from keras.layers import Conv3D, Input, Lambda, concatenate
from keras.models import Model
def make_flood_fill_unet(input_fov_shape, output_fov_shape, network_config):
"""Construct a U-net flood filling network.
"""
image_input = Input(shape=tuple(input_fov_shape) + (1,), dtype='float32', name='image_input')
if network_config.rescale_image:
ffn = Lambda(lambda x: (x - 0.5) * 2.0)(image_input)
else:
ffn = image_input
mask_input = Input(shape=tuple(input_fov_shape) + (1,), dtype='float32', name='mask_input')
ffn = concatenate([ffn, mask_input])
# Note that since the Keras 2 upgrade strangely models with depth > 3 are
# rejected by TF.
ffn = add_unet_layer(ffn, network_config, network_config.unet_depth - 1, output_fov_shape,
n_channels=network_config.convolution_filters)
mask_output = Conv3D(
1,
(1, 1, 1),
kernel_initializer=network_config.initialization,
padding=network_config.convolution_padding,
name='mask_output',
activation=network_config.output_activation)(ffn)
ffn = Model(inputs=[image_input, mask_input], outputs=[mask_output])
return ffn | ff8c90b3eecc26384b33fd64afa0a2c4dd44b82d | 3,654,956 |
def FRAC(total):
"""Returns a function that shows the average percentage of the values from
the total given."""
def realFrac(values, unit):
r = toString(sum(values) / len(values) / total * 100)
r += '%'
if max(values) > min(values):
r += ' avg'
return [r]
return realFrac | 41946163d5c185d1188f71d615a67d72e6eaee4f | 3,654,957 |
def get_min_max(ints):
"""
Return a tuple(min, max) out of list of unsorted integers.
Args:
ints(list): list of integers containing one or more integers
"""
if len(ints) == 0: return (None, None)
low = ints[0]
high = ints[0]
for i in ints:
if i < low:
low = i
elif i > high:
high = i
return (low, high) | 14c7d4cc73947c8de38bb598e295d9a1b4b7e5f6 | 3,654,958 |
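Illustrative calls for get_min_max:
print(get_min_max([7, 2, 9, 4, 9, 1]))  # (1, 9)
print(get_min_max([]))                  # (None, None) for an empty list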
import numpy as np
def gen_sweep_pts(start: float=None, stop: float=None,
center: float=0, span: float=None,
num: int=None, step: float=None, endpoint=True):
"""
Generates an array of sweep points based on different types of input
arguments.
Boundaries of the array can be specified using either start/stop or
using center/span. The points can be specified using either num or step.
Args:
start (float) : start of the array
stop (float) : end of the array
center (float) : center of the array
N.B. 0 is chosen as a sensible default for the span.
it is argued that no such sensible default exists
for the other types of input.
span (float) : span the total range of values to span
num (int) : number of points in the array
step (float) : the stepsize between points in the array
endpoint (bool): whether to include the endpoint
"""
if (start is not None) and (stop is not None):
if num is not None:
return np.linspace(start, stop, num, endpoint=endpoint)
elif step is not None:
# numpy arange does not natively support endpoint
return np.arange(start, stop + endpoint*step/100, step)
else:
raise ValueError('Either "num" or "step" must be specified')
elif (center is not None) and (span is not None):
if num is not None:
return span_num(center, span, num, endpoint=endpoint)
elif step is not None:
return span_step(center, span, step, endpoint=endpoint)
else:
raise ValueError('Either "num" or "step" must be specified')
else:
raise ValueError('Either ("start" and "stop") or '
'("center" and "span") must be specified') | fb67623acfea433331babf7b7e1217cfa4e9e7ae | 3,654,959 |
def set_lang_owner(cursor, lang, owner):
"""Set language owner.
Args:
cursor (cursor): psycopg2 cursor object.
lang (str): language name.
owner (str): name of new owner.
"""
query = "ALTER LANGUAGE \"%s\" OWNER TO \"%s\"" % (lang, owner)
executed_queries.append(query)
cursor.execute(query)
return True | 07cf4a33ca766a8ccf468f59d33318bab88c4529 | 3,654,960 |
def rstrip_tuple(t: tuple):
"""Remove trailing zeroes in `t`."""
if not t or t[-1]:
return t
right = len(t) - 1
while right > 0 and t[right - 1] == 0:
right -= 1
return t[:right] | a10e74ea4a305d588fbd1555f32dda1d4b95266e | 3,654,961 |
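Illustrative calls for rstrip_tuple:
print(rstrip_tuple((1, 2, 0, 0)))  # (1, 2)
print(rstrip_tuple((1, 2, 3)))     # (1, 2, 3) -- unchanged when there is no trailing zero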
def _calc_active_face_flux_divergence_at_node(grid, unit_flux_at_faces, out=None):
"""Calculate divergence of face-based fluxes at nodes (active faces only).
Given a flux per unit width across each face in the grid, calculate the net
outflux (or influx, if negative) divided by cell area, at each node that
lies within a cell.
Construction::
_calc_active_face_flux_divergence_at_node(grid, unit_flux_at_faces,
out=None)
Parameters
----------
grid : ModelGrid
A ModelGrid.
unit_flux_at_faces : ndarray or field name (x number of faces)
Flux per unit width associated with faces.
out : ndarray (x number of nodes), optional
Buffer to hold the result.
Returns
-------
ndarray (x number of nodes)
Flux divergence at nodes.
Examples
--------
>>> from landlab import RasterModelGrid, CLOSED_BOUNDARY
>>> rg = RasterModelGrid(3, 4, 10.0)
>>> z = rg.add_zeros('node', 'topographic__elevation')
>>> z[5] = 50.0
>>> z[6] = 36.0
>>> fg = rg.calc_grad_at_link(z)[rg.link_at_face] # there are 7 faces
>>> fg
array([ 5. , 3.6, 5. , -1.4, -3.6, -5. , -3.6])
>>> _calc_active_face_flux_divergence_at_node(rg, -fg)
array([ 0. , 0. , 0. , 0. , 0. , 1.64, 0.94, 0. , 0. ,
0. , 0. , 0. ])
>>> rg.set_status_at_node_on_edges(right=CLOSED_BOUNDARY)
>>> rg.set_status_at_node_on_edges(top=CLOSED_BOUNDARY)
>>> _calc_active_face_flux_divergence_at_node(rg, -fg)
array([ 0. , 0. , 0. , 0. , 0. , 1.14, 0.22, 0. , 0. ,
0. , 0. , 0. ])
Notes
-----
Performs a numerical flux divergence operation on cells, and returns the
result in an array of length equal to the number of nodes. Nodes without
cells (those on the grid perimeter) are not affected (i.e., their value
is either zero, or if `out` is given, whatever the prior value in `out`
was).
"""
if out is None:
out = grid.zeros(at='node')
out[grid.node_at_cell] = \
_calc_net_active_face_flux_at_cell(grid, unit_flux_at_faces) \
/ grid.area_of_cell
return out | 82c485935a3190c07ab12f7c838d52f5fecb78d0 | 3,654,962 |
from typing import NoReturn
def get_line(prompt: str = '') -> Effect[HasConsole, NoReturn, str]:
"""
Get an `Effect` that reads a `str` from stdin
Example:
>>> class Env:
... console = Console()
>>> greeting = lambda name: f'Hello {name}!'
>>> get_line('What is your name? ').map(greeting).run(Env())
name? # input e.g 'John Doe'
'Hello John Doe!'
Args:
prompt: prompt to display in console
Return:
an `Effect` that produces a `str` read from stdin
"""
return depend(HasConsole).and_then(lambda env: env.console.input(prompt)) | 47c58bb6ab794fdf789f0812dc1dc6d977106b60 | 3,654,964 |
import librosa
import numpy as np
from numpy import ndarray
def reconstruct_wave(*args: ndarray, kwargs_istft, n_sample=-1) -> ndarray:
"""
construct time-domain wave from complex spectrogram
Args:
*args: the complex spectrogram.
kwargs_istft: arguments of Inverse STFT.
n_sample: expected audio length.
Returns:
audio (numpy)
"""
if len(args) == 1:
spec = args[0].squeeze()
mag = None
phase = None
assert np.iscomplexobj(spec)
elif len(args) == 2:
spec = None
mag = args[0].squeeze()
phase = args[1].squeeze()
assert np.isrealobj(mag) and np.isrealobj(phase)
else:
raise ValueError
kwarg_len = dict(length=n_sample) if n_sample != -1 else dict()
if spec is None:
spec = mag * np.exp(1j * phase)
wave = librosa.istft(spec, **kwargs_istft, **kwarg_len)
return wave | 8624602efe1ab90304da05c602fb46ac52ec86e0 | 3,654,965 |
def perfect_score(student_info):
"""
:param student_info: list of [<student name>, <score>] lists
:return: first `[<student name>, 100]` or `[]` if no student score of 100 is found.
"""
    for student in student_info:
        if int(student[1]) == 100:
            return student
    return [] | ac7580cce134627e08764031ef2812e1b70ba00f | 3,654,966
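A quick check of perfect_score with made-up student data:
print(perfect_score([["Alice", 95], ["Bob", 100], ["Cara", 100]]))  # ['Bob', 100] -- the first perfect score is returned
print(perfect_score([["Alice", 95]]))                               # [] when nobody scored 100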
from collections import defaultdict
def get_composite_component(current_example_row, cache, model_config):
"""
maps component_id to dict of {cpu_id: False, ...}
:param current_example_row:
:param cache:
:param model_config:
:return: nested mapping_dict = { #there can be multiple components
component_id = { #components can be deployed on multiple servers
cpu_id: False,
...
},
...
}
"""
mapping_dict = defaultdict(lambda: {})
# for context in
for column_name in model_config["components"]:
allocation_name = column_name.replace("AllocationDegreeImpl:", "")
context = get_element_by_identifier(element_tree=cache.get_xml_tree("allocation"),
search_string=allocation_name,
attribute="entityName")
system_id = get_linkage_id(identifier="assemblyContext_AllocationContext", element_tree=context)
assembly_context = get_by_id(element=cache.get_xml_tree("system"), element_id=system_id)
component = assembly_context.find("./encapsulatedComponent__AssemblyContext")
if component.get(get_xml_schema_type()) == "repository:CompositeComponent":
repo_id = get_linkage_id(element_tree=assembly_context, identifier="encapsulatedComponent__AssemblyContext")
composite_component = get_by_id(element=cache.get_xml_tree("repository"), element_id=repo_id)
for composed_structure in composite_component.findall("./assemblyContexts__ComposedStructure"):
component_id = composed_structure.get("encapsulatedComponent__AssemblyContext")
# check if column (with name of component) of current test data is allocated to existing server
if current_example_row[column_name] in model_config["server"].keys():
# if component is allocated to existing server append allocation to list
for server_id in model_config["server"]:
# if component is part of composite
if current_example_row[column_name] == server_id:
temp_server_id = model_config["server"][current_example_row[column_name]]
mapping_dict[component_id].update({temp_server_id: False})
return mapping_dict | 201db2016ea59cbf4a20ce081813bfd60d58bf67 | 3,654,968 |
import boto3
def presigned_url_both(filename, email):
"""
Return presigned urls both original image url and thumbnail image url
:param filename:
:param email:
:return:
"""
prefix = "photos/{0}/".format(email_normalize(email))
prefix_thumb = "photos/{0}/thumbnails/".format(email_normalize(email))
key_thumb = "{0}{1}".format(prefix_thumb, filename)
key_origin = "{0}{1}".format(prefix, filename)
try:
s3_client = boto3.client('s3')
thumb_url = s3_client.generate_presigned_url(
'get_object',
Params={'Bucket': conf['S3_PHOTO_BUCKET'], 'Key': key_thumb},
ExpiresIn=conf['S3_PRESIGNED_EXP'])
origin_url = s3_client.generate_presigned_url(
'get_object',
Params={'Bucket': conf['S3_PHOTO_BUCKET'], 'Key': key_origin},
ExpiresIn=conf['S3_PRESIGNED_EXP'])
except Exception as e:
raise ChaliceViewError(e)
return thumb_url, origin_url | 7f37cf388ef944d740f2db49c5125435b819e0e8 | 3,654,969 |
def check_if_event_exists(service, new_summary):
"""
Description: checks if the event summary exists using a naive approach
"""
event_exists = False
page_token = None
calendarId = gcalendarId
while True:
events = (
service.events().list(calendarId=calendarId, pageToken=page_token).execute()
)
for event in events["items"]:
# purge location from summary string
if new_summary in event["summary"]:
event_exists = True
break
page_token = events.get("nextPageToken")
if not page_token:
break
return event_exists | c6cc8bd3e4548cda11f9eaad6fd2d3da7a5c7e20 | 3,654,970 |
def retry(func, *args, **kwargs):
"""
You can use the kwargs to override the 'retries' (default: 5) and
'use_account' (default: 1).
"""
global url, token, parsed, conn
retries = kwargs.get('retries', 5)
use_account = 1
if 'use_account' in kwargs:
use_account = kwargs['use_account']
del kwargs['use_account']
use_account -= 1
attempts = 0
backoff = 1
while attempts <= retries:
attempts += 1
try:
if not url[use_account] or not token[use_account]:
url[use_account], token[use_account] = \
get_auth(swift_test_auth, swift_test_user[use_account],
swift_test_key[use_account])
parsed[use_account] = conn[use_account] = None
if not parsed[use_account] or not conn[use_account]:
parsed[use_account], conn[use_account] = \
http_connection(url[use_account])
return func(url[use_account], token[use_account],
parsed[use_account], conn[use_account], *args, **kwargs)
except (socket.error, HTTPException):
if attempts > retries:
raise
parsed[use_account] = conn[use_account] = None
        except AuthError:
url[use_account] = token[use_account] = None
continue
        except InternalServerError:
pass
if attempts <= retries:
sleep(backoff)
backoff *= 2
raise Exception('No result after %s retries.' % retries) | 7749fcd63f8d795692097b0257adde4147ecb569 | 3,654,971 |
def eval_f(angles, data=None):
"""
function to minimize
"""
x1, x2, d, zt, z, alpha, beta, mask, b1, b2 = data
thetaxm, thetaym, thetazm, thetaxp, thetayp, thetazp = angles
rm = rotation(thetaxm, thetaym, thetazm)
rp = rotation(thetaxp, thetayp, thetazp)
x1r = rm.dot(x1.T).T
x2r = rp.dot(x2.T).T + d
obj = poisson_complete_ll(x1r, x2r, zt, z, alpha, beta, mask, b1, b2)
return obj | 622c18d21224ab40d597a165bff3e0493db4cdcc | 3,654,972 |
def clamp(min_v, max_v, value):
"""
Clamps a value between a min and max value
Args:
min_v: Minimum value
max_v: Maximum value
value: Value to be clamped
Returns:
Returns the clamped value
"""
return min_v if value < min_v else max_v if value > max_v else value | 1a9aaf3790b233f535fb864215444b0426c17ad8 | 3,654,973 |
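A few example calls for clamp:
print(clamp(0, 10, 15))  # 10 -- above the range, pinned to max_v
print(clamp(0, 10, -3))  # 0  -- below the range, pinned to min_v
print(clamp(0, 10, 5))   # 5  -- in range, returned unchanged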
def collatz(n):
"""Sequence generation."""
l = []
while n > 1:
l.append(n)
if n % 2 == 0:
            n = n // 2
else:
n = (3 * n) + 1
l.append(n)
return l | 69d993147604889fe6b03770efbfa6fb7f034258 | 3,654,974 |
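A sample Collatz sequence produced by the function above:
print(collatz(6))  # [6, 3, 10, 5, 16, 8, 4, 2, 1]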
import numpy as np
def generate_anchors(base_size=16, ratios=[0.5, 1, 2],
scales=2 ** np.arange(3, 6), stride=16):
"""
Generate anchor (reference) windows by enumerating aspect ratios X
scales wrt a reference (0, 0, 15, 15) window.
"""
base_anchor = np.array([1, 1, base_size, base_size]) - 1
ratio_anchors = _ratio_enum(base_anchor, ratios)
anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales)
for i in range(ratio_anchors.shape[0])])
return anchors | 083bfad62fac67e0f7fb02251bc3db7904629bd5 | 3,654,976 |
import re
def number_format(number_string, fill=2):
"""
add padding zeros to make alinged numbers
ex.
>>> number_format('2')
'02'
>>> number_format('1-2')
'01-02'
"""
output = []
digits_spliter = r'(?P<digit>\d+)|(?P<nondigit>.)'
for token in [m.groups() for m in re.finditer(digits_spliter, number_string)]:
if token[0] is None:
output.append(token[1])
else:
            output.append(token[0].zfill(fill))
return ''.join(output) | ee44167b4597fbe7c9f01fa5b26e02d7608c3677 | 3,654,977 |
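Example calls showing the default two-digit padding and a wider fill value:
print(number_format('1-2'))          # 01-02
print(number_format('1-2', fill=3))  # 001-002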
def box_postp2use(pred_boxes, nms_iou_thr=0.7, conf_thr=0.5):
"""Postprocess prediction boxes to use
* Non-Maximum Suppression
* Filter boxes with Confidence Score
Args:
pred_boxes (np.ndarray dtype=np.float32): pred boxes postprocessed by yolo_output2boxes. shape: [cfg.cell_size * cfg.cell_size *cfg.boxes_per_cell, 6]
nms_iou_thr (float): Non-Maximum Suppression IoU Threshold
conf_thr (float): Confidence Score Threshold
Returns:
np.ndarray (dtype=np.float32)
"""
boxes_nms = nms(pred_boxes=pred_boxes, iou_thr=nms_iou_thr)
boxes_conf_filtered = boxes_nms[boxes_nms[:, 4] >= conf_thr]
return boxes_conf_filtered | 07be8b953b82dbbcc27daab0afa71713db96efc1 | 3,654,978 |
from functools import reduce
import bibliopixel as bp
def many_hsvs_to_rgb(hsvs):
"""Combine list of hsvs otf [[(h, s, v), ...], ...] and return RGB list."""
num_strips = len(hsvs[0])
num_leds = len(hsvs[0][0])
res = [[[0, 0, 0] for ll in range(num_leds)] for ss in range(num_strips)]
for strip in range(num_strips):
for led in range(num_leds):
# for some reason the conversion screws this up?
#
# import bibliopixel as bp
# c1 = bp.colors.conversions.hsv2rgb((0, 0, 0))
# c2 = bp.colors.conversions.hsv2rgb((0, 0, 0))
# c3 = bp.colors.conversions.hsv2rgb((0, 0, 0))
# bp.colors.arithmetic.color_blend(
# bp.colors.arithmetic.color_blend(c1, c2),
# c3)
#
# = (2, 2, 2)
if all(hsv[strip][led][2] == 0 for hsv in hsvs):
rgb = (0, 0, 0)
else:
rgbs = [bp.colors.conversions.hsv2rgb(hsv[strip][led])
for hsv in hsvs]
rgb = reduce(bp.colors.arithmetic.color_blend, rgbs)
res[strip][led] = rgb
return res | 0842ecb4a42560fb6dae32a91ae12588152db621 | 3,654,979 |
def _convert_paths_to_flask(transmute_paths):
"""flask has it's own route syntax, so we convert it."""
paths = []
for p in transmute_paths:
paths.append(p.replace("{", "<").replace("}", ">"))
return paths | f8ea95e66c68481f0eb5a6d83cf61d098806f6be | 3,654,980 |
def check_isup(k, return_client=None):
"""
Checks ping and returns status
Used with concurrent decorator for parallel checks
:param k: name to ping
:param return_client: to change return format as '{k: {'comments': comments}}'
:return(str): ping ok / -
"""
if is_up(k):
comments = 'ping ok'
else:
comments = ' - '
if return_client:
comments = {k: {'comments': comments}}
return comments | 8ebb346eb74cb54aa978b4fff7cd310b344ece50 | 3,654,981 |
def percent_uppercase(text):
"""Calculates percentage of alphabetical characters that are uppercase, out of total alphabetical characters.
Based on findings from spam.csv that spam texts have higher uppercase alphabetical characters
(see: avg_uppercase_letters())"""
alpha_count = 0
uppercase_count = 0
for char in text:
if char.isalpha():
alpha_count += 1
if char.isupper():
uppercase_count += 1
# calculate percentage - make sure not to divide by 0
try:
perc_uppercase = float(uppercase_count) / float(alpha_count)
return str(perc_uppercase)
except ZeroDivisionError:
return "0" | 61ccf42d06ffbae846e98d1d68a48de21f52c299 | 3,654,982 |