content (stringlengths 22–815k) | id (int64 0–4.91M) |
---|---|
def generic_validator(check, error_message):
"""
Validator factory
>>> v = generic_validator(is_int, "invalid int")
>>> v(6)
6
>>> v("g")
Traceback (most recent call last):
...
ValidationError: [u'invalid int']
"""
# Validator closure
def inner_validator(value, *args, **kwargs):
if not check(value):
raise ValidationError(error_message)
return value
return inner_validator
| 5,351,700 |
def get_molec_shape(mol, conf, confId, vdwScale=1.0,
boxMargin=2.0, spacing=0.2):
"""
Get the shape of a conformer of a molecule as a grid
representation.
"""
box = Chem.ComputeConfBox(conf)
sideLen = (box[1].x-box[0].x + 2*boxMargin,
box[1].y-box[0].y + 2*boxMargin,
box[1].z-box[0].z + 2*boxMargin)
shape = rdGeometry.UniformGrid3D(2*sideLen[0],
2*sideLen[1],
2*sideLen[2],
spacing=spacing)
Chem.EncodeShape(
mol,
shape,
confId=confId,
ignoreHs=False,
vdwScale=vdwScale
)
return box, sideLen, shape
| 5,351,701 |
def autoencoder(dimensions=[784, 512, 256, 64]):
"""Build a deep denoising autoencoder w/ tied weights.
Parameters
----------
dimensions : list, optional
The number of neurons for each layer of the autoencoder.
Returns
-------
x : Tensor
Input placeholder to the network
z : Tensor
Inner-most latent representation
y : Tensor
Output reconstruction of the input
cost : Tensor
Overall cost to use for training
"""
# input to the network
x = tf.placeholder(tf.float32, [None, dimensions[0]], name='x')
# Probability that we will corrupt input.
# This is the essence of the denoising autoencoder, and is pretty
# basic. We'll feed forward a noisy input, allowing our network
# to generalize better, possibly, to occlusions of what we're
# really interested in. But to measure accuracy, we'll still
# enforce a training signal which measures the original image's
# reconstruction cost.
#
    # We'll change this to 1 during training, but when we're ready for
    # testing/production we'll set it back to 0.
corrupt_prob = tf.placeholder(tf.float32, [1])
current_input = corrupt(x) * corrupt_prob + x * (1 - corrupt_prob)
# Build the encoder
encoder = []
for layer_i, n_output in enumerate(dimensions[1:]):
n_input = int(current_input.get_shape()[1])
W = tf.Variable(
tf.random_uniform([n_input, n_output],
-1.0 / math.sqrt(n_input),
1.0 / math.sqrt(n_input)))
b = tf.Variable(tf.zeros([n_output]))
encoder.append(W)
output = tf.nn.tanh(tf.matmul(current_input, W) + b)
current_input = output
# latent representation
z = current_input
encoder.reverse()
# Build the decoder using the same weights
for layer_i, n_output in enumerate(dimensions[:-1][::-1]):
W = tf.transpose(encoder[layer_i])
b = tf.Variable(tf.zeros([n_output]))
output = tf.nn.tanh(tf.matmul(current_input, W) + b)
current_input = output
# now have the reconstruction through the network
y = current_input
# cost function measures pixel-wise difference
cost = tf.sqrt(tf.reduce_mean(tf.square(y - x)))
return {'x': x, 'z': z, 'y': y,
'corrupt_prob': corrupt_prob,
'cost': cost}
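# --- Hedged usage sketch (an addition, not part of the original snippet) ------
# The comments above say corrupt_prob should be fed as 1 during training and 0 at
# test time. This sketch assumes a TF1-style graph environment and supplies a
# minimal masking-noise corrupt() stand-in, since corrupt() is referenced above
# but not defined in this snippet.
import math  # needed by autoencoder() above (assumed not already imported)
import numpy as np
import tensorflow as tf


def corrupt(x):
    # Assumed stand-in: zero out roughly half of the inputs (masking noise).
    mask = tf.random_uniform(shape=tf.shape(x), minval=0, maxval=2, dtype=tf.int32)
    return tf.multiply(x, tf.cast(mask, tf.float32))


ae = autoencoder(dimensions=[784, 256, 64])
train_step = tf.train.AdamOptimizer(0.001).minimize(ae['cost'])
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.random.rand(32, 784).astype(np.float32)  # stand-in for an MNIST batch
    # Training: corrupt the input (corrupt_prob = 1).
    _, c = sess.run([train_step, ae['cost']],
                    feed_dict={ae['x']: batch, ae['corrupt_prob']: [1.0]})
    # Testing: feed the clean input (corrupt_prob = 0).
    recon = sess.run(ae['y'], feed_dict={ae['x']: batch, ae['corrupt_prob']: [0.0]})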
| 5,351,702 |
def _asymptotic_expansion_of_normalized_black_call(h, t):
"""
Asymptotic expansion of
b = Φ(h+t)·exp(x/2) - Φ(h-t)·exp(-x/2)
with
h = x/s and t = s/2
which makes
b = Φ(h+t)·exp(h·t) - Φ(h-t)·exp(-h·t)
exp(-(h²+t²)/2)
= --------------- · [ Y(h+t) - Y(h-t) ]
√(2π)
with
Y(z) := Φ(z)/φ(z)
for large negative (t-|h|) by the aid of Abramowitz & Stegun (26.2.12) where Φ(z) = φ(z)/|z|·[1-1/z^2+...].
We define
r
A(h,t) := --- · [ Y(h+t) - Y(h-t) ]
t
with r := (h+t)·(h-t) and give an expansion for A(h,t) in q:=(h/r)² expressed in terms of e:=(t/h)² .
:param h:
:type h: float
:param t:
:type t: float
:return:
:rtype: float
"""
e = (t / h) * (t / h)
r = ((h + t) * (h - t))
q = (h / r) * (h / r)
# 17th order asymptotic expansion of A(h,t) in q, sufficient for Φ(h) [and thus y(h)] to have relative accuracy of 1.64E-16 for h <= η with η:=-10.
asymptotic_expansion_sum = (2.0 + q * (-6.0E0 - 2.0 * e + 3.0 * q * (1.0E1 + e * (2.0E1 + 2.0 * e) + 5.0 * q * (
-1.4E1 + e * (-7.0E1 + e * (-4.2E1 - 2.0 * e)) + 7.0 * q * (
1.8E1 + e * (1.68E2 + e * (2.52E2 + e * (7.2E1 + 2.0 * e))) + 9.0 * q * (
-2.2E1 + e * (-3.3E2 + e * (-9.24E2 + e * (-6.6E2 + e * (-1.1E2 - 2.0 * e)))) + 1.1E1 * q * (
2.6E1 + e * (5.72E2 + e * (
2.574E3 + e * (3.432E3 + e * (1.43E3 + e * (1.56E2 + 2.0 * e))))) + 1.3E1 * q * (
-3.0E1 + e * (-9.1E2 + e * (-6.006E3 + e * (-1.287E4 + e * (
-1.001E4 + e * (-2.73E3 + e * (-2.1E2 - 2.0 * e)))))) + 1.5E1 * q * (
3.4E1 + e * (1.36E3 + e * (1.2376E4 + e * (3.8896E4 + e * (
4.862E4 + e * (2.4752E4 + e * (
4.76E3 + e * (2.72E2 + 2.0 * e))))))) + 1.7E1 * q * (
-3.8E1 + e * (-1.938E3 + e * (-2.3256E4 + e * (
-1.00776E5 + e * (-1.84756E5 + e * (
-1.51164E5 + e * (-5.4264E4 + e * (
-7.752E3 + e * (
-3.42E2 - 2.0 * e)))))))) + 1.9E1 * q * (
4.2E1 + e * (2.66E3 + e * (4.0698E4 + e * (
2.3256E5 + e * (5.8786E5 + e * (
7.05432E5 + e * (4.0698E5 + e * (
1.08528E5 + e * (1.197E4 + e * (
4.2E2 + 2.0 * e))))))))) + 2.1E1 * q * (
-4.6E1 + e * (-3.542E3 + e * (
-6.7298E4 + e * (
-4.90314E5 + e * (
-1.63438E6 + e * (
-2.704156E6 + e * (
-2.288132E6 + e * (
-9.80628E5 + e * (
-2.01894E5 + e * (
-1.771E4 + e * (
-5.06E2 - 2.0 * e)))))))))) + 2.3E1 * q * (
5.0E1 + e * (
4.6E3 + e * (
1.0626E5 + e * (
9.614E5 + e * (
4.08595E6 + e * (
8.9148E6 + e * (
1.04006E7 + e * (
6.53752E6 + e * (
2.16315E6 + e * (
3.542E5 + e * (
2.53E4 + e * (
6.0E2 + 2.0 * e))))))))))) + 2.5E1 * q * (
-5.4E1 + e * (
-5.85E3 + e * (
-1.6146E5 + e * (
-1.77606E6 + e * (
-9.37365E6 + e * (
-2.607579E7 + e * (
-4.01166E7 + e * (
-3.476772E7 + e * (
-1.687257E7 + e * (
-4.44015E6 + e * (
-5.9202E5 + e * (
-3.51E4 + e * (
-7.02E2 - 2.0 * e)))))))))))) + 2.7E1 * q * (
5.8E1 + e * (
7.308E3 + e * (
2.3751E5 + e * (
3.12156E6 + e * (
2.003001E7 + e * (
6.919458E7 + e * (
1.3572783E8 + e * (
1.5511752E8 + e * (
1.0379187E8 + e * (
4.006002E7 + e * (
8.58429E6 + e * (
9.5004E5 + e * (
4.7502E4 + e * (
8.12E2 + 2.0 * e))))))))))))) + 2.9E1 * q * (
-6.2E1 + e * (
-8.99E3 + e * (
-3.39822E5 + e * (
-5.25915E6 + e * (
-4.032015E7 + e * (
-1.6934463E8 + e * (
-4.1250615E8 + e * (
-6.0108039E8 + e * (
-5.3036505E8 + e * (
-2.8224105E8 + e * (
-8.870433E7 + e * (
-1.577745E7 + e * (
-1.472562E6 + e * (
-6.293E4 + e * (
-9.3E2 - 2.0 * e)))))))))))))) + 3.1E1 * q * (
6.6E1 + e * (
1.0912E4 + e * (
4.74672E5 + e * (
8.544096E6 + e * (
7.71342E7 + e * (
3.8707344E8 + e * (
1.14633288E9 + e * (
2.07431664E9 + e * (
2.33360622E9 + e * (
1.6376184E9 + e * (
7.0963464E8 + e * (
1.8512208E8 + e * (
2.7768312E7 + e * (
2.215136E6 + e * (
8.184E4 + e * (
1.056E3 + 2.0 * e))))))))))))))) + 3.3E1 * (
-7.0E1 + e * (
-1.309E4 + e * (
-6.49264E5 + e * (
-1.344904E7 + e * (
-1.4121492E8 + e * (
-8.344518E8 + e * (
-2.9526756E9 + e * (
-6.49588632E9 + e * (
-9.0751353E9 + e * (
-8.1198579E9 + e * (
-4.6399188E9 + e * (
-1.6689036E9 + e * (
-3.67158792E8 + e * (
-4.707164E7 + e * (
-3.24632E6 + e * (
-1.0472E5 + e * (
-1.19E3 - 2.0 * e))))))))))))))))) * q)))))))))))))))))
b = ONE_OVER_SQRT_TWO_PI * np.exp((-0.5 * (h * h + t * t))) * (t / r) * asymptotic_expansion_sum
return np.abs(np.maximum(b, 0))
| 5,351,703 |
def date_read(date_string, *, convert_to_current_timezone: bool = False):
"""Read the given date (if possible)."""
return date_parse(date_string, convert_to_current_timezone=convert_to_current_timezone)
| 5,351,704 |
def encode_name(name):
"""
    Encode a unicode string as UTF-8 and then URL-encode the result.
    Use for entity titles in URLs.
"""
return urllib.quote(name.encode('utf-8'), safe='')
| 5,351,705 |
def cmp_str(element1, element2):
"""
    Compare numbers in string format correctly.
"""
try:
return cmp(int(element1), int(element2))
except ValueError:
return cmp(element1, element2)
| 5,351,706 |
def parse_page_file(page_raw: str, type: str, file_name: str) -> Page:
"""
    Parse a raw page file into a Page: the id comes from the file name,
    the title and fields from the parsed page body.
"""
page_id = extract_page_id(file_name)
title, fields = parse_md(page_raw)
return Page(
id=page_id,
type=type,
title=title,
fields=fields,
)
| 5,351,707 |
def fetch(name):
"""
Fetches an appropriate model to perform the prediction.
:param name: model's name
:return: a trained model
"""
K.clear_session()
try:
full_weights_path = path.join(path_prefix, *load_weights()[name])
if name == 'svm':
return SVMModel(joblib.load(full_weights_path))
elif name == 'cnn':
return CNNModel(load(full_weights_path))
elif name == 'mlp':
return MLPModel(load(full_weights_path))
except KeyError:
raise ModelNotFoundError(f'Model named {name} does not exist.')
| 5,351,708 |
def post(text, appid=2, touser=None, toparty=None):
"""
    Send a text message to the given WeChat Work (企业微信) users and/or parties.
"""
#print '=========',type(text)
if type(text) is unicode:
text = text.encode('utf8')
if not touser:
touser = []
if not toparty:
toparty = ['2']
url = 'https://qyapi.weixin.qq.com/cgi-bin/message/send?access_token={access_token}'
url = url.format(access_token=get_access_token())
data = {"touser": "|".join(touser),
"toparty": "|".join(toparty),
"msgtype": "text",
"agentid": str(appid),
"text": {"content": text},
"safe": "0",
}
result = requests.post(url, data=json.dumps(data, ensure_ascii=False))
print result.text
return result
| 5,351,709 |
def download_image_from_annotation(
api_key: str,
api_url: str,
annotation_path: Path,
images_path: str,
annotation_format: str,
use_folders: bool,
video_frames: bool,
):
"""Helper function: dispatcher of functions to download an image given an annotation
Parameters
----------
api_key : str
API Key of the current team
api_url : str
Url of the darwin API (e.g. 'https://darwin.v7labs.com/api/')
annotation_path : Path
Path where the annotation is located
    images_path : str
Path where to download the image
annotation_format : str
Format of the annotations. Currently only JSON is supported
use_folders: bool
Recreate folder structure
video_frames: bool
Pulls video frames images instead of video files
"""
if annotation_format == "json":
download_image_from_json_annotation(api_key, api_url, annotation_path, images_path, use_folders, video_frames)
elif annotation_format == "xml":
print("sorry can't let you do that dave")
raise NotImplementedError
# download_image_from_xml_annotation(annotation_path, images_path)
| 5,351,710 |
def hex_encrypt(msg):
"""Hex encrypts a message.
:param bytes msg: string message to be encrypted.
:return: string for encrypted version of msg in hex.
:rtype: bytes
"""
if not cipher:
return msg
if not isinstance(msg, six.binary_type):
raise ValueError('only bytes can be encrypted')
msg = cipher.encrypt(_pad(msg))
msg = codecs.encode(msg, 'hex')
return msg
| 5,351,711 |
def test_TestPermsWorldReadableDir(tmp_path):
"""Test for TestPermissionsWorldReadableDir."""
# setup test case
test_path = tmp_path / 'testdir'
test_path.mkdir()
test_path.chmod(0o755)
# run test case
test = fs_lint.TestPermissionsWorldReadableDir()
assert test(test_path, test_path.lstat()) is False
test.fix(test_path, test_path.lstat())
assert test(test_path, test_path.lstat()) is True
# cleanup
test_path.rmdir()
| 5,351,712 |
def http_request(source_id, endpoint_id, args, kwargs, # pylint: disable=too-many-arguments
service_addr, auth=None):
"""Call http endpoint"""
headers = {"content-type": "application/json"}
if auth is not None:
headers["Authorization"] = basic_auth_header(auth)
payload = _create_http_message(args, endpoint_id, kwargs, source_id)
url = service_addr
if not url.startswith("http"):
url = "http://" + url
LOGGER.debug("Url: %s", url)
response = requests.post(url, data=payload, headers=headers)
return_value = None
if response.status_code < 300:
return_value = json_tricks.loads(response.content.decode("utf-8"))
return return_value, response.status_code
| 5,351,713 |
async def filter_by_game_stats(opsdroid, string, room, action):
"""Match incoming messages against the current games stats."""
if room not in STAT_REGEXES.keys():
gamestats = await get_stat_names(opsdroid, room)
if not gamestats:
return []
STAT_REGEXES[room] = {"set": regex.compile(f"(?:(?:{'|'.join(['!'+s for s in gamestats])}) {MODIFIER_REGEX})",
flags=regex.IGNORECASE),
"roll": regex.compile("|".join(gamestats), flags=regex.IGNORECASE)}
stats = STAT_REGEXES[room][action].findall(string)
return stats
| 5,351,714 |
def password_provider():
"""
Provides the full password check
"""
return [(n,) for n in range(5)]
| 5,351,715 |
def make_combiparameter(*args, **kwargs):
"""
Make a combined qcodes parameter.
Args:
*args : list of gates or parameters
(e.g. make_combiparameter("A1", "A3", station.gates.B1 ))
"""
station = qc.Station.default
parameters = []
for i in args:
if type(i) == str:
parameters.append(getattr(station.gates, i))
else:
parameters.append(i)
label = ""
for i in parameters:
label += i.label + " "
    name = kwargs.get('name', 'combi_par')
return combi_par(parameters, label, name)
| 5,351,716 |
def sort_data(items):
"""
"""
for i in range(len(items)):
for j in range(len(items)-1-i):
if items[j] > items[j+1]:
items[j], items[j+1] = items[j+1], items[j]
print items
| 5,351,717 |
def main():
""" Input Image File / Url """
os.system('clear')
print logo
print '\x1b[1;0mAuthor :', author
print '\x1b[1;0mGithub :', github
img = raw_input('\n\x1b[1;0m[\x1b[1;32m+\x1b[1;0m] Image File / Url : \x1b[1;32m')
if 'https' in img or 'http' in img:
try:
image = urllib.urlopen(img)
if image.headers.maintype == 'image':
image_url(img)
else:
print '\x1b[1;0m[\x1b[1;31m+\x1b[1;0m]\x1b[1;0m Error Url Image Not Found'
time.sleep(2)
main()
except:
print '\x1b[1;0m[\x1b[1;31m+\x1b[1;0m]\x1b[1;0m Error Url Image Not Found'
time.sleep(2)
main()
else:
image_file(img)
| 5,351,718 |
def get_delta_fmt(delta):
"""arbitrary colour formatting of rank delta
more red for bigger losses, more green for bigger gains
"""
col = (0, 0, 0, 255)
n = abs(delta)
s = delta
if delta < 0:
sat = min(n/200 + 0.2, 1)
r, g, b = hsv_to_rgb(0, sat, 1)
col = (r, g, b, 1)
else:
s = "+"+str(n)
sat = min(n/100 + 0.2, 1)
r, g, b = hsv_to_rgb(1/3, sat, 1)
col = (r, g, b, 1)
return "(" + str(s) + ")", col
| 5,351,719 |
def main(args=None):
"""Entry point"""
warnings.showwarning = _warn_redirect
try:
retcode = create_workflow(args)
except Exception as e:
        retcode = 1
sys.exit(retcode)
| 5,351,720 |
def api_wait():
"""
Wait 3 seconds to ensure no requests are sent before the 3 second timer
for API calls has refreshed
"""
gevent.sleep(3)
| 5,351,721 |
def displayFrames(frames):
"""Displays the supplied list of frames
@type frames: list<Frame>
@param frames: List of frames to display"""
framesFormat = "%-35s %-11s %-15s %-13s %-12s %-9s %5s %7s %5s"
header = framesFormat % (
"Frame", "Status", "Host", "Start", "End", "Runtime", "Mem", "Retry", "Exit")
print(header + "\n" + "-" * len(header))
for frame in frames:
startTime = cueadmin.format.formatTime(frame.data.start_time)
stopTime = cueadmin.format.formatTime(frame.data.stop_time)
if frame.data.start_time:
duration = cueadmin.format.formatDuration(cueadmin.format.findDuration(frame.data.start_time,
frame.data.stop_time))
else:
duration = ""
memory = cueadmin.format.formatMem(frame.data.max_rss)
exitStatus = frame.data.exit_status
print(framesFormat % (
cueadmin.format.cutoff(frame.data.name, 35),
opencue.compiled_proto.job_pb2.FrameState.Name(frame.data.state),
frame.data.last_resource,
startTime,
stopTime,
duration,
memory,
frame.data.retry_count,
exitStatus))
if len(frames) == 1000:
print("Warning: Only showing first 1000 matches. See frame query options to "
"limit your results.")
| 5,351,722 |
def calculate_wtv(sample_values, epoch_time_interval=WTV_EPOCH_TIME, relative_to_time=None):
"""
Calculate the Wear-Time Validation (30-minute epochs) for a given sample ndarray [[time_seconds, accel_x, accel_y, accel_z]].
    Based on the method by van Hees et al. in PLoS ONE 2011 6(7),
"Estimation of Daily Energy Expenditure in Pregnant and Non-Pregnant Women Using a Wrist-Worn Tri-Axial Accelerometer".
Accelerometer non-wear time is estimated from the standard deviation and range of each accelerometer axis,
calculated for consecutive blocks of 30 minutes.
A block was classified as non-wear time if the standard deviation was less than 3.0 mg
(1 mg = 0.00981 m*s-2) for at least two out of the three axes,
or if the value range, for at least two out of three axes, was less than 50 mg.
:param epoch_time_interval: seconds per epoch (the algorithm is defined for 30 minutes)
:param relative_to_time: None=align epochs to start of data, 0=align epochs to natural time, other=custom alignment
:returns: ndarray of [time,worn], where worn is 0 (not worn), or 1 (worn)
"""
if epoch_time_interval != WTV_EPOCH_TIME:
print('WARNING: WTV algorithm is defined for %d seconds, but currently using %d seconds' % (WTV_EPOCH_TIME, epoch_time_interval), file=sys.stderr)
# Split samples into epochs
epochs = epoch.split_into_epochs(sample_values, epoch_time_interval, relative_to_time=relative_to_time)
# Calculate each epoch
num_epochs = len(epochs)
result = np.empty((num_epochs,2))
for epoch_index in range(num_epochs):
this_epoch = epochs[epoch_index]
# Epoch start time and sample data
epoch_time = this_epoch[0,0]
samples = this_epoch[:,1:4]
# Per-axis/sample standard deviation and range
stddev = np.std(samples, axis=0)
value_range = np.ptp(samples, axis=0)
# Count axes
count_stddev_low = np.sum(stddev < WTV_STD_CUTOFF)
count_range_low = np.sum(value_range < WTV_RANGE_CUTOFF)
# Determine if worn
if count_stddev_low >= WTV_STD_MIN_AXES or count_range_low >= WTV_RANGE_MIN_AXES:
epoch_value = 0
else:
epoch_value = 1
# Result
result[epoch_index,0] = epoch_time
result[epoch_index,1] = epoch_value
return result
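# --- Illustrative sketch (an addition, not from the original module) ----------
# The docstring above describes the per-epoch non-wear test. Here it is applied
# directly to one synthetic 30-minute block of 100 Hz tri-axial data, with the
# thresholds assumed to be expressed in g (3 mg std, 50 mg range, >= 2 axes).
import numpy as np

still = 1.0 + np.random.normal(0.0, 0.0005, (30 * 60 * 100, 3))  # a motionless sensor
stddev = np.std(still, axis=0)
value_range = np.ptp(still, axis=0)
not_worn = (np.sum(stddev < 0.003) >= 2) or (np.sum(value_range < 0.050) >= 2)
print(not_worn)  # expected True: the block would be classified as non-wear (epoch value 0)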
| 5,351,723 |
def function(default=None):
"""Docstring comes first.
Possibly many lines.
"""
# FIXME: Some comment about why this function is crap but still in production.
import inner_imports
if inner_imports.are_evil():
# Explains why we have this if.
# In great detail indeed.
x = X()
return x.method1() # type: ignore
# This return is also commented for some reason.
return default
| 5,351,724 |
def fit_pk_parms_1d(p0, x, f, pktype='pvoigt'):
"""
Performs least squares fit to find parameters for 1d analytic functions fit
to diffraction data
Required Arguments:
p0 -- (m) ndarray containing initial guesses for parameters
for the input peaktype
x -- (n) ndarray of coordinate positions
f -- (n) ndarray of intensity measurements at coordinate positions x
pktype -- string, type of analytic function that will be used to
fit the data,
    current options are "gaussian", "lorentzian", "pvoigt" (pseudo-Voigt), and
    "split_pvoigt" (split pseudo-Voigt)
Outputs:
p -- (m) ndarray containing fit parameters for the input peaktype
(see peak function help for what each parameters corresponds to)
Notes:
1. Currently no checks are in place to make sure that the guess of
parameters has a consistent number of parameters with the requested
peak type
"""
weight = np.max(f)*10. # hard coded should be changed
fitArgs = (x, f, pktype)
if pktype == 'gaussian':
p, outflag = optimize.leastsq(
fit_pk_obj_1d, p0,
args=fitArgs, Dfun=eval_pk_deriv_1d,
ftol=ftol, xtol=xtol
)
elif pktype == 'lorentzian':
p, outflag = optimize.leastsq(
fit_pk_obj_1d, p0,
args=fitArgs, Dfun=eval_pk_deriv_1d,
ftol=ftol, xtol=xtol
)
elif pktype == 'pvoigt':
lb = [p0[0]*0.5, np.min(x), 0., 0., 0., None]
ub = [p0[0]*2.0, np.max(x), 4.*p0[2], 1., 2.*p0[4], None]
fitArgs = (x, f, pktype, weight, lb, ub)
p, outflag = optimize.leastsq(
fit_pk_obj_1d_bnded, p0,
args=fitArgs,
ftol=ftol, xtol=xtol
)
elif pktype == 'split_pvoigt':
lb = [p0[0]*0.5, np.min(x), 0., 0., 0., 0., 0., None]
ub = [p0[0]*2.0, np.max(x), 4.*p0[2], 4.*p0[2], 1., 1., 2.*p0[4], None]
fitArgs = (x, f, pktype, weight, lb, ub)
p, outflag = optimize.leastsq(
fit_pk_obj_1d_bnded, p0,
args=fitArgs,
ftol=ftol, xtol=xtol
)
elif pktype == 'tanh_stepdown':
p, outflag = optimize.leastsq(
fit_pk_obj_1d, p0,
args=fitArgs,
ftol=ftol, xtol=xtol)
elif pktype == 'dcs_pinkbeam':
lb = np.array([0.0, x.min(), -100., -100.,
-100., -100., 0., 0.,
-np.inf, -np.inf, -np.inf])
ub = np.array([np.inf, x.max(), 100., 100.,
100., 100., 10., 10.,
np.inf, np.inf, np.inf])
res = optimize.least_squares(
fit_pk_obj_1d, p0,
jac='2-point',
bounds=(lb, ub),
method='trf',
args=fitArgs,
ftol=ftol,
xtol=xtol)
p = res['x']
outflag = res['success']
else:
p = p0
print('non-valid option, returning guess')
if np.any(np.isnan(p)):
p = p0
print('failed fitting, returning guess')
return p
| 5,351,725 |
def is_pipe_registered(
pipe : Union['meerschaum.Pipe', 'meerschaum.Pipe.MetaPipe'],
pipes : dict,
debug : bool = False
):
"""
Check if a Pipe or MetaPipe is inside the pipes dictionary.
"""
from meerschaum.utils.debug import dprint
ck, mk, lk = pipe.connector_keys, pipe.metric_key, pipe.location_key
if debug:
dprint(f'{ck}, {mk}, {lk}')
dprint(f'{pipe}, {pipes}')
return ck in pipes and mk in pipes[ck] and lk in pipes[ck][mk]
| 5,351,726 |
def truncate(s, length=255, killwords=True, end='...'):
"""
Wrapper for jinja's truncate that checks if the object has a
__truncate__ attribute first.
Altering the jinja2 default of killwords=False because of
https://bugzilla.mozilla.org/show_bug.cgi?id=624642, which could occur
elsewhere.
"""
if s is None:
return ''
if hasattr(s, '__truncate__'):
return s.__truncate__(length, killwords, end)
return jinja2.filters.do_truncate(smart_unicode(s), length, killwords, end)
| 5,351,727 |
def tract_segmentation_single_example_lap (kdt_T_A, prototypes_T_A,sid, num_NN,T_A ):
""" step 1: tract segmentation from a single example using Jonker-Volgenant algorithm (LAPJV)
"""
E_t_filename= 'data/example/'+ str(sid) +'_'+str(tract_name)+'.trk'
print("Loading Example tract: %s" % E_t_filename)
E_t, hdr= load(E_t_filename, threshold_short_streamlines=threshold_short_streamlines)
dm_E_t= dissimilarity(E_t, prototypes_T_A,bundles_distances_mam)
    # compute the NN of the example tract in order to construct the cost matrix
NN_E_t_NN_Idx= NN (kdt_T_A, dm_E_t,num_NN)
print("Computing the cost matrix with mam distance (%s x %s) for RLAP " % (len(E_t),
len( NN_E_t_NN_Idx)))
cost_matrix = bundles_distances_mam_smarter_faster(E_t,
T_A[NN_E_t_NN_Idx])
print("Computing optimal assignmnet with LAPJV")
assignment = LinearAssignment(cost_matrix).solution
min_cost_values= cost_matrix[np.arange(len(cost_matrix)), assignment]
return NN_E_t_NN_Idx[assignment], min_cost_values, len(E_t)
| 5,351,728 |
def output_screening_results(parent_aln_obj, aln_obj_lst, style):
"""
Function outputs results according to user input style from the -s flag
Currently there are 3 versions.
1) changes output alignment sequence to lower case, then overwrites to
upper case if there is a match
2) alignment sequence is all dashes unless a site does exist
3) alignment sequence is all dashes unless a site does not exist
Current output is to stdout
"""
# Print top level strings:
print parent_aln_obj.match_aln_str
print parent_aln_obj.aln_seq
# Lower Case Version:
if style == "1":
for aln_obj in aln_obj_lst:
if aln_obj != parent_aln_obj:
aln_seq_lower = aln_obj.aln_seq.lower()
build_list = []
for potn_match_tup in aln_obj.potential_match_list:
if potn_match_tup[2] == True:
build_list.append(potn_match_tup[1])
if build_list == []:
output_str = aln_seq_lower
else:
output_str = aln_obj.build_match_string(aln_seq_lower,
build_list)
print output_str
# Dash Version IS a site:
elif style == "2":
# if aln_obj != parent_aln_obj:
for aln_obj in aln_obj_lst:
dashes = "-" * aln_obj.aln_seq_length
build_list = []
for potn_match_tup in aln_obj.potential_match_list:
if potn_match_tup[2] == True:
build_list.append(potn_match_tup[1])
if build_list == []:
output_str = dashes
else:
output_str = aln_obj.build_match_string(dashes, build_list)
print output_str
# Dash Version NOT a site:
elif style == "3":
# if aln_obj != parent_aln_obj:
for aln_obj in aln_obj_lst:
dashes = "-" * aln_obj.aln_seq_length
build_list = []
for potn_match_tup in aln_obj.potential_match_list:
if potn_match_tup[2] == False:
build_list.append(potn_match_tup[1])
if build_list == []:
output_str = dashes
else:
output_str = aln_obj.build_match_string(dashes, build_list)
print output_str
# Warning that output style does not exist:
else:
print 'Warning output style should be "1", "2", or "3"'
| 5,351,729 |
def url(method):
"""对于每一个URL的请求访问装饰器,在出错时返回对应的信息"""
@wraps(method)
def error_handler(*args, **kwargs):
try:
return success(method(*args, **kwargs))
except RequestError as r:
current_app.logger.exception(r)
            # Return the error number and message documented by the exception class
return failed(reason=r.err_num(), message=r.err_msg())
except Exception as e:
current_app.logger.exception(e)
return failed()
return error_handler
| 5,351,730 |
def is_abbreviation(sentence):
"""
    Treat the sentence as ending in an abbreviation if the final word before the
    period is at most three characters long and contains a capital letter,
    and the sentence contains more than one word.
"""
sentence_split = sentence.split(" ")
if len(sentence_split) == 1:
return False
elif len(sentence_split[-1]) <= 3 and \
any(x.isupper() for x in sentence_split[-1]):
return True
else:
return False
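# Illustrative checks (assumed examples, not from the original test suite): the
# heuristic flags a short, capitalised final token before the period, and never
# flags a single-word sentence.
assert is_abbreviation("He works at Acme Inc") is True    # "Inc": <= 3 chars, has an uppercase letter
assert is_abbreviation("She went home") is False          # "home": longer than 3 chars
assert is_abbreviation("Hello") is False                  # single-word sentence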
| 5,351,731 |
async def test_crudrouter_get_one_404(test_app):
"""Tests that select_or_404 will raise a 404 error on an empty return"""
async with AsyncClient(app=test_app[1], base_url="http://test") as client:
response = await client.get("/model/test")
assert response.status_code == 404
| 5,351,732 |
def update_sitedown(cur, site, status):
"""
Update whether the site is down
:param cur: database cursor
:param site: named tuple containing site data
:param status: string 'true' if site is down or 'false' if site is up
"""
cur.execute('''UPDATE watchlist SET sitedown=? WHERE username=? AND url=?''',
(status, site.username, site.url))
| 5,351,733 |
def generate_self_signed(domain):
"""Generate self-signed SSL key and certificate.
"""
cmd = (
'openssl req -x509 -nodes -days 365 -newkey rsa:2048'
' -keyout {0}.key -out {0}.crt'
).format(domain)
run(cmd)
| 5,351,734 |
def compose_redis_key(vim_name, identifier, identifier_type="vdu"):
"""Compose the key for redis given vim name and vdu uuid
Args:
vim_name (str): The VIM name
identifier (str): The VDU or VNF uuid (NFVI based)
identifier_type (str): the identifier type. Default type is vdu. Also vnf is supported.
Returns:
str: the key for redis
"""
if identifier_type == "vnf":
return "{}:vnf#{}".format(vim_name.lower(), identifier)
else:
return "{}:{}".format(vim_name.lower(), identifier)
| 5,351,735 |
def stack_to_hdf5(stack_path, write_path, dims, dtype):
"""
"""
stack = np.fromfile(stack_path, dtype=dtype).reshape(dims)
writeHDF5(write_path, '/default', stack)
| 5,351,736 |
def calGridID(locs, id, SPLIT = 0.0005):
"""
根据城市网格编号还原经纬度信息
:param locs:
:param id:
:param SPLIT=0.05:
"""
centerincrement = SPLIT/2.0
LNGNUM = int((locs['east'] - locs['west']) / SPLIT + 1)
latind = int(id / LNGNUM)
lngind = id - latind * LNGNUM
lat = (locs['south'] + latind * SPLIT)
lng = (locs['west'] + lngind * SPLIT)
lngcen = (lng + centerincrement)
latcen = (lat + centerincrement)
return "%.3f,%.3f" % (latcen, lngcen)
# {
# 'lat': latcen,
# 'lng': lngcen
# }
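# Worked example (made-up bounding box, assuming the function above): with
# SPLIT = 0.0005 this box has LNGNUM = 1001 columns, so id 2505 maps to row 2,
# column 503, i.e. the cell whose centre is roughly (39.50125, 116.25175).
locs = {'west': 116.0, 'east': 116.5, 'south': 39.5, 'north': 40.0}
print(calGridID(locs, 2505))  # -> approximately "39.501,116.252"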
| 5,351,737 |
def linear(x, *p):
"""[summary]
Arguments:
x {[type]} -- [description]
Returns:
[type] -- [description]
"""
return p[0] * x + p[1]
| 5,351,738 |
def convert_price_text(t):
"""
convert "$175/month' to 175
:param t:
:return: price, unit (i.e. 175, 'month')
"""
tok = t.split('$')[1]
if '/' in tok:
price, unit = tok.split('/')
else:
price = tok
unit = None
return float(price.strip().strip('$').replace(',', '')), unit
| 5,351,739 |
async def async_setup(hass, config):
"""Set up the WWLLN component."""
if DOMAIN not in config:
return True
conf = config[DOMAIN]
latitude = conf.get(CONF_LATITUDE, hass.config.latitude)
longitude = conf.get(CONF_LONGITUDE, hass.config.longitude)
identifier = '{0}, {1}'.format(latitude, longitude)
if identifier in configured_instances(hass):
return True
if hass.config.units.name == CONF_UNIT_SYSTEM_IMPERIAL:
unit_system = CONF_UNIT_SYSTEM_IMPERIAL
else:
unit_system = CONF_UNIT_SYSTEM_METRIC
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={'source': SOURCE_IMPORT},
data={
CONF_LATITUDE: latitude,
CONF_LONGITUDE: longitude,
CONF_RADIUS: conf[CONF_RADIUS],
CONF_WINDOW: conf[CONF_WINDOW],
CONF_UNIT_SYSTEM: unit_system,
}))
return True
| 5,351,740 |
def __are_nearly_overlapped(
plane_predicted: NDArray[Any, np.int32],
plane_gt: NDArray[Any, np.int32],
required_overlap: np.float64,
) -> (bool, bool):
"""
Calculate if planes are overlapped enough (required_overlap %) to be used for PP-PR metric
    :param required_overlap: overlap threshold which will be checked to say that planes overlap
:param plane_predicted: predicted segmentation
:param plane_gt: ground truth segmentation
:return: true if planes are overlapping by required_overlap % or more, false otherwise
"""
intersection = np.intersect1d(plane_predicted, plane_gt)
return (
intersection.size / plane_predicted.size >= required_overlap
and intersection.size / plane_gt.size >= required_overlap,
intersection.size > 0,
)
| 5,351,741 |
def _get_prob_k_given_L(B, N=None):
"""
Helper function.
"""
if N is None:
N = int(B[0, 1])
return B / N
| 5,351,742 |
def base64_encode(s):
"""unicode-safe base64
base64 API only talks bytes
"""
if not isinstance(s, bytes):
s = s.encode('ascii', 'replace')
encoded = encodebytes(s)
return encoded.decode('ascii')
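# Quick illustration (assumed examples): bytes and str inputs both work, and the
# result is always an ASCII str; non-ASCII characters are replaced before encoding.
print(base64_encode(b"hello"))   # -> 'aGVsbG8=\n'
print(base64_encode("héllo"))    # the 'é' becomes '?' before base64 encoding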
| 5,351,743 |
def _unpack_tableswitch(bc, offset):
"""
function for unpacking the tableswitch op arguments
"""
jump = (offset % 4)
if jump:
offset += (4 - jump)
(default, low, high), offset = _unpack(_struct_iii, bc, offset)
joffs = list()
for _index in xrange((high - low) + 1):
j, offset = _unpack(_struct_i, bc, offset)
joffs.append(j)
return (default, low, high, joffs), offset
| 5,351,744 |
def test_immi1():
"""
Test immi on redundant distribution.
"""
d = bivariates['redundant']
red = i_mmi(d, ((0,), (1,)), (2,))
assert red == pytest.approx(1)
| 5,351,745 |
def force_orders(self, **kwargs):
"""User's Force Orders (USER_DATA)
GET /fapi/v1/forceOrders
https://binance-docs.github.io/apidocs/futures/en/#user-39-s-force-orders-user_data
Keyword Args:
symbol (str, optional)
autoCloseType (str, optional): "LIQUIDATION" for liquidation orders, "ADL" for ADL orders.
startTime (int, optional)
endTime (int, optional)
limit (int, optional): Default 50; max 100.
recvWindow (int, optional)
Notes:
If "autoCloseType" is not sent, orders with both of the types will be returned
If "startTime" is not sent, data within 7 days before "endTime" can be queried
"""
payload = {**kwargs}
url_path = "/fapi/v1/forceOrders"
return self.sign_request("GET", url_path, payload)
| 5,351,746 |
def main():
"""main"""
args = get_args()
print('color =', args.color)
| 5,351,747 |
def prosp_power_analysis_norm(d, sigma, pow_lev, alpha, direction):
"""
    This function conducts a prospective (pre-test) power analysis and
    calculates the minimum required sample size for a normal sample.
@param d: difference between the mean differences under H1 and H0
@param sigma: standard deviation
@param pow_lev: power level
@param alpha: significance level
@param direction: direction of the test, two-sided or one-sided
@return: required minimal sample size
"""
# first calculates for a z test
n_z = np.ceil(z_test_sample_size(d, sigma, alpha, pow_lev, direction))
# first iteration for t test
n_t_1 = np.ceil(t_test_sample_size(d, sigma, n_z-1, alpha, pow_lev, direction))
# second iteration for t test
n_t_2 = np.ceil(t_test_sample_size(d, sigma, n_t_1-1, alpha, pow_lev, direction))
    return np.ceil(n_t_2)
| 5,351,748 |
def decision_tree_construction(examples, target_attribute, attributes, depth):
"""
:param examples: The data we will use to train the tree(x)
:param target_attribute: The label we want to classify(y)
:param attributes: The number(index) of the labels/attributes of the data-set
:return: The tree corresponding to the given data
"""
# This is the first base condition of the algorithm. It is used if the attributes variable is empty, then we return
# the single-node tree Root, with label = most common value of target_attribute in examples
# The base condition for the recursion when we check if all the variables are same or not in the node and if they
# are same then we return that value as the node
if len(attributes) == 0 or len(np.unique(target_attribute)) == 1:
unique_value_of_attribute, counts_of_attribute = np.unique(target_attribute, return_counts=True)
try:
if counts_of_attribute[0] < counts_of_attribute[1]:
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
except IndexError:
i = 0
if unique_value_of_attribute[0] == 1:
# More positive values
return 1, depth
elif unique_value_of_attribute[0] == 0:
# More negative values
return 0, depth
# This is the recursion part of the algorithm in which we try to find the sub-tree's by using recursion and
# information gain
else:
Information_Gain = Information_Gain_Heuristic(examples, attributes, target_attribute)
best_attribute_number = attributes[np.argmax(Information_Gain)]
# Since we now have the best_attribute(A in algorithm) we will create the root node of the tree/sub-tree with
# that and name the root as the best attribute among all Here we make the tree as a dictionary for testing
# purposes
tree = dict([(best_attribute_number, dict())])
if isinstance(tree, int):
# If the given value is a int value then it's definitely a leaf node and if it's a dictionary then its a
# node
tree[best_attribute_number]["type_of_node"] = "leaf"
tree[best_attribute_number]["depth"] = depth
unique_value_of_attribute, counts_of_attribute = np.unique(target_attribute, return_counts=True)
try:
if counts_of_attribute[0] < counts_of_attribute[1]:
counts_of_attribute = np.flipud(counts_of_attribute)
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
except IndexError:
# Here we can have an index error since in some case it may happen that the array has only one type
# of value and thus accessing the index [1] is not possible
i = 0
tree[best_attribute_number]["majority_target_attribute"] = unique_value_of_attribute[0]
tree[best_attribute_number]["best_attribute_number"] = best_attribute_number
else:
tree[best_attribute_number]["type_of_node"] = "node"
tree[best_attribute_number]["depth"] = depth
unique_value_of_attribute, counts_of_attribute = np.unique(target_attribute, return_counts=True)
try:
if counts_of_attribute[0] < counts_of_attribute[1]:
counts_of_attribute = np.flipud(counts_of_attribute)
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
except IndexError:
# Here we can have an index error since in some case it may happen that the array has only one type
# of value and thus accessing the index [1] is not possible
i = 0
tree[best_attribute_number]["majority_target_attribute"] = unique_value_of_attribute[0]
tree[best_attribute_number]["best_attribute_number"] = best_attribute_number
attributes.remove(best_attribute_number)
# Now we do the recursive algorithm which will be used to create the tree after the root node.
depth_of_node = []
for each_unique_value in np.unique(examples[best_attribute_number]):
# We use those values for which the examples[best_attribute_number] == each_unique_value
class1 = each_unique_value
new_target_attribute = pd.DataFrame(target_attribute)
total_data = pd.concat([examples, new_target_attribute], axis=1, sort=False)
            # We do this step so that we can pick the values which belong to the best_attribute = [0,1], i.e. We now
# want to divide our data so that the values for the best_attribute is divided among the branches. And
# thus we will have 4 arrays now, two for the data and two for target attribute.
new_data_after_partition = total_data.loc[total_data[best_attribute_number] == class1]
new_target_attribute, new_examples_after_partition = get_attributes_and_labels(new_data_after_partition)
# This is also a condition for our algorithm in which we check if the number of examples after the
# partition are positive or not. If the values are less than 1 then we return the most frequent value in
# the node
if len(new_examples_after_partition) == 0:
unique_value_of_attribute, counts_of_attribute = np.unique(target_attribute, return_counts=True)
try:
if counts_of_attribute[0] < counts_of_attribute[1]:
counts_of_attribute = np.flipud(counts_of_attribute)
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
except IndexError:
i = 0
if unique_value_of_attribute[0] == 1:
# More positive values
return 1, depth
elif unique_value_of_attribute[0] == 0:
# More negative values
return 0, depth
# This is the recursion step, in which we make new decision trees till the case when any of the base
# cases are true
new_sub_tree_after_partition, deptha = decision_tree_construction(new_examples_after_partition,
new_target_attribute, attributes,
depth + 1)
depth_of_node.append(deptha)
# Here we are adding the depth of the node so that we can do the depth based pruning
tree[best_attribute_number][each_unique_value] = new_sub_tree_after_partition
if isinstance(new_sub_tree_after_partition, int):
tree[best_attribute_number]["type_of_node"] = "leaf"
tree[best_attribute_number]["depth"] = depth
unique_value_of_attribute, counts_of_attribute = np.unique(target_attribute, return_counts=True)
try:
if counts_of_attribute[0] < counts_of_attribute[1]:
counts_of_attribute = np.flipud(counts_of_attribute)
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
except IndexError:
i = 0
tree[best_attribute_number]["majority_target_attribute"] = unique_value_of_attribute[0]
tree[best_attribute_number]["best_attribute_number"] = best_attribute_number
else:
tree[best_attribute_number]["type_of_node"] = "node"
tree[best_attribute_number]["depth"] = depth
unique_value_of_attribute, counts_of_attribute = np.unique(target_attribute, return_counts=True)
try:
if counts_of_attribute[0] < counts_of_attribute[1]:
counts_of_attribute = np.flipud(counts_of_attribute)
unique_value_of_attribute = np.flipud(unique_value_of_attribute)
except IndexError:
i = 0
tree[best_attribute_number]["majority_target_attribute"] = unique_value_of_attribute[0]
tree[best_attribute_number]["best_attribute_number"] = best_attribute_number
return tree, max(depth_of_node)
| 5,351,749 |
def mass_centered(geo):
""" mass-centered geometry
"""
geo = translate(geo, numpy.negative(center_of_mass(geo)))
return geo
| 5,351,750 |
def generate(env):
"""Called when the tool is loaded into the environment at startup of script"""
assert(exists(env))
MkdocsCommon.setup_opts_combiner(env)
mkdocs_scanner = env.Scanner(
MkdocsCommon.MkdocsScanner,
'MkdocsScanner',
)
bld = Builder(
action = __MkdocsCombiner_func,
emitter = MkdocsCommon.MkdocsCombiner_emitter,
source_scanner = mkdocs_scanner,
)
env.Append(BUILDERS = {'MkdocsCombiner' : bld})
| 5,351,751 |
def test_newlines_group():
"""Convert newlines in all groups."""
assert_equal(
gfm('apple\npear\norange\n\nruby\npython\nerlang'),
'apple \npear \norange\n\nruby \npython \nerlang',
)
| 5,351,752 |
def binaryMatrix(l, value=PAD_token):
"""
:param l:
:param value:
:return: seq: [3,4,5,0,0]
m: [[1],[1],[1],[0],[0]]
"""
m = []
for i, seq in enumerate(l):
m.append([])
for token in seq:
            if token == value:
m[i].append(0)
else:
m[i].append(1)
return m
| 5,351,753 |
def show_download(dl_config):
"""
    Download the episodes listed at the configured URL, honouring the skip/ignore rules in the config.
"""
i = 0
config_specs = config.get_specs(dl_config)
url = dl_config[DOWNLOAD]['url']
main_html = urllib.request.urlopen(
url, timeout=int(
dl_config[DOWNLOAD][TIMEOUT])).read()
main_page = BeautifulSoup(main_html, "lxml")
sub_links = __get_list_links(main_page, config_specs)
log.ok("Opened list URL: " + url)
for link in sub_links:
if __episode_not_skipped(i, link.contents[0], config_specs) and \
__episode_not_ignored(i, link.contents[0], config_specs):
video_hrefs = __find_best_hrefs(link['href'], config_specs)
if len(video_hrefs) > 0:
__download_video(video_hrefs, link.contents[0],
config_specs)
else:
log.warn("No downloadable link found at URL: " + link['href'])
i += 1
| 5,351,754 |
def clear_config():
"""Reset pn.config"""
pn.config.raw_css = []
pn.config.js_files = {}
pn.config.css_files = []
| 5,351,755 |
def realTimeIdentification(face_recognizer, subjects):
"""实时识别"""
print("进行实时预测")
face_recognizer.read(r'./models/train.yml')
cap = cv2.VideoCapture(0)
    # Save the video: output path, fourcc codec, fps, frame size, isColor (colour vs. grayscale)
fourcc = cv2.VideoWriter_fourcc('I', '4', '2', '0')
out = cv2.VideoWriter(r'./output.avi', fourcc, 20.0, (640, 480), True)
    # Detect and recognise faces in a loop
start_time = time()
while True:
_, frame = cap.read()
sleep(0.01)
try:
face, rect = detect_face(frame)
label = face_recognizer.predict(face)
except Exception as e:
print("错误信息为:", e)
traceback.print_exc()
print('traceback.format_exc():\n%s'%traceback.format_exc())
cv2.imshow('camera', frame)
else:
print(label)
if label[1] > 80:
engine = pyttsx3.init()
end_time = time()
draw_rectangle(frame, rect)
draw_text(frame, subjects[0], rect[0], rect[1] - 5)
out.write(frame)
run_time = end_time - start_time
if frame is not None and run_time > 10:
                    winsound.Beep(1440, 1500)  # motherboard beeper
                    engine.say("Warning, warning, a stranger is approaching")
engine.runAndWait()
start_time = end_time
else:
label_text = subjects[label[0]]
draw_rectangle(frame, rect)
draw_text(frame, label_text, rect[0], rect[1] - 5)
cv2.imshow('camera', frame)
        # Wait 10 ms for a key press
k = cv2.waitKey(10)
        # Exit the loop if 'q' is pressed
if k & 0xFF == ord('q'):
break
    # Release the camera and destroy all windows
out.release()
cap.release()
cv2.destroyAllWindows()
| 5,351,756 |
def plot_history(model_wrapper, job_config, save_dir=None):
"""Evaluates training result.
Args:
figsize: tuple
Defines plot size.
"""
logger.info("Plotting training history")
plot_config = job_config.apply.cfg_history
train_history = model_wrapper._train_history
num_folds = model_wrapper._num_folds
for metric_key in train_history[0].keys():
if not metric_key.startswith("val_"):
plot_metrics(
metric_key,
train_history,
plot_config,
num_folds=num_folds,
save_dir=save_dir,
)
| 5,351,757 |
def load_map(mappath):
""" Attempt to load map with known loaders
"""
data = None
shirtloader = lambda path: fio.load_map(path)[0][0:3]
maploaders = [load_pfire_map, shirtloader]
for loader in maploaders:
try:
data = loader(mappath)
except (ValueError, OSError):
pass
if data is not None:
break
if data is None:
raise RuntimeError("Failed to load map \"{}\"".format(mappath))
return data
| 5,351,758 |
def client_thread(client_url, i):
"""Basic request-reply client using REQ socket"""
context = zmq.Context()
socket = context.socket(zmq.REQ)
identity = "Client-%d" % (i)
socket.setsockopt(zmq.IDENTITY, identity) #Set client identity. Makes tracing easier
socket.connect(client_url)
# Send request, get reply
socket.send("HELLO")
reply = socket.recv()
print "%s: %s\n" % (identity, reply),
| 5,351,759 |
def ifttt_account_options_topup_source():
""" Option values for topup source account selection"""
return ifttt_account_options(False, "Internal")
| 5,351,760 |
def announce_user_details_updated(
event: UserDetailsUpdated, webhook: OutgoingWebhook
) -> None:
"""Announce that a user's details have been changed."""
text = user.assemble_text_for_user_details_updated(event)
call_webhook(webhook, text)
| 5,351,761 |
def strip_classes(soup:BeautifulSoup, *args:str):
"""
Strip class from given tags in a BeautifulSoup object.
Args:
soup (BeautifulSoup): soup to clean
args ([str]): A list of tags to be unclassed
Returns:
soup (BeautifulSoup)
Modules:
bs4 (BeautifulSoup)
"""
if not args:
args = ['em', 'strong', 'sup']
# delete classes associated with selected tags:
for arg in args:
for tag in soup.find_all(arg):
if tag.has_attr('class'):
del tag.attrs['class']
return(soup)
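# Minimal usage sketch (made-up markup): class attributes are removed only from
# the requested tags; other tags and attributes are left untouched.
from bs4 import BeautifulSoup

html = '<p class="body"><em class="hl">word</em> and <strong class="hl">more</strong></p>'
soup = BeautifulSoup(html, 'html.parser')
print(strip_classes(soup, 'em', 'strong'))
# -> <p class="body"><em>word</em> and <strong>more</strong></p>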
| 5,351,762 |
def take_measurement(n_grid: np.int, n_rays: np.int, r_theta: np.float64) -> (
np.ndarray, np.ndarray, np.ndarray, np.ndarray):
"""
Take a measurement with the tomograph from direction r_theta.
Arguments:
n_grid: number of cells of grid in each direction
n_rays: number of parallel rays
r_theta: direction of rays (in radians)
Return:
intensities: measured intensities for all <n_rays> rays of the measurement. intensities[n] contains the intensity for the n-th ray
ray_indices: indices of rays that intersect a cell
isect_indices: indices of intersected cells
lengths: lengths of segments in intersected cells
The tuple (ray_indices[n], isect_indices[n], lengths[n]) stores which ray has intersected which cell with which length. n runs from 0 to the amount of ray/cell intersections (-1) of this measurement.
Raised Exceptions:
-
Side Effects:
-
"""
# compute ray direction in Cartesian coordinates
cs = np.cos(r_theta)
sn = np.sin(r_theta)
r_dir = np.array([-cs, -sn])
# compute start positions for rays
r_pos = np.zeros((n_rays, 2))
for i, g in enumerate(np.linspace(-0.99, 0.99, n_rays)):
r_pos[i] = np.array([cs - sn * g, sn + cs * g])
else:
r_pos[0] = np.array([cs, sn])
# compute measures intensities for each ray
intensities = np.zeros(n_rays)
for i, rs in enumerate(r_pos):
intensities[i] = trace(rs, r_dir)
# take exponential fall off into account
intensities = np.log(1.0 / intensities)
# compute traversal distance in each grid cell
ray_indices, isect_indices, lengths = grid_intersect(n_grid, r_pos, r_dir)
return intensities, ray_indices, isect_indices, lengths
| 5,351,763 |
def chrom_karyo_sort(chroms):
"""
:param chroms:
:return:
"""
ordered = []
unordered = []
for cname, size in chroms:
try:
ord = int(cname.lower().strip('chr'))
ordered.append((cname, size, ord * 10))
except ValueError:
ord = check_special_chroms(cname)
if ord > 0:
ordered.append((cname, size, ord))
else:
unordered.append((cname, size, -1))
unordered = sorted(unordered, key=lambda x: x[1], reverse=True)
ordered = sorted(ordered, key=lambda x: x[2])
ordered.extend(unordered)
return [(t[0], t[1]) for t in ordered]
| 5,351,764 |
def merge(A, lo, mid, hi, aux):
"""Merge two (consecutive) runs together."""
aux[lo:hi+1] = A[lo:hi+1]
left = lo
right = mid + 1
for i in range(lo, hi+1):
if left > mid:
A[i] = aux[right]
right += 1
elif right > hi:
A[i] = aux[left]
left += 1
elif aux[right] < aux[left]:
A[i] = aux[right]
right += 1
else:
A[i] = aux[left]
left += 1
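# A minimal bottom-up merge sort driver (an assumed usage sketch, not from the
# original source) showing how merge() is meant to be called: aux is a scratch
# buffer at least as long as A, and each call merges A[lo..mid] with A[mid+1..hi].
def merge_sort(A):
    aux = [None] * len(A)
    width = 1
    while width < len(A):
        for lo in range(0, len(A) - width, 2 * width):
            mid = lo + width - 1
            hi = min(lo + 2 * width - 1, len(A) - 1)
            merge(A, lo, mid, hi, aux)
        width *= 2
    return A

# merge_sort([5, 2, 9, 1, 7])  ->  [1, 2, 5, 7, 9]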
| 5,351,765 |
def teardown_function(function):
""" teardown any state that was previously setup with a setup_function
call.
"""
if (resource('ska_mid/tm_subarray_node/1').get('State') == "ON"):
if (resource('ska_mid/tm_subarray_node/1').get('obsState') == "IDLE"):
LOGGER.info("tearing down composed subarray (IDLE)")
take_subarray(1).and_release_all_resources()
if (resource('ska_mid/tm_subarray_node/1').get('obsState') == "READY"):
LOGGER.info("tearing down configured subarray (READY)")
take_subarray(1).and_end_sb_when_ready().and_release_all_resources()
if (resource('ska_mid/tm_subarray_node/1').get('obsState') == "CONFIGURING"):
LOGGER.warn("Subarray is still in CONFIFURING! Please restart MVP manually to complete tear down")
restart_subarray(1)
#raise exception since we are unable to continue with tear down
raise Exception("Unable to tear down test setup")
if (resource('ska_mid/tm_subarray_node/1').get('obsState') == "SCANNING"):
LOGGER.warn("Subarray is still in SCANNING! Please restart MVP manually to complete tear down")
restart_subarray(1)
#raise exception since we are unable to continue with tear down
raise Exception("Unable to tear down test setup")
LOGGER.info("Put Telescope back to standby")
set_telescope_to_standby()
| 5,351,766 |
def test_wrap_predict_method():
"""Check wrap_predict_method output with default inputs."""
from sasctl.utils.pymas.core import wrap_predict_method
target = """
def predict(a, b):
"Output: c, msg"
result = None
msg = None
try:
global _compile_error
if _compile_error is not None:
raise _compile_error
import numpy as np
import pandas as pd
if a is None: a = np.nan
if b is None: b = np.nan
input_array = np.array([a, b]).reshape((1, -1))
columns = ["a", "b"]
input_df = pd.DataFrame(data=input_array, columns=columns)
result = dummy_func(input_df)
result = tuple(result.ravel()) if hasattr(result, "ravel") else tuple(result)
if len(result) == 0:
result = tuple(None for i in range(1))
elif "numpy" in str(type(result[0])):
result = tuple(np.asscalar(i) for i in result)
except Exception as e:
from traceback import format_exc
msg = str(e) + format_exc()
if result is None:
result = tuple(None for i in range(1))
return result + (msg, )
""".rstrip()
code = wrap_predict_method(dummy_func, [DS2Variable('a', float, False),
DS2Variable('b', float, False),
DS2Variable('c', float, True)])
assert code == target
| 5,351,767 |
def calibrate_stereo(observations_left: List, observations_right: List, detector: FiducialCalibrationDetector,
num_radial: int = 4, tangential: bool = False, zero_skew: bool = True) -> (StereoParameters, List):
"""
Calibrates a stereo camera using a Brown camera model
    :param observations_left: List of {"points":(boofcv detections),"width":(image width),"height":(image height)} for the left camera
    :param observations_right: Matching list of observations for the right camera
:param detector:
:param num_radial:
:param tangential:
:param zero_skew:
:return:
"""
jlayout = detector.java_obj.getLayout(0) # Hard coded for a single target
jcalib_planar = gateway.jvm.boofcv.abst.geo.calibration.CalibrateStereoPlanar(jlayout)
jcalib_planar.configure(zero_skew, int(num_radial), tangential)
for idx in range(len(observations_left)):
jobs_left = convert_into_boof_calibration_observations(observations_left[idx])
jobs_right = convert_into_boof_calibration_observations(observations_right[idx])
jcalib_planar.addPair(jobs_left, jobs_right)
stereo_parameters = StereoParameters(jcalib_planar.process())
errors = []
for jerror in jcalib_planar.computeErrors():
errors.append({"mean": jerror.getMeanError(),
"max_error": jerror.getMaxError(),
"bias_x": jerror.getBiasX(), "bias_y": jerror.getBiasY()})
return (stereo_parameters, errors)
| 5,351,768 |
def register_producer_class(cls: Type[C]) -> Type[C]:
"""Registers the producer class and returns it unmodified."""
if not cls.TYPES:
raise ProducerInterfaceError(
f"Invalid producer. When defining producer, make sure to specify at least 1 type in the TYPES class variable."
)
for artifact_type in cls.ARTIFACT_TYPES:
if not (
isclass(artifact_type) and issubclass(artifact_type, BaseArtifact)
):
raise ProducerInterfaceError(
f"Associated artifact type {artifact_type} for producer is not a class or is not a subclass of BaseArtifact."
)
artifact_types = cls.ARTIFACT_TYPES or (BaseArtifact,)
for t in cls.TYPES:
if not isclass(t):
raise ProducerInterfaceError(
f"Associated type {t} for producer is not a class."
)
producer_registry.register_producer(
t,
cls,
)
type_registry.register_artifact_type(
t,
artifact_types,
)
return cls
| 5,351,769 |
def get_file_list(var, obsname, start_date, end_date):
"""
Get a list of data set files that covers the time period defined by
start_date and end_date provided in the function call.
Parameters
----------
var: str
Input variable, e.g. 'tas'
obsname: str
Name of dataset to use, e.g. 'EOBS'
start_date: str
Start date of time period, format YYYYMM
end_date: str
End date of time period, format YYYYMM
Returns
-------
file_list: list
List of obs data files
"""
meta_data = obs_data()
data_dict = meta_data[var][obsname]
file_pattern = data_dict['file pattern']
sidx = file_pattern.find('YYYYMM')
eidx = file_pattern.rfind('YYYYMM')
obs_path_list = glob.glob(os.path.join(data_dict['path'],
file_pattern[:sidx] + '*.nc'))
obs_path_list.sort()
obs_file_list = [l.split('/')[-1] for l in obs_path_list]
obs_dates = ['{}-{}'.format(f[sidx:sidx+6], f[eidx:eidx+6])
for f in obs_file_list]
idx_start = [d.split('-')[0] <= start_date <= d.split('-')[1]
for d in obs_dates]
msg = "Files not found OR selected start date {} ".format(start_date) +\
"does not match any obs file dates!"
assert np.sum(idx_start) != 0, msg
idx_start = np.where(idx_start)[0][0]
idx_end = [d.split('-')[0] <= end_date <= d.split('-')[1]
for d in obs_dates]
msg = "Files not found OR selected end date {} ".format(end_date) +\
"does not match any obs file dates!"
assert np.sum(idx_end) != 0, msg
idx_end = np.where(idx_end)[0][0]
return obs_path_list[idx_start: idx_end + 1]
| 5,351,770 |
def main():
"""
Main function for data retrieval and loading. See argparse message for usage.
"""
logger.info("Starting SureChEMBL update process")
# Parse core command line arguments
parser = argparse.ArgumentParser(description='Load data into the SureChEMBL database')
parser.add_argument('ftp_user', metavar='fu', type=str, help='Username for accessing the EBI FTP site')
parser.add_argument('ftp_pass', metavar='fp', type=str, help='Password for accessing the EBI FTP site')
parser.add_argument('db_user', metavar='du', type=str, help='Username for accessing the target database')
parser.add_argument('db_pass', metavar='dp', type=str, help='Password for accessing the target database')
parser.add_argument('--db_type', metavar='dt', type=str, help='Database type ("oracle" or "postgres")', default="oracle")
parser.add_argument('--db_host', metavar='dh', type=str, help='Host where the database can be found', default="127.0.0.1")
parser.add_argument('--db_port', metavar='do', type=str, help='Port over which the database is accessed', default="1521")
parser.add_argument('--db_name', metavar='dn', type=str, help='Database name (for connection string)', default="XE")
parser.add_argument('--working_dir', metavar='w', type=str, help='Working directory for downloaded files', default="/tmp/schembl_ftp_data")
# Options that determine what is loaded
group = parser.add_mutually_exclusive_group()
group.add_argument('--year', metavar='y', type=str, help='A year to extract from the back file, format: YYYY')
group.add_argument('--date', metavar='d', type=str, help='A date to extract from the front file, format: YYYYMMDD; defaults to today', default="today")
group.add_argument('--input_dir', metavar='f', type=str, help='A directory of pre-downloaded data files to load (e.g. for overwriting)')
parser.add_argument('--all', help='Download all files, or just new files? Front file only', action="store_true")
# Flags that determine how downloaded files are processed
parser.add_argument('--overwrite', help='Replace any existing document/chemistry records with newly downloaded data', action="store_true")
parser.add_argument('--preload_bib_ids', help='Try to find IDs for documents, instead of waiting for Integrity Errors', action="store_true")
parser.add_argument('--skip_titles', help='Ignore titles when loading document metadata', action="store_true")
parser.add_argument('--skip_classes', help='Ignore classifications when loading document metadata', action="store_true")
args = parser.parse_args()
input_files = _prepare_files(args)
logger.info("Loading data files into DB")
if args.db_type == 'oracle':
db_pkg = cx_Oracle
elif args.db_type == 'postgres':
db_pkg = psycopg2
try:
db = _get_db_engine(args)
loader = DataLoader(db,
load_titles=not args.skip_titles,
load_classifications=not args.skip_classes,
overwrite=args.overwrite,
allow_doc_dups=True)
for bib_file in filter( lambda f: f.endswith("biblio.json"), input_files):
loader.load_biblio( "{}/{}".format( args.working_dir,bib_file ), preload_ids=args.preload_bib_ids )
for chem_file in filter( lambda f: f.endswith("chemicals.tsv"), input_files):
update = "supp" in chem_file
if update: logger.info("Supplementary chemical file detected - setting parameters to handle duplicate records")
loader.load_chems( "{}/{}".format( args.working_dir,chem_file ), update )
logger.info("Processing complete, exiting")
except db_pkg.DatabaseError, exc:
# Specialized display handling for Database exceptions
logger.error( "Database exception detected: {}".format( exc ) )
raise
| 5,351,771 |
def test_facet_size_default():
"""facet_size() has default return value 50"""
assert search_query.facet_size({}) == 50
| 5,351,772 |
def structures_at_boundaries(gdf, datamodel, areas, structures, tolerance, distance):
"""
Check if there are structures near area (typically water-level areas) boundaries.
Parameters
----------
gdf : ExtendedGeoDataframe
ExtendedGeoDataFrame, HyDAMO hydroobject layer
datamodel : HyDAMO
HyDAMO datamodel class
areas : str
HyDAMO datamodel class with areas ("peilgebiedenpraktijk")
structures : str
List with structure-types to be expected on the boundary
tolerance : numeric
Tolerance to dermine if a structure is on the hydroobject
distance : numeric
Max distance between structure and area-boundary
Returns
-------
Pandas Series
Default dtype is bool
"""
areas_gdf = getattr(datamodel, areas)
areas_sindex = areas_gdf.sindex
struc_series = _layers_from_datamodel(structures, datamodel)
struc_sindex = struc_series.sindex
return gdf.apply(
lambda x: _structures_at_boundaries(
x, areas_gdf, areas_sindex, struc_series, struc_sindex, tolerance, distance
),
axis=1,
)
| 5,351,773 |
def create_checkpoint_structure() -> None:
"""
Create a checkpoint structure in the log folder.
* train: Folder for train split tensorboard logs.
* dev: Folder for dev split tensorboard logs.
* test: Folder for test split tensorboard logs.
* checkpoints: Folder for the babilim, pytorch or tensorboard checkpoints.
* images: Folder for images that are generated by your code. (use log_image)
"""
    logfolder = get_log_path()
    # Create each standard subfolder if it does not exist yet.
    for subfolder in ("train", "dev", "test", "checkpoints", "images"):
        path = os.path.join(logfolder, subfolder)
        if not os.path.exists(path):
            os.makedirs(path)
| 5,351,774 |
def above_cutoff(gene_freq_tup_list: List[Tuple[Union[str, tuple], Tuple[str, str]]], cutoff: int) -> List[str]:
"""Return the genes/edges that are are in at least the given cutoff's networks
Parameters
----------
gene_freq_tup_list : List[Tuple[Union[str, tuple], Tuple[str, str]]]
list of (comparison_object, (frequency_count, percent)) tuples in order of most common
should be return from most_common()
cutoff : int
number to be used as minimum for how many networks the object must be present in to be returned
Returns
-------
list of objects that were in at least as many networks as the cutoff given
"""
above = []
for gene, freq in gene_freq_tup_list:
if count_in_freq(freq) >= cutoff:
above.append(gene)
else:
break # since it's ordered, no need wasting time checking the rest
return above
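# Hedged usage sketch for above_cutoff, assuming count_in_freq((count, pct)) returns
# the integer count; that helper is not shown here. Input must be sorted most-common-first.
gene_freqs = [("TP53", (5, 0.5)), ("BRCA1", (3, 0.3)), ("EGFR", (1, 0.1))]
print(above_cutoff(gene_freqs, cutoff=3))   # expected: ['TP53', 'BRCA1']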
| 5,351,775 |
def normalizeWindows(X):
"""
Do point centering and sphere normalizing to each window
to control for linear drift and global amplitude
Parameters
----------
X: ndarray(N, Win)
An array of N sliding windows
    Returns
    -------
XRet: ndarray(N, Win)
An array in which the mean of each row is zero
and the norm of each row is 1
"""
XRet = X - np.mean(X, 1)[:, None]
Norm = np.sqrt(np.sum(XRet**2, 1))
Norm[Norm == 0] = 1
XRet /= Norm[:, None]
return XRet
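# Hedged usage sketch for normalizeWindows: build toy sliding windows and verify that
# each normalized row has zero mean and unit norm. The window construction below is an
# illustrative assumption, not part of the original code.
import numpy as np

signal = np.sin(np.linspace(0, 4 * np.pi, 40))
win = 10
X = np.array([signal[i:i + win] for i in range(len(signal) - win)])
XN = normalizeWindows(X)
print(np.allclose(XN.mean(axis=1), 0))             # True
print(np.allclose(np.linalg.norm(XN, axis=1), 1))  # True for non-constant windows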
| 5,351,776 |
def cost_matrix_slow(x, y):
"""
    Input: x is an Nxd matrix
           y is an optional Mxd matrix
Output: dist is a NxM matrix where dist[i,j] is the square norm between x[i,:] and y[j,:]
if y is not given then use 'y=x'.
i.e. dist[i,j] = ||x[i,:]-y[j,:]||^2
"""
x_norm = (x ** 2).sum(1).view(-1, 1)
if y is not None:
y_t = torch.transpose(y, 0, 1)
y_norm = (y ** 2).sum(1).view(1, -1)
else:
y_t = torch.transpose(x, 0, 1)
y_norm = x_norm.view(1, -1)
dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)
# Ensure diagonal is zero if x=y
# if y is None:
# dist = dist - torch.diag(dist.diag)
return torch.clamp(dist, 0.0, np.inf)
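# Hedged usage sketch for cost_matrix_slow: pairwise squared Euclidean distances,
# cross-checked against torch.cdist on small random inputs.
import torch

x = torch.randn(4, 3)
y = torch.randn(5, 3)
dist = cost_matrix_slow(x, y)   # shape (4, 5)
print(torch.allclose(dist, torch.cdist(x, y) ** 2, atol=1e-5))   # True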
| 5,351,777 |
def get_dists(ts1_sax, ts2_sax, lookup_table):
"""
Compute distance between each symbol of two words (series) using a lookup table
ts1_sax and ts2_sax are two sax representations (strings) built under the same conditions
"""
# Verify integrity
if ts1_sax.shape[0] != ts2_sax.shape[0]:
return -1
# convert symbol series into series of indexes (symbol indexes)
ts1_sax_id = symbol2index(ts1_sax)
ts2_sax_id = symbol2index(ts2_sax)
# array of distances between symbols
dists = np.zeros(ts1_sax.shape[0])
for i in range(ts1_sax_id.shape[0]):
dists[i] = lookup_table[ts1_sax_id[i], ts2_sax_id[i]]
return dists
| 5,351,778 |
def get_dom_coords(string, dom):
"""Get Coordinates of a DOM specified by the string and dom number.
Parameters
----------
string : int
String number (between 1 and 86)
dom : int
DOM number (between 1 and 60)
Returns
-------
tuple(float, float, float)
The x, y, z coordinates of the DOM.
"""
assert string > 0 and string <= 86, 'String must be within [1, 86]'
    assert dom > 0 and dom <= 60, 'DOM must be within [1, 60]'
a, b = get_matrix_indices(string)
dom_id = dom - 1
return x_ic78_coords[a, b, dom_id]
| 5,351,779 |
def relevance_ka(x):
"""
based on code from https://www.kaggle.com/aleksandradeis/regression-addressing-extreme-rare-cases
see paper: https://www.researchgate.net/publication/220699419_Utility-Based_Regression
    use the sigmoid function to create the relevance function, so that the relevance
    function has values close to 1 when the target variable is greater than 0.6
Args:
x: the x values for which the relevance should be returned
"""
x = np.array(x)
return sigmoid((x-0.5) * 15)
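# Hedged usage sketch for relevance_ka, assuming `sigmoid` in this module is the
# standard logistic function 1 / (1 + exp(-x)).
import numpy as np

print(np.round(relevance_ka([0.2, 0.5, 0.6, 0.8]), 3))
# expected roughly: [0.011 0.5   0.818 0.989]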
| 5,351,780 |
def now(mydateformat='%Y%m%dT%H%M%S'):
""" Return current datetime as string.
Just a shorthand to abbreviate the common task to obtain the current
datetime as a string, e.g. for result versioning.
Args:
mydateformat: optional format string (default: '%Y%m%dT%H%M%S')
Returns:
        datetime.now(), formatted to string with argument mydateformat, e.g.
        YYYYMMDDThhmmss ==> 20131007T123456
"""
return datetime.now().strftime(mydateformat)
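# Minimal usage sketch for now(): default compact timestamp and a custom format.
print(now())                       # e.g. 20240101T120000
print(now('%Y-%m-%d %H:%M:%S'))    # e.g. 2024-01-01 12:00:00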
| 5,351,781 |
def getOnePackageInfo(pkgpath):
"""Gets receipt info for a single bundle-style package"""
pkginfo = {}
plist = getBundleInfo(pkgpath)
if plist:
pkginfo['filename'] = os.path.basename(pkgpath)
try:
if 'CFBundleIdentifier' in plist:
pkginfo['packageid'] = plist['CFBundleIdentifier']
elif 'Bundle identifier' in plist:
# special case for JAMF Composer generated packages.
pkginfo['packageid'] = plist['Bundle identifier']
else:
pkginfo['packageid'] = os.path.basename(pkgpath)
if 'CFBundleName' in plist:
pkginfo['name'] = plist['CFBundleName']
if 'IFPkgFlagInstalledSize' in plist:
pkginfo['installed_size'] = int(plist['IFPkgFlagInstalledSize'])
pkginfo['version'] = getBundleVersion(pkgpath)
except (AttributeError,
FoundationPlist.NSPropertyListSerializationException):
pkginfo['packageid'] = 'BAD PLIST in %s' % \
os.path.basename(pkgpath)
pkginfo['version'] = '0.0'
## now look for applications to suggest for blocking_applications
#bomlist = getBomList(pkgpath)
#if bomlist:
# pkginfo['apps'] = [os.path.basename(item) for item in bomlist
# if item.endswith('.app')]
else:
# look for old-style .info files!
infopath = os.path.join(
pkgpath, 'Contents', 'Resources', 'English.lproj')
if os.path.exists(infopath):
for item in osutils.listdir(infopath):
if os.path.join(infopath, item).endswith('.info'):
pkginfo['filename'] = os.path.basename(pkgpath)
pkginfo['packageid'] = os.path.basename(pkgpath)
infofile = os.path.join(infopath, item)
infodict = parseInfoFile(infofile)
pkginfo['version'] = infodict.get('Version', '0.0')
pkginfo['name'] = infodict.get('Title', 'UNKNOWN')
break
return pkginfo
| 5,351,782 |
def cmp_point_identities(a, b):
"""
Given point identities a, b (may be string, number, date, etc),
collation algorithm compares:
(a) strings case-insensitively
(b) dates and datetimes compared by normalizing date->datetime.
(c) all other types use __cmp__(self, other) defaults from type.
"""
dt = lambda d: datetime(*d.timetuple()[0:6]) # date|datetime -> datetime
if isinstance(a, basestring) and isinstance(b, basestring):
return cmp(a.upper(), b.upper())
if isinstance(a, date) or isinstance(b, date):
return cmp(dt(a), dt(b))
return cmp(a, b)
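# Hedged usage sketch for cmp_point_identities (Python 2 only, where cmp and basestring
# exist): strings compare case-insensitively and dates are normalized to datetimes.
from datetime import date, datetime
print(cmp_point_identities('alpha', 'BETA'))                         # -1
print(cmp_point_identities(date(2020, 1, 1), datetime(2020, 1, 1)))  # 0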
| 5,351,783 |
def shortest_complement(t, m, l):
"""
Given a primitive slope t and the holonomies of the current
meridian and longitude, returns a shortest complementary slope s
so that s.t = +1.
"""
c, d = t # second slope
_, a, b = xgcd(d, c) # first slope
b = -b
assert a*d - b*c == 1
return a_shortest_lattice_point_on_line((a, b), (c, d), m, l)
| 5,351,784 |
def run_pii(text, lang):
"""
Runs the given set of regexes on the data "lines" and pulls out the
tagged items.
The lines structure stores the language type(s). This can be used for
language-specific regexes, although we're dropping that for now and using
only "default"/non-language-specific regexes.
"""
#print('Detecting....')
# What is this for...?
text = text.encode().decode()
matches = detect_pii(text, lang, high_risk_tags)
#print(matches)
match_set = (text, {})
if len(matches) > 0:
# !!! REDACTION HAPPENS HERE !!!
redacted_str, metadata = redact_pii(text, matches)
metadata_out = {"regex metadata":metadata, "original": text, "redacted": redacted_str}
match_set = (redacted_str, metadata_out)
return match_set
| 5,351,785 |
def get_ref_cat(butler, visit, center_radec, radius=2.1):
"""
Get the reference catalog for the desired visit for the requested
sky location and sky cone radius.
"""
ref_cats = RefCat(butler)
try:
band = list(butler.subset('src', visit=visit))[0].dataId['filter']
except dp.butlerExceptions.NoResults:
band = list(butler.subset('src', expId=visit))[0].dataId['filter']
centerCoord = lsst_geom.SpherePoint(center_radec[0]*lsst_geom.degrees,
center_radec[1]*lsst_geom.degrees)
return ref_cats(centerCoord, band, radius)
| 5,351,786 |
def replace_dict(d, **kwargs):
"""
Replace values by keyword on a dict, returning a new dict.
"""
e = d.copy()
e.update(kwargs)
return e
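# Minimal usage sketch for replace_dict: the original dict is left unchanged.
base = {'lr': 0.1, 'epochs': 10}
tuned = replace_dict(base, lr=0.01)
print(tuned)   # {'lr': 0.01, 'epochs': 10}
print(base)    # {'lr': 0.1, 'epochs': 10}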
| 5,351,787 |
def handle_pdb(sig, frame): # pylint: disable=unused-argument
""" Signal handler """
pdb.Pdb().set_trace(frame)
| 5,351,788 |
def optimizer(settings_filepath):
"""
Performs Gaussian-process optimization to maximise agreement
between SOLPS and the given experimental data.
:param settings_filepath: The path to the settings file.
:return:
"""
# check the validity of the input file and return its contents
settings = parse_inputs(settings_filepath, check_training_data=True)
# set-up the log file
logger_setup(settings_filepath)
# data & results filepaths
reference_directory = settings['solps_ref_directory']
training_data_file = settings['training_data_file']
diagnostics = settings['diagnostics']
# SOLPS settings
solps_n_proc = settings['solps_n_proc']
set_divertor_transport = settings['set_divertor_transport']
solps_timeout_hours = settings['solps_timeout_hours']
transport_profile_bounds = settings['transport_profile_bounds']
# optimiser settings
max_iterations = settings['max_iterations']
fixed_parameter_values = settings['fixed_parameter_values']
optimisation_bounds = settings['optimisation_bounds']
acquisition_function = settings['acquisition_function']
cross_validation = settings['cross_validation']
error_model = settings['error_model']
covariance_kernel_class = settings['covariance_kernel']
trust_region = settings['trust_region']
trust_region_width = settings['trust_region_width']
log_scale_bounds = settings['log_scale_bounds']
all_parameters = [key for key in fixed_parameter_values.keys()]
free_parameters = [key for key, value in fixed_parameter_values.items() if value is None]
fixed_parameters = [key for key, value in fixed_parameter_values.items() if value is not None]
# start the optimisation loop
while True:
# load the training data
df = read_hdf(reference_directory + training_data_file, 'training')
# break the loop if we've hit the max number of iterations
if df['iteration'].max() >= max_iterations:
logging.info('[optimiser] Optimisation loop broken due to reaching the maximum allowed iterations')
break
# get the current iteration number
itr = df['iteration'].max()+1
logging.info(f"--- Starting iteration {itr} ---")
# extract the training data
logprob_key = check_error_model(error_model)
log_posterior = df[logprob_key].to_numpy().copy()
parameters = []
for tup in zip(*[df[p] for p in free_parameters]):
parameters.append( array(tup) )
# convert the data to the normalised coordinates:
free_parameter_bounds = [optimisation_bounds[k] for k in free_parameters]
normalised_parameters = [bounds_transform(p, free_parameter_bounds) for p in parameters]
# build the set of grid-transformed points
grid_set = {grid_transform(p) for p in normalised_parameters}
# set the covariance kernel parameter bounds
amplitude = log(log_posterior.ptp())
hyperpar_bounds = [(amplitude-3, amplitude+3)]
hyperpar_bounds.extend( [log_scale_bounds for _ in free_parameters] )
# construct the GP
covariance_kernel = covariance_kernel_class(hyperpar_bounds=hyperpar_bounds)
GP = GpRegressor(
normalised_parameters,
log_posterior,
cross_val=cross_validation,
kernel=covariance_kernel,
optimizer="diffev"
)
bfgs_hps = GP.multistart_bfgs(starts=300, n_processes=solps_n_proc)
if GP.model_selector(bfgs_hps) > GP.model_selector(GP.hyperpars):
mode = bfgs_hps
else:
mode = GP.hyperpars
logging.info('[optimiser] GP hyper-parameter tuning complete - hyper-parameter values are:')
logging.info(mode)
# If a trust-region approach is being used, limit the search area
# to a region around the current maximum
trhw = 0.5*trust_region_width
if trust_region:
max_ind = log_posterior.argmax()
max_point = normalised_parameters[max_ind]
search_bounds = [(max(0., v-trhw), min(1., v+trhw)) for v in max_point]
else:
search_bounds = [(0., 1.) for i in range(len(free_parameters))]
# build the GP-optimiser
covariance_kernel = covariance_kernel_class(hyperpar_bounds=hyperpar_bounds)
GPopt = GpOptimiser(
normalised_parameters,
log_posterior,
hyperpars=GP.hyperpars,
bounds=search_bounds,
cross_val=cross_validation,
kernel=covariance_kernel,
acquisition=acquisition_function
)
# maximise the acquisition both by multi-start bfgs and differential evolution,
# and use the best of the two
bfgs_prop = GPopt.propose_evaluation(optimizer="bfgs")
diff_prop = GPopt.propose_evaluation(optimizer="diffev")
bfgs_acq = GPopt.acquisition(bfgs_prop)
diff_acq = GPopt.acquisition(diff_prop)
new_point = bfgs_prop if bfgs_acq > diff_acq else diff_prop
logging.info('[optimiser] Acquisition function maximisation complete - max function value was:')
logging.info(max(bfgs_acq, diff_acq))
# calculate the convergence metric
convergence = GPopt.acquisition.convergence_metric(new_point)
# get predicted log-probability at the new point
mu_lp, sigma_lp = GPopt.gp(new_point)
# back-transform to get the new point as model parameters
new_parameters = bounds_transform(new_point, free_parameter_bounds, inverse=True)
# create the dictionary for this iteration
row_dict = {}
# add values for all the fixed parameters
for key in fixed_parameters:
row_dict[key] = fixed_parameter_values[key]
# add the new free parameter values
for key, val in zip(free_parameters, new_parameters):
row_dict[key] = val
# check to see if the grid-transformed new point is already in the evaluated set
if grid_transform(new_point) in grid_set:
raise ValueError(
"""
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The latest proposed evaluation is a point which has been
previously evaluated - this may indicate that a local
maximum has been reached.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
)
logging.info('New chi parameters:')
logging.info([row_dict[k] for k in conductivity_profile])
logging.info('New D parameters:')
logging.info([row_dict[k] for k in diffusivity_profile])
logging.info('Divertor parameters:')
logging.info([row_dict['D_div'], row_dict['chi_div']])
# Run SOLPS for the new point
run_id, run_dir = launch_solps(
iteration=itr,
reference_directory=reference_directory,
parameter_dictionary=row_dict,
transport_profile_bounds=transport_profile_bounds,
n_proc=solps_n_proc,
set_div_transport=set_divertor_transport
)
launch_time = time()
while not solps_run_complete(run_id):
runtime_hours = (time() - launch_time) / 3600
if runtime_hours >= solps_timeout_hours:
logging.info("[ time-out warning ]")
logging.info(f">> iteration {itr}, job {run_id} has timed-out")
sleep(10)
# evaluate the chi-squared
logprobs = evaluate_log_posterior(
directory=run_dir,
diagnostics=diagnostics
)
gaussian_logprob, cauchy_logprob, laplace_logprob, logistic_logprob = logprobs
# build a new row for the dataframe
row_dict['iteration'] = itr
row_dict['gaussian_logprob'] = gaussian_logprob
row_dict['cauchy_logprob'] = cauchy_logprob
row_dict['laplace_logprob'] = laplace_logprob
row_dict['logistic_logprob'] = logistic_logprob
        row_dict['prediction_mean'] = mu_lp
        row_dict['prediction_error'] = sigma_lp
row_dict['convergence_metric'] = convergence
df.loc[df.index.max()+1] = row_dict # add the new row
df.to_hdf(reference_directory + training_data_file, key='training', mode='w') # save the data
del df
| 5,351,789 |
def pageHeader(
headline="",
tagline=""):
"""
*Generate a pageHeader - TBS style*
**Key Arguments:**
- ``headline`` -- the headline text
- ``tagline`` -- the tagline text for below the headline
**Return:**
- ``pageHeader`` -- the pageHeader
"""
pageHeader = """
<div class="page-header" id=" ">
<h1>%(headline)s<br><small>%(tagline)s</small></h1>
</div>""" % locals()
return pageHeader
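# Minimal usage sketch for pageHeader: renders a Bootstrap-style page-header block.
html = pageHeader(headline="Nightly Report", tagline="generated automatically")
print(html)   # prints the page-header <div> with the headline and tagline filled in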
| 5,351,790 |
def get_counter_merge_suggestion(merge_suggestion_tokens):
"""Return opposite of merge suggestion
Args:
merge_suggestion_tokens (list): tokens in merge suggestion
Returns:
str: opposite of merge suggestion
"""
counter_merge_suggestion = ' '.join(merge_suggestion_tokens)
if merge_suggestion_tokens[-1][-1] == '་':
counter_merge_suggestion += " "
return counter_merge_suggestion
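# Hedged usage sketch for get_counter_merge_suggestion: when the last token ends
# with a tsheg ('་'), the joined string gets a trailing space.
print(repr(get_counter_merge_suggestion(["བོད", "ཡིག་"])))   # 'བོད ཡིག་ '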
| 5,351,791 |
def parse_rfc3339_utc_string(rfc3339_utc_string):
"""Converts a datestamp from RFC3339 UTC to a datetime.
Args:
rfc3339_utc_string: a datetime string in RFC3339 UTC "Zulu" format
Returns:
A datetime.
"""
    # The timestamps from the Google Operations are all in RFC3339 format, but
    # they are sometimes formatted to milliseconds, sometimes microseconds,
    # sometimes nanoseconds, and sometimes only seconds:
# * 2016-11-14T23:05:56Z
# * 2016-11-14T23:05:56.010Z
# * 2016-11-14T23:05:56.010429Z
# * 2016-11-14T23:05:56.010429380Z
m = re.match(r'(\d{4})-(\d{2})-(\d{2})T(\d{2}):(\d{2}):(\d{2}).?(\d*)Z',
rfc3339_utc_string)
# It would be unexpected to get a different date format back from Google.
# If we raise an exception here, we can break people completely.
# Instead, let's just return None and people can report that some dates
# are not showing up.
# We might reconsider this approach in the future; it was originally
# established when dates were only used for display.
if not m:
return None
groups = m.groups()
if len(groups[6]) not in (0, 3, 6, 9):
return None
# Create a UTC datestamp from parsed components
# 1- Turn components 0-5 from strings to integers
# 2- If the last component does not exist, set it to 0.
    #    If it does exist, convert it to microseconds based on its length.
g = [int(val) for val in groups[:6]]
fraction = groups[6]
if not fraction:
micros = 0
elif len(fraction) == 3:
micros = int(fraction) * 1000
elif len(fraction) == 6:
micros = int(fraction)
elif len(fraction) == 9:
        # When nanoseconds are provided, truncate to microsecond precision
        micros = int(fraction) // 1000
    else:
        assert False, 'Fraction length not 0, 3, 6, or 9: {}'.format(len(fraction))
try:
return datetime.datetime(
g[0], g[1], g[2], g[3], g[4], g[5], micros, tzinfo=pytz.utc)
except ValueError as e:
assert False, 'Could not parse RFC3339 datestring: {} exception: {}'.format(
rfc3339_utc_string, e)
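# Hedged usage sketch for parse_rfc3339_utc_string: the precisions listed in the
# comment above all parse to timezone-aware UTC datetimes; malformed input gives None.
print(parse_rfc3339_utc_string('2016-11-14T23:05:56.010429Z'))   # 2016-11-14 23:05:56.010429+00:00
print(parse_rfc3339_utc_string('2016-11-14T23:05:56Z'))          # 2016-11-14 23:05:56+00:00
print(parse_rfc3339_utc_string('14 Nov 2016'))                   # None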
| 5,351,792 |
def response(code, body='', etag=None, last_modified=None, expires=None, **kw):
"""Helper to build an HTTP response.
Parameters:
code
: An integer status code.
body
: The response body. See `Response.__init__` for details.
etag
: A value for the ETag header. Double quotes will be added unless the
string starts and ends with a double quote.
last_modified
: A value for the Last-Modified header as a datetime.datetime object
or Unix timestamp.
expires
: A value for the Expires header as number of seconds, datetime.timedelta
or datetime.datetime object.
Note: a value of type int or float is interpreted as a number of
seconds in the future, *not* as Unix timestamp.
**kw
: All other keyword arguments are interpreted as response headers.
The names will be converted to header names by replacing
underscores with hyphens and converting to title case
(e.g. `x_powered_by` => `X-Powered-By`).
"""
if etag is not None:
if not (etag[0] == '"' and etag[-1] == '"'):
etag = '"%s"' % etag
kw['etag'] = etag
if last_modified is not None:
kw['last_modified'] = datetime_to_httpdate(last_modified)
if expires is not None:
if isinstance(expires, datetime):
kw['expires'] = datetime_to_httpdate(expires)
else:
kw['expires'] = timedelta_to_httpdate(expires)
headers = [(k.replace('_', '-').title(), v) for k, v in sorted(kw.items())]
return Response(code, headers, body)
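# Hedged usage sketch for response(), assuming the Response class used elsewhere in
# this module. The etag gains surrounding quotes and keyword headers are title-cased.
resp = response(200, body='hello', etag='abc123', x_powered_by='example')
# resulting headers: [('Etag', '"abc123"'), ('X-Powered-By', 'example')]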
| 5,351,793 |
def oauth2callback():
"""
The 'flow' has this one place to call back to. We'll enter here
more than once as steps in the flow are completed, and need to keep
track of how far we've gotten. The first time we'll do the first
step, the second time we'll skip the first step and do the second,
and so on.
"""
app.logger.debug("Entering oauth2callback")
app.logger.debug(flask.url_for('oauth2callback', _external=True))
#return_url = "http://localhost:5000" + flask.url_for('oauth2callback')
flow = client.flow_from_clientsecrets(
CLIENT_SECRET_FILE,
scope= SCOPES,
redirect_uri=flask.url_for('oauth2callback', _external=True))
## Note we are *not* redirecting above. We are noting *where*
## we will redirect to, which is this function.
## The *second* time we enter here, it's a callback
## with 'code' set in the URL parameter. If we don't
## see that, it must be the first time through, so we
## need to do step 1.
app.logger.debug("Got flow")
if 'code' not in flask.request.args:
app.logger.debug("Code not in flask.request.args")
auth_uri = flow.step1_get_authorize_url()
return flask.redirect(auth_uri)
## This will redirect back here, but the second time through
## we'll have the 'code' parameter set
else:
## It's the second time through ... we can tell because
## we got the 'code' argument in the URL.
app.logger.debug("Code was in flask.request.args")
auth_code = flask.request.args.get('code')
credentials = flow.step2_exchange(auth_code)
flask.session['credentials'] = credentials.to_json()
## Now I can build the service and execute the query,
## but for the moment I'll just log it and go back to
## the main screen
app.logger.debug("Got credentials")
return flask.redirect(flask.url_for('getCalendars',muID=flask.session['meetupId']))
| 5,351,794 |
def parse_tweet(raw_tweet, source, now=None):
"""
Parses a single raw tweet line from a twtxt file
and returns a :class:`Tweet` object.
:param str raw_tweet: a single raw tweet line
:param Source source: the source of the given tweet
:param Datetime now: the current datetime
:returns: the parsed tweet
:rtype: Tweet
"""
if now is None:
now = datetime.now(timezone.utc)
raw_created_at, text = raw_tweet.split("\t", 1)
created_at = parse_iso8601(raw_created_at)
if created_at > now:
raise ValueError("Tweet is from the future")
return Tweet(click.unstyle(text.strip()), created_at, source)
| 5,351,795 |
def cmd_ssh(argv, args):
"""
Usage:
localstack ssh [options]
Commands:
ssh Obtain a shell in the running LocalStack container
Options:
"""
args.update(docopt(cmd_ssh.__doc__.strip(), argv=argv))
if not docker_container_running(MAIN_CONTAINER_NAME):
raise Exception('Expected 1 running "%s" container, but found none' % MAIN_CONTAINER_NAME)
try:
process = run('docker exec -it %s bash' % MAIN_CONTAINER_NAME, tty=True)
process.wait()
except KeyboardInterrupt:
pass
| 5,351,796 |
def scopes(request, coalition_id):
"""
Update coalition required scopes with a specific set of scopes
"""
scopes = []
for key in request.POST:
if key in ESI_SCOPES:
scopes.append(key)
url = f"{GLOBAL_URL}/{coalition_id}"
headers = global_headers(request, {"Content-type": "application/json"})
data = "{\"mandatory_esi_scopes\": [\"" + "\",\"".join(scopes) + "\"]}"
request_change_scopes = requests.put(url, headers=headers, data=data)
if request_change_scopes.status_code != 200:
return render_error(request_change_scopes)
params = urlencode({"changed_scopes": "true"})
return_url = reverse("coalition-sheet", args=[coalition_id]) + "?" + params
return redirect(return_url)
| 5,351,797 |
def test_opening_hour_close():
"""
The POI should already be closed since it's 21h30 UTC while
the POI closes at 22h00 in UTC+3.
"""
oh_block = get_moscow_oh("Mo-Su 10:00-22:00")
assert oh_block.status == "closed"
assert oh_block.next_transition_datetime == "2018-06-15T10:00:00+03:00"
assert oh_block.seconds_before_next_transition == 34200
assert oh_block.is_24_7 is False
assert oh_block.raw == "Mo-Su 10:00-22:00"
assert len(oh_block.days) == 7
assert all(d.status == "open" for d in oh_block.days)
| 5,351,798 |
def recursive_dictionary_cleanup(dictionary):
"""Recursively enrich the dictionary and replace object links with names etc.
These patterns are replaced:
[phobostype, bpyobj] -> {'object': bpyobj, 'name': getObjectName(bpyobj, phobostype)}
Args:
dictionary(dict): dictionary to enrich
Returns:
: dict -- dictionary with replace/enriched patterns
"""
for key, value in dictionary.items():
# handle everything as list, so we can loop over it
unlist = False
if not isinstance(value, list):
value = [value]
unlist = True
itemlist = []
for item in value:
if isinstance(item, list) and item:
# (phobostype, bpyobj) -> {'object': bpyobj, 'name': getObjectName(bpyobj)}
if (
len(item) == 2
and isinstance(item[0], str)
and (item[0] in ['joint'] + [enum[0] for enum in defs.phobostypes])
and isinstance(item[1], bpy.types.Object)
):
itemlist.append(
{
'object': item[1],
'name': nUtils.getObjectName(item[1], phobostype=item[0]),
}
)
# recursion on subdictionaries
elif isinstance(item, dict):
itemlist.append(recursive_dictionary_cleanup(item))
else:
itemlist.append(item)
# extract single items back out of the list
dictionary[key] = itemlist if not unlist else itemlist[0]
return dictionary
| 5,351,799 |