content (stringlengths 22–815k) | id (int64 0–4.91M)
def pyregion_subset(region, data, mywcs):
"""
Return a subset of an image (`data`) given a region.
Parameters
----------
region : `pyregion.parser_helper.Shape`
A Shape from a pyregion-parsed region file
data : np.ndarray
An array with shape described by WCS
mywcs : `astropy.wcs.WCS`
A world coordinate system describing the data
"""
import pyregion
shapelist = pyregion.ShapeList([region])
if shapelist[0].coord_format not in ('physical','image'):
# Requires astropy >0.4...
# pixel_regions = shapelist.as_imagecoord(self.wcs.celestial.to_header())
# convert the regions to image (pixel) coordinates
celhdr = mywcs.sub([wcs.WCSSUB_CELESTIAL]).to_header()
pixel_regions = shapelist.as_imagecoord(celhdr)
else:
# For this to work, we'd need to change the reference pixel after cropping.
# Alternatively, we can just make the full-sized mask... todo....
raise NotImplementedError("Can't use non-celestial coordinates with regions.")
# This is a hack to use mpl to determine the outer bounds of the regions
# (but it's a legit hack - pyregion needs a major internal refactor
# before we can approach this any other way, I think -AG)
mpl_objs = pixel_regions.get_mpl_patches_texts()[0]
# Find the minimal enclosing box containing all of the regions
# (this will speed up the mask creation below)
extent = mpl_objs[0].get_extents()
xlo, ylo = extent.min
xhi, yhi = extent.max
all_extents = [obj.get_extents() for obj in mpl_objs]
for ext in all_extents:
xlo = xlo if xlo < ext.min[0] else ext.min[0]
ylo = ylo if ylo < ext.min[1] else ext.min[1]
xhi = xhi if xhi > ext.max[0] else ext.max[0]
yhi = yhi if yhi > ext.max[1] else ext.max[1]
log.debug("Region boundaries: ")
log.debug("xlo={xlo}, ylo={ylo}, xhi={xhi}, yhi={yhi}".format(xlo=xlo,
ylo=ylo,
xhi=xhi,
yhi=yhi))
subwcs = mywcs[ylo:yhi, xlo:xhi]
subhdr = subwcs.sub([wcs.WCSSUB_CELESTIAL]).to_header()
subdata = data[ylo:yhi, xlo:xhi]
mask = shapelist.get_mask(header=subhdr,
shape=subdata.shape)
log.debug("Shapes: data={0}, subdata={2}, mask={1}".format(data.shape, mask.shape, subdata.shape))
return (xlo,xhi,ylo,yhi),mask | 5,357,100 |
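A hypothetical usage sketch (the file names are placeholders; it assumes pyregion and astropy are installed and that the surrounding module provides the `wcs`, `np`, and `log` names the function body relies on):

from astropy.io import fits
from astropy.wcs import WCS
import pyregion

hdu = fits.open("image.fits")[0]           # any 2D image with a celestial WCS
region = pyregion.open("source.reg")[0]    # first shape in a DS9 region file
(xlo, xhi, ylo, yhi), mask = pyregion_subset(region, hdu.data, WCS(hdu.header))
# mask is True inside the region, defined over the bounding box [ylo:yhi, xlo:xhi]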
def transformer_encoder(encoder_input,
encoder_self_attention_bias,
hparams,
name="encoder",
nonpadding=None,
save_weights_to=None,
make_image_summary=True):
"""A stack of transformer layers.
Args:
encoder_input: a Tensor
encoder_self_attention_bias: bias Tensor for self-attention
(see common_attention.attention_bias())
hparams: hyperparameters for model
name: a string
nonpadding: optional Tensor with shape [batch_size, encoder_length]
indicating what positions are not padding. This must either be
passed in, which we do for "packed" datasets, or inferred from
encoder_self_attention_bias. The knowledge about padding is used
for pad_remover(efficiency) and to mask out padding in convolutional
layers.
save_weights_to: an optional dictionary to capture attention weights
for visualization; the weights tensor will be appended there under
a string key created from the variable scope (including name).
make_image_summary: Whether to make an attention image summary.
Returns:
y: a Tensor
"""
x = encoder_input
attention_dropout_broadcast_dims = (
common_layers.comma_separated_string_to_integer_list(
getattr(hparams, "attention_dropout_broadcast_dims", "")))
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_NUM_HIDDEN_LAYERS,
value=hparams.num_encoder_layers or hparams.num_hidden_layers)
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_ATTENTION_DROPOUT,
value=hparams.attention_dropout)
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_ATTENTION_DENSE,
value={
"use_bias": "false",
"num_heads": hparams.num_heads,
"hidden_size": hparams.hidden_size
})
with tf.variable_scope(name):
if nonpadding is not None:
padding = 1.0 - nonpadding
else:
padding = common_attention.attention_bias_to_padding(
encoder_self_attention_bias)
nonpadding = 1.0 - padding
pad_remover = None
if hparams.use_pad_remover and not common_layers.is_xla_compiled():
pad_remover = expert_utils.PadRemover(padding)
for layer in range(hparams.num_encoder_layers or hparams.num_hidden_layers):
initial_sparsity = None
if hparams.get("load_masks_from"):
initial_sparsity = hparams.get("initial_sparsity")
with tf.variable_scope("layer_%d" % layer):
with tf.variable_scope("self_attention"):
y = sparse_attention.multihead_attention(
common_layers.layer_preprocess(x, hparams),
None,
encoder_self_attention_bias,
hparams.attention_key_channels or hparams.hidden_size,
hparams.attention_value_channels or hparams.hidden_size,
hparams.hidden_size,
hparams.num_heads,
hparams.attention_dropout,
attention_type=hparams.self_attention_type,
max_relative_position=hparams.max_relative_position,
heads_share_relative_embedding=(
hparams.heads_share_relative_embedding),
add_relative_to_values=hparams.add_relative_to_values,
save_weights_to=save_weights_to,
make_image_summary=make_image_summary,
dropout_broadcast_dims=attention_dropout_broadcast_dims,
max_length=hparams.get("max_length"),
vars_3d=hparams.get("attention_variables_3d"),
sparsity_technique=hparams.get("sparsity_technique"),
threshold=hparams.get("log_alpha_threshold"),
training=hparams.get("mode") == tf.estimator.ModeKeys.TRAIN,
clip_alpha=hparams.get("clip_log_alpha"),
initial_sparsity=initial_sparsity,
split_heads=hparams.get("split_heads"))
x = common_layers.layer_postprocess(x, y, hparams)
with tf.variable_scope("ffn"):
y = transformer_ffn_layer(
common_layers.layer_preprocess(x, hparams),
hparams,
pad_remover)
x = common_layers.layer_postprocess(x, y, hparams)
# if normalization is done in layer_preprocess, then it should also be done
# on the output, since the output can grow very large, being the sum of
# a whole stack of unnormalized layer outputs.
mlperf_log.transformer_print(
key=mlperf_log.MODEL_HP_NORM,
value={"hidden_size": hparams.hidden_size})
return common_layers.layer_preprocess(x, hparams) | 5,357,101 |
def update(event, context):
"""
Place your code to handle Update events here
To return a failure to CloudFormation, simply raise an exception; the exception message will be sent to CloudFormation Events.
"""
region = os.environ['AWS_REGION']
prefix_list_name = event['ResourceProperties']['PrefixListName']
physical_resource_id = 'RetrievedPrefixList'
response_data = get_pl_id(prefix_list_name, region)
return physical_resource_id, response_data | 5,357,102 |
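A minimal sketch of the custom-resource event this handler expects (the prefix-list name is illustrative; `get_pl_id` and the `AWS_REGION` environment variable are supplied by the surrounding module and the Lambda runtime):

event = {"ResourceProperties": {"PrefixListName": "com.amazonaws.us-east-1.s3"}}
physical_id, data = update(event, context=None)
# physical_id == 'RetrievedPrefixList'; data carries whatever get_pl_id looked up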
def plotComparison(parameters, optimizationModel, reference, X, Y):
"""
This function generates a figure with the following subplots:
(1) Stress vs. strain for reference and current observation,
(2) Initial effective temperature (Teff) field contour with colorbar,
(3) Final observation Teff field contour with colorbar,
(4) Final observation strain (Exy) field contour with colorbar,
(5) Final reference Teff field contour with colorbar,
(6) Final reference Exy field contour with colorbar.
Note: All colorbar ranges are independent. Contours use grayscale
with black corresponding to largest values. Parameter values are shown
with respective units.
"""
fig = plt.figure(figsize=(11, 8.5))
textStr = '\n'.join((
r'$\beta$ = %.4f eV$^{-1}$' % (parameters.getValue('beta'), ),
r'$u_0$ = %.4f eV' % (parameters.getValue('u0'), ),
r'$T_\infty$ = %d K' % (parameters.getValue('chi_inf'), ),
r'$l_\chi$ = %.3f $\AA$' % (parameters.getValue('chi_len'), ),
r'$c_0$ = %.3f --' % (parameters.getValue('c0'), ),
r'$\epsilon_0$ = %.3f --' % (parameters.getValue('ep'), ),
r'$s_y$ = %.3f GPa' % (parameters.getValue('s_y'), )))
ax = plt.subplot(334)
ax.axis('off')
ax.text(0, 0.5, textStr, verticalalignment='center')
# Plot initial CM effective temperature
fileName = optimizationModel.continuumOutPath + '/' + optimizationModel.temperatureFieldPrefix + '0'
T0 = np.fromfile(fileName, dtype=np.float32)
ny = int(T0[0]) + 1
nx = int(len(T0)/ny)
T0 = T0.reshape(nx, ny)
T0 = T0[1:, 1:]
plt.subplot(333)
plt.title(r'$T_{0}$')
plt.contourf(T0, cmap='viridis')
plt.colorbar()
plt.axis('off')
# Plot Final CM effective temperature
fileName = optimizationModel.continuumOutPath + '/' + optimizationModel.temperatureFieldPrefix + '{}'.format(int(optimizationModel.nContinuumFrames-1))
Tf_CM = np.fromfile(fileName, dtype=np.float32)
Tf_CM = Tf_CM.reshape(nx, ny)
Tf_CM = Tf_CM[1:, 1:]
plt.subplot(335)
plt.title(r'${T_{f}}^{CM}$')
plt.contourf(Tf_CM, cmap='viridis')
plt.colorbar()
plt.axis('off')
# Plot Final CM strain field
fileName = optimizationModel.continuumOutPath + '/' + optimizationModel.strainFieldPrefix + '{}'.format(int(optimizationModel.nContinuumFrames-1))
Exy_CM = np.fromfile(fileName, dtype=np.float32)
Exy_CM = Exy_CM.reshape(nx, ny)
Exy_CM = 2*Exy_CM[1:, 1:]
plt.subplot(336)
plt.title(r'${\Gamma_{f}}^{CM}$')
plt.contourf(Exy_CM, np.linspace(0, 1.6, 9), cmap='viridis')
plt.colorbar()
plt.axis('off')
if reference.fromContinuum:
# Plot Stress-Strain
strain_Ref = np.linspace(0, reference.maxStrain, reference.nFields)
strain_CM = np.linspace(0, reference.maxStrain, optimizationModel.nContinuumFrames)
tau_Ref = np.zeros(strain_Ref.shape)
tau_CM = np.zeros(strain_CM.shape)
for it in np.arange(0, optimizationModel.nContinuumFrames):
tau_CM[it] = parameters.getValue('s_y')*computeContinuumDeviatoricStress(\
optimizationModel.continuumOutPath, it)
tau_Ref[it] = parameters.getValue('s_y')*computeContinuumDeviatoricStress(\
reference.sourceDir, it)
plt.subplot(332)
plt.title(r'$\tau(\gamma)$')
plt.plot(strain_CM, tau_CM, 'k--', label='CM')
plt.plot(strain_Ref, tau_Ref, 'k:', label='MD')
plt.xlabel(r'$\gamma$ [--]')
plt.ylabel(r'$\tau$ [GPa]')
plt.legend()
# Plot Final CM effective Temperature
fileName = reference.sourceDir + '/' + reference.temperatureFieldPrefix + '{}'.format(int(reference.nFields-1))
Tf_Ref = np.fromfile(fileName, dtype=np.float32)
Tf_Ref = Tf_Ref.reshape(nx, ny)
Tf_Ref = Tf_Ref[1:, 1:]
plt.subplot(338)
plt.title(r'${T_{f}}^{Ref}$')
plt.contourf(Tf_Ref, cmap='viridis')
plt.colorbar()
plt.axis('off')
# Plot Final CM strain field
fileName = reference.sourceDir + '/' + reference.strainFieldPrefix + '{}'.format(int(reference.nFields-1))
Exy_Ref = np.fromfile(fileName, dtype=np.float32)
Exy_Ref = Exy_Ref.reshape(nx, ny)
Exy_Ref = Exy_Ref[1:, 1:]
plt.subplot(339)
plt.title(r'${\Gamma_{f}}^{Ref}$')
plt.contourf(Exy_Ref, cmap='viridis')
plt.colorbar()
plt.axis('off')
else:
# Plot Stress-Strain
fileName = reference.sourceDir + '/' + reference.stressFileName
tau_MD = np.loadtxt(fileName, delimiter="\t")
tau_MD = -1*tau_MD[:, reference.stressCol]/10000.
strain_MD = np.linspace(0, reference.maxStrain, len(tau_MD))
strain_CM = np.linspace(0, reference.maxStrain, optimizationModel.nContinuumFrames)
tau_CM = np.zeros(strain_CM.shape)
for it in np.arange(0, optimizationModel.nContinuumFrames):
tau_CM[it] = parameters.getValue('s_y')*computeContinuumDeviatoricStress(\
optimizationModel.continuumOutPath, it)
plt.subplot(332)
plt.title(r'$\tau(\gamma)$')
plt.plot(strain_CM, tau_CM, 'k--', label='CM')
plt.plot(strain_MD, tau_MD, 'k:', label='MD')
plt.xlabel(r'$\gamma$ [--]')
plt.ylabel(r'$\tau$ [GPa]')
plt.legend()
# Plot Final MD reference effective Temperature Field
fileName = reference.sourceDir + '/' + reference.energyFieldPrefix + '{}'.format(int(reference.nFields-1))
Uf_MD = np.loadtxt(fileName, skiprows=1)
Tf_Ref = parameters.getValue('beta')*(Uf_MD-parameters.getValue('u0'))*21000
plt.subplot(338)
plt.title(r'${T_{f}}^{Ref}$')
plt.contourf(Tf_Ref, cmap='viridis')
plt.colorbar()
plt.axis('off')
# Plot Final MD strain field
fileName = reference.sourceDir + '/' + reference.strainFieldPrefix + '{}'.format(int(reference.nFields-1))
Exy_Ref = np.loadtxt(fileName, skiprows=1)
plt.subplot(339)
plt.title(r'${\Gamma_{f}}^{Ref}$')
plt.contourf(Exy_Ref, cmap='viridis')
plt.colorbar()
plt.axis('off')
fig.tight_layout()
figName = 'Response.{:02d}.png'.format(int(len(Y)))
plt.savefig(figName) | 5,357,103 |
def compute_dict(file_path):
"""Computes the dict for a file whose path is file_path"""
file_dict = {}
with open(file_path, encoding = 'utf8') as fin:
for line in fin:
line = line.strip()
txt = re.sub(r'([^a-zA-Z0-9\s]+)', r' \1 ', line)
txt = re.sub(r'([\s]+)', ' ', txt)
words = txt.split(" ")
for word in words:
w = str(word)
if(w not in file_dict):
file_dict[w] = 1
else:
file_dict[w] = file_dict[w] + 1
return file_dict | 5,357,104 |
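A small self-contained check (the function itself relies on `re` being imported at module level):

import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False, encoding='utf8') as tmp:
    tmp.write("hello, world! hello")
counts = compute_dict(tmp.name)
os.remove(tmp.name)
# punctuation is split into separate tokens, so "hello" is counted twice
assert counts["hello"] == 2 and counts[","] == 1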
def get_scale(lat1, lon1, var, desired_distance, unit='miles'):
"""
Calculate the difference in either latitude or longitude that is equivalent
to some desired distance at a given point on Earth. For example, at a specific
point, how much does latitude need to change (assuming longitude is constant) to
be equal to 60 miles? This is especially important since lines of longitude are
closer together near Earth's poles. This function is helpful when converting
latitude and longitude coordinates to pixel coordinates in order to plot a point
on the screen.
Parameters:
1 - latitude of position in decimal degrees
2 - longitude of position in decimal degrees
3 - "lat" or "lon" to specify if calulating change for latitude or longitude
4 - the desired distance from the given point
5 - unit of measure (optional), "miles" or "km", default is miles
Returns:
The difference in latitude or longitude
"""
# Create a second point that is initially set to the starting point
# The idea is to that push this point farther and farther away (either by lat or lon)
# until it is the desired distance away
lat2 = lat1
lon2 = lon1
# Create a variable for tracking the actual distance between the two points, which
# can be compared against the desired distance
actual_distance = get_distance(lat1, lon1, lat2, lon2, unit)
n = 1 # Place value to increase or decrease lat/lon by (1, .1, .01, .001, etc.)
decrease_n = False # Flag to indicate if n should be decreased
if var == 'lat':
var_value = lat2 # Variable for holding either latitude or longitude (whichever is being modified)
elif var == 'lon':
var_value = lon2
else:
print('\nvalue not recognized: ' + str(var) + '\n')
return None  # bail out early: var_value would otherwise be undefined below
# Keep looping until the difference between the desired distance and the actual distance
# is less than 0.0001 (in whatever units)... basically until it's really close
while abs(round(desired_distance - actual_distance, 4)) > 0.0001:
# Keep increasing the var_value until the actual distance is too great, then start decreasing until it's too small
# If desired distance is greater than actual, add n to the var_value
if desired_distance > actual_distance:
var_value += n
var_value = round(var_value, 6) # Round to 6 decimal places to clean up floating point messiness
decrease_n = True # Indicate it's ok the decrease n if the following else statement is evaluated
# If actual distance is greater than desired, subtract n from var_value
else:
if decrease_n:
n *= 0.1 # Decrease n by a factor of ten
var_value -= n
var_value = round(var_value, 6)
decrease_n = False # Don't decrease n until after the next time the if statement is evaluated
# Recalculate the actual distance
if var == 'lat':
actual_distance = get_distance(lat1, lon1, var_value, lon2, unit)
else:
actual_distance = get_distance(lat1, lon1, lat2, var_value, unit)
# print round(actual_distance, 4) for testing purposes
# Return the difference between lat2 and lat1 (or lon2/lon1) that is equal to the desired distance
if var == 'lat':
return abs(round(var_value - lat1, 6))
else:
return abs(round(var_value - lon1, 6)) | 5,357,105 |
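A hypothetical call (assumes `get_distance` is the great-circle distance helper defined elsewhere in this module):

# How many degrees of latitude correspond to roughly 60 miles at New York City?
dlat = get_scale(40.7128, -74.0060, 'lat', 60)
# one degree of latitude spans about 69 miles, so dlat should come out near 0.87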
async def connect_unix(path: Union[str, PathLike]) -> UNIXSocketStream:
"""
Connect to the given UNIX socket.
Not available on Windows.
:param path: path to the socket
:return: a socket stream object
"""
path = str(Path(path))
return await get_asynclib().connect_unix(path) | 5,357,106 |
def isMatch(s, p):
""" Perform regular simple expression matching
Given an input string s and a pattern p, run regular expression
matching with support for '.' and '*'.
Parameters
----------
s : str
The string to match.
p : str
The pattern to match.
Returns
-------
bool
True if the pattern matches the entire string, False otherwise.
"""
dp = [[False] * (len(p) + 1) for _ in range(len(s) + 1)]
dp[0][0] = True
# The only way to match a zero-length string is a pattern of the
# form a*b*c*..., where every character is followed by '*'.
for ii in range(1, len(p)):
if p[ii] == "*" and dp[0][ii-1]:
dp[0][ii + 1] = True
for ii in range(len(s)):
for jj in range(len(p)):
# Matching a single character c or '.'.
if p[jj] in {s[ii], '.'}:
dp[ii+1][jj+1] = dp[ii][jj]
elif p[jj] == '*':
# The preceding character does not match, so '*' must stand for zero occurrences.
if p[jj-1] not in {s[ii], '.'}:
dp[ii+1][jj+1] = dp[ii+1][jj-1]
# We can match .* or c* multiple times, once, or zero
# times (respective clauses in the or's)
else:
dp[ii+1][jj+1] = dp[ii][jj+1] or dp[ii+1][jj] or dp[ii+1][jj-1]
return dp[-1][-1] | 5,357,107 |
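A few sanity checks on the matcher (standard '.'/'*' semantics over the whole string):

assert not isMatch("aa", "a")       # a single 'a' cannot cover "aa"
assert isMatch("aa", "a*")          # '*' repeats the preceding 'a'
assert isMatch("ab", ".*")          # ".*" matches any string
assert isMatch("aab", "c*a*b")      # 'c*' matches zero 'c' characters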
def cepheid_lightcurve_advanced(band, tarr, m0, period, phaseshift, shape1, shape2, shape3, shape4, datatable=None):
"""
Generate a Cepheid light curve. More flexibility allowed.
band: one of "B", "V", "I"
tarr: times at which you want the light curve evaluated
m0: mean magnitude for the light curve
period: same units as tarr
phaseshift: same units as tarr
shape1-4: parameters determining the shape of the light curve.
These are the first four principle components from Yoachim et al. 2009
They should generally be > 0.
You can use
datatable: which set of templates to use.
By default, it loads the long period templates.
Long period: >10 days; Short period: <10 days
Can also pass an integer.
Even int -> long period, odd int -> short period.
Note: for speed in fitting, read the table you want and pass it in.
"""
allowed_bands = ["I","V","B"]
assert band.upper() in allowed_bands
if datatable is None:
datatable = load_longperiod_datatable()
elif isinstance(datatable,(int,float)):
datatable = int(datatable)
if (datatable % 2) == 1:
datatable = load_shortperiod_datatable()
else:
datatable = load_longperiod_datatable()
Nt = len(tarr)
tstack = np.ravel([tarr for x in range(3)])
#p0 = m0i, m0v, m0b, period, phase shift, tbreak, tbreak2
p0 = [m0,m0,m0,period,phaseshift,Nt,2*Nt, shape1, shape2, shape3, shape4]
lcs = gen_lc(tstack, p0, datatable)
lc = lcs[allowed_bands.index(band)]
return lc | 5,357,108 |
def view_info():
"""View keybindings """
if ALWAYS_SHOW_INFO:
display_info()
return
answer = input(
"Would you like to view the info for the environment [y]/N? "
)
if answer.lower() != "n":
display_info() | 5,357,109 |
def scripter(image_hash):
""" Download an image geeration JavaScript file for a given image hash.
"""
link = script_template.format(image_hash)
path = folder + "js\\" + image_hash[0] + "\\"
filename = path + image_hash + '.js'
if not os.path.exists(path):
with lock:
if not os.path.exists(path):
os.mkdir(path)
if not os.path.isfile(filename):
try:
urllib.request.urlretrieve(link, filename)
except urllib.error.HTTPError:
print("\tERROR: JavaScript file for " + image_hash + " could not be downloaded from: " + link)
return | 5,357,110 |
def PrintAttrSpec(attr_spec, out):
"""Prints a Markdown version of the given proto message (AttrSpec).
See amp.validator.AttrSpec in validator.proto for details of proto message.
Args:
attr_spec: The AttrSpec message.
out: A list of lines to output (without newline characters), to which this
function will append.
"""
out.append('* %s' % UnicodeEscape(attr_spec.name))
if attr_spec.alternative_names:
out.append(' * Alternative Names: %s' %
RepeatedFieldToString(attr_spec.alternative_names))
if attr_spec.mandatory:
out.append(' * Mandatory')
if attr_spec.mandatory_oneof:
out.append(' * Mandatory One of: %s' % attr_spec.mandatory_oneof)
if attr_spec.value:
out.append(' * Required Value: %s' % attr_spec.value) | 5,357,111 |
def pairwise_point_combinations(xs, ys, anchors):
"""
Does an in-place addition of the four points that can be composed by
combining coordinates from the two lists to the given list of anchors
"""
for i in xs:
anchors.append((i, max(ys)))
anchors.append((i, min(ys)))
for i in ys:
anchors.append((max(xs), i))
anchors.append((min(xs), i)) | 5,357,112 |
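A quick illustration of the in-place behaviour:

anchors = []
pairwise_point_combinations([0, 4], [1, 3], anchors)
# duplicates aside, the four corner combinations are now present
assert set(anchors) == {(0, 1), (0, 3), (4, 1), (4, 3)}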
def copy(src: PathType, dst: PathType, overwrite: bool = False) -> None:
"""Copy a file from the source to the destination."""
src_fs = _get_filesystem(src)
dst_fs = _get_filesystem(dst)
if src_fs is dst_fs:
src_fs.copy(src, dst, overwrite=overwrite)
else:
if not overwrite and file_exists(dst):
raise FileExistsError(
f"Destination file '{convert_to_str(dst)}' already exists "
f"and `overwrite` is false."
)
contents = open(src, mode="rb").read()
open(dst, mode="wb").write(contents) | 5,357,113 |
def app_list(context):
"""
Renders the app list for the admin dashboard widget.
"""
context["dashboard_app_list"] = admin_app_list(context["request"])
return context | 5,357,114 |
def load_vars(executor, dirname, main_program=None, vars=None, predicate=None):
"""
Load variables from directory by executor.
:param executor: executor that loads the variables
:param dirname: directory path
:param main_program: program. If vars is None, then filter all variables in this
program which fit `predicate`. Default default_main_program().
:param predicate: a callable that takes a variable and returns a bool.
If it returns True, the variable will be loaded.
:param vars: variables to be loaded. If vars is specified, main_program &
predicate will be ignored
:return: None
"""
if vars is None:
if main_program is None:
main_program = default_main_program()
if not isinstance(main_program, Program):
raise TypeError("program's type should be Program")
load_vars(
executor,
dirname=dirname,
vars=filter(predicate, main_program.list_vars()))
else:
load_prog = Program()
load_block = load_prog.global_block()
for each_var in vars:
assert isinstance(each_var, Variable)
new_var = _clone_var_in_block_(load_block, each_var)
load_block.append_op(
type='load',
inputs={},
outputs={"Out": [new_var]},
attrs={'file_path': os.path.join(dirname, new_var.name)})
executor.run(load_prog) | 5,357,115 |
def path_depth(path: str, depth: int = 1) -> str:
"""Returns the `path` up to a certain depth.
Note that `depth` can be negative (such as `-x`) and will return all
elements except for the last `x` components
"""
return path_join(path.split(CONFIG_SEPARATOR)[:depth]) | 5,357,116 |
def read_config_file(filename, preserve_order=False):
"""
Read and parse a configuration file.
Parameters
----------
filename : str
Path of configuration file
Returns
-------
dict
Configuration dictionary
"""
with open(filename) as f:
return parse_config(f, preserve_order) | 5,357,117 |
def languages_list_handler():
"""Get list of supported review languages (language codes from ISO 639-1).
**Example Request:**
.. code-block:: bash
$ curl https://critiquebrainz.org/ws/1/review/languages \\
-X GET
**Example Response:**
.. code-block:: json
{
"languages": [
"aa",
"ab",
"af",
"ak",
"yo",
"za",
"zh",
"zu"
]
}
:resheader Content-Type: *application/json*
"""
return jsonify(languages=supported_languages) | 5,357,118 |
def corpus_subdirs(path):
""" pathの中のdir(txt以外)をlistにして返す """
subdirs = []
for x in listdir(path):
if not x.endswith('.txt'):
subdirs.append(x)
return subdirs | 5,357,119 |
def edit_string_for_tags(tags):
"""
Given list of ``Tag`` instances or tag strings, creates a string
representation of the list suitable for editing by the user, such
that submitting the given string representation back without
changing it will give the same list of tags.
Tag names which contain commas will be double quoted.
If any tag name which isn't being quoted contains whitespace, the
resulting string of tag names will be comma-delimited, otherwise
it will be space-delimited.
Ported from Jonathan Buchanan's `django-tagging
<http://django-tagging.googlecode.com/>`_
"""
names = []
for tag in tags:
if hasattr(tag, 'name'):
name = tag.name
elif isinstance(tag, str):
name = tag
else:
continue
if u',' in name or u' ' in name:
names.append('"%s"' % name)
else:
names.append(name)
return u', '.join(sorted(names)) | 5,357,120 |
def convert_postgres_array_as_string_to_list(array_as_string: str) -> Optional[list]:
"""
Postgres arrays are stored in CSVs as strings. Elasticsearch is able to handle lists of items, but needs to
be passed a list instead of a string. In the case of an empty array, return null.
For example, "{this,is,a,postgres,array}" -> ["this", "is", "a", "postgres", "array"].
"""
return array_as_string[1:-1].split(",") if len(array_as_string) > 2 else None | 5,357,121 |
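Two quick checks matching the docstring:

assert convert_postgres_array_as_string_to_list("{this,is,a,postgres,array}") == ["this", "is", "a", "postgres", "array"]
assert convert_postgres_array_as_string_to_list("{}") is None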
def generate_prime_number(min_value=0, max_value=300):
"""Generates a random prime number within the range min_value to max_value
Parameters
----------
min_value : int, optional
The smallest possible prime number you want, by default 0
max_value : int, optional
The largest possible prime number you want, by default 300
Returns
-------
int
A randomly selected prime number in the range min_value to max_value
"""
# Create a list of prime values within the range
primes = [number for number in range(min_value,max_value) if is_prime(number)]
return choice(primes) | 5,357,122 |
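A quick check (assumes `is_prime` and `random.choice` are available at module level, as the function itself requires):

p = generate_prime_number(10, 50)
assert 10 <= p < 50
assert all(p % d for d in range(2, p))  # no divisor below p, i.e. p is prime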
def sort_extended_practitioner(practitioner):
"""
Sort on latestDate, then alphabetically on the other practitioners.
:param practitioner:
:return: practitioner
"""
uniques = []
for p in practitioner:
if find_uniques(p, uniques):
uniques.append(p)
return uniques | 5,357,123 |
def lti13_login_params_dict(lti13_login_params):
"""
Return the initial LTI 1.3 authorization request as a dict
"""
utils = LTIUtils()
args = utils.convert_request_to_dict(lti13_login_params)
return args | 5,357,124 |
def calcShannonEnt(dataset):
"""
Compute the Shannon entropy of a dataset.
Input: dataset
Output: entropy
"""
numEntris = len(dataset)
labelCounts = {}
for featVec in dataset:
currentLabel = featVec[-1] # the last element of each row, i.e. the sample's decision label
if currentLabel not in labelCounts.keys():
labelCounts[currentLabel] = 0
labelCounts[currentLabel]+=1 # labelCounts maps each leaf label class (key) to its count (value)
#
shannonEnt = 0
for key in labelCounts:
prob = float(labelCounts[key])/numEntris
shannonEnt -= prob*log(prob,2)
return shannonEnt | 5,357,125 |
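A worked example (assumes `from math import log` at module level, which the entropy formula relies on):

dataset = [[1, 1, 'yes'],
           [1, 0, 'yes'],
           [0, 1, 'no']]
# labels: two 'yes', one 'no'  ->  H = -(2/3)*log2(2/3) - (1/3)*log2(1/3) ≈ 0.918
assert abs(calcShannonEnt(dataset) - 0.918) < 1e-3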
def test_task_by_build_id_option(koji_session, koji_module):
"""
Tasks are specified via module's ``--build-id`` option.
"""
task_id, build_id = koji_session
koji_module._config['build-id'] = [build_id]
koji_module.execute()
assert_task_attributes(koji_module, task_id) | 5,357,126 |
def get_boundaries_medians(things, lowers=[], uppers=[]):
"""Return the boundaries and medians of given percentage ranges.
Parameters:
1. things: a list of numbers
2. lowers: lower percentage limits
3. uppers: upper percentage limits
Returns:
lower, median, upper
"""
# if neither list nor array nor tuple, just return None
if type(things)!=list and type(things)!=array and type(things)!=tuple: return [], [], []
n_things = len(things)
if n_things == 0: return [], [], []
sthings = sorted(list(things))
l = [int(round(1.0 * x * n_things / 100)) - 1 for x in lowers]
r = [int(round(1.0 * x * n_things / 100)) for x in uppers]
return ([sthings[x] for x in l],
[median(sthings[max(0, x):min(n_things, y + 1)]) for x, y in zip(l, r)],
[sthings[y] for y in r])
def getMeanBySweep(abf, markerTime1, markerTime2):
"""
Return the mean value between the markers for every sweep.
"""
assert isinstance(abf, pyabf.ABF)
pointsPerSecond = abf.dataRate
sweepIndex1 = int(pointsPerSecond * markerTime1)
sweepIndex2 = int(pointsPerSecond * markerTime2)
means = []
for i in range(abf.sweepCount):
abf.setSweep(i)
segment = abf.sweepY[sweepIndex1:sweepIndex2]
segmentMean = np.mean(segment)
means.append(segmentMean)
return means | 5,357,128 |
def handler(event, context):
"""
Params:
-------
event (dict):
content (dict):
Both params are standard lambda handler invocation params but not used within this
lambda's code.
Returns:
-------
(string): JSON-encoded dict with top level keys for each of the possible
queries that can be run against the `/datasets` endpoint (key: _all_ contains
result of the LIST operation, each of other keys contain the result of
GET /datasets/{spotlight_id | "global"})
"""
# TODO: defined TypedDicts for these!
datasets = _gather_json_data(DATASETS_JSON_FILEPATH)
sites = _gather_json_data(SITES_JSON_FILEPATH)
result = json.dumps(_gather_datasets_metadata(datasets, sites))
print(
f"Saving generated metadata to {DATASET_METADATA_FILENAME} in bucket {metadata_host_bucket.name}"
)
metadata_host_bucket.put_object(
Body=result, Key=DATASET_METADATA_FILENAME, ContentType="application/json",
)
return result | 5,357,129 |
def add_prefix(key):
"""Dummy key_function for testing index code."""
return "id_" + key | 5,357,130 |
def update_element_key(element_type, old_key, new_key):
"""
Update an element's key in relative tables.
Args:
element_type: (string) object's element type.
old_key: (string) object's old key.
new_key: (string) object's new key
"""
# The object's key has changed.
element = ELEMENT(element_type)
if issubclass(element, ELEMENT("AREA")):
# Update relative room's location.
model_name = ELEMENT("ROOM").model_name
if model_name:
general_query_mapper.filter_records(model_name, area=old_key).update(area=new_key)
elif issubclass(element, ELEMENT("ROOM")):
# Update relative exit's location.
model_name = ELEMENT("EXIT").model_name
if model_name:
general_query_mapper.filter_records(model_name, location=old_key).update(location=new_key)
general_query_mapper.filter_records(model_name, destination=old_key).update(destination=new_key)
# Update relative world object's location.
model_name = ELEMENT("WORLD_OBJECT").model_name
if model_name:
general_query_mapper.filter_records(model_name, location=old_key).update(location=new_key)
# Update relative world NPC's location.
model_name = ELEMENT("WORLD_NPC").model_name
if model_name:
general_query_mapper.filter_records(model_name, location=old_key).update(location=new_key) | 5,357,131 |
def schedule_conv2d_nhwc_tensorcore(cfg, outs):
"""TOPI schedule callback"""
s = te.create_schedule([x.op for x in outs])
def _callback(op):
if "conv2d_nhwc_tensorcore" in op.tag:
schedule_nhwc_tensorcore_cuda(cfg, s, op.output(0))
traverse_inline(s, outs[0].op, _callback)
return s | 5,357,132 |
def simulate(config, show_progress=False):
"""Simulate incarceration contagion dynamics.
Parameters
----------
config : Config
Config object specifying simulation parameters.
Returns
-------
dict
Dictionary specifying simulated population of agents.
"""
popu = initialize(config)
agents = popu.values()
def display(range_obj):
if show_progress:
range_obj = tqdm(range_obj)
return range_obj
# these are in years. need to work in terms of months
for itr in display(range(config.start_iter, config.end_iter)):
for month in range(12):
# infection step
for person in agents:
# random infection, not due to contagion
if valid_age(person, itr, config.min_age):
person["infected"] += infect("*", "*", "*")
# infect connected people
if person["incarcerated"] > 0:
person["incarcerated"] -= 1
person["months_in_prison"] += 1
spread_infection(popu, person, itr, month, config)
# sentencing step
for person in agents:
if person["infected"] and not person["incarcerated"]:
assign_sentence(person, itr, month, config)
person["infected"] = 0
return popu | 5,357,133 |
def get_event_log(file_path: str = None, use_celonis=False):
"""
Gets the event log data structure from the event log file.
Dispatches the methods to be used by file tyoe
:param use_celonis: If the attribute is set to true the event log will be retrieved from celonis
:param file_path: Path to the event-log file
:return:EventLog data structure
"""
if file_path is None and not use_celonis:
raise ValueError("Parameters file_path was None and use_celonis was false at the same time."
"This behavior is not supported")
if use_celonis:
import cel_import
return cel_import.get_event_log_from_celonis()
else:
file_path_lowercase = file_path.lower()
if file_path_lowercase.endswith(".xes"):
return __handle_xes_file(file_path)
else:
raise ValueError('The input file was not a XES file') | 5,357,134 |
def find_peak(corr, method='gaussian'):
"""Peak detection algorithm switch
After loading the correlation window, a maximum finder is invoked.
The correlation window is cut down to the necessary 9 points around the maximum.
Afterwards the maximum is checked not to be close to the border of the correlation frame.
This cropped window is used along with the chosen method to interpolate the sub-pixel shift.
Each interpolation method returns a tuple with the sub-pixel shift in x and y direction.
The maximum's position and the sub-pixel shift are added and returned.
If an error occurred during the sub-pixel interpolation the shift is set to nan.
Also if the interpolation method is unknown, an exception is thrown.
:param corr: correlation window
:param method: peak finder algorithm (gaussian, centroid, parabolic, 9point)
:raises: Sub pixel interpolation method not found
:returns: shift in interrogation window
"""
i, j = np.unravel_index(corr.argmax(), corr.shape)
if check_peak_position(corr, i, j) is False:
return np.nan, np.nan
window = corr[i-1:i+2, j-1:j+2]
if method == 'gaussian':
subpixel_interpolation = gaussian
elif method == 'centroid':
subpixel_interpolation = centroid
elif method == 'parabolic':
subpixel_interpolation = parabolic
elif method == '9point':
subpixel_interpolation = gaussian2D
else:
raise Exception('Sub pixel interpolation method not found!')
try:
dx, dy = subpixel_interpolation(window)
except:
return np.nan, np.nan
else:
return (i + dx, j + dy) | 5,357,135 |
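A hypothetical call on a synthetic correlation plane (the sub-pixel helpers such as `gaussian` and `check_peak_position` are assumed to be defined elsewhere in this module):

import numpy as np

corr = np.zeros((64, 64))
corr[19:22, 29:32] = [[0.5, 0.7, 0.5],
                      [0.7, 1.0, 0.7],
                      [0.5, 0.7, 0.5]]  # a symmetric peak centred on (20, 30)
i, j = find_peak(corr, method='gaussian')
# for a symmetric window the sub-pixel correction is zero, so (i, j) should be ≈ (20.0, 30.0)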
def mesh_plot(
mesh: PyEITMesh,
el_pos,
mstr="",
figsize=(9, 6),
alpha=0.5,
offset_ratio=0.075,
show_image=False,
show_mesh=False,
show_electrode=True,
show_number=False,
show_text=True,
):
"""plot mesh structure (base layout)"""
# load mesh structure
pts = mesh.node
tri = mesh.element
fig, ax = plt.subplots(figsize=figsize)
ax.set_facecolor("black")
ax.set_aspect("equal")
# load background
if show_image and os.path.exists(mstr):
image_name = mstr.replace("mes", "bmp")
im = plt.imread(image_name)
ax.imshow(im)
else:
# without imshow, the yaxis should be inverted
ax.invert_yaxis()
# show mesh structure
if show_mesh:
ax.triplot(pts[:, 0], pts[:, 1], tri, alpha=alpha)
# show electrodes markers
if show_electrode:
ax.plot(pts[el_pos, 0], pts[el_pos, 1], "yo")
# annotate electrodes numbering
if show_number:
for i, e in enumerate(el_pos):
ax.text(pts[e, 0], pts[e, 1], str(i + 1), color="r", size=12)
# annotate (L) at offset_ratio*d beside node 0
if show_text:
xb, xa = pts[el_pos[8], 0], pts[el_pos[0], 0]
d = np.abs(xa - xb)
offset = d * offset_ratio
x, y = xa + offset, pts[el_pos[0], 1]
ax.text(x, y, "L", size=20, color="w")
# enlarge the right of axes if using annotation
ax.set_xlim([xb - offset, xa + 2 * offset])
# clean up axis
ax.grid("off")
plt.setp(ax.get_xticklines(), visible=False)
plt.setp(ax.get_xticklabels(), visible=False)
plt.setp(ax.get_yticklines(), visible=False)
plt.setp(ax.get_yticklabels(), visible=False)
return fig, ax | 5,357,136 |
def atanx(curvelist):
"""
Take the arctangent of x values of a single curve or curves in a list.
>>> curves = pydvif.read('testData.txt')
>>> pydvif.atanx(curves)
:param curvelist: A single curve or a list of curves
:type curvelist: curve or list
"""
if isinstance(curvelist, list):
for c in curvelist:
c.x = np.arctan(c.x)
else:
curvelist.x = np.arctan(curvelist.x)
def binaryToString(binary):
"""
Convert a binary (bit) string back to a UTF-8 string.
"""
index = 0
string = []
rec = lambda x, i: x[2:8] + (rec(x[8:], i - 1) if i > 1 else '') if x else ''
fun = lambda x, i: x[i + 1:8] + rec(x[8:], i - 1)
while index + 1 < len(binary):
chartype = binary[index:].index('0') # number of bytes the character occupies; a single-byte character is stored as 0
length = chartype * 8 if chartype else 8
string.append(chr(int(fun(binary[index:index + length], chartype), 2)))
index += length
return ''.join(string) | 5,357,138 |
def disable_log_warning(fun):
"""Temporarily set FTP server's logging level to ERROR."""
@functools.wraps(fun)
def wrapper(self, *args, **kwargs):
logger = logging.getLogger('pyftpdlib')
level = logger.getEffectiveLevel()
logger.setLevel(logging.ERROR)
try:
return fun(self, *args, **kwargs)
finally:
logger.setLevel(level)
return wrapper | 5,357,139 |
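A sketch of how the decorator is meant to be used inside a test case (class and method names are illustrative):

import unittest

class TestTransfers(unittest.TestCase):

    @disable_log_warning
    def test_noisy_transfer(self):
        # pyftpdlib messages below ERROR are suppressed for this test only;
        # the previous log level is restored afterwards
        ...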
def create_csv_step_logger(save_dir: pyrado.PathLike, file_name: str = "progress.csv") -> StepLogger:
"""
Create a step-based logger which only safes to a csv-file.
:param save_dir: parent directory to save the results in (usually the algorithm's `save_dir`)
:param file_name: name of the cvs-file (with ending)
:return: step-based logger
"""
logger = StepLogger()
logfile = osp.join(save_dir, file_name)
logger.printers.append(CSVPrinter(logfile))
return logger | 5,357,140 |
def _key_chord_transition_distribution(
key_chord_distribution, key_change_prob, chord_change_prob):
"""Transition distribution between key-chord pairs."""
mat = np.zeros([len(_KEY_CHORDS), len(_KEY_CHORDS)])
for i, key_chord_1 in enumerate(_KEY_CHORDS):
key_1, chord_1 = key_chord_1
chord_index_1 = i % len(_CHORDS)
for j, key_chord_2 in enumerate(_KEY_CHORDS):
key_2, chord_2 = key_chord_2
chord_index_2 = j % len(_CHORDS)
if key_1 != key_2:
# Key change. Chord probability depends only on key and not previous
# chord.
mat[i, j] = (key_change_prob / 11)
mat[i, j] *= key_chord_distribution[key_2, chord_index_2]
else:
# No key change.
mat[i, j] = 1 - key_change_prob
if chord_1 != chord_2:
# Chord probability depends on key, but we have to redistribute the
# probability mass on the previous chord since we know the chord
# changed.
mat[i, j] *= (
chord_change_prob * (
key_chord_distribution[key_2, chord_index_2] +
key_chord_distribution[key_2, chord_index_1] / (len(_CHORDS) -
1)))
else:
# No chord change.
mat[i, j] *= 1 - chord_change_prob
return mat | 5,357,141 |
def eigvals(a: Union[dask.array.core.Array, numpy.ndarray]):
"""
usage.dask: 2
usage.scipy: 2
"""
... | 5,357,142 |
def level_set(
current_price, standard_deviation, cloud, stop_mod, take_profit_mod,
):
"""
Calculates risk and reward levels for opening a new position.
Returns a stop (in the format (StopType, offset)) and a take-profit level.
"""
stop = None
take_profit = None
cloud_color = cloud.status[0]
cloud_location = cloud.status[1]
direction_mod = 1
if cloud_color == CloudColor.RED:
direction_mod = -1
take_profit_mod = take_profit_mod * direction_mod
stop_mod = stop_mod * direction_mod
if cloud_location == CloudPriceLocation.INSIDE: # ie passing through long ema
stop = (StopType.EMA_LONG, (standard_deviation * stop_mod * -1))
# If price passes through short EMA from either color cloud.
if cloud_location in (CloudPriceLocation.ABOVE, CloudPriceLocation.BELOW):
stop = (StopType.EMA_LONG, 0)
# Or in case the long EMA is very far away:
if abs(cloud.long_ema - current_price) > abs(current_price -
(cloud.short_ema - (direction_mod * 2 * standard_deviation * -1))):
stop = (
StopType.EMA_SHORT,
(direction_mod * 2 * standard_deviation * -1))
# Or if the long EMA is too close:
elif abs(cloud.long_ema - current_price) < abs(current_price -
(cloud.short_ema - (direction_mod * 0.5 * standard_deviation * -1))):
stop = (
StopType.EMA_SHORT,
(direction_mod * 0.5 * standard_deviation * -1))
take_profit = cloud.short_ema + (standard_deviation * take_profit_mod)
risk_loss = abs(current_price - StopType.stop_tuple_to_level(stop, cloud))
# Enforce max_ratio:1 reward:risk if take_profit is very far away.
max_ratio = 1.5
min_ratio = 1.0
potential_profit = abs(current_price - take_profit)
if potential_profit > max_ratio * risk_loss:
take_profit = current_price + (direction_mod * max_ratio * risk_loss)
if potential_profit < max_ratio * risk_loss:
stop = (current_price, potential_profit * direction_mod * -.95)
return stop, take_profit | 5,357,143 |
def back(deque):
""" returns the last elemement in the que """
if length(deque) > 0:
return deque[-1]
else:
return None | 5,357,144 |
def imshow(image, title=None, norm=None, colorbar_label=None, saveto=None, maximize=False):
"""Shows a 2D image.
Args:
image (np.ndarray, ndim=2):
Image to be plotted.
title (str, optional):
Plot title. Default is None.
norm (str, optional):
Can be set to 'log', for plotting in logarithmic scale. Default is
None.
colorbar_label (str, optional):
Label of the color bar. Default is None.
saveto (str, optional):
Path to save the plot to. Default is None.
maximize (bool, optional):
Set true for showing the plot on full screen. Default is False.
"""
if isinstance(image, np.ndarray):
if image.ndim != 2:
raise SpecklepyValueError('imshow()', 'image.ndim', image.ndim, '2')
if isinstance(image, Quantity):
unit = image.unit
colorbar_label = "({})".format(unit)
image = image.value
else:
raise SpecklepyTypeError('imshow()', 'image', type(image), 'np.ndarray')
if norm == 'log':
norm = clrs.LogNorm()
else:
norm = simple_norm(data=image, percent=99.)
plt.figure()
plt.imshow(image, norm=norm, origin='lower')
plt.title(title)
if maximize:
maximize_plot()
# Color bar
cbar = plt.colorbar(pad=0.0)
if colorbar_label is not None:
cbar.set_label(colorbar_label)
if saveto is not None:
plt.savefig(saveto, dpi=300)
plt.show()
plt.close() | 5,357,145 |
def my_subs_helper(s):
"""Helper function to handle badly formed JSON stored in the database"""
try:
return {'time_created':s.time_created, 'json_obj':sorted(json.loads(s.json_data).items(), key=operator.itemgetter(0)), 'plain_json_obj':json.dumps(json.loads(s.json_data)),'id':s.id, 'json_score_data':json.dumps(s.json_score_data)}
except ValueError:
return {'time_created':s.time_created, 'json_obj':"__ERROR__", 'plain_json_obj':"__ERROR__", 'id':s.id} | 5,357,146 |
def dynamic_features(data_dir, year, data_source, voronoi, radar_buffers, **kwargs):
"""
Load all dynamic features, including bird densities and velocities, environmental data, and derived features
such as estimated accumulation of birds on the ground due to adverse weather.
Missing data is interpolated, but marked as missing.
:param data_dir: directory containing all relevant data
:param year: year of interest
:param data_source: 'radar' or 'abm' (simulated data)
:param voronoi: Voronoi tessellation (geopandas dataframe)
:param radar_buffers: radar buffers with static features (geopandas dataframe)
:return: dynamic features (pandas dataframe)
"""
env_points = kwargs.get('env_points', 100)
season = kwargs.get('season', 'fall')
random_seed = kwargs.get('seed', 1234)
pref_dirs = kwargs.get('pref_dirs', {'spring': 58, 'fall': 223})
pref_dir = pref_dirs[season]
wp_threshold = kwargs.get('wp_threshold', -0.5)
edge_type = kwargs.get('edge_type', 'voronoi')
t_unit = kwargs.get('t_unit', '1H')
print(f'##### load data for {season} {year} #####')
if data_source in ['radar', 'nexrad']:
print(f'load radar data')
radar_dir = osp.join(data_dir, data_source)
voronoi_radars = voronoi.query('observed == True')
birds_km2, _, t_range = datahandling.load_season(radar_dir, season, year, ['vid'],
t_unit=t_unit, mask_days=False,
radar_names=voronoi_radars.radar,
interpolate_nans=False)
radar_data, _, t_range = datahandling.load_season(radar_dir, season, year, ['ff', 'dd', 'u', 'v'],
t_unit=t_unit, mask_days=False,
radar_names=voronoi_radars.radar,
interpolate_nans=True)
bird_speed = radar_data[:, 0, :]
bird_direction = radar_data[:, 1, :]
bird_u = radar_data[:, 2, :]
bird_v = radar_data[:, 3, :]
# rescale according to voronoi cell size
data = birds_km2 * voronoi_radars.area_km2.to_numpy()[:, None]
t_range = t_range.tz_localize('UTC')
elif data_source == 'abm':
print(f'load abm data')
abm_dir = osp.join(data_dir, 'abm')
voronoi_radars = voronoi.query('observed == True')
radar_buffers_radars = radar_buffers.query('observed == True')
data, t_range, bird_u, bird_v = abm.load_season(abm_dir, season, year, voronoi_radars)
buffer_data = abm.load_season(abm_dir, season, year, radar_buffers_radars, uv=False)[0]
# rescale to birds per km^2
birds_km2 = data / voronoi_radars.area_km2.to_numpy()[:, None]
birds_km2_from_buffer = buffer_data / radar_buffers_radars.area_km2.to_numpy()[:, None]
# rescale to birds per voronoi cell
birds_from_buffer = birds_km2_from_buffer * voronoi_radars.area_km2.to_numpy()[:, None]
# time range for solar positions to be able to infer dusk and dawn
solar_t_range = t_range.insert(-1, t_range[-1] + pd.Timedelta(t_range.freq))
print('load env data')
env_vars = kwargs.get('env_vars', ['u', 'v', 'u10', 'v10', 'cc', 'tp', 'sp', 't2m', 'sshf'])
env_vars = [v for v in env_vars if not v in ['night', 'dusk', 'dawn', 'dayofyear', 'solarpos', 'solarpos_dt']]
if len(env_vars) > 0:
if edge_type == 'voronoi':
env_areas = voronoi.geometry
else:
env_areas = radar_buffers.geometry
env_850 = era5interface.compute_cell_avg(osp.join(data_dir, 'env', season, year, 'pressure_level_850.nc'),
env_areas, env_points,
t_range.tz_localize(None), vars=env_vars, seed=random_seed)
env_surface = era5interface.compute_cell_avg(osp.join(data_dir, 'env', season, year, 'surface.nc'),
env_areas, env_points,
t_range.tz_localize(None), vars=env_vars, seed=random_seed)
dfs = []
for ridx, row in voronoi.iterrows():
df = {}
df['radar'] = [row.radar] * len(t_range)
print(f'preprocess radar {row.radar}')
# time related variables for radar ridx
solarpos = np.array(solarposition.get_solarposition(solar_t_range, row.lat, row.lon).elevation)
night = np.logical_or(solarpos[:-1] < -6, solarpos[1:] < -6)
df['solarpos_dt'] = solarpos[:-1] - solarpos[1:]
df['solarpos'] = solarpos[:-1]
df['night'] = night
df['dusk'] = np.logical_and(solarpos[:-1] >=6, solarpos[1:] < 6) # switching from day to night
df['dawn'] = np.logical_and(solarpos[:-1] < 6, solarpos[1:] >=6) # switching from night to day
df['datetime'] = t_range
df['dayofyear'] = pd.DatetimeIndex(t_range).dayofyear
df['tidx'] = np.arange(t_range.size)
# bird measurements for radar ridx
df['birds'] = data[ridx] if row.observed else [np.nan] * len(t_range)
df['birds_km2'] = birds_km2[ridx] if row.observed else [np.nan] * len(t_range)
cols = ['birds', 'birds_km2', 'birds_from_buffer', 'birds_km2_from_buffer', 'bird_u', 'bird_v']
df['bird_u'] = bird_u[ridx] if row.observed else [np.nan] * len(t_range)
df['bird_v'] = bird_v[ridx] if row.observed else [np.nan] * len(t_range)
if data_source == 'abm':
df['birds_from_buffer'] = birds_from_buffer[ridx] if row.observed else [np.nan] * len(t_range)
df['birds_km2_from_buffer'] = birds_km2_from_buffer[ridx] if row.observed else [np.nan] * len(t_range)
else:
df['birds_from_buffer'] = data[ridx] if row.observed else [np.nan] * len(t_range)
df['birds_km2_from_buffer'] = birds_km2[ridx] if row.observed else [np.nan] * len(t_range)
df['bird_speed'] = bird_speed[ridx] if row.observed else [np.nan] * len(t_range)
df['bird_direction'] = bird_direction[ridx] if row.observed else [np.nan] * len(t_range)
cols.extend(['bird_speed', 'bird_direction'])
if len(env_vars) > 0:
# environmental variables for radar ridx
for var in env_vars:
if var in env_850:
print(f'found {var} in env_850 dataset')
df[var] = env_850[var][ridx]
elif var in env_surface:
print(f'found {var} in surface dataset')
df[var] = env_surface[var][ridx]
df['wind_speed'] = np.sqrt(np.square(df['u']) + np.square(df['v']))
# Note that here wind direction is the direction into which the wind is blowing,
# which is the opposite of the standard meteorological wind direction
df['wind_dir'] = (abm.uv2deg(df['u'], df['v']) + 360) % 360
# compute accumulation variables (for baseline models)
groups = [list(g) for k, g in it.groupby(enumerate(df['night']), key=lambda x: x[-1])]
nights = [[item[0] for item in g] for g in groups if g[0][1]]
df['nightID'] = np.zeros(t_range.size)
df['acc_rain'] = np.zeros(t_range.size)
df['acc_wind'] = np.zeros(t_range.size)
df['wind_profit'] = np.zeros(t_range.size)
acc_rain = 0
u_rain = 0
acc_wind = 0
u_wind = 0
for nidx, night in enumerate(nights):
df['nightID'][night] = np.ones(len(night)) * (nidx + 1)
# accumulation due to rain in the past nights
acc_rain = acc_rain/3 + u_rain * 2/3
df['acc_rain'][night] = np.ones(len(night)) * acc_rain
# compute proportion of hours with rain during the night
u_rain = np.mean(df['tp'][night] > 0.01)
# accumulation due to unfavourable wind in the past nights
acc_wind = acc_wind/3 + u_wind * 2/3
df['acc_wind'][night] = np.ones(len(night)) * acc_wind
# compute wind profit for bird with speed 12 m/s and flight direction 223 degree north
v_air = np.ones(len(night)) * 12
alpha = np.ones(len(night)) * pref_dir
df['wind_profit'][night] = v_air - np.sqrt(v_air**2 + df['wind_speed'][night]**2 -
2 * v_air * df['wind_speed'][night] *
np.cos(np.deg2rad(alpha-df['wind_dir'][night])))
u_wind = np.mean(df['wind_profit'][night]) < wp_threshold
radar_df = pd.DataFrame(df)
radar_df['missing'] = 0
for col in cols:
if data_source == 'radar':
# radar quantities being exactly 0 during the night are missing,
# radar quantities during the day are set to 0
print(f'check missing data for column {col}')
radar_df[col] = radar_df.apply(lambda row: np.nan if (row.night and not row[col])
else (0 if not row.night else row[col]), axis=1)
# remember missing radar observations
radar_df['missing'] = radar_df['missing'] | radar_df[col].isna()
# fill missing bird measurements by interpolation
if col == 'bird_direction':
# use "nearest", to avoid artifacts of interpolating between e.g. 350 and 2 degree
radar_df[col].interpolate(method='nearest', inplace=True)
else:
# for all other quantities simply interpolate linearly
radar_df[col].interpolate(method='linear', inplace=True)
else:
radar_df[col] = radar_df.apply(lambda row: np.nan if (row.night and np.isnan(row[col]))
else (0 if not row.night else row[col]), axis=1)
radar_df['missing'] = radar_df['missing'] | radar_df[col].isna()
# fill missing bird measurements with 0
radar_df[col].fillna(0, inplace=True)
dfs.append(radar_df)
print(f'found {radar_df.missing.sum()} missing time points')
dynamic_feature_df = pd.concat(dfs, ignore_index=True)
print(f'columns: {dynamic_feature_df.columns}')
return dynamic_feature_df | 5,357,147 |
def add_parser_arguments(verb_parser, extension):
"""
Add the arguments and recursive subparsers to a specific verb parser.
If the extension has an `add_arguments` method it is being called with the
subparser being passed as the only argument.
:param verb_parser: The verb parser
:param extension: The verb extension
"""
if hasattr(extension, 'add_arguments'):
retval = extension.add_arguments(parser=verb_parser)
if retval is not None:
colcon_logger.error(
"Exception in verb extension '{extension.VERB_NAME}': "
'add_arguments() should return None'.format_map(locals())) | 5,357,148 |
def __normalize_allele_strand(snp_dfm):
"""
Keep all the alleles on FWD strand.
If `strand` is "-", flip every base in `alleles`; otherwise do not change `alleles`.
"""
on_rev = (snp_dfm.loc[:, "strand"] == "-")
has_alleles = (snp_dfm.loc[:, "alleles"].str.len() > 0)
condition = (on_rev & has_alleles)
if not snp_dfm.loc[condition, :].empty:
snp_dfm.loc[condition, "alleles"] = snp_dfm.loc[condition, "alleles"].apply(flip_allele)
return snp_dfm | 5,357,149 |
def TestNetworkListFields():
"""gnt-network list-fields"""
qa_utils.GenericQueryFieldsTest("gnt-network", list(query.NETWORK_FIELDS)) | 5,357,150 |
def test_init():
"""Test extension initialization."""
app = Flask('testapp')
app.url_map.converters['pid'] = PIDConverter
ext = InvenioDeposit(app)
assert 'invenio-deposit' in app.extensions
app = Flask('testapp')
app.url_map.converters['pid'] = PIDConverter
# check that current_deposit cannot be resolved
with app.app_context():
with pytest.raises(KeyError):
current_deposit.app
ext = InvenioDeposit()
assert 'invenio-deposit' not in app.extensions
ext.init_app(app)
assert 'invenio-deposit' in app.extensions
# check that current_deposit resolves correctly
with app.app_context():
current_deposit.app | 5,357,151 |
def add_to_codetree(tword,codetree,freq=1):
""" Adds one tuple-word to tree structure - one node per symbol
word end in the tree characterized by node[0]>0
"""
unique=0
for pos in range(len(tword)):
s = tword[pos]
if s not in codetree:
codetree[s] = [0,{}]
unique+=1
codetree[s][0] += freq
codetree = codetree[s][1]
return unique | 5,357,152 |
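A small illustration of the resulting layout ([count, children] per symbol):

codetree = {}
add_to_codetree(('t', 'o'), codetree)
add_to_codetree(('t', 'e', 'a'), codetree)
assert codetree['t'][0] == 2          # 't' starts both words
assert codetree['t'][1]['o'][0] == 1  # the branch for 'to' was seen once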
def draw_watch(sw):
""" Renders the stopwatch object <sw> in a digital hr:min:sec format. """
# Compute time in hours, minutes and seconds
seconds = round(sw.elapsed())
time = ''
hours = seconds // 3600 # Compute total hours
seconds %= 3600 # Update seconds remaining
minutes = seconds // 60 # Compute minutes
seconds %= 60 # Update seconds remaining
# Each digit occupies two spaces; pad with leading zeros, if neccessary
time = '{0:0>2}:{1:0>2}:{2:0>2}'.format(hours, minutes, seconds)
# Draw graphical string
t.clear()
t.penup()
t.setposition(-200, 0)
t.pendown()
t.write(time, font=('Arial', 64, 'normal'))
t.penup()
t.setposition(-50, -50)
t.pendown()
t.write(sw.counter(), font=('Arial', 24, 'normal')) | 5,357,153 |
def viz_samples(data, trace, num_sweeps, K, viz_interval=3, figure_size=3, title_fontsize=20, marker_size=1.0, opacity=0.3, bound=20, colors=['#AA3377','#0077BB', '#EE7733', '#009988', '#BBBBBB', '#EE3377', '#DDCC77'], save_name=None):
"""
visualize the samples along the sweeps
"""
E_tau, E_mu, E_z = trace['E_tau'].cpu(), trace['E_mu'].cpu(), trace['E_z'].cpu()
num_rows = len(data)
num_cols = 2 + int((num_sweeps-1) / viz_interval)
gs = gridspec.GridSpec(num_rows, num_cols)
gs.update(left=0.0 , bottom=0.0, right=1.0, top=1.0, wspace=0, hspace=0)
fig = plt.figure(figsize=(figure_size * num_cols, figure_size * num_rows))
for row_ind in range(num_rows):
ax = fig.add_subplot(gs[row_ind, 0])
viz_gmm(ax, data[row_ind], K, marker_size, opacity, bound, colors, latents=None) ## visualize raw dataset in the 1st column
if row_ind == 0:
ax.set_title('Data', fontsize=title_fontsize)
# col_ind = 1
for col_ind in range(num_cols-1):
sweep = col_ind * viz_interval
ax = fig.add_subplot(gs[row_ind, col_ind+1])
viz_gmm(ax, data[row_ind], K, marker_size, opacity, bound, colors, latents=(E_tau[sweep, row_ind], E_mu[sweep, row_ind], E_z[sweep, row_ind]))
if row_ind == 0:
if sweep == 0:
ax.set_title('RWS', fontsize=title_fontsize)
else:
ax.set_title('sweep %d' % sweep, fontsize=title_fontsize)
if save_name is not None:
plt.savefig(save_name + '.svg', dpi=300) | 5,357,154 |
def TA_ADXm(data, period=10, smooth=10, limit=18):
"""
Moving Average ADX
ADX Smoothing Trend Color Change on Moving Average and ADX Cross. Use on Hourly Charts - Green UpTrend - Red DownTrend - Black Choppy No Trend
Source: https://www.tradingview.com/script/owwws7dM-Moving-Average-ADX/
Parameters
----------
data : (N,) array_like
The OHLC Kline sequence.
period : int or None, optional
DI length period. Default value is 10.
smooth : int or None, optional
ADX smoothing length period. Default value is 10.
limit : int or None, optional
ADX MA active limit threshold. Default value is 18.
Returns
-------
adx, ADXm : ndarray
The ADXm indicator and the trend direction sequence; (-1, 0, 1) stand for (downtrend, no clear trend, uptrend).
"""
up = data.high.pct_change()
down = data.low.pct_change() * -1
trur = TA_HMA(talib.TRANGE(data.high.values, data.low.values, data.close.values) , period)
plus = 100 * TA_HMA(np.where(((up > down) & (up > 0)), up, 0), period) / trur
minus = 100 * TA_HMA(np.where(((down > up) & (down > 0)), down, 0), period) / trur
# Substitute for dropna: dropping values from the ndarray could misalign it with data.index, so missing values are zero-padded instead.
plus = np.r_[np.zeros(period + 2), plus[(period + 2):]]
minus = np.r_[np.zeros(period + 2), minus[(period + 2):]]
sum = plus + minus
adx = 100 * TA_HMA(abs(plus - minus) / (np.where((sum == 0), 1, sum)), smooth)
adx = np.r_[np.zeros(smooth + 2), adx[(smooth + 2):]]
ADXm = np.where(((adx > limit) & (plus > minus)), 1, np.where(((adx > limit) & (plus < minus)), -1, 0))
return adx, ADXm | 5,357,155 |
def remove_from_dict(obj, keys=list(), keep_keys=True):
""" Prune a class or dictionary of all but keys (keep_keys=True).
Prune a class or dictionary of specified keys.(keep_keys=False).
"""
if isinstance(obj, dict):
items = list(obj.items())
else:
items = list(obj.__dict__.items())
if keep_keys:
return {k: v for k, v in items if k in keys}
else:
return {k: v for k, v in items if k not in keys} | 5,357,156 |
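Two quick examples covering both modes:

record = {"name": "ada", "role": "admin", "token": "secret"}
assert remove_from_dict(record, keys=["name", "role"]) == {"name": "ada", "role": "admin"}
assert remove_from_dict(record, keys=["token"], keep_keys=False) == {"name": "ada", "role": "admin"}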
def unify_qso_catalog_uvqs_old(qsos):
"""Unifies the name of columns that are relevant for the analysis"""
qsos.rename_column('RA','ra')
qsos.rename_column('DEC','dec')
qsos.rename_column('FUV','mag_fuv')
qsos.rename_column('Z','redshift')
qsos.add_column(Column(name='id',data=np.arange(len(qsos))+1))
return qsos | 5,357,157 |
def get_hash_key_name(value):
"""Returns a valid entity key_name that's a hash of the supplied value."""
return 'hash_' + sha1_hash(value) | 5,357,158 |
def uniform_generator():
"""
Uses a linear congruential generator to generate a random variable between 0 and 1
from a uniform distribution.
Basic Equation for generator is
X_(n+1) = (a * X_n + b) mod m
where a, b and m are large numbers.
However, to allow large periods between states, a separate variable is maintained.
"""
b = 3123135
a = 1010012
seed = get_seed()
temp_num = seed
while True:
temp_num = (temp_num * a + b) % 109297270343
random_number = temp_num % limit
yield random_number/limit | 5,357,159 |
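# Self-contained sketch of the same linear congruential recurrence. The generator above
# relies on module-level get_seed() and limit; here they are replaced by explicit,
# hypothetical values so the snippet runs on its own.
def lcg_uniform(seed=42, a=1010012, b=3123135, m=109297270343, limit=10**9):
    """Yield pseudo-random floats in [0, 1) via X_(n+1) = (a * X_n + b) mod m."""
    state = seed
    while True:
        state = (state * a + b) % m
        yield (state % limit) / limit

gen = lcg_uniform()
samples = [next(gen) for _ in range(3)]
assert all(0.0 <= x < 1.0 for x in samples)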
def yaml_dumps(value, indent=2):
"""
YAML dumps that supports Unicode and the ``as_raw`` property of objects if available.
"""
return yaml.dump(value, indent=indent, allow_unicode=True, Dumper=YamlAsRawDumper) | 5,357,160 |
def poi_remove(poi_id: int):
"""Removes POI record
Args:
poi_id: ID of the POI to be removed
"""
poi = POI.get_by_id(poi_id)
if not poi:
abort(404)
poi.delete()
db.session.commit()
return redirect_return() | 5,357,161 |
def plot(cmp, noffsets, nsamples, dt):
"""
Plots synthetic cmp gathers
"""
cutoff = 0.1
plt.imshow(cmp, extent=[0.5, noffsets + 0.5, dt*nsamples, 0],
aspect='auto', cmap='Greys', vmin=-cutoff, vmax=cutoff,
interpolation='none')
# following is purely for visual purposes
trace_numbers = list(range(1, noffsets + 1))
plt.xticks(trace_numbers)
plt.title('CMP')
plt.xlabel('Trace number')
plt.ylabel('Time (s)')
plt.show() | 5,357,162 |
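# Minimal call with a synthetic gather (noise only, purely illustrative); assumes the
# module-level matplotlib import (plt) used by plot() is present.
import numpy as np

nsamples, noffsets, dt = 200, 12, 0.004
cmp_gather = 0.05 * np.random.randn(nsamples, noffsets)
plot(cmp_gather, noffsets, nsamples, dt)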
def stringToTupleOfFloats(s):
"""
Converts s to a tuple
@param s: string
@return: tuple represented by s
"""
ans = []
for i in s.strip("()").split(","):
if i.strip() != "":
if i == "null":
ans.append(None)
else:
ans.append(float(i))
return tuple(ans) | 5,357,163 |
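# Examples of the parsing behaviour, including the "null" placeholder.
assert stringToTupleOfFloats("(1.5,null,-2)") == (1.5, None, -2.0)
assert stringToTupleOfFloats("()") == ()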
def create_training(training: TrainingSchema):
"""
Create an training with an TrainingSchema
:param training: training data as TrainingSchema
:return: http response
"""
endpoint_url = Config.get_api_url() + "training"
job_token = Config.get_job_token()
headers = {
'content-type': 'application/json',
'jobtoken': job_token
}
data = json.dumps(training.get_dict())
response = requests.post(endpoint_url, data=data, headers=headers)
return response | 5,357,164 |
def plot_tracks_parameter_space(
tracks,
n_tracks=None,
condition="Condition",
save=False,
palette="deep",
skip_color=0,
context="notebook",
):
"""Plot tracks in velocities-turning-angles-space"""
if "Displacement" not in tracks.columns:
tracks = analyze(tracks)
if condition not in tracks.columns:
tracks[condition] = "Default"
sns.set(
style="ticks",
palette=sns.color_palette(
palette, tracks[condition].unique().__len__() + skip_color
),
context=context,
)
fig, ax = plt.subplots(figsize=(5.5, 5.5))
ax.set_xlabel("Turning Angle")
ax.set_xlim([0, np.pi])
ax.set_xticks([0, np.pi / 2, np.pi])
ax.set_xticklabels([r"$0$", r"$\pi/2$", r"$\pi$"])
ax.set_ylabel("Velocity")
for i, (_, cond_tracks) in enumerate(tracks.groupby(condition)):
color = sns.color_palette()[i + skip_color]
        if n_tracks is not None:
cond_tracks = cond_tracks[
cond_tracks["Track_ID"].isin(
np.random.choice(cond_tracks["Track_ID"], n_tracks)
)
]
for _, track in cond_tracks.groupby("Track_ID"):
ax.plot(track["Turning Angle"], track["Velocity"], color=color, alpha=0.5)
sns.despine()
plt.tight_layout()
if save:
conditions = [cond.replace("= ", "") for cond in tracks[condition].unique()]
plt.savefig(
"Motility-TracksInParameterSpace_" + "-".join(conditions) + ".png", dpi=300
)
else:
plt.show() | 5,357,165 |
def on_connect():
"""
Handle connection event and send authentication key
"""
send_auth() | 5,357,166 |
def respects_language(fun):
"""Decorator for tasks with respect to site's current language.
You can use this decorator on your tasks together with default @task
decorator (remember that the task decorator must be applied last).
See also the with-statement alternative :func:`respect_language`.
**Example**:
.. code-block:: python
@task
@respects_language
        def my_task():
# localize something.
The task will then accept a ``language`` argument that will be
used to set the language in the task, and the task can thus be
called like:
.. code-block:: python
from django.utils import translation
from myapp.tasks import my_task
# Pass the current language on to the task
my_task.delay(language=translation.get_language())
# or set the language explicitly
my_task.delay(language='no.no')
"""
@wraps(fun)
def _inner(*args, **kwargs):
with respect_language(kwargs.pop('language', None)):
return fun(*args, **kwargs)
return _inner | 5,357,167 |
def zero_one_window(data, axis=(0, 1, 2), ceiling_percentile=99, floor_percentile=1, floor=0, ceiling=1,
channels_axis=None):
"""
:param data: Numpy ndarray.
    :param axis: Axes over which the floor percentile is computed.
:param ceiling_percentile: Percentile value of the foreground to set to the ceiling.
:param floor_percentile: Percentile value of the image to set to the floor.
:param floor: New minimum value.
:param ceiling: New maximum value.
    :param channels_axis: Axis that holds the channels; inferred from ``axis`` when None.
    :return: A windowed copy of ``data`` rescaled to the [floor, ceiling] range.
"""
data = np.copy(data)
if len(axis) != data.ndim:
floor_threshold = np.percentile(data, floor_percentile, axis=axis)
if channels_axis is None:
channels_axis = find_channel_axis(data.ndim, axis=axis)
data = np.moveaxis(data, channels_axis, 0)
for channel in range(data.shape[0]):
channel_data = data[channel]
            # find the background
            bg_mask = channel_data <= floor_threshold[channel]
            # use the background mask to select the foreground
            fg = channel_data[~bg_mask]
# find threshold based on foreground percentile
ceiling_threshold = np.percentile(fg, ceiling_percentile)
# normalize the data for this channel
data[channel] = window_data(channel_data, floor_threshold=floor_threshold[channel],
ceiling_threshold=ceiling_threshold, floor=floor, ceiling=ceiling)
data = np.moveaxis(data, 0, channels_axis)
else:
floor_threshold = np.percentile(data, floor_percentile)
fg_mask = data > floor_threshold
fg = data[fg_mask]
ceiling_threshold = np.percentile(fg, ceiling_percentile)
data = window_data(data, floor_threshold=floor_threshold, ceiling_threshold=ceiling_threshold, floor=floor,
ceiling=ceiling)
return data | 5,357,168 |
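# Illustrative call on random single-channel data (synthetic values; window_data and
# find_channel_axis are assumed to be defined elsewhere in this module).
import numpy as np

volume = np.random.exponential(scale=100.0, size=(32, 32, 32))
normalized = zero_one_window(volume, axis=(0, 1, 2), ceiling_percentile=99, floor_percentile=1)
print(normalized.min(), normalized.max())  # expected to lie in (or clip to) [0, 1]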
def factorial(x):
"""factorial(x) -> Integral
"Find x!. Raise a ValueError if x is negative or non-integral."""
if isinstance(x, float):
fl = int(x)
if fl != x:
raise ValueError("float arguments must be integral")
x = fl
if x > sys.maxsize:
raise OverflowError("Too large for a factorial")
if x <= 100:
if x < 0:
raise ValueError("x must be >= 0")
res = 1
for i in range(2, x + 1):
res *= i
return res
# Experimentally this gap seems good
gap = max(100, x >> 7)
def _fac_odd(low, high):
if low + gap >= high:
t = 1
for i in range(low, high, 2):
t *= i
return t
mid = ((low + high) >> 1) | 1
return _fac_odd(low, mid) * _fac_odd(mid, high)
def _fac1(x):
if x <= 2:
return 1, 1, x - 1
x2 = x >> 1
f, g, shift = _fac1(x2)
g *= _fac_odd((x2 + 1) | 1, x + 1)
return (f * g, g, shift + x2)
res, _, shift = _fac1(x)
return res << shift | 5,357,169 |
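# Sanity checks against math.factorial, covering the small-x loop, the float path,
# and the divide-and-conquer branch used for x > 100.
import math

for n in (0, 5, 50, 1000):
    assert factorial(n) == math.factorial(n)
assert factorial(10.0) == 3628800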
def get_event_stderr(e):
"""Return the stderr field (if any) associated with the event."""
if _API_VERSION == google_v2_versions.V2ALPHA1:
return e.get('details', {}).get('stderr')
elif _API_VERSION == google_v2_versions.V2BETA:
for event_type in ['containerStopped']:
if event_type in e:
return e[event_type].get('stderr')
else:
assert False, 'Unexpected version: {}'.format(_API_VERSION) | 5,357,170 |
def xA(alpha, gamma, lsa, lsd, y, xp, nv):
"""Calculate position where the beam hits the analyzer crystal.
:param alpha: the divergence angle of the neutron
:param gamma: the tilt angle of the deflector
:param lsa: the sample-analyzer distance
:param lsd: the sample deflector distance
:param y: the translation of the analyser crystal
:param xp: the point at the sample where the neutron is scattered
:param nv: neutron path: transmitted(0), reflected at the first deflector(1),
reflected at the second deflector(2),
"""
if nv == 0:
return xp + (lsa - y) * tan(radians(alpha))
return xp + lsd * tan(radians(alpha)) + \
(lsa - lsd - y) * tan(radians(2 * gamma - alpha)) | 5,357,171 |
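# Worked example with hypothetical instrument distances and angles (degrees);
# tan/radians come from math, as in the function body above.
from math import tan, radians  # noqa: F401  (already imported by the module in practice)

x_direct = xA(alpha=0.5, gamma=1.0, lsa=2.0, lsd=1.2, y=0.05, xp=0.0, nv=0)     # transmitted beam
x_reflected = xA(alpha=0.5, gamma=1.0, lsa=2.0, lsd=1.2, y=0.05, xp=0.0, nv=1)  # reflected at first deflector
print(x_direct, x_reflected)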
def win2():
"""
This is the configuration for the best performing model from the DPRNN
paper. Training takes very long time with this configuration.
"""
# The model becomes very memory consuming with this small window size.
# You might have to reduce the chunk size as well.
batch_size = 1
trainer = {
'model': {
'encoder_block_size': 2,
'dprnn_window_length': 250,
'dprnn_hop_size': 125, # Half of window length
}
} | 5,357,172 |
def find_recent_login(user_id: UserID) -> Optional[datetime]:
"""Return the time of the user's most recent login, if found."""
recent_login = db.session \
.query(DbRecentLogin) \
.filter_by(user_id=user_id) \
.one_or_none()
if recent_login is None:
return None
return recent_login.occurred_at | 5,357,173 |
def test_get_strategy():
"""
Test rebootmgr.get_strategy without parameters
"""
strategy = "Reboot strategy: best-effort"
salt_mock = {
"cmd.run_all": MagicMock(return_value={"stdout": strategy, "retcode": 0})
}
with patch.dict(rebootmgr.__salt__, salt_mock):
assert rebootmgr.get_strategy() == "best-effort"
salt_mock["cmd.run_all"].assert_called_with(["rebootmgrctl", "get-strategy"]) | 5,357,174 |
async def async_remove_config_entry_device(
hass: HomeAssistant, config_entry: ConfigEntry, device_entry: dr.DeviceEntry
) -> bool:
"""Remove ufp config entry from a device."""
unifi_macs = {
_async_unifi_mac_from_hass(connection[1])
for connection in device_entry.connections
if connection[0] == dr.CONNECTION_NETWORK_MAC
}
api = async_ufp_instance_for_config_entry_ids(hass, {config_entry.entry_id})
assert api is not None
if api.bootstrap.nvr.mac in unifi_macs:
return False
for device in async_get_devices(api.bootstrap, DEVICES_THAT_ADOPT):
if device.is_adopted_by_us and device.mac in unifi_macs:
return False
return True | 5,357,175 |
def batch_unsrt_segment_sum(data, segment_ids, num_segments):
""" Performas the `tf.unsorted_segment_sum` operation batch-wise"""
# create distinct segments per batch
num_batches = tf.shape(segment_ids, out_type=tf.int64)[0]
batch_indices = tf.range(num_batches)
segment_ids_per_batch = segment_ids + num_segments * tf.expand_dims(batch_indices, axis=1)
    # do the normal unsorted segment sum and reshape to the original shape
seg_sums = tf.unsorted_segment_sum(data, segment_ids_per_batch, num_segments * num_batches)
return tf.reshape(seg_sums, tf.stack((-1, num_segments))) | 5,357,176 |
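# Small worked example (TF1-style API, matching the tf.unsorted_segment_sum call above;
# under TF2 eager execution the result evaluates directly, under graph mode it needs a session).
import tensorflow as tf

data = tf.constant([[1., 2., 3.], [4., 5., 6.]])              # batch of 2 rows
segment_ids = tf.constant([[0, 0, 1], [1, 1, 1]], tf.int64)   # per-row segment labels
result = batch_unsrt_segment_sum(data, segment_ids, num_segments=2)
# Expected per-batch sums: [[3., 3.], [0., 15.]]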
def test_main(kube_config, log_to_file_config):
"""Test the main function of the Kubernetes Controller, and verify that it starts,
    displays the right output and stops without issue.
"""
log_config, file_path = log_to_file_config()
kube_config.api_endpoint = "http://my-krake-api:1234"
kube_config.log = log_config
def wrapper(configuration):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
main(configuration)
# Start the process and let it time to initialize
process = multiprocessing.Process(target=wrapper, args=(kube_config,))
process.start()
time.sleep(2)
# Stop and wait for the process to finish
process.terminate()
process.join()
assert not process.is_alive()
assert process.exitcode == 0
# Verify the output of the process
with open(file_path, "r") as f:
output = f.read()
assert "Controller started" in output
assert "Received signal, exiting..." in output
assert "Controller stopped" in output
# Verify that all "ERROR" lines in the output are only errors that logs the lack of
# connectivity to the API.
attempted_connectivity = False
for line in output.split("\n"):
if "ERROR" in output:
message = (
f"In line {line!r}, an error occurred which was different from the"
f" error from connecting to the API."
)
assert "Cannot connect to host my-krake-api:1234" in output, message
attempted_connectivity = True
assert attempted_connectivity | 5,357,177 |
def read_json(file_name):
"""Read json from file."""
with open(file_name) as f:
return json.load(f) | 5,357,178 |
def get_role(request):
"""Look up the "role" query parameter in the URL."""
query = request.ws_resource.split('?', 1)
if len(query) == 1:
raise LookupError('No query string found in URL')
param = parse.parse_qs(query[1])
if 'role' not in param:
raise LookupError('No role parameter found in the query string')
return param['role'][0] | 5,357,179 |
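# Stand-in request object for illustration (assumes `parse` is urllib.parse, as the
# parse.parse_qs call suggests).
from types import SimpleNamespace

request = SimpleNamespace(ws_resource="/session?role=host&room=42")
assert get_role(request) == "host"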
def login(username: str, password: str) -> Person:
"""通过用户名和密码登录智学网
Args:
username (str): 用户名, 可以为准考证号, 手机号
password (str): 密码
Raises:
ArgError: 参数错误
UserOrPassError: 用户名或密码错误
UserNotFoundError: 未找到用户
LoginError: 登录错误
RoleError: 账号角色未知
Returns:
Person
"""
session = get_session(username, password)
if check_is_student(session):
return StudentAccount(session).set_base_info()
return TeacherAccount(session).set_base_info() | 5,357,180 |
def _decode_end(_fp):
"""Decode the end tag, which has no data in the file, returning 0.
:type _fp: A binary `file object`
:rtype: int
"""
return 0 | 5,357,181 |
def export_private_keys(s_keys, folder_name='./data', file_name='secrets.txt'):
""" Exports a set of private keys to a file.
Each line in the file is one key.
"""
    if not os.path.exists(folder_name):
        os.makedirs(folder_name)
    arch = open(os.path.join(folder_name, file_name), 'w')
for key in s_keys:
arch.write('{}\n'.format(key))
arch.close() | 5,357,182 |
def raise_complete_async() -> NoReturn:
"""Raise an error that says the activity will be completed
asynchronously.
"""
raise _CompleteAsyncError() | 5,357,183 |
def make_dqn_agent(q_agent_type,
arch,
n_actions,
lr=2.5e-4,
noisy_net_sigma=None,
buffer_length=10 ** 6,
final_epsilon=0.01,
final_exploration_frames=10 ** 6,
use_gpu=0,
replay_start_size=5 * 10 **4,
target_update_interval=3 * 10**4,
update_interval=4,
):
"""
    Given an architecture and a specific DQN variant,
    return the agent.
args:
q_agent_type: choices=["DQN", "DoubleDQN", "PAL"]
arch: choices=["nature", "nips", "dueling", "doubledqn"]
final_epsilon: Final value of epsilon during training
final_exploration_frames: Timesteps after which we stop annealing exploration rate
replay_start_size: Minimum replay buffer size before performing gradient updates.
target_update_interval: Frequency (in timesteps) at which the target network is updated
update_interval: Frequency (in timesteps) of network updates.
"""
# q function
q_func = parse_arch(arch, n_actions)
# explorer
if noisy_net_sigma is not None:
pnn.to_factorized_noisy(q_func, sigma_scale=noisy_net_sigma)
# turn off explorer
explorer = explorers.Greedy()
else:
        # default option
explorer = explorers.LinearDecayEpsilonGreedy(
1.0,
final_epsilon,
final_exploration_frames,
lambda: np.random.randint(n_actions),
)
# optimizer
# Use the Nature paper's hyperparameters
opt = pfrl.optimizers.RMSpropEpsInsideSqrt(
q_func.parameters(),
lr=lr,
alpha=0.95,
momentum=0.0,
eps=1e-2,
centered=True,
)
# replay_buffer
rbuf = replay_buffers.ReplayBuffer(buffer_length)
# Feature extractor
def phi(x):
return np.asarray(x, dtype=np.float32) / 255
Agent = parse_agent(q_agent_type)
agent = Agent(
q_func,
opt,
rbuf,
gpu=use_gpu, # 0 or -1
gamma=0.99,
explorer=explorer,
replay_start_size=replay_start_size,
target_update_interval=target_update_interval,
clip_delta=True,
update_interval=update_interval,
batch_accumulator="sum",
phi=phi,
)
return agent | 5,357,184 |
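# Hypothetical call showing typical argument choices; it requires pfrl plus this module's
# parse_arch/parse_agent helpers, so it is a sketch rather than a standalone script.
agent = make_dqn_agent(
    q_agent_type="DoubleDQN",
    arch="dueling",
    n_actions=6,
    use_gpu=-1,  # -1 runs on CPU
)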
def build_consensus_from_haplotypes(haplotypes):
"""
# ========================================================================
BUILD CONSENSUS FROM HAPLOTYPES
PURPOSE
-------
Builds a consensus from a list of Haplotype objects.
INPUT
-----
[HAPLOTYPE LIST] [haplotypes]
The list of haplotypes.
RETURN
------
[String] consensus
The consensus sequence.
# ========================================================================
"""
pileup = build_pileup_from_haplotypes(haplotypes)
consensus = pileup.build_consensus()
return consensus | 5,357,185 |
def resizeWindow(win, w, h, timeout=2.0):
"""Resize a window and wait until it has the correct size.
This is required for unit testing on some platforms that do not guarantee
immediate response from the windowing system.
"""
QtGui.QApplication.processEvents()
# Sometimes the window size will switch multiple times before settling
# on its final size. Adding qWaitForWindowExposed seems to help with this.
QtTest.QTest.qWaitForWindowExposed(win)
win.resize(w, h)
start = time.time()
while True:
w1, h1 = win.width(), win.height()
if (w,h) == (w1,h1):
return
QtTest.QTest.qWait(10)
if time.time()-start > timeout:
raise TimeoutError("Window resize failed (requested %dx%d, got %dx%d)" % (w, h, w1, h1)) | 5,357,186 |
def convert_data_for_rotation_averaging(
wTi_list: List[Pose3], i2Ti1_dict: Dict[Tuple[int, int], Pose3]
) -> Tuple[Dict[Tuple[int, int], Rot3], List[Rot3]]:
"""Converts the poses to inputs and expected outputs for a rotation averaging algorithm.
Args:
wTi_list: List of global poses.
i2Ti1_dict: Dictionary of (i1, i2) -> i2Ti1 relative poses.
Returns:
i2Ti1_dict's values mapped to relative rotations i2Ri1.
wTi_list mapped to global rotations.
"""
wRi_list = [x.rotation() for x in wTi_list]
i2Ri1_dict = {k: v.rotation() for k, v in i2Ti1_dict.items()}
return i2Ri1_dict, wRi_list | 5,357,187 |
def all_arrays_to_gpu(f):
"""Decorator to copy all the numpy arrays to the gpu before function
invokation"""
def inner(*args, **kwargs):
args = list(args)
for i in range(len(args)):
if isinstance(args[i], np.ndarray):
args[i] = to_gpu(args[i])
return f(*args, **kwargs)
return inner | 5,357,188 |
def fatal(*tokens: Token, exit_code: int = 1, **kwargs: Any) -> None:
"""Print an error message and exit the program
:param tokens: list of `ui` constants or strings, like
``(cli_ui.red, "this is a fatal error")``
:param exit_code: value of the exit code (default: 1)
"""
error(*tokens, **kwargs)
sys.exit(exit_code) | 5,357,189 |
def create_intersect_mask(num_v, max_v):
"""
Creates intersect mask as needed by polygon_intersection_new
in batch_poly_utils (for a single example)
"""
intersect_mask = np.zeros((max_v, max_v), dtype=np.float32)
for i in range(num_v - 2):
for j in range((i + 2) % num_v, num_v - int(i == 0)):
intersect_mask[i, j] = 1.
return intersect_mask | 5,357,190 |
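# For a quadrilateral (num_v=4) padded to max_v=6, only the two non-adjacent edge pairs
# may intersect: edge 0 with edge 2, and edge 1 with edge 3.
mask = create_intersect_mask(num_v=4, max_v=6)
print(mask[:4, :4])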
def gdi_abuse_tagwnd_technique_bitmap():
"""
Technique to be used on Win 10 v1703 or earlier. Locate the pvscan0 address with the help of tagWND structures
@return: pvscan0 address of the manager and worker bitmap and the handles
"""
window_address = alloc_free_windows(0)
manager_bitmap_handle = create_bitmap(0x100, 0x6D, 1)
manager_bitmap_pvscan0 = window_address + 0x50
window_address = alloc_free_windows(0)
worker_bitmap_handle = create_bitmap(0x100, 0x6D, 1)
worker_bitmap_pvscan0 = window_address + 0x50
return (manager_bitmap_pvscan0, worker_bitmap_pvscan0, manager_bitmap_handle, worker_bitmap_handle) | 5,357,191 |
def main():
"""CLI entrypoint"""
parser = Parser(
prog='unwad',
description='Default action is to convert files to png format and extract to xdir.',
epilog='example: unwad gfx.wad -d ./out => extract all files to ./out'
)
parser.add_argument(
'file',
metavar='file.wad',
action=ResolvePathAction
)
parser.add_argument(
'-l', '--list',
action='store_true',
help='list files'
)
parser.add_argument(
'-d',
metavar='xdir',
default=os.getcwd(),
dest='dest',
action=ResolvePathAction,
help='extract files into xdir'
)
parser.add_argument(
'-q',
dest='quiet',
action='store_true',
help='quiet mode'
)
parser.add_argument(
'-f',
dest='format',
default='png',
choices=['bmp','gif','png','tga'],
help='image format to convert to'
)
parser.add_argument(
'-v', '--version',
dest='version',
action='version',
help=argparse.SUPPRESS,
version=f'{parser.prog} version {qcli.__version__}'
)
args = parser.parse_args()
archive_name = os.path.basename(args.file)
if not wad.is_wadfile(args.file):
print(f'{parser.prog}: cannot find or open {args.file}', file=sys.stderr)
sys.exit(1)
if args.list:
with wad.WadFile(args.file) as wad_file:
info_list = sorted(wad_file.infolist(), key=lambda i: i.filename)
lump_types = {
0: 'NONE',
1: 'LABEL',
64: 'LUMP',
65: 'QTEX',
66: 'QPIC',
67: 'SOUND',
68: 'MIPTEX'
}
def lump_type(num):
if num in lump_types:
return lump_types[num]
return num
headers = ['Length', 'Type', 'Name']
table = [[i.file_size, lump_type(i.type), i.filename] for i in info_list]
length = sum([i.file_size for i in info_list])
count = len(info_list)
table.append([length, '', f'{count} file{"s" if count > 1 else ""}'])
separator = []
for i in range(len(headers)):
t = max(len(str(length)), len(headers[i]) + 2)
separator.append('-' * t)
table.insert(-1, separator)
print(f'Archive: {archive_name}')
print(tabulate(table, headers=headers))
sys.exit(0)
if not os.path.exists(args.dest):
os.makedirs(args.dest)
with wad.WadFile(args.file) as wad_file:
if not args.quiet:
print(f'Archive: {archive_name}')
# Flatten out palette
palette = []
for p in quake.palette:
palette += p
for item in wad_file.infolist():
filename = item.filename
fullpath = os.path.join(args.dest, filename)
fullpath_ext = '{0}.{1}'.format(fullpath, args.format)
data = None
size = None
# Pictures
if item.type == wad.LumpType.QPIC:
with wad_file.open(filename) as lmp_file:
lump = lmp.Lmp.open(lmp_file)
size = lump.width, lump.height
data = array.array('B', lump.pixels)
# Special cases
elif item.type == wad.LumpType.MIPTEX:
# Console characters
if item.file_size == 128 * 128:
size = 128, 128
with wad_file.open(filename) as lump:
data = lump.read(item.file_size)
else:
# Miptextures
try:
with wad_file.open(filename) as mip_file:
mip = wad.Miptexture.read(mip_file)
data = mip.pixels[:mip.width * mip.height]
data = array.array('B', data)
size = mip.width, mip.height
except:
print(f' failed to extract resource: {item.filename}', file=sys.stderr)
continue
try:
# Convert to image file
if data is not None and size is not None:
img = Image.frombuffer('P', size, data, 'raw', 'P', 0, 1)
img.putpalette(palette)
img.save(fullpath_ext)
if not args.quiet:
print(f' extracting: {fullpath_ext}')
# Extract as raw file
else:
wad_file.extract(filename, args.dest)
if not args.quiet:
print(f' extracting: {fullpath}')
except:
print(f'{parser.prog}: error: {sys.exc_info()[1]}', file=sys.stderr)
sys.exit(0) | 5,357,192 |
def check_stop() -> list:
"""Checks for entries in the stopper table in base db.
Returns:
list:
Returns the flag, caller from the stopper table.
"""
with db.connection:
cursor = db.connection.cursor()
flag = cursor.execute("SELECT flag, caller FROM stopper").fetchone()
return flag | 5,357,193 |
def initialize_debugging(flags, mode, callback, filename):
"""Initializes the logging system.
:param flags: The debug output control flags.
:type flags: DEBUG_FLAGS
:param mode: The output type.
:type mode: DEBUG_MODE
:param callback: Debugging callback, if applicable.
:type callback: DEBUG_CALLBACK like function
:param filename: The file to log to, if applicable.
    :type filename: str"""
ckresult(_dll.FMOD_Debug_Initialize(flags, mode, DEBUG_CALLBACK(callback), filename)) | 5,357,194 |
def open_controller(filename,ncircuits,use_sql):
""" starts stat gathering thread """
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.connect((control_host,control_port))
c = PathSupport.Connection(s)
c.authenticate(control_pass) # also launches thread...
c.debug(file(filename+".log", "w", buffering=0))
h = CircStatsGatherer(c,__selmgr,filename,ncircuits)
c.set_event_handler(h)
if use_sql:
from TorCtl import SQLSupport
SQLSupport.setup_db("sqlite:///"+filename+".sqlite", drop=True)
c.add_event_listener(SQLSupport.ConsensusTrackerListener())
c.add_event_listener(SQLSupport.CircuitListener())
global FUDValue
if not FUDValue:
FUDValue = c.get_option("FetchUselessDescriptors")[0][1]
c.set_option("FetchUselessDescriptors", "1")
c.set_events([TorCtl.EVENT_TYPE.STREAM,
TorCtl.EVENT_TYPE.BW,
TorCtl.EVENT_TYPE.NEWCONSENSUS,
TorCtl.EVENT_TYPE.NEWDESC,
TorCtl.EVENT_TYPE.CIRC,
TorCtl.EVENT_TYPE.STREAM_BW], True)
return c | 5,357,195 |
def test_environment_python_version_multi_digit(tmpdir: py.path.local) -> None:
"""
Make sure the constructor for env.Environment can handle multi-digit minor versions of Python to ensure compatibility with
Python 3.10+.
"""
with patch("sys.version_info", new=(3, 123, 0)):
# python version is not included in path on windows
with patch("sys.platform", new="linux"):
assert env.PythonEnvironment(env_path=str(tmpdir)).site_packages_dir == os.path.join(
str(tmpdir), "lib", "python3.123", "site-packages"
) | 5,357,196 |
def get_camelcase_name_chunks(name):
"""
Given a name, get its parts.
E.g: maxCount -> ["max", "count"]
"""
out = []
out_str = ""
for c in name:
if c.isupper():
if out_str:
out.append(out_str)
out_str = c.lower()
else:
out_str += c
out.append(out_str)
return out | 5,357,197 |
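# Examples; note that runs of capitals are split character by character by this helper.
assert get_camelcase_name_chunks("maxCount") == ["max", "count"]
assert get_camelcase_name_chunks("HTTPServerError") == ["h", "t", "t", "p", "server", "error"]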
def _GetBuildBotUrl(builder_host, builder_port):
"""Gets build bot URL for fetching build info.
Bisect builder bots are hosted on tryserver.chromium.perf, though we cannot
access this tryserver using host and port number directly, so we use another
tryserver URL for the perf tryserver.
Args:
builder_host: Hostname of the server where the builder is hosted.
    builder_port: Port number of the server where the builder is hosted.
Returns:
URL of the buildbot as a string.
"""
if (builder_host == PERF_BISECT_BUILDER_HOST and
builder_port == PERF_BISECT_BUILDER_PORT):
return PERF_TRY_SERVER_URL
else:
return 'http://%s:%s' % (builder_host, builder_port) | 5,357,198 |
def case_two_args_positional_callable_first(replace_by_foo):
""" Tests the decorator with one positional argument @my_decorator(goo) """
return replace_by_foo(goo, 'hello'), goo | 5,357,199 |