content (string, lengths 22 to 815k) | id (int64, 0 to 4.91M)
---|---|
def _to_original(sequence, result):
""" Cast result into the same type
>>> _to_original([], ())
[]
>>> _to_original((), [])
()
"""
if isinstance(sequence, tuple):
return tuple(result)
if isinstance(sequence, list):
return list(result)
return result | 5,353,700 |
def generate_oauth_service():
"""Prepare the OAuth2Service that is used to make requests later."""
return OAuth2Service(
client_id=os.environ.get('UBER_CLIENT_ID'),
client_secret=os.environ.get('UBER_CLIENT_SECRET'),
name=config.get('name'),
authorize_url=config.get('authorize_url'),
access_token_url=config.get('access_token_url'),
base_url=config.get('base_url'),
) | 5,353,701 |
def is_relative_path(value):
"""Check if the given value is a relative path"""
if urlparse(value).scheme in ('http', 'https', 'file'):
return False
return not os.path.isabs(value) | 5,353,702 |
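A minimal usage sketch for is_relative_path (assuming `os` is imported and `urlparse` comes from `urllib.parse`), showing the three cases the function distinguishes:

import os
from urllib.parse import urlparse

print(is_relative_path("docs/readme.md"))         # True  - plain relative path
print(is_relative_path("/etc/hosts"))             # False - absolute path
print(is_relative_path("https://example.com/x"))  # False - has a URL scheme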
def add_flops_counter_variable_or_reset(module: torch.nn.Module) -> None:
    """Initialize (or reset) the module's FLOPs counter to zero.

    :param module: module whose ``__flops__`` counter is set
    :type module: torch.nn.Module
    """
if is_supported_instance(module):
module.__flops__ = 0 | 5,353,703 |
def get_B_R(Rdot):
"""Get B_R from Q, Qdot"""
return Rdot | 5,353,704 |
def configure_nltk():
"""Downloads any required NLTK data if not already downloaded."""
nltk_resources_folder = get_nltk_resources_folder()
nltk.data.path.append(nltk_resources_folder)
try:
nltk.data.find("tokenizers/punkt")
    except LookupError:
logger.warning("NLTK punkt tokenizer was not found.")
logger.info("Downloading NLTK punkt tokenizer.")
nltk.download("punkt", download_dir=nltk_resources_folder)
try:
nltk.data.find("corpora/wordnet")
    except LookupError:
logger.warning("NLTK wordnet corpus was not found.")
logger.info("Downloading NLTK wordnet corpus.")
nltk.download("wordnet", download_dir=nltk_resources_folder)
try:
nltk.data.find("corpora/stopwords")
    except LookupError:
logger.warning("NLTK stopwords corpus was not found.")
logger.info("Downloading NLTK stopwords corpus.")
nltk.download("stopwords", download_dir=nltk_resources_folder) | 5,353,705 |
def run_coroutine_with_span(span, coro, *args, **kwargs):
"""Wrap the execution of a Tornado coroutine func in a tracing span.
This makes the span available through the get_current_span() function.
:param span: The tracing span to expose.
:param coro: Co-routine to execute in the scope of tracing span.
:param args: Positional args to func, if any.
:param kwargs: Keyword args to func, if any.
"""
with span_in_stack_context(span=span):
return coro(*args, **kwargs) | 5,353,706 |
def set_database_slide_metadata(database,table):
"""this will iterate and update various project related attributes that may not be set on initial parse
such as stain type, tissue_type , etc... """
## update stain_Type first
sql_lookup = "select * from `"+ database + "`.`dzi_pyramid_info` where stain_type is NULL "
metadata_dict_cursor.execute(sql_lookup)
data = metadata_dict_cursor.fetchall()
for row in data:
# print row
(found_tags, patient_id, section_id, stain) = parse_slide_info_for_ADRC_ID( row['pyramid_filename'])
if found_tags:
update_sql = "update `" + database + "`.`"+"dzi_pyramid_info` set stain_type='%s' where pyramid_id='%d'" % ( stain, row['pyramid_id'])
            print(update_sql)
update_cursor.execute(update_sql)
update_annotation_sql = "select * from `" + database + "`.`dzi_pyramid_info` where has_annotation is Null"
metadata_dict_cursor.execute(update_annotation_sql)
data = metadata_dict_cursor.fetchall()
for row in data:
        print(row) | 5,353,707 |
def run_pkg_tests(m, env_prefix):
"""
Run the tests defined in the recipe of a package in the given
environment.
"""
tmpdir = tempfile.mkdtemp()
try:
test_files = conda_build_test.create_test_files(m, tmpdir)
py_files, pl_files, shell_files = test_files
if not (py_files or pl_files or shell_files):
return
env = os.environ
env = prepend_bin_path(env, env_prefix, prepend_prefix=True)
conda_build_test.run_tests(m, env, tmpdir,
py_files, pl_files, shell_files)
finally:
shutil.rmtree(tmpdir) | 5,353,708 |
def calc_bin_centre(bin_edges):
"""
Calculates the centre of a histogram bin from the bin edges.
"""
return bin_edges[:-1] + np.diff(bin_edges) / 2 | 5,353,709 |
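A short worked example, assuming numpy is imported as np and calc_bin_centre is in scope:

import numpy as np

edges = np.array([0.0, 1.0, 2.0, 4.0])
print(calc_bin_centre(edges))          # [0.5 1.5 3. ]
# Typical use: x positions for plotting histogram counts
counts, edges = np.histogram(np.random.randn(1000), bins=20)
centres = calc_bin_centre(edges)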
def to_matrix(dG, tG, d_mat, t_mat, label_mat, bridges):
"""
Parameters:
    dG: drug graph
    tG: target graph
d_mat: drug feature matrix
t_mat: target feature matrix
label_mat: label matrix
bridges: known links between drugs and targets
Return:
    drug_feature, target_feature, new_label
"""
drug_feature, target_feature = {},{}
new_label = set()
for d,t,i in label_mat:
if d in dG.nodes and t in tG.nodes:
#d_vector = np.zeros(d_mat[d].shape)
#t_vector = np.zeros(t_mat[t].shape)
#if i == 1:
d_vector = d_mat[d]
t_vector = t_mat[t]
addressed_d = set()
addressed_t = set()
for link in bridges:
if link[0] in dG.nodes and link[1] in tG.nodes:
if nx.has_path(dG, d, link[0]) and nx.has_path(tG, t, link[1]):
if link[0] not in addressed_d:
#print(f'di: {d}, dl: {link[0]}')
max_sim_d = max_sim(d,link[0],dG)
d_vector = sim_vec(d_vector, d_mat[link[0]],max_sim_d)
addressed_d.add(link[0])
elif link[1] not in addressed_t:
#print(f'tj: {t}, tl: {link[1]}')
max_sim_t = max_sim(t,link[1],tG)
t_vector = sim_vec(t_vector, t_mat[link[1]],max_sim_t)
addressed_t.add(link[1])
drug_feature[d] = d_vector
target_feature[t] = t_vector
new_label.add((d,t,i))
return drug_feature, target_feature, new_label | 5,353,710 |
def ensureList(obj):
""" ensures that object is list """
if isinstance(obj, list):
        return obj  # returns original list
    elif hasattr(obj, '__iter__'):  # for python 2.x check if obj is iterable
return list(obj) # converts to list
else:
return [obj] | 5,353,711 |
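A quick behaviour sketch for the three branches; note that under Python 3 strings are iterable, so they are split into characters rather than wrapped:

assert ensureList([1, 2]) == [1, 2]      # lists pass through unchanged
assert ensureList((1, 2)) == [1, 2]      # other iterables are converted
assert ensureList("ab") == ["a", "b"]    # strings count as iterables in Python 3
assert ensureList(5) == [5]              # scalars are wrapped in a list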
def gelu_impl(x):
"""OpenAI's gelu implementation."""
return 0.5 * x * (1.0 + torch.tanh(0.7978845608028654 * x * (1.0 + 0.044715 * x * x))) | 5,353,712 |
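For reference, the constant 0.7978845608028654 is sqrt(2/pi); a small check (assuming torch is imported) against the exact erf-based GELU:

import math
import torch

x = torch.linspace(-3.0, 3.0, steps=7)
exact = 0.5 * x * (1.0 + torch.erf(x / math.sqrt(2.0)))  # exact GELU
print((exact - gelu_impl(x)).abs().max())                # difference stays around 1e-3 or below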
async def test_query_inst_chaincodes(org1_gw):
""" Tests querying instantiated chaincodes """
res = await org1_gw.query_instantiated_chaincodes()
assert res.chaincodes | 5,353,713 |
def get_shape(kind='line', x=None, y=None, x0=None, y0=None, x1=None, y1=None, span=0, color='red', dash='solid',
width=1,
fillcolor=None, fill=False, opacity=1, xref='x', yref='y'):
"""
Returns a plotly shape
Parameters:
-----------
kind : string
Shape kind
line
rect
circle
x : float
x values for the shape.
This assumes x0=x1
x0 : float
x0 value for the shape
x1 : float
x1 value for the shape
y : float
y values for the shape.
This assumes y0=y1
y0 : float
y0 value for the shape
y1 : float
y1 value for the shape
color : string
color for shape line
dash : string
line style
solid
dash
dashdot
dot
width : int
line width
fillcolor : string
shape fill color
fill : bool
If True then fill shape
If not fillcolor then the
line color will be used
opacity : float [0,1]
opacity of the fill
xref : string
Sets the x coordinate system
which this object refers to
'x'
'paper'
'x2' etc
yref : string
Sets the y coordinate system
which this object refers to
'y'
'paper'
'y2' etc
"""
if x1 is None:
if x0 is None:
if x is None:
xref = 'paper'
x0 = 0
x1 = 1
else:
x0 = x1 = x
else:
x1 = x0
if y1 is None:
if y0 is None:
if y is None:
yref = 'paper'
y0 = 0
y1 = 1
else:
y0 = y1 = y
else:
y1 = y0
shape = {'x0': x0,
'y0': y0,
'x1': x1,
'y1': y1,
'line': {
'color': normalize(color),
'width': width,
'dash': dash
},
'xref': xref,
'yref': yref
}
if kind == 'line':
shape['type'] = 'line'
elif kind == 'circle':
shape['type'] = 'circle'
elif kind == 'rect':
shape['type'] = 'rect'
else:
raise Exception("Invalid or unkown shape type : {0}".format(kind))
if (fill or fillcolor) and kind != 'line':
fillcolor = color if not fillcolor else fillcolor
fillcolor = to_rgba(normalize(fillcolor), opacity)
shape['fillcolor'] = fillcolor
return shape | 5,353,714 |
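A usage sketch, assuming the helper functions normalize and to_rgba referenced above are available from the surrounding plotting module: build a vertical reference line and a shaded band, then attach them to a Plotly figure layout.

vline = get_shape(kind='line', x=3, color='blue', dash='dash')      # vertical line at x=3, full y range (yref='paper')
band = get_shape(kind='rect', x0=1, x1=2, fill=True, opacity=0.3)   # shaded band between x=1 and x=2
# fig.update_layout(shapes=[vline, band])   # with an existing plotly figure `fig`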
def get_weights(weights_dic, model, epoch):
"""
callback returns matrix with n arrays where n is the number of features and each
array has m elements where m is the number of neurons
    records weights for each matching layer at each epoch and stores them in the provided dictionary
weights_dic: dictionary to save weights
model: keras model
epoch: current epoch number
    adapted from: https://stackoverflow.com/questions/42039548/how-to-check-the-weights-after-every-epoc-in-keras-model
"""
name_nr = 0
for layer in model.layers:
if("dense" in layer.name or "cov" in layer.name or "re_lu" in layer.name or
"softmax" in layer.name or "elu" in layer.name):
# 1st element of get_weights are weights, second is bias input
weights_dic[(epoch, name_nr)] = [layer.get_weights()[0]]
name_nr += 1 | 5,353,715 |
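Despite the docstring calling it a callback, this is a plain function; one way to run it after every epoch (a sketch assuming a compiled Keras model and training data already exist) is via keras.callbacks.LambdaCallback:

from tensorflow import keras

weights_dic = {}
record_weights = keras.callbacks.LambdaCallback(
    on_epoch_end=lambda epoch, logs: get_weights(weights_dic, model, epoch)
)
# model.fit(x_train, y_train, epochs=5, callbacks=[record_weights])
# weights_dic[(epoch, layer_index)] then holds the weight matrix of each matching layer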
def clean_row(elements: List[Tag]) -> List[Tag]:
"""
Clean MathML row, removing children that should not be considered tokens or child symbols.
One example of cleaning that should take place here is removing 'd' and 'δ' signs that are
used as derivatives, instead of as identifiers.
"""
# Remove whitespace between elements.
elements = [e for e in elements if not (isinstance(e, str) and e.isspace())]
# Remove quantifiers and double bars.
elements = [e for e in elements if e.text not in ["∀", "∃"]]
elements = [e for e in elements if e.text not in ["|", "∥"]]
# Remove 'd's and 'δ's used as signs for derivatives.
derivatives_cleaned = []
DERIVATIVE_GLYPHS = ["d", "δ", "∂"]
for i, e in enumerate(elements):
is_derivative_symbol = (
# Is the glyph a derivative sign?
e.name == "mi"
and e.text in DERIVATIVE_GLYPHS
# Is the next element a symbol?
and (i < len(elements) - 1 and _is_identifier(elements[i + 1]))
# Is the element after that either not a symbol, or another derivative sign?
and (
i == len(elements) - 2
or not _is_identifier(elements[i + 2])
or elements[i + 2].text in DERIVATIVE_GLYPHS
)
)
if not is_derivative_symbol:
derivatives_cleaned.append(e)
elements = derivatives_cleaned
return elements | 5,353,716 |
def models(estimators, cv_search, transform_search):
"""
Grid search prediction workflows. Used by bll6_models, test_models, and product_models.
Args:
estimators: collection of steps, each of which constructs an estimator
cv_search: dictionary of arguments to LeadCrossValidate to search over
transform_search: dictionary of arguments to LeadTransform to search over
Returns: a list drain.model.Predict steps constructed by taking the product of
    the estimators with the result of drain.util.dict_product on each of
cv_search and transform_search.
Each Predict step contains the following in its inputs graph:
- lead.model.cv.LeadCrossValidate
- lead.model.transform.LeadTransform
- drain.model.Fit
"""
steps = []
for cv_args, transform_args, estimator in product(
dict_product(cv_search), dict_product(transform_search), estimators):
cv = lead.model.cv.LeadCrossValidate(**cv_args)
cv.name = 'cv'
X_train = Call('__getitem__', inputs=[MapResults([cv], {'X':'obj', 'train':'key',
'test':None, 'aux':None})])
mean = Call('mean', inputs=[X_train])
mean.name = 'mean'
X_impute = Construct(data.impute,
inputs=[MapResults([cv], {'aux':None, 'test':None, 'train':None}),
MapResults([mean], 'value')])
cv_imputed = MapResults([X_impute, cv], ['X', {'X':None}])
cv_imputed.target = True
transform = lead.model.transform.LeadTransform(inputs=[cv_imputed], **transform_args)
transform.name = 'transform'
fit = model.Fit(inputs=[estimator, transform], return_estimator=True)
fit.name = 'fit'
y = model.Predict(inputs=[fit, transform],
return_feature_importances=True)
y.name = 'predict'
y.target = True
steps.append(y)
return steps | 5,353,717 |
def apply_sync_sensors(sensors_changes: DefaultDict[str, Set[str]]) -> None:
"""
    :param sensors_changes: mapping of changed model name ("inv.ObjectModel" / "inv.Object") to the set of changed ids
    :return: None
"""
from noc.inv.models.object import Object
from noc.inv.models.sensor import sync_object
query = Q()
if "inv.ObjectModel" in sensors_changes:
query |= Q(model__in=list(sensors_changes["inv.ObjectModel"]))
if "inv.Object" in sensors_changes:
query |= Q(id__in=list(sensors_changes["inv.Object"]))
if not query:
return
for o in Object.objects.filter(query):
sync_object(o) | 5,353,718 |
def cross(args: Namespace) -> None:
"""Corss task. Corss validate model on given dataset."""
records, n_state = _get_records(args)
result = cross_validate(n_state, records, args.k)
print(f"Mean Variance: {result.var:.3f}")
print(f"Mean StdDev: {result.std:.3f}")
print(f"Mean Error: {result.err:.3f}%") | 5,353,719 |
def xml_timestamp(location='Europe/Prague'):
"""Method creates timestamp including time zone
Args:
location (str): time zone location
Returns:
str: timestamp
"""
return datetime.datetime.now(pytz.timezone(location)).isoformat() | 5,353,720 |
def create_frame_drop_test_launch(filename,camera0,camera1,display=True):
"""
Creates launch file for the blob_finder based frame drop test.
"""
template_name = 'frame_drop_test_launch.xml'
machine_file = mct_utilities.file_tools.machine_launch_file
image_corr_topic_list = mct_introspection.find_topics_w_ending('seq_and_image_corr')
camera_to_image_corr = {}
for topic in image_corr_topic_list:
for camera in (camera0, camera1):
if '{0}/'.format(camera) in topic:
camera_to_image_corr[camera] = topic
blob_finder_launch = []
    for camera, topic in camera_to_image_corr.items():
topic_split = topic.split('/')
machine = topic_split[1]
launch_item = (
'/frame_drop_test/{0}'.format(camera),
topic,
machine
)
blob_finder_launch.append(launch_item)
# Create xml launch file
jinja2_env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir))
template = jinja2_env.get_template(template_name)
xml_str = template.render(
machine_file=machine_file,
blob_finder_launch=blob_finder_launch,
display=display,
)
with open(filename,'w') as f:
f.write(xml_str) | 5,353,721 |
def transform_postorder(comp, func):
"""Traverses `comp` recursively postorder and replaces its constituents.
For each element of `comp` viewed as an expression tree, the transformation
`func` is applied first to building blocks it is parameterized by, then the
element itself. The transformation `func` should act as an identity function
on the kinds of elements (computation building blocks) it does not care to
transform. This corresponds to a post-order traversal of the expression tree,
  i.e., parameters are always transformed left-to-right (in the order in which
they are listed in building block constructors), then the parent is visited
and transformed with the already-visited, and possibly transformed arguments
in place.
NOTE: In particular, in `Call(f,x)`, both `f` and `x` are arguments to `Call`.
Therefore, `f` is transformed into `f'`, next `x` into `x'` and finally,
`Call(f',x')` is transformed at the end.
Args:
comp: The computation to traverse and transform bottom-up.
func: The transformation to apply locally to each building block in `comp`.
It is a Python function that accepts a building block at input, and should
return either the same, or transformed building block at output. Both the
      input and output of `func` are instances of `ComputationBuildingBlock`.
Returns:
The result of applying `func` to parts of `comp` in a bottom-up fashion.
Raises:
TypeError: If the arguments are of the wrong computation_types.
NotImplementedError: If the argument is a kind of computation building block
that is currently not recognized.
"""
py_typecheck.check_type(comp,
computation_building_blocks.ComputationBuildingBlock)
if isinstance(
comp,
(computation_building_blocks.CompiledComputation,
computation_building_blocks.Data, computation_building_blocks.Intrinsic,
computation_building_blocks.Placement,
computation_building_blocks.Reference)):
return func(comp)
elif isinstance(comp, computation_building_blocks.Selection):
return func(
computation_building_blocks.Selection(
transform_postorder(comp.source, func), comp.name, comp.index))
elif isinstance(comp, computation_building_blocks.Tuple):
return func(
computation_building_blocks.Tuple([(k, transform_postorder(
v, func)) for k, v in anonymous_tuple.to_elements(comp)]))
elif isinstance(comp, computation_building_blocks.Call):
transformed_func = transform_postorder(comp.function, func)
if comp.argument is not None:
transformed_arg = transform_postorder(comp.argument, func)
else:
transformed_arg = None
return func(
computation_building_blocks.Call(transformed_func, transformed_arg))
elif isinstance(comp, computation_building_blocks.Lambda):
transformed_result = transform_postorder(comp.result, func)
return func(
computation_building_blocks.Lambda(
comp.parameter_name, comp.parameter_type, transformed_result))
elif isinstance(comp, computation_building_blocks.Block):
return func(
computation_building_blocks.Block(
[(k, transform_postorder(v, func)) for k, v in comp.locals],
transform_postorder(comp.result, func)))
else:
raise NotImplementedError(
'Unrecognized computation building block: {}'.format(str(comp))) | 5,353,722 |
def bytes_to_unicode_records(byte_string, delimiter, encoding):
""" Convert a byte string to a tuple containing an array of unicode
records and any remainder to be used as a prefix next time. """
string = byte_string.decode(encoding)
records = string.split(delimiter)
return (records[:-1], records[-1].encode(encoding)) | 5,353,723 |
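A sketch of the intended streaming use: keep the returned remainder and prepend it to the next chunk so records split across chunk boundaries are preserved (note the remainder comes back re-encoded as bytes):

chunks = [b"alpha\nbra", b"vo\ncharlie\ndel", b"ta\n"]
remainder, records = b"", []
for chunk in chunks:
    recs, remainder = bytes_to_unicode_records(remainder + chunk, "\n", "utf-8")
    records.extend(recs)
print(records)   # ['alpha', 'bravo', 'charlie', 'delta']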
def administrator():
"""Returns a :class:`t_system.administration.Administrator` instance."""
return Administrator() | 5,353,724 |
def ocr(path, lang='eng'):
"""Optical Character Recognition function.
Parameters
----------
path : str
Image path.
lang : str, optional
Decoding language. Default english.
    Returns
    -------
    str
        First five characters of the recognized text.
    """
image = Image.open(path)
vectorized_image = numpy.asarray(image).astype(numpy.uint8)
vectorized_image[:, :, 0] = 0
vectorized_image[:, :, 2] = 0
im = cv2.cvtColor(vectorized_image, cv2.COLOR_RGB2GRAY)
return pytesseract.image_to_string(
Image.fromarray(im),
lang=lang
)[:5] | 5,353,725 |
def get_rpd_vars():
"""Read RPD variables set by calling and parsing output from init
"""
cmd = get_init_call()
cmd = ' '.join(cmd) + ' && set | grep "^RPD_"'
try:
res = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        logger.fatal("Couldn't call init %s. Result was: %s", cmd, e.output)
raise
rpd_vars = dict()
for line in res.decode().splitlines():
if line.startswith('RPD_') and '=' in line:
#line = line.replace("export ", "")
#line = ''.join([c for c in line if c not in '";\''])
#logger.debug("line = {}".format(line))
k, v = line.split('=')
rpd_vars[k.strip()] = v.strip()
return rpd_vars | 5,353,726 |
def which(filename):
"""
Emulates the UNIX `which` command in Python.
Raises an IOError if no result is found.
"""
locations = os.environ.get("PATH").split(os.pathsep)
candidates = []
for location in locations:
candidate = os.path.join(location, filename)
if os.path.isfile(candidate) or os.path.islink(candidate):
candidates.append(candidate)
if len(candidates) == 0:
raise IOError("Could not find '{0}' in PATH".format(filename))
return candidates[0] | 5,353,727 |
def _GetAllHypervisorParameters(cluster, instances):
"""Compute the set of all hypervisor parameters.
@type cluster: L{objects.Cluster}
@param cluster: the cluster object
  @type instances: list of L{objects.Instance}
  @param instances: additional instances from which to obtain parameters
@rtype: list of (origin, hypervisor, parameters)
@return: a list with all parameters found, indicating the hypervisor they
apply to, and the origin (can be "cluster", "os X", or "instance Y")
"""
hvp_data = []
for hv_name in cluster.enabled_hypervisors:
hvp_data.append(("cluster", hv_name, cluster.GetHVDefaults(hv_name)))
for os_name, os_hvp in cluster.os_hvp.items():
for hv_name, hv_params in os_hvp.items():
if hv_params:
full_params = cluster.GetHVDefaults(hv_name, os_name=os_name)
hvp_data.append(("os %s" % os_name, hv_name, full_params))
# TODO: collapse identical parameter values in a single one
for instance in instances:
if instance.hvparams:
hvp_data.append(("instance %s" % instance.name, instance.hypervisor,
cluster.FillHV(instance)))
return hvp_data | 5,353,728 |
def is_solution(system, point):
"""
Checks whether the point is the solution for a given constraints system.
"""
a = np.array(system)
# get the left part
left = a[:, :-1] * point
left = sum(left.T)
# get the right part
right = (-1) * a[:, -1]
return np.all(left <= right) | 5,353,729 |
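Each row is expected as [a1, ..., an, c], encoding the constraint a1*x1 + ... + an*xn <= -c (the last column is the negated right-hand side). A worked example, assuming numpy is imported as np:

system = [[1, 1, -4],    # x + y <= 4
          [1, -1, -1]]   # x - y <= 1
print(is_solution(system, np.array([1, 2])))   # True:  3 <= 4 and -1 <= 1
print(is_solution(system, np.array([5, 0])))   # False: 5 <= 4 fails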
def git_config_bool(option: str) -> bool:
"""
Return a boolean git config value, defaulting to False.
"""
return git_config(option) == "true" | 5,353,730 |
def read_configs(paths):
"""
    Read YAML files and return a merged dict.
"""
eths = dict()
vlans = dict()
bonds = dict()
for path in paths:
cfg = read_config(path)
ifaces = cfg.get("network", dict())
if "ethernets" in ifaces:
eths.update(ifaces["ethernets"])
if "vlans" in ifaces:
vlans.update(ifaces["vlans"])
if "bonds" in ifaces:
bonds.update(ifaces["bonds"])
return dict(
ethernets=eths,
vlans=vlans,
bonds=bonds
) | 5,353,731 |
def get_client():
""" generates API client with personalized API key """
with open("api_key.json") as json_file:
apikey_data = json.load(json_file)
api_key = apikey_data['perspective_key']
# Generates API client object dynamically based on service name and version.
perspective = discovery.build('commentanalyzer', 'v1alpha1',
developerKey=api_key)
dlp = discovery.build('dlp', 'v2', developerKey=api_key)
return (apikey_data, perspective, dlp) | 5,353,732 |
def tables(
path,
fts4,
fts5,
counts,
nl,
arrays,
csv,
no_headers,
table,
fmt,
json_cols,
columns,
schema,
views=False,
):
"""List the tables in the database"""
db = sqlite_utils.Database(path)
headers = ["view" if views else "table"]
if counts:
headers.append("count")
if columns:
headers.append("columns")
if schema:
headers.append("schema")
def _iter():
if views:
items = db.view_names()
else:
items = db.table_names(fts4=fts4, fts5=fts5)
for name in items:
row = [name]
if counts:
row.append(db[name].count)
if columns:
cols = [c.name for c in db[name].columns]
if csv:
row.append("\n".join(cols))
else:
row.append(cols)
if schema:
row.append(db[name].schema)
yield row
if table:
print(tabulate.tabulate(_iter(), headers=headers, tablefmt=fmt))
elif csv:
writer = csv_std.writer(sys.stdout)
if not no_headers:
writer.writerow(headers)
for row in _iter():
writer.writerow(row)
else:
for line in output_rows(_iter(), headers, nl, arrays, json_cols):
click.echo(line) | 5,353,733 |
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
settings['route_patterns'] = {
'villages': '/geography.cfm',
'parameters': '/thesaurus.cfm',
'sources': '/bibliography.cfm',
'languages': '/languages.cfm',
'florafauna': '/florafauna.cfm',
'bangime': '/bangime.cfm',
'file': r'/_files/{id:[^/\.]+}',
'file_alt': r'/_files/{id:[^/\.]+}.{ext}',
}
config = Configurator(settings=settings)
config.include('clldmpg')
config.register_menu(
('dataset', partial(menu_item, 'dataset', label='Home')),
('languages', partial(menu_item, 'languages')),
('values', partial(menu_item, 'values', label='Lexicon')),
('parameters', partial(menu_item, 'parameters', label='Thesaurus')),
('villages', partial(menu_item, 'villages', label='Villages')),
('florafauna', partial(menu_item, 'florafauna', label='Flora-Fauna')),
#('contributors', partial(menu_item, 'contributors', label='Project members')),
('sources', partial(menu_item, 'sources', label='Materials')),
#('bangime', partial(menu_item, 'bangime', label='Bangime')),
#('other', partial(menu_item, 'other', label='Other Languages')),
('movies', partial(menu_item, 'movies', label='Videos')),
)
home_comp = config.registry.settings['home_comp']
home_comp = [
'bangime', 'other',
'contributors'] + home_comp
config.add_settings({'home_comp': home_comp})
config.register_resource('village', models.Village, IVillage, with_index=True)
config.register_resource('movie', models.Movie, IMovie, with_index=True)
config.register_resource('file', models.File, IFile, with_index=True)
config.registry.registerUtility(CustomFactoryQuery(), ICtxFactoryQuery)
config.add_page('bangime')
config.add_page('florafauna')
config.add_page('other')
config.add_page('typology')
return config.make_wsgi_app() | 5,353,734 |
def overrides(pattern, norminput):
"""Split a date subfield into beginning date and ending date. Needed for fields with
multiple hyphens.
Args:
pattern: date pattern
norminput: normalized date string
Returns:
start date portion of pattern
start date portion of norminput
end date portion of pattern
end date portion of norminput
"""
if pattern == 'NNNN-NNNN-':
return pattern[:4], pattern[5:9], norminput[:4], norminput[5:9]
if pattern == 'NNNN?-NNNN? av. j.-c.':
return pattern[:5], pattern[6:], norminput[:5], norminput[6:]
if pattern == 'NN---NNNN':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'NNNN-NNNN av. j.-c.':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'NNNN--':
return pattern[:4], None, norminput[:4], None
if pattern == 'NNNN-NN--':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'f. NNNN-NN-NN':
return pattern, None, norminput, None
if pattern == 'NNNN?-NNNN av. j.-c.':
return pattern[:5], pattern[6:], norminput[:5], norminput[6:]
if pattern == 'NN-NN-NNNN':
return pattern, None, norminput, None
if pattern == '-NNNN-':
return None, pattern[:-1], None, norminput[:-1]
if pattern == 'NNNN--NNNN':
return pattern[:4], pattern[6:], norminput[:4], norminput[6:]
if pattern == 'NNNN-NN--?':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'NNNNNNNN':
return pattern, None, norminput, None
if pattern == 'NN..-NNNN av. j.-c.':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'NNNN-NNN-':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'fl. NNNN-NNN-':
return pattern[:8], pattern[9:], norminput[:8], norminput[9:]
if pattern == 'NNNN av. j.-c.-NNNN':
return pattern[:-5], pattern[-4:], norminput[:-5], norminput[-4:]
if pattern == 'NNNN-NN-NN-':
return pattern[:-1], None, norminput[:-1], None
if pattern == 'NN-- -NNNN':
return pattern[:4], pattern[-4:], norminput[:4], norminput[-4:]
if pattern == 'NNNN-NN-NN':
return pattern, None, norminput, None
if pattern == 'NN..-NNNN? av. j.-c.':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'NNNN--...':
return pattern[:4], pattern[6:], norminput[:4], norminput[6:]
if pattern == 'fl. NNN--NNNN':
return pattern[:8], pattern[-4:], norminput[:8], norminput[-4:]
if pattern == 'fl. NN---NNNN':
return pattern[:8], pattern[-4:], norminput[:8], norminput[-4:]
if pattern == 'NN---NNNN?':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'fl. NNN--NNN-':
return pattern[:8], pattern[-4:], norminput[:8], norminput[-4:]
if pattern == 'NN..-NN.. av. j.-c.':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'NN--':
return pattern, None, norminput, None
if pattern == 'fl. NN--':
return pattern, None, norminput, None
if pattern == 'NN..?-NN..? av. j.-c.':
return pattern[:5], pattern[6:], norminput[:5], norminput[6:]
if pattern == 'NNN-NNN av. j.-c.':
return pattern[:3], pattern[4:], norminput[:3], norminput[4:]
if pattern == 'NN---NN--':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'NNN--NNN-':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'NN-..-NN..':
return pattern[:2]+pattern[3:5], pattern[6:], norminput[:2]+norminput[3:5], norminput[6:]
if pattern == 'NN---':
return pattern[:-1], None, norminput[:-1], None
if pattern == 'NNNN?-NNNN?':
return pattern[:5], pattern[6:], norminput[:5], norminput[6:]
if pattern == 'NNNN-NN-NN-NNNN-NN-NN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-N-NN':
return pattern, None, norminput, None
if pattern == 'NNNN-N-N':
return pattern, None, norminput, None
if pattern == 'NNNN-NNNN-NN-NN':
return pattern[:4], pattern[6:], norminput[:4], norminput[6:]
if pattern == 'NNNN-N-NN-NNNN-N-NN':
return pattern[:9], pattern[10:], norminput[:9], norminput[10:]
if pattern == 'NNNN-NN-NN-NNNN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-N-NN-NNNN-N-N':
return pattern[:9], pattern[10:], norminput[:9], norminput[10:]
if pattern == 'NNNN-N-N-NNNN-N-NN':
return pattern[:8], pattern[9:], norminput[:8], norminput[9:]
if pattern == 'NNNN-N-NN-NNNN-NN-NN':
return pattern[:9], pattern[10:], norminput[:9], norminput[10:]
if pattern == 'NNNN-NN-NN-NNNN-N-NN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'month NN NNNN-NNNN-NN-NN':
p = pattern.split('-', 1)
n = norminput.split('-', 1)
return p[0], p[1], n[0], n[1]
if pattern == 'NN month NNNN-NNNN-NN-NN':
p = pattern.split('-', 1)
n = norminput.split('-', 1)
return p[0], p[1], n[0], n[1]
if pattern == 'NNNN-N-N-NNNN-N-N':
return pattern[:8], pattern[9:], norminput[:8], norminput[9:]
if pattern == '-NNNN-NN-NN':
return None, pattern[1:], None, norminput[1:]
if pattern == 'NNNN-NN-NN-month NN NNNN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-N-N-NNNN-NN-NN':
return pattern[:8], pattern[9:], norminput[:8], norminput[9:]
if pattern == 'NNNN-NN-NN-NNNN-N-N':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-NN-NN-NN month NNNN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-NN-N-NNNN-N-NN':
return pattern[:9], pattern[10:], norminput[:9], norminput[10:]
if pattern == 'NNNN-N-NN-NNNN-NN-N':
return pattern[:9], pattern[10:], norminput[:9], norminput[10:]
if pattern == 'month N NNNN-NNNN-NN-NN':
p = pattern.split('-', 1)
n = norminput.split('-', 1)
return p[0], p[1], n[0], n[1]
if pattern == 'NNNN-N-N-month NN NNNN':
return pattern[:8], pattern[9:], norminput[:8], norminput[9:]
if pattern == 'NNNN-NN-NN-month N NNNN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-NN-NN-N month NNNN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-NN-N-NNNN-NN-NN':
return pattern[:9], pattern[10:], norminput[:9], norminput[10:]
if pattern == 'N month NNNN-NNNN-NN-NN':
p = pattern.split('-', 1)
n = norminput.split('-', 1)
return p[0], p[1], n[0], n[1]
if pattern == 'NNNN-NN-NN-NNNN-NN-N':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-NN-NN-NNNN/NN/NN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-N-N-NNNN-NN-N':
return pattern[:8], pattern[9:], norminput[:8], norminput[9:]
if pattern == 'NNNN-N-NN-NNNN':
return pattern[:9], pattern[10:], norminput[:9], norminput[10:]
if pattern == 'NNNN-NN-NN-month NNNN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-NN-N-NNNN-N-N':
return pattern[:9], pattern[10:], norminput[:9], norminput[10:]
if pattern == 'NNNN-NN-NN}}':
return pattern, None, norminput, None
if pattern == 'NN-NN-NNNN-NN-NN-NNNN':
return pattern[:10], pattern[11:], norminput[:10], norminput[11:]
if pattern == 'NNNN-N-N-month N NNNN':
return pattern[:8], pattern[9:], norminput[:8], norminput[9:]
if pattern == 'NNNN-NNNN-N-NN':
return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
if pattern == 'NNNN-N-NN-month NNNN':
return pattern[:9], pattern[10:], norminput[:9], norminput[10:]
if pattern == 'c. NNNN-NNNN-NN-NN':
return pattern[:7], pattern[8:], norminput[:7], norminput[8:]
    if pattern == 'NNNN-N-N-NNNN':
        return pattern[:4], pattern[5:], norminput[:4], norminput[5:]
    return None | 5,353,735 |
def defaultPolynomialLoad():
"""
    pytest fixture that returns a default PolynomialStaticLoad object
    :return: PolynomialStaticLoad object initialized with default values
"""
return PolynomialStaticLoad() | 5,353,736 |
def calc_pi(iteration_count, cores_usage):
"""
We calculate pi using Ulam's Monte Carlo method. See the module
documentation. The calculated value of pi is returned.
We use a process pool to offer the option of spreading the
calculation across more then one core.
iteration_count is the number of iterations that are run.
cores_usage is the number of processes to use.
"""
# We're using a multiprocessing pool here, to take advantage of
# multi-core CPUs.
# Calculate stuff for the pool.
pool_size = cores_usage
iterations_per_process = iteration_count // pool_size
work_list = [iterations_per_process] * pool_size
work_list[0] += iteration_count % pool_size
# Set up the pool.
calc_pool = mp.Pool(pool_size)
# Use the pool to obtain random points in the unit circle.
# We'll let the system determine the chunk size.
in_circle_total = sum(calc_pool.map(
count_is_in_cirle,
work_list))
# Finish the calculation. in_circle_total, divided by the total
# number of iterations, is the area of the unit circle
# relative to the [-1, 1] square. Multiply by 4, which is the area
# of the [-1, 1] square, to get the area of the unit circle.
# .NOTE. If you modify this program to run in Python 2.7, remember
# to modify this calculation to use floating point division (or
# import division from future).
return 4 * in_circle_total / iteration_count | 5,353,737 |
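The worker count_is_in_cirle is not shown in this snippet; a plausible (hypothetical) implementation matching the pool.map call above counts how many uniformly random points in the [-1, 1] square land inside the unit circle:

import random

def count_is_in_cirle(iterations):
    """Hypothetical worker: count random points that fall inside the unit circle."""
    hits = 0
    for _ in range(iterations):
        x, y = random.uniform(-1.0, 1.0), random.uniform(-1.0, 1.0)
        if x * x + y * y <= 1.0:
            hits += 1
    return hits

# print(calc_pi(1_000_000, cores_usage=4))   # roughly 3.14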
def send_postatus_errors():
"""Looks at postatus file and sends an email with errors"""
# Gah! Don't do this on stage!
if settings.STAGE:
return
def new_section(line):
return (line.startswith('dennis ')
or line.startswith('Totals')
or line.startswith('BUSTED')
or line.startswith('COMPILED'))
# Download the postatus file
postatus = requests.get('https://support.mozilla.org/media/postatus.txt')
# Parse it to see which locales have issues
lines = postatus.content.splitlines()
datestamp = lines.pop(0)
errordata = []
while lines:
line = lines.pop(0)
if line.startswith('>>> '):
while lines and not new_section(line):
errordata.append(line)
line = lines.pop(0)
# If we have errors to send, send them
if errordata:
mail_admins(
subject='[SUMO] postatus errors %s' % datestamp,
message=(
'These are the errors in the SUMO postatus file.\n' +
'See http://postatus.paas.allizom.org/p/SUMO for details\n' +
'and bug generation links.\n\n' +
'\n'.join(errordata)
)
) | 5,353,738 |
def pair_range_from_to(x): # cpdef pair_range(np.ndarray[long,ndim=1] x):
"""
    Returns a from_level-to_level matrix of half-cycle counts
x: Peak-Trough sequence (integer list of local minima and maxima)
This routine is implemented according to
"Recommended Practices for Wind Turbine Testing - 3. Fatigue Loads", 2. edition 1990, Appendix A
    except that a from_level-to_level-matrix of half-cycle counts is returned instead of a list of half-cycle-amplitudes
"""
x = x - np.min(x)
k = np.max(x)
n = x.shape[0]
    S = np.zeros(n + 1, dtype=int)  # integer levels, later used as indices into A
A = np.zeros((k + 1, k + 1))
S[1] = x[0]
ptr = 1
p = 1
q = 1
f = 0
# phase 1
while True:
p += 1
q += 1
# read
S[p] = x[ptr]
ptr += 1
if q == n:
f = 1
while p >= 4:
#print S[p - 3:p + 1]
#print S[p - 2], ">", S[p - 3], ", ", S[p - 1], ">=", S[p - 3], ", ", S[p], ">=", S[p - 2], (S[p - 2] > S[p - 3] and S[p - 1] >= S[p - 3] and S[p] >= S[p - 2])
#print S[p - 2], "<", S[p - 3], ", ", S[p - 1], "<=", S[p - 3], ", ", S[p], "<=", S[p - 2], (S[p - 2] < S[p - 3] and S[p - 1] <= S[p - 3] and S[p] <= S[p - 2])
#print (S[p - 2] > S[p - 3] and S[p - 1] >= S[p - 3] and S[p] >= S[p - 2]) or (S[p - 2] < S[p - 3] and S[p - 1] <= S[p - 3] and S[p] <= S[p - 2])
if (S[p - 2] > S[p - 3] and S[p - 1] >= S[p - 3] and S[p] >= S[p - 2]) or \
(S[p - 2] < S[p - 3] and S[p - 1] <= S[p - 3] and S[p] <= S[p - 2]):
A[S[p - 2], S[p - 1]] += 1
A[S[p - 1], S[p - 2]] += 1
S[p - 2] = S[p]
p -= 2
else:
break
if f == 1:
break # q==n
# phase 2
q = 0
while True:
q += 1
if p == q:
break
else:
#print S[q], "to", S[q + 1]
A[S[q], S[q + 1]] += 1
return A | 5,353,739 |
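A small usage sketch, assuming numpy is imported as np: the input is an integer peak/trough sequence, and the returned matrix A counts half cycles by (from-level, to-level) after the signal has been shifted so its minimum is zero.

turning_points = np.array([0, 2, 1, 3, 0, 4, 1, 3, 0])
A = pair_range_from_to(turning_points)
print(A.shape)   # (k + 1, k + 1) where k is the signal range, here (5, 5)
print(A.sum())   # total number of half cycles counted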
def serialize_action(
action: RetroReaction, molecule_store: MoleculeSerializer
) -> StrDict:
"""
Serialize a retrosynthesis action
:param action: the (re)action to serialize
:param molecule_store: the molecule serialization object
:return: the action as a dictionary
"""
dict_ = action.to_dict()
dict_["mol"] = molecule_store[dict_["mol"]]
dict_["class"] = f"{action.__class__.__module__}.{action.__class__.__name__}"
return dict_ | 5,353,740 |
def lint(where, strict):
"""Check style with linters"""
errors = []
try:
_lint_py(where, strict)
except CalledProcessError as e:
errors += [e]
try:
_eslint(where, strict)
except CalledProcessError as e:
errors += [e]
exit(len(errors)) | 5,353,741 |
def convert(obj, totype, debug=False, **kwargs):
"""Converto object obj to type totype.
The converter is chosen from gna.converters dictionary based on the type(obj) or one of it's base classes.
:obj: object to convert
:totype: the target type
Order:
1. Set type to type(obj).
2. Try to find converter for the current type. Return if found.
3. Try to find 'base' converter for the current type. Convert obj to base and return convert(newobj) if 'base' converter found.
4. Set type to next base type of obj. Repeat from 2.
Example:
convert( N.array([1, 2, 3]), R.vector('double') )
convert( N.array([1, 2, 3]), R.vector, dtype='double' )
"""
def msg( title, converter=None ):
res = title
if converter:
res+= ' '+converter.__name__
typestr = isinstance(totype, str) and totype or totype.__name__
res+=" to convert '{0}' ({1}) to '{2}'".format(
type(obj).__name__,
', '.join([base.__name__ for base in bases]),
typestr
)
if kwargs:
res+=' [kwargs: %s]'%( str( kwargs ) )
return res
bases = getmro(type(obj))
for base in bases:
bconverters = converters.get( base )
if not bconverters:
continue
converter = bconverters.get( totype )
if converter:
break
if 'base' in bconverters:
if debug:
print( 'Convert', type(obj).__name__, 'to base' )
return convert( bconverters['base'](obj), totype, debug, **kwargs )
else:
raise Exception(msg('Can not find converter'))
if debug:
print( msg( 'Using converter', converter ) )
return converter( obj, **kwargs ) | 5,353,742 |
def test_get_ctf_category():
"""test get_ctf_category function"""
assert get_ctf_category(rawsec_json) == ["binary_exploitation"] | 5,353,743 |
def complement(s):
"""
Return complement of 's'.
"""
    c = s.translate(__complementTranslation)
return c | 5,353,744 |
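__complementTranslation is defined elsewhere in the module; assuming a plain DNA alphabet, the table could be built with str.maketrans, e.g.:

__complementTranslation = str.maketrans("ACGTacgt", "TGCAtgca")

print(complement("GATTACA"))   # CTAATGT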
def get_state_transitions(actions):
"""
    Get the next state for each action.
    @param actions: iterable of (current_state, (id, next_path)) pairs
    @return: list of tuples (current_state, action, next_state)
"""
state_transition_pairs = []
for action in actions:
current_state = action[0]
id = action[1][0]
next_path = action[1][1]
next_state = copy.deepcopy(current_state)
if 'NoTrans' not in id:
# change the state
next_state[id] = next_path
state_transition_pairs.append((current_state, action[1], next_state))
return state_transition_pairs | 5,353,745 |
def add_label(
img: Image,
text: str,
x: int,
y: int,
ttf: str,
fontsize: float,
rgb: Tuple[int, int, int],
hjust: float = 0,
vjust: float = 0) -> None:
"""
Adds a label to an image.
"""
draw = ImageDraw.Draw(img)
font = get_font(ttf, fontsize)
w, h = draw.textsize(text, font)
x = justify_to_point(x, w, hjust)
y = justify_to_point(y, h, vjust)
draw.multiline_text((x, y), text, rgb, font=font,
align=get_align_from_hjust(hjust)) | 5,353,746 |
def test_list_unsigned_short_max_length_nistxml_sv_iv_list_unsigned_short_max_length_1_1(mode, save_output, output_format):
"""
Type list/unsignedShort is restricted by facet maxLength with value 5.
"""
assert_bindings(
schema="nistData/list/unsignedShort/Schema+Instance/NISTSchema-SV-IV-list-unsignedShort-maxLength-1.xsd",
instance="nistData/list/unsignedShort/Schema+Instance/NISTXML-SV-IV-list-unsignedShort-maxLength-1-1.xml",
class_name="NistschemaSvIvListUnsignedShortMaxLength1",
version="1.1",
mode=mode,
save_output=save_output,
output_format=output_format,
structure_style="filenames",
) | 5,353,747 |
def parse_pascal_string(characterset, data):
"""
Read a Pascal string from a byte array using the given character set.
:param characterset: Character set to use to decode the string
:param data: binary data
:return: tuple containing string and number of bytes consumed
"""
string_size_format, string_size_size, character_size = get_string_size_format(characterset)
if len(data) < string_size_size:
raise FileParseException("String size truncated")
string_size = struct.unpack("<" + string_size_format, data[0:string_size_size])[0] * character_size
string_data = data[string_size_size:string_size_size + string_size]
result = string_data.decode(CHARACTER_SETS[characterset])
total_size = string_size_size + string_size
return result, total_size | 5,353,748 |
def setup(bot: Neorg) -> None:
"""Add Cog to Bot."""
bot.add_cog(BotControl(bot)) | 5,353,749 |
def add_single_expense_cli():
"""Cli version add single expenses."""
add_single_expense(dev_db) | 5,353,750 |
def percent_cb(name, complete, total):
""" Callback for updating target progress """
Logger.debug('{}: {} transferred out of {}'.format(name,
sizeof_fmt(complete),
sizeof_fmt(total)))
progress.update_target(name, complete, total) | 5,353,751 |
def test_index_availability(client):
"""Index page must be present and HTTP Status Code have to be `200`"""
response = client.get('/')
assert response.status_code == 200 | 5,353,752 |
def folder(initial=None, title='Select Folder'):
"""Request to select an existing folder or to create a new folder.
Parameters
----------
initial : :class:`str`, optional
The initial directory to start in.
title : :class:`str`, optional
The text to display in the title bar of the dialog window.
Returns
-------
:class:`str`
The name of the selected folder or :obj:`None` if the user cancelled
the request to select a folder.
"""
app, title = _get_app_and_title(title)
name = QtWidgets.QFileDialog.getExistingDirectory(app.activeWindow(), title, initial)
return name if len(name) > 0 else None | 5,353,753 |
def build_clusters(
metadata: pd.DataFrame,
ipm_regions: Sequence[str],
min_capacity: float = None,
max_clusters: int = None,
) -> pd.DataFrame:
"""Build resource clusters."""
if max_clusters is None:
max_clusters = np.inf
if max_clusters < 1:
raise ValueError("Max number of clusters must be greater than zero")
df = metadata
cdf = _get_base_clusters(df, ipm_regions).sort_values("lcoe")
if cdf.empty:
raise ValueError(f"No resources found in {ipm_regions}")
if min_capacity:
# Drop clusters with highest LCOE until min_capacity reached
end = cdf["mw"].cumsum().searchsorted(min_capacity) + 1
if end > len(cdf):
raise ValueError(
f"Capacity in {ipm_regions} ({cdf['mw'].sum()} MW) less than minimum ({min_capacity} MW)"
)
cdf = cdf[:end]
# Track ids of base clusters through aggregation
cdf["ids"] = [[x] for x in cdf["id"]]
# Aggregate clusters within each metro area (metro_id)
while len(cdf) > max_clusters:
# Sort parents by lowest LCOE distance of children
diff = lambda x: abs(x.max() - x.min())
parents = (
cdf.groupby("parent_id", sort=False)
.agg(child_ids=("id", list), n=("id", "count"), lcoe=("lcoe", diff))
.sort_values(["n", "lcoe"], ascending=[False, True])
)
if parents.empty:
break
if parents["n"].iloc[0] == 2:
# Choose parent with lowest LCOE
best = parents.iloc[0]
# Compute parent
parent = pd.Series(
_merge_children(
cdf.loc[best["child_ids"]],
ids=_flat(*cdf.loc[best["child_ids"], "ids"]),
**df.loc[best.name],
)
)
# Add parent
cdf.loc[best.name] = parent
# Drop children
cdf.drop(best["child_ids"], inplace=True)
else:
# Promote child with deepest parent
parent_id = df.loc[parents.index, "cluster_level"].idxmax()
parent = df.loc[parent_id]
child_id = parents.loc[parent_id, "child_ids"][0]
# Update child
columns = ["id", "parent_id", "cluster_level"]
cdf.loc[child_id, columns] = parent[columns]
# Update index
cdf.rename(index={child_id: parent_id}, inplace=True)
# Keep only computed columns
columns = _flat(MEANS, SUMS, "ids")
columns = [col for col in columns if col in cdf.columns]
cdf = cdf[columns]
cdf.reset_index(inplace=True, drop=True)
if len(cdf) > max_clusters:
# Aggregate singleton metro area clusters
Z = scipy.cluster.hierarchy.linkage(cdf[["lcoe"]].values, method="ward")
# TODO: Store mask in temporary table
cdf["_keep"] = True
for child_idx in Z[:, 0:2].astype(int):
cdf.loc[child_idx, "_keep"] = False
parent = _merge_children(
cdf.loc[child_idx], _keep=True, ids=_flat(*cdf.loc[child_idx, "ids"])
)
cdf.loc[len(cdf)] = parent
if not cdf["_keep"].sum() > max_clusters:
break
cdf = cdf[cdf["_keep"]]
return cdf[columns] | 5,353,754 |
def plot_lift_cruise_network(results, line_color = 'bo-',line_color2 = 'r^-', save_figure = False, save_filename = "Lift_Cruise_Network", file_type = ".png"):
"""This plots the electronic and propulsor performance of a vehicle with a lift cruise network
Assumptions:
None
Source:
None
Inputs:
results.segments.conditions.propulsion
throttle
lift_rotor_throttle
battery_energy
battery_specfic_power
voltage_under_load
voltage_open_circuit
Outputs:
Plots
Properties Used:
N/A
"""
axis_font = {'size':'14'}
# ------------------------------------------------------------------
# Electronic Conditions
# ------------------------------------------------------------------
fig = plt.figure("Lift_Cruise_Battery_Pack_Conditions")
fig.set_size_inches(16, 8)
for i in range(len(results.segments)):
time = results.segments[i].conditions.frames.inertial.time[:,0] / Units.min
eta = results.segments[i].conditions.propulsion.throttle[:,0]
eta_l = results.segments[i].conditions.propulsion.throttle_lift[:,0]
energy = results.segments[i].conditions.propulsion.battery_energy[:,0]/ Units.Wh
specific_power = results.segments[i].conditions.propulsion.battery_specfic_power[:,0]
volts = results.segments[i].conditions.propulsion.battery_voltage_under_load[:,0]
volts_oc = results.segments[i].conditions.propulsion.battery_voltage_open_circuit[:,0]
plt.title('Battery Pack Conditions')
axes = plt.subplot(2,2,1)
axes.set_ylabel('Throttle',axis_font)
set_axes(axes)
plt.ylim((0,1))
if i == 0:
axes.plot(time, eta, line_color,label='Propeller Motor')
axes.plot(time, eta_l, line_color2,label='Lift Rotor Motor')
axes.legend(loc='upper center')
else:
axes.plot(time, eta, line_color)
axes.plot(time, eta_l, line_color2)
axes = plt.subplot(2,2,2)
axes.plot(time, energy, line_color)
axes.set_ylabel('Battery Energy (W-hr)',axis_font)
set_axes(axes)
axes = plt.subplot(2,2,3)
axes.set_ylabel('Battery Voltage (Volts)',axis_font)
axes.set_xlabel('Time (mins)',axis_font)
set_axes(axes)
if i == 0:
axes.plot(time, volts, line_color,label='Under Load')
axes.plot(time,volts_oc, line_color2,label='Open Circuit')
axes.legend(loc='upper center')
else:
axes.plot(time, volts, line_color)
axes.plot(time,volts_oc,line_color2)
axes = plt.subplot(2,2,4)
axes.plot(time, specific_power, line_color)
axes.set_xlabel('Time (mins)',axis_font)
axes.set_ylabel('Specific Power',axis_font)
set_axes(axes)
plt.tight_layout()
if save_figure:
plt.savefig("Lift_Cruise_Battery_Pack_Conditions" + file_type)
# ------------------------------------------------------------------
# Propulsion Conditions
# ------------------------------------------------------------------
fig = plt.figure("Prop-Rotor Network")
fig.set_size_inches(16, 8)
for i in range(len(results.segments)):
time = results.segments[i].conditions.frames.inertial.time[:,0] / Units.min
prop_rpm = results.segments[i].conditions.propulsion.propeller_rpm[:,0]
prop_thrust = results.segments[i].conditions.frames.body.thrust_force_vector[:,0]
prop_torque = results.segments[i].conditions.propulsion.propeller_motor_torque[:,0]
prop_effp = results.segments[i].conditions.propulsion.propeller_efficiency[:,0]
prop_effm = results.segments[i].conditions.propulsion.propeller_motor_efficiency[:,0]
prop_Cp = results.segments[i].conditions.propulsion.propeller_power_coefficient[:,0]
lift_rotor_rpm = results.segments[i].conditions.propulsion.lift_rotor_rpm[:,0]
lift_rotor_thrust = -results.segments[i].conditions.frames.body.thrust_force_vector[:,2]
lift_rotor_torque = results.segments[i].conditions.propulsion.lift_rotor_motor_torque[:,0]
lift_rotor_effp = results.segments[i].conditions.propulsion.lift_rotor_efficiency[:,0]
lift_rotor_effm = results.segments[i].conditions.propulsion.lift_rotor_motor_efficiency[:,0]
lift_rotor_Cp = results.segments[i].conditions.propulsion.lift_rotor_power_coefficient[:,0]
# title
plt.title("Prop-Rotor Network")
# plots
axes = plt.subplot(2,3,1)
axes.plot(time, prop_rpm, line_color)
axes.plot(time, lift_rotor_rpm, line_color2)
axes.set_ylabel('RPM',axis_font)
set_axes(axes)
axes = plt.subplot(2,3,2)
axes.plot(time, prop_thrust,line_color)
axes.plot(time, lift_rotor_thrust, line_color2)
axes.set_ylabel('Thrust (N)',axis_font)
set_axes(axes)
axes = plt.subplot(2,3,3)
axes.plot(time, prop_torque, line_color)
axes.plot(time, lift_rotor_torque, line_color2)
axes.set_ylabel('Torque (N-m)',axis_font)
set_axes(axes)
axes = plt.subplot(2,3,4)
axes.plot(time, prop_effp, line_color )
axes.plot(time, lift_rotor_effp, line_color2)
axes.set_xlabel('Time (mins)',axis_font)
axes.set_ylabel(r'Propeller Efficiency, $\eta_{propeller}$',axis_font)
set_axes(axes)
plt.ylim((0,1))
axes = plt.subplot(2,3,5)
axes.plot(time, prop_effm, line_color )
axes.plot(time, lift_rotor_effm,line_color2)
axes.set_xlabel('Time (mins)',axis_font)
axes.set_ylabel(r'Motor Efficiency, $\eta_{motor}$',axis_font)
set_axes(axes)
plt.ylim((0,1))
axes = plt.subplot(2,3,6)
axes.plot(time, prop_Cp,line_color )
axes.plot(time, lift_rotor_Cp, line_color2 )
axes.set_xlabel('Time (mins)',axis_font)
axes.set_ylabel('Power Coefficient, $C_{P}$',axis_font)
set_axes(axes)
plt.tight_layout()
if save_figure:
plt.savefig("Propulsor_Network" + file_type)
# ------------------------------------------------------------------
# Propulsion Conditions
# ------------------------------------------------------------------
fig = plt.figure("Lift_Rotor")
fig.set_size_inches(16, 8)
for i in range(len(results.segments)):
time = results.segments[i].conditions.frames.inertial.time[:,0] / Units.min
rpm = results.segments[i].conditions.propulsion.lift_rotor_rpm [:,0]
thrust = results.segments[i].conditions.frames.body.thrust_force_vector[:,2]
torque = results.segments[i].conditions.propulsion.lift_rotor_motor_torque
effp = results.segments[i].conditions.propulsion.lift_rotor_efficiency[:,0]
effm = results.segments[i].conditions.propulsion.lift_rotor_motor_efficiency[:,0]
Cp = results.segments[i].conditions.propulsion.lift_rotor_power_coefficient[:,0]
# title
plt.title("Lift Rotor")
# plots
axes = plt.subplot(2,3,1)
axes.plot(time, rpm, line_color2)
axes.set_ylabel('RPM',axis_font)
set_axes(axes)
axes = plt.subplot(2,3,2)
axes.plot(time, -thrust, line_color2)
axes.set_ylabel('Thrust (N)',axis_font)
set_axes(axes)
axes = plt.subplot(2,3,3)
axes.plot(time, torque, line_color2 )
axes.set_ylabel('Torque (N-m)',axis_font)
set_axes(axes)
axes = plt.subplot(2,3,4)
axes.plot(time, effp, line_color2,label= r'$\eta_{lift rotor}$' )
axes.set_xlabel('Time (mins)',axis_font)
axes.set_ylabel(r'Propeller Efficiency, $\eta_{lift rotor}$',axis_font)
set_axes(axes)
plt.ylim((0,1))
axes = plt.subplot(2,3,5)
axes.plot(time, effm, line_color2 )
axes.set_xlabel('Time (mins)',axis_font)
axes.set_ylabel(r'Motor Efficiency, $\eta_{mot}$',axis_font)
set_axes(axes)
plt.ylim((0,1))
axes = plt.subplot(2,3,6)
axes.plot(time, Cp , line_color2 )
axes.set_xlabel('Time (mins)',axis_font)
axes.set_ylabel('Power Coefficient, $C_{P}$',axis_font)
set_axes(axes)
plt.tight_layout()
if save_figure:
plt.savefig("Lift_Rotor" + file_type)
# ------------------------------------------------------------------
# Propulsion Conditions
# ------------------------------------------------------------------
fig = plt.figure("Propeller")
fig.set_size_inches(16, 8)
for i in range(len(results.segments)):
time = results.segments[i].conditions.frames.inertial.time[:,0] / Units.min
rpm = results.segments[i].conditions.propulsion.propeller_rpm [:,0]
thrust = results.segments[i].conditions.frames.body.thrust_force_vector[:,0]
torque = results.segments[i].conditions.propulsion.propeller_motor_torque[:,0]
effp = results.segments[i].conditions.propulsion.propeller_efficiency[:,0]
effm = results.segments[i].conditions.propulsion.propeller_motor_efficiency[:,0]
Cp = results.segments[i].conditions.propulsion.propeller_power_coefficient[:,0]
# title
plt.title("Propeller")
# plots
axes = plt.subplot(2,3,1)
axes.plot(time, rpm,line_color)
axes.set_ylabel('RPM')
set_axes(axes)
axes = plt.subplot(2,3,2)
axes.plot(time, thrust,line_color)
axes.set_ylabel('Thrust (N)',axis_font)
set_axes(axes)
axes = plt.subplot(2,3,3)
axes.plot(time, torque, line_color)
axes.set_ylabel('Torque (N-m)',axis_font)
set_axes(axes)
axes = plt.subplot(2,3,4)
axes.plot(time, effp,line_color)
axes.set_xlabel('Time (mins)',axis_font)
axes.set_ylabel(r'Propeller Efficiency $\eta_{propeller}$',axis_font)
set_axes(axes)
plt.ylim((0,1))
axes = plt.subplot(2,3,5)
axes.plot(time, effm,line_color )
axes.set_xlabel('Time (mins)',axis_font)
axes.set_ylabel(r'Motor Efficiency $\eta_{motor}$',axis_font)
set_axes(axes)
axes = plt.subplot(2,3,6)
axes.plot(time, Cp, line_color )
axes.set_xlabel('Time (mins)',axis_font)
axes.set_ylabel('Power Coefficient',axis_font)
set_axes(axes)
plt.tight_layout()
if save_figure:
plt.savefig("Cruise_Propulsor" + file_type)
# ------------------------------------------------------------------
# Propulsion Conditions
# ------------------------------------------------------------------
fig = plt.figure("Tip_Mach")
for i in range(len(results.segments)):
time = results.segments[i].conditions.frames.inertial.time[:,0] / Units.min
rtm = results.segments[i].conditions.propulsion.lift_rotor_tip_mach[:,0]
ptm = results.segments[i].conditions.propulsion.propeller_tip_mach[:,0]
# title
plt.title("Tip Mach Number")
# plots
axes = plt.subplot(1,1,1)
axes.set_ylabel('Mach',axis_font)
set_axes(axes)
if i == 0:
axes.plot(time, ptm, line_color,label='Propeller')
axes.plot(time, rtm, line_color2,label='Lift Rotor')
axes.legend(loc='upper center')
else:
axes.plot(time, ptm, line_color )
axes.plot(time, rtm, line_color2 )
plt.tight_layout()
if save_figure:
plt.savefig("Tip_Mach" + file_type)
return | 5,353,755 |
def add_asset(zs_code, code, name, category):
"""添加资产品种到数据库"""
_, created = Asset.get_or_create(
zs_code=zs_code,
code=code,
name=name,
category=category,
)
if created:
LOGGER.info('created asset in database successfully')
else:
LOGGER.warning('asset is already in database') | 5,353,756 |
def tqdm_hook(t: tqdm) -> Any:
"""Progressbar to visualisation downloading progress."""
last_b = [0]
def update_to(b: int = 1, bsize: int = 1, t_size: Optional[int] = None) -> None:
if t_size is not None:
t.total = t_size
t.update((b - last_b[0]) * bsize)
last_b[0] = b
return update_to | 5,353,757 |
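A usage sketch with urllib.request.urlretrieve, whose reporthook is called as (block_number, block_size, total_size), matching the update_to signature (the URL is a placeholder):

from urllib.request import urlretrieve
from tqdm import tqdm

url = "https://example.com/archive.zip"   # placeholder
with tqdm(unit="B", unit_scale=True, desc="downloading") as t:
    urlretrieve(url, "archive.zip", reporthook=tqdm_hook(t))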
def fix_mapping_versions():
"""
This function monkey patches the mapping version information in arcpy to support the currently installed version,
along with past versions if they are not included (arcpy 10.5 does not have 10.4 supported version, but the
support is there under the hood).
"""
# get ArcGIS version as a number
ags_version = Decimal(re.search(r"^(\d+\.\d+)", arcpy.GetInstallInfo()['Version'], re.IGNORECASE).group(1))
# surrounded in try/pass to fail gracefully in case Esri change the design of this internal API
try:
versions = arcpy._mapping.constants.__args__["version"]
if ags_version >= Decimal("10.4") and "10.4" not in versions:
versions["10.4"] = 104
if ags_version >= Decimal("10.5") and "10.5" not in versions:
versions["10.5"] = 105
if ags_version >= Decimal("10.6") and "10.6" not in versions:
versions["10.6"] = 106
except:
pass | 5,353,758 |
def check_regular_timestamps(
time_series: TimeSeries, time_tolerance_decimals: int = 9, gb_severity_threshold: float = 1.0
):
"""If the TimeSeries uses timestamps, check if they are regular (i.e., they have a constant rate)."""
if (
time_series.timestamps is not None
and len(time_series.timestamps) > 2
and check_regular_series(series=time_series.timestamps, tolerance_decimals=time_tolerance_decimals)
):
timestamps = np.array(time_series.timestamps)
if timestamps.size * timestamps.dtype.itemsize > gb_severity_threshold * 1e9:
severity = Severity.HIGH
else:
severity = Severity.LOW
return InspectorMessage(
severity=severity,
message=(
"TimeSeries appears to have a constant sampling rate. "
f"Consider specifying starting_time={time_series.timestamps[0]} "
f"and rate={time_series.timestamps[1] - time_series.timestamps[0]} instead of timestamps."
),
) | 5,353,759 |
def test_device_bypass(monkeypatch):
"""Test setting the bypass status of a device."""
_was_called = False
def _call_bypass(url, body, **kwargs):
nonlocal _was_called
assert url == "/appservices/v6/orgs/Z100/device_actions"
assert body == {"action_type": "BYPASS", "device_id": [6023], "options": {"toggle": "OFF"}}
_was_called = True
return StubResponse(None, 204)
api = call_cbcloud_api()
patch_cbc_sdk_api(monkeypatch, api, POST=_call_bypass)
api.device_bypass([6023], False)
assert _was_called | 5,353,760 |
def test_run3(client):
"""
Tests that a docker executor raises RuntimeError if no
    docker_host and docker_port are available in the execution metadata
"""
with client.application.app_context():
task, job, execution = JobExecutionFixture.new_defaults()
_, pool_mock, _ = PoolFixture.new_defaults(r"test-.+")
exe = Executor(app=client.application, pool=pool_mock)
del execution.metadata["docker_host"]
del execution.metadata["docker_port"]
msg = "Can't run job without docker_host and docker_port in execution metadata."
with expect.error_to_happen(RuntimeError, message=msg):
exe.run(
task,
job,
execution,
"mock-image",
"latest",
"command",
blacklisted_hosts=set(),
)
expect(execution.metadata).not_to_include("docker_host")
expect(execution.metadata).not_to_include("docker_port") | 5,353,761 |
def import_config_module( cfg_file ):
""" Returns valid imported config module.
"""
cfg_file = re.sub( r'\.py$', '', cfg_file )
cfg_file = re.sub( r'-', '_', cfg_file )
mod_name = 'config.' + cfg_file
cfg_mod = importlib.import_module( mod_name )
if not hasattr( cfg_mod, 'pre_start_config' ):
raise ImportError( 'Config file must define \'pre_start_config\' method' )
if not hasattr( cfg_mod, 'post_start_config' ):
raise ImportError( 'Config file must define \'post_start_config\' method' )
return cfg_mod | 5,353,762 |
def process_batches(args, batches):
"""Runs a set of batches, and merges the resulting output files if more
than one batch is included.
"""
nbatches = min(args.nbatches, len(batches))
pool = multiprocessing.Pool(nbatches, init_worker_thread)
try:
batches = pool.imap(run_batch, batches, 1)
if not merge_batch_results(batches):
pool.terminate()
pool.join()
return 1
pool.close()
pool.join()
return 0
except:
pool.terminate()
pool.join()
raise | 5,353,763 |
def test_vcfield_pk_lookups_work(modelname, name, number, parent_int,
parent_str, expected_pk, testmodels,
make_instance, noise_data):
"""
When a VirtualCompField is a PK on a model, lookups that use `pk`
as a field (instead of the field name) should work.
"""
tmodel = testmodels[modelname]
test_inst = make_instance(modelname, name, number, parent_int, parent_str)
noise = [make_instance(modelname, *f) for f in noise_data(5)]
assert len(tmodel.objects.all()) == 6
assert tmodel.objects.get(pk=expected_pk) == test_inst
assert tmodel.objects.filter(pk=expected_pk)[0] == test_inst | 5,353,764 |
def tripledes_cbc_pkcs5_decrypt(key, data, iv):
"""
Decrypts 3DES ciphertext in CBC mode using either the 2 or 3 key variant
(16 or 24 byte long key) and PKCS#5 padding.
:param key:
The encryption key - a byte string 16 or 24 bytes long (2 or 3 key mode)
:param data:
The ciphertext - a byte string
:param iv:
The initialization vector - a byte string 8-bytes long
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
OSError - when an error is returned by OpenSSL
:return:
A byte string of the plaintext
"""
if len(key) != 16 and len(key) != 24:
raise ValueError(pretty_message(
'''
key must be 16 bytes (2 key) or 24 bytes (3 key) long - is %s
''',
len(key)
))
if len(iv) != 8:
raise ValueError(pretty_message(
'''
iv must be 8 bytes long - is %s
''',
len(iv)
))
ctx = triple_des(key, mode=DES_CBC, IV=iv, padmode=DES_PAD_PKCS5)
return ctx.decrypt(data) | 5,353,765 |
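# Usage sketch for tripledes_cbc_pkcs5_decrypt. Assumption: the snippet's DES_CBC and
# DES_PAD_PKCS5 are the pyDes CBC and PAD_PKCS5 constants, so a pyDes-encrypted message
# round-trips through it. The key, IV and message below are illustrative values only.
from pyDes import triple_des as _3des, CBC as _CBC, PAD_PKCS5 as _PKCS5

key = b'0123456789abcdef01234567'    # 24 bytes -> 3-key 3DES
iv = b'\x00' * 8                     # 8-byte initialization vector
ciphertext = _3des(key, mode=_CBC, IV=iv, padmode=_PKCS5).encrypt(b'secret message')
assert tripledes_cbc_pkcs5_decrypt(key, ciphertext, iv) == b'secret message'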
def kruskal_chi2_test(data=None, alpha=0.05, precision=4):
"""
    Columns are the target groups to compare;
    rows hold the data for each target group.
"""
if type(data) == pd.DataFrame:
data = data.copy().to_numpy()
alldata = np.concatenate(data.copy())
else:
alldata = np.concatenate(data.copy())
k = data.shape[1]
alldata.sort()
tmp_df = pd.DataFrame(({'value': alldata}))
tmp_df['rank'] = tmp_df.index + 1 # rank
value_to_rank = tmp_df.groupby('value').mean().reset_index()
T = []
sample_rank_df = []
for i in range(k):
samp = pd.DataFrame(
{'value': data[:, i][~np.isnan(data[:, i])]})
samp = pd.merge(samp, value_to_rank)
sample_rank_df.append(samp)
T.append(samp['rank'].sum())
n = [len(data[:, i][~np.isnan(data[:, i])]) for i in range(k)]
# print(T)
# print(n)
rule_of_five_str = ""
if (np.sum(np.array(n) < 5) > 0):
rule_of_five_str += "!(At least one sample size is less than 5)"
else:
rule_of_five_str += "(All sample size >= 5)"
N = np.sum(n)
t_over_n = 0
for i in range(k):
t_over_n += T[i] ** 2 / n[i]
H = 12 / N / (N + 1) * t_over_n - 3 * (N + 1)
p_value = 1 - stats.chi2.cdf(H, k - 1)
chi2_stat = stats.chi2.ppf(1 - alpha, k - 1)
result_dict = {'H': H, 'p-value': p_value,
'T': T, 'sample_rank_df': sample_rank_df}
flag = p_value < alpha
result = f'''======= Kruskal-Wallis Test with Chi-squared Test =======
{rule_of_five_str}
H statistic value (observed) = {H:.{precision}f}
chi2 critical value = {chi2_stat:.{precision}f}
p-value = {p_value:.{precision}f} ({inter_p_value(p_value)})
Reject H_0 (Not all {k} population locations are the same) → {flag}
'''
print(result)
return result_dict | 5,353,766 |
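# Usage sketch for kruskal_chi2_test, assuming pandas/numpy/scipy.stats and the
# helper inter_p_value() used above are importable; the sample values are made up.
samples = pd.DataFrame({
    'A': [27, 31, 29, 35, 33],
    'B': [20, 22, 26, 24, 28],
    'C': [34, 36, 40, 31, 38],
})
res = kruskal_chi2_test(data=samples, alpha=0.05)
res['H'], res['p-value']   # H statistic and its chi-squared p-value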
def add_new_action(action, object_types, preferred, analyst):
"""
Add a new action to CRITs.
:param action: The action to add to CRITs.
:type action: str
:param object_types: The TLOs this is for.
:type object_types: list
:param preferred: The TLOs this is preferred for.
:type preferred: list
:param analyst: The user adding this action.
:returns: True, False
"""
action = action.strip()
idb_action = Action.objects(name=action).first()
if not idb_action:
idb_action = Action()
idb_action.name = action
idb_action.object_types = object_types
idb_action.preferred = []
prefs = preferred.split('\n')
for pref in prefs:
cols = pref.split(',')
if len(cols) != 3:
continue
epa = EmbeddedPreferredAction()
epa.object_type = cols[0].strip()
epa.object_field = cols[1].strip()
epa.object_value = cols[2].strip()
idb_action.preferred.append(epa)
try:
idb_action.save(username=analyst)
except ValidationError:
return False
return True | 5,353,767 |
def plot_history(H, epochs, output_path):
"""
Utility function for plotting model history using matplotlib.
This method was developed for use in class and adjusted for this project.
"""
plt.style.use("fivethirtyeight")
plt.figure()
plt.plot(np.arange(0, epochs), H.history["loss"], label="train_loss")
plt.plot(np.arange(0, epochs), H.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, epochs), H.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, epochs), H.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend()
plt.tight_layout()
plt.savefig(output_path) | 5,353,768 |
def dissociate_scaling_group(scaling_group, domain):
"""
Dissociate a domain from a scaling_group.
\b
SCALING_GROUP: The name of a scaling group.
DOMAIN: The name of a domain.
"""
with Session() as session:
try:
data = session.ScalingGroup.dissociate_domain(scaling_group, domain)
except Exception as e:
print_error(e)
sys.exit(1)
if not data['ok']:
print_fail('Dissociating scaling group from domain failed: {0}'.format(data['msg']))
sys.exit(1)
print_done('Scaling group {} is dissociated from domain {}.'.format(scaling_group, domain)) | 5,353,769 |
def test_PipeJsonRpcSendAsync_5():
"""
    Special test case.
    Two messages: the first message times out, the second message is sent before
from the first message is received. Verify that the result returned in response to the
second message is received. (We discard the result of the message that is timed out.)
"""
def method_handler1():
ttime.sleep(0.7)
return 39
def method_handler2():
ttime.sleep(0.2)
return 56
conn1, conn2 = multiprocessing.Pipe()
pc = PipeJsonRpcReceive(conn=conn2, name="comm-server")
pc.add_method(method_handler1, "method1")
pc.add_method(method_handler2, "method2")
pc.start()
async def send_messages():
p_send = PipeJsonRpcSendAsync(conn=conn1, name="comm-client")
p_send.start()
# Submit multiple messages at once. Messages should stay at the event loop
# and be processed one by one.
with pytest.raises(CommTimeoutError):
await p_send.send_msg("method1", timeout=0.5)
result = await p_send.send_msg("method2", timeout=0.5)
assert result == 56, "Incorrect result received"
p_send.stop()
asyncio.run(send_messages())
pc.stop() | 5,353,770 |
def RACEDataset(race_type):
"""
Loads a RACE dataset given the type (see the RACEType enum).
Any error during reading will generate an exception.
Returns a Pandas DataFrame with 5 columns:
* 'article': string
* 'question': string
* 'answers': list[string], length = 4
    * 'correct': one of ('A', 'B', 'C', 'D')
* 'id': string
The returned IDs are unique and have this format: `index`-`passage_id`.
Examples: 1-middle1548.txt, 2-middle1548.txt, etc. The `passage_id` is
frequently the name of the file. All the questions related to the same
passage are grouped in the same file in the RACE dataset (convention).
Because in each RACE file there are multiple questions, the counter is
necessary in order to guarantee that IDs are unique (the file name is
not sufficient). We translate the `passage_id` into the `question_id`
using the per-passage-question counter.
"""
assert(isinstance(race_type, RACEType))
download_dataset(Collection.RACE, check_shallow_integrity)
dirpath = type_to_data_directory(race_type)
all_data = []
q_ids = {}
for path in os.listdir(dirpath):
assert(os.path.isfile(os.path.join(dirpath, path)))
with open(os.path.join(dirpath, path), 'rt') as f:
entry = json.load(f)
"""
Each passage is a JSON file. The JSON file contains these fields:
1. article: A string, which is the passage.
2. questions: A string list. Each string is a query. We have two
types of questions. First one is an interrogative
sentence. Another one has a placeholder, which is
represented by _.
3. options: A list of the options list. Each options list contains
4 strings, which are the candidate option.
4. answers: A list contains the golden label of each query.
5. id: Each passage has an id in this dataset. Note: the ids are
not unique in the question set! Questions in the same file
have the same id (the name of the file). This id is more of
a passage id than a question id.
"""
assert(len(entry) == 5)
assert(set(entry.keys()) == {
"article",
"questions",
"options",
"answers",
"id"
})
article = entry["article"]
questions = entry["questions"]
options = entry["options"]
answers = entry["answers"]
q_id = entry["id"]
assert(isinstance(article, string_types))
assert(isinstance(questions, list))
assert(isinstance(options, list))
assert(isinstance(answers, list))
assert(isinstance(q_id, string_types))
assert(len(questions) == len(options))
assert(len(questions) == len(answers))
for question, option, answer in zip(questions, options, answers):
assert(isinstance(question, string_types))
assert(isinstance(option, list) and len(option) == 4)
assert(isinstance(answer, string_types))
assert(answer in ["A", "B", "C", "D"])
all_data.append({
'article': article,
'question': question,
'answers': option,
'correct': answer,
'id': next_question_id(q_ids, q_id)
})
df = pd.DataFrame(all_data)
return df | 5,353,771 |
def tpc(fastas, **kw):
"""
Function to generate tpc encoding for protein sequences
:param fastas:
:param kw:
:return:
"""
AA = kw['order'] if kw['order'] is not None else 'ACDEFGHIKLMNPQRSTVWY'
encodings = []
triPeptides = [aa1 + aa2 + aa3 for aa1 in AA for aa2 in AA for aa3 in AA]
AADict = {}
for i in range(len(AA)):
AADict[AA[i]] = i
for i in fastas:
name, sequence = i[0], re.sub('-', '', i[1])
code = [name]
tmpCode = [0] * 8000
for j in range(len(sequence) - 3 + 1):
tmpCode[AADict[sequence[j]] * 400 + AADict[sequence[j + 1]] * 20 + AADict[sequence[j + 2]]] = \
tmpCode[AADict[sequence[j]] * 400 + AADict[sequence[j + 1]] * 20 + AADict[sequence[j + 2]]] + 1
if sum(tmpCode) != 0:
tmpCode = [i / sum(tmpCode) for i in tmpCode]
code = code + tmpCode
encodings.append(code)
return encodings | 5,353,772 |
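# Usage sketch for tpc(): each sequence is encoded as its name followed by an
# 8000-dimensional tripeptide-composition vector (20^3 tripeptides). Passing
# order=None falls back to the default amino-acid alphabet; the sequence is illustrative.
fastas = [('protein_1', 'ACDEFGHIKLMNPQRSTVWY')]
encodings = tpc(fastas, order=None)
len(encodings[0])   # 8001: the name plus 8000 normalised tripeptide frequencies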
def cli():
"""cli entry point"""
parser = argparse.ArgumentParser("Welcome to SigSticker, providing all of your sticker needs")
parser.add_argument(
"-p",
"--pack",
help="Pass in a pack url inline",
nargs="+",
action="append",
)
args = parser.parse_args()
# Get the packs
packs = sum(args.pack, [])
if packs is None:
packs = []
while True:
name = input("Enter sticker_set url (leave blank to stop): ").strip()
if name == "":
break
packs.append(name)
for pack in packs:
packAttrs = pack.split("#pack_id=")[-1].split("&pack_key=")
asyncio.run(convertPack(*asyncio.run(downloadPack(*packAttrs)))) | 5,353,773 |
def _uninstall_flocker_centos7():
"""
Return an ``Effect`` for uninstalling the Flocker package from a CentOS 7
machine.
"""
return sequence([
run_from_args([
b"yum", b"erase", b"-y", b"clusterhq-python-flocker",
]),
run_from_args([
b"yum", b"erase", b"-y", b"clusterhq-release",
]),
]) | 5,353,774 |
def set_se_loop(context: X12ParserContext, segment_data: Dict) -> None:
"""
Sets the transaction set footer loop.
    :param context: The X12 parsing context, which contains the current loop and transaction record.
:param segment_data: The current segment's data
"""
context.set_loop_context(
TransactionLoops.FOOTER, context.transaction_data["footer"]
) | 5,353,775 |
def get_memory_banks_per_run(coreAssignment, cgroups):
"""Get an assignment of memory banks to runs that fits to the given coreAssignment,
i.e., no run is allowed to use memory that is not local (on the same NUMA node)
to one of its CPU cores."""
try:
# read list of available memory banks
allMems = set(cgroups.read_allowed_memory_banks())
result = []
for cores in coreAssignment:
mems = set()
for core in cores:
coreDir = "/sys/devices/system/cpu/cpu{0}/".format(core)
mems.update(_get_memory_banks_listed_in_dir(coreDir))
allowedMems = sorted(mems.intersection(allMems))
logging.debug(
"Memory banks for cores %s are %s, of which we can use %s.",
cores,
list(mems),
allowedMems,
)
result.append(allowedMems)
assert len(result) == len(coreAssignment)
if any(result) and os.path.isdir("/sys/devices/system/node/"):
return result
else:
# All runs get the empty list of memory regions
# because this system has no NUMA support
return None
except ValueError as e:
sys.exit("Could not read memory information from kernel: {0}".format(e)) | 5,353,776 |
def authenticate(username, password):
"""Authenticate with the API and get a token."""
API_AUTH = "https://api2.xlink.cn/v2/user_auth"
auth_data = {'corp_id': "1007d2ad150c4000", 'email': username,
'password': password}
r = requests.post(API_AUTH, json=auth_data, timeout=API_TIMEOUT)
try:
return (r.json()['access_token'], r.json()['user_id'])
except KeyError:
raise(LaurelException('API authentication failed')) | 5,353,777 |
def filter_hashtags_users(DATAPATH, th, city):
"""
    Cleans target_hashtags by removing hashtags that are used by fewer than 2 users,
    replaces hashtags with ht_id and saves to idhashtags.csv,
    computes an entropy value for each ht_id and saves to hashtag_id_entropies.csv,
    and prints a summary to standard output.
:param DATAPATH:
:param th: hashtags are too popular if more than th% of users share them
:param city:
:return:
"""
ht = pd.read_csv(DATAPATH + city + ".target_hashtags")
print ("ht.shape", ht.shape)
ht["hashtags"] = ht['hashtags'].astype('category')
ht["ht_id"] = ht["hashtags"].cat.codes
ht.drop('hashtags', axis=1, inplace=True)
#arrmult = []
entarr = []
gp = ht.groupby('ht_id')
# cnt_df = gp.size().reset_index(name='sizes')
# hashtags are too popular if more than th% of users share them
max_df_ht = th * len(ht.uid.unique())
print ("max_df_ht", max_df_ht)
# removing hashtags that are used by less than 2 users and more than th% of users
for htid, group in gp:
user_count = len(group['uid'].value_counts().values)
if user_count > 1 and user_count <= max_df_ht:
e = entropy(group['uid'].value_counts().values)
c = len(group)
entarr.append([htid, e, c])
#arrmult.append(htid)
# save entropies of hashtags for other calculations
entdf = pd.DataFrame(data=entarr, columns=['ht_id', 'entropy', 'counts'])
sortt = entdf.sort_values(by='entropy')
sortt.to_csv(DATAPATH + "counts_entropies.csv", index=False)
# filtered hashtag df
ht2 = ht[ht.ht_id.isin(entdf.ht_id)]
print ("after removing too popular and too rare hts", ht2.shape)
ht2.to_csv(DATAPATH + str(th) + "filtered_hashtags.csv", index=False)
return entdf, ht2 | 5,353,778 |
def plate_96_bind(form_info):
"""Add the plate container info to the samples in the form.
Arguments:
form_info (api_types.CustomForm): the dataclass holding info on the
form.
"""
smp_locations = {}
for sample in form_info.samples:
sample.con = api_types.Container()
# If 'Container Name' in grid.
if sample.udf_to_value.get("Container Name"):
con_name = sample.udf_to_value["Container Name"]
# If 'container_name' in form.
elif form_info.field_to_values.get("container_name"):
con_name = form_info.field_to_values["container_name"]
else:
raise TypeError(
f"The sample {sample.name} in request {form_info.req_id} on"
f" form {form_info.name} was not given a container name.")
# Replace all not ascii chars with ascii ones.
con_name = _sanitize_text(con_name)
con_name = re.sub(r"[^a-zA-Z0-9\-]", "-", con_name)
sample.con.name = con_name
sample.con.con_type = "96 well plate"
# Make a dictionary of con_name: well, to make sure that there are no
# two samples in the same well.
smp_locations.setdefault(
sample.con.name, []).append(sample.location)
for locs in smp_locations.values():
if len(set(locs)) != len(locs):
raise TypeError(
f"There are two or more samples with the same well"
f" location in request {form_info.req_id}. Please review and"
f" edit your well locations.") | 5,353,779 |
def cluster_set_state(connection: 'Connection', state: int, query_id=None) -> 'APIResult':
"""
Set cluster state.
:param connection: Connection to use,
:param state: State to set,
:param query_id: (optional) a value generated by client and returned as-is
in response.query_id. When the parameter is omitted, a random value
is generated,
:return: API result data object. Contains zero status if a value
is written, non-zero status and an error description otherwise.
"""
return __cluster_set_state(connection, state, query_id) | 5,353,780 |
def print_signatures(client: zeep.Client, out):
"""Print a short summary of each operation signature offered by client."""
# From: https://stackoverflow.com/questions/50089400/introspecting-a-wsdl-with-python-zeep
for service in client.wsdl.services.values():
out.write(f"service: {service.name}\n")
for port in service.ports.values():
out.write(f" port: {port.name}\n")
operations = sorted(
port.binding._operations.values(),
key=operator.attrgetter('name'))
for operation in operations:
action = operation.name
inputs = operation.input.signature()
outputs = operation.output.signature()
out.write(f" {action}({inputs}) --> ({outputs})\n") | 5,353,781 |
def get_all(data, path):
"""Returns a list with all values in data matching the given JsonPath."""
return [x for x in iterate(data, path)] | 5,353,782 |
def extract_information_from_blomap(oneLetterCodes):
"""
    extracts the isoelectric point (iep) and
    hydrophobicity from the blomap for each amino acid
    Parameters
    ----------
    oneLetterCodes : list of Strings/Chars
        contains the one-letter code for each amino acid
    Returns
    -------
    list, list
        isoelectric_point, hydrophobicity (one single-element list per amino acid)
"""
letter_encodings = []
for x in oneLetterCodes:
letter_encodings.append(extended_blomap[x.upper()])
isoelectric_point = []
hydrophobicity = []
for element in letter_encodings:
isoelectric_point.append([element[7]])
hydrophobicity.append([element[8]])
return isoelectric_point, hydrophobicity | 5,353,783 |
def split_train_val_test_detection_data(xml_dir):
"""
prepare train/val/test dataset for detection
:param xml_dir:
:return:
"""
filenames = [_.replace('.xml', '') for _ in os.listdir(xml_dir)]
random.shuffle(filenames)
TEST_RATIO = 0.2
train = filenames[0:int(len(filenames) * (1 - TEST_RATIO))]
test = filenames[int(len(filenames) * (1 - TEST_RATIO)) + 1:]
val = train[0:int(len(train) * 0.1)]
train = train[int(len(train) * 0.1) + 1:]
with open('./train.txt', mode='wt', encoding='utf-8') as f:
f.writelines('\n'.join(train))
with open('./val.txt', mode='wt', encoding='utf-8') as f:
f.writelines('\n'.join(val))
with open('./test.txt', mode='wt', encoding='utf-8') as f:
f.writelines('\n'.join(test)) | 5,353,784 |
def partitions(n):
"""
Return a sequence of lists
Each element is a list of integers which sum to n -
a partition n.
The elements of each partition are in descending order
and the sequence of partitions is in descending lex order.
>>> list(partitions(4))
[[3, 1], [2, 2], [2, 1, 1], [1, 1, 1, 1]]
"""
return partitions_with_max(n, max=n - 1) | 5,353,785 |
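# partitions() relies on a partitions_with_max helper that is not included in this
# snippet. A minimal recursive sketch matching the docstring's semantics (every part
# <= max, parts and partitions in descending order) could look like the following;
# this is a hypothetical reconstruction, not the original helper.
def partitions_with_max(n, max):
    """Yield partitions of n whose parts are all <= max, in descending lex order."""
    if n == 0:
        yield []
        return
    for first in range(min(n, max), 0, -1):
        for rest in partitions_with_max(n - first, max=first):
            yield [first] + rest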
def create_request_element(channel_id, file_info, data_id, annotation):
"""
create dataset item from datalake file
    :param channel_id:
    :param file_info:
    :param data_id:
    :param annotation:
    :return:
"""
data_uri = 'datalake://{}/{}'.format(channel_id, file_info.file_id)
data = {
'source_data': [
{
'data_uri': data_uri,
'data_type': file_info.content_type
}
],
'attributes': {
'classification': annotation,
'id': data_id
}
}
return data | 5,353,786 |
def _categorise(obj, _regex_adapter=RegexAdapter):
"""
Check type of the object
"""
if obj is Absent:
return Category.ABSENT
obj_t = type(obj)
if issubclass(obj_t, NATIVE_TYPES):
return Category.VALUE
elif callable(obj):
return Category.CALLABLE
elif _regex_adapter.check(obj):
return Category.REGEX
elif issubclass(obj_t, Mapping):
return Category.DICT
elif issubclass(obj_t, Iterable):
return Category.ITERABLE
else: # catch-all for types like decimal.Decimal, uuid.UUID, et cetera
return Category.VALUE | 5,353,787 |
def test_emcee_optimizer_can_restart(datasets_db):
    """A restart trace can be passed to the Emcee optimizer."""
dbf = Database.from_string(CU_MG_TDB, fmt='tdb')
datasets_db.insert(CU_MG_DATASET_ZPF_WORKING)
param = 'VV0001'
opt = EmceeOptimizer(dbf)
restart_tr = -4*np.ones((2, 10, 1)) # 2 chains, 10 iterations, 1 parameter
opt.fit([param], datasets_db, iterations=1, chains_per_parameter=2, restart_trace=restart_tr)
assert opt.sampler.chain.shape == (2, 1, 1) | 5,353,788 |
def remove_key(d, key):
"""Safely remove the `key` from the dictionary.
Safely remove the `key` from the dictionary `d` by first
making a copy of dictionary. Return the new dictionary together
with the value stored for the `key`.
Parameters
----------
d : dict
The dictionary from which to remove the `key`.
key :
The key to remove
Returns
-------
v :
The value for the key
r : dict
The dictionary with the key removed.
"""
r = dict(d)
v = r[key]
del r[key]
return v, r | 5,353,789 |
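# Usage sketch for remove_key(): the original mapping is left untouched.
value, rest = remove_key({'a': 1, 'b': 2}, 'a')
# value == 1 and rest == {'b': 2}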
def test_clear_child_protocol_sets_raise_exc(httpcs_svc_instance, video_format, rewrite_type):
"""Test: Clear protocolSets with exception in Httpcs Service Instance object
Steps:
1. Clear protocolSets, do not set either published or source protocol
2. Check raised exception
Result:
OK: exception is raised
"""
with pytest.raises(HttpCsSvcInstanceBaseException):
httpcs_svc_instance.clear_child_protocol_sets(video_formats=video_format, rewrite_type=rewrite_type) | 5,353,790 |
def graph(x, y, pl, opacity, lbf, title=None, x_title=None, y_title=None, xlim=None, ylim=None):
"""
Creates and displays a matplotlib scatter plot of y vs. x
Parameters
----------
x: iterable, x-coordinates for values to be plotted
y: iterable, y-coordinates for values to be plotted
pl: reference to the already-initialized plot to be used
opacity: float, opacity of each point in the scatter plot
lbf: boolean (optional)
True will graph a line of best fit, False will not
title: string (optional) title for the plot
x_title: string (optional) title for the x-axis of the plot
y_title: string (optional) title for the y-axis of the plot
    xlim: tuple (optional), minimum and maximum x-values to be displayed
    ylim: tuple (optional), minimum and maximum y-values to be displayed
"""
pl.scatter(x, y, marker="o", alpha=opacity) # Create the scatter plot
pl.set_title(title)
pl.set_xlabel(x_title)
pl.set_ylabel(y_title)
if ylim is not None:
if len(ylim)==1:
ylim.append(None)
pl.set_ylim(ylim[0], ylim[1])
if xlim is not None:
if len(xlim)==1:
xlim.append(None)
pl.set_xlim(xlim[0], xlim[1])
pl.grid(True)
# Graph a line of best fit
if lbf:
best_fit(x, y, pl) | 5,353,791 |
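# Usage sketch for graph(), assuming matplotlib is available; lbf=False avoids the
# separate best_fit() helper referenced above. The data points are illustrative.
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
graph([1, 2, 3, 4], [2.1, 3.9, 6.2, 8.1], ax, opacity=0.7, lbf=False,
      title="y vs. x", x_title="x", y_title="y")
plt.show()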
def tasks_file_to_task_descriptors(tasks, retries, input_file_param_util,
output_file_param_util):
"""Parses task parameters from a TSV.
Args:
    tasks: Dict containing the path to a TSV file and the range of task numbers
      to run. The file's first line lists the variables, input, and output
      parameters as column headings; subsequent lines specify parameter values,
      one row per task.
retries: Number of retries allowed.
input_file_param_util: Utility for producing InputFileParam objects.
output_file_param_util: Utility for producing OutputFileParam objects.
Returns:
task_descriptors: an array of records, each containing the task-id,
task-attempt, 'envs', 'inputs', 'outputs', 'labels' that defines the set of
parameters for each task of the job.
Raises:
ValueError: If no job records were provided
"""
task_descriptors = []
path = tasks['path']
task_min = tasks.get('min')
task_max = tasks.get('max')
# Load the file and set up a Reader that tokenizes the fields
param_file = dsub_util.load_file(path)
reader = csv.reader(param_file, delimiter='\t')
# Read the first line and extract the parameters
header = six.advance_iterator(reader)
job_params = parse_tasks_file_header(header, input_file_param_util,
output_file_param_util)
# Build a list of records from the parsed input file
for row in reader:
# Tasks are numbered starting at 1 and since the first line of the TSV
# file is a header, the first task appears on line 2.
task_id = reader.line_num - 1
if task_min and task_id < task_min:
continue
if task_max and task_id > task_max:
continue
if len(row) != len(job_params):
dsub_util.print_error('Unexpected number of fields %s vs %s: line %s' %
(len(row), len(job_params), reader.line_num))
# Each row can contain "envs", "inputs", "outputs"
envs = set()
inputs = set()
outputs = set()
labels = set()
for i in range(0, len(job_params)):
param = job_params[i]
name = param.name
if isinstance(param, job_model.EnvParam):
envs.add(job_model.EnvParam(name, row[i]))
elif isinstance(param, job_model.LabelParam):
labels.add(job_model.LabelParam(name, row[i]))
elif isinstance(param, job_model.InputFileParam):
inputs.add(
input_file_param_util.make_param(name, row[i], param.recursive))
elif isinstance(param, job_model.OutputFileParam):
outputs.add(
output_file_param_util.make_param(name, row[i], param.recursive))
task_descriptors.append(
job_model.TaskDescriptor({
'task-id': task_id,
'task-attempt': 1 if retries else None
}, {
'labels': labels,
'envs': envs,
'inputs': inputs,
'outputs': outputs
}, job_model.Resources()))
# Ensure that there are jobs to execute (and not just a header)
if not task_descriptors:
raise ValueError('No tasks added from %s' % path)
return task_descriptors | 5,353,792 |
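# Illustrative tasks-TSV layout (tab-separated; hypothetical names and paths), following
# the dsub convention of --env/--input/--output column headings that
# parse_tasks_file_header above consumes:
#
#   --env SAMPLE_ID   --input INPUT_VCF           --output OUTPUT_STATS
#   NA12878           gs://bucket/NA12878.vcf     gs://bucket/NA12878.stats
#   NA12891           gs://bucket/NA12891.vcf     gs://bucket/NA12891.stats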
def array2raster(newRasterfn, dataset, array, dtype):
"""
Function from https://gist.github.com/jkatagi/a1207eee32463efd06fb57676dcf86c8
Save GeoTiff file from numpy.array
input:
newRasterfn: save file name
dataset : original tif file
array : numpy.array
dtype: Byte or Float32.
"""
cols = array.shape[1]
rows = array.shape[0]
originX, pixelWidth, b, originY, d, pixelHeight = dataset.GetGeoTransform()
driver = gdal.GetDriverByName('GTiff')
# set data type to save.
GDT_dtype = gdal.GDT_Unknown
if dtype == "Byte":
GDT_dtype = gdal.GDT_Byte
elif dtype == "Float32":
GDT_dtype = gdal.GDT_Float32
else:
print("Not supported data type.")
    # set the number of bands.
if array.ndim == 2:
band_num = 1
else:
band_num = array.shape[2]
outRaster = driver.Create(newRasterfn, cols, rows, band_num, GDT_dtype)
outRaster.SetGeoTransform((originX, pixelWidth, 0, originY, 0, pixelHeight))
# Loop over all bands.
for b in range(band_num):
outband = outRaster.GetRasterBand(b + 1)
# Read in the band's data into the third dimension of our array
if band_num == 1:
outband.WriteArray(array)
else:
outband.WriteArray(array[:, :, b])
    # setting the SRS from the input tif file.
prj = dataset.GetProjection()
outRasterSRS = osr.SpatialReference(wkt=prj)
outRaster.SetProjection(outRasterSRS.ExportToWkt())
outband.FlushCache() | 5,353,793 |
def load_model(filename, folder=None):
"""
Load a model from a file.
:param filename: name of the file to load the model from
:param folder: name of the subdirectory folder. If given, the model will be loaded from the subdirectory.
:return: model from the file
"""
if folder is not None:
path = os.path.join("./models", folder, filename)
else:
path = os.path.join("./models", filename)
model = torch.load(path, map_location='cuda:0')
return model | 5,353,794 |
def spreadplayers(self: Client, x: RelativeFloat, y: RelativeFloat,
spread_distance: float, max_range: float,
victim: str) -> str:
"""Spreads players."""
return self.run('spreadplayers', x, y, spread_distance, max_range, victim) | 5,353,795 |
def get_version():
"""Returns single integer number with the serialization version"""
return 2 | 5,353,796 |
def format_result(func):
"""包装结果格式返回给调用者"""
@wraps(func)
def wrapper(*args, **kwargs):
ret = {}
try:
data = func(*args, **kwargs)
if type(data) is Response:
return data
ret['data'] = data
ret['success'] = True
ret['message'] = 'Succeed'
except Exception as e:
ret['message'] = str(e)
ret['data'] = None
ret['success'] = False
logger.info(f"request_{func}, result: {ret}")
return ret
return wrapper | 5,353,797 |
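# Usage sketch for the format_result decorator, assuming the module-level Response and
# logger names it references are importable; divide() is a made-up example function.
@format_result
def divide(a, b):
    return a / b

divide(6, 3)   # {'data': 2.0, 'success': True, 'message': 'Succeed'}
divide(1, 0)   # {'data': None, 'success': False, 'message': 'division by zero'}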
def writeData(filename, data):
"""
MBARBIER: Taken/adapted from https://github.com/ChristophKirst/ClearMap/blob/master/ClearMap/IO/TIF.py
Write image data to tif file
Arguments:
filename (str): file name
data (array): image data
Returns:
str: tif file name
"""
d = len(data.shape);
if d == 2:
tiff.imsave(filename, data.transpose([0,1]));
elif d == 3:
tiff.imsave(filename, data.transpose([2,0,1]), photometric = 'minisblack', planarconfig = 'contig', bigtiff = True);
elif d == 4:
#tiffile (z,y,x,c)
tiff.imsave(filename, data.transpose([0,1,2,3]), photometric = 'minisblack', planarconfig = 'contig', bigtiff = True);
else:
raise RuntimeError('writing multiple channel data to tif not supported!');
return filename; | 5,353,798 |
def test_activate_heater(sht4x, power, duration):
"""
Test if the command is accepted by the device and returns the proper
result.
"""
temperature, humidity = sht4x.activate_heater(power, duration)
assert type(temperature) is Sht4xTemperature
assert type(temperature.ticks) is int
assert type(humidity) is Sht4xHumidity
assert type(humidity.ticks) is int | 5,353,799 |