| content | sha1 | id |
|---|---|---|
from flask import render_template

def index():
    """ Index page """
    return render_template("index.html") | af92fa468122a41ed33d55a591735d400cf68e0d | 3,651,300 |
def solve(in_array):
"""
Similar to 46442a0e, but where new quadrants are flips of the original array rather than rotations
:param in_array: input array
:return: expected output array
"""
array_edgelength = len(in_array[0]) # input array edge length
opp_end = array_edgelength*2-1 # used for getting opposite end of array
prediction = [[-1]*array_edgelength*2 for i in range(array_edgelength*2)] # init 2d array
# iterate through all values
for y in range(len(in_array)):
for x in range(len(in_array[0])):
val = in_array[y][x]
prediction[y][x] = val
# other 3 quadrants are flips
prediction[y][opp_end-x] = val
prediction[opp_end-y][opp_end-x] = val
prediction[opp_end-y][x] = val
return prediction | 0af23e82caf65bea64eeeae6da8400ef6ec03426 | 3,651,301 |
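A quick hedged check of the mirroring logic above, assuming `solve` is in scope: a 2x2 input is reflected into all four quadrants of the 4x4 output.

example_in = [[1, 2],
              [3, 4]]
expected = [[1, 2, 2, 1],
            [3, 4, 4, 3],
            [3, 4, 4, 3],
            [1, 2, 2, 1]]
assert solve(example_in) == expected  # flips, not rotations, in the other quadrants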
import numpy as np

def trim_all(audio, rate, frame_duration, ambient_power=1e-4):
"""Trims ambient silence in the audio anywhere.
params:
audio: A numpy ndarray, which has 1 dimension and values within
-1.0 to 1.0 (inclusive)
rate: An integer, which is the rate at which samples are taken
frame_duration: A float, which is the duration of each frame
to check
ambient_power: A float, which is the Root Mean Square of ambient noise
return: A numpy ndarray, which has 1 dimension and values within
-1.0 to 1.0 (inclusive)
"""
new_audio = []
powers, fr = for_each_frame(audio, rate, frame_duration, calc_rms)
frame_length = round(rate / fr)
for ndx, power in enumerate(powers):
if power > ambient_power:
new_audio += audio[ndx*frame_length:(ndx+1)*frame_length].tolist()
return np.array(new_audio) | 37d7ca77c9ab767c90fedf4008a7a28415c5ce3f | 3,651,302 |
import numpy as np
import tensorflow as tf

def guess_initializer(var, graph=None):
"""Helper function to guess the initializer of a variable.
The function looks at the operations in the initializer name space for the
variable (e.g. my_scope/my_var_name/Initializer/*). The TF core initializers
have characteristic sets of operations that can be used to determine the
initializer.
Args:
var: `tf.Variable`. The function will use the name to look for initializer
operations in the same scope.
graph: Optional `tf.Graph` that contains the variable. If None the default
graph is used.
Returns:
    Name of the guessed initializer as a string.
"""
if graph is None:
graph = tf.get_default_graph()
prefix = var.op.name + "/Initializer"
ops = [op for op in graph.get_operations()
if op.name.startswith(prefix)]
assert ops, "No operations found for prefix {}".format(prefix)
op_names = [op.name[len(prefix) + 1:] for op in ops]
if len(op_names) == 1:
if op_names[0] == "Const":
value = ops[0].get_attr("value").float_val[0]
if value == 0.0:
return "zeros"
if np.isclose(value, 1.0):
return "ones"
return "constant"
return op_names[0] # ones or zeros
if "Qr" in op_names and "DiagPart" in op_names:
return "orthogonal"
if "random_uniform" in op_names:
return "glorot_uniform"
stddev_ops = [op for op in ops if op.name.endswith("stddev")]
if stddev_ops:
assert len(stddev_ops) == 1
stddev = stddev_ops[0].get_attr("value").float_val[0]
else:
stddev = None
if "random_normal" in op_names:
return "random_normal"
if "truncated_normal" in op_names:
if len(str(stddev)) > 5:
return "glorot_normal"
return "truncated_normal" | 5a1e4a99037e51d87a8d75bc5f33e105f86a4153 | 3,651,303 |
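A minimal usage sketch for the initializer guesser, assuming the TF 1.x graph-mode API used above; the variable name and shape here are illustrative only.

import tensorflow as tf  # assumes TF 1.x

with tf.Graph().as_default():
    v = tf.get_variable("dense/kernel", shape=[4, 4],
                        initializer=tf.glorot_uniform_initializer())
    print(guess_initializer(v))  # expected to report "glorot_uniform"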
def get_all(ptype=vendor):
    """ returns a dict of all partners """
    if ptype == vendor:
        d = get_dict_from_json_file( VENDORS_JSON_FILE ) # will create file if not exist
    elif ptype == customer:
        d = get_dict_from_json_file( CUSTOMERS_JSON_FILE )
    else:
        raise ValueError("ptype must be either vendor or customer")
    return d | eb285d9462f85daec9c8b176edc6eaa90a09ff4c | 3,651,304 |
import logging
from pathlib import Path
def validate_read_parameters(file_path, output_path, encryption_key, scrypt_n, scrypt_r, scrypt_p,
block_height_override, block_width_override, max_cpu_cores, save_statistics,
bad_frame_strikes, stop_at_metadata_load, auto_unpackage_stream,
                             auto_delete_finished_stream):
    """This function verifies the arguments going into read() to ensure they conform to the required format for
processing.
"""
logging.debug("Validating read parameters...")
constants = session.query(Constants).first()
valid_video_formats = constants.return_valid_video_formats()
valid_image_formats = constants.return_valid_image_formats()
if isinstance(file_path, str): # Single video or image file to decode
path = Path(file_path)
if not path.is_dir():
input_type = _file_path_validate(file_path, 'all', valid_video_formats, valid_image_formats)
else:
input_type = 'image'
elif isinstance(file_path, list): # Multiple images
for path in file_path:
input_type = _file_path_validate(path, 'image', valid_video_formats, valid_image_formats)
else:
        raise ValueError('file_path can only accept a string for a single video file or a directory (with images inside), '
                         'or a list of strings for image frames.')
if output_path:
is_valid_directory('file_to_input', output_path)
proper_string_syntax('encryption_key', encryption_key)
is_int_over_zero('bad_frame_strikes', bad_frame_strikes)
is_int_over_zero('scrypt_n', scrypt_n)
is_int_over_zero('scrypt_r', scrypt_r)
is_int_over_zero('scrypt_p', scrypt_p)
is_int_over_zero('block_height_override', block_height_override)
is_int_over_zero('block_width_override', block_width_override)
if not isinstance(max_cpu_cores, int) or max_cpu_cores < 0:
raise ValueError('max_cpu_cores must be an integer greater than or equal to 0.')
is_bool('save_statistics', save_statistics)
is_bool('stop_at_metadata_load', stop_at_metadata_load)
is_bool('auto_unpackage_stream', auto_unpackage_stream)
is_bool('auto_delete_finished_stream', auto_delete_finished_stream)
logging.debug("Read parameters validated.")
return input_type | 40509a71967273154395b6fd847497f25b76a2dc | 3,651,305 |
from catkin.find_in_workspaces import find_in_workspaces
def FindCatkinResource(package, relative_path):
"""
Find a Catkin resource in the share directory or
the package source directory. Raises IOError
if resource is not found.
@param relative_path Path relative to share or package source directory
@param package The package to search in
@return Absolute path to resource
"""
paths = find_in_workspaces(project=package, search_dirs=['share'],
path=relative_path, first_match_only=True)
if paths and len(paths) == 1:
return paths[0]
else:
raise IOError('Loading resource "{:s}" failed.'.format(
relative_path)) | 17fe7bf3fb6b04f031d1bd8e0dd6558312dca92a | 3,651,306 |
from typing import Callable
import urllib3
from typing import Dict
from typing import Any
from typing import Optional
def send_udf_call(
api_func: Callable[..., urllib3.HTTPResponse],
api_kwargs: Dict[str, Any],
decoder: decoders.AbstractDecoder,
id_callback: Optional[IDCallback] = None,
*,
results_stored: bool,
) -> "results.RemoteResult[_T]":
"""Synchronously sends a request to the given API.
This handles the boilerplate parts (exception handling, parsing, response
construction) of calling one of the generated API functions for UDFs.
It runs synchronously and will return a :class:`results.RemoteResult`.
    To run the same function asynchronously, use
:meth:`Client.wrap_async_base_call` around the function that calls this
(by convention, the ``whatever_api_base`` functions).
This should only be used by callers *inside* this package.
:param api_func: The UDF API function that we want to call from here.
For instance, this might be :meth:`rest_api.SqlApi.run_sql`.
:param api_kwargs: The arguments to pass to the API function as a dict.
This should only include the parameters you want to send to the server,
*not* any of the “meta” parameters that are mixed in with them (e.g.
``_preload_content``; this function will correctly set up the request).
:param decoder: The Decoder to use to decode the response.
:param id_callback: When the request completes (either by success or
failure), this will be called with the UUID from the HTTP response,
or None if the UUID could not be parsed.
:param results_stored: A boolean indicating whether the results were stored.
This does *not affect* the request; the ``store_results`` parameter of
whatever API message the call uses must be set, and this must match
that value.
:return: A response containing the parsed result and metadata about it.
"""
try:
http_response = api_func(_preload_content=False, **api_kwargs)
except rest_api.ApiException as exc:
if id_callback:
id_callback(results.extract_task_id(exc))
raise tiledb_cloud_error.check_exc(exc) from None
task_id = results.extract_task_id(http_response)
if id_callback:
id_callback(task_id)
return results.RemoteResult(
body=http_response.data,
decoder=decoder,
task_id=task_id,
results_stored=results_stored,
) | d34323f1f276f14d0dc947835db490e78ca47691 | 3,651,307 |
import requests
import json
import pandas as pd
def migration_area_baidu(area="乌鲁木齐市", indicator="move_in", date="20200201"):
"""
    Baidu Maps Huiyan - Baidu Qianxi - inbound migration details for a given area
    Baidu Maps Huiyan - Baidu Qianxi - outbound migration details for a given area
    The top 100 results are shown; if fewer than 100 exist, all results are shown.
    Inbound source ratio: the number of people moving from xx into the current area divided by the total inbound population of the current area.
    Outbound destination ratio: the number of people moving from the current area to xx divided by the total outbound population of the current area.
    https://qianxi.baidu.com/?from=shoubai#city=0
    :param area: a province or a specific city; the full name must be used
    :type area: str
    :param indicator: "move_in" for inbound migration, "move_out" for outbound migration
    :type indicator: str
    :param date: the date to query, 20200101 or later
    :type date: str
    :return: the first 50 inbound/outbound migration details
:rtype: pandas.DataFrame
"""
city_dict.update(province_dict)
inner_dict = dict(zip(city_dict.values(), city_dict.keys()))
if inner_dict[area] in province_dict.keys():
dt_flag = "province"
else:
dt_flag = "city"
url = "https://huiyan.baidu.com/migration/cityrank.jsonp"
params = {
"dt": dt_flag,
"id": inner_dict[area],
"type": indicator,
"date": date,
}
res = requests.get(url, params=params)
json_data = json.loads(res.text[res.text.find("({") + 1:res.text.rfind(");")])
return pd.DataFrame(json_data["data"]["list"]) | 4bb4afdde77c2b21222bde28a4f93e58cd8c6019 | 3,651,308 |
import numpy as np

def ranges(locdata: LocData, loc_properties=None, special=None, epsilon=1):
"""
Provide data ranges for locdata.data property.
If LocData is empty None is returned.
If LocData carries a single value, the range will be (value, value + `epsilon`).
Parameters
----------
locdata : LocData
Localization data.
loc_properties : str, tuple[str], list[str], True, None.
Localization properties for which the range is determined.
If None the ranges for all spatial coordinates are returned.
If True the ranges for all locdata.data properties are returned.
special : None, str
        If None, (min, max) ranges are determined from data and returned;
        if 'zero', (0, max) ranges with max determined from data are returned;
        if 'link', (min_all, max_all) ranges with min and max determined from all combined data are returned.
epsilon : float
number to specify the range for single values in locdata.
Returns
-------
numpy.ndarray of float with shape (dimension, 2), None
The data range (min, max) for each localization property.
"""
if locdata.data.empty:
return None
elif len(locdata) == 1:
pass
if loc_properties is None:
ranges_ = locdata.bounding_box.hull.T.copy()
elif loc_properties is True:
ranges_ = np.array([locdata.data.min(), locdata.data.max()]).T
elif isinstance(loc_properties, str):
ranges_ = np.array(
[[locdata.data[loc_properties].min(), locdata.data[loc_properties].max()]]
)
else:
loc_properties = list(loc_properties)
ranges_ = np.array(
[locdata.data[loc_properties].min(), locdata.data[loc_properties].max()]
).T
if len(locdata) == 1:
if ranges_.size == 0:
ranges_ = np.concatenate(
[locdata.coordinates, locdata.coordinates + epsilon], axis=0
).T
else:
ranges_ = ranges_ + [0, epsilon]
if special is None:
pass
elif special == "zero":
ranges_[:, 0] = 0
elif special == "link":
minmax = np.array([ranges_[:, 0].min(axis=0), ranges_[:, 1].max(axis=0)])
ranges_ = np.repeat(minmax[None, :], len(ranges_), axis=0)
else:
raise ValueError(f"The parameter special={special} is not defined.")
return ranges_ | 28a23603dbb2abb52df4f7d2b35b6333050cfe43 | 3,651,309 |
def subnet_create(request, network_id, **kwargs):
"""Create a subnet on a specified network.
:param request: request context
:param network_id: network id a subnet is created on
:param cidr: (optional) subnet IP address range
:param ip_version: (optional) IP version (4 or 6)
:param gateway_ip: (optional) IP address of gateway
:param tenant_id: (optional) tenant id of the subnet created
:param name: (optional) name of the subnet created
:param subnetpool_id: (optional) subnetpool to allocate prefix from
:param prefixlen: (optional) length of prefix to allocate
:returns: Subnet object
    Although both cidr+ip_version and subnetpool_id+prefixlen are listed as
    optional, you MUST pass along one of the combinations to get a successful
    result.
"""
LOG.debug("subnet_create(): netid=%(network_id)s, kwargs=%(kwargs)s",
{'network_id': network_id, 'kwargs': kwargs})
body = {'subnet': {'network_id': network_id}}
if 'tenant_id' not in kwargs:
kwargs['tenant_id'] = request.user.project_id
body['subnet'].update(kwargs)
subnet = neutronclient(request).create_subnet(body=body).get('subnet')
return Subnet(subnet) | 29fda0e08494869390cb5c30a2bb2609b56cf8d8 | 3,651,310 |
def evaluate_available(item, type_name, predicate):
"""
Run the check_available predicate and cache the result.
If there is already a cached result, use that and don't
run the predicate command.
:param str item: name of the item to check the type for. i.e. 'server_types
:param str type_name: name of the type. i.e. 'headless'
:param str predicate: the check_available command
:return bool type_available: whether or not the type is available
"""
global cached_available
if (item, type_name) not in cached_available:
exit_code, _, _ = run_command_print_ready(
shell=True,
command=predicate
)
cached_available[(item, type_name)] = exit_code == 0
return cached_available[(item, type_name)] | 872e81613c91141c81f6dafd27aee6e8642c1e59 | 3,651,311 |
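The function relies on a module-level cache and a `run_command_print_ready` helper that are not shown; a hedged sketch of the assumed surrounding state and a repeat call served from the cache.

cached_available = {}  # assumed module-level cache consulted by evaluate_available()

# Hypothetical calls: the predicate runs once, the second call is answered from the cache.
first = evaluate_available("server_types", "headless", "which Xvfb")
again = evaluate_available("server_types", "headless", "which Xvfb")
assert first == again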
import numpy as np

def dB_transform(R, metadata=None, threshold=None, zerovalue=None, inverse=False):
"""Methods to transform precipitation intensities to/from dB units.
Parameters
----------
R: array-like
Array of any shape to be (back-)transformed.
metadata: dict, optional
Metadata dictionary containing the transform, zerovalue and threshold
attributes as described in the documentation of
:py:mod:`pysteps.io.importers`.
threshold: float, optional
Optional value that is used for thresholding with the same units as R.
If None, the threshold contained in metadata is used.
If no threshold is found in the metadata,
a value of 0.1 is used as default.
zerovalue: float, optional
The value to be assigned to no rain pixels as defined by the threshold.
It is equal to the threshold - 1 by default.
inverse: bool, optional
If set to True, it performs the inverse transform. False by default.
Returns
-------
R: array-like
Array of any shape containing the (back-)transformed units.
metadata: dict
The metadata with updated attributes.
"""
R = R.copy()
if metadata is None:
if inverse:
metadata = {"transform": "dB"}
else:
metadata = {"transform": None}
else:
metadata = metadata.copy()
# to dB units
if not inverse:
if metadata["transform"] == "dB":
return R, metadata
if threshold is None:
threshold = metadata.get("threshold", 0.1)
zeros = R < threshold
# Convert to dB
R[~zeros] = 10.0 * np.log10(R[~zeros])
threshold = 10.0 * np.log10(threshold)
# Set value for zeros
if zerovalue is None:
zerovalue = threshold - 5 # TODO: set to a more meaningful value
R[zeros] = zerovalue
metadata["transform"] = "dB"
metadata["zerovalue"] = zerovalue
metadata["threshold"] = threshold
return R, metadata
# from dB units
elif inverse:
if metadata["transform"] != "dB":
return R, metadata
if threshold is None:
threshold = metadata.get("threshold", -10.0)
if zerovalue is None:
zerovalue = 0.0
R = 10.0 ** (R / 10.0)
threshold = 10.0 ** (threshold / 10.0)
R[R < threshold] = zerovalue
metadata["transform"] = None
metadata["threshold"] = threshold
metadata["zerovalue"] = zerovalue
return R, metadata | d0b68c10290dd1cd95e7c08cca4e9ec7a4131ccc | 3,651,312 |
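A small worked example of the forward transform, assuming NumPy and the function above: with the default 0.1 threshold, 2.0 becomes 10*log10(2.0) ≈ 3.01 dB, 0.5 becomes ≈ -3.01 dB, and values below the threshold get the default zero value 10*log10(0.1) - 5 = -15 dB.

import numpy as np

R = np.array([0.05, 0.5, 2.0])
R_db, meta = dB_transform(R)
print(R_db)               # approx [-15.  , -3.01,  3.01]
print(meta["threshold"])  # -10.0, i.e. 10 * log10(0.1)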
import typing
import os
def cut(
video: typing.Union[str, VideoObject],
output_path: str = None,
threshold: float = 0.95,
frame_count: int = 5,
compress_rate: float = 0.2,
target_size: typing.Tuple[int, int] = None,
offset: int = 3,
limit: int = None,
) -> typing.Tuple[VideoCutResult, str]:
"""
    cut the video, and get a series of pictures (with tags)
:param video: video path or object
:param output_path: output path (dir)
    :param threshold: float, 0-1, default to 0.95. decides whether a range is stable. larger => more unstable ranges
:param frame_count: default to 5, and finally you will get 5 frames for each range
:param compress_rate: before_pic * compress_rate = after_pic. default to 0.2
:param target_size: (100, 200)
:param offset:
        it will change the way to decide whether two ranges can be merged
        before: first_range.end == second_range.start
        after: first_range.end + offset >= second_range.start
    :param limit: ignore some ranges which are too short; 5 means ignore stable ranges whose length < 5
:return: tuple, (VideoCutResult, data_home)
"""
if isinstance(video, str):
video = VideoObject(video)
cutter = VideoCutter()
res = cutter.cut(video, compress_rate=compress_rate, target_size=target_size)
stable, unstable = res.get_range(threshold=threshold, limit=limit, offset=offset)
data_home = res.pick_and_save(stable, frame_count, to_dir=output_path)
res_json_path = os.path.join(
output_path or data_home, constants.CUT_RESULT_FILE_NAME
)
res.dump(res_json_path)
return res, data_home | 892092075a79923933f73805ca09cbbed4f14fc8 | 3,651,313 |
from argparse import ArgumentParser

def parse_args():
"""
Parses command line arguments
"""
parser = ArgumentParser(description="A multi-threaded gemini server")
parser.add_argument("-b", "--host", default=DEFAULT_HOST, help="Host to bind to")
parser.add_argument("-p", "--port", default=DEFAULT_PORT, help="Port to bind to")
parser.add_argument(
"-c", "--cert", default=DEFAULT_CERTFILE, help="SSL certificate in PEM format"
)
parser.add_argument(
"-k", "--key", default=DEFAULT_KEYFILE, help="SSL private key in PEM format"
)
parser.add_argument(
"-w", "--webroot", default=DEFAULT_WEBROOT, help="Webroot directory"
)
parser.add_argument(
"-q", "--queue", default=DEFAULT_QSIZE, help="Size of request queue"
)
parser.add_argument(
"-t", "--threads", default=DEFAULT_THREADS, help="Number of threads"
)
parser.add_argument(
"-u",
"--uid",
default=0,
type=int,
help="uid to use after loading SSL certificate",
)
parser.add_argument(
"-g",
"--gid",
default=0,
type=int,
help="gid to use after loading SSL certificate",
)
return parser.parse_args() | 05dec02ce0f243f46896917c2f25108e6f592bb5 | 3,651,314 |
def get_mapping_rules():
  """ Get mapping rules as defined in business_object.js
  Special cases:
  Audit has a direct mapping to Program with program_id
  Request has a direct mapping to Audit with audit_id
  Response has a direct mapping to Request with request_id
  DocumentationResponse has a direct mapping to Request with request_id
  DocumentationResponse has normal mappings with all other objects in
  the mapping modal
  Section has a direct mapping to Standard/Regulation/Policy with directive_id
  Anything can be mapped to a Request; the frontend shows the Audit instead
  """
  def filter(object_list):
    """ remove all lower case items since real objects are CamelCase """
return set([item for item in object_list if item != item.lower()])
# these rules are copy pasted from
# src/ggrc/assets/javascripts/apps/business_objects.js line: 276
business_object_rules = {
"Program": "Issue ControlAssessment Regulation Contract Policy Standard Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Audit Request", # noqa # removed Person because Programs have a "Mapped" attribute for people mappings
"Audit": "Issue ControlAssessment Request history Person program program_controls Request", # noqa
"Issue": "ControlAssessment Control Audit Program Regulation Contract Policy Standard Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Issue Request", # noqa
"ControlAssessment": "Issue Objective Program Regulation Contract Policy Standard Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa
"Regulation": "Program Issue ControlAssessment Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Request", # noqa
"Policy": "Program Issue ControlAssessment Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Request", # noqa
"Standard": "Program Issue ControlAssessment Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Request", # noqa
"Contract": "Program Issue ControlAssessment Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Section Request", # noqa
"Clause": "Contract Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Section Policy Regulation Standard Request", # noqa
"Section": "Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Policy Regulation Standard Contract Clause Request", # noqa
"Objective" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Request", # noqa
"Control" : "Issue ControlAssessment Request Program Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa
"Person" : "Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Audit Request", # noqa
"OrgGroup" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa
"Vendor" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa
"System" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa
"Process" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa
"DataAsset" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa
"AccessGroup" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa
"Product" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa
"Project" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa
"Facility" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request", # noqa
"Market" : "Program Issue ControlAssessment Regulation Contract Policy Standard Section Clause Objective Control System Process DataAsset AccessGroup Product Project Facility Market OrgGroup Vendor Person Audit Request" # noqa
}
split_rules = {k: v.split() for k, v in business_object_rules.items()}
filtered_rules = {k: filter(v) for k, v in split_rules.items()}
return filtered_rules | 59b94070d3fe35eca8c356162caf9969c9ea47d0 | 3,651,315 |
import os
def get_src_hash(sls_config, path):
"""Get hash(es) of serverless source."""
funcs = sls_config['functions']
if sls_config.get('package', {}).get('individually'):
hashes = {key: get_hash_of_files(os.path.join(path,
os.path.dirname(funcs[key].get('handler'))))
for key in funcs.keys()}
else:
directories = []
for (key, value) in funcs.items():
func_path = {'path': os.path.dirname(value.get('handler'))}
if func_path not in directories:
directories.append(func_path)
hashes = {sls_config['service']: get_hash_of_files(path, directories)}
return hashes | 0f2d0d98cb55f8d587119408b20caf1b6594ae2f | 3,651,316 |
import os
from pathlib import Path
import zipfile
def extract_zip(zip_path, ret_extracted_path=False):
"""Extract a zip and delete the .zip file."""
dir_parents = os.path.dirname(zip_path)
dir_name = Path(zip_path).stem
extracted_path = os.path.join(dir_parents, dir_name, '')
if ret_extracted_path:
return extracted_path
with zipfile.ZipFile(zip_path, 'r') as zip_ref:
zip_ref.extractall(dir_parents)
os.remove(zip_path)
print(f"Extracted '{Path(zip_path).name}' to '{extracted_path}'.") | 6ff7691ed54ce3941941b3e014f92ee362237b7c | 3,651,317 |
from aiohttp import web

async def get_pipeline(request: web.Request, organization, pipeline) -> web.Response:
"""get_pipeline
Retrieve pipeline details for an organization
:param organization: Name of the organization
:type organization: str
:param pipeline: Name of the pipeline
:type pipeline: str
"""
return web.Response(status=200) | 0bbbe26111542173fda05fe8e3beccec99b6bfe8 | 3,651,318 |
def add_attachment(manager, issue, file):
    """
    Replaces jira's 'add_attachment' method until this issue is properly fixed:
    https://github.com/shazow/urllib3/issues/303
    We also need to limit filenames to 252 characters.
    :param manager: [jira.JIRA instance]
    :param issue: [jira.JIRA.resources.Issue instance]
    :param file: [file-like object with 'name' and 'file' attributes]
    :return: [jira.JIRA.resources.Attachment instance]
    """
filename = _get_filename(file.name)
return _upload_file(manager, issue, file.file.read(), filename) | 19d2fb57fbd116e27328c075a2899425243856b2 | 3,651,319 |
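`_get_filename` and `_upload_file` are private helpers that are not shown; a hedged sketch of what the filename helper might look like, given the 252-character limit mentioned in the docstring (the truncation policy is an assumption).

import os

def _get_filename(name, limit=252):
    """Hypothetical helper: keep the extension and truncate the stem to fit the limit."""
    stem, ext = os.path.splitext(os.path.basename(name))
    return stem[:max(1, limit - len(ext))] + ext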
import numpy as np

def _ols_iter(inv_design, sig, min_diffusivity):
""" Helper function used by ols_fit_dki - Applies OLS fit of the diffusion
kurtosis model to single voxel signals.
Parameters
----------
inv_design : array (g, 22)
Inverse of the design matrix holding the covariants used to solve for
the regression coefficients.
sig : array (g,)
Diffusion-weighted signal for a single voxel data.
min_diffusivity : float
Because negative eigenvalues are not physical and small eigenvalues,
much smaller than the diffusion weighting, cause quite a lot of noise
in metrics such as fa, diffusivity values smaller than
`min_diffusivity` are replaced with `min_diffusivity`.
Returns
-------
dki_params : array (27,)
All parameters estimated from the diffusion kurtosis model.
Parameters are ordered as follows:
1) Three diffusion tensor's eigenvalues
2) Three lines of the eigenvector matrix each containing the first,
second and third coordinates of the eigenvector
3) Fifteen elements of the kurtosis tensor
"""
# DKI ordinary linear least square solution
log_s = np.log(sig)
result = np.dot(inv_design, log_s)
# Extracting the diffusion tensor parameters from solution
DT_elements = result[:6]
evals, evecs = decompose_tensor(from_lower_triangular(DT_elements),
min_diffusivity=min_diffusivity)
# Extracting kurtosis tensor parameters from solution
MD_square = (evals.mean(0))**2
KT_elements = result[6:21] / MD_square
# Write output
dki_params = np.concatenate((evals, evecs[0], evecs[1], evecs[2],
KT_elements), axis=0)
return dki_params | da55a73fff02f2088b77d21a4b0a7a7308b0c855 | 3,651,320 |
from collections import Counter
from operator import attrgetter
def unarchive_collector(collector):
"""
This code is copied from `Collector.delete` method
"""
# sort instance collections
for model, instances in collector.data.items():
collector.data[model] = sorted(instances, key=attrgetter("pk"))
# if possible, bring the models in an order suitable for databases that
# don't support transactions or cannot defer constraint checks until the
# end of a transaction.
collector.sort()
# number of objects deleted for each model label
unarchived_counter = Counter()
with transaction.atomic(using=collector.using, savepoint=False):
# reverse instance collections
for instances in collector.data.values():
instances.reverse()
# delete instances
for model, instances in collector.data.items():
if not is_archivable_cls(model):
continue
pk_list = [obj.pk for obj in instances]
queryset = model.all_objects.filter(pk__in=pk_list)
count = queryset.update(archived_at=None)
unarchived_counter[model._meta.label] += count
if not model._meta.auto_created:
for obj in instances:
                    # use post_unarchive instead of post_delete
signals.post_unarchive.send(
sender=model, instance=obj, using=collector.using
)
for obj in instances:
setattr(obj, "archived_at", None)
return sum(unarchived_counter.values()), dict(unarchived_counter) | 3c0a05d31fafac34e0503bd5dd154c9201e7e94a | 3,651,321 |
from typing import List
from typing import Optional
from typing import Union
from typing import Dict
def remove_tag_from_issues(
issues: List[GitHubIssue],
tag: str,
scope: str = "all",
ignore_list: Optional[Union[List[int], List[Dict[str, int]]]] = None,
) -> List[GitHubIssue]:
"""remove_tag_from_issues
Removes all of a tag from the given issues.
If scoped to just issues, we still check the first comment as this
comment is the issue body.
"""
if ignore_list is None:
ignore_list = [-1]
for index, issue in enumerate(issues):
if scope in ("all", "issues"):
if tag in issue.metadata:
# If the issue is one we should ignore, continue.
# This is usually due to the issue being empty.
if index in ignore_list:
continue
issue.metadata.remove(tag)
if tag in issue.all_comments[0].tags:
issue.all_comments[0].tags.remove(tag)
if scope in ("all", "comments"):
for comment in issue.all_comments:
if tag in comment.tags:
# If the comment is one we should ignore, continue.
# This is usually due to the comment being empty.
if {"issue": index, "comment": comment.number} in ignore_list:
continue
comment.tags.remove(tag)
return issues | c8709f7e9a01f4c5320748ca181a3a813a9e754f | 3,651,322 |
from datetime import datetime
def days_remaining_context_processor(request):
"""Context processor. Adds days_remaining to context of every view."""
now = datetime.now()
return {'days_remaining' : (wedding_date - now).days} | 1aa9deb40b54627044926820921c4e5550f2050c | 3,651,323 |
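For the processor to run on every request it has to be registered in the Django template settings; a minimal sketch, with `myapp.context_processors` standing in for the real module path.

# settings.py (sketch; the module path is hypothetical)
TEMPLATES = [{
    "BACKEND": "django.template.backends.django.DjangoTemplates",
    "APP_DIRS": True,
    "OPTIONS": {
        "context_processors": [
            "django.template.context_processors.request",
            "myapp.context_processors.days_remaining_context_processor",
        ],
    },
}]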
import dateparser
from datetime import datetime, date, time, timedelta
def convert_time_range(trange, tz=None):
"""
Converts freeform time range into a tuple of localized
timestamps (start, end).
If `tz` is None, uses settings.TIME_ZONE for localizing
time range.
:param trange: - string representing time-range. The options
are:
* string in format 'x1|x2', where x1 and x2 are start
and end date in the format YYYYmmdd[THH:MM:SS.mmmmm]
(in fact, any other format would work well, the function
tries its best to determine format and parse timestamps)
* string in format 'x1|x2', where x1 and x2 are given in
human readable format, as described in the dateparser doc:
(see https://github.com/scrapinghub/dateparser)
* one of the following keywords:
'today', 'yesterday', 'this week', 'last week',
'this month', 'last month', 'this year', 'last year'
:param tz: - timezone (optional). Either string representing
a timezone (e.g. "America/Lima") or a pytz object.
:return: tuple of two TZ-aware timestamps.
"""
# Form time range as a tuple of naive datetimes.
assert isinstance(trange, str), "Value is not a string: %s" % trange
trange = trange.strip().lower()
_time = lambda d: datetime.combine(d, time())
today = date.today()
if trange == 'today':
ts_from = _time(today)
ts_to = ts_from + timedelta(days=1, seconds=-1)
elif trange == 'yesterday':
ts_from = _time(today+timedelta(days=-1))
ts_to = ts_from + timedelta(days=1, seconds=-1)
elif trange == 'this week':
ts_from = _time(today-timedelta(days=today.weekday()))
ts_to = ts_from + timedelta(days=7, seconds=-1)
elif trange == 'last week':
this_week = _time(today-timedelta(days=today.weekday()))
ts_to = this_week + timedelta(seconds=-1)
ts_from = _time(ts_to - timedelta(days=ts_to.weekday()))
elif trange == 'this month':
ts_from = _time(today.replace(day=1))
next_month = ts_from.replace(day=28) + timedelta(days=4)
this_month_last_day = next_month - timedelta(days=next_month.day)
ts_to = this_month_last_day + timedelta(days=1, seconds=-1)
elif trange == 'last month':
ts_to = _time(today.replace(day=1)) + timedelta(seconds=-1)
ts_from = _time(ts_to.replace(day=1))
elif trange == 'this year':
ts_from = _time(today.replace(month=1, day=1))
this_year_last_day = _time(today.replace(month=12, day=31))
ts_to = this_year_last_day + timedelta(days=1, seconds=-1)
elif trange == 'last year':
ts_to = _time(today.replace(month=1, day=1)) + timedelta(seconds=-1)
ts_from = _time(ts_to.replace(month=1, day=1))
else:
try:
ts_from, ts_to = [dateparser.parse(t) for t in trange.split('|')]
except ValueError:
raise MalformedValueError(
'Cannot parse datetime range: wrong format!\n' + \
'Datetime range should be two date[time] values divided by vertical bar (|)'
)
if (ts_from is None) or (ts_to is None):
raise MalformedValueError('Cannot parse datetime range: wrong format!')
# Stretch date values (without time) to the end of day
# (ignore microseconds).
if ts_to.minute == 0 and ts_to.second == 0:
ts_to += timedelta(days=1, seconds=-1)
# Figure out desired timezone.
time_zone = get_tz(tz)
# Add timezone info to the result.
ts_from = ts_from.replace(tzinfo=time_zone)
ts_to = ts_to.replace(tzinfo=time_zone)
if ts_from > ts_to:
raise MalformedValueError(
'Start date cannot be greater than the end date!'
)
return (ts_from, ts_to) | 64c24c3011418e93111ec856acdd4b6a94abd425 | 3,651,324 |
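Two hedged usage examples, assuming `dateparser` is installed and `get_tz` resolves time-zone names as the docstring describes.

# Explicit 'start|end' range, localized to America/Lima.
start, end = convert_time_range("20200101|20200131", tz="America/Lima")

# Keyword form, localized to settings.TIME_ZONE.
start, end = convert_time_range("last week")
print(start.tzinfo, end - start)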
from collections import defaultdict

def process_waiting_time(kernel_data, node_id, phase_id, norm_vehs=False):
"""Processes batched waiting time computation"""
cycle_time = 60
def fn(x):
if (x / 13.89) < 0.1:
return 1.0
else:
return 0.0
wait_times = []
for t in kernel_data:
qt = defaultdict(lambda : 0)
for veh in t[node_id][phase_id]:
key = (veh.edge_id, veh.lane)
qt[key] += fn(veh.speed)
if len(qt) == 0:
wait_times.append(0.0)
else:
if norm_vehs:
wait_times.append(
sum([v / MAX_VEHS_PER_LANE[k] for k, v in qt.items()]))
else:
wait_times.append(sum(qt.values()))
ret = round(sum(wait_times) / cycle_time, 2)
return ret | 4489205a8d3ba58601875a7dee1b086fd7b639af | 3,651,325 |
def get_version():
""" Do this so we don't have to import lottery_ticket_pruner which requires keras which cannot be counted on
to be installed when this package gets installed.
"""
with open('lottery_ticket_pruner/__init__.py', 'r') as f:
for line in f.readlines():
if line.startswith('__version__'):
version = line.split('=')[1].strip().replace('"', '').replace('\'', '')
return version
return '' | 0ab355110918e1c92b056932ba1d03768826c4f2 | 3,651,326 |
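This is typically called from setup.py so the version can be read without importing the package; a minimal sketch with placeholder metadata.

# setup.py (sketch)
from setuptools import setup, find_packages

setup(
    name="lottery_ticket_pruner",
    version=get_version(),  # reads __version__ from lottery_ticket_pruner/__init__.py
    packages=find_packages(),
)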
import time
import numpy as np
import pandas as pd
import lightgbm as lgb
import xgboost as xgb
from sklearn import metrics
from catboost import CatBoostRegressor
def train_model_regression(X, X_test, y, params, folds, model_type='lgb', eval_metric='mae', columns=None,
plot_feature_importance=False, model=None,
verbose=10000, early_stopping_rounds=200, n_estimators=50000):
"""
A function to train a variety of regression models.
Returns dictionary with oof predictions, test predictions, scores and, if necessary, feature importances.
:params: X - training data, can be pd.DataFrame or np.ndarray (after normalizing)
:params: X_test - test data, can be pd.DataFrame or np.ndarray (after normalizing)
:params: y - target
:params: folds - folds to split data
:params: model_type - type of model to use
:params: eval_metric - metric to use
:params: columns - columns to use. If None - use all columns
:params: plot_feature_importance - whether to plot feature importance of LGB
:params: model - sklearn model, works only for "sklearn" model type
"""
columns = X.columns if columns is None else columns
X_test = X_test[columns]
# to set up scoring parameters
metrics_dict = {'mae': {'lgb_metric_name': 'mae',
'catboost_metric_name': 'MAE',
'sklearn_scoring_function': metrics.mean_absolute_error},
'group_mae': {'lgb_metric_name': 'mae',
'catboost_metric_name': 'MAE',
'scoring_function': group_mean_log_mae},
'mse': {'lgb_metric_name': 'mse',
'catboost_metric_name': 'MSE',
'sklearn_scoring_function': metrics.mean_squared_error}
}
result_dict = {}
# out-of-fold predictions on train data
oof = np.zeros(len(X))
# averaged predictions on train data
prediction = np.zeros(len(X_test))
# list of scores on folds
scores = []
feature_importance = pd.DataFrame()
# split and train on folds
for fold_n, (train_index, valid_index) in enumerate(folds.split(X)):
print(f'Fold {fold_n + 1} started at {time.ctime()}')
if type(X) == np.ndarray:
X_train, X_valid = X[columns][train_index], X[columns][valid_index]
y_train, y_valid = y[train_index], y[valid_index]
else:
X_train, X_valid = X[columns].iloc[train_index], X[columns].iloc[valid_index]
y_train, y_valid = y.iloc[train_index], y.iloc[valid_index]
if model_type == 'lgb':
model = lgb.LGBMRegressor(**params, n_estimators=n_estimators, n_jobs=-1)
model.fit(X_train, y_train,
eval_set=[(X_train, y_train), (X_valid, y_valid)],
eval_metric=metrics_dict[eval_metric]['lgb_metric_name'],
verbose=verbose, early_stopping_rounds=early_stopping_rounds)
y_pred_valid = model.predict(X_valid)
y_pred = model.predict(X_test, num_iteration=model.best_iteration_)
if model_type == 'xgb':
train_data = xgb.DMatrix(data=X_train, label=y_train, feature_names=X.columns)
valid_data = xgb.DMatrix(data=X_valid, label=y_valid, feature_names=X.columns)
watchlist = [(train_data, 'train'), (valid_data, 'valid_data')]
model = xgb.train(dtrain=train_data, num_boost_round=20000, evals=watchlist, early_stopping_rounds=200,
verbose_eval=verbose, params=params)
y_pred_valid = model.predict(xgb.DMatrix(X_valid, feature_names=X.columns),
ntree_limit=model.best_ntree_limit)
y_pred = model.predict(xgb.DMatrix(X_test, feature_names=X.columns), ntree_limit=model.best_ntree_limit)
if model_type == 'sklearn':
model = model
model.fit(X_train, y_train)
y_pred_valid = model.predict(X_valid).reshape(-1, )
score = metrics_dict[eval_metric]['sklearn_scoring_function'](y_valid, y_pred_valid)
print(f'Fold {fold_n}. {eval_metric}: {score:.4f}.')
print('')
y_pred = model.predict(X_test).reshape(-1, )
if model_type == 'cat':
model = CatBoostRegressor(iterations=20000, eval_metric=metrics_dict[eval_metric]['catboost_metric_name'],
**params,
loss_function=metrics_dict[eval_metric]['catboost_metric_name'])
model.fit(X_train, y_train, eval_set=(X_valid, y_valid), cat_features=[], use_best_model=True,
verbose=False)
y_pred_valid = model.predict(X_valid)
y_pred = model.predict(X_test)
oof[valid_index] = y_pred_valid.reshape(-1, )
if eval_metric != 'group_mae':
scores.append(metrics_dict[eval_metric]['sklearn_scoring_function'](y_valid, y_pred_valid))
else:
scores.append(metrics_dict[eval_metric]['scoring_function'](y_valid, y_pred_valid, X_valid['type']))
prediction += y_pred
if model_type == 'lgb' and plot_feature_importance:
# feature importance
fold_importance = pd.DataFrame()
fold_importance["feature"] = columns
fold_importance["importance"] = model.feature_importances_
fold_importance["fold"] = fold_n + 1
feature_importance = pd.concat([feature_importance, fold_importance], axis=0)
prediction /= folds.n_splits
print('CV mean score: {0:.4f}, std: {1:.4f}.'.format(np.mean(scores), np.std(scores)))
result_dict['oof'] = oof
result_dict['prediction'] = prediction
result_dict['scores'] = scores
# if model_type == 'lgb':
# if plot_feature_importance:
# feature_importance["importance"] /= folds.n_splits
# cols = feature_importance[["feature", "importance"]].groupby("feature").mean().sort_values(
# by="importance", ascending=False)[:50].index
#
# best_features = feature_importance.loc[feature_importance.feature.isin(cols)]
#
# plt.figure(figsize=(16, 12));
# sns.barplot(x="importance", y="feature", data=best_features.sort_values(by="importance", ascending=False));
# plt.title('LGB Features (avg over folds)');
#
# result_dict['feature_importance'] = feature_importance
return result_dict | 586e82a1efa42e41b0d0dfdddaf9d6d0afdd7bb4 | 3,651,327 |
def extract(d, keys):
    """
    Extract a value from a dict by key priority.
    :param d: The dict.
    :param keys: A list of keys, in order of priority.
    :return: The value of the highest-priority key that has a truthy value, or None.
    """
if not d:
return
for key in keys:
tmp = d.get(key)
if tmp:
return tmp | 9985e2f1079088251429fa26611fa6e15b920622 | 3,651,328 |
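A tiny usage example of the priority lookup above; the empty string is falsy, so the second key wins here.

record = {"display_name": "", "name": "jupyter", "id": 42}
print(extract(record, ["display_name", "name", "id"]))  # -> "jupyter"
print(extract({}, ["name"]))                            # -> None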
import os
def create_file_list(files, suffices, file_type, logger, root_path=None):
###############################################################################
"""Create and return a master list of files from <files>.
<files> is either a comma-separated string of pathnames or a list.
If a pathname is a directory, all files with extensions in <suffices>
are included.
Wildcards in a pathname are expanded.
<suffices> is a list of allowed file types. Filenames in <files>
with an allowed suffix will be added to the master list.
Filenames with a '.txt' suffix will be parsed to look for allowed
filenames.
<file_type> is a description of the allowed file types.
<logger> is a logger used to print warnings (unrecognized filename types)
and debug messages.
If <root_path> is not None, it is used to create absolute paths for
<files>, otherwise, the current working directory is used.
"""
master_list = list()
txt_files = list() # Already processed txt files
pathname = None
if isinstance(files, str):
file_list = [x.strip() for x in files.split(',') if x.strip()]
elif isinstance(files, (list, tuple)):
file_list = files
else:
raise ParseInternalError("Bad input, <files> = {}".format(files))
# end if
if root_path is None:
root_path = os.getcwd()
# end if
master_list, errors = _create_file_list_int(file_list, suffices, file_type,
logger, txt_files, pathname,
root_path, master_list)
if errors:
emsg = 'Error processing list of {} files:\n {}'
raise CCPPError(emsg.format(file_type, '\n '.join(errors)))
# end if
return master_list | b582d1d1bc2c02d5e68bffad88a082be607813a4 | 3,651,329 |
import os
def get_raster(layer, bbox, path=None, update_cache=False,
check_modified=False, mosaic=False):
"""downloads National Elevation Dataset raster tiles that cover the given bounding box
for the specified data layer.
Parameters
----------
layer : str
dataset layer name. (see get_available_layers for list)
bbox : (sequence of float|str)
        bounding box, in geographic coordinates, of the area to download tiles for,
        in the format (min longitude, min latitude, max longitude, max latitude)
path : ``None`` or path
if ``None`` default path will be used
update_cache: ``True`` or ``False`` (default)
if ``False`` and output file already exists use it.
check_modified: ``True`` or ``False`` (default)
if tile exists in path, check if newer file exists online and download if available.
mosaic: ``True`` or ``False`` (default)
if ``True``, mosaic and clip downloaded tiles to the extents of the bbox provided. Requires
rasterio package and GDAL.
Returns
-------
raster_tiles : geojson FeatureCollection
metadata as a FeatureCollection. local url of downloaded data is in feature['properties']['file']
"""
_check_layer(layer)
raster_tiles = _download_tiles(get_raster_availability(layer, bbox), path=path,
check_modified=check_modified)
if mosaic:
if path is None:
path = os.path.join(util.get_ulmo_dir(), DEFAULT_FILE_PATH)
util.mkdir_if_doesnt_exist(os.path.join(path, 'by_boundingbox'))
xmin, ymin, xmax, ymax = [float(n) for n in bbox]
uid = util.generate_raster_uid(layer, xmin, ymin, xmax, ymax)
output_path = os.path.join(path, 'by_boundingbox', uid + '.tif')
if os.path.isfile(output_path) and not update_cache:
return output_path
raster_files = [tile['properties']['file'] for tile in raster_tiles['features']]
util.mosaic_and_clip(raster_files, xmin, ymin, xmax, ymax, output_path)
return [output_path]
return raster_tiles | fa34ef0b6d07ba77b93c700cf89b8bd9f568b132 | 3,651,330 |
def edit_distance(y, y_hat):
"""Edit distance between two sequences.
Parameters
----------
y : str
The groundtruth.
y_hat : str
The recognition candidate.
    Returns
    -------
    int
        The minimum number of symbol edits (i.e. insertions,
        deletions or substitutions) required to change one
        word into the other.
    """
return _edit_distance_matrix(y, y_hat)[-1, -1] | 42e9ee4169848cd2fc491e6e99b67f96e59dd95b | 3,651,331 |
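`_edit_distance_matrix` is not shown; a minimal sketch of the standard Levenshtein DP table it presumably builds (the helper name and exact behaviour are assumptions), whose bottom-right cell is the distance returned above.

import numpy as np

def _edit_distance_matrix(y, y_hat):
    """Hypothetical helper: (len(y)+1) x (len(y_hat)+1) Levenshtein DP table."""
    m, n = len(y), len(y_hat)
    d = np.zeros((m + 1, n + 1), dtype=int)
    d[:, 0] = np.arange(m + 1)
    d[0, :] = np.arange(n + 1)
    for i in range(1, m + 1):
        for j in range(1, n + 1):
            cost = 0 if y[i - 1] == y_hat[j - 1] else 1
            d[i, j] = min(d[i - 1, j] + 1,         # deletion
                          d[i, j - 1] + 1,         # insertion
                          d[i - 1, j - 1] + cost)  # substitution
    return d

print(_edit_distance_matrix("kitten", "sitting")[-1, -1])  # -> 3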
import numpy as np
from operator import itemgetter

def sort_predictions(classes, predictions, bboxes):
""" Sorts predictions from most probable to least, generate extra metadata about them. """
results = []
for idx, pred in enumerate(predictions):
results.append({
"class_idx": np.argmax(pred),
"class": classes[np.argmax(pred)],
"prob": pred[np.argmax(pred)],
"fname": get_region_filename(idx),
"coords": bboxes[idx],
})
results.sort(key=itemgetter("prob"), reverse=True)
return results | 1938bb3c1b301d15425a6574e66e136cdd43a867 | 3,651,332 |
def task_bootstrap_for_adming():
    """
    Return a doit task definition that runs bootstrap_for_adming.py on the target node.
    """
return {'actions': [(clushUtils.exec_script, [targetNode, "bootstrap_for_adming.py"],
{
'dependsFiles': [".passwords", f"{homeDir}/.ssh/id_rsa.pub"],
'user':"root",
'manageEnv': False,
'dependsPkgs':['py3-pip', 'py3-psutil', 'curl'],
'logOutput': 'bootstrap_for_adming.log'
}
)
],
'targets': [f'{logDir}/bootstrap_for_adming.log.{targetNode}'],
'file_dep': ["deployConfig.py"],
} | 91180c0b8b9a497488d7b4d1515088f133f5626b | 3,651,333 |
from typing import List
from typing import Tuple
import numpy as np
import numpy.typing as npt
def reassign_clustered(
knn: List[Tuple[npt.NDArray, npt.NDArray]],
clusters: List[Tuple[str, int]],
min_sim_threshold: float = 0.6,
n_iter: int = 20,
epsilon: float = 0.05,
) -> List[Tuple[str, int]]:
"""Reassigns companies to new clusters based on the average similarity to
nearest neighbours belonging to clusters.
Args:
knn: A list of pairs of nearest neighbour index IDs and their
similarities.
clusters: A list of cluster ID and org ID pairs.
min_sim_threshold: Minimum cosine similarity for a cluster
reassignment to be accepted.
        n_iter: Number of times to iteratively reassign companies to clusters.
epsilon: Minimum fraction of companies required for an iteration of
reassignment to happen. If the fraction of companies being
reassigned falls below this value, then there will be no more
reassignment iterations, even if n_iter has not been reached.
Returns:
clusters: A list of reassigned cluster ID and org ID pairs.
"""
org_ids = [c[1] for c in clusters]
shift = epsilon
complete = 0
while (shift >= epsilon) and (n_iter > complete):
index_id_cluster_lookup = np.array([c[0] for c in clusters])
changed = 0
_clusters = []
agg_clusters = []
agg_cluster_sims = []
for org_id, (knn_ids, sims) in zip(org_ids, knn):
knn_ids, sims, source_id = decompose_knn(
knn_ids,
sims,
source=True,
)
knn_cluster_ids = index_id_cluster_lookup[knn_ids]
unique_clusters, agg_sims = mean_cluster_similarities(knn_cluster_ids, sims)
best_cluster, best_sim = get_best_cluster(unique_clusters, agg_sims)
original_cluster = index_id_cluster_lookup[source_id]
same_cluster = best_cluster == original_cluster
if same_cluster:
_clusters.append((original_cluster, org_id))
else:
if best_sim >= min_sim_threshold:
_clusters.append((best_cluster, org_id))
changed += 1
else:
_clusters.append((original_cluster, org_id))
agg_clusters.append(unique_clusters)
agg_cluster_sims.append(agg_sims)
clusters = _clusters
complete += 1
shift = changed / len(knn)
return clusters, np.array(agg_clusters), np.array(agg_cluster_sims) | e90c61459cfeb8d906f155219cd4b758f4b8b5fe | 3,651,334 |
from scipy import special as sc

def boys(n, t):
"""Boys function for the calculation of coulombic integrals.
Parameters
----------
n : int
Order of boys function
t : float
Varible for boys function.
Raises
------
TypeError
If boys function order is not an integer.
ValueError
        If the Boys function order n is not a non-negative number.
"""
if not isinstance(n, int):
raise TypeError("Boys function order n must be an integer")
if n < 0:
        raise ValueError("Boys function order n must be a non-negative number")
    if not isinstance(t, float):
        raise TypeError("Boys function variable t must be a float")
return sc.hyp1f1(n+0.5,n+1.5,-t)/(2.0*n+1.0) | 1232d53898abfd032e570ad7697379f8359a566f | 3,651,335 |
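A quick sanity check against the closed forms F_n(0) = 1/(2n+1) and F_0(t) = (1/2)*sqrt(pi/t)*erf(sqrt(t)), assuming SciPy is installed.

import numpy as np
from scipy.special import erf

assert abs(boys(0, 0.0) - 1.0) < 1e-12        # F_0(0) = 1
assert abs(boys(2, 0.0) - 1.0 / 5.0) < 1e-12  # F_n(0) = 1/(2n+1)

t = 0.5
closed_form = 0.5 * np.sqrt(np.pi / t) * erf(np.sqrt(t))
assert abs(boys(0, t) - closed_form) < 1e-10  # both ≈ 0.8556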
from scipy import stats

def get_diameter_by_sigma(sigma, proba):
""" Get diameter of nodule given sigma of normal distribution and probability of diameter coverage area.
Transforms sigma parameter of normal distribution corresponding to cancerous nodule
to its diameter using probability of diameter coverage area.
Parameters
----------
sigma : float
square root of normal distribution variance.
proba : float
probability of diameter coverage area.
Returns
-------
float
equivalent diameter.
"""
return 2 * sigma * stats.norm.ppf((1 + proba) / 2) | 0cd32d685b21b71cbae06a0cfb48f226209eff44 | 3,651,336 |
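A worked example, assuming `scipy.stats` is imported as above: for proba = 0.997 the two-sided quantile is ppf((1 + 0.997) / 2) ≈ 2.97, roughly the familiar 3-sigma rule, so sigma = 5 gives a diameter of about 2 * 5 * 2.97 ≈ 29.7.

from scipy import stats

print(get_diameter_by_sigma(sigma=5.0, proba=0.997))  # ≈ 29.7
print(2 * 5.0 * stats.norm.ppf((1 + 0.997) / 2))      # same value, written out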
import termcolor
def _colorize(val, color):
"""Colorize a string using termcolor or colorama.
If any of them are available.
"""
if termcolor is not None:
val = termcolor.colored(val, color)
elif colorama is not None:
val = "{}{}{}".format(TERMCOLOR2COLORAMA[color], val, colorama.Style.RESET_ALL)
return val | 77743f99fd845b1f8450c4bd93a52563e7c4c313 | 3,651,337 |
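The `is not None` checks imply both libraries are imported optionally; a hedged sketch of the assumed module preamble, including one possible TERMCOLOR2COLORAMA mapping (the exact entries are assumptions).

try:
    import termcolor
except ImportError:
    termcolor = None
try:
    import colorama
except ImportError:
    colorama = None

# Hypothetical mapping from termcolor colour names to colorama codes.
TERMCOLOR2COLORAMA = {
    "red": colorama.Fore.RED if colorama else "",
    "green": colorama.Fore.GREEN if colorama else "",
    "yellow": colorama.Fore.YELLOW if colorama else "",
}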
from pathlib import Path
def get_output_filename(output_folder: str, repository_type: str,
repository_name: str, filename: str) -> Path:
"""Returns the output filename for the file fetched from a repository."""
return (
Path(output_folder) / Path(repository_type.lower())
/ Path(Path(repository_name).name) / Path(Path(filename).name)
) | 23b806f98265b45b799dbcc177760d5ceb8248fb | 3,651,338 |
def get_data(cpe):
"""collect data from ser_dev
single value of z-accel"""
cpe.reset_input_buffer()
next = cpe.readline()
light = (float(next.decode("ascii"))) # TODO wrap in TRY?
return light | 7b736420ce5de98ad06d7196866097a7c370833f | 3,651,339 |
def wave_exist_2d_full_v2(b=.8):
"""
plot zeros of -nu1 + G(nu1,nu2) and -nu2 + G(nu2,nu1)
as a function of g
use accurate fourier series
"""
# get data
# nc1 bifurcation values
bif = np.loadtxt('twod_wave_exist_br1.dat')
#bif2 = np.loadtxt('twod_wave_exist_br2.dat')
bif_diag1 = np.loadtxt('twod_wave_exist_diag1.dat')
bif_diag2 = np.loadtxt('twod_wave_exist_diag2.dat')
# clean
bifx,bify = clean(bif[:,3],bif[:,7],tol=.47)
bifx2,bify2 = clean(bif[:,3],bif[:,8],tol=.47)
bif_diag1x,bif_diag1y = clean(bif_diag1[:,0],np.abs(bif_diag1[:,1]),tol=.2)
bif_diag2x,bif_diag2y = clean(bif_diag2[:,0],np.abs(bif_diag2[:,1]),tol=.2)
# remove nans for calculating minima (usually nans are taken to be max/min vals, which is bad)
bifx_nonan = bifx[(~np.isnan(bifx))*(~np.isnan(bify))]
bify_nonan = bify[(~np.isnan(bifx))*(~np.isnan(bify))]
bifx2_nonan = bifx2[(~np.isnan(bifx2))*(~np.isnan(bify2))]
bify2_nonan = bify2[(~np.isnan(bifx2))*(~np.isnan(bify2))]
bif_diag1x_nonan = bif_diag1x[(~np.isnan(bif_diag1x))*(~np.isnan(bif_diag1y))]
bif_diag1y_nonan = bif_diag1y[(~np.isnan(bif_diag1x))*(~np.isnan(bif_diag1y))]
bif_diag2x_nonan = bif_diag2x[(~np.isnan(bif_diag2x))*(~np.isnan(bif_diag2y))]
bif_diag2y_nonan = bif_diag2y[(~np.isnan(bif_diag2x))*(~np.isnan(bif_diag2y))]
fig = plt.figure(figsize=(10,5))
ax1 = fig.add_subplot(121, projection='3d')
ax2 = fig.add_subplot(122)
plane1_z = .895
plane2_z = 1.17
# get plane intersection idx
bifx_int_p1 = np.argmin(np.abs(bifx_nonan-plane1_z))
bifx_int_p2 = np.argmin(np.abs(bifx_nonan-plane2_z))
bifx2_int_p1 = np.argmin(np.abs(bifx2_nonan-plane1_z))
bifx2_int_p2 = np.argmin(np.abs(bifx2_nonan-plane2_z))
bif_diagx_int_p1 = np.argmin(np.abs(bif_diag1x_nonan-plane1_z))
bif_diagx_int_p2 = np.argmin(np.abs(bif_diag1x_nonan-plane2_z))
bif_diagx2_int_p1 = np.argmin(np.abs(bif_diag2x_nonan-plane1_z))
bif_diagx2_int_p2 = np.argmin(np.abs(bif_diag2x_nonan-plane2_z))
## plot curves in 3d
# plot off diagonal and axial curves
v1a = bify2[(bify>=0)*(bify2>=0)*(bify<=1)*(bify2<=1)*(bifx<=2)]
v2a = bify[(bify>=0)*(bify2>=0)*(bify<=1)*(bify2<=1)*(bifx<=2)]
ga = bifx[(bify>=0)*(bify2>=0)*(bify<=1)*(bify2<=1)*(bifx<=2)]
#v1b = bif_diag1y[(bif_diag1y>=0)*(bif_diag2y>=0)*(bif_diag1y<=1)*(bif_diag2y<=1)*(bif_diag1x<=2)]
#v2b = bif_diag1y[(bif_diag1y>=0)*(bif_diag2y>=0)*(bif_diag1y<=1)*(bif_diag2y<=1)*(bif_diag1x<=2)]
gb = np.linspace(np.amin(bif_diag1x[~np.isnan(bif_diag1x)]),np.amax(bif_diag1x[~np.isnan(bif_diag1x)]),20)
# clean
ga,v1a,v2a = clean3d(ga,v1a,v2a,tol=.47)
# remove nans for linewidth stuff later.
ga_nonan = ga[~np.isnan(ga)*(~np.isnan(v1a))*(~np.isnan(v2a))]
v1a_nonan = v1a[~np.isnan(ga)*(~np.isnan(v1a))*(~np.isnan(v2a))]
v2a_nonan = v2a[~np.isnan(ga)*(~np.isnan(v1a))*(~np.isnan(v2a))]
# prep for plotting with different line widths
sol = np.zeros((len(ga),3))
sol[:,0] = v1a
sol[:,1] = ga
sol[:,2] = v2a
sol = np.transpose(sol)
points = np.array([sol[0,:],sol[1,:],sol[2,:]]).T.reshape(-1,1,3)
segs = np.concatenate([points[:-1],points[1:]],axis = 1)
line3d = Line3DCollection(segs,linewidths=(1.+(v1a_nonan)/np.amax(v1a_nonan)*3.),colors='k')
# add modified curves to figure
ax1.add_collection3d(line3d)
# repleat above to capture remaining axial branch(es)
# prep for plotting with different line widths
sol = np.zeros((len(ga),3))
sol[:,0] = v2a
sol[:,1] = ga
sol[:,2] = v1a
sol = np.transpose(sol)
points = np.array([sol[0,:],sol[1,:],sol[2,:]]).T.reshape(-1,1,3)
segs = np.concatenate([points[:-1],points[1:]],axis = 1)
line3d = Line3DCollection(segs,linewidths=(1.+(v2a_nonan)/np.amax(v2a_nonan)*3.),colors='k')
# add modified curves to figure
ax1.add_collection3d(line3d)
# plot diagonal guys
# prep for plotting with different line widths
diagx = bif_diag2y[(bif_diag2y<=1)*(bif_diag2x<=2.)]
diagy = bif_diag2x[(bif_diag2y<=1)*(bif_diag2x<=2.)]
diagz = bif_diag2y[(bif_diag2y<=1)*(bif_diag2x<=2.)]
diagx_nonan = diagx[~np.isnan(diagx)]
sol = np.zeros((len(diagx),3))
sol[:,0] = diagx
sol[:,1] = diagy
sol[:,2] = diagz
sol = np.transpose(sol)
points2 = np.array([sol[0,:],sol[1,:],sol[2,:]]).T.reshape(-1,1,3)
segs2 = np.concatenate([points2[:-1],points2[1:]],axis = 1)
line3d2 = Line3DCollection(segs2,linewidths=(1.+(diagx_nonan)/np.amax(diagx_nonan)*3.),colors='k')
ax1.add_collection3d(line3d2)
# plot zero solution
ax1.plot([.0,0],[.5,plane1_z],[.0,0],color='black',lw=1)
# plot bifurcation planes
X,Y = np.meshgrid(np.linspace(0,1,10),np.linspace(0,1,10))
ax1.plot_surface(X,0.*X+plane1_z,Y,alpha=.5,color='gray')
ax1.plot_surface(X,0.*X+plane2_z,Y,alpha=.5,color='red')
# plot plane intersections
ax1.scatter(bify[bifx_int_p1],bifx[bifx_int_p1],bify2[bifx_int_p1],color='black',s=20)
#ax1.scatter(bify[bifx_int_p2],bifx[bifx_int_p2],bify2[bifx_int_p2],color='black',s=20)
#ax1.scatter(bif_diag2y_nonan[bif_diagx_int_p2],bif_diag1x_nonan[bif_diagx_int_p2],bif_diag1y_nonan[bif_diagx_int_p2],color='black',s=20)
ax1.scatter(0,1.17,.51,color='red',s=20,zorder=10)
ax1.scatter(.5,1.17,0.,color='red',s=40,zorder=10)
ax1.scatter(.37,1.17,.37,color='red',s=50,zorder=10)
"""
ax1.scatter(L1[g_int_p2],g[g_int_p2],M1[g_int_p2],color='black',s=20)
ax1.scatter(L2[g_int_p1],g[g_int_p1],M2[g_int_p1],color='black',s=20)
ax1.scatter(L2[g_int_p2],g[g_int_p2],M2[g_int_p2],color='black',s=20)
ax1.scatter(L3[g_int_p1],g[g_int_p1],M3[g_int_p1],color='black',s=20)
ax1.scatter(L3[g_int_p2],g[g_int_p2],M3[g_int_p2],color='black',s=20)
ax1.scatter(L4[g_int_p1],g[g_int_p1],M4[g_int_p1],color='black',s=20)
ax1.scatter(L4[g_int_p2],g[g_int_p2],M4[g_int_p2],color='black',s=20)
"""
## plot curves in 2d
# bifurcation lines
ax2.plot([plane1_z,plane1_z],[-1,1.8],color='black',alpha=.5,lw=2)
ax2.plot([plane2_z,plane2_z],[-1,1.8],color='red',alpha=.5,lw=2)
ax2.plot(bifx,bify,color='black')
ax2.plot(bifx2,bify2,color='black')
ax2.plot(bif_diag1x,bif_diag1y,color='black')
ax2.plot(bif_diag2x,bif_diag2y,color='black')
ax2.plot([0,5],[0,0],color='black')
# label curves
ax2.annotate(r'$x$-axis direction',
xy=(1.04,.37),xycoords='data',textcoords='data',
xytext=(.6,.6),
arrowprops=dict(arrowstyle="-|>",
connectionstyle="arc3",
color='black'),
)
ax2.annotate(r'$y$-axis direction',
xy=(1.0,.0),xycoords='data',textcoords='data',
xytext=(.55,.33),
arrowprops=dict(arrowstyle="-|>",
connectionstyle="arc3",
color='black'),
)
ax2.annotate(r'$g^*$',
xy=(.9,.0),xycoords='data',textcoords='data',
xytext=(.8,.05),
arrowprops=dict(arrowstyle="-|>",
connectionstyle="arc3",
color='black'),
)
ax2.annotate('Diagonal',
xy=(1.1,.32),xycoords='data',textcoords='data',
xytext=(1.4,.2),
arrowprops=dict(arrowstyle="-|>",
connectionstyle="arc3",
color='black'),
)
ax2.annotate('Off-diagonal',
xy=(1.4,.41),xycoords='data',textcoords='data',
xytext=(1.5,.34),
arrowprops=dict(arrowstyle="-|>",
connectionstyle="arc3",
color='black'),
)
ax2.annotate('Off-diagonal',
alpha=0.,
xy=(1.4,.62),xycoords='data',textcoords='data',
xytext=(1.5,.34),
arrowprops=dict(arrowstyle="-|>",
connectionstyle="arc3",
color='black'),
)
# plot params
ax1.view_init(20,-8)
# set labels
ax1.set_xlabel(r'$\nu_2$')
ax2.set_xlabel(r'$g$')
ax1.set_ylabel(r'$g$')
ax2.set_ylabel(r'$\nu_1$')
ax1.set_zlabel(r'$\nu_1$')
ax1.set_xlim(0.,1.)
ax2.set_xlim(.5,2.)
ax1.set_ylim(.5,2.)
ax2.set_ylim(-.05,1.)
ax1.set_zlim(0.,1.)
#plt.show()
return fig | a471a8b510ed786080e2e5f1b3c8159cc211ff19 | 3,651,340 |
def _parse_variables(vars_list):
"""Transform the list of vars stored in module definition in dictionnary"""
vars = {}
for var in vars_list:
key = var['name']
value = None
for var_type in ATTRIBUTE_TYPE:
if var_type in var:
value = var[var_type]
break
vars[key] = value
return vars | 59c88815abf08efe72dcca9efce4970bcd072b91 | 3,651,341 |
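A usage sketch for _parse_variables above (added for illustration, not part of the source entry); it assumes the module-level constant ATTRIBUTE_TYPE is an iterable of candidate value keys such as ('default', 'value'):
    ATTRIBUTE_TYPE = ('default', 'value')  # hypothetical stand-in for the module constant
    vars_list = [
        {'name': 'region', 'default': 'us-east-1'},
        {'name': 'count', 'value': 3},
        {'name': 'tags'},  # no recognised value key -> None
    ]
    print(_parse_variables(vars_list))
    # {'region': 'us-east-1', 'count': 3, 'tags': None}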
import logging
def get_vertical_axes(nc_file):
"""
Scan input netCDF file and return a list of vertical axis variables, requiring specific
axis names
"""
vertical_axes = []
for var_name, var in nc_file.variables.items():
if var_name in ('full_levels', 'half_levels'):
vertical_axes.append(var)
logging.info('Found %i vertical axes.', len(vertical_axes))
return vertical_axes | f26b89d9d9839759f3b1ed7a990d548f996e29d2 | 3,651,342 |
def update_workload_volumes(workload,config,spec_config):
"""
    Return True if any volume is updated; otherwise return False
"""
volumemount_configs = get_property(spec_config,("containers",0,"volumeMounts"))
if not volumemount_configs:
del_objs = models.WorkloadVolume.objects.filter(workload=workload).delete()
if del_objs[0]:
logger.debug("Delete the volumes for workload({}),deleted objects = {}".format(workload,del_objs))
return True
else:
return False
updated = False
name = None
del_objs = models.WorkloadVolume.objects.filter(workload=workload).exclude(name__in=[c["name"] for c in volumemount_configs]).delete()
if del_objs[0]:
logger.debug("Delete the volumes for workload({}),deleted objects = {}".format(workload,del_objs))
updated = True
    # extract all volumes from the yaml file
volume_configs = {}
for volume_config in get_property(spec_config,"volumes") or []:
volume_configs[volume_config["name"]] = volume_config
for volumemount_config in volumemount_configs:
name = volumemount_config["name"]
try:
obj = models.WorkloadVolume.objects.get(workload=workload,name=name)
except ObjectDoesNotExist as ex:
obj = models.WorkloadVolume(workload=workload,name=name)
writable = get_property(volumemount_config,"readOnly",lambda val: False if val else True)
update_fields = set_fields_from_config(obj,volumemount_config,[
("mountpath","mountPath",None),
("subpath","subPath",None)
])
if name not in volume_configs:
continue
volume_config = volume_configs[name]
if "persistentVolumeClaim" in volume_config:
#reference the volume from volume claim
claimname = volume_config["persistentVolumeClaim"]["claimName"]
set_field(obj,"volume_claim", models.PersistentVolumeClaim.objects.get(cluster=workload.cluster,namespace=workload.namespace,name=claimname),update_fields)
set_field(obj,"volume", obj.volume_claim.volume,update_fields)
set_field(obj,"volumepath", obj.volume_claim.volume.volumepath if obj.volume_claim.volume else None ,update_fields)
set_field(obj,"other_config", None,update_fields)
if writable:
writable = obj.volume_claim.writable
elif "hostPath" in volume_config:
hostpath = volume_config["hostPath"]["path"]
set_field(obj,"volume_claim", None,update_fields)
set_field(obj,"volumepath", hostpath,update_fields)
set_field(obj,"volume", models.PersistentVolume.objects.filter(cluster=workload.cluster,volumepath=hostpath).first(),update_fields)
set_field(obj,"other_config", None,update_fields)
if writable and obj.volume:
writable = obj.volume.writable
else:
set_field(obj,"other_config", volume_config,update_fields)
set_field(obj,"writable",writable,update_fields)
if obj.pk is None:
obj.modified = workload.modified
obj.created = workload.modified
obj.save()
updated = True
logger.debug("Create deployment workload volume({})".format(obj))
elif update_fields:
obj.modified = workload.modified
update_fields.append("modified")
update_fields.append("updated")
obj.save(update_fields=update_fields)
updated = True
logger.debug("Update the deployment workload volume({}),update_fields={}".format(obj,update_fields))
else:
logger.debug("The deployment workload volume({}) is not changed".format(obj))
return updated | 2887aaaf6223b1e548d0c35c8a9b300d4ec417d8 | 3,651,343 |
def b_2_d(x):
"""
Convert byte list to decimal
:param x: byte list
:return: decimal
"""
s = 0
for i in range(0, len(x)):
s += x[i]*2**i
return s | e865700ea30be535ad014908d6b6024186cc5ac6 | 3,651,344 |
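A quick usage sketch for b_2_d above (added for illustration, not part of the source entry); the list is read least-significant bit first:
    print(b_2_d([1, 0, 1]))     # 1*1 + 0*2 + 1*4 = 5
    print(b_2_d([0, 0, 0, 1]))  # 8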
def get(s, delimiter='', format="diacritical"):
"""Return pinyin of string, the string must be unicode
"""
return delimiter.join(_pinyin_generator(u(s), format=format)) | 7369e133f73e9517fc20f6b95809ba615172feae | 3,651,345 |
def top_dist(g1, g2, name='weight', topology_type=0):
"""
:param g1: graph 1
:param g2: graph 2
:param name: compared edge attribute
:param topology_type: topology distance normalization method
:return: topology distance
"""
max_v = max_edge(g1, name, max_edge(g2, name, 0)) # find max value in a graph
v = 0
nodes_list = set(g1.nodes()) | set(g2.nodes()) # define nodes list in g1 or g2
degree1 = g1.degree(weight=name) # define degree of g1
degree2 = g2.degree(weight=name) # define degree of g2
for node in nodes_list: # consider each node
if node in g1.nodes() and node in g2.nodes(): # node appears in both graphs
nodes1 = set(g1.neighbors(node)) # adjacent nodes in g1
nodes2 = set(g2.neighbors(node)) - nodes1 # distinct adjacent nodes in g2
for node2 in nodes1:
if node2 in g2.neighbors(node):
v += abs(g1[node][node2][name]-g2[node][node2][name])
else:
v += g1[node][node2][name]
for node2 in nodes2:
v += g2[node][node2][name]
else:
if node in g1.nodes(): # node appears only in g1
v += degree1[node]
else:
v += degree2[node] # node appears only in g2
v /= max_v
if topology_type == 0:
return v/len(nodes_list)/len(nodes_list)
else:
num_edges = len(set(g1.edges()) | set(g2.edges()))
return v/num_edges/num_edges | 2abf2e74b3a715861389b75bfce8bc3c609a77c1 | 3,651,346 |
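A hedged usage sketch for top_dist above (not part of the source entry); it assumes the module's max_edge helper, which is not shown here, returns the largest edge weight seen so far:
    import networkx as nx
    g1 = nx.Graph()
    g1.add_edge('a', 'b', weight=2.0)
    g1.add_edge('b', 'c', weight=1.0)
    g2 = nx.Graph()
    g2.add_edge('a', 'b', weight=1.0)
    g2.add_edge('c', 'd', weight=3.0)
    print(top_dist(g1, g2))                    # normalised by (number of nodes)**2
    print(top_dist(g1, g2, topology_type=1))   # normalised by (number of union edges)**2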
def refresh_track():
"""
For now the interface isn't refreshed
:return:
"""
try:
url = request.form["url"]
except KeyError:
return "nok"
with app.database_lock:
Track.refresh_by_url(app.config["DATABASE_PATH"], url)
return "ok" | 47cf865ec01093735050e7abb15d65ef97d2e1ba | 3,651,347 |
import pickle
def get_weights():
""" Loads uni-modal text and image CNN model weights.
Returns:
tuple: text and image weights.
"""
text_weight_file = open("models/unimodal_text_CNN_weights.pickle", "rb")
text_weights = pickle.load(text_weight_file)
text_weight_file.close()
image_weight_file = open("models/unimodal_image_CNN_LSTM_weights.pickle", "rb")
image_weights = pickle.load(image_weight_file)
image_weight_file.close()
return text_weights, image_weights | abff59a197130f5776fdb0cacc3f895ff5d7393e | 3,651,348 |
def get_data(start_runno, start_fileno, hall, fields): # pylint: disable=too-many-locals,too-many-branches
"""Pull the data requested, starting from first VALID run/file after/including
the specified one"""
val_dict = lambda: {'values': []}
ad_dict = lambda: {f'AD{det}': val_dict()
for det in dets_for(hall, start_runno)}
wp_dict = lambda: {f'WP{det}': val_dict()
for det in ['I', 'O']}
result = {'runnos': [],
'filenos': [],
'metrics': {
field_desc(field): wp_dict() if field.endswith('WP') else ad_dict()
for field in fields
},
# Send 'latest' so that frontend knows whether to disable END button
'latest': all_latest()}
focus = focus_sql(hall, start_runno)
try:
end_runno, end_fileno = get_shifted(start_runno, start_fileno, hall, 1, skipfirst=False)
except EndOfDataException: # return empty result, let caller decide how to proceed
return result
ad_fields = [f for f in fields if not f.endswith('WP')]
wp_fields = [f[:-2] for f in fields if f.endswith('WP')]
uniq_fields = list(set(ad_fields + wp_fields))
if any(f.endswith('counts') for f in uniq_fields):
livetimes = {}
rows = get_livetimes(start_runno, start_fileno, end_runno, end_fileno, hall)
for runno, fileno, lt_ms in rows:
livetimes[(runno, fileno)] = lt_ms / 1000
default_livetime = sum(livetimes.values()) / len(livetimes)
field_sel = f', {",".join(uniq_fields)}' if uniq_fields else ''
loc = loc_pred(start_runno, start_fileno, end_runno, end_fileno)
query = f'''SELECT runno, fileno, detectorid {field_sel}
FROM DqDetectorNew NATURAL JOIN DqDetectorNewVld vld
LEFT JOIN runno_fileno_sitemask USING (runno, fileno)
WHERE ({loc}) AND ({focus}) AND vld.sitemask = {sitemask(hall)}
AND streamtype = 'Physics'
ORDER BY runno, fileno, detectorid, insertdate'''
rows = dq_exec(query).fetchall()
def val_arr(field, det):
if det >= 5:
prefix = 'WP'
det = 'O' if det == 6 else 'I'
else:
prefix = 'AD'
return result['metrics'][field_desc(field)][f'{prefix}{det}']['values']
last_runno, last_fileno = None, None
for row in rows:
runno, fileno, det = row[:3]
if runno != last_runno or fileno != last_fileno:
result['runnos'].append(runno)
result['filenos'].append(fileno)
for each_ad in dets_for(hall, start_runno):
for field in ad_fields:
val_arr(field, each_ad).append(-2) # default value
for each_wp in [5, 6]:
for field in wp_fields:
val_arr(field+'WP', each_wp).append(-2)
for i, field in enumerate(uniq_fields):
val = row[i+3]
if field.endswith('counts'):
try:
norm = livetimes[(runno, fileno)]
except KeyError:
print(f'WARNING: Missing livetime for {runno}, {fileno}')
norm = default_livetime
if val is not None: # in case we got a NULL in this row
val /= norm
if val is None:
val = -3
# NOTE If the loc_pred queries are slow due to IN, consider
# simplifying those and instead doing a more precise AD check
# here
if field in ad_fields and det <= 4:
val_arr(field, det)[-1] = val # replace default/older
elif field in wp_fields and det >= 5:
val_arr(field+'WP', det)[-1] = val
last_runno, last_fileno = runno, fileno
result['xs'] = scale_xs(result['runnos'], result['filenos'],
(start_runno, start_fileno),
(end_runno, end_fileno), hall)
return result | e740952bf5419956bb86f214b01e4a8deb8e6ebc | 3,651,349 |
def timelength_label_to_seconds(
timelength_label: spec.TimelengthLabel,
) -> spec.TimelengthSeconds:
"""convert TimelengthLabel to seconds"""
number = int(timelength_label[:-1])
letter = timelength_label[-1]
base_units = timelength_units.get_base_units()
base_seconds = base_units['1' + letter]
seconds = number * base_seconds
return seconds | d0494fd2fabe07d0cae2dbc7c8c142b7b478533c | 3,651,350 |
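A usage sketch under stated assumptions (not part of the source entry): it assumes timelength_units.get_base_units() returns a mapping such as {'1s': 1, '1m': 60, '1h': 3600, '1d': 86400}:
    print(timelength_label_to_seconds('3h'))   # 3 * 3600 = 10800
    print(timelength_label_to_seconds('45m'))  # 45 * 60 = 2700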
from typing import List
def getUrlsAlias()->List[str]:
"""获取所有urls.py的别名"""
obj = getEnvXmlObj()
return obj.get_childnode_lists('alias/file[name=urls]') | be0f5a2b423a4fa9a58d9e60e2cc0d91f1d66949 | 3,651,351 |
def project_xarray(run: BlueskyRun, *args, projection=None, projection_name=None):
"""Produces an xarray Dataset by projecting the provided run.
EXPERIMENTAL: projection code is experimental and could change in the near future.
    Projections come in multiple types: linked and calculated. Calculated fields are only supported
in the data (not at the top-level attrs).
Projected fields will be inserted into the resulting xarray.Dataset
Parameters
----------
run : BlueskyRun
run to project
projection_name : str, optional
name of a projection to select in the run, by default None
projection : dict, optional
projection not from the run to use, by default None
Returns
-------
xarray.Dataset
The return Dataset will contain:
- single value meta data (from the run start) in the return Dataset's attrs dict, keyed
on the projection key. These are projections marked "location": "start"
- single value meta data (from a streams configuration field) in the return Dataset's xarray's dict, keyed
on the projection key. These are projections marked "location": "configuration"
- multi-value data (from a stream). Keys for the dict-like xarray.Dataset match keys
in the passed-in projection. These are projections with "location": "linked"...note that
          every xarray for a field from a given stream will contain a reference to the same set of
          configuration attrs as all other fields from that stream
Dataset
|_attrs
|_'projection_start_field': value
|_data
|_ 'projection_event_field': xarray
|_ attrs
|_'projection_configuration_field': value
Raises
------
ProjectionError
"""
attrs = {} # will populate the return Dataset attrs field
data_vars = {} # will populate the return Dataset DataArrays
stream_configurations = {} # will populate a collection of dicts of stream configurations
def metadata_cb(field, value):
attrs[field] = value
def event_configuration_cb(
projection_field,
stream,
config_index,
config_device,
config_field,
value):
if stream not in stream_configurations:
stream_configurations[stream] = []
if len(stream_configurations[stream]) == 0:
stream_configurations[stream].append({})
if config_device not in stream_configurations[stream][config_index]:
stream_configurations[stream][config_index][config_device] = {}
stream_configurations[stream][config_index][config_device][config_field] = value
def event_field_cb(projection_field,
stream,
field,
xarray: xarray.DataArray):
        if stream not in stream_configurations:
stream_configurations[stream] = []
        # associate the stream configuration with the xarray's attrs
xarray.attrs['configuration'] = stream_configurations[stream]
data_vars[projection_field] = xarray
# Use the callbacks defined above to project the run and build up a return xarray.Dataset
projector = Projector(
metadata_cb=metadata_cb,
event_configuration_cb=event_configuration_cb,
event_field_cb=event_field_cb)
projector.project(run, projection=projection, projection_name=projection_name)
dataset = xarray.Dataset(data_vars, attrs=attrs)
return dataset, projector.issues | 8960b68090601c0a83da4ebb82c4b97f3751282f | 3,651,352 |
def collect_users():
"""Collect a list of all Santas from the user"""
list_of_santas = []
while 1:
item = input("Enter a name\n")
if not item:
break
list_of_santas.append(item)
return list_of_santas | d86ec360518fdb497b86b7f631fee0dc4464e2bb | 3,651,353 |
def check_role_exists(role_name, access_key, secret_key):
"""
    Check whether the given IAM role already exists in the AWS Account
Args:
role_name (str): Role name
access_key (str): AWS Access Key
secret_key (str): AWS Secret Key
Returns:
Boolean: True if env exists else False
"""
iam_client = get_iam_client(access_key, secret_key)
try:
role = iam_client.get_role(RoleName=role_name)
return True if role else False
except:
return False | cd6f118424ca17f6e65e28abefed39e89bd66b95 | 3,651,354 |
def group_delay(group_key, flights):
"""
Group the arrival delay flights based on keys.
:param group_key: Group key to use for categorization.
:param flights: List of flights matching from an origin airport.
:return: Dictionary containing the list of flights grouped.
"""
dict_of_group_flights = defaultdict(list)
if group_key == 'distance':
global distance_range # segmentation every distance range
# Remove duplicate value & Get the maximum distance
distance_set = set()
for flight in flights:
distance_set.add(int(flight['distance']))
distance_list = sorted(list(distance_set))
max_distance = max(distance_list)
# Segment into Ranges
temp_dict = defaultdict(list)
for flight in flights:
distance_limit = 0
while distance_limit <= max_distance:
if int(flight[group_key]) in range(distance_limit, distance_limit + distance_range):
time_of_arrival = int(flight['arr_delay']) if flight['arr_delay'] else None
if time_of_arrival is not None and time_of_arrival < 0:
distance_ranges = str(distance_limit) + " - " + str(distance_limit + distance_range) + " miles"
temp_dict[distance_ranges].append(time_of_arrival)
distance_limit += distance_range
elif group_key == 'day_of_week':
temp_dict = defaultdict(list)
for flight in flights:
time_of_arrival = int(flight['arr_delay']) if flight['arr_delay'] else None
if time_of_arrival is not None and time_of_arrival < 0:
name_of_day = get_day_name(int(flight[group_key]))
temp_dict[name_of_day].append(time_of_arrival)
else:
temp_dict = defaultdict(list)
for flight in flights:
time_of_arrival = int(flight['arr_delay']) if flight['arr_delay'] else None
if time_of_arrival is not None and time_of_arrival < 0:
temp_dict[flight[group_key]].append(time_of_arrival)
# Overall Arrival Delay in "<minimum> - <maximum> minute(s) late" format
    for key, delay_list in temp_dict.items():
fastest_delay = str(abs(max(delay_list)))
longest_delay = str(abs(min(delay_list)))
if fastest_delay == longest_delay:
dict_of_group_flights[key].append(fastest_delay + " minute(s) late")
else:
dict_of_group_flights[key].append(fastest_delay + " - " + longest_delay + " minute(s) late")
return dict_of_group_flights | 0ae760f7da7762b97d6d7a5d5503b280ed39f855 | 3,651,355 |
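A hedged usage sketch for group_delay above (not part of the source entry); it assumes collections.defaultdict and the module's helpers (get_day_name, distance_range) are in scope, and that flight values are strings as read from a CSV:
    flights = [
        {'carrier': 'AA', 'arr_delay': '-12', 'distance': '800', 'day_of_week': '1'},
        {'carrier': 'AA', 'arr_delay': '-5',  'distance': '800', 'day_of_week': '2'},
        {'carrier': 'DL', 'arr_delay': '7',   'distance': '450', 'day_of_week': '3'},
    ]
    print(dict(group_delay('carrier', flights)))
    # {'AA': ['5 - 12 minute(s) late']} -- only negative (early) arrival delays are grouped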
def create_matrix(
score_same_brackets, score_other_brackets, score_reverse_brackets,
score_brackets_dots, score_two_dots, add_score_for_seq_match,
mode='simple'):
"""
    Function that creates a matrix that can be used for further analysis;
    please take note that mode must be the same for the matrix and the
    multiple sequence alignment, otherwise random-like effects will occur
    :param score_same_brackets: int, score for the same type of brackets
     like ( and (
    :param score_other_brackets: int, score for different types of brackets
     like ( and [
    :param score_reverse_brackets: int, score for reverse brackets like ( and )
    :param score_brackets_dots: int, score for a bracket and a dot like ( and .
    :param score_two_dots: int, score for two dots like . and .
    :param add_score_for_seq_match: int, value to add
     if sequence letter is the same
    :param mode: string, simple - only level-one pseudoknots, pseudo -
     multiple levels of pseudoknots
:return: string containing matrix that can be saved
"""
header = " A C D E F G H I K L M " \
"N P Q R S T V W Y"
matrix = defaultdict(dict)
if mode == 'simple':
for letter1 in LETTERS:
nucleotide1 = None
dot_bracket1 = None
for nucleotide in SIMPLE_CONVERSION:
for dot_bracket in SIMPLE_CONVERSION[nucleotide]:
if SIMPLE_CONVERSION[nucleotide][dot_bracket] == letter1:
nucleotide1 = nucleotide
dot_bracket1 = dot_bracket
for letter2 in LETTERS:
nucleotide2 = None
dot_bracket2 = None
for nucleotide in SIMPLE_CONVERSION:
for dot_bracket in SIMPLE_CONVERSION[nucleotide]:
if SIMPLE_CONVERSION[nucleotide][dot_bracket] == \
letter2:
nucleotide2 = nucleotide
dot_bracket2 = dot_bracket
score = score_brackets(
dot_bracket1, dot_bracket2, score_same_brackets,
score_other_brackets, score_reverse_brackets,
score_brackets_dots, score_two_dots)
if nucleotide1 == nucleotide2:
score += add_score_for_seq_match
matrix[letter1][letter2] = score
elif mode == 'pseudo':
for letter1 in LETTERS:
dot_bracket1 = None
for dot_bracket in PSEUDOKNOT_CONVERSION:
if PSEUDOKNOT_CONVERSION[dot_bracket] == letter1:
dot_bracket1 = dot_bracket
for letter2 in LETTERS:
score = 0
dot_bracket2 = None
for dot_bracket in PSEUDOKNOT_CONVERSION:
if PSEUDOKNOT_CONVERSION[dot_bracket] == letter2:
dot_bracket2 = dot_bracket
if dot_bracket2 is not None and dot_bracket1 is not None:
score = score_brackets(
dot_bracket1, dot_bracket2, score_same_brackets,
score_other_brackets, score_reverse_brackets,
score_brackets_dots, score_two_dots)
matrix[letter1][letter2] = score
else:
print('Wrong mode')
text = [header]
for letter1 in LETTERS:
string = [letter1, ' ']
for letter2 in LETTERS:
score = matrix[letter1][letter2]
string.append(str(score).rjust(5))
text.append("".join(string))
return "\n".join(text) | 7979a72b70ae2910051943c714676aec3d291dbc | 3,651,356 |
def view_inv(inventory_list):
"""list -> None
empty string that adds Rental attributes
"""
inventory_string = ''
for item in inventory_list:
inventory_string += ('\nRental: ' + str(item[0])+ '\nQuantity: '+ str(item[1])+
'\nDeposit: '+"$"+ str(item[2])+"\nPrice Per Week: "+ "$" + str(item[3])+
'\nReplacement Value: '+ "$" + str(int(item[4]))+ "\n")
return inventory_string | 540b6bb2597ba5686a070749c2526ad09be25d5f | 3,651,357 |
def generate_smb_proto_payload(*protos):
"""Generate SMB Protocol. Pakcet protos in order.
"""
hexdata = []
for proto in protos:
hexdata.extend(proto)
return "".join(hexdata) | 848fdad11941a6d917bd7969fb7ffb77025cd13d | 3,651,358 |
def FeatureGrad_LogDet(grad_feature):
"""Part of the RegTerm inside the integral
It calculates the logarithm of the determinant of the matrix [N_y x N_y] given by the scalar product of the gradients along the N_x axis.
Args:
grad_feature (array_like): [N_samples, N_y, N_x], where N_x is the input space and N_y the feature space.
Returns:
(array_like): [N_samples]
"""
# Case of 1d feature
if len(grad_feature.shape) == 2:
grad_feature = grad_feature[:, np.newaxis, :]
matrix_j = grad_feature@grad_feature.swapaxes(1, -1)
s, d = np.linalg.slogdet(matrix_j)
# return s*d
# We remove terms with zero s (i.e. errors)
return s[s != 0]*d[s != 0] | a32b472c6c69b441be52911f5a2f82011c5cab00 | 3,651,359 |
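A minimal numpy sketch for FeatureGrad_LogDet above (not part of the source entry):
    import numpy as np
    rng = np.random.default_rng(0)
    grad = rng.normal(size=(32, 4, 10))    # N_samples=32, N_y=4, N_x=10
    logdets = FeatureGrad_LogDet(grad)
    print(logdets.shape)                   # at most (32,); samples with a zero-sign determinant are dropped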
def get_every_second_indexes(ser: pd.Series,
even_index=True) -> pd.core.series.Series:
"""Return all rows where the index is either even or odd.
If even_index is True return every index where idx % 2 == 0
If even_index is False return every index where idx % 2 != 0
Assume default indexing i.e. 0 -> n
"""
idx = 0 if even_index else 1
return ser.iloc[idx::2] | eb8c3b3a377c34e047d7daa525226cac18e21b7b | 3,651,360 |
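A quick usage sketch for get_every_second_indexes above (not part of the source entry):
    import pandas as pd
    ser = pd.Series(['a', 'b', 'c', 'd', 'e'])
    print(get_every_second_indexes(ser).tolist())                    # ['a', 'c', 'e']
    print(get_every_second_indexes(ser, even_index=False).tolist())  # ['b', 'd']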
def EVLAApplyCal(uv, err, SNver=0, CLin=0, CLout=0, maxInter=240.0, \
doSelf=False,
logfile=None, check=False, debug=False):
"""
Applies an SN table to a CL table and writes another
Returns task error code, 0=OK, else failed
* uv = UV data object to clear
* err = Obit error/message stack
* SNver = SN table to apply, 0=>highest
* CLin = input CL table, 0=>highest
* CLout = output CL table, 0=>create new
* maxInter = Max time (min) over which to interpolate
* doSelf = If true only apply calibrations to same source
* logfile = logfile for messages
* check = Only check script, don't execute tasks
* debug = show input, ObitTasks debug
"""
################################################################
# Open/close UV to update header
if not check:
uv.Open(UV.READONLY,err)
uv.Close(err)
if err.isErr:
OErr.printErr(err)
mess = "Update UV header failed"
printMess(mess, logfile)
return 1
if not check:
if SNver<=0:
SNver = uv.GetHighVer("AIPS SN")
if CLin<=0:
CLin = uv.GetHighVer("AIPS CL")
if CLout<=0:
CLout = uv.GetHighVer("AIPS CL")+1
if CLin<1:
mess = "No input CL table to update"
printMess(mess, logfile)
uv.Header(err)
return 1
mess = "Update CL "+str(CLin)+" with SN "+str(SNver)+" to CL "+str(CLout)
printMess(mess, logfile)
clcal = ObitTask.ObitTask("CLCal")
try:
clcal.userno = OSystem.PGetAIPSuser() # This sometimes gets lost
except Exception as exception:
pass
if not check:
setname(uv,clcal)
clcal.solnVer = SNver
clcal.calIn = CLin
clcal.calOut = CLout
clcal.maxInter = maxInter
if doSelf:
clcal.interMode = "SELF"
clcal.taskLog = logfile
clcal.debug = debug
if debug:
clcal.i
# Trap failure
try:
if not check:
clcal.g
except Exception as exception:
print(exception)
mess = "CLCal Failed retCode="+str(clcal.retCode)
printMess(mess, logfile)
return 1
else:
pass
# End CLCal
# Open/close UV to update header
if not check:
uv.Open(UV.READONLY,err)
uv.Close(err)
if err.isErr:
OErr.printErr(err)
mess = "Update UV header failed"
printMess(mess, logfile)
return 1
return 0
# end EVLAApplyCal | 964cecc682aa85dde29c2cc8bf75f94e9a449da2 | 3,651,361 |
def preprocess_input(text):
"""
정제된 텍스트를 토큰화합니다
:param text: 정제된 텍스트
:return: 문장과 단어로 토큰화하여 분석에 투입할 준비를 마친 텍스트
"""
sentences = nltk.sent_tokenize(text)
tokens = [nltk.word_tokenize(sentence) for sentence in sentences]
return tokens | 902c1aa5fc98ad5180ef7db670afbc972089a307 | 3,651,362 |
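A usage sketch for preprocess_input above (not part of the source entry); nltk's sentence/word tokenizer data (e.g. 'punkt') must be downloaded first:
    import nltk
    nltk.download('punkt', quiet=True)
    print(preprocess_input("NLTK is handy. It tokenizes text."))
    # [['NLTK', 'is', 'handy', '.'], ['It', 'tokenizes', 'text', '.']]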
def create_count_dictionaries_for_letter_placements(all_words_list):
"""Returns a tuple of dictionaries where the index of the tuple is the counts for that index of each word
>>> create_count_dictionaries_for_letter_placements(all_words_list)
(dictPosition0, dictPosition1, dictPosition2, dictPosition3, dictPosition4)
For example:
dictPosition0 has the counts of all characters (a-z) in the first position of all the words.
dictPosition3 has the counts of all characters (a-z) in the fourth position of all the words.
"""
dictPosition0 = create_dictionary_of_characters_at_word_index(0, all_words_list)
dictPosition1 = create_dictionary_of_characters_at_word_index(1, all_words_list)
dictPosition2 = create_dictionary_of_characters_at_word_index(2, all_words_list)
dictPosition3 = create_dictionary_of_characters_at_word_index(3, all_words_list)
dictPosition4 = create_dictionary_of_characters_at_word_index(4, all_words_list)
return dictPosition0, dictPosition1, dictPosition2, dictPosition3, dictPosition4 | 4d45ddda36c64ccfb357367521aa1983d738ab7b | 3,651,363 |
def parse_known(key, val) -> str:
"""
    maps a string from the html to the function used to parse it
Args:
key: string from html
val: associated value in html
Returns:
str
"""
key_to_func = {}
key_to_func["left"] = parse_number
key_to_func["top"] = parse_number
key_to_func["width"] = parse_number
key_to_func["font-size"] = parse_number
key_to_func["color"] = parse_color
if key in key_to_func:
return key_to_func[key](key, val)
else:
return val | 680a38496c368e7bd13f5578f4312914ac63c7f7 | 3,651,364 |
def getRecordsPagination(page, filterRecords=''):
""" get all the records created by users to list them in the backend welcome page """
newpage = int(page)-1
offset = str(0) if int(page) == 1 \
else str(( int(conf.pagination) *newpage))
queryRecordsPagination = """
PREFIX prov: <http://www.w3.org/ns/prov#>
PREFIX base: <"""+conf.base+""">
SELECT DISTINCT ?g ?title ?userLabel ?modifierLabel ?date ?stage
WHERE
{ GRAPH ?g {
?s ?p ?o .
OPTIONAL { ?g rdfs:label ?title; prov:wasAttributedTo ?user; prov:generatedAtTime ?date ; base:publicationStage ?stage. ?user rdfs:label ?userLabel .
OPTIONAL {?g prov:wasInfluencedBy ?modifier. ?modifier rdfs:label ?modifierLabel .} }
OPTIONAL {?g rdfs:label ?title; prov:generatedAtTime ?date ; base:publicationStage ?stage . }
BIND(COALESCE(?date, '-') AS ?date ).
BIND(COALESCE(?stage, '-') AS ?stage ).
BIND(COALESCE(?userLabel, '-') AS ?userLabel ).
BIND(COALESCE(?modifierLabel, '-') AS ?modifierLabel ).
BIND(COALESCE(?title, 'none', '-') AS ?title ).
filter not exists {
?g prov:generatedAtTime ?date2
filter (?date2 > ?date)
}
}
"""+filterRecords+"""
FILTER( str(?g) != '"""+conf.base+"""vocabularies/' )
}
ORDER BY DESC(?date)
LIMIT """+conf.pagination+"""
OFFSET """+offset+"""
"""
records = list()
sparql = SPARQLWrapper(conf.myEndpoint)
sparql.setQuery(queryRecordsPagination)
sparql.setReturnFormat(JSON)
results = sparql.query().convert()
for result in results["results"]["bindings"]:
records.append( (result["g"]["value"], result["title"]["value"], result["userLabel"]["value"], result["modifierLabel"]["value"], result["date"]["value"], result["stage"]["value"] ))
return records | 97221f9cfebe615744bc3ef488e8daf3ddc0dca4 | 3,651,365 |
import scipy.integrate
def integrate_intensity(data_sets, id, nθ, iN, NCO2, color1, color2):
"""Integrate intensity ove angle theta
Arguments:
data_sets {[type]} -- [description]
id {[type]} -- [description]
nθ {[type]} -- [description]
iN {[type]} -- [description]
NCO2 {[type]} -- [description]
color1 {[type]} -- [description]
color2 {[type]} -- [description]
Returns:
[type] -- [description]
"""
θ_0 = np.deg2rad(data_sets.get(id, iN, 0, iθ)[0]) # theta
θ_1 = np.deg2rad(data_sets.get(id, iN, 1, iθ)[0]) # theta
θ_2 = np.deg2rad(data_sets.get(id, iN, 2, iθ)[0]) # theta
I_0 = data_sets.get(id, iN, 0, iI)[-1] # intensity at TOA
I_1 = data_sets.get(id, iN, 1, iI)[-1] # intensity at TOA
I_2 = data_sets.get(id, iN, 2, iI)[-1] # intensity at TOA
# qubic approximation of I(θ)
R1 = I_1 - I_0
R2 = I_2 - I_0
a0 = I_0
det = θ_1**2 * θ_2**3 - θ_1**3 * θ_2**2
a2 = (R1 * θ_2**3 - R2 * θ_1**3) / det
a3 = (R2 * θ_1**2 - R1 * θ_2**2) / det
c1 = scipy.integrate.quad(lambda x: np.cos(x)*np.sin(x), 0.0, np.pi*0.5)# θ_2)
c2 = scipy.integrate.quad(lambda x: np.cos(x)*np.sin(x)*x**2, 0.0, np.pi*0.5)# θ_2)
c3 = scipy.integrate.quad(lambda x: np.cos(x)*np.sin(x)*x**3, 0.0, np.pi*0.5)# θ_2)
θ = np.mgrid[0.0:np.pi*0.5:100j]
I = a0 + a2*θ**2 + a3*θ**3
# plot
plt.plot(θ, I, color1, label='%d ppm, cubic approximation' % NCO2)
plt.plot([θ_0, θ_1, θ_2], [I_0, I_1, I_2], color2+'o', label="%d ppm, computed" % NCO2)
plt.xlabel("angle θ [rad]")
plt.ylabel("TOA flux I(θ) [W/m²]")
plt.legend(loc='best')
# integrated intensity
Iint = 2.0*np.pi * (a0*c1[0] + a2*c2[0] + a3*c3[0])
return Iint | 8336347f8fbe9c690800ae3efec185ba1a0e610d | 3,651,366 |
def new(request, pk=""):
""" New CodeStand Entry
When user presses 'Associate new project' there is a Project Container
associated, then you need reuse this information in the form
:param request: HttpResponse
:param pk: int - Indicates which project must be loaded
"""
if request.path != request.session[constants.ACTUAL_TEMPLATE]:
clear_session(request)
request.session[constants.REM_LINKS] = []
request.session[constants.REM_TAGS] = []
request.session[constants.REM_DOCS] = []
request.session[constants.REM_CONTACTS] = []
request.session[constants.ADD_CONTACTS] = []
request.session[constants.ADD_LINKS] = []
request.session[constants.ADD_TAGS] = []
request.session[constants.ADD_DOCS] = []
request.session[constants.MAINTAIN_STATE] = True
if pk != "":
request.session[constants.ACTUAL_PROJECT] = get_object_or_404(ProjectContainer, id=pk)
# User must have permission to add new CodeStand
if not is_user_allowed(request.user, "canaddmatch"):
raise Http404
return save_code(request, False, pk) | 432949f5d7ae6869078c729d86bafabac0f17871 | 3,651,367 |
def sideral(
date, longitude=0.0, model="mean", eop_correction=True, terms=106
): # pragma: no cover
"""Sideral time as a rotation matrix
"""
theta = _sideral(date, longitude, model, eop_correction, terms)
return rot3(np.deg2rad(-theta)) | 01f3209db8996ad1e11ded48da26d286253c5f7d | 3,651,368 |
from splitgraph.core.output import conn_string_to_dict
from typing import Type
import click
def _make_mount_handler_command(
handler_name: str, handler: Type[ForeignDataWrapperDataSource]
) -> Command:
"""Turn the mount handler function into a Click subcommand
with help text and kwarg/connection string passing"""
help_text, handler_options_help = _generate_handler_help(handler)
params = [
click.Argument(["schema"]),
click.Option(
["--connection", "-c"],
help="Connection string in the form username:password@server:port",
),
click.Option(
["--handler-options", "-o"], help=handler_options_help, default="{}", type=JsonType()
),
]
def _callback(schema, connection, handler_options):
handler_options.update(conn_string_to_dict(connection))
mount(schema, mount_handler=handler_name, handler_kwargs=handler_options)
cmd = click.Command(handler_name, params=params, callback=_callback, help=help_text)
return cmd | 0e8aa0cf3973c265e0df2b1815afd65042fa5d14 | 3,651,369 |
def test_load_settings_onto_instrument(tmp_test_data_dir):
"""
Test that we can successfully load the settings of a dummy instrument
"""
# Always set datadir before instruments
set_datadir(tmp_test_data_dir)
def get_func():
return 20
tuid = "20210319-094728-327-69b211"
instr = Instrument("DummyInstrument")
# A parameter that is both settable and gettable
instr.add_parameter(
"settable_param", initial_value=10, parameter_class=ManualParameter
)
# A parameter that is only gettable
instr.add_parameter("gettable_param", set_cmd=False, get_cmd=get_func)
# A boolean parameter that is True by defualt
instr.add_parameter(
"boolean_param", initial_value=True, parameter_class=ManualParameter
)
# A parameter which is already set to None
instr.add_parameter(
"none_param",
initial_value=None,
parameter_class=ManualParameter,
vals=validators.Numbers(),
)
# A parameter which our function will try to set to None, giving a warning
instr.add_parameter(
"none_param_warning",
initial_value=1,
parameter_class=ManualParameter,
vals=validators.Numbers(),
)
# The snapshot also contains an 'obsolete_param', that is not included here.
# This represents a parameter which is no longer in the qcodes driver.
with pytest.warns(
UserWarning,
match="Parameter none_param_warning of instrument DummyInstrument could not be "
"set to None due to error",
):
load_settings_onto_instrument(instr, tuid)
with pytest.warns(
UserWarning,
match="Could not set parameter obsolete_param in DummyInstrument. "
"DummyInstrument does not possess a parameter named obsolete_param.",
):
load_settings_onto_instrument(instr, tuid)
assert instr.get("IDN") == {
"vendor": None,
"model": "DummyInstrument",
"serial": None,
"firmware": None,
}
assert instr.get("settable_param") == 5
assert instr.get("gettable_param") == 20
assert instr.get("none_param") is None
assert instr.get("none_param_warning") == 1
assert not instr.get("boolean_param")
instr.close() | 96f3d96b7a83989c390bcc629f39df618c553056 | 3,651,370 |
import html
def dashboard_3_update_graphs(n_intervals):
"""Update all the graphs."""
figures = load_data_make_graphs()
main_page_layout = html.Div(children=[
html.Div(className='row', children=[
make_sub_plot(figures, LAYOUT_COLUMNS[0]),
make_sub_plot(figures, LAYOUT_COLUMNS[1]),
]),
html.Div(className='row', children=[
make_sub_plot(figures, LAYOUT_COLUMNS[2]),
make_sub_plot(figures, LAYOUT_COLUMNS[3]),
]),
])
return main_page_layout | b0b67dd06540ffff3c09d2c0ce87d0e5edb44bdf | 3,651,371 |
import numpy as np
from mvpa2.datasets import Dataset
import copy
def fx(sl, dataset, roi_ids, results):
"""this requires the searchlight conditional attribute 'roi_feature_ids'
to be enabled"""
resmap = None
probmap = None
for resblock in results:
for res in resblock:
if resmap is None:
# prepare the result container
resmap = np.zeros((len(res), dataset.nfeatures), dtype=res.samples.dtype)
observ_counter = np.zeros(dataset.nfeatures, dtype=int)
#project the result onto all features -- love broadcasting!
resmap[:, res.a.roi_feature_ids] += res.samples
# increment observation counter for all relevant features
observ_counter[res.a.roi_feature_ids] += 1
# when all results have been added up average them according to the number
# of observations
observ_mask = observ_counter > 0
resmap[:, observ_mask] /= observ_counter[observ_mask]
# transpose to make broadcasting work -- creates a view, so in-place
# modification still does the job
result_ds = Dataset(resmap,
fa={'observations': observ_counter})
if 'mapper' in dataset.a:
result_ds.a['mapper'] = copy.copy(dataset.a.mapper)
return result_ds | 6b5e968882d0fe2e27c9302bc2b821509cfaafa1 | 3,651,372 |
import pathlib
import stat
def check_file(file_name):
"""
test if file: exists and is writable or can be created
Args:
file_name (str): the file name
Returns:
(pathlib.Path): the path or None if problems
"""
if not file_name:
return None
path = pathlib.Path(file_name)
# if file exists test if writable
if path.exists() and path.is_file():
handle = None
try:
handle = open(path, 'w')
except PermissionError:
return None
finally:
if handle:
handle.close()
    # create the file with write permissions
try:
path.touch(stat.S_IWUSR)
except PermissionError:
return None
return path | 5b8ff64795aa66d3be71444e158357c9b7a1b2c0 | 3,651,373 |
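A quick usage sketch for check_file above (not part of the source entry); the path is hypothetical:
    path = check_file('/tmp/example_output.txt')
    if path is None:
        print('file cannot be created or written')
    else:
        print('writable path:', path)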
async def push(request):
"""Push handler. Authenticate, then return generator."""
if request.method != "POST":
return 405, {}, "Invalid request"
fingerprint = authenticate(request)
if not fingerprint:
return 403, {}, "Access denied"
# Get given file
payload = await request.get_body(100 * 2 ** 20) # 100 MiB limit
# Also validate it
if not validate_payload(request, payload):
return 403, {}, "Payload could not be verified."
# Return generator -> do a deploy while streaming feedback on status
gen = push_generator(fingerprint, payload)
return 200, {"content-type": "text/plain"}, gen | aeedd0c0c336b756898a98460e15a24b3411c5c2 | 3,651,374 |
import os
def getfile(url, outdir=None):
    """Function to fetch files using urllib
    Works with ftp
    """
    fn = os.path.split(url)[-1]
    if outdir is not None:
        fn = os.path.join(outdir, fn)
    if not os.path.exists(fn):
        # Find the appropriate urlretrieve for Python 2 and 3
        try:
            from urllib.request import urlretrieve
        except ImportError:
            from urllib import urlretrieve
        print("Retrieving: %s" % url)
        # Add progress bar
        urlretrieve(url, fn)
return fn | 9cf70384fd81f702c29316e51fed9ca80802f022 | 3,651,375 |
def isroutine(object):
"""Return true if the object is any kind of function or method."""
return (isbuiltin(object)
or isfunction(object)
or ismethod(object)
or ismethoddescriptor(object)) | 386893f99c8cdd00c5523ef7ce052784e6ae9ca8 | 3,651,376 |
import json
def account_upload_avatar():
"""Admin Account Upload Avatar Action.
*for Ajax only.
Methods:
POST
Args:
files: [name: 'userfile']
Returns:
status: {success: true/false}
"""
if request.method == 'POST':
re_helper = ReHelper()
data = request.files['userfile']
filename = re_helper.r_slash(data.filename.encode('utf-8'))
helper = UpYunHelper()
url = helper.up_to_upyun('account', data, filename)
if url:
return json.dumps({'success': 'true', 'url': url})
else:
return json.dumps({'success': 'false'}) | c17f93eb7e5e750508aa34aedeaffb5b9410f15e | 3,651,377 |
def get_front_end_url_expression(model_name, pk_expression, url_suffix=''):
"""
Gets an SQL expression that returns a front-end URL for an object.
:param model_name: key in settings.DATAHUB_FRONTEND_URL_PREFIXES
:param pk_expression: expression that resolves to the pk for the model
:param url_suffix: Optional: string appended to the end of the url
"""
return Concat(
Value(f'{settings.DATAHUB_FRONTEND_URL_PREFIXES[model_name]}/'),
pk_expression,
Value(url_suffix),
) | c763f1d891f35c36823bb2fb7791a3d145f57164 | 3,651,378 |
def mock_config_entry() -> MockConfigEntry:
"""Return the default mocked config entry."""
return MockConfigEntry(
title="KNX",
domain=KNX_DOMAIN,
data={
CONF_KNX_INDIVIDUAL_ADDRESS: XKNX.DEFAULT_ADDRESS,
ConnectionSchema.CONF_KNX_MCAST_GRP: DEFAULT_MCAST_GRP,
ConnectionSchema.CONF_KNX_MCAST_PORT: DEFAULT_MCAST_PORT,
CONF_KNX_CONNECTION_TYPE: CONF_KNX_AUTOMATIC,
},
) | e8585d7c0f793e0636f29bb91d5533fc22a14a4b | 3,651,379 |
def load_escores(name_dataset, classifier, folds):
"""Excluir xxxxxxx Return escore in fold. """
escores=[]
escores.append(load_dict_file("escores/"+name_dataset +"_"+classifier +"_escore_grid_train"+str(folds)))
return escores
for index in range(folds):
escores.append(load_dict_file("escores/"+name_dataset +"_"+classifier +"_escore_grid_train"+str(index)))
return escores | 6afa5b7db023bec903dbab26203edc5d7c162280 | 3,651,380 |
def get_devices():
""" will also get devices ready
:return: a list of avaiable devices names, e.g., emulator-5556
"""
ret = []
p = sub.Popen(settings.ADB + ' devices', stdout=sub.PIPE, stderr=sub.PIPE, shell=True)
output, errors = p.communicate()
print output
segs = output.split("\n")
for seg in segs:
device = seg.split("\t")[0].strip()
if seg.startswith("emulator-"):
p = sub.Popen(settings.ADB + ' -s ' + device + ' shell getprop init.svc.bootanim', stdout=sub.PIPE, stderr=sub.PIPE, shell=True)
output, errors = p.communicate()
if output.strip() != "stopped":
time.sleep(10)
print "waiting for the emulator:", device
return get_devices()
else:
ret.append(device)
assert len(ret) > 0
return ret | b3b3a377483f694ecac7f57f6f76a40727be4eee | 3,651,381 |
import logging
import tqdm
import multiprocessing
def _proc_event_full(st, **kwargs):
"""
    Deep processing for the full event correlogram, including instrument response removal,
    spectral whitening, temporal normalization, autocorrelation and filtering.
:param st:
:param kwargs:
:return:
"""
# instrument response removal, spectral whitening, temporal normalization
# autocorrelation and filter, then output results.
def iter3c(stream):
# for an event, there is always "onset"
return IterMultipleComponents(stream, key="onset", number_components=(2, 3))
# resp removal, rotation, spectral whitening, temporal normalization
tasks = iter3c(st)
# loop over streams in each stream containing the 3-component traces
do_work = partial(_proc_event_rst, **kwargs)
njobs = kwargs["njobs"]
numbers = []
logging.info("deep processing for full event correlogram.")
print("deep processing for full event correlogram.")
if njobs == 1:
logging.info('do work sequential (%d cores)', njobs)
for task in tqdm(tasks, total=len(tasks)):
num = do_work(task)
numbers.append(num)
else:
logging.info('do work parallel (%d cores)', njobs)
pool = multiprocessing.Pool(njobs)
for num in tqdm(pool.imap_unordered(do_work, tasks), total=len(tasks)):
numbers.append(num)
pool.close()
pool.join()
logging.info("%d/%d files processed.", sum(numbers), len(tasks)) | cd069f684fba1a8d037aa8d08245859098b25c1f | 3,651,382 |
import types
def optional_is_none(context, builder, sig, args):
"""Check if an Optional value is invalid
"""
[lty, rty] = sig.args
[lval, rval] = args
# Make sure None is on the right
if lty == types.none:
lty, rty = rty, lty
lval, rval = rval, lval
opt_type = lty
opt_val = lval
del lty, rty, lval, rval
opt = context.make_optional(opt_type)(context, builder, opt_val)
res = builder.not_(cgutils.as_bool_bit(builder, opt.valid))
return impl_ret_untracked(context, builder, sig.return_type, res) | a00b6725b43e8d09e8261b228f58319a27b191f9 | 3,651,383 |
import logging
def get_volume_disk_capacity(pod_name, namespace, volume_name):
"""
Find the container in the specified pod that has a volume named
    `volume_name` and run df -Pk or du -sb in that container to determine
the available space in the volume.
"""
api = get_api("v1", "Pod")
res = api.get(name=pod_name, namespace=namespace)
if res.kind == "PodList":
# make sure there is only one pod with the requested name
if len(res.items) == 1:
pod = res.items[0]
else:
return {}
else:
pod = res
containers = pod.spec.get("initContainers", []) + pod.spec.get("containers", [])
for container in containers:
for volume_mount in container.get("volumeMounts", []):
if volume_mount.get("name") == volume_name:
mount_path = volume_mount.get("mountPath")
volume = list(filter(lambda x: x.name == volume_name, pod.spec.volumes))
volume = volume[0] if len(volume) == 1 else {}
if (
"emptyDir" in volume.keys()
and volume["emptyDir"].get("sizeLimit") is not None
):
# empty dir is used for the session
command = ["sh", "-c", f"du -sb {mount_path}"]
used_bytes = parse_du_command(
pod_exec(
pod_name,
namespace,
container.name,
command,
)
)
total_bytes = convert_to_bytes(volume["emptyDir"]["sizeLimit"])
available_bytes = (
0 if total_bytes - used_bytes < 0 else total_bytes - used_bytes
)
return {
"total_bytes": total_bytes,
"used_bytes": used_bytes,
"available_bytes": available_bytes,
}
else:
# PVC is used for the session
command = ["sh", "-c", f"df -Pk {mount_path}"]
try:
disk_cap_raw = pod_exec(
pod_name,
namespace,
container.name,
command,
)
except ApiException:
disk_cap_raw = ""
logging.warning(
f"Checking disk capacity failed with {pod_name}, "
f"{namespace}, {container.name}, {command}."
)
else:
logging.info(
f"Checking disk capacity succeeded with {pod_name}, "
f"{namespace}, {container.name}, {command}."
)
disk_cap = parse_df_command(disk_cap_raw)
# make sure `df -h` returned the results from only one mount point
if len(disk_cap) == 1:
return disk_cap[0]
return {} | 29d20c5dfb481be9a58dec85a23e20b9abd9cc5f | 3,651,384 |
import re
def is_valid_battlefy_id(battlefy_id: str) -> bool:
"""
    Verify a str is a Battlefy Id (20 <= length < 30) and is hexadecimal.
:param battlefy_id:
:return: Validity true/false
"""
    return 20 <= len(battlefy_id) < 30 and re.match("^[A-Fa-f0-9]*$", battlefy_id) is not None | ba3f79f4897425b87962f04506fdff1da684c122 | 3,651,385
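A quick usage sketch for is_valid_battlefy_id above (not part of the source entry):
    print(is_valid_battlefy_id('5e8a7f3b2c1d4e0f9a8b7c6d'))  # True: 24 hex characters
    print(is_valid_battlefy_id('not-a-battlefy-id'))         # False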
def echo(word:str, n:int, toupper:bool=False) -> str:
"""
Repeat a given word some number of times.
:param word: word to repeat
:type word: str
:param n: number of repeats
:type n: int
:param toupper: return in all caps?
:type toupper: bool
:return: result
:return type: str
"""
res=word*n
if (toupper):
res=res.upper()
return res | 62a68c1ff577781a84a58f124beec8d31b0b456c | 3,651,386 |
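A quick usage sketch for echo above (not part of the source entry):
    print(echo("ha", 3))                 # hahaha
    print(echo("ha", 2, toupper=True))   # HAHA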
def verify_package_info(package_info):
"""Check if package_info points to a valid package dir (i.e. contains
at least an osg/ dir or an upstream/ dir).
"""
url = package_info['canon_url']
rev = package_info['revision']
command = ["svn", "ls", url, "-r", rev]
out, err = utils.sbacktick(command, clocale=True, err2out=True)
if err:
raise SVNError("Exit code %d getting SVN listing of %s (rev %s). Output:\n%s" % (err, url, rev, out))
for line in out.split("\n"):
if line.startswith('osg/') or line.startswith('upstream/'):
return True
return False | 6a38e50c2ab121260cbaaa9c188f2470784e65fb | 3,651,387 |
def add(a,b):
""" This function adds two numbers together """
return a+b | 96173657034d469ea43142179cd408e0c1f1e12d | 3,651,388 |
def decode_ADCP(data):
"""
Decodes ADCP data read in over UDP. Returns two lists: header and current.
input: Raw data string from ADCP UDP stream
Output:
header: [timestamp, nCells, nBeams, pressure]
- timestamp in unix format
- nBeams x nCells gives dimensions of current data
- pressure is hydrostatic pressure in dBar
current: nBeams x nCells current values in m/s
"""
data = data.decode("utf-8")
if data.endswith('ZZZZ') and data.startswith('AAAA'):
data = data.split(' ')
timestamp = float(data[1]) + float(data[2])/1000
nCells = int(data[3])
nBeams = int(data[4])
pressure = int(data[5])
current = np.array(list(map(float, list(data[6:-2]))))/1000
current = np.resize(current, (nBeams, nCells)).round(3)
header = [timestamp, nCells, nBeams, pressure]
else:
header = []
current = []
return current, header | 07c5b430ac2321e4e47124e71c83bf8a2440f43f | 3,651,389 |
def RowToModelInput(row, kind):
"""
This converts a patient row into inputs for the SVR.
In this model we use RNAseq values as inputs.
"""
SampleID = row[TissueSampleRow(kind)]
TrueSampleIDs = [r for r in TissueSamples.columns
if r.startswith(SampleID)]
if not TrueSampleIDs:
return None
TrueSampleID = TrueSampleIDs[0]
assert len(TrueSampleIDs) <= 1
try:
sample = TissueSamples[[TrueSampleID]]
Masked = sample[GeneMask[kind]]
return Masked.values.reshape(-1,)
except KeyError as e:
print("Key error: %s" % SampleID)
return None | 1167a7c47be7893252820087098db6e416f6c9bc | 3,651,390 |
import datetime
def str_to_timedelta(td_str):
"""Parses a human-readable time delta string to a timedelta"""
if "d" in td_str:
day_str, time_str = td_str.split("d", 1)
d = int(day_str.strip())
else:
time_str = td_str
d = 0
time_str = time_str.strip()
if not time_str:
return datetime.timedelta(days=d)
colon_count = time_str.count(":")
if (not colon_count) or colon_count > 2:
raise ValueError("Time format [dd d] hh:mm[:ss] or dd d")
elif colon_count == 1:
h_str, m_str = time_str.split(":", 1)
h, m, s = int(h_str.strip()), int(m_str.strip()), 0
elif colon_count == 2:
h_str, m_str, s_str = time_str.split(":", 2)
h, m, s = int(h_str.strip()), int(m_str.strip()), int(s_str.strip())
return tuple_to_timedelta((d, h, m, s)) | dc3449c708ef4fbe689a9c130745d7ada6ac8f78 | 3,651,391 |
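A usage sketch for str_to_timedelta above (not part of the source entry); it assumes the module's tuple_to_timedelta helper turns a (days, hours, minutes, seconds) tuple into a datetime.timedelta:
    print(str_to_timedelta('2 d 03:30'))   # 2 days, 3:30:00
    print(str_to_timedelta('01:15:30'))    # 1:15:30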
import cgi
def filter_safe_enter(s):
"""正文 换行替换"""
return '<p>' + cgi.escape(s).replace("\n", "</p><p>") + '</p>' | 6091abec0ff87361f1bbe4d146c64ddef3cc99f0 | 3,651,392 |
def _full_rank(X, cmax=1e15):
"""
This function possibly adds a scalar matrix to X
to guarantee that the condition number is smaller than a given threshold.
Parameters
----------
    X: array of shape (nrows, ncols)
    cmax: float, optional (default 1e15), tolerance for the condition number
    Returns
    -------
    X: array of shape (nrows, ncols) after regularization
    c: float, condition number of the returned matrix (at most cmax)
"""
U, s, V = np.linalg.svd(X, 0)
smax, smin = s.max(), s.min()
c = smax / smin
if c < cmax:
return X, c
warn('Matrix is singular at working precision, regularizing...')
lda = (smax - cmax * smin) / (cmax - 1)
s = s + lda
X = np.dot(U, np.dot(np.diag(s), V))
return X, cmax | 8f24509fb921877c9f1bcff09fc035285beee69e | 3,651,393 |
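A minimal numpy sketch for _full_rank above (not part of the source entry); the module's warn import is assumed (e.g. from warnings import warn):
    import numpy as np
    from warnings import warn              # needed by _full_rank when it regularizes
    A = np.array([[1.0, 1.0], [1.0, 1.0 + 1e-12]])   # ill-conditioned matrix
    A_reg, cond = _full_rank(A, cmax=1e6)
    print(cond)                            # 1000000.0: the condition number is capped at cmax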
from typing import Optional
def create(
session: Session,
instance: Instance,
name: str,
description: Optional[str] = None,
external_id: Optional[str] = None,
unified_dataset_name: Optional[str] = None,
) -> Project:
"""Create a Mastering project in Tamr.
Args:
instance: Tamr instance
name: Project name
description: Project description
external_id: External ID of the project
        unified_dataset_name: Unified dataset name. If None, will be set to the project name + '_unified_dataset'
Returns:
Project created in Tamr
Raises:
project.AlreadyExists: If a project with these specifications already exists.
requests.HTTPError: If any other HTTP error is encountered.
"""
return project._create(
session=session,
instance=instance,
name=name,
project_type="DEDUP",
description=description,
external_id=external_id,
unified_dataset_name=unified_dataset_name,
) | aac88500ecd60df9a1496e38e33bc212f3e26701 | 3,651,394 |
import random
import time
def users_with_pending_lab(connection, **kwargs):
"""Define comma seperated emails in scope
if you want to work on a subset of all the results"""
check = CheckResult(connection, 'users_with_pending_lab')
# add random wait
wait = round(random.uniform(0.1, random_wait), 1)
time.sleep(wait)
check.action = 'finalize_user_pending_labs'
check.full_output = []
check.status = 'PASS'
cached_items = {} # store labs/PIs for performance
mismatch_users = []
# do not look for deleted/replaced users
scope = kwargs.get('scope')
search_q = '/search/?type=User&pending_lab!=No+value&frame=object'
# want to see all results or a subset defined by the scope
if scope == 'all':
pass
else:
emails = [mail.strip() for mail in scope.split(',')]
for an_email in emails:
search_q += '&email=' + an_email
search_res = ff_utils.search_metadata(search_q, key=connection.ff_keys)
for res in search_res:
user_fields = ['uuid', 'email', 'pending_lab', 'lab', 'title', 'job_title']
user_append = {k: res.get(k) for k in user_fields}
check.full_output.append(user_append)
# Fail if we have a pending lab and lab that do not match
if user_append['lab'] and user_append['pending_lab'] != user_append['lab']:
check.status = 'FAIL'
mismatch_users.append(user_append['uuid'])
continue
# cache the lab and PI contact info
if user_append['pending_lab'] not in cached_items:
to_cache = {}
pending_meta = ff_utils.get_metadata(user_append['pending_lab'], key=connection.ff_keys,
add_on='frame=object')
to_cache['lab_title'] = pending_meta['display_title']
if 'pi' in pending_meta:
pi_meta = ff_utils.get_metadata(pending_meta['pi'], key=connection.ff_keys,
add_on='frame=object')
to_cache['lab_PI_email'] = pi_meta['email']
to_cache['lab_PI_title'] = pi_meta['title']
to_cache['lab_PI_viewing_groups'] = pi_meta['viewing_groups']
cached_items[user_append['pending_lab']] = to_cache
# now use the cache to fill fields
for lab_field in ['lab_title', 'lab_PI_email', 'lab_PI_title', 'lab_PI_viewing_groups']:
user_append[lab_field] = cached_items[user_append['pending_lab']].get(lab_field)
if check.full_output:
check.summary = 'Users found with pending_lab.'
if check.status == 'PASS':
check.status = 'WARN'
check.description = check.summary + ' Run the action to add lab and remove pending_lab'
check.allow_action = True
check.action_message = 'Will attempt to patch lab and remove pending_lab for %s users' % len(check.full_output)
if check.status == 'FAIL':
check.summary += '. Mismatches found for pending_lab and existing lab'
check.description = check.summary + '. Resolve conflicts for mismatching users before running action. See brief_output'
check.brief_output = mismatch_users
else:
check.summary = 'No users found with pending_lab'
return check | 6136531c523cf344405cda42bbfcae1e4719280d | 3,651,395 |
import pandas
import types
def hpat_pandas_series_lt(self, other, level=None, fill_value=None, axis=0):
"""
Pandas Series method :meth:`pandas.Series.lt` implementation.
.. only:: developer
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
Parameters
----------
self: :class:`pandas.Series`
input arg
other: :obj:`pandas.Series`, :obj:`int` or :obj:`float`
input arg
level: :obj:`int` or name
*unsupported*
fill_value: :obj:`float` or None, default None
*unsupported*
axis: default 0
*unsupported*
Returns
-------
:obj:`pandas.Series`
returns :obj:`pandas.Series` object
"""
_func_name = 'Method lt().'
if not isinstance(self, SeriesType):
raise TypingError('{} The object must be a pandas.series. Given: {}'.format(_func_name, self))
if level is not None or fill_value is not None or axis != 0:
raise TypingError(
'{} Unsupported parameters. Given level: {}, fill_value: {}, axis: {}'.format(_func_name, level, fill_value,
axis))
if isinstance(other, SeriesType):
def hpat_pandas_series_lt_impl(self, other):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8
"""
return pandas.Series(self._data < other._data)
return hpat_pandas_series_lt_impl
if isinstance(other, types.Integer) or isinstance(other, types.Float):
def hpat_pandas_series_lt_impl(self, other):
"""
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_integer_scalar
Test: python -m sdc.runtests sdc.tests.test_series.TestSeries.test_series_op8_float_scalar
"""
return pandas.Series(self._data < other)
return hpat_pandas_series_lt_impl
raise TypingError(
'{} The object must be a pandas.series and argument must be a number. Given: {} and other: {}'.format(
_func_name, self, other)) | b22497f64b711f92bbdf472feba657bc9b49115a | 3,651,396 |
import numbers
from typing import Any
def convert_kv(
key: str,
val: str | numbers.Number,
attr_type: bool,
attr: dict[str, Any] = {},
cdata: bool = False,
) -> str:
"""Converts a number or string into an XML element"""
if DEBUGMODE: # pragma: no cover
LOG.info(
f'Inside convert_kv(): key="{str(key)}", val="{str(val)}", type(val) is: "{type(val).__name__}"'
)
key, attr = make_valid_xml_name(key, attr)
if attr_type:
attr["type"] = get_xml_type(val)
attrstring = make_attrstring(attr)
return f"<{key}{attrstring}>{wrap_cdata(val) if cdata else escape_xml(val)}</{key}>" | 944dd77b08ae995fabb1a829d07a064f4ea3859f | 3,651,397 |
def getBanner(host, port):
"""
Connects to host:port and returns the banner.
"""
try:
s = socket.socket()
s.connect((host, port))
banner = s.recv(1024)
return str(banner).strip()
    except Exception as e:
error(str(host) + ':' + str(port) + ' ' + str(e)) | 46d497067790ef19521f84345adb8c8369ca8737 | 3,651,398 |
from typing import List
def create_cmd_table(table_data: List[List[str]], width: int = 15) -> BorderedTable:
"""Create a bordered table for cmd2 output.
Args:
table_data: list of lists with the string data to display
width: integer width of the columns. Default is 15 which generally works for ~4 columns
Returns:
BorderedTable: generated table for printing
"""
columns = table_data[0]
auto_column = partial(Column, width=width)
bt = BorderedTable([*map(auto_column, columns)])
rows = table_data[1:]
return bt.generate_table(rows) | 044630072f9927262673d65e9cfeadbd49d44f31 | 3,651,399 |