content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M) |
---|---|---|
from typing import Dict
def merge(source: Dict, destination: Dict) -> Dict:
"""
Deep merge two dictionaries
Parameters
----------
source: Dict[Any, Any]
Dictionary to merge from
destination: Dict[Any, Any]
Dictionary to merge to
Returns
-------
Dict[Any, Any]
New dictionary with fields in destination overwritten
with values from source
"""
new_dict = {**destination}
for key, value in source.items():
if isinstance(value, dict):
# get node or create one
node = new_dict.get(key, {})
new_dict[key] = merge(value, node)
else:
new_dict[key] = value
return new_dict | 4ffba933fe1ea939ecaa9f16452b74a4b3859f40 | 3,650,400 |
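A quick illustration of the precedence described in the docstring (the values below are hypothetical, not part of the entry): `source` wins on conflicts and neither input is mutated.
defaults = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
overrides = {'db': {'port': 6432}}
merged = merge(overrides, defaults)
assert merged == {'db': {'host': 'localhost', 'port': 6432}, 'debug': False}
assert defaults['db']['port'] == 5432  # the input dictionaries are not mutated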
async def async_api_adjust_volume_step(hass, config, directive, context):
"""Process an adjust volume step request."""
# media_player volume up/down service does not support specifying steps
# each component handles it differently e.g. via config.
# For now we use the volumeSteps returned to figure out if we
# should step up/down
volume_step = directive.payload['volumeSteps']
entity = directive.entity
data = {
ATTR_ENTITY_ID: entity.entity_id,
}
if volume_step > 0:
await hass.services.async_call(
entity.domain, SERVICE_VOLUME_UP,
data, blocking=False, context=context)
elif volume_step < 0:
await hass.services.async_call(
entity.domain, SERVICE_VOLUME_DOWN,
data, blocking=False, context=context)
return directive.response() | 85625118d4185842dd0398ec5dd0adbb951b5d67 | 3,650,401 |
import os
import pandas as pd
def load_recordings(path: str, activityLabelToIndexMap: dict, limit: int = None) -> 'list[Recording]':
"""
Load the recordings from a folder containing csv files.
"""
recordings = []
recording_files = os.listdir(path)
recording_files = list(filter(lambda file: file.endswith('.csv'), recording_files))
if limit is not None:
recording_files = recording_files[:limit]
recording_files = sorted(recording_files, key=lambda file: int(file.split('_')[0]))
for (index, file) in enumerate(recording_files):
print(f'Loading recording {file}, {index+1} / {len(recording_files)}')
recording_dataframe = pd.read_csv(os.path.join(path, file))
time_frame = recording_dataframe.loc[:, 'SampleTimeFine']
activities = recording_dataframe.loc[:, 'activity'].map(lambda label: activityLabelToIndexMap[label])
sensor_frame = recording_dataframe.loc[:,
recording_dataframe.columns.difference(['SampleTimeFine', 'activity'])]
subject = file.split('_')[1]
recordings.append(Recording(sensor_frame, time_frame, activities, subject, index))
print(f'Loaded {len(recordings)} recordings from {path}')
return recordings | c5081c7f7b27eea080a1b1394a67bff158bdf243 | 3,650,402 |
import warnings
import io
def load_img(path, grayscale=False, color_mode='rgb', target_size=None,
interpolation='nearest'):
"""Loads an image into PIL format.
# Arguments
path: Path to image file.
grayscale: DEPRECATED use `color_mode="grayscale"`.
color_mode: The desired image format. One of "grayscale", "rgb", "rgba".
"grayscale" supports 8-bit images and 32-bit signed integer images.
Default: "rgb".
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported.
Default: "nearest".
# Returns
A PIL Image instance.
# Raises
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if grayscale is True:
warnings.warn('grayscale is deprecated. Please use '
'color_mode = "grayscale"')
color_mode = 'grayscale'
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `load_img` requires PIL.')
with open(path, 'rb') as f:
img = pil_image.open(io.BytesIO(f.read()))
if color_mode == 'grayscale':
# if image is not already an 8-bit, 16-bit or 32-bit grayscale image
# convert it to an 8-bit grayscale image.
if img.mode not in ('L', 'I;16', 'I'):
img = img.convert('L')
elif color_mode == 'rgba':
if img.mode != 'RGBA':
img = img.convert('RGBA')
elif color_mode == 'rgb':
if img.mode != 'RGB':
img = img.convert('RGB')
else:
raise ValueError('color_mode must be "grayscale", "rgb", or "rgba"')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
", ".join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img | 61709400c2bd6379864f2399c2c04c29e0b61b92 | 3,650,403 |
import logging
import platform
def check_compatible_system_and_kernel_and_prepare_profile(args):
"""
Checks if we can do local profiling, that for now is only available
via Linux based platforms and kernel versions >=4.9
Args:
args:
"""
res = True
logging.info("Enabled profilers: {}".format(args.profilers))
logging.info("Checking if system is capable of running those profilers")
if "Linux" not in platform.system():
logging.error(
"Platform needs to be Linux based. Current platform: {}".format(
platform.system()
)
)
res = False
# check for release >= 4.9
release = platform.release()
logging.info("Detected platform release: {}".format(release))
major_minor = release.split(".")[:2]
system_kernel_major_v = major_minor[0]
system_kernel_minor_v = major_minor[1]
if float(system_kernel_major_v) < 4:
logging.error(
"kernel version needs to be >= 4.9. Detected version: {}".format(release)
)
res = False
if float(system_kernel_major_v) == 4 and float(system_kernel_minor_v) < 9:
logging.error(
"kernel version needs to be >= 4.9. Detected version: {}".format(release)
)
res = False
# a map between profiler name and profiler object wrapper
res, profilers_map = get_profilers_map(args.profilers.split(","))
return res, profilers_map | 35da3151109ef13c7966ede991b871dca45f4d0b | 3,650,404 |
import os
import shutil
import subprocess
from glob import glob
from termcolor import cprint
def crop_file(in_file, out_file, ext):
"""Crop a NetCDF file to give rectangle using NCO.
The file is automatically converted to [0,360] °E longitude format.
Args:
in_file: Input file path.
out_file: Output file path. It will not be overwritten.
ext: The rectangular region (extent) to crop to, given as a list of
[lon1, lon2, lat1, lat2]. Longitude in [0,360) °E and latitude in
[-90,+90] °N.
Returns:
Name of output file (same as `out_file`).
Raises:
FileNotFoundError: `in_file` doesn’t exist.
RuntimeError: Executable `ncks` is not in the PATH.
RuntimeError: NCKS failed or no output file was created.
"""
if not os.path.isfile(in_file):
raise FileNotFoundError("Input file doesn’t exist: '%s'" % in_file)
if skip(in_file, out_file):
return out_file
out_dir = os.path.dirname(out_file)
if not os.path.isdir(out_dir):
cprint(f"Directory '{out_dir}' does not exist yet. I will create it.",
'yellow')
os.makedirs(out_dir)
cprint("Cropping file '%s'..." % in_file, 'yellow')
if shutil.which("ncks") is None:
raise RuntimeError("Executable `ncks` not found.")
try:
# ADJUST LONGITUDE
ext_adj = list()
ext_adj[:] = ext
ext_adj[0] = adjust_longitude(in_file, ext[0])
ext_adj[1] = adjust_longitude(in_file, ext[1])
# CROP
subprocess.run(["ncks",
"--overwrite",
"--dimension", "lon,%.2f,%.2f" % (ext_adj[0],
ext_adj[1]),
"--dimension", "lat,%.2f,%.2f" % (ext_adj[2],
ext_adj[3]),
in_file,
out_file], check=True)
# ROTATE LONGITUDE
# See here for the documentation about rotating longitude:
# http://nco.sourceforge.net/nco.html#msa_usr_rdr
# Unlike in the instructions in the documentation, we don’t need to
# re-order the longitude dimension with --msa_usr_rdr because through
# the `ncks` cropping/hyperslabbing command the longitude is already
# ordered correctly from East to West.
# Note that we rotate after cropping for performance reasons. This way
# only the cropped grid cells need to be rotated.
subprocess.run(['ncap2',
'--overwrite',
'--script', 'where(lon < 0) lon=lon+360',
out_file, out_file], check=True)
except Exception:
print(f'DEBUG: ext = {ext}')
print(f'DEBUG: ext_adj = {ext_adj}')
if os.path.isfile(out_file):
cprint(f"Removing file '{out_file}'.", 'red')
os.remove(out_file)
# Remove temporary file created by ncks.
for g in glob(f'{out_file}.pid*.ncks.tmp'):
cprint(f"Removing file '{g}'.", 'red')
os.remove(g)
raise
if not os.path.isfile(out_file):
raise RuntimeError("Cropping with `ncks` failed: No output file "
"created.")
cprint(f"Successfully created '{out_file}'.", 'green')
return out_file | 3f185c4e86299270b2ef694d6b5505326873f7db | 3,650,405 |
def get_instance_string(params):
""" combine the parameters to a string mostly used for debug output
use of OrderedDict is advised
"""
return "_".join([str(i) for i in params.values()]) | ff9470cd9e308357b594c2ec0389c194d2d6ac00 | 3,650,406 |
import logging
import time
import re
def recv_bgpmon_updates(host, port, queue):
"""
Receive and parse the BGP update XML stream of bgpmon
"""
logging.info ("CALL recv_bgpmon_updates (%s:%d)", host, port)
# open connection
sock = _init_bgpmon_sock(host,port)
data = ""
stream = ""
# receive data
logging.info(" + receiving XML update stream ...")
while(True):
data = sock.recv(1024)
if not data:
sock.close()
time.sleep(60)
sock = _init_bgpmon_sock(host,port)
continue
stream += data
stream = str.replace(stream, "<xml>", "")
while (re.search('</BGP_MONITOR_MESSAGE>', stream)):
messages = stream.split('</BGP_MONITOR_MESSAGE>')
msg = messages[0] + '</BGP_MONITOR_MESSAGE>'
stream = '</BGP_MONITOR_MESSAGE>'.join(messages[1:])
result = parse_bgp_message(msg)
if result:
queue.put(result)
return True | 6cf8e008b2b47437e80b863eab6f7c1fd4a54e18 | 3,650,407 |
import ast
def is_string_expr(expr: ast.AST) -> bool:
"""Check that the expression is a string literal."""
return (
isinstance(expr, ast.Expr)
and isinstance(expr.value, ast.Constant)
and isinstance(expr.value.value, str)
) | f61418b5671c5e11c1e90fce8d90c583659d40e3 | 3,650,408 |
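A minimal usage sketch (hypothetical module source), showing that only a bare string-literal statement, such as a docstring, matches:
import ast
module = ast.parse('"""module docstring"""\nx = 1')
assert is_string_expr(module.body[0])      # the docstring expression
assert not is_string_expr(module.body[1])  # the assignment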
import numba
import numpy as np
def events_to_img(
xs: np.ndarray,
ys: np.ndarray,
tots: np.ndarray,
cluster_ids: np.ndarray,
x_img: np.ndarray,
y_img: np.ndarray,
minimum_event_num: int = 30,
extinguish_dist: float = 1.41422, # sqrt(2) = 1.41421356237
) -> np.ndarray:
"""
Converting given events into a flatten image array defined by
given image pixel positions
@param xs: event x position, must be float
@param ys: event y position, must be float
@param tots: event time of threshold (as intensity), must be float
@param cluster_ids: ID labels
@param x_img: pixel position of the target image (see np.meshgrid)
@param y_img: pixel position of the target image (see np.meshgrid)
@param minimum_event_num: minimum number of events needed to be included
@param extinguish_dist: signal impact ends outside this range
@returns: the image converted from given events using weighted centroid method
"""
# preparation
unique_cids = np.unique(cluster_ids)
img_shape = x_img.shape
x_img = x_img.flatten()
y_img = y_img.flatten()
img = x_img * 0.0
for i in numba.prange(unique_cids.shape[0]):
cid = unique_cids[i]
idx = np.where(cluster_ids == cid)[0]
# skip cluster with too few events
if idx.shape[0] < minimum_event_num:
continue
# compute the centroid position and weighted equivalent intensity
wgts = tots[idx] / np.sum(tots[idx])
xc = np.dot(wgts, xs[idx])
yc = np.dot(wgts, ys[idx])
# ic = np.dot(wgts, tots[idx])
# propogate the signal to the image
idx = np.where(
np.logical_and(
np.logical_and(
x_img >= xc - extinguish_dist,
x_img < xc + extinguish_dist,
),
np.logical_and(
y_img >= yc - extinguish_dist,
y_img < yc + extinguish_dist,
),
))[0]
wgts = (x_img[idx] - xc)**2 + (y_img[idx] - yc)**2
wgts = 1.0 / wgts
wgts = wgts / np.sum(wgts)
img[idx] += wgts
# return the results
return img.reshape(img_shape) | ced70fc290157a4168c8e9ebd589263bbc410c6f | 3,650,409 |
import logging
import requests
def worker(job):
"""Run a single download job."""
logger = logging.getLogger("ncbi-genome-download")
ret = False
try:
if job.full_url is not None:
req = requests.get(job.full_url, stream=True)
ret = save_and_check(req, job.local_file, job.expected_checksum)
if not ret:
return ret
ret = create_symlink(job.local_file, job.symlink_path)
except KeyboardInterrupt: # pragma: no cover
# TODO: Actually test this once I figure out how to do this in py.test
logger.debug("Ignoring keyboard interrupt.")
return ret | f7ddf815bc6689ac7086c3f8d04bc4ffd29fccbd | 3,650,410 |
import numpy as np
def generate_ar(n_series, n_samples, random_state=0):
"""Generate a linear auto-regressive series.
This simple model is defined as::
X(t) = 0.4 * X(t - 1) - 0.6 * X(t - 4) + 0.5 * N(0, 1)
The task is to predict the current value using all the previous values.
Parameters
----------
n_series : int
Number of time series to generate.
n_samples : int
Number of samples in each time series.
random_state : int, default 0
Seed to use in the random generator.
Returns
-------
X, Y : ndarray, shape (n_series, 1, n_stamps, 1)
Input and output sequences, `Y` is just delayed by 1 sample version
of `X`.
"""
n_init = 4
n_discard = 20
X = np.zeros((n_series, n_init + n_discard + n_samples + 1))
rng = np.random.RandomState(random_state)
X[:, n_init] = rng.randn(n_series)
for i in range(n_init + 1, X.shape[1]):
X[:, i] = (0.4 * X[:, i - 1] - 0.6 * X[:, i - 4] +
0.1 * rng.randn(n_series))
Y = X[:, n_init + n_discard + 1:, None]
X = X[:, n_init + n_discard: -1, None]
return X, Y | a02b2bb242ecc0eb6cf3d5d23d0982c56d81b617 | 3,650,411 |
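A small usage sketch of the generator as written (the code returns arrays of shape (n_series, n_samples, 1), with Y as the one-step-ahead target):
X, Y = generate_ar(n_series=3, n_samples=100, random_state=0)
assert X.shape == (3, 100, 1) and Y.shape == (3, 100, 1)
assert np.allclose(X[:, 1:, 0], Y[:, :-1, 0])  # Y is X shifted by one sample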
import subprocess
def get_current_commit_id() -> str:
"""Get current commit id.
Returns:
str: current commit id.
"""
command = "git rev-parse HEAD"
commit_id = (
subprocess.check_output(command.split()).strip().decode("utf-8") # noqa: S603
)
return commit_id | 978bd35fc3cfe71fcc133a6e49fbbe0e27d4feda | 3,650,412 |
import re
def get_raw_code(file_path):
"""
Removes empty lines, leading and trailing whitespaces, single and multi line comments
:param file_path: path to .java file
:return: list with raw code
"""
raw_code = []
multi_line_comment = False
with open(file_path, "r") as f:
for row in f:
# remove leading and trailing whitespaces
line = row.strip()
# remove '/* comments */'
line = re.sub(r'''
^ # start of string
/\* # "/*" string
.* # any character (except line break) zero or more times
\*/ # "*/" string
\s* # zero or many whitespaces
''', '', line, 0, re.VERBOSE)
# remove '//comments'
line = re.sub(r'''
^ # start of string
// # "//" string
.* # any character (except line break) zero or more times
$ # end of string
''', '', line, 0, re.VERBOSE)
# ignore empty lines
if line != '':
# skip multi-line comments (/*)
if re.search(r'''
^ # start of string
/\* # "/*" string
.* # any character (except line break) zero or more times
''', line, re.VERBOSE):
multi_line_comment = True
continue
# check if multi-line comment was closed (*/)
elif re.search(r'''
.* # any character (except line break) zero or more times
\*/ # "*/" string
$ # end of string
''', line, re.VERBOSE):
multi_line_comment = False
line = re.sub(r'''
.* # any character (except line break) zero or more times
\*/ # "*/" string
\s* # zero or many whitespaces
''', '', line, 0, re.VERBOSE)
if line == '':
continue
# add line if it's not multi-line comment
if not multi_line_comment:
raw_code.append(line)
return raw_code | 6654a0423f024eaea3067c557984c3aa5e9494da | 3,650,413 |
import os
def load_content(file_path):
"""
Loads content from file_path if file_path's extension
is one of allowed ones (See ALLOWED_EXTS).
Throws UnsupportedMetaException on disallowed filetypes.
:param file_path: Absolute path to the file to load content from.
:type file_path: ``str``
:rtype: ``dict``
"""
file_name, file_ext = os.path.splitext(file_path)
if file_ext not in ALLOWED_EXTS:
raise Exception('Unsupported meta type %s, file %s. Allowed: %s' %
(file_ext, file_path, ALLOWED_EXTS))
parser_func = PARSER_FUNCS.get(file_ext, None)
with open(file_path, 'r') as fd:
return parser_func(fd) if parser_func else fd.read() | b9fb74d90f181b7066643e82269c5a6915cb57e9 | 3,650,414 |
from typing import Pattern
import re
def _yaml_comment_regex() -> Pattern:
"""
From https://yaml-multiline.info/, it states that `#` cannot appear *after* a space
or a newline, otherwise it will be a syntax error (for multiline strings that don't
use a block scalar). This applies to single lines as well: for example, `a#b` will be
treated as a single value, but `a #b` will only capture `a`, leaving `#b` as a comment.
For lines that *do* use a block scalar, the YAML parser will throw a syntax error if
there is additional text on the same line as the block scalar. Comments however, are fine.
e.g.
key: | # this is ok
blah
key: | but this is not
blah
Given that we've made it to this stage, we can assume the YAML file is syntactically
correct. Therefore, if we add whitespace before the comment character, we can know that
everything else *after* the comment character is a comment for a given line.
"""
return re.compile(r'(\s+#[\S ]*)') | 3b5739f460c3d2c66f802dd46e061d2d07030525 | 3,650,415 |
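A brief check of the behaviour described above (assuming the function is in scope): a `#` only starts a comment when preceded by whitespace.
pattern = _yaml_comment_regex()
assert pattern.search('key: value  # trailing comment').group(1) == '  # trailing comment'
assert pattern.search('url: http://example.com#anchor') is None  # '#' not preceded by whitespace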
def to_list(name: str) -> "Expr":
"""
Aggregate to list
"""
return col(name).list() | 2265c14d13ef92bb5481dc2eee17915288cf95e8 | 3,650,416 |
import re
def format_ipc_dimension(number: float, decimal_places: int = 2) -> str:
"""
Format a dimension (e.g. lead span or height) according to IPC rules.
"""
    formatted = '{:.{}f}'.format(number, decimal_places)
stripped = re.sub(r'^0\.', '', formatted)
return stripped.replace('.', '') | 60001f99b5f107faba19c664f90ee2e9fb61fe68 | 3,650,417 |
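An illustrative check of the formatting rule (example values): the leading '0.' and the decimal point are dropped.
assert format_ipc_dimension(0.35) == '35'
assert format_ipc_dimension(1.27) == '127'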
import numpy as np
import pandas as pd
from scipy import stats
def mean_test(data, muy0, alternative = 'equal', alpha = 0.95):
    """
    Build a confidence interval and run a one-sample t-test of the sample mean against muy0.
    Input:
        data (1D array): the sample of the whole column that you want to evaluate
        muy0 (float): the hypothesised population mean
        alternative (str): 'equal' (two-sided), 'greater' or 'less'
        alpha (float): significance level used for the conclusion; the code derives confidence = 1 - alpha
    Output:
        a one-row pandas DataFrame containing
        - Confidence_Interval (tuple)
        - T_statistic (float)
        - p_value and a plain-text conclusion
    """
# convert datapoint to float
a = 1.0 * np.array(data)
confidence = np.round(1 - alpha, 3)
# Number of observations
n = len(a)
# Compute mean and standard_errors
m, se = np.mean(a), stats.sem(a)
# result of testing
T_stat = ((m - muy0) / se)*np.sqrt(n)
# compute the interval_radius
if alternative in ['equal', "two.side"]:
h = se * stats.t.ppf((1 + confidence) / 2., n-1)
conf_itv = (float(m - h), float(m + h))
alt = "true mean (muy) is not equal to muy0 = {}".format(muy0)
cnls = "true mean (muy) = muy0 = {}".format(muy0)
p_val = 2*min(stats.t.cdf(T_stat, n-1), 1 - stats.t.cdf(T_stat, n-1))
elif alternative == 'greater':
h = se * stats.t.ppf(1 - confidence, n-1)
conf_itv = (float(m - h), '+inf')
alt = "true mean (muy) > muy0 = {}".format(muy0)
cnls = "true mean (muy) <= muy0 = {}".format(muy0)
p_val = 1 - stats.t.cdf(T_stat, n-1)
elif alternative == 'less':
h = se * stats.t.ppf(1 - confidence, n-1)
conf_itv = ('-inf', float(m + h))
alt = "true mean (muy) < muy0 = {}".format(muy0)
cnls = "true mean (muy) >= muy0 = {}".format(muy0)
p_val = stats.t.cdf(T_stat, n-1)
# conclusion
if p_val < alpha:
kl = 'reject the hypothesis, {}'.format(alt)
else:
kl = 'can not reject the null hypothesis, so the {}'.format(cnls)
# save all the output-results
dic_data = pd.DataFrame(
{
'alpha / confidence_level': [{'significance level':alpha, 'confidence_level': 1- alpha}],
'Confidence_Interval': [conf_itv],
'T_statistic': T_stat,
'sample_mean': m,
'alternative_hypothesis': alt,
'p_value': p_val,
'conclusion': "For confidence_level = {}%, we {}".format(100*confidence, kl)
}
)
return dic_data | 9a798eeed1ba2debfe42dbb08ed33c8a1f463fd3 | 3,650,418 |
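A usage sketch with synthetic data (hypothetical values; note that in this implementation `alpha` is the significance level used for the conclusion):
rng = np.random.default_rng(0)
sample = rng.normal(loc=5.0, scale=1.0, size=50)
report = mean_test(sample, muy0=5.0, alternative='equal', alpha=0.05)
print(report[['T_statistic', 'p_value', 'conclusion']])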
def __build_command(command, name, background=None, enable=None):
""" Constuct args for systemctl command.
Args:
command: The systemctl command
name: The unit name or name pattern
background: True to have systemctl perform the command in the background
enable: True to enable/disable, False to start/stop only, None for default.
Returns:
The name with type suffix
"""
args = ['--quiet']
if background:
args.append('--no-block')
if ((enable or name.startswith('appscale-'))
and not enable==False
and command in ('start', 'stop')):
args.append('--now')
args.append('--runtime')
if command == 'start':
args.append('enable')
else:
args.append('disable')
else:
args.append(command)
args.append(__expand_name(name))
return args | 5f47c19bb24d05b66d02c6d93ed1bf90144afb63 | 3,650,419 |
import requests
def dockerFetchLatestVersion(image_name: str) -> list[str]:
"""
Fetches the latest version of a docker image from hub.docker.com
:param image_name: image to search for
:return: list of version suggestions for the image or 'not found' if error was returned
"""
base_url = "https://hub.docker.com/v2/repositories/library"
request = f"{base_url}/{image_name}/tags"
params = {
"ordering": "last_updated",
"name": "."
}
version_list = []
response = requests.get(request, params=params)
if response.status_code == requests.codes.ok:
json = response.json()
version_list = list(
map(lambda i: i["name"], json["results"])
)[:5]
if len(version_list) == 0:
version_list = [NOT_FOUND]
else:
del params["name"]
response = requests.get(request, params=params)
if response.status_code == requests.codes.ok:
json = response.json()
version_list += list(
map(lambda i: i["name"], json["results"])
)[:5]
return sorted(sorted(list(set(version_list)), reverse=True), key=lambda it: _isfloat(it), reverse=True) | 5907dfe92b627c272132f97be9019d735aabd570 | 3,650,420 |
import torch
import torchvision
def _dataset(
dataset_type: str,
transform: str,
train: bool = True
) -> torch.utils.data.Dataset:
"""
Dataset:
mnist: MNIST
cifar10: CIFAR-10
cifar100: CIFAR-100
Transform:
default: the default transform for each data set
simclr: the transform introduced in SimCLR
"""
try:
transform = _get_transform(dataset_type, transform, train)
except KeyError:
raise DatasetNotIncludeError(f"Dataset {dataset_type} or transform {transform} is not included.\n" \
f"Refer to the following: {_dataset.__doc__}")
if dataset_type == "mnist":
dataset = torchvision.datasets.MNIST(
root=ROOT, train=train, download=False,
transform=transform
)
elif dataset_type == "fashionmnist":
dataset = torchvision.datasets.FashionMNIST(
root=ROOT, train=train, download=False,
transform=transform
)
elif dataset_type == "cifar10":
dataset = torchvision.datasets.CIFAR10(
root=ROOT, train=train, download=False,
transform=transform
)
elif dataset_type == "cifar100":
dataset = torchvision.datasets.CIFAR100(
root=ROOT, train=train, download=False,
transform=transform
)
return dataset | cdfeefefede97db0a8d8ad6c3f4620855004062c | 3,650,421 |
import pandas as pd
def inferCustomerClasses(param_file, evidence_dir, year):
"""
This function uses the variable elimination algorithm from libpgm to infer the customer class of each AnswerID, given the evidence presented in the socio-demographic survey responses.
It returns a tuple of the dataframe with the probability distribution over all classes for each AnswerID and the BN object.
"""
bn = loadbn(param_file)
evidence, a_id = readEvidence(year, evidence_dir)
query = {"customer_class":''}
cols = bn.Vdata.get('customer_class')['vals']
result = pd.DataFrame(columns=cols) #create empty dataframe in which to store inferred probabilities
count = 0 #set counter
for e in evidence:
bn = loadbn(param_file)
fn = TableCPDFactorization(bn)
try:
inf = fn.condprobve(query, e)
classprobs = list(inf.vals)
result.loc[count] = classprobs
count += 1
except:
result.loc[count] = [None] * len(cols)
count += 1
result['AnswerID'] = a_id
result.set_index(keys='AnswerID',inplace=True)
return result | 212906aacc2bc3d607e1589742591834953de14e | 3,650,422 |
def MidiSegInfo(segment):
""" Midi file info saved in config file for speed """
class segInfo:
iMsPerTick = 0
bpm = 4
ppqn = 480
total_ticks = 0
iLengthInMs = 0
iTracks = 0
trackList = []
ver = "1.5"
ret = segInfo()
savedVer = IniGetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "Ver")
savedDateTime = IniGetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "DateTime")
dateTime = FileDateTime(segment.filename)
if ver != savedVer or dateTime != savedDateTime:
mi = GetMidiInfo(segment.filename)
if mi.err == 0:
IniSetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "Ver", ver)
IniSetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "DateTime", str(dateTime))
IniSetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "PPQN", str(mi.ppqn))
IniSetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "BPM", str(mi.beats_per_measure))
IniSetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "totalTicks", str(mi.totalTicks))
IniSetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "maxTracks", str(mi.maxTracks))
iLengthInMs = GetMidiFileLength(segment.filename) * 1000
IniSetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "LengthInMs", str(iLengthInMs))
if iLengthInMs > 0:
IniSetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "MsPerTick", str(iLengthInMs / mi.totalTicks))
#have to write out the tracklist in format that can be saved in INI file
tl = []
for track in mi.trackList:
tl.append((track.track, track.channel, track.name))
IniSetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "Tracks", tl)
trackList = []
tl = IniGetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "Tracks", 'list', [])
for t in tl:
trackList.append(trackGrid(t[0], t[1], t[2],False))
iTracks = IniGetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "maxTracks", 'int', 0)
iMsPerTick = IniGetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "MsPerTick", 'float', 0)
bpm = IniGetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "BPM", 'int', 0)
ppqn = IniGetValue(JetDefs.JETMIDIFILES_INI, segment.filename, "PPQN", 'int', 480)
if iMsPerTick == 0 or bpm == 0 or ppqn == 0:
return ret
tb = TimeBase(ppqn, bpm)
total_ticks = tb.ConvertStrTimeToTicks(segment.length)
if total_ticks == 0:
total_ticks = tb.MbtDifference(tb.ConvertStrTimeToTuple(segment.start), tb.ConvertStrTimeToTuple(segment.end))
if total_ticks == 0:
return ret
ret.iTracks = iTracks
ret.iMsPerTick = iMsPerTick
ret.bpm = bpm
ret.ppqn = ppqn
ret.total_ticks = total_ticks
ret.iLengthInMs = total_ticks * iMsPerTick
ret.trackList = trackList
return ret | 7d48b699ed52239cf08e57b217f5dd62f3c64a84 | 3,650,423 |
def num_in_row(board, row, num):
"""True if num is already in the row, False otherwise"""
return num in board[row] | ca9ab9de4514740e25e0c55f3613d03b2844cdb8 | 3,650,424 |
import os
import gzip
import cPickle
import urllib
def load_mnist(dataset="mnist.pkl.gz"):
"""
dataset: string, the path to dataset (MNIST)
"""
data_dir, data_file = os.path.split(dataset)
# download MNIST if not found
if not os.path.isfile(dataset):
origin = (
'http://www.iro.umontreal.ca/~lisa/deep/data/mnist/mnist.pkl.gz'
)
        print('Downloading MNIST from %s' % origin)
        assert urllib.urlretrieve(origin, dataset)
    print("Loading Data ...")
with gzip.open(dataset, 'rb') as handle:
train_set, valid_set, test_set = cPickle.load(handle)
rval = [(train_set[0],to_categorical(train_set[1])),
(valid_set[0],to_categorical(valid_set[1])),
(test_set[0], to_categorical(test_set[1]))]
#train_X, train_y = shared_data(train_set[0]),shared_data_int32(to_categorical(train_set[1]))
#valid_X, valid_y = shared_data(valid_set[0]),shared_data_int32(to_categorical(valid_set[1]))
#test_X, test_y = shared_data(test_set[0]),shared_data_int32(to_categorical(test_set[1]))
#
#rval = [(train_X, train_y), (valid_X, valid_y), (test_X, test_y)]
return rval | 2e499431bed7a8c1c775b04d6272153564d9c99f | 3,650,425 |
import os
# Note: `path` below is the package's data directory (a module-level string constant), not sys.path.
def load_hs13_ZSBB_params():
"""Load the standard parameters to generate SocioPatterns HS13
surrogate networks using the ZSBB model.
Returns
-------
:obj:`dict`
The `kwargs` to pass to :func:`tacoma.ZSBB_model`.
"""
fn = os.path.join(path,'ht09_zsbb_params.json')
return tc.load_json_dict(fn) | a6a922fd696d19e2f0dc609cd14db814b84a1e4d | 3,650,426 |
def factorial_3(n, acc=1):
"""
Replace all recursive tail calls f(x=x1, y=y1, ...) with (x, y, ...) = (x1, y1, ...); continue
"""
while True:
if n < 2:
return 1 * acc
(n, acc) = (n - 1, acc * n)
continue
break | e067cf4564056bf488e56fe58bbd5b998b0175f3 | 3,650,427 |
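A quick check that the loop-based rewrite behaves like the recursive factorial it replaces:
assert factorial_3(0) == 1
assert factorial_3(5) == 120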
import numpy as np
def autocorr(x, axis=0, fast=False):
"""
Estimate the autocorrelation function of a time series using the FFT.
:param x:
The time series. If multidimensional, set the time axis using the
``axis`` keyword argument and the function will be computed for every
other axis.
:param axis: (optional)
The time axis of ``x``. Assumed to be the first axis if not specified.
:param fast: (optional)
If ``True``, only use the largest ``2^n`` entries for efficiency.
(default: False)
"""
x = np.atleast_1d(x)
m = [slice(None), ] * len(x.shape)
# For computational efficiency, crop the chain to the largest power of
# two if requested.
if fast:
n = int(2**np.floor(np.log2(x.shape[axis])))
m[axis] = slice(0, n)
        x = x[tuple(m)]
else:
n = x.shape[axis]
# Compute the FFT and then (from that) the auto-correlation function.
f = np.fft.fft(x-np.mean(x, axis=axis), n=2*n, axis=axis)
m[axis] = slice(0, n)
    acf = np.fft.ifft(f * np.conjugate(f), axis=axis)[tuple(m)].real
    m[axis] = 0
    return acf / acf[tuple(m)] | e2e4105bd0a4aed3431af6acf4f3669bb3340825 | 3,650,428 |
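A small sanity check on synthetic data: after normalisation the lag-0 autocorrelation is exactly 1.
x = np.random.RandomState(42).randn(1024)
acf = autocorr(x, fast=True)
assert abs(acf[0] - 1.0) < 1e-12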
import re
import dateutil
def parse_date(filename_html):
"""Parse a file, and return the date associated with it.
filename_html -- Name of file to parse.
"""
match = re.search(r"\d{4}-\d{2}-\d{2}", filename_html)
if not match:
return None
match_date = match.group()
file_date = dateutil.parser.parse(match_date).date()
return file_date | 2ee45c3f70b75fc2d26b9c00861dbb1e7586d4af | 3,650,429 |
import os
def build_static_runtime(
workspace,
compiler,
module,
compiler_options,
executor=None,
extra_libs=None,
):
"""Build the on-device runtime, statically linking the given modules.
Parameters
----------
compiler : tvm.micro.Compiler
Compiler instance used to build the runtime.
module : IRModule
Module to statically link.
compiler_options : dict
The return value of tvm.micro.default_options(), with any keys overridden to inject
compiler options specific to this build. If not given, tvm.micro.default_options() is
used. This dict contains the `options` parameter passed to Compiler.library() and
Compiler.binary() at various stages in the compilation process.
executor : Optional[str]
Executor used for runtime. Based on this we determine the libraries that need to be
linked with runtime.
extra_libs : Optional[List[MicroLibrary|str]]
If specified, extra libraries to be compiled into the binary. If a MicroLibrary, it is
included into the binary directly. If a string, the path to a directory; all direct children
of this directory matching RUNTIME_SRC_REGEX are built into a library. These libraries are
placed before any common CRT libraries in the link order.
Returns
-------
MicroBinary :
The compiled runtime.
"""
mod_build_dir = workspace.relpath(os.path.join("build", "module"))
os.makedirs(mod_build_dir)
mod_src_dir = workspace.relpath(os.path.join("src", "module"))
if not executor:
executor = "host-driven"
libs = []
for mod_or_src_dir in (extra_libs or []) + get_runtime_libs(executor):
if isinstance(mod_or_src_dir, MicroLibrary):
libs.append(mod_or_src_dir)
continue
lib_src_dir = mod_or_src_dir
lib_name = os.path.basename(lib_src_dir)
lib_build_dir = workspace.relpath(f"build/{lib_name}")
os.makedirs(lib_build_dir)
lib_srcs = []
for p in os.listdir(lib_src_dir):
if RUNTIME_SRC_REGEX.match(p):
lib_srcs.append(os.path.join(lib_src_dir, p))
libs.append(compiler.library(lib_build_dir, lib_srcs, compiler_options["lib_opts"]))
mod_src_dir = workspace.relpath(os.path.join("src", "module"))
os.makedirs(mod_src_dir)
libs.append(
module.export_library(
mod_build_dir,
workspace_dir=mod_src_dir,
fcompile=lambda bdir, srcs, **kwargs: compiler.library(
bdir, srcs, compiler_options["generated_lib_opts"]
),
)
)
runtime_build_dir = workspace.relpath(f"build/runtime")
os.makedirs(runtime_build_dir)
return compiler.binary(runtime_build_dir, libs, compiler_options["bin_opts"]) | 2c9b451a3c0208fb7dc1439fdf887b1a17d749ce | 3,650,430 |
def mod(a1, a2):
"""
Function to give the remainder
"""
return a1 % a2 | f5c03a952aed373e43933bafe37dbc75e796b74d | 3,650,431 |
from django.conf import settings
def select_theme_dirs():
"""
Load theme templates, if applicable
"""
if settings.THEME_DIR:
return ["themes/" + settings.THEME_DIR + "/templates", "templates"]
else:
return ["templates"] | df74bc751f701be63276b5481ac222e64ba914e7 | 3,650,432 |
def encode_string(s):
"""
Simple utility function to make sure a string is proper
to be used in a SQL query
EXAMPLE:
That's my boy! -> N'That''s my boy!'
"""
res = "N'"+s.replace("'","''")+"'"
res = res.replace("\\''","''")
res = res.replace("\''","''")
return res | 814822b9aa15def24f98b2b280ab899a3f7ea617 | 3,650,433 |
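The example from the docstring, as an executable check:
assert encode_string("That's my boy!") == "N'That''s my boy!'"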
from django.contrib import messages
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse
def email_manage(request, email_pk, action):
"""Set the requested email address as the primary. Can only be
requested by the owner of the email address."""
email_address = get_object_or_404(EmailAddress, pk=email_pk)
if not email_address.user == request.user and not request.user.is_staff:
messages.error(request, "You are not authorized to manage this email address")
# if not email_address.is_verified():
# messages.error(request, "Email '%s' needs to be verified first." % email_address.email)
if action == "set_primary":
email_address.set_primary()
messages.success(request, "'%s' is now marked as your primary email address." % email_address.email)
elif action == "delete":
email_address.delete()
messages.success(request, "'%s' has been removed." % email_address.email)
if 'HTTP_REFERER' in request.META:
return redirect(request.META['HTTP_REFERER'])
else:
return redirect(reverse('member:profile:view', kwargs={'username': email_address.user.username})) | 7a533fe34fdc13b737025c01bb0bb15dcbeae0f2 | 3,650,434 |
def get_container_service_api_version():
"""Get zun-api-version with format: 'container X.Y'"""
return 'container ' + CONTAINER_SERVICE_MICROVERSION | c6f83640b50132e24ce96889688afcda49ba6b1c | 3,650,435 |
import yaml
import os
def prepare_config(config_path=None,
schema_path=None,
config=None,
schema=None,
base_path=None) -> Munch:
"""
Takes in paths to config and schema files.
Validates the config against the schema, normalizes the config, parses gin and converts the config to a Munch.
"""
# Load up the config
if config is None:
assert config_path is not None, 'Please pass in either config or config_path.'
assert config_path.endswith('.yaml'), 'Must use a YAML file for the config.'
config = yaml.load(open(config_path),
Loader=yaml.FullLoader)
# If the config is a Quinfig object, just grab the __dict__ for convenience
if isinstance(config, Quinfig):
config = config.__dict__
# Convert config to Munch: iffy ensures that the Munch fn is only applied to mappings
config = walk_values_rec(iffy(is_mapping, lambda c: Munch(**c)), config)
# Load up the schema
if schema is None:
if schema_path is not None:
assert schema_path.endswith('.yaml'), 'Must use a YAML file for the config.'
schema = yaml.load(open(schema_path),
Loader=yaml.FullLoader)
if schema is not None:
# Allow gin configuration at any level of nesting: put a gin tag at every level of the schema
schema = autoexpand_schema(schema)
# Validate the config against the schema
validate_config(config, schema)
# Normalize the config
if not base_path:
base_path = os.path.dirname(os.path.abspath(config_path)) if config_path else ''
else:
base_path = os.path.abspath(base_path)
config = normalize_config(config, schema, base_path=base_path)
# Convert config to Munch: iffy ensures that the Munch fn is only applied to mappings
config = walk_values_rec(iffy(is_mapping, lambda c: Munch(**c)), config)
# Parse and load the gin configuration
nested_gin_dict_parser(config)
return config | 3bcf3a033134475ef873ab631410cc210c5b09a2 | 3,650,436 |
from django.http import HttpRequest
from django.urls import reverse
from django.utils.cache import get_cache_key
from django.core.cache import cache
def invalidate_view_cache(view_name, args=[], namespace=None, key_prefix=None):
"""
This function allows you to invalidate any view-level cache.
view_name: view function you wish to invalidate or it's named url pattern
args: any arguments passed to the view function
namepace: if an application namespace is used, pass that
key prefix: for the @cache_page decorator for the function (if any)
"""
# create a fake request object
request = HttpRequest()
# Loookup the request path:
if namespace:
view_name = namespace + ":" + view_name
request.path = reverse(view_name, args=args)
# get cache key, expire if the cached item exists:
key = get_cache_key(request, key_prefix=key_prefix)
if key:
if cache.get(key):
cache.set(key, None, 0)
return True
return False | 2622e6ee48cb7565014660858104edba5b20b9eb | 3,650,437 |
import numpy as np
from scipy.ndimage import gaussian_filter1d
def compute_ray_features_segm_2d(seg_binary, position, angle_step=5., smooth_coef=0, edge='up'):
""" compute ray features vector , shift them to be starting from larges
and smooth_coef them by gauss filter
(from given point the close distance to boundary)
:param ndarray seg_binary: np.array<height, width>
:param tuple(int,int) position: integer position in the segmentation
:param float angle_step: angular step for ray features
:param str edge: pointing to the up of down edge of an boundary
:param int smooth_coef: smoothing the final ray features
:return list(float): ray distances
.. seealso:: :func:`imsegm.descriptors.compute_ray_features_segm_2d_vectors`
.. note:: for more examples, see unittests
>>> seg_empty = np.zeros((100, 150), dtype=bool)
>>> compute_ray_features_segm_2d(seg_empty, (50, 75), 90) # doctest: +ELLIPSIS
array([-1., -1., -1., -1.]...)
>>> from skimage import draw
>>> seg = np.ones((100, 150), dtype=bool)
>>> x, y = draw.circle(50, 75, 40, shape=seg.shape)
>>> seg[x, y] = False
>>> np.round(compute_ray_features_segm_2d(seg, (50, 75), 45)) # doctest: +ELLIPSIS
array([ 40., 41., 40., 41., 40., 41., 40., 41.]...)
>>> np.round(compute_ray_features_segm_2d(seg, (60, 40), 30, smooth_coef=1)).tolist()
[66.0, 52.0, 32.0, 16.0, 8.0, 5.0, 5.0, 8.0, 16.0, 33.0, 53.0, 67.0]
>>> ray_fts = compute_ray_features_segm_2d(seg, (40, 60), 20)
>>> np.round(ray_fts).tolist() # doctest: +NORMALIZE_WHITESPACE
[54.0, 57.0, 59.0, 55.0, 51.0, 44.0, 38.0, 31.0, 27.0, 24.0, 22.0, 22.0,
23.0, 26.0, 29.0, 35.0, 42.0, 49.0]
"""
assert seg_binary.ndim == len(position), \
'Segmentation dim of %r and position (%i) does not match' \
% (seg_binary.ndim, len(position))
seg_binary = seg_binary.astype(bool)
position = tuple(map(int, position))
fn_compute = cython_ray_features_seg2d if USE_CYTHON else numpy_ray_features_seg2d
ray_dist = fn_compute(seg_binary, position, angle_step, edge)
if smooth_coef is not None and smooth_coef > 0:
ray_dist = gaussian_filter1d(ray_dist, smooth_coef)
return ray_dist | 18b830fe6ac83cf7282be39d368ad7c1261a890c | 3,650,438 |
import cv2
def visualize_bbox(img, bbox, class_name, color=(255, 0, 0), thickness=2):
"""Visualizes a single bounding box on the image"""
BOX_COLOR = (255, 0, 0) # Red
TEXT_COLOR = (255, 255, 255) # White
x_min, y_min, x_max, y_max = bbox
cv2.rectangle(img, (x_min, y_min), (x_max, y_max), color=color, thickness=thickness)
((text_width, text_height), _) = cv2.getTextSize(class_name, cv2.FONT_HERSHEY_SIMPLEX, 0.35, 1)
cv2.rectangle(img, (x_min, y_min - int(1.3 * text_height)), (x_min + text_width, y_min), BOX_COLOR, -1)
cv2.putText(
img,
text=class_name,
org=(x_min, y_min - int(0.3 * text_height)),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.35,
color=TEXT_COLOR,
lineType=cv2.LINE_AA,
)
return img | 147335c2e87b57f0bd0ba0840e9dae9f713b513f | 3,650,439 |
import json
def mock_light():
"""Mock UniFi Protect Camera device."""
data = json.loads(load_fixture("sample_light.json", integration=DOMAIN))
return Light.from_unifi_dict(**data) | ba83ae80ddb39ec9f9b6f30b77eabee39f998b39 | 3,650,440 |
def randomize_bulge_i(N, M, bp='G', target='none', ligand='theo'):
"""
Replace the upper stem with the aptamer and randomize the bulge to connect
it to the lower stem.
This is a variant of the rb library with two small differences. First, the
nucleotides flanking the aptamer are not randomized and are instead
guaranteed to base pair. The default base pair is GC. However, note that
to be consistent with rb, this base pair is considered part of the linker,
and is included in the N and M arguments. So rbi/4/8 only randomizes 10
positions. Second, the library is based off the Dang scaffold. Most
extended upper stem is replaced with the aptamer, but the CG base pair in
the lower stem remains.
Parameters
----------
N: int
The length of the linker on the 5' side of the aptamer. This length
includes a non-randomized base pair immediately adjacent to the aptamer.
M: int
The length of the linker on the 3' side of the aptamer. This length
includes a non-randomized base pair immediately adjacent to the aptamer.
bp: 'ACGU'
Not implemented, but this would be a good interface for changing the
static base pair. Right now to base pair is hard-coded to be GC.
"""
sgrna = on(target=target)
sgrna['bulge/5'].attachment_sites = 0,
sgrna['bulge/3'].attachment_sites = 4,
sgrna.attach(
random_insert(ligand, N, M, flags='g'),
'bulge/5', 0,
'bulge/3', 4,
)
return sgrna | aff8aa9e6ba276bb9d867f24c58c44e0e3c849f6 | 3,650,441 |
def JSparser(contents: str) -> str:
"""
Is supposed to replace URLs in JS.
Arguments:
contents: JS string
Returns:
Input JS string
"""
# TODO: URL replacement
return contents | 393216d8e10677405d888e4cdd6a884aebb0684a | 3,650,442 |
import logging
def parse_identifier(db, identifier):
"""Parse the identifier and return an Identifier object representing it.
:param db: Database session
:type db: sqlalchemy.orm.session.Session
:param identifier: String containing the identifier
:type identifier: str
:return: Identifier object
:rtype: Optional[core.model.identifier.Identifier]
"""
parsed_identifier = None
try:
result = Identifier.parse_urn(db, identifier)
if result is not None:
parsed_identifier, _ = result
except Exception:
logging.error(
f"An unexpected exception occurred during parsing identifier {identifier}"
)
return parsed_identifier | 390fc8d44014d2cdb17fc641dab9914ba13bc95e | 3,650,443 |
import textwrap
import matplotlib.pyplot as plt
def wrap_name(dirname, figsize):
"""Wrap name to fit in subfig."""
fontsize = plt.rcParams["font.size"]
# 1/120 = inches/(fontsize*character)
num_chars = int(figsize / fontsize * 72)
return textwrap.fill(dirname, num_chars) | 7fb7430a01781c7c53637ae4a94c72c057faddab | 3,650,444 |
import tensorflow as tf  # uses the TensorFlow 1.x API (tf.compat.v1 in TF2)
def resolve_shape(tensor, rank=None, scope=None):
"""Fully resolves the shape of a Tensor.
Use as much as possible the shape components already known during graph
creation and resolve the remaining ones during runtime.
Args:
tensor: Input tensor whose shape we query.
rank: The rank of the tensor, provided that we know it.
scope: Optional name scope.
Returns:
shape: The full shape of the tensor.
"""
with tf.name_scope(scope, 'resolve_shape', [tensor]):
if rank is not None:
shape = tensor.get_shape().with_rank(rank).as_list()
else:
shape = tensor.get_shape().as_list()
if None in shape:
shape_dynamic = tf.shape(tensor)
for i in range(len(shape)):
if shape[i] is None:
shape[i] = shape_dynamic[i]
return shape | cc1c3a0bd2b5a35580dd94b6c45a2a36cc151e5a | 3,650,445 |
import tensorflow as tf  # uses the TensorFlow 1.x API (tf.compat.v1 in TF2)
def gradient_clip(gradients, max_gradient_norm):
"""Clipping gradients of a model."""
clipped_gradients, gradient_norm = tf.clip_by_global_norm(
gradients, max_gradient_norm)
gradient_norm_summary = [tf.summary.scalar("grad_norm", gradient_norm)]
gradient_norm_summary.append(
tf.summary.scalar("clipped_gradient", tf.global_norm(clipped_gradients)))
return clipped_gradients, gradient_norm_summary | 416f9560ad612ab364cd03de39851a559012d26b | 3,650,446 |
import subprocess
def get_sha_from_ref(repo_url, reference):
""" Returns the sha corresponding to the reference for a repo
:param repo_url: location of the git repository
:param reference: reference of the branch
:returns: utf-8 encoded string of the SHA found by the git command
"""
# Using subprocess instead of convoluted git libraries.
# Any rc != 0 will be throwing an exception, so we don't have to care
out = subprocess.check_output(
["git", "ls-remote", "--exit-code", repo_url, reference]
)
# out is a b'' type string always finishing up with a newline
# construct list of (ref,sha)
refs = [
(line.split(b"\t")[1], line.split(b"\t")[0])
for line in out.split(b"\n")
if line != b"" and b"^{}" not in line
]
if len(refs) > 1:
raise ValueError(
"More than one ref for reference %s, please be more explicit %s"
% (reference, refs)
)
return refs[0][1].decode("utf-8") | d7ab3e98217fa57e0831a6df94d34f1cf45e3d97 | 3,650,447 |
def list_species(category_id):
"""
List all the species for the specified category
:return: A list of Species instances
"""
with Session.begin() as session:
species = session.query(Species)\
.filter(Species.categoryId == category_id)\
.order_by(db.asc(Species.name))\
.all()
return species | c49283fdde11456ffc6e4eff4b5043d547fa9908 | 3,650,448 |
def get_motif_proteins(meme_db_file):
""" Hash motif_id's to protein names using the MEME DB file """
motif_protein = {}
for line in open(meme_db_file):
a = line.split()
if len(a) > 0 and a[0] == 'MOTIF':
if a[2][0] == '(':
motif_protein[a[1]] = a[2][1:a[2].find(')')]
else:
motif_protein[a[1]] = a[2]
return motif_protein | 88e42b84314593a965e7dd681ded612914e35629 | 3,650,449 |
import os
import sys
from glob import iglob
def get_files(search_glob):
""" Returns all files matching |search_glob|. """
recursive_glob = '**' + os.path.sep
if recursive_glob in search_glob:
if sys.version_info >= (3, 5):
result = iglob(search_glob, recursive=True)
else:
# Polyfill for recursive glob pattern matching added in Python 3.5.
result = get_files_recursive(*search_glob.split(recursive_glob))
else:
result = iglob(search_glob)
# Sort the result for consistency across platforms.
return sorted(result) | 3e09c26b0e2a77f3c883fce048825af4c025d502 | 3,650,450 |
import cv2 as cv
def pixellate_bboxes(im, bboxes, cell_size=(5,6), expand_per=0.0):
"""Pixellates ROI using Nearest Neighbor inerpolation
:param im: (numpy.ndarray) image BGR
:param bbox: (BBox)
:param cell_size: (int, int) pixellated cell size
:returns (numpy.ndarray) BGR image
"""
if not bboxes:
return im
elif not type(bboxes) == list:
bboxes = list(bboxes)
for bbox in bboxes:
if expand_per > 0:
bbox = bbox.expand_per(expand_per)
x1,y1,x2,y2 = bbox.xyxy_int
im_roi = im[y1:y2, x1:x2]
h,w,c = im_roi.shape
# pixellate
im_roi = cv.resize(im_roi, cell_size, interpolation=cv.INTER_NEAREST)
im_roi = cv.resize(im_roi, (w,h), interpolation=cv.INTER_NEAREST)
im[y1:y2, x1:x2] = im_roi
return im | 8c028714467c350dfd799671b0b18739705393ba | 3,650,451 |
import sys
def in_ipython() -> bool:
"""try to detect whether we are in an ipython shell, e.g., a jupyter notebook"""
ipy_module = sys.modules.get("IPython")
if ipy_module:
return bool(ipy_module.get_ipython())
else:
return False | 7a6804b964bd7fbde6d5795da953954343575413 | 3,650,452 |
def StepToGeom_MakeHyperbola2d_Convert(*args):
"""
:param SC:
:type SC: Handle_StepGeom_Hyperbola &
:param CC:
:type CC: Handle_Geom2d_Hyperbola &
:rtype: bool
"""
return _StepToGeom.StepToGeom_MakeHyperbola2d_Convert(*args) | 6896b27c10526b3a7f1d5840a63209a6f30d163e | 3,650,453 |
def create_dict(local=None, field=None, **kwargs):
"""
    Fetch the specified variables from the local variables (locals()) in the form of a dict.
    :param local: dict
    :param field: str[] names of the variables to read from local
    :param kwargs: used when a variable should be stored under an additional name
:return: dict
"""
if field is None or local is None:
return {}
result = {k: v for k, v in local.items() if k in field}
result.update(**kwargs)
return result | 19aceef7f648cc72f29fceba811085cde9d6d587 | 3,650,454 |
def sum_list_for_datalist(list):
"""
    When saving to the DB, return 0 if there is no historical data
    available before the reference date.
:param list:
:return: float or int
"""
mysum = 0
for i in range(0, len(list)):
if list[i] == 0:
return 0
mysum = mysum + list[i]
return mysum | bae8966f64c642176d92d31c27df691e0f255d6a | 3,650,455 |
import tensorflow as tf
def elu(x, alpha=1.):
"""Exponential linear unit.
Arguments:
x {tensor} -- Input float tensor to perform activation.
alpha {float} -- A scalar, slope of negative section.
Returns:
tensor -- Output of exponential linear activation
"""
return x * (x > 0) + alpha * (tf.math.exp(x) - 1.) * (x < 0) | c40c7aa4a0553dc6b0b6c6dd9f583a701f022e70 | 3,650,456 |
def coalesce(*args):
"""
Compute the first non-null value(s) from the passed arguments in
left-to-right order. This is also known as "combine_first" in pandas.
Parameters
----------
*args : variable-length value list
Examples
--------
>>> import ibis
>>> expr1 = None
>>> expr2 = 4
>>> result = ibis.coalesce(expr1, expr2, 5)
Returns
-------
coalesced : type of first provided argument
"""
return ops.Coalesce(args).to_expr() | 0fb1af5db75c7ad65f470e348d76d0f289ba5ff2 | 3,650,457 |
import json
import connexion
from flask import Response
def get_analysis(poem, operations, rhyme_analysis=False, alternative_output=False):
"""
View for /analysis that perform an analysis of poem running the different
operations on it.
:param poem: A UTF-8 encoded byte string with the text of the poem
:param operations: List of strings with the operations to be performed:
- "scansion": Performs scansion analysis
- "enjambment": Performs enjambment detection
:param rhyme_analysis: Whether or not rhyme analysis is to be performed
:return: Response object with a dict with a key for each operation and its
analysis or a serialized version of it
"""
analysis = analyze(poem.decode('utf-8'), operations, rhyme_analysis, alternative_output)
mime = connexion.request.headers.get("Accept")
# serialization = serialize(analysis, mime)
return Response(json.dumps(analysis), mimetype=mime) | 1f40376a4ecbbe6453caa909406f595707cc44be | 3,650,458 |
def get_detail_root():
"""
Get the detail storage path in the git project
"""
return get_root() / '.detail' | aa2c30ed839d32e084a11c52f17af621ecfb9011 | 3,650,459 |
def solve(strs, m, n):
"""
2D 0-1 knapsack
"""
def count(s):
m, n = 0, 0
for c in s:
if c == "0":
m += 1
elif c == "1":
n += 1
return m, n
dp = []
for _ in range(m + 1):
dp.append([0] * (n + 1))
for s in strs:
mi, ni = count(s)
for j in range(m, mi - 1, -1): # reverse!
for k in range(n, ni - 1, -1): # reverse!
dp[j][k] = max(dp[j][k], dp[j - mi][k - ni] + 1)
return dp[m][n] | 3fb2b16fc9059227c0edce1199269988d18cb908 | 3,650,460 |
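A usage sketch on the classic "ones and zeroes" instance (at most m=5 zeros and n=3 ones): the largest feasible subset has 4 strings.
assert solve(["10", "0001", "111001", "1", "0"], m=5, n=3) == 4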
def bk_category_chosen_category():
"""Returns chosen category for creating bk_category object."""
return "Bread" | cbf1c933e5c2b69214e828afaab5babdba61dca8 | 3,650,461 |
import math
import numpy as np
import uproot
def apply_weights(
events,
total_num=1214165.85244438, # for chips_1200
nuel_frac=0.00003202064566, # for chips_1200
anuel_frac=0.00000208200747, # for chips_1200
numu_frac=0.00276174709613, # for chips_1200
anumu_frac=0.00006042213136, # for chips_1200
cosmic_frac=0.99714372811940, # for chips_1200
osc_file_name="./inputs/oscillations/matter_osc_cp_zero.root",
verbose=False,
):
"""Calculate and apply the 'weight' column to scale events to predicted numbers.
Args:
events (pd.DataFrame): events dataframe to append weights to
total_num (float): total number of expected events in a year
nuel_frac (float): fraction of events from nuel
anuel_frac (float): fraction of events from anuel
numu_frac (float): fraction of events from numu
anumu_frac (float): fraction of events from anumu
cosmic_frac (float): fractions of events from cosmics
osc_file_name (str): Oscillation data file name
verbose (bool): should we print the weight summary?
Returns:
pd.DataFrame: events dataframe with weights
"""
def apply_scale_weight(event, w_nuel, w_anuel, w_numu, w_anumu, w_cosmic):
"""Add the correct weight to each event.
Args:
event (dict): pandas event(row) dict
w_nuel: nuel weight
w_anuel: anuel weight
w_numu: numu weight
w_anumu: anumu weight
w_cosmic: cosmic weight
"""
if (
event[data.MAP_NU_TYPE["name"]] == 0
and event[data.MAP_SIGN_TYPE["name"]] == 0
and event[data.MAP_COSMIC_CAT["name"]] == 0
and event["t_sample_type"] == 0
): # Beam nuel
return w_nuel
elif (
event[data.MAP_NU_TYPE["name"]] == 0
and event[data.MAP_SIGN_TYPE["name"]] == 0
and event[data.MAP_COSMIC_CAT["name"]] == 0
and event["t_sample_type"] == 1
): # Appeared nuel
return 1
elif (
event[data.MAP_NU_TYPE["name"]] == 0
and event[data.MAP_SIGN_TYPE["name"]] == 1
and event[data.MAP_COSMIC_CAT["name"]] == 0
and event["t_sample_type"] == 0
): # Beam anuel
return w_anuel
elif (
event[data.MAP_NU_TYPE["name"]] == 1
and event[data.MAP_SIGN_TYPE["name"]] == 0
and event[data.MAP_COSMIC_CAT["name"]] == 0
and event["t_sample_type"] == 0
): # Beam numu
return w_numu
elif (
event[data.MAP_NU_TYPE["name"]] == 1
and event[data.MAP_SIGN_TYPE["name"]] == 1
and event[data.MAP_COSMIC_CAT["name"]] == 0
and event["t_sample_type"] == 0
): # Beam anumu
return w_anumu
elif event[data.MAP_COSMIC_CAT["name"]] == 1 and event["t_sample_type"] == 2:
return w_cosmic
else:
return 0
def apply_osc_weight(event, numu_survival_prob, nuel_osc):
"""Add the correct weight to each event.
Args:
event (dict): pandas event(row) dict
numu_survival_prob (np.array): numu survival probability array
nuel_osc (np.array): numu appearance scaled probability array
"""
if (
event[data.MAP_NU_TYPE["name"]] == 0
and event[data.MAP_SIGN_TYPE["name"]] == 0
and event[data.MAP_COSMIC_CAT["name"]] == 0
and event["t_sample_type"] == 0
): # Beam nuel
return event["w"]
if (
event[data.MAP_NU_TYPE["name"]] == 0
and event[data.MAP_SIGN_TYPE["name"]] == 0
and event[data.MAP_COSMIC_CAT["name"]] == 0
and event["t_sample_type"] == 1
): # Appeared nuel
nu_energy = math.floor(event["t_nu_energy"] / 100)
if nu_energy > 99:
nu_energy = 99
if nuel_osc[nu_energy] == 0.0:
return event["w"]
else:
return nuel_osc[nu_energy] * event["w"]
elif (
event[data.MAP_NU_TYPE["name"]] == 0
and event[data.MAP_SIGN_TYPE["name"]] == 1
and event[data.MAP_COSMIC_CAT["name"]] == 0
and event["t_sample_type"] == 0
): # beam anuel
return event["w"]
elif (
event[data.MAP_NU_TYPE["name"]] == 1
and event[data.MAP_SIGN_TYPE["name"]] == 0
and event[data.MAP_COSMIC_CAT["name"]] == 0
and event["t_sample_type"] == 0
): # Beam numu
nu_energy = math.floor(event["t_nu_energy"] / 100)
if nu_energy > 99:
nu_energy = 99
return numu_survival_prob[nu_energy] * event["w"]
elif (
event[data.MAP_NU_TYPE["name"]] == 1
and event[data.MAP_SIGN_TYPE["name"]] == 1
and event[data.MAP_COSMIC_CAT["name"]] == 0
and event["t_sample_type"] == 0
): # Beam anumu
nu_energy = math.floor(event["t_nu_energy"] / 100)
if nu_energy > 99:
nu_energy = 99
return numu_survival_prob[nu_energy] * event["w"]
elif event[data.MAP_COSMIC_CAT["name"]] == 1 and event["t_sample_type"] == 2:
return event["w"]
else:
return 0
np.seterr(divide="ignore", invalid="ignore")
tot_nuel = events[
(events[data.MAP_NU_TYPE["name"]] == 0)
& (events[data.MAP_SIGN_TYPE["name"]] == 0)
& (events[data.MAP_COSMIC_CAT["name"]] == 0)
& (events["t_sample_type"] == 0)
].shape[0]
tot_anuel = events[
(events[data.MAP_NU_TYPE["name"]] == 0)
& (events[data.MAP_SIGN_TYPE["name"]] == 1)
& (events[data.MAP_COSMIC_CAT["name"]] == 0)
& (events["t_sample_type"] == 0)
].shape[0]
tot_numu = events[
(events[data.MAP_NU_TYPE["name"]] == 1)
& (events[data.MAP_SIGN_TYPE["name"]] == 0)
& (events[data.MAP_COSMIC_CAT["name"]] == 0)
& (events["t_sample_type"] == 0)
].shape[0]
tot_anumu = events[
(events[data.MAP_NU_TYPE["name"]] == 1)
& (events[data.MAP_SIGN_TYPE["name"]] == 1)
& (events[data.MAP_COSMIC_CAT["name"]] == 0)
& (events["t_sample_type"] == 0)
].shape[0]
tot_cosmic = events[events[data.MAP_COSMIC_CAT["name"]] == 1].shape[0]
if tot_nuel == 0:
w_nuel = 0.0
else:
w_nuel = (1.0 / tot_nuel) * (nuel_frac * total_num)
if tot_anuel == 0:
w_anuel = 0.0
else:
w_anuel = (1.0 / tot_anuel) * (anuel_frac * total_num)
if tot_numu == 0:
w_numu = 0.0
else:
w_numu = (1.0 / tot_numu) * (numu_frac * total_num)
if tot_anumu == 0:
w_anumu = 0.0
else:
w_anumu = (1.0 / tot_anumu) * (anumu_frac * total_num)
if tot_cosmic == 0:
w_cosmic = 0.0
else:
w_cosmic = (1.0 / tot_cosmic) * (cosmic_frac * total_num)
if verbose:
print(
"Weights: ({},{:.5f}), ({},{:.5f}), ({},{:.5f}), ({},{:.5f}), ({},{:.5f})".format(
tot_nuel,
w_nuel,
tot_anuel,
w_anuel,
tot_numu,
w_numu,
tot_anumu,
w_anumu,
tot_cosmic,
w_cosmic,
)
)
events["w"] = events.apply(
apply_scale_weight,
axis=1,
args=(w_nuel, w_anuel, w_numu, w_anumu, w_cosmic),
)
# Now we need to apply the oscillation probability weights
osc_file = uproot.open(osc_file_name)
# We need to scale the nuel events so they simulate the appearance spectra
numu_ev = events[ # Get the unoscillated numu beam events
(events[data.MAP_NU_TYPE["name"]] == 1)
& (events[data.MAP_SIGN_TYPE["name"]] == 0)
& (events[data.MAP_COSMIC_CAT["name"]] == 0)
& (events["t_sample_type"] == 0)
]
nuel_ev = events[ # Get the nuel events generated with the numu flux
(events[data.MAP_NU_TYPE["name"]] == 0)
& (events[data.MAP_SIGN_TYPE["name"]] == 0)
& (events[data.MAP_COSMIC_CAT["name"]] == 0)
& (events["t_sample_type"] == 1)
]
numu_e_h = np.histogram(
numu_ev["t_nu_energy"] / 100,
bins=100,
range=(0, 100),
weights=numu_ev["w"],
)
nuel_e_h = np.histogram(
nuel_ev["t_nu_energy"] / 100,
bins=100,
range=(0, 100),
weights=nuel_ev["w"],
)
nuel_osc = (numu_e_h[0] * osc_file["hist_mue"].values) / nuel_e_h[0]
# Apply a weight to every event
events["w"] = events.apply(
apply_osc_weight,
axis=1,
args=(
osc_file["hist_mumu"].values,
nuel_osc,
),
)
return events | 81f35a51b3d28577204087511f9c405ff56afaaa | 3,650,462 |
def _pipe_line_with_colons(colwidths, colaligns):
"""Return a horizontal line with optional colons to indicate column's
alignment (as in `pipe` output format)."""
segments = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)]
return "|" + "|".join(segments) + "|" | 76dd17c761e7adb06fe57c5210645a4fe3872374 | 3,650,463 |
from datetime import datetime, timedelta
def convert_to_dates(start, end):
"""
CTD - Convert two strings to datetimes in format 'xx:xx'
param start: String - First string to convert
param end: String - Second string to convert
return: datetime - Two datetimes
"""
start = datetime.strptime(start, "%H:%M")
end = datetime.strptime(end, "%H:%M")
if end < start:
end += timedelta(days=1)
return start, end | 53ffb9924d31385aac2eafc66fe7a6159e5a310d | 3,650,464 |
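# A minimal usage sketch for convert_to_dates above (assumes the combined
# datetime/timedelta import): an overnight window such as "23:30" -> "01:15"
# rolls the end time into the next day, so the duration comes out positive.
start, end = convert_to_dates("23:30", "01:15")
print(end - start)  # 1:45:00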
def flipud(m):
"""
Flips the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Args:
m (Tensor): Input array.
Returns:
Tensor.
Raises:
TypeError: If the input is not a tensor.
Supported Platforms:
``GPU`` ``CPU``
Example:
>>> import mindspore.numpy as np
>>> A = np.arange(8.0).reshape((2,2,2))
>>> output = np.flipud(A)
>>> print(output)
[[[4. 5.]
[6. 7.]]
[[0. 1.]
[2. 3.]]]
"""
return flip(m, 0) | 06770689d23ca365fb57a6b9d1e74654b30ddaf2 | 3,650,465 |
def get_book_url(tool_name, category):
"""Get the link to the help documentation of the tool.
Args:
tool_name (str): The name of the tool.
category (str): The category of the tool.
Returns:
str: The URL to help documentation.
"""
prefix = "https://jblindsay.github.io/wbt_book/available_tools"
url = "{}/{}.html#{}".format(prefix, category, tool_name)
return url | daf6c8e0832295914a03b002b548a82e2949612a | 3,650,466 |
import hashlib
def game_hash(s):
"""Generate hash-based identifier for a game account based on the
text of the game.
"""
def int_to_base(n):
alphabet = "BCDFGHJKLMNPQRSTVWXYZ"
base = len(alphabet)
if n < base:
return alphabet[n]
return int_to_base(n // base) + alphabet[n % base]
return int_to_base(
int(hashlib.sha1(s.encode('utf-8')).hexdigest(), 16)
)[-7:] | c218a2607390916117921fe0f68fc23fedd51fc3 | 3,650,467 |
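# Hedged usage sketch for game_hash: the identifier is deterministic for the
# same input text, seven characters long, and drawn only from the consonant
# alphabet defined above.
gid = game_hash("example game text")
assert gid == game_hash("example game text")
assert len(gid) == 7
print(gid)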
import math
def thumbnail_url(bbox, layers, qgis_project, style=None, internal=True):
"""Internal function to generate the URL for the thumbnail.
:param bbox: The bounding box to use in the format [left,bottom,right,top].
:type bbox: list
:param layers: Name of the layer to use.
:type layers: basestring
:param qgis_project: The path to the QGIS project.
:type qgis_project: basestring
:param style: Layer style to choose
:type style: str
:param internal: Flag to switch between public url and internal url.
Public url will be served by Django Geonode (proxified).
:type internal: bool
:return: The WMS URL to fetch the thumbnail.
:rtype: basestring
"""
x_min, y_min, x_max, y_max = bbox
# We calculate the margins according to 10 percent.
percent = 10
delta_x = (x_max - x_min) / 100 * percent
delta_x = math.fabs(delta_x)
delta_y = (y_max - y_min) / 100 * percent
delta_y = math.fabs(delta_y)
# We apply the margins to the extent.
margin = [
y_min - delta_y,
x_min - delta_x,
y_max + delta_y,
x_max + delta_x
]
# Call the WMS.
bbox = ','.join([str(val) for val in margin])
query_string = {
'SERVICE': 'WMS',
'VERSION': '1.1.1',
'REQUEST': 'GetMap',
'BBOX': bbox,
'SRS': 'EPSG:4326',
'WIDTH': '250',
'HEIGHT': '250',
'MAP': qgis_project,
'LAYERS': layers,
'STYLE': style,
'FORMAT': 'image/png',
'TRANSPARENT': 'true',
'DPI': '96',
'MAP_RESOLUTION': '96',
'FORMAT_OPTIONS': 'dpi:96'
}
qgis_server_url = qgis_server_endpoint(internal)
url = Request('GET', qgis_server_url, params=query_string).prepare().url
return url | aa405eae72eacd7fd7b842bf569cc1ba3bc19315 | 3,650,468 |
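# A worked example of the 10 percent margin arithmetic used in thumbnail_url
# (kept standalone so it does not need the QGIS server helpers): for the bbox
# [20.0, -10.0, 21.0, -9.0] each side is padded by 0.1 degree, in the same
# (y, x, y, x) order the function builds its margin list.
x_min, y_min, x_max, y_max = 20.0, -10.0, 21.0, -9.0
delta_x = abs((x_max - x_min) / 100 * 10)
delta_y = abs((y_max - y_min) / 100 * 10)
print([y_min - delta_y, x_min - delta_x, y_max + delta_y, x_max + delta_x])
# [-10.1, 19.9, -8.9, 21.1]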
def evaluate_prediction_power(df, num_days=1):
""""
Applies a shift to the model for the number of days given, default to 1, and feed the data to
a linear regression model. Evaluate the results using score and print it.
"""
scores = {}
print "Num days: {}".format(range(num_days))
for i in range(num_days):
X,y = get_xy(df, num_days=i)
regressor = learn(X,y)
scores[i] = regressor.score(X,y)
return scores | d4e91c8eb656fea8fce16cc16eb1588415c80849 | 3,650,469 |
def get_select_file_dialog_dir():
""""
Return the directory that should be displayed by default
in file dialogs.
"""
directory = CONF.get('main', 'select_file_dialog_dir', get_home_dir())
directory = directory if osp.exists(directory) else get_home_dir()
return directory | 9ae485caf5c5162e0b0e4082cee3e99925861717 | 3,650,470 |
import os
def get_photo_filesystem_path(photos_basedir, album_name, filename):
"""
Gets location of photo on filesystem, e.g.:
/some/dir/album/photo.jpg
:param album_name:
:param filename:
:return:
"""
return os.path.join(photos_basedir, get_photo_filesystem_relpath(album_name, filename)) | 6bd754b31def01cf5b403bc924720efe999816dc | 3,650,471 |
import os
import imp
import random
from collections import OrderedDict
def getAdminData(self):
"""
Deliver admin content of module alarms (ajax)
:return: rendered template as string or json dict
"""
if request.args.get('action') == 'upload':
if request.files:
ufile = request.files['uploadfile']
fname = os.path.join(current_app.config.get('PATH_TMP'), ufile.filename)
ufile.save(fname)
scheduler.add_job(processFile, args=[current_app.config.get('PATH_TMP'), ufile.filename]) # schedule operation
return ""
elif request.args.get('action') == 'uploadchecker':
if request.files:
ufile = request.files['uploadfile']
if not os.path.exists('%s/emonitor/modules/alarms/inc/%s' % (current_app.config.get('PROJECT_ROOT'), ufile.filename)):
ufile.save('%s/emonitor/modules/alarms/inc/%s' % (current_app.config.get('PROJECT_ROOT'), ufile.filename))
try:
cls = imp.load_source('emonitor.modules.alarms.inc', 'emonitor/modules/alarms/inc/%s' % ufile.filename)
if isinstance(getattr(cls, cls.__all__[0])(), AlarmFaxChecker):
return "ok"
except:
pass
os.remove('%s/emonitor/modules/alarms/inc/%s' % (current_app.config.get('PROJECT_ROOT'), ufile.filename))
return babel.gettext(u'admin.alarms.checkernotvalid')
return ""
elif request.args.get('action') == 'uploaddefinition':
"""
add alarmtype with given config for uploadfile
"""
alarmtype = AlarmType.buildFromConfigFile(request.files['uploadfile'])
if not alarmtype:
db.session.rollback()
return babel.gettext(u'admin.alarms.incorrecttypedefinition')
db.session.add(alarmtype)
db.session.commit()
return "ok"
elif request.args.get('action') == 'gettypedefinition':
"""
export alarmtype definition as cfg-file
"""
alarmtype = AlarmType.getAlarmTypes(request.args.get('alarmtype'))
if alarmtype:
return Response(alarmtype.getConfigFile(), mimetype="application/x.download; charset=utf-8")
else:
return None
elif request.args.get('action') == 'getkeywords':
"""
send list with all keywords of alarmtype
"""
for f in [f for f in os.listdir('%s/emonitor/modules/alarms/inc/' % current_app.config.get('PROJECT_ROOT')) if f.endswith('.py')]:
if f == request.args.get('checker'):
cls = imp.load_source('emonitor.modules.alarms.inc', 'emonitor/modules/alarms/inc/%s' % f)
cls = getattr(cls, cls.__all__[0])()
return {u'keywords': "\n".join(cls.getDefaultConfig()[u'keywords']), u'variables': cls.getDefaultConfig()[u'translations'], u'attributes': cls.getDefaultConfig()[u'attributes']}
return ""
elif request.args.get('action') == 'alarmsforstate':
alarms = Alarm.getAlarms(state=int(request.args.get('state')))
return render_template('admin.alarms_alarm.html', alarms=alarms)
elif request.args.get('action') == 'alarmsarchive':
for id in request.args.get('alarmids').split(','):
Alarm.changeState(int(id), 3)
return ""
elif request.args.get('action') == 'savefieldorder': # change order of fields
fields = []
deptid = '0'
for f in request.args.get('order').split(','):
t, deptid, name = f.split('.')
fields.append(name)
if int(deptid):
for dbfield in AlarmField.getAlarmFields(dept=deptid):
dbfield.position = fields.index(dbfield.fieldtype)
db.session.commit()
return ""
elif request.args.get('action') == 'addreport':
f = request.files['template']
fname = "{}.{}".format(random.random(), f.filename.split('.')[-1])
fpath = '{}alarmreports/{}'.format(current_app.config.get('PATH_DATA'), fname[2:])
f.save(fpath)
if f.filename.endswith('pdf'):
fields = getFormFields(fpath)
content = render_template('admin.alarms.report_fields.html', filepath='{}alarmreports/{}'.format(current_app.config.get('PATH_DATA'), fname[2:]), fileinfo=getPDFInformation(fpath), fields=fields, multi=max(fields.values()) > 1)
else:
content = ""
fields = []
return {'filename': fname, 'content': content}
elif request.args.get('action') == 'reportdetails':
return render_template('admin.alarms.report_details.html', report=AlarmReport.getReports(id=request.args.get('reportid')), reporttype=AlarmReport.getReportTypes(request.args.get('template')), departments=request.args.get('departments'))
elif request.args.get('action') == 'reportfieldlookup':
ret = OrderedDict()
ret['basic'] = [] # add basic information from AFBasic class
for f in AFBasic().getFields():
ret['basic'].append({'id': f.name, 'value': f.id})
alarmfields = {}
for alarmfield in AlarmField.getAlarmFields():
if str(alarmfield.dept) not in request.args.get('departments').split(','):
continue
if alarmfield.fieldtype not in alarmfields:
alarmfields[alarmfield.fieldtype] = []
alarmfields[alarmfield.fieldtype].append(alarmfield)
l = ""
for alarmfield in list(chain.from_iterable([f for f in alarmfields.values() if len(f) == len(request.args.get('departments').split(','))])):
if '%s' % alarmfield.name not in ret:
ret['%s' % alarmfield.name] = [{'id': '%s-list' % alarmfield.fieldtype, 'value': '%s (%s)' % (alarmfield.name, babel.gettext('admin.alarms.list'))}]
for f in alarmfield.getFields():
if f.getLabel().strip() not in ["", '<leer>']: # dismiss empty values
if f.name[0] != ' ':
value = '%s' % babel.gettext(f.getLabel())
l = value
else: # add name of kategory
value = '%s > %s' % (l, babel.gettext(f.getLabel()))
ret['%s' % alarmfield.name].append({'id': '%s-%s' % (alarmfield.fieldtype, f.id), 'value': value})
return ret | 38669746fe0b5436827999b51a4a57a3f294a76c | 3,650,472 |
def join_collections(sql_query: sql.SQLQuery) -> QueryExpression:
"""Join together multiple collections to return their documents in the response.
Params:
-------
sql_query: SQLQuery object with information about the query params.
Returns:
--------
An FQL query expression for joined and filtered documents.
"""
tables = sql_query.tables
order_by = sql_query.order_by
from_table = tables[0]
to_table = tables[-1]
table_with_columns = next(table for table in tables if table.has_columns)
if (
order_by is not None
and order_by.columns[0].table_name != table_with_columns.name
):
raise exceptions.NotSupportedError(
"Fauna uses indexes for both joining and ordering of results, "
"and we currently can only sort the principal table "
"(i.e. the one whose columns are being selected or modified) in the query. "
"You can sort on a column from the principal table, query one table at a time, "
"or remove the ordering constraint."
)
if not any(sql_query.filter_groups):
raise exceptions.NotSupportedError(
"Joining tables without cross-table filters via the WHERE clause is not supported. "
"Selecting columns from multiple tables is not supported either, "
"so there's no performance gain from joining tables without cross-table conditions "
"for filtering query results."
)
assert from_table.left_join_table is None
intersection_queries = []
for filter_group in sql_query.filter_groups:
intersection_query = q.intersection(
*[
_build_intersecting_query(filter_group, None, table, direction)
for table, direction in [(from_table, "right"), (to_table, "left")]
]
)
intersection_queries.append(intersection_query)
return q.union(*intersection_queries) | 62ad0cbad609e8218b4ac9d78f893fbcfc90618e | 3,650,473 |
def querylist(query, encoding='utf-8', errors='replace'):
"""Split the query component into individual `name=value` pairs and
return a list of `(name, value)` tuples.
"""
if query:
qsl = [query]
else:
return []
if isinstance(query, bytes):
QUERYSEP = (b';', b'&')
EQ = b'='
else:
QUERYSEP = ';&'
EQ = '='
for sep in QUERYSEP:
qsl = [s for qs in qsl for s in qs.split(sep) if s]
items = []
for qs in qsl:
parts = qs.partition(EQ)
name = uridecode_safe_plus(parts[0], encoding, errors)
if parts[1]:
value = uridecode_safe_plus(parts[2], encoding, errors)
else:
value = None
items.append((name, value))
return items | 25f726aa76c3b34a9aebc5e111b28162d0b91e3f | 3,650,474 |
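# Hedged usage sketch for querylist: uridecode_safe_plus lives elsewhere in the
# original module, so a minimal stand-in (plus-to-space plus percent decoding)
# is assumed here purely for the demonstration.
from urllib.parse import unquote_plus

def uridecode_safe_plus(data, encoding, errors):  # stand-in, not the original helper
    return unquote_plus(data, encoding=encoding, errors=errors)

print(querylist("a=1&b=two+words;c"))
# [('a', '1'), ('b', 'two words'), ('c', None)]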
import numpy as np
def rotate_space_123(angles):
"""Returns the direction cosine matrix relating a reference frame B
rotated relative to reference frame A through the x, y, then z axes of
reference frame A (spaced fixed rotations).
Parameters
----------
angles : numpy.array or list or tuple, shape(3,)
Three angles (in units of radians) that specify the orientation of
a new reference frame with respect to a fixed reference frame.
The first angle is a pure rotation about the x-axis, the second about
the y-axis, and the third about the z-axis. All rotations are with
respect to the initial fixed frame, and they occur in the order x,
then y, then z.
Returns
-------
R : numpy.matrix, shape(3,3)
Three dimensional rotation matrix about three different orthogonal axes.
Notes
-----
R = |c2 * c3 s1 * s2 * c3 - s3 * c1 c1 * s2 * c3 + s3 * s1|
|c2 * s3 s1 * s2 * s3 + c3 * c1 c1 * s2 * s3 - c3 * s1|
|-s2 s1 * c2 c1 * c2 |
where
s1, s2, s3 = sine of the first, second and third angles, respectively
c1, c2, c3 = cosine of the first, second and third angles, respectively
So the unit vector b1 in the B frame can be expressed in the A frame (unit
vectors a1, a2, a3) with:
b1 = c2 * c3 * a1 + c2 * s3 * a2 - s2 * a3
Thus a vector vb which is expressed in frame B can be expressed in A by
pre-multiplying by R:
va = R * vb
"""
cx = np.cos(angles[0])
sx = np.sin(angles[0])
cy = np.cos(angles[1])
sy = np.sin(angles[1])
cz = np.cos(angles[2])
sz = np.sin(angles[2])
Rz = np.mat([[ cz,-sz, 0],
[ sz, cz, 0],
[ 0, 0, 1]])
Ry = np.mat([[ cy, 0, sy],
[ 0, 1, 0],
[-sy, 0, cy]])
Rx = np.mat([[ 1, 0, 0],
[ 0, cx, -sx],
[ 0, sx, cx]])
return Rz * Ry * Rx | f62ac16e63591c4852681479ab9d39227bad3dfc | 3,650,475 |
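# A short worked example (assumes the numpy import added above and a NumPy
# version that still provides np.mat/np.matrix): a 90 degree rotation about
# the z-axis alone maps the x unit vector onto the y unit vector.
R = rotate_space_123([0.0, 0.0, np.pi / 2])
va = R * np.array([[1.0], [0.0], [0.0]])
print(np.round(va, 6).T)  # approximately [[0. 1. 0.]]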
import logging
import os
from argparse import ArgumentParser
def parse_arguments():
"""Parse and return the command line argument dictionary object
"""
parser = ArgumentParser("CIFAR-10/100 Training")
_verbosity = "INFO"
parser.add_argument(
"-v",
"--verbosity",
type=str.upper,
choices=logging._nameToLevel.keys(),
default=_verbosity,
metavar="VERBOSITY",
help="output verbosity: {} (default: {})".format(
" | ".join(logging._nameToLevel.keys()), _verbosity
),
)
parser.add_argument("--manual-seed", type=int, help="manual seed integer")
_mode = "train"
parser.add_argument(
"-m",
"--mode",
type=str.lower,
default=_mode,
choices=["train", "evaluate", "profile"],
help=f"script execution mode (default: {_mode})",
)
_tw_file = "tw.log"
parser.add_argument(
"-t",
"--tensorwatch-log",
type=str,
default=_tw_file,
help=f"tensorwatch log filename (default: {_tw_file})",
)
parser.add_argument(
"--gpu-id", default="0", type=str, help="id(s) for CUDA_VISIBLE_DEVICES"
)
# Dataset Options
d_op = parser.add_argument_group("Dataset")
d_op.add_argument(
"-d",
"--dataset",
default="cifar10",
type=str.lower,
choices=("cifar10", "cifar100"),
)
avail_cpus = min(4, len(os.sched_getaffinity(0)))
d_op.add_argument(
"-w",
"--workers",
default=avail_cpus,
type=int,
metavar="N",
help=f"number of data-loader workers (default: {avail_cpus})",
)
# Architecture Options
a_op = parser.add_argument_group("Architectures")
_architecture = "alexnet"
a_op.add_argument(
"-a",
"--arch",
metavar="ARCH",
default=_architecture,
choices=MODEL_ARCHS.keys(),
help="model architecture: {} (default: {})".format(
" | ".join(MODEL_ARCHS.keys()), _architecture
),
)
_depth = 29
a_op.add_argument(
"--depth", type=int, default=_depth, help=f"Model depth (default: {_depth})"
)
_block_name = "basicblock"
_block_choices = ["basicblock", "bottleneck"]
a_op.add_argument(
"--block-name",
type=str.lower,
default=_block_name,
choices=_block_choices,
help=f"Resnet|Preresnet building block: (default: {_block_name}",
)
_cardinality = 8
a_op.add_argument(
"--cardinality",
type=int,
default=_cardinality,
help=f"Resnext cardinality (group) (default: {_cardinality})",
)
_widen_factor = 4
a_op.add_argument(
"--widen-factor",
type=int,
default=_widen_factor,
help=f"Resnext|WRT widen factor, 4 -> 64, 8 -> 128, ... (default: {_widen_factor})",
)
_growth_rate = 12
a_op.add_argument(
"--growth-rate",
type=int,
default=_growth_rate,
help=f"DenseNet growth rate (default: {_growth_rate}",
)
_compression_rate = 2
a_op.add_argument(
"--compressionRate",
type=int,
default=_compression_rate,
help=f"DenseNet compression rate (theta) (default: {_compression_rate}",
)
# Optimization Options
o_op = parser.add_argument_group("Optimizations")
_epochs = 300
o_op.add_argument(
"--epochs",
default=_epochs,
type=int,
metavar="N",
help=f"number of epochs to run (default: {_epochs})",
)
_epoch_start = 0
o_op.add_argument(
"--start-epoch",
default=_epoch_start,
type=int,
metavar="N",
help=f"epoch start number (default: {_epoch_start})",
)
_train_batch = 128
o_op.add_argument(
"--train-batch",
default=_train_batch,
type=int,
metavar="N",
help=f"train batchsize (default: {_train_batch})",
)
_test_batch = 100
o_op.add_argument(
"--test-batch",
default=_test_batch,
type=int,
metavar="N",
help=f"test batchsize (default: {_test_batch})",
)
_lr = 0.1
o_op.add_argument(
"--lr",
"--learning-rate",
default=_lr,
type=float,
metavar="LR",
help=f"initial learning rate (default: {_lr})",
)
_dropout = 0
o_op.add_argument(
"--drop",
"--dropout",
default=_dropout,
type=float,
metavar="Dropout",
help=f"Dropout ratio (default: {_dropout})",
)
_schedule = [150, 225]
o_op.add_argument(
"--schedule",
type=int,
nargs="+",
default=_schedule,
help=f"Decrease LR at these epochs (default: {_schedule})",
)
_gamma = 0.1
o_op.add_argument(
"--gamma",
type=float,
default=_gamma,
help=f"LR is multiplied by gamma on schedule (default: {_gamma})",
)
_momentum = 0.9
o_op.add_argument(
"--momentum",
default=_momentum,
type=float,
metavar="M",
help=f"momentum (default: {_momentum})",
)
_wd = 5e-4
o_op.add_argument(
"--weight-decay",
"--wd",
default=_wd,
type=float,
metavar="W",
help=f"weight decay (default: {_wd})",
)
# Checkpoint Options
c_op = parser.add_argument_group("Checkpoints")
_checkpoint = "checkpoint"
c_op.add_argument(
"-c",
"--checkpoint",
default=_checkpoint,
type=str,
metavar="PATH",
help=f"path to save checkpoint (default: {_checkpoint})",
)
return vars(parser.parse_args()) | 77264dfc1540b47835daf8bd5674484adc0a1d8e | 3,650,476 |
def get_tac_resource(url):
"""
Get the requested resource or update resource using Tacoma account
:returns: http response with content in xml
"""
response = None
response = TrumbaTac.getURL(url, {"Accept": "application/xml"})
_log_xml_resp("Tacoma", url, response)
return response | 4d3fce0c7c65a880bf565c79285bcda081d4ef5a | 3,650,477 |
import numpy as np
def cosweightlat(darray, lat1, lat2):
"""Calculate the weighted average for an [:,lat] array over the region
lat1 to lat2
"""
# flip latitudes if they are decreasing
if (darray.lat[0] > darray.lat[darray.lat.size -1]):
print("flipping latitudes")
darray = darray.sortby('lat')
region = darray.sel(lat=slice(lat1, lat2))
weights=np.cos(np.deg2rad(region.lat))
regionw = region.weighted(weights)
regionm = regionw.mean("lat")
return regionm | 87a8722d4d0b7004007fbce966a5ce99a6e51983 | 3,650,478 |
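# Hedged usage sketch (assumes xarray is installed): the cosine-of-latitude
# weighting leaves a latitude-constant field unchanged, which makes a handy
# sanity check.
import xarray as xr
lat = np.arange(-89.5, 90.0, 1.0)
da = xr.DataArray(np.ones_like(lat), coords={"lat": lat}, dims=["lat"])
print(float(cosweightlat(da, -30, 30)))  # 1.0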
def _GetSoftMaxResponse(goal_embedding, scene_spatial):
"""Max response of an embeddings across a spatial feature map.
The goal_embedding is multiplied across the spatial dimensions of the
scene_spatial to generate a heatmap. Then the spatial softmax-pooled value of
this heatmap is returned. If the goal_embedding and scene_spatial are aligned
to the same space, then _GetSoftMaxResponse returns larger values if the
object is present in the scene, and smaller values if the object is not.
Args:
goal_embedding: A batch x D tensor embedding of the goal image.
scene_spatial: A batch x H x W x D spatial feature map tensor.
Returns:
max_heat: A tensor of length batch.
max_soft: The max value of the softmax (ranges between 0 and 1.0)
"""
batch, dim = goal_embedding.shape
reshaped_query = tf.reshape(goal_embedding, (int(batch), 1, 1, int(dim)))
scene_heatmap = tf.reduce_sum(tf.multiply(scene_spatial,
reshaped_query), axis=3,
keep_dims=True)
scene_heatmap_flat = tf.reshape(scene_heatmap, (batch, -1))
max_heat = tf.reduce_max(scene_heatmap_flat, axis=1)
scene_softmax = tf.nn.softmax(scene_heatmap_flat, axis=1)
max_soft = tf.reduce_max(scene_softmax, axis=1)
return max_heat, max_soft | 547e61b403d99f2c0a4b5a0f78c03f7051a10d5c | 3,650,479 |
def summarize_star(star):
"""return one line summary of star"""
if star.find('name').text[-2] == ' ':
name = star.find('name').text[-1]
else:
name = ' '
mass = format_star_mass_str(star)
radius = format_star_radius_str(star)
temp = format_body_temp_str(star)
metallicity = format_star_metal_str(star)
return u'{} {} {:>8} {:>8} {:>8} {:>8} {:>8} {}'.format(name, format_spectral_name(star),
mass, radius, '', '', temp, metallicity) | f9860d742a646637e4b725e39151ed8f5e8adf0f | 3,650,480 |
def to_unicode(text, encoding='utf8', errors='strict'):
"""Convert a string (bytestring in `encoding` or unicode), to unicode."""
if isinstance(text, unicode):
return text
return unicode(text, encoding, errors=errors) | 1acb85930349832259e9309fed3669fbd1114cad | 3,650,481 |
from configparser import ConfigParser

def parse_pipfile():
"""Reads package requirements from Pipfile."""
cfg = ConfigParser()
cfg.read("Pipfile")
dev_packages = [p.strip('"') for p in cfg["dev-packages"]]
relevant_packages = [
p.strip('"') for p in cfg["packages"] if "nested-dataclasses" not in p
]
return relevant_packages, dev_packages | 72f559193b77989afc3aa200b6806ef051280673 | 3,650,482 |
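# Hedged usage sketch: parse_pipfile reads "Pipfile" from the working directory,
# so a throw-away file is written inside a temporary directory purely for the
# demonstration.
import os
import tempfile
os.chdir(tempfile.mkdtemp())
with open("Pipfile", "w") as fh:
    fh.write('[packages]\nrequests = "*"\n\n[dev-packages]\npytest = "*"\n')
print(parse_pipfile())  # (['requests'], ['pytest'])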
from typing import Mapping
def print_dist(d, height=12, pch="o", show_number=False,
title=None):
""" Printing a figure of given distribution
Parameters
----------
d: dict, list
a dictionary or a list, contains pairs of: "key" -> "count_value"
height: int
number of maximum lines for the graph
pch : str
shape of the bars in the plot, e.g 'o'
Return
------
str
"""
LABEL_COLOR = ['cyan', 'yellow', 'blue', 'magenta', 'green']
MAXIMUM_YLABEL = 4
try:
if isinstance(d, Mapping):
d = d.items()
orig_d = [(str(name), int(count))
for name, count in d]
d = [(str(name)[::-1].replace('-', '|').replace('_', '|'), count)
for name, count in d]
labels = [[c for c in name] for name, count in d]
max_labels = max(len(name) for name, count in d)
max_count = max(count for name, count in d)
min_count = min(count for name, count in d)
except Exception as e:
raise ValueError('`d` must be distribution dictionary contains pair of: '
'label_name -> disitribution_count, error: "%s"' % str(e))
# ====== create figure ====== #
# draw height, 1 line for minimum bar, 1 line for padding the label,
# then the labels
nb_lines = int(height) + 1 + 1 + max_labels
unit = (max_count - min_count) / height
fig = ""
# ====== add unit and total ====== #
fig += ctext("Unit: ", 'red') + \
'10^%d' % max(len(str(max_count)) - MAXIMUM_YLABEL, 0) + ' '
fig += ctext("Total: ", 'red') + \
str(sum(count for name, count in d)) + '\n'
# ====== add the figure ====== #
for line in range(nb_lines):
value = max_count - unit * line
# draw the y_label
if line % 2 == 0 and line <= int(height): # value
fig += ctext(
('%' + str(MAXIMUM_YLABEL) + 's') % str(int(value))[:MAXIMUM_YLABEL],
color='red')
else: # blank
fig += ' ' * MAXIMUM_YLABEL
fig += '|' if line <= int(height) else ' '
# draw default line
if line == int(height):
fig += ''.join([ctext(pch + ' ',
color=LABEL_COLOR[i % len(LABEL_COLOR)])
for i in range(len(d))])
# draw seperator for the label
elif line == int(height) + 1:
fig += '-' * (len(d) * 2)
# draw the labels
elif line > int(height) + 1:
for i, lab in enumerate(labels):
fig += ctext(' ' if len(lab) == 0 else lab.pop(),
LABEL_COLOR[i % len(LABEL_COLOR)]) + ' '
# draw the histogram
else:
for i, (name, count) in enumerate(d):
fig += ctext(pch if count - value >= 0 else ' ',
LABEL_COLOR[i % len(LABEL_COLOR)]) + ' '
# new line
fig += '\n'
# ====== add actual number of necessary ====== #
maximum_fig_length = MAXIMUM_YLABEL + 1 + len(orig_d) * 2
if show_number:
line_length = 0
name_fmt = '%' + str(max_labels) + 's'
for name, count in orig_d:
n = len(name) + len(str(count)) + 4
text = ctext(name_fmt % name, 'red') + ': %d ' % count
if line_length + n >= maximum_fig_length:
fig += '\n'
line_length = n
else:
line_length += n
fig += text
# ====== add title ====== #
if title is not None:
title = ctext('"%s"' % str(title), 'red')
padding = ' '
n = (maximum_fig_length - len(title) // 2) // 2 - len(padding) * 3
fig = '=' * n + padding + title + padding + '=' * n + '\n' + fig
return fig[:-1] | d6636edbca5b16de8984c36bf9533ae963e21e0e | 3,650,483 |
import os
import errno
def Per_sequence_quality_scores(file_path, output_dire, module):
    """ Read the Per sequence quality scores section from a fastqc data file and parse it to an output file.
    Returns a list of data used to plot the quality graph.
    Data and generated graphs are saved to the output directory as a png image file and plain text.
    Accepts three arguments:
file_path = File name of fastqc
output_dire = Output Folder | directory name
module = Fastqc module name """
# opening fastqc data
fastqc_data = open(file_path, 'r')
# list to store Per Sequence Quality Scores
data = []
# creating start/stop switch to read data for specific module
read_line = 'stop'
for line in fastqc_data:
#line.strip("\n\r")
if line.startswith('>>Per sequence quality scores'):
read_line = 'start'
head = line
pass
elif line.startswith(">>END_MODULE"):
read_line = 'stop'
# once the netxt module is reached it break the loop and stop reading lines
elif line.startswith('>>Per base sequence content'):
break
elif read_line == 'start':
data.append(line)
    # Create the output directory; the try/except re-raises any error except "directory already exists".
try:
os.makedirs(output_dire)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
# creating file path for both image and text file output
if module == "ALL":
output_path_dire_txt = os.path.join(cwd, "PSQS", 'Per_sequence_quality_scores.txt')
output_path_dire_img = os.path.join(cwd, "PSQS", 'Per_sequence_quality_scores.png')
else:
output_path_dire_txt = os.path.join(cwd, output_dire, 'Per_sequence_quality_scores.txt')
output_path_dire_img = os.path.join(cwd, output_dire, 'Per_sequence_quality_scores.png')
data.insert(0,head)#insert header before writing
fw = open(output_path_dire_txt, 'w' )
fw.write("\n".join(data))
fw.close()
fastqc_data.close()
#load per sequence quality scores data to numpy dataframe
df = np.loadtxt(data, dtype='float', usecols = [0,1], skiprows=2)
df1 = pd.DataFrame(df, columns= ['Quality', 'Count'])
quality =df1.iloc[:,0]
count = df1.iloc[:,1]
count = count.astype('int64')
plt.figure(figsize=(20,10))
plt.plot(quality,count, c = 'b')
plt.xticks(np.arange(0, int(max(quality)+ 2)))
plt.grid(which='major', linestyle='--', linewidth='0.5', color='green')
plt.title('Quality score distribution over all sequences')
plt.xlabel("Mean Sequence Quality (Phred Score)")
ax=plt.gca()
ax.ticklabel_format(axis='y', style='plain', scilimits=(0,0), useOffset=None, useLocale=None, useMathText=None)
plt.savefig(output_path_dire_img, format='png', dpi=300)
plt.draw()
#plt.show()
plt.close()
return data | fa8debfaa7adcb8d827efba929e009a07ec19ed4 | 3,650,484 |
def count_primes(num):
"""
Write a function that returns the number
of prime numbers that exist up to and including a given number
:param num: int
:return: int
"""
count = 0
lower = int(input())
upper = int(input())
for num in range(lower, upper + 1):
if num > 1:
for i in range(2, num):
if (num % i) == 0:
break
else:
count += 1
return count | 7a544265f3a7eca9118b0647bc8926c655cdb8ec | 3,650,485 |
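# Quick checks for the corrected prime counter above: four primes up to 10
# (2, 3, 5, 7) and twenty-five primes up to 100.
print(count_primes(10))   # 4
print(count_primes(100))  # 25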
def run_experiment(config):
"""
Run the experiment.
Args:
config: The configuration dictionary.
Returns:
The experiment result.
"""
return None | b12a8a5cbdb03d60ca618826f20c9a731a39fd2a | 3,650,486 |
def read_notification(notification_id):
"""Marks a notification as read."""
notification = Notification.query.get_or_404(notification_id)
if notification.recipient_email != current_user.email:
abort(401)
notification.is_read = True
db.session.add(notification)
db.session.commit()
return NO_PAYLOAD | b2d4066be7b202d680415831fa6d3aa60e2896dc | 3,650,487 |
import cv2 as cv
def grayscale_blur(image):
"""
Convert image to gray and blur it.
"""
image_gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
image_gray = cv.blur(image_gray, (3, 3))
return image_gray | 4e8bf0479c653a3ed073481ad71e2530527ec4a3 | 3,650,488 |
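# Hedged usage sketch (assumes the cv2 import added above plus numpy): a random
# BGR image comes back as a single-channel array after conversion and blurring.
import numpy as np
img = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
print(grayscale_blur(img).shape)  # (64, 64)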
import hmac
def calculate_stream_hmac(stream, hmac_key):
"""Calculate a stream's HMAC code with the given key."""
stream.seek(0)
hash_hmac = hmac.new(bytearray(hmac_key, "utf-8"), digestmod=HASH_FUNCTION)
while True:
buf = stream.read(4096)
if not buf:
break
hash_hmac.update(buf)
return hash_hmac.hexdigest() | 35da77cc708b4dc8a256fbfcc012da8c68868c8c | 3,650,489 |
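# Hedged usage sketch: HASH_FUNCTION is defined elsewhere in the original
# module; hashlib.sha256 is assumed here so the example is self-contained.
import hashlib
import io
HASH_FUNCTION = hashlib.sha256  # assumption for this sketch
digest = calculate_stream_hmac(io.BytesIO(b"payload bytes"), "secret-key")
print(len(digest))  # 64 hex characters for SHA-256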
import os
def find(name, dir_path="." , p="r"):
"""
find(name: string, dirpath="." , p="r")
name: the name to find
dirpath: find in this directory
p = r recursive find sub dirs
"""
files = ls(dir_path, p)
ans = []
for fn in files:
head, tail = os.path.split(fn)
dis = normal_leven(tail, name)
ans.append((dis, fn))
ans.sort()
ans = ans[0:5]
if len(ans) > 0 and ans[0][0] == 0:
return [ans[0][1]]
return list(map(lambda x:x[1], ans)) | 5454a96cde6953f9d6112cdc3f4605dbf8ebf18c | 3,650,490 |
import re
def convert_tac(ThreeAddressCode):
"""Reads three adress code generated from parser and converts to TAC for codegen;
generates the three_addr_code along with leaders;
populates generate symbol table as per three_addr_code"""
for i in range(ThreeAddressCode.length()):
three_addr_instr = ThreeAddressCode.code[i]
three_addr_instr = [str(i+1)] + three_addr_instr
three_addr_code.add_line(three_addr_instr)
if len(three_addr_instr) != 5:
print("Incorrect size for the following instruction: ")
print(three_addr_instr)
return -1
if three_addr_instr[0] == '':
print("Line number not given in the following instruction: ")
print(three_addr_instr)
return -1
if re.search(r'\D', three_addr_instr[0]) != None:
print("Invalid line number given in the following instruction: ")
print(three_addr_instr)
return -1
leader_generating_if_instr = []
leader_generating_if_instr += ['ifgotoeq']
leader_generating_if_instr += ['ifgotoneq']
leader_generating_if_instr += ['ifgotolt']
leader_generating_if_instr += ['ifgotolteq']
leader_generating_if_instr += ['ifgotogt']
leader_generating_if_instr += ['ifgotogteq']
if three_addr_instr[1] in leader_generating_if_instr:
three_addr_code.add_leader(three_addr_code.length())
leader_generating_other_instr = ['label']
if three_addr_instr[1] in leader_generating_if_instr:
three_addr_code.add_leader(three_addr_code.length()-1)
leader_generating_other_instr = []
leader_generating_other_instr += ['goto']
leader_generating_other_instr += ['break']
leader_generating_other_instr += ['continue']
if three_addr_instr[1] in leader_generating_other_instr:
three_addr_code.add_leader(three_addr_code.length())
three_addr_code.leaders = sorted(three_addr_code.leaders, key=int)
return three_addr_code | 4a9408cfbd6b6f79a618b7eb89aa55e6aab25689 | 3,650,491 |
def is_english_score(bigrams, word):
"""Calculate the score of a word."""
prob = 1
for w1, w2 in zip("!" + word, word + "!"):
bigram = f"{w1}{w2}"
if bigram in bigrams:
prob *= bigrams[bigram] # / float(bigrams['total'] + 1)
else:
print("%s not found" % bigram)
prob *= 1 # / float(bigrams['total'] + 1)
return prob | 834e28a32806d0599f5df97d978bc6b9c1a51da7 | 3,650,492 |
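# A worked example with a toy bigram table; "!" marks the word boundary, so
# "cat" is scored over the bigrams !c, ca, at and t!.
toy_bigrams = {"!c": 0.5, "ca": 0.4, "at": 0.6, "t!": 0.5}
print(is_english_score(toy_bigrams, "cat"))  # ~0.06 (0.5 * 0.4 * 0.6 * 0.5)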
def cat(fname, fallback=_DEFAULT, binary=True):
"""Return file content.
fallback: the value returned in case the file does not exist or
cannot be read
binary: whether to open the file in binary or text mode.
"""
try:
with open_binary(fname) if binary else open_text(fname) as f:
return f.read().strip()
except IOError:
if fallback != _DEFAULT:
return fallback
raise | b3f645d79607f1ed986fe76aa20689d0860ef9ca | 3,650,493 |
import os
import json
def _get_from_url(url):
"""
    Note: the url must follow the OQMD RESTful API format (as below) and the result format should be set to json:
    http://oqmd.org/oqmdapi/formationenergy?fields=name,entry_id,delta_e&filter=stability=0&format=json
    That is, the url should be in a form supported by the OQMD RESTful API
"""
os.system("mkdir -p /tmp/pymatflow/third")
#os.system("wget \"%s\" -O /tmp/pymatflow/third/oqmd_restful_api_results.json" % (url))
#os.system("curl \"%s\" -Lo /tmp/pymatflow/third/oqmd_restful_api_results.json" % (url))
# silent output of curl
os.system("curl \"%s\" -s -Lo /tmp/pymatflow/third/oqmd_restful_api_results.json" % (url))
with open("/tmp/pymatflow/third/oqmd_restful_api_results.json", 'r') as fin:
out = json.loads(fin.read())
return out | 3860ca4b73eb6e4842ff7afd48485dd12d6d3e45 | 3,650,494 |
import codecs
def createStringObject(string):
"""
Given a string (either a ``str`` or ``unicode``), create a
:class:`ByteStringObject<ByteStringObject>` or a
:class:`TextStringObject<TextStringObject>` to represent the string.
"""
if isinstance(string, string_type):
return TextStringObject(string)
elif isinstance(string, bytes_type):
try:
if string.startswith(codecs.BOM_UTF16_BE):
retval = TextStringObject(string.decode("utf-16"))
retval.autodetect_utf16 = True
return retval
else:
# This is probably a big performance hit here, but we need to
# convert string objects into the text/unicode-aware version if
# possible... and the only way to check if that's possible is
# to try. Some strings are strings, some are just byte arrays.
retval = TextStringObject(decodePdfDocEncoding(string))
retval.autodetect_pdfdocencoding = True
return retval
except UnicodeDecodeError:
return ByteStringObject(string)
else:
raise TypeError("createStringObject() should have str or unicode arg") | 07c0ca42faa2b68dc347e1edad7f70a07930d891 | 3,650,495 |
import win32com.client
def _get_windows_network_adapters():
"""Get the list of windows network adapters."""
wbem_locator = win32com.client.Dispatch('WbemScripting.SWbemLocator')
wbem_service = wbem_locator.ConnectServer('.', 'root\cimv2')
wbem_network_adapters = wbem_service.InstancesOf('Win32_NetworkAdapter')
network_adapters = []
for adapter in wbem_network_adapters:
if (adapter.NetConnectionStatus == 2 or
adapter.NetConnectionStatus == 7):
adapter_name = adapter.NetConnectionID
mac_address = adapter.MacAddress.lower()
config = adapter.associators_(
'Win32_NetworkAdapterSetting',
'Win32_NetworkAdapterConfiguration')[0]
ip_address = ''
subnet_mask = ''
if config.IPEnabled:
ip_address = config.IPAddress[0]
subnet_mask = config.IPSubnet[0]
#config.DefaultIPGateway[0]
network_adapters.append({'name': adapter_name,
'mac-address': mac_address,
'ip-address': ip_address,
'subnet-mask': subnet_mask})
return network_adapters | 796c25089411633d11b28fdd9c23d900db7005f0 | 3,650,496 |
def project(dim, states):
"""Qiskit wrapper of projection operator.
"""
ket, bra = states
if ket in range(dim) and bra in range(dim):
return st.basis(dim, ket) * st.basis(dim, bra).dag()
else:
raise Exception('States are specified on the outside of Hilbert space %s' % states) | 351a190ec183264af58de15944efb3af255c5b03 | 3,650,497 |
def check_service_status(ssh_conn_obj, service_name, status="running", device='server'):
"""
Author: Chaitanya Vella ([email protected])
Function to check the service status
:param ssh_conn_obj:
:param service_name:
:param status:
:return:
"""
st.log("##### Checking {} status for {} service ######".format(status, service_name))
command = "status {}".format(service_name)
result = conn_obj.execute_command(ssh_conn_obj, command) if device == 'server' else st.config(ssh_conn_obj, command)
result = utils_obj.remove_last_line_from_string(result)
if "command not found" not in result:
match = "start/running" if status == "running" else "stop/waiting"
if result.find("{}".format(match)) > 1:
return True
else:
command = "service --status-all | grep {}".format(service_name)
result = conn_obj.execute_command(ssh_conn_obj, command)
result = utils_obj.remove_last_line_from_string(result)
operator = "+" if status == "running" else "-"
return True if operator in result and service_name in result else False
return False | d8f2a9be7a784ad874d218601fdc043babdafe6e | 3,650,498 |
def _persist_block(block_node, block_map):
"""produce persistent binary data for a single block
Children block are assumed to be already persisted and present in
block_map.
"""
data = tuple(_to_value(v, block_map) for v in block_node)
return S_BLOCK.pack(*data) | 2fb97099135fe931d1d387ed616b152ed7c28b34 | 3,650,499 |