filename (string, 13-19 chars) | text (string, 134-1.04M chars)
---|---
the-stack_0_10549 | #!/usr/bin/env python
# Copyright (c) 2012-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Extract _("...") strings for translation and convert to Qt stringdefs so that
they can be picked up by Qt linguist.
'''
from __future__ import division,print_function,unicode_literals
from subprocess import Popen, PIPE
import operator
import os
import sys
OUT_CPP="qt/zalgocoinstrings.cpp"
EMPTY=['""']
def parse_po(text):
"""
Parse 'po' format produced by xgettext.
Return a list of (msgid,msgstr) tuples.
"""
messages = []
msgid = []
msgstr = []
in_msgid = False
in_msgstr = False
for line in text.split('\n'):
line = line.rstrip('\r')
if line.startswith('msgid '):
if in_msgstr:
messages.append((msgid, msgstr))
in_msgstr = False
# message start
in_msgid = True
msgid = [line[6:]]
elif line.startswith('msgstr '):
in_msgid = False
in_msgstr = True
msgstr = [line[7:]]
elif line.startswith('"'):
if in_msgid:
msgid.append(line)
if in_msgstr:
msgstr.append(line)
if in_msgstr:
messages.append((msgid, msgstr))
return messages
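# Hedged usage sketch (not part of the original script): parse_po applied to a
# minimal, made-up xgettext fragment. The helper below is illustrative only and
# is never called by the script itself.
def _parse_po_demo():
    demo = 'msgid "Hello"\nmsgstr ""\n'
    for msgid, msgstr in parse_po(demo):
        print(msgid, msgstr)  # -> ['"Hello"'] ['""']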
files = sys.argv[1:]
# xgettext -n --keyword=_ $FILES
XGETTEXT=os.getenv('XGETTEXT', 'xgettext')
if not XGETTEXT:
print('Cannot extract strings: xgettext utility is not installed or not configured.',file=sys.stderr)
print('Please install package "gettext" and re-run \'./configure\'.',file=sys.stderr)
exit(1)
child = Popen([XGETTEXT,'--output=-','-n','--keyword=_'] + files, stdout=PIPE)
(out, err) = child.communicate()
messages = parse_po(out.decode('utf-8'))
f = open(OUT_CPP, 'w')
f.write("""
#include <QtGlobal>
// Automatically generated by extract_strings_qt.py
#ifdef __GNUC__
#define UNUSED __attribute__((unused))
#else
#define UNUSED
#endif
""")
f.write('static const char UNUSED *zalgocoin_strings[] = {\n')
f.write('QT_TRANSLATE_NOOP("zalgocoin-core", "%s"),\n' % (os.getenv('PACKAGE_NAME'),))
f.write('QT_TRANSLATE_NOOP("zalgocoin-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS'),))
if os.getenv('COPYRIGHT_HOLDERS_SUBSTITUTION') != os.getenv('PACKAGE_NAME'):
f.write('QT_TRANSLATE_NOOP("zalgocoin-core", "%s"),\n' % (os.getenv('COPYRIGHT_HOLDERS_SUBSTITUTION'),))
messages.sort(key=operator.itemgetter(0))
for (msgid, msgstr) in messages:
if msgid != EMPTY:
f.write('QT_TRANSLATE_NOOP("zalgocoin-core", %s),\n' % ('\n'.join(msgid)))
f.write('};\n')
f.close()
|
the-stack_0_10550 | """
Syntax difference of two functions - using diff utility and filtering the
result.
"""
from subprocess import check_output, CalledProcessError
from tempfile import mkdtemp
import os
def syntax_diff(first_file, second_file, name, kind, first_line, second_line):
"""Get diff of a C function or type between first_file and second_file"""
tmpdir = mkdtemp()
command = ["diff", "-C", "1", os.path.join(tmpdir, "1"),
os.path.join(tmpdir, "2")]
if kind == "function":
terminator_list = ["}", ");"]
elif kind == "type":
terminator_list = ["};"]
# Use the provided arguments "first_line" and "second_line" that contain
# the lines on which the function starts in each file to extract both
# functions into temporary files
for filename in [first_file, second_file]:
tmp_file = "1" if filename == first_file else "2"
with open(filename, "r", encoding='utf-8') as input_file, \
open(os.path.join(tmpdir, tmp_file), "w",
encoding='utf-8') as output_file:
lines = input_file.readlines()
start = first_line if filename == first_file else second_line
# The end of the function is detected as a line that contains
# nothing but an ending curly bracket
line_index = start - 1
line = lines[line_index]
while line.rstrip() not in terminator_list:
line_index += 1
if line_index == len(lines):
return "Error: cannot get diff\n"
output_file.write(line)
line = lines[line_index]
output_file.write(line)
# check_output fails when the two files differ because diff exits with code 1,
# which in this case actually signals success; the exception has to be caught
# and the return code evaluated manually
try:
diff = check_output(command).decode('utf-8')
except CalledProcessError as e:
if e.returncode == 1:
diff = e.output.decode('utf-8')
else:
raise
if diff.isspace() or diff == "":
# Empty diff
return diff
# Drop the file name header lines emitted by diff and fix line numbers
diff_lines = diff.split('\n')[2:]
diff_lines_new = []
for line in diff_lines:
def fix_line(x):
offset = first_line if polarity == "*" else second_line
return str(int(x) + offset - 1)
# Add function header
if set(list(line)) == set(["*"]):
with open(os.path.join(tmpdir, "1"), "r") as extract:
line += " " + extract.readline().strip()
# Check whether the line is a line number line
number_line_set = set([" ", "*", "-", ","] +
list(map(str, list(range(0, 10)))))
if ((not set(list(line)).issubset(number_line_set)) or
(not any(char.isdigit() for char in line)) or
line.isspace() or line == ""):
diff_lines_new += [line]
continue
polarity = "*" if line.count("*") > 1 else "-"
line = line.replace("*", "").replace("-", "").replace(" ", "")
line = ",".join(map(fix_line, line.split(",")))
line = polarity * 3 + " " + line + " " + polarity * 3
diff_lines_new += [line]
diff = "\n".join(diff_lines_new)
return diff
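# Hedged usage sketch (not part of the original module): write two tiny C files
# that differ in one function and diff them. File names and contents are made
# up for illustration; like syntax_diff itself, this needs the external `diff`
# utility on PATH.
def _syntax_diff_demo():
    demo_dir = mkdtemp()
    old_file = os.path.join(demo_dir, "old.c")
    new_file = os.path.join(demo_dir, "new.c")
    with open(old_file, "w", encoding="utf-8") as f:
        f.write("int add(int a, int b)\n{\n    return a + b;\n}\n")
    with open(new_file, "w", encoding="utf-8") as f:
        f.write("int add(int a, int b)\n{\n    return a + b + 1;\n}\n")
    # Both functions start on line 1 of their respective files.
    print(syntax_diff(old_file, new_file, "add", "function", 1, 1))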
|
the-stack_0_10551 | import copy
import datetime
import errno
import hashlib
import os
import time
from collections import defaultdict, deque, OrderedDict
import torch
import torch.distributed as dist
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
t = reduce_across_processes([self.count, self.total])
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value
)
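# Hedged usage sketch (not part of the original utilities): single-process use
# of SmoothedValue, so no synchronize_between_processes() call is required. The
# helper is illustrative only and is not called anywhere in this module.
def _smoothed_value_demo():
    meter = SmoothedValue(window_size=3, fmt="{median:.2f} ({global_avg:.2f})")
    for v in (1.0, 2.0, 3.0, 4.0):
        meter.update(v)
    # median covers the last 3 values, global_avg covers all 4 -> "3.00 (2.50)"
    print(str(meter))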
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append("{}: {}".format(name, str(meter)))
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ""
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt="{avg:.4f}")
data_time = SmoothedValue(fmt="{avg:.4f}")
space_fmt = ":" + str(len(str(len(iterable)))) + "d"
if torch.cuda.is_available():
log_msg = self.delimiter.join(
[
header,
"[{0" + space_fmt + "}/{1}]",
"eta: {eta}",
"{meters}",
"time: {time}",
"data: {data}",
"max mem: {memory:.0f}",
]
)
else:
log_msg = self.delimiter.join(
[header, "[{0" + space_fmt + "}/{1}]", "eta: {eta}", "{meters}", "time: {time}", "data: {data}"]
)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB,
)
)
else:
print(
log_msg.format(
i, len(iterable), eta=eta_string, meters=str(self), time=str(iter_time), data=str(data_time)
)
)
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print("{} Total time: {}".format(header, total_time_str))
class ExponentialMovingAverage(torch.optim.swa_utils.AveragedModel):
"""Maintains moving averages of model parameters using an exponential decay.
``ema_avg = decay * avg_model_param + (1 - decay) * model_param``
`torch.optim.swa_utils.AveragedModel <https://pytorch.org/docs/stable/optim.html#custom-averaging-strategies>`_
is used to compute the EMA.
"""
def __init__(self, model, decay, device="cpu"):
def ema_avg(avg_model_param, model_param, num_averaged):
return decay * avg_model_param + (1 - decay) * model_param
super().__init__(model, device, ema_avg)
def update_parameters(self, model):
for p_swa, p_model in zip(self.module.state_dict().values(), model.state_dict().values()):
device = p_swa.device
p_model_ = p_model.detach().to(device)
if self.n_averaged == 0:
p_swa.detach().copy_(p_model_)
else:
p_swa.detach().copy_(self.avg_fn(p_swa.detach(), p_model_, self.n_averaged.to(device)))
self.n_averaged += 1
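# Hedged usage sketch (not part of the original utilities): keeping an EMA copy
# of a small model inside a training loop. Model shape and decay value are
# illustrative assumptions; the helper is not called anywhere in this module.
def _ema_demo():
    model = torch.nn.Linear(4, 2)
    model_ema = ExponentialMovingAverage(model, decay=0.999, device="cpu")
    for _ in range(10):
        # ... forward/backward/optimizer.step() on `model` would go here ...
        model_ema.update_parameters(model)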
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.inference_mode():
maxk = max(topk)
batch_size = target.size(0)
if target.ndim == 2:
target = target.max(dim=1)[1]
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target[None])
res = []
for k in topk:
correct_k = correct[:k].flatten().sum(dtype=torch.float32)
res.append(correct_k * (100.0 / batch_size))
return res
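# Hedged usage sketch (not part of the original utilities): top-1/top-5
# accuracy on random logits. Shapes are illustrative assumptions.
def _accuracy_demo():
    logits = torch.randn(8, 10)          # batch of 8 samples, 10 classes
    labels = torch.randint(0, 10, (8,))  # integer class targets
    top1, top5 = accuracy(logits, labels, topk=(1, 5))
    print(top1.item(), top5.item())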
def mkdir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop("force", False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ["WORLD_SIZE"])
args.gpu = int(os.environ["LOCAL_RANK"])
elif "SLURM_PROCID" in os.environ:
args.rank = int(os.environ["SLURM_PROCID"])
args.gpu = args.rank % torch.cuda.device_count()
elif hasattr(args, "rank"):
pass
else:
print("Not using distributed mode")
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = "nccl"
print("| distributed init (rank {}): {}".format(args.rank, args.dist_url), flush=True)
torch.distributed.init_process_group(
backend=args.dist_backend, init_method=args.dist_url, world_size=args.world_size, rank=args.rank
)
setup_for_distributed(args.rank == 0)
def average_checkpoints(inputs):
"""Loads checkpoints from inputs and returns a model with averaged weights. Original implementation taken from:
https://github.com/pytorch/fairseq/blob/a48f235636557b8d3bc4922a6fa90f3a0fa57955/scripts/average_checkpoints.py#L16
Args:
inputs (List[str]): An iterable of string paths of checkpoints to load from.
Returns:
A dict of string keys mapping to various values. The 'model' key
from the returned dict should correspond to an OrderedDict mapping
string parameter names to torch Tensors.
"""
params_dict = OrderedDict()
params_keys = None
new_state = None
num_models = len(inputs)
for fpath in inputs:
with open(fpath, "rb") as f:
state = torch.load(
f,
map_location=(lambda s, _: torch.serialization.default_restore_location(s, "cpu")),
)
# Copies over the settings from the first checkpoint
if new_state is None:
new_state = state
model_params = state["model"]
model_params_keys = list(model_params.keys())
if params_keys is None:
params_keys = model_params_keys
elif params_keys != model_params_keys:
raise KeyError(
"For checkpoint {}, expected list of params: {}, "
"but found: {}".format(f, params_keys, model_params_keys)
)
for k in params_keys:
p = model_params[k]
if isinstance(p, torch.HalfTensor):
p = p.float()
if k not in params_dict:
params_dict[k] = p.clone()
# NOTE: clone() is needed in case p is a shared parameter
else:
params_dict[k] += p
averaged_params = OrderedDict()
for k, v in params_dict.items():
averaged_params[k] = v
if averaged_params[k].is_floating_point():
averaged_params[k].div_(num_models)
else:
averaged_params[k] //= num_models
new_state["model"] = averaged_params
return new_state
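# Hedged usage sketch (not part of the original utilities): average two tiny,
# made-up checkpoints written to a temporary directory. The parameter name "w"
# and the values are illustrative assumptions.
def _average_checkpoints_demo():
    import tempfile
    tmp = tempfile.mkdtemp()
    paths = []
    for i in range(2):
        state = {"model": OrderedDict({"w": torch.full((2, 2), float(i))})}
        path = os.path.join(tmp, f"ckpt{i}.pth")
        torch.save(state, path)
        paths.append(path)
    averaged = average_checkpoints(paths)
    print(averaged["model"]["w"])  # expected: a 2x2 tensor filled with 0.5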
def store_model_weights(model, checkpoint_path, checkpoint_key="model", strict=True):
"""
This method can be used to prepare weights files for new models. It receives as
input a model architecture and a checkpoint from the training script and produces
a file with the weights ready for release.
Examples:
from torchvision import models as M
# Classification
model = M.mobilenet_v3_large(pretrained=False)
print(store_model_weights(model, './class.pth'))
# Quantized Classification
model = M.quantization.mobilenet_v3_large(pretrained=False, quantize=False)
model.fuse_model()
model.qconfig = torch.quantization.get_default_qat_qconfig('qnnpack')
_ = torch.quantization.prepare_qat(model, inplace=True)
print(store_model_weights(model, './qat.pth'))
# Object Detection
model = M.detection.fasterrcnn_mobilenet_v3_large_fpn(pretrained=False, pretrained_backbone=False)
print(store_model_weights(model, './obj.pth'))
# Segmentation
model = M.segmentation.deeplabv3_mobilenet_v3_large(pretrained=False, pretrained_backbone=False, aux_loss=True)
print(store_model_weights(model, './segm.pth', strict=False))
Args:
model (pytorch.nn.Module): The model on which the weights will be loaded for validation purposes.
checkpoint_path (str): The path of the checkpoint we will load.
checkpoint_key (str, optional): The key of the checkpoint where the model weights are stored.
Default: "model".
strict (bool): whether to strictly enforce that the keys
in :attr:`state_dict` match the keys returned by this module's
:meth:`~torch.nn.Module.state_dict` function. Default: ``True``
Returns:
output_path (str): The location where the weights are saved.
"""
# Store the new model next to the checkpoint_path
checkpoint_path = os.path.abspath(checkpoint_path)
output_dir = os.path.dirname(checkpoint_path)
# Deep copy to avoid side-effects on the model object.
model = copy.deepcopy(model)
checkpoint = torch.load(checkpoint_path, map_location="cpu")
# Load the weights to the model to validate that everything works
# and remove unnecessary weights (such as auxiliaries, etc)
if checkpoint_key == "model_ema":
del checkpoint[checkpoint_key]["n_averaged"]
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(checkpoint[checkpoint_key], "module.")
model.load_state_dict(checkpoint[checkpoint_key], strict=strict)
tmp_path = os.path.join(output_dir, str(model.__hash__()))
torch.save(model.state_dict(), tmp_path)
sha256_hash = hashlib.sha256()
with open(tmp_path, "rb") as f:
# Read and update hash string value in blocks of 4K
for byte_block in iter(lambda: f.read(4096), b""):
sha256_hash.update(byte_block)
hh = sha256_hash.hexdigest()
output_path = os.path.join(output_dir, "weights-" + str(hh[:8]) + ".pth")
os.replace(tmp_path, output_path)
return output_path
def reduce_across_processes(val):
if not is_dist_avail_and_initialized():
return val
t = torch.tensor(val, device="cuda")
dist.barrier()
dist.all_reduce(t)
return t
|
the-stack_0_10554 | """Plotting and visualization tools."""
# stdlib
import logging
from datetime import datetime
from pathlib import Path
# external
import plotly.express as px
import plotly.graph_objects as go
LOG = logging.getLogger(__name__)
output_path = Path("output/images")
def line(x, y):
"""Plots line plot. Supports up to 2D data.
Args:
    x (array_like): 1st dimension of data.
    y (array_like): 2nd dimension of data.
Returns:
    plotly.figure: figure object.
"""
fig = px.line(x=x, y=y, markers=True)
fig.show()
return fig
def surface(x, y, z):
"""Plots surface data. Supports up to 3D data.
Args:
    x (array_like[float]): x-coordinate data.
    y (array_like[float]): y-coordinate data.
    z (array_like[float]): height data.
Returns:
    plotly.figure: figure object.
"""
fig = go.Figure(data=[go.Surface(z=z, x=x, y=y)])
fig.update_traces(
contours_z=dict(
show=True, usecolormap=True, highlightcolor="limegreen", project_z=True
)
)
fig.show()
return fig
def scatter(x, y, z=None, w=None):
"""Plots scatter plot. Supports up to 4D data.
Args:
    x (array_like): 1st dimension of data.
    y (array_like): 2nd dimension of data.
    z (array_like, optional): 3rd dimension of data (marker color). Defaults to None.
    w (array_like, optional): 4th dimension of data (marker size). Defaults to None.
Returns:
    plotly.figure: figure object.
"""
fig = px.scatter(x=x, y=y, color=z, size=w)
fig.show()
return fig
def scatter3(x, y, z, w=None, v=None):
"""Plots 3D scatter plot. Supports up to 5D data.
Args:
x (array_like): 1st dimension of data.
y (array_like): 2nd dimension of data.
z (array-like): 3rd dimension of data.
    w (array_like, optional): 4th dimension of data (marker color). Defaults to None.
    v (array_like, optional): 5th dimension of data (marker size). Defaults to None.
Returns:
plotly.figure: figure object.
"""
fig = px.scatter_3d(x=x, y=y, z=z, color=w, size=v, size_max=18, opacity=0.9)
fig.show()
return fig
def save(fig, filename):  # note: fig.write_image requires the optional `kaleido` package
"""Saves figure to output path.
Args:
    fig (plotly.figure): figure object.
    filename (str): filename.
"""
timestamp = datetime.now().strftime("%Y-%m-%d_%H:%M:%S.%f")
file_path = output_path / f"{filename}_{timestamp}.png"
fig.write_image(file_path)
LOG.info(f"Saved figure to {file_path}")
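# Hedged usage sketch (not part of the original module): the data points below
# are made up purely to exercise the helpers. Saving is left commented out
# because fig.write_image() needs the optional `kaleido` package and an
# existing output directory.
def _plot_demo():
    xs = [0, 1, 2, 3]
    ys = [0, 1, 4, 9]
    fig = line(xs, ys)
    # save(fig, "parabola")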
|
the-stack_0_10555 | import pprint
from time import strftime, gmtime
import pandas as pd
import boto3
import sagemaker
from sagemaker import get_execution_role
from sagemaker.model import Model
from sagemaker.xgboost.model import XGBoostModel
from sagemaker.model_monitor import DataCaptureConfig, DatasetFormat, DefaultModelMonitor
sess = boto3.Session()
sm = sess.client('sagemaker')
role = sagemaker.get_execution_role()
# Suppress default INFO logging
import logging
logger = logging.getLogger()
logger.setLevel(logging.CRITICAL)
def get_endpoint_from_lab4():
print("Getting solution from Lab 4...")
print("Please wait ~10 minutes for the endpoint to be deployed.")
# Set the paths for the datasets saved locally
path_to_lab2 = "/root/sagemaker-end-to-end-workshop/2-Modeling/"
path_to_lab5 = "/root/sagemaker-end-to-end-workshop/5-Monitoring/"
local_train_path = path_to_lab2 + 'config/train.csv'
train_df = pd.read_csv(local_train_path, header=None)
local_validation_path = path_to_lab2 + 'config/validation.csv'
validation_df = pd.read_csv(local_validation_path, header=None)
model_artifact_path = path_to_lab5 + 'config/model.tar.gz'
inference_code_path = path_to_lab5 + 'config/inference.py'
region = sess.region_name
account_id = sess.client('sts', region_name=region).get_caller_identity()["Account"]
bucket = 'sagemaker-studio-{}-{}'.format(sess.region_name, account_id)
prefix = 'xgboost-churn'
train_dir = f"{prefix}/train"
val_dir = f"{prefix}/validation"
model_dir = f"{prefix}/model"
try:
if sess.region_name == "us-east-1":
sess.client('s3').create_bucket(Bucket=bucket)
else:
sess.client('s3').create_bucket(Bucket=bucket,
CreateBucketConfiguration={'LocationConstraint': sess.region_name})
except Exception as e:
print("Looks like you already have a bucket of this name. That's good. Uploading the data files...")
# Upload the files and keep the returned S3 URLs so they can be reused below
print("Uploading data and model files to S3")
s3url_train = sagemaker.s3.S3Uploader.upload(local_train_path, 's3://{}/{}'.format(bucket, train_dir))
s3url_validation = sagemaker.s3.S3Uploader.upload(local_validation_path, 's3://{}/{}'.format(bucket, val_dir))
s3url_model_artifact = sagemaker.s3.S3Uploader.upload(model_artifact_path, 's3://{}/{}'.format(bucket, model_dir))
boto_sess = boto3.Session()
region = boto_sess.region_name
role = sagemaker.get_execution_role()
sm_sess = sagemaker.session.Session()
region = sess.region_name
framework_version = '1.2-2'
docker_image_name = sagemaker.image_uris.retrieve(framework='xgboost', region=region, version=framework_version)
xgb_inference_model = Model(
model_data=s3url_model_artifact,
role=role,
image_uri=docker_image_name,
)
data_capture_prefix = '{}/datacapture'.format(prefix)
endpoint_name = "model-xgboost-customer-churn-" + strftime("%Y-%m-%d-%H-%M-%S", gmtime())
print(f"Deploying Endpoint with name = {endpoint_name}...")
predictor = xgb_inference_model.deploy( initial_instance_count=1,
instance_type='ml.m4.xlarge',
endpoint_name=endpoint_name,
data_capture_config=DataCaptureConfig(
enable_capture=True,
sampling_percentage=100,
destination_s3_uri='s3://{}/{}'.format(bucket, data_capture_prefix),
csv_content_types=['text/csv']
)
)
return endpoint_name, predictor
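# Hedged usage note (not part of the original lab helper): tear the endpoint
# down after the lab to avoid ongoing charges. The helper below is illustrative
# and is not called anywhere in this module.
def cleanup_endpoint_example(predictor):
    predictor.delete_endpoint()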
|
the-stack_0_10558 | #
# Pyserini: Reproducible IR research with sparse and dense representations
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import hashlib
import os
import re
import shutil
import unittest
import json
import gzip
from random import randint
from pyserini.util import download_url, download_prebuilt_index
class TestSearchIntegration(unittest.TestCase):
def setUp(self):
curdir = os.getcwd()
if curdir.endswith('clprf'):
self.pyserini_root = '../..'
else:
self.pyserini_root = '.'
self.tmp = f'{self.pyserini_root}/integrations/tmp{randint(0, 10000)}'
# In the rare event there's a collision
if os.path.exists(self.tmp):
shutil.rmtree(self.tmp)
os.mkdir(self.tmp)
os.mkdir(f'{self.tmp}/runs')
self.round5_runs = {
'https://ir.nist.gov/covidSubmit/archive/round5/covidex.r5.d2q.1s.gz':
'2181ae5b7fe8bafbd3b41700f3ccde02',
'https://ir.nist.gov/covidSubmit/archive/round5/covidex.r5.d2q.2s.gz':
'e61f9b6de5ffbe1b5b82d35216968154',
'https://ir.nist.gov/covidSubmit/archive/round5/covidex.r5.2s.gz':
'6e517a5e044d8b7ce983f7e165cf4aeb',
'https://ir.nist.gov/covidSubmit/archive/round5/covidex.r5.1s.gz':
'dc9b4b45494294a8448cf0693f07f7fd'
}
for url in self.round5_runs:
print(f'Verifying stored run at {url}...')
filename = url.split('/')[-1]
filename = re.sub('\\?dl=1$', '', filename) # Remove the Dropbox 'force download' parameter
gzip_filename = (".").join(filename.split('.')[:-1])
download_url(url, f'{self.tmp}/runs/', md5=self.round5_runs[url], force=True)
self.assertTrue(os.path.exists(os.path.join(f'{self.tmp}/runs/', filename)))
with gzip.open(f'{self.tmp}/runs/{filename}', 'rb') as f_in:
with open(f'{self.tmp}/runs/{gzip_filename}', 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
def test_round5(self):
tmp_folder_name = self.tmp.split('/')[-1]
prebuilt_index_path = download_prebuilt_index('trec-covid-r5-abstract')
os.system(f'python {self.pyserini_root}/scripts/classifier_prf/rank_trec_covid.py \
-alpha 0.6 \
-clf lr \
-vectorizer tfidf \
-new_qrels {self.pyserini_root}/tools/topics-and-qrels/qrels.covid-round5.txt \
-base {self.tmp}/runs/covidex.r5.d2q.1s \
-tmp_base {tmp_folder_name} \
-qrels {self.pyserini_root}/tools/topics-and-qrels/qrels.covid-round4-cumulative.txt \
-index {prebuilt_index_path} \
-tag covidex.r5.d2q.1s \
-output {self.tmp}/output.json')
with open(f'{self.tmp}/output.json') as json_file:
data = json.load(json_file)
self.assertEqual("0.3859", data['map'])
self.assertEqual("0.8221", data['ndcg'])
os.system(f'python {self.pyserini_root}/scripts/classifier_prf/rank_trec_covid.py \
-alpha 0.6 \
-clf lr \
-vectorizer tfidf \
-new_qrels {self.pyserini_root}/tools/topics-and-qrels/qrels.covid-round5.txt \
-base {self.tmp}/runs/covidex.r5.d2q.2s \
-tmp_base {tmp_folder_name} \
-qrels {self.pyserini_root}/tools/topics-and-qrels/qrels.covid-round4-cumulative.txt \
-index {prebuilt_index_path} \
-tag covidex.r5.d2q.2s \
-output {self.tmp}/output.json')
with open(f'{self.tmp}/output.json') as json_file:
data = json.load(json_file)
self.assertEqual("0.3875", data['map'])
self.assertEqual("0.8304", data['ndcg'])
os.system(f'python {self.pyserini_root}/scripts/classifier_prf/rank_trec_covid.py \
-alpha 0.6 \
-clf lr \
-vectorizer tfidf \
-new_qrels {self.pyserini_root}/tools/topics-and-qrels/qrels.covid-round5.txt \
-base {self.tmp}/runs/covidex.r5.1s \
-tmp_base {tmp_folder_name} \
-qrels {self.pyserini_root}/tools/topics-and-qrels/qrels.covid-round4-cumulative.txt \
-index {prebuilt_index_path} \
-tag covidex.r5.1s \
-output {self.tmp}/output.json')
with open(f'{self.tmp}/output.json') as json_file:
data = json.load(json_file)
self.assertEqual("0.3885", data['map'])
self.assertEqual("0.8135", data['ndcg'])
os.system(f'python {self.pyserini_root}/scripts/classifier_prf/rank_trec_covid.py \
-alpha 0.6 \
-clf lr \
-vectorizer tfidf \
-new_qrels {self.pyserini_root}/tools/topics-and-qrels/qrels.covid-round5.txt \
-base {self.tmp}/runs/covidex.r5.2s \
-tmp_base {tmp_folder_name} \
-qrels {self.pyserini_root}/tools/topics-and-qrels/qrels.covid-round4-cumulative.txt \
-index {prebuilt_index_path} \
-tag covidex.r5.2s \
-output {self.tmp}/output.json')
with open(f'{self.tmp}/output.json') as json_file:
data = json.load(json_file)
self.assertEqual("0.3922", data['map'])
self.assertEqual("0.8311", data['ndcg'])
def tearDown(self):
shutil.rmtree(self.tmp)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_10559 | import math
import json
import random
class Vector:
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def __str__(self):
return "(%d,%d)" % (self.x, self.y)
def add(self,p):
return Vector(self.x+p.x,self.y+p.y)
def subtract(self,p):
return Vector(self.x-p.x,self.y-p.y)
def scale(self,a):
return Vector(a*self.x,a*self.y)
def dot(self,p):
return self.x*p.x+self.y*p.y
def cross(self,p):
return self.x*p.y - self.y*p.x
def mod2(self):
return self.x*self.x + self.y*self.y
def mod(self):
return math.sqrt(self.mod2())
def normalised(self):
return Vector(self.x,self.y).scale(1/self.mod())
class Planet:
def __init__(self, p, v, nid, r=1):
self.position = p
self.velocity = v
self.radius = r
self.mass = r**2
self.elasticity = 1
self.nid=nid
self.num_boundaries = 0
def intersects(self,p):
return self.position.subtract(p.position).mod2() < math.pow(self.radius + p.radius,2)
def accelerate(self,force,clock_tick):
self.velocity = self.velocity.add(force.scale(clock_tick/self.mass))
#adds to velocity vector
def attract(self,otherp,clock_tick):
between = self.position.subtract(otherp.position)
dist = between.mod()
force = 150*(self.mass * otherp.mass / dist ** 2)
self.accelerate(between.scale(-force),clock_tick)
otherp.accelerate(between.scale(force),clock_tick)
#Scale force to clock tick?
def move(self,clock_tick=1):
self.position = self.position.add(self.velocity.scale(clock_tick))
def update_mass(self, k = 1):
om = self.mass
self.mass = self.radius**2
self.velocity = self.velocity.scale(k*om/self.mass+1-k)
def get_rebound_vectors(p1, p2):
n = p2.position.subtract(p1.position).normalised()
u1 = p1.velocity.dot(n)
u2 = p2.velocity.dot(n)
v1 = ((p1.mass-p2.mass)*u1 + 2*p2.mass*u2) / (p1.mass+p2.mass)
v2 = ((p2.mass-p1.mass)*u2 + 2*p1.mass*u1) / (p1.mass+p2.mass)
return (p1.velocity.subtract(n.scale(u1-v1)), p2.velocity.subtract(n.scale(u2-v2)))
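# Hedged worked example (not part of the original module): for equal masses the
# normal velocity components are exchanged in a head-on elastic collision.
# Positions, radii and velocities below are illustrative assumptions.
def _rebound_demo():
    a = Planet(Vector(0, 0), Vector(1, 0), nid=1, r=10)
    b = Planet(Vector(15, 0), Vector(-1, 0), nid=2, r=10)
    va, vb = get_rebound_vectors(a, b)
    print(va, vb)  # expected: (-1,0) (1,0)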
class Universe:
def __init__(self, R, r, extras=False):
if extras:
p1 = Planet(Vector(30,150), Vector(2,5),1, 30)
p2 = Planet(Vector(100,50), Vector(2,-5), 500, 30)
p3 = Planet(Vector(300,50), Vector(0,10), 20, 30)
self.planets = [p1,p2,p3]
else:
self.planets = []
self.map_radius = R
self.elasticity = 0.95
self.clock_tick = r
def get_json(self):
data = []
for i,p in enumerate(self.planets):
data.append({'x': p.position.x, 'y': p.position.y, 'r': p.radius,'id':p.nid, 's':p.num_boundaries})
return json.dumps(data)
def shrink(self,planet_id):
# Select the planet by id via a list comprehension
selected_planet = [x for x in self.planets if x.nid==planet_id][0]
if (selected_planet.radius - 10 < 20): return
selected_planet.radius -= 10
selected_planet.update_mass(0.6);
def grow(self,planet_id):
selected_planet = [x for x in self.planets if x.nid==planet_id][0]
if (selected_planet.radius + 10 > 80): return
selected_planet.radius += 10
selected_planet.update_mass()
player_slots = [-1]
def add_planet(self, competition_mode=True):
new_id = random.randint(0,100000)
ids = [x.nid for x in self.planets]
## Retry until the new id is unique
while new_id in ids:
new_id = random.randint(0,100000)
p = Vector(self.map_radius, self.map_radius)
if competition_mode:
if all(i >= 0 for i in self.player_slots):
self.player_slots = [j for i in self.player_slots for j in [i,-1]]
for i,v in enumerate(self.player_slots):
if v < 0:
self.player_slots[i] = new_id
t = math.pi*(-0.75 - 2*i/len(self.player_slots))
p = p.add(Vector(math.cos(t), math.sin(t)).scale(self.map_radius*0.6))
break
ivel = 50
newp = Planet(p,Vector(2*ivel*(random.random()-0.5),2*ivel*(random.random()-0.5)),new_id,20)
self.planets.append(newp)
return new_id
def remove_planet(self,planet_id):
ids = [x.nid for x in self.planets]
self.player_slots = [-1 if i == planet_id else i for i in self.player_slots]
if all(i < 0 for i in self.player_slots): self.player_slots = [-1]
if planet_id in ids:
remove_index = ids.index(planet_id)
del self.planets[remove_index]
def run_loop(self):
for i in range(len(self.planets)):
pos = self.planets[i].position
rad = self.planets[i].radius
# circular boundary
cv = pos.subtract(Vector(self.map_radius, self.map_radius))
if cv.mod() + rad > self.map_radius:
self.planets[i].num_boundaries += 1
n = cv.normalised()
v = self.planets[i].velocity
u = n.scale(v.dot(n))
self.planets[i].velocity = v.subtract(n.scale((1+self.elasticity)*v.dot(n)))
self.planets[i].position = pos.subtract(n.scale(cv.mod() + rad - self.map_radius))
# bouncing
for j in range(i):
p1 = self.planets[i]
p2 = self.planets[j]
if p1.intersects(p2):
vi, vj = get_rebound_vectors(p1,p2)
p1.velocity = vi
p2.velocity = vj
##Fix for intersecting
o = p1.radius + p2.radius - p1.position.subtract(p2.position).mod()
n = p2.position.subtract(p1.position).normalised()
p2.position = p2.position.add(n.scale(o/2))
p1.position = p1.position.add(n.scale(-o/2))
p1.attract(p2,self.clock_tick)
for p in self.planets:
p.move(self.clock_tick)
#self.planets[i].position = pos.add(self.planets[i].velocity.scale(self.clock_tick))
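# Hedged usage sketch (not part of the original module): a tiny headless
# simulation using the built-in demo planets. Map radius, tick length and step
# count are illustrative assumptions.
def _universe_demo(steps=100):
    universe = Universe(R=300, r=0.05, extras=True)
    for _ in range(steps):
        universe.run_loop()
    print(universe.get_json())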
|
the-stack_0_10560 | # coding: utf-8
import sys
from python_environment_check import check_packages
import pandas as pd
import matplotlib.pyplot as plt
from mlxtend.plotting import scatterplotmatrix
import numpy as np
from mlxtend.plotting import heatmap
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import RANSACRegressor
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
from sklearn.linear_model import ElasticNet
from sklearn.preprocessing import PolynomialFeatures
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
# # Machine Learning with PyTorch and Scikit-Learn
# # -- Code Examples
# ## Package version checks
# Add folder to path in order to load from the check_packages.py script:
sys.path.insert(0, '..')
# Check recommended package versions:
d = {
'numpy': '1.21.2',
'mlxtend': '0.19.0',
'matplotlib': '3.4.3',
'sklearn': '1.0',
'pandas': '1.3.2',
}
check_packages(d)
# # Chapter 09 - Predicting Continuous Target Variables with Regression Analysis
# ### Overview
# - [Introducing regression](#Introducing-linear-regression)
# - [Simple linear regression](#Simple-linear-regression)
# - [Exploring the Ames Housing Dataset](#Exploring-the-Ames-Housing-Dataset)
# - [Loading the Ames Housing dataset into a data frame](Loading-the-Ames-Housing-dataset-into-a-data-frame)
# - [Visualizing the important characteristics of a dataset](#Visualizing-the-important-characteristics-of-a-dataset)
# - [Implementing an ordinary least squares linear regression model](#Implementing-an-ordinary-least-squares-linear-regression-model)
# - [Solving regression for regression parameters with gradient descent](#Solving-regression-for-regression-parameters-with-gradient-descent)
# - [Estimating the coefficient of a regression model via scikit-learn](#Estimating-the-coefficient-of-a-regression-model-via-scikit-learn)
# - [Fitting a robust regression model using RANSAC](#Fitting-a-robust-regression-model-using-RANSAC)
# - [Evaluating the performance of linear regression models](#Evaluating-the-performance-of-linear-regression-models)
# - [Using regularized methods for regression](#Using-regularized-methods-for-regression)
# - [Turning a linear regression model into a curve - polynomial regression](#Turning-a-linear-regression-model-into-a-curve---polynomial-regression)
# - [Modeling nonlinear relationships in the Ames Housing dataset](#Modeling-nonlinear-relationships-in-the-Ames-Housing-dataset)
# - [Dealing with nonlinear relationships using random forests](#Dealing-with-nonlinear-relationships-using-random-forests)
# - [Decision tree regression](#Decision-tree-regression)
# - [Random forest regression](#Random-forest-regression)
# - [Summary](#Summary)
# # Introducing linear regression
# ## Simple linear regression
# ## Multiple linear regression
# # Exploring the Ames Housing dataset
# ## Loading the Ames Housing dataset into a data frame
# - Dataset source: http://jse.amstat.org/v19n3/decock/AmesHousing.txt
# - Dataset documentation: http://jse.amstat.org/v19n3/decock/DataDocumentation.txt
# - Dataset write-up: http://jse.amstat.org/v19n3/decock.pdf
# - `'Overall Qual'`: Rates the overall material and finish of the house
#
# 10 Very Excellent
# 9 Excellent
# 8 Very Good
# 7 Good
# 6 Above Average
# 5 Average
# 4 Below Average
# 3 Fair
# 2 Poor
# 1 Very Poor
#
# - `'Overall Cond'`: Rates the overall condition of the house
#
# 10 Very Excellent
# 9 Excellent
# 8 Very Good
# 7 Good
# 6 Above Average
# 5 Average
# 4 Below Average
# 3 Fair
# 2 Poor
# 1 Very Poor
# - `'Gr Liv Area'`: Above grade (ground) living area square feet
# - `'Central Air'`: Central air conditioning
#
# N No
# Y Yes
#
# - `'Total Bsmt SF'`: Total square feet of basement area
# - `'SalePrice'`: Sale price $$
columns = ['Overall Qual', 'Overall Cond', 'Gr Liv Area',
'Central Air', 'Total Bsmt SF', 'SalePrice']
df = pd.read_csv('http://jse.amstat.org/v19n3/decock/AmesHousing.txt',
sep='\t',
usecols=columns)
df.head()
df.shape
df['Central Air'] = df['Central Air'].map({'N': 0, 'Y': 1})
df.isnull().sum()
# remove rows that contain missing values
df = df.dropna(axis=0)
df.isnull().sum()
# ## Visualizing the important characteristics of a dataset
scatterplotmatrix(df.values, figsize=(12, 10),
names=df.columns, alpha=0.5)
plt.tight_layout()
#plt.savefig('figures/09_04.png', dpi=300)
plt.show()
cm = np.corrcoef(df.values.T)
hm = heatmap(cm, row_names=df.columns, column_names=df.columns)
plt.tight_layout()
#plt.savefig('figures/09_05.png', dpi=300)
plt.show()
# # Implementing an ordinary least squares linear regression model
# ...
# ## Solving regression for regression parameters with gradient descent
class LinearRegressionGD:
def __init__(self, eta=0.01, n_iter=50, random_state=1):
self.eta = eta
self.n_iter = n_iter
self.random_state = random_state
def fit(self, X, y):
rgen = np.random.RandomState(self.random_state)
self.w_ = rgen.normal(loc=0.0, scale=0.01, size=X.shape[1])
self.b_ = np.array([0.])
self.losses_ = []
for i in range(self.n_iter):
output = self.net_input(X)
errors = (y - output)
self.w_ += self.eta * 2.0 * X.T.dot(errors) / X.shape[0]
self.b_ += self.eta * 2.0 * errors.mean()
loss = (errors**2).mean()
self.losses_.append(loss)
return self
def net_input(self, X):
return np.dot(X, self.w_) + self.b_
def predict(self, X):
return self.net_input(X)
X = df[['Gr Liv Area']].values
y = df['SalePrice'].values
sc_x = StandardScaler()
sc_y = StandardScaler()
X_std = sc_x.fit_transform(X)
y_std = sc_y.fit_transform(y[:, np.newaxis]).flatten()
lr = LinearRegressionGD(eta=0.1)
lr.fit(X_std, y_std)
plt.plot(range(1, lr.n_iter+1), lr.losses_)
plt.ylabel('MSE')
plt.xlabel('Epoch')
plt.tight_layout()
#plt.savefig('figures/09_06.png', dpi=300)
plt.show()
def lin_regplot(X, y, model):
plt.scatter(X, y, c='steelblue', edgecolor='white', s=70)
plt.plot(X, model.predict(X), color='black', lw=2)
return
lin_regplot(X_std, y_std, lr)
plt.xlabel('Living area above ground (standardized)')
plt.ylabel('Sale price (standardized)')
#plt.savefig('figures/09_07.png', dpi=300)
plt.show()
feature_std = sc_x.transform(np.array([[2500]]))
target_std = lr.predict(feature_std)
target_reverted = sc_y.inverse_transform(target_std.reshape(-1, 1))
print(f'Sale price: ${target_reverted.flatten()[0]:.2f}')
print(f'Slope: {lr.w_[0]:.3f}')
print(f'Intercept: {lr.b_[0]:.3f}')
# ## Estimating the coefficient of a regression model via scikit-learn
slr = LinearRegression()
slr.fit(X, y)
y_pred = slr.predict(X)
print(f'Slope: {slr.coef_[0]:.3f}')
print(f'Intercept: {slr.intercept_:.3f}')
lin_regplot(X, y, slr)
plt.xlabel('Living area above ground in square feet')
plt.ylabel('Sale price in U.S. dollars')
plt.tight_layout()
#plt.savefig('figures/09_08.png', dpi=300)
plt.show()
# **Normal Equations** alternative:
# adding a column vector of "ones"
Xb = np.hstack((np.ones((X.shape[0], 1)), X))
w = np.zeros(X.shape[1])
z = np.linalg.inv(np.dot(Xb.T, Xb))
w = np.dot(z, np.dot(Xb.T, y))
print(f'Slope: {w[1]:.3f}')
print(f'Intercept: {w[0]:.3f}')
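# Hedged aside (not part of the original chapter code): the same least-squares
# solution can be obtained with np.linalg.lstsq, which avoids forming the
# matrix inverse explicitly and is numerically preferable when Xb.T @ Xb is
# ill-conditioned.
w_lstsq, _, _, _ = np.linalg.lstsq(Xb, y, rcond=None)
print(f'Slope (lstsq): {w_lstsq[1]:.3f}')
print(f'Intercept (lstsq): {w_lstsq[0]:.3f}')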
# # Fitting a robust regression model using RANSAC
ransac = RANSACRegressor(LinearRegression(),
max_trials=100, # default
min_samples=0.95,
loss='absolute_error', # default
residual_threshold=None, # default
random_state=123)
ransac.fit(X, y)
inlier_mask = ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
line_X = np.arange(3, 10, 1)
line_y_ransac = ransac.predict(line_X[:, np.newaxis])
plt.scatter(X[inlier_mask], y[inlier_mask],
c='steelblue', edgecolor='white',
marker='o', label='Inliers')
plt.scatter(X[outlier_mask], y[outlier_mask],
c='limegreen', edgecolor='white',
marker='s', label='Outliers')
plt.plot(line_X, line_y_ransac, color='black', lw=2)
plt.xlabel('Living area above ground in square feet')
plt.ylabel('Sale price in U.S. dollars')
plt.legend(loc='upper left')
plt.tight_layout()
#plt.savefig('figures/09_09.png', dpi=300)
plt.show()
print(f'Slope: {ransac.estimator_.coef_[0]:.3f}')
print(f'Intercept: {ransac.estimator_.intercept_:.3f}')
def mean_absolute_deviation(data):
return np.mean(np.abs(data - np.mean(data)))
mean_absolute_deviation(y)
ransac = RANSACRegressor(LinearRegression(),
max_trials=100, # default
min_samples=0.95,
loss='absolute_error', # default
residual_threshold=65000, # default
random_state=123)
ransac.fit(X, y)
inlier_mask = ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
line_X = np.arange(3, 10, 1)
line_y_ransac = ransac.predict(line_X[:, np.newaxis])
plt.scatter(X[inlier_mask], y[inlier_mask],
c='steelblue', edgecolor='white',
marker='o', label='Inliers')
plt.scatter(X[outlier_mask], y[outlier_mask],
c='limegreen', edgecolor='white',
marker='s', label='Outliers')
plt.plot(line_X, line_y_ransac, color='black', lw=2)
plt.xlabel('Living area above ground in square feet')
plt.ylabel('Sale price in U.S. dollars')
plt.legend(loc='upper left')
plt.tight_layout()
#plt.savefig('figures/09_10.png', dpi=300)
plt.show()
print(f'Slope: {ransac.estimator_.coef_[0]:.3f}')
print(f'Intercept: {ransac.estimator_.intercept_:.3f}')
# # Evaluating the performance of linear regression models
target = 'SalePrice'
features = df.columns[df.columns != target]
X = df[features].values
y = df[target].values
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=123)
slr = LinearRegression()
slr.fit(X_train, y_train)
y_train_pred = slr.predict(X_train)
y_test_pred = slr.predict(X_test)
x_max = np.max([np.max(y_train_pred), np.max(y_test_pred)])
x_min = np.min([np.min(y_train_pred), np.min(y_test_pred)])
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(7, 3), sharey=True)
ax1.scatter(y_test_pred, y_test_pred - y_test,
c='limegreen', marker='s', edgecolor='white',
label='Test data')
ax2.scatter(y_train_pred, y_train_pred - y_train,
c='steelblue', marker='o', edgecolor='white',
label='Training data')
ax1.set_ylabel('Residuals')
for ax in (ax1, ax2):
ax.set_xlabel('Predicted values')
ax.legend(loc='upper left')
ax.hlines(y=0, xmin=x_min-100, xmax=x_max+100, color='black', lw=2)
plt.tight_layout()
#plt.savefig('figures/09_11.png', dpi=300)
plt.show()
mse_train = mean_squared_error(y_train, y_train_pred)
mse_test = mean_squared_error(y_test, y_test_pred)
print(f'MSE train: {mse_train:.2f}')
print(f'MSE test: {mse_test:.2f}')
mae_train = mean_absolute_error(y_train, y_train_pred)
mae_test = mean_absolute_error(y_test, y_test_pred)
print(f'MAE train: {mae_train:.2f}')
print(f'MAE test: {mae_test:.2f}')
r2_train = r2_score(y_train, y_train_pred)
r2_test = r2_score(y_test, y_test_pred)
print(f'R^2 train: {r2_train:.2f}')
print(f'R^2 test: {r2_test:.2f}')
# # Using regularized methods for regression
lasso = Lasso(alpha=1.0)
lasso.fit(X_train, y_train)
y_train_pred = lasso.predict(X_train)
y_test_pred = lasso.predict(X_test)
print(lasso.coef_)
train_mse = mean_squared_error(y_train, y_train_pred)
test_mse = mean_squared_error(y_test, y_test_pred)
print(f'MSE train: {train_mse:.3f}, test: {test_mse:.3f}')
train_r2 = r2_score(y_train, y_train_pred)
test_r2 = r2_score(y_test, y_test_pred)
print(f'R^2 train: {train_r2:.3f}, test: {test_r2:.3f}')
# Ridge regression:
ridge = Ridge(alpha=1.0)
# LASSO regression:
lasso = Lasso(alpha=1.0)
# Elastic Net regression:
elanet = ElasticNet(alpha=1.0, l1_ratio=0.5)
# # Turning a linear regression model into a curve - polynomial regression
X = np.array([258.0, 270.0, 294.0,
320.0, 342.0, 368.0,
396.0, 446.0, 480.0, 586.0])\
[:, np.newaxis]
y = np.array([236.4, 234.4, 252.8,
298.6, 314.2, 342.2,
360.8, 368.0, 391.2,
390.8])
lr = LinearRegression()
pr = LinearRegression()
quadratic = PolynomialFeatures(degree=2)
X_quad = quadratic.fit_transform(X)
# fit linear features
lr.fit(X, y)
X_fit = np.arange(250, 600, 10)[:, np.newaxis]
y_lin_fit = lr.predict(X_fit)
# fit quadratic features
pr.fit(X_quad, y)
y_quad_fit = pr.predict(quadratic.fit_transform(X_fit))
# plot results
plt.scatter(X, y, label='Training points')
plt.plot(X_fit, y_lin_fit, label='Linear fit', linestyle='--')
plt.plot(X_fit, y_quad_fit, label='Quadratic fit')
plt.xlabel('Explanatory variable')
plt.ylabel('Predicted or known target values')
plt.legend(loc='upper left')
plt.tight_layout()
#plt.savefig('figures/09_12.png', dpi=300)
plt.show()
y_lin_pred = lr.predict(X)
y_quad_pred = pr.predict(X_quad)
mse_lin = mean_squared_error(y, y_lin_pred)
mse_quad = mean_squared_error(y, y_quad_pred)
print(f'Training MSE linear: {mse_lin:.3f}'
f', quadratic: {mse_quad:.3f}')
r2_lin = r2_score(y, y_lin_pred)
r2_quad = r2_score(y, y_quad_pred)
print(f'Training R^2 linear: {r2_lin:.3f}'
f', quadratic: {r2_quad:.3f}')
# ## Modeling nonlinear relationships in the Ames Housing dataset
X = df[['Gr Liv Area']].values
y = df['SalePrice'].values
X = X[(df['Gr Liv Area'] < 4000)]
y = y[(df['Gr Liv Area'] < 4000)]
regr = LinearRegression()
# create quadratic features
quadratic = PolynomialFeatures(degree=2)
cubic = PolynomialFeatures(degree=3)
X_quad = quadratic.fit_transform(X)
X_cubic = cubic.fit_transform(X)
# fit features
X_fit = np.arange(X.min()-1, X.max()+2, 1)[:, np.newaxis]
regr = regr.fit(X, y)
y_lin_fit = regr.predict(X_fit)
linear_r2 = r2_score(y, regr.predict(X))
regr = regr.fit(X_quad, y)
y_quad_fit = regr.predict(quadratic.fit_transform(X_fit))
quadratic_r2 = r2_score(y, regr.predict(X_quad))
regr = regr.fit(X_cubic, y)
y_cubic_fit = regr.predict(cubic.fit_transform(X_fit))
cubic_r2 = r2_score(y, regr.predict(X_cubic))
# plot results
plt.scatter(X, y, label='Training points', color='lightgray')
plt.plot(X_fit, y_lin_fit,
label=f'Linear (d=1), $R^2$={linear_r2:.2f}',
color='blue',
lw=2,
linestyle=':')
plt.plot(X_fit, y_quad_fit,
label=f'Quadratic (d=2), $R^2$={quadratic_r2:.2f}',
color='red',
lw=2,
linestyle='-')
plt.plot(X_fit, y_cubic_fit,
label=f'Cubic (d=3), $R^2$={cubic_r2:.2f}',
color='green',
lw=2,
linestyle='--')
plt.xlabel('Living area above ground in square feet')
plt.ylabel('Sale price in U.S. dollars')
plt.legend(loc='upper left')
plt.tight_layout()
plt.savefig('figures/09_13.png', dpi=300)
plt.show()
X = df[['Overall Qual']].values
y = df['SalePrice'].values
regr = LinearRegression()
# create quadratic features
quadratic = PolynomialFeatures(degree=2)
cubic = PolynomialFeatures(degree=3)
X_quad = quadratic.fit_transform(X)
X_cubic = cubic.fit_transform(X)
# fit features
X_fit = np.arange(X.min()-1, X.max()+2, 1)[:, np.newaxis]
regr = regr.fit(X, y)
y_lin_fit = regr.predict(X_fit)
linear_r2 = r2_score(y, regr.predict(X))
regr = regr.fit(X_quad, y)
y_quad_fit = regr.predict(quadratic.fit_transform(X_fit))
quadratic_r2 = r2_score(y, regr.predict(X_quad))
regr = regr.fit(X_cubic, y)
y_cubic_fit = regr.predict(cubic.fit_transform(X_fit))
cubic_r2 = r2_score(y, regr.predict(X_cubic))
# plot results
plt.scatter(X, y, label='Training points', color='lightgray')
plt.plot(X_fit, y_lin_fit,
label=f'Linear (d=1), $R^2$={linear_r2:.2f}',
color='blue',
lw=2,
linestyle=':')
plt.plot(X_fit, y_quad_fit,
label=f'Quadratic (d=2), $R^2$={quadratic_r2:.2f}',
color='red',
lw=2,
linestyle='-')
plt.plot(X_fit, y_cubic_fit,
label=f'Cubic (d=3), $R^2$={cubic_r2:.2f}',
color='green',
lw=2,
linestyle='--')
plt.xlabel('Overall quality of the house')
plt.ylabel('Sale price in U.S. dollars')
plt.legend(loc='upper left')
plt.tight_layout()
#plt.savefig('figures/09_14.png', dpi=300)
plt.show()
# # Dealing with nonlinear relationships using random forests
# ...
# ## Decision tree regression
X = df[['Gr Liv Area']].values
y = df['SalePrice'].values
tree = DecisionTreeRegressor(max_depth=3)
tree.fit(X, y)
sort_idx = X.flatten().argsort()
lin_regplot(X[sort_idx], y[sort_idx], tree)
plt.xlabel('Living area above ground in square feet')
plt.ylabel('Sale price in U.S. dollars')
plt.tight_layout()
#plt.savefig('figures/09_15.png', dpi=300)
plt.show()
tree_r2 = r2_score(y, tree.predict(X))
tree_r2
# ## Random forest regression
target = 'SalePrice'
features = df.columns[df.columns != target]
X = df[features].values
y = df[target].values
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=123)
forest = RandomForestRegressor(n_estimators=1000,
criterion='squared_error',
random_state=1,
n_jobs=-1)
forest.fit(X_train, y_train)
y_train_pred = forest.predict(X_train)
y_test_pred = forest.predict(X_test)
mae_train = mean_absolute_error(y_train, y_train_pred)
mae_test = mean_absolute_error(y_test, y_test_pred)
print(f'MAE train: {mae_train:.2f}')
print(f'MAE test: {mae_test:.2f}')
r2_train = r2_score(y_train, y_train_pred)
r2_test = r2_score(y_test, y_test_pred)
print(f'R^2 train: {r2_train:.2f}')
print(f'R^2 test: {r2_test:.2f}')
x_max = np.max([np.max(y_train_pred), np.max(y_test_pred)])
x_min = np.min([np.min(y_train_pred), np.min(y_test_pred)])
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(7, 3), sharey=True)
ax1.scatter(y_test_pred, y_test_pred - y_test,
c='limegreen', marker='s', edgecolor='white',
label='Test data')
ax2.scatter(y_train_pred, y_train_pred - y_train,
c='steelblue', marker='o', edgecolor='white',
label='Training data')
ax1.set_ylabel('Residuals')
for ax in (ax1, ax2):
ax.set_xlabel('Predicted values')
ax.legend(loc='upper left')
ax.hlines(y=0, xmin=x_min-100, xmax=x_max+100, color='black', lw=2)
plt.tight_layout()
#plt.savefig('figures/09_16.png', dpi=300)
plt.show()
# # Summary
# ...
# ---
#
# Readers may ignore the next cell.
|
the-stack_0_10562 | # Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
import paddle
from op_test import OpTest
class TestEinsumBinary(OpTest):
def setUp(self):
paddle.enable_static()
self.op_type = "einsum"
self.disable = False
self.set_mandatory()
self.init_input()
np.random.seed(123)
out = np.einsum(self.equation, *self.inputs)
self.operands = []
for idx, inp in enumerate(self.inputs):
self.operands.append(("x" + str(idx), inp))
self.inputs = {"Operands": self.operands}
self.attrs = {"equation": self.equation}
self.outputs = {
'Out': out,
"InnerCache": [('cache_' + str(i), np.array([1.0]))
for i in range(len(self.operands))]
}
def init_input(self):
self.inputs = []
for t, s in zip(self.types, self.shapes):
self.inputs.append(np.random.random(s).astype(t))
def set_mandatory(self):
self.disable = False
self.shapes = [(10, 10, 20), (20, 6)]
self.types = [np.float64, np.float64]
self.equation = "mij,jk->ki"
def test_check_output(self):
if not self.disable:
self.check_output(no_check_set=["InnerCache"])
def test_grad(self):
if not self.disable:
self.check_grad([op[0] for op in self.operands], ["Out"])
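# Hedged aside (not part of the original test file): what the default equation
# "mij,jk->ki" computes, written out with plain NumPy. Shapes mirror
# set_mandatory() above; the helper is never invoked by the tests.
def _einsum_reference_example():
    a = np.random.random((10, 10, 20))
    b = np.random.random((20, 6))
    expected = np.einsum("mij,jk->ki", a, b)
    # Sum over the batch axis m, contract over j, then transpose (i, k) -> (k, i).
    manual = np.tensordot(a.sum(axis=0), b, axes=([1], [0])).T
    assert np.allclose(expected, manual)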
class TestEinsum1(TestEinsumBinary):
def set_mandatory(self):
self.shapes = [(20, 3, 3), (20, 3, 3)]
self.types = [np.float64, np.float64]
self.equation = "mij,mjk->mik"
class TestEinsum2(TestEinsumBinary):
def set_mandatory(self):
self.shapes = [(20, 3, 3), (20, 3, 3)]
self.types = [np.float64, np.float64]
self.equation = "mij,mjk->ikm"
class TestEinsum3(TestEinsumBinary):
def set_mandatory(self):
self.shapes = [(10, 10), (10, 10)]
self.types = [np.float64, np.float64]
self.equation = "ij,jk->ik" # }}}
class TestEinsumWithReduction(TestEinsumBinary):
def set_mandatory(self):
self.shapes = [(10, 3, 5), (5, 30)]
self.types = [np.float64, np.float64]
self.equation = "ijk,kl->jl"
class TestEinsumWithReduction1(TestEinsumBinary):
def set_mandatory(self):
self.shapes = [(10, 3, 3, 5), (10, 5, 10, 10)]
self.types = [np.float64, np.float64]
self.equation = "mijk,mklh->ljm"
class TestEinsumWithUnary(TestEinsumBinary):
def set_mandatory(self):
self.shapes = [(10, 10, 3, 5)]
self.types = [np.float64]
self.equation = "mijk->mi"
class TestEinsumWithUnary1(TestEinsumBinary):
def set_mandatory(self):
self.shapes = [(5, 10, 3, 3), (3, 6, 3, 10)]
self.types = [np.float64, np.float64]
self.equation = "imjl,jklm->imk"
class TestEinsumWithBroadcast1(TestEinsumBinary):
def set_mandatory(self):
self.shapes = [(5, 10, 3, 3)]
self.types = [np.float64]
self.equation = "i...->..."
class TestEinsumWithBroadcast2(TestEinsumBinary):
def set_mandatory(self):
self.shapes = [(10, 11), (3, 4, 5, 10)]
self.types = [np.float64, np.float64]
self.equation = "...ij,...i->j..."
class TestEinsumWithBroadcast3(TestEinsumBinary):
def set_mandatory(self):
self.shapes = [(10, 3, 2, 3, 4), (12, 10)]
self.types = [np.float64, np.float64]
self.equation = "k...,...jk->...k"
class TestEinsumWithBroadcast4(TestEinsumBinary):
def set_mandatory(self):
self.shapes = [(10, 3, 2, 3, 4), (12, 10)]
self.types = [np.float64, np.float64]
self.equation = "a...d,...cb->...abcd"
class TestEinsumWithBroadcast5(TestEinsumBinary):
def set_mandatory(self):
self.shapes = [(3, 2, 2, 10), (10, 3, 2, 2)]
self.types = [np.float64, np.float64]
self.equation = "...a,a...->..."
class TestEinsumWithBroadcast6(TestEinsumBinary):
def set_mandatory(self):
self.shapes = [(100), (100)]
self.types = [np.float64, np.float64]
self.equation = "i,i->"
if __name__ == "__main__":
unittest.main()
|
the-stack_0_10563 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Libunwind(AutotoolsPackage):
"""A portable and efficient C programming interface (API) to determine
the call-chain of a program."""
homepage = "http://www.nongnu.org/libunwind/"
url = "http://download.savannah.gnu.org/releases/libunwind/libunwind-1.1.tar.gz"
git = "https://github.com/libunwind/libunwind"
maintainers = ['mwkrentel']
version('master', branch='master')
version('1.5-head', branch='v1.5-stable')
version('1.5-rc1', sha256='3e0cbc6dee326592097ef06e97cf76ef597987eddd0df8bea49b0594e587627a')
version('1.4-head', branch='v1.4-stable')
version('1.4.0', sha256='df59c931bd4d7ebfd83ee481c943edf015138089b8e50abed8d9c57ba9338435', preferred=True)
version('1.4-rc1', sha256='1928459139f048f9b4aca4bb5010540cb7718d44220835a2980b85429007fa9f')
version('1.3.1', sha256='43997a3939b6ccdf2f669b50fdb8a4d3205374728c2923ddc2354c65260214f8')
version('1.2.1', sha256='3f3ecb90e28cbe53fba7a4a27ccce7aad188d3210bb1964a923a731a27a75acb')
version('1.1', sha256='9dfe0fcae2a866de9d3942c66995e4b460230446887dbdab302d41a8aee8d09a')
variant('xz', default=False,
description='Support xz (lzma) compressed symbol tables.')
variant('zlib', default=False,
description='Support zlib compressed symbol tables '
'(1.5 and later).')
# The libunwind releases contain the autotools generated files,
# but the git repo snapshots do not.
depends_on('autoconf', type='build', when='@master,1.4-head,1.5-head')
depends_on('automake', type='build', when='@master,1.4-head,1.5-head')
depends_on('libtool', type='build', when='@master,1.4-head,1.5-head')
depends_on('m4', type='build', when='@master,1.4-head,1.5-head')
depends_on('xz', type='link', when='+xz')
depends_on('zlib', type='link', when='+zlib')
conflicts('platform=darwin',
msg='Non-GNU libunwind needs ELF libraries Darwin does not have')
provides('unwind')
flag_handler = AutotoolsPackage.build_system_flags
def configure_args(self):
spec = self.spec
args = []
if '+xz' in spec:
args.append('--enable-minidebuginfo')
else:
args.append('--disable-minidebuginfo')
# zlib support is available in 1.5.x and later
if spec.satisfies('@1.5:'):
if '+zlib' in spec:
args.append('--enable-zlibdebuginfo')
else:
args.append('--disable-zlibdebuginfo')
return args
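# Hedged usage note (not part of the original recipe): the variants above are
# selected with standard Spack spec syntax, e.g. `spack install libunwind +xz`;
# the zlib variant only takes effect on 1.5 and later, as configure_args shows.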
|
the-stack_0_10564 | # -*- coding: utf-8 -*-
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# You can only use this computer program if you have closed
# a license agreement with MPG or you get the right to use the computer
# program from someone who is authorized to grant you that right.
# Any use of the computer program without a valid license is prohibited and
# liable to prosecution.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# Contact: [email protected]
import sys
sys.path.append('.')
import glob
import joblib
import argparse
import numpy as np
import json
import os.path as osp
from lib.models import spin
from lib.core.config import MP_DB_DIR
from lib.utils.utils import tqdm_enumerate
from lib.data_utils.feature_extractor import extract_features
from lib.data_utils.kp_utils import get_posetrack_original_kp_names, convert_kps
def read_data(folder, set):
dataset = {
'img_name' : [] ,
'joints2D': [],
'bbox': [],
'vid_name': [],
'features': [],
}
model = spin.get_pretrained_hmr()
file_names = glob.glob(osp.join(folder, 'posetrack_data/annotations/', f'{set}/*.json'))
file_names = sorted(file_names)
nn_corrupted = 0
tot_frames = 0
min_frame_number = 8
for fid,fname in tqdm_enumerate(file_names):
if fname == osp.join(folder, 'annotations/train/021133_mpii_train.json'):
continue
with open(fname, 'r') as entry:
anns = json.load(entry)
# num_frames = anns['images'][0]['nframes']
anns['images'] = [item for item in anns['images'] if item['is_labeled'] ]
num_frames = len(anns['images'])
frame2imgname = dict()
for el in anns['images']:
frame2imgname[el['frame_id']] = el['file_name']
num_people = -1
for x in anns['annotations']:
if num_people < x['track_id']:
num_people = x['track_id']
num_people += 1
posetrack_joints = get_posetrack_original_kp_names()
idxs = [anns['categories'][0]['keypoints'].index(h) for h in posetrack_joints if h in anns['categories'][0]['keypoints']]
for x in anns['annotations']:
kps = np.array(x['keypoints']).reshape((17,3))
kps = kps[idxs,:]
x['keypoints'] = list(kps.flatten())
tot_frames += num_people * num_frames
for p_id in range(num_people):
annot_pid = [(item['keypoints'], item['bbox'], item['image_id'])
for item in anns['annotations']
if item['track_id'] == p_id and not(np.count_nonzero(item['keypoints']) == 0) ]
if len(annot_pid) < min_frame_number:
nn_corrupted += len(annot_pid)
continue
bbox = np.zeros((len(annot_pid),4))
# perm_idxs = get_perm_idxs('posetrack', 'common')
kp_2d = np.zeros((len(annot_pid), len(annot_pid[0][0])//3 ,3))
img_paths = np.zeros((len(annot_pid)))
for i, (key2djnts, bbox_p, image_id) in enumerate(annot_pid):
if (bbox_p[2]==0 or bbox_p[3]==0) :
nn_corrupted +=1
continue
img_paths[i] = image_id
key2djnts[2::3] = len(key2djnts[2::3])*[1]
kp_2d[i,:] = np.array(key2djnts).reshape(int(len(key2djnts)/3),3) # [perm_idxs, :]
for kp_loc in kp_2d[i,:]:
if kp_loc[0] == 0 and kp_loc[1] == 0:
kp_loc[2] = 0
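                    # Convert the COCO-style [x_top_left, y_top_left, w, h] box below into a
                    # centre-based square box [cx, cy, s, s], where s is 0.8x the larger side.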
x_tl = bbox_p[0]
y_tl = bbox_p[1]
w = bbox_p[2]
h = bbox_p[3]
bbox_p[0] = x_tl + w / 2
bbox_p[1] = y_tl + h / 2
#
w = h = np.where(w / h > 1, w, h)
w = h = h * 0.8
bbox_p[2] = w
bbox_p[3] = h
bbox[i, :] = bbox_p
img_paths = list(img_paths)
img_paths = [osp.join(folder, frame2imgname[item]) if item != 0 else 0 for item in img_paths ]
bbx_idxs = []
for bbx_id, bbx in enumerate(bbox):
if np.count_nonzero(bbx) == 0:
bbx_idxs += [bbx_id]
kp_2d = np.delete(kp_2d, bbx_idxs, 0)
img_paths = np.delete(np.array(img_paths), bbx_idxs, 0)
bbox = np.delete(bbox, np.where(~bbox.any(axis=1))[0], axis=0)
# Convert to common 2d keypoint format
if bbox.size == 0 or bbox.shape[0] < min_frame_number:
nn_corrupted += 1
continue
kp_2d = convert_kps(kp_2d, src='posetrack', dst='spin')
dataset['vid_name'].append(np.array([f'{fname}_{p_id}']*img_paths.shape[0]))
dataset['img_name'].append(np.array(img_paths))
dataset['joints2D'].append(kp_2d)
dataset['bbox'].append(np.array(bbox))
# compute_features
features = extract_features(
model,
np.array(img_paths),
bbox,
kp_2d=kp_2d,
dataset='spin',
debug=False,
)
assert kp_2d.shape[0] == img_paths.shape[0] == bbox.shape[0]
dataset['features'].append(features)
print(nn_corrupted, tot_frames)
for k in dataset.keys():
dataset[k] = np.array(dataset[k])
for k in dataset.keys():
dataset[k] = np.concatenate(dataset[k])
for k,v in dataset.items():
print(k, v.shape)
return dataset
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--dir', type=str, help='dataset directory', default='data/posetrack')
args = parser.parse_args()
dataset_train = read_data(args.dir, 'train')
joblib.dump(dataset_train, osp.join(MP_DB_DIR, 'posetrack_train_db.pt'))
|
the-stack_0_10567 | """Console adapted from Python's console found in code.py.
An earlier version of this code was subclassing code.InteractiveConsole.
However, as more and more changes were introduced, dealing with
code transformation and especially customized error handling,
it seemed to make sense to "rewrite" every relevant part in
this single module.
"""
import os
import platform
import friendly_traceback
from . import version
from .converter import convert
from .session import state
from .my_gettext import gettext_lang
class AvantPyInteractiveConsole(friendly_traceback.FriendlyConsole):
"""A Python console that tries to emulate the normal Python interpreter
except that it support experimental code transformations.
It is adapted from cPython's ``code.InteractiveConsole`` and its
parent.
Like the normal Python interactive console, it attempts to evaluate
code entered one line at a time by a user.
"""
def __init__(self, locals=None):
self.locals = locals if locals is not None else {}
super().__init__(locals=locals)
self.name = "<AvantPy console>"
self.resetbuffer()
state.console_active = True
def push(self, line):
"""Pushes a transformed line to the interpreter.
The line should not have a trailing newline; it may have
internal newlines. The line is transformed and appended to a buffer.
The interpreter's run_source() method is called with the
concatenated contents of the buffer as source. If this
indicates that the command was executed or invalid, the buffer
is reset; otherwise, the command is incomplete, and the buffer
is left as it was after the line was appended. The return
value is True if more input is required, False if the line was dealt
with in some way (this is the same as run_source()).
"""
assert not line.endswith("\n"), "Forbidden trailing newline in push()."
_ = gettext_lang.lang
self.buffer.append(line)
self.source = "\n".join(self.buffer)
self.counter += 1
self.name = "<avantpy-console:%d>" % self.counter
friendly_traceback.cache.add(self.name, self.source)
try:
self.converted = convert(self.source, filename=self.name)
except SystemExit:
os._exit(1)
except Exception:
friendly_traceback.explain()
self.resetbuffer()
return False
try:
more = self.runsource(self.converted, filename=self.name)
except SystemExit:
os._exit(1)
except Exception:
friendly_traceback.explain()
self.resetbuffer()
return False
if not more:
self.resetbuffer()
return more
def runsource(self, source, filename="<input>", symbol="single"):
"""Compile and run some source in the interpreter.
Arguments are as for compile_command().
        One of several things can happen:
1) The input is incorrect; compile_command() raised an
exception (SyntaxError or OverflowError). A syntax traceback
will be printed by calling the showsyntaxerror() method.
2) The input is incomplete, and more input is required;
compile_command() returned None. Nothing happens.
3) The input is complete; compile_command() returned a code
object. The code is executed by calling self.runcode() (which
also handles run-time exceptions, except for SystemExit).
The return value is True in case 2, False in the other cases (unless
an exception is raised). The return value can be used to
decide whether to use sys.ps1 or sys.ps2 to prompt the next
line.
"""
try:
code = self.compile(source, filename, symbol)
except (OverflowError, SyntaxError, ValueError):
# Case 1
friendly_traceback.explain()
return False
if code is None:
# Case 2
return True
# Case 3
self.runcode(code)
return False
def start_console(local_vars=None):
"""Starts a console; modified from code.interact"""
console_defaults = {"set_lang": state.set_lang, "set_dialect": state.set_dialect}
if local_vars is None:
local_vars = console_defaults
else:
local_vars.update(console_defaults)
console = AvantPyInteractiveConsole(locals=local_vars)
banner = "AvantPy version {} [Python: {}; Friendly-traceback: {}]\n".format(
version.__version__,
platform.python_version(),
friendly_traceback.version.__version__,
)
console.interact(banner=banner)
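# Hedged usage sketch: one way a script might launch the console defined above.
# The extra variable below ("greeting") is illustrative only and not part of
# AvantPy's documented API.
if __name__ == "__main__":
    start_console(local_vars={"greeting": "hello"})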
|
the-stack_0_10569 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 11 19:05:26 2014
Module for linking with the Allen brain atlas
@author: tim
"""
import csv
from os import path, rename, remove
from glob import glob
import numpy as np
from scipy import stats
from matplotlib import pyplot as plt
from maybrain import brain
# from maybrain.plotting import mayavi_wrapper as plot
class AllenBrain:
"""
An object the combines a network generated from imaging data with gene
expression data taken from the Allen brain atlas. Genetic data can be
downloaded from http://human.brain-map.org/static/download.
An example will be available on the wiki soon.
"""
def __init__(self, allen_subj,
assoc_matrix,
allen_dir="allen_data",
delim=None,
imaging_spatial_file="atlas471_xyz_flip_xy.txt",
nodesToExclude=[],
symmetrise=False,
mirror=False,
signif_value=1.0,
convert_mni=False):
"""
This object contains two 'brain' network objects, one for the imaging
data and one for the Allen data. Embedded functions make a comparison
between the imaging and Allen data by pairing nodes between the two
network objects.
Convert MNI converts 2mm space to MNI space - useful if comparing to
a standard template.
"""
self.allen_dir = allen_dir # directory where data is located
self.allen_subj = allen_subj # subject number, eg 178236545
# files in the subject directory to assign gene properties
self.annotation_file = "SampleAnnot.csv"
self.microarray_expression_file = "MicroarrayExpression.csv"
self.probe_file = "Probes.csv"
self.signif_value = signif_value # set significance value for gene correlations
# if symmetrise is true then regions are identified by the structure
# name, if symmetrise is false, then regions are identified by the
# structural acronym these refer to headers in the SampleAnnot.csv file
if symmetrise:
self.s_lab = "structure_acronym"
else:
self.s_lab = "structure_name"
self.mirror = mirror
if symmetrise and self.mirror:
print("Please select either a symmetrised or mirrored graph, \
ignoring mirror=True")
self.mirror = False
node_counter = 0
# import probe data
file = open(path.join(self.allen_dir, self.allen_subj, self.annotation_file), "r")
reader = csv.DictReader(file, delimiter=",", quotechar='"')
self.headers = ['probe']
self.s_id_dict = {}
self.expr_brain = load_allen(self.allen_subj,
self.allen_dir,
self.annotation_file,
self.s_lab,
convert_mni,
mirror)
# set up brain with graph properties
self.imaging_brain = brain.Brain()
self.imaging_brain.import_adj_file(assoc_matrix, delimiter=delim,
nodes_to_exclude=nodesToExclude)
self.imaging_brain.import_spatial_info(imaging_spatial_file,
convert_mni=convert_mni)
def comparison(self, max_dist=None):
"""
set up dictionary to link nodes from probe data and graph
keys are the mri nodes and values are disctionaries containing two keys:
key 1= allen, value= (node_count=sa_id of closest allen node, dist=distance to closest allen node)
key 2= mri, value= (node_count=sa_id of closest other mri node, dist=distance to closest mri node)
max_dist is the maximum distance allowed between node pairs. This should be mandatory if 'mirror'
or 'symmetrise' is used, otherwise nodes will be paired across hemispheres.
"""
imaging_node_dict = {}
# iterate through the imaging nodes and find the closest Allen brain nodes
for node in self.imaging_brain.G.nodes():
d_other = (None, 999.) # dummy length of 999
d_own = (None, 999.)
# iterate through the Allen nodes
for node_count in self.expr_brain.G.nodes():
# find the distance between the imaging and Allen nodes
dist = np.linalg.norm(np.array(self.imaging_brain.G.node[node]['xyz']
- np.array(self.expr_brain.G.node[node_count]['xyz'])))
if dist < d_other[1]:
d_other = (node_count, dist)
# check if the distance exceeds the maximum specified
if max_dist:
if d_other[1] > max_dist:
d_other = (None, None)
# now iterate through the imaging nodes to find the closest imaging node
for node_count in [v for v in self.imaging_brain.G.nodes() if not v == node]:
dist = np.linalg.norm(np.array(self.imaging_brain.G.node[node]['xyz']
- np.array(self.imaging_brain.G.node[node_count]['xyz'])))
if dist < d_own[1]:
d_own = (node_count, dist)
# check if the distance exceeds the maximum specified
if max_dist:
if d_own[1] > max_dist:
d_own = (None, None)
imaging_node_dict[node] = {"allen":d_other, "MRIs":d_own}
node_pairs = [] # create a list of node pairs
# for each MRI node..
for node in list(imaging_node_dict.keys()):
# ...find closest allen node 'node_count'
node_count = imaging_node_dict[node]['allen'][0]
self.imaging_brain.G.node[node]['pair'] = node_count
try:
self.expr_brain.G.node[node_count]['pair'] = node
except KeyError:
pass
node_pairs.append((node, node_count))
# iterate through the list of nodes to assign pairs and remove unpaired nodes
node_list = [v for v in self.expr_brain.G.nodes()]
for node in node_list:
if 'pair' not in list(self.expr_brain.G.node[node].keys()):
self.expr_brain.G.remove_node(node)
def probedata(self, prop_dict, graph_metric_name="gm", node_list=None, scatter_plot=False,
probe_list=[], probe_numbers=[], thresh=False):
'''
Explanation required.
'''
probes = open(path.join(self.allen_dir, self.allen_subj, "Probes.csv"), "r")
probe_reader = csv.DictReader(probes, delimiter=",",
quotechar='"')
probe_dict = {line['probe_id']:[line['gene_symbol'], line['gene_name']] for line in probe_reader}
if probe_list:
probe_numbers = []
for probe in probe_list:
probe_numbers.extend([v for v in list(probe_dict.keys()) if any([probe in probe_dict[v][1], probe in probe_dict[v][0]])])
print((" ".join(["Probe numbers:", ' '.join(probe_numbers)])))
else:
probe_numbers = None
out_file = path.join(self.allen_subj, graph_metric_name+'.txt')
print(("Saving data in:"+out_file))
if path.exists(out_file):
rename(out_file, out_file+'.old')
file = open(out_file, "w")
        file.writelines(', '.join(['probe_id', 'gene_name', 'r', 'p\n']))
file.close()
if node_list:
for node in self.imaging_brain.G.nodes():
if not node in node_list:
self.expr_brain.G.remove_node(self.imaging_brain.G.node[node]['pair'])
self.imaging_brain.G.remove_node(node)
# import probe data
file = open(path.join(self.allen_dir, self.allen_subj, self.microarray_expression_file), "r")
reader = csv.DictReader(file, delimiter=",",
fieldnames=self.headers,
quotechar='"')
for line in reader:
if thresh:
self.probesubt(line, prop_dict, probe_dict, graph_metric_name, out_file, probe_numbers)
else:
self.probesub(line, prop_dict, probe_dict, graph_metric_name, out_file,
scatter_plot, probe_numbers)
file.close()
def xmatrix(self, out_file="Xmatrix.csv", probe_numbers=None, temp_mat_name="tempMat.txt",
st_dev=False, st_dev_file="NodesSd.txt"):
"""
Needs writing
"""
# get all probes if otherwise unspecified
if not probe_numbers:
file = open(path.join(self.allen_dir, self.allen_subj, self.probe_file))
probe_numbers = [line['probe_id'] for line in csv.DictReader(file, delimiter=",", quotechar='"')]
file.close()
# set up out file
out = open(self.allen_subj+out_file, "wb")
headers = ["Gene"]
headers.extend([str(v) for v in self.imaging_brain.G.nodes()])
writer = csv.DictWriter(out, fieldnames=headers, delimiter=" ")
writer.writeheader()
# set up matrix
y = len(probe_numbers)
z = len(self.imaging_brain.G.nodes())
st = np.max([len(list(self.s_id_dict.values()))]) # max numbers of nodes for any region
probe_mat = np.memmap(temp_mat_name,
dtype="float64",
mode="w+",
shape=(y, z, st))
# set up gene list
gene_flag = True
probe_file = open(path.join(self.allen_dir, self.allen_subj, self.probe_file))
probe_reader = csv.DictReader(probe_file, delimiter=",", quotechar='"')
probe_dict = {line['probe_id']:line['gene_symbol'] for line in probe_reader}
gene_list = list(probe_dict.values())
set(gene_list)
gene_list = {gene:[] for gene in gene_list}
s_id_list = [v for v in self.headers if v != 'probe']
s_id_list = set(s_id_list)
# get the corresponding node names in the MRI graph
        # c_nodes is a dict whose keys are all the MRI nodes and values are the matched allen nodes
#### PV modified line below which constructed c_nodes by looping through allen nodes
# but with PV's lax matching criteria several mri nodes can be matched to same allen node
# the mri pair of these allen nodes gets overwritten in self.expr_brain.G.nodes and so
# not all mri nodes will appear as pairs of allen nodes in this dict...
# need to look up pairs in self.imaging_brain.G.nodes instead, where
# each mri node is matched to an allen region
# c_nodes = {str(self.expr_brain.G.node[v]['pair']):v for v in self.expr_brain.G.nodes()}
c_nodes = {str(v):self.imaging_brain.G.node[v]['pair'] for v in self.imaging_brain.G.nodes()}
# assign values to matrix
print((str(self.allen_subj)))
        print('\n')
        # Generate custom fieldnames list for DictReader which doesn't rely on structure_id
# *************************************
fieldnames_pv = ['probe']
my_node_dict = {}
temp_headers = self.headers
temp_headers.remove('probe')
for p, q in enumerate(self.headers):
my_node_dict[p] = q
fieldnames_pv.append(str(p)) #####
# *************************************
# import probe data
file = open(path.join(self.allen_dir, self.allen_subj, self.microarray_expression_file), "r")
reader = csv.DictReader(file, delimiter=",",
fieldnames=fieldnames_pv,
quotechar='"')
y = 0
for line in reader:
if line['probe']in probe_numbers:
for node in self.expr_brain.G.nodes():
if line[self.expr_brain.G.node[node][self.s_lab]]:
self.expr_brain.G.node[node][line['probe']] = line[self.expr_brain.G.node[node][self.s_lab]]
else:
self.expr_brain.G.node[node][line['probe']] = None
aa_mat = np.zeros((len(self.imaging_brain.G.nodes()), 3))
for node_count, c_node in enumerate(self.imaging_brain.G.nodes()):
node = self.imaging_brain.G.node[c_node]['pair']
if prop_dict[self.expr_brain.G.node[node]['pair']]:
aa_mat[node_count, 0] = self.expr_brain.G.node[node][line['probe']]
aa_mat[node_count, 1] = prop_dict[c_node]
aa_mat[node_count, 2] = c_node
else:
aa_mat[node_count, :] = [np.nan, np.nan, np.nan]
aa_mat = aa_mat[~np.isnan(aa_mat)]
                aa_mat.shape = (len(aa_mat) // 3, 3)
r, p = stats.pearsonr(aa_mat[:, 0], aa_mat[:, 1])
if p < self.signif_value:
print(line['probe'])
# plot graph
out = open(out_file, "a")
out.writelines(', '.join([str(v) for v in [line['probe'],
'"' + probe_dict[line['probe']][1]+'"',
                                           r, p]])+'\n')
out.close()
if scatter_plot:
plt.scatter(aa_mat[:, 1], aa_mat[:, 0])
plt.savefig(out_file.replace('.txt', line['probe']+'.png'), dpi=300)
plt.close()
# save data
print("Saving data in :"+out_file.replace('.txt',
line['probe']+graph_metric_name+'.txt'))
dat_file = open(out_file.replace('.txt',
line['probe']+graph_metric_name+'.txt'), "wb")
dat_file.writelines(' '.join([line['probe'],
graph_metric_name,
"node", "subj"])+'\node_count')
dat_file.writelines('\node_count'.join([' '.join([str(aa_mat[node_count, 0]),
str(aa_mat[node_count, 1]),
str(aa_mat[node_count, 2]),
self.allen_subj]) for node_count in range(len(aa_mat[:, 1]))]))
dat_file.close()
else:
pass
def probesubt(self, line, prop_dict, probe_dict, graph_metric_name, out_file, probe_numbers=None):
'''
line is a line from the probe file.
The purpose of this function is to write thresholded data to a datafile
eg for use in ANOVA
'''
probe = line['probe']
dat_file = None
if probe_numbers:
if probe in probe_numbers:
# assign probe values to sample numbers
for node in self.expr_brain.G.nodes():
if line[str(node)]:
self.expr_brain.G.node[node][probe] = line[str(node)]
else:
self.expr_brain.G.node[node][probe] = None
out_dict = {probe:probe, 'subj':self.allen_subj}
for probe in list(prop_dict.keys()):
out_dict[probe] = prop_dict[probe]
if not dat_file:
headers = [probe, "subj"]
gm_subjs = list(prop_dict[list(probe_dict.keys())[0]].keys())
gm_subjs.sort()
headers.extend(gm_subjs)
dat_file = open(out_file.replace('.txt', probe+graph_metric_name+'.txt'), "wb")
writer = csv.DictWriter(dat_file, fieldnames=headers, delimiter=" ")
writer.writeheader()
writer.writerow(out_dict)
class multisubj:
"""
This object contains two 'brain' network objects, one for the imaging data and
one for the Allen data. Embedded functions make a comparison between the imaging
and Allen data by pairing nodes between the two network objects.
"""
def __init__(self,
assoc_matrix,
allen_dir="allen_data",
nodesToExclude=[],
delim=" ",
subj_list=None,
imaging_spatial_file="parcel_500.txt",
symmetrise=False,
convert_mni=True,
mirror=True):
self.allen_dir = allen_dir
if subj_list:
self.allen_subj_list = subj_list
else:
self.allen_subj_list = [path.basename(v) for v in glob(path.join(self.allen_dir, "17823*")) if path.isdir(path.join(self.allen_dir, v))]
self.annotation_file = "SampleAnnot.csv"
self.microarray_expression_file = "MicroarrayExpression.csv"
self.probe_file = "Probes.csv"
self.mirror = mirror
# if symmetrise is true then regions are identified by the structure name,
# if symmetrise is false, then regions are identified by the structural acronym
# these refer to headers in the SampleAnnot.csv file
if symmetrise:
self.s_lab = "structure_acronym"
else:
self.s_lab = "structure_name"
# set up brain for expression data
self.expr_brain = brain.Brain()
node_counter = 0
self.headers = {}
# dictionary storing the list of nodes for each structural sa_id by subject
# for use later in averaging across all subjects
self.s_id_dict = {}
for subj in self.allen_subj_list:
self.s_id_dict[subj] = {}
self.expr_brain = load_allen(subj,
self.allen_dir,
self.annotation_file,
self.s_lab,
convert_mni,
mirror,
node_counter,
input_brain=self.expr_brain)
node_counter = len(self.expr_brain.G.nodes())
# set up brain with graph properties
self.imaging_brain = brain.Brain()
self.imaging_brain.import_adj_file(assoc_matrix,
delimiter=delim,
nodes_to_exclude=nodesToExclude)
self.imaging_brain.import_spatial_info(imaging_spatial_file,
convert_mni=convert_mni)
def comparison(self):
"""
set up dictionary to link nodes from probe data and graph
keys are the mri nodes and values are dictionaries containing two keys:
        key 1 = 'allen', value = (id of the closest allen node, distance to that node)
        key 2 = 'MRIs', value = (id of the closest other mri node, distance to that node)
"""
imaging_node_dict = {}
# iterate through the imaging nodes and find the closest Allen brain nodes
for node in self.imaging_brain.G.nodes():
d_other = (None, 999.) # dummy length of 999
d_own = (None, 999.)
# iterate through the Allen nodes
for node_count in self.expr_brain.G.nodes():
# find the distance between the imaging and Allen nodes
dist = np.linalg.norm(np.array(self.imaging_brain.G.node[node]['xyz']
- np.array(self.expr_brain.G.node[node_count]['xyz'])))
if dist < d_other[1]:
d_other = (node_count, dist)
# now iterate through the imaging nodes to find the closest imaging node
for node_count in [v for v in self.imaging_brain.G.nodes() if not v == node]:
dist = np.linalg.norm(np.array(self.imaging_brain.G.node[node]['xyz']
- np.array(self.imaging_brain.G.node[node_count]['xyz'])))
if dist < d_own[1]:
d_own = (node_count, dist)
imaging_node_dict[node] = {"allen":d_other, "MRIs":d_own}
# iterate through the allen nodes to find the closest imaging node
allen_node_dict = {}
for node in self.expr_brain.G.nodes():
d_other = (None, 999.)
d_own = (None, 999.)
# iterate through the imaging nodes
for node_count in self.imaging_brain.G.nodes():
dist = np.linalg.norm(np.array(self.expr_brain.G.node[node]['xyz']
- np.array(self.imaging_brain.G.node[node_count]['xyz'])))
if dist < d_other[1]:
d_other = (node_count, dist)
            # iterate through the allen nodes to find the closest node
for node_count in [v for v in self.expr_brain.G.nodes() if not v == node]:
dist = np.linalg.norm(np.array(self.expr_brain.G.node[node]['xyz']
- np.array(self.expr_brain.G.node[node_count]['xyz'])))
if dist < d_own[1]:
d_own = (node_count, dist)
allen_node_dict[node] = {"allen":d_own, "MRIs":d_other}
node_pairs = [] # create a list of node pairs
# for each MRI node
for node in list(imaging_node_dict.keys()):
# find closest allen node 'node_count'
node_count = imaging_node_dict[node]['allen'][0]
self.imaging_brain.G.node[node]['pair'] = node_count
self.expr_brain.G.node[node_count]['pair'] = node
node_pairs.append((node, node_count))
for node in [v for v in self.expr_brain.G.nodes()]:
if 'pair' not in list(self.expr_brain.G.node[node].keys()):
self.expr_brain.G.remove_node(node)
def comparisonaveraged(self):
"""
This function should generate sets of nodes in the imaging data associated
with single nodes in the Allen data, ie all the closest imaging data nodes will
be associated with any specific Allen node.
"""
for node_count in self.expr_brain.G.nodes():
self.expr_brain.G.node[node_count]['pairNodes'] = []
        # iterate through imaging nodes to find the closest Allen node
for node in self.imaging_brain.G.nodes():
d_other = (None, 999.) # dummy length of 999
for node_count in self.expr_brain.G.nodes():
dist = np.linalg.norm(np.array(self.imaging_brain.G.node[node]['xyz']
- np.array(self.expr_brain.G.node[node_count]['xyz'])))
if dist < d_other[1]:
d_other = (node_count, dist)
self.expr_brain.G.node[d_other[0]]['pairNodes'].append(node)
for node in self.expr_brain.G.nodes():
if not self.expr_brain.G.node[node]['pairNodes']:
self.expr_brain.G.remove_node(node)
def probedata(self, probe_numbers=[], mean_vals=True):
"""
If mean_vals is specified, this takes the mean probe value across subjects
"""
for subj in self.allen_subj_list:
# import probe data
file = open(path.join(self.allen_dir, subj, self.microarray_expression_file), "r")
reader = csv.DictReader(file, delimiter=",",
fieldnames=self.headers[subj],
quotechar='"')
for line in reader:
probe = line['probe']
if probe in probe_numbers:
# assign probe values to sample numbers
for c_node in self.imaging_brain.G.nodes():
node = self.imaging_brain.G.node[c_node]['pair']
s_id = self.expr_brain.G.node[node][self.s_lab]
if not probe in list(self.expr_brain.G.node[node].keys()):
self.expr_brain.G.node[node][probe] = {}
if s_id in list(line.keys()):
self.expr_brain.G.node[node][probe][subj] = line[s_id]
file.close()
del reader
        # self.expr_brain.G.nodes is a dict containing every
        # UNIQUE structure id (across all subjects)
if mean_vals:
for node_count in self.expr_brain.G.nodes():
for probe in probe_numbers:
self.expr_brain.G.node[node_count][probe] = np.mean([float(v) for v in list(self.expr_brain.G.node[node_count][probe].values())])
def xmatrix(self, out_file="Xmatrix.csv", allen_dir="allen_data",
probe_numbers=None, temp_mat_name="tempMat.txt", st_dev=False,
st_dev_file="NodesSd.txt"):
"""
Needs writing
"""
# get all probes if otherwise unspecified
if not probe_numbers:
file = open(path.join(allen_dir, self.allen_subj_list[0], self.probe_file))
reader = csv.DictReader(file, delimiter=",", quotechar='"')
probe_numbers = [line['probe_id'] for line in reader]
del reader
file.close()
# set up out file
out = open(out_file, "wb")
headers = ["Gene"]
headers.extend([str(v) for v in self.imaging_brain.G.nodes()])
writer = csv.DictWriter(out, fieldnames=headers, delimiter=" ")
writer.writeheader()
# set up matrix
x = len(self.allen_subj_list)
y = len(probe_numbers)
z = len(self.imaging_brain.G.nodes())
# max numbers of nodes for any region
st = np.max([np.max([len(v) for v in list(self.s_id_dict[subj].values())]) for subj in list(self.s_id_dict.keys())])
probe_mat = np.memmap(temp_mat_name,
dtype="float64",
mode="w+",
shape=(y, z, st*x))
# set up gene list
gene_flag = True
probe_file = open(path.join(allen_dir, self.allen_subj_list[0], self.probe_file))
probe_reader = csv.DictReader(probe_file, delimiter=",", quotechar='"')
probe_dict = {line['probe_id']:line['gene_symbol'] for line in probe_reader}
gene_list = list(probe_dict.values())
set(gene_list)
gene_list = {gene:[] for gene in gene_list}
s_id_list = []
for subj in self.allen_subj_list:
s_id_list.extend([v for v in self.headers[subj] if not v == 'probe'])
s_id_list = set(s_id_list)
# get the corresponding node names in the MRI graph
        # c_nodes is a dict whose keys are all the MRI nodes and values are the matched allen nodes
#### PV modified line below which constructed c_nodes by looping through allen nodes
# but with PV's lax matching criteria several mri nodes can be matched to same allen node
# the mri pair of these allen nodes gets overwritten in self.expr_brain.G.nodes and so not
# all mri nodes will appear as pairs of allen nodes in this dict... need to look up pairs
# in self.imaging_brain.G.nodes instead, where each mri node is matched to an allen region
# c_nodes = {str(self.expr_brain.G.node[v]['pair']):v for v in self.expr_brain.G.nodes()}
c_nodes = {str(v):self.imaging_brain.G.node[v]['pair'] for v in self.imaging_brain.G.nodes()}
# assign values to matrix
for x, subj in enumerate(self.allen_subj_list):
print((str(subj)))
            print('\n')
            # Generate custom fieldnames list for DictReader which doesn't rely on structure_id
# *************************************
fieldnames_pv = ['probe']
my_node_dict = {}
temp_headers = self.headers[subj]
temp_headers.remove('probe')
for p, q in enumerate(self.headers[subj]):
my_node_dict[p] = q
fieldnames_pv.append(str(p)) #####
# *************************************
# import probe data
file = open(path.join(self.allen_dir, subj, self.microarray_expression_file), "r")
reader = csv.DictReader(file, delimiter=",",
fieldnames=fieldnames_pv,
quotechar='"')
y = 0
for line in reader:
probe = line['probe']
if probe in probe_numbers:
# assign probe values to sample numbers
for z, c_node in enumerate(self.imaging_brain.G.nodes()):
a_node = c_nodes[str(c_node)]
# *************************************
# Find structure_acronym corresponding to the matched allen node
acronym = self.expr_brain.G.node[a_node][self.s_lab]
# Initialise list for summing expression values for
# allen nodes with same structure_acronym
total_expression = []
# Loop over allen nodes to find all those with the correct acronym
# for the current MRI node and add their expression values to
# the list for averaging
s = 0
for sa_id, struct_ac in list(my_node_dict.items()):
if struct_ac == acronym:
# print(sa_id, struct_ac, line[str(sa_id)])
                                # print('\n')
# if line[str(sa_id)]:
total_expression.append(float(line[str(sa_id)]))
probe_mat[y, z, st*x + s] = float(line[str(sa_id)])
s += 1
if gene_flag:
# records the position of the probe in a dictionary with genes as a key
gene_list[probe_dict[probe]].append(y)
y += 1
file.close()
del reader
# get values to normalise expression levels for each probe within subject
for y in range(probe_mat.shape[0]):
# create a masked array removing the 0. values
subj_mat = np.ma.array(probe_mat[y, :, st*x:st*x+st],
mask=probe_mat[y, :, st*x:st*x+st] == 0.,
dtype="float64")
subj_mat = (subj_mat - np.mean(np.ma.array(subj_mat, mask=subj_mat == 0.))) / np.std(np.ma.array(subj_mat, mask=subj_mat == 0.))
probe_mat[y, :, st*x:st*x+st] = subj_mat
gene_flag = False
# collapse across subjects and probes by gene
gene_names = list(gene_list.keys())
gene_names.sort() # sort in to alphabetical order
# collapse across nodes within regions (averaging across all subjects)
sh = probe_mat.shape
probe_mat_temp = np.memmap("probe_mat_temp.txt", mode="w+", dtype="float64", shape=sh[:2])
# write out the standard deviation for each probe if specified
if st_dev:
st_dev_out = open(st_dev_file, "wb")
            st_dev_out.writelines("Probe Node st_dev\n")
for y in range(sh[0]):
for z in range(sh[1]):
# mask out unused values, ie where there are less than the maximum number
# of homolous nodes in a structural region
probe_mat_temp[y, z] = np.mean(np.ma.array(probe_mat[y, z],
mask=probe_mat[y, z] == 0.))
if st_dev:
std = np.std(np.ma.array(probe_mat[y, z], mask=probe_mat[y, z] == 0.))
st_dev_out.writelines(' '.join([str(int(probe_numbers[y])),
str(self.imaging_brain.G.nodes()[z]),
"{:2.5f}".format(float(std))])+'\node_count')
if st_dev:
st_dev_out.close()
# reassign the probe matrix and delete temporary memory-mapped file
probe_mat = probe_mat_temp
del probe_mat_temp
remove(temp_mat_name)
for gene in gene_names:
if gene_list[gene]:
x = probe_mat.shape[1] # number of nodes
y = len(gene_list[gene]) # number of probes
gene_mat = np.zeros(shape=(x, y), dtype="float64")
# nb: probe is the position of the probe recorded above
for node_count, probe in enumerate(gene_list[gene]):
gene_mat[:, node_count] = probe_mat[probe, :]
# collapse values across probes for each gene
mean_gene = np.mean(np.ma.array(gene_mat, mask=np.isnan(gene_mat)), axis=1)
out_dict = dict(list(zip([str(v) for v in self.imaging_brain.G.nodes()],
["{:10.20f}".format(v) for v in mean_gene])))
out_dict["Gene"] = gene
writer.writerow(out_dict)
out.close()
remove("probe_mat_temp.txt") # delete memory map file
def ymatrixgroup(self, metric_dict, subj="Control",
allen_dir="allen_data", out_file="YmatrixGroup.csv"):
'''
Collates metrics in to a matrix for use in partial least squares analysis.
Note, the metric_dict contains the metric name as a key and filename as
the value. Takes group level measures
'''
out = open(out_file, "wb")
headers = ["Metric"]
headers.extend([str(v) for v in self.imaging_brain.G.nodes()])
writer = csv.DictWriter(out, fieldnames=headers)
writer.writeheader()
# iterate through the metrics
for m in list(metric_dict.keys()):
file = open(path.join(allen_dir, subj, metric_dict[m]), "r")
reader = csv.DictReader(file, delimiter=" ")
line = next(reader)
# remove non-numeric keys
for v in list(line.keys()):
try:
int(v)
except ValueError:
del line[v]
m_dict = {v:line[v] for v in list(line.keys()) if int(v) in self.imaging_brain.G.nodes()}
file.close()
# normalise within metric
mean_metric = np.mean([float(v) for v in list(m_dict.values())])
sd_metric = np.std([float(v) for v in list(m_dict.values())])
m_dict = {str(v):(float(m_dict[str(v)])-mean_metric)/sd_metric for v in list(m_dict.keys())}
m_dict["Metric"] = m
writer.writerow(m_dict)
def ymatrixindividuals(self, metric_dict, subj_list,
allen_dir="allen_data",
out_file="YmatrixInd.csv"):
'''
Collates metrics in to a matrix for use in partial least squares analysis.
Note, the metric_dict contains the metric name as a key and filename as
the value. Takes metrics for individual subjects defined in the subject list.
'''
out = open(out_file, "wb")
headers = ["Metric", "Subject"]
headers.extend([str(v) for v in self.imaging_brain.G.nodes()])
writer = csv.DictWriter(out, fieldnames=headers)
writer.writeheader()
# iterate through the metrics
for m in list(metric_dict.keys()):
for subj in subj_list:
file = open(path.join(allen_dir, subj, metric_dict[m]), "r")
reader = csv.DictReader(file, delimiter=" ")
line = next(reader)
# remove non-numeric keys
for v in list(line.keys()):
try:
int(v)
except ValueError:
del line[v]
m_dict = {v:line[v] for v in list(line.keys()) if int(v) in self.imaging_brain.G.nodes()}
file.close()
# normalise within metric
mean_metric = np.mean([float(v) for v in list(m_dict.values())])
sd_metric = np.std([float(v) for v in list(m_dict.values())])
m_dict = {str(v):(float(m_dict[str(v)]) - mean_metric) / sd_metric for v in list(m_dict.keys())}
m_dict["Metric"] = m
m_dict["Subject"] = subj
writer.writerow(m_dict)
def load_allen(subject,
allen_dir,
annotation_file,
s_lab,
convert_mni,
mirror,
node_counter=0,
input_brain=None
):
"""
Function for loading the data from a single Allen Brain atlas subject.
subject = number of the allen subject
allen_dir = the directory where allen data is located
annotation_file = the file containing allen annotation, usually SampleAnnot.csv
s_lab = Structural label, dictated by whether the brain is being symmetrised
convert_mni = whether to convert allen data to MNI space
mirror = whether to mirror the values
    node_counter = offset used to number nodes, important if multiple subjects are being imported and compared
input_brain = existing brain object, useful if multiple subjects
"""
# import probe data
file = open(path.join(allen_dir, subject, annotation_file), "r")
reader = csv.DictReader(file, delimiter=",", quotechar='"')
# define brain object
if input_brain:
expr_brain = input_brain
else:
expr_brain = brain.Brain()
# import probe data
file = open(path.join(allen_dir, subject, annotation_file), "r")
reader = csv.DictReader(file, delimiter=",", quotechar='"')
for line in reader:
s_id = line[s_lab]
        expr_brain.G.add_node(node_counter)  # give nodes a unique incremental id (across all subjects)
brain.nx.set_node_attributes(expr_brain.G, {node_counter: line})
# store the structure_acronym/structure_name for the node
brain.nx.set_node_attributes(expr_brain.G, {node_counter: {'s_id': s_id}})
node_counter += 1
# store structure_acronym or structure_name depending on symmetrise
# headers.append(s_id)
# if not s_id in list(s_id_dict.keys()):
# s_id_dict[s_id] = [node_count]
# else:
# s_id_dict[s_id].append(node_count)
file.close()
if convert_mni:
# convert location data for Allen brain from MNI space
for node_count in expr_brain.G.nodes():
x = 45 - (float(expr_brain.G.node[node_count]['mni_x'])/2)
y = 63 + (float(expr_brain.G.node[node_count]['mni_y'])/2)
z = 36 + (float(expr_brain.G.node[node_count]['mni_z'])/2)
expr_brain.G.node[node_count]['xyz'] = (x, y, z)
midline = 45 # midline for mirroring nodes
conv = "neuro"
else:
for node_count in expr_brain.G.nodes():
x = float(expr_brain.G.node[node_count]['mni_x'])
y = float(expr_brain.G.node[node_count]['mni_y'])
z = float(expr_brain.G.node[node_count]['mni_z'])
expr_brain.G.node[node_count]['xyz'] = (x, y, z)
midline = 0 # midline for mirroring nodes
conv = "radio"
# copy hemisphere if required
if mirror and len(expr_brain.G.nodes()) < 600:
expr_brain.copy_hemisphere(hsphere="L", midline=midline,
conv=conv)
return(expr_brain)
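# Hedged usage sketch (the subject ID, file names and gene below are
# assumptions for illustration, not values required by this module): pair each
# imaging node with its nearest Allen sample, then correlate a nodal metric
# such as degree against expression for a gene of interest.
def _example_allen_usage():
    ab = AllenBrain("178236545",
                    "association_matrix.txt",
                    allen_dir="allen_data",
                    imaging_spatial_file="atlas471_xyz_flip_xy.txt")
    ab.comparison(max_dist=10.0)  # discard pairs more than 10 units apart
    degree = dict(ab.imaging_brain.G.degree())  # nodal property keyed by imaging node
    ab.probedata(degree, graph_metric_name="degree", probe_list=["MAOA"])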
|
the-stack_0_10570 | import pytest # type: ignore
from openstates.cli.update import override_settings
class _Settings:
pass
@pytest.fixture
def settings():
ret = _Settings()
ret.foo = "bar"
ret.baz = "bob"
return ret
def test_override_settings(settings):
with override_settings(settings, {"baz": "fez"}):
assert settings.foo == "bar"
assert settings.baz == "fez"
assert settings.foo == "bar"
assert settings.baz == "bob"
def test_override_settings_unset(settings):
with override_settings(settings, {"qux": "fez"}):
assert settings.qux == "fez"
assert not hasattr(settings, "qux")
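# Hedged sketch: the tests above imply a context manager roughly like the one
# below (apply the overrides for the duration of the block, then restore prior
# values and delete attributes that did not exist before). This is an
# illustrative reimplementation, not the actual openstates implementation.
from contextlib import contextmanager
@contextmanager
def _override_settings_sketch(settings_obj, overrides):
    originals, added = {}, []
    for key, value in overrides.items():
        if hasattr(settings_obj, key):
            originals[key] = getattr(settings_obj, key)
        else:
            added.append(key)
        setattr(settings_obj, key, value)
    try:
        yield settings_obj
    finally:
        for key, value in originals.items():
            setattr(settings_obj, key, value)
        for key in added:
            delattr(settings_obj, key)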
|
the-stack_0_10573 | import os
from saleor.account import models
import secrets
from itertools import chain
from typing import Iterable, Tuple, Union
import graphene
from django.core.exceptions import (
NON_FIELD_ERRORS,
ImproperlyConfigured,
ValidationError,
)
from django.core.files.storage import default_storage
from django.db.models.fields.files import FileField
from graphene import ObjectType
from graphene.types.mutation import MutationOptions
from graphene_django.registry import get_global_registry
from graphql.error import GraphQLError
from ...core.exceptions import PermissionDenied
from ...core.permissions import AccountPermissions
from ..decorators import staff_member_or_app_required
from ..utils import get_nodes, resolve_global_ids_to_primary_keys
from .descriptions import DEPRECATED_IN_3X_FIELD
from .types import File, Upload
from .types.common import UploadError
from .utils import from_global_id_or_error, snake_to_camel_case
from .utils.error_codes import get_error_code_from_error
registry = get_global_registry()
def get_model_name(model):
"""Return name of the model with first letter lowercase."""
model_name = model.__name__
return model_name[:1].lower() + model_name[1:]
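# For example, get_model_name(models.User) returns "user", and a model class
# named ProductVariant would yield "productVariant".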
def get_error_fields(error_type_class, error_type_field, deprecation_reason=None):
error_field = graphene.Field(
graphene.List(
graphene.NonNull(error_type_class),
description="List of errors that occurred executing the mutation.",
),
default_value=[],
required=True,
)
if deprecation_reason is not None:
error_field.deprecation_reason = deprecation_reason
return {error_type_field: error_field}
def validation_error_to_error_type(
validation_error: ValidationError, error_type_class
) -> list:
"""Convert a ValidationError into a list of Error types."""
err_list = []
error_class_fields = set(error_type_class._meta.fields.keys())
if hasattr(validation_error, "error_dict"):
# convert field errors
for field, field_errors in validation_error.error_dict.items():
field = None if field == NON_FIELD_ERRORS else snake_to_camel_case(field)
for err in field_errors:
error = error_type_class(
field=field,
message=err.messages[0],
code=get_error_code_from_error(err),
)
attach_error_params(error, err.params, error_class_fields)
err_list.append(error)
else:
# convert non-field errors
for err in validation_error.error_list:
error = error_type_class(
message=err.messages[0],
code=get_error_code_from_error(err),
)
attach_error_params(error, err.params, error_class_fields)
err_list.append(error)
return err_list
def attach_error_params(error, params: dict, error_class_fields: set):
if not params:
return {}
# If some of the params key overlap with error class fields
# attach param value to the error
error_fields_in_params = set(params.keys()) & error_class_fields
for error_field in error_fields_in_params:
setattr(error, error_field, params[error_field])
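# Hedged sketch (names are illustrative): how validation_error_to_error_type
# flattens a field-keyed ValidationError into graphene error objects. The
# _SketchError type below only mimics the field/message/code shape of the
# generated error types and is not part of Saleor's schema.
def _example_error_conversion():
    class _SketchError(ObjectType):
        field = graphene.String()
        message = graphene.String()
        code = graphene.String()
    error = ValidationError(
        {"name": ValidationError("This field is required.", code="required")}
    )
    return validation_error_to_error_type(error, _SketchError)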
class ModelMutationOptions(MutationOptions):
exclude = None
model = None
return_field_name = None
class BaseMutation(graphene.Mutation):
class Meta:
abstract = True
@classmethod
def __init_subclass_with_meta__(
cls,
description=None,
permissions: Tuple = None,
_meta=None,
error_type_class=None,
error_type_field=None,
errors_mapping=None,
**options,
):
if not _meta:
_meta = MutationOptions(cls)
if not description:
raise ImproperlyConfigured("No description provided in Meta")
if not error_type_class:
raise ImproperlyConfigured("No error_type_class provided in Meta.")
if isinstance(permissions, str):
permissions = (permissions,)
if permissions and not isinstance(permissions, tuple):
raise ImproperlyConfigured(
"Permissions should be a tuple or a string in Meta"
)
_meta.permissions = permissions
_meta.error_type_class = error_type_class
_meta.error_type_field = error_type_field
_meta.errors_mapping = errors_mapping
super().__init_subclass_with_meta__(
description=description, _meta=_meta, **options
)
if error_type_field:
deprecated_msg = f"{DEPRECATED_IN_3X_FIELD} Use `errors` field instead."
cls._meta.fields.update(
get_error_fields(
error_type_class,
error_type_field,
deprecated_msg,
)
)
cls._meta.fields.update(get_error_fields(error_type_class, "errors"))
@classmethod
def _update_mutation_arguments_and_fields(cls, arguments, fields):
cls._meta.arguments.update(arguments)
cls._meta.fields.update(fields)
@classmethod
def _get_node_by_pk(
cls, info, graphene_type: ObjectType, pk: Union[int, str], qs=None
):
"""Attempt to resolve a node from the given internal ID.
Whether by using the provided query set object or by calling type's get_node().
"""
if qs is not None:
return qs.filter(pk=pk).first()
get_node = getattr(graphene_type, "get_node", None)
if get_node:
return get_node(info, pk)
return None
@classmethod
def get_global_id_or_error(
cls, id: str, only_type: Union[ObjectType, str] = None, field: str = "id"
):
try:
_object_type, pk = from_global_id_or_error(id, only_type, raise_error=True)
except GraphQLError as e:
raise ValidationError(
{field: ValidationError(str(e), code="graphql_error")}
)
return pk
@classmethod
def get_node_or_error(cls, info, node_id, field="id", only_type=None, qs=None):
if not node_id:
return None
try:
object_type, pk = from_global_id_or_error(
node_id, only_type, raise_error=True
)
if isinstance(object_type, str):
object_type = info.schema.get_type(object_type).graphene_type
node = cls._get_node_by_pk(info, graphene_type=object_type, pk=pk, qs=qs)
except (AssertionError, GraphQLError) as e:
raise ValidationError(
{field: ValidationError(str(e), code="graphql_error")}
)
else:
if node is None:
raise ValidationError(
{
field: ValidationError(
"Couldn't resolve to a node: %s" % node_id, code="not_found"
)
}
)
return node
@classmethod
def get_global_ids_or_error(
cls,
ids: Iterable[str],
only_type: Union[ObjectType, str] = None,
field: str = "ids",
):
try:
_nodes_type, pks = resolve_global_ids_to_primary_keys(
ids, only_type, raise_error=True
)
except GraphQLError as e:
raise ValidationError(
{field: ValidationError(str(e), code="graphql_error")}
)
return pks
@classmethod
def get_nodes_or_error(cls, ids, field, only_type=None, qs=None):
try:
instances = get_nodes(ids, only_type, qs=qs)
except GraphQLError as e:
raise ValidationError(
{field: ValidationError(str(e), code="graphql_error")}
)
return instances
@staticmethod
def remap_error_fields(validation_error, field_map):
"""Rename validation_error fields according to provided field_map.
Skips renaming fields from field_map that are not on validation_error.
"""
for old_field, new_field in field_map.items():
try:
validation_error.error_dict[
new_field
] = validation_error.error_dict.pop(old_field)
except KeyError:
pass
@classmethod
def clean_instance(cls, info, instance):
"""Clean the instance that was created using the input data.
Once an instance is created, this method runs `full_clean()` to perform
model validation.
"""
try:
instance.full_clean()
except ValidationError as error:
if hasattr(cls._meta, "exclude"):
# Ignore validation errors for fields that are specified as
# excluded.
new_error_dict = {}
for field, errors in error.error_dict.items():
if field not in cls._meta.exclude:
new_error_dict[field] = errors
error.error_dict = new_error_dict
if cls._meta.errors_mapping:
cls.remap_error_fields(error, cls._meta.errors_mapping)
if error.error_dict:
raise error
@classmethod
def construct_instance(cls, instance, cleaned_data):
"""Fill instance fields with cleaned data.
The `instance` argument is either an empty instance of a already
existing one which was fetched from the database. `cleaned_data` is
data to be set in instance fields. Returns `instance` with filled
fields, but not saved to the database.
"""
from django.db import models
opts = instance._meta
for f in opts.fields:
if any(
[
not f.editable,
isinstance(f, models.AutoField),
f.name not in cleaned_data,
]
):
continue
data = cleaned_data[f.name]
if data is None:
# We want to reset the file field value when None was passed
# in the input, but `FileField.save_form_data` ignores None
# values. In that case we manually pass False which clears
# the file.
if isinstance(f, FileField):
data = False
if not f.null:
data = f._get_default()
f.save_form_data(instance, data)
return instance
@classmethod
def check_permissions(cls, context, permissions=None):
"""Determine whether user or app has rights to perform this mutation.
Default implementation assumes that account is allowed to perform any
mutation. By overriding this method or defining required permissions
in the meta-class, you can restrict access to it.
The `context` parameter is the Context instance associated with the request.
"""
permissions = permissions or cls._meta.permissions
if not permissions:
return True
user: models.User = context.user
if user.has_perms(permissions):
return True
app = getattr(context, "app", None)
if app:
# for now MANAGE_STAFF permission for app is not supported
if AccountPermissions.MANAGE_STAFF in permissions:
return False
return app.has_perms(permissions)
return False
@classmethod
def mutate(cls, root, info, **data):
if not cls.check_permissions(info.context):
raise PermissionDenied()
try:
response = cls.perform_mutation(root, info, **data)
if response.errors is None:
response.errors = []
return response
except ValidationError as e:
return cls.handle_errors(e)
@classmethod
def perform_mutation(cls, root, info, **data):
pass
@classmethod
def handle_errors(cls, error: ValidationError, **extra):
error_list = validation_error_to_error_type(error, cls._meta.error_type_class)
return cls.handle_typed_errors(error_list, **extra)
@classmethod
def handle_typed_errors(cls, errors: list, **extra):
"""Return class instance with errors."""
if cls._meta.error_type_field is not None:
extra.update({cls._meta.error_type_field: errors})
return cls(errors=errors, **extra)
class ModelMutation(BaseMutation):
class Meta:
abstract = True
@classmethod
def __init_subclass_with_meta__(
cls,
arguments=None,
model=None,
exclude=None,
return_field_name=None,
_meta=None,
**options,
):
if not model:
raise ImproperlyConfigured("model is required for ModelMutation")
if not _meta:
_meta = ModelMutationOptions(cls)
if exclude is None:
exclude = []
if not return_field_name:
return_field_name = get_model_name(model)
if arguments is None:
arguments = {}
_meta.model = model
_meta.return_field_name = return_field_name
_meta.exclude = exclude
super().__init_subclass_with_meta__(_meta=_meta, **options)
model_type = cls.get_type_for_model()
if not model_type:
raise ImproperlyConfigured(
"Unable to find type for model %s in graphene registry" % model.__name__
)
fields = {return_field_name: graphene.Field(model_type)}
cls._update_mutation_arguments_and_fields(arguments=arguments, fields=fields)
@classmethod
def clean_input(cls, info, instance, data, input_cls=None):
"""Clean input data received from mutation arguments.
Fields containing IDs or lists of IDs are automatically resolved into
model instances. `instance` argument is the model instance the mutation
is operating on (before setting the input data). `input` is raw input
data the mutation receives.
Override this method to provide custom transformations of incoming
data.
"""
def is_list_of_ids(field):
if isinstance(field.type, graphene.List):
of_type = field.type.of_type
if isinstance(of_type, graphene.NonNull):
of_type = of_type.of_type
return of_type == graphene.ID
return False
def is_id_field(field):
return (
field.type == graphene.ID
or isinstance(field.type, graphene.NonNull)
and field.type.of_type == graphene.ID
)
def is_upload_field(field):
if hasattr(field.type, "of_type"):
return field.type.of_type == Upload
return field.type == Upload
if not input_cls:
input_cls = getattr(cls.Arguments, "input")
cleaned_input = {}
for field_name, field_item in input_cls._meta.fields.items():
if field_name in data:
value = data[field_name]
# handle list of IDs field
if value is not None and is_list_of_ids(field_item):
instances = (
cls.get_nodes_or_error(value, field_name) if value else []
)
cleaned_input[field_name] = instances
# handle ID field
elif value is not None and is_id_field(field_item):
instance = cls.get_node_or_error(info, value, field_name)
cleaned_input[field_name] = instance
# handle uploaded files
elif value is not None and is_upload_field(field_item):
value = info.context.FILES.get(value)
cleaned_input[field_name] = value
# handle other fields
else:
cleaned_input[field_name] = value
return cleaned_input
@classmethod
def _save_m2m(cls, info, instance, cleaned_data):
opts = instance._meta
for f in chain(opts.many_to_many, opts.private_fields):
if not hasattr(f, "save_form_data"):
continue
if f.name in cleaned_data and cleaned_data[f.name] is not None:
f.save_form_data(instance, cleaned_data[f.name])
@classmethod
def success_response(cls, instance):
"""Return a success response."""
return cls(**{cls._meta.return_field_name: instance, "errors": []})
@classmethod
def save(cls, info, instance, cleaned_input):
instance.save()
@classmethod
def get_type_for_model(cls):
return registry.get_type_for_model(cls._meta.model)
@classmethod
def get_instance(cls, info, **data):
"""Retrieve an instance from the supplied global id.
The expected graphene type can be lazy (str).
"""
object_id = data.get("id")
qs = data.get("qs")
if object_id:
model_type = cls.get_type_for_model()
instance = cls.get_node_or_error(
info, object_id, only_type=model_type, qs=qs
)
else:
instance = cls._meta.model()
return instance
@classmethod
def post_save_action(cls, info, instance, cleaned_input):
"""Perform an action after saving an object and its m2m."""
pass
@classmethod
def perform_mutation(cls, _root, info, **data):
"""Perform model mutation.
Depending on the input data, `mutate` either creates a new instance or
updates an existing one. If `id` argument is present, it is assumed
that this is an "update" mutation. Otherwise, a new instance is
created based on the model associated with this mutation.
"""
instance = cls.get_instance(info, **data)
data = data.get("input")
cleaned_input = cls.clean_input(info, instance, data)
instance = cls.construct_instance(instance, cleaned_input)
cls.clean_instance(info, instance)
cls.save(info, instance, cleaned_input)
cls._save_m2m(info, instance, cleaned_input)
cls.post_save_action(info, instance, cleaned_input)
return cls.success_response(instance)
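# Hedged sketch (commented out; the input, model, permission and error types
# named below are illustrative, not classes defined in this module): a concrete
# mutation typically subclasses ModelMutation and supplies an input argument
# plus Meta.model and Meta.error_type_class, which __init_subclass_with_meta__
# wires into the schema.
#
# class StaffNoteCreate(ModelMutation):
#     class Arguments:
#         input = StaffNoteCreateInput(required=True, description="Fields required.")
#     class Meta:
#         description = "Creates a staff note."
#         model = models.StaffNote
#         permissions = (AccountPermissions.MANAGE_STAFF,)
#         error_type_class = StaffNoteError
#         error_type_field = "staff_note_errors"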
class ModelDeleteMutation(ModelMutation):
class Meta:
abstract = True
@classmethod
def clean_instance(cls, info, instance):
"""Perform additional logic before deleting the model instance.
Override this method to raise custom validation error and abort
the deletion process.
"""
@classmethod
def perform_mutation(cls, _root, info, **data):
"""Perform a mutation that deletes a model instance."""
if not cls.check_permissions(info.context):
raise PermissionDenied()
node_id = data.get("id")
model_type = cls.get_type_for_model()
instance = cls.get_node_or_error(info, node_id, only_type=model_type)
if instance:
cls.clean_instance(info, instance)
db_id = instance.id
instance.delete()
# After the instance is deleted, set its ID to the original database's
# ID so that the success response contains ID of the deleted object.
instance.id = db_id
return cls.success_response(instance)
class BaseBulkMutation(BaseMutation):
count = graphene.Int(
required=True, description="Returns how many objects were affected."
)
class Meta:
abstract = True
@classmethod
def __init_subclass_with_meta__(cls, model=None, _meta=None, **kwargs):
if not model:
raise ImproperlyConfigured("model is required for bulk mutation")
if not _meta:
_meta = ModelMutationOptions(cls)
_meta.model = model
super().__init_subclass_with_meta__(_meta=_meta, **kwargs)
@classmethod
def clean_instance(cls, info, instance):
"""Perform additional logic.
Override this method to raise custom validation error and prevent
bulk action on the instance.
"""
@classmethod
def bulk_action(cls, info, queryset, **kwargs):
"""Implement action performed on queryset."""
raise NotImplementedError
@classmethod
def perform_mutation(cls, _root, info, ids, **data):
"""Perform a mutation that deletes a list of model instances."""
clean_instance_ids, errors = [], {}
# Allow to pass empty list for dummy mutation
if not ids:
return 0, errors
instance_model = cls._meta.model
model_type = registry.get_type_for_model(instance_model)
try:
instances = cls.get_nodes_or_error(ids, "id", model_type)
except ValidationError as error:
return 0, error
for instance, node_id in zip(instances, ids):
instance_errors = []
# catch individual validation errors to raise them later as
# a single error
try:
cls.clean_instance(info, instance)
except ValidationError as e:
msg = ". ".join(e.messages)
instance_errors.append(msg)
if not instance_errors:
clean_instance_ids.append(instance.pk)
else:
instance_errors_msg = ". ".join(instance_errors)
ValidationError({node_id: instance_errors_msg}).update_error_dict(
errors
)
if errors:
errors = ValidationError(errors)
count = len(clean_instance_ids)
if count:
qs = instance_model.objects.filter(pk__in=clean_instance_ids)
cls.bulk_action(info=info, queryset=qs, **data)
return count, errors
@classmethod
def mutate(cls, root, info, **data):
if not cls.check_permissions(info.context):
raise PermissionDenied()
count, errors = cls.perform_mutation(root, info, **data)
if errors:
return cls.handle_errors(errors, count=count)
return cls(errors=errors, count=count)
class ModelBulkDeleteMutation(BaseBulkMutation):
class Meta:
abstract = True
@classmethod
def bulk_action(cls, info, queryset):
queryset.delete()
class FileUpload(BaseMutation):
uploaded_file = graphene.Field(File)
class Arguments:
file = Upload(
required=True, description="Represents a file in a multipart request."
)
class Meta:
description = (
"Upload a file. This mutation must be sent as a `multipart` "
"request. More detailed specs of the upload format can be found here: "
"https://github.com/jaydenseric/graphql-multipart-request-spec"
)
error_type_class = UploadError
error_type_field = "upload_errors"
@classmethod
@staff_member_or_app_required
def perform_mutation(cls, _root, info, **data):
file_data = info.context.FILES.get(data["file"])
# add unique text fragment to the file name to prevent file overriding
file_name, format = os.path.splitext(file_data._name)
hash = secrets.token_hex(nbytes=4)
new_name = f"file_upload/{file_name}_{hash}{format}"
path = default_storage.save(new_name, file_data.file)
return FileUpload(
uploaded_file=File(url=path, content_type=file_data.content_type)
)
|
the-stack_0_10574 | import librosa
import numpy as np
from .utils import Util
from sklearn.preprocessing import StandardScaler
import pandas as pd
from numpy.linalg import det
from multiprocessing.dummy import Pool as ThreadPool
#from multiprocessing import Pool
from concurrent.futures import ThreadPoolExecutor
class FeatureAggregator(object):
def __init__(self, data, parallel=False):
self.data = data
self.parallel = parallel
def get_features(self, feature_to_extract=None):
generators = [MFCC, ZeroCrossing, CentroidsRolloff, Rhythm, Energy]
MFCC_names = ['MFCC_' + str(i) for i in range(1,21)] + ['det_cov_MFCC']
statistics = ['median', 'mean', 'max', 'min', 'std']
ZRC_names = ['ZRC_' + statistic for statistic in statistics]
Centroids_names = ['Centroids_' + statistic for statistic in statistics]
Rolloff_names = ['Rolloff_' + statistic for statistic in statistics]
Rhythm_names = ['tempo'] + ['AutoCorr_mean'] + ['AutoCorr_std'] +\
['Tempogram_' + statistic for statistic in statistics]
Energy_names = ['Chroma_' + str(i) for i in range(1,13)] + ['Low_energy'] +\
['Flux' + statistic for statistic in statistics]
if self.parallel:
with ThreadPoolExecutor(len(generators)) as pool:
parallel_results = pool.map(self.generate, generators)
result = np.hstack(parallel_results)
else:
result = [np.hstack([generator(self.data).generate()
for generator in generators])]
return (result, MFCC_names + ZRC_names + Centroids_names +
Rolloff_names + Rhythm_names + Energy_names)
def generate(self, generator):
result = generator(self.data).generate()
print("Done ", generator)
return result
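# Usage sketch (illustrative; ``songs`` is assumed to be a list of 1-D audio signals,
# e.g. loaded with ``librosa.load``). With the default ``parallel=False`` the feature
# matrix comes back as the single element of a list:
#
#     songs = [librosa.load(path)[0] for path in ("a.wav", "b.wav")]
#     matrix, names = FeatureAggregator(songs).get_features()
#     df = pd.DataFrame(matrix[0], columns=names)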
class FeatureExtractor(object):
def __init__(self, data, verbose=True, sr=22050):
self.data = data
self.util = Util()
self.verbose = verbose
self.sr = sr
def generate(self):
raise NotImplementedError()
class MFCC(FeatureExtractor):
def __init__(self, data, sr=22050, n_mfcc=20):
self.n_mfcc = n_mfcc
        super().__init__(data, sr=sr)
def generate(self):
mfcc_means = np.empty((len(self.data), self.n_mfcc + 1))
# Generate mfcc means matrix MxN_MFCC
for i, song in enumerate(self.data):
if self.verbose and i % 100 == 0:
print("Got mfcc for {0} songs".format(i))
mfcc = librosa.feature.mfcc(song, sr=self.sr, n_mfcc=self.n_mfcc)
#mfcc_scaled = self.std_scaler.fit_transform(mfcc)
mfcc_mean = mfcc.mean(axis=1)
mfcc_means[i] = np.append(mfcc_mean, det(
np.cov(mfcc, rowvar=True)))
return mfcc_means
class ZeroCrossing(FeatureExtractor):
def generate(self):
zero_crossing_rates = np.empty((len(self.data), 5))
for i, song in enumerate(self.data):
if self.verbose and i % 100 == 0:
print("Got zero_cross_rate for {0} songs".format(i))
zcr = librosa.feature.zero_crossing_rate(song)
features = self.util.vector_to_features(zcr)
zero_crossing_rates[i] = features
return zero_crossing_rates
class CentroidsRolloff(FeatureExtractor):
def generate(self):
""" Generate centroids, rolloff features """
centroid_meanstd = np.empty((len(self.data), 5))
rolloff_meanstd = np.empty((len(self.data), 5))
for i, song in enumerate(self.data):
if self.verbose and i % 100 == 0:
print("Got centroid data for {0} songs".format(i))
cent = librosa.feature.spectral_centroid(y=song, sr=self.sr)
centroid_features = self.util.vector_to_features(cent)
centroid_meanstd[i] = centroid_features
rolloff = librosa.feature.spectral_rolloff(
y=song, sr=self.sr, roll_percent=0.85)[0]
rolloff_features = self.util.vector_to_features(rolloff)
rolloff_meanstd[i, :] = rolloff_features
result = np.hstack([centroid_meanstd, rolloff_meanstd])
return result
class Rhythm(FeatureExtractor):
def generate(self):
rhythm_bpm = np.empty((len(self.data), 8))
for i, song in enumerate(self.data):
if self.verbose and i % 100 == 0:
print("Got rhythm data for {0} songs".format(i))
oenv = librosa.onset.onset_strength(y=song, sr=self.sr)
# tempo = librosa.beat.tempo(onset_envelope=onset_env, sr=self.sr)
# dtempo = librosa.beat.tempo(
# onset_envelope=onset_env, sr=self.sr, aggregate=None)
tempogram = librosa.feature.tempogram(
onset_envelope=oenv, sr=self.sr)
tempogram_features = self.util.vector_to_features(tempogram)
ac_global = librosa.autocorrelate(
oenv, max_size=tempogram.shape[0])
ac_global = librosa.util.normalize(ac_global)
tempo = librosa.beat.tempo(onset_envelope=oenv, sr=self.sr)
rhythm_bpm[i] = np.hstack([tempo, ac_global.mean(), ac_global.std(), tempogram_features])
return rhythm_bpm
class Energy(FeatureExtractor):
def generate(self):
flux_meanstd = np.empty((len(self.data), 5))
low_energy = np.empty((len(self.data), 1))
chroma_feature = np.empty((len(self.data), 12))
for i, song in enumerate(self.data):
if self.verbose and i % 100 == 0:
print("Got rmse data for {0} songs".format(i))
rmse = librosa.feature.rmse(y=song)
mean_rmse = rmse.mean()
            low_energy[i] = np.sum(rmse <= mean_rmse) / rmse.size  # fraction of frames below the mean RMS
max_rmse = rmse.max()
rmse = np.where(rmse == 0, 1, rmse)
D = librosa.stft(y=song) / rmse
flux = np.sqrt((np.abs((D[:, 1:] - D[:, :-1]))**2).sum(axis=0))
flux_features = self.util.vector_to_features(flux)
flux_meanstd[i] = flux_features
chroma = librosa.feature.chroma_cens(y=song, sr=self.sr)
chroma_feature[i] = chroma.mean(axis=1)
result = np.hstack([chroma_feature, low_energy, flux_meanstd])
        return result
the-stack_0_10575 | ##################################################################################
# name: classifier
# file: sort.py
# date: 11-05-2021
# author: @nilspinnau, Nils Pinnau
# description: file to split the data into training + validating and testing set
##################################################################################
import os
import argparse
import csv
import random
parser = argparse.ArgumentParser(
description="Sorting class folders for training + validating + testing")
parser.add_argument("--set", help="set to sort")
parser.add_argument(
"--dir", help="root dir where char images are located")
parser.add_argument(
"--val_size", help="size in decimal (1.0 - 0.0) of val set")
parser.add_argument(
"--max_size", type=int, help="max size of elements in each set")
parser.add_argument(
"--remove", action="store_true", help="remove wrong images")
def main():
args = parser.parse_args()
class_set = "set"
if args.set:
class_set = args.set
root_dir = "./"
if args.dir:
root_dir = args.dir
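    # NOTE: ``remove`` below is not defined in this file; it is assumed to be a helper
    # (living elsewhere) that deletes the "wrong" images referred to by the --remove flag.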
if args.remove:
remove(root_dir)
return
val_size = 0.2
if args.val_size:
val_size = args.val_size
labels = os.listdir(os.path.join(root_dir, class_set))
val_csv = open(os.path.join(root_dir, class_set, "val.csv"), "w")
train_csv = open(os.path.join(root_dir, class_set, "train.csv"), "w")
test_csv = open(os.path.join(root_dir, class_set, "test.csv"), "w")
test_csv = csv.writer(test_csv)
val_csv = csv.writer(val_csv)
train_csv = csv.writer(train_csv)
# high performance gain by only iterating once over the images and not 4 times ?!
for i in labels:
# separate the images
images = [os.path.relpath(os.path.join(root_dir, class_set, str(i), p)) for p in os.listdir(os.path.join(
root_dir, class_set, str(i))) if p.endswith(('jpg', 'png'))]
# randomize the images for validation and trainset
j = 0
to_test = args.max_size/len(images)
for img in images:
if (j < 0.1 * len(images)):
test_csv.writerow([img, str(i)])
elif (random.random() > to_test):
test_csv.writerow([img, str(i)])
continue
j += 1
if random.random() < float(val_size):
val_csv.writerow([img, str(i)])
else:
train_csv.writerow([img, str(i)])
return 0
if __name__ == "__main__":
main()
the-stack_0_10577 | from minqlx import Plugin, thread, next_frame
import time
import random
import threading
class thirtysecwarn(Plugin):
"""Created by Thomas Jones on 01/09/2016 - [email protected]
thirtysecwarn.py - a minqlx plugin to play unused VO when a CA game is nearing the round time limit.
This plugin is released to everyone, for any purpose. It comes with no warranty, no guarantee it works, it's
released AS IS.
You can modify everything, except for lines 1-4 and the !tomtec_versions code. They're there to indicate I whacked this
together originally. Please make it better :D
    Completely rebuilt by iouonegirl and Gelenkbusfahrer on 25/09/2017, customization of sounds and unit tests added by
    ShiN0 sometime in October 2017
"""
def __init__(self):
super().__init__()
self.add_hook("round_start", self.handle_round_start)
self.add_hook("round_end", self.handle_round_end)
self.add_hook("game_start", self.handle_game_start)
self.set_cvar_once("qlx_thirtySecondWarnAnnouncer", "standard")
self.announcerMap = {
"standard": "sound/vo/30_second_warning.ogg",
"female": "sound/vo_female/30_second_warning.ogg",
"evil": "sound/vo_evil/30_second_warning.ogg"
}
self.warner_thread_name = None
def handle_game_start(self, game):
self.warner_thread_name = None
def handle_round_end(self, data):
self.warner_thread_name = None
def handle_round_start(self, round_number):
self.warntimer()
@thread
def warntimer(self):
warner_thread_name = threading.current_thread().name
self.warner_thread_name = warner_thread_name
timer_delay = self.get_cvar("roundtimelimit", int) - 30
time.sleep(timer_delay)
self.play_thirty_second_warning(warner_thread_name)
@next_frame
def play_thirty_second_warning(self, warner_thread_name):
if not self.game:
return
if not self.game.type_short == "ca":
return
if not self.game.state == "in_progress":
return
if not self.warner_thread_name == warner_thread_name:
return
# passed all conditions, play sound
Plugin.play_sound(self.get_announcer_sound())
def get_announcer_sound(self):
qlx_thirtySecondWarnAnnouncer = self.get_cvar("qlx_thirtySecondWarnAnnouncer")
if qlx_thirtySecondWarnAnnouncer == "random":
return self.random_announcer()
if qlx_thirtySecondWarnAnnouncer not in self.announcerMap:
qlx_thirtySecondWarnAnnouncer = "standard"
return self.announcerMap[qlx_thirtySecondWarnAnnouncer]
def random_announcer(self):
key, sound = random.choice(list(self.announcerMap.items()))
return sound
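# Configuration sketch (illustrative; any unrecognised value falls back to the standard
# announcer in get_announcer_sound() above). In the server config one would set e.g.:
#
#     set qlx_thirtySecondWarnAnnouncer "random"   // or "standard", "female", "evil"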
the-stack_0_10578 | from appdirs import user_config_dir, site_config_dir, user_cache_dir
import os
import platform
APP_NAME = "xicam"
APP_AUTHOR = "CAMERA"
user_cache_dir = user_cache_dir(appname=APP_NAME, appauthor=APP_AUTHOR)
site_config_dir = site_config_dir(appname=APP_NAME, appauthor=APP_AUTHOR)
user_config_dir = user_config_dir(appname=APP_NAME, appauthor=APP_AUTHOR)
user_dev_dir = os.path.expanduser("~/Xi-cam/plugins")
op_sys = platform.system()
if op_sys == "Darwin": # User config dir incompatible with venv on darwin (space in path name conflicts)
user_plugin_dir = os.path.join(user_cache_dir, "plugins")
else:
user_plugin_dir = os.path.join(user_config_dir, "plugins")
site_plugin_dir = os.path.join(site_config_dir, "plugins")
def init_dir(path):
try:
os.makedirs(path)
except FileExistsError:
pass
def init_dirs():
for path in [user_cache_dir, user_config_dir, user_plugin_dir, user_dev_dir]:
init_dir(path)
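# Typical use (sketch): call ``init_dirs()`` once at application start-up so that the
# cache/config/plugin directories above exist before anything writes into them:
#
#     init_dirs()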
the-stack_0_10579 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, import-self, len-as-condition, unused-argument, too-many-lines
# pylint: disable=import-outside-toplevel
"""ONNX: Open Neural Network Exchange frontend for Relay."""
import numpy as np
import tvm
from tvm.ir import IRModule
from ... import nd as _nd
from .. import analysis
from .. import expr as _expr
from .. import function as _function
from .. import op as _op
from .. import vision as _vision
from .common import AttrCvt, Renamer
from .common import get_relay_op, new_var, infer_shape, infer_channels
from .common import infer_type, infer_value, infer_value_simulated, get_name
__all__ = ['from_onnx']
class onnx_input():
""" Dual purpose list or dictionary access object."""
def __init__(self):
self.input_keys = []
self.input_dict = {}
def __getitem__(self, item):
if isinstance(item, int):
return self.input_dict[self.input_keys[item]]
if isinstance(item, str):
if item not in self.input_keys:
return None
return self.input_dict[item]
if isinstance(item, slice):
keys = self.input_keys[item]
return [self.input_dict[key] for key in keys]
raise ValueError("Only integer, string, and slice accesses allowed.")
def __setitem__(self, item, value):
if isinstance(item, int):
self.input_dict[self.input_keys[item]] = value
elif isinstance(item, str):
self.input_keys.append(item)
self.input_dict[item] = value
else:
raise ValueError("Only integer and string indexed writes allowed.")
def keys(self):
return self.input_keys
def __len__(self):
return len(self.input_keys)
def __iter__(self):
self.n = 0
return self
def __next__(self):
if self.n < len(self.input_keys):
output = self.input_dict[self.input_keys[self.n]]
self.n += 1
return output
raise StopIteration
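# Behaviour sketch for onnx_input (mirrors the accessors above; ``x_expr`` stands for
# any Relay expression):
#
#     inp = onnx_input()
#     inp['X'] = x_expr      # string writes append the key and store the value
#     inp[0]                 # -> x_expr   (positional access)
#     inp['missing']         # -> None     (unknown string keys return None)
#     inp[0:1]               # -> [x_expr] (slices return a list)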
def get_numpy(tensor_proto):
"""Grab data in TensorProto and convert to numpy array."""
try:
from onnx.numpy_helper import to_array
except ImportError as e:
raise ImportError(
"Unable to import onnx which is required {}".format(e))
return to_array(tensor_proto)
def dimension_picker(prefix, suffix=''):
"""Check that dimensions are supported."""
def _impl(attr):
kernel = attr['kernel_shape']
if len(kernel) == 1:
return prefix + '1d' + suffix
if len(kernel) == 2:
return prefix + '2d' + suffix
if len(kernel) == 3:
return prefix + '3d' + suffix
msg = 'Only 1D, 2D, and 3D kernels are supported for operator {}.'
op_name = prefix + '1d/2d/3d'
raise tvm.error.OpAttributeInvalid(msg.format(op_name))
return _impl
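# e.g. dimension_picker('conv')({'kernel_shape': [3, 3]}) -> 'conv2d', and
#      dimension_picker('max_pool')({'kernel_shape': [2, 2, 2]}) -> 'max_pool3d'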
def revert_caffe2_pad(pads):
"""Caffe2 requires two times the normal padding."""
if len(pads) == 4:
pads = pads[:2]
elif len(pads) == 2:
pass
else:
raise tvm.error.OpAttributeInvalid(
'Number of pads must be either 2 or 4.')
return pads
def get_pad_pair(input1d, kernel1d, stride1d):
"""infer pad size"""
if input1d % stride1d == 0:
pad = max(kernel1d - stride1d, 0)
else:
pad = max(kernel1d - (input1d % stride1d), 0)
pad_before = pad // 2
pad_after = pad - pad_before
return [pad_before, pad_after]
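# Worked example: input1d=7, kernel1d=3, stride1d=2 -> pad = max(3 - (7 % 2), 0) = 2,
# split as [1, 1] (pad_before, pad_after).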
def onnx_default_layout(dims):
if dims == 1:
return 'NCW'
if dims == 2:
return 'NCHW'
if dims == 3:
return 'NCDHW'
msg = "Only 1D, 2D and 3D layouts are currently supported"
    raise tvm.error.OpAttributeInvalid(msg)
def onnx_storage_order2layout(storage_order, dims=2):
"""converter of onnx storage order parameter to tvm storage order format"""
if storage_order not in (0, 1):
raise tvm.error.OpAttributeInvalid('Mode of storage_order must be either 0 or 1')
if dims == 1:
return 'NCW' if storage_order == 0 else 'NWC'
if dims == 2:
return 'NCHW' if storage_order == 0 else 'NHWC'
if dims == 3:
return 'NCDHW' if storage_order == 0 else 'NDHWC'
msg = "Only 1D, 2D and 3D layouts are currently supported"
    raise tvm.error.OpAttributeInvalid(msg)
def dimension_constraint():
def _dim_check(attrs):
if len(attrs['kernel_shape']) in [1, 2, 3]:
return True
return False
return _dim_check, "Only 1d, 2d and 3d kernel supported."
class OnnxOpConverter(object):
""" A helper class for holding onnx op converters.
"""
@classmethod
def get_converter(cls, opset):
""" Get converter matches given opset.
Parameters
----------
opset: int
opset from model.
Returns
-------
        converter, which should be `_impl_vx`, where x is the largest
        implemented version that is smaller than or equal to the given opset.
"""
versions = [
int(d.replace('_impl_v', '')) for d in dir(cls) if '_impl_v' in d
]
versions = sorted(versions + [opset])
version = versions[
max([i for i, v in enumerate(versions) if v == opset]) - 1]
if hasattr(cls, '_impl_v{}'.format(version)):
return getattr(cls, '_impl_v{}'.format(version))
raise NotImplementedError(
'opset version {} of {} not implemented'.format(
version, cls.__name__))
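    # Worked example: for a converter defining ``_impl_v1`` and ``_impl_v7``,
    # ``get_converter(10)`` returns ``_impl_v7`` -- the largest implemented version
    # that does not exceed the requested opset.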
class Unary(OnnxOpConverter):
""" A helper class for unary op converters.
"""
name = ''
@classmethod
def _impl_v1(cls, inputs, attr, params):
assert len(inputs) == 1, "Unary math op {} takes 1 input, {} given".format(
cls.name, len(inputs))
op_name = cls.name
return get_relay_op(op_name)(*inputs)
class Elemwise(OnnxOpConverter):
""" A helper class for elemwise op converters.
"""
name = ''
@classmethod
def _impl_v1(cls, inputs, attr, params):
assert len(inputs) == 2, "Math op {} take 2 inputs, {} given".format(
cls.name, len(inputs))
op_name = cls.name
conv_ops = ["conv2d", "conv2d_transpose"]
if attr.get('broadcast', 0) and any(x in str(inputs[0]) for x in conv_ops):
# TODO(zhreshold): remove hard coded infershape
axis = int(attr.get('axis', 0))
inputs[1] = _op.expand_dims(inputs[1], axis=axis, num_newaxis=2)
return get_relay_op(op_name)(*inputs)
class Pool(OnnxOpConverter):
""" A helper class for pool op converters.
"""
name = ''
@classmethod
def _impl_v1(cls, inputs, attr, params):
input_shape = infer_shape(inputs[0])
if 'auto_pad' in attr:
attr['auto_pad'] = attr['auto_pad'].decode('utf-8')
if attr['auto_pad'] in ('SAME_UPPER', 'SAME_LOWER'):
pad_tuple = []
for axis in range(len(input_shape) - 2):
axis_shape = input_shape[2 + axis]
stride = attr['strides'][axis]
kernel = attr['kernel_shape'][axis]
pad = get_pad_pair(axis_shape, kernel, stride)
pad_tuple.append(pad)
pad_tuple = tuple([val for pair in zip(*pad_tuple) for val in pair])
attr['pads'] = pad_tuple
elif attr['auto_pad'] == 'VALID':
attr['pads'] = 0
elif attr['auto_pad'] == 'NOTSET':
pass
else:
msg = 'Value {} in attribute "auto_pad" of operator {} is invalid.'
raise tvm.error.OpAttributeInvalid(msg.format(attr['auto_pad'], cls.name))
attr.pop("auto_pad")
if 'storage_order' in attr:
attr['layout'] = onnx_storage_order2layout(attr['storage_order'],
dims=(len(input_shape) - 2))
else:
attr['layout'] = onnx_default_layout(dims=(len(input_shape) - 2))
return AttrCvt(
op_name=dimension_picker(cls.name),
transforms={
'kernel_shape': 'pool_size',
'pads': ('padding', 0)
},
ignores=['dilations'],
custom_check=dimension_constraint())(inputs, attr, params)
class Absolute(Unary):
""" Operator converter for Absolute.
"""
name = 'abs'
class Add(Elemwise):
""" Operator converter for Add.
"""
name = 'add'
class AveragePool(Pool):
""" Operator converter for AveragePool.
"""
name = 'avg_pool'
class BatchNorm(OnnxOpConverter):
""" Operator converter for BatchNorm.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
# TODO(zhreshold): 'spatial' is not properly handled here.
out = AttrCvt(
op_name='batch_norm',
ignores=['spatial', 'is_test', 'consumed_inputs', 'momentum'])(inputs, attr,
params)
return out[0]
class InstanceNorm(OnnxOpConverter):
""" Operator converter for BatchNorm.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
return AttrCvt(op_name='instance_norm')(inputs, attr, params)
class Conv(OnnxOpConverter):
""" Operator converter for Conv.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
# Use shape of input to determine convolution type.
input_shape = infer_shape(inputs[0])
if 'auto_pad' in attr:
attr['auto_pad'] = attr['auto_pad'].decode('utf-8')
if attr['auto_pad'] in ('SAME_UPPER', 'SAME_LOWER'):
pad_tuple = []
for axis in range(len(input_shape) - 2):
axis_shape = input_shape[2 + axis]
stride = attr['strides'][axis]
kernel = attr['kernel_shape'][axis]
dilation = attr['dilations'][axis]
dilated_kernel = (kernel - 1) * dilation + 1
pad = get_pad_pair(axis_shape, dilated_kernel, stride)
pad_tuple.append(pad)
pad_tuple = tuple([val for pair in zip(*pad_tuple) for val in pair])
attr['pads'] = pad_tuple
elif attr['auto_pad'] == 'VALID':
attr['pads'] = tuple([0 for i in range(len(input_shape) - 2)])
elif attr['auto_pad'] == 'NOTSET':
pass
else:
msg = 'Value {} in attribute "auto_pad" of operator Conv is invalid.'
raise tvm.error.OpAttributeInvalid(msg.format(attr['auto_pad']))
attr.pop('auto_pad')
elif len(attr['kernel_shape']) == 2:
sym_pad = True
if 'pads' in attr:
padding = attr['pads']
else:
padding = [0, 0, 0, 0]
for i in range(0, len(padding), 2):
sym_pad = sym_pad and padding[i] == padding[i + 1]
if sym_pad:
attr['pads'] = padding[0::2]
out = AttrCvt(
op_name=dimension_picker('conv'),
transforms={
'kernel_shape': 'kernel_size',
'dilations': ('dilation', 1),
'pads': ('padding', 0),
'group': ('groups', 1)
},
custom_check=dimension_constraint())(inputs[:2], attr, params)
use_bias = len(inputs) == 3
if use_bias:
out = _op.nn.bias_add(out, inputs[2])
return out
class ConvTranspose(OnnxOpConverter):
""" Operator converter for ConvTranspose.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
# get number of channels
channels = infer_channels(inputs[1], True)
attr['channels'] = channels
groups = attr.pop('group')
attr['groups'] = groups
# infer pads for auto_pad
if 'auto_pad' in attr:
attr['auto_pad'] = attr['auto_pad'].decode('utf-8')
if attr['auto_pad'] in ('SAME_UPPER', 'SAME_LOWER'):
input_shape = infer_shape(inputs[0])
in_h, in_w = input_shape[2], input_shape[3]
stride_h, stride_w = attr['strides']
kernel_h, kernel_w = attr['kernel_shape']
dilation_h, dilation_w = attr['dilations']
dilated_kernel_h = (kernel_h - 1) * dilation_h + 1
dilated_kernel_w = (kernel_w - 1) * dilation_w + 1
pad_v = get_pad_pair(in_h, dilated_kernel_h, stride_h)
pad_h = get_pad_pair(in_w, dilated_kernel_w, stride_w)
attr['pads'] = (pad_v[0], pad_h[0], pad_v[1], pad_h[1])
elif attr['auto_pad'] == 'VALID':
attr['pads'] = (0, 0)
elif attr['auto_pad'] == 'NOTSET':
pass
else:
msg = 'Value {} in attribute "auto_pad" of operator Conv is invalid.'
raise tvm.error.OpAttributeInvalid(msg.format(attr['auto_pad']))
attr.pop('auto_pad')
out = AttrCvt(
op_name=dimension_picker('conv', '_transpose'),
transforms={
'kernel_shape': 'kernel_size',
'dilations': ('dilation', (0, 0)),
'pads': ('padding', (0, 0), revert_caffe2_pad)
},
disables=['output_shape'],
custom_check=dimension_constraint())(inputs[:2], attr, params)
use_bias = len(inputs) == 3
if use_bias:
out = _op.nn.bias_add(out, inputs[2])
return out
class Div(Elemwise):
""" Operator converter for Divide.
"""
name = 'divide'
class Elu(OnnxOpConverter):
""" Operator converter for Elu.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
alpha = float(attr.get('alpha', 1.0))
return _expr.const(-alpha) * _op.nn.relu(_expr.const(1.) - _op.exp(inputs[0])) + \
_op.nn.relu(inputs[0])
class Gemm(OnnxOpConverter):
""" Operator converter for Gemm.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
assert len(inputs) == 3, "Gemm op take 3 inputs, {} given".format(
len(inputs))
# Y = alpha * A * B + beta * C
alpha = float(attr.get('alpha', 1.0))
beta = float(attr.get('beta', 1.0))
transA = int(attr.get('transA', 0))
transB = int(attr.get('transB', 0))
# get number of channels
channels = infer_channels(inputs[1], not transB)
if transA:
inputs[0] = _op.transpose(inputs[0], axes=(1, 0))
if not transB:
inputs[1] = _op.transpose(inputs[1], axes=(1, 0))
inputs[0] = _op.nn.batch_flatten(inputs[0])
out = _op.nn.dense(_expr.const(alpha) * inputs[0],
inputs[1], units=channels)
return _op.nn.bias_add(out, _expr.const(beta) * inputs[2])
class MatMul(OnnxOpConverter):
""" Operator converter for MatMul.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
assert len(inputs) == 2, "MatMul op take 2 inputs, {} given".format(len(inputs))
# Need to check input shape as batch matmul must be supported.
a_shape = infer_shape(inputs[0])
# When performing a batch matmul, we need to properly handle N-dim shapes.
if len(a_shape) > 2:
b_shape = infer_shape(inputs[1])
# Convert a and b into 3 dimensional tensors.
a = _op.reshape(inputs[0], [-1, a_shape[-2], a_shape[-1]])
b = _op.reshape(inputs[1], [-1, b_shape[-2], b_shape[-1]])
# Broadcast b to match batch size of a
new_b_shape = list(infer_shape(b))
new_a_shape = infer_shape(a)
if new_a_shape[0] > new_b_shape[0]:
new_b_shape[0] = new_a_shape[0]
b = _op.broadcast_to(b, new_b_shape)
# Transpose matrix dimensions of b.
b = _op.transpose(b, [0, 2, 1])
# Perform a batch matmul.
output = _op.nn.batch_matmul(a, b)
# Reshape output to original dimensions.
return _op.reshape(output, [*a_shape[:-2], a_shape[-2], b_shape[-1]])
# Otherwise a simple dense op will get the job done.
input_1_t = _op.transpose(inputs[1], axes=(1, 0))
return _op.nn.dense(inputs[0], input_1_t)
class MaxPool(Pool):
""" Operator converter for MaxPool
"""
name = 'max_pool'
class Mul(Elemwise):
""" Operator converter for Multiply.
"""
name = 'multiply'
class Pad(OnnxOpConverter):
""" Operator converter for Pad.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
pad_width = []
pads = attr.pop('paddings')
dims = int(len(pads) / 2)
for i in range(dims):
pad_width.append((pads[i], pads[i+dims]))
attr['pad_width'] = pad_width
pad_mode = attr.get('mode', b'constant').decode('utf-8')
if pad_mode in ['constant', 'edge', 'reflect']:
attr['pad_mode'] = pad_mode
attr.pop('mode', None)
else:
raise tvm.error.OpAttributeInvalid(
'Value ' + pad_mode + ' in attribute "mode" is invalid for operator Pad.')
return AttrCvt(
_op.nn.pad,
transforms={
'value': 'pad_value',
},
)(inputs, attr, params)
@classmethod
def _impl_v2(cls, inputs, attr, params):
pad_width = []
pads = attr.pop('pads')
dims = int(len(pads) / 2)
for i in range(dims):
pad_width.append((pads[i], pads[i+dims]))
attr['pad_width'] = pad_width
pad_mode = attr.get('mode', b'constant').decode('utf-8')
if pad_mode in ['constant', 'edge', 'reflect']:
attr['pad_mode'] = pad_mode
attr.pop('mode', None)
else:
raise tvm.error.OpAttributeInvalid(
'Value ' + pad_mode + ' in attribute "mode" is invalid for operator Pad.')
return AttrCvt(
'pad',
transforms={
'value': 'pad_value',
},
)(inputs, attr, params)
@classmethod
def _impl_v11(cls, inputs, attr, params):
pad_width = []
pads = infer_value_simulated(inputs[1], params).asnumpy()
if len(inputs) == 3:
value = infer_value_simulated(inputs[2], params).asnumpy().item()
else:
value = 0
attr["pad_value"] = value
dims = int(len(pads) / 2)
for i in range(dims):
pad_width.append((pads[i], pads[i+dims]))
attr['pad_width'] = pad_width
pad_mode = attr.get('mode', b'constant').decode('utf-8')
if pad_mode in ['constant', 'edge', 'reflect']:
attr['pad_mode'] = pad_mode
attr.pop('mode', None)
else:
raise tvm.error.OpAttributeInvalid(
'Value ' + pad_mode + ' in attribute "mode" is invalid for operator Pad.')
return AttrCvt('pad')(inputs[:1], attr, params)
class ParametricSoftPlus(OnnxOpConverter):
""" Operator converter for ParametricSoftPlus.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
alpha = _expr.const(float(attr.get('alpha', 1.0)))
beta = _expr.const(float(attr.get('beta', 1.0)))
return _op.log(_op.exp(beta * inputs[0]) + _expr.const(1.)) * alpha
class Prelu(OnnxOpConverter):
""" Operator converter for Prelu.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
assert len(inputs) == 2, "Prelu need 2 inputs, {} given".format(len(inputs))
alpha_shape = infer_shape(inputs[1])
if len(alpha_shape) != 1:
alpha = _op.reshape(inputs[1], (-1,))
else:
alpha = inputs[1]
return _op.nn.prelu(inputs[0], alpha)
class Reciprocal(OnnxOpConverter):
""" Operator converter for Reciprocal.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
return _expr.const(1.0) / inputs[0]
class Flatten(OnnxOpConverter):
""" Operator converter for Flatten.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
axis = attr.get('axis', 1)
if axis == 1:
out = _op.nn.batch_flatten(inputs[0])
else:
newshape = [0] * (axis + 1)
newshape[axis] = -1
out = _op.reshape(inputs[0], list(newshape))
return out
class Reshape(OnnxOpConverter):
""" Operator converter for Reshape.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
return _op.reshape(inputs[0], attr['shape'])
@classmethod
def _impl_v5(cls, inputs, attr, params):
if get_name(inputs[1]) in params:
# pop shape out of parameters since it wont be needed later.
shape = tuple(params.pop(inputs[1].name_hint).asnumpy().astype("int32"))
out = _op.reshape(inputs[0], shape)
else:
data, shape = inputs
static_shape = infer_value_simulated(shape, params)
out = _op.reshape(data, newshape=tuple(
static_shape.asnumpy().astype('int32')))
return out
class DepthToSpace(OnnxOpConverter):
""" Operator converter for DepthToSpace.
"""
@classmethod
def _impl_v11(cls, inputs, attr, params):
block_size = int(attr['blocksize'])
mode = attr.get('mode', b'DCR').decode('utf-8')
return _op.nn.depth_to_space(inputs[0], block_size, mode=mode)
class SpaceToDepth(OnnxOpConverter):
""" Operator converter for SpaceToDepth.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
block_size = int(attr['blocksize'])
return _op.nn.space_to_depth(inputs[0], block_size)
class Concat(OnnxOpConverter):
""" Operator converter for Concat.
"""
@classmethod
def _impl_v1(cls, inputs, args, params):
return AttrCvt(op_name='concatenate')((inputs,), args)
class Scale(OnnxOpConverter):
""" Operator converter for Scale.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
scale = float(attr.get('scale', 1.0))
return inputs[0] * _expr.const(scale)
class Selu(OnnxOpConverter):
""" Operator converter for Selu.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
alpha = float(attr.get('alpha', 1.6732))
gamma = float(attr.get('gamma', 1.0507))
return _expr.const(gamma) * (_expr.const(-alpha) *
_op.nn.relu(_expr.const(1.) - _op.exp(inputs[0])) +
_op.nn.relu(inputs[0]))
class ScaledTanh(OnnxOpConverter):
""" Operator converter for ScaledTanh.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
alpha = float(attr.get('alpha', 1.0))
beta = float(attr.get('beta', 1.0))
return _op.tanh(_expr.const(beta) * inputs[0]) * _expr.const(alpha)
class SoftPlus(OnnxOpConverter):
""" Operator converter for SoftPlus.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
return _op.log(_op.exp(inputs[0]) + _expr.const(1.))
class Softsign(OnnxOpConverter):
""" Operator converter for Softsign.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
return inputs[0] / (_expr.const(1.) + Absolute.get_converter(1)(inputs, attr, params))
class Sub(Elemwise):
""" Operator converter for Subtract.
"""
name = 'subtract'
class Sum(OnnxOpConverter):
""" Operator converter for Sum.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
# Onnx Sum Operator
for in_index in range(len(inputs) - 1):
inputs[in_index + 1] = _op.add(inputs[in_index], inputs[in_index + 1])
return inputs[len(inputs) - 1]
class Affine(OnnxOpConverter):
""" Operator converter for Affine transformation.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
alpha = _expr.const(attr.get('alpha', 1.0))
beta = _expr.const(attr.get('beta', 0.0))
return (alpha * inputs[0]) + beta
class ThresholdedRelu(OnnxOpConverter):
""" Operator converter for ThresholdedRelu.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
alpha = float(attr.get('alpha', 1.0))
alpha_tensor = _op.full_like(inputs[0], fill_value=_expr.const(alpha))
mask = _op.greater(inputs[0], alpha_tensor).astype("float32")
return inputs[0] * mask
def _broadcast_constraint():
def _broadcast_check(attrs):
if attrs.get('axis', None):
return False
return True
return _broadcast_check, "Specifying broadcast axis not allowed."
def _fully_connected(opset):
def _impl(inputs, attr, params):
# get number of channels
channels = infer_channels(inputs[1], params)
attr['units'] = channels
return AttrCvt('dense', ignores=['axis', 'axis_w'])(inputs, attr)
return _impl
class Upsample(OnnxOpConverter):
""" Operator converter for Upsample (nearest mode).
"""
@classmethod
def _impl_v9(cls, inputs, attr, params):
scales = attr.get('scales')
if not scales:
            # From opset 9 onwards the scales come in as a second input instead of an attribute.
            assert len(inputs) == 2, "Upsample op takes 2 inputs, {} given".format(len(inputs))
if get_name(inputs[1]) in params:
scales = params[inputs[1].name_hint].asnumpy()
else:
scales = infer_value_simulated(inputs[1], params).asnumpy()
inputs = inputs[:1]
assert scales[0] == 1.0 and scales[1] == 1.0
input_shape = infer_shape(inputs[0])
dims = len(input_shape)
mode = attr.get('mode')
if mode == b'nearest':
method = "nearest_neighbor"
elif mode == b'linear':
method = "trilinear" if dims == 5 else "bilinear"
else:
raise tvm.error.OpAttributeInvalid(
'Value {} in attribute "mode" of operator Upsample is not valid.'.format(mode))
attr = {'scale_h': scales[-2],
'scale_w': scales[-1],
'method': method}
if dims == 5:
assert len(scales) == 5
attr['scale_d'] = scales[-3]
attr['layout'] = 'NCDHW'
op_name = 'upsampling3d'
else:
assert len(scales) == 4
attr['layout'] = 'NCHW'
attr['align_corners'] = True
op_name = 'upsampling'
return AttrCvt(op_name)(inputs, attr)
class Shape(OnnxOpConverter):
""" Operator converter for Shape.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
return _op.shape_of(inputs[0], "int64")
class Cast(OnnxOpConverter):
""" Operator converter for Cast.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
return AttrCvt(op_name='cast', transforms={'to': 'dtype'})(inputs, attr)
@classmethod
def _impl_v5(cls, inputs, attr, params):
try:
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
attr['to'] = str(TENSOR_TYPE_TO_NP_TYPE[attr['to']])
except ImportError as e:
raise ImportError(
"Unable to import onnx.mapping which is required {}".format(e))
return AttrCvt(op_name='cast', transforms={'to': 'dtype'})(inputs, attr)
class Unsqueeze(OnnxOpConverter):
""" Operator converter for Unsqueeze.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
for axes in attr['axes']:
inputs[0] = _op.expand_dims(inputs[0], axis=axes, num_newaxis=1)
return inputs[0]
class Split(OnnxOpConverter):
""" Operator converter for Split.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
splits = attr.get('split', False)
if splits:
attr['indices_or_sections'] = []
index = 0
for i in splits[:-1]:
index += i
attr['indices_or_sections'].append(index)
        # When splits isn't specified, divide evenly over the axis.
else:
in_shape = infer_shape(inputs[0])
attr['indices_or_sections'] = in_shape[attr['axis']]
return AttrCvt(
'split',
ignores=['split'])(inputs, attr, params)
class Slice(OnnxOpConverter):
""" Operator converter for Slice.
"""
@classmethod
def _common(cls, starts, ends, axes):
new_axes = []
new_starts = []
new_ends = []
pop_index = 0
for i in range(max(axes) + 1):
if i in axes:
new_axes.append(i)
new_starts.append(starts[pop_index])
new_ends.append(ends[pop_index])
pop_index += 1
else:
new_axes.append(i)
new_starts.append(0)
new_ends.append(np.iinfo(np.int32).max)
return new_starts, new_ends, new_axes
@classmethod
def _impl_v1(cls, inputs, attr, params):
if isinstance(attr['starts'], int):
attr['starts'] = (attr['starts'],)
attr['ends'] = (attr['ends'],)
try:
# Update the starts and ends according to axes if required.
if isinstance(attr['axes'], int):
attr['axes'] = (attr['axes'],)
if (max(attr['axes']) + 1) != len(attr['axes']):
new_starts, new_ends, new_axes = cls._common(
attr['starts'], attr['ends'], attr['axes'])
attr['axes'] = new_axes
attr['starts'] = new_starts
attr['ends'] = new_ends
except KeyError:
pass
return AttrCvt('strided_slice',
transforms={'starts': 'begin',
'ends': 'end'},
ignores=['axes'])(inputs, attr)
@classmethod
def _impl_v10(cls, inputs, attr, params):
starts = params[get_name(inputs[1])].asnumpy()
ends = params[get_name(inputs[2])].asnumpy()
# Update the starts and ends according to axes if required.
if len(inputs) >= 4:
axes = params[get_name(inputs[3])].asnumpy()
if max(axes + 1) != len(axes):
new_starts, new_ends, _ = cls._common(
starts, ends, axes)
starts = new_starts
ends = new_ends
return _op.strided_slice(inputs[0], begin=starts, end=ends)
class Gather(OnnxOpConverter):
""" Operator converter for Gather.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
axis = attr.get('axis', 0)
return AttrCvt('take',
extras={'axis': axis})(inputs, {})
class GatherND(OnnxOpConverter):
""" Operator converter for GatherND.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
return _op.gather_nd(inputs[0], inputs[1])
class Greater(OnnxOpConverter):
""" Operator logical greater.
"""
@classmethod
def _impl_v7(cls, inputs, attr, params):
return _op.greater(inputs[0], inputs[1])
class Less(OnnxOpConverter):
""" Operator logical less than.
"""
@classmethod
def _impl_v7(cls, inputs, attr, params):
return _op.less(inputs[0], inputs[1])
class LRN(OnnxOpConverter):
""" Operator converter for Local Response Normalization.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
"""LRN support only NCHW format
https://github.com/onnx/onnx/blob/master/docs/Operators.md#LRN
"""
axis = 1
alpha = attr.get('alpha', 0.0001)
beta = attr.get('beta', 0.75)
bias = attr.get('bias', 1.0)
nsize = attr.get('size')
attr = {'size': nsize, 'axis': axis, 'alpha': alpha, 'beta': beta, 'bias': bias}
return AttrCvt('lrn')(inputs, attr)
class Maximum(OnnxOpConverter):
""" Operator converter for Maximum.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
if not isinstance(inputs, (list, onnx_input)) or len(inputs) < 2:
raise ValueError("Expect minimum 2 inputs")
_max = inputs[0]
for i in range(1, len(inputs)):
_max = AttrCvt('maximum')([_max, inputs[i]], {})
return _max
class Minimum(OnnxOpConverter):
""" Operator converter for Minimum.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
if not isinstance(inputs, (list, onnx_input)) or len(inputs) < 2:
raise ValueError("Expect minimum 2 inputs")
_min = inputs[0]
for i in range(1, len(inputs)):
_min = AttrCvt('minimum')([_min, inputs[i]], {})
return _min
class Mean(OnnxOpConverter):
""" Operator converter for Mean.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
if not isinstance(inputs, (list, onnx_input)) or len(inputs) < 2:
raise ValueError("Expect minimum 2 inputs")
# avoid overflow
concat = _op.concatenate([_op.expand_dims(x, axis=0) for x in inputs], axis=0)
return _op.mean(concat, axis=0, keepdims=False)
class HardSigmoid(OnnxOpConverter):
""" Operator converter for HardSigmoid.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
alpha = attr.get('alpha', 0.2)
beta = attr.get('beta', 0.5)
transformX = (inputs[0] * _expr.const(alpha)) + _expr.const(beta)
attr = {'a_min': 0, 'a_max': 1}
return AttrCvt('clip')([transformX], attr)
class Reduce(OnnxOpConverter):
""" Operator converter for reduce ops.
"""
name = ''
@classmethod
def _impl_v1(cls, inputs, attr, params):
if 'axes' in attr:
axis = attr.get('axes', 0)
else:
axis_len = len(infer_shape(inputs[0]))
axis = list(range(axis_len))
attr = {'axis': axis, 'keepdims': attr.get('keepdims', True)}
return AttrCvt(cls.name)(inputs, attr)
class ReduceMax(Reduce):
""" Operator converter for ReduceMax.
"""
name = 'max'
class ReduceMin(Reduce):
""" Operator converter for ReduceMin.
"""
name = 'min'
class ReduceSum(Reduce):
""" Operator converter for ReduceSum.
"""
name = 'sum'
class ReduceMean(Reduce):
""" Operator converter for ReduceMean.
"""
name = 'mean'
class ReduceProd(Reduce):
""" Operator converter for ReduceProd.
"""
name = 'prod'
class ReduceLogSumExp(Reduce):
""" Operator converter for ReduceLogSumExp.
"""
name = 'logsumexp'
class ArgMax(OnnxOpConverter):
""" Operator converter for ArgMax.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
axis = attr.get('axis', 0)
keepdims = attr.get('keepdims', True)
attr = {'axis': axis, 'keepdims': keepdims}
return AttrCvt('argmax')(inputs, attr)
class ArgMin(OnnxOpConverter):
""" Operator converter for ArgMin.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
axis = attr.get('axis', 0)
keepdims = attr.get('keepdims', True)
attr = {'axis': axis, 'keepdims': keepdims}
return AttrCvt('argmin')(inputs, attr)
class Softmax(OnnxOpConverter):
""" Operator converter for Softmax.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
# set default value when axis is not set in the model
if 'axis' not in attr:
attr['axis'] = 1
return AttrCvt('softmax', transforms={'axis': ('axis', 1)})(inputs, attr, params)
class OneHot(OnnxOpConverter):
""" Operator converter for OneHot.
"""
@classmethod
def _impl_v9(cls, inputs, attr, params):
# Extract relay one_hot inputs.
indices, depth, values = inputs
# Split onnx on off values into two separate expressions.
off_value, on_value = _op.take(
values, _op.const(0)), _op.take(values, _op.const(1))
# Extract the datatype of the output from on_value.
dtype = infer_type(on_value).checked_type.dtype
# Convert depth into an integer.
depth = int(infer_value(depth, params).asnumpy()[0])
# set default value when axis is not set in the model
if 'axis' not in attr:
attr['axis'] = -1
return _op.one_hot(indices,
on_value,
off_value,
depth,
int(attr['axis']),
dtype=dtype)
class ConstantOfShape(OnnxOpConverter):
""" Operator converter for ConstantOfShape.
"""
@classmethod
def _impl_v9(cls, inputs, attr, params):
if 'value' in attr:
np_value = get_numpy(attr.pop('value'))[0]
value = _expr.const(np_value)
dtype = np_value.dtype.name
else:
value = _expr.const(0)
dtype = 'float32'
static_shape = infer_value_simulated(inputs[0], params)
output = _op.full(
value, shape=tuple(static_shape.asnumpy().astype('int32')), dtype=dtype)
return output
class Sign(OnnxOpConverter):
""" Operator converter for Sign.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
return _op.sign(inputs[0])
class Equal(Elemwise):
""" Operator converter for Equal.
"""
name = 'equal'
class Not(Elemwise):
""" Operator converter for Not.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
return _op.logical_not(inputs[0])
class And(Elemwise):
""" Operator converter for And.
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
return _op.logical_and(inputs[0], inputs[1])
class Tile(Elemwise):
"""Operator converter for Tile
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
if 'repeats' not in attr:
raise tvm.error.OpAttributeInvalid('Attribute "repeats" should be set '
'for operator Tile.')
reps = attr.pop('repeats') # The number of times repeating the tensor data.
return _op.tile(inputs[0], reps)
@classmethod
def _impl_v6(cls, inputs, attr, params):
reps = tuple(infer_value_simulated(
inputs[1], params).asnumpy().astype('int32'))
return _op.tile(inputs[0], reps)
class Erf(OnnxOpConverter):
"""Operator converter for Erf
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
return _op.erf(inputs[0])
class Where(OnnxOpConverter):
"""Operator converter for Where
"""
@classmethod
def _impl_v9(cls, inputs, attr, params):
condition_shape = infer_shape(inputs[0])
x_shape = infer_shape(inputs[1])
y_shape = infer_shape(inputs[2])
# condition, x, and y can all be broadcasted.
# broadcast each of them to the longest shape.
# if two shapes have the same number of dimensions,
# try to choose the one that doesn't have "1" as
# a dimension.
shapes = [condition_shape, x_shape, y_shape]
shape_lens = [len(shape) for shape in shapes]
max_size = max(shape_lens)
max_size_idxs = [i for i, x in enumerate(shape_lens) if x == max_size]
broadcast_idx = max_size_idxs[0]
if len(max_size_idxs) > 1:
for idx in max_size_idxs:
if 1 not in shapes[idx]:
broadcast_idx = idx
broadcast_shape = shapes[broadcast_idx]
if condition_shape != broadcast_shape:
inputs[0] = _op.broadcast_to(inputs[0], broadcast_shape)
if x_shape != broadcast_shape:
inputs[1] = _op.broadcast_to(inputs[1], broadcast_shape)
if y_shape != broadcast_shape:
inputs[2] = _op.broadcast_to(inputs[2], broadcast_shape)
return _op.where(inputs[0], inputs[1], inputs[2])
class Or(Elemwise):
""" Operator converter for Or.
"""
@classmethod
def _impl_v7(cls, inputs, attr, params):
return _op.logical_or(inputs[0], inputs[1])
class Expand(OnnxOpConverter):
""" Operator converter for Expand.
"""
@classmethod
def _impl_v8(cls, inputs, attr, params):
in_shape = np.array(infer_shape(inputs[0])).astype('int32')
if get_name(inputs[1]) in params:
shape = params[inputs[1].name_hint].asnumpy().astype('int32')
else:
shape = infer_value_simulated(inputs[1], params).asnumpy().astype('int32')
        # Currently 'op.broadcast_to' expects the rank of the given 'shape'
        # (the 2nd input) to be at least as high as that of the given 'input' (the 1st input).
        # ONNX Expand, however, supports multi-directional broadcasting, which allows the
        # opposite pattern, and an extent of 'shape' may also be smaller than the
        # corresponding extent of 'input'; in that case the extent of 'shape' must be 1.
        # https://github.com/onnx/onnx/blob/master/docs/Broadcasting.md
        # In those cases we cannot directly apply 'op.broadcast_to' to emulate 'expand',
        # so the problem is solved here by expanding the given 'shape' itself.
def expand_shape(in_shape, shape):
""" A function expands the shape when the rank is lower than that of the given
intput. Also it replaces the extent of the shape with the corresponding extent
of the intput when it is 1.
"""
# here we flip the shapes because this can be more simply written
# when the innermost dimension is located at the index 0.
in_shape = np.flip(in_shape, axis=0)
shape = np.flip(shape, axis=0)
if in_shape.size < shape.size:
for i in range(shape.size):
if i < in_shape.size and in_shape[i] > shape[i]:
shape[i] = in_shape[i]
else:
for i in range(in_shape.size):
if i >= shape.size:
                        shape = np.append(shape, in_shape[i])  # np.append returns a new array
elif shape[i] == 1:
shape[i] = in_shape[i]
new_shape = np.flip(shape, axis=0)
return new_shape
shape = expand_shape(in_shape, shape)
return _op.broadcast_to(inputs[0], shape=tuple(shape))
class LSTM(OnnxOpConverter):
""" Operator converter for LSTM.
"""
@classmethod
def _activation_helper(cls, activation, alpha, beta):
convert_map = _get_convert_map(1)
attrs = {}
if alpha is not None:
attrs['alpha'] = alpha
if beta is not None:
attrs['beta'] = beta
return lambda x: convert_map[activation.decode("utf-8")]([x], attrs, {})
@classmethod
def _activation_needs_alpha(cls, activation):
needs_alpha = [
"Affine",
"LeakyRelu",
"ThresholdedRelu",
"ScaledTanh",
"HardSigmoid",
"Elu",
]
return activation.decode("utf-8") in needs_alpha
@classmethod
def _activation_needs_beta(cls, activation):
needs_beta = [
"Affine",
"ScaledTanh",
"HardSigmoid",
]
return activation.decode("utf-8") in needs_beta
@classmethod
def _impl_v7(cls, inputs, attr, params):
# Unpack inputs, note that if optional and not provided then value will be None.
X = inputs[0]
W = inputs[1]
R = inputs[2]
B = inputs['B']
# Sequence length currently unused as it can be inferred from shapes.
#sequence_lens = inputs['sequence_lens']
h_0 = inputs['initial_h']
c_0 = inputs['initial_c']
P = inputs['P']
num_directions = infer_shape(W)[0]
W_dtype = infer_type(W).type_annotation.dtype
if num_directions != 1:
raise NotImplementedError("Bidirectional LSTMs not yet supported.")
# Remove num_directions axis from weights.
W = _op.squeeze(W, axis=[0])
R = _op.squeeze(R, axis=[0])
if B is not None:
B = _op.squeeze(B, axis=[0])
X_shape = infer_shape(X)
hidden_size = infer_shape(R)[-1]
batch_size = X_shape[1]
# Initialize state if not provided.
# Otherwise remove bidirectional axis.
if h_0 is None:
h_0 = _op.zeros((batch_size, hidden_size), W_dtype)
else:
h_0 = _op.squeeze(h_0, axis=[0])
if c_0 is None:
c_0 = _op.zeros((batch_size, hidden_size), W_dtype)
else:
c_0 = _op.squeeze(c_0, axis=[0])
if P is not None:
P = _op.squeeze(P, axis=[0])
p_i, p_o, p_f = _op.split(P, 3)
H_t = h_0
C_t = c_0
h_list = []
if 'activations' in attr:
activations = attr['activations']
if len(activations) != 3:
raise NotImplementedError("LSTM assumes 3 activation functions are provided")
alpha_loc = 0
alphas = attr.get('activation_alpha', [])
if isinstance(alphas, float):
alphas = [alphas]
beta_loc = 0
betas = attr.get('activation_beta', [])
if isinstance(betas, float):
betas = [betas]
acts = []
for i in range(3):
alpha = None
beta = None
activation = activations[i]
if cls._activation_needs_alpha(activation) and len(alphas) > alpha_loc:
alpha = alphas[alpha_loc]
alpha_loc += 1
if cls._activation_needs_beta(activation) and len(betas) > beta_loc:
beta = betas[beta_loc]
beta_loc += 1
acts.append(cls._activation_helper(activation, alpha, beta))
f_act, g_act, h_act = acts
else:
f_act = _op.sigmoid
g_act = _op.tanh
h_act = _op.tanh
X_steps = _op.split(X, indices_or_sections=X_shape[0], axis=0)
for step in X_steps:
step = _op.squeeze(step, axis=[0])
gates = _op.nn.dense(step, W) + _op.nn.dense(H_t, R)
if B is not None:
WB, RB = _op.split(B, 2)
gates += WB + RB
i, o, f, c = _op.split(gates, 4, axis=-1)
if P is not None:
i = f_act(i + p_i * C_t)
f = f_act(f + p_f * C_t)
else:
i = f_act(i)
f = f_act(f)
c = g_act(c)
C = f * C_t + i * c
if P is not None:
o = f_act(o + p_o * C)
else:
o = f_act(o)
H = o * h_act(C)
H_t = H
C_t = C
h_list.append(_op.expand_dims(H, axis=0))
# Concatenate outputs and add back in direction axis.
concatenated = _op.concatenate(h_list, 0)
output = _op.expand_dims(concatenated, axis=1)
H_t = _op.expand_dims(H_t, axis=0)
C_t = _op.expand_dims(C_t, axis=0)
return _expr.TupleWrapper(_expr.Tuple((output, H_t, C_t)), 3)
class Resize(OnnxOpConverter):
"""Operator converter for Resize
"""
@classmethod
def _impl_v11(cls, inputs, attr, params):
mode = attr.get('mode')
if mode == b'nearest':
method = "nearest_neighbor"
elif mode == b'linear':
method = "bilinear"
else:
raise tvm.error.OpAttributeInvalid(
'Value {} in attribute "mode" of operator Resize is not valid.'.format(mode))
in_size = np.array(infer_shape(inputs[0]))
scale = infer_value_simulated(inputs[2], params).asnumpy()
if len(inputs) == 4:
assert len(scale) == 0, "One of scale or size should be passed, not both."
size = infer_value_simulated(inputs[3], params).asnumpy().astype(np.int32)
else:
assert len(scale) != 0, "One of scale or size should be passed."
size = (in_size * scale).astype(np.int32)
coord_trans = attr.get('coordinate_transformation_mode')
if coord_trans in [b'pytorch_half_pixel', b'half_pixel']:
coord_trans = "half_pixel"
elif coord_trans == b'align_corners':
coord_trans = "align_corners"
elif coord_trans == b'asymmetric' or method == "nearest_neighbor":
coord_trans = "asymmetric"
else:
raise tvm.error.OpAttributeInvalid(
'Unsupported coordinate_transformation_mode: {}'.format(coord_trans))
layout = "NCHW" # ONNX assumes NCHW layout
out_size = (size[2], size[3])
return _op.image.resize(inputs[0], out_size, layout, method, coord_trans)
class NonZero(OnnxOpConverter):
"""Operator converter for NonZero
"""
@classmethod
def _impl_v9(cls, inputs, attr, params):
if len(inputs) > 1:
raise ValueError("Expect 1 input only")
output = AttrCvt(op_name='argwhere')(inputs, attr, params)
# ONNX NonZero always outputs int64
output = _op.cast(output, "int64")
return _op.transpose(output, axes=(1, 0))
class TopK(OnnxOpConverter):
"""Operator converter for TopK
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
if len(inputs) != 2:
raise ValueError("Expect 2 input only")
axis = attr.get("axis", -1)
largest = attr.get("largest", 1)
if largest == 0:
raise ValueError("TVM only supports finding TopK largest elements")
K = int(infer_value(inputs[1], params).asnumpy()[0])
return _op.topk(inputs[0], k=K, axis=axis)
class RoiAlign(OnnxOpConverter):
"""Operator converter for TopK
"""
@classmethod
def _impl_v1(cls, inputs, attr, params):
if len(inputs) != 3:
raise ValueError("Expect 3 inputs only")
x = inputs[0]
rois = inputs[1]
batch_indices = inputs[2]
mode = attr.get("mode", "avg")
if mode != b'avg':
raise ValueError("RoiAlign in Relay only uses avg mode")
output_height = attr.get("output_height", 1)
output_width = attr.get("output_width", 1)
sampling_ratio = attr.get("sampling_ratio", 0)
spatial_scale = attr.get("spatial_scale", 1.0)
batch_indices = _op.expand_dims(batch_indices, axis=1, num_newaxis=1)
batch_indices = _op.cast(
batch_indices, infer_type(rois).type_annotation.dtype)
rois = _op.concatenate([batch_indices, rois], 1)
return _vision.roi_align(x, rois, [output_height, output_width],
spatial_scale, sampling_ratio)
# compatible operators that do NOT require any conversion.
_identity_list = []
# _convert_map defines maps of name to converter functor(callable)
# for 1 to 1 mapping, use Renamer if nothing but name is different
# use AttrCvt if attributes need to be converted
# for 1 to N mapping(composed), use custom callable functions
# for N to 1 mapping, currently not supported(?)
def _get_convert_map(opset):
return {
# defs/experimental
'Identity': Renamer('copy'),
'Affine': Affine.get_converter(opset),
'ThresholdedRelu': ThresholdedRelu.get_converter(opset),
'ScaledTanh': ScaledTanh.get_converter(opset),
'ParametricSoftplus': ParametricSoftPlus.get_converter(opset),
'ConstantOfShape': ConstantOfShape.get_converter(opset),
# 'GivenTensorFill'
'FC': AttrCvt('dense', ignores=['axis', 'axis_w']),
'Scale': Scale.get_converter(opset),
# 'GRUUnit'
# 'ATen'
# 'ImageScaler'
# 'MeanVarianceNormalization'
# 'Crop'
# 'Embedding'
'Upsample': Upsample.get_converter(opset),
'SpatialBN': BatchNorm.get_converter(opset),
# defs/generator
# 'Constant' # Implemented
# 'RandomUniform'
# 'RandomNormal'
# 'RandomUniformLike'
# 'RandomNormalLike'
# defs/logical
# defs/math
'Add': Add.get_converter(opset),
'Sub': Sub.get_converter(opset),
'Mul': Mul.get_converter(opset),
'Div': Div.get_converter(opset),
'Neg': Renamer('negative'),
'Abs': Absolute.get_converter(opset),
'Reciprocal': Reciprocal.get_converter(opset),
'Floor': Renamer('floor'),
'Ceil': Renamer('ceil'),
'Round': Renamer('round'),
'IsInf': Renamer('isinf'),
'IsNaN': Renamer('isnan'),
'Sqrt': Renamer('sqrt'),
'Relu': Renamer('relu'),
'LeakyRelu': Renamer('leaky_relu'),
'Selu': Selu.get_converter(opset),
'Elu': Elu.get_converter(opset),
'Exp': Renamer('exp'),
'Greater': Greater.get_converter(opset),
'Less': Less.get_converter(opset),
'Log': Renamer('log'),
'Tanh': Renamer('tanh'),
'Pow': Renamer('power'),
'PRelu': Prelu.get_converter(opset),
'Sigmoid': Renamer('sigmoid'),
'HardSigmoid': HardSigmoid.get_converter(opset),
'Max': Maximum.get_converter(opset),
'Min': Minimum.get_converter(opset),
'Sum': Sum.get_converter(opset),
'Mean': Mean.get_converter(opset),
'Clip': AttrCvt('clip', transforms={'min': 'a_min', 'max': 'a_max'}),
# softmax default axis is different in onnx
'Softmax': Softmax.get_converter(opset),
'LogSoftmax': AttrCvt('log_softmax', {'axis': ('axis', 1)}),
'OneHot': OneHot.get_converter(opset),
# 'Hardmax'
'Softsign': Softsign.get_converter(opset),
'SoftPlus': SoftPlus.get_converter(opset),
'Gemm': Gemm.get_converter(opset),
'MatMul': MatMul.get_converter(opset),
# defs/nn
'AveragePool': AveragePool.get_converter(opset),
'MaxPool': MaxPool.get_converter(opset),
'Conv': Conv.get_converter(opset),
'ConvTranspose': ConvTranspose.get_converter(opset),
'GlobalAveragePool': Renamer('global_avg_pool2d'),
'GlobalMaxPool': Renamer('global_max_pool2d'),
'BatchNormalization': BatchNorm.get_converter(opset),
'InstanceNormalization': InstanceNorm.get_converter(opset),
# 'LpNormalization'
'Dropout': AttrCvt('dropout', {'ratio': 'rate'}, ignores=['is_test']),
'Flatten': Flatten.get_converter(opset),
'LRN': LRN.get_converter(opset),
# Recurrent Layers
'LSTM': LSTM.get_converter(opset),
# defs/vision
'RoiAlign': RoiAlign.get_converter(opset),
# defs/reduction
'ReduceMax': ReduceMax.get_converter(opset),
'ReduceMin': ReduceMin.get_converter(opset),
'ReduceSum': ReduceSum.get_converter(opset),
'ReduceMean': ReduceMean.get_converter(opset),
'ReduceProd': ReduceProd.get_converter(opset),
'ReduceLogSumExp': ReduceLogSumExp.get_converter(opset),
#defs/sorting
'ArgMax': ArgMax.get_converter(opset),
'ArgMin': ArgMin.get_converter(opset),
'TopK': TopK.get_converter(opset),
# defs/tensor
'Cast': Cast.get_converter(opset),
'Reshape': Reshape.get_converter(opset),
'Expand': Expand.get_converter(opset),
'Concat': Concat.get_converter(opset),
'Split': Split.get_converter(opset),
'Slice': Slice.get_converter(opset),
'Transpose': AttrCvt('transpose', {'perm': 'axes'}),
'DepthToSpace': DepthToSpace.get_converter(opset),
'SpaceToDepth': SpaceToDepth.get_converter(opset),
'Gather': Gather.get_converter(opset),
'GatherND': GatherND.get_converter(opset),
'Squeeze': AttrCvt('squeeze', {'axes': 'axis'}),
'Unsqueeze': Unsqueeze.get_converter(opset),
'Pad': Pad.get_converter(opset),
'Shape': Shape.get_converter(opset),
'Sign': Sign.get_converter(opset),
'Equal': Equal.get_converter(opset),
'Not': Not.get_converter(opset),
'And': And.get_converter(opset),
'Tile': Tile.get_converter(opset),
'Erf': Erf.get_converter(opset),
'Where': Where.get_converter(opset),
'Or': Or.get_converter(opset),
'Resize': Resize.get_converter(opset),
'NonZero': NonZero.get_converter(opset),
}
class GraphProto(object):
"""A helper class for handling Relay expression copying from pb2.GraphProto.
Definition: https://github.com/onnx/onnx/blob/master/onnx/onnx.proto
Parameters
----------
shape : dict of str to tuple, optional
The input shape to the graph
dtype : str or dict of str to str
The input types to the graph
"""
def __init__(self, shape, dtype):
self._nodes = {}
self._params = {}
self._renames = {}
self._num_input = 0
self._num_param = 0
self._shape = shape if shape else {}
self._dtype = dtype
def from_onnx(self, graph, opset):
"""Construct Relay expression from ONNX graph.
Onnx graph is a python protobuf object.
The companion parameters will be handled automatically.
        However, the input names from the onnx graph are vague, mixing inputs and
        network weights/bias such as "1", "2"...
        For convenience, we rename the `real` input names to "input_0",
        "input_1"... and rename parameters to "param_0", "param_1"...
Parameters
----------
graph : onnx protobuf object
The loaded onnx graph
opset : opset version
Returns
-------
mod : tvm.IRModule
The returned relay module
params : dict
A dict of name: tvm.nd.array pairs, used as pretrained weights
"""
# parse network inputs to relay, aka parameters
for init_tensor in graph.initializer:
if not init_tensor.name.strip():
raise ValueError("Tensor's name is required.")
self._params[init_tensor.name] = self._parse_array(init_tensor)
self._nodes[init_tensor.name] = new_var(init_tensor.name,
shape=self._params[init_tensor.name].shape,
dtype=self._params[init_tensor.name].dtype)
for i in graph.input:
# from onnx v0.2, GraphProto.input has type ValueInfoProto,
# and the name is 'i.name'
i_name = self._parse_value_proto(i)
d_type = self._parse_dtype(i, 'float32')
if i_name in self._params:
# i is a param instead of input
self._num_param += 1
self._params[i_name] = self._params.pop(i_name)
self._nodes[i_name] = new_var(i_name,
shape=self._params[i_name].shape,
dtype=self._params[i_name].dtype)
else:
self._num_input += 1
if i_name in self._shape:
tshape = self._shape[i_name]
else:
raise ValueError("Must provide an input shape for `{0}`.".format(i_name))
if isinstance(self._dtype, dict):
dtype = self._dtype[i_name] if i_name in self._dtype else d_type
else:
dtype = d_type
self._nodes[i_name] = new_var(i_name, shape=tshape, dtype=dtype)
# get list of unsupported ops
convert_map = _get_convert_map(opset)
unsupported_ops = set()
for node in graph.node:
op_name = node.op_type
if op_name not in convert_map and \
op_name != 'Constant' and \
op_name not in _identity_list:
unsupported_ops.add(op_name)
if unsupported_ops:
msg = 'The following operators are not supported for frontend ONNX: '
msg += ', '.join(unsupported_ops)
raise tvm.error.OpNotImplemented(msg)
# construct nodes, nodes are stored as directed acyclic graph
for node in graph.node:
op_name = node.op_type
attr = self._parse_attr(node.attribute)
# Create and populate onnx input object.
inputs = onnx_input()
for i in node.input:
if i != '':
inputs[i] = self._nodes[self._renames.get(i, i)]
if op_name == "Constant":
t_proto = self._parse_attr(node.attribute)["value"]
self._num_param += 1
# We should convert scalar integers to int32, to normalize.
array = self._parse_array(t_proto)
self._params[node.output[0]] = array
self._nodes[node.output[0]] = new_var(
node.output[0],
shape=list(t_proto.dims),
dtype=array.dtype)
else:
i_name = self._parse_value_proto(node)
attr['tvm_custom'] = {}
attr['tvm_custom']['name'] = i_name
op = self._convert_operator(op_name, inputs, attr, opset)
node_output = self._fix_outputs(op_name, node.output)
if not isinstance(op, _expr.TupleWrapper):
outputs_num = 1
else:
outputs_num = len(op)
assert len(node_output) == outputs_num, (
"Number of output mismatch {} vs {} in {}.".format(
len(node_output), outputs_num, op_name))
if outputs_num == 1:
self._nodes[node_output[0]] = op
else:
for k, i in zip(list(node_output), range(len(node_output))):
self._nodes[k] = op[i]
# now return the outputs
outputs = [self._nodes[self._parse_value_proto(i)] for i in graph.output]
outputs = outputs[0] if len(outputs) == 1 else _expr.Tuple(outputs)
func = _function.Function(analysis.free_vars(outputs), outputs)
return IRModule.from_expr(func), self._params
def _parse_value_proto(self, value_proto):
"""Parse ValueProto or raw str."""
try:
name = value_proto.name
except AttributeError:
name = value_proto
return name
def _parse_dtype(self, value_proto, dtype):
"""Parse dtype."""
try:
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
return TENSOR_TYPE_TO_NP_TYPE[value_proto.type.tensor_type.elem_type].name
except AttributeError:
return dtype
def _parse_array(self, tensor_proto):
np_array = get_numpy(tensor_proto).reshape(tuple(tensor_proto.dims))
return _nd.array(np_array)
def _parse_attr(self, attr_proto):
"""Convert a list of AttributeProto to a dict, with names as keys."""
attrs = {}
for a in attr_proto:
for f in ['f', 'i', 's']:
if a.HasField(f):
attrs[a.name] = getattr(a, f)
for f in ['floats', 'ints', 'strings']:
if list(getattr(a, f)):
assert a.name not in attrs, "Only one type of attr is allowed"
attrs[a.name] = tuple(getattr(a, f))
for f in ['t']:
if a.HasField(f):
attrs[a.name] = getattr(a, f)
for f in ['tensors']:
if list(getattr(a, f)):
assert a.name not in attrs, "Only one type of attr is allowed"
attrs[a.name] = tuple(getattr(a, f))
for f in ['g']:
if a.HasField(f):
raise NotImplementedError(
"Filed {} is not supported in relay.".format(f))
for f in ['graphs']:
if list(getattr(a, f)):
raise NotImplementedError(
"Filed {} is not supported in relay.".format(f))
if a.name not in attrs:
raise ValueError("Cannot parse attribute: \n{}\n.".format(a))
return attrs
def _convert_operator(self,
op_name,
inputs,
attrs,
opset):
"""Convert ONNX operator into a Relay operator.
The converter must specify conversions explicitly for incompatible name, and
apply handlers to operator attributes.
Parameters
----------
op_name : str
Operator name, such as Convolution, FullyConnected
inputs : list of tvm.relay.function.Function
List of inputs.
attrs : dict
Dict of operator attributes
opset : int
Opset version
Returns
-------
sym : tvm.relay.function.Function
Converted relay function
"""
convert_map = _get_convert_map(opset)
if op_name in _identity_list:
sym = get_relay_op(op_name)(*inputs, **attrs)
elif op_name in convert_map:
sym = convert_map[op_name](inputs, attrs, self._params)
else:
raise NotImplementedError(
"Operator {} not implemented.".format(op_name))
return sym
def _fix_outputs(self, op_name, outputs):
"""A hack to handle dropout or similar operator that have more than one out
in ONNX.
"""
if op_name == 'Dropout':
if len(outputs) == 1:
return outputs
# TODO(zhreshold): support dropout mask?
outputs = outputs[:-1]
return outputs
def from_onnx(model,
shape=None,
dtype="float32",
opset=None):
"""Convert a ONNX model into an equivalent Relay Function.
ONNX graphs are represented as Python Protobuf objects.
The companion parameters will be handled automatically.
    However, the input names from the onnx graph are vague, mixing inputs and
    network weights/bias such as "1", "2"...
    For convenience, we rename the `real` input names to "input_0",
    "input_1"... and rename parameters to "param_0", "param_1"...
Parameters
----------
model : protobuf object
ONNX ModelProto after ONNX v1.1.0
shape : dict of str to tuple, optional
The input shape to the graph
dtype : str or dict of str to str
The input types to the graph
opset : int, optional
Override to autodetected opset.
This can be helpful for some testing.
Returns
-------
mod : tvm.IRModule
The relay module for compilation
params : dict of str to tvm.nd.NDArray
The parameter dict to be used by relay
"""
try:
import onnx
if hasattr(onnx.checker, 'check_model'):
# try use onnx's own model checker before converting any model
try:
onnx.checker.check_model(model)
except onnx.onnx_cpp2py_export.checker.ValidationError as e:
import warnings
# the checker is a bit violent about errors, so simply print warnings here
warnings.warn(str(e))
except ImportError:
pass
g = GraphProto(shape, dtype)
graph = model.graph
if opset is None:
try:
opset = model.opset_import[0].version if model.opset_import else 1
except AttributeError:
opset = 1
mod, params = g.from_onnx(graph, opset)
return mod, params
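# --- Added illustrative sketch (not part of the original module) ---------------
# Minimal usage of the converter above. The model path, input name and shape are
# hypothetical placeholders; any real ONNX model with a matching shape dict works.
def _example_from_onnx_usage():
    import onnx  # assumes the onnx package is installed
    onnx_model = onnx.load("model.onnx")          # hypothetical model file
    shape_dict = {"input_0": (1, 3, 224, 224)}    # hypothetical input name/shape
    mod, params = from_onnx(onnx_model, shape=shape_dict)
    return mod, params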
|
the-stack_0_10580 | import numpy as np
import EggNet
def _read_np_tensor(weight_file: str):
if weight_file.endswith('.npy'):
is_binary = True
elif weight_file.endswith('.txt'):
is_binary = False
else:
raise NotImplementedError()
if is_binary:
return np.load(weight_file)
else:
with open(weight_file) as f:
init_line = f.readline()
# assume the first line is a comment
assert init_line[0] == '#'
p1 = init_line.find('(')
p2 = init_line.find(')')
dims = [int(ds) for ds in init_line[p1 + 1:p2].split(sep=',') if len(ds) > 0]
return np.loadtxt(weight_file).reshape(dims)
def read_np_torch(ordering='BCHW', binary=True, target_dtype=None):
if binary:
d = {
'cn1.b': _read_np_tensor('np/t_0.0.bias.npy'),
'cn1.k': _read_np_tensor('np/t_0.0.weight.npy'),
'cn2.b': _read_np_tensor('np/t_3.0.bias.npy'),
'cn2.k': _read_np_tensor('np/t_3.0.weight.npy'),
'fc1.b': _read_np_tensor('np/t_7.0.bias.npy'),
'fc1.w': _read_np_tensor('np/t_7.0.weight.npy'),
'fc2.b': _read_np_tensor('np/t_9.bias.npy'),
'fc2.w': _read_np_tensor('np/t_9.weight.npy'),
}
else:
d = {
'cn1.b': _read_np_tensor('np/t_0.0.bias.txt'),
'cn1.k': _read_np_tensor('np/t_0.0.weight.txt'),
'cn2.b': _read_np_tensor('np/t_3.0.bias.txt'),
'cn2.k': _read_np_tensor('np/t_3.0.weight.txt'),
'fc1.b': _read_np_tensor('np/t_7.0.bias.txt'),
'fc1.w': _read_np_tensor('np/t_7.0.weight.txt'),
'fc2.b': _read_np_tensor('np/t_9.bias.txt'),
'fc2.w': _read_np_tensor('np/t_9.weight.txt'),
}
    if ordering == 'BCHW':
        pass
    elif ordering == 'BHWC':
k1 = np.moveaxis(d['cn1.k'], [0, 1], [3, 2])
k2 = np.moveaxis(d['cn2.k'], [0, 1], [3, 2])
# k1 = reorder(d['cn1.k'])
# k2 = reorder(d['cn2.k'])
assert k1[1, 2, 0, 4] == d['cn1.k'][4, 0, 1, 2]
assert k2[1, 2, 3, 4] == d['cn2.k'][4, 3, 1, 2]
d['cn1.k'] = k1
d['cn2.k'] = k2
d['fc1.w'] = np.moveaxis(d['fc1.w'], 0, 1)
d['fc2.w'] = np.moveaxis(d['fc2.w'], 0, 1)
x = d['fc1.w'].transpose()
# Hack: Because torch positions the channels first, we have to reorder the following FC weights
# Input Tensor is: [batch, channels, 7, 7]
# Reshaped to: [batch, channels * 7 * 7]
x = x.reshape((-1, 32, 7, 7))
x = np.moveaxis(x, 1, 3)
x = np.reshape(x, (32, -1))
x = x.transpose()
d['fc1.w'] = x
else:
raise NotImplementedError('Expected ordering to be "BCHW" or "BHWC" but is: {}'.format(ordering))
if target_dtype is not None:
d_old = d
d = {}
for key, values in d_old.items():
d[key] = values.astype(target_dtype)
return d
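# --- Added illustrative sketch (not part of the original module) ---------------
# Small self-contained check of the BCHW -> BHWC kernel reordering used above:
# np.moveaxis sends the output/input channel axes to the back, matching the
# asserts in read_np_torch. The shapes below are arbitrary toy values.
def _example_kernel_reorder_check():
    k_bchw = np.arange(5 * 2 * 3 * 3).reshape(5, 2, 3, 3)  # (Co, Ci, H, W)
    k_bhwc = np.moveaxis(k_bchw, [0, 1], [3, 2])            # (H, W, Ci, Co)
    assert k_bhwc.shape == (3, 3, 2, 5)
    assert k_bhwc[1, 2, 0, 4] == k_bchw[4, 0, 1, 2]
    return k_bhwc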
def read_np_keras(binary=True, target_dtype=None):
if binary:
d = {
'cn1.b': _read_np_tensor('np/k_cn1.bias.npy'),
'cn1.k': _read_np_tensor('np/k_cn1.weight.npy'),
'cn2.b': _read_np_tensor('np/k_cn2.bias.npy'),
'cn2.k': _read_np_tensor('np/k_cn2.weight.npy'),
'fc1.b': _read_np_tensor('np/k_fc1.bias.npy'),
'fc1.w': _read_np_tensor('np/k_fc1.weight.npy'),
'fc2.b': _read_np_tensor('np/k_fc2.bias.npy'),
'fc2.w': _read_np_tensor('np/k_fc2.weight.npy'),
}
else:
d = {
'cn1.b': _read_np_tensor('np/k_0.0.bias.txt'),
'cn1.k': _read_np_tensor('np/k_0.0.weight.txt'),
'cn2.b': _read_np_tensor('np/k_3.0.bias.txt'),
'cn2.k': _read_np_tensor('np/k_3.0.weight.txt'),
'fc1.b': _read_np_tensor('np/k_7.0.bias.txt'),
'fc1.w': _read_np_tensor('np/k_7.0.weight.txt'),
'fc2.b': _read_np_tensor('np/k_9.bias.txt'),
'fc2.w': _read_np_tensor('np/k_9.weight.txt'),
}
if target_dtype is not None:
d_old = d
d = {}
for key, values in d_old.items():
d[key] = values.astype(target_dtype)
return d
def reorder(x):
co, ci, h, w, = x.shape
x_ = np.zeros(shape=(h, w, ci, co), dtype=x.dtype)
for hx in range(h):
for wx in range(w):
for cix in range(ci):
for cox in range(co):
x_[hx, wx, cix, cox] = x[cox, cix, hx, wx]
return x_
def perform_real_quant(weight_dict,
in_bits: np.ndarray, in_frac: np.ndarray,
w_bits: np.ndarray, w_frac: np.ndarray,
out_bits: np.ndarray, out_frac: np.ndarray,
activations_signed: np.ndarray,
                       additions=np.array([16, 32, 1568, 32]),
traget_dtype=np.int32):
"""
Performs real quantization, meaning all values will be rounded to
their fixed point representation
    Args:
        weight_dict: Dictionary containing numpy arrays (weights and biases)
        in_bits, in_frac: Per-layer bit widths and fraction bits of the layer inputs
        w_bits, w_frac: Per-layer bit widths and fraction bits of the weights
        out_bits, out_frac: Per-layer bit widths and fraction bits of the layer outputs
        activations_signed: Per-layer flags, True where the activation is signed
        additions: Per-layer addition counts (not used by the current implementation)
        traget_dtype: Integer dtype of the returned quantized values
    Returns:
        Tuple of (quantized weight dict, per-layer right-shift amounts, options dict)
"""
# Use short notations
ia_b = in_bits
ia_f = in_frac
w_b = w_bits
w_f = w_frac
oa_b = out_bits
oa_f = out_frac
# Check if consistent: input2 must be output 1
assert np.all(oa_b[:-1] == ia_b[1:])
assert np.all(oa_f[:-1] == ia_f[1:])
# Temporary bits and fractions while adding (for bias)
t_b = ia_b + w_b
t_f = ia_f + w_f
shift = t_f - oa_f
# v = Q * 2^-m
# Q = v * 2^m
# Scaling for weights
bias_max = 2.0 ** (t_b - 1) - 1
bias_min = -2.0 ** (t_b - 1)
bias_scale = 1 / 2 ** t_f
w_max = 2.0 ** (w_bits - 1) - 1
w_min = -2.0 ** (w_bits - 1)
w_scale = 1 / 2 ** w_f
out_max = np.zeros_like(out_bits)
out_min = np.zeros_like(out_bits)
out_scale = np.zeros_like(out_bits)
for i in range(len(out_bits)):
if activations_signed[i]:
out_max[i] = 2.0 ** (oa_b[i] - 1) - 1
out_min[i] = -2.0 ** (oa_b[i] - 1)
out_scale[i] = 1 / 2 ** oa_f[i]
else:
out_max[i] = 2.0 ** (oa_b[i]) - 1
out_min[i] = 0
out_scale[i] = 1 / 2 ** oa_f[i]
options = {
'input_bits': ia_b,
'input_frac': ia_f,
'output_bits': oa_b,
'output_frac': oa_f,
'weight_bits': w_b,
'weight_frac': w_f,
'w_max': w_max,
'w_min': w_min,
'w_scale': w_scale,
'bias_max': bias_max,
'bias_min': bias_min,
'bias_scale': bias_scale,
'out_max': out_max,
'out_min': out_min,
'out_max_f': out_max * out_scale,
'out_min_f': out_min * out_scale,
'out_scale': out_scale,
'shifts': shift
}
# ToDo: This becomes a bit hacky
wi = 0
bi = 0
d_out = {}
for i, (key, value) in enumerate(weight_dict.items()):
# check key if it is weight or bias
if key.endswith('.b'):
w = np.clip(value / bias_scale[bi], a_min=bias_min[bi], a_max=bias_max[bi]).round().astype(traget_dtype)
bi += 1
else:
w = np.clip(value / w_scale[wi], a_min=w_min[wi], a_max=w_max[wi]).round().astype(traget_dtype)
wi += 1
# Those are now ints, convert back to floats
d_out[key] = w
return d_out, shift, options
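# --- Added illustrative sketch (not part of the original module) ---------------
# How the per-layer bit/fraction arrays might be wired up for the four-layer
# LeNet handled here (cn1, cn2, fc1, fc2). Every concrete bit width below is a
# made-up placeholder, not a value taken from the original project.
def _example_real_quant_usage():
    weights = read_np_keras(binary=True)  # expects the weight files under np/
    in_bits = np.array([8, 8, 8, 8])
    in_frac = np.array([8, 4, 4, 4])
    w_bits = np.array([8, 8, 8, 8])
    w_frac = np.array([7, 7, 7, 7])
    out_bits = np.array([8, 8, 8, 8])
    out_frac = np.array([4, 4, 4, 4])
    signed = np.array([False, False, False, True])
    qweights, shift, options = perform_real_quant(
        weights, in_bits, in_frac, w_bits, w_frac, out_bits, out_frac, signed)
    return qweights, shift, options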
def quant2float(qweights, options):
w_scale = options['w_scale']
bias_scale = options['bias_scale']
wi = 0
bi = 0
d_out = {}
for i, (key, value) in enumerate(qweights.items()):
# check key if it is weight or bias
if key.endswith('.b'):
w = value * bias_scale[bi]
bi += 1
else:
w = value * w_scale[wi]
wi += 1
# Those are now ints, convert back to floats
d_out[key] = w
return d_out
def perform_fake_quant(weight_dict, target_bits, frac_bits, target_dtype=np.float64):
"""
Performs fake quantization, meaning all values will be rounded to
    their fixed point representation
Args:
weight_dict: Dictionary containing numpy arrays
target_bits: Target bit length of the integer values
frac_bits: Target fraction bit width
Returns:
Dictionary with original keys, containing quantized values
"""
assert target_bits > frac_bits
value_bits = target_bits - frac_bits
a_max = 2.0 ** (value_bits - 1) - 1
a_min = -2.0 ** (value_bits - 1)
scale = 1 / 2 ** frac_bits
d_out = {}
for key, value in weight_dict.items():
# round weights
w = np.clip(value / scale, a_min=a_min, a_max=a_max).round()
w = (w * scale).astype(dtype=target_dtype)
# All values
d_out[key] = w
return d_out
def init_network_from_weights(qweights, from_torch):
our_net = EggNet.LeNet(reshape_torch=from_torch)
our_net.cn1.weights = qweights['cn1.k']
our_net.cn1.bias = qweights['cn1.b']
our_net.cn2.weights = qweights['cn2.k']
our_net.cn2.bias = qweights['cn2.b']
our_net.fc1.weights = qweights['fc1.w']
our_net.fc1.bias = qweights['fc1.b']
our_net.fc2.weights = qweights['fc2.w']
our_net.fc2.bias = qweights['fc2.b']
return our_net
def init_fake_network_from_weights(qweights, shift, options):
our_net = EggNet.FpiLeNet(qweights, shifts=shift, options=options, real_quant=False)
return our_net
def init_quant_network_from_weights(qweights, shift, options):
our_net = EggNet.FpiLeNet(qweights, shifts=shift, options=options, real_quant=True)
return our_net
def evaluate_network_full(batch_size, network, train_images, train_labels,
images_as_int=False, n_batches=None, intermediates=False):
i = 0
total_correct = 0
confusion_matrix = np.zeros(shape=(10, 10))
INTERESTING_LAYER_INDICES = (1, 3, 6, 7)
y_layers_out = None
while i < train_images.shape[0]:
if images_as_int:
x = train_images[i:i + batch_size].astype(np.int32)
else:
x = train_images[i:i + batch_size] / 255.0
y_ = train_labels[i:i + batch_size]
y, y_layers = network.forward_intermediate(x)
if intermediates:
if y_layers_out is None:
y_layers_out = y_layers
else:
for ix in INTERESTING_LAYER_INDICES:
y_layers_out[ix] = np.concatenate((y_layers_out[ix], y_layers[ix]), axis=0)
y = y.argmax(-1)
# ToDo: Might be a faster way
for pred, label in zip(y, y_):
confusion_matrix[pred, label] = confusion_matrix[pred, label] + 1
total_correct += np.sum(y == y_)
i += batch_size
if n_batches is not None and i / batch_size > n_batches:
break
accuracy = total_correct / train_images.shape[0]
if intermediates:
# Remove other layers
y_layers_out = [y_layers_out[i] for i in INTERESTING_LAYER_INDICES]
return accuracy, confusion_matrix, y_layers_out
else:
return accuracy, confusion_matrix
def evaluate_network(batch_size, network, train_images, train_labels):
    a, _ = evaluate_network_full(batch_size, network, train_images, train_labels)
return a
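# --- Added illustrative sketch (not part of the original module) ---------------
# End-to-end evaluation of a quantized network. 'test_images'/'test_labels' are
# assumed to be MNIST-style arrays obtained elsewhere; qweights/shift/options are
# the outputs of perform_real_quant as sketched above.
def _example_quant_evaluation(test_images, test_labels, qweights, shift, options):
    net = init_quant_network_from_weights(qweights, shift, options)
    accuracy, confusion = evaluate_network_full(
        batch_size=50, network=net,
        train_images=test_images, train_labels=test_labels,
        images_as_int=True)
    return accuracy, confusion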
def plot_confusion_matrix(cm: np.ndarray, title='Confusion matrix', target_names=None, normalize=True,
cmap=None, filename=None):
"""
Plot a confusion matrix.
Taken from: https://www.kaggle.com/grfiv4/plot-a-confusion-matrix
Args:
cm: Confusion matrix itself, must be a 2D numpy array
title: Plot title
target_names: Axes labels
normalize: True, for normalized values, false otherwise
cmap: Optional color map
filename: Optional filename if the plot should be saved
Returns:
"""
assert cm.ndim == 2
import matplotlib.pyplot as plt
import itertools
accuracy = np.trace(cm) / float(np.sum(cm))
misclass = 1 - accuracy
if cmap is None:
cmap = plt.get_cmap('Blues')
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
fig = plt.figure(figsize=(8, 6))
ax = plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
if target_names is not None:
tick_marks = np.arange(len(target_names))
plt.xticks(tick_marks, target_names, rotation=45)
plt.yticks(tick_marks, target_names)
thresh = cm.max() / 1.5 if normalize else cm.max() / 2
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
if normalize:
plt.text(j, i, "{:0.2f}".format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
else:
plt.text(j, i, "{:,}".format(cm[i, j]),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label\naccuracy={:0.4f}; misclass={:0.4f}'.format(accuracy, misclass))
plt.show()
if filename is not None:
fig.savefig(fname=filename, dpi=300)
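# --- Added illustrative sketch (not part of the original module) ---------------
# Quick demonstration of the plotting arguments with a random 10x10 matrix; the
# digit labels are only for illustration.
def _example_plot_confusion_matrix():
    rng = np.random.default_rng(0)
    cm = rng.integers(1, 100, size=(10, 10)).astype(float)
    plot_confusion_matrix(cm, title='Demo confusion matrix',
                          target_names=[str(i) for i in range(10)], normalize=True)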
def plot_convolutions(kernel, nrows=4, c_out=None, order='keras', title='Convolution Kernels', target_names=None,
normalize=True,
labels=None,
cmap=None, filename=None):
    if order == 'keras':
        # Keras order is fine
        pass
    elif order == 'torch':
# Convert to keras
# Tensor is: [Co, Ci, H, W]
# Tensor should be: [H, W, Ci, Co]
# Use numpy for conversion
kernel = np.moveaxis(kernel, source=(0, 1), destination=(2, 3))
pass
else:
raise NotImplementedError(f'Not currently implemented. Should be "keras" or "torch" but is: {order}')
import matplotlib.pyplot as plt
fh, fw, ci, co = kernel.shape
ncols = ci // nrows + 1
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(8, 6))
    for c_out in range(co):
        for r in range(nrows):
            axes[r, 0].set_ylabel('')
            for c in range(ncols):
                axes[r, c].matshow(np.mean(kernel[:, :, :, c_out], axis=-1))
plt.show()
def plot_kernel_density():
"""
Check out:
https://stackoverflow.com/questions/30145957/plotting-2d-kernel-density-estimation-with-python
Returns:
"""
pass
|
the-stack_0_10582 | # -*- coding: utf-8 -*-
# Adapted from nlxio written by Bernard Willards <https://github.com/bwillers/nlxio>
import numpy as np
import nept
def load_events(filename, labels):
"""Loads neuralynx events
Parameters
----------
filename: str
labels: dict
With event name as the key and Neuralynx event string as the value.
Returns
-------
timestamps: dict
"""
nev_data = load_nev(filename)
idx = {key: [] for key in labels}
for key in labels:
for i, event in enumerate(nev_data['event_str']):
if event.decode() == labels[key]:
idx[key].append(i)
timestamps = {label: [] for label in labels}
times = nev_data['time'].astype(float) * 1e-6
for label in labels:
timestamps[label] = times[idx[label]]
return timestamps
def load_lfp(filename):
"""Loads LFP as nept.LocalFieldPotential
Parameters
----------
filename: str
Returns
-------
lfp: nept.LocalFieldPotential
"""
data, time = load_ncs(filename)
return nept.LocalFieldPotential(data, time)
def load_position(filename, pxl_to_cm):
"""Loads videotracking position as nept.Position
Parameters
----------
filename: str
pxl_to_cm: tuple
With (x, y) conversion factors
Returns
-------
position: nept.Position
"""
nvt_data = load_nvt(filename)
xy = np.hstack(np.array([nvt_data['x'] / pxl_to_cm[0], nvt_data['y'] / pxl_to_cm[1]])[..., np.newaxis])
return nept.Position(xy, nvt_data['time'])
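# --- Added illustrative sketch (not part of the original module) ---------------
# Typical session loading. The file names, the event label string and the
# pixel-to-cm factors are hypothetical placeholders for a real Neuralynx session.
def _example_load_session():
    events = load_events('Events.nev', labels={'start': 'Starting Recording'})
    lfp = load_lfp('CSC01.ncs')
    position = load_position('VT1.nvt', pxl_to_cm=(7.2, 7.2))
    return events, lfp, position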
def load_neuralynx_header(filename):
"""Loads a neuralynx header.
Parameters
----------
filename: str
Returns
-------
header: byte str
"""
with open(filename, 'rb') as f:
# Neuralynx files have a 16kbyte header
header = f.read(16 * 2**10)
return header
def load_ncs(filename):
"""Loads a neuralynx .ncs electrode file.
Parameters
----------
filename: str
Returns
-------
cscs: np.array
Voltage trace (V)
times: np.array
Timestamps (microseconds)
"""
with open(filename, 'rb') as f:
# Neuralynx files have a 16kbyte header
header = f.read(16 * 2**10)
# The format for a .ncs files according the the neuralynx docs is
# uint64 - timestamp in microseconds
# uint32 - channel number
# uint32 - sample freq
# uint32 - number of valid samples
# int16 x 512 - actual csc samples
dt = np.dtype([('time', '<Q'), ('channel', '<i'), ('freq', '<i'),
('valid', '<i'), ('csc', '<h', (512,))])
data = np.fromfile(f, dt)
# unpack the csc matrix
csc = data['csc'].reshape((data['csc'].size,))
data_times = data['time'] * 1e-6
# find the frequency
frequency = np.unique(data['freq'])
if len(frequency) > 1:
raise IOError("only one frequency allowed")
frequency = frequency[0]
# .ncs files have a timestamp for every ~512 data points.
# Here, we assign timestamps for each data sample based on the sampling frequency
        # for each of the 512 data points. Sometimes a block will have fewer than 512 data entries;
        # the number of valid entries is given in data['valid'].
this_idx = 0
n_block = 512.
offsets = np.arange(0, n_block / frequency, 1. / frequency)
times = np.zeros(csc.shape)
for i, (time, n_valid) in enumerate(zip(data_times, data['valid'])):
times[this_idx:this_idx + n_valid] = time + offsets[:n_valid]
this_idx += n_valid
# now find analog_to_digital conversion factor in the header
analog_to_digital = None
for line in header.split(b'\n'):
if line.strip().startswith(b'-ADBitVolts'):
analog_to_digital = np.array(float(line.split(b' ')[1].decode()))
if analog_to_digital is None:
raise IOError("ADBitVolts not found in .ncs header for " + filename)
cscs = csc * analog_to_digital
return cscs, times
def load_nev(filename):
"""Loads a neuralynx .nev file.
Parameters
----------
filename: str
Returns
-------
nev_data: dict
With time (uint64), id (uint16), nttl (uint16), and event_str (charx128) as the most usable keys.
"""
with open(filename, 'rb') as f:
# There's nothing useful in the header for .nev files, so skip past it
f.seek(2 ** 14)
# An event record is as follows:
# int16 - nstx - reserved
# int16 - npkt_id - id of the originating system
# int16 - npkt_data_size - this value should always be 2
# uint64 - timestamp, microseconds
# int16 - nevent_id - ID value for event
# int16 - nttl - decimal TTL value read from the TTL input port
# int16 - ncrc - record crc check, not used in consumer applications
# int16 - ndummy1 - reserved
# int16 - ndummy2 - reserved
# int32x8 - dnExtra - extra bit values for this event
# string(128) - event string
dt = np.dtype([('filler1', '<h', 3), ('time', '<Q'), ('id', '<h'),
('nttl', '<h'), ('filler2', '<h', 3), ('extra', '<i', 8),
('event_str', np.dtype('a128'))])
nev_data = np.fromfile(f, dt)
return nev_data
def load_ntt(filename):
"""Loads a neuralynx .ntt tetrode spike file.
Parameters
----------
filename: str
Returns
-------
timestamps: np.array
Spikes as (num_spikes, length_waveform, num_channels)
spikes: np.array
Spike times as uint64 (us)
frequency: float
Sampling frequency in waveforms (Hz)
Usage:
timestamps, spikes, frequency = load_ntt('TT13.ntt')
"""
    with open(filename, 'rb') as f:
        # A tetrode spike record is as follows:
        # uint64 - timestamp bytes 0:8
        # uint32 - acquisition entity number bytes 8:12
        # uint32 - classified cell number bytes 12:16
        # 8 * uint32 - params bytes 16:48
        # 32 * 4 * int16 - waveform points
        # hence the total record size is 2432 bits, 304 bytes
# header is 16kbyte, i.e. 16 * 2^10 = 2^14
header = f.read(16 * 2**10)
# Read the header and find the conversion factors / sampling frequency
analog_to_digital = None
frequency = None
for line in header.split(b'\n'):
if line.strip().startswith(b'-ADBitVolts'):
analog_to_digital = np.array(float(line.split(b' ')[1].decode()))
if line.strip().startswith(b'-SamplingFrequency'):
frequency = float(line.split(b' ')[1].decode())
f.seek(2 ** 14) # start of the spike, records
# Neuralynx write little endian for some dumb reason
        dt = np.dtype([('time', '<Q'), ('filler', '<i', 10),
('spikes', np.dtype('<h'), (32, 4))])
data = np.fromfile(f, dt)
if analog_to_digital is None:
raise IOError("ADBitVolts not found in .ntt header for " + filename)
if frequency is None:
raise IOError("Frequency not found in .ntt header for " + filename)
f.close()
return data['time'], data['spikes'] * analog_to_digital, frequency
def load_nvt(filename, remove_empty=True):
"""Loads a neuralynx .nvt file.
Parameters
----------
filename: str
remove_empty: bool
Returns
-------
nvt_data: dict
With time, x, and y as keys.
"""
with open(filename, 'rb') as f:
# Neuralynx files have a 16kbyte header
header = f.read(16 * 2**10)
# The format for .nvt files according the the neuralynx docs is
# uint16 - beginning of the record
# uint16 - ID for the system
# uint16 - size of videorec in bytes
# uint64 - timestamp in microseconds
# uint32 x 400 - points with the color bitfield values
# int16 - unused
# int32 - extracted X location of target
# int32 - extracted Y location of target
# int32 - calculated head angle in degrees clockwise from the positive Y axis
# int32 x 50 - colored targets using the same bitfield format used to extract colors earlier
dt = np.dtype([('filler1', '<h', 3), ('time', '<Q'), ('points', '<i', 400),
('filler2', '<h'), ('x', '<i'), ('y', '<i'), ('head_angle', '<i'),
('targets', '<i', 50)])
data = np.fromfile(f, dt)
nvt_data = dict()
nvt_data['time'] = data['time'] * 1e-6
nvt_data['x'] = np.array(data['x'], dtype=float)
nvt_data['y'] = np.array(data['y'], dtype=float)
nvt_data['targets'] = np.array(data['targets'], dtype=float)
empty_idx = (data['x'] == 0) & (data['y'] == 0)
for key in nvt_data:
if remove_empty:
nvt_data[key] = nvt_data[key][~empty_idx]
return nvt_data
|
the-stack_0_10585 | from pyscf import gto, dft
from automr import guess
#mf=guess.from_fch_simp("v2.fchk", xc='pbe0')
#mf2.verbose=9
#mf2.stability()
mol = gto.Mole(atom='''Cr 0.0 0.0 0.0; Cr 0.0 0.0 1.6''', basis='def2-tzvp', verbose=5).build()
mf = dft.RKS(mol)
mf.xc = 'pbe0'
mf.kernel()
mf2 = guess.check_stab(mf, newton=True, res=True)
|
the-stack_0_10586 | #!/usr/bin/env python3
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import sys
import platform
import subprocess
lib_env_key = "PATH" if platform.system() == "Windows" else "LD_LIBRARY_PATH"
if lib_env_key not in os.environ:
os.environ[lib_env_key] = ""
python_path_key = "PYTHONPATH"
if python_path_key not in os.environ:
os.environ[python_path_key] = ""
ov_frontend_path_key = "OV_FRONTEND_PATH"
if ov_frontend_path_key not in os.environ:
os.environ[ov_frontend_path_key] = ""
lib_path_orig = os.environ[lib_env_key]
python_path_orig = os.environ[python_path_key]
ov_frontend_path_orig = os.environ[ov_frontend_path_key]
def setup_env(module="", libs=[]):
"""
Update os.environ variables with given values.
:param module: path to python module
:param libs: list with paths to libraries
"""
os.environ[python_path_key] = os.pathsep.join([module, os.environ[python_path_key]])
os.environ[lib_env_key] = os.pathsep.join([*libs, os.environ[lib_env_key]])
if len(os.getenv(ov_frontend_path_key)) == 0:
os.environ[ov_frontend_path_key] = os.pathsep.join([*libs])
def reset_env():
"""
Reset os.environ variables to default values
"""
os.environ[python_path_key] = python_path_orig
os.environ[lib_env_key] = lib_path_orig
os.environ[ov_frontend_path_key] = ov_frontend_path_orig
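# Added illustrative sketch (not part of the original script): temporarily extend
# the environment for a subprocess that needs a custom Inference Engine build and
# restore the defaults afterwards. The paths below are placeholders.
def _example_setup_env_usage():
    setup_env(module="/opt/custom_ie/python", libs=["/opt/custom_ie/lib"])
    # ... run a subprocess here with env=os.environ ...
    reset_env()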
def try_to_import_ie(module="", libs=[], silent=False):
"""
    Check if the Inference Engine Python API modules exist; in case of success the
    environment will be set with the given values.
:param module: path to python module
:param libs: list with paths to libraries
:param silent: hide all output
"""
path_to_script = os.path.join(os.path.realpath(os.path.dirname(__file__)), 'check_ie_bindings.py')
# We need to execute python modules checker in subprocess to avoid issue with environment
# in case if previous import was unsuccessful it can fail further imports even if sys.path
# will be restored to initial default values.
# To pass environment to sub-process PATH/LD_LIBRARY_PATH and PYTHONPATH are used from
# os.environ that is set after setup_env()
setup_env(module=module, libs=libs)
cmd_args = [sys.executable, path_to_script, "--path_to_module", "PYTHONPATH" if module == "" else module]
if silent:
cmd_args.append("--silent")
status = subprocess.run(cmd_args, env=os.environ)
if status.returncode == 0:
return True
else:
reset_env()
return False
def find_ie_version(silent=False):
"""
    Tries to import the Inference Engine Python API bindings. In case of a successful import
    the PATH/LD_LIBRARY_PATH and PYTHONPATH environment variables will be set.
    These variables must be passed to the subprocess in order to execute IE python bindings.
Example:
if find_ie_version():
subprocess.run([sys.executable, path_to_script], env=os.environ)
"""
if try_to_import_ie(silent=silent):
return True
python_version = 'python{}.{}'.format(sys.version_info[0], sys.version_info[1])
script_path = os.path.realpath(os.path.dirname(__file__))
# Windows
bindings_paths_windows = [
# Local builds
{
"module": os.path.join(script_path, '../../../../../../bin/intel64/Release/python_api/', python_version),
"libs": [
os.path.join(script_path, '../../../../../../bin/intel64'),
os.path.join(script_path, '../../../../../../bin/intel64/Release'),
os.path.join(script_path, '../../../../../../temp/tbb/bin'),
]
},
{
"module": os.path.join(script_path, '../../../../../../bin/intel64/Debug/python_api/', python_version),
"libs": [
os.path.join(script_path, '../../../../../../bin/intel64'),
os.path.join(script_path, '../../../../../../bin/intel64/Debug'),
os.path.join(script_path, '../../../../../../temp/tbb/bin'),
]
},
]
# Linux / Darwin
bindings_paths_linux = [
# Local builds
{
"module": os.path.join(script_path, '../../../../../../bin/intel64/Release/lib/python_api/', python_version),
"libs": [
os.path.join(script_path, '../../../../../../bin/intel64/Release/lib'),
]
},
{
"module": os.path.join(script_path, '../../../../../../bin/intel64/RelWithDebInfo/lib/python_api/', python_version),
"libs": [
os.path.join(script_path, '../../../../../../bin/intel64/RelWithDebInfo/lib'),
]
},
{
"module": os.path.join(script_path, '../../../../../../bin/intel64/Debug/lib/python_api/', python_version),
"libs": [
os.path.join(script_path, '../../../../../../bin/intel64/Debug/lib'),
]
}
]
bindings_paths = bindings_paths_windows if platform.system() == "Windows" else bindings_paths_linux
for item in bindings_paths:
module = item['module']
if not os.path.exists(module):
continue
if try_to_import_ie(module=os.path.normpath(module), libs=item['libs'] if 'libs' in item else [], silent=silent):
return True
return False
if __name__ == "__main__":
if not find_ie_version():
exit(1)
|
the-stack_0_10587 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Primitives for dealing with datastore indexes.
Example index.yaml file:
------------------------
indexes:
- kind: Cat
ancestor: no
properties:
- name: name
- name: age
direction: desc
- kind: Cat
properties:
- name: name
direction: ascending
- name: whiskers
direction: descending
- kind: Store
ancestor: yes
properties:
- name: business
direction: asc
- name: owner
direction: asc
"""
from google.appengine.api import datastore_types
from google.appengine.api import validation
from google.appengine.api import yaml_errors
from google.appengine.api import yaml_object
from google.appengine.datastore import datastore_pb
from google.appengine.datastore import entity_pb
class Property(validation.Validated):
"""Representation for an individual property of an index.
Attributes:
name: Name of attribute to sort by.
direction: Direction of sort.
"""
ATTRIBUTES = {
'name': validation.Type(str, convert=False),
'direction': validation.Options(('asc', ('ascending',)),
('desc', ('descending',)),
default='asc'),
}
class Index(validation.Validated):
"""Individual index definition.
  Order of the properties determines a given index's sort priority.
  Attributes:
    kind: Datastore kind that the index belongs to.
    ancestor: Whether to include ancestors in the index.
properties: Properties to sort on.
"""
ATTRIBUTES = {
'kind': validation.Type(str, convert=False),
'ancestor': validation.Type(bool, convert=False, default=False),
'properties': validation.Optional(validation.Repeated(Property)),
}
class IndexDefinitions(validation.Validated):
"""Top level for index definition file.
Attributes:
indexes: List of Index definitions.
"""
ATTRIBUTES = {
'indexes': validation.Optional(validation.Repeated(Index)),
}
def ParseIndexDefinitions(document, open_fn=None):
"""Parse an individual index definitions document from string or stream.
Args:
document: Yaml document as a string or file-like stream.
open_fn: Function for opening files. Unused.
Raises:
EmptyConfigurationFile when the configuration file is empty.
MultipleConfigurationFile when the configuration file contains more than
one document.
Returns:
Single parsed yaml file if one is defined, else None.
"""
try:
return yaml_object.BuildSingleObject(IndexDefinitions, document)
except yaml_errors.EmptyConfigurationFile:
return None
def ParseMultipleIndexDefinitions(document):
"""Parse multiple index definitions documents from a string or stream.
Args:
document: Yaml document as a string or file-like stream.
Returns:
    A list of datastore_index.IndexDefinitions objects, one for each document.
"""
return yaml_object.BuildObjects(IndexDefinitions, document)
def IndexDefinitionsToKeys(indexes):
"""Convert IndexDefinitions to set of keys.
Args:
indexes: A datastore_index.IndexDefinitions instance, or None.
Returns:
A set of keys constructed from the argument, each key being a
tuple of the form (kind, ancestor, properties) where properties is
a tuple of (name, direction) pairs, direction being ASCENDING or
DESCENDING (the enums).
"""
keyset = set()
if indexes is not None:
if indexes.indexes:
for index in indexes.indexes:
keyset.add(IndexToKey(index))
return keyset
def IndexToKey(index):
"""Convert Index to key.
Args:
index: A datastore_index.Index instance (not None!).
Returns:
A tuple of the form (kind, ancestor, properties) where properties
is a tuple of (name, direction) pairs, direction being ASCENDING
or DESCENDING (the enums).
"""
props = []
if index.properties is not None:
for prop in index.properties:
if prop.direction == 'asc':
direction = ASCENDING
else:
direction = DESCENDING
props.append((prop.name, direction))
return index.kind, index.ancestor, tuple(props)
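# Added illustrative sketch (not part of the original SDK module): round trip from
# an index.yaml fragment to the normalized key set used for duplicate detection.
def _ExampleIndexYamlToKeys():
  document = ('indexes:\n'
              '- kind: Cat\n'
              '  properties:\n'
              '  - name: name\n'
              '  - name: age\n'
              '    direction: desc\n')
  definitions = ParseIndexDefinitions(document)
  return IndexDefinitionsToKeys(definitions)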
ASCENDING = datastore_pb.Query_Order.ASCENDING
DESCENDING = datastore_pb.Query_Order.DESCENDING
EQUALITY_OPERATORS = set((datastore_pb.Query_Filter.EQUAL,
))
INEQUALITY_OPERATORS = set((datastore_pb.Query_Filter.LESS_THAN,
datastore_pb.Query_Filter.LESS_THAN_OR_EQUAL,
datastore_pb.Query_Filter.GREATER_THAN,
datastore_pb.Query_Filter.GREATER_THAN_OR_EQUAL,
))
EXISTS_OPERATORS = set((datastore_pb.Query_Filter.EXISTS,
))
_DIRECTION_MAP = {
'asc': entity_pb.Index_Property.ASCENDING,
'ascending': entity_pb.Index_Property.ASCENDING,
'desc': entity_pb.Index_Property.DESCENDING,
'descending': entity_pb.Index_Property.DESCENDING,
}
def Normalize(filters, orders, properties):
""" Normalizes filter and order query components.
The resulting components have the same effect as the given components if used
in a query.
Returns:
(filter, orders) the reduced set of filters and orders
"""
eq_properties = set()
inequality_properties = set()
for f in filters:
if f.op() == datastore_pb.Query_Filter.IN and f.property_size() == 1:
f.set_op(datastore_pb.Query_Filter.EQUAL)
if f.op() in EQUALITY_OPERATORS:
eq_properties.add(f.property(0).name())
elif f.op() in INEQUALITY_OPERATORS:
inequality_properties.add(f.property(0).name())
eq_properties -= inequality_properties
remove_set = eq_properties.copy()
new_orders = []
for o in orders:
if o.property() not in remove_set:
remove_set.add(o.property())
new_orders.append(o)
orders = new_orders
remove_set.update(inequality_properties)
new_filters = []
for f in filters:
if f.op() not in EXISTS_OPERATORS:
new_filters.append(f)
continue
name = f.property(0).name()
if name not in remove_set:
remove_set.add(name)
new_filters.append(f)
for prop in properties:
if prop not in remove_set:
remove_set.add(prop)
new_filter = datastore_pb.Query_Filter()
new_filter.set_op(datastore_pb.Query_Filter.EXISTS)
new_prop = new_filter.add_property()
new_prop.set_name(prop)
new_prop.set_multiple(False)
new_prop.mutable_value()
new_filters.append(new_filter)
filters = new_filters
if datastore_types.KEY_SPECIAL_PROPERTY in eq_properties:
orders = []
new_orders = []
for o in orders:
if o.property() == datastore_types.KEY_SPECIAL_PROPERTY:
new_orders.append(o)
break
new_orders.append(o)
orders = new_orders
return (filters, orders)
def RemoveNativelySupportedComponents(filters, orders, properties):
""" Removes query components that are natively supported by the datastore.
The resulting filters and orders should not be used in an actual query.
Returns
(filters, orders) the reduced set of filters and orders
"""
(filters, orders) = Normalize(filters, orders, properties)
for f in filters:
if f.op() in EXISTS_OPERATORS:
return (filters, orders)
has_key_desc_order = False
if orders and orders[-1].property() == datastore_types.KEY_SPECIAL_PROPERTY:
if orders[-1].direction() == ASCENDING:
orders = orders[:-1]
else:
has_key_desc_order = True
if not has_key_desc_order:
for f in filters:
if (f.op() in INEQUALITY_OPERATORS and
f.property(0).name() != datastore_types.KEY_SPECIAL_PROPERTY):
break
else:
filters = [f for f in filters
if f.property(0).name() != datastore_types.KEY_SPECIAL_PROPERTY]
return (filters, orders)
def CompositeIndexForQuery(query):
"""Return the composite index needed for a query.
A query is translated into a tuple, as follows:
- The first item is the kind string, or None if we're not filtering
on kind (see below).
- The second item is a bool giving whether the query specifies an
ancestor.
- After that come (property, ASCENDING) pairs for those Filter
entries whose operator is EQUAL or IN. Since the order of these
doesn't matter, they are sorted by property name to normalize them
in order to avoid duplicates.
- After that comes at most one (property, ASCENDING) pair for a
Filter entry whose operator is on of the four inequalities. There
can be at most one of these.
- After that come all the (property, direction) pairs for the Order
entries, in the order given in the query. Exceptions:
(a) if there is a Filter entry with an inequality operator that matches
the first Order entry, the first order pair is omitted (or,
equivalently, in this case the inequality pair is omitted).
(b) if an Order entry corresponds to an equality filter, it is ignored
(since there will only ever be one value returned).
(c) if there is an equality filter on __key__ all orders are dropped
(since there will be at most one result returned).
(d) if there is an order on __key__ all further orders are dropped (since
keys are unique).
(e) orders on __key__ ASCENDING are dropped (since this is supported
natively by the datastore).
- Finally, if there are Filter entries whose operator is EXISTS, and
whose property names are not already listed, they are added, with
the direction set to ASCENDING.
This algorithm should consume all Filter and Order entries.
Additional notes:
- The low-level implementation allows queries that don't specify a
kind; but the Python API doesn't support this yet.
- If there's an inequality filter and one or more sort orders, the
first sort order *must* match the inequality filter.
- The following indexes are always built in and should be suppressed:
- query on kind only;
- query on kind and one filter *or* one order;
- query on ancestor only, without kind (not exposed in Python yet);
- query on kind and equality filters only, no order (with or without
ancestor).
- While the protocol buffer allows a Filter to contain multiple
properties, we don't use this. It is only needed for the IN operator
but this is (currently) handled on the client side, so in practice
each Filter is expected to have exactly one property.
Args:
query: A datastore_pb.Query instance.
Returns:
A tuple of the form (required, kind, ancestor, properties).
required: boolean, whether the index is required;
kind: the kind or None;
ancestor: True if this is an ancestor query;
properties: A tuple consisting of any number of:
- Sets of property names: Indicates these properties can appear in any
order with any direction.
- Tuples of (property name, direction) tuples. Indicating the properties
must appear in the exact order with the given direction. direction can
be None if direction does not matter.
"""
required = True
kind = query.kind()
ancestor = query.has_ancestor()
filters = query.filter_list()
orders = query.order_list()
for filter in filters:
assert filter.op() != datastore_pb.Query_Filter.IN, 'Filter.op()==IN'
nprops = len(filter.property_list())
assert nprops == 1, 'Filter has %s properties, expected 1' % nprops
if not kind:
required = False
filters, orders = RemoveNativelySupportedComponents(
filters, orders, query.property_name_list())
eq_filters = [f for f in filters if f.op() in EQUALITY_OPERATORS]
ineq_filters = [f for f in filters if f.op() in INEQUALITY_OPERATORS]
exists_filters = [f for f in filters if f.op() in EXISTS_OPERATORS]
assert (len(eq_filters) + len(ineq_filters) +
len(exists_filters)) == len(filters), 'Not all filters used'
if (kind and not ineq_filters and not exists_filters and
not orders):
names = set(f.property(0).name() for f in eq_filters)
if not names.intersection(datastore_types._SPECIAL_PROPERTIES):
required = False
ineq_property = None
if ineq_filters:
for filter in ineq_filters:
if (filter.property(0).name() ==
datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY):
continue
if not ineq_property:
ineq_property = filter.property(0).name()
else:
assert filter.property(0).name() == ineq_property
prefix = frozenset(f.property(0).name() for f in eq_filters)
postfix_ordered = [(order.property(), order.direction()) for order in orders]
postfix_unordered = frozenset(f.property(0).name() for f in exists_filters)
if ineq_property:
if orders:
assert ineq_property == orders[0].property()
else:
postfix_ordered.append((ineq_property, None))
property_count = len(prefix) + len(postfix_ordered) + len(postfix_unordered)
if kind and not ancestor and property_count <= 1:
required = False
if postfix_ordered:
prop, dir = postfix_ordered[0]
if prop == datastore_types.KEY_SPECIAL_PROPERTY and dir is DESCENDING:
required = True
return (required, kind, ancestor,
(prefix, tuple(postfix_ordered), postfix_unordered))
def GetRecommendedIndexProperties(properties):
"""Converts the properties returned by datastore_index.CompositeIndexForQuery
into a recommended list of index properties and directions.
All unordered components are sorted and assigned an ASCENDING direction. All
ordered components with out a direction are assigned an ASCEDNING direction.
Args:
properties: See datastore_index.CompositeIndexForQuery
Returns:
A tuple of (name, direction) tuples where:
name: a property name
direction: datastore_pb.Query_Order.ASCENDING or ...DESCENDING
"""
result = []
for sub_list in properties:
if isinstance(sub_list, (frozenset, set)):
for prop in sorted(sub_list):
result.append((prop, ASCENDING))
else:
for prop, dir in sub_list:
result.append((prop, dir if dir is not None else ASCENDING))
return tuple(result)
def MinimalCompositeIndexForQuery(query, index_defs):
"""Computes the minimal composite index for this query.
Unlike datastore_index.CompositeIndexForQuery, this function takes into
account indexes that already exist in the system.
Args:
query: the datastore_pb.Query to compute suggestions for
index_defs: a list of datastore_index.Index objects that already exist.
Returns:
None if no index is needed, otherwise the minimal index in the form
(is_most_efficient, kind, ancestor, properties). Where is_most_efficient is a
boolean denoting if the suggested index is the most efficient (i.e. the one
returned by datastore_index.CompositeIndexForQuery). kind, ancestor,
and properties are the same variables returned by
datastore_index.CompositeIndexForQuery.
"""
required, kind, ancestor, props = CompositeIndexForQuery(query)
if not required:
return None
prefix, postfix_ordered, postfix_unordered = props
remaining_dict = {}
for definition in index_defs:
if (kind != definition.kind or
(not ancestor and definition.ancestor)):
continue
_, _, index_props = IndexToKey(definition)
postfix_split = len(index_props) - 1
while (postfix_split >= 0 and
index_props[postfix_split][0] in postfix_unordered):
postfix_split -= 1
postfix_split += 1
index_postfix_unordered = index_props[postfix_split:]
if (set(prop for prop, _ in index_postfix_unordered) != postfix_unordered or
len(index_postfix_unordered) != len(postfix_unordered)):
continue
postfix_start = postfix_split - len(postfix_ordered)
if postfix_start < 0:
continue
index_postfix_ordered = index_props[postfix_start:postfix_split]
match = True
for (index_prop, index_dir), (prop, dir) in zip(index_postfix_ordered,
postfix_ordered):
if index_prop != prop or (dir is not None and index_dir != dir):
match = False
break
if not match:
continue
index_prefix = set(prop for prop, dir in index_props[:postfix_start])
if index_prefix - prefix:
continue
index_postfix = tuple(index_postfix_ordered + index_postfix_unordered)
remaining = remaining_dict.get(index_postfix)
if remaining is None:
remaining = prefix.copy(), ancestor
props_remaining, ancestor_remaining = remaining
props_remaining = props_remaining - index_prefix
if definition.ancestor:
ancestor_remaining = False
if not (props_remaining or ancestor_remaining):
return None
if (props_remaining, ancestor_remaining) == remaining:
continue
remaining_dict[index_postfix] = (props_remaining, ancestor_remaining)
if not remaining_dict:
return (True, kind, ancestor, props)
def calc_cost(minimal_props, minimal_ancestor):
    result = len(minimal_props)
    if minimal_ancestor:
      result += 2
    return result
minimal_postfix, remaining = remaining_dict.popitem()
minimal_props, minimal_ancestor = remaining
minimal_cost = calc_cost(minimal_props, minimal_ancestor)
  for index_postfix, (props_remaining, ancestor_remaining) in remaining_dict.items():
cost = calc_cost(props_remaining, ancestor_remaining)
if cost < minimal_cost:
minimal_cost = cost
minimal_postfix = index_postfix
minimal_props = props_remaining
minimal_ancestor = ancestor_remaining
return False, kind, minimal_ancestor, (frozenset(minimal_props),
minimal_postfix, frozenset())
def IndexYamlForQuery(kind, ancestor, props):
"""Return the composite index definition YAML needed for a query.
Given a query, the arguments for this method can be computed with:
_, kind, ancestor, props = datastore_index.CompositeIndexForQuery(query)
props = datastore_index.GetRecommendedIndexProperties(props)
Args:
kind: the kind or None
ancestor: True if this is an ancestor query, False otherwise
props: tuples of the form (name, direction) where:
name - a property name;
direction - datastore_pb.Query_Order.ASCENDING or ...DESCENDING;
Returns:
A string with the YAML for the composite index needed by the query.
"""
yaml = []
yaml.append('- kind: %s' % kind)
if ancestor:
yaml.append(' ancestor: yes')
if props:
yaml.append(' properties:')
for name, direction in props:
yaml.append(' - name: %s' % name)
if direction == DESCENDING:
yaml.append(' direction: desc')
return '\n'.join(yaml)
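# Added illustrative sketch (not part of the original SDK module): the suggested
# YAML for a kind with two sort properties, without building a datastore_pb.Query.
def _ExampleIndexYamlForQuery():
  props = (('name', ASCENDING), ('age', DESCENDING))
  return IndexYamlForQuery('Cat', False, props)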
def IndexXmlForQuery(kind, ancestor, props):
"""Return the composite index definition XML needed for a query.
Given a query, the arguments for this method can be computed with:
_, kind, ancestor, props = datastore_index.CompositeIndexForQuery(query)
props = datastore_index.GetRecommendedIndexProperties(props)
Args:
kind: the kind or None
ancestor: True if this is an ancestor query, False otherwise
props: tuples of the form (name, direction) where:
name - a property name;
direction - datastore_pb.Query_Order.ASCENDING or ...DESCENDING;
Returns:
A string with the XML for the composite index needed by the query.
"""
xml = []
xml.append('<datastore-index kind="%s" ancestor="%s">'
% (kind, 'true' if ancestor else 'false'))
for name, direction in props:
xml.append(' <property name="%s" direction="%s" />'
% (name, 'asc' if direction == ASCENDING else 'desc'))
xml.append('</datastore-index>')
return '\n'.join(xml)
def IndexDefinitionToProto(app_id, index_definition):
"""Transform individual Index definition to protocol buffer.
Args:
app_id: Application id for new protocol buffer CompositeIndex.
index_definition: datastore_index.Index object to transform.
Returns:
New entity_pb.CompositeIndex with default values set and index
information filled in.
"""
proto = entity_pb.CompositeIndex()
proto.set_app_id(app_id)
proto.set_id(0)
proto.set_state(entity_pb.CompositeIndex.WRITE_ONLY)
definition_proto = proto.mutable_definition()
definition_proto.set_entity_type(index_definition.kind)
definition_proto.set_ancestor(index_definition.ancestor)
if index_definition.properties is not None:
for prop in index_definition.properties:
prop_proto = definition_proto.add_property()
prop_proto.set_name(prop.name)
prop_proto.set_direction(_DIRECTION_MAP[prop.direction])
return proto
def IndexDefinitionsToProtos(app_id, index_definitions):
"""Transform multiple index definitions to composite index records
Args:
app_id: Application id for new protocol buffer CompositeIndex.
index_definition: A list of datastore_index.Index objects to transform.
Returns:
    A list of transformed entity_pb.CompositeIndex entities with default values
set and index information filled in.
"""
return [IndexDefinitionToProto(app_id, index)
for index in index_definitions]
def ProtoToIndexDefinition(proto):
"""Transform individual index protocol buffer to index definition.
Args:
proto: An instance of entity_pb.CompositeIndex to transform.
Returns:
A new instance of datastore_index.Index.
"""
properties = []
proto_index = proto.definition()
for prop_proto in proto_index.property_list():
prop_definition = Property(name=prop_proto.name())
if prop_proto.direction() == entity_pb.Index_Property.DESCENDING:
prop_definition.direction = 'descending'
properties.append(prop_definition)
index = Index(kind=proto_index.entity_type(), properties=properties)
if proto_index.ancestor():
index.ancestor = True
return index
def ProtosToIndexDefinitions(protos):
"""Transform multiple index protocol buffers to index definitions.
Args:
A list of entity_pb.Index records.
"""
return [ProtoToIndexDefinition(definition) for definition in protos]
|
the-stack_0_10590 | #!/usr/bin/env python
from glob import glob
import sipprcommon.runMetadata as runMetadata
from sipprcommon.offhours import Offhours
from sipprcommon.accessoryfunctions.accessoryFunctions import *
# Import ElementTree - try first to import the faster C version, if that doesn't
# work, try to import the regular version
try:
import xml.etree.cElementTree as ElementTree
except ImportError:
import xml.etree.ElementTree as ElementTree
__author__ = 'adamkoziol'
class CreateFastq(object):
def createfastq(self):
"""Uses bcl2fastq to create .fastq files from a MiSeqRun"""
from time import sleep
from subprocess import call
# Initialise samplecount
samplecount = 0
# If the fastq destination folder is not provided, make the default value of :path/:miseqfoldername
self.fastqdestination = self.fastqdestination if self.fastqdestination else self.path + self.miseqfoldername
# Make the path
make_path(self.fastqdestination)
# Initialise variables for storing index information
index = ''
indexlength = int()
# bcl2fastq requires an older version of the sample sheet, this recreates the required version
# Create the new sample sheet
with open(os.path.join(self.fastqdestination, 'SampleSheet_modified.csv'), "w") as modifiedsamplesheet:
# Write the required headings to the file
modifiedsamplesheet.write(
"FCID,Lane,SampleID,SampleRef,Index,Description,Control,Recipe,Operator,SampleProject\n")
for strain in self.samples:
# Create a combined index of index1-index2
try:
strain.run.modifiedindex = '{}-{}'.format(strain.run.index, strain.run.index2)
indexlength = 16
index = 'I8,I8'
except KeyError:
strain.run.modifiedindex = strain.run.index
indexlength = 6
index = 'I6'
# The list of items to print to each line of the modified sample sheet
printlist = [self.flowcell, '1', strain.name, str(strain.run.SampleNumber), strain.run.modifiedindex,
strain.run.Description, 'N', 'NA',
strain.run.InvestigatorName, self.projectname]
modifiedsamplesheet.write('{}\n'.format(",".join(printlist)))
samplecount += 1
# Set :forward/reverse length to :header.forward/reverse length if the argument is not provided, or it's 'full',
# otherwise use the supplied argument
self.forwardlength = self.metadata.header.forwardlength if self.forwardlength.lower()\
== 'full' else self.forwardlength
# Set :reverselength to :header.reverselength
self.reverselength = self.metadata.header.reverselength if self.reverselength.lower() \
== 'full' else self.reverselength
        # The number of cycles required is the number of forward reads + the index (8) + the second index (8)
# Also set the basemask variable as required
if self.reverselength != '0':
self.readsneeded = int(self.forwardlength) + int(self.reverselength) + indexlength
basemask = "Y{}n*,{},Y{}n*".format(self.forwardlength, index, self.reverselength)
nohup = "nohup make -j 16 > nohup.out"
else:
# + 1
self.readsneeded = int(self.forwardlength) + indexlength
basemask = "Y{}n*,{},n*".format(self.forwardlength, index)
nohup = "nohup make -j 16 r1 > nohup.out"
# Handle plurality appropriately
samples = 'samples' if samplecount > 1 else 'sample'
number = 'are' if samplecount > 1 else 'is'
printtime('There {} {} {} in this run. '
'Running fastq creating module with the following parameters:\n'
'MiSeqPath: {},\n'
'MiSeqFolder: {},\n'
'Fastq destination: {},\n'
'SampleSheet: {}'
.format(number, samplecount, samples, self.miseqpath, self.miseqfolder,
self.fastqdestination, os.path.join(self.fastqdestination, 'SampleSheet_modified.csv')),
self.start)
# Count the number of completed cycles in the run of interest
cycles = glob(os.path.join(self.miseqpath, self.miseqfolder, 'Data', 'Intensities', 'BaseCalls', 'L001', 'C*'))
while len(cycles) < self.readsneeded:
printtime('Currently at {} cycles. Waiting until the MiSeq reaches cycle {}'.format(len(cycles),
self.readsneeded), self.start)
sleep(300)
cycles = glob(os.path.join(self.miseqpath, self.miseqfolder,
'Data', 'Intensities', 'BaseCalls', 'L001', 'C*'))
        # configureBclToFastq requires :self.miseqfolder/Data/Intensities/BaseCalls/config.xml in order to work
# When you download runs from BaseSpace, this file is not provided. There is an empty config.xml file that
# can be populated with run-specific values and moved to the appropriate folder
if not os.path.isfile(os.path.join(self.miseqfolder, 'Data', 'Intensities', 'BaseCalls', 'config.xml')):
self.configfilepopulator()
# Define the bcl2fastq system call
bclcall = "configureBclToFastq.pl --input-dir {}Data/Intensities/BaseCalls " \
"--output-dir {} --force --sample-sheet {}/SampleSheet_modified.csv " \
"--mismatches 1 --no-eamss --fastq-cluster-count 0 --compression none --use-bases-mask {}"\
.format(self.miseqfolder, self.fastqdestination, self.fastqdestination, basemask)
# Define the nohup system call
nohupcall = "cd {} && {}".format(self.fastqdestination, nohup)
fnull = open(os.devnull, 'wb')
if not os.path.isdir(os.path.join(self.fastqdestination, 'Project_{}'.format(self.projectname))):
# Call configureBclToFastq.pl
printtime('Running bcl2fastq', self.start)
# Run the commands
call(bclcall, shell=True, stdout=fnull, stderr=fnull)
call(nohupcall, shell=True, stdout=fnull, stderr=fnull)
# Populate the metadata
for sample in self.metadata.samples:
sample.commands = GenObject()
sample.commands.nohup = nohupcall
sample.commands.bcl = bclcall
sample.run.forwardlength = self.forwardlength
sample.run.reverselength = self.reverselength
# Copy the fastq files to a central folder so they can be processed
self.fastqmover()
def configfilepopulator(self):
"""Populates an unpopulated config.xml file with run-specific values and creates
the file in the appropriate location"""
# Set the number of cycles for each read and index using the number of reads specified in the sample sheet
self.forwardlength = self.metadata.header.forwardlength
self.reverselength = self.metadata.header.reverselength
# Create a list of lists containing [cycle start, cycle end, and :runid] for each of forward reads, index 1
# index 2, and reverse reads
cycles = [[1, self.forwardlength, self.runid],
[self.forwardlength + 1, self.forwardlength + 8, self.runid],
[self.forwardlength + 9, self.forwardlength + 16, self.runid],
[self.forwardlength + 17, self.forwardlength + 16 + self.reverselength, self.runid]]
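        # For example (illustrative values), with 250 bp forward and reverse reads the cycle ranges are
        # [1, 250], [251, 258], [259, 266], and [267, 516]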
# A dictionary of parameters (keys) and the values to use when repopulating the config file
parameters = {'RunFolder': self.runid, 'RunFolderDate': self.metadata.date.replace("-", ""),
'RunFolderId': self.metadata.runnumber, 'RunFlowcellId': self.metadata.flowcell}
# Load the xml file using element tree
config = ElementTree.parse(os.path.join(self.homepath, 'config.xml'))
# Get the root of the tree
configroot = config.getroot()
# The run node is the only child node of the root
for run in configroot:
            # Iterate through the child nodes. There are three node sections that must be populated
for child in run:
# Find the cycles tag
if child.tag == 'Cycles':
# Set the attributes with a dictionary containing the total reads
child.attrib = {'Last': '{}'.format(self.forwardlength + 16 + self.reverselength),
'Number': '{}'.format(self.totalreads), 'First': '1'}
elif child.tag == 'RunParameters':
# Name the child as runparameter for easier coding
runparameters = child
for runparameter in runparameters:
# This replaces data in both 'ImagingReads' and 'Reads' nodes
if 'Reads' in runparameter.tag:
                            # Enumerate through the child cycle values of this read node
for indexcount, reads in enumerate(runparameter):
# The values for the index are 1, 2, 3, 4. Subtract one to get the index of the first
# list in cycles
index = int(runparameter.attrib['Index']) - 1
# Set the text value as the appropriate value from cycles
reads.text = str(cycles[index][indexcount])
# Populate the instrument value
if runparameter.tag == 'Instrument':
runparameter.text = self.instrument
# Iterate through the parameters in the parameter dictionary
for parameter in parameters:
# If the key is encountered
if runparameter.tag == parameter:
# Replace the text with the value
runparameter.text = parameters[parameter]
if 'Barcode' in runparameter.tag:
for cycle, barcode in enumerate(runparameter):
# Add the barcode cycles. These are the number of forward reads (+ 1 as the barcode
# starts 1 cycle after the first run) plus the current iterator
barcode.text = str(self.forwardlength + 1 + cycle)
# Write the modified config file to the desired location
config.write(os.path.join(self.miseqfolder, 'Data', 'Intensities', 'BaseCalls', 'config.xml'))
def fastqmover(self):
"""Links .fastq files created above to :sequencepath"""
from re import sub
import errno
# Create the project path variable
self.projectpath = os.path.join(self.fastqdestination, "Project_{}".format(self.projectname))
# Create the sequence path if necessary
make_path(self.sequencepath)
# Iterate through all the sample names
for sample in self.metadata.samples:
# Make directory variables
outputdir = os.path.join(self.sequencepath, sample.name)
sampledir = os.path.join(self.projectpath, 'Sample_{}'.format(sample.name))
# Glob all the .gz files in the subfolders - projectpath/Sample_:sample.name/*.gz
for fastq in sorted(glob(os.path.join(sampledir, '*.gz'))):
fastqname = os.path.basename(fastq)
# Set the name of the destination file renamed with the sample number.
# 2015-SEQ-1283_GGACTCCT-GCGTAAGA_L001_R1_001.fastq.gz is renamed:
# 2015-SEQ-1283_S1_L001_R1_001.fastq.gz
outputfile = os.path.join(self.sequencepath,
os.path.basename(
sub(
sample.run.modifiedindex,
'S{}'.format(sample.run.SampleNumber),
fastq)))
if not self.copy:
                    # Try/except block to symlink the .gz files into the sequence path
try:
# Symlink fastq file to the seq path
relativepath = os.path.relpath(sampledir, self.sequencepath)
os.symlink(
os.path.join(relativepath, fastqname),
os.path.join(outputfile)
)
# Except os errors
except OSError as exception:
# If there is an exception other than the file exists, raise it
if exception.errno != errno.EEXIST:
raise
else:
import shutil
# Copy the file if it doesn't already exist
if not os.path.isfile(outputfile):
shutil.copyfile(fastq, outputfile)
# Repopulate .strainfastqfiles with the freshly-linked/copied files
fastqfiles = glob(os.path.join(self.sequencepath, '{}*.fastq*'.format(sample.name)))
fastqfiles = [fastq for fastq in fastqfiles if 'trimmed' not in fastq]
# Populate the metadata object with the name/path of the fastq files
sample.general.fastqfiles = fastqfiles
# Save the outputdir to the metadata object
sample.run.outputdirectory = outputdir
sample.general.outputdirectory = outputdir
sample.general.bestassemblyfile = True
sample.general.trimmedcorrectedfastqfiles = sample.general.fastqfiles
sample.commands = GenObject()
def __init__(self, inputobject):
"""Initialise variables"""
self.path = inputobject.path
self.sequencepath = inputobject.sequencepath
self.start = inputobject.starttime
self.fastqdestination = inputobject.fastqdestination
self.homepath = inputobject.homepath
self.miseqout = str()
self.projectname = 'fastqCreation'
self.projectpath = str()
self.numreads = inputobject.numreads
self.forwardlength = inputobject.forwardlength
self.reverselength = inputobject.reverselength if self.numreads > 1 else '0'
self.readsneeded = 0
self.commit = inputobject.commit
self.copy = inputobject.copy
if inputobject.miseqpath:
self.miseqpath = os.path.join(inputobject.miseqpath, '')
else:
print('MiSeqPath argument is required in order to use the fastq creation module. Please provide this '
'argument and run the script again.')
quit()
self.customsamplesheet = inputobject.customsamplesheet
if self.customsamplesheet:
assert os.path.isfile(self.customsamplesheet), 'Cannot find custom sample sheet as specified {}' \
.format(self.customsamplesheet)
# Use the assertions module from offhours to validate whether provided arguments are valid
self.assertions = Offhours(inputobject)
self.assertions.assertpathsandfiles()
# Populate variables from this object
self.miseqfolder = self.assertions.miseqfolder
self.miseqfoldername = self.assertions.miseqfoldername
self.customsamplesheet = self.assertions.customsamplesheet if self.assertions.customsamplesheet \
else os.path.join(self.miseqfolder, 'SampleSheet.csv')
self.runinfo = os.path.join(self.miseqfolder, 'RunInfo.xml')
# Parse the sample sheet and other metadata files here
self.metadata = runMetadata.Metadata(self)
self.metadata.parseruninfo()
# Create variables from this method
self.flowcell = self.metadata.flowcell
self.instrument = self.metadata.instrument
self.samples = self.metadata.samples
self.runid = self.metadata.runid
# self.header = self.metadata.
self.ids = self.metadata.ids
self.date = self.metadata.date
self.totalreads = self.metadata.totalreads
# Create fastq files
self.createfastq()
# If the script is called from the command line, then call the argument parser
if __name__ == '__main__':
import subprocess
from time import time
# Get the current commit of the pipeline from git
# Extract the path of the current script from the full path + file name
homepath = os.path.split(os.path.abspath(__file__))[0]
# Find the commit of the script by running a command to change to the directory containing the script and run
# a git command to return the short version of the commit hash
commit = subprocess.Popen('cd {} && git tag | tail -n 1'.format(homepath),
shell=True, stdout=subprocess.PIPE).communicate()[0].rstrip()
from argparse import ArgumentParser
# Parser for arguments
parser = ArgumentParser(description='Assemble genomes from Illumina fastq files')
parser.add_argument('-v', '--version',
action='version', version='%(prog)s commit {}'.format(commit))
parser.add_argument('path',
help='Specify path')
parser.add_argument('-n', '--numreads',
default=2,
type=int,
help='Specify the number of reads. Paired-reads:'
' 2, unpaired-reads: 1. Default is paired-end')
parser.add_argument('-t', '--threads',
help='Number of threads. Default is the number of cores in the system')
parser.add_argument('-d', '--fastqdestination',
help='Optional folder path to store .fastq files created using the fastqCreation module. '
'Defaults to path/miseqfolder')
parser.add_argument('-m', '--miseqpath',
required=True,
help='Path of the folder containing MiSeq run data folder e.g. /mnt/MiSeq')
parser.add_argument('-f', '--miseqfolder',
required=True,
help='Name of the folder containing MiSeq run data e.g. 161129_M02466_0007_000000000-AW5L5')
parser.add_argument('-r1', '--forwardlength',
default='full',
help='Length of forward reads to use. Can specify "full" to take the full length of forward '
'reads specified on the SampleSheet. Defaults to "full"')
parser.add_argument('-r2', '--reverselength',
default='full',
help='Length of reverse reads to use. Can specify "full" to take the full length of reverse '
'reads specified on the SampleSheet. Defaults to "full"')
parser.add_argument('-c', '--customsamplesheet',
help='Path of folder containing a custom sample sheet and name of sample sheet file '
'e.g. /home/name/folder/BackupSampleSheet.csv. Note that this sheet must still have the '
'same format of Illumina SampleSheet.csv files')
parser.add_argument('-C', '--copy',
action='store_true',
help='Normally, the program will create symbolic links of the files into the sequence path, '
                             'however, there are occasions when it is necessary to copy the files instead')
# Get the arguments into an object
arguments = parser.parse_args()
arguments.starttime = time()
arguments.commit = commit
arguments.homepath = homepath
# Run the pipeline
CreateFastq(arguments)
# Print a bold, green exit statement
print('\033[92m' + '\033[1m' + "\nElapsed Time: %0.2f seconds" % (time() - arguments.starttime) + '\033[0m')
|
the-stack_0_10591 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# colorsrc
# --------
@property
def colorsrc(self):
"""
Sets the source reference on plot.ly for color .
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
"Arial", "Balto", "Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old Standard TT", "Open
Sans", "Overpass", "PT Sans Narrow", "Raleway", "Times New
Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# familysrc
# ---------
@property
def familysrc(self):
"""
Sets the source reference on plot.ly for family .
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# sizesrc
# -------
@property
def sizesrc(self):
"""
Sets the source reference on plot.ly for size .
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return "table.hoverlabel"
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
size=None,
sizesrc=None,
**kwargs
):
"""
Construct a new Font object
Sets the font used in hover labels.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.table.hoverlabel.Font`
color
colorsrc
Sets the source reference on plot.ly for color .
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include "Arial", "Balto", "Courier New", "Droid Sans",,
"Droid Serif", "Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on plot.ly for family .
size
sizesrc
Sets the source reference on plot.ly for size .
Returns
-------
Font
"""
super(Font, self).__init__("font")
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.table.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.table.hoverlabel.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
# Import validators
# -----------------
from plotly.validators.table.hoverlabel import font as v_font
# Initialize validators
# ---------------------
self._validators["color"] = v_font.ColorValidator()
self._validators["colorsrc"] = v_font.ColorsrcValidator()
self._validators["family"] = v_font.FamilyValidator()
self._validators["familysrc"] = v_font.FamilysrcValidator()
self._validators["size"] = v_font.SizeValidator()
self._validators["sizesrc"] = v_font.SizesrcValidator()
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
self["color"] = color if color is not None else _v
_v = arg.pop("colorsrc", None)
self["colorsrc"] = colorsrc if colorsrc is not None else _v
_v = arg.pop("family", None)
self["family"] = family if family is not None else _v
_v = arg.pop("familysrc", None)
self["familysrc"] = familysrc if familysrc is not None else _v
_v = arg.pop("size", None)
self["size"] = size if size is not None else _v
_v = arg.pop("sizesrc", None)
self["sizesrc"] = sizesrc if sizesrc is not None else _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
__all__ = ["Font"]
|
the-stack_0_10592 | import socket
import os
import cv2
import pickle
import threading
import struct
def sendImage():
os.system('python sender.py')
t1 = threading.Thread(target=sendImage)
t1.start()
client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host_ip = '192.168.43.6'
port = 9900
client_socket.connect((host_ip, port))
data = b""
payload_size = struct.calcsize("Q")
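# struct.calcsize("Q") is 8, so every incoming frame is prefixed with an 8-byte unsigned length field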
while True:
while len(data) < payload_size:
packet = client_socket.recv(4 * 1024)
if not packet: break
data += packet
packed_msg_size = data[:payload_size]
data = data[payload_size:]
msg_size = struct.unpack("Q", packed_msg_size)[0]
while len(data) < msg_size:
data += client_socket.recv(4 * 1024)
frame_data = data[:msg_size]
data = data[msg_size:]
frame = pickle.loads(frame_data)
cv2.imshow("Live Streaming Video Chat", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord('q'):
break
client_socket.close()
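# A minimal sketch (not part of the original script) of what the matching sender side in
# sender.py is assumed to do: pickle each frame and prefix it with its length packed as an
# unsigned 64-bit integer ("Q"), mirroring the struct.unpack("Q", ...) call above.
# The bind address and camera index are illustrative assumptions.
def _example_sender(host='192.168.43.6', port=9900):
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.bind((host, port))
    server_socket.listen(1)
    connection, _address = server_socket.accept()
    capture = cv2.VideoCapture(0)
    while capture.isOpened():
        ret, frame = capture.read()
        if not ret:
            break
        payload = pickle.dumps(frame)
        # Length prefix first, then the pickled frame itself
        connection.sendall(struct.pack("Q", len(payload)) + payload)
    capture.release()
    connection.close()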
|
the-stack_0_10593 | import sys
from setuptools import setup, find_packages
import versioneer
with open("README.md", "r") as fh:
long_description = fh.read()
# from https://github.com/pytest-dev/pytest-runner#conditional-requirement
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
setup(
name="atesa",
version="1.0",
author="Tucker Burgin",
author_email="[email protected]",
description="Python program for automating Aimless Transition Ensemble Sampling and Analysis (ATESA) with the Amber molecular simulations package.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/team-mayes/atesa",
packages=find_packages(),
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
scripts=['atesa/atesa.py']
) |
the-stack_0_10594 | # Copyright (c) 2006-2007, 2009-2014 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2013-2016 Claudiu Popa <[email protected]>
# Copyright (c) 2014 Google, Inc.
# Copyright (c) 2015 Florian Bruhin <[email protected]>
# Copyright (c) 2015-2016 Cara Vinson <[email protected]>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/master/COPYING.LESSER
"""tests for specific behaviour of astroid nodes
"""
import os
import sys
import textwrap
import unittest
import warnings
import six
import astroid
from astroid import bases
from astroid import builder
from astroid import context as contextmod
from astroid import exceptions
from astroid import node_classes
from astroid import nodes
from astroid import parse
from astroid import util
from astroid import test_utils
from astroid import transforms
from astroid.tests import resources
abuilder = builder.AstroidBuilder()
BUILTINS = six.moves.builtins.__name__
class AsStringTest(resources.SysPathSetup, unittest.TestCase):
def test_tuple_as_string(self):
def build(string):
return abuilder.string_build(string).body[0].value
self.assertEqual(build('1,').as_string(), '(1, )')
self.assertEqual(build('1, 2, 3').as_string(), '(1, 2, 3)')
self.assertEqual(build('(1, )').as_string(), '(1, )')
self.assertEqual(build('1, 2, 3').as_string(), '(1, 2, 3)')
@test_utils.require_version(minver='3.0')
def test_func_signature_issue_185(self):
code = textwrap.dedent('''
def test(a, b, c=42, *, x=42, **kwargs):
print(a, b, c, args)
''')
node = parse(code)
self.assertEqual(node.as_string().strip(), code.strip())
def test_as_string_for_list_containing_uninferable(self):
node = builder.extract_node('''
def foo():
bar = [arg] * 1
''')
binop = node.body[0].value
inferred = next(binop.infer())
self.assertEqual(inferred.as_string(), '[Uninferable]')
self.assertEqual(binop.as_string(), '([arg]) * (1)')
def test_frozenset_as_string(self):
ast_nodes = builder.extract_node('''
frozenset((1, 2, 3)) #@
frozenset({1, 2, 3}) #@
frozenset([1, 2, 3,]) #@
frozenset(None) #@
frozenset(1) #@
''')
ast_nodes = [next(node.infer()) for node in ast_nodes]
self.assertEqual(ast_nodes[0].as_string(), 'frozenset((1, 2, 3))')
self.assertEqual(ast_nodes[1].as_string(), 'frozenset({1, 2, 3})')
self.assertEqual(ast_nodes[2].as_string(), 'frozenset([1, 2, 3])')
self.assertNotEqual(ast_nodes[3].as_string(), 'frozenset(None)')
self.assertNotEqual(ast_nodes[4].as_string(), 'frozenset(1)')
def test_varargs_kwargs_as_string(self):
ast = abuilder.string_build('raise_string(*args, **kwargs)').body[0]
self.assertEqual(ast.as_string(), 'raise_string(*args, **kwargs)')
def test_module_as_string(self):
"""check as_string on a whole module prepared to be returned identically
"""
module = resources.build_file('data/module.py', 'data.module')
with open(resources.find('data/module.py'), 'r') as fobj:
self.assertMultiLineEqual(module.as_string(), fobj.read())
def test_module2_as_string(self):
"""check as_string on a whole module prepared to be returned identically
"""
module2 = resources.build_file('data/module2.py', 'data.module2')
with open(resources.find('data/module2.py'), 'r') as fobj:
self.assertMultiLineEqual(module2.as_string(), fobj.read())
def test_as_string(self):
"""check as_string for python syntax >= 2.7"""
code = '''one_two = {1, 2}
b = {v: k for (k, v) in enumerate('string')}
cdd = {k for k in b}\n\n'''
ast = abuilder.string_build(code)
self.assertMultiLineEqual(ast.as_string(), code)
@test_utils.require_version('3.0')
def test_3k_as_string(self):
"""check as_string for python 3k syntax"""
code = '''print()
def function(var):
nonlocal counter
try:
hello
except NameError as nexc:
(*hell, o) = b'hello'
raise AttributeError from nexc
\n'''
ast = abuilder.string_build(code)
self.assertEqual(ast.as_string(), code)
@test_utils.require_version('3.0')
@unittest.expectedFailure
def test_3k_annotations_and_metaclass(self):
code_annotations = textwrap.dedent('''
def function(var:int):
nonlocal counter
class Language(metaclass=Natural):
"""natural language"""
''')
ast = abuilder.string_build(code_annotations)
self.assertEqual(ast.as_string(), code_annotations)
def test_ellipsis(self):
ast = abuilder.string_build('a[...]').body[0]
self.assertEqual(ast.as_string(), 'a[...]')
def test_slices(self):
for code in ('a[0]', 'a[1:3]', 'a[:-1:step]', 'a[:,newaxis]',
'a[newaxis,:]', 'del L[::2]', 'del A[1]', 'del Br[:]'):
ast = abuilder.string_build(code).body[0]
self.assertEqual(ast.as_string(), code)
def test_slice_and_subscripts(self):
code = """a[:1] = bord[2:]
a[:1] = bord[2:]
del bree[3:d]
bord[2:]
del av[d::f], a[df:]
a[:1] = bord[2:]
del SRC[::1,newaxis,1:]
tous[vals] = 1010
del thousand[key]
del a[::2], a[:-1:step]
del Fee.form[left:]
aout.vals = miles.of_stuff
del (ccok, (name.thing, foo.attrib.value)), Fee.form[left:]
if all[1] == bord[0:]:
pass\n\n"""
ast = abuilder.string_build(code)
self.assertEqual(ast.as_string(), code)
class _NodeTest(unittest.TestCase):
"""test transformation of If Node"""
CODE = None
@property
def astroid(self):
try:
return self.__class__.__dict__['CODE_Astroid']
except KeyError:
module = builder.parse(self.CODE)
self.__class__.CODE_Astroid = module
return module
class IfNodeTest(_NodeTest):
"""test transformation of If Node"""
CODE = """
if 0:
print()
if True:
print()
else:
pass
if "":
print()
elif []:
raise
if 1:
print()
elif True:
print()
elif func():
pass
else:
raise
"""
def test_if_elif_else_node(self):
"""test transformation for If node"""
self.assertEqual(len(self.astroid.body), 4)
for stmt in self.astroid.body:
self.assertIsInstance(stmt, nodes.If)
self.assertFalse(self.astroid.body[0].orelse) # simple If
self.assertIsInstance(self.astroid.body[1].orelse[0], nodes.Pass) # If / else
self.assertIsInstance(self.astroid.body[2].orelse[0], nodes.If) # If / elif
self.assertIsInstance(self.astroid.body[3].orelse[0].orelse[0], nodes.If)
def test_block_range(self):
# XXX ensure expected values
self.assertEqual(self.astroid.block_range(1), (0, 22))
self.assertEqual(self.astroid.block_range(10), (0, 22)) # XXX (10, 22) ?
self.assertEqual(self.astroid.body[1].block_range(5), (5, 6))
self.assertEqual(self.astroid.body[1].block_range(6), (6, 6))
self.assertEqual(self.astroid.body[1].orelse[0].block_range(7), (7, 8))
self.assertEqual(self.astroid.body[1].orelse[0].block_range(8), (8, 8))
class TryExceptNodeTest(_NodeTest):
CODE = """
try:
print ('pouet')
except IOError:
pass
except UnicodeError:
print()
else:
print()
"""
def test_block_range(self):
# XXX ensure expected values
self.assertEqual(self.astroid.body[0].block_range(1), (1, 8))
self.assertEqual(self.astroid.body[0].block_range(2), (2, 2))
self.assertEqual(self.astroid.body[0].block_range(3), (3, 8))
self.assertEqual(self.astroid.body[0].block_range(4), (4, 4))
self.assertEqual(self.astroid.body[0].block_range(5), (5, 5))
self.assertEqual(self.astroid.body[0].block_range(6), (6, 6))
self.assertEqual(self.astroid.body[0].block_range(7), (7, 7))
self.assertEqual(self.astroid.body[0].block_range(8), (8, 8))
class TryFinallyNodeTest(_NodeTest):
CODE = """
try:
print ('pouet')
finally:
print ('pouet')
"""
def test_block_range(self):
# XXX ensure expected values
self.assertEqual(self.astroid.body[0].block_range(1), (1, 4))
self.assertEqual(self.astroid.body[0].block_range(2), (2, 2))
self.assertEqual(self.astroid.body[0].block_range(3), (3, 4))
self.assertEqual(self.astroid.body[0].block_range(4), (4, 4))
class TryExceptFinallyNodeTest(_NodeTest):
CODE = """
try:
print('pouet')
except Exception:
print ('oops')
finally:
print ('pouet')
"""
def test_block_range(self):
# XXX ensure expected values
self.assertEqual(self.astroid.body[0].block_range(1), (1, 6))
self.assertEqual(self.astroid.body[0].block_range(2), (2, 2))
self.assertEqual(self.astroid.body[0].block_range(3), (3, 4))
self.assertEqual(self.astroid.body[0].block_range(4), (4, 4))
self.assertEqual(self.astroid.body[0].block_range(5), (5, 5))
self.assertEqual(self.astroid.body[0].block_range(6), (6, 6))
@unittest.skipIf(six.PY3, "Python 2 specific test.")
class TryExcept2xNodeTest(_NodeTest):
CODE = """
try:
hello
except AttributeError, (retval, desc):
pass
"""
def test_tuple_attribute(self):
handler = self.astroid.body[0].handlers[0]
self.assertIsInstance(handler.name, nodes.Tuple)
class ImportNodeTest(resources.SysPathSetup, unittest.TestCase):
def setUp(self):
super(ImportNodeTest, self).setUp()
self.module = resources.build_file('data/module.py', 'data.module')
self.module2 = resources.build_file('data/module2.py', 'data.module2')
def test_import_self_resolve(self):
myos = next(self.module2.igetattr('myos'))
self.assertTrue(isinstance(myos, nodes.Module), myos)
self.assertEqual(myos.name, 'os')
self.assertEqual(myos.qname(), 'os')
self.assertEqual(myos.pytype(), '%s.module' % BUILTINS)
def test_from_self_resolve(self):
namenode = next(self.module.igetattr('NameNode'))
self.assertTrue(isinstance(namenode, nodes.ClassDef), namenode)
self.assertEqual(namenode.root().name, 'astroid.node_classes')
self.assertEqual(namenode.qname(), 'astroid.node_classes.Name')
self.assertEqual(namenode.pytype(), '%s.type' % BUILTINS)
abspath = next(self.module2.igetattr('abspath'))
self.assertTrue(isinstance(abspath, nodes.FunctionDef), abspath)
self.assertEqual(abspath.root().name, 'os.path')
self.assertEqual(abspath.qname(), 'os.path.abspath')
self.assertEqual(abspath.pytype(), '%s.function' % BUILTINS)
def test_real_name(self):
from_ = self.module['NameNode']
self.assertEqual(from_.real_name('NameNode'), 'Name')
imp_ = self.module['os']
self.assertEqual(imp_.real_name('os'), 'os')
self.assertRaises(exceptions.AttributeInferenceError, imp_.real_name, 'os.path')
imp_ = self.module['NameNode']
self.assertEqual(imp_.real_name('NameNode'), 'Name')
self.assertRaises(exceptions.AttributeInferenceError, imp_.real_name, 'Name')
imp_ = self.module2['YO']
self.assertEqual(imp_.real_name('YO'), 'YO')
self.assertRaises(exceptions.AttributeInferenceError, imp_.real_name, 'data')
def test_as_string(self):
ast = self.module['modutils']
self.assertEqual(ast.as_string(), "from astroid import modutils")
ast = self.module['NameNode']
self.assertEqual(ast.as_string(), "from astroid.node_classes import Name as NameNode")
ast = self.module['os']
self.assertEqual(ast.as_string(), "import os.path")
code = """from . import here
from .. import door
from .store import bread
from ..cave import wine\n\n"""
ast = abuilder.string_build(code)
self.assertMultiLineEqual(ast.as_string(), code)
def test_bad_import_inference(self):
        # Explanation of the bug
        '''When we import PickleError from nonexistent, a call to the infer
        method of this From node will be made by unpack_infer.
        inference.infer_from will try to import this module, which will fail and
        raise an InferenceException (by mixins.do_import_module). The infer_name
        will catch this exception and yield an Uninferable instead.
        '''
code = '''
try:
from pickle import PickleError
except ImportError:
from nonexistent import PickleError
try:
pass
except PickleError:
pass
'''
module = builder.parse(code)
handler_type = module.body[1].handlers[0].type
excs = list(node_classes.unpack_infer(handler_type))
        # The number of returned objects can differ on Python 2
# and Python 3. In one version, an additional item will
# be returned, from the _pickle module, which is not
# present in the other version.
self.assertIsInstance(excs[0], nodes.ClassDef)
self.assertEqual(excs[0].name, 'PickleError')
self.assertIs(excs[-1], util.Uninferable)
def test_absolute_import(self):
module = resources.build_file('data/absimport.py')
ctx = contextmod.InferenceContext()
# will fail if absolute import failed
ctx.lookupname = 'message'
next(module['message'].infer(ctx))
ctx.lookupname = 'email'
m = next(module['email'].infer(ctx))
self.assertFalse(m.file.startswith(os.path.join('data', 'email.py')))
def test_more_absolute_import(self):
module = resources.build_file('data/module1abs/__init__.py', 'data.module1abs')
self.assertIn('sys', module.locals)
class CmpNodeTest(unittest.TestCase):
def test_as_string(self):
ast = abuilder.string_build("a == 2").body[0]
self.assertEqual(ast.as_string(), "a == 2")
class ConstNodeTest(unittest.TestCase):
def _test(self, value):
# pylint: disable=no-member; union type in const_factory, this shouldn't happen
node = nodes.const_factory(value)
self.assertIsInstance(node._proxied, nodes.ClassDef)
self.assertEqual(node._proxied.name, value.__class__.__name__)
self.assertIs(node.value, value)
self.assertTrue(node._proxied.parent)
self.assertEqual(node._proxied.root().name, value.__class__.__module__)
def test_none(self):
self._test(None)
def test_bool(self):
self._test(True)
def test_int(self):
self._test(1)
def test_float(self):
self._test(1.0)
def test_complex(self):
self._test(1.0j)
def test_str(self):
self._test('a')
def test_unicode(self):
self._test(u'a')
class NameNodeTest(unittest.TestCase):
def test_assign_to_True(self):
"""test that True and False assignments don't crash"""
code = """
True = False
def hello(False):
pass
del True
"""
if sys.version_info >= (3, 0):
with self.assertRaises(exceptions.AstroidBuildingError):
builder.parse(code)
else:
ast = builder.parse(code)
assign_true = ast['True']
self.assertIsInstance(assign_true, nodes.AssignName)
self.assertEqual(assign_true.name, "True")
del_true = ast.body[2].targets[0]
self.assertIsInstance(del_true, nodes.DelName)
self.assertEqual(del_true.name, "True")
class AnnAssignNodeTest(unittest.TestCase):
@test_utils.require_version(minver='3.6')
def test_primitive(self):
code = textwrap.dedent("""
test: int = 5
""")
assign = builder.extract_node(code)
self.assertIsInstance(assign, nodes.AnnAssign)
self.assertEqual(assign.target.name, "test")
self.assertEqual(assign.annotation.name, "int")
self.assertEqual(assign.value.value, 5)
self.assertEqual(assign.simple, 1)
@test_utils.require_version(minver='3.6')
def test_primitive_without_initial_value(self):
code = textwrap.dedent("""
test: str
""")
assign = builder.extract_node(code)
self.assertIsInstance(assign, nodes.AnnAssign)
self.assertEqual(assign.target.name, "test")
self.assertEqual(assign.annotation.name, "str")
self.assertEqual(assign.value, None)
@test_utils.require_version(minver='3.6')
def test_complex(self):
code = textwrap.dedent("""
test: Dict[List[str]] = {}
""")
assign = builder.extract_node(code)
self.assertIsInstance(assign, nodes.AnnAssign)
self.assertEqual(assign.target.name, "test")
self.assertIsInstance(assign.annotation, astroid.Subscript)
self.assertIsInstance(assign.value, astroid.Dict)
@test_utils.require_version(minver='3.6')
def test_as_string(self):
code = textwrap.dedent("""
print()
test: int = 5
test2: str
test3: List[Dict[(str, str)]] = []
""")
ast = abuilder.string_build(code)
self.assertEqual(ast.as_string().strip(), code.strip())
class ArgumentsNodeTC(unittest.TestCase):
def test_linenumbering(self):
ast = builder.parse('''
def func(a,
b): pass
x = lambda x: None
''')
self.assertEqual(ast['func'].args.fromlineno, 2)
self.assertFalse(ast['func'].args.is_statement)
xlambda = next(ast['x'].infer())
self.assertEqual(xlambda.args.fromlineno, 4)
self.assertEqual(xlambda.args.tolineno, 4)
self.assertFalse(xlambda.args.is_statement)
if sys.version_info < (3, 0):
self.assertEqual(ast['func'].args.tolineno, 3)
else:
self.skipTest('FIXME http://bugs.python.org/issue10445 '
'(no line number on function args)')
@test_utils.require_version(minver='3.0')
def test_kwoargs(self):
ast = builder.parse('''
def func(*, x):
pass
''')
args = ast['func'].args
self.assertTrue(args.is_argument('x'))
class UnboundMethodNodeTest(unittest.TestCase):
def test_no_super_getattr(self):
# This is a test for issue
# https://bitbucket.org/logilab/astroid/issue/91, which tests
# that UnboundMethod doesn't call super when doing .getattr.
ast = builder.parse('''
class A(object):
def test(self):
pass
meth = A.test
''')
node = next(ast['meth'].infer())
with self.assertRaises(exceptions.AttributeInferenceError):
node.getattr('__missssing__')
name = node.getattr('__name__')[0]
self.assertIsInstance(name, nodes.Const)
self.assertEqual(name.value, 'test')
class BoundMethodNodeTest(unittest.TestCase):
def test_is_property(self):
ast = builder.parse('''
import abc
def cached_property():
# Not a real decorator, but we don't care
pass
def reify():
# Same as cached_property
pass
def lazy_property():
pass
def lazyproperty():
pass
def lazy(): pass
class A(object):
@property
def builtin_property(self):
return 42
@abc.abstractproperty
def abc_property(self):
return 42
@cached_property
def cached_property(self): return 42
@reify
def reified(self): return 42
@lazy_property
def lazy_prop(self): return 42
@lazyproperty
def lazyprop(self): return 42
def not_prop(self): pass
@lazy
def decorated_with_lazy(self): return 42
cls = A()
builtin_property = cls.builtin_property
abc_property = cls.abc_property
cached_p = cls.cached_property
reified = cls.reified
not_prop = cls.not_prop
lazy_prop = cls.lazy_prop
lazyprop = cls.lazyprop
decorated_with_lazy = cls.decorated_with_lazy
''')
for prop in ('builtin_property', 'abc_property', 'cached_p', 'reified',
'lazy_prop', 'lazyprop', 'decorated_with_lazy'):
inferred = next(ast[prop].infer())
self.assertIsInstance(inferred, nodes.Const, prop)
self.assertEqual(inferred.value, 42, prop)
inferred = next(ast['not_prop'].infer())
self.assertIsInstance(inferred, bases.BoundMethod)
class AliasesTest(unittest.TestCase):
def setUp(self):
self.transformer = transforms.TransformVisitor()
def parse_transform(self, code):
module = parse(code, apply_transforms=False)
return self.transformer.visit(module)
def test_aliases(self):
def test_from(node):
node.names = node.names + [('absolute_import', None)]
return node
def test_class(node):
node.name = 'Bar'
return node
def test_function(node):
node.name = 'another_test'
return node
def test_callfunc(node):
if node.func.name == 'Foo':
node.func.name = 'Bar'
return node
def test_assname(node):
if node.name == 'foo':
return nodes.AssignName('bar', node.lineno, node.col_offset,
node.parent)
def test_assattr(node):
if node.attrname == 'a':
node.attrname = 'b'
return node
def test_getattr(node):
if node.attrname == 'a':
node.attrname = 'b'
return node
def test_genexpr(node):
if node.elt.value == 1:
node.elt = nodes.Const(2, node.lineno, node.col_offset,
node.parent)
return node
self.transformer.register_transform(nodes.From, test_from)
self.transformer.register_transform(nodes.Class, test_class)
self.transformer.register_transform(nodes.Function, test_function)
self.transformer.register_transform(nodes.CallFunc, test_callfunc)
self.transformer.register_transform(nodes.AssName, test_assname)
self.transformer.register_transform(nodes.AssAttr, test_assattr)
self.transformer.register_transform(nodes.Getattr, test_getattr)
self.transformer.register_transform(nodes.GenExpr, test_genexpr)
string = '''
from __future__ import print_function
class Foo: pass
def test(a): return a
foo = Foo()
foo.a = test(42)
foo.a
(1 for _ in range(0, 42))
'''
module = self.parse_transform(string)
self.assertEqual(len(module.body[0].names), 2)
self.assertIsInstance(module.body[0], nodes.ImportFrom)
self.assertEqual(module.body[1].name, 'Bar')
self.assertIsInstance(module.body[1], nodes.ClassDef)
self.assertEqual(module.body[2].name, 'another_test')
self.assertIsInstance(module.body[2], nodes.FunctionDef)
self.assertEqual(module.body[3].targets[0].name, 'bar')
self.assertIsInstance(module.body[3].targets[0], nodes.AssignName)
self.assertEqual(module.body[3].value.func.name, 'Bar')
self.assertIsInstance(module.body[3].value, nodes.Call)
self.assertEqual(module.body[4].targets[0].attrname, 'b')
self.assertIsInstance(module.body[4].targets[0], nodes.AssignAttr)
self.assertIsInstance(module.body[5], nodes.Expr)
self.assertEqual(module.body[5].value.attrname, 'b')
self.assertIsInstance(module.body[5].value, nodes.Attribute)
self.assertEqual(module.body[6].value.elt.value, 2)
self.assertIsInstance(module.body[6].value, nodes.GeneratorExp)
@unittest.skipIf(six.PY3, "Python 3 doesn't have Repr nodes.")
def test_repr(self):
def test_backquote(node):
node.value.name = 'bar'
return node
self.transformer.register_transform(nodes.Backquote, test_backquote)
module = self.parse_transform('`foo`')
self.assertEqual(module.body[0].value.value.name, 'bar')
self.assertIsInstance(module.body[0].value, nodes.Repr)
class DeprecationWarningsTest(unittest.TestCase):
def test_asstype_warnings(self):
string = '''
class C: pass
c = C()
with warnings.catch_warnings(record=True) as w:
pass
'''
module = parse(string)
filter_stmts_mixin = module.body[0]
assign_type_mixin = module.body[1].targets[0]
parent_assign_type_mixin = module.body[2]
with warnings.catch_warnings(record=True) as w:
with test_utils.enable_warning(PendingDeprecationWarning):
filter_stmts_mixin.ass_type()
self.assertIsInstance(w[0].message, PendingDeprecationWarning)
with warnings.catch_warnings(record=True) as w:
with test_utils.enable_warning(PendingDeprecationWarning):
assign_type_mixin.ass_type()
self.assertIsInstance(w[0].message, PendingDeprecationWarning)
with warnings.catch_warnings(record=True) as w:
with test_utils.enable_warning(PendingDeprecationWarning):
parent_assign_type_mixin.ass_type()
self.assertIsInstance(w[0].message, PendingDeprecationWarning)
def test_isinstance_warnings(self):
msg_format = ("%r is deprecated and slated for removal in astroid "
"2.0, use %r instead")
for cls in (nodes.Discard, nodes.Backquote, nodes.AssName,
nodes.AssAttr, nodes.Getattr, nodes.CallFunc, nodes.From):
with warnings.catch_warnings(record=True) as w:
with test_utils.enable_warning(PendingDeprecationWarning):
isinstance(42, cls)
self.assertIsInstance(w[0].message, PendingDeprecationWarning)
actual_msg = msg_format % (cls.__class__.__name__, cls.__wrapped__.__name__)
self.assertEqual(str(w[0].message), actual_msg)
@test_utils.require_version('3.5')
class Python35AsyncTest(unittest.TestCase):
def test_async_await_keywords(self):
async_def, async_for, async_with, await_node = builder.extract_node('''
async def func(): #@
async for i in range(10): #@
f = __(await i)
async with test(): #@
pass
''')
self.assertIsInstance(async_def, nodes.AsyncFunctionDef)
self.assertIsInstance(async_for, nodes.AsyncFor)
self.assertIsInstance(async_with, nodes.AsyncWith)
self.assertIsInstance(await_node, nodes.Await)
self.assertIsInstance(await_node.value, nodes.Name)
def _test_await_async_as_string(self, code):
ast_node = parse(code)
self.assertEqual(ast_node.as_string().strip(), code.strip())
def test_await_as_string(self):
code = textwrap.dedent('''
async def function():
await 42
''')
self._test_await_async_as_string(code)
def test_asyncwith_as_string(self):
code = textwrap.dedent('''
async def function():
async with (42):
pass
''')
self._test_await_async_as_string(code)
def test_asyncfor_as_string(self):
code = textwrap.dedent('''
async def function():
async for i in range(10):
await 42
''')
self._test_await_async_as_string(code)
class ContextTest(unittest.TestCase):
def test_subscript_load(self):
node = builder.extract_node('f[1]')
self.assertIs(node.ctx, astroid.Load)
def test_subscript_del(self):
node = builder.extract_node('del f[1]')
self.assertIs(node.targets[0].ctx, astroid.Del)
def test_subscript_store(self):
node = builder.extract_node('f[1] = 2')
subscript = node.targets[0]
self.assertIs(subscript.ctx, astroid.Store)
def test_list_load(self):
node = builder.extract_node('[]')
self.assertIs(node.ctx, astroid.Load)
def test_list_del(self):
node = builder.extract_node('del []')
self.assertIs(node.targets[0].ctx, astroid.Del)
def test_list_store(self):
with self.assertRaises(exceptions.AstroidSyntaxError):
builder.extract_node('[0] = 2')
def test_tuple_load(self):
node = builder.extract_node('(1, )')
self.assertIs(node.ctx, astroid.Load)
def test_tuple_store(self):
with self.assertRaises(exceptions.AstroidSyntaxError):
builder.extract_node('(1, ) = 3')
@test_utils.require_version(minver='3.5')
def test_starred_load(self):
node = builder.extract_node('a = *b')
starred = node.value
self.assertIs(starred.ctx, astroid.Load)
@test_utils.require_version(minver='3.0')
def test_starred_store(self):
node = builder.extract_node('a, *b = 1, 2')
starred = node.targets[0].elts[1]
self.assertIs(starred.ctx, astroid.Store)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_10595 | """Module implementing a wrapper for the Prod2Vec model"""
import logging
from functools import partial
from multiprocessing import cpu_count
from multiprocessing.pool import ThreadPool
import pandas as pd
from gensim.models import Word2Vec
from tqdm import tqdm
from ..data.initializer import DataLoaderSaver
from .base import BaseRecommender
WORKERS = cpu_count()
def _restrict_to_target_users(interactions, target_users):
"""
Merge interactions with target users on user column
:param interactions: Interactions dataset
:param target_users: Target user dataset
:return: Interactions dataset for target users
"""
return interactions.merge(
pd.DataFrame(target_users).rename(columns={0: "user"}), on="user"
)
def _interactions_to_list_of_lists(interactions):
"""
    Transforms an interactions dataframe of (user, item) rows into a per-user list of items
    :param interactions: Interactions dataframe
    :return: pd.Series indexed by user whose values are chronologically ordered lists of items
"""
interactions = interactions.sort_values(by="timestamp")
return interactions.groupby("user")["item"].apply(list)
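# For example (illustrative values), rows (u1, i1, t1) and (u1, i2, t2) with t1 < t2 become a
# single Series entry u1 -> [i1, i2], ordered by timestamp.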
class Prod2Vec(BaseRecommender, DataLoaderSaver):
"""
Wrapper over Word2Vec model
"""
def __init__(
self,
vector_size=48,
alpha=0.1,
window=5,
min_count=2,
sample=1e-3,
workers=WORKERS,
min_alpha=0.0001,
sg=1,
hs=0,
negative=50,
ns_exponent=0.75,
cbow_mean=1,
epochs=20,
show_progress=True,
):
"""
Source:
https://github.com/RaRe-Technologies/gensim/blob/develop/gensim/models/word2vec.py
vector_size : int, optional
Dimensionality of the word vectors.
window : int, optional
Maximum distance between the current and predicted word within a sentence.
min_count : int, optional
Ignores all words with total frequency lower than this.
workers : int, optional
Use these many worker threads to train the model (=faster training with multicore machines).
sg : {0, 1}, optional
Training algorithm: 1 for skip-gram; otherwise CBOW.
hs : {0, 1}, optional
If 1, hierarchical softmax will be used for model training.
If 0, and `negative` is non-zero, negative sampling will be used.
negative : int, optional
If > 0, negative sampling will be used, the int for negative specifies how many "noise words"
should be drawn (usually between 5-20).
If set to 0, no negative sampling is used.
ns_exponent : float, optional
The exponent used to shape the negative sampling distribution. A value of 1.0 samples exactly in proportion
to the frequencies, 0.0 samples all words equally, while a negative value samples low-frequency words more
than high-frequency words. The popular default value of 0.75 was chosen by the original Word2Vec paper.
More recently, in https://arxiv.org/abs/1804.04212, Caselles-Dupré, Lesaint, & Royo-Letelier suggest that
other values may perform better for recommendation applications.
cbow_mean : {0, 1}, optional
If 0, use the sum of the context word vectors. If 1, use the mean, only applies when cbow is used.
alpha : float, optional
The initial learning rate.
min_alpha : float, optional
Learning rate will linearly drop to `min_alpha` as training progresses.
sample : float, optional
The threshold for configuring which higher-frequency words are randomly downsampled,
useful range is (0, 1e-5).
epochs : int, optional
Number of iterations (epochs) over the corpus. (Formerly: `iter`)
"""
super().__init__()
# model
self.model = Word2Vec(
vector_size=vector_size,
alpha=alpha,
window=window,
min_count=min_count,
sample=sample,
workers=workers,
min_alpha=min_alpha,
sg=sg,
hs=hs,
negative=negative,
ns_exponent=ns_exponent,
cbow_mean=cbow_mean,
epochs=epochs,
)
# data
self.interactions = None
self.train_sequences = None
self.user_sequences = None
self.show_progress = show_progress
if not show_progress:
logging.getLogger("gensim").setLevel(logging.WARNING)
def _restrict_to_vocab(self, interactions):
known_items = pd.DataFrame(
self.model.wv.key_to_index.keys(), columns=["item"]
).astype(str)
return interactions.merge(known_items, on="item")
def _prepare_user_sequences(self, interactions, target_users):
"""
        Returns a pd.Series indexed by user whose values are lists of interacted items restricted to the model vocabulary.
        :param target_users: list of target users to be considered in the output; for None, all users with interactions
        will be considered
"""
restricted_interactions = _restrict_to_target_users(interactions, target_users)
restricted_interactions = self._restrict_to_vocab(restricted_interactions)
return _interactions_to_list_of_lists(restricted_interactions)
def preprocess(self):
"""
Prepare sequences for training the Word2Vec model
"""
self.train_sequences = _interactions_to_list_of_lists(self.interactions)
def fit(self):
"""
Returns Word2VecKeyedVectors from trained i2vCF model
"""
# Build vocabulary
self.model.build_vocab(self.train_sequences)
# Train
self.model.train(
self.train_sequences,
total_examples=self.model.corpus_count,
epochs=self.model.epochs,
)
# Precompute L2-normalized vectors
self.model.wv.init_sims(replace=True)
def recommend(
self,
target_users,
n_recommendations,
filter_out_interacted_items=True,
show_progress=True,
):
"""
Recommends n_recommendations items for target_users
:return:
pd.DataFrame (user, item_1, item_2, ..., item_n)
"""
self.user_sequences = self._prepare_user_sequences(
self.interactions, target_users
)
with ThreadPool() as thread_pool:
recommendations = list(
tqdm(
thread_pool.imap(
partial(
self.recommend_per_user,
n_recommendations=n_recommendations,
filter_out_interacted_items=filter_out_interacted_items,
),
target_users,
),
disable=not self.show_progress,
)
)
return pd.DataFrame(recommendations)
def recommend_per_user(
self, user, n_recommendations, filter_out_interacted_items=True
):
"""
Recommends n items per user
:param user: User id
:param n_recommendations: Number of recommendations
:param filter_out_interacted_items: boolean value to filter interacted items
:return: list of format [user_id, item1, item2 ...]
"""
u_recommended_items = []
if self.user_sequences.get(user) is not None:
u_items = self.user_sequences.get(user)
u_recommended_items = list(
list(
zip(
*self.model.wv.most_similar(
u_items,
topn=n_recommendations
+ len(u_items) * filter_out_interacted_items,
)
)
)[0]
)
if filter_out_interacted_items:
u_recommended_items = [
i for i in u_recommended_items if i not in u_items
][:n_recommendations]
return (
[user]
+ u_recommended_items
+ [None] * (n_recommendations - len(u_recommended_items))
)
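# A minimal end-to-end usage sketch (assumed workflow, not part of the original module).
# The interactions dataframe is assumed to have user, item and timestamp columns.
def _example_usage(interactions_df, target_users, n_recommendations=10):
    model = Prod2Vec(epochs=5, show_progress=False)
    model.interactions = interactions_df
    model.preprocess()
    model.fit()
    return model.recommend(target_users=target_users, n_recommendations=n_recommendations)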
|
the-stack_0_10596 | '''
Support for reading source files, including unpacking from cat/dat files.
Includes File_Missing_Exception for when a file is not found.
Import as:
from Source_Reader import *
'''
import os
from pathlib import Path # TODO: convert all from os to pathlib.
from ..Common.Settings import Settings
from .Logs import *
from collections import OrderedDict
from .File_Types import *
from .File_Paths import *
from .Cat_Reader import *
from .. import Common
import gzip
'''
Notes on X3 Plugin Manager generated TWareT.pck file:
This file does not work with standard gzip, though does appear to
open with X3 Editor 2.
Exploration into reasons led down this path:
- Look through X3 Editor source code (c#) to trace compression.
- This uses x2fd.dll.
- Look through x2fd source code (c++).
- Find a file reading in catpck.cpp, function DecompressBuffer.
The apparent decompression used is:
magic_value = bytestream[0] ^ 0xC8
file_binary_demagicked = bytestream[1:] ^ magic_value
file_contents = gzip.decompress(file_binary_demagicked)
This does not match up with the magic values or approaches used
in the actual cat files (running magic value for cats, 0x33 for dats,
no dropping of the first byte), and the standalone pck files in scripts
are all plain gzipped.
At any rate, if normal gzip fails, this decompression can be applied
as a second pass to see if it works better.
'''
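# A minimal sketch of the fallback decompression described above, assuming the x2fd-style
# scheme (magic value derived from the first byte XOR 0xC8, applied to the remaining bytes
# before gzip). Illustrative only; the Decompress method of Source_Reader_class below is
# the code actually used.
def _example_x2_style_decompress(file_binary):
    magic_value = file_binary[0] ^ 0xC8
    demagicked = bytes(byte ^ magic_value for byte in file_binary[1:])
    return gzip.decompress(demagicked)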
class Source_Reader_class:
'''
Class used to find and read the highest priority source files.
The general search order is:
* Source folder defined in the settings.
* Loose folders, including scripts.
* Cat files in the addon folder numbered 01 through the highest
contiguous 2-digit number, higher number taking precedence.
* Cat files in the base X3 directory, treated similarly to the
addon folder.
Note: the addon/mods folder is ignored, due to ambiguity on which
mod located there might be in use for a given game session.
Files which were generated on a prior customizer run (identified
by matching a hash in the prior run's log) will be skipped.
Files which were backed up on a prior run will be checked.
Cat files will be parsed as they are reached in the search order,
not before, to avoid excessive startup time when deeper cat files
may never be needed.
Attributes:
* source_file_path_dict
- Dict, keyed by virtual_path, holding the system path
for where the file is located, for files in the source folder
specified in the Settings.
* script_file_path_dict
- Dict, keyed by script name (without path), holding the full path
for a script in the addon/scripts folder.
- pending development.
* catalog_file_dict
- OrderedDict of Cat_Reader objects, keyed by file path, organized
by priority, where the first entry is the highest priority cat.
- Early catalogs are from the addon folder, later catalogs are from
the base x3 folder.
- Dict entries are initially None, and get replaced with Catalog_Files
as the cats are searched.
* file_to_cat_dict
- Dict, keyed by file name, with the Cat_Reader object that holds
the file with the highest priority.
- Used for code result reuse, and is not an exhaustive list.
* prior_customizer_cat_path
- String, path for any catalog file from a prior customizer run.
- The dat file has a matched path, changing extension.
- This path may need to get replaced with an empty cat file if the
user added more catalogs after the prior run, to maintain indexing
order.
* prior_customizer_cat_needs_dummy
- Bool, if True then a dummy catalog should be generated using the
prior_customizer_cat_path during file cleanup (after the prior
cat is deleted normally).
- Any new catalog file will always be at least 2 steps higher than
the prior cat file in this case.
'''
def __init__(s):
s.source_file_path_dict = {}
s.catalog_file_dict = OrderedDict()
s.file_to_cat_dict = {}
s.prior_customizer_cat_path = None
s.prior_customizer_cat_needs_dummy = False
def Init(s):
'''
Initializes the file system by finding files in the source folder,
and finding all cat files in priority order.
This should be run after paths have been set up in Settings.
'''
# Look up the source folder.
source_folder = Settings.Get_Source_Folder()
# If it is not None, look into it.
if source_folder != None:
# Dynamically find all files in the source folder.
# These will all be copied at final writeout, even if not modified,
# eg. if a transform was formerly run on a file but then commented
# out, need to overwrite the previous results with a non-transformed
# version of the file.
# Uses os.walk to go through the source folder and all subfolders.
# Note: dir_path is the relative dir from the source folder, including
# the source folder. This could be removed by doing a nested walk
# from each folder in the source_folder, though this wouldn't work if
# the user just wanted to give a basic list of files to transform and
# manually handle moving around outputs.
# Edits to the path can remove the first folder, though are clumsy
# to implement.
# The working directory could be moved to the source directory
# temporarily for this, though starting walk using '.' will have
# that as the start of the path still (which might be fine).
original_cwd = os.getcwd()
os.chdir(source_folder)
for dir_path, folder_names, file_names in os.walk('.'):
# Loop over the file names.
for file_name in file_names:
# Record the absolute path.
s.Record_New_Source_File( os.path.abspath(
os.path.join(dir_path, file_name)))
# Restore the working directory.
os.chdir(original_cwd)
# Search for cat files the game will recognize.
# These start at 01.cat, and count up as 2-digit values until
# the count is broken.
# For convenience, the first pass will fill in a list with low
# to high priority, then the list can be reversed at the end.
cat_dir_list_low_to_high = []
# Loop over the base x3 folder and the addon folder, doing x3
# first since it is lower priority.
for path in [Settings.Get_X3_Folder(), Settings.Get_Addon_Folder()]:
# Loop until a cat index not found.
cat_index = 1
while 1:
# Error if hit 100.
assert cat_index < 100
cat_name = '{:02d}.cat'.format(
cat_index
)
cat_path = os.path.join(path, cat_name)
# Stop if the cat file is not found.
if not os.path.exists(cat_path):
break
# Record the path if the cat is not from a prior run.
if not Log_Old.File_Is_From_Last_Run(cat_path):
cat_dir_list_low_to_high.append(cat_path)
# If a prior cat file was found before this, then this
# was added by the user since the last customizer run.
# If this cat file is not the highest priority,
# toss a warning, since this indicates the user added
# a higher numbered cat since the last customizer
# run.
# Only print this warning once, by checking the dummy
# flag.
if (s.prior_customizer_cat_path != None
and s.prior_customizer_cat_needs_dummy == False):
s.prior_customizer_cat_needs_dummy = True
print('Warning: cat file {} is from a prior'
' Customizer run but is not the highest'
' numbered cat file.'.format(
s.prior_customizer_cat_path))
else:
s.prior_customizer_cat_path = cat_path
# Increment for the next cat.
cat_index += 1
# Note on the mods folder:
# One cat/dat pair in the addon/mods folder can be selected for
# use by the game. At this time, no effort has been put into
# knowing which pair a user might have selected, so mods applied
# in that way will be ignored for now.
# Fill in dict entries with the list paths, in reverse order.
for path in reversed(cat_dir_list_low_to_high):
s.catalog_file_dict[path] = None
return
def Get_Next_Higher_Cat_Index(s):
'''
Returns a 2-digit string, the next catalog index, 1 higher than
the existing highest index when ignoring any catalogs generated
by prior runs. To be used if writing modified files to a catalog.
'''
# The first catalog_file_dict entry is the highest priority.
# This will get the file path.
# Convert to a Path for convenience.
highest_index_path = Path(next(iter(s.catalog_file_dict.keys())))
# Get the index.
highest_index = int(highest_index_path.stem)
# Increment and convert back to a 2-digit string.
next_index_str = '{:02}'.format(highest_index + 1)
assert len(next_index_str) == 2
return next_index_str
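    # Hedged worked example (comment added for illustration, not original): if the
    # highest priority entry in catalog_file_dict is '<x3 folder>/addon/09.cat',
    # its stem is '09', so this method returns '10'.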
def Record_New_Source_File(s, sys_path):
'''
Records a new file in the source folder, placed there after init.
The provided path should be absolute.
'''
virtual_path = System_Path_to_Virtual_Path(sys_path)
s.source_file_path_dict[virtual_path] = sys_path
def Decompress(s, file_binary, virtual_path):
'''
Decompress the given binary using gzip.
This will attempt to decompress the binary as-is, and if failing,
will decompress as if an x2 file (gzipped with an xor pass and
prefix byte) to support X3 Plugin Manager generated pck files.
* file_binary
- Byte string or Bytearray with the original file binary data.
* virtual_path
- String, virtual path of the file to look up.
- Only used for printouts.
'''
try:
decompressed_binary = gzip.decompress(file_binary)
except:
# Print a nice message in dev mode to indicate this fallback
# code is being used. TODO: remove this if code seems robust.
if Settings.developer:
print('First gzip pass on {} failed, attempting to apply'
' x2 style decompression.'\
.format(virtual_path))
# Try method 2. See notes way up above for what is
# going on, but in short, the first byte xors with 0xC8
# to get a magic value to Xor with all other bytes, then
# that is all decompressed with gzip.
magic = file_binary[0] ^ 0xC8
file_binary = bytearray(x ^ magic for x in file_binary)
try:
# Toss the first byte for this.
decompressed_binary = gzip.decompress(file_binary[1:])
except Exception as ex:
if Settings.developer:
# Dev mode will give a little extra info.
print('Gzip error for file {}'.format(virtual_path))
raise ex
else:
# Swap to a generic exception.
raise Common.Gzip_Exception()
return decompressed_binary
def Read(s,
virtual_path,
error_if_not_found = True,
copy_to_source_folder = False
):
'''
Returns a Game_File including the contents read from the
source folder or unpacked from a cat file.
Contents may be binary or text, depending on the Game_File subclass.
This will search for packed versions as well, automatically unzipping
the contents.
If the file contents are empty, this returns None; this may occur
for LU dummy files.
* virtual_path
- String, virtual path of the file to look up.
- For files which may be gzipped into a pck file, give the
expected non-zipped extension (.xml, .txt, etc.).
* error_if_not_found
- Bool, if True an exception will be thrown if the file cannot
be found, otherwise None is returned.
* copy_to_source_folder
- Bool, if True and the file is read from a cat/dat pair, then
a copy of the data will be placed into the source folder.
- The copy is made after any unzipping is applied.
- Pending development.
'''
# Grab the extension.
file_extension = virtual_path.rsplit('.',1)[1]
# Determine the name for a possibly packed version.
# This is None if the file is not expected to be packed.
virtual_path_pck = Unpacked_Path_to_Packed_Path(virtual_path)
# Flag to indicate if the binary was loaded from a pck file, and
# needs unzipping.
file_binary_is_zipped = False
# Binary data read from a file.
# Once a source is found, this will be filled in, so later
# source checks can be skipped once this is not None.
file_binary = None
# For debug, the path of the file sourced from, maybe a cat.
file_source_path = None
# Check the source folder.
# This could do a full path check, but will reuse the parsed
# files found during Init.
# Pck takes precedence over other files when X3 loads them.
for test_virtual_path in [virtual_path_pck, virtual_path]:
# Skip empty packed paths.
if test_virtual_path == None:
continue
# Skip if not found.
if test_virtual_path not in s.source_file_path_dict:
continue
# Open the file and grab the binary data.
# If this needs to be treated as text, it will be
            # reinterpreted elsewhere.
file_source_path = s.source_file_path_dict[test_virtual_path]
with open(file_source_path, 'rb') as file:
file_binary = file.read()
# If it was pck, clarify as zipped.
file_binary_is_zipped = test_virtual_path == virtual_path_pck
# Don't check the other suffix after a match is found.
break
# Check for a loose file outside the source folder, unless
# this is disabled in the settings.
if file_binary == None and Settings.ignore_loose_files == False:
sys_path = Virtual_Path_to_System_Path(virtual_path)
sys_path_pck = Unpacked_Path_to_Packed_Path(sys_path)
# Loop over pck and standard versions.
for test_sys_path in [sys_path_pck, sys_path]:
# Skip empty packed paths.
if test_sys_path == None:
continue
# Following checks will look for a renamed file or a file with
# the original name.
# TODO: consider doing a more generic renamed file check.
file_path_to_source = None
# If the file was not created by the customizer on a previous
# run, and exists, use it.
# This allows a user to overwrite a customizer file with a new
# version, and have it get used over any prior backup.
if (not Log_Old.File_Is_From_Last_Run(test_sys_path)
and os.path.exists(test_sys_path)):
file_path_to_source = test_sys_path
# Check if there is a renamed version of the file, if the main
# path was not valid.
if file_path_to_source == None:
renamed_sys_path = Log_Old.Get_Renamed_File_Path(test_sys_path)
# Source from the renamed file, if it still exists.
if (renamed_sys_path != None
and os.path.exists(renamed_sys_path)):
file_path_to_source = renamed_sys_path
# If no path found, go to next loop iteration.
if file_path_to_source == None:
continue
# Load from the selected file.
file_source_path = file_path_to_source
with open(file_path_to_source, 'rb') as file:
file_binary = file.read()
# If it was pck, clarify as zipped.
# (Use the test_sys_path, since the actual path may
# have a backup extension.)
file_binary_is_zipped = test_sys_path == sys_path_pck
# If still no binary found, check the cat/dat pairs.
# Special check: if looking for a script, they are never
# in the cat/dats, so can skip checks early.
# Note: it is possible unpacked versions of files (with a packed
# version) are not recognized in catalogs by the game, but this
# will look for them anyway.
if (file_binary == None
and not virtual_path.startswith('scripts/')):
# Get the cat versions of the file path.
cat_path = Virtual_Path_to_Cat_Path(virtual_path)
cat_path_pck = Unpacked_Path_to_Packed_Path(cat_path)
# Loop over the cats in priority order.
for cat_file, cat_reader in s.catalog_file_dict.items():
# If the reader hasn't been created, make it.
if cat_reader == None:
cat_reader = Cat_Reader(cat_file)
s.catalog_file_dict[cat_file] = cat_reader
# Loop over the pck and standard versions.
# Note: this is done in the inner loop, checking each cat
# for a pck or non-pck before moving on to the next.
# (It is unclear on how the game handles this, though it
# should be fine since the cats are expected to not
# mix packed and unpacked versions.)
for test_cat_path in [cat_path_pck, cat_path]:
# Skip empty packed paths.
if test_cat_path == None:
continue
# Check the cat for the file.
file_binary = cat_reader.Read(test_cat_path)
if file_binary == None:
continue
# If it returned something, then it matched, so can
# stop searching.
file_source_path = cat_file
# If it was pck, clarify as zipped.
file_binary_is_zipped = test_cat_path == cat_path_pck
break
# Stop looping over cats once a match found.
if file_binary != None:
break
# If no binary was found, error.
if file_binary == None:
if error_if_not_found:
raise Common.File_Missing_Exception(
'Could not find a match for file {}'.format(virtual_path))
return None
# Decompress if needed.
if file_binary_is_zipped:
file_binary = s.Decompress(file_binary, virtual_path)
# If the binary is an empty string, this is an LU dummy file,
# so return None.
if not file_binary:
return None
# Convert the binary into a Game_File object.
# The object constructors will handle parsing of binary data,
# so this just checks file extensions and picks the right class.
# Some special lookups will be done for select files.
if virtual_path == 'types/Globals.txt':
game_file_class = Globals_File
elif file_extension == 'xml':
game_file_class = XML_File
elif file_extension == 'obj':
game_file_class = Obj_File
elif file_extension == 'txt':
game_file_class = T_File
else:
raise Exception('File type for {} not understood.'.format(virtual_path))
# Construct the game file.
# These will also record the path used, to help know where to place
# an edited file in the folder structure.
game_file = game_file_class(
file_binary = file_binary,
virtual_path = virtual_path,
file_source_path = file_source_path,
)
if Settings.write_file_source_paths_to_message_log:
Write_Summary_Line(
'Loaded file {} from {}'.format(virtual_path, file_source_path))
return game_file
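    # Hedged usage sketch (comment only, not part of the original class): typical
    # call order on the module-level Source_Reader instance defined below, assuming
    # Settings has already been pointed at a valid X3 install; the virtual path
    # 'types/TShips.txt' is just an example name.
    #
    #   Source_Reader.Init()
    #   game_file = Source_Reader.Read('types/TShips.txt', error_if_not_found = False)
    #   if game_file is not None:
    #       # game_file is a Globals_File / XML_File / Obj_File / T_File object.
    #       pass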
# Single, global copy of the reader.
# TODO: make a copy part of a File_System or similar object, instead
# of keeping one here.
Source_Reader = Source_Reader_class() |
the-stack_0_10599 | """
Pytest fixtures: High level Resource Management and base setup fixtures
"""
import datetime
import random
import string
import sys
import os
import time
import allure
import re
import logging
from _pytest.fixtures import SubRequest
from pyparsing import Optional
ALLURE_ENVIRONMENT_PROPERTIES_FILE = 'environment.properties'
ALLUREDIR_OPTION = '--alluredir'
# if "logs" not in os.listdir():
# os.mkdir("logs/")
# logging.basicConfig(level=logging.INFO, filename="logs/" + '{:%Y-%m-%d-%H-%M-%S}.log'.format(datetime.datetime.now()))
sys.path.append(
os.path.dirname(
os.path.realpath(__file__)
)
)
if "libs" not in sys.path:
sys.path.append(f'../libs')
for folder in 'py-json', 'py-scripts':
if folder not in sys.path:
sys.path.append(f'../lanforge/lanforge-scripts/{folder}')
sys.path.append(f"../lanforge/lanforge-scripts/py-scripts/tip-cicd-sanity")
sys.path.append(f'../libs')
sys.path.append(f'../libs/lanforge/')
from LANforge.LFUtils import *
if 'py-json' not in sys.path:
sys.path.append('../py-scripts')
from apnos.apnos import APNOS
from controller.controller_1x.controller import FirmwareUtility
import pytest
from lanforge.lf_tests import RunTest
from cv_test_manager import cv_test
from configuration import CONFIGURATION
from configuration import open_flow
from configuration import RADIUS_SERVER_DATA
from configuration import RADIUS_ACCOUNTING_DATA
from configuration import RATE_LIMITING_RADIUS_SERVER_DATA
from configuration import RATE_LIMITING_RADIUS_ACCOUNTING_DATA
from lanforge.scp_util import SCP_File
from testrails.testrail_api import APIClient
from testrails.reporting import Reporting
from lf_tools import ChamberView
from os import path
from typing import Any, Callable, Optional
from _pytest.fixtures import SubRequest
from pytest import fixture
import fixtures_1x
from fixtures_1x import Fixtures_1x
import fixtures_2x
from fixtures_2x import Fixtures_2x
ALLURE_ENVIRONMENT_PROPERTIES_FILE = 'environment.properties'
ALLUREDIR_OPTION = '--alluredir'
def pytest_addoption(parser):
"""pytest addoption function: contains ini objects and options"""
parser.addini("tr_url", "Test Rail URL")
parser.addini("tr_prefix", "Test Rail Prefix (Generally Testbed_name_)")
parser.addini("tr_user", "Testrail Username")
parser.addini("tr_pass", "Testrail Password")
parser.addini("tr_project_id", "Testrail Project ID")
parser.addini("milestone", "milestone Id")
parser.addini("influx_host", "Influx Host", default="influx.cicd.lab.wlan.tip.build")
parser.addini("influx_port", "Influx Port", default=80)
parser.addini("influx_token", "Influx Token", default="TCkdATXAbHmNbn4QyNaj43WpGBYxFrzV")
parser.addini("influx_bucket", "influx bucket", default="tip-cicd")
parser.addini("influx_org", "influx organization", default="tip")
parser.addini(name="firmware", type='string', help="AP Firmware build URL", default="0")
parser.addini("cloud_ctlr", "AP Firmware build URL", default="0")
parser.addini("num_stations", "Number of Stations/Clients for testing")
# change behaviour
parser.addoption(
"--skip-upgrade",
action="store_true",
default=False,
help="skip updating firmware on the AP (useful for local testing)"
)
parser.addoption(
"--skip-lanforge",
action="store_true",
default=False,
help="skip to do any interactions on lanforge (to be used in case of interop)"
)
# change behaviour
parser.addoption(
"--exit-on-fail",
action="store_true",
default=False,
help="skip updating firmware on the AP (useful for local testing)"
)
# change to Ucentral Ctlr
parser.addoption(
"--1.x",
action="store_true",
default=False,
help="Option to run Test Cases on 1.x SDK"
)
# change behaviour
parser.addoption(
"--force-upgrade",
action="store_true",
default=False,
help="force Upgrading Firmware even if it is already latest version"
)
parser.addoption(
"--force-upload",
action="store_true",
default=False,
help="force Uploading Firmware even if it is already latest version"
)
# this has to be the last argument
# example: --access-points ECW5410 EA8300-EU
parser.addoption(
"--testbed",
# nargs="+",
default="basic-01",
help="AP Model which is needed to test"
)
parser.addoption(
"--use-testrail",
action="store_false",
default=True,
help="Stop using Testrails"
)
# Perfecto Parameters
parser.addini("perfectoURL", "Cloud URL")
parser.addini("securityToken", "Security Token")
parser.addini("platformName-iOS", "iOS Platform")
parser.addini("platformName-android", "Android Platform")
parser.addini("model-iOS", "iOS Devices")
parser.addini("model-android", "Android Devices")
parser.addini("bundleId-iOS", "iOS Devices")
parser.addini("bundleId-iOS-Settings", "iOS Settings App")
parser.addini("appPackage-android", "Android Devices")
parser.addini("bundleId-iOS-Safari", "Safari BundleID")
parser.addini("wifi-SSID-2g-Pwd", "Wifi 2g Password")
parser.addini("Default-SSID-5gl-perfecto-b", "Wifi 5g AP Name")
parser.addini("Default-SSID-2g-perfecto-b", "Wifi 2g AP Name")
parser.addini("Default-SSID-perfecto-b", "Wifi AP Name")
parser.addini("bundleId-iOS-Ping", "Ping Bundle ID")
parser.addini("browserType-iOS", "Mobile Browser Name")
parser.addini("projectName", "Project Name")
parser.addini("projectVersion", "Project Version")
parser.addini("jobName", "CI Job Name")
parser.addini("jobNumber", "CI Job Number")
parser.addini("reportTags", "Report Tags")
parser.addoption(
"--access-points-perfecto",
# nargs="+",
default=["Perfecto"],
help="list of access points to test"
)
"""
Test session base fixture
"""
# To be depreciated as testrails will go
@pytest.fixture(scope="session")
def test_cases():
"""Yields the test cases from configuration.py: will be depreciated"""
yield []
@pytest.fixture(scope="session")
def testbed(request):
"""yields the testbed option selection"""
var = request.config.getoption("--testbed")
yield var
@pytest.fixture(scope="session")
def should_upload_firmware(request):
"""yields the --force-upload option for firmware upload selection"""
yield request.config.getoption("--force-upload")
@pytest.fixture(scope="session")
def should_upgrade_firmware(request):
"""yields the --force-upgrade option for firmware upgrade selection"""
yield request.config.getoption("--force-upgrade")
@pytest.fixture(scope="session")
def exit_on_fail(request):
"""yields the --exit-on-fail option for exiting the test case if it fails without teardown"""
yield request.config.getoption("--exit-on-fail")
@pytest.fixture(scope="session")
def radius_info():
"""yields the radius server information from lab info file"""
yield RADIUS_SERVER_DATA
@pytest.fixture(scope="session")
def radius_accounting_info():
"""yields the radius accounting information from lab info file"""
yield RADIUS_ACCOUNTING_DATA
@pytest.fixture(scope="session")
def rate_radius_info():
"""yields the radius server information from lab info file"""
yield RATE_LIMITING_RADIUS_SERVER_DATA
@pytest.fixture(scope="session")
def rate_radius_accounting_info():
"""yields the radius accounting information from lab info file"""
yield RATE_LIMITING_RADIUS_ACCOUNTING_DATA
@pytest.fixture(scope="session")
def get_configuration(testbed, request):
"""yields the selected testbed information from lab info file (configuration.py)"""
if request.config.getini("cloud_ctlr") != "0":
CONFIGURATION[testbed]["controller"]["url"] = request.config.getini("cloud_ctlr")
if request.config.getini("firmware") != "0":
version = request.config.getini("firmware")
version_list = version.split(",")
for i in range(len(CONFIGURATION[testbed]["access_point"])):
CONFIGURATION[testbed]["access_point"][i]["version"] = version_list[i]
yield CONFIGURATION[testbed]
@pytest.fixture(scope="session")
def get_apnos():
"""yields the LIBRARY for APNOS, Reduces the use of imports across files"""
yield APNOS
@pytest.fixture(scope="session")
def get_equipment_ref(request, setup_controller, testbed, get_configuration):
""""""
if request.config.getoption("1.x"):
equipment_id_list = []
for i in get_configuration['access_point']:
equipment_id_list.append(setup_controller.get_equipment_id(
serial_number=i['serial']))
else:
equipment_id_list = []
for i in get_configuration['access_point']:
equipment_id_list.append(i['serial'])
yield equipment_id_list
@pytest.fixture(scope="session")
def get_sdk_version(fixtures_ver):
version = fixtures_ver.get_sdk_version()
yield version
@pytest.fixture(scope="session")
def get_uci_show(fixtures_ver, get_apnos, get_configuration):
uci_show = fixtures_ver.get_uci_show(get_apnos, get_configuration)
yield uci_show
@pytest.fixture(scope="session")
def skip_lf(request):
yield request.config.getoption("--skip-lanforge")
@pytest.fixture(scope="session")
def get_openflow():
yield open_flow
# Controller Fixture
@pytest.fixture(scope="session")
def setup_controller(request, get_configuration, add_env_properties, fixtures_ver):
"""sets up the controller connection and yields the sdk_client object"""
sdk_client = fixtures_ver.controller_obj
request.addfinalizer(fixtures_ver.disconnect)
yield sdk_client
@pytest.fixture(scope="session")
def setup_firmware(setup_controller):
""" Fixture to Setup Firmware with the selected sdk """
setup_controller.instantiate_firmware()
yield True
@pytest.fixture(scope="session")
def instantiate_firmware(request, setup_controller, get_configuration):
"""sets up firmware utility and yields the object for firmware upgrade"""
if request.config.getoption("--1.x"):
firmware_client_obj = []
for access_point_info in get_configuration['access_point']:
version = access_point_info["version"]
if request.config.getini("build").__contains__("https://tip.jfrog.io/artifactory/tip-wlan-ap-firmware/"):
version = request.config.getini("build")
firmware_client = FirmwareUtility(sdk_client=setup_controller,
model=access_point_info["model"],
version_url=version)
firmware_client_obj.append(firmware_client)
yield firmware_client_obj
else:
# 2.x
pass
@pytest.fixture(scope="session")
def get_latest_firmware(request, instantiate_firmware):
"""yields the list of firmware version"""
if request.config.getoption("--1.x"):
fw_version_list = []
try:
for fw_obj in instantiate_firmware:
latest_firmware = fw_obj.get_fw_version()
latest_firmware = latest_firmware.replace(".tar.gz", "")
fw_version_list.append(latest_firmware)
except Exception as e:
print(e)
fw_version_list = []
yield fw_version_list
else:
# 2.x
pass
@pytest.fixture(scope="session")
def upload_firmware(request, should_upload_firmware, instantiate_firmware):
"""yields the firmware_id that is uploaded to cloud"""
if request.config.getoption("--1.x"):
firmware_id_list = []
for i in range(0, len(instantiate_firmware)):
firmware_id = instantiate_firmware[i].upload_fw_on_cloud(force_upload=should_upload_firmware)
firmware_id_list.append(firmware_id)
yield firmware_id_list
else:
# 2.x release
yield True
@pytest.fixture(scope="session")
def upgrade_firmware(request, instantiate_firmware, get_equipment_ref, check_ap_firmware_cloud, get_latest_firmware,
should_upgrade_firmware, should_upload_firmware, get_apnos, get_configuration):
"""yields the status of upgrade of firmware. waits for 300 sec after each upgrade request"""
print(should_upgrade_firmware, should_upload_firmware)
if request.config.getoption("--1.x"):
status_list = []
active_fw_list = []
try:
for access_point in get_configuration['access_point']:
ap_ssh = get_apnos(access_point, sdk="1.x")
active_fw = ap_ssh.get_active_firmware()
active_fw_list.append(active_fw)
except Exception as e:
print(e)
active_fw_list = []
print(active_fw_list, get_latest_firmware)
if get_latest_firmware != active_fw_list:
if request.config.getoption("--skip-upgrade"):
status = "skip-upgrade"
status_list.append(status)
else:
for i in range(0, len(instantiate_firmware)):
status = instantiate_firmware[i].upgrade_fw(equipment_id=get_equipment_ref[i], force_upload=True,
force_upgrade=should_upgrade_firmware)
status_list.append(status)
else:
if should_upgrade_firmware:
for i in range(0, len(instantiate_firmware)):
status = instantiate_firmware[i].upgrade_fw(equipment_id=get_equipment_ref[i],
force_upload=should_upload_firmware,
force_upgrade=should_upgrade_firmware)
status_list.append(status)
else:
status = "skip-upgrade Version Already Available"
status_list.append(status)
yield status_list
else:
# 2.x release
pass
@pytest.fixture(scope="session")
def check_ap_firmware_cloud(request, setup_controller, get_equipment_ref):
"""yields the active version of firmware on cloud"""
if request.config.getoption("--1.x"):
ap_fw_list = []
for i in get_equipment_ref:
ap_fw_list.append(setup_controller.get_ap_firmware_old_method(equipment_id=i))
yield ap_fw_list
else:
# 2.x
pass
@pytest.fixture(scope="session")
def check_ap_firmware_ssh(get_configuration, request):
"""yields the active version of firmware on ap"""
if request.config.getoption("--1.x"):
active_fw_list = []
try:
for access_point in get_configuration['access_point']:
ap_ssh = APNOS(access_point)
active_fw = ap_ssh.get_active_firmware()
active_fw_list.append(active_fw)
except Exception as e:
print(e)
active_fw_list = []
yield active_fw_list
else:
# 2.x
pass
@pytest.fixture(scope="session")
def setup_test_run(setup_controller, request, upgrade_firmware, get_configuration,
get_equipment_ref, get_latest_firmware,
get_apnos):
"""used to upgrade the firmware on AP and should be called on each test case on a module level"""
if request.config.getoption("--1.x"):
active_fw_list = []
try:
for access_point in get_configuration['access_point']:
ap_ssh = get_apnos(access_point, sdk="1.x")
active_fw = ap_ssh.get_active_firmware()
active_fw_list.append(active_fw)
except Exception as e:
print(e)
active_fw_list = []
print(active_fw_list, get_latest_firmware)
if active_fw_list == get_latest_firmware:
yield True
else:
pytest.exit("AP is not Upgraded tp Target Firmware versions")
else:
# 2.x
pass
"""
Instantiate Reporting
"""
@pytest.fixture(scope="session")
def update_report(request, testbed, get_configuration):
"""used to update the test report on testrail/allure"""
if request.config.getoption("--use-testrail"):
tr_client = Reporting()
else:
tr_client = APIClient(request.config.getini("tr_url"), request.config.getini("tr_user"),
request.config.getini("tr_pass"), request.config.getini("tr_project_id"))
if request.config.getoption("--use-testrail"):
tr_client.rid = "skip testrails"
else:
projId = tr_client.get_project_id(project_name=request.config.getini("tr_project_id"))
test_run_name = request.config.getini("tr_prefix") + testbed + "_" + str(
datetime.date.today()) + "_" + get_configuration['access_point'][0]['version']
tr_client.create_testrun(name=test_run_name, case_ids=list(TEST_CASES.values()), project_id=projId,
milestone_id=request.config.getini("milestone"),
description="Automated Nightly Sanity test run for new firmware build")
rid = tr_client.get_run_id(test_run_name=test_run_name)
tr_client.rid = rid
yield tr_client
"""
FRAMEWORK MARKER LOGIC
"""
@pytest.fixture(scope="session")
def get_security_flags():
"""used to get the essential markers on security and band"""
# Add more classifications as we go
security = ["open", "wpa", "wep", "wpa2_personal", "wpa3_personal", "wpa3_personal_mixed",
"wpa_wpa2_enterprise_mixed", "wpa2_eap", "wpa2_only_eap",
"wpa_wpa2_personal_mixed", "wpa_enterprise", "wpa2_enterprise", "wpa3_enterprise_mixed",
"wpa3_enterprise", "twog", "fiveg", "radius"]
yield security
@pytest.fixture(scope="session")
def get_markers(request, get_security_flags):
"""used to get the markers on the selected test case class, used in setup_profiles"""
session = request.node
markers = list()
security = get_security_flags
security_dict = dict().fromkeys(security)
for item in session.items:
for j in item.iter_markers():
markers.append(j.name)
for i in security:
if set(markers).__contains__(i):
security_dict[i] = True
else:
security_dict[i] = False
yield security_dict
@pytest.fixture(scope="session")
def test_access_point(fixtures_ver, request, get_configuration, get_apnos):
"""used to check the manager status of AP, should be used as a setup to verify if ap can reach cloud"""
status = fixtures_ver.get_ap_cloud_connectivity_status(get_configuration, get_apnos)
def teardown_session():
data = []
data.append(False)
for s in status:
data.append(s[0])
print(data)
if False not in data:
pytest.exit("AP is Not connected to ucentral gw")
allure.attach(name=str(status), body="")
request.addfinalizer(teardown_session)
yield status
@pytest.fixture(scope="session")
def test_ap_connection_status(fixtures_ver, request, get_configuration, get_apnos):
"""used to check the manager status of AP, should be used as a setup to verify if ap can reach cloud"""
connection, redirector_value = fixtures_ver.get_ap_status_logs(get_configuration, get_apnos)
yield connection, redirector_value
@pytest.fixture(scope="session")
def traffic_generator_connectivity(testbed, get_configuration):
"""Verify if traffic generator is reachable"""
if get_configuration['traffic_generator']['name'] == "lanforge":
lanforge_ip = get_configuration['traffic_generator']['details']['ip']
lanforge_port = get_configuration['traffic_generator']['details']['port']
# Condition :
# if gui connection is not available
# yield False
# Condition :
# If Gui Connection is available
# yield the gui version
try:
cv = cv_test(lanforge_ip, lanforge_port)
url_data = cv.get_ports("/")
lanforge_GUI_version = url_data["VersionInfo"]["BuildVersion"]
lanforge_gui_git_version = url_data["VersionInfo"]["GitVersion"]
lanforge_gui_build_date = url_data["VersionInfo"]["BuildDate"]
print(lanforge_GUI_version, lanforge_gui_build_date, lanforge_gui_git_version)
if not (lanforge_GUI_version or lanforge_gui_build_date or lanforge_gui_git_version):
yield False
else:
yield lanforge_GUI_version
except:
yield False
else:
yield True
@pytest.fixture(scope="session")
def create_lanforge_chamberview_dut(lf_tools, skip_lf):
dut_name = ""
if not skip_lf:
dut_object, dut_name = lf_tools.Create_Dut()
return dut_name
@pytest.fixture(scope="session")
def lf_tools(get_configuration, testbed, skip_lf):
""" Create a DUT on LANforge"""
if not skip_lf:
obj = ChamberView(lanforge_data=get_configuration["traffic_generator"]["details"],
testbed=testbed, access_point_data=get_configuration["access_point"])
else:
obj = False
yield obj
@pytest.fixture(scope="session")
def lf_test(get_configuration, setup_influx, request, skip_lf):
if not skip_lf:
if request.config.getoption("--exit-on-fail"):
obj = RunTest(lanforge_data=get_configuration['traffic_generator']['details'], influx_params=setup_influx,
debug=True)
if request.config.getoption("--exit-on-fail") is False:
obj = RunTest(lanforge_data=get_configuration['traffic_generator']['details'], influx_params=setup_influx,
debug=False)
yield obj
@pytest.fixture(scope="session")
def setup_influx(request, testbed, get_configuration):
""" Setup Influx Parameters: Used in CV Automation"""
influx_params = {
"influx_host": request.config.getini("influx_host"),
"influx_port": request.config.getini("influx_port"),
"influx_token": request.config.getini("influx_token"),
"influx_bucket": request.config.getini("influx_bucket"),
"influx_org": request.config.getini("influx_org"),
"influx_tag": [testbed, get_configuration["access_point"][0]["model"]],
}
yield influx_params
# Need for Perforce Mobile Device Execution
def pytest_sessionstart(session):
session.results = dict()
@fixture(scope='session', autouse=True)
def add_allure_environment_property(request: SubRequest) -> Optional[Callable]:
environment_properties = dict()
def maker(key: str, value: Any):
environment_properties.update({key: value})
yield maker
alluredir = request.config.getoption(ALLUREDIR_OPTION)
if not alluredir or not os.path.isdir(alluredir) or not environment_properties:
return
allure_env_path = path.join(alluredir, ALLURE_ENVIRONMENT_PROPERTIES_FILE)
with open(allure_env_path, 'w') as _f:
data = '\n'.join([f'{variable}={value}' for variable, value in environment_properties.items()])
_f.write(data)
@fixture(scope='session')
def add_env_properties(get_configuration, get_sdk_version, get_apnos, fixtures_ver,
add_allure_environment_property: Callable) -> None:
add_allure_environment_property('Access-Point-Model', get_configuration["access_point"][0]["model"])
add_allure_environment_property('SDK-Version', get_sdk_version)
try:
add_allure_environment_property('Access-Point-Firmware-Version',
fixtures_ver.get_ap_version(get_apnos, get_configuration)[0].split("\n")[1])
except Exception as e:
print(e)
pass
add_allure_environment_property('Cloud-Controller-SDK-URL', get_configuration["controller"]["url"])
add_allure_environment_property('AP-Serial-Number', get_configuration["access_point"][0]["serial"] + "\n")
@pytest.fixture(scope="session")
def fixtures_ver(request, get_configuration):
if request.config.getoption("1.x") is False:
print("2.x")
obj = Fixtures_2x(configuration=get_configuration)
if request.config.getoption("1.x"):
print("1.x")
obj = Fixtures_1x(configuration=get_configuration)
yield obj
@pytest.fixture(scope="session")
def firmware_upgrade(fixtures_ver, get_apnos, get_configuration):
upgrade_status = fixtures_ver.setup_firmware(get_apnos, get_configuration)
yield upgrade_status
"""
Logs related Fixtures
"""
@pytest.fixture(scope="function")
def get_ap_logs(request, get_apnos, get_configuration):
S = 9
instance_name = ''.join(random.choices(string.ascii_uppercase + string.digits, k=S))
for ap in get_configuration['access_point']:
ap_ssh = get_apnos(ap, pwd="../libs/apnos/", sdk="2.x")
ap_ssh.run_generic_command(cmd="logger start testcase: " + instance_name)
def collect_logs():
for ap in get_configuration['access_point']:
ap_ssh = get_apnos(ap, pwd="../libs/apnos/", sdk="2.x")
ap_ssh.run_generic_command(cmd="logger stop testcase: " + instance_name)
ap_logs = ap_ssh.get_logread(start_ref="start testcase: " + instance_name,
stop_ref="stop testcase: " + instance_name)
allure.attach(name='logread', body=str(ap_logs))
pass
request.addfinalizer(collect_logs)
@pytest.fixture(scope="function")
def get_lf_logs(request, get_apnos, get_configuration):
ip = get_configuration["traffic_generator"]["details"]["ip"]
port = get_configuration["traffic_generator"]["details"]["ssh_port"]
def collect_logs_lf():
log_0 = "/home/lanforge/lanforge_log_0.txt"
log_1 = "/home/lanforge/lanforge_log_1.txt"
obj = SCP_File(ip=ip, port=port, username="root", password="lanforge", remote_path=log_0,
local_path=".")
obj.pull_file()
allure.attach.file(source="lanforge_log_0.txt",
name="lanforge_log_0")
obj = SCP_File(ip=ip, port=port, username="root", password="lanforge", remote_path=log_1,
local_path=".")
obj.pull_file()
allure.attach.file(source="lanforge_log_1.txt",
name="lanforge_log_1")
request.addfinalizer(collect_logs_lf)
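# Hedged usage sketch (comment only, not from the original suite): a test module in
# this framework would typically consume the session fixtures defined above roughly
# like this; the test name, marker and assertions below are assumptions used only
# for illustration.
#
#   import pytest
#
#   @pytest.mark.wpa2_personal
#   def test_client_connectivity(get_configuration, setup_controller, lf_tools):
#       assert "access_point" in get_configuration
#       assert setup_controller is not None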
|
the-stack_0_10600 | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 24 22:53:59 2018
@authors: a.pakbin, T.J. Ashby
"""
from sklearn.model_selection import StratifiedKFold
from auxiliary import grid_search,ICD9_categorizer, save_fold_data, convert_numbers_to_names, min_max_mean_auc_for_labels, train_test_one_hot_encoder, possible_values_finder,train_test_normalizer, train_test_imputer, feature_importance_saver, feature_importance_updator, save_roc_curve, data_reader, vectors_to_csv, create_subfolder_if_not_existing, feature_rankings_among_all_labels_saver
import numpy as np
import pandas as pd
from fmeasure import roc, maximize_roc
from xgboost.sklearn import XGBClassifier
import random as rnd
from sklearn.metrics import roc_auc_score
import pickle
import gc
import sys
import logging as lg
#
# NB: the original code base contains code that will trigger
# "pandas.core.common.SettingWithCopyError: A value is trying to be set on a
# copy of a slice from a DataFrame" errors if the code is run with
# pd.set_option('mode.chained_assignment', 'raise'). Hence I'm not using it.
#
def main(file_name,
data_address,
writing_address):
lg.basicConfig(stream=sys.stderr, level=lg.DEBUG)
mpl_logger = lg.getLogger('matplotlib')
mpl_logger.setLevel(lg.WARNING)
pd.set_option('display.max_colwidth', None)
pd.set_option('display.max_columns', 20)
data_address = str(data_address)
writing_address = str(writing_address)
#the address where MIMIC III tables are in .csv.gz format. The tables are: D_ICD_PROCEDURES.csv.gz, D_ITEMS.csv.gz and D_LABITEMS.csv.gz
#conversion_tables_address='../data'
conversion_tables_address = data_address
#outcome labels can contain: '24hrs' ,'48hrs','72hrs', '24hrs~72hrs','7days','30days', 'Bounceback'
outcome_labels=['24hrs' ,'48hrs','72hrs', '24hrs~72hrs','7days','30days', 'Bounceback']
normalize_data=False
save_folds_data=True
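    # The three lists below are unpacked later as [max_depths, n_estimators, learning_rates]
    # for the XGBoost grid search (see the grid_search call inside the fold loop).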
values_for_grid_search=[np.linspace(start=1, stop=6, num=6),[50,100,200,1000,1500],[0.1]]
num_of_folds=5
#################################
categorical_column_names=['ADMISSION_TYPE', 'INSURANCE', 'LANGUAGE', 'RELIGION', 'MARITAL_STATUS', 'ETHNICITY','FIRST_CAREUNIT', 'GENDER']
# Read the CSV file
# - The form of the CSV file is:
# -
data=data_reader(data_address, file_name)
# Returns a dictionary where each column name is a key, and the result is the
# set of values that can appear (with NaN etc removed)
possible_values=possible_values_finder(data, categorical_column_names)
# Fill in the target data column
data['IsReadmitted_24hrs~72hrs']=[1 if x>0 else 0 for x in (data['IsReadmitted_72hrs']-data['IsReadmitted_24hrs'])]
# List of non-feature column names
non_attribute_column_names=['HADM_ID', 'ICUSTAY_ID', 'INTIME', 'OUTTIME', 'SUBJECT_ID', 'IsReadmitted_24hrs','IsReadmitted_Bounceback','IsReadmitted_24hrs~72hrs' ,'IsReadmitted_48hrs','IsReadmitted_72hrs','IsReadmitted_7days','IsReadmitted_30days', 'Time_To_readmission', 'hospital_expire_flag']
if 'Subset' in data.columns:
#
# NB: If doing subsetting, you should NOT add the test fold from subset A to
# the real test data from subset B, otherwise you'll get better results than
# you should (as the model is trained on subset A and so will do well on the
# slice of subset A included in the test set).
#
testOnSubsetA = False
else:
#
# However, if there is no subsetting (everything is subset A), then you need
# to use the test data from subset A, otherwise there is no test data. Hence
# the flag.
#
lg.info("No subsetting in input data")
data.loc[:, 'Subset'] = 'A'
testOnSubsetA = True
non_attribute_column_names.append('Subset')
    #TODO: for excluding insurance, language, religion, marital status and ethnicity from the data, uncomment the following line
#non_attribute_column_names += ['INSURANCE', 'LANGUAGE', 'RELIGION', 'MARITAL_STATUS', 'ETHNICITY']
#
# The function ICD9_categorizer() coarsens the ICD codes to a higher level
# by dropping the last code digit - but, it looks like there may be some
# issues with the original code as it treats the ICD codes as numbers rather
# than strings and so doesn't take into account the semantically meaningful
# leading and trailing zeros.
#
data=ICD9_categorizer(data)
model_type='XGB'
PREDICTIONS=list()
current_folder=writing_address
#
# Loop over target labels to predict
#
for idx, label_column_name in enumerate(['IsReadmitted_'+outcome_label for outcome_label in outcome_labels]):
#
# Original code (replaced because we need to handle subsets for the
# experiments):
# icu_stays=data['ICUSTAY_ID'].values
# y=data[label_column_name].values
# X=data.drop(non_attribute_column_names, axis=1)
#
#
# Subsetting
#
# Labels to predict (sklearn format)
y=data.loc[data['Subset'] == "A", label_column_name].values
y_testB = data.loc[data['Subset'] == "B", label_column_name].values
# Input features
X = data.loc[data['Subset'] == "A", :].drop(non_attribute_column_names, axis=1)
X_testB = data.loc[data['Subset'] == "B", :].drop(non_attribute_column_names, axis=1)
# Output folder
current_subfolder=current_folder+'/'+outcome_labels[idx]
create_subfolder_if_not_existing(current_subfolder)
auc_list=list()
ICUstayID=list()
Prediction=list()
accumulative_feature_importance=None
print ('\n',model_type, ' '*5,'LABEL: ', outcome_labels[idx])
skf=StratifiedKFold(n_splits=num_of_folds, shuffle=True, random_state=rnd.randint(1,1e6))
#
# Loop over folds
# - Each fold is a train/test split, with the test being used for the final score
#
fold_number=0
for train_index, test_index in skf.split(X, y):
fold_number+=1
print ('\n fold',fold_number)
#
# Original code (replaced because we need to handle subsets for the
# experiments):
# X_train, X_test = X.iloc[train_index], X.iloc[test_index]
# y_train, y_test = y[train_index], y[test_index]
# icustay_id_train, icustay_id_test=icu_stays[train_index],icu_stays[test_index]
#
X_train = X.iloc[train_index]
y_train = y[train_index]
if testOnSubsetA == True:
X_test = pd.concat([X_testB, X.iloc[test_index]])
y_test = np.concatenate((y_testB, y[test_index]))
else:
X_test = X_testB
y_test = y_testB
lg.debug("len X_test: {}, len y_test: {}".format(len(X_test), len(y_test)))
#
# Original code (replaced because we need to handle subsets for the
# experiments):
# icustay_id_train, icustay_id_test=icu_stays[train_index],icu_stays[test_index]
#
icustay_id_train = (data.loc[data['Subset'] == "A", 'ICUSTAY_ID'].values)[train_index]
testB = data.loc[data['Subset'] == "B", 'ICUSTAY_ID'].values
if testOnSubsetA == True:
testA = (data.loc[data['Subset'] == "A", 'ICUSTAY_ID'].values)[test_index]
icustay_id_test = np.concatenate((testB, testA))
else:
icustay_id_test = testB
lg.debug("len icustay_id_test: {}".format(len(icustay_id_test)))
# Fill in missing values in train and test sets
[X_TRAIN_IMPUTED, X_TEST_IMPUTED]=train_test_imputer(X_train, X_test, categorical_column_names)
if normalize_data:
[X_TRAIN_NORMALIZED, X_TEST_NORMALIZED]=train_test_normalizer(X_TRAIN_IMPUTED, X_TEST_IMPUTED, categorical_column_names)
else:
[X_TRAIN_NORMALIZED, X_TEST_NORMALIZED]=[X_TRAIN_IMPUTED, X_TEST_IMPUTED]
# Do one-hot encoding for categorical variables
[X_TRAIN_NORMALIZED, X_TEST_NORMALIZED]=train_test_one_hot_encoder(X_TRAIN_NORMALIZED, X_TEST_NORMALIZED, categorical_column_names, possible_values)
if save_folds_data:
# Save the train and test inputs for this fold
save_fold_data(current_subfolder, fold_number, icustay_id_train, X_TRAIN_NORMALIZED, y_train, icustay_id_test, X_TEST_NORMALIZED, y_test, convert_names=True, conversion_tables_address=conversion_tables_address)
[max_depths, n_estimators, learning_rates]=values_for_grid_search
#
            # Grid search to find the best hyperparameters:
            # - hyperparameters are selected independently for each outer fold,
            # - using an inner 2-fold cross-validation over the training split.
#
best_settings=grid_search(X=X_TRAIN_NORMALIZED, y=y_train, num_of_folds=2, verbose=True, return_auc_values=False, first_dim=max_depths, second_dim=n_estimators, third_dim=learning_rates)
print ('{:<4s}{:<16s}: max_depth: {:<1s}, n_estimators: {:<2s}, learning_rate: {:<2s}'.format('','best hyperparameters', str(best_settings[0]), str(best_settings[1]), str(best_settings[2])))
model=XGBClassifier(max_depth=int(best_settings[0]), n_estimators=int(best_settings[1]), learning_rate=best_settings[2])
#
# Do the actual training (with the best hyperparams)
#
model.fit(X_TRAIN_NORMALIZED, y_train)
feature_importance=model.feature_importances_
accumulative_feature_importance=feature_importance_updator(accumulative_feature_importance, feature_importance)
# Dump the feature importances to file
pd.DataFrame(data={'FEATURE_NAME': convert_numbers_to_names(X_TRAIN_NORMALIZED.columns, conversion_tables_address), 'IMPORTANCE': feature_importance}).sort_values(by='IMPORTANCE', ascending=False).reset_index(drop=True).to_csv(current_subfolder+'/'+'fold_'+str(fold_number)+'_ranked_feature_importances.csv')
#
# Make the predictions on the test set
#
predictions=model.predict_proba(X_TEST_NORMALIZED)[:,1]
# Append results to an array (?)
# These variables seem to be only assigned to, never used
ICUstayID=np.append(ICUstayID,icustay_id_test)
Prediction=np.append(Prediction,predictions)
# Write stuff out...
lg.debug("Vector lengths: 1 icustay_id_test: {}, 2 predictions: {}, 3 y_test: {}".format(len(icustay_id_test), len(predictions), len(y_test)))
vectors_to_csv(current_subfolder, file_name='fold_'+str(fold_number), vector_one=icustay_id_test, label_one='ICUSTAY_ID', vector_two=predictions, label_two='PREDICTION', vector_three=y_test, label_three='LABEL')
auc=roc_auc_score(y_true=y_test, y_score=predictions)
auc_list.append(auc)
ROC=roc(predicted=predictions, labels=y_test)
ROC.to_csv(current_subfolder+'/'+'fold_'+str(fold_number)+'_roc.csv')
maximum=maximize_roc(ROC, maximization_criteria='fscore')
maximum.to_csv(current_subfolder+'/'+'fold_'+str(fold_number)+'_optimum_point.csv')
TPR, FPR = ROC['recall'].values, 1-ROC['specificity']
# Minor change here to allow different figure formats
figtype = 'png'
save_roc_curve(current_subfolder+'/'+'fold_'+str(fold_number)+'_roc_curve.'+figtype, TPR, FPR, auc)
pickle.dump(model, open(current_subfolder+'/'+'fold_'+str(fold_number)+'.model','wb'))
print (' '+'-'*30)
feature_importance_saver(address=current_subfolder, col_names=convert_numbers_to_names(X_TRAIN_NORMALIZED.columns, conversion_tables_address), accumulative_feature_importance=accumulative_feature_importance, num_of_folds=num_of_folds)
# Minor change here to avoid complications with python generator functions
vectors_to_csv(current_subfolder, file_name='folds_AUC', vector_one=auc_list, label_one='AUC', vector_two=list(range(1,num_of_folds+1)), label_two='FOLD_NUMBER')
gc.collect()
current_folder=writing_address
min_max_mean_auc_for_labels(current_folder, outcome_labels)
feature_rankings_among_all_labels_saver(current_folder,outcome_labels, conversion_tables_address)
if __name__=='__main__':
file_name = sys.argv[1]
data_address = sys.argv[2]
writing_address = sys.argv[3]
main(file_name, data_address, writing_address)
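# Hedged invocation sketch (comment only, not part of the original script): main()
# expects the feature CSV file name, the folder holding it together with the
# MIMIC-III dictionary tables, and an output folder; the script and path names
# below are assumptions used only for illustration.
#
#   python readmission_main.py features.csv ../data ../results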
|
the-stack_0_10601 | from acmacs_py import *
from .. import utils
from .log import Log
import acmacs
# ----------------------------------------------------------------------
class MapMaker:
def __init__(self, chain_setup, minimum_column_basis, log :Log):
self.chain_setup = chain_setup
self.minimum_column_basis = minimum_column_basis
self.log = log
def individual_map_directory_name(self):
return f"i-{self.minimum_column_basis}"
def command(self, source :Path, target :Path):
"""returns command (list) or None if making is not necessary (already made)"""
target.parent.mkdir(parents=True, exist_ok=True)
if utils.older_than(target, source):
if self.process(source):
return [self.command_name(), *self.command_args(), "--grid-json", target.with_suffix(".grid.json"), self.preprocess(source, target.parent), target]
else:
self.log.info(f"{target} ignored")
return None
else:
# self.log.info(f"{target} up to date")
return None
def command_name(self):
return "chart-relax-grid"
def command_args(self):
return [
"-n", self.chain_setup.number_of_optimizations(),
"-d", self.chain_setup.number_of_dimensions(),
"-m", self.minimum_column_basis,
*self.args_keep_projections(),
*self.args_reorient(),
*self.args_disconnect()
]
def args_keep_projections(self):
return ["--keep-projections", self.chain_setup.projections_to_keep()]
def args_reorient(self):
reorient_to = self.chain_setup.reorient_to()
if reorient_to:
return ["--reorient", reorient_to]
else:
return []
def args_disconnect(self):
if not self.chain_setup.disconnect_having_few_titers():
return ["--no-disconnect-having-few-titers"]
else:
return []
def process(self, source):
return True
def preprocess(self, source :Path, output_directory :Path):
return source
@classmethod
def add_threads_to_commands(cls, threads :int, commands :list):
"""Modifies commands to make it limit threads number. Returns modified command"""
return [command + ["--threads", threads] for command in commands]
# ----------------------------------------------------------------------
class MapMakerInSteps (MapMaker):
"""
1. multiple chart-relax (without grid) to run on multiple machines (nodes)
2. combine results
3. muiltipe chart-grid-test for the best result of 2, for different sets of antigens and sera to run on multiple nodes
4. combine results, move trapped points, relax, then repeat 3
"""
# ----------------------------------------------------------------------
class IndividualMapMaker (MapMaker):
def __init__(self, *args, ignore_tables_with_too_few_sera, **kwargs):
super().__init__(*args, **kwargs)
self.ignore_tables_with_too_few_sera = ignore_tables_with_too_few_sera
def process(self, source):
return not self.ignore(source)
def preprocess(self, source :Path, output_directory :Path):
return self.chain_setup.individual_table_preprocess(source, output_directory=output_directory)
def ignore(self, source):
if self.ignore_tables_with_too_few_sera:
if isinstance(source, acmacs.Chart):
chart = source
chart_name = chart.make_name()
else:
chart = acmacs.Chart(source)
chart_name = source
if chart.number_of_antigens() < 3 or chart.number_of_sera() < 3:
self.log.info(f"chart has too few antigens ({chart.number_of_antigens()}) or sera ({chart.number_of_sera()}), ignored ({chart_name})")
return True
return False
# ----------------------------------------------------------------------
class IndividualMapWithMergeColumnBasesMaker (IndividualMapMaker):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# self.output_dir_name = output_dir_name
self.source = None # nothing to do
self.target = None # nothing to do
def prepare(self, source :Path, merge_column_bases :dict, merge_path :Path, output_dir :Path, output_prefix :str):
self.log.info(f"Individual table map ({source.name}) with column bases from the merge ({merge_path.name})")
chart = acmacs.Chart(self.preprocess(source, output_directory=output_dir))
mcb_source = output_dir.joinpath(f"{output_prefix}{chart.date()}.mcb-table{source.suffix}")
mcb_target = output_dir.joinpath(f"{output_prefix}{chart.date()}.mcb{source.suffix}")
if utils.older_than(mcb_target, source):
if not self.ignore(chart):
cb = chart.column_bases(self.minimum_column_basis)
orig_cb = str(cb)
updated = False
for sr_no, serum in chart.select_all_sera():
mcb = merge_column_bases.get(serum.name_full())
if mcb is None:
message = f"No column basis for {serum.name_full()} in the merge column bases (source: {source.name}:\n{pprint.pformat(merge_column_bases, width=200)}"
self.log.info(f"ERROR {message}")
raise RuntimeError(message)
if mcb != cb[sr_no]:
if mcb < cb[sr_no]:
self.log.info(f"Column basis for {serum.name_full()} in the merge ({mcb}) is less than in the individual table ({cb[sr_no]})")
cb[sr_no] = mcb
updated = True
if updated:
chart.column_bases(cb)
self.log.info(f"{mcb_source} <-- {source}: column basis updated from merge:\n orig: {orig_cb}\n new: {cb}")
self.source = mcb_source
self.target = mcb_target
chart.export(self.source, program_name=sys.argv[0])
else:
self.log.info("column basis in the merge are the same as in the original individual table")
# else:
# self.log.info(f"{mcb_source} up to date")
self.log.separator(newlines_before=1)
# ----------------------------------------------------------------------
class IncrementalMapMaker (MapMaker):
def command_name(self):
return "chart-relax-incremental"
def command_args(self):
return [
"-n", self.chain_setup.number_of_optimizations(),
"--grid-test",
"--remove-source-projection",
*self.args_keep_projections(),
# *self.args_reorient(),
*self.args_disconnect()
]
# ----------------------------------------------------------------------
def extract_column_bases(chart):
return {serum.name_full(): chart.column_basis(sr_no) for sr_no, serum in chart.select_all_sera()}
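# Hedged usage sketch (comment only, not from the original module): how a chain step
# might drive an IndividualMapMaker; "chain_setup", the Log instance and the chart
# paths are stand-in assumptions.
#
#   maker = IndividualMapMaker(chain_setup, minimum_column_basis="none", log=log,
#                              ignore_tables_with_too_few_sera=True)
#   cmd = maker.command(source=Path("tables/table-001.ace"),
#                       target=Path("out/i-none/table-001.ace"))
#   if cmd is not None:
#       subprocess.check_call([str(arg) for arg in cmd])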
# ======================================================================
### Local Variables:
### eval: (if (fboundp 'eu-rename-buffer) (eu-rename-buffer))
### End:
|
the-stack_0_10602 | import json
import os
from flask import Flask, render_template, redirect, request
import tv
import logging
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
app = Flask(__name__)
BUTTONS = {}
@app.route('/')
def index():
return render_template('index.html',
tv_state=tv.get_state(),
buttons=BUTTONS.values())
@app.route('/off')
def hello_world():
tv.off()
return redirect("/", code=302)
@app.route('/button/<btn>')
def button(btn):
b = BUTTONS.get(btn)
if b:
tv.do_script(b['script'])
return redirect("/", code=302)
@app.route('/shutdown')
def shutdown():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
return 'Server shutting down...'
def load_buttons():
dir, file = os.path.split(os.path.abspath(__file__))
with open(os.path.join(dir, 'buttons.json')) as json_data:
btns = json.load(json_data)
for btn in btns:
BUTTONS[btn["id"]] = btn
if __name__ == "__main__":
load_buttons()
try:
app.run(host='0.0.0.0', port=5000)
finally:
tv.cleanup()
|
the-stack_0_10604 | import numpy as np
import random
import copy
from collections import namedtuple, deque
from ddpg_models import Actor, Critic
from ou_noise import OUNoise
from replay_buffer import ReplayBuffer
import torch
import torch.nn.functional as F
import torch.optim as optim
BUFFER_SIZE = int(1e6) # replay buffer size
BATCH_SIZE = 1024 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR_ACTOR = 1e-4 # learning rate of the actor
LR_CRITIC = 1e-3 # learning rate of the critic
WEIGHT_DECAY = 0 # L2 weight decay
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent():
"""Interacts with and learns from the environment."""
def __init__(self, state_size, action_size, random_seed):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
random_seed (int): random seed
"""
self.state_size = state_size
self.action_size = action_size
self.seed = random.seed(random_seed)
# Actor Network (w/ Target Network)
self.actor_local = Actor(state_size, action_size, random_seed).to(device)
self.actor_target = Actor(state_size, action_size, random_seed).to(device)
self.actor_optimizer = optim.Adam(self.actor_local.parameters(), lr=LR_ACTOR)
# Critic Network (w/ Target Network)
self.critic_local = Critic(state_size, action_size, random_seed).to(device)
self.critic_target = Critic(state_size, action_size, random_seed).to(device)
self.critic_optimizer = optim.Adam(self.critic_local.parameters(), lr=LR_CRITIC, weight_decay=WEIGHT_DECAY)
# Noise process
self.noise = OUNoise(action_size, random_seed)
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, random_seed)
def step(self, states, actions, rewards, next_states, dones):
"""Save experience in replay memory, and use random sample from buffer to learn."""
# Save experience / reward
for state, action, reward, next_state, done in zip(states, actions, rewards, next_states, dones):
self.memory.add(state, action, reward, next_state, done)
# Learn, if enough samples are available in memory
if len(self.memory) > BATCH_SIZE:
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(self, state, add_noise=True):
"""Returns actions for given state as per current policy."""
state = torch.from_numpy(state).float().to(device)
self.actor_local.eval()
with torch.no_grad():
action = self.actor_local(state).cpu().data.numpy()
self.actor_local.train()
if add_noise:
action += self.noise.sample()
return np.clip(action, -1, 1)
def reset(self):
"""reset the noise function values"""
self.noise.reset()
def learn(self, experiences, gamma):
"""Update policy and value parameters using given batch of experience tuples.
Q_targets = r + γ * critic_target(next_state, actor_target(next_state))
where:
actor_target(state) -> action
critic_target(state, action) -> Q-value
Params
======
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, actions, rewards, next_states, dones = experiences
# ---------------------------- update critic ---------------------------- #
# Get predicted next-state actions and Q values from target models
actions_next = self.actor_target(next_states)
Q_targets_next = self.critic_target(next_states, actions_next)
# Compute Q targets for current states (y_i)
Q_targets = rewards + (gamma * Q_targets_next * (1 - dones))
# Compute critic loss
Q_expected = self.critic_local(states, actions)
critic_loss = F.mse_loss(Q_expected, Q_targets)
# Minimize the loss
self.critic_optimizer.zero_grad()
critic_loss.backward()
self.critic_optimizer.step()
# ---------------------------- update actor ---------------------------- #
# Compute actor loss
actions_pred = self.actor_local(states)
actor_loss = -self.critic_local(states, actions_pred).mean()
# Minimize the loss
self.actor_optimizer.zero_grad()
actor_loss.backward()
self.actor_optimizer.step()
# ----------------------- update target networks ----------------------- #
self.soft_update(self.critic_local, self.critic_target, TAU)
self.soft_update(self.actor_local, self.actor_target, TAU)
def soft_update(self, local_model, target_model, tau):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model: PyTorch model (weights will be copied from)
target_model: PyTorch model (weights will be copied to)
tau (float): interpolation parameter
"""
for target_param, local_param in zip(target_model.parameters(), local_model.parameters()):
target_param.data.copy_(tau*local_param.data + (1.0-tau)*target_param.data)
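# Hedged usage sketch (not part of the original file): exercises the Agent API above
# against a stand-in random "environment" so the loop runs without a simulator; the
# agent count, state/action sizes and step counts are arbitrary assumptions.
if __name__ == "__main__":
    num_agents, state_size, action_size = 2, 33, 4
    agent = Agent(state_size=state_size, action_size=action_size, random_seed=2)
    for episode in range(2):
        states = np.random.randn(num_agents, state_size)
        agent.reset()
        for step in range(5):
            actions = agent.act(states)  # clipped to [-1, 1], shape (num_agents, action_size)
            next_states = np.random.randn(num_agents, state_size)
            rewards = np.random.rand(num_agents)
            dones = [False] * num_agents
            # each (s, a, r, s', done) tuple is stored; learning only starts once the
            # replay buffer holds at least BATCH_SIZE samples
            agent.step(states, actions, rewards, next_states, dones)
            states = next_states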
|
the-stack_0_10606 | import warnings
import rdflib
from rdflib import OWL, RDF, RDFS, BNode
from ..exceptions import NeuroLangNotImplementedError
from ..expressions import Constant, Symbol
from ..logic import Conjunction, Implication, Union
from .constraints_representation import RightImplication
class OntologyParser:
"""
This class is in charge of generating the rules that can be derived
from an ontology, both at entity and constraint levels.
"""
def __init__(self, paths, load_format="xml"):
self.namespaces_dic = None
self.owl_dic = None
if isinstance(paths, list):
self._load_ontology(paths, load_format)
else:
self._load_ontology([paths], [load_format])
self._triple = Symbol.fresh()
self._pointer = Symbol.fresh()
self._dom = Symbol.fresh()
self.parsed_restrictions = [
OWL.allValuesFrom,
OWL.hasValue,
OWL.minCardinality,
OWL.maxCardinality,
OWL.cardinality,
OWL.someValuesFrom,
]
def _load_ontology(self, paths, load_format):
g = rdflib.Graph()
for counter, path in enumerate(paths):
g.load(path, format=load_format[counter])
self.graph = g
def parse_ontology(self):
extensional_predicate_tuples, union_of_constraints_dom = (
self._load_domain()
)
union_of_constraints_prop = self._load_properties()
union_of_constraints = self._load_constraints()
union_of_constraints = Union(
union_of_constraints_dom.formulas
+ union_of_constraints_prop.formulas
+ union_of_constraints.formulas
)
return extensional_predicate_tuples, union_of_constraints
def get_triples_symbol(self):
return self._triple
def get_pointers_symbol(self):
return self._pointer
def get_domain_symbol(self):
return self._dom
def _load_domain(self):
pointers = frozenset(
(str(x),) for x in self.graph.subjects() if isinstance(x, BNode)
)
triples = frozenset(
(str(x[0]), str(x[1]), str(x[2])) for x in self.get_triples()
)
x = Symbol.fresh()
y = Symbol.fresh()
z = Symbol.fresh()
dom1 = RightImplication(self._triple(x, y, z), self._dom(x))
dom2 = RightImplication(self._triple(x, y, z), self._dom(y))
dom3 = RightImplication(self._triple(x, y, z), self._dom(z))
extensional_predicate_tuples = {}
extensional_predicate_tuples[self._triple] = triples
extensional_predicate_tuples[self._pointer] = pointers
union_of_constraints = Union((dom1, dom2, dom3))
return extensional_predicate_tuples, union_of_constraints
def _load_properties(self):
"""
Function that parse all the properties defined in
the ontology.
"""
x = Symbol.fresh()
z = Symbol.fresh()
constraints = ()
for pred in set(self.graph.predicates()):
symbol_name = str(pred)
symbol = Symbol(symbol_name)
const = Constant(symbol_name)
constraints += (
RightImplication(self._triple(x, const, z), symbol(x, z)),
)
return Union(constraints)
def _load_constraints(self):
"""
Function in charge of parsing the ontology's restrictions.
It needs a function "_process_X", where X is the name of
the restriction to be processed, to be defined.
"""
restriction_ids = [
s for s, _, _ in self.graph.triples((None, None, OWL.Restriction))
]
union_of_constraints = Union(())
for rest in restriction_ids:
cut_graph = list(self.graph.triples((rest, None, None)))
res_type = self._identify_restriction_type(cut_graph)
try:
process_restriction_method = getattr(
self, f"_process_{res_type}"
)
constraints = process_restriction_method(cut_graph)
union_of_constraints = Union(
union_of_constraints.formulas + constraints.formulas
)
except AttributeError as err:
raise NeuroLangNotImplementedError(
f"""Ontology parser doesn\'t handle
restrictions of type {res_type}"""
)
return union_of_constraints
def _identify_restriction_type(self, list_of_triples):
"""
Given a list of nodes associated to a restriction,
this function returns the name of the restriction
to be applied (hasValue, minCardinality, etc).
Parameters
----------
list_of_triples : list
List of nodes associated to a restriction.
Returns
-------
str
the name of the restriction or an empty string
if the name cannot be identified.
"""
for triple in list_of_triples:
if triple[1] == OWL.onProperty or triple[1] == RDF.type:
continue
else:
return triple[1].rsplit("#")[-1]
return ""
def _process_hasValue(self, cut_graph):
"""
A restriction containing a owl:hasValue constraint describes a class
of all individuals for which the property concerned has at least
one value semantically equal to V (it may have other values as well)
The following example describes the class of individuals
who have the individual referred to as Clinton as their parent:
<owl:Restriction>
<owl:onProperty rdf:resource="#hasParent" />
<owl:hasValue rdf:resource="#Clinton" />
</owl:Restriction>
"""
parsed_prop, restricted_node, value = self._parse_restriction_nodes(
cut_graph
)
rdfs_type = Constant(str(RDF.type))
property_symbol = Symbol(str(parsed_prop))
x = Symbol.fresh()
constraint = Union(
(
RightImplication(
self._triple(x, rdfs_type, Constant(str(restricted_node))),
property_symbol(x, Constant(str(value))),
),
)
)
return constraint
def _process_minCardinality(self, cut_graph):
"""
A restriction containing an owl:minCardinality constraint describes
a class of all individuals that have at least N semantically distinct
values (individuals or data values) for the property concerned,
where N is the value of the cardinality constraint.
The following example describes a class of individuals
that have at least two parents:
<owl:Restriction>
<owl:onProperty rdf:resource="#hasParent" />
<owl:minCardinality rdf:datatype="&xsd;nonNegativeInteger">
2
</owl:minCardinality>
</owl:Restriction>
Note that an owl:minCardinality of one or more means that all
instances of the class must have a value for the property.
"""
_, restricted_node, _ = self._parse_restriction_nodes(
cut_graph
)
warnings.warn(
f"""The restriction minCardinality cannot be
parsed for {restricted_node}."""
)
return Union(())
def _process_maxCardinality(self, cut_graph):
"""
A restriction containing an owl:maxCardinality constraint describes
a class of all individuals that have at most N semantically distinct
values (individuals or data values) for the property concerned,
where N is the value of the cardinality constraint.
The following example describes a class of individuals
that have at most two parents:
<owl:Restriction>
<owl:onProperty rdf:resource="#hasParent" />
<owl:maxCardinality rdf:datatype="&xsd;nonNegativeInteger">
2
</owl:maxCardinality>
</owl:Restriction>
"""
_, restricted_node, _ = self._parse_restriction_nodes(
cut_graph
)
warnings.warn(
f"""The restriction maxCardinality cannot be
parsed for {restricted_node}"""
)
return Union(())
def _process_cardinality(self, cut_graph):
"""
A restriction containing an owl:cardinality constraint describes
a class of all individuals that have exactly N semantically distinct
values (individuals or data values) for the property concerned,
where N is the value of the cardinality constraint.
This construct is in fact redundant as it can always be replaced
by a pair of matching owl:minCardinality and owl:maxCardinality
constraints with the same value. It is included as a convenient
shorthand for the user.
The following example describes a class of individuals that have
exactly two parents:
<owl:Restriction>
<owl:onProperty rdf:resource="#hasParent" />
<owl:cardinality rdf:datatype="&xsd;nonNegativeInteger">
2
</owl:cardinality>
</owl:Restriction>
"""
_, restricted_node, _ = self._parse_restriction_nodes(
cut_graph
)
warnings.warn(
f"""The restriction cardinality cannot be
parsed for {restricted_node}"""
)
return Union(())
def _process_someValuesFrom(self, cut_graph):
"""
It defines a class of individuals x for which there is at least one y
(either an instance of the class description or value of the data
range) such that the pair (x,y) is an instance of P. This does not
exclude that there are other instances (x,y') of P for which y' does
not belong to the class description or data range.
The following example defines a class of individuals which have at
least one parent who is a physician:
<owl:Restriction>
<owl:onProperty rdf:resource="#hasParent" />
<owl:someValuesFrom rdf:resource="#Physician" />
</owl:Restriction>
"""
parsed_prop, restricted_node, values = self._parse_restriction_nodes(
cut_graph
)
nodes_someValuesFrom = self._parse_list(values)
constraints = Union(())
property_symbol = Symbol(str(parsed_prop))
rdfs_type = Constant(str(RDF.type))
y = Symbol.fresh()
for value in nodes_someValuesFrom:
constraints = Union(
constraints.formulas
+ (
RightImplication(
self._triple(
y, rdfs_type, Constant(str(restricted_node))
),
property_symbol(y, Constant(str(value))),
),
)
)
return constraints
def _process_allValuesFrom(self, cut_graph):
"""
AllValuesFrom defines a class of individuals x
for which holds that if the pair (x,y) is an instance of
P (the property concerned), then y should be an instance
of the class description.
<owl:Restriction>
<owl:onProperty rdf:resource="#hasParent" />
<owl:allValuesFrom rdf:resource="#Human" />
</owl:Restriction>
This example describes an anonymous OWL class of all individuals
for which the hasParent property only has values of class Human
"""
parsed_prop, restricted_node, values = self._parse_restriction_nodes(
cut_graph
)
allValuesFrom = self._parse_list(values)
constraints = Union(())
property_symbol = Symbol(str(parsed_prop))
rdf_type = Constant(str(RDF.type))
rdf_symbol = Symbol(str(RDF.type))
y = Symbol.fresh()
x = Symbol.fresh()
for value in allValuesFrom:
constraints = Union(
constraints.formulas
+ (
RightImplication(
Conjunction(
(
self._triple(
y, rdf_type, Constant(str(restricted_node))
),
property_symbol(y, x),
)
),
rdf_symbol(x, Constant(str(value))),
),
)
)
return constraints
def _parse_restriction_nodes(self, cut_graph):
"""
Given the list of nodes associated with a restriction,
this function returns: The restricted node, the property that
restricts it and the value associated to it.
Parameters
----------
cut_graph : list
List of nodes associated to a restriction.
Returns
-------
parsed_property : URIRef
The node of the property.
restricted_node : URIRef
The node restricted by the property.
value : URIRef
The value of the property
"""
restricted_node = list(
self.graph.triples((None, None, cut_graph[0][0]))
)[0][0]
for triple in cut_graph:
if OWL.onProperty == triple[1]:
parsed_property = triple[2]
elif triple[1] in self.parsed_restrictions:
value = triple[2]
return parsed_property, restricted_node, value
def _parse_list(self, initial_node):
"""
This function receives an initial BNode from a list of nodes
and goes through the list collecting the values from it and
returns them as an array
Parameters
----------
initial_node : BNode
Initial node of the list that you want to go through.
Returns
-------
values : list
Array of nodes that are part of the list.
"""
if not isinstance(initial_node, BNode):
return [initial_node]
list_node = RDF.nil
values = []
for node_triples in self.graph.triples((initial_node, None, None)):
if OWL.unionOf == node_triples[1]:
list_node = node_triples[2]
else:
values.append(node_triples[0])
while list_node != RDF.nil and list_node is not None:
list_iter = self.graph.triples((list_node, None, None))
values.append(self._get_list_first_value(list_iter))
list_node = self._get_list_rest_value(list_iter)
return values
def _get_list_first_value(self, list_iter):
"""
Given a list of triples, as a result of the iteration of a list,
this function returns the node associated to the rdf:first property.
Parameters
----------
list_iter : generator
Generator that represents the list of nodes that
form a position in a list.
Returns
-------
URIRef
Node associated to the rdf:first property.
"""
for triple in list_iter:
if RDF.first == triple[1]:
return triple[2]
def _get_list_rest_value(self, list_iter):
"""
Given a list of triples, as a result of the iteration of a list,
this function returns the node associated to the rdf:rest property.
Parameters
----------
list_iter : generator
Generator that represents the list of nodes that
form a position in a list.
Returns
-------
URIRef
Node associated to the rdf:rest property.
"""
for triple in list_iter:
if RDF.rest == triple[1]:
return triple[2]
def get_triples(self):
return self.graph.triples((None, None, None))
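# Hedged usage sketch (added for illustration, not part of the original module).
# The ontology path below is an assumption; the calls mirror the public methods
# defined above.
#
# parser = OntologyParser("/path/to/ontology.owl", load_format="xml")
# facts, constraints = parser.parse_ontology()
# # `facts` maps the fresh triple/pointer symbols to extensional tuples and
# # `constraints` is a Union of RightImplication rules derived from the ontology.
# triple_symbol = parser.get_triples_symbol()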
|
the-stack_0_10608 | import os
import json
import numpy as np
from pychemia.crystal import KPoints
from ...tasks import Task
from ..abinit import AbinitJob
__author__ = 'Guillermo Avendano-Franco'
class StaticCalculation(Task):
def __init__(self, structure, workdir='.', binary='abinit', ecut=50, kpoints=None, kp_density=1E4):
self.ecut = ecut
if kpoints is None:
kp = KPoints.optimized_grid(structure.lattice, kp_density=kp_density, force_odd=True)
self.kpoints = kp
else:
self.kpoints = kpoints
self.task_params = {'ecut': self.ecut, 'kpoints': self.kpoints.to_dict}
Task.__init__(self, structure=structure, task_params=self.task_params, workdir=workdir, binary=binary)
self.abinitjob = AbinitJob()
self.abinitjob.initialize(workdir=workdir, structure=structure, binary=binary)
def run(self, nparal=1):
self.abinitjob.set_kpoints(kpoints=self.kpoints)
self.abinitjob.job_static()
self.abinitjob.set_ecut(self.ecut)
self.abinitjob.set_psps()
self.abinitjob.write_all()
self.abinitjob.run(use_mpi=True, omp_max_threads=nparal, mpi_num_procs=nparal)
def plot(self, figname='static_calculation.pdf'):
if not self.finished:
print('The task is not finished')
return
import matplotlib.pyplot as plt
plt.switch_backend('agg')
plt.figure(figsize=(8, 6))
plt.subplots_adjust(left=0.09, bottom=0.08, right=0.95, top=0.95, wspace=None, hspace=None)
data = np.array(self.output['energies'])
plt.plot(data[:, 1], data[:, 2], 'b.-')
plt.xlabel('SCF cycle')
plt.ylabel('Energy [eV]')
a = plt.axes([.6, .6, .3, .3], axisbg='0.9')
a.semilogy(data[:, 1], data[:, 2] - np.min(data[:, 2]))
a.set_title('min energy %7.3f eV' % np.min(data[:, 2]))
if figname is not None:
plt.savefig(figname)
return plt.gcf()
def load(self, filename=None):
if filename is None:
filename = self.workdir + os.sep + 'task.json'
rf = open(filename)
data = json.load(rf)
rf.close()
self.task_params = data['task_params']
self.output = data['output']
self.ecut = self.task_params['ecut']
self.kpoints = KPoints.from_dict(self.task_params['kpoints'])
def report(self, file_format='html'):
from lxml.builder import ElementMaker, E
self.plot(figname=self.report_dir + os.sep + 'static.jpg')
element_maker = ElementMaker(namespace=None, nsmap={None: "http://www.w3.org/1999/xhtml"})
html = element_maker.html(E.head(E.title("ABINIT Static Calculation")),
E.body(E.h1("ABINIT Static Calculation"),
E.h2('Structure'),
E.pre(str(self.structure)),
E.h2('Self Consistent Field Convergence'),
E.p(E.img(src='static.jpg', width="800", height="600",
alt="Static Calculation"))
))
return self.report_end(html, file_format)
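# Hedged usage sketch (added for illustration, not part of the original module).
# The `structure` object (a pychemia crystal structure) and the level of
# parallelism are assumptions; the calls mirror the methods defined above.
#
# task = StaticCalculation(structure, workdir='scf_run', binary='abinit',
#                          ecut=40, kp_density=1E4)
# task.run(nparal=4)
# task.plot(figname='static_calculation.pdf')
# task.report(file_format='html')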
|
the-stack_0_10609 | # Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ==============================================================================
r"""Script for training model.
Simple command to get up and running:
python train.py --memory_size=8192 \
--batch_size=16 --validation_length=50 \
--episode_width=5 --episode_length=30
"""
import logging
import os
import random
import numpy as np
import tensorflow as tf
import data_utils
import model
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_integer('rep_dim', 128,
'dimension of keys to use in memory')
tf.flags.DEFINE_integer('episode_length', 100, 'length of episode')
tf.flags.DEFINE_integer('episode_width', 5,
'number of distinct labels in a single episode')
tf.flags.DEFINE_integer('memory_size', None, 'number of slots in memory. '
'Leave as None to default to episode length')
tf.flags.DEFINE_integer('batch_size', 16, 'batch size')
tf.flags.DEFINE_integer('num_episodes', 100000, 'number of training episodes')
tf.flags.DEFINE_integer('validation_frequency', 20,
'every so many training episodes, '
'assess validation accuracy')
tf.flags.DEFINE_integer('validation_length', 10,
'number of episodes to use to compute '
'validation accuracy')
tf.flags.DEFINE_integer('seed', 888, 'random seed for training sampling')
tf.flags.DEFINE_string('save_dir', '', 'directory to save model to')
tf.flags.DEFINE_bool('use_lsh', False,
'use locality-sensitive hashing '
'(NOTE: not fully tested)')
class Trainer(object):
"""Class that takes care of training, validating, and checkpointing model."""
def __init__(self, train_data, valid_data, input_dim, output_dim=None):
self.train_data = train_data
self.valid_data = valid_data
self.input_dim = input_dim
self.rep_dim = FLAGS.rep_dim
self.episode_length = FLAGS.episode_length
self.episode_width = FLAGS.episode_width
self.batch_size = FLAGS.batch_size
self.memory_size = (self.episode_length * self.batch_size
if FLAGS.memory_size is None else FLAGS.memory_size)
self.use_lsh = FLAGS.use_lsh
self.output_dim = (output_dim if output_dim is not None
else self.episode_width)
def get_model(self):
# vocab size is the number of distinct values that
# could go into the memory key-value storage
vocab_size = self.episode_width * self.batch_size
return model.Model(
self.input_dim, self.output_dim, self.rep_dim, self.memory_size,
vocab_size, use_lsh=self.use_lsh)
def sample_episode_batch(self, data,
episode_length, episode_width, batch_size):
"""Generates a random batch for training or validation.
Structures each element of the batch as an 'episode'.
Each episode contains episode_length examples and
episode_width distinct labels.
Args:
data: A dictionary mapping label to list of examples.
episode_length: Number of examples in each episode.
episode_width: Distinct number of labels in each episode.
batch_size: Batch size (number of episodes).
Returns:
A tuple (x, y) where x is a list of batches of examples
with size episode_length and y is a list of batches of labels.
"""
episodes_x = [[] for _ in xrange(episode_length)]
episodes_y = [[] for _ in xrange(episode_length)]
assert len(data) >= episode_width
keys = data.keys()
for b in xrange(batch_size):
episode_labels = random.sample(keys, episode_width)
remainder = episode_length % episode_width
remainders = [0] * (episode_width - remainder) + [1] * remainder
episode_x = [
random.sample(data[lab],
r + (episode_length - remainder) / episode_width)
for lab, r in zip(episode_labels, remainders)]
episode = sum([[(x, i, ii) for ii, x in enumerate(xx)]
for i, xx in enumerate(episode_x)], [])
random.shuffle(episode)
# Arrange episode so that each distinct label is seen before moving to
# 2nd showing
episode.sort(key=lambda elem: elem[2])
assert len(episode) == episode_length
for i in xrange(episode_length):
episodes_x[i].append(episode[i][0])
episodes_y[i].append(episode[i][1] + b * episode_width)
return ([np.array(xx).astype('float32') for xx in episodes_x],
[np.array(yy).astype('int32') for yy in episodes_y])
def compute_correct(self, ys, y_preds):
return np.mean(np.equal(y_preds, np.array(ys)))
def individual_compute_correct(self, y, y_pred):
return y_pred == y
def run(self):
"""Performs training.
Trains a model using episodic training.
Every so often, runs some evaluations on validation data.
"""
train_data, valid_data = self.train_data, self.valid_data
input_dim, output_dim = self.input_dim, self.output_dim
rep_dim, episode_length = self.rep_dim, self.episode_length
episode_width, memory_size = self.episode_width, self.memory_size
batch_size = self.batch_size
train_size = len(train_data)
valid_size = len(valid_data)
logging.info('train_size (number of labels) %d', train_size)
logging.info('valid_size (number of labels) %d', valid_size)
logging.info('input_dim %d', input_dim)
logging.info('output_dim %d', output_dim)
logging.info('rep_dim %d', rep_dim)
logging.info('episode_length %d', episode_length)
logging.info('episode_width %d', episode_width)
logging.info('memory_size %d', memory_size)
logging.info('batch_size %d', batch_size)
assert all(len(v) >= float(episode_length) / episode_width
for v in train_data.itervalues())
assert all(len(v) >= float(episode_length) / episode_width
for v in valid_data.itervalues())
output_dim = episode_width
self.model = self.get_model()
self.model.setup()
sess = tf.Session()
sess.run(tf.initialize_all_variables())
saver = tf.train.Saver(max_to_keep=10)
ckpt = None
if FLAGS.save_dir:
ckpt = tf.train.get_checkpoint_state(FLAGS.save_dir)
if ckpt and ckpt.model_checkpoint_path:
logging.info('restoring from %s', ckpt.model_checkpoint_path)
saver.restore(sess, ckpt.model_checkpoint_path)
logging.info('starting now')
losses = []
random.seed(FLAGS.seed)
np.random.seed(FLAGS.seed)
for i in xrange(FLAGS.num_episodes):
x, y = self.sample_episode_batch(
train_data, episode_length, episode_width, batch_size)
outputs = self.model.episode_step(sess, x, y, clear_memory=True)
loss = outputs
losses.append(loss)
if i % FLAGS.validation_frequency == 0:
logging.info('episode batch %d, avg train loss %f',
i, np.mean(losses))
losses = []
# validation
correct = []
correct_by_shot = dict((k, []) for k in xrange(self.episode_width + 1))
for _ in xrange(FLAGS.validation_length):
x, y = self.sample_episode_batch(
valid_data, episode_length, episode_width, 1)
outputs = self.model.episode_predict(
sess, x, y, clear_memory=True)
y_preds = outputs
correct.append(self.compute_correct(np.array(y), y_preds))
# compute per-shot accuracies
seen_counts = [[0] * episode_width for _ in xrange(batch_size)]
# loop over episode steps
for yy, yy_preds in zip(y, y_preds):
# loop over batch examples
for k, (yyy, yyy_preds) in enumerate(zip(yy, yy_preds)):
yyy, yyy_preds = int(yyy), int(yyy_preds)
count = seen_counts[k][yyy % self.episode_width]
if count in correct_by_shot:
correct_by_shot[count].append(
self.individual_compute_correct(yyy, yyy_preds))
seen_counts[k][yyy % self.episode_width] = count + 1
logging.info('validation overall accuracy %f', np.mean(correct))
logging.info('%d-shot: %.3f, ' * (self.episode_width + 1),
*sum([[k, np.mean(correct_by_shot[k])]
for k in xrange(self.episode_width + 1)], []))
if saver and FLAGS.save_dir:
saved_file = saver.save(sess,
os.path.join(FLAGS.save_dir, 'model.ckpt'),
global_step=self.model.global_step)
logging.info('saved model to %s', saved_file)
def main(unused_argv):
train_data, valid_data = data_utils.get_data()
trainer = Trainer(train_data, valid_data, data_utils.IMAGE_NEW_SIZE ** 2)
trainer.run()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
tf.app.run()
|
the-stack_0_10610 | """
Copyright (c) 2015 SONATA-NFV, 2017 5GTANGO
ALL RIGHTS RESERVED.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Neither the name of the SONATA-NFV, 5GTANGO
nor the names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
This work has been performed in the framework of the SONATA project,
funded by the European Commission under Grant number 671517 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the SONATA
partner consortium (www.sonata-nfv.eu).
This work has been performed in the framework of the 5GTANGO project,
funded by the European Commission under Grant number 761493 through
the Horizon 2020 and 5G-PPP programmes. The authors would like to
acknowledge the contributions of their colleagues of the 5GTANGO
partner consortium (www.5gtango.eu).
"""
import logging
import yaml
import time
from smbase.smbase import smbase
try:
from ds import ssh
except ImportError:
import ssh
logging.basicConfig(level=logging.INFO)
LOG = logging.getLogger("fsm-ds")
LOG.setLevel(logging.DEBUG)
logging.getLogger("son-mano-base:messaging").setLevel(logging.INFO)
class dsFSM(smbase):
def __init__(self, connect_to_broker=True):
"""
:param specific_manager_type: specifies the type of specific manager
that could be either fsm or ssm.
:param service_name: the name of the service that this specific manager
belongs to.
:param function_name: the name of the function that this specific
manager belongs to, will be null in SSM case
:param specific_manager_name: the actual name of specific manager
(e.g., scaling, placement)
:param id_number: the specific manager id number which is used to
distinguish between multiple SSM/FSM that are created for the same
objective (e.g., scaling with algorithm 1 and 2)
:param version: version
:param description: description
"""
self.sm_id = "sonfsmcommunication-pilotds-vnfcss1"
self.sm_version = "0.1"
super(self.__class__, self).__init__(sm_id=self.sm_id,
sm_version=self.sm_version,
connect_to_broker=connect_to_broker)
def on_registration_ok(self):
# The fsm registration was successful
LOG.debug("Received registration ok event.")
# send the status to the SMR
status = 'Subscribed, waiting for alert message'
message = {'name': self.sm_id,
'status': status}
self.manoconn.publish(topic='specific.manager.registry.ssm.status',
message=yaml.dump(message))
# Subscribing to the topics that the fsm needs to listen on
topic = "generic.fsm." + str(self.sfuuid)
self.manoconn.subscribe(self.message_received, topic)
LOG.info("Subscribed to " + topic + " topic.")
def message_received(self, ch, method, props, payload):
"""
This method handles received messages
"""
# Decode the content of the message
request = yaml.load(payload)
# Don't trigger on non-request messages
if "fsm_type" not in request.keys():
LOG.info("Received a non-request message, ignoring...")
return
# Create the response
response = None
# the 'fsm_type' field in the content indicates for which type of
# fsm this message is intended.
if str(request["fsm_type"]) == "start":
LOG.info("Start event received: " + str(request["content"]))
response = self.start_event(request["content"])
if str(request["fsm_type"]) == "stop":
LOG.info("Stop event received: " + str(request["content"]))
response = self.stop_event(request["content"])
if str(request["fsm_type"]) == "configure":
LOG.info("Config event received: " + str(request["content"]))
response = self.configure_event(request["content"])
# If a response message was generated, send it back to the FLM
LOG.info("Response to request generated:" + str(response))
topic = "generic.fsm." + str(self.sfuuid)
corr_id = props.correlation_id
self.manoconn.notify(topic,
yaml.dump(response),
correlation_id=corr_id)
return
def start_event(self, content):
"""
This method handles a start event.
"""
# Dummy content
response = {'status': 'completed'}
return response
def stop_event(self, content):
"""
This method handles a stop event.
"""
# Dummy content
response = {'status': 'completed'}
return response
def configure_event(self, content):
"""
This method handles a configure event. The configure event changes the configuration
of the Dispatcher.
"""
# Extract VNF-DS management IP and VNF-BS internal IP
ds_ip = ''
bs_ip = ''
for vnfr in content['vnfrs']:
if vnfr['virtual_deployment_units'][0]['vdu_reference'][:2] == 'bs':
for cp in vnfr['virtual_deployment_units'][0]['vnfc_instance'][0]['connection_points']:
if cp['id'] == 'internal':
bs_ip = cp['interface']['address']
break
if vnfr['virtual_deployment_units'][0]['vdu_reference'][:2] == 'ds':
for cp in vnfr['virtual_deployment_units'][0]['vnfc_instance'][0]['connection_points']:
if cp['id'] == 'mgmt':
ds_ip = cp['interface']['address']
break
LOG.info('ds ip: ' + ds_ip)
LOG.info('bs ip: ' + bs_ip)
# Initiate SSH connection with the VM
ssh_client = ssh.Client(ds_ip, username='ubuntu', logger=LOG,
key_filename='/root/ds/sandbox.pem', retries=40)
# Enable user ubuntu in tmp folder
ssh_client.sendCommand("sudo chown -R ubuntu:ubuntu /tmp/")
# Change qss config
ssh_client.sendCommand("sudo sed -r -i '/mongodbUrl: .*$/c\ mongodbUrl: \"mongodb:\/\/" +
bs_ip + "/dispatcher\",' /opt/sippo/janus-dispatcher/janus-dispatcher-current/quobis-dispatcher-config.js")
# Restart the services
ssh_client.sendCommand(
"pm2 restart /opt/sippo/janus-dispatcher/janus-dispatcher-current/process.json")
if ssh_client.connected:
response = {'status': 'COMPLETED', 'error': 'None'}
else:
response = {'status': 'FAILED', 'error': 'FSM SSH connection failed'}
return response
def main():
dsFSM()
if __name__ == '__main__':
main()
|
the-stack_0_10613 | from django.test.testcases import TestCase
from mock import patch
from robber import expect
from data import cache_managers
class CacheManagersTestCase(TestCase):
@patch('data.cache_managers.allegation_cache_manager.cache_data')
@patch('data.cache_managers.officer_cache_manager.cache_data')
@patch('data.cache_managers.salary_cache_manager.cache_data')
@patch('activity_grid.cache_managers.activity_pair_card_cache_manager.cache_data')
def test_cache_all(
self,
salary_cache_mock,
officer_cache_mock,
allegation_cache_mock,
activity_pair_card_cache_mock
):
cache_managers.cache_all()
expect(salary_cache_mock).to.be.called_once()
expect(officer_cache_mock).to.be.called_once()
expect(allegation_cache_mock).to.be.called_once()
expect(activity_pair_card_cache_mock).to.be.called_once()
expect(len(cache_managers.managers)).to.eq(4)
|
the-stack_0_10614 | #!/usr/bin/python3
import argparse
import itertools
import os
import pprint
import sys
import yaml
from PIL import Image, ImageDraw
import bs4
THUMB_MARGIN = 10
def get_polys(html):
with open(html) as f:
soup = bs4.BeautifulSoup(f.read(), features="html5lib")
out = {}
for a in soup.find_all("area"):
assert a["shape"] == "poly"
name = a["href"]
coords = a["coords"]
coords = [int(i) for i in coords.split(",")]
coords = list(zip(coords[::2], coords[1::2]))
out[name] = coords
return out
class Patch:
MARGIN = 5
def __init__(self, image, coords):
mask = Image.new("L", image.size, 0)
d = ImageDraw.Draw(mask)
d.polygon(coords, 255)
masked = Image.new("RGBA", image.size, (0,0,0,0))
masked.paste(image, (0,0), mask)
min_x = min(p[0] for p in coords) - self.MARGIN
max_x = max(p[0] for p in coords) + self.MARGIN
min_y = min(p[1] for p in coords) - self.MARGIN
max_y = max(p[1] for p in coords) + self.MARGIN
if min_x < 0: min_x = 0
if min_y < 0: min_y = 0
if max_x > image.size[0]: max_x = image.size[0]
if max_y > image.size[1]: max_y = image.size[1]
self.origin = [min_x, min_y]
self.size = [max_x - min_x, max_y - min_y]
self.image = masked.crop((min_x, min_y, max_x, max_y))
t = []
for x, y in coords:
t.append(str(x))
t.append(str(y))
self.coords_str = ",".join(t)
self.highlight = Image.new("RGBA", self.image.size, (255,255,255,0))
for ox in range(-2, 3):
for oy in range(-2, 3):
if ox in (-2,2) and oy in (-2,2): continue
self.highlight.paste((255,255,255,255), (ox,oy), self.image)
pixels = set()
for j in range(self.size[1]):
for i in range(self.size[0]):
if self.image.getpixel((i,j))[3]:
pixels.add((i,j))
elif self.highlight.getpixel((i,j))[3]:
pixels.add((i,j))
if not pixels:
self.image = None
self.highlight = None
return
min_x = min(p[0] for p in pixels)
max_x = max(p[0] for p in pixels)
min_y = min(p[1] for p in pixels)
max_y = max(p[1] for p in pixels)
w = max_x + 1 - min_x
h = max_y + 1 - min_y
self.image = self.image.crop((min_x, min_y, max_x, max_y))
self.highlight = self.highlight.crop((min_x, min_y, max_x, max_y))
self.origin = [self.origin[0] + min_x, self.origin[1] + min_y]
self.size = [w, h]
def main():
parser = argparse.ArgumentParser(
description="Extract icons from images and a map.")
parser.add_argument("--output_dir",
default=".",
help="Directory for output icons")
parser.add_argument("--max_thumb_height",
type=int, default=260,
help="Max height of thumb images")
parser.add_argument("--background_color",
default="#f8f8f8",
help="Background color for map")
parser.add_argument("--output_yaml", default="land.yaml",
help="File for yaml data output")
parser.add_argument("html",
help="Image map HTML")
parser.add_argument("source_image")
parser.add_argument("--under_image", default=None)
parser.add_argument("--under_html", default=None)
options = parser.parse_args()
assert options.background_color[0] == "#" and len(options.background_color) == 7
options.background_color = tuple(int(options.background_color[i*2+1:i*2+3], 16) for i in range(3))
html_map = get_polys(options.html)
if options.under_html:
under_map = get_polys(options.under_html)
else:
under_map = html_map
source_image = Image.open(options.source_image).convert("RGBA")
if options.under_image:
under_image = Image.open(options.under_image).convert("RGBA")
assert under_image.size == source_image.size
else:
under_image = None
size = source_image.size
icons = {}
for name, coords in html_map.items():
out = {}
icons[name] = out
patch = Patch(source_image, coords)
if patch.image:
od = {}
out["image"] = od
od["pos"] = patch.origin
od["poly"] = patch.coords_str
od["size"] = patch.size
patch.image.save(os.path.join(options.output_dir, f"image_{name}.png"))
if patch.highlight:
od = {}
out["mask"] = od
od["pos"] = patch.origin[:]
od["size"] = patch.size[:]
patch.highlight.save(os.path.join(options.output_dir, f"mask_{name}.png"))
if under_image:
under_coords = under_map.get(name)
if under_coords:
under_patch = Patch(under_image, under_coords)
if under_patch.image:
od = {}
out["under"] = od
od["pos"] = under_patch.origin
od["poly"] = under_patch.coords_str
od["size"] = under_patch.size
under_patch.image.save(os.path.join(options.output_dir, f"under_{name}.png"))
y = { "icons": icons }
with open(os.path.join(options.output_dir, options.output_yaml), "w") as f:
f.write(yaml.dump(y))
if __name__ == "__main__":
main()
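# Hedged usage note (added for illustration, not part of the original script).
# The script and file names below are assumptions; the flags mirror the argparse
# options defined above:
#
#   python extract_icons.py map.html map.png --under_image map_under.png \
#       --under_html map_under.html --output_dir icons --output_yaml land.yaml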
|
the-stack_0_10615 | """
Frequency-split parameters
==========================
Split spectra and plot parameters
"""
import matplotlib.pyplot as plt
from wavespectra import read_ww3
dset = read_ww3("../_static/ww3file.nc")
fcut = 1 / 8
sea = dset.spec.split(fmin=fcut)
swell = dset.spec.split(fmax=fcut)
plt.figure(figsize=(8, 4.5))
p1 = dset.spec.hs().isel(site=0).plot(label="Full spectrum", marker="o")
p2 = sea.spec.hs().isel(site=0).plot(label="Sea", marker="o")
p3 = swell.spec.hs().isel(site=0).plot(label="Swell", marker="o")
l = plt.legend(loc=0, fontsize=8)
plt.title("")
plt.ylabel("$Hs$ (m)")
plt.xlabel("")
|
the-stack_0_10616 | # coded by: salism3
# 23 - 05 - 2020 23:18 (Malam Takbir)
from .checker import check_login
from .output import Output, People, Group
from . import parsing
import re
@check_login
def msgUrl(ses, next = None):
html = ses.session.get("https://mbasic.facebook.com/messages" if not next else next).text
data = parsing.parsing_href(html, "/read/")
next = parsing.parsing_href_regex(html, r"[?]pageNum.*selectable", one = True)
return Output(ses, msgUrl, items = data, next = next, html = html)
@check_login
def myGroup(ses):
html = ses.session.get("https://mbasic.facebook.com/groups/?seemore&refid=27").text
data = parsing.parsing_href_regex(html, r"/groups/\d+\W", bs4_class = True)
data = [(x.text, re.search(r"/(\d+)\W", x["href"]).group(1)) for x in data]
return Output(ses, myGroup, items = data, html = html)
def find_people(ses, name):
html = ses.session.get("https://mbasic.facebook.com/search/people/?q={}&source=filter&isTrending=0".format(name)).text
url = parsing.parsing_href(html, "__xts__", one = True)
try:
html = ses.session.get(url).text
return People(ses, html)
except:
return
def find_group(ses, name):
html = ses.session.get("https://mbasic.facebook.com/search/groups/?q={}&source=filter&isTrending=0".format(name)).text
url = parsing.parsing_href(html, "__xts__", one = True)
try:
# print("in try")
id_ = re.search(r"/(\d+)\Wrefid", url).group(1)
html = ses.session.get("https://mbasic.facebook.com/groups/{}?view=info".format(id_)).text
return Group(ses, html)
except:
return
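# Hedged usage sketch (added for illustration, not part of the original module).
# `ses` is assumed to be a logged-in session wrapper exposing `ses.session`
# (a requests.Session), as expected by the functions above; the search terms
# are placeholders.
#
# inbox = msgUrl(ses)
# groups = myGroup(ses)
# person = find_people(ses, "john")
# group = find_group(ses, "python")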
|
the-stack_0_10620 | from django.db import connection
from django.urls import resolve
class QueryCountDebugMiddleware:
"""
This middleware will log the number of queries run
and the total time taken for each request (with a
status code of 200). It does not currently support
multi-db setups.
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
current_url = resolve(request.path_info).url_name
response = self.get_response(request)
total_time = 0
for index, query in enumerate(connection.queries, 1):
query_time = query.get('time')
sql_query = query.get('sql')
if query_time is None:
query_time = query.get('duration', 0) / 1000
total_time += float(query_time)
print(f"\n{index}: ({query_time}) {sql_query}")
print(f"{current_url}: {request.get_raw_uri()}")
print(f"{len(connection.queries)} queries run, total {total_time} seconds\n", "-" * 100)
return response
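# Hedged usage sketch (added for illustration, not part of the original module).
# Django middleware is activated by listing its dotted path in MIDDLEWARE in
# settings.py; the module path "myproject.middleware" is an assumption.
#
# MIDDLEWARE = [
#     "django.middleware.common.CommonMiddleware",
#     # ...
#     "myproject.middleware.QueryCountDebugMiddleware",
# ]
#
# Note that connection.queries is only populated when DEBUG = True.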
|
the-stack_0_10622 | #!/usr/bin/python
import sys
import usb.core
import usb.util
import uinput
import time
from array import array
try:
# hexadecimal vendor and product values
dev = usb.core.find(idVendor=0x084f, idProduct=0xee05)
    if dev is None:
        print("Could not detect Brightsign Touchboard")
raise SystemExit
# first endpoint
interface = 0
endpoint = dev[0][(0,0)][0]
# if the OS kernel already claimed the device, which is most likely true
# thanks to http://stackoverflow.com/questions/8218683/pyusb-cannot-set-configuration
if dev.is_kernel_driver_active(interface) is True:
# tell the kernel to detach
dev.detach_kernel_driver(interface)
# claim the device
usb.util.claim_interface(dev, interface)
keys = {
'KEY_UP': array('B', [ 2, 0, 85, 92]),
'KEY_RIGHT': array('B', [ 32, 0, 85, 92]),
'KEY_DOWN': array('B', [ 128, 0, 85, 92]),
'KEY_LEFT': array('B', [ 8, 0, 85, 92]),
'KEY_ENTER': array('B', [ 16, 0, 85, 92]),
'KEY_ESC': array('B', [ 1, 0, 85, 92]),
'KEY_VOLUMEUP': array('B', [ 0, 2, 85, 92]),
'KEY_VOLUMEDOWN': array('B', [ 0, 4, 85, 92]),
'KEY_RELEASE': array('B', [ 0, 0, 85, 92])
}
brightsign_keys = [
uinput.KEY_UP,
uinput.KEY_RIGHT,
uinput.KEY_DOWN,
uinput.KEY_LEFT,
uinput.KEY_ENTER,
uinput.KEY_ESC,
uinput.KEY_VOLUMEUP,
uinput.KEY_VOLUMEDOWN
]
key_pressed = False
last_key = "KEY_ESC"
touchboard = uinput.Device( brightsign_keys )
while True:
try:
data = dev.read(endpoint.bEndpointAddress,endpoint.wMaxPacketSize)
for key, code in keys.items():
if code == data[0:4]:
if 'KEY_RELEASE' != key:
touchboard.emit(eval('uinput.'+key), value=1) # press key
last_key = key
else:
touchboard.emit(eval('uinput.'+last_key), value=0)
except usb.core.USBError as e:
data = None
if e.args == ('Operation timed out',):
continue
finally:
# release the device
usb.util.release_interface(dev, interface)
touchboard.destroy()
# reattach the device to the OS kernel
dev.attach_kernel_driver(interface)
|
the-stack_0_10623 | # Задача: От A до Z
''' Напишите функцию, которая будет принимать строку — диапазон букв английского алфавита. Функция должна возвращать строку из всех букв этого диапазона. Если в диапазоне заданы заглавные буквы, в результирующей строке тоже должны быть заглавные.
Примечания
Диапазон будет задаваться двумя буквами с дефисом между ними.
Обрабатывать ошибки не нужно (при указании диапазона обе буквы будут в одинаковом регистре и располагаться будут в алфавитном порядке).
Примеры
gimme_the_letters("a-z") ➞ "abcdefghijklmnopqrstuvwxyz"
gimme_the_letters("h-o") ➞ "hijklmno"
gimme_the_letters("Q-Z") ➞ "QRSTUVWXYZ"
gimme_the_letters("J-J") ➞ J"
'''
# Первый Вариант: Успех
def gimme_the_letters1(sp):
return "".join(chr(n) for n in range(ord(sp[0]), ord(sp[-1])+1))
gtl1 = gimme_the_letters1("a-z"), gimme_the_letters1("h-o"), gimme_the_letters1("Q-Z"), gimme_the_letters1("J-J")
print(gtl1)
# Second variant: success
def gimme_the_letters2(spectrum1):
a = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
x, y = spectrum1.split('-')
return a[a.index(x):a.index(y) + 1]
gtl2 = gimme_the_letters2("a-z"), gimme_the_letters2("h-o"), gimme_the_letters2("Q-Z"), gimme_the_letters2("J-J")
print(gtl2)
# Third variant: success
def gimme_the_letters3(spectrum2):
start, end = [ord(i) for i in spectrum2.split('-')]
return ''.join(chr(i) for i in range(start, end+1))
gtl3 = gimme_the_letters3("a-z"), gimme_the_letters3("h-o"), gimme_the_letters3("Q-Z"), gimme_the_letters3("J-J")
print(gtl3) |
the-stack_0_10624 | """
``fish_http_status`` contains the most common HTTP status codes.
https://github.com/openstack/swift/blob/master/swift/common/http.py
"""
def is_informational(status):
"""
    Check whether the status code is informational (1xx).
    :param:
    * status: HTTP status code
    :return:
    * result: True or False
"""
return 100 <= status <= 199
def is_success(status):
"""
    Check whether the status code indicates success (2xx).
    :param:
    * status: HTTP status code
    :return:
    * result: True or False
"""
return 200 <= status <= 299
def is_redirection(status):
"""
    Check whether the status code is a redirection (3xx).
    :param:
    * status: HTTP status code
    :return:
    * result: True or False
"""
return 300 <= status <= 399
def is_client_error(status):
"""
    Check whether the status code is a client error (4xx).
    :param:
    * status: HTTP status code
    :return:
    * result: True or False
"""
return 400 <= status <= 499
def is_server_error(status):
"""
    Check whether the status code is a server error (5xx).
    :param:
    * status: HTTP status code
    :return:
    * result: True or False
"""
return 500 <= status <= 599
# List of HTTP status codes
###############################################################################
# 1xx Informational
###############################################################################
HTTP_CONTINUE = 100
HTTP_SWITCHING_PROTOCOLS = 101
HTTP_PROCESSING = 102 # WebDAV
HTTP_CHECKPOINT = 103
HTTP_REQUEST_URI_TOO_LONG = 122
###############################################################################
# 2xx Success
###############################################################################
HTTP_OK = 200
HTTP_CREATED = 201
HTTP_ACCEPTED = 202
HTTP_NON_AUTHORITATIVE_INFORMATION = 203
HTTP_NO_CONTENT = 204
HTTP_RESET_CONTENT = 205
HTTP_PARTIAL_CONTENT = 206
HTTP_MULTI_STATUS = 207 # WebDAV
HTTP_IM_USED = 226
###############################################################################
# 3xx Redirection
###############################################################################
HTTP_MULTIPLE_CHOICES = 300
HTTP_MOVED_PERMANENTLY = 301
HTTP_FOUND = 302
HTTP_SEE_OTHER = 303
HTTP_NOT_MODIFIED = 304
HTTP_USE_PROXY = 305
HTTP_SWITCH_PROXY = 306
HTTP_TEMPORARY_REDIRECT = 307
HTTP_RESUME_INCOMPLETE = 308
###############################################################################
# 4xx Client Error
###############################################################################
HTTP_BAD_REQUEST = 400
HTTP_UNAUTHORIZED = 401
HTTP_PAYMENT_REQUIRED = 402
HTTP_FORBIDDEN = 403
HTTP_NOT_FOUND = 404
HTTP_METHOD_NOT_ALLOWED = 405
HTTP_NOT_ACCEPTABLE = 406
HTTP_PROXY_AUTHENTICATION_REQUIRED = 407
HTTP_REQUEST_TIMEOUT = 408
HTTP_CONFLICT = 409
HTTP_GONE = 410
HTTP_LENGTH_REQUIRED = 411
HTTP_PRECONDITION_FAILED = 412
HTTP_REQUEST_ENTITY_TOO_LARGE = 413
HTTP_REQUEST_URI_TOO_LONG = 414
HTTP_UNSUPPORTED_MEDIA_TYPE = 415
HTTP_REQUESTED_RANGE_NOT_SATISFIABLE = 416
HTTP_EXPECTATION_FAILED = 417
HTTP_IM_A_TEAPOT = 418
HTTP_UNPROCESSABLE_ENTITY = 422 # WebDAV
HTTP_LOCKED = 423 # WebDAV
HTTP_FAILED_DEPENDENCY = 424 # WebDAV
HTTP_UNORDERED_COLLECTION = 425
HTTP_UPGRADE_REQUIED = 426
HTTP_PRECONDITION_REQUIRED = 428
HTTP_TOO_MANY_REQUESTS = 429
HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE = 431
HTTP_NO_RESPONSE = 444
HTTP_RETRY_WITH = 449
HTTP_BLOCKED_BY_WINDOWS_PARENTAL_CONTROLS = 450
HTTP_CLIENT_CLOSED_REQUEST = 499
###############################################################################
# 5xx Server Error
###############################################################################
HTTP_INTERNAL_SERVER_ERROR = 500
HTTP_NOT_IMPLEMENTED = 501
HTTP_BAD_GATEWAY = 502
HTTP_SERVICE_UNAVAILABLE = 503
HTTP_GATEWAY_TIMEOUT = 504
HTTP_VERSION_NOT_SUPPORTED = 505
HTTP_VARIANT_ALSO_NEGOTIATES = 506
HTTP_INSUFFICIENT_STORAGE = 507 # WebDAV
HTTP_BANDWIDTH_LIMIT_EXCEEDED = 509
HTTP_NOT_EXTENDED = 510
HTTP_NETWORK_AUTHENTICATION_REQUIRED = 511
HTTP_NETWORK_READ_TIMEOUT_ERROR = 598 # not used in RFC
HTTP_NETWORK_CONNECT_TIMEOUT_ERROR = 599 # not used in RFC
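# Hedged usage sketch (added for illustration, not part of the original module):
# a quick self-check of the range helpers defined above.
if __name__ == '__main__':
    assert is_success(HTTP_OK)                        # 200 is in the 2xx range
    assert is_redirection(HTTP_MOVED_PERMANENTLY)     # 301 is in the 3xx range
    assert is_client_error(HTTP_NOT_FOUND)            # 404 is in the 4xx range
    assert is_server_error(HTTP_SERVICE_UNAVAILABLE)  # 503 is in the 5xx range
    print('all status-code range checks passed')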
|
the-stack_0_10625 | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import os
import sys
import argparse
import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common import dtype as mstype
from mindspore.nn import TrainOneStepCell, WithLossCell
from mindspore.nn.optim import Adam
from mindspore.ops import operations as P
from mindspore.common.initializer import TruncatedNormal
from mindspore.parallel._ps_context import _is_role_pserver, _is_role_worker
parser = argparse.ArgumentParser(description="test_sparse_embedding")
parser.add_argument("--device_target", type=str, default="Ascend")
args, _ = parser.parse_known_args()
device_target = args.device_target
context.set_context(
mode=context.GRAPH_MODE, device_target=device_target, enable_sparse=True
)
context.set_ps_context(enable_ps=True)
def fc_with_initialize(input_channels, out_channels):
"""weight initial for fc layer"""
weight = weight_variable()
bias = weight_variable()
return nn.Dense(input_channels, out_channels, weight, bias)
def weight_variable():
"""weight initial"""
return TruncatedNormal(0.02)
class LeNet5(nn.Cell):
def __init__(self, num_class=10):
super(LeNet5, self).__init__()
self.cast = P.Cast()
self.flatten = nn.Flatten()
self.embedding = nn.EmbeddingLookup(16, 4)
self.relu = nn.ReLU()
self.fc = fc_with_initialize(12, num_class)
def construct(self, x):
x = self.cast(x, mstype.int32)
x = self.embedding(x)
x = self.flatten(x)
x = self.fc(x)
return x
def do_sparse_embedding(ps=False):
epoch = 10
net = LeNet5(10)
if ps:
net.embedding.embedding_table.set_param_ps()
optimizer = Adam(filter(lambda x: x.requires_grad, net.get_parameters()))
optimizer.sparse_opt.add_prim_attr("primitive_target", "CPU")
criterion = nn.SoftmaxCrossEntropyWithLogits(sparse=True, reduction="mean")
net_with_criterion = WithLossCell(net, criterion)
train_network = TrainOneStepCell(net_with_criterion, optimizer)
train_network.set_train()
losses = []
for _ in range(epoch):
data = Tensor(np.random.randint(0, 15, (32, 3), np.int32))
label = Tensor(np.random.randint(0, 9, (32), np.int32))
if _is_role_pserver():
train_network(data, label)
sys.exit()
else:
loss = train_network(data, label).asnumpy()
losses.append(loss)
print(losses)
return losses
envs = os.environ
if __name__ == "__main__":
np.random.seed(0)
ps_loss = do_sparse_embedding(True)
if _is_role_worker():
context.reset_ps_context()
np.random.seed(0)
no_ps_loss = do_sparse_embedding()
context.set_ps_context(enable_ps=True)
assert np.allclose(ps_loss, no_ps_loss, rtol=1.0e-6, atol=1.0e-6)
|
the-stack_0_10626 | """write log to file."""
import logging
import os
ROOT_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
def get_logger(filename, logger_name=None, on_screen=False, level=None):
"""Return logger."""
if not logger_name:
logger_name = filename
logger = logging.getLogger(logger_name)
formatter = logging.Formatter(
'[%(asctime)s] %(levelname)s - %(filename)s:%(lineno)d - %(message)s',
'%Y-%m-%d %X')
if level is None or level == "info":
level = logging.INFO
elif level == "debug":
level = logging.DEBUG
elif level == "warning":
level = logging.WARN
elif level == "error":
level = logging.ERROR
elif level == "critical":
level = logging.CRITICAL
stream_handler = logging.StreamHandler()
stream_handler.setLevel(level)
file_handler = logging.FileHandler('%s/logs/%s.log' % (ROOT_PATH, filename))
file_handler.setLevel(level)
file_handler.setFormatter(formatter)
stream_handler.setFormatter(formatter)
logger.addHandler(file_handler)
if on_screen:
logger.addHandler(stream_handler)
logger.setLevel(level)
return logger
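# Hedged usage sketch (added for illustration, not part of the original module).
# get_logger() writes to "<project root>/logs/<filename>.log", so a logs/
# directory is assumed to exist under ROOT_PATH.
if __name__ == '__main__':
    log = get_logger('example', on_screen=True, level='debug')
    log.info('logger initialised')
    log.debug('debug output is enabled at this level')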
|
the-stack_0_10627 | #
# This file made available under CC0 1.0 Universal (https://creativecommons.org/publicdomain/zero/1.0/legalcode)
#
# Created with the Rule Development Kit: https://github.com/awslabs/aws-config-rdk
# Can be used stand-alone or with the Rule Compliance Engine: https://github.com/awslabs/aws-config-engine-for-compliance-as-code
#
import sys
import unittest
try:
from unittest.mock import MagicMock, patch, ANY
except ImportError:
import mock
from mock import MagicMock, patch, ANY
import botocore
from botocore.exceptions import ClientError
##############
# Parameters #
##############
# Define the default resource to report to Config Rules
DEFAULT_RESOURCE_TYPE = 'AWS::ApiGateway::RestApi'
#############
# Main Code #
#############
config_client_mock = MagicMock()
sts_client_mock = MagicMock()
apigw_client_mock = MagicMock()
class Boto3Mock():
def client(self, client_name, *args, **kwargs):
if client_name == 'config':
return config_client_mock
elif client_name == 'sts':
return sts_client_mock
elif client_name == 'apigateway':
return apigw_client_mock
else:
raise Exception("Attempting to create an unknown client")
sys.modules['boto3'] = Boto3Mock()
rule = __import__('API_GW_NOT_EDGE_OPTIMISED')
class ParameterTest(unittest.TestCase):
get_rest_apis_private = {
'items': [{'id': 'apiid1', 'endpointConfiguration': {'types': ['PRIVATE']}},
{'id': 'apiid2', 'endpointConfiguration': {'types': ['PRIVATE']}}]
}
invalid_rule_parameters = '{"ExceptionList":"apiid-1"}'
def test_api_invalid_parameter(self):
apigw_client_mock.get_rest_apis = MagicMock(return_value=self.get_rest_apis_private)
response = rule.lambda_handler(build_lambda_scheduled_event(rule_parameters=self.invalid_rule_parameters), {})
assert_customer_error_response(
self, response, 'InvalidParameterValueException', 'Invalid value in the ExceptionList: apiid-1')
class ComplianceTest(unittest.TestCase):
rule_parameters = '{"ExceptionList":"apiid1,apiid2"}'
invoking_event_iam_role_sample = '{"configurationItem":{"relatedEvents":[],"relationships":[],"configuration":{},"tags":{},"configurationItemCaptureTime":"2018-07-02T03:37:52.418Z","awsAccountId":"123456789012","configurationItemStatus":"ResourceDiscovered","resourceType":"AWS::IAM::Role","resourceId":"some-resource-id","resourceName":"some-resource-name","ARN":"some-arn"},"notificationCreationTime":"2018-07-02T23:05:34.445Z","messageType":"ConfigurationItemChangeNotification"}'
get_rest_apis_private = {
'items': [{'id': 'apiid1', 'endpointConfiguration': {'types': ['PRIVATE']}},
{'id': 'apiid2', 'endpointConfiguration': {'types': ['PRIVATE']}}]
}
get_rest_apis_regional = {
'items': [{'id': 'apiid1', 'endpointConfiguration': {'types': ['REGIONAL']}},
{'id': 'apiid2', 'endpointConfiguration': {'types': ['REGIONAL']}}]
}
get_rest_apis_edge = {
'items': [{'id': 'apiid1', 'endpointConfiguration': {'types': ['EDGE']}},
{'id': 'apiid2', 'endpointConfiguration': {'types': ['EDGE']}}]
}
get_rest_apis_mix_compliant_only = {
'items': [{'id': 'apiid1', 'endpointConfiguration': {'types': ['REGIONAL']}},
{'id': 'apiid2', 'endpointConfiguration': {'types': ['PRIVATE']}}]
}
get_rest_apis_mix = {
'items': [{'id': 'apiid1', 'endpointConfiguration': {'types': ['EDGE']}},
{'id': 'apiid2', 'endpointConfiguration': {'types': ['REGIONAL']}},
{'id': 'apiid3', 'endpointConfiguration': {'types': ['PRIVATE']}}]
}
get_rest_apis_multi_type = {
'items': [{'id': 'apiid1', 'endpointConfiguration': {'types': ['EDGE', 'PRIVATE']}},
{'id': 'apiid2', 'endpointConfiguration': {'types': ['REGIONAL']}}]
}
def test_no_gw(self):
apigw_client_mock.get_rest_apis = MagicMock(return_value={"items": []})
response = rule.lambda_handler(build_lambda_scheduled_event(), {})
resp_expected = []
resp_expected.append(build_expected_response('NOT_APPLICABLE', '123456789012', 'AWS::::Account'))
assert_successful_evaluation(self, response, resp_expected)
def test_private_only_COMPLIANT(self):
apigw_client_mock.get_rest_apis = MagicMock(return_value=self.get_rest_apis_private)
response = rule.lambda_handler(build_lambda_scheduled_event(), {})
resp_expected = []
resp_expected.append(build_expected_response('COMPLIANT', 'apiid1'))
resp_expected.append(build_expected_response('COMPLIANT', 'apiid2'))
assert_successful_evaluation(self, response, resp_expected, 2)
def test_regional_only_COMPLIANT(self):
apigw_client_mock.get_rest_apis = MagicMock(return_value=self.get_rest_apis_regional)
response = rule.lambda_handler(build_lambda_scheduled_event(), {})
resp_expected = []
resp_expected.append(build_expected_response('COMPLIANT', 'apiid1'))
resp_expected.append(build_expected_response('COMPLIANT', 'apiid2'))
assert_successful_evaluation(self, response, resp_expected, 2)
def test_edge_only_NON_COMPLIANT(self):
apigw_client_mock.get_rest_apis = MagicMock(return_value=self.get_rest_apis_edge)
response = rule.lambda_handler(build_lambda_scheduled_event(), {})
resp_expected = []
resp_expected.append(build_expected_response('NON_COMPLIANT', 'apiid1', annotation="EDGE OPTIMIZED API Gateway is present."))
resp_expected.append(build_expected_response('NON_COMPLIANT', 'apiid2', annotation="EDGE OPTIMIZED API Gateway is present."))
assert_successful_evaluation(self, response, resp_expected, 2)
def test_mix_COMPLIANT(self):
apigw_client_mock.get_rest_apis = MagicMock(return_value=self.get_rest_apis_mix_compliant_only)
response = rule.lambda_handler(build_lambda_scheduled_event(), {})
resp_expected = []
resp_expected.append(build_expected_response('COMPLIANT', 'apiid1'))
resp_expected.append(build_expected_response('COMPLIANT', 'apiid2'))
assert_successful_evaluation(self, response, resp_expected, 2)
def test_mix(self):
apigw_client_mock.get_rest_apis = MagicMock(return_value=self.get_rest_apis_mix)
response = rule.lambda_handler(build_lambda_scheduled_event(), {})
resp_expected = []
resp_expected.append(build_expected_response('NON_COMPLIANT', 'apiid1', annotation="EDGE OPTIMIZED API Gateway is present."))
resp_expected.append(build_expected_response('COMPLIANT', 'apiid2'))
resp_expected.append(build_expected_response('COMPLIANT', 'apiid3'))
assert_successful_evaluation(self, response, resp_expected, 3)
def test_edge_exception_COMPLIANT(self):
apigw_client_mock.get_rest_apis = MagicMock(return_value=self.get_rest_apis_edge)
response = rule.lambda_handler(build_lambda_scheduled_event(rule_parameters=self.rule_parameters), {})
resp_expected = []
resp_expected.append(build_expected_response('COMPLIANT', 'apiid1', annotation="API is part of exception list."))
resp_expected.append(build_expected_response('COMPLIANT', 'apiid2', annotation="API is part of exception list."))
assert_successful_evaluation(self, response, resp_expected, 2)
def test_mix_with_exceptions(self):
apigw_client_mock.get_rest_apis = MagicMock(return_value=self.get_rest_apis_mix)
response = rule.lambda_handler(build_lambda_scheduled_event(rule_parameters=self.rule_parameters), {})
resp_expected = []
resp_expected.append(build_expected_response('COMPLIANT', 'apiid1', annotation="API is part of exception list."))
resp_expected.append(build_expected_response('COMPLIANT', 'apiid2', annotation="API is part of exception list."))
resp_expected.append(build_expected_response('COMPLIANT', 'apiid3'))
assert_successful_evaluation(self, response, resp_expected, 3)
def test_multi_type(self):
apigw_client_mock.get_rest_apis = MagicMock(return_value=self.get_rest_apis_multi_type)
response = rule.lambda_handler(build_lambda_scheduled_event(), {})
resp_expected = []
resp_expected.append(build_expected_response('NON_COMPLIANT', 'apiid1', annotation="EDGE OPTIMIZED API Gateway is present."))
resp_expected.append(build_expected_response('COMPLIANT', 'apiid2'))
assert_successful_evaluation(self, response, resp_expected, 2)
####################
# Helper Functions #
####################
def build_lambda_configurationchange_event(invoking_event, rule_parameters=None):
event_to_return = {
'configRuleName':'myrule',
'executionRoleArn':'roleArn',
'eventLeftScope': False,
'invokingEvent': invoking_event,
'accountId': '123456789012',
'configRuleArn': 'arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan',
'resultToken':'token'
}
if rule_parameters:
event_to_return['ruleParameters'] = rule_parameters
return event_to_return
def build_lambda_scheduled_event(rule_parameters=None):
invoking_event = '{"messageType":"ScheduledNotification","notificationCreationTime":"2017-12-23T22:11:18.158Z"}'
event_to_return = {
'configRuleName':'myrule',
'executionRoleArn':'roleArn',
'eventLeftScope': False,
'invokingEvent': invoking_event,
'accountId': '123456789012',
'configRuleArn': 'arn:aws:config:us-east-1:123456789012:config-rule/config-rule-8fngan',
'resultToken':'token'
}
if rule_parameters:
event_to_return['ruleParameters'] = rule_parameters
return event_to_return
def build_expected_response(compliance_type, compliance_resource_id, compliance_resource_type=DEFAULT_RESOURCE_TYPE, annotation=None):
if not annotation:
return {
'ComplianceType': compliance_type,
'ComplianceResourceId': compliance_resource_id,
'ComplianceResourceType': compliance_resource_type
}
return {
'ComplianceType': compliance_type,
'ComplianceResourceId': compliance_resource_id,
'ComplianceResourceType': compliance_resource_type,
'Annotation': annotation
}
def assert_successful_evaluation(testClass, response, resp_expected, evaluations_count=1):
if isinstance(response, dict):
testClass.assertEquals(resp_expected['ComplianceType'], response['ComplianceType'])
testClass.assertEquals(resp_expected['ComplianceResourceType'], response['ComplianceResourceType'])
testClass.assertEquals(resp_expected['ComplianceResourceId'], response['ComplianceResourceId'])
testClass.assertTrue(response['OrderingTimestamp'])
if 'Annotation' in resp_expected or 'Annotation' in response:
testClass.assertEquals(resp_expected['Annotation'], response['Annotation'])
elif isinstance(response, list):
testClass.assertEquals(evaluations_count, len(response))
for i, response_expected in enumerate(resp_expected):
testClass.assertEquals(response_expected['ComplianceType'], response[i]['ComplianceType'])
testClass.assertEquals(response_expected['ComplianceResourceType'], response[i]['ComplianceResourceType'])
testClass.assertEquals(response_expected['ComplianceResourceId'], response[i]['ComplianceResourceId'])
testClass.assertTrue(response[i]['OrderingTimestamp'])
if 'Annotation' in response_expected or 'Annotation' in response[i]:
testClass.assertEquals(response_expected['Annotation'], response[i]['Annotation'])
def assert_customer_error_response(testClass, response, customerErrorCode=None, customerErrorMessage=None):
if customerErrorCode:
testClass.assertEqual(customerErrorCode, response['customerErrorCode'])
if customerErrorMessage:
testClass.assertEqual(customerErrorMessage, response['customerErrorMessage'])
testClass.assertTrue(response['customerErrorCode'])
testClass.assertTrue(response['customerErrorMessage'])
if "internalErrorMessage" in response:
testClass.assertTrue(response['internalErrorMessage'])
if "internalErrorDetails" in response:
testClass.assertTrue(response['internalErrorDetails'])
def sts_mock():
assume_role_response = {
"Credentials": {
"AccessKeyId": "string",
"SecretAccessKey": "string",
"SessionToken": "string"}}
sts_client_mock.reset_mock(return_value=True)
sts_client_mock.assume_role = MagicMock(return_value=assume_role_response)
##################
# Common Testing #
##################
class TestStsErrors(unittest.TestCase):
def test_sts_unknown_error(self):
rule.ASSUME_ROLE_MODE = True
sts_client_mock.assume_role = MagicMock(side_effect=botocore.exceptions.ClientError(
{'Error': {'Code': 'unknown-code', 'Message': 'unknown-message'}}, 'operation'))
response = rule.lambda_handler(build_lambda_configurationchange_event('{}'), {})
assert_customer_error_response(
self, response, 'InternalError', 'InternalError')
def test_sts_access_denied(self):
rule.ASSUME_ROLE_MODE = True
sts_client_mock.assume_role = MagicMock(side_effect=botocore.exceptions.ClientError(
{'Error': {'Code': 'AccessDenied', 'Message': 'access-denied'}}, 'operation'))
response = rule.lambda_handler(build_lambda_configurationchange_event('{}'), {})
assert_customer_error_response(
self, response, 'AccessDenied', 'AWS Config does not have permission to assume the IAM role.')
|
the-stack_0_10630 | # -*- coding: utf-8 -*-
# (c) 2009-2018 Martin Wendt and contributors; see WsgiDAV https://github.com/mar10/wsgidav
# Original PyFileServer (c) 2005 Ho Chun Wei.
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license.php
"""
WSGI middleware used for debugging (optional).
This module dumps request and response information to the console, depending
on current debug configuration.
On init:
    Define HTTP methods and litmus tests that should turn on the verbose mode
(currently hard coded).
For every request:
    Increase the value of ``environ['verbose']`` if the request should be debugged.
Also dump request and response headers and body.
Then pass the request to the next middleware.
These configuration settings are evaluated:
*verbose*
This is also used by other modules. This filter adds additional information
depending on the value.
======= ===================================================================
verbose Effect
======= ===================================================================
<= 3 No additional output (only standard request logging).
4 Dump headers of all requests and responses.
5 Dump headers and bodies of all requests and responses.
======= ===================================================================
*debug_methods*
Boost verbosity to 3 while processing certain request methods. This option
    is ignored when ``verbose < 2``.
Configured like::
debug_methods = ["PROPPATCH", "PROPFIND", "GET", "HEAD", "DELET E",
"PUT", "COPY", "MOVE", "LOCK", "UNLOCK",
]
*debug_litmus*
Boost verbosity to 3 while processing litmus tests that contain certain
substrings. This option is ignored, when ``verbose < 2``.
Configured like::
debug_litmus = ["notowner_modify", "props: 16", ]
"""
import sys
import threading
from wsgidav import compat, util
from wsgidav.middleware import BaseMiddleware
from wsgidav.util import safe_re_encode
__docformat__ = "reStructuredText"
_logger = util.get_module_logger(__name__)
class WsgiDavDebugFilter(BaseMiddleware):
def __init__(self, wsgidav_app, next_app, config):
super(WsgiDavDebugFilter, self).__init__(wsgidav_app, next_app, config)
self._config = config
# self.out = sys.stdout
self.passedLitmus = {}
# These methods boost verbose=2 to verbose=3
self.debug_methods = config.get("debug_methods", [])
        # Litmus tests containing these strings boost verbose=2 to verbose=3
self.debug_litmus = config.get("debug_litmus", [])
# Exit server, as soon as this litmus test has finished
self.break_after_litmus = [
# "locks: 15",
]
def __call__(self, environ, start_response):
""""""
# srvcfg = environ["wsgidav.config"]
verbose = self._config.get("verbose", 3)
method = environ["REQUEST_METHOD"]
debugBreak = False
dumpRequest = False
dumpResponse = False
if verbose >= 5:
dumpRequest = dumpResponse = True
# Process URL commands
if "dump_storage" in environ.get("QUERY_STRING", ""):
dav = environ.get("wsgidav.provider")
if dav.lockManager:
dav.lockManager._dump()
if dav.propManager:
dav.propManager._dump()
# Turn on max. debugging for selected litmus tests
litmusTag = environ.get("HTTP_X_LITMUS", environ.get("HTTP_X_LITMUS_SECOND"))
if litmusTag and verbose >= 3:
_logger.info("----\nRunning litmus test '{}'...".format(litmusTag))
for litmusSubstring in self.debug_litmus:
if litmusSubstring in litmusTag:
verbose = 5
debugBreak = True
dumpRequest = True
dumpResponse = True
break
for litmusSubstring in self.break_after_litmus:
if (
litmusSubstring in self.passedLitmus
and litmusSubstring not in litmusTag
):
_logger.info(" *** break after litmus {}".format(litmusTag))
sys.exit(-1)
if litmusSubstring in litmusTag:
self.passedLitmus[litmusSubstring] = True
# Turn on max. debugging for selected request methods
if verbose >= 3 and method in self.debug_methods:
verbose = 5
debugBreak = True
dumpRequest = True
dumpResponse = True
# Set debug options to environment
environ["wsgidav.verbose"] = verbose
# environ["wsgidav.debug_methods"] = self.debug_methods
environ["wsgidav.debug_break"] = debugBreak
environ["wsgidav.dump_request_body"] = dumpRequest
environ["wsgidav.dump_response_body"] = dumpResponse
# Dump request headers
if dumpRequest:
_logger.info("{} Request ---".format(method))
# _logger.info("<{}> --- {} Request ---".format(
# threading.currentThread().ident, method))
for k, v in environ.items():
if k == k.upper():
_logger.info("{:<20}: '{}'".format(k, safe_re_encode(v, "utf8")))
_logger.info("\n")
# Intercept start_response
#
sub_app_start_response = util.SubAppStartResponse()
nbytes = 0
first_yield = True
app_iter = self.next_app(environ, sub_app_start_response)
for v in app_iter:
# Start response (the first time)
if first_yield:
# Success!
start_response(
sub_app_start_response.status,
sub_app_start_response.response_headers,
sub_app_start_response.exc_info,
)
# Dump response headers
if first_yield and dumpResponse:
_logger.info(
"<{}> ---{} Response({}): ---".format(
threading.currentThread().ident,
method,
sub_app_start_response.status,
)
)
headersdict = dict(sub_app_start_response.response_headers)
for envitem in headersdict.keys():
_logger.info("{}: {}".format(envitem, repr(headersdict[envitem])))
_logger.info("")
# Check, if response is a binary string, otherwise we probably have
# calculated a wrong content-length
assert compat.is_bytes(v), v
# Dump response body
drb = environ.get("wsgidav.dump_response_body")
if compat.is_basestring(drb):
# Middleware provided a formatted body representation
_logger.info(drb)
drb = environ["wsgidav.dump_response_body"] = None
elif drb is True:
# Else dump what we get, (except for long GET responses)
if method == "GET":
if first_yield:
_logger.info("{}...".format(v[:50]))
elif len(v) > 0:
_logger.info(v)
nbytes += len(v)
first_yield = False
yield v
if hasattr(app_iter, "close"):
app_iter.close()
# Start response (if it hasn't been done yet)
if first_yield:
# Success!
start_response(
sub_app_start_response.status,
sub_app_start_response.response_headers,
sub_app_start_response.exc_info,
)
if dumpResponse:
_logger.info(
"<{}> --- End of {} Response ({:d} bytes) ---".format(
threading.currentThread().ident, method, nbytes
)
)
return
|
the-stack_0_10631 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and methods related to model_fn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
from tensorflow.python.estimator.export.export_output import ExportOutput
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.training import monitored_session
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import nest
class ModeKeys(object):
"""Standard names for model modes.
The following standard keys are defined:
* `TRAIN`: training mode.
* `EVAL`: evaluation mode.
* `PREDICT`: inference mode.
"""
TRAIN = 'train'
EVAL = 'eval'
PREDICT = 'infer'
LOSS_METRIC_KEY = 'loss'
AVERAGE_LOSS_METRIC_KEY = 'average_loss'
class EstimatorSpec(
collections.namedtuple('EstimatorSpec', [
'predictions', 'loss', 'train_op', 'eval_metric_ops',
'export_outputs', 'training_chief_hooks', 'training_hooks',
'scaffold', 'evaluation_hooks'
])):
"""Ops and objects returned from a `model_fn` and passed to an `Estimator`.
`EstimatorSpec` fully defines the model to be run by an `Estimator`.
"""
def __new__(cls,
mode,
predictions=None,
loss=None,
train_op=None,
eval_metric_ops=None,
export_outputs=None,
training_chief_hooks=None,
training_hooks=None,
scaffold=None,
evaluation_hooks=None):
"""Creates a validated `EstimatorSpec` instance.
Depending on the value of `mode`, different arguments are required. Namely
* For `mode == ModeKeys.TRAIN`: required fields are `loss` and `train_op`.
* For `mode == ModeKeys.EVAL`: required field is `loss`.
* For `mode == ModeKeys.PREDICT`: required fields are `predictions`.
model_fn can populate all arguments independent of mode. In this case, some
arguments will be ignored by an `Estimator`. E.g. `train_op` will be
ignored in eval and infer modes. Example:
```python
def my_model_fn(mode, features, labels):
predictions = ...
loss = ...
train_op = ...
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op)
```
Alternatively, model_fn can just populate the arguments appropriate to the
given mode. Example:
```python
def my_model_fn(mode, features, labels):
if (mode == tf.estimator.ModeKeys.TRAIN or
mode == tf.estimator.ModeKeys.EVAL):
loss = ...
else:
loss = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = ...
else:
train_op = None
if mode == tf.estimator.ModeKeys.PREDICT:
predictions = ...
else:
predictions = None
return tf.estimator.EstimatorSpec(
mode=mode,
predictions=predictions,
loss=loss,
train_op=train_op)
```
Args:
mode: A `ModeKeys`. Specifies if this is training, evaluation or
prediction.
predictions: Predictions `Tensor` or dict of `Tensor`.
loss: Training loss `Tensor`. Must be either scalar, or with shape `[1]`.
train_op: Op for the training step.
eval_metric_ops: Dict of metric results keyed by name. The values of the
dict are the results of calling a metric function, namely a
`(metric_tensor, update_op)` tuple.
export_outputs: Describes the output signatures to be exported to
`SavedModel` and used during serving.
A dict `{name: output}` where:
* name: An arbitrary name for this output.
* output: an `ExportOutput` object such as `ClassificationOutput`,
`RegressionOutput`, or `PredictOutput`.
Single-headed models only need to specify one entry in this dictionary.
Multi-headed models should specify one entry for each head, one of
which must be named using
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY.
training_chief_hooks: Iterable of `tf.train.SessionRunHook` objects to
run on the chief worker during training.
training_hooks: Iterable of `tf.train.SessionRunHook` objects to run
on all workers during training.
scaffold: A `tf.train.Scaffold` object that can be used to set
initialization, saver, and more to be used in training.
evaluation_hooks: Iterable of `tf.train.SessionRunHook` objects to
run during evaluation.
Returns:
A validated `EstimatorSpec` object.
Raises:
ValueError: If validation fails.
TypeError: If any of the arguments is not the expected type.
"""
# Validate train_op.
if train_op is None:
if mode == ModeKeys.TRAIN:
raise ValueError('Missing train_op.')
else:
_check_is_tensor_or_operation(train_op, 'train_op')
# Validate loss.
if loss is None:
if mode in (ModeKeys.TRAIN, ModeKeys.EVAL):
raise ValueError('Missing loss.')
else:
loss = _check_is_tensor(loss, 'loss')
loss_shape = loss.get_shape()
if loss_shape.num_elements() not in (None, 1):
raise ValueError('Loss must be scalar, given: {}'.format(loss))
if not loss_shape.is_compatible_with(tensor_shape.scalar()):
loss = array_ops.reshape(loss, [])
# Validate predictions.
if predictions is None:
if mode == ModeKeys.PREDICT:
raise ValueError('Missing predictions.')
predictions = {}
else:
if isinstance(predictions, dict):
predictions = {
k: _check_is_tensor(v, 'predictions[{}]'.format(k))
for k, v in six.iteritems(predictions)
}
else:
predictions = _check_is_tensor(predictions, 'predictions')
# Validate eval_metric_ops.
if eval_metric_ops is None:
eval_metric_ops = {}
else:
if not isinstance(eval_metric_ops, dict):
raise TypeError(
'eval_metric_ops must be a dict, given: {}'.format(eval_metric_ops))
for key, metric_value_and_update in six.iteritems(eval_metric_ops):
if (not isinstance(metric_value_and_update, tuple) or
len(metric_value_and_update) != 2):
raise TypeError(
'Values of eval_metric_ops must be (metric_value, update_op) '
'tuples, given: {} for key: {}'.format(
metric_value_and_update, key))
metric_value, metric_update = metric_value_and_update
for metric_value_member in nest.flatten(metric_value):
# Allow (possibly nested) tuples for metric values, but require that
# each of them be Tensors or Operations.
_check_is_tensor_or_operation(metric_value_member,
'eval_metric_ops[{}]'.format(key))
_check_is_tensor_or_operation(metric_update,
'eval_metric_ops[{}]'.format(key))
# Validate export_outputs.
if export_outputs is not None:
if not isinstance(export_outputs, dict):
raise TypeError('export_outputs must be dict, given: {}'.format(
export_outputs))
for v in six.itervalues(export_outputs):
if not isinstance(v, ExportOutput):
raise TypeError(
'Values in export_outputs must be ExportOutput objects. '
'Given: {}'.format(export_outputs))
# Note export_outputs is allowed to be empty.
if len(export_outputs) == 1:
(key, value), = export_outputs.items()
if key != signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
export_outputs[
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = value
if len(export_outputs) > 1:
if (signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY
not in export_outputs):
raise ValueError(
'Multiple export_outputs were provided, but none of them is '
'specified as the default. Do this by naming one of them with '
'signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY.')
# Validate that all tensors and ops are from the default graph.
default_graph = ops.get_default_graph()
# We enumerate possible error causes here to aid in debugging.
error_message_template = (
'{0} with "{1}" must be from the default graph. '
'Possible causes of this error include: \n\n'
'1) {0} was created outside the context of the default graph.'
'\n\n'
'2) The object passed through to EstimatorSpec was not created '
'in the most recent call to "model_fn".')
if isinstance(predictions, dict):
for key, value in six.iteritems(predictions):
if value.graph is not default_graph:
raise ValueError(error_message_template.format(
'prediction values',
'{0}: {1}'.format(key, value.name)))
elif predictions is not None:
# 'predictions' must be a single Tensor.
if predictions.graph is not default_graph:
raise ValueError(error_message_template.format(
'prediction values', predictions.name))
if loss is not None and loss.graph is not default_graph:
raise ValueError(error_message_template.format('loss', loss.name))
if train_op is not None and train_op.graph is not default_graph:
raise ValueError(error_message_template.format('train_op', train_op.name))
for key, value in list(six.iteritems(eval_metric_ops)):
values = nest.flatten(value)
for value in values:
if value.graph is not default_graph:
raise ValueError(error_message_template.format(
'eval_metric_ops',
'{0}: {1}'.format(key, value.name)))
# Validate hooks.
training_chief_hooks = tuple(training_chief_hooks or [])
training_hooks = tuple(training_hooks or [])
evaluation_hooks = tuple(evaluation_hooks or [])
for hook in training_hooks + training_chief_hooks + evaluation_hooks:
if not isinstance(hook, session_run_hook.SessionRunHook):
raise TypeError(
'All hooks must be SessionRunHook instances, given: {}'.format(
hook))
scaffold = scaffold or monitored_session.Scaffold()
# Validate scaffold.
if not isinstance(scaffold, monitored_session.Scaffold):
raise TypeError(
'scaffold must be tf.train.Scaffold. Given: {}'.format(scaffold))
return super(EstimatorSpec, cls).__new__(
cls,
predictions=predictions,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops,
export_outputs=export_outputs,
training_chief_hooks=training_chief_hooks,
training_hooks=training_hooks,
scaffold=scaffold,
evaluation_hooks=evaluation_hooks)
def _check_is_tensor_or_operation(x, name):
if not (isinstance(x, ops.Operation) or isinstance(x, ops.Tensor)):
raise TypeError('{} must be Operation or Tensor, given: {}'.format(name, x))
def _check_is_tensor(x, tensor_name):
"""Returns `x` if it is a `Tensor`, raises TypeError otherwise."""
if not isinstance(x, ops.Tensor):
raise TypeError('{} must be Tensor, given: {}'.format(tensor_name, x))
return x
|
the-stack_0_10632 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
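# Note (added comment): `delete` below is an earlier recursive attempt at the same
# problem; its only call site inside Solution.removeNthFromEnd is commented out,
# and the accepted solution uses the iterative two-pointer walk instead.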
def delete(start,root,sec_node,k):
if(root==None):
return start.next
if(k==0 and sec_node==None):
sec_node=root
return delete(start.next,root,sec_node.next,k)
if(k==0 and sec_node.next==None):
start.next=start.next.next
return
elif(k!=0):
return delete(start,root.next,sec_node,k-1)
return delete(start.next,root,sec_node.next,k)
class Solution:
# @param A : head node of linked list
# @param B : integer
# @return the head node in the linked list
def removeNthFromEnd(self, root, k):
if(root==None or (root.next==None and k==1)):
return None
# ans=delete(root,root,None,k)
# return root if ans==None else ans
sec_node=root
top=root
while(sec_node.next!=None):
if(k!=0):
sec_node=sec_node.next
k-=1
else:
sec_node=sec_node.next
top=top.next
if(k!=0):
return root.next
top.next=top.next.next
return root
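# --- Editor's usage sketch (not part of the original submission). The ListNode
# --- definition above is commented out, so a minimal stand-in is declared here
# --- purely to exercise removeNthFromEnd locally.
if __name__ == "__main__":
    class ListNode:
        def __init__(self, x):
            self.val = x
            self.next = None

    # Build 1 -> 2 -> 3 -> 4 -> 5 (the example from the problem statement below)
    # and remove the 2nd node from the end.
    head = ListNode(1)
    tail = head
    for v in range(2, 6):
        tail.next = ListNode(v)
        tail = tail.next

    head = Solution().removeNthFromEnd(head, 2)
    values = []
    while head:
        values.append(head.val)
        head = head.next
    print(values)  # expected: [1, 2, 3, 5]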
"""
Remove Nth Node from List End
Problem Description
Given a linked list A, remove the B-th node from the end of list and return its head.
For example, Given linked list: 1->2->3->4->5, and B = 2. After removing the second node from the end, the linked list becomes 1->2->3->5.
NOTE: If B is greater than the size of the list, remove the first node of the list.
NOTE: Try doing it using constant additional space.
Problem Constraints
1 <= |A| <= 106
Input Format
The first argument of input contains a pointer to the head of the linked list.
The second argument of input contains the integer B.
Output Format
Return the head of the linked list after deleting the B-th element from the end.
Example Input
Input 1:
A = [1, 2, 3, 4, 5]
B = 2
Input 2:
A = [1]
B = 1
Example Output
Output 1:
[1, 2, 3, 5]
Output 2:
[]
Example Explanation
Explanation 1:
In the first example, 4 is the second last element.
Explanation 2:
In the second example, 1 is the first and the last element.
""" |
the-stack_0_10633 | import numpy as np
import random
import milk.supervised.svm
import milk.supervised.multi
from milk.supervised.classifier import ctransforms
from .fast_classifier import fast_classifier
import milksets.wine
features,labels = milksets.wine.load()
A = np.arange(len(features))
random.seed(9876543210)
random.shuffle(A)
features = features[A]
labels = labels[A]
labelset = set(labels)
base = ctransforms(milk.supervised.svm.svm_raw(C=2.,kernel=milk.supervised.svm.rbf_kernel(2.**-3)),milk.supervised.svm.svm_binary())
def test_one_against_rest():
M = milk.supervised.multi.one_against_rest(base)
M = M.train(features[:100,:],labels[:100])
tlabels = [M.apply(f) for f in features[100:]]
for tl in tlabels:
assert tl in labelset
def test_one_against_one():
M = milk.supervised.multi.one_against_one(base)
M = M.train(features[:100,:],labels[:100])
tlabels = [M.apply(f) for f in features[100:]]
for tl in tlabels:
assert tl in labelset
tlabels_many = M.apply_many(features[100:])
assert np.all(tlabels == tlabels_many)
def test_two_thirds():
np.random.seed(2345)
C = milk.supervised.defaultclassifier('fast')
X = np.random.rand(120,4)
X[:40] += np.random.rand(40,4)
X[:40] += np.random.rand(40,4)
X[40:80] -= np.random.rand(40,4)
X[40:80] -= np.random.rand(40,4)
Y = np.repeat(np.arange(3), 40)
model = C.train(X,Y)
Y_ = np.array([model.apply(x) for x in X])
assert (Y_ == Y).mean() * 3 > 2
def test_multi_labels():
clabels = [[lab, lab+7] for lab in labels]
multi_label = milk.supervised.multi.one_against_rest_multi(base)
model = multi_label.train(features[::2], clabels[::2])
test_vals = [model.apply(f) for f in features[1::2]]
for ts in test_vals:
if 0.0 in ts: assert 7.0 in ts
if 1.0 in ts: assert 8.0 in ts
if 2.0 in ts: assert 9.0 in ts
def test_classifier_no_set_options():
# Basically these should not raise an exception
milk.supervised.multi.one_against_rest_multi(fast_classifier())
milk.supervised.multi.one_against_rest(fast_classifier())
milk.supervised.multi.one_against_one(fast_classifier())
def test_tree():
mtree = milk.supervised.multi.multi_tree_learner(fast_classifier())
labels = [0,1,2,2,3,3,3,3]
features = np.random.random_sample((len(labels), 8))
model = mtree.train(features, labels)
counts = np.zeros(4)
for ell in labels:
counts[ell] += 1
g0,g1 = milk.supervised.multi.split(counts)
assert np.all(g0 == [3]) or np.all(g1 == [3])
def list_to_zero(v):
if isinstance(v, list):
return 1000
return v
def r(m):
if len(m) == 1: return int(m[0])
else: return sorted([r(m[1]), r(m[2])], key=list_to_zero)
assert r(model.model) == [3,[2,[0,1]]]
|
the-stack_0_10634 | #!/usr/bin/env python
import math
import os
import sys
from PIL import Image
from escpos.printer import Serial
STRIP_WIDTH = 8
MAX_WIDTH = 540
if len(sys.argv) != 2:
print("\033[1;31;40musage: {} imagefile.png\033[0m".format(sys.argv[0]), file=sys.stderr)
sys.exit(1)
image = Image.open(sys.argv[1])
print("Loaded image: {}".format(sys.argv[1]))
print("Size: {}".format(image.size))
# Resize picture if too wide
(img_w, img_h) = image.size
if img_w > MAX_WIDTH:
img_h = int(MAX_WIDTH * img_h / float(img_w))
img_w = MAX_WIDTH
image = image.resize((img_w, img_h), Image.ANTIALIAS)
print("Too large, resizing to: {}".format((img_w, img_h)))
image = image.convert('L')
num_strips = math.ceil(img_h / STRIP_WIDTH)
print("Total Strips: {}".format(num_strips))
print("Strip size: {}".format((img_w, STRIP_WIDTH)))
strips = [None] * num_strips
for i in range(num_strips):
area = (0, STRIP_WIDTH * i, img_w, STRIP_WIDTH * (i + 1))
strips[i] = image.crop(area)
if img_h % STRIP_WIDTH != 0:
strips[-1] = strips[-1].crop((0, 0, img_w, img_h % STRIP_WIDTH))
# Dump strips into a temporary directory
if not os.path.exists('.temp'):
os.mkdir('.temp')
for i in range(num_strips):
strips[i].save(os.path.join('.temp', "strip{0:03}.png".format(i)))
# Do the printing
p = Serial(devfile='COM5', baudrate=9600, parity='N', stopbits=1, timeout=1.00, dsrdtr=True)
p.text("\033@") # Reset
p.text("\033C\20") # Set sheet eject length
p.text("\0331") # Select 1/8-inch line spacing
p.text("\033$\000\000") # Set left margin
p.text("\033a\001") # Center align
for i in range(num_strips):
p.image(os.path.join('.temp', "strip{0:03}.png".format(i)))
p.text("\033a\000") # Left align
#p.cut()
|
the-stack_0_10635 | import json
from flask import render_template, url_for, redirect, request, send_from_directory, g, flash
from flask_login import current_user, login_user, logout_user, login_required
from flask_babel import _, get_locale
from flask_babel import lazy_gettext as _l
from wtforms import RadioField, TextAreaField
from wtforms.validators import DataRequired, Length
from app import app, db, moment
from app.models import Class, User, Group, Test, Result, TestResume, LogRequest, LogClick
from app.forms import EmptyForm, LoginForm, RegisterForm, AddGroupForm, UpdateGroupForm, AddTestForm, UpdateTestForm, UpdateProfileForm
from app.spec_checks import check_test_9
from datetime import datetime
# ------------------------ main pages ------------------------ #
@app.route('/')
@app.route('/index')
def index():
groups = Group.query.all()
return render_template( "index.html", title = _("All tests"), menu = _("Test by groups"), groups = groups )
@app.route('/group/<int:id>')
def group(id):
group = Group.query.get(id)
link = url_for( 'index' )
path = f"<a href='{link}'>{_('All tests')}</a> / {group.title}"
return render_template( "group.html", title = f"{group.title}", path = path, menu = "Тесты в группе", group = group )
@app.route('/test/<int:id>')
def test(id):
test = Test.query.get(id)
group = Group.query.get(test.id_group)
link0 = url_for( 'index' )
link1 = url_for( 'group', id = group.id )
path = f"<a href={link0}>{_('All tests')}</a> / <a href={link1}>{group.title}</a> / {test.name}"
return render_template( "test-base.html", title = test.name + " / " + _("Info"), path = path, test = test )
@app.route('/testing/<int:id>', methods = [ 'GET', 'POST' ])
def testing(id):
test = Test.query.get(id)
group = Group.query.get(test.id_group)
link0 = url_for( 'index' )
link1 = url_for( 'group', id = group.id )
path = f"<a href={link0}>{_('All tests')}</a> / <a href={link1}>{group.title}</a> / {test.name}"
class TestingForm(EmptyForm):
pass
for question in test.questions:
setattr( TestingForm,
str(question.id),
RadioField( question.text, choices = [ ( a.id, a.text ) for a in question.answers ] ,
validators = [ DataRequired() ] ) )
form = TestingForm()
if form.validate_on_submit():
arr = form.data
score = -1
mark = 0
quests = test.questions.count()
percent = -1
if current_user.is_authenticated:
id_user = current_user.id
else:
id_user = None
if id != 9:
# Checks usual tests
score = 0
for question in test.questions:
if arr[ str(question.id) ] == str( question.true_answer() ):
score += 1
percent = round( ( score / quests ) * 100, 1 )
if percent >= 90:
mark = 5
elif 75 < percent < 90:
mark = 4
elif 50 < percent <= 75:
mark = 3
elif percent <= 50:
mark = 2
elif id == 9:
# Check test 9
mark = check_test_9( arr )
print( mark )
result = Result( id_test = test.id, id_user = id_user, mark = mark, score = score, quests = quests, percent = percent )
db.session.add( result )
db.session.commit()
last_insert_id = result.id
return redirect( url_for( "result", id = last_insert_id ) )
return render_template( "test.html", title = test.name + " / " + _("Testing"), path = path, form = form, test = test )
@app.route('/result/<int:id>')
def result(id):
result = Result.query.get( id )
test = Test.query.get( result.id_test )
group = Group.query.get( test.id_group )
if result.id_user is None:
user = "None"
else:
user = User.query.get( result.id_user )
link0 = url_for( 'index' )
link1 = url_for( 'group', id = group.id )
path = f"<a href={link0}>{_('All tests')}</a> / <a href={link1}>{group.title}</a> / {test.name}"
return render_template( "test-result.html", title = test.name + " / " + _("Result"), path = path, result = result,
test = test, user = user )
@app.route('/edit_profile', methods = [ 'GET', 'POST' ])
# @login_required
def profile():
form = UpdateProfileForm(current_user.username)
classes = Class.query.all()
classes_list = [(c.id, c.abbr) for c in classes]
form.id_class.choices = classes_list
if form.validate_on_submit():
current_user.username = form.username.data
current_user.name = form.name.data
current_user.lastname = form.lastname.data
current_user.description = form.description.data
current_user.id_class = form.id_class.data
current_user.role = form.role.data
current_user.sex = form.sex.data
db.session.commit()
return redirect( url_for( 'profile' ) )
elif request.method == 'GET':
form.username.data = current_user.username
form.name.data = current_user.name
form.lastname.data = current_user.lastname
form.description.data = current_user.description
form.id_class.data = current_user.id_class
form.role.data = current_user.role
form.sex.data = current_user.sex
return render_template( "forms/profile.html", title = _( 'Profile' ), form = form )
# ------------------------ login system ------------------------ #
@app.route('/login', methods = [ 'GET', 'POST' ])
def login():
if current_user.is_authenticated:
return redirect( url_for( "index" ) )
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by( username = form.username.data ).first()
if user is None or not user.check_password( form.password.data ):
return redirect( url_for( "login" ) )
login_user( user, remember = form.remember_me.data )
return redirect( url_for( "index" ) )
return render_template( "login.html", title = _("Sign in"), form = form )
@app.route('/register', methods = [ 'GET', 'POST' ])
def register():
if current_user.is_authenticated:
return redirect( url_for("index") )
classes = Class.query.all()
classes_list = [ ( c.id, c.abbr ) for c in classes ]
form = RegisterForm()
form.id_class.choices = classes_list
if form.validate_on_submit():
user = User( username = form.username.data, name = form.name.data, lastname = form.lastname.data,
email = form.email.data, id_class = form.id_class.data, role = form.role.data )
user.set_password( password = form.password.data )
db.session.add( user )
db.session.commit()
return redirect( url_for( "login" ) )
return render_template( "register.html", title = _( "Register" ), form = form )
@app.route('/logout')
def logout():
logout_user()
return redirect( url_for( "index" ) )
# ------------------------ forms pages ------------------------ #
@app.route('/add_group', methods = [ 'GET', 'POST' ])
@login_required
def add_group():
form = AddGroupForm()
if form.validate_on_submit():
group = Group( title = form.title.data, description = form.description.data )
db.session.add( group )
db.session.commit()
last_insert_id = group.id
return redirect( url_for( "group", id = last_insert_id ) )
return render_template( "forms/group-add.html", title = _( "Add group" ), form = form )
@app.route('/update_group/<int:id>', methods = [ 'GET', 'POST' ])
def update_group(id):
form = UpdateGroupForm()
group = Group.query.get( id )
if form.validate_on_submit():
group.title = form.title.data
group.description = form.description.data
db.session.commit()
return redirect( url_for( "group", id = id ) )
elif request.method == 'GET':
form.title.data = group.title
form.description.data = group.description
return render_template( "forms/group-update.html", title = _('Change of group'), form = form )
@app.route('/add_test', methods = [ 'GET', 'POST' ])
@login_required
def add_test():
groups = Group.query.all()
groups_list = [ ( g.id, g.title ) for g in groups ]
form = AddTestForm()
form.id_group.choices = groups_list
if form.validate_on_submit():
test = Test( id_group = form.id_group.data, name = form.name.data, annotation = form.annotation.data,
description = form.description.data )
db.session.add( test )
db.session.commit()
last_insert_id = test.id
return redirect( url_for( "test", id = last_insert_id ) )
return render_template( "forms/test-add.html", title = _( "Add test" ), form = form )
@app.route('/update_test/<int:id>', methods = ['GET', 'POST'])
@login_required
def update_test(id):
groups = Group.query.all()
groups_list = [(g.id, g.title) for g in groups]
test = Test.query.get( id )
class UpdateSpecTestForm(UpdateTestForm):
pass
min_key = 0
max_key = 0
name_key = ""
if test.is_usual():
min_key = 2
max_key = 6
name_key = _l( "Test resume for mark " )
else:
if id == 9:
min_key = -1
max_key = 11
name_key = _l( "Test resume for key " )
for i in range( min_key, max_key ):
setattr( UpdateSpecTestForm, f'test_resume_{i}',
TextAreaField( "{}'{}'".format( name_key, i ), validators = [DataRequired(), Length( min = 32, max = 512 )] ) )
form = UpdateSpecTestForm()
form.id_group.choices = groups_list
if form.validate_on_submit():
test.id_group = form.id_group.data
test.name = form.name.data
test.difficult = form.difficult.data
test.annotation = form.annotation.data
test.description = form.description.data
for i in range(min_key, max_key):
test.set_description_mark( i, form[ f'test_resume_{i}' ].data )
db.session.commit()
return redirect( url_for( "test", id = id ) )
elif request.method == 'GET':
form.id_group.data = test.id_group
form.name.data = test.name
form.difficult.data = test.difficult
form.annotation.data = test.annotation
form.description.data = test.description
for i in range( min_key, max_key ):
form[ f'test_resume_{i}' ].data = test.get_description_mark(i)
return render_template( "forms/test-update.html", title = _('Change of test'), form = form,
min_key = min_key, max_key = max_key )
# ------------------------ admin pages ------------------------ #
@app.route('/admin/tables')
def admin_tables():
user = User
class_ = Class
group = Group
test = Test
result = Result
return render_template( "admin/tables.html", title = _('Admin-panel') + ' / ' + _('Tables'),
user = user, group = group, test = test, result = result, class_ = class_ )
@app.route('/admin/table/classes')
def admin_table_classes():
classes = Class.query.all()
title = f"{_( 'Admin-panel' )} / {_( 'Tables' )} / {_( 'Classes' )}"
link0 = url_for( 'admin_tables' )
path = f"{_('Admin-panel')} / <a href='{link0}'>{_('Tables')}</a> / {_('Classes')}"
return render_template( "admin/table-classes.html", title = title, path = path, classes = classes, wide = True )
@app.route('/admin/table/users')
def admin_table_users():
users = User.query.all()
title = f"{_( 'Admin-panel' )} / {_( 'Tables' )} / {_( 'Users' )}"
link0 = url_for( 'admin_tables' )
path = f"{_('Admin-panel')} / <a href='{link0}'>{_('Tables')}</a> / {_('Users')}"
return render_template( "admin/table-users.html", title = title, path = path, users = users, wide = True )
@app.route('/admin/table/groups')
def admin_table_groups():
groups = Group.query.all()
title = f"{_( 'Admin-panel' )} / {_( 'Tables' )} / {_( 'Groups' )}"
link0 = url_for( 'admin_tables' )
path = f"{_( 'Admin-panel' )} / <a href='{link0}'>{_( 'Tables' )}</a> / {_( 'Groups' )}"
return render_template( "admin/table-groups.html", title = title, path = path, groups = groups, wide = True )
@app.route('/admin/table/tests')
def admin_table_tests():
tests = Test.query.all()
title = _( 'Admin-panel' ) + ' / ' + _( 'Tables' ) + ' / ' + _( 'Tests' )
link0 = url_for( 'admin_tables' )
path = f"{_( 'Admin-panel' )} / <a href='{link0}'>{_( 'Tables' )}</a> / {_( 'Tests' )}"
return render_template( "admin/table-tests.html", title = title, path = path, tests = tests, wide = True )
@app.route('/admin/table/results')
def admin_table_results():
results = Result.query.all()
title = _( 'Admin-panel' ) + ' / ' + _( 'Tables' ) + ' / ' + _( 'Results' )
link0 = url_for( 'admin_tables' )
path = f"{_( 'Admin-panel' )} / <a href='{link0}'>{_( 'Tables' )}</a> / {_( 'Results' )}"
return render_template( "admin/table-results.html", title = title, path = path, results = results, wide = True )
@app.route('/admin/statistic')
def admin_statistic():
clicks = LogClick.query
requests = LogRequest.query
return render_template( "admin/statistic.html", title = _('Admin-panel') + ' / ' + _('Statistic'),
clicks = clicks, requests = requests )
# ------------------------ API pages ------------------------ #
@app.route('/api')
def api():
return render_template("api.html", title = _('API methods list'))
# --- users ---
@app.route('/api/get_users_count')
def api_get_users_count():
count = User.query.count()
return str( count )
# --- groups ---
@app.route('/api/get_groups_count')
def api_get_groups_count():
count = Group.query.count()
return str( count )
@app.route('/api/get_groups_list')
def api_get_groups_list():
list = Group.query.all()
arr = []
for item in list:
arr.append( { 'id': item.id, 'title': item.title } )
return json.dumps(arr)
# --- tests ---
@app.route('/api/get_tests_count')
def api_get_tests_count():
count = Test.query.count()
return str( count )
@app.route('/api/get_tests_list')
def api_get_tests_list():
list = Test.query.all()
arr = []
for item in list:
arr.append( { 'id': item.id, 'id_group': item.id_group, 'name': item.name } )
return json.dumps( arr )
@app.route('/api/get_tests_count_by_group/<int:id>')
def api_get_tests_count_by_group(id):
if Group.query.get( id ):
count = Test.query.filter( Test.id_group == id ).count()
else:
count = 'null'
return str( count )
@app.route('/api/get_tests_list_by_group/<int:id>')
def api_get_tests_list_by_group(id):
if Group.query.get( id ):
list = Test.query.filter( Test.id_group == id ).all()
arr = []
for item in list:
arr.append( { 'id': item.id, 'id_group': item.id_group, 'name': item.name } )
return json.dumps( arr )
else:
response = 'null'
return str( response )
# --- results ---
@app.route('/api/get_results_count')
def api_get_results_count():
count = Result.query.count()
return str( count )
@app.route('/api/get_results_list')
def api_get_results_list():
list = Result.query.all()
arr = []
for item in list:
arr.append( { 'id': item.id, 'id_test': item.id_test, 'id_user': item.id_user, 'mark': item.mark } )
return json.dumps( arr )
@app.route('/api/get_results_count_by_test/<int:id>')
def api_get_results_count_by_test(id):
if Test.query.get( id ):
count = Result.query.filter( Result.id_test == id ).count()
else:
count = 'null'
return str( count )
# ------------------------ system pages ------------------------ #
@app.route('/about_system')
def about_system():
return render_template( "about-system.html", title = _('About TeSi') )
@app.route('/about_us')
def about_us():
return render_template( "about-us.html", title = _('About us') )
# ------------------------ technical pages ------------------------ #
@app.route('/null')
def null():
return "null"
@app.route('/favicon.ico')
@app.route('/robots.txt')
@app.route('/sitemap.xml')
def static_from_root():
return send_from_directory(app.static_folder, request.path[1:])
@app.errorhandler(404)
def error_404(e):
path = _('Errors') + " / 400 / " + _('Error 404')
return render_template( "errors/404.html", title = _( 'Error 404' ), path = path ), 404
@app.errorhandler(405)
def error_405(e):
path = _('Errors') + " / 400 / " + _('Error 405')
return render_template( "errors/405.html", title = _( 'Error 405' ), path = path ), 405
@app.errorhandler(500)
def error_500(e):
path = _('Errors') + " / 500 / " + _('Error 500')
return render_template( "errors/500.html", title = _( 'Error 500' ), path = path ), 500
@app.before_request
def before_request():
if current_user.is_authenticated:
current_user.datetime_last = datetime.utcnow()
db.session.commit()
g.locale = str( get_locale() )
g.theme = 'dark'
|
the-stack_0_10636 | from dagster import Field, RepositoryDefinition, Shape, composite_solid, pipeline, seven, solid
@solid(
config={
'cluster_cfg': Shape(
{
'num_mappers': Field(int),
'num_reducers': Field(int),
'master_heap_size_mb': Field(int),
'worker_heap_size_mb': Field(int),
}
),
'name': Field(str),
}
)
def hello(context):
context.log.info(seven.json.dumps(context.solid_config['cluster_cfg']))
return 'Hello, %s!' % context.solid_config['name']
def config_mapping_fn(cfg):
return {
'hello': {
'config': {
'cluster_cfg': {
'num_mappers': 100,
'num_reducers': 20,
'master_heap_size_mb': 1024,
'worker_heap_size_mb': 8192,
},
'name': cfg['name'],
}
}
}
@composite_solid(
config_fn=config_mapping_fn,
config={'name': Field(str, is_required=False, default_value='Sam')},
)
def hello_external():
return hello()
@pipeline
def my_pipeline():
hello_external()
def define_repository():
return RepositoryDefinition('config_mapping', pipeline_defs=[my_pipeline])
|
the-stack_0_10638 | import glob
import importlib
import itertools
import json
import logging
from io import StringIO
from os import path
from pprint import pprint
import click
import conllu
import mlflow
import pandas as pd
import spacy
from gensim.models.keyedvectors import KeyedVectors
from lemmy import Lemmatizer
from sklearn.model_selection import train_test_split
from spacy.gold import GoldParse
from spacy.scorer import Scorer
from tqdm import tqdm
import conll17_ud_eval
from model_builder.eval import lemmy_accuracy
from model_builder.io import (
parse_szk_morph,
parse_szk_dep,
sentence_repr,
read_conllu_data_for_lemmy,
RESOURCES_ROOT,
format_as_conllu,
)
from model_builder.ner import SpacyNerTrainer, DataIterator, sentence_to_str
logging.basicConfig(level=logging.INFO)
@click.group()
def cli():
pass
@cli.command()
@click.argument("from_path")
@click.argument("to_path")
def convert_vectors_to_txt(from_path, to_path):
model = KeyedVectors.load_word2vec_format(
from_path, binary=True, unicode_errors="replace"
)
model.save_word2vec_format(to_path, binary=False)
@cli.command()
@click.argument("vectors_path")
def eval_vectors(vectors_path):
model = KeyedVectors.load_word2vec_format(
vectors_path, binary=False, unicode_errors="replace"
)
analogies_result = model.wv.evaluate_word_analogies(
path.join(RESOURCES_ROOT, "questions-words-hu.txt"),
dummy4unknown=True,
restrict_vocab=None,
case_insensitive=False,
)
pprint(analogies_result[0])
@cli.command()
@click.argument("model_name")
def smoke_test(model_name):
nlp = spacy.load(model_name)
doc = nlp(
"Csiribiri csiribiri zabszalma - négy csillag közt alszom ma. "
"Csiribiri csiribiri bojtorján lélek lép a lajtorján."
)
print(nlp)
print(doc, type(doc))
pprint(
[
dict(
text=t.text,
lemma=t.lemma_,
pos=t.pos_,
tag=t.tag_,
dep=t.dep_,
head=t.head,
is_stop=t.is_stop,
has_vector=t.has_vector,
brown_cluser=t.cluster,
prob=t.prob,
)
for t in doc
]
)
@cli.command()
@click.argument("input_file")
@click.argument("output_file")
def normalize_ud_corpus(input_file, output_file):
with open(input_file) as f, open(output_file, "w") as of:
for line in tqdm(f):
stripped_line = line.strip()
if len(stripped_line) == 0 or stripped_line[0] == "#":
of.write(line)
else:
parts = stripped_line.split("\t")
dep_label = parts[7]
dep_label = dep_label.split(":")[0]
parts[7] = dep_label
of.write("\t".join(parts) + "\n")
@cli.command()
@click.argument("from_glob")
@click.argument("to_path")
@click.argument("dev_path")
@click.argument("test_path")
@click.option("--morph/--dep", default=False)
def convert_szk_to_conllu(from_glob, to_path, dev_path, test_path, morph):
ignored = []
for fpath in [dev_path, test_path]:
with open(fpath) as f:
ignored.extend(map(sentence_repr, conllu.parse(f.read())))
parser = parse_szk_morph if morph else parse_szk_dep
ignored = set(ignored)
parsed = []
for fpath in glob.glob(from_glob):
for sent in conllu.parse("\n\n".join(parser(fpath))):
if sentence_repr(sent) not in ignored:
parsed.append(sent)
logging.info("Read {} sentences".format(len(parsed)))
with open(to_path, "w") as outf:
out = "".join(sent.serialize() for sent in parsed)
outf.write(out)
@cli.command()
@click.argument("train_path")
@click.argument("test_path")
@click.argument("model_path")
def train_lemmy(train_path, test_path, model_path):
X_train, y_train = read_conllu_data_for_lemmy(train_path)
X_test, y_test = read_conllu_data_for_lemmy(test_path)
lemmatizer = Lemmatizer()
lemmatizer.fit(X_train, y_train)
lemmy_accuracy(lemmatizer, X_test, y_test)
with open(model_path, "w") as f:
json.dump(lemmatizer.rules, f)
@cli.command()
@click.argument("model_name")
@click.argument("test_data_path")
@click.argument("ner_test_data")
def benchmark_model(model_name, test_data_path, ner_test_data):
with open(test_data_path) as f:
data = conllu.parse(f.read())
text = " ".join(d.metadata["text"] for d in data)
load_model = getattr(importlib.import_module(model_name), "load")
nlp = load_model()
_parsed = StringIO(format_as_conllu(nlp(text), 1))
parsed = conll17_ud_eval.load_conllu(_parsed)
gold = conll17_ud_eval.load_conllu_file(test_data_path)
results = pd.DataFrame(
{k: v.__dict__ for k, v in conll17_ud_eval.evaluate(gold, parsed).items()}
).T
print(results)
diterator = DataIterator()
test_sents = list(itertools.islice(diterator.tagged_sentences(ner_test_data), None))
scorer = Scorer()
for sentence, annot in test_sents:
doc_gold_text = nlp.make_doc(sentence)
gold = GoldParse(doc_gold_text, entities=annot)
predicted = nlp(sentence)
scorer.score(predicted, gold)
print(scorer.scores)
@cli.command()
@click.argument("model_name")
@click.argument("output_path")
@click.argument("train_data")
@click.argument("dev_data")
@click.argument("test_data")
@click.argument("dropout")
@click.argument("n_iter")
@click.argument("patience")
def train_ner(model_name, output_path, train_data, dev_data, test_data, dropout, n_iter, patience):
mlflow.set_tracking_uri("./mlruns")
mlflow.set_experiment("Spacy NER")
mlflow.start_run(run_name="Using all")
if model_name in ["None", "False", "", "blank"]:
model_name = None
trainer = SpacyNerTrainer(model_name, output_path)
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logging.info("Reading train data")
diterator = DataIterator()
train_sentences = list(tqdm(itertools.islice(diterator.tagged_sentences(train_data), None)))
logging.info("Got {} sentences with at least one entity".format(len(train_sentences)))
logging.info("Reading test data")
test_sentences = list(tqdm(diterator.tagged_sentences(test_data)))
logging.info("Got {} sentences with at least one entity".format(len(test_sentences)))
logging.info("Reading dev data")
dev_sentences = list(tqdm(diterator.tagged_sentences(dev_data)))
logging.info("Got {} sentences with at least one entity".format(len(dev_sentences)))
trainer(train_sentences, dev_sentences, test_sentences, int(n_iter), float(dropout), int(patience))
mlflow.end_run()
@cli.command()
@click.argument("szegedner_data")
@click.argument("train_data")
@click.argument("dev_data")
@click.argument("test_data")
def split_ner_data(szegedner_data, train_data, dev_data, test_data):
diterator = DataIterator()
logging.info("Reading gold data")
gold_sents = list(tqdm(itertools.islice(diterator.sentences_with_tags(szegedner_data), None)))
train_sents, all_test_sents = train_test_split(gold_sents, test_size=.2, random_state=42)
dev_sents, test_sents = train_test_split(all_test_sents, test_size=.5, random_state=42)
logging.info("Storing training data")
with open(train_data, "w") as f:
for i, s in tqdm(enumerate(train_sents)):
f.write(sentence_to_str(s))
f.write("\n")
f.write("\n")
logging.info("Storing test data")
with open(dev_data, "w") as f:
for i, s in tqdm(enumerate(dev_sents)):
f.write(sentence_to_str(s))
f.write("\n")
f.write("\n")
logging.info("Storing test data")
with open(test_data, "w") as f:
for i, s in tqdm(enumerate(test_sents)):
f.write(sentence_to_str(s))
f.write("\n")
f.write("\n")
if __name__ == "__main__":
cli()
|
the-stack_0_10639 | import psyco; psyco.full()
from fltk import *
import copy
import numpy as np
import sys
#if '../PyCommon/modules' not in sys.path:
# sys.path.append('../PyCommon/modules')
if './modules' not in sys.path:
sys.path.append('./modules')
import Math.mmMath as mm
import Resource.ysMotionLoader as yf
import Renderer.ysRenderer as yr
import Renderer.csVpRenderer as cvr
import Simulator.csVpWorld as cvw
import Simulator.csVpModel as cvm
import GUI.ysSimpleViewer as ysv
import Optimization.ysAnalyticConstrainedOpt as yac
import ArticulatedBody.ysJacobian as yjc
import Util.ysPythonEx as ype
import ArticulatedBody.ysReferencePoints as yrp
import ArticulatedBody.ysMomentum as ymt
import ArticulatedBody.ysControl as yct
import Motion.ysHierarchyEdit as yme
import Simulator.ysPhysConfig as ypc
import numpy.linalg as npl
import mtOptimize as mot
import mtInitialize_005 as mit
contactState = 0
g_applyForce = False
g_initFlag = 0
softConstPoint = [0, 0, 0]
forceShowFrame = 0
forceApplyFrame = 0
JsysPre = 0
JsupPreL = 0
JsupPreR = 0
JsupPre = 0
stage = 0
## Constant
STATIC_BALANCING = 0
MOTION_TRACKING = 1
DYNAMIC_BALANCING = 2
POWERFUL_BALANCING = 3
POWERFUL_MOTION_TRACKING = 4
FLYING = 5
def checkAll(list, value) :
for i in range(len(list)) :
if list[i] != value :
return 0
return 1
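# getDesFootLinearAcc: picks a stage-dependent reference position for the given
# foot body (tracking the reference motion relative to the CM, or pinning the
# foot near the ground plane), then converts it into a desired linear
# acceleration through the PD tracker yct.getDesiredAcceleration(); the chosen
# reference position is returned alongside the acceleration.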
def getDesFootLinearAcc(refModel, controlModel, footIndex, ModelOffset, CM_ref, CM, Kk, Dk) :
desLinearAcc = [0,0,0]
refPos = refModel.getBodyPositionGlobal(footIndex)
curPos = controlModel.getBodyPositionGlobal(footIndex)
refVecL = refPos - CM_ref
if stage == MOTION_TRACKING:
refPos = CM + refVecL
#refPos[1] += 0.05
#refPos[0] -= 0.05
elif stage == POWERFUL_BALANCING:
refPos = copy.copy(curPos)
refPos[1] = 0
elif stage == DYNAMIC_BALANCING:
refPos = CM + refVecL
else:
refPos[0] += ModelOffset[0]
refVel = refModel.getBodyVelocityGlobal(footIndex)
curVel = controlModel.getBodyVelocityGlobal(footIndex)
#refAcc = (0,0,0)
refAcc = refModel.getBodyAccelerationGlobal(footIndex)
if stage != MOTION_TRACKING:
refPos[1] = 0.032
#refPos[1] = 0.0416
if refPos[1] < 0.0 :
refPos[1] = 0.032
#refPos[1] = 0.0416
desLinearAcc = yct.getDesiredAcceleration(refPos, curPos, refVel, curVel, refAcc, Kk, Dk)
return desLinearAcc, refPos
def getDesFootAngularAcc(refModel, controlModel, footIndex, Kk, Dk) :
desAngularAcc = [0,0,0]
curAng = [controlModel.getBodyOrientationGlobal(footIndex)]
refAngVel = refModel.getBodyAngVelocityGlobal(footIndex)
curAngVel = controlModel.getBodyAngVelocityGlobal(footIndex)
refAngAcc = (0,0,0)
curAngY = np.dot(curAng, np.array([0,1,0]))
refAngY = np.array([0,1,0])
if stage == MOTION_TRACKING+10:
refAng = [refModel.getBodyOrientationGlobal(footIndex)]
refAngY2 = np.dot(refAng, np.array([0,1,0]))
refAngY = refAngY2[0]
aL = mm.logSO3(mm.getSO3FromVectors(curAngY[0], refAngY))
desAngularAcc = Kk*aL + Dk*(refAngVel-curAngVel)
return desAngularAcc
def main():
np.set_printoptions(precision=4, linewidth=200)
# motion, mcfg, wcfg, stepsPerFrame, config = mit.create_vchain_5()
motion, mcfg, wcfg, stepsPerFrame, config = mit.create_biped()
vpWorld = cvw.VpWorld(wcfg)
motionModel = cvm.VpMotionModel(vpWorld, motion[0], mcfg)
motionModel.recordVelByFiniteDiff()
controlModel = cvm.VpControlModel(vpWorld, motion[0], mcfg)
vpWorld.initialize()
controlModel.initializeHybridDynamics()
#ModelOffset = (1.5, -0.01, 0)
ModelOffset = (1.5, 0.0, 0)
controlModel.translateByOffset(ModelOffset)
totalDOF = controlModel.getTotalDOF()
DOFs = controlModel.getDOFs()
# parameter
Kt = config['Kt']; Dt = config['Dt'] # tracking gain
Kl = config['Kl']; Dl = config['Dl'] # linear balance gain
Kh = config['Kh']; Dh = config['Dh'] # angular balance gain
Ks = config['Ks']; Ds = config['Ds'] # penalty force spring gain
Bt = config['Bt']
Bl = config['Bl']
Bh = config['Bh']
w = mot.getTrackingWeight(DOFs, motion[0].skeleton, config['weightMap'])
w2 = mot.getTrackingWeight(DOFs, motion[0].skeleton, config['weightMap2'])
#w_IK = mot.getTrackingWeight(DOFs, motion[0].skeleton, config['IKweightMap'])
supL = motion[0].skeleton.getJointIndex(config['supLink'])
supR = motion[0].skeleton.getJointIndex(config['supLink2'])
rootB = motion[0].skeleton.getJointIndex(config['root'])
selectedBody = motion[0].skeleton.getJointIndex(config['end'])
#constBody = motion[0].skeleton.getJointIndex('LeftForeArm')
constBody = motion[0].skeleton.getJointIndex(config['const'])
# jacobian
Jsup = yjc.makeEmptyJacobian(DOFs, 1)
dJsup = Jsup.copy()
JsupPre = Jsup.copy()
Jsys = yjc.makeEmptyJacobian(DOFs, controlModel.getBodyNum())
dJsys = Jsys.copy()
JsysPre = Jsys.copy()
Jconst = yjc.makeEmptyJacobian(DOFs, 1)
dJconst = Jconst.copy()
###############
footPartNum = config['FootPartNum']
indexFootL = [None]*footPartNum
indexFootR = [None]*footPartNum
jFootL = [None]*footPartNum
dJFootL = [None]*footPartNum
jFootR = [None]*footPartNum
dJFootR = [None]*footPartNum
jointMasksFootL = [None]*footPartNum
jointMasksFootR = [None]*footPartNum
jAngFootL = [None]*footPartNum
dJAngFootL = [None]*footPartNum
jAngFootR = [None]*footPartNum
dJAngFootR = [None]*footPartNum
for i in range(footPartNum) :
jFootL[i] = yjc.makeEmptyJacobian(DOFs, 1)
dJFootL[i] = jFootL[i].copy()
jFootR[i] = yjc.makeEmptyJacobian(DOFs, 1)
dJFootR[i] = jFootR[i].copy()
jAngFootL[i] = yjc.makeEmptyJacobian(DOFs, 1, False)
dJAngFootL[i] = jAngFootL[i].copy()
jAngFootR[i] = yjc.makeEmptyJacobian(DOFs, 1, False)
dJAngFootR[i] = jAngFootR[i].copy()
indexFootL[i] = motion[0].skeleton.getJointIndex(config['FootLPart'][i])
indexFootR[i] = motion[0].skeleton.getJointIndex(config['FootRPart'][i])
jointMasksFootL[i] = [yjc.getLinkJointMask(motion[0].skeleton, indexFootL[i])]
jointMasksFootR[i] = [yjc.getLinkJointMask(motion[0].skeleton, indexFootR[i])]
constJointMasks = [yjc.getLinkJointMask(motion[0].skeleton, constBody)]
allLinkJointMasks = yjc.getAllLinkJointMasks(motion[0].skeleton)
'''
maskArray = [foreSupLJointMasks, foreSupRJointMasks, rearSupLJointMasks, rearSupRJointMasks]
parentArray = [supL, supR, supL, supR]
effectorArray = [foreSupL, foreSupR, rearSupL, rearSupR]
for j in range(4) :
for i in range(len(foreSupLJointMasks)) :
if i == parentArray[j] or i == effectorArray[j] :
maskArray[j][0][i] = 1
else :
maskArray[j][0][i] = 0
'''
# momentum matrix
linkMasses = controlModel.getBodyMasses()
totalMass = controlModel.getTotalMass()
TO = ymt.make_TO(linkMasses)
dTO = ymt.make_dTO(len(linkMasses))
# optimization
problem = yac.LSE(totalDOF, 6)
a_sup = (0,0,0, 0,0,0) #L
#a_sup2 = (0,0,0, 0,0,0)#R
a_sup2 = [0,0,0, 0,0,0]#R
a_sup_2 = [0,0,0, 0,0,0, 0,0,0, 0,0,0]
CP_old = [mm.v3(0.,0.,0.)]
# penalty method
bodyIDsToCheck = range(vpWorld.getBodyNum())
mus = [1.]*len(bodyIDsToCheck)
# flat data structure
ddth_des_flat = ype.makeFlatList(totalDOF)
dth_flat = ype.makeFlatList(totalDOF)
ddth_sol = ype.makeNestedList(DOFs)
d_th_IK = ype.makeNestedList(DOFs)
d_th_IK_L = ype.makeNestedList(DOFs)
d_th_IK_R = ype.makeNestedList(DOFs)
dd_th_IK = ype.makeNestedList(DOFs)
dd_th_IK_flat = ype.makeFlatList(totalDOF)
d_th_IK_flat = ype.makeFlatList(totalDOF)
ddth_c_flat = ype.makeFlatList(totalDOF)
# viewer
rd_footCenter = [None]
rd_footCenter_ref = [None]
rd_footCenterL = [None]
rd_footCenterR = [None]
rd_CM_plane = [None]
rd_CM_plane_ref = [None]
rd_CM_ref = [None]
rd_CM = [None]
rd_CM_vec = [None]
rd_CM_ref_vec = [None]
rd_CP = [None]
rd_CP_des = [None]
rd_dL_des_plane = [None]
rd_dH_des = [None]
rd_grf_des = [None]
rd_exf_des = [None]
rd_root_des = [None]
rd_soft_const_vec = [None]
rd_root = [None]
rd_footL_vec = [None]
rd_footR_vec = [None]
rd_CMP = [None]
rd_DesPosL = [None]
rd_DesPosR = [None]
rd_DesForePosL = [None]
rd_DesForePosR = [None]
rd_DesRearPosL = [None]
rd_DesRearPosR = [None]
rootPos = [None]
selectedBodyId = [selectedBody]
extraForce = [None]
applyedExtraForce = [None]
applyedExtraForce[0] = [0,0,0]
normalVector = [[0,2,0]]
viewer = ysv.SimpleViewer()
# viewer.record(False)
# viewer.doc.addRenderer('motion', yr.JointMotionRenderer(motion, (0,255,255), yr.LINK_BONE))
viewer.doc.addObject('motion', motion)
viewer.doc.addRenderer('motionModel', cvr.VpModelRenderer(motionModel, (150,150,255), yr.POLYGON_FILL))
viewer.doc.addRenderer('controlModel', cvr.VpModelRenderer(controlModel, (255,240,255), yr.POLYGON_FILL))
viewer.doc.addRenderer('rd_footCenter', yr.PointsRenderer(rd_footCenter))
#viewer.doc.addRenderer('rd_footCenterL', yr.PointsRenderer(rd_footCenterL))
#viewer.doc.addRenderer('rd_footCenterR', yr.PointsRenderer(rd_footCenterR))
#viewer.doc.addRenderer('rd_CM_plane', yr.PointsRenderer(rd_CM_plane, (255,255,0)))
viewer.doc.addRenderer('rd_CM', yr.PointsRenderer(rd_CM, (255,255,0)))
viewer.doc.addRenderer('rd_CP_des', yr.PointsRenderer(rd_CP_des, (0,255,0)))
#viewer.doc.addRenderer('rd_CP_des', yr.PointsRenderer(rd_CP_des, (255,0,255)))
# viewer.doc.addRenderer('rd_dL_des_plane', yr.VectorsRenderer(rd_dL_des_plane, rd_CM, (255,255,0)))
# viewer.doc.addRenderer('rd_dH_des', yr.VectorsRenderer(rd_dH_des, rd_CM, (0,255,0)))
viewer.doc.addRenderer('rd_grf_des', yr.ForcesRenderer(rd_grf_des, rd_CP, (0,255,255), .001))
viewer.doc.addRenderer('rd_exf_des', yr.ForcesRenderer(rd_exf_des, rd_root_des, (0,255,0), .009, 0.05))
#viewer.doc.addRenderer('rd_CMP', yr.PointsRenderer(rd_CMP, (0,0,255)))
viewer.doc.addRenderer('rd_DesPosL', yr.PointsRenderer(rd_DesPosL, (0,0,255)))
viewer.doc.addRenderer('rd_DesPosR', yr.PointsRenderer(rd_DesPosR, (0,100,255)))
viewer.doc.addRenderer('rd_DesForePosL', yr.PointsRenderer(rd_DesForePosL, (150,0,200)))
viewer.doc.addRenderer('rd_DesForePosR', yr.PointsRenderer(rd_DesForePosR, (150,0,250)))
viewer.doc.addRenderer('rd_DesRearPosL', yr.PointsRenderer(rd_DesRearPosL, (0,150,200)))
viewer.doc.addRenderer('rd_DesRearPosR', yr.PointsRenderer(rd_DesRearPosR, (0,150,250)))
#viewer.doc.addRenderer('softConstraint', yr.VectorsRenderer(rd_soft_const_vec, rd_CMP, (255,0,0), 3))
viewer.doc.addRenderer('rd_footLVec', yr.VectorsRenderer(rd_footL_vec, rd_footCenterL, (255,0,0), 3))
viewer.doc.addRenderer('rd_footRVec', yr.VectorsRenderer(rd_footR_vec, rd_footCenterL, (255,255,0), 3))
#viewer.doc.addRenderer('rd_footCenter_ref', yr.PointsRenderer(rd_footCenter_ref))
viewer.doc.addRenderer('rd_CM_plane_ref', yr.PointsRenderer(rd_CM_plane_ref, (255,255,0)))
viewer.doc.addRenderer('rd_refNormalVec', yr.VectorsRenderer(normalVector, rd_footCenter_ref, (255,0,0), 3))
viewer.doc.addRenderer('rd_refCMVec', yr.VectorsRenderer(rd_CM_ref_vec, rd_footCenter_ref, (255,0,255), 3))
viewer.doc.addRenderer('rd_curNormalVec', yr.VectorsRenderer(normalVector, rd_footCenter, (255,0,0), 3))
viewer.doc.addRenderer('rd_CMVec', yr.VectorsRenderer(rd_CM_vec, rd_footCenter, (255,0,255), 3))
stage = STATIC_BALANCING
def simulateCallback(frame):
global g_initFlag
global forceShowFrame
global forceApplyFrame
global JsysPre
global JsupPreL
global JsupPreR
global JsupPre
global softConstPoint
global stage
motionModel.update(motion[frame])
Kt, Kk, Kl, Kh, Ksc, Bt, Bl, Bh, Bsc = viewer.GetParam()
Dt = 2*(Kt**.5)
Dk = 2*(Kk**.5)
Dl = 2*(Kl**.5)
Dh = 2*(Kh**.5)
Dsc = 2*(Ksc**.5)
if Bsc == 0.0 :
viewer.doc.showRenderer('softConstraint', False)
viewer.motionViewWnd.update(1, viewer.doc)
else:
viewer.doc.showRenderer('softConstraint', True)
renderer1 = viewer.doc.getRenderer('softConstraint')
renderer1.rc.setLineWidth(0.1+Bsc*3)
viewer.motionViewWnd.update(1, viewer.doc)
# tracking
th_r = motion.getDOFPositions(frame)
th = controlModel.getDOFPositions()
dth_r = motion.getDOFVelocities(frame)
dth = controlModel.getDOFVelocities()
ddth_r = motion.getDOFAccelerations(frame)
ddth_des = yct.getDesiredDOFAccelerations(th_r, th, dth_r, dth, ddth_r, Kt, Dt)
ddth_c = controlModel.getDOFAccelerations()
ype.flatten(ddth_des, ddth_des_flat)
ype.flatten(dth, dth_flat)
ype.flatten(ddth_c, ddth_c_flat)
# jacobian
refFootL = motionModel.getBodyPositionGlobal(supL)
refFootR = motionModel.getBodyPositionGlobal(supR)
positionFootL = [None]*footPartNum
positionFootR = [None]*footPartNum
for i in range(footPartNum):
positionFootL[i] = controlModel.getBodyPositionGlobal(indexFootL[i])
positionFootR[i] = controlModel.getBodyPositionGlobal(indexFootR[i])
linkPositions = controlModel.getBodyPositionsGlobal()
linkVelocities = controlModel.getBodyVelocitiesGlobal()
linkAngVelocities = controlModel.getBodyAngVelocitiesGlobal()
linkInertias = controlModel.getBodyInertiasGlobal()
jointPositions = controlModel.getJointPositionsGlobal()
jointAxeses = controlModel.getDOFAxeses()
CM = yrp.getCM(linkPositions, linkMasses, totalMass)
dCM = yrp.getCM(linkVelocities, linkMasses, totalMass)
CM_plane = copy.copy(CM); CM_plane[1]=0.
dCM_plane = copy.copy(dCM); dCM_plane[1]=0.
linkPositions_ref = motionModel.getBodyPositionsGlobal()
CM_ref = yrp.getCM(linkPositions_ref, linkMasses, totalMass)
CM_plane_ref = copy.copy(CM_ref)
CM_plane_ref[1] = 0.
P = ymt.getPureInertiaMatrix(TO, linkMasses, linkPositions, CM, linkInertias)
dP = ymt.getPureInertiaMatrixDerivative(dTO, linkMasses, linkVelocities, dCM, linkAngVelocities, linkInertias)
yjc.computeJacobian2(Jsys, DOFs, jointPositions, jointAxeses, linkPositions, allLinkJointMasks)
yjc.computeJacobianDerivative2(dJsys, DOFs, jointPositions, jointAxeses, linkAngVelocities, linkPositions, allLinkJointMasks)
if g_initFlag == 0:
softConstPoint = controlModel.getBodyPositionGlobal(constBody)
softConstPoint[1] -= .3
g_initFlag = 1
yjc.computeJacobian2(jFootL[0], DOFs, jointPositions, jointAxeses, [positionFootL[0]], jointMasksFootL[0])
yjc.computeJacobianDerivative2(dJFootL[0], DOFs, jointPositions, jointAxeses, linkAngVelocities, [positionFootL[0]], jointMasksFootL[0], False)
yjc.computeJacobian2(jFootR[0], DOFs, jointPositions, jointAxeses, [positionFootR[0]], jointMasksFootR[0])
yjc.computeJacobianDerivative2(dJFootR[0], DOFs, jointPositions, jointAxeses, linkAngVelocities, [positionFootR[0]], jointMasksFootR[0], False)
yjc.computeAngJacobian2(jAngFootL[0], DOFs, jointPositions, jointAxeses, [positionFootL[0]], jointMasksFootL[0])
yjc.computeAngJacobianDerivative2(dJAngFootL[0], DOFs, jointPositions, jointAxeses, linkAngVelocities, [positionFootL[0]], jointMasksFootL[0], False)
yjc.computeAngJacobian2(jAngFootR[0], DOFs, jointPositions, jointAxeses, [positionFootR[0]], jointMasksFootR[0])
yjc.computeAngJacobianDerivative2(dJAngFootR[0], DOFs, jointPositions, jointAxeses, linkAngVelocities, [positionFootR[0]], jointMasksFootR[0], False)
bodyIDs, contactPositions, contactPositionLocals, contactForces = vpWorld.calcPenaltyForce(bodyIDsToCheck, mus, Ks, Ds)
CP = yrp.getCP(contactPositions, contactForces)
for i in range(len(bodyIDsToCheck)) :
controlModel.SetBodyColor(bodyIDsToCheck[i], 0, 0, 0)
contactFlagFootL = [0]*footPartNum
contactFlagFootR = [0]*footPartNum
for i in range(len(bodyIDs)) :
controlModel.SetBodyColor(bodyIDs[i], 255, 105, 105)
index = controlModel.id2index(bodyIDs[i])
for j in range(len(indexFootL)):
if index == indexFootL[j]:
contactFlagFootL[j] = 1
if j != 0:
yjc.computeJacobian2(jFootL[j], DOFs, jointPositions, jointAxeses, [positionFootL[j]], jointMasksFootL[j])
yjc.computeJacobianDerivative2(dJFootL[j], DOFs, jointPositions, jointAxeses, linkAngVelocities, [positionFootL[j]], jointMasksFootL[j], False)
break
for j in range(len(indexFootR)):
if index == indexFootR[j]:
contactFlagFootR[j] = 1
if j != 0:
yjc.computeJacobian2(jFootR[j], DOFs, jointPositions, jointAxeses, [positionFootR[j]], jointMasksFootR[j])
yjc.computeJacobianDerivative2(dJFootR[j], DOFs, jointPositions, jointAxeses, linkAngVelocities, [positionFootR[j]], jointMasksFootR[j], False)
break
for j in range(len(indexFootL)):
yjc.computeAngJacobian2(jAngFootL[j], DOFs, jointPositions, jointAxeses, [positionFootL[j]], jointMasksFootL[j])
yjc.computeAngJacobianDerivative2(dJAngFootL[j], DOFs, jointPositions, jointAxeses, linkAngVelocities, [positionFootL[j]], jointMasksFootL[j], False)
yjc.computeAngJacobian2(jAngFootR[j], DOFs, jointPositions, jointAxeses, [positionFootR[j]], jointMasksFootR[j])
yjc.computeAngJacobianDerivative2(dJAngFootR[j], DOFs, jointPositions, jointAxeses, linkAngVelocities, [positionFootR[j]], jointMasksFootR[j], False)
'''
if frame < 100 :
if stage == POWERFUL_BALANCING:
#if stage != MOTION_TRACKING:
footCenterL = controlModel.getBodyPositionGlobal(supL)
footCenterR = controlModel.getBodyPositionGlobal(supR)
else:
footCenterL = controlModel.getBodyPositionGlobal(indexFootL[1])
footCenterR = controlModel.getBodyPositionGlobal(indexFootR[1])
else:
'''
if footPartNum == 1:
footCenterL = controlModel.getBodyPositionGlobal(supL)
footCenterR = controlModel.getBodyPositionGlobal(supR)
else:
if ((contactFlagFootL[3] == 1 or contactFlagFootL[4] == 1) and contactFlagFootL[0] == 0) or ((contactFlagFootR[3] == 1 or contactFlagFootR[4] == 1) and contactFlagFootR[0] == 0):
footCenterL = (controlModel.getBodyPositionGlobal(supL) + controlModel.getBodyPositionGlobal(indexFootL[1]))/2.0
footCenterR = (controlModel.getBodyPositionGlobal(supR) + controlModel.getBodyPositionGlobal(indexFootR[1]))/2.0
#footCenterL = controlModel.getBodyPositionGlobal(indexFootL[1])
#footCenterR = controlModel.getBodyPositionGlobal(indexFootR[1])
else :
footCenterL = (controlModel.getBodyPositionGlobal(supL) + controlModel.getBodyPositionGlobal(indexFootL[1]))/2.0
footCenterR = (controlModel.getBodyPositionGlobal(supR) + controlModel.getBodyPositionGlobal(indexFootR[1]))/2.0
#footCenterL = controlModel.getBodyPositionGlobal(indexFootL[1])
#footCenterR = controlModel.getBodyPositionGlobal(indexFootR[1])
footCenter = footCenterL + (footCenterR - footCenterL)/2.0
footCenter[1] = 0.
footCenter_ref = refFootL + (refFootR - refFootL)/2.0
#footCenter_ref[1] = 0.
#
if checkAll(contactFlagFootL, 0) == 1 and checkAll(contactFlagFootR, 0) == 1:
footCenter = footCenter
elif checkAll(contactFlagFootL, 0) == 1 :
footCenter = footCenterR
elif checkAll(contactFlagFootR, 0) == 1 :
footCenter = footCenterL
footCenter[1] = 0.
desForeSupLAcc = [0,0,0]
desForeSupRAcc = [0,0,0]
totalNormalForce = [0,0,0]
for i in range(len(contactForces)):
totalNormalForce[0] += contactForces[i][0]
totalNormalForce[1] += contactForces[i][1]
totalNormalForce[2] += contactForces[i][2]
# linear momentum
CM_ref_plane = footCenter
dL_des_plane = Kl*totalMass*(CM_ref_plane - CM_plane) - Dl*totalMass*dCM_plane
# angular momentum
CP_ref = footCenter
timeStep = 30.
if CP_old[0] is None or CP is None:
dCP = None
else:
dCP = (CP - CP_old[0])/(1/timeStep)
CP_old[0] = CP
if CP is not None and dCP is not None:
ddCP_des = Kh*(CP_ref - CP) - Dh*(dCP)
CP_des = CP + dCP*(1/timeStep) + .5*ddCP_des*((1/timeStep)**2)
dH_des = np.cross((CP_des - CM), (dL_des_plane + totalMass*mm.s2v(wcfg.gravity)))
#dH_des = np.cross((CP_des - CM_plane), (dL_des_plane + totalMass*mm.s2v(wcfg.gravity)))
else:
dH_des = None
# momentum matrix
RS = np.dot(P, Jsys)
R, S = np.vsplit(RS, 2)
rs = np.dot((np.dot(dP, Jsys) + np.dot(P, dJsys)), dth_flat)
r_bias, s_bias = np.hsplit(rs, 2)
##############################
# soft point constraint
P_des = softConstPoint
P_cur = controlModel.getBodyPositionGlobal(constBody)
dP_des = [0, 0, 0]
dP_cur = controlModel.getBodyVelocityGlobal(constBody)
ddP_des1 = Ksc*(P_des - P_cur) - Dsc*(dP_cur - dP_des)
r = P_des - P_cur
I = np.vstack(([1,0,0],[0,1,0],[0,0,1]))
Z = np.hstack((I, mm.getCrossMatrixForm(-r)))
yjc.computeJacobian2(Jconst, DOFs, jointPositions, jointAxeses, [softConstPoint], constJointMasks)
JL, JA = np.vsplit(Jconst, 2)
Q1 = np.dot(Z, Jconst)
q1 = np.dot(JA, dth_flat)
q2 = np.dot(mm.getCrossMatrixForm(q1), np.dot(mm.getCrossMatrixForm(q1), r))
yjc.computeJacobianDerivative2(dJconst, DOFs, jointPositions, jointAxeses, linkAngVelocities, [softConstPoint], constJointMasks, False)
q_bias1 = np.dot(np.dot(Z, dJconst), dth_flat) + q2
##############################
flagContact = True
if dH_des is None or np.any(np.isnan(dH_des)):
flagContact = False
viewer.doc.showRenderer('rd_grf_des', False)
viewer.motionViewWnd.update(1, viewer.doc)
else:
viewer.doc.showRenderer('rd_grf_des', True)
viewer.motionViewWnd.update(1, viewer.doc)
'''
0 : initial
1 : contact
2 : fly
3 : landing
'''
#MOTION = FORWARD_JUMP
if mit.MOTION == mit.FORWARD_JUMP :
frame_index = [136, 100]
#frame_index = [100000, 100000]
elif mit.MOTION == mit.TAEKWONDO:
frame_index = [130, 100]
#frame_index = [100000, 100000]
elif mit.MOTION == mit.TAEKWONDO2:
frame_index = [130+40, 100]
else :
frame_index = [1000000, 1000000]
#MOTION = TAEKWONDO
#frame_index = [135, 100]
'''
if frame > 300 :
if stage != DYNAMIC_BALANCING:
print("#", frame,"-DYNAMIC_BALANCING")
stage = DYNAMIC_BALANCING
Kk = Kk*1
Dk = 2*(Kk**.5)
'''
if frame > frame_index[0] :
if stage != POWERFUL_BALANCING:
print("#", frame,"-POWERFUL_BALANCING")
stage = POWERFUL_BALANCING
Kk = Kk*2
Dk = 2*(Kk**.5)
elif frame > frame_index[1]:
if stage != MOTION_TRACKING:
print("#", frame,"-MOTION_TRACKING")
stage = MOTION_TRACKING
trackingW = w
if stage == MOTION_TRACKING:
trackingW = w2
Bt = Bt*2
# optimization
mot.addTrackingTerms(problem, totalDOF, Bt, trackingW, ddth_des_flat)
mot.addSoftPointConstraintTerms(problem, totalDOF, Bsc, ddP_des1, Q1, q_bias1)
if flagContact == True:
if stage != MOTION_TRACKING+10:
mot.addLinearTerms(problem, totalDOF, Bl, dL_des_plane, R, r_bias)
mot.addAngularTerms(problem, totalDOF, Bh, dH_des, S, s_bias)
a_sup_2 = [None]
Jsup_2 = [None]
dJsup_2 = [None]
##############################
# Hard constraint
if stage != MOTION_TRACKING:
Kk2 = Kk * 2.0
else :
Kk2 = Kk * 1.5
Dk2 = 2*(Kk2**.5)
'''
desLinearAccL, desPosL = getDesFootLinearAcc(motionModel, controlModel, supL, ModelOffset, CM_ref, CM, Kk2, Dk2)
desLinearAccR, desPosR = getDesFootLinearAcc(motionModel, controlModel, supR, ModelOffset, CM_ref, CM, Kk2, Dk2)
desAngularAccL = getDesFootAngularAcc(motionModel, controlModel, supL, Kk2, Dk2)
desAngularAccR = getDesFootAngularAcc(motionModel, controlModel, supR, Kk2, Dk2)
'''
if stage != MOTION_TRACKING:
idx = 0 #LEFT/RIGHT_TOES
desLinearAccL, desPosL = getDesFootLinearAcc(motionModel, controlModel, indexFootL[idx], ModelOffset, CM_ref, CM, Kk2, Dk2)
desLinearAccR, desPosR = getDesFootLinearAcc(motionModel, controlModel, indexFootR[idx], ModelOffset, CM_ref, CM, Kk2, Dk2)
desAngularAccL = getDesFootAngularAcc(motionModel, controlModel, indexFootL[idx], Kk2, Dk2)
desAngularAccR = getDesFootAngularAcc(motionModel, controlModel, indexFootR[idx], Kk2, Dk2)
a_sup_2 = np.hstack(( np.hstack((desLinearAccL, desAngularAccL)), np.hstack((desLinearAccR, desAngularAccR)) ))
Jsup_2 = np.vstack((jFootL[idx], jFootR[idx]))
dJsup_2 = np.vstack((dJFootL[idx], dJFootR[idx]))
rd_DesPosL[0] = desPosL.copy()
rd_DesPosR[0] = desPosR.copy()
else:
if footPartNum == 5:
idx = 3
desAngularAccL = getDesFootAngularAcc(motionModel, controlModel, indexFootL[idx], Kk2, Dk2)
desAngularAccR = getDesFootAngularAcc(motionModel, controlModel, indexFootR[idx], Kk2, Dk2)
a_sup_2 = np.hstack(( desAngularAccL, desAngularAccR ))
Jsup_2 = np.vstack((jAngFootL[idx], jAngFootR[idx]))
dJsup_2 = np.vstack((dJAngFootL[idx], dJAngFootR[idx]))
else:
idx = 1
desAngularAccL = getDesFootAngularAcc(motionModel, controlModel, indexFootL[idx], Kk2, Dk2)
desAngularAccR = getDesFootAngularAcc(motionModel, controlModel, indexFootR[idx], Kk2, Dk2)
a_sup_2 = np.hstack(( desAngularAccL, desAngularAccR ))
Jsup_2 = np.vstack((jAngFootL[idx], jAngFootR[idx]))
dJsup_2 = np.vstack((dJAngFootL[idx], dJAngFootR[idx]))
##############################
##############################
# Additional constraint
if stage != MOTION_TRACKING:
#Kk2 = Kk * 2.5
Kk2 = Kk * 2.5
Dk2 = 2*(Kk2**.5)
desForePosL = [0,0,0]
desForePosR = [0,0,0]
desRearPosL = [0,0,0]
desRearPosR = [0,0,0]
for i in range(1, footPartNum) :
if contactFlagFootL[i] == 1:
desLinearAccL, desForePosL = getDesFootLinearAcc(motionModel, controlModel, indexFootL[i], ModelOffset, CM_ref, CM, Kk2, Dk2)
desAngularAccL = getDesFootAngularAcc(motionModel, controlModel, indexFootL[i], Kk2, Dk2)
a_sup_2 = np.hstack(( a_sup_2, np.hstack((desLinearAccL, desAngularAccL)) ))
Jsup_2 = np.vstack(( Jsup_2, jFootL[i] ))
dJsup_2 = np.vstack(( dJsup_2, dJFootL[i] ))
if contactFlagFootR[i] == 1:
desLinearAccR, desForePosR = getDesFootLinearAcc(motionModel, controlModel, indexFootR[i], ModelOffset, CM_ref, CM, Kk2, Dk2)
desAngularAccR = getDesFootAngularAcc(motionModel, controlModel, indexFootR[i], Kk2, Dk2)
a_sup_2 = np.hstack(( a_sup_2, np.hstack((desLinearAccR, desAngularAccR)) ))
Jsup_2 = np.vstack(( Jsup_2, jFootR[i] ))
dJsup_2 = np.vstack(( dJsup_2, dJFootR[i] ))
rd_DesForePosL[0] = desForePosL
rd_DesForePosR[0] = desForePosR
rd_DesRearPosL[0] = desRearPosL
rd_DesRearPosR[0] = desRearPosR
##############################
mot.setConstraint(problem, totalDOF, Jsup_2, dJsup_2, dth_flat, a_sup_2)
r = problem.solve()
problem.clear()
ype.nested(r['x'], ddth_sol)
rootPos[0] = controlModel.getBodyPositionGlobal(selectedBody)
localPos = [[0, 0, 0]]
for i in range(stepsPerFrame):
# apply penalty force
bodyIDs, contactPositions, contactPositionLocals, contactForces = vpWorld.calcPenaltyForce(bodyIDsToCheck, mus, Ks, Ds)
vpWorld.applyPenaltyForce(bodyIDs, contactPositionLocals, contactForces)
extraForce[0] = viewer.GetForce()
if (extraForce[0][0] != 0 or extraForce[0][1] != 0 or extraForce[0][2] != 0) :
forceApplyFrame += 1
#vpWorld.applyPenaltyForce(selectedBodyId, localPos, extraForce)
controlModel.applyBodyForceGlobal(selectedBody, extraForce[0])
applyedExtraForce[0] = extraForce[0]
if forceApplyFrame*wcfg.timeStep > 0.1:
viewer.ResetForce()
forceApplyFrame = 0
controlModel.setDOFAccelerations(ddth_sol)
controlModel.solveHybridDynamics()
'''
extraForce[0] = viewer.GetForce()
if (extraForce[0][0] != 0 or extraForce[0][1] != 0 or extraForce[0][2] != 0) :
forceApplyFrame += 1
vpWorld.applyPenaltyForce(selectedBodyId, localPos, extraForce)
applyedExtraForce[0] = extraForce[0]
if forceApplyFrame*wcfg.timeStep > 0.1:
viewer.ResetForce()
forceApplyFrame = 0
'''
vpWorld.step()
# rendering
rd_footCenter[0] = footCenter
rd_CM[0] = CM.copy()
rd_CM_plane[0] = CM_plane.copy()
rd_footCenter_ref[0] = footCenter_ref
rd_CM_plane_ref[0] = CM_ref.copy()
rd_CM_ref[0] = CM_ref.copy()
rd_CM_ref_vec[0] = (CM_ref - footCenter_ref)*3.
rd_CM_vec[0] = (CM - footCenter)*3
#rd_CM_plane[0][1] = 0.
if CP is not None and dCP is not None:
rd_CP[0] = CP
rd_CP_des[0] = CP_des
rd_dL_des_plane[0] = dL_des_plane
rd_dH_des[0] = dH_des
rd_grf_des[0] = totalNormalForce - totalMass*mm.s2v(wcfg.gravity)#dL_des_plane - totalMass*mm.s2v(wcfg.gravity)
rd_exf_des[0] = applyedExtraForce[0]
rd_root_des[0] = rootPos[0]
rd_CMP[0] = softConstPoint
rd_soft_const_vec[0] = controlModel.getBodyPositionGlobal(constBody)-softConstPoint
if (forceApplyFrame == 0) :
applyedExtraForce[0] = [0, 0, 0]
viewer.setSimulateCallback(simulateCallback)
viewer.startTimer(1/60.)
viewer.show()
Fl.run()
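# Hedged side note (illustrative addition, not used by the controller above):
# simulateCallback derives each damping gain from its stiffness gain as
# D = 2*sqrt(K), which critically damps a unit-mass tracking error
# e'' + D*e' + K*e = 0, so the tracked quantity settles without overshoot.
def _critically_damped_gain(K):
    # mirrors the Dt/Dk/Dl/Dh/Dsc computations inside simulateCallback
    return 2.0 * (K ** 0.5)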
main() |
the-stack_0_10640 | # SPDX-License-Identifier: BSD-3-Clause
#
# Copyright (c) 2021 Vít Labuda. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
# following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following
# disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Class diagram:
# A A
# / \ |
# / \ |
# B C B
# \ / |
# \ / |
# D1 D2
class A:
def __init__(self):
print("A")
# In the case of the D1 class, this method gets called twice (because both B and C call A's __init__).
class B(A):
def __init__(self, x):
print("B ({})".format(x))
A.__init__(self)
class C(A):
def __init__(self, x):
print("C ({})".format(x))
A.__init__(self)
class D1(B, C):
def __init__(self, x):
print("D1 ({})".format(x))
B.__init__(self, x)
C.__init__(self, x)
class D2(B):
def __init__(self, x):
print("D2 ({})".format(x))
B.__init__(self, x)
if __name__ == '__main__':
print(D1.__mro__)
D1("x")
print()
print(D2.__mro__)
D2("x")
# Output:
# (<class '__main__.D1'>, <class '__main__.B'>, <class '__main__.C'>, <class '__main__.A'>, <class 'object'>)
# D1 (x)
# B (x)
# A
# C (x)
# A
#
# (<class '__main__.D2'>, <class '__main__.B'>, <class '__main__.A'>, <class 'object'>)
# D2 (x)
# B (x)
# A
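# A hedged variant (illustrative sketch, not part of the example above): with
# cooperative multiple inheritance -- every class delegating via super() instead
# of naming its base explicitly -- the diamond runs A's initializer only once,
# because Python walks the MRO exactly once.
class A2:
    def __init__(self, x):
        print("A2 ({})".format(x))

class B2(A2):
    def __init__(self, x):
        print("B2 ({})".format(x))
        super().__init__(x)

class C2(A2):
    def __init__(self, x):
        print("C2 ({})".format(x))
        super().__init__(x)

class D3(B2, C2):
    def __init__(self, x):
        print("D3 ({})".format(x))
        super().__init__(x)

# D3("x") would print D3, B2, C2, A2 -- each exactly once, following D3.__mro__.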
|
the-stack_0_10641 | import unittest
from django.contrib.gis.gdal import HAS_GDAL
from django.contrib.gis.tests.utils import (
SpatialRefSys, oracle, postgis, spatialite,
)
from django.db import connection
from django.test import skipUnlessDBFeature
from django.utils import six
test_srs = ({
'srid': 4326,
'auth_name': ('EPSG', True),
'auth_srid': 4326,
# Only the beginning, because there are differences depending on installed libs
'srtext': 'GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84"',
# +ellps=WGS84 has been removed in the 4326 proj string in proj-4.8
'proj4_re': r'\+proj=longlat (\+ellps=WGS84 )?(\+datum=WGS84 |\+towgs84=0,0,0,0,0,0,0 )\+no_defs ',
'spheroid': 'WGS 84', 'name': 'WGS 84',
'geographic': True, 'projected': False, 'spatialite': True,
# From proj's "cs2cs -le" and Wikipedia (semi-minor only)
'ellipsoid': (6378137.0, 6356752.3, 298.257223563),
'eprec': (1, 1, 9),
}, {
'srid': 32140,
'auth_name': ('EPSG', False),
'auth_srid': 32140,
'srtext': (
'PROJCS["NAD83 / Texas South Central",GEOGCS["NAD83",'
'DATUM["North_American_Datum_1983",SPHEROID["GRS 1980"'
),
'proj4_re': r'\+proj=lcc \+lat_1=30.28333333333333 \+lat_2=28.38333333333333 \+lat_0=27.83333333333333 '
r'\+lon_0=-99 \+x_0=600000 \+y_0=4000000 (\+ellps=GRS80 )?'
r'(\+datum=NAD83 |\+towgs84=0,0,0,0,0,0,0 )?\+units=m \+no_defs ',
'spheroid': 'GRS 1980', 'name': 'NAD83 / Texas South Central',
'geographic': False, 'projected': True, 'spatialite': False,
# From proj's "cs2cs -le" and Wikipedia (semi-minor only)
'ellipsoid': (6378137.0, 6356752.31414, 298.257222101),
'eprec': (1, 5, 10),
})
@unittest.skipUnless(HAS_GDAL, "SpatialRefSysTest needs gdal support")
@skipUnlessDBFeature("has_spatialrefsys_table")
class SpatialRefSysTest(unittest.TestCase):
def test_retrieve(self):
"""
Test retrieval of SpatialRefSys model objects.
"""
for sd in test_srs:
srs = SpatialRefSys.objects.get(srid=sd['srid'])
self.assertEqual(sd['srid'], srs.srid)
# Some of the authority names are borked on Oracle, e.g., SRID=32140.
# also, Oracle Spatial seems to add extraneous info to fields, hence
# the testing with the 'startswith' flag.
auth_name, oracle_flag = sd['auth_name']
if postgis or (oracle and oracle_flag):
self.assertEqual(True, srs.auth_name.startswith(auth_name))
self.assertEqual(sd['auth_srid'], srs.auth_srid)
# No proj.4 and different srtext on oracle backends :(
if postgis:
self.assertTrue(srs.wkt.startswith(sd['srtext']))
six.assertRegex(self, srs.proj4text, sd['proj4_re'])
def test_osr(self):
"""
Test getting OSR objects from SpatialRefSys model objects.
"""
for sd in test_srs:
sr = SpatialRefSys.objects.get(srid=sd['srid'])
self.assertEqual(True, sr.spheroid.startswith(sd['spheroid']))
self.assertEqual(sd['geographic'], sr.geographic)
self.assertEqual(sd['projected'], sr.projected)
if not (spatialite and not sd['spatialite']):
# Can't get 'NAD83 / Texas South Central' from PROJ.4 string
# on SpatiaLite
self.assertEqual(True, sr.name.startswith(sd['name']))
# Testing the SpatialReference object directly.
if postgis or spatialite:
srs = sr.srs
six.assertRegex(self, srs.proj4, sd['proj4_re'])
# No `srtext` field in the `spatial_ref_sys` table in SpatiaLite < 4
if not spatialite or connection.ops.spatial_version[0] >= 4:
self.assertTrue(srs.wkt.startswith(sd['srtext']))
def test_ellipsoid(self):
"""
Test the ellipsoid property.
"""
for sd in test_srs:
# Getting the ellipsoid and precision parameters.
ellps1 = sd['ellipsoid']
prec = sd['eprec']
# Getting our spatial reference and its ellipsoid
srs = SpatialRefSys.objects.get(srid=sd['srid'])
ellps2 = srs.ellipsoid
for i in range(3):
self.assertAlmostEqual(ellps1[i], ellps2[i], prec[i])
@skipUnlessDBFeature('supports_add_srs_entry')
def test_add_entry(self):
"""
Test adding a new entry in the SpatialRefSys model using the
add_srs_entry utility.
"""
from django.contrib.gis.utils import add_srs_entry
add_srs_entry(3857)
self.assertTrue(
SpatialRefSys.objects.filter(srid=3857).exists()
)
srs = SpatialRefSys.objects.get(srid=3857)
self.assertTrue(
SpatialRefSys.get_spheroid(srs.wkt).startswith('SPHEROID[')
)
|
the-stack_0_10642 | from typing import Any, Type
def subclasses_of(klass: Type[Any]):
subclasses = []
stack = [klass]
while stack:
parent = stack.pop()
for subclass in parent.__subclasses__():
if subclass not in subclasses:
stack.append(subclass)
subclasses.append(subclass)
return subclasses
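# Hedged usage sketch (illustrative only; the classes below are made up, not
# part of this utility). Only classes that are already defined/imported at call
# time are found, because __subclasses__() reports just the live subclasses.
def _demo_subclasses_of():
    class Base:
        pass

    class Child(Base):
        pass

    class GrandChild(Child):
        pass

    found = subclasses_of(Base)
    assert Child in found and GrandChild in found
    return found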
|
the-stack_0_10643 | # Copyright 2018 Google. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides utilities to preprocess images for the Inception networks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import random_ops
flags.DEFINE_float(
'cb_distortion_range', 0.1, 'Cb distortion range +/-')
flags.DEFINE_float(
'cr_distortion_range', 0.1, 'Cr distortion range +/-')
flags.DEFINE_boolean(
'use_fast_color_distort', True,
'apply fast color/chroma distortion if True, else apply'
'brightness/saturation/hue/contrast distortion')
FLAGS = flags.FLAGS
def apply_with_random_selector(x, func, num_cases):
"""Computes func(x, sel), with sel sampled from [0...num_cases-1].
Args:
x: input Tensor.
func: Python function to apply.
num_cases: Python int32, number of cases to sample sel from.
Returns:
The result of func(x, sel), where func receives the value of the
selector as a python integer, but sel is sampled dynamically.
"""
sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32)
# Pass the real x only to one of the func calls.
return control_flow_ops.merge([
func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case)
for case in range(num_cases)])[0]
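# Hedged usage sketch (mirrors how this helper is applied further below):
#
#     image = apply_with_random_selector(
#         image,
#         lambda x, method: tf.image.resize_images(x, [224, 224], method),
#         num_cases=4)
#
# where the 224x224 target size is purely illustrative.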
def distort_color(image, color_ordering=0, fast_mode=True, scope=None):
"""Distort the color of a Tensor image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
Rather than adding that level of complication, we select a distinct ordering
of color ops for each preprocessing thread.
Args:
image: 3-D Tensor containing single image in [0, 1].
color_ordering: Python int, a type of distortion (valid values: 0-3).
fast_mode: Avoids slower ops (random_hue and random_contrast)
scope: Optional scope for name_scope.
Returns:
3-D Tensor color-distorted image on range [0, 1]
Raises:
ValueError: if color_ordering not in [0, 3]
"""
with tf.name_scope(scope, 'distort_color', [image]):
if fast_mode:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
else:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
if color_ordering == 0:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
elif color_ordering == 1:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
elif color_ordering == 2:
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
elif color_ordering == 3:
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
else:
raise ValueError('color_ordering must be in [0, 3]')
# The random_* ops do not necessarily clamp.
return tf.minimum(tf.maximum(image, 0.0), 1.0)
def distort_color_fast(image, scope=None):
"""Distort the color of a Tensor image.
Distort brightness and chroma values of input image
Args:
image: 3-D Tensor containing single image in [0, 1].
scope: Optional scope for name_scope.
Returns:
3-D Tensor color-distorted image on range [0, 1]
"""
with tf.name_scope(scope, 'distort_color', [image]):
br_delta = random_ops.random_uniform([], -32./255., 32./255., seed=None)
cb_factor = random_ops.random_uniform(
[], -FLAGS.cb_distortion_range, FLAGS.cb_distortion_range, seed=None)
cr_factor = random_ops.random_uniform(
[], -FLAGS.cr_distortion_range, FLAGS.cr_distortion_range, seed=None)
channels = tf.split(axis=2, num_or_size_splits=3, value=image)
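# The coefficients below are the standard BT.601/JPEG YCbCr-to-RGB conversion
# factors: perturbing (brightness, Cb, Cr) by (br_delta, cb_factor, cr_factor)
# and mapping back to RGB gives R += 1.402*Cr, G -= 0.344136*Cb + 0.714136*Cr,
# B += 1.772*Cb, each plus the shared brightness delta.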
red_offset = 1.402 * cr_factor + br_delta
green_offset = -0.344136 * cb_factor - 0.714136 * cr_factor + br_delta
blue_offset = 1.772 * cb_factor + br_delta
channels[0] += red_offset
channels[1] += green_offset
channels[2] += blue_offset
image = tf.concat(axis=2, values=channels)
image = tf.minimum(tf.maximum(image, 0.), 1.)
return image
def distorted_bounding_box_crop(image,
bbox,
min_object_covered=0.1,
aspect_ratio_range=(3./4., 4./3.),
area_range=(0.05, 1.0),
max_attempts=100,
scope=None):
"""Generates cropped_image using a one of the bboxes randomly distorted.
See `tf.image.sample_distorted_bounding_box` for more documentation.
Args:
image: 3-D Tensor of image (it will be converted to floats in [0, 1]).
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax]. If num_boxes is 0 then it would use the whole
image.
min_object_covered: An optional `float`. Defaults to `0.1`. The cropped
area of the image must contain at least this fraction of any bounding box
supplied.
aspect_ratio_range: An optional list of `floats`. The cropped area of the
image must have an aspect ratio = width / height within this range.
area_range: An optional list of `floats`. The cropped area of the image
must contain a fraction of the supplied image within this range.
max_attempts: An optional `int`. Number of attempts at generating a cropped
region of the image of the specified constraints. After `max_attempts`
failures, return the entire image.
scope: Optional scope for name_scope.
Returns:
A tuple, a 3-D Tensor cropped_image and the distorted bbox
"""
with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]):
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
# A large fraction of image datasets contain a human-annotated bounding
# box delineating the region of the image containing the object of interest.
# We choose to create a new bounding box for the object which is a randomly
# distorted version of the human-annotated bounding box that obeys an
# allowed range of aspect ratios, sizes and overlap with the human-annotated
# bounding box. If no box is supplied, then we assume the bounding box is
# the entire image.
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.shape(image),
bounding_boxes=bbox,
min_object_covered=min_object_covered,
aspect_ratio_range=aspect_ratio_range,
area_range=area_range,
max_attempts=max_attempts,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box
# Crop the image to the specified bounding box.
cropped_image = tf.slice(image, bbox_begin, bbox_size)
return cropped_image, distort_bbox
def preprocess_for_train(image, height, width, bbox,
fast_mode=True,
scope=None,
add_image_summaries=True):
"""Distort one image for training a network.
Distorting images provides a useful technique for augmenting the data
set during training in order to make the network invariant to aspects
of the image that do not affect the label.
Additionally it would create image_summaries to display the different
transformations applied to the image.
Args:
image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
[0, 1], otherwise it will be converted to tf.float32 assuming that the range
is [0, MAX], where MAX is largest positive representable number for
int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
height: integer
width: integer
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax].
fast_mode: Optional boolean, if True avoids slower transformations (i.e.
bi-cubic resizing, random_hue or random_contrast).
scope: Optional scope for name_scope.
add_image_summaries: Enable image summaries.
Returns:
3-D float Tensor of distorted image used for training with range [-1, 1].
"""
with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]):
if bbox is None:
bbox = tf.constant([0.0, 0.0, 1.0, 1.0],
dtype=tf.float32,
shape=[1, 1, 4])
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
if add_image_summaries:
# Each bounding box has shape [1, num_boxes, box coords] and
# the coordinates are ordered [ymin, xmin, ymax, xmax].
image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
bbox)
tf.summary.image('image_with_bounding_boxes', image_with_box)
distorted_image, distorted_bbox = distorted_bounding_box_crop(image, bbox)
# Restore the shape since the dynamic slice based upon the bbox_size loses
# the third dimension.
distorted_image.set_shape([None, None, 3])
if add_image_summaries:
image_with_distorted_box = tf.image.draw_bounding_boxes(
tf.expand_dims(image, 0), distorted_bbox)
tf.summary.image('images_with_distorted_bounding_box',
image_with_distorted_box)
# This resizing operation may distort the images because the aspect
# ratio is not respected. We select a resize method in a round robin
# fashion based on the thread number.
# Note that ResizeMethod contains 4 enumerated resizing methods.
# We select only 1 case for fast_mode bilinear.
num_resize_cases = 1 if fast_mode else 4
distorted_image = apply_with_random_selector(
distorted_image,
lambda x, method: tf.image.resize_images(x, [height, width], method),
num_cases=num_resize_cases)
if add_image_summaries:
tf.summary.image('cropped_resized_image',
tf.expand_dims(distorted_image, 0))
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
# Randomly distort the colors. There are 1 or 4 ways to do it.
if FLAGS.use_fast_color_distort:
distorted_image = distort_color_fast(distorted_image)
else:
num_distort_cases = 1 if fast_mode else 4
distorted_image = apply_with_random_selector(
distorted_image,
lambda x, ordering: distort_color(x, ordering, fast_mode),
num_cases=num_distort_cases)
if add_image_summaries:
tf.summary.image('final_distorted_image',
tf.expand_dims(distorted_image, 0))
return distorted_image
def preprocess_for_eval(image, height, width,
central_fraction=0.875, scope=None):
"""Prepare one image for evaluation.
If height and width are specified it would output an image with that size by
applying resize_bilinear.
If central_fraction is specified it would crop the central fraction of the
input image.
Args:
image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
[0, 1], otherwise it will be converted to tf.float32 assuming that the range
is [0, MAX], where MAX is largest positive representable number for
int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
height: integer
width: integer
central_fraction: Optional Float, fraction of the image to crop.
scope: Optional scope for name_scope.
Returns:
3-D float Tensor of prepared image.
"""
with tf.name_scope(scope, 'eval_image', [image, height, width]):
if image.dtype != tf.float32:
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
# Crop the central region of the image with an area containing 87.5% of
# the original image.
if central_fraction:
image = tf.image.central_crop(image, central_fraction=central_fraction)
if height and width:
# Resize the image to the specified height and width.
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [height, width],
align_corners=False)
image = tf.squeeze(image, [0])
image.set_shape([height, width, 3])
return image
def preprocess_image(image,
output_height,
output_width,
is_training=False,
scaled_images=True,
bbox=None,
fast_mode=True,
add_image_summaries=False):
"""Pre-process one image for training or evaluation.
Args:
image: 3-D Tensor [height, width, channels] with the image. If dtype is
tf.float32 then the range should be [0, 1], otherwise it will be converted
to tf.float32 assuming that the range is [0, MAX], where MAX is largest
positive representable number for int(8/16/32) data type (see
`tf.image.convert_image_dtype` for details).
output_height: integer, image expected height.
output_width: integer, image expected width.
is_training: Boolean. If true, the image is transformed for training,
otherwise it is transformed for evaluation.
scaled_images: Whether to scale pixel values to the range [-1, 1].
If set to false, pixel values are in the range [0, 1].
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
fast_mode: Optional boolean, if True avoids slower transformations.
add_image_summaries: Enable image summaries.
Returns:
3-D float Tensor containing an appropriately scaled image
Raises:
ValueError: if user does not provide bounding box
"""
if is_training:
image = preprocess_for_train(
image,
output_height,
output_width,
bbox,
fast_mode,
add_image_summaries=add_image_summaries)
else:
image = preprocess_for_eval(image, output_height, output_width)
if scaled_images:
image = tf.subtract(image, 0.5)
image = tf.multiply(image, 2.0)
return image
|
the-stack_0_10644 | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from pants.contrib.python.checks.checker.pyflakes import PyflakesChecker
from pants.contrib.python.checks.tasks.checkstyle.plugin_subsystem_base import PluginSubsystemBase
class FlakeCheckSubsystem(PluginSubsystemBase):
options_scope = 'pycheck-pyflakes'
@classmethod
def register_plugin_options(cls, register):
register('--ignore', fingerprint=True, type=list, default=[],
help='List of warning codes to ignore.')
@classmethod
def plugin_type(cls):
return PyflakesChecker
|
the-stack_0_10647 | from cereal import car
from opendbc.can.parser import CANParser
from opendbc.can.can_define import CANDefine
from selfdrive.config import Conversions as CV
from selfdrive.car.interfaces import CarStateBase
from selfdrive.car.chrysler.values import DBC, STEER_THRESHOLD
class CarState(CarStateBase):
def __init__(self, CP):
super().__init__(CP)
can_define = CANDefine(DBC[CP.carFingerprint]['pt'])
self.shifter_values = can_define.dv["GEAR"]['PRNDL']
def update(self, cp, cp_cam):
ret = car.CarState.new_message()
self.frame = int(cp.vl["EPS_STATUS"]['COUNTER'])
ret.doorOpen = any([cp.vl["DOORS"]['DOOR_OPEN_FL'],
cp.vl["DOORS"]['DOOR_OPEN_FR'],
cp.vl["DOORS"]['DOOR_OPEN_RL'],
cp.vl["DOORS"]['DOOR_OPEN_RR']])
ret.seatbeltUnlatched = cp.vl["SEATBELT_STATUS"]['SEATBELT_DRIVER_UNLATCHED'] == 1
ret.brakePressed = cp.vl["BRAKE_2"]['BRAKE_PRESSED_2'] == 5 # human-only
ret.brake = 0
ret.brakeLights = ret.brakePressed
ret.gas = cp.vl["ACCEL_GAS_134"]['ACCEL_134']
ret.gasPressed = ret.gas > 1e-5
ret.espDisabled = (cp.vl["TRACTION_BUTTON"]['TRACTION_OFF'] == 1)
ret.wheelSpeeds.fl = cp.vl['WHEEL_SPEEDS']['WHEEL_SPEED_FL']
ret.wheelSpeeds.rr = cp.vl['WHEEL_SPEEDS']['WHEEL_SPEED_RR']
ret.wheelSpeeds.rl = cp.vl['WHEEL_SPEEDS']['WHEEL_SPEED_RL']
ret.wheelSpeeds.fr = cp.vl['WHEEL_SPEEDS']['WHEEL_SPEED_FR']
ret.vEgoRaw = (cp.vl['SPEED_1']['SPEED_LEFT'] + cp.vl['SPEED_1']['SPEED_RIGHT']) / 2.
ret.vEgo, ret.aEgo = self.update_speed_kf(ret.vEgoRaw)
ret.standstill = not ret.vEgoRaw > 0.001
ret.leftBlinker = cp.vl["STEERING_LEVERS"]['TURN_SIGNALS'] == 1
ret.rightBlinker = cp.vl["STEERING_LEVERS"]['TURN_SIGNALS'] == 2
ret.steeringAngle = cp.vl["STEERING"]['STEER_ANGLE']
ret.steeringRate = cp.vl["STEERING"]['STEERING_RATE']
ret.gearShifter = self.parse_gear_shifter(self.shifter_values.get(cp.vl['GEAR']['PRNDL'], None))
ret.cruiseState.enabled = cp.vl["ACC_2"]['ACC_STATUS_2'] == 7 # ACC is green.
ret.cruiseState.available = ret.cruiseState.enabled # FIXME: for now same as enabled
ret.cruiseState.speed = cp.vl["DASHBOARD"]['ACC_SPEED_CONFIG_KPH'] * CV.KPH_TO_MS
ret.steeringTorque = cp.vl["EPS_STATUS"]["TORQUE_DRIVER"]
ret.steeringTorqueEps = cp.vl["EPS_STATUS"]["TORQUE_MOTOR"]
ret.steeringPressed = abs(ret.steeringTorque) > STEER_THRESHOLD
steer_state = cp.vl["EPS_STATUS"]["LKAS_STATE"]
ret.steerError = steer_state == 4 or (steer_state == 0 and ret.vEgo > self.CP.minSteerSpeed)
ret.genericToggle = bool(cp.vl["STEERING_LEVERS"]['HIGH_BEAM_FLASH'])
self.lkas_counter = cp_cam.vl["LKAS_COMMAND"]['COUNTER']
self.lkas_car_model = cp_cam.vl["LKAS_HUD"]['CAR_MODEL']
self.lkas_status_ok = cp_cam.vl["LKAS_HEARTBIT"]['LKAS_STATUS_OK']
return ret
@staticmethod
def get_can_parser(CP):
signals = [
# sig_name, sig_address, default
("PRNDL", "GEAR", 0),
("DOOR_OPEN_FL", "DOORS", 0),
("DOOR_OPEN_FR", "DOORS", 0),
("DOOR_OPEN_RL", "DOORS", 0),
("DOOR_OPEN_RR", "DOORS", 0),
("BRAKE_PRESSED_2", "BRAKE_2", 0),
("ACCEL_134", "ACCEL_GAS_134", 0),
("SPEED_LEFT", "SPEED_1", 0),
("SPEED_RIGHT", "SPEED_1", 0),
("WHEEL_SPEED_FL", "WHEEL_SPEEDS", 0),
("WHEEL_SPEED_RR", "WHEEL_SPEEDS", 0),
("WHEEL_SPEED_RL", "WHEEL_SPEEDS", 0),
("WHEEL_SPEED_FR", "WHEEL_SPEEDS", 0),
("STEER_ANGLE", "STEERING", 0),
("STEERING_RATE", "STEERING", 0),
("TURN_SIGNALS", "STEERING_LEVERS", 0),
("ACC_STATUS_2", "ACC_2", 0),
("HIGH_BEAM_FLASH", "STEERING_LEVERS", 0),
("ACC_SPEED_CONFIG_KPH", "DASHBOARD", 0),
("TORQUE_DRIVER", "EPS_STATUS", 0),
("TORQUE_MOTOR", "EPS_STATUS", 0),
("LKAS_STATE", "EPS_STATUS", 1),
("COUNTER", "EPS_STATUS", -1),
("TRACTION_OFF", "TRACTION_BUTTON", 0),
("SEATBELT_DRIVER_UNLATCHED", "SEATBELT_STATUS", 0),
]
checks = [
# sig_address, frequency
("BRAKE_2", 50),
("EPS_STATUS", 100),
("SPEED_1", 100),
("WHEEL_SPEEDS", 50),
("STEERING", 100),
("ACC_2", 50),
]
return CANParser(DBC[CP.carFingerprint]['pt'], signals, checks, 0)
@staticmethod
def get_cam_can_parser(CP):
signals = [
# sig_name, sig_address, default
("COUNTER", "LKAS_COMMAND", -1),
("CAR_MODEL", "LKAS_HUD", -1),
("LKAS_STATUS_OK", "LKAS_HEARTBIT", -1)
]
checks = []
return CANParser(DBC[CP.carFingerprint]['pt'], signals, checks, 2)
|
the-stack_0_10648 | from random import randint
from typing import Dict
from uuid import uuid4
import pytest
from pydantic import BaseModel, ValidationError
from geojson_pydantic.features import Feature, FeatureCollection
from geojson_pydantic.geometries import Geometry, MultiPolygon, Polygon
class GenericProperties(BaseModel):
id: str
description: str
size: int
properties = {
"id": str(uuid4()),
"description": str(uuid4()),
"size": randint(0, 1000),
}
polygon = {
"type": "Polygon",
"coordinates": [
[
[13.38272, 52.46385],
[13.42786, 52.46385],
[13.42786, 52.48445],
[13.38272, 52.48445],
[13.38272, 52.46385],
]
],
}
test_feature = {
"type": "Feature",
"geometry": polygon,
"properties": properties,
}
def test_geometry_collection_iteration():
"""test if feature collection is iterable"""
gc = FeatureCollection(features=[test_feature, test_feature])
iter(gc)
def test_generic_properties_is_dict():
feature = Feature(**test_feature)
assert feature.properties["id"] == test_feature["properties"]["id"]
assert type(feature.properties) == dict
assert not hasattr(feature.properties, "id")
def test_generic_properties_is_object():
feature = Feature[Geometry, GenericProperties](**test_feature)
assert feature.properties.id == test_feature["properties"]["id"]
assert type(feature.properties) == GenericProperties
assert hasattr(feature.properties, "id")
def test_generic_geometry():
feature = Feature[Polygon, GenericProperties](**test_feature)
assert feature.properties.id == test_feature["properties"]["id"]
assert type(feature.geometry) == Polygon
assert type(feature.properties) == GenericProperties
assert hasattr(feature.properties, "id")
feature = Feature[Polygon, Dict](**test_feature)
assert type(feature.geometry) == Polygon
assert feature.properties["id"] == test_feature["properties"]["id"]
assert type(feature.properties) == dict
assert not hasattr(feature.properties, "id")
with pytest.raises(ValidationError):
Feature[MultiPolygon, Dict](**({"type": "Feature", "geometry": polygon}))
def test_generic_properties_should_raise_for_string():
with pytest.raises(ValidationError):
Feature(
**({"type": "Feature", "geometry": polygon, "properties": "should raise"})
)
def test_feature_collection_generic():
fc = FeatureCollection[Polygon, GenericProperties](
features=[test_feature, test_feature]
)
assert len(fc) == 2
assert type(fc[0].properties) == GenericProperties
assert type(fc[0].geometry) == Polygon
def test_geo_interface_protocol():
class Pointy:
__geo_interface__ = {"type": "Point", "coordinates": (0.0, 0.0)}
feat = Feature(geometry=Pointy())
assert feat.geometry.dict() == Pointy.__geo_interface__
|
the-stack_0_10649 | import torch
import torch.nn as nn
import torch.nn.functional as F
def kl_loss(x, mu, logsigma, beta):
kl = -0.5 * torch.sum(1 + logsigma - mu.pow(2) - logsigma.exp())
return beta * (kl / torch.numel(x))
def vae_loss(x, mu, logsigma, recon_x, beta=1):
recon_loss = F.mse_loss(x, recon_x, reduction='mean')
kl = kl_loss(x, mu, logsigma, beta)
return recon_loss + kl
def reparameterize(mu, logsigma):
std = torch.exp(0.5*logsigma)
eps = torch.randn_like(std)
return mu + eps*std
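# Hedged usage sketch (illustrative only; the shapes and toy tensors below are
# made up, not taken from any original training code for these helpers).
def _demo_vae_loss():
    x = torch.rand(4, 3, 64, 64)      # fake batch of images in [0, 1]
    mu = torch.zeros(4, 32)           # pretend encoder outputs
    logsigma = torch.zeros(4, 32)
    z = reparameterize(mu, logsigma)  # z = mu + eps * exp(0.5 * logsigma)
    recon_x = torch.rand_like(x)      # pretend decoder output
    loss = vae_loss(x, mu, logsigma, recon_x, beta=1)
    return z, loss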
def carracing_encoder(input_channel):
return nn.Sequential(
nn.Conv2d(input_channel, 32, 4, stride=2), nn.ReLU(),
nn.Conv2d(32, 64, 4, stride=2), nn.ReLU(),
nn.Conv2d(64, 128, 4, stride=2), nn.ReLU(),
nn.Conv2d(128, 256, 4, stride=2), nn.ReLU()
)
def carracing_decoder(flatten_size):
return nn.Sequential(
nn.ConvTranspose2d(flatten_size, 128, 5, stride=2), nn.ReLU(),
nn.ConvTranspose2d(128, 64, 5, stride=2), nn.ReLU(),
nn.ConvTranspose2d(64, 32, 6, stride=2), nn.ReLU(),
nn.ConvTranspose2d(32, 3, 6, stride=2), nn.Sigmoid()
) |
the-stack_0_10650 | from collections import namedtuple, deque
import difflib
import pygments.formatters
import pygments.lexers
import pygments.token
import re
from typing import List, Tuple, Optional, Iterator, Iterable
from literate.annot import Span, Annot, SpanMerger, \
cut_annot, merge_annot, sub_annot, fill_annot
from literate.file import File, Line, Diff, DiffBlock, Hunk, OutputLine
from literate.points import Point, cut_annot_at_points
# Regex for finding runs of identical non-space characters
RUN_RE = re.compile(r'([^ \n])\1*')
def parse_intra_annot(s: str) -> Annot[str]:
'''Parse an `ndiff` detail (`?`) line and convert it to an annotation
indicating intraline edits in the text of the preceding line. The
annotation labels inserted, deleted, and changed characters with `'ins'`,
`'del'`, and `'chg'` respectively.'''
spans = []
for m in RUN_RE.finditer(s):
c = m.group(1)
# Map the symbols used by `ndiff` to something more meaningful.
label = {
'+': 'ins',
'-': 'del',
'^': 'chg',
}[c]
spans.append(Span(m.start(), m.end(), label))
return spans
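# Hedged illustration (not used by the module): a made-up ndiff detail text and
# the spans it should map to, assuming Span exposes start/end/label attributes
# as it does elsewhere in this file.
def _demo_parse_intra_annot():
    spans = parse_intra_annot('  ---  ^^ +')
    assert [(s.start, s.end, s.label) for s in spans] == \
        [(2, 5, 'del'), (7, 9, 'chg'), (10, 11, 'ins')]
    return spans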
DiffLine = Tuple[bool, bool, Optional[Annot[str]], Optional[Annot[str]]]
def diff_lines(old_lines: List[str], new_lines: List[str]) -> Iterator[DiffLine]:
'''Compute a diff of `old` and `new`, and yield a sequence of (old_line,
new_line, old_detail, new_detail). Each `line` is a boolean indicating
whether there is a line present in the old/new file, and each `detail` is
an intraline edit annotation (see `parse_intra_annot`).
Possible outputs:
- (True, True, None, None): Unmodified/context line
- (True, False, None, None): Deletion of a line from the old text.
- (False, True, None, None): Insertion of a line in the new text.
- (True, True, [...], [...]): Changed line, modified via the indicated
intraline insertions and deletions.
'''
# We buffer up to two previous result tuples. This lets us handle
# intraline change markers, and in particular, the nasty '-+?' case, where
# we don't find out that we're in an intraline change ('?') until we've
# seen both the '-' and '+' lines.
buf = deque()
for dl in difflib.ndiff(old_lines, new_lines):
prefix = dl[0:2]
if prefix == ' ':
# Context line. Flush the whole buffer.
while buf:
yield buf.popleft()
yield (True, True, None, None)
elif prefix == '- ':
while buf:
yield buf.popleft()
buf.append((True, False, None, None))
elif prefix == '+ ':
# Try to fold into a previous intraline edit quad, if one exists.
if len(buf) > 0:
old_line, new_line, old_detail, new_detail = buf[-1]
if not new_line and old_detail is not None:
# Previously saw a '-' and a '?'. Fold in this '+'.
assert not new_line
buf[-1] = (old_line, True, old_detail, None)
continue
# If there's no old_detail ('?'), then we aren't in an
# intraline edit. If there's a new_line, then the intraline
# edit is already finished. In either case, we want to do the
# default action of just adding the '+' on its own.
while len(buf) > 2:
yield buf.popleft()
buf.append((False, True, None, None))
elif prefix == '? ':
detail = parse_intra_annot(dl[2:])
# Add this detail to the previous buffered line. We may also need
# to merge a pair of previous '-' and '+' lines, if we didn't
# previously know that they were part of an intraline change quad.
assert len(buf) > 0
old_line, new_line, old_detail, new_detail = buf.pop()
if new_line:
if old_line:
# The previous line is a rollup of a '-' and a '+'.
# (Context lines are not included in the buffer.)
assert old_detail is not None
buf.append((True, True, old_detail, detail))
else:
# The previous line is just a '+'. There must be a '-'
# before it, so roll up both of those together with the new
# detail.
old_line2, new_line2, old_detail2, new_detail2 = buf.pop()
assert old_line2
assert not new_line2
assert old_detail2 is None
assert new_detail2 is None
buf.append((True, True, None, detail))
else:
# The previous line is just a '-'. Roll this detail into it.
# Next we should see a '+', which will get rolled in, so this
# bogus (True, False, [...], None) entry will never be yielded.
buf.append((True, False, detail, None))
# Flush any remaining buffered entries.
while buf:
yield buf.popleft()
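# Hedged usage sketch (an illustrative driver, not part of the original module):
# classify each row produced by diff_lines for a tiny made-up pair of files.
def _demo_diff_lines():
    old = ['fn main() {\n', '    foo();\n', '}\n']
    new = ['fn main() {\n', '    bar();\n', '}\n']
    for old_line, new_line, old_detail, new_detail in diff_lines(old, new):
        if old_line and new_line and old_detail is None and new_detail is None:
            kind = 'context'
        elif old_line and new_line:
            kind = 'changed (with intraline detail)'
        elif old_line:
            kind = 'deleted'
        else:
            kind = 'inserted'
        print(kind)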
def adjust_closing_brace(old_lines: List[str], new_lines: List[str],
diff: Iterable[DiffLine]) -> Iterator[DiffLine]:
'''Adjust the output of `diff_lines` to turn this:
fn f() {
...
+}
+fn g() {
+ ...
}
into this:
fn f() {
...
}
+fn g() {
+ ...
+}
'''
# Specifically: at the end of every run of insertions or deletions, if the
# first context line after the run consists of solely a '}' character (with
# whitespace), then we scan from the top of the run for an identical
# inserted line. If found, we change the earlier line from an insertion to
# context, and change the context line to an insertion.
mode = None
buf = []
buf_start = None
old_i = -1
new_i = -1
for dl in diff:
old_line, new_line, old_detail, new_detail = dl
if old_line and not new_line:
new_mode = 'del'
old_i += 1
elif not old_line and new_line:
new_mode = 'ins'
new_i += 1
else:
new_mode = None
old_i += 1
new_i += 1
if new_mode != mode:
if new_mode is None:
# Switching from ins or del mode to context mode. If the
# current line is a '}', we try to do the block adjustment.
check_lines = new_lines if mode == 'ins' else old_lines
i = new_i if mode == 'ins' else old_i
if check_lines[i].strip() == '}':
# Yield everything from buf, while scanning for an earlier
# matching line.
found_dl = None
for j, buf_dl in enumerate(buf):
if check_lines[buf_start + j] == check_lines[i]:
found_dl = buf_dl
yield (True, True, None, None)
# We're stopping early, so yield the remaining
# elements.
yield from buf[j + 1:]
break
else:
yield buf_dl
if found_dl:
yield found_dl
else:
yield (True, True, None, None)
else:
yield from buf
yield dl
mode = None
buf = []
buf_start = None
# We already yielded the correct info, so don't fall through to
# the default logic.
continue
else:
if mode is not None:
yield from buf
mode = new_mode
buf = []
buf_start = new_i if mode == 'ins' else old_i
if mode is None:
yield dl
else:
buf.append(dl)
# There are no more lines, so there can't be a `}` line following `buf` to
# trigger our heuristic. That means we can blindly dump everything in
# `buf`.
yield from buf
WORD_BREAK_RE = re.compile(r'\b')
def token_annot(line: Line) -> Annot[None]:
'''Annotate the tokens of `l`. Each token (and some sub-token strings)
gets a separate span. This is a helper function for
`calc_tokenized_intra`.'''
annot = fill_annot(line.highlight, len(line.text))
# Special cases: treat word boundaries inside strings and comments as token
# breaks. This essentially gives us the behavior of `git`'s `--word-diff`
# feature.
extra_cuts = []
for span in annot:
# We don't handle String subtypes (only String itself) because we don't
# want to break up `\x00` and similar escapes.
if span.label == pygments.token.String or \
span.label in pygments.token.Comment:
text = line.text[span.start : span.end]
for m in WORD_BREAK_RE.finditer(text):
extra_cuts.append(Point(span.start + m.start()))
return cut_annot_at_points(annot, extra_cuts)
def calc_tokenized_intra(l1: Line, l2: Line) -> Tuple[Annot[str], Annot[str]]:
'''Calculate token-based intraline edit annotations for `l1` and `l2`.
`difflib.ndiff` does a pretty good job of matching up similar lines, but it
computes intraline changes character-by-character, which often produces bad
results. For example, it might turn `unsafe` into `malloc` by replacing
`uns` -> `m` and `fe` -> `lloc`, instead of doing `unsafe` -> `malloc` in
one go.
Here we calculate some intraline edits that are easier to read, using the
tokenization provided by `pygments` to align edit boundaries to the
boundaries of source tokens.'''
annot1 = token_annot(l1)
annot2 = token_annot(l2)
tokens1 = [l1.text[s.start : s.end] for s in annot1]
tokens2 = [l2.text[s.start : s.end] for s in annot2]
intra1 = []
intra2 = []
sm = difflib.SequenceMatcher(a=tokens1, b=tokens2)
for tag, i1, i2, j1, j2 in sm.get_opcodes():
if tag == 'equal':
continue
while i1 < i2 and tokens1[i1].isspace():
i1 += 1
while i2 > i1 and tokens1[i2 - 1].isspace():
i2 -= 1
while j1 < j2 and tokens2[j1].isspace():
j1 += 1
while j2 > j1 and tokens2[j2 - 1].isspace():
j2 -= 1
if i1 != i2:
intra1.append(Span(annot1[i1].start, annot1[i2 - 1].end,
'chg' if tag == 'replace' else 'del'))
if j1 != j2:
intra2.append(Span(annot2[j1].start, annot2[j2 - 1].end,
'chg' if tag == 'replace' else 'ins'))
return (intra1, intra2)
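def _token_diff_demo():
    '''Hedged, standalone illustration (not part of the original module) of why
    the intraline edits above are computed over tokens.  Only the standard
    library is used and the token lists are invented; on raw characters, an
    ndiff-style match would split `unsafe` -> `malloc` into several tiny edits.'''
    a = ['let', ' ', 'x', ' ', '=', ' ', 'unsafe', '(', ')', ';']
    b = ['let', ' ', 'x', ' ', '=', ' ', 'malloc', '(', ')', ';']
    sm = difflib.SequenceMatcher(a=a, b=b)
    # Token-level opcodes report one clean replacement: 'unsafe' -> 'malloc'.
    return [op for op in sm.get_opcodes() if op[0] != 'equal']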
def diff_files(f1: File, f2: File) -> Diff:
'''Diff two files, returning a `Diff` between them and also setting the
`intra` annotation on the lines of both files.'''
dls = diff_lines(f1.line_text, f2.line_text)
dls = adjust_closing_brace(f1.line_text, f2.line_text, dls)
# Accumulator for diff blocks.
diff_blocks = []
# Start and current position of the current block.
old_start = 0
old_cur = 0
new_start = 0
new_cur = 0
# Is the current block a change? (If not, it's context.)
changed = True
def flush():
nonlocal old_start, new_start
# This check means we can blindly call `flush()` without worrying about
# cluttering the output with zero-length blocks.
if old_cur - old_start > 0 or new_cur - new_start > 0:
diff_blocks.append(DiffBlock(changed,
Span(old_start, old_cur),
Span(new_start, new_cur)))
old_start = old_cur
new_start = new_cur
for old_line, new_line, old_detail, new_detail in dls:
next_changed = not (old_line and new_line and
old_detail is None and new_detail is None)
has_intra = old_detail is not None or new_detail is not None
if next_changed != changed:
flush()
if has_intra:
# Emit each `intra` line as its own block, to ensure they're
# aligned in the output.
flush()
intra1, intra2 = calc_tokenized_intra(
f1.lines[old_cur], f2.lines[new_cur])
if len(intra1) > 0:
f1.lines[old_cur].set_intra(intra1)
if len(intra2) > 0:
f2.lines[new_cur].set_intra(intra2)
flush()
if old_line:
old_cur += 1
if new_line:
new_cur += 1
changed = next_changed
flush()
return Diff(f1, f2, diff_blocks)
def context_annot(blocks: List[DiffBlock], new: bool, context_lines: int) -> Annot[None]:
'''Generate an annotation of the old or new file's lines, indicating which
lines are changes or context for changes (within `context_lines`
distance).'''
result = SpanMerger()
for (changed, old_span, new_span) in blocks:
if not changed:
continue
span = new_span if new else old_span
result.add(Span(
span.start - context_lines,
span.end + context_lines))
return result.finish()
def split_hunks(blocks: List[DiffBlock]) -> List[Hunk]:
'''Split the output of `filter_unchanged` into hunks, anywhere there's a
gap in the old or new line numbers.'''
last_old = 0
last_new = 0
cur = []
hunks = []
def flush():
nonlocal cur
if len(cur) > 0:
hunks.append(Hunk(cur))
cur = []
for b in blocks:
changed, old_span, new_span = b
if old_span.start != last_old or new_span.start != last_new:
flush()
cur.append(b)
last_old = old_span.end
last_new = new_span.end
flush()
return hunks
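def _split_hunks_demo():
    '''Hedged sketch (not part of the original module): `split_hunks` starts a
    new hunk wherever consecutive blocks are not contiguous on the old or new
    side.  It assumes `DiffBlock(changed, old_span, new_span)` and
    `Span(start, end)` accept positional arguments as they are used above; the
    line ranges are invented.'''
    blocks = [
        DiffBlock(False, Span(10, 12), Span(10, 12)),  # context
        DiffBlock(True, Span(12, 13), Span(12, 14)),   # change
        # Lines between the change and the next kept block were filtered out,
        # leaving a gap in both files, so a second hunk starts here.
        DiffBlock(False, Span(40, 42), Span(41, 43)),  # context
    ]
    hunks = split_hunks(blocks)
    assert len(hunks) == 2
    return hunks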
def annotate_blocks(blocks: List[DiffBlock]) \
-> Tuple[Annot[Span[None]], Annot[Span[None]]]:
'''Return annotations on the old and new files, labeling each line with the
block that contains it.'''
old = []
new = []
for b in blocks:
old.append(Span(b.old_span.start, b.old_span.end, b))
new.append(Span(b.new_span.start, b.new_span.end, b))
return old, new
def build_diff_hunks(d: Diff, context_diff: bool=True):
'''Build a list of output hunks, and assign it to `d.hunks`.
If `d.old_file` or `d.new_file` has a `keep_mark_lines` annotation, all
annotated lines will be kept as additional context.'''
# Find the set of lines each file wants to keep.
def calc_file_keep(f, is_new):
if context_diff:
keep = context_annot(d.blocks, is_new, 5)
if f.keep_mark_lines is not None:
keep = merge_annot(keep, f.keep_mark_lines)
else:
if len(f.line_annot) > 0:
keep = [Span(0, f.line_annot[-1].end)]
else:
keep = []
if f.drop_irrelevant_lines is not None:
keep = sub_annot(keep, f.drop_irrelevant_lines)
return keep
keep_old = calc_file_keep(d.old_file, False)
keep_new = calc_file_keep(d.new_file, True)
# In unchanged blocks, add each file's keep lines to the other file's set.
# This works because unchanged blocks have the same number of lines on each
# side.
old_blocks, new_blocks = annotate_blocks(d.blocks)
extra_keep_old = []
extra_keep_new = []
for block_span, keep_spans in cut_annot(keep_old, old_blocks):
if block_span.label.changed:
continue
base = block_span.label.new_span.start
extra_keep_new.extend(s + base for s in keep_spans)
for block_span, keep_spans in cut_annot(keep_new, new_blocks):
if block_span.label.changed:
continue
base = block_span.label.old_span.start
extra_keep_old.extend(s + base for s in keep_spans)
keep_old = merge_annot(keep_old, extra_keep_old)
keep_new = merge_annot(keep_new, extra_keep_new)
# For changed blocks, we can't match up lines from different files, so we
# just hope for the best. (Normally all changed lines are kept, so there's
# no need to match - the only exception is when the `irrelevant_*_regex`
# options are set.)
# Build the filtered list of blocks. There can be different numbers of
# blocks on the old and new sides. We use a fairly naive strategy to match
# them up, but it generally seems to work okay.
blocks = []
for (old_block, old_keeps), (new_block, new_keeps) in zip(
cut_annot(keep_old, old_blocks),
cut_annot(keep_new, new_blocks)):
# `old_blocks` and `new_blocks` have corresponding entries (based on
# the same block) at corresponding positions.
assert old_block.label is new_block.label
block = old_block.label
# Match up `old_keeps` and `new_keeps` entries by position. In most
# cases, the two lists will have the same length.
for old_keep, new_keep in zip(old_keeps, new_keeps):
blocks.append(DiffBlock(block.changed,
old_keep + block.old_span.start,
new_keep + block.new_span.start))
for old_keep in old_keeps[len(new_keeps):]:
blocks.append(DiffBlock(block.changed,
old_keep + block.old_span.start,
Span(block.new_span.end, block.new_span.end)))
for new_keep in new_keeps[len(old_keeps):]:
blocks.append(DiffBlock(block.changed,
Span(block.old_span.end, block.old_span.end),
new_keep + block.new_span.start))
# Split the new blocks into hunks, and save them in the `Diff`.
hunks = split_hunks(blocks)
d.set_hunks(hunks)
def hunk_output_lines(h: Hunk) -> List[OutputLine]:
result = []
for changed, old_span, new_span in h.blocks:
common_lines = min(len(old_span), len(new_span))
for i in range(0, common_lines):
result.append(OutputLine(changed, old_span.start + i, new_span.start + i))
for i in range(common_lines, len(old_span)):
result.append(OutputLine(changed, old_span.start + i, None))
for i in range(common_lines, len(new_span)):
result.append(OutputLine(changed, None, new_span.start + i))
return result
def build_output_lines(d: Diff):
'''Build a list of two-column output lines for each hunk of `d`, and set
the `Hunk.output_lines` fields.'''
for h in d.hunks:
output_lines = hunk_output_lines(h)
h.set_output_lines(output_lines)
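def _render_pipeline_demo(old_file, new_file):
    '''Hedged sketch (not part of the original module) of how the pieces above
    chain together.  `old_file` and `new_file` are assumed to be this module's
    `File` objects, built elsewhere, and each `OutputLine` is assumed to unpack
    as a `(changed, old_index, new_index)` triple as constructed above.'''
    d = diff_files(old_file, new_file)
    build_diff_hunks(d)
    build_output_lines(d)
    for hunk in d.hunks:
        for changed, old_idx, new_idx in hunk.output_lines:
            # `None` on one side means the line exists only in the other file.
            print(changed, old_idx, new_idx)
    return d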
|
the-stack_0_10652 | # -*- coding: utf-8 -*-
############################ Copyrights and license ############################
# #
# Copyright 2012 Steve English <[email protected]> #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Cameron White <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# Copyright 2013 poulp <[email protected]> #
# Copyright 2014 Tomas Radej <[email protected]> #
# Copyright 2014 Vincent Jacques <[email protected]> #
# Copyright 2016 E. Dunham <[email protected]> #
# Copyright 2016 Jannis Gebauer <[email protected]> #
# Copyright 2016 Peter Buckley <[email protected]> #
# Copyright 2017 Balázs Rostás <[email protected]> #
# Copyright 2017 Jannis Gebauer <[email protected]> #
# Copyright 2017 Simon <[email protected]> #
# Copyright 2018 Wan Liuyang <[email protected]> #
# Copyright 2018 bryanhuntesl <[email protected]> #
# Copyright 2018 sfdye <[email protected]> #
# Copyright 2018 itsbruce <[email protected]> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import datetime
import github.GithubObject
import github.PaginatedList
import github.Gist
import github.Repository
import github.NamedUser
import github.Plan
import github.Organization
import github.UserKey
import github.Issue
import github.Event
import github.Authorization
import github.Notification
import github.Migration
from . import Consts
class AuthenticatedUser(github.GithubObject.CompletableGithubObject):
"""
This class represents AuthenticatedUsers as returned by https://developer.github.com/v3/users/#get-the-authenticated-user
An AuthenticatedUser object can be created by calling ``get_user()`` on a Github object.
"""
def __repr__(self):
return self.get__repr__({"login": self._login.value})
@property
def avatar_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._avatar_url)
return self._avatar_url.value
@property
def bio(self):
"""
:type: string
"""
self._completeIfNotSet(self._bio)
return self._bio.value
@property
def blog(self):
"""
:type: string
"""
self._completeIfNotSet(self._blog)
return self._blog.value
@property
def collaborators(self):
"""
:type: integer
"""
self._completeIfNotSet(self._collaborators)
return self._collaborators.value
@property
def company(self):
"""
:type: string
"""
self._completeIfNotSet(self._company)
return self._company.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def disk_usage(self):
"""
:type: integer
"""
self._completeIfNotSet(self._disk_usage)
return self._disk_usage.value
@property
def email(self):
"""
:type: string
"""
self._completeIfNotSet(self._email)
return self._email.value
@property
def events_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._events_url)
return self._events_url.value
@property
def followers(self):
"""
:type: integer
"""
self._completeIfNotSet(self._followers)
return self._followers.value
@property
def followers_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._followers_url)
return self._followers_url.value
@property
def following(self):
"""
:type: integer
"""
self._completeIfNotSet(self._following)
return self._following.value
@property
def following_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._following_url)
return self._following_url.value
@property
def gists_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._gists_url)
return self._gists_url.value
@property
def gravatar_id(self):
"""
:type: string
"""
self._completeIfNotSet(self._gravatar_id)
return self._gravatar_id.value
@property
def hireable(self):
"""
:type: bool
"""
self._completeIfNotSet(self._hireable)
return self._hireable.value
@property
def html_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def location(self):
"""
:type: string
"""
self._completeIfNotSet(self._location)
return self._location.value
@property
def login(self):
"""
:type: string
"""
self._completeIfNotSet(self._login)
return self._login.value
@property
def name(self):
"""
:type: string
"""
self._completeIfNotSet(self._name)
return self._name.value
@property
def organizations_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._organizations_url)
return self._organizations_url.value
@property
def owned_private_repos(self):
"""
:type: integer
"""
self._completeIfNotSet(self._owned_private_repos)
return self._owned_private_repos.value
@property
def plan(self):
"""
:type: :class:`github.Plan.Plan`
"""
self._completeIfNotSet(self._plan)
return self._plan.value
@property
def private_gists(self):
"""
:type: integer
"""
self._completeIfNotSet(self._private_gists)
return self._private_gists.value
@property
def public_gists(self):
"""
:type: integer
"""
self._completeIfNotSet(self._public_gists)
return self._public_gists.value
@property
def public_repos(self):
"""
:type: integer
"""
self._completeIfNotSet(self._public_repos)
return self._public_repos.value
@property
def received_events_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._received_events_url)
return self._received_events_url.value
@property
def repos_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._repos_url)
return self._repos_url.value
@property
def site_admin(self):
"""
:type: bool
"""
self._completeIfNotSet(self._site_admin)
return self._site_admin.value
@property
def starred_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._starred_url)
return self._starred_url.value
@property
def subscriptions_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._subscriptions_url)
return self._subscriptions_url.value
@property
def total_private_repos(self):
"""
:type: integer
"""
self._completeIfNotSet(self._total_private_repos)
return self._total_private_repos.value
@property
def type(self):
"""
:type: string
"""
self._completeIfNotSet(self._type)
return self._type.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
def add_to_emails(self, *emails):
"""
:calls: `POST /user/emails <http://developer.github.com/v3/users/emails>`_
:param email: string
:rtype: None
"""
assert all(isinstance(element, str) for element in emails), emails
post_parameters = emails
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/user/emails",
input=post_parameters
)
def add_to_following(self, following):
"""
:calls: `PUT /user/following/:user <http://developer.github.com/v3/users/followers>`_
:param following: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(following, github.NamedUser.NamedUser), following
headers, data = self._requester.requestJsonAndCheck(
"PUT",
"/user/following/" + following._identity
)
def add_to_starred(self, starred):
"""
:calls: `PUT /user/starred/:owner/:repo <http://developer.github.com/v3/activity/starring>`_
:param starred: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(starred, github.Repository.Repository), starred
headers, data = self._requester.requestJsonAndCheck(
"PUT",
"/user/starred/" + starred._identity
)
def add_to_subscriptions(self, subscription):
"""
:calls: `PUT /user/subscriptions/:owner/:repo <http://developer.github.com/v3/activity/watching>`_
:param subscription: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(subscription, github.Repository.Repository), subscription
headers, data = self._requester.requestJsonAndCheck(
"PUT",
"/user/subscriptions/" + subscription._identity
)
def add_to_watched(self, watched):
"""
:calls: `PUT /repos/:owner/:repo/subscription <http://developer.github.com/v3/activity/watching>`_
:param watched: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(watched, github.Repository.Repository), watched
headers, data = self._requester.requestJsonAndCheck(
"PUT",
"/repos/" + watched._identity + "/subscription",
input={"subscribed": True}
)
def create_authorization(self, scopes=github.GithubObject.NotSet, note=github.GithubObject.NotSet, note_url=github.GithubObject.NotSet, client_id=github.GithubObject.NotSet, client_secret=github.GithubObject.NotSet, onetime_password=None):
"""
:calls: `POST /authorizations <http://developer.github.com/v3/oauth>`_
:param scopes: list of string
:param note: string
:param note_url: string
:param client_id: string
:param client_secret: string
:param onetime_password: string
:rtype: :class:`github.Authorization.Authorization`
"""
assert scopes is github.GithubObject.NotSet or all(isinstance(element, str) for element in scopes), scopes
assert note is github.GithubObject.NotSet or isinstance(note, str), note
assert note_url is github.GithubObject.NotSet or isinstance(note_url, str), note_url
assert client_id is github.GithubObject.NotSet or isinstance(client_id, str), client_id
assert client_secret is github.GithubObject.NotSet or isinstance(client_secret, str), client_secret
assert onetime_password is None or isinstance(onetime_password, str), onetime_password
post_parameters = dict()
if scopes is not github.GithubObject.NotSet:
post_parameters["scopes"] = scopes
if note is not github.GithubObject.NotSet:
post_parameters["note"] = note
if note_url is not github.GithubObject.NotSet:
post_parameters["note_url"] = note_url
if client_id is not github.GithubObject.NotSet:
post_parameters["client_id"] = client_id
if client_secret is not github.GithubObject.NotSet:
post_parameters["client_secret"] = client_secret
if onetime_password is not None:
request_header = {Consts.headerOTP: onetime_password} # pragma no cover (Should be covered)
else:
request_header = None
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/authorizations",
input=post_parameters,
headers=request_header,
)
return github.Authorization.Authorization(self._requester, headers, data, completed=True)
def create_fork(self, repo):
"""
:calls: `POST /repos/:owner/:repo/forks <http://developer.github.com/v3/repos/forks>`_
:param repo: :class:`github.Repository.Repository`
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(repo, github.Repository.Repository), repo
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/repos/" + repo.owner.login + "/" + repo.name + "/forks"
)
return github.Repository.Repository(self._requester, headers, data, completed=True)
def create_gist(self, public, files, description=github.GithubObject.NotSet):
"""
:calls: `POST /gists <http://developer.github.com/v3/gists>`_
:param public: bool
:param files: dict of string to :class:`github.InputFileContent.InputFileContent`
:param description: string
:rtype: :class:`github.Gist.Gist`
"""
assert isinstance(public, bool), public
assert all(isinstance(element, github.InputFileContent) for element in files.values()), files
assert description is github.GithubObject.NotSet or isinstance(description, str), description
post_parameters = {
"public": public,
"files": dict((key, value._identity) for key, value in files.items()),
}
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/gists",
input=post_parameters
)
return github.Gist.Gist(self._requester, headers, data, completed=True)
def create_key(self, title, key):
"""
:calls: `POST /user/keys <http://developer.github.com/v3/users/keys>`_
:param title: string
:param key: string
:rtype: :class:`github.UserKey.UserKey`
"""
assert isinstance(title, str), title
assert isinstance(key, str), key
post_parameters = {
"title": title,
"key": key,
}
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/user/keys",
input=post_parameters
)
return github.UserKey.UserKey(self._requester, headers, data, completed=True)
def create_repo(self, name, description=github.GithubObject.NotSet, homepage=github.GithubObject.NotSet,
private=github.GithubObject.NotSet, has_issues=github.GithubObject.NotSet,
has_wiki=github.GithubObject.NotSet, has_downloads=github.GithubObject.NotSet,
has_projects=github.GithubObject.NotSet, auto_init=github.GithubObject.NotSet, license_template=github.GithubObject.NotSet,
gitignore_template=github.GithubObject.NotSet, allow_squash_merge=github.GithubObject.NotSet,
allow_merge_commit=github.GithubObject.NotSet, allow_rebase_merge=github.GithubObject.NotSet):
"""
:calls: `POST /user/repos <http://developer.github.com/v3/repos>`_
:param name: string
:param description: string
:param homepage: string
:param private: bool
:param has_issues: bool
:param has_wiki: bool
:param has_downloads: bool
:param has_projects: bool
:param auto_init: bool
:param license_template: string
:param gitignore_template: string
:param allow_squash_merge: bool
:param allow_merge_commit: bool
:param allow_rebase_merge: bool
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(name, str), name
assert description is github.GithubObject.NotSet or isinstance(description, str), description
assert homepage is github.GithubObject.NotSet or isinstance(homepage, str), homepage
assert private is github.GithubObject.NotSet or isinstance(private, bool), private
assert has_issues is github.GithubObject.NotSet or isinstance(has_issues, bool), has_issues
assert has_wiki is github.GithubObject.NotSet or isinstance(has_wiki, bool), has_wiki
assert has_downloads is github.GithubObject.NotSet or isinstance(has_downloads, bool), has_downloads
assert has_projects is github.GithubObject.NotSet or isinstance(has_projects, bool), has_projects
assert auto_init is github.GithubObject.NotSet or isinstance(auto_init, bool), auto_init
assert license_template is github.GithubObject.NotSet or isinstance(license_template, str), license_template
assert gitignore_template is github.GithubObject.NotSet or isinstance(gitignore_template, str), gitignore_template
assert allow_squash_merge is github.GithubObject.NotSet or isinstance(allow_squash_merge, bool), allow_squash_merge
assert allow_merge_commit is github.GithubObject.NotSet or isinstance(allow_merge_commit, bool), allow_merge_commit
assert allow_rebase_merge is github.GithubObject.NotSet or isinstance(allow_rebase_merge, bool), allow_rebase_merge
post_parameters = {
"name": name,
}
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
if homepage is not github.GithubObject.NotSet:
post_parameters["homepage"] = homepage
if private is not github.GithubObject.NotSet:
post_parameters["private"] = private
if has_issues is not github.GithubObject.NotSet:
post_parameters["has_issues"] = has_issues
if has_wiki is not github.GithubObject.NotSet:
post_parameters["has_wiki"] = has_wiki
if has_downloads is not github.GithubObject.NotSet:
post_parameters["has_downloads"] = has_downloads
if has_projects is not github.GithubObject.NotSet:
post_parameters["has_projects"] = has_projects
if auto_init is not github.GithubObject.NotSet:
post_parameters["auto_init"] = auto_init
if license_template is not github.GithubObject.NotSet:
post_parameters["license_template"] = license_template
if gitignore_template is not github.GithubObject.NotSet:
post_parameters["gitignore_template"] = gitignore_template
if allow_squash_merge is not github.GithubObject.NotSet:
post_parameters["allow_squash_merge"] = allow_squash_merge
if allow_merge_commit is not github.GithubObject.NotSet:
post_parameters["allow_merge_commit"] = allow_merge_commit
if allow_rebase_merge is not github.GithubObject.NotSet:
post_parameters["allow_rebase_merge"] = allow_rebase_merge
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/user/repos",
input=post_parameters
)
return github.Repository.Repository(self._requester, headers, data, completed=True)
def edit(self, name=github.GithubObject.NotSet, email=github.GithubObject.NotSet, blog=github.GithubObject.NotSet, company=github.GithubObject.NotSet, location=github.GithubObject.NotSet, hireable=github.GithubObject.NotSet, bio=github.GithubObject.NotSet):
"""
:calls: `PATCH /user <http://developer.github.com/v3/users>`_
:param name: string
:param email: string
:param blog: string
:param company: string
:param location: string
:param hireable: bool
:param bio: string
:rtype: None
"""
assert name is github.GithubObject.NotSet or isinstance(name, str), name
assert email is github.GithubObject.NotSet or isinstance(email, str), email
assert blog is github.GithubObject.NotSet or isinstance(blog, str), blog
assert company is github.GithubObject.NotSet or isinstance(company, str), company
assert location is github.GithubObject.NotSet or isinstance(location, str), location
assert hireable is github.GithubObject.NotSet or isinstance(hireable, bool), hireable
assert bio is github.GithubObject.NotSet or isinstance(bio, str), bio
post_parameters = dict()
if name is not github.GithubObject.NotSet:
post_parameters["name"] = name
if email is not github.GithubObject.NotSet:
post_parameters["email"] = email
if blog is not github.GithubObject.NotSet:
post_parameters["blog"] = blog
if company is not github.GithubObject.NotSet:
post_parameters["company"] = company
if location is not github.GithubObject.NotSet:
post_parameters["location"] = location
if hireable is not github.GithubObject.NotSet:
post_parameters["hireable"] = hireable
if bio is not github.GithubObject.NotSet:
post_parameters["bio"] = bio
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
"/user",
input=post_parameters
)
self._useAttributes(data)
def get_authorization(self, id):
"""
:calls: `GET /authorizations/:id <http://developer.github.com/v3/oauth>`_
:param id: integer
:rtype: :class:`github.Authorization.Authorization`
"""
assert isinstance(id, int), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/authorizations/" + str(id)
)
return github.Authorization.Authorization(self._requester, headers, data, completed=True)
def get_authorizations(self):
"""
:calls: `GET /authorizations <http://developer.github.com/v3/oauth>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Authorization.Authorization`
"""
return github.PaginatedList.PaginatedList(
github.Authorization.Authorization,
self._requester,
"/authorizations",
None
)
def get_emails(self):
"""
:calls: `GET /user/emails <http://developer.github.com/v3/users/emails>`_
:rtype: list of string
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/user/emails"
)
return data
def get_events(self):
"""
:calls: `GET /events <http://developer.github.com/v3/activity/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
return github.PaginatedList.PaginatedList(
github.Event.Event,
self._requester,
"/events",
None
)
def get_followers(self):
"""
:calls: `GET /user/followers <http://developer.github.com/v3/users/followers>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
"/user/followers",
None
)
def get_following(self):
"""
:calls: `GET /user/following <http://developer.github.com/v3/users/followers>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
"/user/following",
None
)
def get_gists(self, since=github.GithubObject.NotSet):
"""
:calls: `GET /gists <http://developer.github.com/v3/gists>`_
:param since: datetime.datetime format YYYY-MM-DDTHH:MM:SSZ
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Gist.Gist`
"""
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
url_parameters = dict()
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
return github.PaginatedList.PaginatedList(
github.Gist.Gist,
self._requester,
"/gists",
url_parameters
)
def get_issues(self, filter=github.GithubObject.NotSet, state=github.GithubObject.NotSet, labels=github.GithubObject.NotSet, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, since=github.GithubObject.NotSet):
"""
:calls: `GET /issues <http://developer.github.com/v3/issues>`_
:param filter: string
:param state: string
:param labels: list of :class:`github.Label.Label`
:param sort: string
:param direction: string
:param since: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Issue.Issue`
"""
assert filter is github.GithubObject.NotSet or isinstance(filter, str), filter
assert state is github.GithubObject.NotSet or isinstance(state, str), state
assert labels is github.GithubObject.NotSet or all(isinstance(element, github.Label.Label) for element in labels), labels
assert sort is github.GithubObject.NotSet or isinstance(sort, str), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, str), direction
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
url_parameters = dict()
if filter is not github.GithubObject.NotSet:
url_parameters["filter"] = filter
if state is not github.GithubObject.NotSet:
url_parameters["state"] = state
if labels is not github.GithubObject.NotSet:
url_parameters["labels"] = ",".join(label.name for label in labels)
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
return github.PaginatedList.PaginatedList(
github.Issue.Issue,
self._requester,
"/issues",
url_parameters
)
def get_user_issues(self, filter=github.GithubObject.NotSet, state=github.GithubObject.NotSet, labels=github.GithubObject.NotSet, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, since=github.GithubObject.NotSet):
"""
:calls: `GET /user/issues <http://developer.github.com/v3/issues>`_
:param filter: string
:param state: string
:param labels: list of :class:`github.Label.Label`
:param sort: string
:param direction: string
:param since: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Issue.Issue`
"""
assert filter is github.GithubObject.NotSet or isinstance(filter, str), filter
assert state is github.GithubObject.NotSet or isinstance(state, str), state
assert labels is github.GithubObject.NotSet or all(isinstance(element, github.Label.Label) for element in labels), labels
assert sort is github.GithubObject.NotSet or isinstance(sort, str), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, str), direction
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
url_parameters = dict()
if filter is not github.GithubObject.NotSet:
url_parameters["filter"] = filter
if state is not github.GithubObject.NotSet:
url_parameters["state"] = state
if labels is not github.GithubObject.NotSet:
url_parameters["labels"] = ",".join(label.name for label in labels)
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
return github.PaginatedList.PaginatedList(
github.Issue.Issue,
            self._requester,
            "/user/issues",
url_parameters
)
def get_key(self, id):
"""
:calls: `GET /user/keys/:id <http://developer.github.com/v3/users/keys>`_
:param id: integer
:rtype: :class:`github.UserKey.UserKey`
"""
assert isinstance(id, int), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/user/keys/" + str(id)
)
return github.UserKey.UserKey(self._requester, headers, data, completed=True)
def get_keys(self):
"""
:calls: `GET /user/keys <http://developer.github.com/v3/users/keys>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.UserKey.UserKey`
"""
return github.PaginatedList.PaginatedList(
github.UserKey.UserKey,
self._requester,
"/user/keys",
None
)
def get_notification(self, id):
"""
        :calls: `GET /notifications/threads/:id <http://developer.github.com/v3/activity/notifications>`_
        :param id: string (the id of the notification thread)
        :rtype: :class:`github.Notification.Notification`
"""
assert isinstance(id, str), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/notifications/threads/" + id
)
return github.Notification.Notification(self._requester, headers, data, completed=True)
def get_notifications(self, all=github.GithubObject.NotSet, participating=github.GithubObject.NotSet):
"""
:calls: `GET /notifications <http://developer.github.com/v3/activity/notifications>`_
:param all: bool
:param participating: bool
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Notification.Notification`
"""
assert all is github.GithubObject.NotSet or isinstance(all, bool), all
assert participating is github.GithubObject.NotSet or isinstance(participating, bool), participating
params = dict()
if all is not github.GithubObject.NotSet:
params["all"] = all
if participating is not github.GithubObject.NotSet:
params["participating"] = participating
# TODO: implement parameter "since"
return github.PaginatedList.PaginatedList(
github.Notification.Notification,
self._requester,
"/notifications",
params
)
def get_organization_events(self, org):
"""
:calls: `GET /users/:user/events/orgs/:org <http://developer.github.com/v3/activity/events>`_
:param org: :class:`github.Organization.Organization`
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
assert isinstance(org, github.Organization.Organization), org
return github.PaginatedList.PaginatedList(
github.Event.Event,
self._requester,
"/users/" + self.login + "/events/orgs/" + org.login,
None
)
def get_orgs(self):
"""
:calls: `GET /user/orgs <http://developer.github.com/v3/orgs>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Organization.Organization`
"""
return github.PaginatedList.PaginatedList(
github.Organization.Organization,
self._requester,
"/user/orgs",
None
)
def get_repo(self, name):
"""
:calls: `GET /repos/:owner/:repo <http://developer.github.com/v3/repos>`_
:param name: string
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(name, str), name
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/repos/" + self.login + "/" + name
)
return github.Repository.Repository(self._requester, headers, data, completed=True)
def get_repos(self, visibility=github.GithubObject.NotSet, affiliation=github.GithubObject.NotSet, type=github.GithubObject.NotSet, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet):
"""
        :calls: `GET /user/repos <http://developer.github.com/v3/repos>`_
:param visibility: string
:param affiliation: string
:param type: string
:param sort: string
:param direction: string
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
assert visibility is github.GithubObject.NotSet or isinstance(visibility, str), visibility
assert affiliation is github.GithubObject.NotSet or isinstance(affiliation, str), affiliation
assert type is github.GithubObject.NotSet or isinstance(type, str), type
assert sort is github.GithubObject.NotSet or isinstance(sort, str), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, str), direction
url_parameters = dict()
if visibility is not github.GithubObject.NotSet:
url_parameters["visibility"] = visibility
if affiliation is not github.GithubObject.NotSet:
url_parameters["affiliation"] = affiliation
if type is not github.GithubObject.NotSet:
url_parameters["type"] = type
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
return github.PaginatedList.PaginatedList(
github.Repository.Repository,
self._requester,
"/user/repos",
url_parameters
)
def get_starred(self):
"""
:calls: `GET /user/starred <http://developer.github.com/v3/activity/starring>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
return github.PaginatedList.PaginatedList(
github.Repository.Repository,
self._requester,
"/user/starred",
None
)
def get_starred_gists(self):
"""
:calls: `GET /gists/starred <http://developer.github.com/v3/gists>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Gist.Gist`
"""
return github.PaginatedList.PaginatedList(
github.Gist.Gist,
self._requester,
"/gists/starred",
None
)
def get_subscriptions(self):
"""
:calls: `GET /user/subscriptions <http://developer.github.com/v3/activity/watching>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
return github.PaginatedList.PaginatedList(
github.Repository.Repository,
self._requester,
"/user/subscriptions",
None
)
def get_teams(self):
"""
:calls: `GET /user/teams <http://developer.github.com/v3/orgs/teams>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Team.Team`
"""
return github.PaginatedList.PaginatedList(
github.Team.Team,
self._requester,
"/user/teams",
None
)
def get_watched(self):
"""
:calls: `GET /user/subscriptions <http://developer.github.com/v3/activity/watching>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
return github.PaginatedList.PaginatedList(
github.Repository.Repository,
self._requester,
"/user/subscriptions",
None
)
def has_in_following(self, following):
"""
:calls: `GET /user/following/:user <http://developer.github.com/v3/users/followers>`_
:param following: :class:`github.NamedUser.NamedUser`
:rtype: bool
"""
assert isinstance(following, github.NamedUser.NamedUser), following
status, headers, data = self._requester.requestJson(
"GET",
"/user/following/" + following._identity
)
return status == 204
def has_in_starred(self, starred):
"""
:calls: `GET /user/starred/:owner/:repo <http://developer.github.com/v3/activity/starring>`_
:param starred: :class:`github.Repository.Repository`
:rtype: bool
"""
assert isinstance(starred, github.Repository.Repository), starred
status, headers, data = self._requester.requestJson(
"GET",
"/user/starred/" + starred._identity
)
return status == 204
def has_in_subscriptions(self, subscription):
"""
:calls: `GET /user/subscriptions/:owner/:repo <http://developer.github.com/v3/activity/watching>`_
:param subscription: :class:`github.Repository.Repository`
:rtype: bool
"""
assert isinstance(subscription, github.Repository.Repository), subscription
status, headers, data = self._requester.requestJson(
"GET",
"/user/subscriptions/" + subscription._identity
)
return status == 204
def has_in_watched(self, watched):
"""
:calls: `GET /repos/:owner/:repo/subscription <http://developer.github.com/v3/activity/watching>`_
:param watched: :class:`github.Repository.Repository`
:rtype: bool
"""
assert isinstance(watched, github.Repository.Repository), watched
status, headers, data = self._requester.requestJson(
"GET",
"/repos/" + watched._identity + "/subscription"
)
return status == 200
    def mark_notifications_as_read(self, last_read_at=None):
        """
        :calls: `PUT /notifications <https://developer.github.com/v3/activity/notifications>`_
        :param last_read_at: datetime (defaults to the current UTC time)
        """
        if last_read_at is None:
            # Evaluate "now" at call time; a datetime default in the signature
            # would be frozen once at import time.
            last_read_at = datetime.datetime.utcnow()
        assert isinstance(last_read_at, datetime.datetime)
put_parameters = {
"last_read_at": last_read_at.strftime('%Y-%m-%dT%H:%M:%SZ')
}
headers, data = self._requester.requestJsonAndCheck(
"PUT",
"/notifications",
input=put_parameters
)
def remove_from_emails(self, *emails):
"""
:calls: `DELETE /user/emails <http://developer.github.com/v3/users/emails>`_
:param email: string
:rtype: None
"""
assert all(isinstance(element, str) for element in emails), emails
post_parameters = emails
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
"/user/emails",
input=post_parameters
)
def remove_from_following(self, following):
"""
:calls: `DELETE /user/following/:user <http://developer.github.com/v3/users/followers>`_
:param following: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(following, github.NamedUser.NamedUser), following
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
"/user/following/" + following._identity
)
def remove_from_starred(self, starred):
"""
:calls: `DELETE /user/starred/:owner/:repo <http://developer.github.com/v3/activity/starring>`_
:param starred: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(starred, github.Repository.Repository), starred
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
"/user/starred/" + starred._identity
)
def remove_from_subscriptions(self, subscription):
"""
:calls: `DELETE /user/subscriptions/:owner/:repo <http://developer.github.com/v3/activity/watching>`_
:param subscription: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(subscription, github.Repository.Repository), subscription
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
"/user/subscriptions/" + subscription._identity
)
def remove_from_watched(self, watched):
"""
:calls: `DELETE /repos/:owner/:repo/subscription <http://developer.github.com/v3/activity/watching>`_
:param watched: :class:`github.Repository.Repository`
:rtype: None
"""
assert isinstance(watched, github.Repository.Repository), watched
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
"/repos/" + watched._identity + "/subscription"
)
def accept_invitation(self, invitation):
"""
        :calls: `PATCH /user/repository_invitations/:invitation_id <https://developer.github.com/v3/repos/invitations/>`_
:param invitation: :class:`github.Invitation.Invitation` or int
:rtype: None
"""
assert isinstance(invitation, github.Invitation.Invitation) or isinstance(invitation, int)
if isinstance(invitation, github.Invitation.Invitation):
invitation = invitation.id
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
"/user/repository_invitations/" + str(invitation),
input={}
)
def create_migration(self, repos, lock_repositories=github.GithubObject.NotSet, exclude_attachments=github.GithubObject.NotSet):
"""
:calls: `POST /user/migrations`_
:param repos: list or tuple of str
:param lock_repositories: bool
:param exclude_attachments: bool
:rtype: :class:`github.Migration.Migration`
"""
assert isinstance(repos, (list, tuple)), repos
assert all(isinstance(repo, str) for repo in repos), repos
assert lock_repositories is github.GithubObject.NotSet or isinstance(lock_repositories, bool), lock_repositories
assert exclude_attachments is github.GithubObject.NotSet or isinstance(exclude_attachments, bool), exclude_attachments
post_parameters = {
"repositories": repos
}
if lock_repositories is not github.GithubObject.NotSet:
post_parameters["lock_repositories"] = lock_repositories
if exclude_attachments is not github.GithubObject.NotSet:
post_parameters["exclude_attachments"] = exclude_attachments
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/user/migrations",
input=post_parameters,
headers={
"Accept": Consts.mediaTypeMigrationPreview
}
)
return github.Migration.Migration(self._requester, headers, data, completed=True)
def get_migrations(self):
"""
:calls: `GET /user/migrations`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Migration.Migration`
"""
return github.PaginatedList.PaginatedList(
github.Migration.Migration,
self._requester,
"/user/migrations",
None,
headers={
"Accept": Consts.mediaTypeMigrationPreview
}
)
def _initAttributes(self):
self._avatar_url = github.GithubObject.NotSet
self._bio = github.GithubObject.NotSet
self._blog = github.GithubObject.NotSet
self._collaborators = github.GithubObject.NotSet
self._company = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._disk_usage = github.GithubObject.NotSet
self._email = github.GithubObject.NotSet
self._events_url = github.GithubObject.NotSet
self._followers = github.GithubObject.NotSet
self._followers_url = github.GithubObject.NotSet
self._following = github.GithubObject.NotSet
self._following_url = github.GithubObject.NotSet
self._gists_url = github.GithubObject.NotSet
self._gravatar_id = github.GithubObject.NotSet
self._hireable = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._location = github.GithubObject.NotSet
self._login = github.GithubObject.NotSet
self._name = github.GithubObject.NotSet
self._organizations_url = github.GithubObject.NotSet
self._owned_private_repos = github.GithubObject.NotSet
self._plan = github.GithubObject.NotSet
self._private_gists = github.GithubObject.NotSet
self._public_gists = github.GithubObject.NotSet
self._public_repos = github.GithubObject.NotSet
self._received_events_url = github.GithubObject.NotSet
self._repos_url = github.GithubObject.NotSet
self._site_admin = github.GithubObject.NotSet
self._starred_url = github.GithubObject.NotSet
self._subscriptions_url = github.GithubObject.NotSet
self._total_private_repos = github.GithubObject.NotSet
self._type = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "avatar_url" in attributes: # pragma no branch
self._avatar_url = self._makeStringAttribute(attributes["avatar_url"])
if "bio" in attributes: # pragma no branch
self._bio = self._makeStringAttribute(attributes["bio"])
if "blog" in attributes: # pragma no branch
self._blog = self._makeStringAttribute(attributes["blog"])
if "collaborators" in attributes: # pragma no branch
self._collaborators = self._makeIntAttribute(attributes["collaborators"])
if "company" in attributes: # pragma no branch
self._company = self._makeStringAttribute(attributes["company"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "disk_usage" in attributes: # pragma no branch
self._disk_usage = self._makeIntAttribute(attributes["disk_usage"])
if "email" in attributes: # pragma no branch
self._email = self._makeStringAttribute(attributes["email"])
if "events_url" in attributes: # pragma no branch
self._events_url = self._makeStringAttribute(attributes["events_url"])
if "followers" in attributes: # pragma no branch
self._followers = self._makeIntAttribute(attributes["followers"])
if "followers_url" in attributes: # pragma no branch
self._followers_url = self._makeStringAttribute(attributes["followers_url"])
if "following" in attributes: # pragma no branch
self._following = self._makeIntAttribute(attributes["following"])
if "following_url" in attributes: # pragma no branch
self._following_url = self._makeStringAttribute(attributes["following_url"])
if "gists_url" in attributes: # pragma no branch
self._gists_url = self._makeStringAttribute(attributes["gists_url"])
if "gravatar_id" in attributes: # pragma no branch
self._gravatar_id = self._makeStringAttribute(attributes["gravatar_id"])
if "hireable" in attributes: # pragma no branch
self._hireable = self._makeBoolAttribute(attributes["hireable"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "location" in attributes: # pragma no branch
self._location = self._makeStringAttribute(attributes["location"])
if "login" in attributes: # pragma no branch
self._login = self._makeStringAttribute(attributes["login"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "organizations_url" in attributes: # pragma no branch
self._organizations_url = self._makeStringAttribute(attributes["organizations_url"])
if "owned_private_repos" in attributes: # pragma no branch
self._owned_private_repos = self._makeIntAttribute(attributes["owned_private_repos"])
if "plan" in attributes: # pragma no branch
self._plan = self._makeClassAttribute(github.Plan.Plan, attributes["plan"])
if "private_gists" in attributes: # pragma no branch
self._private_gists = self._makeIntAttribute(attributes["private_gists"])
if "public_gists" in attributes: # pragma no branch
self._public_gists = self._makeIntAttribute(attributes["public_gists"])
if "public_repos" in attributes: # pragma no branch
self._public_repos = self._makeIntAttribute(attributes["public_repos"])
if "received_events_url" in attributes: # pragma no branch
self._received_events_url = self._makeStringAttribute(attributes["received_events_url"])
if "repos_url" in attributes: # pragma no branch
self._repos_url = self._makeStringAttribute(attributes["repos_url"])
if "site_admin" in attributes: # pragma no branch
self._site_admin = self._makeBoolAttribute(attributes["site_admin"])
if "starred_url" in attributes: # pragma no branch
self._starred_url = self._makeStringAttribute(attributes["starred_url"])
if "subscriptions_url" in attributes: # pragma no branch
self._subscriptions_url = self._makeStringAttribute(attributes["subscriptions_url"])
if "total_private_repos" in attributes: # pragma no branch
self._total_private_repos = self._makeIntAttribute(attributes["total_private_repos"])
if "type" in attributes: # pragma no branch
self._type = self._makeStringAttribute(attributes["type"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
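# Hedged usage sketch, not part of PyGithub's API surface: it only illustrates
# how an AuthenticatedUser is normally reached through the public entry point.
# The token and repository name below are placeholders.
def _example_usage():
    from github import Github  # imported lazily to avoid a circular import
    gh = Github("<personal-access-token>")
    me = gh.get_user()  # returns an AuthenticatedUser
    print(me.login, me.public_repos)
    repo = me.create_repo("demo-repo", private=True, auto_init=True)
    me.add_to_starred(repo)
    return repo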
|
the-stack_0_10655 | import unittest
from ultrasonic.driver import UltrasonicDriver
class UltrasonicSensorTest(unittest.TestCase):
def test_parse_data(self):
test_data = "SensorA: 34\nSensorB: 0\nSensorC: 0\nSensorA: 40\nSensorD: 0"
parsed_data = []
for line in test_data.split("\n"):
parsed_data.append(UltrasonicDriver.parse_data(line))
self.assertIn(("A", 0.34), parsed_data)
if __name__ == '__main__':
unittest.main() |
the-stack_0_10656 | import os
import re
from poetry.semver import Version
from poetry.version.requirements import Requirement
from .dependency import Dependency
from .dependency_package import DependencyPackage
from .directory_dependency import DirectoryDependency
from .file_dependency import FileDependency
from .locker import Locker
from .package import Package
from .package_collection import PackageCollection
from .project_package import ProjectPackage
from .utils.link import Link
from .utils.utils import convert_markers
from .utils.utils import group_markers
from .utils.utils import is_archive_file
from .utils.utils import is_installable_dir
from .utils.utils import is_url
from .utils.utils import path_to_url
from .utils.utils import strip_extras
from .vcs_dependency import VCSDependency
def dependency_from_pep_508(name):
# Removing comments
parts = name.split("#", 1)
name = parts[0].strip()
if len(parts) > 1:
rest = parts[1]
if ";" in rest:
name += ";" + rest.split(";", 1)[1]
req = Requirement(name)
if req.marker:
markers = convert_markers(req.marker)
else:
markers = {}
name = req.name
path = os.path.normpath(os.path.abspath(name))
link = None
if is_url(name):
link = Link(name)
else:
p, extras = strip_extras(path)
if os.path.isdir(p) and (os.path.sep in name or name.startswith(".")):
if not is_installable_dir(p):
raise ValueError(
"Directory {!r} is not installable. File 'setup.py' "
"not found.".format(name)
)
link = Link(path_to_url(p))
elif is_archive_file(p):
link = Link(path_to_url(p))
# it's a local file, dir, or url
if link:
# Handle relative file URLs
if link.scheme == "file" and re.search(r"\.\./", link.url):
link = Link(path_to_url(os.path.normpath(os.path.abspath(link.path))))
# wheel file
if link.is_wheel:
m = re.match(r"^(?P<namever>(?P<name>.+?)-(?P<ver>\d.*?))", link.filename)
if not m:
raise ValueError("Invalid wheel name: {}".format(link.filename))
name = m.group("name")
version = m.group("ver")
dep = Dependency(name, version)
else:
name = link.egg_fragment
if link.scheme == "git":
dep = VCSDependency(name, "git", link.url_without_fragment)
else:
dep = Dependency(name, "*")
else:
if req.pretty_constraint:
constraint = req.constraint
else:
constraint = "*"
dep = Dependency(name, constraint)
if "extra" in markers:
# If we have extras, the dependency is optional
dep.deactivate()
for or_ in markers["extra"]:
for _, extra in or_:
dep.in_extras.append(extra)
if "python_version" in markers:
ors = []
for or_ in markers["python_version"]:
ands = []
for op, version in or_:
# Expand python version
if op == "==":
version = "~" + version
op = ""
elif op == "!=":
version += ".*"
elif op in ("<=", ">"):
parsed_version = Version.parse(version)
if parsed_version.precision == 1:
if op == "<=":
op = "<"
version = parsed_version.next_major.text
elif op == ">":
op = ">="
version = parsed_version.next_major.text
elif parsed_version.precision == 2:
if op == "<=":
op = "<"
version = parsed_version.next_minor.text
elif op == ">":
op = ">="
version = parsed_version.next_minor.text
elif op in ("in", "not in"):
versions = []
for v in re.split("[ ,]+", version):
split = v.split(".")
if len(split) in [1, 2]:
split.append("*")
op_ = "" if op == "in" else "!="
else:
op_ = "==" if op == "in" else "!="
versions.append(op_ + ".".join(split))
glue = " || " if op == "in" else ", "
if versions:
ands.append(glue.join(versions))
continue
ands.append("{}{}".format(op, version))
ors.append(" ".join(ands))
dep.python_versions = " || ".join(ors)
if req.marker:
dep.marker = req.marker
# Extras
for extra in req.extras:
dep.extras.append(extra)
return dep
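# Hedged sketch, not part of poetry's API surface: it only illustrates calling
# the helper above on a made-up PEP 508 string.  The attribute names follow
# the assignments in this module.
def _example_dependency_from_pep_508():
    dep = dependency_from_pep_508(
        'requests[security]>=2.18.0; python_version >= "3.6"')
    print(dep.name, dep.extras)  # package name and requested extras
    print(dep.python_versions)   # python constraint derived from the marker
    return dep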
|
the-stack_0_10658 | """
Message delivery
Various interfaces to messaging services. Currently:
- ``pushover`` - a platform for sending and receiving push notifications
is supported.
AUTHORS:
- Martin Albrecht (2012) - initial implementation
"""
import http.client as httplib
from urllib.parse import urlencode
from ssl import SSLContext
pushover_defaults = {"token": "Eql67F14ohOZJ0AtEBJJU7FiLAk8wK"}
def pushover(message, **kwds):
"""
Send a push notification with ``message`` to ``user`` using https://pushover.net/.
Pushover is a platform for sending and receiving push notifications. On the server side, it
provides an HTTP API for queueing messages to deliver to devices. On the device side, iOS and
Android clients receive those push notifications, show them to the user, and store them for
offline viewing.
An account on https://pushover.net is required and the Pushover app must be installed on your
phone for this function to be able to deliver messages to you.
INPUT:
- ``message`` - your message
- ``user`` - the user key (not e-mail address) of your user (or you), viewable when logged
into the Pushover dashboard. (default: ``None``)
- ``device`` - your user's device identifier to send the message directly to that device,
rather than all of the user's devices (default: ``None``)
- ``title`` - your message's title, otherwise uses your app's name (default: ``None``)
- ``url`` - a supplementary URL to show with your message (default: ``None``)
- ``url_title`` - a title for your supplementary URL (default: ``None``)
- ``priority`` - set to 1 to display as high-priority and bypass quiet hours, or -1 to always
send as a quiet notification (default: ``0``)
- ``timestamp`` - set to a unix timestamp to have your message show with a particular time,
rather than now (default: ``None``)
- ``sound`` - set to the name of one of the sounds supported by device clients to override the
user's default sound choice (default: ``None``)
- ``token`` - your application's API token (default: Sage's default App token)
EXAMPLES::
sage: import sage.misc.messaging
sage: sage.misc.messaging.pushover("Hi, how are you?", user="XXX") # not tested
To set default values populate ``pushover_defaults``::
sage: sage.misc.messaging.pushover_defaults["user"] = "USER_TOKEN"
sage: sage.misc.messaging.pushover("Hi, how are you?") # not tested
.. note::
You may want to populate ``sage.misc.messaging.pushover_defaults`` with default values such
as the default user in ``$HOME/.sage/init.sage``.
"""
request = {"message": message}
request.update(pushover_defaults)
request.update(kwds)
conn = httplib.HTTPSConnection("api.pushover.net:443", context=SSLContext())
conn.request("POST", "/1/messages.json",
urlencode(request),
{"Content-type": "application/x-www-form-urlencoded"})
return conn.getresponse().status == 200
|
the-stack_0_10660 | """
@file
@brief Helpers to run examples created with function
@see fn export2tf2onnx.
"""
import collections
import inspect
import numpy
from onnx.numpy_helper import from_array
from onnx.helper import (
make_node, make_graph, make_model, set_model_props, make_tensor)
from onnx import AttributeProto
from ..onnx2py_helper import guess_dtype, guess_proto_dtype
from ..onnx_tools import ensure_topological_order
_make_name_id = 0
def make_name(name):
"Creates a unique name."
global _make_name_id # pylint: disable=W0603
name = "%s_%d" % (name, _make_name_id)
_make_name_id += 1
return name
def make_sure(cond, msg, *args):
"Raises an exception if cond is not verified."
if not cond:
raise RuntimeError(msg % tuple(args))
def map_onnx_to_numpy_type(onnx_dtype):
"Converts ONNX type into numpy type."
return guess_dtype(onnx_dtype)
class tf_op:
"""
Decorator to register any new converter.
:param name: type of the operator to rewrite
:param domain: domain
"""
_OPSETS = collections.OrderedDict()
def __init__(self, name, domain='', **kwargs):
if not isinstance(name, list):
name = [name]
self.names = name
self.domain = domain
self.kwargs = kwargs
def __call__(self, func):
for ke, va in inspect.getmembers(func, inspect.ismethod):
if ke.startswith("version_"):
version = int(ke.replace("version_", ""))
self._register_handler(
va, version, self.names, self.domain, self.kwargs)
return func
def _register_handler(self, func, version, names, domain, kwargs):
opset = tf_op._OPSETS.get(domain)
if not opset:
opset = []
tf_op._OPSETS[domain] = opset
while version >= len(opset):
opset.append({})
opset_dict = opset[version]
for name in names:
opset_dict[name] = (func, kwargs)
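# Illustrative sketch (not part of the original code): the expected shape of a
# converter registered through the ``tf_op`` decorator above. The operator name
# and the class are hypothetical; ``ctx`` is the converting graph object that
# invokes the handler, so a real implementation would rewrite ``node`` through
# helpers such as ``ctx.make_node`` or ``ctx.make_const``.
#
# @tf_op("HypotheticalOp")
# class ConvertHypotheticalOp:
#     @classmethod
#     def version_13(cls, ctx, node, **kwargs):
#         # replace the node by an Identity keeping the same outputs
#         ctx.remove_node(node.name)
#         ctx.make_node("Identity", list(node.input), outputs=list(node.output),
#                       name=make_name("HypotheticalOp"))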
class Tf2OnnxConvert:
"""
Applies the converter on an ONNX graph.
:param onnx_model: ONNX graph
    :param _tf_op: class which registers the converters
    :param verbose: verbosity
    :param target_opset: targeted opsets
"""
def __init__(self, onnx_model, _tf_op=None, verbose=None,
target_opset=None):
self._onnx_model = onnx_model
self._tf_op = _tf_op or tf_op
self.verbose = verbose
if isinstance(target_opset, int):
self.target_opsets = {'': target_opset}
elif isinstance(target_opset, dict):
self.target_opsets = target_opset
elif target_opset is None:
opsets = {}
for oimp in onnx_model.opset_import:
if oimp.domain == '':
opsets[oimp.domain] = oimp.version
opset = oimp.version
else:
                    opsets[oimp.domain] = oimp.version
self.target_opsets = opsets
else:
raise ValueError( # pragma: no cover
"Unexepected value for target_opset=%r." % target_opset)
self._names = {}
for node in onnx_model.graph.node:
self._names[node.name] = node
for init in onnx_model.graph.initializer:
self._names[init.name] = init
# _forbidden_new_names contains current names and deleted names.
self._forbidden_new_names = set(self._names)
if '' in self.target_opsets:
self.opset = self.target_opsets['']
if not hasattr(self, 'opset'):
raise RuntimeError( # pragma: no cover
"Attribute opset is missing, target_opset=%r." % target_opset)
def get_node_by_name(self, name):
"""
Retrieves a node by its name.
:param name: node name
        :return: node
"""
if name not in self._names:
raise RuntimeError(
"Unable to find node name %r among %r." % (
name, ", ".join(sorted(self._names))))
return self._names[name]
def _add_node_name(self, obj):
"""
        Registers an object in the graph by its name.
        :param obj: node or initializer
"""
if obj.name in self._forbidden_new_names:
raise RuntimeError(
"Name %r is already registered." % obj.name)
self._names[obj.name] = obj
self._forbidden_new_names.add(obj.name)
def make_node(self, op_type, inputs, attr=None, outputs=None,
name=None, domain='', output_count=1):
"""
Adds a node to the list of nodes.
:param op_type: operator type
:param inputs: list of strings
:param attr: dictionary of attributes
:param outputs: None or list of strings
:param output_count: used if outputs is None to guess
the number of outputs of this node
:param name: name of the node
:param domain: domain
:return: created node
"""
if self.verbose:
print("[Tf2OnnxConvert.make_node] op_type=%r inputs=%r" % (
op_type, inputs))
if attr is None:
attr = {}
if name is None:
name = make_name(op_type)
if name in self._names:
raise RuntimeError(
"Node name %r already exists in %r." % (
name, ", ".join(sorted(self._names))))
if outputs is None:
outputs = [(name + ":" + str(i)) for i in range(output_count)]
output_count = len(outputs)
raw_attr = {}
onnx_attrs = []
for a, v in attr.items():
if isinstance(v, AttributeProto):
onnx_attrs.append(v)
else:
raw_attr[a] = v
onnx_node = make_node(
op_type, inputs, outputs, name=name, domain=domain, **raw_attr)
self._add_node_name(onnx_node)
return onnx_node
def make_const(self, name, np_val, skip_conversion=False, raw=True):
"""
        Makes a new constant in the graph.
:param name: const node name, must be unique.
:param np_val: value of type numpy ndarray.
:param skip_conversion:
bool, indicate whether this created node would be mapped
during conversion
:param raw: whether to store data at field of raw_data or the
specific field according to its dtype
:return: create initializer
"""
if name in self._names:
raise RuntimeError(
"Initializer name %r already exists in %r." % (
name, ", ".join(sorted(self._names))))
np_val_flat = np_val.flatten()
is_bytes = (np_val.dtype == numpy.object and len(np_val_flat) > 0 and
isinstance(np_val_flat[0], bytes))
if raw and not is_bytes:
onnx_tensor = from_array(np_val, name)
else:
onnx_tensor = make_tensor(
name, guess_proto_dtype(np_val.dtype),
np_val.shape, np_val_flat, raw=False)
self._add_node_name(onnx_tensor)
return onnx_tensor
def get_dtype(self, input_name):
"""
Returns the type of one node or None if unknown.
:param input_name: result name
:return: numpy dtype
"""
inputs = self._onnx_model.graph.input
names = [_.name for _ in inputs]
if input_name not in names:
return None # pragma: no cover
ind = names.index(input_name)
return inputs[ind].type.tensor_type.elem_type
def replace_all_inputs(self, old_name, new_name):
"""
        Every node taking *old_name* as input will take *new_name* instead.
Looks in the output as well but in that case, it creates an identity
node to avoid changing an output name.
:param old_name: name to replace
:param new_name: new name
:return: list of impacted nodes
"""
res = []
for node in self._names.values():
if not hasattr(node, 'input'):
continue
if old_name not in node.input:
continue
            new_inputs = [new_name if i == old_name else i
                          for i in node.input]
node.input[:] = new_inputs[:]
res.append(node)
if self.verbose:
print("[Tf2OnnxConvert.replace_all_inputs] replace %r by %r in node %r" % (
old_name, new_name, node.name))
for o in self._onnx_model.graph.output:
if o.name != old_name:
continue
n = self.make_node("Identity", [new_name], outputs=[old_name],
name=make_name("IdOutputReplaced"))
res.append(n)
if self.verbose:
print("[Tf2OnnxConvert.replace_all_inputs] add id node from %r to %r "
"with node %r." % (
old_name, new_name, n.name)) # pylint: disable=E1101
return res
def remove_node(self, name):
"""
Removes a node name from the list.
"""
if name not in self._names:
raise RuntimeError(
"Unable to delete name %r because it does not exists." % name)
del self._names[name]
if self.verbose:
print("[Tf2OnnxConvert.remove_node] delete name %r" % name)
def get_shape(self, input_name):
"""
        Returns the shape of one node or None if unknown.
        :param input_name: result name
        :return: shape (tuple of dimensions)
"""
inputs = self._onnx_model.graph.input
names = [_.name for _ in inputs]
if input_name not in names:
return None # pragma: no cover
ind = names.index(input_name)
dims = inputs[ind].type.tensor_type.shape.dim
return tuple(dims)
def run(self):
"""
Calls the registered converters on the graph
held by this instance. Returns the new onnx graph.
:return: ONNX graph
"""
if len(self._tf_op._OPSETS) == 0:
raise RuntimeError( # pragma: no cover
"No converter was registered.")
if self.verbose:
print("[Tf2OnnxConvert.run]")
done = {}
modif = 1
while modif > 0:
modif = 0
# The converter may alter the current list of nodes, we freeze it.
current_values = list(self._names.values())
for node in current_values:
if not hasattr(node, 'domain'):
# initializer
continue
if done.get(node.name, False):
continue
domain = node.domain
if domain not in self._tf_op._OPSETS:
continue
# look for a converter
rews = self._tf_op._OPSETS[domain]
target = min(self.target_opsets[domain], len(rews))
conv = None
for i in range(len(rews) - 1, -1, -1):
if node.op_type in rews[i]:
conv = rews[i][node.op_type]
break
if conv is None:
continue
# applies the converter
if self.verbose:
print("[Tf2OnnxConvert.run] convert node type=%r opset=%r name=%r"
"" % (node.op_type, target, node.name))
fct, kwargs = conv
fct(self, node, target_opset=target, **kwargs)
modif += 1
return self.make_model()
def make_model(self):
"""
Produces the new ONNX graph with the updated sets of nodes.
"""
inputs = self._onnx_model.graph.input
outputs = self._onnx_model.graph.output
inits = [init[1] for init in sorted(self._names.items())
if not hasattr(init[1], 'domain')]
nodes = [node[1] for node in sorted(self._names.items())
if hasattr(node[1], 'domain')]
nodes = ensure_topological_order(inputs, inits, nodes)
if self.verbose:
print(
"[Tf2OnnxConvert.make_node] %d nodes %d inputs %d "
"outputs %d initializers"
"" % (len(nodes), len(inputs), len(outputs), len(inits)))
graph = make_graph(nodes, self._onnx_model.graph.name,
inputs, outputs, inits)
onnx_model = make_model(graph)
onnx_model.ir_version = self._onnx_model.ir_version
onnx_model.producer_name = self._onnx_model.producer_name + "-mlprodict"
onnx_model.producer_version = self._onnx_model.producer_version
onnx_model.domain = self._onnx_model.domain
onnx_model.model_version = self._onnx_model.model_version
onnx_model.doc_string = self._onnx_model.doc_string
metadata = {p.key: p.value for p in self._onnx_model.metadata_props}
set_model_props(onnx_model, metadata)
# opsets
del onnx_model.opset_import[:] # pylint: disable=E1101
for dom, value in self.target_opsets.items():
op_set = onnx_model.opset_import.add() # pylint: disable=E1101
op_set.domain = dom
op_set.version = value
return onnx_model
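# Illustrative usage sketch (not part of the original code): assuming
# ``model_onnx`` is an already loaded ONNX ModelProto and that the relevant
# converters were registered with the ``tf_op`` decorator, the conversion is
# driven as follows.
#
# converter = Tf2OnnxConvert(model_onnx, verbose=1, target_opset=13)
# new_model_onnx = converter.run()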
class GraphBuilder:
"""
Helpers to build graph.
    :param graph: graph the builder operates on
"""
def __init__(self, graph):
self._g = graph
@property
def graph(self):
"Returns the graph."
return self._g
def make_slice(self, kwargs, name=None, shapes=None, dtypes=None, return_node=False):
"""
slice changes its schema at opset 10: it treats some attributes as dynamic input
so this function has to process inputs according to graph's opset version
to get "inputs" and "attr" to feed "make_node"
kwargs: key could be ["data", "starts", "ends", "axes", "steps", "outputs"].
"""
outputs = kwargs.pop("outputs", None)
if self.graph.opset < 10:
# "data" is string
# "starts", "ends" and "axes" are attributes, and "axes" is optional.
data = kwargs.pop("data")
starts = self._convert_to_attribute(kwargs.pop("starts"))
ends = self._convert_to_attribute(kwargs.pop("ends"))
axes = self._convert_to_attribute(
kwargs.pop("axes", None), is_optional=True)
attr = {"starts": starts, "ends": ends, "axes": axes}
inputs = [data]
else:
            # slice-10 has 3 required inputs "data", "starts", "ends"
# and 2 optional inputs "axes", "steps"
# input sequence should be "data", "starts", "ends", "axes", "steps"
attr = {}
data = kwargs.pop("data")
starts = self._convert_to_input(kwargs.pop(
"starts"), "const_starts", dtype=numpy.int64)
ends = self._convert_to_input(kwargs.pop(
"ends"), "const_ends", dtype=numpy.int64)
axes = self._convert_to_input(kwargs.pop(
"axes", None), "const_axes", is_optional=True, dtype=numpy.int64)
steps = self._convert_to_input(kwargs.pop(
"steps", None), "const_steps", is_optional=True, dtype=numpy.int64)
            inputs = [data, starts.name, ends.name,
                      axes.name if axes is not None else None,
                      steps.name if steps is not None else None]
        # post-process inputs and attr
make_sure(not kwargs, "kwargs contains un-used key")
new_attr = {}
for key, val in attr.items():
if val is not None:
new_attr[key] = val
attr = new_attr
for ind, val in enumerate(inputs):
if val is None:
inputs[ind] = "" # empty string means no connection in ONNX
        # remove trailing ""
while inputs[-1] == "":
inputs = inputs[:-1]
if self.graph.opset >= 10:
dtype = self.graph.get_dtype(inputs[1])
for input_data in inputs[1:]:
if input_data != "":
make_sure(dtype == self.graph.get_dtype(
input_data), "dtype should be same")
node = self.graph.make_node(op_type="Slice", inputs=inputs, attr=attr, name=name,
outputs=outputs, shapes=shapes, dtypes=dtypes)
if return_node:
return node
raise NotImplementedError("return_node must be True")
def make_squeeze(self, kwargs, name=None, shapes=None, dtypes=None, return_node=False, op_name_scope=None):
"""
Squeeze changes its schema at opset 13: it treats axes as a dynamic input
kwargs: key could be ["data", "axes"].
"""
outputs = kwargs.pop("outputs", None)
if self.graph.opset < 13:
data = kwargs.pop("data")
axes = self._convert_to_attribute(
kwargs.pop("axes", None), is_optional=True)
attr = {"axes": axes}
inputs = [data]
else:
data = kwargs.pop("data")
axes = self._convert_to_input(kwargs.pop(
"axes", None), "const_axes", is_optional=True, dtype=numpy.int64)
attr = {}
            inputs = [data, axes.name if axes is not None else None]
make_sure(not kwargs, "kwargs contains un-used key")
new_attr = {}
for key, val in attr.items():
if val is not None:
new_attr[key] = val
attr = new_attr
for ind, val in enumerate(inputs):
if val is None:
inputs[ind] = "" # empty string means no connection in ONNX
        # remove trailing ""
while inputs[-1] == "":
inputs = inputs[:-1]
node = self.graph.make_node(op_type="Squeeze", inputs=inputs, attr=attr, name=name,
outputs=outputs)
if return_node:
return node
raise NotImplementedError("return_node must be True")
def make_unsqueeze(self, kwargs, name=None, shapes=None, dtypes=None, return_node=False, op_name_scope=None):
"""
Unsqueeze changes its schema at opset 13: it treats axes as a dynamic input
kwargs: key could be ["data", "axes"].
"""
outputs = kwargs.pop("outputs", None)
if self.graph.opset < 13:
data = kwargs.pop("data")
axes = self._convert_to_attribute(
kwargs.pop("axes", None), is_optional=True)
attr = {"axes": axes}
inputs = [data]
else:
data = kwargs.pop("data")
axes = self._convert_to_input(kwargs.pop(
"axes", None), "const_axes", is_optional=True, dtype=numpy.int64)
attr = {}
            inputs = [data, axes.name if axes is not None else None]
make_sure(not kwargs, "kwargs contains un-used key")
new_attr = {}
for key, val in attr.items():
if val is not None:
new_attr[key] = val
attr = new_attr
for ind, val in enumerate(inputs):
if val is None:
inputs[ind] = "" # empty string means no connection in ONNX
        # remove trailing ""
while inputs[-1] == "":
inputs = inputs[:-1]
node = self.graph.make_node(op_type="Unsqueeze", inputs=inputs, attr=attr, name=name,
outputs=outputs)
if return_node:
return node
raise NotImplementedError("return_node must be True")
def _convert_to_input(self, tensor, const_name, is_optional=False, dtype=None):
"""in ONNX, input shold come from node, so it must be a string"""
if is_optional and tensor is None:
return None
make_sure(tensor is not None,
"input is required so it couldn't be None")
res = tensor
if isinstance(tensor, list):
res = self.graph.make_const(
make_name(const_name), numpy.array(tensor, dtype))
return res
def _convert_to_attribute(self, tensor, is_optional=False):
if is_optional and tensor is None:
return None
make_sure(tensor is not None,
"input is required so it couldn't be None")
res = tensor
if isinstance(tensor, str):
const_node = self.graph.get_node_by_output(tensor)
res = const_node.get_tensor_value(as_list=True)
make_sure(isinstance(res, list),
"input is an attr, so a list is needed")
return res
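# Illustrative usage sketch (not part of the original code): ``g`` stands for a
# graph object exposing the interface used above (``opset``, ``make_node``,
# ``make_const``, ``get_dtype``, ...), for instance a :class:`Tf2OnnxConvert`
# instance; the input name "X" is hypothetical.
#
# builder = GraphBuilder(g)
# sliced = builder.make_slice(
#     {"data": "X", "starts": [0], "ends": [3], "axes": [0]},
#     name=make_name("slice"), return_node=True)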
|
the-stack_0_10661 | from lbry.testcase import CommandTestCase
class AddressManagement(CommandTestCase):
async def test_address_list(self):
addresses = await self.out(self.daemon.jsonrpc_address_list())
self.assertEqual(27, len(addresses))
single = await self.out(self.daemon.jsonrpc_address_list(addresses[11]['address']))
self.assertEqual(1, len(single))
self.assertEqual(single[0], addresses[11])
|
the-stack_0_10662 | #!/usr/bin/env python
"""
ZetCode wxPython tutorial
In this example, we create a wx.ListBox widget.
author: Jan Bodnar
website: www.zetcode.com
last modified: July 2020
"""
import wx
class Example(wx.Frame):
def __init__(self, *args, **kw):
super(Example, self).__init__(*args, **kw)
self.InitUI()
def InitUI(self):
panel = wx.Panel(self)
hbox = wx.BoxSizer(wx.HORIZONTAL)
self.listbox = wx.ListBox(panel)
hbox.Add(self.listbox, wx.ID_ANY, wx.EXPAND | wx.ALL, 20)
btnPanel = wx.Panel(panel)
vbox = wx.BoxSizer(wx.VERTICAL)
newBtn = wx.Button(btnPanel, wx.ID_ANY, 'New', size=(90, 30))
renBtn = wx.Button(btnPanel, wx.ID_ANY, 'Rename', size=(90, 30))
delBtn = wx.Button(btnPanel, wx.ID_ANY, 'Delete', size=(90, 30))
clrBtn = wx.Button(btnPanel, wx.ID_ANY, 'Clear', size=(90, 30))
self.Bind(wx.EVT_BUTTON, self.NewItem, id=newBtn.GetId())
self.Bind(wx.EVT_BUTTON, self.OnRename, id=renBtn.GetId())
self.Bind(wx.EVT_BUTTON, self.OnDelete, id=delBtn.GetId())
self.Bind(wx.EVT_BUTTON, self.OnClear, id=clrBtn.GetId())
self.Bind(wx.EVT_LISTBOX_DCLICK, self.OnRename)
vbox.Add((-1, 20))
vbox.Add(newBtn)
vbox.Add(renBtn, 0, wx.TOP, 5)
vbox.Add(delBtn, 0, wx.TOP, 5)
vbox.Add(clrBtn, 0, wx.TOP, 5)
btnPanel.SetSizer(vbox)
hbox.Add(btnPanel, 0.6, wx.EXPAND | wx.RIGHT, 20)
panel.SetSizer(hbox)
self.SetTitle('wx.ListBox')
self.Centre()
def NewItem(self, event):
text = wx.GetTextFromUser('Enter a new item', 'Insert dialog')
if text != '':
self.listbox.Append(text)
def OnRename(self, event):
sel = self.listbox.GetSelection()
text = self.listbox.GetString(sel)
renamed = wx.GetTextFromUser('Rename item', 'Rename dialog', text)
if renamed != '':
self.listbox.Delete(sel)
item_id = self.listbox.Insert(renamed, sel)
self.listbox.SetSelection(item_id)
def OnDelete(self, event):
sel = self.listbox.GetSelection()
if sel != -1:
self.listbox.Delete(sel)
def OnClear(self, event):
self.listbox.Clear()
def main():
app = wx.App()
ex = Example(None)
ex.Show()
app.MainLoop()
if __name__ == '__main__':
main() |
the-stack_0_10664 | from abc import (
ABC,
abstractmethod
)
from argparse import (
ArgumentParser,
Namespace,
_SubParsersAction,
)
import asyncio
from enum import (
auto,
Enum,
)
import logging
from multiprocessing import (
Process
)
from typing import (
Any,
Dict,
NamedTuple,
)
from lahja import (
BaseEvent,
)
from trinity.config import (
TrinityConfig
)
from trinity.endpoint import (
TrinityEventBusEndpoint,
)
from trinity.extensibility.events import (
PluginStartedEvent,
)
from trinity.extensibility.exceptions import (
InvalidPluginStatus,
)
from trinity._utils.mp import (
ctx,
)
from trinity._utils.logging import (
setup_log_levels,
setup_queue_logging,
)
from trinity._utils.os import (
friendly_filename_or_url,
)
class PluginStatus(Enum):
NOT_READY = auto()
READY = auto()
STARTED = auto()
STOPPED = auto()
INVALID_START_STATUS = (PluginStatus.NOT_READY, PluginStatus.STARTED,)
class TrinityBootInfo(NamedTuple):
args: Namespace
trinity_config: TrinityConfig
boot_kwargs: Dict[str, Any] = None
class BasePlugin(ABC):
_status: PluginStatus = PluginStatus.NOT_READY
def __init__(self, boot_info: TrinityBootInfo) -> None:
self.boot_info = boot_info
@property
@abstractmethod
def event_bus(self) -> TrinityEventBusEndpoint:
pass
@property
@abstractmethod
def name(self) -> str:
"""
Describe the name of the plugin.
"""
pass
@property
def normalized_name(self) -> str:
"""
The normalized (computer readable) name of the plugin
"""
return friendly_filename_or_url(self.name)
@classmethod
def get_logger(cls) -> logging.Logger:
return logging.getLogger(f'trinity.extensibility.plugin(#{cls.__name__})')
@property
def logger(self) -> logging.Logger:
return self.get_logger()
@property
def running(self) -> bool:
"""
Return ``True`` if the ``status`` is ``PluginStatus.STARTED``, otherwise return ``False``.
"""
return self._status is PluginStatus.STARTED
@property
def status(self) -> PluginStatus:
"""
Return the current :class:`~trinity.extensibility.plugin.PluginStatus` of the plugin.
"""
return self._status
def ready(self, manager_eventbus: TrinityEventBusEndpoint) -> None:
"""
Set the ``status`` to ``PluginStatus.READY`` and delegate to
:meth:`~trinity.extensibility.plugin.BasePlugin.on_ready`
"""
self._status = PluginStatus.READY
self.on_ready(manager_eventbus)
def on_ready(self, manager_eventbus: TrinityEventBusEndpoint) -> None:
"""
Notify the plugin that it is ready to bootstrap itself.
The ``manager_eventbus`` refers to the instance of the
:class:`~lahja.endpoint.Endpoint` that the
:class:`~trinity.extensibility.plugin_manager.PluginManager` uses which may or may not
be the same :class:`~lahja.endpoint.Endpoint` as the plugin uses depending on the type
of the plugin. The plugin should use this :class:`~lahja.endpoint.Endpoint` instance to
listen for events *before* the plugin has started.
"""
pass
@classmethod
def configure_parser(cls, arg_parser: ArgumentParser, subparser: _SubParsersAction) -> None:
"""
Give the plugin a chance to amend the Trinity CLI argument parser. This hook is called
before :meth:`~trinity.extensibility.plugin.BasePlugin.on_ready`
"""
pass
def start(self) -> None:
"""
Delegate to :meth:`~trinity.extensibility.plugin.BasePlugin.do_start` and set ``running``
to ``True``. Broadcast a :class:`~trinity.extensibility.events.PluginStartedEvent` on the
event bus and hence allow other plugins to act accordingly.
"""
if self._status in INVALID_START_STATUS:
raise InvalidPluginStatus(
f"Can not start plugin when the plugin status is {self.status}"
)
self._status = PluginStatus.STARTED
self.do_start()
self.event_bus.broadcast_nowait(
PluginStartedEvent(type(self))
)
self.logger.info("Plugin started: %s", self.name)
def do_start(self) -> None:
"""
Perform the actual plugin start routine. In the case of a `BaseIsolatedPlugin` this method
will be called in a separate process.
This method should usually be overwritten by subclasses with the exception of plugins that
set ``func`` on the ``ArgumentParser`` to redefine the entire host program.
"""
pass
class BaseAsyncStopPlugin(BasePlugin):
"""
    A :class:`~trinity.extensibility.plugin.BaseAsyncStopPlugin` unwinds asynchronously, hence
needs to be awaited.
"""
def __init__(self,
boot_info: TrinityBootInfo,
event_bus: TrinityEventBusEndpoint) -> None:
super().__init__(boot_info)
self._event_bus = event_bus
@property
def event_bus(self) -> TrinityEventBusEndpoint:
return self._event_bus
async def do_stop(self) -> None:
"""
Asynchronously stop the plugin. Should be overwritten by subclasses.
"""
pass
async def stop(self) -> None:
"""
Delegate to :meth:`~trinity.extensibility.plugin.BaseAsyncStopPlugin.do_stop` causing the
plugin to stop asynchronously and setting ``running`` to ``False``.
"""
await self.do_stop()
self._status = PluginStatus.STOPPED
class BaseMainProcessPlugin(BasePlugin):
"""
A :class:`~trinity.extensibility.plugin.BaseMainProcessPlugin` overtakes the whole main process
    early, before any of the subsystems have started. In that sense it redefines the whole meaning of the
``trinity`` command.
"""
@property
def event_bus(self) -> TrinityEventBusEndpoint:
raise NotImplementedError('BaseMainProcessPlugins do not have event busses')
class BaseIsolatedPlugin(BasePlugin):
"""
A :class:`~trinity.extensibility.plugin.BaseIsolatedPlugin` runs in an isolated process and
hence provides security and flexibility by not making assumptions about its internal
operations.
Such plugins are free to use non-blocking asyncio as well as synchronous calls. When an
    isolated plugin is stopped, it first receives a SIGINT followed by a SIGTERM soon after.
It is up to the plugin to handle these signals accordingly.
"""
_process: Process = None
_event_bus: TrinityEventBusEndpoint = None
@property
def process(self) -> Process:
"""
Return the ``Process`` created by the isolated plugin.
"""
return self._process
def start(self) -> None:
"""
Prepare the plugin to get started and eventually call ``do_start`` in a separate process.
"""
self._status = PluginStatus.STARTED
self._process = ctx.Process(
target=self._spawn_start,
)
self._process.start()
self.logger.info("Plugin started: %s (pid=%d)", self.name, self._process.pid)
@abstractmethod
def _spawn_start(self) -> None:
pass
def stop(self) -> None:
"""
Set the ``status`` to `STOPPED`` but rely on the
:class:`~trinity.extensibility.plugin_manager.PluginManager` to tear down the process. This
allows isolated plugins to be taken down concurrently without depending on a running
event loop.
"""
self._status = PluginStatus.STOPPED
def _setup_logging(self) -> None:
log_queue = self.boot_info.boot_kwargs['log_queue']
level = self.boot_info.boot_kwargs.get('log_level', logging.INFO)
setup_queue_logging(log_queue, level)
if self.boot_info.args.log_levels:
setup_log_levels(self.boot_info.args.log_levels)
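# Illustrative sketch (not part of the original code): the general shape of a
# concrete isolated plugin. All names below are hypothetical; a concrete subclass
# must also satisfy the abstract ``name`` and ``event_bus`` members inherited
# from ``BasePlugin``, and ``_spawn_start`` runs inside the child process.
#
# class HypotheticalIsolatedPlugin(BaseIsolatedPlugin):
#     @property
#     def name(self) -> str:
#         return "Hypothetical Plugin"
#
#     @property
#     def event_bus(self) -> TrinityEventBusEndpoint:
#         return self._event_bus
#
#     def _spawn_start(self) -> None:
#         self._setup_logging()
#         self.do_start()
#
#     def do_start(self) -> None:
#         self.logger.info("running in an isolated process")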
class DebugPlugin(BaseAsyncStopPlugin):
"""
This is a dummy plugin useful for demonstration and debugging purposes
"""
@property
def name(self) -> str:
return "Debug Plugin"
@classmethod
def configure_parser(cls, arg_parser: ArgumentParser, subparser: _SubParsersAction) -> None:
arg_parser.add_argument("--debug-plugin", type=bool, required=False)
def handle_event(self, activation_event: BaseEvent) -> None:
self.logger.info("Debug plugin: handle_event called: %s", activation_event)
def do_start(self) -> None:
self.logger.info("Debug plugin: start called")
asyncio.ensure_future(self.count_forever())
async def count_forever(self) -> None:
i = 0
while True:
self.logger.info(i)
i += 1
await asyncio.sleep(1)
async def do_stop(self) -> None:
self.logger.info("Debug plugin: stop called")
|
the-stack_0_10665 | from datetime import datetime as dt
from common.logger import get_logger
from orchestrator.config import ORDER_EXPIRATION_THRESHOLD_IN_MINUTES
from orchestrator.order_status import OrderStatus
logger = get_logger(__name__)
class TransactionHistoryDAO:
def __init__(self, repo):
self.__repo = repo
def insert_transaction_history(self, obj_transaction_history):
transaction_history = obj_transaction_history.get_transaction_history()
query_response = self.__repo.execute(
"INSERT INTO transaction_history (username, order_id, order_type, status, payment_id, payment_method, "
"raw_payment_data, transaction_hash, row_created, row_updated)"
"VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s) "
"ON DUPLICATE KEY UPDATE payment_id = %s, payment_method = %s, raw_payment_data = %s, transaction_hash = %s, row_updated = %s",
[
transaction_history["username"],
transaction_history["order_id"],
transaction_history["order_type"],
transaction_history["status"],
transaction_history["payment_id"],
transaction_history["payment_method"],
transaction_history["raw_payment_data"],
transaction_history["transaction_hash"],
dt.utcnow(),
dt.utcnow(),
transaction_history["payment_id"],
transaction_history["payment_method"],
transaction_history["raw_payment_data"],
transaction_history["transaction_hash"],
dt.utcnow()
]
)
if query_response[0] == 1:
return True
return False
def get_order_id_for_expired_transaction(self):
params = [OrderStatus.PAYMENT_INITIATED.value, OrderStatus.PAYMENT_INITIATION_FAILED.value,
OrderStatus.PAYMENT_EXECUTION_FAILED.value, ORDER_EXPIRATION_THRESHOLD_IN_MINUTES]
        order_id_raw_data = self.__repo.execute(
            "SELECT order_id FROM transaction_history WHERE status IN (%s, %s, %s) AND "
            "TIMESTAMPDIFF(MINUTE, row_created, NOW()) > %s ", params)
list_of_order_id = [rec["order_id"] for rec in order_id_raw_data]
return list_of_order_id
def update_transaction_status(self, list_of_order_id, status):
if len(list_of_order_id) == 0:
return "No order id found"
temp_holder = ("%s, " * len(list_of_order_id))[:-2]
params = [status] + list_of_order_id + [OrderStatus.PAYMENT_INITIATED.value,
OrderStatus.PAYMENT_INITIATION_FAILED.value,
OrderStatus.PAYMENT_EXECUTION_FAILED.value]
update_transaction_status_response = self.__repo.execute(
"UPDATE transaction_history SET status = %s WHERE order_id IN (" + temp_holder + ") AND status IN (%s, %s, %s)",
params)
logger.info(f"update_transaction_status: {update_transaction_status_response}")
return update_transaction_status_response
def get_transaction_details_for_given_order_id(self, order_id):
transaction_data = self.__repo.execute(
"SELECT username, order_id, order_type, status, payment_id, payment_type, payment_method, raw_payment_data, "
"transaction_hash FROM transaction_history WHERE order_id = %s", [order_id])
if len(transaction_data) == 0:
raise Exception("Order Id does not exist.")
return transaction_data[0]
|
the-stack_0_10666 | """
This file offers the methods to automatically retrieve the graph Marinobacter salinus.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def MarinobacterSalinus(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Marinobacter salinus graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False
        Whether to load the graph as directed or undirected.
By default false.
preprocess: bool = True
Whether to preprocess the graph to be loaded in
optimal time and memory.
load_nodes: bool = True,
Whether to load the nodes vocabulary or treat the nodes
simply as a numeric range.
verbose: int = 2,
        Whether to show loading bars during the retrieval and building
of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of Marinobacter salinus graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="MarinobacterSalinus",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
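# Illustrative usage sketch (not part of the original code): calling the function
# with its defaults retrieves the graph; the first call downloads and caches the
# data under ``graphs/string``.
#
# graph = MarinobacterSalinus(directed=False, load_nodes=True, version="links.v11.5")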
|
the-stack_0_10667 | import abc
import numpy as np
import math
import random
import itertools as it
from hklearn_genetic.board_conflicts import conflict
from deap import tools, gp
class ProblemInterface(metaclass=abc.ABCMeta):
@classmethod
def __subclasshook__(cls, subclass):
return (hasattr(subclass, 'evaluate') and
callable(subclass.evaluate) and
hasattr(subclass, 'stop_criteria') and
callable(subclass.stop_criteria) and
hasattr(subclass, 'populate') and
callable(subclass.populate) and
hasattr(subclass, 'decode') and
callable(subclass.decode) and
hasattr(subclass, 'crossover') and
callable(subclass.crossover) and
hasattr(subclass, 'mutate') and
callable(subclass.mutate))
@ProblemInterface.register
class IProblem:
"""Evalua las soluciones potenciales del problema"""
def evaluate(self, X):
pass
"""Regresa si la población ha llegado al criterio de paro"""
def stop_criteria(self, X_eval):
pass
"""Crea una poblacion inicial de posibles soluciones"""
def populate(self, n_individuals):
pass
"""Pasa a la población del genotipo al fenotipo"""
def decode(self, X_encoded):
pass
"""Efectúa la cruza con los elementos de la población"""
def crossover(self, X, pc, elitism):
pass
"""Efectúa la mutación con los elementos de la población"""
def mutate(self, X, pm, elitism):
pass
class BaseProblem(IProblem):
def get_crossover_probs(self, n_cross):
return np.random.rand(1 , n_cross)[0,:]
def get_crossover_points(self, length):
return np.random.randint(0, length)
@abc.abstractmethod
def a_eval(self, X_decoded):
pass
def evaluate(self, X):
decoded_rep = self.decode(X)
X_eval = self.a_eval(decoded_rep, X)
return X_eval
def crossover(self, X, pc, elitism):
if not elitism:
n_cross = X.shape[0] // 2
elitism_num = 0
else:
elitism_num = math.floor(elitism * X.shape[0])
n_cross = (X.shape[0] - elitism_num) // 2
prob_cross = self.get_crossover_probs(n_cross)
for i, p in enumerate(prob_cross):
if p <= pc:
cross_point = self.get_crossover_points(X.shape[1] - 1)
son1 = X[2*i + elitism_num,:].copy()
son2 = X[2*i + 1 + elitism_num, :].copy()
son1[cross_point : X.shape[1]] = X[2*i + 1 + elitism_num, cross_point : X.shape[1]].copy()
son2[cross_point : X.shape[1]] = X[2*i + elitism_num, cross_point : X.shape[1]].copy()
X[2*i + elitism_num,:] = son1
X[2*i + 1 + elitism_num,:] = son2
return X
class _BaseGeneticProgrammingProblem(BaseProblem):
def __init__(self, mutation_type = "Branch"):
self.avg_lengths = []
self.mutation_type = mutation_type
def populate(self, n_individuals):
return tools.initRepeat(list, lambda: gp.genHalfAndHalf(self.pset, min_=1, max_=2), n_individuals)
def decode(self, X_encoded):
X_decoded = []
length_sum = 0
for x_i in X_encoded:
tree = gp.PrimitiveTree(x_i)
length_sum += len(tree)
X_decoded += [gp.compile(tree, self.pset)]
self.avg_lengths += [length_sum/len(X_decoded)]
return X_decoded
def crossover(self, X, pc, elitism):
if not elitism:
n_cross = len(X) // 2
elitism_num = 0
else:
elitism_num = math.floor(elitism * len(X))
n_cross = (len(X) - elitism_num) // 2
prob_cross = self.get_crossover_probs(n_cross)
for i, p in enumerate(prob_cross):
if p <= pc:
parent1 = gp.PrimitiveTree(X[2*i + elitism_num])
parent2 = gp.PrimitiveTree(X[2*i + 1 + elitism_num])
offspring = gp.cxOnePoint(parent1, parent2)
if offspring[0].height < self.height_limit:
X[2*i + elitism_num] = offspring[0]
else:
r = random.uniform(0, 1)
X[2*i + elitism_num] = X[2*i + 1 + elitism_num].copy() if r >= 0.5 else X[2*i + elitism_num]
if offspring[1].height < self.height_limit:
X[2*i + 1 + elitism_num] = offspring[1]
else:
r = random.uniform(0, 1)
X[2*i + 1 + elitism_num] = X[2*i + elitism_num].copy() if r >= 0.5 else X[2*i + 1 + elitism_num]
return X
def mutate(self, X, pm, elitism):
if pm > 0:
mutate_m = np.random.uniform(size = (len(X), 1))
mutate_m = mutate_m <= pm
func = lambda pset, type_ : gp.genFull(pset, min_=0, max_=2)
if not elitism:
for i, m in enumerate(mutate_m):
#if m <= 1./len(X[i]):
if m:
if self.mutation_type == "Branch":
offspring = gp.mutUniform(gp.PrimitiveTree(X[i]), func, self.pset)
elif self.mutation_type == "Node":
offspring = gp.mutNodeReplacement(gp.PrimitiveTree(X[i]), self.pset)
if offspring[0].height <= self.height_limit:
X[i] = offspring[0]
else:
elitism_num = math.floor(elitism * len(X))
for i in range(elitism_num, len(X)):
#if mutate_m[i] <= 1./len(X[i]):
if mutate_m[i]:
if self.mutation_type == "Branch":
offspring = gp.mutUniform(gp.PrimitiveTree(X[i]), func, self.pset)
elif self.mutation_type == "Node":
offspring = gp.mutNodeReplacement(gp.PrimitiveTree(X[i]), self.pset)
if offspring[0].height <= self.height_limit:
X[i] = offspring[0]
return X
class SymbolicRegressionProblem(_BaseGeneticProgrammingProblem):
def __init__(self, bounds, pset, real_values, height_limit, stop_thresh = 0.2, mutation_type = "Branch"):
super().__init__(mutation_type)
self.bounds = bounds
self.height_limit = height_limit
self.pset = pset
self.real_values = real_values
self.stop_thresh = stop_thresh
param_values = []
for param_bound in self.bounds:
param_values += [list(np.linspace(param_bound[0], param_bound[1], num = len(real_values)))]
self.points = list(it.product(*param_values))
def a_eval(self, X_decoded, X_encoded):
m = len(self.points)
X_fitness = []
for j, func in enumerate(X_decoded):
try:
s = 0
for i in range(m):
s += (func(*self.points[i]) - self.real_values[i])**2
X_fitness += [- (1./m)*s]
except Exception as e:
print(e)
x_encoded = X_encoded[j]
print(gp.PrimitiveTree(x_encoded))
return np.array(list(zip(X_fitness, list(range(len(X_fitness))))), dtype = [('fitness', float),('index', int)])
def stop_criteria(self, X_eval):
return list(np.where(X_eval >= - self.stop_thresh)[0])
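# Illustrative sketch (not part of the original code): building a minimal DEAP
# primitive set and instantiating the problem above. The primitive set, target
# function and numeric settings are arbitrary choices, not values mandated by
# this module.
#
# import operator
# pset = gp.PrimitiveSet("MAIN", 1)
# pset.addPrimitive(operator.add, 2)
# pset.addPrimitive(operator.sub, 2)
# pset.addPrimitive(operator.mul, 2)
# xs = np.linspace(-1.0, 1.0, num=20)
# problem = SymbolicRegressionProblem(
#     bounds=[(-1.0, 1.0)], pset=pset,
#     real_values=list(xs**2 + xs), height_limit=8, stop_thresh=0.01)
# population = problem.populate(50)
# fitness = problem.evaluate(population)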
class BitParityCheck(_BaseGeneticProgrammingProblem):
def __init__(self, pset, real_values, height_limit, mutation_type = "Branch"):
super().__init__(mutation_type)
self.height_limit = height_limit
self.pset = pset
self.real_values = real_values
self.points = list(map(list, it.product([False, True], repeat=int(math.log2(len(self.real_values))))))
def stop_criteria(self, X_eval):
return list(np.where(X_eval >= 0)[0])
def a_eval(self, X_decoded, X_encoded):
m = len(self.points)
X_fitness = []
for j, func in enumerate(X_decoded):
try:
X_fitness += [-sum(func(*in_) == out for in_, out in zip(self.points, self.real_values))]
except Exception as e:
print(e)
print(gp.PrimitiveTree(X_encoded[j]))
return X_fitness
class NeutralityProblem(_BaseGeneticProgrammingProblem):
def __init__(self, pset, T, height_limit, terminals, mutation_type = "Branch"):
super().__init__(mutation_type)
self.height_limit = height_limit
self.pset = pset
self.T = T
self.str_terminals = [str(t) for t in terminals]
for t in terminals:
self.pset.addTerminal(t)
self.gene_counts = {t : [] for t in self.str_terminals}
def stop_criteria(self, X_eval):
return []
def a_eval(self, X_decoded, X_encoded):
X_fitness = []
for j, x_i in enumerate(X_decoded):
try:
X_fitness += [-abs(self.T - x_i)]
except Exception as e:
print(e)
print(gp.PrimitiveTree(X_encoded[j]))
for gene in self.gene_counts.keys():
self.gene_counts[gene]+=[0]
for x in X_encoded:
x_tree = gp.PrimitiveTree(x)
x_tree_str = str(x_tree)
for s in x_tree_str:
if s in self.str_terminals:
self.gene_counts[s][-1] += 1
return X_fitness
class _BaseBinaryProblem(BaseProblem):
def __init__(self, thresh, bounds, n_dim = 2, n_prec = 4):
self.bounds = bounds
self.n_dim = n_dim
self.gene_length = math.ceil(math.log2((self.bounds[1] - self.bounds[0])*10**n_prec))
self.thresh = thresh
def stop_criteria(self, X_eval):
return list(np.where(X_eval >= self.thresh)[0])
def populate(self, n_individuals):
return np.random.randint(2, size = (n_individuals, self.gene_length*self.n_dim))
def decode(self, X_encoded):
decoded_rep = np.zeros((X_encoded.shape[0], self.n_dim))
for i in range(self.n_dim):
decoded_rep[:,i] = (X_encoded[:, i*self.gene_length : (i + 1)*self.gene_length]@(2**np.arange(X_encoded[:, i*self.gene_length : (i + 1)*self.gene_length].shape[1], dtype = np.float64)[::-1][:, np.newaxis])).T
return self.bounds[0] + decoded_rep*(self.bounds[1] - self.bounds[0])/(2**self.gene_length - 1)
def get_mutation(self, shape):
return np.random.uniform(size = shape)
def mutate(self, X, pm, elitism):
mutate_m = self.get_mutation((X.shape[0], X.shape[1]))
mutate_m = mutate_m <= pm
X_bit = X == 1
if not elitism:
X = np.logical_xor(X_bit, mutate_m)
else:
elitism_num = math.floor(elitism * X.shape[0])
X[elitism_num : X.shape[0], :] = np.logical_xor(X_bit, mutate_m)[elitism_num : X.shape[0], :]
X = X.astype(int)
return X
class _BaseIntegerProblem(BaseProblem):
def __init__(self, thresh, n_dim = 2):
self.n_dim = n_dim
self.thresh = thresh
def stop_criteria(self, X_eval):
return list(np.where(X_eval >= self.thresh)[0])
def populate(self, n_individuals):
return np.random.randint(self.n_dim, size = (n_individuals, self.n_dim))
def decode(self, X_encoded):
return X_encoded
def get_mutation(self, shape):
return np.random.uniform(size = shape)
def mutate(self, X, pm, elitism):
mutate_m = self.get_mutation((X.shape[0], 1))
mutate_m = mutate_m <= pm
if not elitism:
for i, m in enumerate(mutate_m):
if m:
indices = np.random.permutation(X.shape[1])[0 : 2]
X[i,indices[0]], X[i, indices[1]] = X[i, indices[1]], X[i, indices[0]]
else:
elitism_num = math.floor(elitism * X.shape[0])
for i in range(elitism_num, X.shape[0]):
if mutate_m[i]:
indices = np.random.permutation(X.shape[1])[0 : 2]
X[i,indices[0]], X[i, indices[1]] = X[i, indices[1]], X[i, indices[0]]
return X
class _BaseRealProblem(BaseProblem):
def __init__(self, thresh, bounds, rang_param = 0.1, n_dim = 2):
self.n_dim = n_dim
self.thresh = thresh
self.bounds = bounds
self.rang_param = rang_param
def stop_criteria(self, X_eval):
return list(np.where(X_eval >= self.thresh)[0])
def populate(self, n_individuals):
return np.random.uniform(self.bounds[0], self.bounds[1] + 0.1, size = (n_individuals, self.n_dim))
def decode(self, X_encoded):
return X_encoded
def get_crossover_points(self, length):
return np.random.uniform(low = -.25 , high = 1.25, size = length)
def crossover(self, X, pc, elitism):
if not elitism:
n_cross = X.shape[0] // 2
elitism_num = 0
else:
elitism_num = math.floor(elitism * X.shape[0])
n_cross = (X.shape[0] - elitism_num) // 2
prob_cross = self.get_crossover_probs(n_cross)
for i, p in enumerate(prob_cross):
if p <= pc:
alphas = self.get_crossover_points(X.shape[1])
X[2*i + elitism_num,:] += alphas * (X[2*i + 1 + elitism_num, :] - X[2*i + elitism_num,:])
X[2*i + 1 + elitism_num,:] += alphas * (X[2*i + elitism_num,:] - X[2*i + 1 + elitism_num, :])
X[2*i + elitism_num,:] = np.clip(X[2*i + elitism_num,:], self.bounds[0], self.bounds[1])
X[2*i + 1 + elitism_num,:] = np.clip(X[2*i + 1 + elitism_num,:], self.bounds[0], self.bounds[1])
return X
def get_mutation(self, shape):
return np.random.uniform(size = shape)
def mutate(self, X, pm, elitism):
if not elitism:
elitism = 0
rang = (self.bounds[1] - self.bounds[0])*self.rang_param
mutate_m = self.get_mutation((X.shape[0], X.shape[1]))
mutate_plus_minus = self.get_mutation((X.shape[0], X.shape[1]))
mutate_m[mutate_m <= pm] = 1.
mutate_m[mutate_m < 1.] = 0.
mutate_plus_minus[mutate_plus_minus <= .5] = 1.0
mutate_plus_minus[mutate_plus_minus > .5] = -1.0
elitism_num = math.floor(elitism * X.shape[0])
for i in range(elitism_num, X.shape[0]):
mutate_delta = self.get_mutation((X.shape[1], X.shape[1]))
mutate_delta[mutate_delta <= 1./self.n_dim] = 1.
mutate_delta[mutate_delta < 1.] = 0.
deltas = (mutate_delta @ (2**-np.arange(self.n_dim, dtype = np.float64)[:, np.newaxis])).T
X[i, :] = X[i, :] + mutate_m[i, :] * mutate_plus_minus[i, :] * rang * deltas
X[i, :] = np.clip(X[i, :], self.bounds[0], self.bounds[1])
return X
class BaseNQueen(BaseProblem):
def a_eval(self, X_decoded):
X_fitness = np.zeros(X_decoded.shape[0])
for i, x in enumerate(X_decoded):
X_fitness[i] = -conflict(x)
#print(X_fitness)
return np.array(list(zip(X_fitness, list(range(X_decoded.shape[0])))), dtype = [('fitness', float),('index', int)])
class IntegerNQueen(_BaseIntegerProblem, BaseNQueen):
def __init__(self, n_dim = 2):
super().__init__(0, n_dim = n_dim)
class RealNQueen(_BaseRealProblem, BaseNQueen):
def __init__(self, n_dim = 2):
super().__init__(0, (0, 5.), n_dim = n_dim)
def decode(self, X_encoded):
X_decoded = np.zeros(X_encoded.shape, dtype=np.int64)
for i, x in enumerate(X_encoded):
indexed = np.array(list(zip(x, list(range(X_decoded.shape[1])))), dtype = [('real_rep', float),('index', int)])
indexed = np.sort(indexed, order=["real_rep"])
X_decoded[i, :] = indexed["index"]
return X_decoded
class BinaryNQueen(_BaseBinaryProblem, BaseNQueen):
def __init__(self, n_dim = 2, n_prec = 4):
super().__init__(0, (0.01, n_dim), n_dim = n_dim, n_prec=n_prec)
def decode(self, X_encoded):
return np.ceil(super().decode(X_encoded)).astype(int) - 1
class BaseRastrigin(BaseProblem):
def __init__(self):
self.rank = 100.
def a_eval(self, X_decoded):
return np.array(list(zip(self.rank - (10.*self.n_dim + np.sum(X_decoded**2 - 10.*np.cos(2.*np.pi*X_decoded), axis = 1)), list(range(X_decoded.shape[0])))), dtype = [('fitness', float),('index', int)])
class BaseBeale(BaseProblem):
def __init__(self):
self.rank = 150000.
def a_eval(self, X_decoded):
first_term = (1.5 - X_decoded[:, 0] + X_decoded[:, 0]*X_decoded[:, 1])**2
second_term = (2.25 - X_decoded[:, 0] + X_decoded[:, 0]*(X_decoded[:, 1]**2))**2
third_term = (2.625 - X_decoded[:, 0] + X_decoded[:, 0]*(X_decoded[:, 1]**3))**2
return np.array(list(zip(self.rank - (first_term + second_term + third_term), list(range(X_decoded.shape[0])))), dtype = [('fitness', float),('index', int)])
class BaseHimmelblau(BaseProblem):
def __init__(self):
self.rank = 2200.
def a_eval(self, X_decoded):
first_term = (X_decoded[:, 0]**2 + X_decoded[:, 1] - 11.)**2
second_term = (X_decoded[:, 0] + X_decoded[:, 1]**2 - 7.)**2
return np.array(list(zip(self.rank - (first_term + second_term), list(range(X_decoded.shape[0])))), dtype = [('fitness', float),('index', int)])
class BaseEggholder(BaseProblem):
def __init__(self):
self.rank = 1200.
def a_eval(self, X_decoded):
first_term = - (X_decoded[:, 1] + 47)*np.sin(np.sqrt(np.abs(X_decoded[:, 0]/2. + (X_decoded[:, 1] + 47))))
second_term = - X_decoded[:, 0]*np.sin(np.sqrt(np.abs(X_decoded[:, 0] - (X_decoded[:, 1] + 47))))
return np.array(list(zip(self.rank - (first_term + second_term), list(range(X_decoded.shape[0])))), dtype = [('fitness', float),('index', int)])
class BinaryRastrigin(_BaseBinaryProblem, BaseRastrigin):
def __init__(self, n_dim = 2, n_prec = 4):
super().__init__(99.99, (-5.12, 5.12), n_dim=n_dim, n_prec=n_prec)
BaseRastrigin.__init__(self)
class BinaryBeale(_BaseBinaryProblem, BaseBeale):
def __init__(self, n_prec = 4):
super().__init__(149999.99, (-4.5, 4.5), n_dim=2, n_prec=n_prec)
BaseBeale.__init__(self)
class BinaryHimmelblau(_BaseBinaryProblem, BaseHimmelblau):
def __init__(self, n_prec = 4):
super().__init__(2199.99, (-5., 5.), n_dim=2, n_prec=n_prec)
BaseHimmelblau.__init__(self)
class BinaryEggholder(_BaseBinaryProblem, BaseEggholder):
def __init__(self, n_prec = 4):
super().__init__(2157., (-512., 512.), n_dim=2, n_prec=n_prec)
BaseEggholder.__init__(self)
class RealRastrigin(_BaseRealProblem, BaseRastrigin):
def __init__(self, rang_param = .0001, n_dim = 2):
super().__init__(99.99, (-5.12, 5.12), rang_param, n_dim=n_dim)
BaseRastrigin.__init__(self)
class RealBeale(_BaseRealProblem, BaseBeale):
def __init__(self, rang_param = .0001):
super().__init__(149999.99, (-4.5, 4.5), rang_param, n_dim=2)
BaseBeale.__init__(self)
class RealHimmelblau(_BaseRealProblem, BaseHimmelblau):
def __init__(self, rang_param = .001):
super().__init__(2199.99, (-5., 5.), rang_param, n_dim=2)
BaseHimmelblau.__init__(self)
class RealEggholder(_BaseRealProblem, BaseEggholder):
def __init__(self, rang_param = .001):
super().__init__(2157., (-512., 512.), rang_param, n_dim=2)
BaseEggholder.__init__(self)
class RealRastriginPSO(_BaseRealProblem):
def __init__(self, n_dim = 2):
super().__init__(99.99, (-5.12, 5.12), n_dim=n_dim)
class RealBealePSO(_BaseRealProblem):
def __init__(self):
super().__init__(149999.99, (-4.5, 4.5), n_dim=2)
class RealHimmelblauPSO(_BaseRealProblem):
def __init__(self):
super().__init__(2199.99, (-5., 5.), n_dim=2)
class RealEggholderPSO(_BaseRealProblem):
def __init__(self):
super().__init__(2157., (-512., 512.), n_dim=2) |
the-stack_0_10668 | import re
import subprocess
import pygit2
tag_ref = re.compile('^refs/tags/')
committer = pygit2.Signature('Git Worker', '[email protected]')
def git_show(path, commitish, obj):
repo = pygit2.Repository(path)
commit, _ = repo.resolve_refish(commitish)
data = (commit.tree / obj).read_raw().decode()
return data
def delete_tag(path, tag):
repo = pygit2.Repository(path)
repo.references.delete(f'refs/tags/{tag}')
def git_tag(repo):
return [repo.references[r] for r in repo.references if tag_ref.match(r)]
def git_commit(repo, file_paths, author=None, message="[OpenNeuro] Recorded changes", parents=None):
"""Commit array of paths at HEAD."""
# Refresh index with git-annex specific handling
annex_command = ["git-annex", "add"] + file_paths
subprocess.run(annex_command, check=True, cwd=repo.workdir)
repo.index.add_all(file_paths)
repo.index.write()
return git_commit_index(repo, author, message, parents)
def git_commit_index(repo, author=None, message="[OpenNeuro] Recorded changes", parents=None):
"""Commit any existing index changes."""
if not author:
author = committer
if parents is None:
parent_commits = [repo.head.target.hex]
else:
parent_commits = parents
tree = repo.index.write_tree()
commit = repo.create_commit(
'refs/heads/master', author, committer, message, tree, parent_commits)
repo.head.set_target(commit)
return commit
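# Illustrative usage sketch (not part of the original code): the repository path
# and file names are placeholders; the repository is assumed to be managed by
# git-annex so that ``git_commit`` can stage files through ``git-annex add``.
#
# repo = pygit2.Repository("/path/to/dataset")
# tags = git_tag(repo)
# git_commit(repo, ["dataset_description.json"],
#            message="[OpenNeuro] Update dataset description")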
|
the-stack_0_10669 | import pdb
import pickle
import pandas as pd
import os
import numpy as np
import sys
sys.path.insert(1,"../")
sys.path.insert(1,"../../")
sys.path.insert(1,"../../../")
from config_u import base
project_base_path = base
current_path = "scripts/cpmg/automated_metabolite_quantification/"
sys.path.insert(1, os.path.join(project_base_path, current_path))
from data_utils import split_to_kfold, spectrum2ppm, spectrum_peak_unit_quantification
# load fully quantified samples
datapath_base = os.path.join(project_base_path, "data/raw_data_cpmg/")
with open(os.path.join(datapath_base, "fully_quantified_samples_spectra"), "rb") as f:
c_spectra = pickle.load(f)
with open(os.path.join(datapath_base, "fully_quantified_samples_quantification"), "rb") as f:
c_quantification = pickle.load(f)
with open(os.path.join(project_base_path, "data/raw_data_cpmg/metabolite_names"), "rb") as f:
metabolite_names = pickle.load(f)
c_statistics = pd.read_pickle(os.path.join(datapath_base, "fully_quantified_samples_statistics"))
# find samples with invalid pathologic classification (i.e. "*")
index = c_statistics.index
condition = c_statistics["Pathologic Classification"] == "*"
invalid_pc_idx = index[condition].tolist()
statistics = c_statistics.iloc[invalid_pc_idx, :].reset_index(drop=True)
spectra = c_spectra[invalid_pc_idx, :]
quant = c_quantification[invalid_pc_idx, :]
# scale CPMG spectra with respect to reference Acetate and sample mass
mass = np.array(statistics["Mass"].tolist()).astype(float)
mass_factor = np.repeat(mass.reshape(-1,1), spectra.shape[1], axis=1)
normalized_spectra = np.divide(spectra, mass_factor)
scaled_spectra = normalized_spectra * spectrum_peak_unit_quantification
# calculate ppm spectra
ppm_spectra = spectrum2ppm(scaled_spectra)
# rename variables to be accessed from other scripts
fq_i_ppm_spectra = ppm_spectra
fq_i_spectra = scaled_spectra
fq_i_statistics = statistics
fq_i_quant = quant |
the-stack_0_10670 | import os
from pywps import Process
from pywps import LiteralInput
from pywps import ComplexOutput
from pywps import FORMATS, Format
from pywps import configuration
from pywps.app.Common import Metadata
# from c4cds.regridder import Regridder, REGIONAL
from c4cds.subsetter import Subsetter
from c4cds.plotter import Plotter
from c4cds.search import Search
from c4cds.ncdump import ncdump
from c4cds import util
CORDEX_DOMAIN_MAP = {
'Egypt': 'AFR-44i',
'UK': 'EUR-44i',
'France': 'EUR-44i',
'Germany': 'EUR-44i',
}
class CordexSubsetter(Process):
def __init__(self):
inputs = [
LiteralInput('country', 'Country',
abstract='Choose a Country like UK.',
data_type='string',
allowed_values=['UK', 'France', 'Germany', 'Egypt'],
default='UK'),
LiteralInput('model', 'Model',
abstract='Choose a model like MOHC-HadRM3P.',
data_type='string',
allowed_values=['MOHC-HadRM3P'],
default='MOHC-HadRM3P'),
LiteralInput('experiment', 'Experiment',
abstract='Choose an experiment like evaluation.',
data_type='string',
allowed_values=['evaluation'],
default='evaluation'),
LiteralInput('variable', 'Variable',
abstract='Choose a variable like tas.',
data_type='string',
allowed_values=['tas', 'tasmax', 'tasmin'],
default='tas'),
LiteralInput('year', 'Match year', data_type='integer',
abstract='File should match this year.',
allowed_values=[1990, 2000, 2010],
default="1990"),
]
outputs = [
ComplexOutput('output', 'Subsetted Dataset',
abstract='Subsetted Dataset.',
as_reference=True,
supported_formats=[FORMATS.NETCDF]),
ComplexOutput('ncdump', 'Metadata',
abstract='ncdump of subsetted Dataset.',
as_reference=True,
supported_formats=[FORMATS.TEXT]),
ComplexOutput('preview', 'Preview',
abstract='Preview of subsetted Dataset.',
as_reference=True,
supported_formats=[Format('image/png')]),
]
super(CordexSubsetter, self).__init__(
self._handler,
identifier='cordex_subsetter',
version='1.0',
title='CORDEX Subsetter',
abstract='CORDEX Subsetter working on the Copernicus C3S CORDEX archive. '
'The selected CORDEX file is subsetted by the bounding-box of a Country '
'using the CDO "sellonlatbox" operator.',
metadata=[
Metadata('CP4CDS Portal', 'https://cp4cds.github.io/'),
Metadata('Documentation',
'https://c4cds-wps.readthedocs.io/en/latest/processes.html#cordex_subsetter',
role=util.WPS_ROLE_DOC),
Metadata('Media',
'https://c4cds-wps.readthedocs.io/en/latest/_static/media/cordex_subsetter_thumbnail.png',
role=util.WPS_ROLE_MEDIA),
],
inputs=inputs,
outputs=outputs,
store_supported=True,
status_supported=True
)
def _handler(self, request, response):
search = Search(configuration.get_config_value("data", "cordex_archive_root"))
nc_file = search.search_cordex(
model=request.inputs['model'][0].data,
experiment=request.inputs['experiment'][0].data,
variable=request.inputs['variable'][0].data,
domain=CORDEX_DOMAIN_MAP[request.inputs['country'][0].data],
start_year=request.inputs['year'][0].data,
end_year=request.inputs['year'][0].data,
)
if not nc_file:
raise Exception("Could not find CORDEX file.")
response.update_status('search done.', 10)
# regridding
# regridder = Regridder(
# archive_base=configuration.get_config_value("data", "cordex_archive_root"),
# output_dir=os.path.join(self.workdir, 'out_regrid')
# )
# regridded_file = regridder.regrid(input_file=nc_file, domain_type=REGIONAL)
# response.update_status('regridding done.', 60)
# subset by country
subsetter = Subsetter(
output_dir=os.path.join(self.workdir, 'out_subset')
)
subsetted_file = subsetter.subset_by_country(
nc_file,
country=request.inputs['country'][0].data)
response.outputs['output'].file = subsetted_file
response.update_status('subsetting done.', 70)
# plot preview
title = "{} {} {} {} {}".format(
request.inputs['country'][0].data,
request.inputs['model'][0].data,
request.inputs['experiment'][0].data,
request.inputs['variable'][0].data,
request.inputs['year'][0].data,
)
plotter = Plotter(
output_dir=os.path.join(self.workdir, 'out_plot')
)
preview_file = plotter.plot_preview(subsetted_file, title)
response.outputs['preview'].file = preview_file
response.update_status('plot done.', 80)
# run ncdump
with open(os.path.join(self.workdir, "nc_dump.txt"), 'w') as fp:
response.outputs['ncdump'].file = fp.name
fp.writelines(ncdump(subsetted_file))
response.update_status('ncdump done.', 90)
# done
response.update_status("done.", 100)
return response
|
the-stack_0_10671 | import argparse
import logging
import time
import ast
import sys
from tf_pose import common
import cv2
import numpy as np
from tf_pose.estimator import TfPoseEstimator
from tf_pose.networks import get_graph_path, model_wh
from tf_pose.lifting.prob_model import Prob3dPose
from tf_pose.lifting.draw import plot_pose
logger = logging.getLogger('TfPoseEstimator')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='tf-pose-estimation run')
parser.add_argument('--image', type=str, default='./images/p1.jpg')
parser.add_argument('--model', type=str, default='cmu',
help='cmu / mobilenet_thin / mobilenet_v2_large / mobilenet_v2_small')
parser.add_argument('--resize', type=str, default='0x0',
help='if provided, resize images before they are processed. '
'default=0x0, Recommends : 432x368 or 656x368 or 1312x736 ')
parser.add_argument('--resize-out-ratio', type=float, default=4.0,
help='if provided, resize heatmaps before they are post-processed. default=1.0')
args = parser.parse_args()
w, h = model_wh(args.resize)
if w == 0 or h == 0:
e = TfPoseEstimator(get_graph_path(args.model), target_size=(432, 368))
else:
e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))
# estimate human poses from a single image !
image = common.read_imgfile(args.image, None, None)
# image = cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 21)
if image is None:
logger.error('Image can not be read, path=%s' % args.image)
sys.exit(-1)
t = time.time()
humans = e.inference(image, resize_to_default=(w > 0 and h > 0), upsample_size=args.resize_out_ratio)
elapsed = time.time() - t
logger.info('inference image: %s in %.4f seconds.' % (args.image, elapsed))
image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
# cv2.imshow('tf-pose-estimation result', image)
# cv2.waitKey()
import matplotlib.pyplot as plt
fig = plt.figure()
a = fig.add_subplot(2, 2, 1)
a.set_title('Result')
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
bgimg = cv2.cvtColor(image.astype(np.uint8), cv2.COLOR_BGR2RGB)
bgimg = cv2.resize(bgimg, (e.heatMat.shape[1], e.heatMat.shape[0]), interpolation=cv2.INTER_AREA)
# show network output
a = fig.add_subplot(2, 2, 2)
plt.imshow(bgimg, alpha=0.5)
tmp = np.amax(e.heatMat[:, :, :-1], axis=2)
plt.imshow(tmp, cmap=plt.cm.gray, alpha=0.5)
plt.colorbar()
tmp2 = e.pafMat.transpose((2, 0, 1))
tmp2_odd = np.amax(np.absolute(tmp2[::2, :, :]), axis=0)
tmp2_even = np.amax(np.absolute(tmp2[1::2, :, :]), axis=0)
a = fig.add_subplot(2, 2, 3)
a.set_title('Vectormap-x')
# plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
plt.imshow(tmp2_odd, cmap=plt.cm.gray, alpha=0.5)
plt.colorbar()
a = fig.add_subplot(2, 2, 4)
a.set_title('Vectormap-y')
# plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
plt.imshow(tmp2_even, cmap=plt.cm.gray, alpha=0.5)
plt.colorbar()
plt.show()
    #import sys
    #sys.exit(0)
logger.info('3d lifting initialization.')
poseLifting = Prob3dPose('./tf_pose/lifting/models/prob_model_params.mat')
image_h, image_w = image.shape[:2]
standard_w = 640
standard_h = 480
pose_2d_mpiis = []
visibilities = []
for human in humans:
pose_2d_mpii, visibility = common.MPIIPart.from_coco(human)
pose_2d_mpiis.append([(int(x * standard_w + 0.5), int(y * standard_h + 0.5)) for x, y in pose_2d_mpii])
visibilities.append(visibility)
pose_2d_mpiis = np.array(pose_2d_mpiis)
visibilities = np.array(visibilities)
transformed_pose2d, weights = poseLifting.transform_joints(pose_2d_mpiis, visibilities)
pose_3d = poseLifting.compute_3d(transformed_pose2d, weights)
for i, single_3d in enumerate(pose_3d):
plot_pose(single_3d)
plt.show()
pass |
the-stack_0_10672 | # Copyright 2015 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from neutron_lib import constants
def check_subnet_ip(cidr, ip_address, port_owner=''):
"""Validate that the IP address is on the subnet."""
ip = netaddr.IPAddress(ip_address)
net = netaddr.IPNetwork(cidr)
# Check that the IP is valid on subnet. In IPv4 this cannot be the
# network or the broadcast address
if net.version == constants.IP_VERSION_6:
# NOTE(njohnston): In some cases the code cannot know the owner of the
        # port. In these cases port_owner should be an empty string, and we pass
# it through here.
return ((port_owner in (constants.ROUTER_PORT_OWNERS + ('', )) or
ip != net.network) and
ip in net)
else:
return ip != net.network and ip != net.broadcast and ip in net
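# Illustrative behaviour of check_subnet_ip (derived from the checks above, not
# part of the original module):
#   check_subnet_ip('192.0.2.0/24', '192.0.2.10')  -> True
#   check_subnet_ip('192.0.2.0/24', '192.0.2.0')   -> False  (network address)
#   check_subnet_ip('192.0.2.0/24', '192.0.2.255') -> False  (broadcast address)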
def check_gateway_invalid_in_subnet(cidr, gateway):
"""Check whether the gw IP address is invalid on the subnet."""
ip = netaddr.IPAddress(gateway)
net = netaddr.IPNetwork(cidr)
    # Check whether the gw IP is invalid on the subnet.
# If gateway is in the subnet, it cannot be the
# 'network' or the 'broadcast address (only in IPv4)'.
# If gateway is out of subnet, there is no way to
# check since we don't have gateway's subnet cidr.
return (ip in net and
(net.version == constants.IP_VERSION_4 and
ip in (net.network, net[-1])))
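# Illustrative behaviour (sketch, not part of the original module):
#   check_gateway_invalid_in_subnet('192.0.2.0/24', '192.0.2.0') -> True   (network address)
#   check_gateway_invalid_in_subnet('192.0.2.0/24', '192.0.2.1') -> False  (usable gateway)
#   a gateway outside the subnet also returns False, since it cannot be validated here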
def generate_pools(cidr, gateway_ip):
"""Create IP allocation pools for a specified subnet
The Neutron API defines a subnet's allocation pools as a list of
IPRange objects for defining the pool range.
"""
# Auto allocate the pool around gateway_ip
net = netaddr.IPNetwork(cidr)
ip_version = net.version
first = netaddr.IPAddress(net.first, ip_version)
last = netaddr.IPAddress(net.last, ip_version)
if first == last:
# handle single address subnet case
return [netaddr.IPRange(first, last)]
first_ip = first + 1
# last address is broadcast in v4
last_ip = last - (ip_version == 4)
if first_ip >= last_ip:
# /31 lands here
return []
ipset = netaddr.IPSet(netaddr.IPRange(first_ip, last_ip))
if gateway_ip:
ipset.remove(netaddr.IPAddress(gateway_ip, ip_version))
return list(ipset.iter_ipranges())
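# Example (illustrative): generate_pools('192.168.1.0/24', '192.168.1.1') yields a
# single IPRange 192.168.1.2-192.168.1.254 -- the network, broadcast and gateway
# addresses are excluded from the allocation pool.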
|
the-stack_0_10673 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from sedona.core.SpatialRDD import CircleRDD
from sedona.core.enums import GridType, IndexType
from sedona.core.formatMapper import WktReader
from sedona.core.spatialOperator.join_params import JoinParams
from sedona.core.spatialOperator.join_query_raw import JoinQueryRaw
from sedona.core.spatialOperator.range_query_raw import RangeQueryRaw
from tests.test_base import TestBase
import os
from tests.tools import tests_resource
from shapely.wkt import loads
bank_csv_path = os.path.join(tests_resource, "small/points.csv")
areas_csv_path = os.path.join(tests_resource, "small/areas.csv")
class TestOmitPythonJvmSerdeToRDD(TestBase):
expected_pois_within_areas_ids = [['4', '4'], ['1', '6'], ['2', '1'], ['3', '3'], ['3', '7']]
def test_spatial_join_to_spatial_rdd(self):
poi_point_rdd = WktReader.readToGeometryRDD(self.sc, bank_csv_path, 1, False, False)
areas_polygon_rdd = WktReader.readToGeometryRDD(self.sc, areas_csv_path, 1, False, False)
poi_point_rdd.analyze()
areas_polygon_rdd.analyze()
poi_point_rdd.spatialPartitioning(GridType.QUADTREE)
areas_polygon_rdd.spatialPartitioning(poi_point_rdd.getPartitioner())
jvm_sedona_rdd = JoinQueryRaw.spatialJoin(poi_point_rdd, areas_polygon_rdd, JoinParams())
sedona_rdd = jvm_sedona_rdd.to_rdd().collect()
assert sedona_rdd.__len__() == 5
def test_distance_join_query_flat_to_df(self):
poi_point_rdd = WktReader.readToGeometryRDD(self.sc, bank_csv_path, 1, False, False)
circle_rdd = CircleRDD(poi_point_rdd, 2.0)
circle_rdd.analyze()
poi_point_rdd.analyze()
poi_point_rdd.spatialPartitioning(GridType.QUADTREE)
circle_rdd.spatialPartitioning(poi_point_rdd.getPartitioner())
jvm_sedona_rdd = JoinQueryRaw.DistanceJoinQueryFlat(poi_point_rdd, circle_rdd, False, True)
assert jvm_sedona_rdd.to_rdd().collect().__len__() == 10
def test_spatial_join_query_flat_to_df(self):
poi_point_rdd = WktReader.readToGeometryRDD(self.sc, bank_csv_path, 1, False, False)
areas_polygon_rdd = WktReader.readToGeometryRDD(self.sc, areas_csv_path, 1, False, False)
poi_point_rdd.analyze()
areas_polygon_rdd.analyze()
poi_point_rdd.spatialPartitioning(GridType.QUADTREE)
areas_polygon_rdd.spatialPartitioning(poi_point_rdd.getPartitioner())
jvm_sedona_rdd = JoinQueryRaw.SpatialJoinQueryFlat(
poi_point_rdd, areas_polygon_rdd, False, True)
assert jvm_sedona_rdd.to_rdd().collect().__len__() == 5
def test_range_query_flat_to_df(self):
poi_point_rdd = WktReader.readToGeometryRDD(self.sc, bank_csv_path, 1, False, False)
poi_point_rdd.analyze()
poi_point_rdd.spatialPartitioning(GridType.QUADTREE)
poi_point_rdd.buildIndex(IndexType.QUADTREE, False)
result = RangeQueryRaw.SpatialRangeQuery(
poi_point_rdd, loads("POLYGON((0 0, 0 20, 20 20, 20 0, 0 0))"), True, True
)
rdd = result.to_rdd()
assert rdd.collect().__len__() == 4
|
the-stack_0_10674 | import torch
import torch.nn as nn
import sys
sys.path.insert(0, '../../../../..')
import libs_layers
class Model(torch.nn.Module):
def __init__(self, input_shape, outputs_count, hidden_count = 512):
super(Model, self).__init__()
self.device = "cpu"
self.layers = [
nn.Linear(input_shape[0], hidden_count),
nn.ReLU(),
libs_layers.NoisyLinearFull(hidden_count, hidden_count//2),
nn.ReLU(),
libs_layers.NoisyLinearFull(hidden_count//2, outputs_count),
nn.Tanh()
]
torch.nn.init.xavier_uniform_(self.layers[0].weight)
torch.nn.init.xavier_uniform_(self.layers[2].weight)
torch.nn.init.uniform_(self.layers[4].weight, -0.3, 0.3)
self.model = nn.Sequential(*self.layers)
self.model.to(self.device)
print("model_actor")
print(self.model)
print("\n\n")
def forward(self, state):
return self.model(state)
def save(self, path):
torch.save(self.model.state_dict(), path + "trained/model_actor.pt")
def load(self, path):
self.model.load_state_dict(torch.load(path + "trained/model_actor.pt", map_location = self.device))
self.model.eval()
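# Illustrative usage with assumed shapes (not part of the original file):
#   model = Model(input_shape=(24,), outputs_count=4)
#   actions = model(torch.randn(32, 24))   # -> tensor of shape (32, 4), values in (-1, 1) from Tanh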
|
the-stack_0_10675 | from labels import LabelsPlugin
from electrum.plugins import hook
class Plugin(LabelsPlugin):
@hook
def load_wallet(self, wallet, window):
self.window = window
self.start_wallet(wallet)
def on_pulled(self, wallet):
self.print_error('on pulled')
self.window._trigger_update_history()
|
the-stack_0_10676 | # -*- coding: utf-8 -*-
#
# Copyright © 2009-2010 CEA
# Pierre Raybaut
# Licensed under the terms of the CECILL License
# (see guidata/__init__.py for details)
"""
All guidata DataItem objects demo
A DataSet object is a set of parameters of various types (integer, float,
boolean, string, etc.) which may be edited in a dialog box thanks to the
'edit' method. Parameters are defined by assigning DataItem objects to a
DataSet class definition: each parameter type has its own DataItem class
(IntItem for integers, FloatItem for floats, StringItem for strings, etc.)
"""
from __future__ import print_function
SHOW = True # Show test in GUI-based test launcher
import tempfile, atexit, shutil, datetime, numpy as np
from guidata.dataset.datatypes import DataSet, BeginGroup, EndGroup
from guidata.dataset.dataitems import (FloatItem, IntItem, BoolItem, ChoiceItem,
MultipleChoiceItem, ImageChoiceItem, FilesOpenItem,
StringItem, TextItem, ColorItem, FileSaveItem,
FileOpenItem, DirectoryItem, FloatArrayItem,
DateItem, DateTimeItem)
# Creating temporary files and registering cleanup functions
TEMPDIR = tempfile.mkdtemp(prefix="test_")
atexit.register(shutil.rmtree, TEMPDIR)
FILE_ETA = tempfile.NamedTemporaryFile(suffix=".eta", dir=TEMPDIR)
atexit.register(FILE_ETA.close)
FILE_CSV = tempfile.NamedTemporaryFile(suffix=".csv", dir=TEMPDIR)
atexit.register(FILE_CSV.close)
class TestParameters(DataSet):
"""
DataSet test
The following text is the DataSet 'comment': <br>Plain text or
<b>rich text<sup>2</sup></b> are both supported,
as well as special characters (α, β, γ, δ, ...)
"""
dir = DirectoryItem("Directory", TEMPDIR)
fname = FileOpenItem("Open file", ("csv", "eta"), FILE_CSV.name)
fnames = FilesOpenItem("Open files", "csv", FILE_CSV.name)
fname_s = FileSaveItem("Save file", "eta", FILE_ETA.name)
string = StringItem("String")
text = TextItem("Text")
float_slider = FloatItem("Float (with slider)",
default=0.5, min=0, max=1, step=0.01, slider=True)
integer = IntItem("Integer", default=5, min=3, max=16, slider=True
).set_pos(col=1)
dtime = DateTimeItem("Date/time", default=datetime.datetime(2010, 10, 10))
date = DateItem("Date", default=datetime.date(2010, 10, 10)).set_pos(col=1)
bool1 = BoolItem("Boolean option without label")
bool2 = BoolItem("Boolean option with label", "Label")
_bg = BeginGroup("A sub group")
color = ColorItem("Color", default="red")
choice = ChoiceItem("Single choice 1",
[('16', "first choice"), ('32', "second choice"),
('64', "third choice")])
mchoice2 = ImageChoiceItem("Single choice 2",
[("rect", "first choice", "gif.png" ),
("ell", "second choice", "txt.png" ),
("qcq", "third choice", "file.png" )]
)
_eg = EndGroup("A sub group")
floatarray = FloatArrayItem("Float array", default=np.ones( (50, 5), float),
format=" %.2e ").set_pos(col=1)
mchoice3 = MultipleChoiceItem("MC type 1",
[ str(i) for i in range(12)]
).horizontal(4)
mchoice1 = MultipleChoiceItem("MC type 2",
["first choice", "second choice",
"third choice"]).vertical(1).set_pos(col=1)
if __name__ == "__main__":
# Create QApplication
import guidata
_app = guidata.qapplication()
e = TestParameters()
e.floatarray[:, 0] = np.linspace( -5, 5, 50)
print(e)
if e.edit():
print(e)
e.view() |
the-stack_0_10678 | import logging
import os
from quasimodo.parts_of_facts import PartsOfFacts
from quasimodo.data_structures.submodule_interface import SubmoduleInterface
from quasimodo.assertion_fusion.trainer import Trainer
from quasimodo.parameters_reader import ParametersReader
save_weights = True
parameters_reader = ParametersReader()
annotations_file = parameters_reader.get_parameter("annotations-file") or "data/training_active_learning.tsv"
save_file = parameters_reader.get_parameter("weights-file") or os.path.dirname(__file__) + "/../temp/weights.tsv"
def _save_weights(parts_of_facts):
annotations = get_annotated_data()
header = parts_of_facts.get_header()
header.append("label")
save = ["\t".join(header)]
for fact in parts_of_facts.get_all_facts():
row = parts_of_facts.get_fact_row(fact)
row.append(annotations.get((fact.get_subject().get(),
fact.get_predicate().get(),
fact.get_object().get(),
str(int(fact.is_negative()))),
-1))
row = [str(x) for x in row]
save.append("\t".join(row))
with open(save_file, "w") as f:
for element in save:
f.write(element + "\n")
class LinearCombinationWeightedSubmodule(SubmoduleInterface):
def __init__(self, module_reference):
super().__init__()
self._module_reference = module_reference
self._name = "Linear Combination Per Module Submodule"
def process(self, input_interface):
logging.info("Start linear combining per module submodule")
logging.info("Grouping facts")
parts_of_facts = PartsOfFacts.from_generated_facts(input_interface.get_generated_facts())
if save_weights:
logging.info("Saving weights facts")
_save_weights(parts_of_facts)
logging.info("Training the model...")
trainer = Trainer(save_file)
trainer.train()
logging.info("Generating new facts")
new_generated_facts = []
for fact in parts_of_facts.get_all_facts():
new_generated_facts.append(parts_of_facts.get_generated_fact_with_score_from_classifier(fact, trainer))
new_generated_facts = sorted(new_generated_facts,
key=lambda x: -sum([score[0] for score in x.get_score().scores]))
return input_interface.replace_generated_facts(new_generated_facts)
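# The annotations file read below is expected to be tab-separated, one fact per line:
#   subject<TAB>predicate<TAB>object<TAB>is_negative(0/1)<TAB>label
# (inferred from how the lines are split and keyed in get_annotated_data)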
def get_annotated_data():
annotations = dict()
with open(annotations_file) as f:
for line in f:
line = line.strip().split("\t")
annotations[(line[0], line[1], line[2], line[3])] = line[4]
return annotations
|
the-stack_0_10679 | import sys, os
import numpy as np
import time
import gym
import tensorflow as tf
from spinup.utils.logx import EpochLogger
from common_utils import *
from core import *
# configure gpu use and supress tensorflow warnings
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.6)
tf_config = tf.compat.v1.ConfigProto(gpu_options=gpu_options)
tf_config.gpu_options.allow_growth = True
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
"""
Soft Actor-Critic
(With slight variations that bring it closer to TD3)
"""
def sac(env_fn, logger_kwargs=dict(), network_params=dict(), rl_params=dict()):
# env params
thresh = rl_params['thresh']
# control params
seed = rl_params['seed']
epochs = rl_params['epochs']
steps_per_epoch = rl_params['steps_per_epoch']
replay_size = rl_params['replay_size']
batch_size = rl_params['batch_size']
start_steps = rl_params['start_steps']
max_ep_len = rl_params['max_ep_len']
save_freq = rl_params['save_freq']
render = rl_params['render']
# rl params
gamma = rl_params['gamma']
polyak = rl_params['polyak']
lr = rl_params['lr']
grad_clip_val = rl_params['grad_clip_val']
# entropy params
alpha = rl_params['alpha']
target_entropy = rl_params['target_entropy']
logger = EpochLogger(**logger_kwargs)
if save_freq is not None:
logger.save_config(locals())
train_env, test_env = env_fn(), env_fn()
obs = train_env.observation_space
act = train_env.action_space
tf.set_random_seed(seed)
np.random.seed(seed)
train_env.seed(seed)
train_env.action_space.np_random.seed(seed)
test_env.seed(seed)
test_env.action_space.np_random.seed(seed)
# get the size after resize
obs_dim = network_params['input_dims']
act_dim = act.shape[0]
# init a state buffer for storing last m states
train_state_buffer = StateBuffer(m=obs_dim[2])
test_state_buffer = StateBuffer(m=obs_dim[2])
# Experience buffer
replay_buffer = ReplayBuffer(obs_dim=obs_dim, act_dim=act_dim, size=replay_size)
# Inputs to computation graph
x_ph, a_ph, x2_ph, r_ph, d_ph = placeholders(obs_dim, act_dim, obs_dim, None, None)
# Main outputs from computation graph
with tf.variable_scope('main'):
mu, pi, logp_pi, q1_a, q2_a = build_models(x_ph, a_ph, act, act_dim, network_params)
with tf.variable_scope('main', reuse=True):
# compose q with pi, for pi-learning
_, _, _, q1_pi, q2_pi = build_models(x_ph, pi, act, act_dim, network_params)
# get actions and log probs of actions for next states, for Q-learning
_, pi_next, logp_pi_next, _, _ = build_models(x2_ph, a_ph, act, act_dim, network_params)
# Target value network
with tf.variable_scope('target'):
_, _, _, q1_pi_targ, q2_pi_targ = build_models(x2_ph, pi_next, act, act_dim, network_params)
# alpha Params
if target_entropy == 'auto':
target_entropy = tf.cast(-act_dim, tf.float32)
else:
target_entropy = tf.cast(target_entropy, tf.float32)
log_alpha = tf.get_variable('log_alpha', dtype=tf.float32, initializer=0.0)
if alpha == 'auto': # auto tune alpha
alpha = tf.exp(log_alpha)
else: # fixed alpha
alpha = tf.get_variable('alpha', dtype=tf.float32, initializer=alpha)
# Count variables
var_counts = tuple(count_vars(scope) for scope in ['log_alpha',
'main/pi',
'main/q1',
'main/q2',
'main'])
print("""\nNumber of other parameters:
alpha: %d,
pi: %d,
q1: %d,
q2: %d,
total: %d\n"""%var_counts)
# Min Double-Q:
min_q_pi = tf.minimum(q1_pi, q2_pi)
min_q_pi_targ = tf.minimum(q1_pi_targ, q2_pi_targ)
# Targets for Q and V regression
q_backup = tf.stop_gradient(r_ph + gamma*(1-d_ph)*(min_q_pi_targ - alpha*logp_pi_next))
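    # i.e. y = r + gamma * (1 - done) * (min(Q1', Q2')(s', a') - alpha * log pi(a'|s')),
    # the soft Bellman backup used as the regression target for both critics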
# critic losses
q1_loss = 0.5 * tf.reduce_mean((q_backup - q1_a)**2)
q2_loss = 0.5 * tf.reduce_mean((q_backup - q2_a)**2)
value_loss = q1_loss + q2_loss
# Soft actor losses
pi_loss = tf.reduce_mean(alpha * logp_pi - min_q_pi)
# alpha loss for temperature parameter
alpha_backup = tf.stop_gradient(logp_pi + target_entropy)
alpha_loss = -tf.reduce_mean(log_alpha * alpha_backup)
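    # Minimising this temperature objective, J(log_alpha) = -E[log_alpha * (log pi + target_entropy)],
    # drives the policy entropy towards target_entropy (standard automatic entropy tuning)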
# Policy train op
pi_optimizer = tf.train.AdamOptimizer(learning_rate=lr, epsilon=1e-04)
if grad_clip_val is not None:
gvs = pi_optimizer.compute_gradients(pi_loss, var_list=get_vars('main/pi'))
capped_gvs = [(ClipIfNotNone(grad, grad_clip_val), var) for grad, var in gvs]
train_pi_op = pi_optimizer.apply_gradients(capped_gvs)
else:
train_pi_op = pi_optimizer.minimize(pi_loss, var_list=get_vars('main/pi'))
# Value train op
value_optimizer = tf.train.AdamOptimizer(learning_rate=lr, epsilon=1e-04)
with tf.control_dependencies([train_pi_op]):
if grad_clip_val is not None:
gvs = value_optimizer.compute_gradients(value_loss, var_list=get_vars('main/q'))
capped_gvs = [(ClipIfNotNone(grad, grad_clip_val), var) for grad, var in gvs]
train_value_op = value_optimizer.apply_gradients(capped_gvs)
else:
train_value_op = value_optimizer.minimize(value_loss, var_list=get_vars('main/q'))
alpha_optimizer = tf.train.AdamOptimizer(learning_rate=lr, epsilon=1e-04)
with tf.control_dependencies([train_value_op]):
train_alpha_op = alpha_optimizer.minimize(alpha_loss, var_list=get_vars('log_alpha'))
# Polyak averaging for target variables
# (control flow because sess.run otherwise evaluates in nondeterministic order)
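    # Update rule applied after each value-network update:
    #   theta_targ <- polyak * theta_targ + (1 - polyak) * theta_main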
with tf.control_dependencies([train_value_op]):
target_update = tf.group([tf.assign(v_targ, polyak*v_targ + (1-polyak)*v_main)
for v_main, v_targ in zip(get_vars('main'), get_vars('target'))])
# All ops to call during one training step
step_ops = [pi_loss, q1_loss, q2_loss, q1_a, q2_a, logp_pi, target_entropy, alpha_loss, alpha,
train_pi_op, train_value_op, train_alpha_op, target_update]
# Initializing targets to match main variables
target_init = tf.group([tf.assign(v_targ, v_main)
for v_main, v_targ in zip(get_vars('main'), get_vars('target'))])
sess = tf.Session(config=tf_config)
sess.run(tf.global_variables_initializer())
sess.run(target_init)
# Setup model saving
if save_freq is not None:
logger.setup_tf_saver(sess, inputs={'x_ph': x_ph, 'a_ph': a_ph},
outputs={'mu': mu, 'pi': pi, 'q1_a': q1_a, 'q2_a': q2_a})
def get_action(state, deterministic=False):
state = state.astype('float32') / 255.
act_op = mu if deterministic else pi
return sess.run(act_op, feed_dict={x_ph: [state]})[0]
def reset(env, state_buffer):
o, r, d, ep_ret, ep_len = env.reset(), 0, False, 0, 0
o = process_image_observation(o, obs_dim, thresh)
state = state_buffer.init_state(init_obs=o)
return o, r, d, ep_ret, ep_len, state
def test_agent(n=10, render=True):
for j in range(n):
o, r, d, ep_ret, ep_len, test_state = reset(test_env, test_state_buffer)
if render: test_env.render()
while not(d or (ep_len == max_ep_len)):
# Take deterministic actions at test time
o, r, d, _ = test_env.step(get_action(test_state, True))
o = process_image_observation(o, obs_dim, thresh)
test_state = test_state_buffer.append_state(o)
ep_ret += r
ep_len += 1
if render: test_env.render()
if render: test_env.close()
logger.store(TestEpRet=ep_ret, TestEpLen=ep_len)
start_time = time.time()
o, r, d, ep_ret, ep_len, state = reset(train_env, train_state_buffer)
total_steps = steps_per_epoch * epochs
save_iter = 0
# Main loop: collect experience in env and update/log each epoch
for t in range(total_steps):
"""
Until start_steps have elapsed, randomly sample actions
from a uniform distribution for better exploration. Afterwards,
use the learned policy.
"""
if t > start_steps:
a = get_action(state)
else:
a = train_env.action_space.sample()
# Step the env
o2, r, d, _ = train_env.step(a)
o2 = process_image_observation(o2, obs_dim, thresh)
next_state = train_state_buffer.append_state(o2)
ep_ret += r
ep_len += 1
# Ignore the "done" signal if it comes from hitting the time
# horizon (that is, when it's an artificial terminal signal
# that isn't based on the agent's state)
d = False if ep_len==max_ep_len else d
# Store experience to replay buffer
replay_buffer.store(state, a, r, next_state, d)
# Super critical, easy to overlook step: make sure to update
# most recent observation!
o = o2
state = next_state
if d or (ep_len == max_ep_len):
"""
Perform all SAC updates at the end of the trajectory.
This is a slight difference from the SAC specified in the
original paper.
"""
for j in range(ep_len):
batch = replay_buffer.sample_batch(batch_size)
feed_dict = {x_ph: batch['obs1'],
x2_ph: batch['obs2'],
a_ph: batch['acts'],
r_ph: batch['rews'],
d_ph: batch['done'],
}
outs = sess.run(step_ops, feed_dict)
logger.store(LossPi=outs[0], LossQ1=outs[1], LossQ2=outs[2],
Q1Vals=outs[3], Q2Vals=outs[4], LogPi=outs[5], TargEntropy=outs[6],
LossAlpha=outs[7], Alpha=outs[8])
logger.store(EpRet=ep_ret, EpLen=ep_len)
o, r, d, ep_ret, ep_len, state = reset(train_env, train_state_buffer)
# End of epoch wrap-up
if t > 0 and t % steps_per_epoch == 0:
epoch = t // steps_per_epoch
# Save model
if save_freq is not None:
if (epoch % save_freq == 0) or (epoch == epochs-1):
logger.save_state({'env': train_env}, itr=save_iter)
save_iter+=1
# Test the performance of the deterministic version of the agent.
test_agent(n=2, render=render)
# Log info about epoch
logger.log_tabular('Epoch', epoch)
logger.log_tabular('EpRet', with_min_and_max=True)
logger.log_tabular('TestEpRet', with_min_and_max=True)
logger.log_tabular('EpLen', average_only=True)
logger.log_tabular('TestEpLen', average_only=True)
logger.log_tabular('TotalEnvInteracts', t)
logger.log_tabular('Q1Vals', with_min_and_max=True)
logger.log_tabular('Q2Vals', with_min_and_max=True)
logger.log_tabular('LogPi', average_only=True)
logger.log_tabular('TargEntropy', average_only=True)
logger.log_tabular('Alpha', average_only=True)
logger.log_tabular('LossPi', average_only=True)
logger.log_tabular('LossQ1', average_only=True)
logger.log_tabular('LossQ2', average_only=True)
logger.log_tabular('LossAlpha', average_only=True)
logger.log_tabular('Time', time.time()-start_time)
logger.dump_tabular()
if __name__ == '__main__':
from spinup.utils.run_utils import setup_logger_kwargs
network_params = {
'input_dims':[96,96,4],
'conv_filters':(16, 32),
'kernel_width':(8,4),
'strides':(4,2),
'pooling':'none',
'pooling_width':2,
'pooling_strides':1,
'dense_units':(512,),
'hidden_activation':'relu',
'output_activation':'linear',
'batch_norm':False,
'dropout':0.0
}
rl_params = {
# env params
'env_name':'CarRacing-v0',
'thresh':False,
# control params
'seed':int(0),
'epochs':int(50),
'steps_per_epoch':5000,
'replay_size':int(1e5),
'batch_size':64,
'start_steps':4000,
'max_ep_len':1000,
'save_freq':5,
'render':True,
# rl params
'gamma':0.99,
'polyak':0.995,
'lr':0.001,
'grad_clip_val':None,
# entropy params
'alpha': 'auto', # fixed or auto balance
'target_entropy':'auto', # fixed or auto define with act_dim
}
saved_model_dir = '../../saved_models'
logger_kwargs = setup_logger_kwargs(exp_name='sac_cont_image_' + rl_params['env_name'], seed=rl_params['seed'], data_dir=saved_model_dir, datestamp=False)
env = gym.make(rl_params['env_name'])
sac(lambda:env, logger_kwargs=logger_kwargs,
network_params=network_params, rl_params=rl_params)
|
the-stack_0_10680 | #!/usr/bin/env python
"""
_Template_
Template class for all Step Template implementations to inherit and implement
the API
"""
import os
from WMCore.WMSpec.WMStep import WMStepHelper
from WMCore.WMSpec.ConfigSectionTree import nodeName
class CoreHelper(WMStepHelper):
"""
_CoreHelper_
Helper API for core settings
"""
def stepName(self):
"""
_stepName_
Get the name of the step
"""
return nodeName(self.data)
def addEnvironmentVariable(self, varname, setting):
"""
_addEnvironmentVariable_
add a key = value style setting to the environment for this
step
"""
setattr(self.data.environment.variables, varname, setting)
return
def addEnvironmentPath(self, pathname, setting):
"""
_addEnvironmentPath_
add a key = value1:value2:value3 environment setting to this step
"""
if getattr(self.data.environment.paths, pathname, None) == None:
setattr(self.data.environment.paths, pathname, [])
pathentry = getattr(self.data.environment.paths, pathname)
pathentry.append(setting)
return
def environment(self):
"""
_environment_
Get the environment settings for this step
"""
return self.data.environment
def addDirectory(self, dirName):
"""
_addDirectory_
Add a subdirectory structure to the template that will be built by
the builder
"""
split = dirName.split("/")
split = [ x for x in split if x.strip() != "" ]
dirs = getattr(self.data.build.directories, self.stepName())
for subdir in split:
exists = getattr(dirs, subdir, None)
if exists == None:
dirs.section_(subdir)
dirs = getattr(dirs, subdir)
return dirs
def addFile(self, fileName, newLocation = None):
"""
_addFile_
Add a file to the job at build time. This file must be
a local filesystem file available at fileName.
An optional location within the step can be specified which
may include a path structure that gets translated into calls
to addDirectory
"""
dirs = getattr(self.data.build.directories, self.stepName())
if newLocation != None:
filename = os.path.basename(newLocation)
dirname = os.path.dirname(newLocation)
dirs = self.addDirectory(dirname)
setattr(dirs, filename, { "Source" : fileName, "Target" : filename})
else:
filename = os.path.basename(fileName)
setattr(dirs, filename, {"Target" : filename, "Source" : fileName })
return
def directoryStructure(self):
"""
_directoryStructure_
Util to retrieve the directory structure
"""
return self.data.build.directories
class Template:
"""
_Template_
Base interface definition for any WMStep Template
"""
def __init__(self):
pass
def __call__(self, step):
"""
_operator(step)_
Install the template on the step instance provided
"""
self.coreInstall(step)
self.install(step)
def coreInstall(self, step):
"""
_coreInstall_
Install attributes common to all steps
"""
# Environment settings to pass to the step
step.section_("environment")
step.environment.section_("variables")
step.environment.section_("paths")
# Directory structure and files to be included in the job
# beyond those that would be added by a Step Specific builder
# Step Specific subclasses can simply append to these to get files
# and dirs into the job
step.section_("build")
step.build.section_("directories")
step.build.directories.section_(nodeName(step))
def install(self, step):
"""
_install_
Override this method to install the required attributes
in the step Instance provided
"""
msg = "WMSpec.Steps.Template.install method not overridden in "
msg += "implementation: %s\n" % self.__class__.__name__
raise NotImplementedError(msg)
def helper(self, step):
"""
_helper_
Wrap the step instance in a helper class tailored to this particular
step type
"""
msg = "WMSpec.Steps.Template.helper method not overridden in "
msg += "implementation: %s\n" % self.__class__.__name__
raise NotImplementedError(msg)
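# A concrete template subclasses Template and fills in install/helper, roughly as in
# the illustrative sketch below (names such as "MyStep"/stepType are assumptions for
# illustration, not taken from this file):
#
#   class MyTemplate(Template):
#       def install(self, step):
#           step.stepType = "MyStep"
#       def helper(self, step):
#           return CoreHelper(step)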
|
the-stack_0_10684 | import enum
from ipaddress import IPv4Address
import yaml
from CybORG import CybORG
from CybORG.Emulator.AWS import AWSConfig
def enum_representer(dumper, data):
return dumper.represent_scalar(u'tag:yaml.org,2002:str', f'{str(data.name)}')
def ipv4_representer(dumper, data):
return dumper.represent_scalar(u'tag:yaml.org,2002:str', f'{str(data)}')
yaml.add_multi_representer(enum.Enum, enum_representer)
yaml.add_representer(IPv4Address, ipv4_representer)
scenario = '/home/max/PycharmProjects/Autonomous-Cyber-Ops/CybORG/Shared/Scenarios/SingleHostScenario.yaml'
image = "Velociraptor_Server"
sm = {'Hosts': {'Test_Host': {'image': image}}}
cyborg = CybORG(scenario, environment='aws', env_config={
"config": AWSConfig.load_and_setup_logger(test=True),
"create_tunnel": False
})
# Check that the returned true state contains all the required information,
# strip volatile fields, print it, and dump it to the image YAML file.
try:
info_required = {'Test_Host': {'User_info': 'All',
'System_info': 'All',
'Processes': 'All',
'Files': ['/root', '/bin', '/sbin', '/etc', '/home', '/usr/sbin/', '/usr/bin/']}}
true_state = cyborg.get_true_state(info_required)
true_state.data.pop('success')
assert true_state.data != {}
for key, data in true_state.data.items():
if "Interface" in data:
data.pop("Interface")
if 'Processes' in data:
for proc in data['Processes']:
if 'Known Process' in proc:
proc.pop('Known Process')
if 'Known Path' in proc:
proc.pop('Known Path')
if 'System info' in data and 'Hostname' in data['System info']:
data['System info'].pop('Hostname')
if 'User Info' in data:
for user in data['User Info']:
if 'Groups' in user:
for group in user['Groups']:
if 'Builtin Group' in group:
group.pop('Builtin Group')
print(true_state)
with open(f'{image}_image.yaml', 'w') as outfile:
yaml.dump(true_state.data, outfile, default_flow_style=False)
finally:
cyborg.shutdown(teardown=True)
|
the-stack_0_10686 | import hashlib
import requests
from datetime import datetime, timedelta
from .filter import McDailyFilter
class McDailyAccount:
def __init__(self):
""" User info """
self.username = '' # Username
self.password = '' # Password
self.access_token = '' # Token
self.param_string = '' # username + password
self.card_no = '' # Card no
""" System info """
self.str1 = datetime.strftime(datetime.now(), '%Y/%m/%d %H:%M:%S') # Device Time
self.str2 = '2.2.0' # App Version
self.str3 = datetime.strftime(datetime.now(), '%Y%m%d%H%M%S') # Call time
self.model_id = 'Pixel XL' # Model ID
self.os_version = '9' # Android OS Version
self.platform = 'Android' # platform
self.device_uuid = 'device_uuid' # Device Uuid
self.order_no = self.device_uuid + self.str3 # Order No
""" Request json data """
self.json = {
"access_token" : self.access_token,
"source_info" : {
"app_version" : self.str2,
"device_time" : self.str1,
"device_uuid" : self.device_uuid,
"model_id" : self.model_id,
"os_version" : self.os_version,
"platform" : self.platform,
}
}
def login(self, username, password):
self.username = username
self.password = password
self.param_string = username + password
""" Mask = md5('Mc' + order_no + platform + os_version + model_id + device_uuid + str1 + str2 + param_string + 'Donalds') """
data = 'Mc%s%s%s%s%s%s%s%sDonalds' % (
self.order_no,
self.platform,
self.os_version,
self.model_id,
self.device_uuid,
self.str1,
self.str2,
self.param_string
)
hash = hashlib.md5()
hash.update(data.encode('utf-8'))
json = {
"account" : self.username,
"password" : self.password,
"OrderNo" : self.order_no,
"mask" : hash.hexdigest(),
"source_info" : {
"app_version" : self.str2,
"device_time" : self.str1,
"device_uuid" : self.device_uuid,
"model_id" : self.model_id,
"os_version" : self.os_version,
"Platform" : self.platform,
}
}
response = requests.post('https://api.mcddaily.com.tw/login_by_mobile', json = json, headers = {'user-agent' : 'okhttp/3.10.0'})
self.set_token(response.json()['results']['member_info']['access_token'])
return response
def set_token(self, access_token):
self.access_token = access_token
self.json['access_token'] = access_token
def get_card_query(self, card_no):
self.card_no = card_no
""" Mask = md5('Mc' + order_no + access_token + card_no + callTime + 'Donalds') """
data = 'Mc%s%s%s%sDonalds' % (
self.order_no,
self.access_token,
self.card_no,
self.str3,
)
hash = hashlib.md5()
hash.update(data.encode('utf-8'))
json = {
"OrderNo" : self.order_no,
"access_token" : self.access_token,
"callTime" : self.str3,
"cardNo" : self.card_no,
"mask" : mask.hexdigest(),
}
respones = requests.post('https://api.mcddaily.com.tw/queryBonus', json = json, headers = {'user-agent' : 'okhttp/3.10.0'})
return respones
def lottery_get_item(self):
respones = requests.post('https://api1.mcddailyapp.com/lottery/get_item', json = self.json, headers = {'user-agent' : 'okhttp/3.10.0'})
return McDailyFilter(respones.json()).get_object()
def coupon_get_list(self):
respones = requests.post('https://api1.mcddailyapp.com/coupon/get_list', json = self.json, headers = {'user-agent' : 'okhttp/3.10.0'})
return McDailyFilter(respones.json()).get_object()
def sticker_get_list(self):
respones = requests.post('https://api1.mcddailyapp.com/sticker/get_list', json = self.json, headers = {'user-agent' : 'okhttp/3.10.0'})
return McDailyFilter(respones.json()).get_object()
def sticker_redeem(self):
sticker_list = self.sticker_get_list()
if len(sticker_list) < 6:
return 'Just %d stickers' % len(sticker_list)
sticker_id_list = []
for i in range(6):
sticker_id_list.append(sticker_list[i].sticker_id)
json = self.json
json['sticker_ids'] = sticker_id_list
respones = requests.post('https://api1.mcddailyapp.com/sticker/redeem', json = json, headers = {'user-agent' : 'okhttp/3.10.0'})
return McDailyFilter(respones.json()).get_object()
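# Illustrative usage (hypothetical credentials; requires the real McDonald's API to be reachable):
#   account = McDailyAccount()
#   account.login('user@example.com', 'secret')
#   coupons = account.coupon_get_list()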
|
the-stack_0_10690 | #!/usr/bin/python3
import binascii
import json
import logging
import re
import sys
from collections import defaultdict
MASK_MAGIC_REGEX = re.compile(r'[*?!@$]')
def to_unixnano(timestamp):
return int(timestamp) * (10**9)
# include/atheme/channels.h
CMODE_FLAG_TO_MODE = {
0x001: 'i', # CMODE_INVITE
0x010: 'n', # CMODE_NOEXT
0x080: 's', # CMODE_SEC
0x100: 't', # CMODE_TOPIC
}
# attempt to interpret certfp as a hex-encoded SHA-256 fingerprint
def validate_certfp(certfp):
try:
dec = binascii.unhexlify(certfp)
except:
return False
return len(dec) == 32
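# e.g. a 64-character hex string (a hex-encoded SHA-256 digest) passes this check;
# shorter strings or non-hex input are rejected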
def convert(infile):
out = {
'version': 1,
'source': 'atheme',
'users': defaultdict(dict),
'channels': defaultdict(dict),
}
group_to_founders = defaultdict(list)
channel_to_founder = defaultdict(lambda: (None, None))
while True:
line = infile.readline()
if not line:
break
line = line.rstrip(b'\r\n')
try:
line = line.decode('utf-8')
except UnicodeDecodeError:
line = line.decode('utf-8', 'replace')
logging.warning("line contained invalid utf8 data " + line)
parts = line.split(' ')
category = parts[0]
if category == 'GACL':
# Note: all group definitions precede channel access entries (token CA) by design, so it
# should be safe to read this in using one pass.
groupname = parts[1]
user = parts[2]
flags = parts[3]
if 'F' in flags:
group_to_founders[groupname].append(user)
elif category == 'MU':
# user account
# MU AAAAAAAAB shivaram $1$hcspif$nCm4r3S14Me9ifsOPGuJT. [email protected] 1600134392 1600467343 +sC default
name = parts[2]
user = {'name': name, 'hash': parts[3], 'email': parts[4], 'registeredAt': to_unixnano(parts[5])}
out['users'][name].update(user)
pass
elif category == 'MN':
# grouped nick
# MN shivaram slingamn 1600218831 1600467343
username, groupednick = parts[1], parts[2]
if username != groupednick:
user = out['users'][username]
user.setdefault('additionalnicks', []).append(groupednick)
elif category == 'MDU':
if parts[2] == 'private:usercloak':
username = parts[1]
out['users'][username]['vhost'] = parts[3]
elif category == 'MCFP':
username, certfp = parts[1], parts[2]
if validate_certfp(certfp):
user = out['users'][username]
user.setdefault('certfps', []).append(certfp.lower())
elif category == 'MC':
# channel registration
# MC #mychannel 1600134478 1600467343 +v 272 0 0
# MC #NEWCHANNELTEST 1602270889 1602270974 +vg 1 0 0 jaeger4
chname = parts[1]
chdata = out['channels'][chname]
# XXX just give everyone +nt, regardless of lock status; they can fix it later
chdata.update({'name': chname, 'registeredAt': to_unixnano(parts[2])})
if parts[8] != '':
chdata['key'] = parts[8]
modes = {'n', 't'}
mlock_on, mlock_off = int(parts[5]), int(parts[6])
for flag, mode in CMODE_FLAG_TO_MODE.items():
if flag & mlock_on != 0:
modes.add(mode)
elif flag & mlock_off != 0 and mode in modes:
modes.remove(mode)
chdata['modes'] = ''.join(sorted(modes))
chdata['limit'] = int(parts[7])
elif category == 'MDC':
# auxiliary data for a channel registration
# MDC #mychannel private:topic:setter s
# MDC #mychannel private:topic:text hi again
# MDC #mychannel private:topic:ts 1600135864
chname = parts[1]
category = parts[2]
if category == 'private:topic:text':
out['channels'][chname]['topic'] = line.split(maxsplit=3)[3]
elif category == 'private:topic:setter':
out['channels'][chname]['topicSetBy'] = parts[3]
elif category == 'private:topic:ts':
out['channels'][chname]['topicSetAt'] = to_unixnano(parts[3])
elif category == 'private:mlockext':
# the channel forward mode is +L on insp/unreal, +f on charybdis
# charybdis has a +L ("large banlist") taking no argument
# and unreal has a +f ("flood limit") taking two colon-delimited numbers,
# so check for an argument that starts with a #
if parts[3].startswith('L#') or parts[3].startswith('f#'):
out['channels'][chname]['forward'] = parts[3][1:]
elif category == 'CA':
# channel access lists
# CA #mychannel shivaram +AFORafhioqrstv 1600134478 shivaram
            chname, username, flags, set_at = parts[1], parts[2], parts[3], int(parts[4])
            chdata = out['channels'][chname]
if 'amode' not in chdata:
chdata['amode'] = {}
# see libathemecore/flags.c: +o is op, +O is autoop, etc.
if 'F' in flags:
# If the username starts with "!", it's actually a GroupServ group.
if username.startswith('!'):
group_founders = group_to_founders.get(username)
if not group_founders:
# skip this and warn about it later
continue
# attempt to promote the first group founder to channel founder
username = group_founders[0]
# but everyone gets the +q flag
for founder in group_founders:
chdata['amode'][founder] = 'q'
# there can only be one founder
preexisting_founder, preexisting_set_at = channel_to_founder[chname]
if preexisting_founder is None or set_at < preexisting_set_at:
chdata['founder'] = username
channel_to_founder[chname] = (username, set_at)
# but multiple people can receive the 'q' amode
chdata['amode'][username] = 'q'
continue
if MASK_MAGIC_REGEX.search(username):
# ignore groups, masks, etc. for any field other than founder
continue
# record the first appearing successor, if necessary
if 'S' in flags:
if not chdata.get('successor'):
chdata['successor'] = username
# finally, handle amodes
if 'q' in flags:
chdata['amode'][username] = 'q'
elif 'a' in flags:
chdata['amode'][username] = 'a'
elif 'o' in flags or 'O' in flags:
chdata['amode'][username] = 'o'
elif 'h' in flags or 'H' in flags:
chdata['amode'][username] = 'h'
elif 'v' in flags or 'V' in flags:
chdata['amode'][username] = 'v'
else:
pass
# do some basic integrity checks
def validate_user(name):
if not name:
return False
return bool(out['users'].get(name))
invalid_channels = []
for chname, chdata in out['channels'].items():
if not validate_user(chdata.get('founder')):
if validate_user(chdata.get('successor')):
chdata['founder'] = chdata['successor']
else:
invalid_channels.append(chname)
for chname in invalid_channels:
logging.warning("Unable to find a valid founder for channel %s, discarding it", chname)
del out['channels'][chname]
return out
def main():
if len(sys.argv) != 3:
raise Exception("Usage: atheme2json.py atheme_db output.json")
with open(sys.argv[1], 'rb') as infile:
output = convert(infile)
with open(sys.argv[2], 'w') as outfile:
json.dump(output, outfile)
if __name__ == '__main__':
logging.basicConfig()
sys.exit(main())
|
the-stack_0_10691 | #
# Python Macro Language for Dragon NaturallySpeaking
# (c) Copyright 1999 by Joel Gould
# Portions (c) Copyright 1999 by Dragon Systems, Inc.
#
# _mouse.py
# Sample macro file which implements mouse and keyboard movement modes
# similar to DragonDictate for Windows
#
# April 1, 2000
# Updates from Jonathan Epstein
# - cancel arrow movement when the active window changes
# - add support for tray icon during arrow movement
#
# In the grammar we map some keywords into pixel counts according to the
# following dictionary. These numbers can be safely changed within reason.
amountDict = {
'little':3, # as in 'move a little left'
'lot':10 } # as in 'move left a lot'
# For caret movement, this represents the default speed in milliseconds
# between arrow keys
defaultMoveSpeed = 250
# For caret movement, this is the rate change applied when you make it
# faster. For example, 1.5 is a 50% speed increase.
moveRateChange = 2.0
# For mouse movement, this represents the default speed in milliseconds
# between pixel movements and the default number of pixels per move. We
# do not want the update rate to be less than 50 milliseconds so if it
# gets faster than that, we adjust the mouse pixels instead.
defaultMouseSpeed = 100
defaultMousePixels = 1
# For mouse movement, this is the rate change applied when you make it
# faster. For example, 1.5 is a 50% speed increase.
mouseRateChange = 3.0
############################################################################
#
# Here are some of our instance variables
#
# self.haveCallback set when the timer callback in installed
# self.curMode 1 for caret movement, 2 for mouse movement, or None
# self.curSpeed current movement speed (milliseconds for timer)
# self.curPixels for mouse movement, pixels per move
# self.lastClock time of last timer callback or 0
# self.curDirection direction of movement as string
#
import string # for atoi
import time # for clock
import natlink
from natlinkutils import *
class ThisGrammar(GrammarBase):
# when we unload the grammar, we must make sure we clear the timer
# callback so we keep a variable which is set when we currently own
# the timer callback
def __init__(self):
self.haveCallback = 0
self.curMode = None
self.iconState = 0
GrammarBase.__init__(self)
def unload(self):
if self.haveCallback:
natlink.setTimerCallback(None,0)
self.haveCallback = 0
GrammarBase.unload(self)
# This is our grammar. The rule 'start' is what is normally active. The
# rules 'nowMoving' and 'nowMousing' are used when we are in caret or
# mouse movement mode.
gramDefn = """
# this is the rule which is normally active
<start> exported = <startMoving> | <startMousing> |
<nudgeMouse> | <mouseButton>;
# this rule is active when we are moving the caret
<nowMoving> exported =
[ move ] ( {direction} | [much] faster | [much] slower ) |
stop [ moving ];
# this rule is active when we are moving the mouse
<nowMousing> exported =
[ move ] ( {direction} | faster | slower ) |
stop [ moving ] | <mouseButton> | <mouseButton>;
# here are the subrules which deal with caret movement
<startMoving> = move {direction} | start moving {direction};
# here are the subrules which deal with mouse movement
<startMousing> = [ start moving ] mouse {direction};
<nudgeMouse> =
nudge mouse {direction} |
[ move ] mouse {direction} ( a little | a lot | {count} pixels ) |
[ move ] mouse ( a little | a lot | {count} pixels ) {direction};
<mouseButton> =
[ mouse ] [ left | middle | right ] [ single | double ] click;
"""
# These are the lists which we use in our grammar. The directions and
# counts are implemented as lists to make parsing easier (words from
# lists are referenced as part of the rule which includes the list).
listDefn = {
'direction' : ['up','down','left','right'],
'count' : ['1','2','3','4','5','6','7','8','9','10','11','12','13',
'14','15','16','17','18','19','20','25','30','35','40','45','50'] }
# Load the grammar, build the direction and count lists and activate the
# main rule ('start')
def initialize(self):
self.load(self.gramDefn)
for listName in self.listDefn.keys():
self.setList(listName,self.listDefn[listName])
self.activateSet(['start'],exclusive=0)
# This subroutine moves the mouse cursor in an indicated direction
# by an indicated number of pixels
def moveMouse(self,direction,count):
xPos,yPos = natlink.getCursorPos()
if direction == 'up': yPos = yPos - count
elif direction == 'down': yPos = yPos + count
elif direction == 'left': xPos = xPos - count
elif direction == 'right': xPos = xPos + count
xSize,ySize = natlink.getScreenSize()
if xPos < 0: xPos = 0
if xPos >= xSize: xPos = xSize - 1
if yPos < 0: yPos = 0
if yPos >= ySize: yPos = ySize - 1
natlink.playEvents([(wm_mousemove,xPos,yPos)])
# This subroutine cancels any active movement mode
def cancelMode(self):
self.curMode = None
if self.haveCallback:
natlink.setTimerCallback(None,0)
self.haveCallback = 0
self.activateSet(['start'],exclusive=0)
natlink.setTrayIcon()
# This function is called on a timer event. If we are in a movement
# mode then we move the mouse or caret by the indicated amount.
#
# The apparent speed for mouse movement is the speed divided by the
# number of pixels per move. We calculate the number of pixels per
# move to ensure that the speed is never faster than 50 milliseconds.
def onTimer(self):
if self.lastClock:
diff = int( (time.clock() - self.lastClock) * 1000 )
self.lastClock = time.clock()
if self.curMode == 1:
moduleInfo = natlink.getCurrentModule()
if natlink.getMicState() == 'on' and moduleInfo == self.moduleInfo:
self.setTrayIcon(1)
# Note: it is often during a playString operation that the
# "stop moving" command occurs
natlink.playString('{'+self.curDirection+'}')
else:
self.cancelMode()
elif self.curMode == 2:
self.moveMouse(self.curDirection,self.curPixels)
# This handles the nudgeMouse rule. We want to extract the direction
# and the count or amount.
def gotResults_nudgeMouse(self,words,fullResults):
self.cancelMode()
direction = findKeyWord(words,self.listDefn['direction'])
count = findKeyWord(words,self.listDefn['count'])
amount = findKeyWord(words,amountDict.keys())
if count:
count = string.atoi(count)
elif amount:
count = amountDict[amount]
self.moveMouse(direction,count)
# This handles the mouseButton rule. We want to extract the button
# name (if specified) and whether this is a single or double click.
def gotResults_mouseButton(self,words,fullResults):
self.cancelMode()
which = findKeyWord(words,['left','right','middle'])
if not which: which = 'left'
if 'double' in words: count = 2
else: count = 1
buttonClick(which,count)
# This handles the startMoving rule. We only need to extract the
# direction. To turn on cursor movement mode we need to install a
# timer callback (warning: this is global) and set the recognition
# state to be exclusively from the rule <nowMoving>. The cursor only
# moves in the timer callback itself.
def gotResults_startMoving(self,words,fullResults):
self.cancelMode()
direction = findKeyWord(words,self.listDefn['direction'])
self.curMode = 1
self.curDirection = direction
self.setTrayIcon(0)
self.moduleInfo = natlink.getCurrentModule()
self.curSpeed = defaultMoveSpeed
self.lastClock = time.clock()
natlink.setTimerCallback(self.onTimer,defaultMoveSpeed)
self.haveCallback = 1
self.activateSet(['nowMoving'],exclusive=1)
# This handles the nowMoving rule. We want to extract the keyword which
# tells us what to do.
def gotResults_nowMoving(self,words,fullResults):
direction = findKeyWord(words,self.listDefn['direction'])
if direction:
self.curDirection = direction
self.setTrayIcon(0)
elif 'stop' in words:
self.cancelMode()
elif 'faster' in words:
speed = int(self.curSpeed / moveRateChange)
if 'much' in words:
speed = int(speed / (moveRateChange*moveRateChange))
if speed < 50: speed = 50
self.curSpeed = speed
natlink.setTimerCallback(self.onTimer,speed)
elif 'slower' in words:
speed = int(self.curSpeed * moveRateChange)
if 'much' in words:
speed = int(speed * (moveRateChange*moveRateChange))
if speed > 4000: speed = 4000
self.curSpeed = speed
natlink.setTimerCallback(self.onTimer,speed)
# This handles the startMousing rule. We only need to extract the
# direction. To turn on cursor movement mode we need to install a
# timer callback (warning: this is global) and set the recognition
# state to be exclusively from the rule <nowMoving>. The cursor only
# moves in the timer callback itself.
def gotResults_startMousing(self,words,fullResults):
self.cancelMode()
direction = findKeyWord(words,self.listDefn['direction'])
self.curMode = 2
self.curDirection = direction
self.curSpeed = defaultMouseSpeed
self.curPixels = defaultMousePixels
self.lastClock = time.clock()
natlink.setTimerCallback(self.onTimer,defaultMouseSpeed)
self.haveCallback = 1
self.activateSet(['nowMousing'],exclusive=1)
# This handles the nowMousing rule. We want to extract the keyword which
# tells us what to do.
def gotResults_nowMousing(self,words,fullResults):
direction = findKeyWord(words,self.listDefn['direction'])
if direction:
self.curDirection = direction
elif 'stop' in words:
self.cancelMode()
elif 'faster' in words:
            speed = int(self.curSpeed / mouseRateChange)
pixels = self.curPixels
while speed < 50:
speed = speed * 2
pixels = pixels * 2
if pixels > 10: pixels = 10
self.curSpeed = speed
self.curPixels = pixels
natlink.setTimerCallback(self.onTimer,speed)
elif 'slower' in words:
            speed = int(self.curSpeed * mouseRateChange)
pixels = self.curPixels
while pixels > defaultMousePixels and speed >= 2*50:
speed = speed / 2
pixels = pixels / 2
if speed > 2000: speed = 2000
self.curSpeed = speed
self.curPixels = pixels
natlink.setTimerCallback(self.onTimer,speed)
# This turns on the tray icon depending on the movement direction.
# self.iconState is used to toggle the image to animate the icon.
def setTrayIcon(self,toggleIcon):
iconName = self.curDirection
toolTip = 'moving '+self.curDirection
if not toggleIcon or self.iconState:
self.iconState = 0
else:
self.iconState = 1
iconName = iconName + '2'
natlink.setTrayIcon(iconName,toolTip,self.onTrayIcon)
# This is called if the user clicks on the tray icon. We simply cancel
# movement in all cases.
def onTrayIcon(self,message):
self.cancelMode()
# This is a simple utility subroutine. It takes two lists of words and
# returns the first word it finds which is in both lists. We use this to
# extract special words (like the direction) from recognition results.
def findKeyWord(list1,list2):
for word in list1:
if word in list2:
return word
return None
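# e.g. findKeyWord(['move', 'mouse', 'left'], ['up', 'down', 'left', 'right']) returns 'left'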
#
# Here is the initialization and termination code. See wordpad.py for more
# comments.
#
thisGrammar = ThisGrammar()
thisGrammar.initialize()
def unload():
global thisGrammar
if thisGrammar: thisGrammar.unload()
thisGrammar = None
|
the-stack_0_10692 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2016 Twitter. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
''' log.py '''
import logging
import logging.handlers
# Create the logger
Log = logging.getLogger('heron-state')
def configure(level, logfile=None):
""" configure logging """
log_format = "%(asctime)s-%(levelname)s: %(message)s"
date_format = '%a, %d %b %Y %H:%M:%S'
logging.basicConfig(format=log_format, datefmt=date_format)
Log.setLevel(level)
if logfile is not None:
fh = logging.FileHandler(logfile)
fh.setFormatter(logging.Formatter(log_format))
Log.addHandler(fh)
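# Typical usage (illustrative): configure(logging.INFO, logfile='/tmp/heron-state.log')
# adds a file handler alongside the default stream handler set up by basicConfig.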
|
the-stack_0_10694 | from django.db import models
from django.utils import timezone
# Create your models here.
class Feedback(models.Model):
data = models.DateTimeField(blank = True)
result = models.CharField(max_length = 3, null=True)
def store(self):
self.data = timezone.now()
self.save()
class Document(models.Model):
upload = models.FileField() |
the-stack_0_10696 | import argparse
import logging
import numpy as np
import os
import random
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score
import sys
from baselines.vectorizers import build_vectorizer_from_df, load_vectorized_data
from baselines.avg_fasttext import build_avg_fasttext_from_df, load_avg_fasttext
from baselines.doc2vec import build_doc2vec_from_df, load_doc2vec
from shared.global_constants import RES_DIR
from shared.loaders import load_train_val_nodes
from shared.utils import save_cli_options, save_dict_to_json
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
def parse_arguments(args_to_parse):
""" Parse CLI arguments """
descr = 'Train a baseline model'
parser = argparse.ArgumentParser(description=descr)
general = parser.add_argument_group('General settings')
general.add_argument('name', type=str, help="The name of the experimental directory - used for saving and loading.")
general.add_argument(
'--input-data-dir',
type=str,
required=True,
help="The name of the directory from which to load the pre-processed data",
)
general.add_argument(
"--stemmer-path",
type=str,
required=True,
help="Path to the SALAMA stemming dictionary",
)
general.add_argument(
'--model',
type=str,
default='tf-idf',
choices=['tf-idf', 'count', 'doc2vec', 'fasttext'],
help='Select the model type to use before feeding into a logistic regression layer',
)
general.add_argument("--seed", type=int, default=12321, help='Random seed for reproducability')
training = parser.add_argument_group('Training settings')
training.add_argument(
'--train-set-label-proportion',
type=float,
default=0.2,
choices=[0.01, 0.05, 0.1, 0.2],
help='Ratio of nodes in the training set which we keep labelled',
)
# CLI options of the form `--doc2vec-XXXX` pertain to doc2vec
training.add_argument(
'--doc2vec-epochs',
type=int,
default=10,
help="The number of epochs to run when training Doc2Vec",
)
training.add_argument(
'--doc2vec-feature-dims',
type=int,
default=300,
help="The Doc2vec feature vector size",
)
training.add_argument(
'--doc2vec-dm',
type=int,
choices=[0, 1],
default=1,
help="The training regime to use for Doc2Vec: Distributed Memory (1) or Distributed Bag of Words (0)",
)
return parser.parse_args(args_to_parse)
def main(args):
""" Entry point for training a doc2vec model """
random.seed(args.seed)
np.random.seed(args.seed)
results_dir = os.path.join(RES_DIR, args.name)
os.makedirs(results_dir, exist_ok=True)
save_cli_options(args, results_dir)
preproc_dir = os.path.join(results_dir, 'preproc')
if args.model == 'tf-idf' or args.model == 'count':
if not os.path.isdir(preproc_dir):
os.makedirs(preproc_dir, exist_ok=True)
build_vectorizer_from_df(
vectorizer_name=args.model,
save_dir=preproc_dir,
df_path=os.path.join(RES_DIR, args.input_data_dir, 'dataset.csv'),
stemming_map_path=os.path.join(RES_DIR, args.stemmer_path),
text_column='document_content',
label_column='document_type',
)
print(f'Load {args.model} data...')
input_features, labels = load_vectorized_data(preproc_dir, args.model)
elif args.model == 'fasttext':
if not os.path.isdir(preproc_dir):
os.makedirs(preproc_dir, exist_ok=True)
build_avg_fasttext_from_df(
save_dir=preproc_dir,
df_path=os.path.join(RES_DIR, args.input_data_dir, 'dataset.csv'),
stemming_map_path=os.path.join(RES_DIR, args.stemmer_path),
text_column='document_content',
label_column='document_type',
)
print('Load average FastText data...')
input_features, labels = load_avg_fasttext(preproc_dir)
elif args.model == 'doc2vec':
if not os.path.isdir(preproc_dir):
os.makedirs(preproc_dir, exist_ok=True)
build_doc2vec_from_df(
save_dir=preproc_dir,
df_path=os.path.join(RES_DIR, args.input_data_dir, 'dataset.csv'),
stemming_map_path=os.path.join(RES_DIR, args.stemmer_path),
text_column='document_content',
label_column='document_type',
training_regime=args.doc2vec_dm,
embedding_dimension=args.doc2vec_feature_dims,
num_epochs=args.doc2vec_epochs,
)
print('Load Doc2vec data...')
input_features, labels = load_doc2vec(preproc_dir)
else:
raise Exception(f'Unrecognised model type: {args.model}')
train_nodes, val_nodes, test_nodes = load_train_val_nodes(
preproc_dir=os.path.join(RES_DIR, args.input_data_dir),
train_set_label_proportion=args.train_set_label_proportion,
as_numpy=True,
)
print('Train classifier ...')
classifier = LogisticRegression(random_state=args.seed).fit(input_features[train_nodes, :], labels[train_nodes])
print('Get accuracies...')
train_predictions = classifier.predict(input_features[train_nodes, :])
val_predictions = classifier.predict(input_features[val_nodes, :])
test_predictions = classifier.predict(input_features[test_nodes, :])
train_accuracy = sum(train_predictions == labels[train_nodes]) / len(train_predictions)
val_accuracy = sum(val_predictions == labels[val_nodes]) / len(val_predictions)
test_accuracy = sum(test_predictions == labels[test_nodes]) / len(test_predictions)
test_micro_f1 = f1_score(labels[test_nodes], test_predictions, average='micro')
test_macro_f1 = f1_score(labels[test_nodes], test_predictions, average='macro')
print(f'Train Accuracy: {train_accuracy}')
print(f'Validation Accuracy: {val_accuracy}')
print(f'Test Accuracy: {test_accuracy}')
print(f'Test Micro F1: {test_micro_f1}')
print(f'Test Macro F1: {test_macro_f1}')
output_save_dir = os.path.join(results_dir, f'model_{args.train_set_label_proportion}')
os.makedirs(output_save_dir, exist_ok=True)
save_dict_to_json(
{
'train_accuracy': train_accuracy,
'val_accuracy': val_accuracy,
'test_accuracy': test_accuracy,
'test_micro_f1': test_micro_f1,
'test_macro_f1': test_macro_f1,
},
os.path.join(output_save_dir, 'metric.json'),
)
# from sklearn.model_selection import learning_curve
# train_sizes, train_scores, test_scores = learning_curve(
# classifier, input_features[train_nodes, :], labels[train_nodes]
# )
# print(train_scores)
if __name__ == '__main__':
args = parse_arguments(sys.argv[1:])
main(args)
|