ext (9 classes) | sha (40 chars) | content (3 to 1.04M chars) |
---|---|---|
py | 1a32a65ae13adcbe8ae39c2625f358d6b614e70e | # -*- coding: utf-8 -*-
# This sample demonstrates handling intents from an Alexa skill using the Alexa Skills Kit SDK for Python.
# Please visit https://alexa.design/cookbook for additional examples on implementing slots, dialog management,
# session persistence, api calls, and more.
# This sample is built using the handler classes approach in skill builder.
import logging
import ask_sdk_core.utils as ask_utils
from ask_sdk_core.skill_builder import SkillBuilder
from ask_sdk_core.dispatch_components import AbstractRequestHandler
from ask_sdk_core.dispatch_components import AbstractExceptionHandler
from ask_sdk_core.handler_input import HandlerInput
from ask_sdk_model import Response
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class LaunchRequestHandler(AbstractRequestHandler):
"""Handler for Skill Launch."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_request_type("LaunchRequest")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "Welcome to EduToken, my name is Veronica. What can I do for you? "
return (
handler_input.response_builder
.speak(speak_output)
.ask(speak_output)
.response
)
class CraftonCollegeIntentHandler(AbstractRequestHandler):
"""Handler for CraftonCollegeIntent. """
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("CraftonCollegeIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "With its dedicated professors, ample extracurricular opportunities, supportive staff, and beautiful surroundings, Crafton Hills College is a place where students thrive. "
speak_output += "CHC offers more than 50 majors in the liberal arts and sciences, vocations, and technical studies, and currently serves about 4,500 students."
speak_output += "Professors are experts in their field, and are active in their professions outside of the classroom."
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class FallEnrollmentIntentHandler(AbstractRequestHandler):
"""Handler for FallEnrollmentIntent. """
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("FallEnrollmentIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "Depending on your priority level, registration for the Fall 2021 term is open from May 10th through August 15th. "
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class FallStartIntentHandler(AbstractRequestHandler):
"""Handler for FallStartIntent. """
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("FallStartIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "The Fall 2021 term commences on August 16th. "
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class FinancialAidAvailableIntentHandler(AbstractRequestHandler):
"""Handler for FinancialAidAvailableIntent. """
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("FinancialAidAvailableIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "For the 2021 and 2022 academic year, financial aid applications opened on October 1st, 2020. "
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class FounderIntentHandler(AbstractRequestHandler):
"""Handler for FounderIntent. """
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("FounderIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "EduToken was created by Aruna Bisht, Lucas Manning, and Aaron Montano at the UC Berkeley FinTech Bootcamp in June, 2021. "
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class JokeIntentHandler(AbstractRequestHandler):
"""Handler for JokeIntent. """
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("JokeIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "I don't mean to brag about my financial skills...but my bank calls me every day to tell me that my debt is outstanding. "
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class OverviewIntentHandler(AbstractRequestHandler):
"""Handler for OverviewIntent. """
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("OverviewIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "EduToken is a revolutionary system of redeeming and ensuring educational costs. "
speak_output += "Our goal is to ensure that all of your educational needs are properly accounted for."
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class SummerEnrollmentIntentHandler(AbstractRequestHandler):
"""Handler for SummerEnrollmentIntent. """
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("SummerEnrollmentIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "Depending on your priority level, enrollment for the 2021 Summer term begins on April 12th, and ends on May 31st. "
speak_output += "Please visit the Crafton Hills College Admissions page to determine your priority level."
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class SummerSchoolStartIntentHandler(AbstractRequestHandler):
"""Handler for SummerSchoolStartIntent. """
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("SummerSchoolStartIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "Summer classes begin June 1st, June 14th, and July 6th. "
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class HelpIntentHandler(AbstractRequestHandler):
"""Handler for Help Intent."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("AMAZON.HelpIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "You can say hello to me! How can I help?"
return (
handler_input.response_builder
.speak(speak_output)
.ask(speak_output)
.response
)
class CancelOrStopIntentHandler(AbstractRequestHandler):
"""Single handler for Cancel and Stop Intent."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return (ask_utils.is_intent_name("AMAZON.CancelIntent")(handler_input) or
ask_utils.is_intent_name("AMAZON.StopIntent")(handler_input))
def handle(self, handler_input):
# type: (HandlerInput) -> Response
speak_output = "Goodbye!"
return (
handler_input.response_builder
.speak(speak_output)
.response
)
class FallbackIntentHandler(AbstractRequestHandler):
"""Single handler for Fallback Intent."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_intent_name("AMAZON.FallbackIntent")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
logger.info("In FallbackIntentHandler")
speech = "Hmm, I'm not sure. You can say Hello or Help. What would you like to do?"
reprompt = "I didn't catch that. What can I help you with?"
return handler_input.response_builder.speak(speech).ask(reprompt).response
class SessionEndedRequestHandler(AbstractRequestHandler):
"""Handler for Session End."""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_request_type("SessionEndedRequest")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
# Any cleanup logic goes here.
return handler_input.response_builder.response
class IntentReflectorHandler(AbstractRequestHandler):
"""The intent reflector is used for interaction model testing and debugging.
It will simply repeat the intent the user said. You can create custom handlers
for your intents by defining them above, then also adding them to the request
handler chain below.
"""
def can_handle(self, handler_input):
# type: (HandlerInput) -> bool
return ask_utils.is_request_type("IntentRequest")(handler_input)
def handle(self, handler_input):
# type: (HandlerInput) -> Response
intent_name = ask_utils.get_intent_name(handler_input)
speak_output = "You just triggered " + intent_name + "."
return (
handler_input.response_builder
.speak(speak_output)
# .ask("add a reprompt if you want to keep the session open for the user to respond")
.response
)
class CatchAllExceptionHandler(AbstractExceptionHandler):
"""Generic error handling to capture any syntax or routing errors. If you receive an error
stating the request handler chain is not found, you have not implemented a handler for
the intent being invoked or included it in the skill builder below.
"""
def can_handle(self, handler_input, exception):
# type: (HandlerInput, Exception) -> bool
return True
def handle(self, handler_input, exception):
# type: (HandlerInput, Exception) -> Response
logger.error(exception, exc_info=True)
speak_output = "Sorry, I had trouble doing what you asked. Please try again."
return (
handler_input.response_builder
.speak(speak_output)
.ask(speak_output)
.response
)
# The SkillBuilder object acts as the entry point for your skill, routing all request and response
# payloads to the handlers above. Make sure any new handlers or interceptors you've
# defined are included below. The order matters - they're processed top to bottom.
sb = SkillBuilder()
sb.add_request_handler(LaunchRequestHandler())
sb.add_request_handler(CraftonCollegeIntentHandler())
sb.add_request_handler(FallEnrollmentIntentHandler())
sb.add_request_handler(FallStartIntentHandler())
sb.add_request_handler(FinancialAidAvailableIntentHandler())
sb.add_request_handler(FounderIntentHandler())
sb.add_request_handler(JokeIntentHandler())
sb.add_request_handler(OverviewIntentHandler())
sb.add_request_handler(SummerEnrollmentIntentHandler())
sb.add_request_handler(SummerSchoolStartIntentHandler())
sb.add_request_handler(HelpIntentHandler())
sb.add_request_handler(CancelOrStopIntentHandler())
sb.add_request_handler(FallbackIntentHandler())
sb.add_request_handler(SessionEndedRequestHandler())
sb.add_request_handler(IntentReflectorHandler()) # make sure IntentReflectorHandler is last so it doesn't override your custom intent handlers
sb.add_exception_handler(CatchAllExceptionHandler())
lambda_handler = sb.lambda_handler()
|
py | 1a32a66412ac752dfef1346767dada6ace8dbe66 | import socket
import struct
send_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
data_bytes = struct.pack("!BBBB", 0, 0, 255, 255)
header = struct.pack("!BIIH", 0, 0, 0, len(data_bytes))
message = header + data_bytes
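# The "!BIIH" header is 11 bytes: an unsigned byte, two unsigned 32-bit ints and an
# unsigned short carrying the payload length, all big-endian. A hedged sketch of how a
# receiver could take the message apart (the field names are placeholders, since the
# protocol semantics are not defined in this file):
#   msg_type, field_a, field_b, length = struct.unpack("!BIIH", message[:11])
#   payload = struct.unpack("!%dB" % length, message[11:11 + length])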
send_sock.sendto(message, ("localhost", 42000)) |
py | 1a32a6d95eb08cf8a9c3aedd782f8f574e321aa1 | # coding: utf-8
import pprint
import re
import six
class DeleteBatchTaskFileRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'instance_id': 'str',
'file_id': 'str'
}
attribute_map = {
'instance_id': 'Instance-Id',
'file_id': 'file_id'
}
def __init__(self, instance_id=None, file_id=None):
"""DeleteBatchTaskFileRequest - a model defined in huaweicloud sdk"""
self._instance_id = None
self._file_id = None
self.discriminator = None
if instance_id is not None:
self.instance_id = instance_id
self.file_id = file_id
@property
def instance_id(self):
"""Gets the instance_id of this DeleteBatchTaskFileRequest.
:return: The instance_id of this DeleteBatchTaskFileRequest.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this DeleteBatchTaskFileRequest.
:param instance_id: The instance_id of this DeleteBatchTaskFileRequest.
:type: str
"""
self._instance_id = instance_id
@property
def file_id(self):
"""Gets the file_id of this DeleteBatchTaskFileRequest.
:return: The file_id of this DeleteBatchTaskFileRequest.
:rtype: str
"""
return self._file_id
@file_id.setter
def file_id(self, file_id):
"""Sets the file_id of this DeleteBatchTaskFileRequest.
:param file_id: The file_id of this DeleteBatchTaskFileRequest.
:type: str
"""
self._file_id = file_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeleteBatchTaskFileRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
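if __name__ == "__main__":
    # Illustrative sketch only (not part of the generated SDK); the identifiers
    # below are placeholders.
    request = DeleteBatchTaskFileRequest(instance_id="example-instance-id",
                                         file_id="example-file-id")
    print(request.to_dict())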
|
py | 1a32a755a13ca1eec9164162ca4334666b974765 | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.3'
# jupytext_version: 1.0.0
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
# %autosave 0
# %load_ext autoreload
# %autoreload 2
import os
import logging
import numpy as np
import pandas as pd
import pkg_resources
import seaborn as sns
import matplotlib.pyplot as plt
from math import sqrt
from GIPlot import GIPlot
from crispy.Utils import Utils
from crispy.QCPlot import QCplot
from scipy.stats import spearmanr, skew
from minlib.Utils import density_interpolate
from sklearn.metrics import mean_squared_error
from minlib.Utils import project_score_sample_map
from crispy.CRISPRData import CRISPRDataSet, Library
from crispy.LibRepresentationReport import LibraryRepresentaion
LOG = logging.getLogger("Crispy")
DPATH = pkg_resources.resource_filename("crispy", "data/")
RPATH = pkg_resources.resource_filename("notebooks", "minlib/reports/")
# MinLibCas9 library information
#
mlib = Library.load_library("MinLibCas9.csv.gz", set_index=False)
mlib.index = [f"{i}" for i in mlib["WGE_ID"]]
mlib["sgRNA"] = [s if len(s) == 19 else s[1:-3] for s in mlib["WGE_Sequence"]]
# Assemble raw counts matrix
#
SPATH = pkg_resources.resource_filename("notebooks", "minlib/minlibcas9_screens")
plasmid_counts = pd.read_csv(f"{SPATH}/Minimal_library_output_108.csv", index_col=0).rename(columns=dict(counts="MinLibCas9"))
#
#
lib_report = LibraryRepresentaion(plasmid_counts[["MinLibCas9"]])
pal = dict(MHG_library_v1=QCplot.PAL_DBGD[0], MinLibCas9=QCplot.PAL_DBGD[1])
# Lorenz curves
lib_report.lorenz_curve(palette=pal)
plt.gcf().set_size_inches(2., 2.)
plt.savefig(f"{RPATH}/librepresentation_lorenz_curve.pdf", bbox_inches="tight", dpi=600)
plt.close("all")
# Ranked sgRNA read counts and skew ratio
plot_df = plasmid_counts["MinLibCas9"].sort_values().reset_index()
skew_ratio = plot_df["MinLibCas9"].quantile([.9, .1])
skew_ratio = skew_ratio[.9] / skew_ratio[.1]
fig, ax = plt.subplots(1, 1, figsize=(2.5, 1.5), dpi=600)
ax.plot(
plot_df.index,
plot_df["MinLibCas9"],
color=pal["MinLibCas9"],
# edgecolor="w",
lw=1,
# s=6,
alpha=.8,
zorder=3,
)
ax.set_xlabel("Ranked sgRNAs")
ax.set_ylabel("Number of reads")
ax.set_xticks([0, plot_df.shape[0] / 2, plot_df.shape[0]])
ax.grid(True, ls="-", lw=0.1, alpha=1.0, zorder=0, axis="y")
annot_text = f"Skew ratio = {skew_ratio:.2f}"
ax.text(
0.95,
0.05,
annot_text,
fontsize=6,
transform=ax.transAxes,
ha="right",
)
plt.savefig(f"{RPATH}/librepresentation_scatter.pdf", bbox_inches="tight", dpi=600)
plt.close("all")
|
py | 1a32a809590ad5571f2993aa39ebf621fe2df074 | """
WSGI config for caremanager project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'caremanager.settings')
application = get_wsgi_application()
|
py | 1a32a82190b7ca4eb50b0d8b9125b04c92ad8925 | import torch
import torch.nn as nn
from mmcv.cnn.utils.weight_init import xavier_init
from mmedit.models.registry import COMPONENTS
@COMPONENTS.register_module()
class PlainRefiner(nn.Module):
"""Simple refiner from Deep Image Matting.
Args:
conv_channels (int): Number of channels produced by the three main
convolutional layer.
loss_refine (dict): Config of the loss of the refiner. Default: None.
pretrained (str): Name of pretrained model. Default: None.
"""
def __init__(self, conv_channels=64, pretrained=None):
super(PlainRefiner, self).__init__()
self.refine_conv1 = nn.Conv2d(
4, conv_channels, kernel_size=3, padding=1)
self.refine_conv2 = nn.Conv2d(
conv_channels, conv_channels, kernel_size=3, padding=1)
self.refine_conv3 = nn.Conv2d(
conv_channels, conv_channels, kernel_size=3, padding=1)
self.refine_pred = nn.Conv2d(
conv_channels, 1, kernel_size=3, padding=1)
self.relu = nn.ReLU(inplace=True)
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m)
def forward(self, x, raw_alpha):
"""Forward function.
Args:
x (Tensor): The input feature map of refiner.
raw_alpha (Tensor): The raw predicted alpha matte.
Returns:
Tensor: The refined alpha matte.
"""
out = self.relu(self.refine_conv1(x))
out = self.relu(self.refine_conv2(out))
out = self.relu(self.refine_conv3(out))
raw_refine = self.refine_pred(out)
pred_refine = torch.sigmoid(raw_alpha + raw_refine)
return pred_refine
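if __name__ == "__main__":
    # Minimal shape-check sketch (not part of mmediting): the first convolution expects
    # a 4-channel input, e.g. an RGB image concatenated with the raw alpha matte, and
    # the output is a single-channel refined alpha of the same spatial size.
    refiner = PlainRefiner(conv_channels=64)
    refiner.init_weights()
    image = torch.rand(1, 3, 64, 64)
    raw_alpha = torch.rand(1, 1, 64, 64)
    refined = refiner(torch.cat([image, raw_alpha], dim=1), raw_alpha)
    print(refined.shape)  # expected: torch.Size([1, 1, 64, 64])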
|
py | 1a32a8328a03762e6550a70872b294749dcf4d7f | import tensorflow as tf
def get_loss_and_keys(which, params, ff_means_only=False):
"""
Returns the loss function.
:param which: submodel, must be one of "flux_fractions" or "histograms"
:param params: parameter dictionary
:param ff_means_only: even if aleatoric uncertainties for flux fractions are enabled, only train the means
:return: loss function, list of keys required (apart from true label, which is always assumed to be first input)
"""
if which == "flux_fractions":
loss, loss_keys = get_loss_and_keys_flux_fractions(params.train["ff_loss"],
do_var=params.nn.ff["alea_var"] and not ff_means_only,
do_covar=params.nn.ff["alea_covar"] and not ff_means_only)
elif which == "histograms":
loss, loss_keys = get_loss_and_keys_histograms(params.train["hist_loss"],
smoothing_empl=params.train["hist_pinball_smoothing"])
else:
raise NotImplementedError
return loss, loss_keys
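# Hypothetical usage sketch (the exact ``params`` object is defined elsewhere in the
# package); it is only assumed here to expose the fields read above, i.e.
# params.train["ff_loss"], params.train["hist_loss"], params.train["hist_pinball_smoothing"],
# params.nn.ff["alea_var"] and params.nn.ff["alea_covar"]. A call then looks like:
#   hist_loss, hist_keys = get_loss_and_keys("histograms", params)
#   ff_loss, ff_keys = get_loss_and_keys("flux_fractions", params, ff_means_only=True)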
def get_loss_and_keys_flux_fractions(ff_loss_str, do_var=False, do_covar=False):
"""
Returns the loss function for the flux fraction estimation.
:param ff_loss_str: : string specifying histogram loss
:param do_var: estimate aleatoric variances?
:param do_covar: estimate aleatoric covariance matrix?
:return: loss function, list of keys required (apart from true label, which is always assumed to be first input)
"""
assert not (do_var and do_covar), "Either 'do_var' or 'do_covar' should be chosen, not both!"
if do_var or do_covar:
assert ff_loss_str.lower() in ["l2", "mse"], "For flux fraction uncertainty estimation choose 'l2' loss!"
if ff_loss_str.lower() in ["l2", "mse"]:
if do_covar:
loss = max_llh_loss_covar
loss_keys = ["ff_mean", "ff_covar"]
elif do_var:
loss = max_llh_loss_var
loss_keys = ["ff_mean", "ff_logvar"]
else:
loss = tf.keras.losses.mse
loss_keys = ["ff_mean"]
elif ff_loss_str.lower() in ["l1", "mae"]:
loss = tf.keras.losses.mae
loss_keys = ["ff_mean"]
elif ff_loss_str.lower() in ["x-ent", "x_ent"]:
loss = tf.keras.losses.categorical_crossentropy
loss_keys = ["ff_mean"]
else:
raise NotImplementedError
return loss, loss_keys
def get_loss_and_keys_histograms(hist_loss_str, smoothing_empl=None):
"""
Returns the loss function for the SCD histogram estimation.
:param hist_loss_str: string specifying histogram loss
:param smoothing_empl: scalar determining the smoothing for Earth Mover's Pinball loss
:return: loss function, list of keys required (apart from true label, which is always assumed to be first input)
"""
loss_keys = ["hist"]
if hist_loss_str.lower() in ["l2", "mse"]:
def loss(y_true, y_pred): return tf.reduce_mean(tf.keras.losses.mse(y_true, y_pred), 1) # avg. over channels
elif hist_loss_str.lower() in ["l1", "mae"]:
def loss(y_true, y_pred): return tf.reduce_mean(tf.keras.losses.mae(y_true, y_pred), 1) # avg. over channels
elif hist_loss_str.lower() in ["x-ent", "x_ent"]:
def loss(y_true, y_pred): return tf.reduce_mean(tf.keras.losses.categorical_crossentropy(y_true, y_pred), 1)
elif hist_loss_str.lower() in ["em1", "em_1"]:
def loss(y_true, y_pred): return emd_loss(y_true, y_pred, r=1)
elif hist_loss_str.lower() in ["em2", "em_2"]:
def loss(y_true, y_pred): return emd_loss(y_true, y_pred, r=2)
elif hist_loss_str.lower() == "cjs":
loss = cjs_loss
elif hist_loss_str.lower() == "empl":
def loss(y_true, y_pred, tau): return empl(y_true, y_pred, tau, smoothing=smoothing_empl)
loss_keys += ["tau"]
else:
raise NotImplementedError
return loss, loss_keys
############################
# FLUX FRACTION LOSSES
############################
def max_llh_loss_covar(y_true, y_pred, covar, eps=None):
"""
(Neg.) maximum likelihood loss function for a full Gaussian covariance matrix.
:param y_true: label
:param y_pred: prediction
:param covar: uncertainty covariance matrix
:param eps: small number for numerical stability, defaults to tf.keras.backend.epsilon()
:return: max. likelihood loss (up to a constant)
"""
if eps is None:
eps = tf.keras.backend.epsilon()
err = tf.expand_dims(y_pred - y_true, -1)
term1 = tf.squeeze(err * tf.linalg.matmul(tf.linalg.inv(covar), err), -1)
term2 = tf.math.log(eps + tf.linalg.det(covar))
max_llh_loss = (tf.reduce_sum(term1, 1) + term2) / 2.0
return max_llh_loss
def max_llh_loss_var(y_true, y_pred, logvar):
"""
(Neg.) maximum likelihood loss function for a diagonal Gaussian covariance matrix.
:param y_true: label
:param y_pred: prediction
:param logvar: uncertainty log-variances
:return: max. likelihood loss (up to a constant)
"""
err = y_pred - y_true
precision = tf.exp(-logvar)
term1 = err ** 2 * precision
term2 = logvar
max_llh_loss = tf.reduce_sum(term1 + term2, 1) / 2.0
return max_llh_loss
############################
# HISTOGRAM LOSSES
############################
def emd_loss(y_true, y_pred, r=2, weights=None, do_root=False):
"""
Computes the Earth Mover's Distance loss.
Hou, Le, Chen-Ping Yu, and Dimitris Samaras. "Squared Earth Mover's
Distance-based Loss for Training Deep Neural Networks." arXiv:1611.05916.
:param y_true: a 2-D (or 3-D) `Tensor` of the ground truth probability mass functions
:param y_pred: a 2-D (or 3-D) `Tensor` of the estimated p.m.f.-s
:param r: a constant for the r-norm.
:param weights: weight the loss differently for different samples
:param do_root: if True: raise result to the power of "1/r"
`y_true` and `y_pred` are assumed to have equal mass as
\sum^{N}_{i=1} {y_true}_i = \sum^{N}_{i=1} {y_pred}_i
:return: A 0-D `Tensor` with EMD loss.
"""
ecdf_true = tf.math.cumsum(y_true, axis=1)
ecdf_pred = tf.math.cumsum(y_pred, axis=1)
if weights is None:
weights = tf.ones_like(ecdf_true)
if len(weights.shape) < len(y_true.shape): # if bin-dimension is missing
weights = tf.expand_dims(weights, 1)
if r == 1:
emd = tf.reduce_mean(tf.abs(ecdf_true - ecdf_pred) * weights, axis=1)
elif r == 2:
emd = tf.reduce_mean((ecdf_true - ecdf_pred) ** 2 * weights, axis=1)
if do_root:
emd = tf.sqrt(emd)
else:
emd = tf.reduce_mean(tf.pow(tf.abs(ecdf_true - ecdf_pred) * weights, r), axis=1)
if do_root:
emd = tf.pow(emd, 1 / r)
return tf.reduce_mean(emd, 1) # average over channels
def cjs_loss(y_true, y_pred, eps=1e-10):
"""
Computes the symmetrical discrete cumulative Jensen-Shannon divergence from https://arxiv.org/pdf/1708.07089.pdf
:param y_true: labels
:param y_pred: prediction
:param eps: lower cutoff for logarithm (for numerical stability)
:return CJS loss
"""
cdf_true = tf.cumsum(y_true, axis=1)
cdf_pred = tf.cumsum(y_pred, axis=1)
def accjs(p_, q_):
# if p(i) = 0 then ACCJS(p, q)(i) = 0 since xlog(x) -> 0 as x-> 0
p_ = tf.clip_by_value(p_, eps, 1.0)
return 0.5 * tf.reduce_sum(p_ * tf.math.log(p_ / (0.5 * (p_ + q_))), axis=1)
loss = accjs(cdf_pred, cdf_true) + accjs(cdf_true, cdf_pred)
return tf.reduce_mean(loss, 1) # average over channels
def empl(y_true, y_pred, tau, weights=None, smoothing=0.0):
"""
Compute the Earth Mover's Pinball Loss (arXiv:2106.02051).
:param y_true: label
:param y_pred: prediction
:param tau: quantile levels of interest
:param weights: weight the loss differently for different samples
:param smoothing: scalar >= 0 that determines smoothing of loss function around 0
:return Earth Mover's Pinball Loss
"""
ecdf_true = tf.math.cumsum(y_true, axis=1)
ecdf_pred = tf.math.cumsum(y_pred, axis=1)
delta = ecdf_pred - ecdf_true
# If there is an extra dimension for the channel: tau might need to be expanded
if len(tau.shape) == 2 and len(delta.shape) == 3:
tau = tf.expand_dims(tau, 2)
# Non-smooth C0 loss (default)
if smoothing == 0.0:
mask = tf.cast(tf.greater_equal(delta, tf.zeros_like(delta)), tf.float32) - tau
loss = mask * delta
# Smooth loss
else:
loss = -tau * delta + smoothing * tf.math.softplus(delta / smoothing)
if weights is None:
weights = tf.ones_like(ecdf_true)
if len(weights.shape) < len(y_true.shape): # if bin-dimension is missing
weights = tf.expand_dims(weights, 1)
# avg. the weighted loss over the bins (1) and channel dimension (2)
return tf.reduce_mean(loss * weights, [1, 2])
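if __name__ == "__main__":
    # Minimal sketch (not part of the original module): toy histograms with shape
    # (batch=1, bins=3, channels=1) and a single quantile level per sample.
    y_true_demo = tf.constant([[[0.2], [0.3], [0.5]]])
    y_pred_demo = tf.constant([[[0.25], [0.25], [0.5]]])
    tau_demo = tf.constant([[0.5]])
    print("EMPL:", empl(y_true_demo, y_pred_demo, tau_demo).numpy())
    print("EMD (r=2):", emd_loss(y_true_demo, y_pred_demo, r=2).numpy())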
|
py | 1a32a8c27bcb19dbf33714c4992f30cf3f7b4a34 | from dataclasses import dataclass
from datetime import datetime
from datetime import timezone
from typing import Any
from telliot_core.datasource import DataSource
from telliot_core.dtypes.datapoint import DataPoint
from telliot_feed_examples.utils.log import get_logger
logger = get_logger(__name__)
@dataclass
class DivaManualSource(DataSource[Any]):
"""DataSource for Diva Protocol manually-entered data."""
reference_asset: str = ""
timestamp: int = 0
def parse_user_val(self) -> float:
"""Parse historical price from user input."""
print(
"Enter price to report for reference asset "
f"{self.reference_asset} at timestamp {self.timestamp}:"
)
data = None
while data is None:
inpt = input()
try:
inpt = float(inpt) # type: ignore
except ValueError:
print("Invalid input. Enter decimal value (float).")
continue
print(f"Submitting value: {inpt}\nPress [ENTER] to confirm.")
_ = input()
data = inpt
return data
async def fetch_new_datapoint(self) -> DataPoint[float]:
"""Update current value with time-stamped value fetched from user input.
Returns:
Current time-stamped value
"""
data = self.parse_user_val()
dt = datetime.fromtimestamp(self.timestamp, tz=timezone.utc)
datapoint = (data, dt)
self.store_datapoint(datapoint)
logger.info(f"Stored price of {self.reference_asset} at {dt}: {data}")
return datapoint
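if __name__ == "__main__":
    # Hedged usage sketch (not from the original module): the reference asset and
    # timestamp below are placeholders, and the call prompts for a manually entered price.
    import asyncio

    source = DivaManualSource(reference_asset="ETH/USD", timestamp=1640000000)
    price, reported_at = asyncio.run(source.fetch_new_datapoint())
    print(f"Reported {price} at {reported_at}")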
|
py | 1a32a93c5b72de37625ecaf6fe39059fe9d843ce | from collections import namedtuple
Electron = namedtuple('Electron', ['pt', 'hadronicOverEm'])
class Event(object):
def __init__(self, run_number, event_number, electrons=()):
self._electrons = electrons
self._run_number = run_number
self._event_number = event_number
def getRun(self):
return self._run_number
def id(self):
return self._event_number
@property
def electrons(self):
return self._electrons
def nice_electrons(self):
"""
same as 'Event.electrons' but accessed via a function
"""
return self._electrons
class CMSSWEvent(object):
"""
Emulates a CMSSW event
event number -> event._event.id().event()
run number -> event._event.getRun()
muons -> handle = Handle ('std::vector<pat::Muon>')
label = ("slimmedMuons");
event.getByLabel (label, handle)
muons = handle.product()
"""
def __init__(self):
pass
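if __name__ == "__main__":
    # Small illustration (not part of the original file): build an event with two
    # electrons and select those passing a hadronic-over-EM cut.
    event = Event(run_number=1, event_number=42,
                  electrons=(Electron(pt=35.0, hadronicOverEm=0.01),
                             Electron(pt=20.0, hadronicOverEm=0.2)))
    selected = [e for e in event.electrons if e.hadronicOverEm < 0.05]
    print(event.getRun(), event.id(), len(selected))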
|
py | 1a32aa583301ad5563bf1bba091f0d487cdcf62f | from flaskr.db import get_db
def test_poly(client, app):
client.post("/api/poly", data="2x+x^2-y^2")
with app.app_context():
db = get_db()
count = db.execute("SELECT COUNT(id) FROM polynomials").fetchone()[0]
assert count == 1
with app.app_context():
db = get_db()
count = db.execute("SELECT COUNT(id) FROM p_members").fetchone()[0]
assert count == 3
response = client.get("/api/poly/eval?polynomial_id=1&x=3.0&y=3.0")
assert response.data == b"6.0"
|
py | 1a32aadc8db254713a28b8dd872370928c26620d | """
Django settings for ownphotos project.
Generated by 'django-admin startproject' using Django 1.11.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import datetime
import os
for envvar in (
'SECRET_KEY',
'BACKEND_HOST',
'DB_BACKEND',
'DB_NAME',
'DB_USER',
'DB_PASS',
'DB_HOST',
'DB_PORT'):
if envvar not in os.environ:
raise NameError('Environment variable not set: ' + envvar)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['SECRET_KEY']
RQ_API_TOKEN = os.environ['SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = (os.environ.get('DEBUG', '') == '1')
ALLOWED_HOSTS = ['backend', 'localhost', os.environ.get('BACKEND_HOST')]
AUTH_USER_MODEL = 'api.User'
SIMPLE_JWT = {
'ACCESS_TOKEN_LIFETIME': datetime.timedelta(minutes=5),
# 'ACCESS_TOKEN_LIFETIME': datetime.timedelta(minutes=60),
'REFRESH_TOKEN_LIFETIME': datetime.timedelta(days=7),
}
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.postgres',
'api',
'nextcloud',
'rest_framework',
'corsheaders',
'django_extensions',
"django_rq",
'constance',
'constance.backends.database',
'drf_yasg',
]
CONSTANCE_BACKEND = 'constance.backends.database.DatabaseBackend'
CONSTANCE_DATABASE_CACHE_BACKEND = 'default'
CONSTANCE_CONFIG = {
'ALLOW_REGISTRATION': (False, 'Publicly allow user registration', bool),
'IMAGE_DIRS': ("/data", 'Image dirs list (serialized json)', str),
}
INTERNAL_IPS = ('127.0.0.1', 'localhost', '192.168.1.100')
CORS_ALLOW_HEADERS = (
'cache-control',
'accept',
'accept-encoding',
'allow-credentials',
'withcredentials',
'authorization',
'content-type',
'dnt',
'origin',
'user-agent',
'x-csrftoken',
'x-requested-with',
)
CORS_ORIGIN_WHITELIST = (
'http://localhost:3000',
'http://192.168.1.100:3000'
)
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated', ),
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework_simplejwt.authentication.JWTAuthentication',
'rest_framework.authentication.SessionAuthentication',
'rest_framework.authentication.BasicAuthentication',
),
'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.coreapi.AutoSchema',
'DEFAULT_FILTER_BACKENDS':
('django_filters.rest_framework.DjangoFilterBackend', ),
'DEFAULT_PAGINATION_CLASS':
'rest_framework.pagination.LimitOffsetPagination',
'PAGE_SIZE':
20000,
}
REST_FRAMEWORK_EXTENSIONS = {
'DEFAULT_OBJECT_CACHE_KEY_FUNC':
'rest_framework_extensions.utils.default_object_cache_key_func',
'DEFAULT_LIST_CACHE_KEY_FUNC':
'rest_framework_extensions.utils.default_list_cache_key_func',
}
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'api.middleware.FingerPrintMiddleware',
]
ROOT_URLCONF = 'ownphotos.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'ownphotos.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.' + os.environ['DB_BACKEND'],
'NAME': os.environ['DB_NAME'],
'USER': os.environ['DB_USER'],
'PASSWORD': os.environ['DB_PASS'],
'HOST': os.environ['DB_HOST'],
'PORT': os.environ['DB_PORT'],
},
}
if 'REDIS_PATH' in os.environ:
redis_path = 'unix://' + os.environ['REDIS_PATH']
redis_path += '?db=' + os.environ.get('REDIS_DB', '0')
else:
redis_path = "redis://" + os.environ['REDIS_HOST']
redis_path += ":" + os.environ["REDIS_PORT"] + "/1"
if 'REDIS_PASS' in os.environ:
redis_password = os.environ['REDIS_PASS']
else:
redis_password = ""
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": redis_path,
"TIMEOUT": 60 * 60 * 24,
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
"PASSWORD": redis_password,
}
}
}
RQ_QUEUES = {
'default': {
'USE_REDIS_CACHE': 'default',
'DEFAULT_TIMEOUT': 60 * 60 * 24 * 7,
'DB': 0
}
}
RQ = {
'DEFAULT_RESULT_TTL': 60,
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME':
'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME':
'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = os.environ['TIME_ZONE']
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
# Allow to define data folder like /var/lib/librephotos
BASE_DATA = os.environ.get('BASE_DATA', '/')
BASE_LOGS = os.environ.get('BASE_LOGS', '/logs/')
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DATA, 'protected_media' )
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
DATA_ROOT = os.path.join(BASE_DATA, 'data' )
IM2TXT_ROOT = os.path.join(BASE_DATA, 'data_models', 'im2txt')
PLACES365_ROOT = os.path.join(BASE_DATA, 'data_models', 'places365', 'model' )
CLIP_ROOT = os.path.join(BASE_DATA, 'data_models', 'clip-embeddings' )
LOGS_ROOT = BASE_LOGS
THUMBNAIL_SIZE_TINY = 100
THUMBNAIL_SIZE_SMALL = 200
THUMBNAIL_SIZE_MEDIUM = 400
THUMBNAIL_SIZE = 800
THUMBNAIL_SIZE_BIG = (2048, 2048)
FULLPHOTO_SIZE = (1000, 1000)
DEFAULT_FAVORITE_MIN_RATING = os.environ.get('DEFAULT_FAVORITE_MIN_RATING', 4)
CORS_ORIGIN_ALLOW_ALL = False
CORS_ALLOW_CREDENTIALS = True
IMAGE_SIMILARITY_SERVER = 'http://localhost:8002'
# Must be less than or equal to the number of CPU cores (roughly 2GB per process)
HEAVYWEIGHT_PROCESS_ENV = os.environ.get('HEAVYWEIGHT_PROCESS', '1')
HEAVYWEIGHT_PROCESS = int(HEAVYWEIGHT_PROCESS_ENV) if HEAVYWEIGHT_PROCESS_ENV.isnumeric() else 1
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django': {
'handlers': ['console'],
'level': 'INFO',
},
},
}
|
py | 1a32aaeddd997c98de77273588a37af05635123a | from django.db import models
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager
from django.conf import settings
class UserProfileManager(BaseUserManager):
"""Manager for user profiles"""
def create_user(self, email, name, password=None):
"""Create a new user profile"""
if not email:
raise ValueError("User must have an email address")
email = self.normalize_email(email)
user = self.model(email=email, name=name)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, name, password):
"""Create and save a new superuser with given details"""
user = self.create_user(email, name, password)
user.is_superuser = True
user.is_staff = True
user.save(using=self._db)
return user
class UserProfile(AbstractBaseUser,PermissionsMixin):
"""Database model for users in the system"""
email = models.EmailField(max_length=255, unique=True)
name = models.CharField(max_length=255)
is_active = models.BooleanField(default=True)
is_staff = models.BooleanField(default=False)
objects = UserProfileManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['name']
def get_full_name(self):
"""Retrieve the full name of the user"""
return self.name
def get_short_name(self):
"""Retrieve short name of user"""
return self.name
def __str__(self):
"""Return the string representation of the user"""
return self.email
class ProfileFeedItem(models.Model):
"""Profile status update"""
user_profile = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE
)
status_text = models.CharField(max_length=255)
created_on = models.DateTimeField(auto_now_add=True)
def __str__(self):
"""Return the model as a string"""
return self.status_text |
py | 1a32ac76fee85b045581cecf91f12fa870249f06 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_ingress_spec import V1beta1IngressSpec
class TestV1beta1IngressSpec(unittest.TestCase):
""" V1beta1IngressSpec unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1beta1IngressSpec(self):
"""
Test V1beta1IngressSpec
"""
# FIXME: construct object with mandatory attributes with example values
#model = kubernetes.client.models.v1beta1_ingress_spec.V1beta1IngressSpec()
pass
if __name__ == '__main__':
unittest.main()
|
py | 1a32ac8b572d657ff0a866b6bfa4cca8f671928f | # coding=utf-8
import ast
from asdl.lang.py3.py3_transition_system import *
from asdl.hypothesis import *
import astor
if __name__ == '__main__':
# read in the grammar specification of Python 2.7, defined in ASDL
asdl_text = open('py3_asdl.simplified.txt').read()
grammar = ASDLGrammar.from_text(asdl_text)
py_code = """pandas.read('file.csv', nrows=100)"""
# get the (domain-specific) python AST of the example Python code snippet
py_ast = ast.parse(py_code)
# convert the python AST into general-purpose ASDL AST used by tranX
asdl_ast = python_ast_to_asdl_ast(py_ast.body[0], grammar)
print('String representation of the ASDL AST: \n%s' % asdl_ast.to_string())
print('Size of the AST: %d' % asdl_ast.size)
# we can also convert the ASDL AST back into Python AST
py_ast_reconstructed = asdl_ast_to_python_ast(asdl_ast, grammar)
# initialize the Python transition parser
parser = Python3TransitionSystem(grammar)
# get the sequence of gold-standard actions to construct the ASDL AST
actions = parser.get_actions(asdl_ast)
# a hypothesis is an (partial) ASDL AST generated using a sequence of tree-construction actions
hypothesis = Hypothesis()
for t, action in enumerate(actions, 1):
# the type of the action should belong to one of the valid continuing types
# of the transition system
assert action.__class__ in parser.get_valid_continuation_types(hypothesis)
# if it's an ApplyRule action, the production rule should belong to the
# set of rules with the same LHS type as the current rule
if isinstance(action, ApplyRuleAction) and hypothesis.frontier_node:
assert action.production in grammar[hypothesis.frontier_field.type]
p_t = hypothesis.frontier_node.created_time if hypothesis.frontier_node else -1
print('t=%d, p_t=%d, Action=%s' % (t, p_t, action))
hypothesis.apply_action(action)
# get the surface code snippets from the original Python AST,
# the reconstructed AST and the AST generated using actions
# they should be the same
src1 = astor.to_source(py_ast).strip()
src2 = astor.to_source(py_ast_reconstructed).strip()
src3 = astor.to_source(asdl_ast_to_python_ast(hypothesis.tree, grammar)).strip()
assert src1 == src2 == src3 == "pandas.read('file.csv', nrows=100)"
|
py | 1a32ac9768a1125fccd5044a8775a479b96a9e75 | __author__ = "David Northcote"
__organisation__ = "The Univeristy of Strathclyde"
__support__ = "https://github.com/strath-sdr/rfsoc_sam"
import numpy as np
import ipywidgets as ipw
import plotly.graph_objs as go
import matplotlib.colors as mcolors
import time
from .spectrum_analyser import SpectrumAnalyser
from .bandwidth_selector import BandwidthSelector
from .quick_widgets import FloatText, IntText, Button, Accordion, DropDown, Label, Image, CheckBox, QuickButton
DDC_SPURS = ['rx_alias', 'rx_image', 'nyquist_up', 'nyquist_down',
'hd2', 'hd2_image', 'hd3', 'hd3_image',
'pll_mix_up', 'pll_mix_up_image', 'pll_mix_down', 'pll_mix_down_image',
'tis_spur', 'tis_spur_image', 'offset_spur', 'offset_spur_image']
class RadioOfdmAnalyser():
def __init__(self,
adc_tile,
adc_block,
adc_description,
spectrum_analyser,
ofdm_receiver,
decimator,
inspector):
self._tile = adc_tile
self._block = adc_block
self._spectrum_analyser = spectrum_analyser
self._decimator = decimator
self._ofdm_receiver = ofdm_receiver
self._inspector = inspector
self._adc_description = adc_description
self._ofdm_receiver.reset_synchronisation()
@property
def constellation_enable(self):
if self._inspector.stopped:
return False
else:
return True
@constellation_enable.setter
def constellation_enable(self, enable):
if enable:
self._inspector.start()
else:
self._inspector.stop()
@property
def centre_frequency(self):
return abs(self._block.MixerSettings['Freq'])
@centre_frequency.setter
def centre_frequency(self, centre_frequency):
nyquist_zone = int(np.ceil(centre_frequency/(self._block.BlockStatus['SamplingFreq']*1e3/2)))
if nyquist_zone == 0:
nyquist_zone = 1
if nyquist_zone != self._block.NyquistZone:
self._block.NyquistZone = nyquist_zone
if (nyquist_zone % 2) == 0:
self._block.MixerSettings['Freq'] = centre_frequency
else:
self._block.MixerSettings['Freq'] = -centre_frequency
self._spectrum_analyser.centre_frequency = centre_frequency*1e6
self._block.UpdateEvent(1)
@property
def decimation_factor(self):
if self._decimator.decimation_factor > 0:
return self._block.DecimationFactor * self._decimator.decimation_factor
else:
return self._block.DecimationFactor
@decimation_factor.setter
def decimation_factor(self, decimation_factor):
word_lut = [8, 4, 2]
sel = int(np.log2(decimation_factor))
if decimation_factor in [2, 4, 8]:
self._block.DecimationFactor = decimation_factor
self._block.FabRdVldWords = word_lut[sel-1]
self._spectrum_analyser.ssr_packetsize = 0
self._spectrum_analyser.ssr_mode = 4-sel
self._safe_restart()
self._decimator.decimation_factor = 0
self._spectrum_analyser.sample_frequency = self._block.BlockStatus['SamplingFreq']*1e9
self._spectrum_analyser.decimation_factor = decimation_factor
self._spectrum_analyser.ssr_packetsize = int(self._spectrum_analyser.fft_size/8)
elif decimation_factor in [16, 32, 64, 128, 256, 512, 1024, 2048]:
self._block.DecimationFactor = 8
self._block.FabRdVldWords = 2
self._spectrum_analyser.ssr_packetsize = 0
self._spectrum_analyser.ssr_mode = 0
self._safe_restart()
self._decimator.decimation_factor = int(decimation_factor/8)
self._spectrum_analyser.sample_frequency = self._block.BlockStatus['SamplingFreq']*1e9
self._spectrum_analyser.decimation_factor = decimation_factor
self._spectrum_analyser.ssr_packetsize = int(self._spectrum_analyser.fft_size/8)
@property
def number_frames(self):
return self._spectrum_analyser.plot.data_windowsize
@number_frames.setter
def number_frames(self, number_frames):
if number_frames in range(1, 65):
self._spectrum_analyser.plot.data_windowsize = int(number_frames)
@property
def sample_frequency(self):
return self._block.BlockStatus['SamplingFreq']*1e9
@property
def calibration_mode(self):
return self._block.CalibrationMode
@calibration_mode.setter
def calibration_mode(self, calibration_mode):
if calibration_mode in [1, 2]:
self._block.CalibrationMode = calibration_mode
self._safe_restart()
@property
def nyquist_stopband(self):
return self._spectrum_analyser.nyquist_stopband * 100
@nyquist_stopband.setter
def nyquist_stopband(self, nyquist_stopband):
self._spectrum_analyser.nyquist_stopband = nyquist_stopband/100
@property
def fftsize(self):
return self._spectrum_analyser.fft_size
@fftsize.setter
def fftsize(self, fftsize):
self._spectrum_analyser.fft_size = fftsize
@property
def spectrum_type(self):
return self._spectrum_analyser.spectrum_type
@spectrum_type.setter
def spectrum_type(self, spectrum_type):
self._spectrum_analyser.spectrum_type = spectrum_type
@property
def spectrum_units(self):
return self._spectrum_analyser.spectrum_units
@spectrum_units.setter
def spectrum_units(self, spectrum_units):
self._spectrum_analyser.spectrum_units = spectrum_units
@property
def window(self):
return self._spectrum_analyser.window
@window.setter
def window(self, window_type):
self._spectrum_analyser.window = window_type
@property
def spectrum_window(self):
return self._spectrum_analyser.spectrum_window
@property
def height(self):
return self._spectrum_analyser.height
@height.setter
def height(self, height):
self._spectrum_analyser.height = height
@property
def width(self):
return self._spectrum_analyser.width
@width.setter
def width(self, width):
self._spectrum_analyser.width = width
@property
def spectrum_enable(self):
return self._spectrum_analyser.plot.enable_updates
@spectrum_enable.setter
def spectrum_enable(self, enable):
if enable:
self._spectrum_analyser.plot.enable_updates = True
else:
self._spectrum_analyser.plot.enable_updates = False
@property
def waterfall_enable(self):
return self._spectrum_analyser.spectrogram.enable_updates
@waterfall_enable.setter
def waterfall_enable(self, enable):
if enable:
self._spectrum_analyser.spectrogram.enable_updates = True
else:
self._spectrum_analyser.spectrogram.enable_updates = False
@property
def dma_enable(self):
return self._spectrum_analyser.dma_enable
@dma_enable.setter
def dma_enable(self, enable):
if enable:
self._spectrum_analyser.dma_enable = 1
self._spectrum_analyser.timer.start()
else:
self._spectrum_analyser.timer.stop()
self._spectrum_analyser.dma_enable = 0
@property
def update_frequency(self):
return self._spectrum_analyser.update_frequency
@update_frequency.setter
def update_frequency(self, update_frequency):
self._spectrum_analyser.update_frequency = update_frequency
@property
def plotly_theme(self):
return self._spectrum_analyser.plotly_theme
@plotly_theme.setter
def plotly_theme(self, plotly_theme):
self._spectrum_analyser.plotly_theme = plotly_theme
self._inspector._c_plot._plot.layout.template = plotly_theme
@property
def line_colour(self):
return self._spectrum_analyser.line_colour
@line_colour.setter
def line_colour(self, line_colour):
self._spectrum_analyser.line_colour = line_colour
@property
def line_fill(self):
return self._spectrum_analyser.line_fill
@line_fill.setter
def line_fill(self, line_fill):
self._spectrum_analyser.line_fill = line_fill
@property
def zmin(self):
return self._spectrum_analyser.zmin
@zmin.setter
def zmin(self, zmin):
self._spectrum_analyser.zmin = zmin
@property
def zmax(self):
return self._spectrum_analyser.zmax
@zmax.setter
def zmax(self, zmax):
self._spectrum_analyser.zmax = zmax
@property
def quality(self):
return self._spectrum_analyser.quality
@quality.setter
def quality(self, quality):
self._spectrum_analyser.quality = quality
@property
def post_process(self):
return self._spectrum_analyser.plot.post_process
@post_process.setter
def post_process(self, post_process):
if post_process in ['max', 'min', 'average', 'median']:
self._spectrum_analyser.plot.post_process = post_process
else:
self._spectrum_analyser.plot.post_process = 'none'
@property
def display_max(self):
return self._spectrum_analyser.plot.display_max
@display_max.setter
def display_max(self, display_max):
self._spectrum_analyser.plot.display_max = display_max
@property
def display_min(self):
return self._spectrum_analyser.plot.display_min
@display_min.setter
def display_min(self, display_min):
self._spectrum_analyser.plot.display_min = display_min
@property
def number_max_indices(self):
return self._spectrum_analyser.plot.number_max_indices
@number_max_indices.setter
def number_max_indices(self, number_max_indices):
self._spectrum_analyser.plot.number_max_indices = number_max_indices
@property
def colour_map(self):
return self._spectrum_analyser.spectrogram.cmap
@colour_map.setter
def colour_map(self, colour_map):
self._spectrum_analyser.spectrogram.cmap = colour_map
@property
def spectrogram_performance(self):
return self._spectrum_analyser.spectrogram.ypixel
@spectrogram_performance.setter
def spectrogram_performance(self, performance):
self._spectrum_analyser.spectrogram.ypixel = performance
@property
def ymin(self):
return self._spectrum_analyser.plot.yrange[0]
@ymin.setter
def ymin(self, ymin):
temp_range = list(self._spectrum_analyser.plot.yrange)
temp_range[0] = ymin
self._spectrum_analyser.plot.yrange = tuple(temp_range)
@property
def ymax(self):
return self._spectrum_analyser.plot.yrange[1]
@ymax.setter
def ymax(self, ymax):
temp_range = list(self._spectrum_analyser.plot.yrange)
temp_range[1] = ymax
self._spectrum_analyser.plot.yrange = tuple(temp_range)
@property
def number_min_indices(self):
return self._spectrum_analyser.plot.number_min_indices
@number_min_indices.setter
def number_min_indices(self, number_min_indices):
self._spectrum_analyser.plot.number_min_indices = number_min_indices
@property
def display_ddc_plan(self):
return self._spectrum_analyser.plot.display_ddc_plan
@display_ddc_plan.setter
def display_ddc_plan(self, display_ddc_plan):
self._spectrum_analyser.plot.display_ddc_plan = display_ddc_plan
@property
def ddc_centre_frequency(self):
return self._spectrum_analyser.plot.ddc_centre_frequency*1e-6
@ddc_centre_frequency.setter
def ddc_centre_frequency(self, ddc_centre_frequency):
self._spectrum_analyser.plot.ddc_centre_frequency = ddc_centre_frequency*1e6
self._spectrum_analyser.plot.update_ddc_plan()
@property
def ddc_plan_hd2_db(self):
return self._spectrum_analyser.plot.ddc_plan.hd2_db
@ddc_plan_hd2_db.setter
def ddc_plan_hd2_db(self, hd2_db):
self._spectrum_analyser.plot.ddc_plan.hd2_db = hd2_db
self._spectrum_analyser.plot.update_ddc_plan()
@property
def ddc_plan_hd3_db(self):
return self._spectrum_analyser.plot.ddc_plan.hd3_db
@ddc_plan_hd3_db.setter
def ddc_plan_hd3_db(self, hd3_db):
self._spectrum_analyser.plot.ddc_plan.hd3_db = hd3_db
self._spectrum_analyser.plot.update_ddc_plan()
@property
def ddc_plan_nsd_db(self):
return self._spectrum_analyser.plot.ddc_plan.nsd_db
@ddc_plan_nsd_db.setter
def ddc_plan_nsd_db(self, nsd_db):
self._spectrum_analyser.plot.ddc_plan.nsd_db = nsd_db
self._spectrum_analyser.plot.update_ddc_plan()
@property
def ddc_plan_pll_mix_db(self):
return self._spectrum_analyser.plot.ddc_plan.pll_mix_db
@ddc_plan_pll_mix_db.setter
def ddc_plan_pll_mix_db(self, pll_mix_db):
self._spectrum_analyser.plot.ddc_plan.pll_mix_db = pll_mix_db
self._spectrum_analyser.plot.update_ddc_plan()
@property
def ddc_plan_off_spur_db(self):
return self._spectrum_analyser.plot.ddc_plan.off_spur_db
@ddc_plan_off_spur_db.setter
def ddc_plan_off_spur_db(self, off_spur_db):
self._spectrum_analyser.plot.ddc_plan.off_spur_db = off_spur_db
self._spectrum_analyser.plot.update_ddc_plan()
@property
def ddc_plan_tis_spur_db(self):
return self._spectrum_analyser.plot.ddc_plan.tis_spur_db
@ddc_plan_tis_spur_db.setter
def ddc_plan_tis_spur_db(self, tis_spur_db):
self._spectrum_analyser.plot.ddc_plan.tis_spur_db = tis_spur_db
self._spectrum_analyser.plot.update_ddc_plan()
@property
def dma_status(self):
return self._spectrum_analyser.dma_status
def spectrum(self):
return self._spectrum_analyser.plot.get_plot()
def waterfall(self):
return self._spectrum_analyser.spectrogram.get_plot()
def reset_ofdm_receiver(self):
self._ofdm_receiver.reset_synchronisation()
def _safe_restart(self):
tile_number = self._adc_description[0]
self._tile.ShutDown()
running = self._tile._parent.IPStatus['ADCTileStatus'][tile_number]['PowerUpState']
while running:
time.sleep(0.1)
running = self._tile._parent.IPStatus['ADCTileStatus'][tile_number]['PowerUpState']
self._tile.StartUp()
running = self._tile._parent.IPStatus['ADCTileStatus'][tile_number]['PowerUpState']
while not running:
time.sleep(0.1)
running = self._tile._parent.IPStatus['ADCTileStatus'][tile_number]['PowerUpState']
#_freq_planner_props = [("enable_rx_alias"),
# ("enable_rx_image"),
# ("enable_nyquist_up"),
# ("enable_nyquist_down"),
# ("enable_hd2"),
# ("enable_hd2_image"),
# ("enable_hd3"),
# ("enable_hd3_image"),
# ("enable_pll_mix_up"),
# ("enable_pll_mix_up_image"),
# ("enable_pll_mix_down"),
# ("enable_pll_mix_down_image"),
# ("enable_tis_spur"),
# ("enable_tis_spur_image"),
# ("enable_offset_spur"),
# ("enable_offset_spur_image")]
#_freq_planner_desc = [("RX Alias"),
# ("RX Image"),
# ("Nyquist Up"),
# ("Nyquist Down"),
# ("HD2"),
# ("HD2 Image"),
# ("HD3"),
# ("HD3 Image"),
# ("PLL Mix Up"),
# ("PLL Mix Up Image"),
# ("PLL Mix Down"),
# ("PLL Mix Down Image"),
# ("TIS Spur"),
# ("TIS Spur Image"),
# ("Offset Spur"),
# ("Offset Spur Image")]
_freq_planner_props = [("enable_rx_alias"),
("enable_rx_image"),
("enable_hd2"),
("enable_hd2_image"),
("enable_hd3"),
("enable_hd3_image"),
("enable_pll_mix_up"),
("enable_pll_mix_up_image"),
("enable_pll_mix_down"),
("enable_pll_mix_down_image")]
_freq_planner_desc = [("Fc"),
("Fc Image"),
("HD2"),
("HD2 Image"),
("HD3"),
("HD3 Image"),
("PLL Mix Up"),
("PLL Mix Up Image"),
("PLL Mix Down"),
("PLL Mix Down Image")]
def _create_mmio_property(idx):
def _get(self):
return self._spectrum_analyser.plot.display_ddc_plan[idx]
def _set(self, value):
if value:
self._spectrum_analyser.plot.display_ddc_plan[idx] = True
else:
self._spectrum_analyser.plot.display_ddc_plan[idx] = False
self._spectrum_analyser.plot.update_ddc_plan()
return property(_get, _set)
for idx, name in enumerate(_freq_planner_props):
setattr(RadioOfdmAnalyser, name, _create_mmio_property(idx))
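# The loop above attaches one boolean property per frequency-planner overlay listed in
# _freq_planner_props (e.g. enable_rx_alias); each property toggles the matching entry
# of the plot's display_ddc_plan list and refreshes the DDC plan overlay.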
class RadioOfdmAnalyserGUI():
def __init__(self,
adc_tile,
adc_block,
adc_description,
spectrum_analyser,
decimator,
ofdm_receiver,
inspector):
self._widgets = {}
self._accordions = {}
self._running_update = False
self._update_que = []
self._stopped = False
self._runtime_status = {'spectrum_enable' : False, 'waterfall_enable' : False}
self._inspector = inspector
self.analyser = RadioOfdmAnalyser(adc_tile=adc_tile,
adc_block=adc_block,
adc_description=adc_description,
spectrum_analyser=spectrum_analyser,
decimator=decimator,
ofdm_receiver=ofdm_receiver,
inspector=self._inspector)
self._config = {'centre_frequency' : 819,
'nyquist_stopband' : 80,
'decimation_factor' : self.analyser.decimation_factor,
'calibration_mode' : self.analyser.calibration_mode,
'fftsize' : 2048,
'spectrum_type' : self.analyser.spectrum_type,
'spectrum_units' : self.analyser.spectrum_units,
'window' : 'hanning',
'height' : self.analyser.height,
'spectrum_enable' : self.analyser.spectrum_enable,
'waterfall_enable' : self.analyser.waterfall_enable,
'constellation_enable' : self.analyser.constellation_enable,
'dma_enable' : self.analyser.dma_enable,
'update_frequency' : 10,
'plotly_theme' : self.analyser.plotly_theme,
'line_colour' : self.analyser.line_colour,
'zmin' : self.analyser.zmin,
'zmax' : self.analyser.zmax,
'quality' : self.analyser.quality,
'width' : self.analyser.width,
'post_process' : 'average',
'number_frames' : 6,
'display_max' : False,
'display_min' : False,
'number_max_indices' : 1,
'number_min_indices' : 1,
'colour_map' : self.analyser.colour_map,
'spectrogram_performance' : 4,
'ymin' : self.analyser.ymin,
'ymax' : self.analyser.ymax,
'enable_rx_alias' : False,
'enable_rx_image' : False,
'enable_hd2' : False,
'enable_hd2_image' : False,
'enable_hd3' : False,
'enable_hd3_image' : False,
'enable_pll_mix_up' : False,
'enable_pll_mix_up_image' : False,
'enable_pll_mix_down' : False,
'enable_pll_mix_down_image' : False,
'ddc_centre_frequency' : 0,
'ddc_plan_hd2_db' : self.analyser.ddc_plan_hd2_db,
'ddc_plan_hd3_db' : self.analyser.ddc_plan_hd3_db,
'ddc_plan_nsd_db' : self.analyser.ddc_plan_nsd_db,
'ddc_plan_pll_mix_db' : self.analyser.ddc_plan_pll_mix_db,
'ddc_plan_off_spur_db' : self.analyser.ddc_plan_off_spur_db,
'ddc_plan_tis_spur_db' : self.analyser.ddc_plan_tis_spur_db}
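        # self._config is the single source of truth for the GUI: _update_config() writes these
        # values through to the analyser hardware/plots and keeps the ipywidgets in sync.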
self._initialise_frontend()
@property
def config(self):
return self._config
@config.setter
def config(self, config_dict):
self._update_config(config_dict)
def start(self):
self.config = {'spectrum_enable' : self._runtime_status['spectrum_enable'],
'waterfall_enable' : self._runtime_status['waterfall_enable']}
self._stopped = False
def stop(self):
if not self._stopped:
self._runtime_status.update({'spectrum_enable' : self._config['spectrum_enable'],
'waterfall_enable' : self._config['waterfall_enable']})
self.config = {'spectrum_enable' : False,
'waterfall_enable' : False}
self._stopped = True
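    # start()/stop() cache the user's spectrum/waterfall enable flags in _runtime_status so a
    # stopped dashboard can later resume with the same plots running as before.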
def _initialise_frontend(self):
self._widgets.update({'ddc_centre_frequency' :
FloatText(callback=self._update_config,
value=self._config['ddc_centre_frequency'],
min_value=0,
max_value=self.analyser._block.BlockStatus['SamplingFreq']*1e3,
step=1,
dict_id='ddc_centre_frequency',
description='Centre Frequency (MHz):')})
self._widgets.update({'ddc_plan_hd2_db' :
FloatText(callback=self._update_config,
value=self._config['ddc_plan_hd2_db'],
min_value=-300,
max_value=300,
step=1,
dict_id='ddc_plan_hd2_db',
description='HD2 (dB)')})
self._widgets.update({'ddc_plan_hd3_db' :
FloatText(callback=self._update_config,
value=self._config['ddc_plan_hd3_db'],
min_value=-300,
max_value=300,
step=1,
dict_id='ddc_plan_hd3_db',
description='HD3 (dB)')})
self._widgets.update({'ddc_plan_nsd_db' :
FloatText(callback=self._update_config,
value=self._config['ddc_plan_nsd_db'],
min_value=-300,
max_value=300,
step=1,
dict_id='ddc_plan_nsd_db',
description='NSD (dBFs/Hz)')})
self._widgets.update({'ddc_plan_pll_mix_db' :
FloatText(callback=self._update_config,
value=self._config['ddc_plan_pll_mix_db'],
min_value=-300,
max_value=300,
step=1,
dict_id='ddc_plan_pll_mix_db',
description='PLL Ref Mixing (dB)')})
self._widgets.update({'ddc_plan_off_spur_db' :
FloatText(callback=self._update_config,
value=self._config['ddc_plan_off_spur_db'],
min_value=-300,
max_value=300,
step=1,
dict_id='ddc_plan_off_spur_db',
description='Offset Spur (dB)')})
self._widgets.update({'ddc_plan_tis_spur_db' :
FloatText(callback=self._update_config,
value=self._config['ddc_plan_tis_spur_db'],
min_value=-300,
max_value=300,
step=1,
dict_id='ddc_plan_tis_spur_db',
description='TI Spur (dB)')})
for idx, freq_prop in enumerate(_freq_planner_props):
self._widgets.update({freq_prop :
CheckBox(callback=self._update_config,
description=_freq_planner_desc[idx],
value=self._config[freq_prop],
indent=False,
layout_width='150px',
dict_id=freq_prop)})
self._widgets.update({'decimation_factor' :
DropDown(callback=self._update_config,
options=[2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048],
value=self._config['decimation_factor'],
dict_id='decimation_factor',
description='Decimation Factor:')})
self._widgets.update({'spectrum_type' :
DropDown(callback=self._update_config,
options=[('Power Spectrum'),
('Power Spectral Density')],
value=self._config['spectrum_type'],
dict_id='spectrum_type',
description='Spectrum Type:',
description_width='100px')})
self._widgets.update({'spectrum_units' :
DropDown(callback=self._update_config,
options=[('dBFS'),
('dBm')],
value=self._config['spectrum_units'],
dict_id='spectrum_units',
description='Spectrum Units:',
description_width='100px')})
self._widgets.update({'post_process' :
DropDown(callback=self._update_config,
options=[('None', 'none'),
('Maximum Hold', 'max'),
('Minimum Hold', 'min'),
('Running Average', 'average'),
('Running Median', 'median')],
value=self._config['post_process'],
dict_id='post_process',
description='Post Processing:',
description_width='100px')})
self._widgets.update({'fftsize' :
DropDown(callback=self._update_config,
options=[64, 128, 256, 512, 1024, 2048, 4096, 8192],
value=4096,
dict_id='fftsize',
description = 'FFT Size:')})
self._widgets.update({'calibration_mode' :
DropDown(callback=self._update_config,
options=[('1 (Fs/2 ≤ ±30%)', 1),
('2 (Fs/2 > ±30%)', 2)],
value=self._config['calibration_mode'],
dict_id='calibration_mode',
description='Calibration Mode:')})
self._widgets.update({'window' :
DropDown(callback=self._update_config,
options=[('Rectangular', 'rectangular'),
('Bartlett', 'bartlett'),
('Blackman', 'blackman'),
('Hamming', 'hamming'),
('Hanning', 'hanning')],
value='rectangular',
dict_id='window',
description='')})
self._widgets.update({'plotly_theme' :
DropDown(callback=self._update_config,
options=[('Seaborn', 'seaborn'),
('Simple White', 'simple_white'),
('Plotly', 'plotly'),
('Plotly White', 'plotly_white'),
('Plotly Dark', 'plotly_dark')],
value='plotly',
dict_id='plotly_theme',
description='Plotly Theme:')})
self._widgets.update({'colour_map' :
DropDown(callback=self._update_config,
options=[('Grey' , 'gray'),
('Spring' , 'spring'),
('Summer' , 'summer'),
('Autumn' , 'autumn'),
('Winter' , 'winter'),
('Cool' , 'cool'),
('Hot' , 'hot'),
('Copper' , 'copper'),
('Rainbow', 'rainbow'),
('Jet' , 'jet')],
value='gray',
dict_id='colour_map',
description='Colour Map:',
description_width='100px')})
self._widgets.update({'line_colour' :
DropDown(callback=self._update_config,
options=list(mcolors.CSS4_COLORS),
value='white',
dict_id='line_colour',
description='Line Colour:')})
self._widgets.update({'line_fill' :
DropDown(callback=self._update_config,
options=list(mcolors.CSS4_COLORS),
value='lightpink',
dict_id='line_fill',
description='Line Fill:')})
self._widgets.update({'spectrogram_performance' :
DropDown(callback=self._update_config,
options=[('Low', 8),
('Medium', 4),
('High', 2)],
value=2,
dict_id='spectrogram_performance',
description='Resolution:',
description_width='100px')})
self._widgets.update({'number_max_indices' :
IntText(callback=self._update_config,
value=self._config['number_max_indices'],
min_value=1,
max_value=64,
step=1,
dict_id='number_max_indices',
description='Number of Maximums:')})
self._widgets.update({'number_min_indices' :
IntText(callback=self._update_config,
value=self._config['number_min_indices'],
min_value=1,
max_value=64,
step=1,
dict_id='number_min_indices',
description='Number of Minimums:')})
self._widgets.update({'number_frames' :
FloatText(callback=self._update_config,
value=self._config['number_frames'],
min_value=1,
max_value=64,
step=1,
dict_id='number_frames',
description='Number Frames:',
description_width='100px')})
self._widgets.update({'ymin' :
FloatText(callback=self._update_config,
value=self._config['ymin'],
min_value=-300,
max_value=300,
step=1,
dict_id='ymin',
description='Y-Low (dB):',
description_width='100px')})
self._widgets.update({'ymax' :
FloatText(callback=self._update_config,
value=self._config['ymax'],
min_value=-300,
max_value=300,
step=1,
dict_id='ymax',
description='Y-High (dB):',
description_width='100px')})
self._widgets.update({'centre_frequency' :
FloatText(callback=self._update_config,
value=self._config['centre_frequency'],
min_value=0,
max_value=self.analyser._block.BlockStatus['SamplingFreq']*1e3,
step=1,
dict_id='centre_frequency',
description='Centre Frequency (MHz):')})
self._widgets.update({'nyquist_stopband' :
FloatText(callback=self._update_config,
value=self._config['nyquist_stopband'],
min_value=50,
max_value=100,
step=1,
dict_id='nyquist_stopband',
description='Nyquist Stopband (%):')})
self._widgets.update({'height' :
FloatText(callback=self._update_config,
value=self._config['height'],
min_value=200,
max_value=2160,
step=1,
dict_id='height',
description='Plot Height (Px):')})
self._widgets.update({'width' :
FloatText(callback=self._update_config,
value=self._config['width'],
min_value=400,
max_value=4096,
step=1,
dict_id='width',
description='Plot Width (Px):')})
#self._widgets.update({'update_frequency' :
# FloatText(callback=self._update_config,
# value=self._config['update_frequency'],
# min_value=5,
# max_value=12,
# step=1,
# dict_id='update_frequency',
# description='Update Frequency:')})
self._widgets.update({'update_frequency' :
DropDown(callback=self._update_config,
options=[('Low', 5),
('Medium', 10),
('High', 15)],
value=5,
dict_id='update_frequency',
description='Plot Performance:')})
self._widgets.update({'zmin' :
FloatText(callback=self._update_config,
value=self._config['zmin'],
min_value=-300,
max_value=300,
step=1,
dict_id='zmin',
description='Z-Low (dB):',
description_width='100px')})
self._widgets.update({'zmax' :
FloatText(callback=self._update_config,
value=self._config['zmax'],
min_value=-300,
max_value=300,
step=1,
dict_id='zmax',
description='Z-High (dB):',
description_width='100px')})
self._widgets.update({'quality' :
FloatText(callback=self._update_config,
value=self._config['quality'],
min_value=80,
max_value=100,
step=1,
dict_id='quality',
description='Quality (%):',
description_width='100px')})
self._widgets.update({'constellation_enable' :
Button(callback=self._update_config,
description_on = 'On',
description_off = 'Off',
state=False,
dict_id='constellation_enable')})
self._widgets.update({'dma_enable' :
Button(callback=self._update_config,
description_on = 'On',
description_off = 'Off',
state=False,
dict_id='dma_enable')})
self._widgets.update({'spectrum_enable' :
Button(callback=self._update_config,
description_on = 'On',
description_off = 'Off',
state=False,
dict_id='spectrum_enable')})
self._widgets.update({'waterfall_enable' :
Button(callback=self._update_config,
description_on = 'On',
description_off = 'Off',
state=False,
dict_id='waterfall_enable')})
self._widgets.update({'reset_ofdm_receiver' :
QuickButton(callback=self.analyser.reset_ofdm_receiver,
description_on = 'Resetting',
description_off = 'Reset',
state=False,
dict_id='reset_ofdm_receiver')})
self._widgets.update({'sample_frequency_label' :
Label(value=str((self.analyser.sample_frequency/self.analyser.decimation_factor)*1e-6),
svalue='Sample Frequency: ',
evalue=' MHz',
dict_id='sample_frequency_label')})
self._widgets.update({'resolution_bandwidth_label' :
Label(value=str(((self.analyser.sample_frequency/self.analyser.decimation_factor)/ \
self.analyser.fftsize)*1e-3),
svalue='Frequency Resolution: ',
evalue=' kHz',
dict_id='resolution_bandwidth_label')})
self._widgets.update({'display_max' :
CheckBox(callback=self._update_config,
description='Display Maximum',
value=self._config['display_max'],
dict_id='display_max')})
self._widgets.update({'display_min' :
CheckBox(callback=self._update_config,
description='Display Minimum',
value=self._config['display_min'],
dict_id='display_min')})
self._window_plot = go.FigureWidget(layout={'hovermode' : 'closest',
'height' : 225,
'width' : 300,
'margin' : {
't':0, 'b':20, 'l':0, 'r':0
},
'showlegend' : False,
},
data=[{
'x': np.arange(self.analyser.fftsize),
'y': np.ones(self.analyser.fftsize),
'line':{
'color' : 'palevioletred',
'width' : 2
},
'fill' : 'tozeroy',
'fillcolor' : 'rgba(128, 128, 128, 0.5)'
}])
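        # Small preview plot showing the shape of the currently selected FFT window; it is
        # redrawn whenever the 'window' or 'fftsize' settings change (see _update_figurewidgets).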
self._accordions.update({'properties' :
ipw.Accordion(children=[ipw.HBox(
[ipw.VBox([ipw.Label(value='Spectrum Analyzer: ', layout=ipw.Layout(width='150px')),
ipw.Label(value='Spectrogram: ', layout=ipw.Layout(width='150px'))]),
ipw.VBox([self._widgets['spectrum_enable'].get_widget(),
self._widgets['waterfall_enable'].get_widget()])],
layout=ipw.Layout(justify_content='space-around')),
ipw.VBox([self._widgets['centre_frequency'].get_widget(),
self._widgets['decimation_factor'].get_widget(),
self._widgets['fftsize'].get_widget()]),
ipw.VBox([self._widgets['post_process'].get_widget(),
self._widgets['number_frames'].get_widget(),
self._widgets['ymin'].get_widget(),
self._widgets['ymax'].get_widget()]),
ipw.VBox([ipw.Label(value='Experimental Control Panel'),
self._widgets['ddc_centre_frequency'].get_widget(),
ipw.HBox([
ipw.VBox([self._widgets[_freq_planner_props[i]].get_widget() for i in range(0,int(len(_freq_planner_props)/2))]),
ipw.VBox([self._widgets[_freq_planner_props[i]].get_widget() for i in range(int(len(_freq_planner_props)/2),len(_freq_planner_props))])
])
]),
ipw.VBox([self._widgets['spectrogram_performance'].get_widget(),
self._widgets['colour_map'].get_widget(),
self._widgets['zmin'].get_widget(),
self._widgets['zmax'].get_widget()]),
ipw.VBox([self._window_plot,
self._widgets['window'].get_widget()]),
ipw.VBox([self._widgets['nyquist_stopband'].get_widget(),
self._widgets['height'].get_widget(),
self._widgets['width'].get_widget(),
self._widgets['update_frequency'].get_widget()])
])})
""" Frequency Planner Widgets
ipw.VBox([self._widgets['ddc_centre_frequency'].get_widget(),
self._widgets['ddc_plan_hd2_db'].get_widget(),
self._widgets['ddc_plan_hd3_db'].get_widget(),
self._widgets['ddc_plan_pll_mix_db'].get_widget(),
self._widgets['ddc_plan_off_spur_db'].get_widget(),
self._widgets['ddc_plan_tis_spur_db'].get_widget(),
self._widgets['ddc_plan_nsd_db'].get_widget(),
ipw.HBox([
ipw.VBox([self._widgets[_freq_planner_props[i]].get_widget() for i in range(0,int(len(_freq_planner_props)/2))]),
ipw.VBox([self._widgets[_freq_planner_props[i]].get_widget() for i in range(int(len(_freq_planner_props)/2),len(_freq_planner_props))])
])
]),
"""
self._accordions['properties'].set_title(0, 'System')
self._accordions['properties'].set_title(1, 'Receiver')
self._accordions['properties'].set_title(2, 'Spectrum Analyzer')
self._accordions['properties'].set_title(3, 'Frequency Planner')
self._accordions['properties'].set_title(4, 'Spectrogram')
self._accordions['properties'].set_title(5, 'Window Settings')
self._accordions['properties'].set_title(6, 'Plot Settings')
"""The transmit system accordion"""
self._accordions.update({'constellation_properties' :
ipw.Accordion(children=[ipw.HBox([ipw.VBox([ipw.Label(value='Constellation: ', layout=ipw.Layout(width='150px')),
ipw.Label(value='Reset Receiver: ', layout=ipw.Layout(width='150px'))]),
ipw.VBox([self._widgets['constellation_enable'].get_widget(),
self._widgets['reset_ofdm_receiver'].get_widget()])],
layout=ipw.Layout(justify_content='space-around'))],
layout=ipw.Layout(justify_content='flex-start',
width='initial'))})
self._accordions['constellation_properties'].set_title(0, 'System')
self._update_config(self._config)
def _update_config(self, config_dict):
for key in config_dict.keys():
if key not in self._config:
                raise KeyError('Key {} not in dictionary.'.format(key))
self._config.update(config_dict)
self._update_que.append(config_dict.keys())
if not self._running_update:
self._running_update = True
self._update_frontend()
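    # _update_frontend drains the update queue: it pauses spectrum updates, waits for the DMA to
    # report status 32 (assumed idle), applies each queued key to the analyser and its widget,
    # then restores the previous plot state once the queue is empty.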
def _update_frontend(self):
if self._update_que:
plot_running = self._config['spectrum_enable']
self.analyser.spectrum_enable = False
while self.analyser.dma_status != 32:
time.sleep(0.1)
while self._running_update:
keys = self._update_que.pop(0)
for key in keys:
if key in self._config:
if key in ['centre_frequency', 'decimation_factor', 'quality']:
self._widgets['waterfall_enable'].value = False
self.analyser.waterfall_enable = False
setattr(self.analyser, key, self._config[key])
self._widgets[key].value = self._config[key]
if key in ['plotly_theme', 'line_colour', 'decimation_factor',
'spectrum_enable', 'waterfall_enable']:
self._update_widgets(key)
if key in ['fftsize', 'window']:
self._update_figurewidgets(key)
self._update_textwidgets()
time.sleep(0.2)
if not self._update_que:
self.analyser.spectrum_enable = plot_running
self._running_update = False
self._running_update = False
def _update_textwidgets(self):
self._widgets['sample_frequency_label'].value = str((self.analyser.sample_frequency/ \
self.analyser.decimation_factor)*1e-6)
self._widgets['resolution_bandwidth_label'].value = str(((self.analyser.sample_frequency/ \
self.analyser.decimation_factor)/self.analyser.fftsize)*1e-3)
def _update_figurewidgets(self, key):
if key in ['fftsize']:
self._window_plot.data[0].x = np.arange(self.analyser.fftsize)
self._window_plot.data[0].y = self.analyser.spectrum_window
elif key in ['window']:
self._window_plot.data[0].y = self.analyser.spectrum_window
def _update_widgets(self, key):
if key in ['line_colour']:
self._window_plot.data[0].line.color = self._config['line_colour']
self._widgets['dma_enable'].button_colour = self._config['line_colour']
self._widgets['spectrum_enable'].button_colour = self._config['line_colour']
self._widgets['waterfall_enable'].button_colour = self._config['line_colour']
self._widgets['constellation_enable'].button_colour = self._config['line_colour']
self._widgets['reset_ofdm_receiver'].button_colour = self._config['line_colour']
elif key in ['plotly_theme']:
self._window_plot.layout.template = self._config['plotly_theme']
elif key in ['decimation_factor']:
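            # Centre-frequency step size shrinks as the decimation factor grows; the index is
            # log2(decimation_factor) - 1 for factors 2..2048.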
step_list = [10, 1, 1, 1, 0.1, 0.1, 0.1, 0.01, 0.01, 0.01, 0.001]
self._widgets['centre_frequency'].step = step_list[int(np.log2(self._config['decimation_factor']) - 1)]
elif key in ['spectrum_enable']:
if self._config['spectrum_enable']:
self._widgets['dma_enable'].configure_state(True)
else:
if not self._config['waterfall_enable']:
self._widgets['dma_enable'].configure_state(False)
elif key in ['waterfall_enable']:
if self._config['waterfall_enable']:
self._widgets['dma_enable'].configure_state(True)
else:
if not self._config['spectrum_enable']:
self._widgets['dma_enable'].configure_state(False)
def spectrum_analyser(self, config=None):
if config is not None:
self.config = config
return ipw.VBox([ipw.HBox([ipw.VBox([self.analyser.spectrum(),
self.analyser.waterfall(),
ipw.HBox([self._widgets['sample_frequency_label'].get_widget(),
ipw.Label(value=' | '),
self._widgets['resolution_bandwidth_label'].get_widget()],
layout=ipw.Layout(justify_content='flex-end'))
]),
self._accordions['properties']
])
])
def constellation_plot(self):
return ipw.HBox([self._inspector.constellation_plot(),
self._accordions['constellation_properties']
])
class RadioAnalyser():
def __init__(self,
adc_tile,
adc_block,
adc_description,
spectrum_analyser,
decimator):
self._tile = adc_tile
self._block = adc_block
self._spectrum_analyser = spectrum_analyser
self._decimator = decimator
self._adc_description = adc_description
@property
def centre_frequency(self):
return abs(self._block.MixerSettings['Freq'])
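    # Setting the centre frequency selects the Nyquist zone from the ADC sampling rate and
    # programs the fine mixer: the mixer frequency is negated in odd zones and kept positive in
    # even zones, then the analyser's plot centre is retuned and a mixer update event is issued.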
@centre_frequency.setter
def centre_frequency(self, centre_frequency):
nyquist_zone = int(np.ceil(centre_frequency/(self._block.BlockStatus['SamplingFreq']*1e3/2)))
if nyquist_zone == 0:
nyquist_zone = 1
if nyquist_zone != self._block.NyquistZone:
self._block.NyquistZone = nyquist_zone
if (nyquist_zone % 2) == 0:
self._block.MixerSettings['Freq'] = centre_frequency
else:
self._block.MixerSettings['Freq'] = -centre_frequency
self._spectrum_analyser.centre_frequency = centre_frequency*1e6
self._block.UpdateEvent(1)
@property
def decimation_factor(self):
if self._decimator.decimation_factor > 0:
return self._block.DecimationFactor * self._decimator.decimation_factor
else:
return self._block.DecimationFactor
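    # Decimation factors 2-8 are handled entirely by the RF-ADC block; for 16-2048 the block is
    # fixed at 8 and the remaining factor is pushed into the soft decimator, with the spectrum
    # analyser's sample rate, SSR mode and packet size reprogrammed to match.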
@decimation_factor.setter
def decimation_factor(self, decimation_factor):
word_lut = [8, 4, 2]
sel = int(np.log2(decimation_factor))
if decimation_factor in [2, 4, 8]:
self._block.DecimationFactor = decimation_factor
self._block.FabRdVldWords = word_lut[sel-1]
self._spectrum_analyser.ssr_packetsize = 0
self._spectrum_analyser.ssr_mode = 4-sel
self._safe_restart()
self._decimator.decimation_factor = 0
self._spectrum_analyser.sample_frequency = self._block.BlockStatus['SamplingFreq']*1e9
self._spectrum_analyser.decimation_factor = decimation_factor
self._spectrum_analyser.ssr_packetsize = int(self._spectrum_analyser.fft_size/8)
elif decimation_factor in [16, 32, 64, 128, 256, 512, 1024, 2048]:
self._block.DecimationFactor = 8
self._block.FabRdVldWords = 2
self._spectrum_analyser.ssr_packetsize = 0
self._spectrum_analyser.ssr_mode = 0
self._safe_restart()
self._decimator.decimation_factor = int(decimation_factor/8)
self._spectrum_analyser.sample_frequency = self._block.BlockStatus['SamplingFreq']*1e9
self._spectrum_analyser.decimation_factor = decimation_factor
self._spectrum_analyser.ssr_packetsize = int(self._spectrum_analyser.fft_size/8)
@property
def number_frames(self):
return self._spectrum_analyser.plot.data_windowsize
@number_frames.setter
def number_frames(self, number_frames):
if number_frames in range(1, 65):
self._spectrum_analyser.plot.data_windowsize = int(number_frames)
@property
def sample_frequency(self):
return self._block.BlockStatus['SamplingFreq']*1e9
@property
def calibration_mode(self):
return self._block.CalibrationMode
@calibration_mode.setter
def calibration_mode(self, calibration_mode):
if calibration_mode in [1, 2]:
self._block.CalibrationMode = calibration_mode
self._safe_restart()
@property
def nyquist_stopband(self):
return self._spectrum_analyser.nyquist_stopband * 100
@nyquist_stopband.setter
def nyquist_stopband(self, nyquist_stopband):
self._spectrum_analyser.nyquist_stopband = nyquist_stopband/100
@property
def fftsize(self):
return self._spectrum_analyser.fft_size
@fftsize.setter
def fftsize(self, fftsize):
self._spectrum_analyser.fft_size = fftsize
@property
def spectrum_type(self):
return self._spectrum_analyser.spectrum_type
@spectrum_type.setter
def spectrum_type(self, spectrum_type):
self._spectrum_analyser.spectrum_type = spectrum_type
@property
def spectrum_units(self):
return self._spectrum_analyser.spectrum_units
@spectrum_units.setter
def spectrum_units(self, spectrum_units):
self._spectrum_analyser.spectrum_units = spectrum_units
@property
def window(self):
return self._spectrum_analyser.window
@window.setter
def window(self, window_type):
self._spectrum_analyser.window = window_type
@property
def spectrum_window(self):
return self._spectrum_analyser.spectrum_window
@property
def height(self):
return self._spectrum_analyser.height
@height.setter
def height(self, height):
self._spectrum_analyser.height = height
@property
def width(self):
return self._spectrum_analyser.width
@width.setter
def width(self, width):
self._spectrum_analyser.width = width
@property
def spectrum_enable(self):
return self._spectrum_analyser.plot.enable_updates
@spectrum_enable.setter
def spectrum_enable(self, enable):
if enable:
self._spectrum_analyser.plot.enable_updates = True
else:
self._spectrum_analyser.plot.enable_updates = False
@property
def waterfall_enable(self):
return self._spectrum_analyser.spectrogram.enable_updates
@waterfall_enable.setter
def waterfall_enable(self, enable):
if enable:
self._spectrum_analyser.spectrogram.enable_updates = True
else:
self._spectrum_analyser.spectrogram.enable_updates = False
@property
def dma_enable(self):
return self._spectrum_analyser.dma_enable
@dma_enable.setter
def dma_enable(self, enable):
if enable:
self._spectrum_analyser.dma_enable = 1
self._spectrum_analyser.timer.start()
else:
self._spectrum_analyser.timer.stop()
self._spectrum_analyser.dma_enable = 0
@property
def update_frequency(self):
return self._spectrum_analyser.update_frequency
@update_frequency.setter
def update_frequency(self, update_frequency):
self._spectrum_analyser.update_frequency = update_frequency
@property
def plotly_theme(self):
return self._spectrum_analyser.plotly_theme
@plotly_theme.setter
def plotly_theme(self, plotly_theme):
self._spectrum_analyser.plotly_theme = plotly_theme
@property
def line_colour(self):
return self._spectrum_analyser.line_colour
@line_colour.setter
def line_colour(self, line_colour):
self._spectrum_analyser.line_colour = line_colour
@property
def line_fill(self):
return self._spectrum_analyser.line_fill
@line_fill.setter
def line_fill(self, line_fill):
self._spectrum_analyser.line_fill = line_fill
@property
def zmin(self):
return self._spectrum_analyser.zmin
@zmin.setter
def zmin(self, zmin):
self._spectrum_analyser.zmin = zmin
@property
def zmax(self):
return self._spectrum_analyser.zmax
@zmax.setter
def zmax(self, zmax):
self._spectrum_analyser.zmax = zmax
@property
def quality(self):
return self._spectrum_analyser.quality
@quality.setter
def quality(self, quality):
self._spectrum_analyser.quality = quality
@property
def post_process(self):
return self._spectrum_analyser.plot.post_process
@post_process.setter
def post_process(self, post_process):
if post_process in ['max', 'min', 'average', 'median']:
self._spectrum_analyser.plot.post_process = post_process
else:
self._spectrum_analyser.plot.post_process = 'none'
@property
def display_max(self):
return self._spectrum_analyser.plot.display_max
@display_max.setter
def display_max(self, display_max):
self._spectrum_analyser.plot.display_max = display_max
@property
def display_min(self):
return self._spectrum_analyser.plot.display_min
@display_min.setter
def display_min(self, display_min):
self._spectrum_analyser.plot.display_min = display_min
@property
def number_max_indices(self):
return self._spectrum_analyser.plot.number_max_indices
@number_max_indices.setter
def number_max_indices(self, number_max_indices):
self._spectrum_analyser.plot.number_max_indices = number_max_indices
@property
def colour_map(self):
return self._spectrum_analyser.spectrogram.cmap
@colour_map.setter
def colour_map(self, colour_map):
self._spectrum_analyser.spectrogram.cmap = colour_map
@property
def spectrogram_performance(self):
return self._spectrum_analyser.spectrogram.ypixel
@spectrogram_performance.setter
def spectrogram_performance(self, performance):
self._spectrum_analyser.spectrogram.ypixel = performance
@property
def ymin(self):
return self._spectrum_analyser.plot.yrange[0]
@ymin.setter
def ymin(self, ymin):
temp_range = list(self._spectrum_analyser.plot.yrange)
temp_range[0] = ymin
self._spectrum_analyser.plot.yrange = tuple(temp_range)
@property
def ymax(self):
return self._spectrum_analyser.plot.yrange[1]
@ymax.setter
def ymax(self, ymax):
temp_range = list(self._spectrum_analyser.plot.yrange)
temp_range[1] = ymax
self._spectrum_analyser.plot.yrange = tuple(temp_range)
@property
def number_min_indices(self):
return self._spectrum_analyser.plot.number_min_indices
@number_min_indices.setter
def number_min_indices(self, number_min_indices):
self._spectrum_analyser.plot.number_min_indices = number_min_indices
@property
def display_ddc_plan(self):
return self._spectrum_analyser.plot.display_ddc_plan
@display_ddc_plan.setter
def display_ddc_plan(self, display_ddc_plan):
self._spectrum_analyser.plot.display_ddc_plan = display_ddc_plan
@property
def ddc_centre_frequency(self):
return self._spectrum_analyser.plot.ddc_centre_frequency*1e-6
@ddc_centre_frequency.setter
def ddc_centre_frequency(self, ddc_centre_frequency):
self._spectrum_analyser.plot.ddc_centre_frequency = ddc_centre_frequency*1e6
self._spectrum_analyser.plot.update_ddc_plan()
@property
def ddc_plan_hd2_db(self):
return self._spectrum_analyser.plot.ddc_plan.hd2_db
@ddc_plan_hd2_db.setter
def ddc_plan_hd2_db(self, hd2_db):
self._spectrum_analyser.plot.ddc_plan.hd2_db = hd2_db
self._spectrum_analyser.plot.update_ddc_plan()
@property
def ddc_plan_hd3_db(self):
return self._spectrum_analyser.plot.ddc_plan.hd3_db
@ddc_plan_hd3_db.setter
def ddc_plan_hd3_db(self, hd3_db):
self._spectrum_analyser.plot.ddc_plan.hd3_db = hd3_db
self._spectrum_analyser.plot.update_ddc_plan()
@property
def ddc_plan_nsd_db(self):
return self._spectrum_analyser.plot.ddc_plan.nsd_db
@ddc_plan_nsd_db.setter
def ddc_plan_nsd_db(self, nsd_db):
self._spectrum_analyser.plot.ddc_plan.nsd_db = nsd_db
self._spectrum_analyser.plot.update_ddc_plan()
@property
def ddc_plan_pll_mix_db(self):
return self._spectrum_analyser.plot.ddc_plan.pll_mix_db
@ddc_plan_pll_mix_db.setter
def ddc_plan_pll_mix_db(self, pll_mix_db):
self._spectrum_analyser.plot.ddc_plan.pll_mix_db = pll_mix_db
self._spectrum_analyser.plot.update_ddc_plan()
@property
def ddc_plan_off_spur_db(self):
return self._spectrum_analyser.plot.ddc_plan.off_spur_db
@ddc_plan_off_spur_db.setter
def ddc_plan_off_spur_db(self, off_spur_db):
self._spectrum_analyser.plot.ddc_plan.off_spur_db = off_spur_db
self._spectrum_analyser.plot.update_ddc_plan()
@property
def ddc_plan_tis_spur_db(self):
return self._spectrum_analyser.plot.ddc_plan.tis_spur_db
@ddc_plan_tis_spur_db.setter
def ddc_plan_tis_spur_db(self, tis_spur_db):
self._spectrum_analyser.plot.ddc_plan.tis_spur_db = tis_spur_db
self._spectrum_analyser.plot.update_ddc_plan()
@property
def dma_status(self):
return self._spectrum_analyser.dma_status
def spectrum(self):
return self._spectrum_analyser.plot.get_plot()
def waterfall(self):
return self._spectrum_analyser.spectrogram.get_plot()
def _safe_restart(self):
tile_number = self._adc_description[0]
self._tile.ShutDown()
running = self._tile._parent.IPStatus['ADCTileStatus'][tile_number]['PowerUpState']
while running:
time.sleep(0.1)
running = self._tile._parent.IPStatus['ADCTileStatus'][tile_number]['PowerUpState']
self._tile.StartUp()
running = self._tile._parent.IPStatus['ADCTileStatus'][tile_number]['PowerUpState']
while not running:
time.sleep(0.1)
running = self._tile._parent.IPStatus['ADCTileStatus'][tile_number]['PowerUpState']
def _create_mmio_property(idx):
def _get(self):
return self._spectrum_analyser.plot.display_ddc_plan[idx]
def _set(self, value):
if value:
self._spectrum_analyser.plot.display_ddc_plan[idx] = True
else:
self._spectrum_analyser.plot.display_ddc_plan[idx] = False
self._spectrum_analyser.plot.update_ddc_plan()
return property(_get, _set)
for idx, name in enumerate(_freq_planner_props):
setattr(RadioAnalyser, name, _create_mmio_property(idx))
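# Same mechanism as for RadioOfdmAnalyser above: expose each frequency-planner display flag as a
# property on RadioAnalyser.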
class RadioAnalyserGUI():
def __init__(self,
adc_tile,
adc_block,
adc_description,
spectrum_analyser,
decimator):
self._widgets = {}
self._accordions = {}
self._running_update = False
self._update_que = []
self._stopped = False
self._runtime_status = {'spectrum_enable' : False, 'waterfall_enable' : False}
self.analyser = RadioAnalyser(adc_tile=adc_tile,
adc_block=adc_block,
adc_description=adc_description,
spectrum_analyser=spectrum_analyser,
decimator=decimator)
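        # RadioAnalyserGUI mirrors RadioOfdmAnalyserGUI but drives a plain RadioAnalyser, so it
        # has no constellation plot, OFDM receiver-reset control or inspector.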
self._config = {'centre_frequency' : 819,
'nyquist_stopband' : 80,
'decimation_factor' : self.analyser.decimation_factor,
'calibration_mode' : self.analyser.calibration_mode,
'fftsize' : 2048,
'spectrum_type' : self.analyser.spectrum_type,
'spectrum_units' : self.analyser.spectrum_units,
'window' : 'hanning',
'height' : self.analyser.height,
'spectrum_enable' : self.analyser.spectrum_enable,
'waterfall_enable' : self.analyser.waterfall_enable,
'dma_enable' : self.analyser.dma_enable,
'update_frequency' : 10,
'plotly_theme' : self.analyser.plotly_theme,
'line_colour' : self.analyser.line_colour,
'zmin' : self.analyser.zmin,
'zmax' : self.analyser.zmax,
'quality' : self.analyser.quality,
'width' : self.analyser.width,
'post_process' : 'average',
'number_frames' : 6,
'display_max' : False,
'display_min' : False,
'number_max_indices' : 1,
'number_min_indices' : 1,
'colour_map' : self.analyser.colour_map,
'spectrogram_performance' : 4,
'ymin' : self.analyser.ymin,
'ymax' : self.analyser.ymax,
'enable_rx_alias' : False,
'enable_rx_image' : False,
'enable_hd2' : False,
'enable_hd2_image' : False,
'enable_hd3' : False,
'enable_hd3_image' : False,
'enable_pll_mix_up' : False,
'enable_pll_mix_up_image' : False,
'enable_pll_mix_down' : False,
'enable_pll_mix_down_image' : False,
'ddc_centre_frequency' : 0,
'ddc_plan_hd2_db' : self.analyser.ddc_plan_hd2_db,
'ddc_plan_hd3_db' : self.analyser.ddc_plan_hd3_db,
'ddc_plan_nsd_db' : self.analyser.ddc_plan_nsd_db,
'ddc_plan_pll_mix_db' : self.analyser.ddc_plan_pll_mix_db,
'ddc_plan_off_spur_db' : self.analyser.ddc_plan_off_spur_db,
'ddc_plan_tis_spur_db' : self.analyser.ddc_plan_tis_spur_db}
self._initialise_frontend()
@property
def config(self):
return self._config
@config.setter
def config(self, config_dict):
self._update_config(config_dict)
def start(self):
self.config = {'spectrum_enable' : self._runtime_status['spectrum_enable'],
'waterfall_enable' : self._runtime_status['waterfall_enable']}
self._stopped = False
def stop(self):
if not self._stopped:
self._runtime_status.update({'spectrum_enable' : self._config['spectrum_enable'],
'waterfall_enable' : self._config['waterfall_enable']})
self.config = {'spectrum_enable' : False,
'waterfall_enable' : False}
self._stopped = True
def _initialise_frontend(self):
self._widgets.update({'ddc_centre_frequency' :
FloatText(callback=self._update_config,
value=self._config['ddc_centre_frequency'],
min_value=0,
max_value=self.analyser._block.BlockStatus['SamplingFreq']*1e3,
step=1,
dict_id='ddc_centre_frequency',
description='Centre Frequency (MHz):')})
self._widgets.update({'ddc_plan_hd2_db' :
FloatText(callback=self._update_config,
value=self._config['ddc_plan_hd2_db'],
min_value=-300,
max_value=300,
step=1,
dict_id='ddc_plan_hd2_db',
description='HD2 (dB)')})
self._widgets.update({'ddc_plan_hd3_db' :
FloatText(callback=self._update_config,
value=self._config['ddc_plan_hd3_db'],
min_value=-300,
max_value=300,
step=1,
dict_id='ddc_plan_hd3_db',
description='HD3 (dB)')})
self._widgets.update({'ddc_plan_nsd_db' :
FloatText(callback=self._update_config,
value=self._config['ddc_plan_nsd_db'],
min_value=-300,
max_value=300,
step=1,
dict_id='ddc_plan_nsd_db',
description='NSD (dBFs/Hz)')})
self._widgets.update({'ddc_plan_pll_mix_db' :
FloatText(callback=self._update_config,
value=self._config['ddc_plan_pll_mix_db'],
min_value=-300,
max_value=300,
step=1,
dict_id='ddc_plan_pll_mix_db',
description='PLL Ref Mixing (dB)')})
self._widgets.update({'ddc_plan_off_spur_db' :
FloatText(callback=self._update_config,
value=self._config['ddc_plan_off_spur_db'],
min_value=-300,
max_value=300,
step=1,
dict_id='ddc_plan_off_spur_db',
description='Offset Spur (dB)')})
self._widgets.update({'ddc_plan_tis_spur_db' :
FloatText(callback=self._update_config,
value=self._config['ddc_plan_tis_spur_db'],
min_value=-300,
max_value=300,
step=1,
dict_id='ddc_plan_tis_spur_db',
description='TI Spur (dB)')})
for idx, freq_prop in enumerate(_freq_planner_props):
self._widgets.update({freq_prop :
CheckBox(callback=self._update_config,
description=_freq_planner_desc[idx],
value=self._config[freq_prop],
indent=False,
layout_width='150px',
dict_id=freq_prop)})
self._widgets.update({'decimation_factor' :
DropDown(callback=self._update_config,
options=[2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048],
value=self._config['decimation_factor'],
dict_id='decimation_factor',
description='Decimation Factor:')})
self._widgets.update({'spectrum_type' :
DropDown(callback=self._update_config,
options=[('Power Spectrum'),
('Power Spectral Density')],
value=self._config['spectrum_type'],
dict_id='spectrum_type',
description='Spectrum Type:',
description_width='100px')})
self._widgets.update({'spectrum_units' :
DropDown(callback=self._update_config,
options=[('dBFS'),
('dBm')],
value=self._config['spectrum_units'],
dict_id='spectrum_units',
description='Spectrum Units:',
description_width='100px')})
self._widgets.update({'post_process' :
DropDown(callback=self._update_config,
options=[('None', 'none'),
('Maximum Hold', 'max'),
('Minimum Hold', 'min'),
('Running Average', 'average'),
('Running Median', 'median')],
value=self._config['post_process'],
dict_id='post_process',
description='Post Processing:',
description_width='100px')})
self._widgets.update({'fftsize' :
DropDown(callback=self._update_config,
options=[64, 128, 256, 512, 1024, 2048, 4096, 8192],
value=4096,
dict_id='fftsize',
description = 'FFT Size:')})
self._widgets.update({'calibration_mode' :
DropDown(callback=self._update_config,
options=[('1 (Fs/2 ≤ ±30%)', 1),
('2 (Fs/2 > ±30%)', 2)],
value=self._config['calibration_mode'],
dict_id='calibration_mode',
description='Calibration Mode:')})
self._widgets.update({'window' :
DropDown(callback=self._update_config,
options=[('Rectangular', 'rectangular'),
('Bartlett', 'bartlett'),
('Blackman', 'blackman'),
('Hamming', 'hamming'),
('Hanning', 'hanning')],
value='rectangular',
dict_id='window',
description='')})
self._widgets.update({'plotly_theme' :
DropDown(callback=self._update_config,
options=[('Seaborn', 'seaborn'),
('Simple White', 'simple_white'),
('Plotly', 'plotly'),
('Plotly White', 'plotly_white'),
('Plotly Dark', 'plotly_dark')],
value='plotly',
dict_id='plotly_theme',
description='Plotly Theme:')})
self._widgets.update({'colour_map' :
DropDown(callback=self._update_config,
options=[('Grey' , 'gray'),
('Spring' , 'spring'),
('Summer' , 'summer'),
('Autumn' , 'autumn'),
('Winter' , 'winter'),
('Cool' , 'cool'),
('Hot' , 'hot'),
('Copper' , 'copper'),
('Rainbow', 'rainbow'),
('Jet' , 'jet')],
value='gray',
dict_id='colour_map',
description='Colour Map:',
description_width='100px')})
self._widgets.update({'line_colour' :
DropDown(callback=self._update_config,
options=list(mcolors.CSS4_COLORS),
value='white',
dict_id='line_colour',
description='Line Colour:')})
self._widgets.update({'line_fill' :
DropDown(callback=self._update_config,
options=list(mcolors.CSS4_COLORS),
value='lightpink',
dict_id='line_fill',
description='Line Fill:')})
self._widgets.update({'spectrogram_performance' :
DropDown(callback=self._update_config,
options=[('Low', 8),
('Medium', 4),
('High', 2)],
value=2,
dict_id='spectrogram_performance',
description='Resolution:',
description_width='100px')})
self._widgets.update({'number_max_indices' :
IntText(callback=self._update_config,
value=self._config['number_max_indices'],
min_value=1,
max_value=64,
step=1,
dict_id='number_max_indices',
description='Number of Maximums:')})
self._widgets.update({'number_min_indices' :
IntText(callback=self._update_config,
value=self._config['number_min_indices'],
min_value=1,
max_value=64,
step=1,
dict_id='number_min_indices',
description='Number of Minimums:')})
self._widgets.update({'number_frames' :
FloatText(callback=self._update_config,
value=self._config['number_frames'],
min_value=1,
max_value=64,
step=1,
dict_id='number_frames',
description='Number Frames:',
description_width='100px')})
self._widgets.update({'ymin' :
FloatText(callback=self._update_config,
value=self._config['ymin'],
min_value=-300,
max_value=300,
step=1,
dict_id='ymin',
description='Y-Low (dB):',
description_width='100px')})
self._widgets.update({'ymax' :
FloatText(callback=self._update_config,
value=self._config['ymax'],
min_value=-300,
max_value=300,
step=1,
dict_id='ymax',
description='Y-High (dB):',
description_width='100px')})
self._widgets.update({'centre_frequency' :
FloatText(callback=self._update_config,
value=self._config['centre_frequency'],
min_value=0,
max_value=self.analyser._block.BlockStatus['SamplingFreq']*1e3,
step=1,
dict_id='centre_frequency',
description='Centre Frequency (MHz):')})
self._widgets.update({'nyquist_stopband' :
FloatText(callback=self._update_config,
value=self._config['nyquist_stopband'],
min_value=50,
max_value=100,
step=1,
dict_id='nyquist_stopband',
description='Nyquist Stopband (%):')})
self._widgets.update({'height' :
FloatText(callback=self._update_config,
value=self._config['height'],
min_value=200,
max_value=2160,
step=1,
dict_id='height',
description='Plot Height (Px):')})
self._widgets.update({'width' :
FloatText(callback=self._update_config,
value=self._config['width'],
min_value=400,
max_value=4096,
step=1,
dict_id='width',
description='Plot Width (Px):')})
#self._widgets.update({'update_frequency' :
# FloatText(callback=self._update_config,
# value=self._config['update_frequency'],
# min_value=5,
# max_value=12,
# step=1,
# dict_id='update_frequency',
# description='Update Frequency:')})
self._widgets.update({'update_frequency' :
DropDown(callback=self._update_config,
options=[('Low', 5),
('Medium', 10),
('High', 15)],
value=5,
dict_id='update_frequency',
description='Plot Performance:')})
self._widgets.update({'zmin' :
FloatText(callback=self._update_config,
value=self._config['zmin'],
min_value=-300,
max_value=300,
step=1,
dict_id='zmin',
description='Z-Low (dB):',
description_width='100px')})
self._widgets.update({'zmax' :
FloatText(callback=self._update_config,
value=self._config['zmax'],
min_value=-300,
max_value=300,
step=1,
dict_id='zmax',
description='Z-High (dB):',
description_width='100px')})
self._widgets.update({'quality' :
FloatText(callback=self._update_config,
value=self._config['quality'],
min_value=80,
max_value=100,
step=1,
dict_id='quality',
description='Quality (%):',
description_width='100px')})
self._widgets.update({'dma_enable' :
Button(callback=self._update_config,
description_on = 'On',
description_off = 'Off',
state=False,
dict_id='dma_enable')})
self._widgets.update({'spectrum_enable' :
Button(callback=self._update_config,
description_on = 'On',
description_off = 'Off',
state=False,
dict_id='spectrum_enable')})
self._widgets.update({'waterfall_enable' :
Button(callback=self._update_config,
description_on = 'On',
description_off = 'Off',
state=False,
dict_id='waterfall_enable')})
self._widgets.update({'sample_frequency_label' :
Label(value=str((self.analyser.sample_frequency/self.analyser.decimation_factor)*1e-6),
svalue='Sample Frequency: ',
evalue=' MHz',
dict_id='sample_frequency_label')})
self._widgets.update({'resolution_bandwidth_label' :
Label(value=str(((self.analyser.sample_frequency/self.analyser.decimation_factor)/ \
self.analyser.fftsize)*1e-3),
svalue='Frequency Resolution: ',
evalue=' kHz',
dict_id='resolution_bandwidth_label')})
self._widgets.update({'display_max' :
CheckBox(callback=self._update_config,
description='Display Maximum',
value=self._config['display_max'],
dict_id='display_max')})
self._widgets.update({'display_min' :
CheckBox(callback=self._update_config,
description='Display Minimum',
value=self._config['display_min'],
dict_id='display_min')})
self._window_plot = go.FigureWidget(layout={'hovermode' : 'closest',
'height' : 225,
'width' : 300,
'margin' : {
't':0, 'b':20, 'l':0, 'r':0
},
'showlegend' : False,
},
data=[{
'x': np.arange(self.analyser.fftsize),
'y': np.ones(self.analyser.fftsize),
'line':{
'color' : 'palevioletred',
'width' : 2
},
'fill' : 'tozeroy',
'fillcolor' : 'rgba(128, 128, 128, 0.5)'
}])
self._accordions.update({'properties' :
ipw.Accordion(children=[ipw.HBox(
[ipw.VBox([ipw.Label(value='Spectrum Analyzer: ', layout=ipw.Layout(width='150px')),
ipw.Label(value='Spectrogram: ', layout=ipw.Layout(width='150px'))]),
ipw.VBox([self._widgets['spectrum_enable'].get_widget(),
self._widgets['waterfall_enable'].get_widget()])],
layout=ipw.Layout(justify_content='space-around')),
ipw.VBox([self._widgets['centre_frequency'].get_widget(),
self._widgets['decimation_factor'].get_widget(),
self._widgets['fftsize'].get_widget()]),
ipw.VBox([self._widgets['post_process'].get_widget(),
self._widgets['number_frames'].get_widget(),
self._widgets['ymin'].get_widget(),
self._widgets['ymax'].get_widget()]),
ipw.VBox([ipw.Label(value='Experimental Control Panel'),
self._widgets['ddc_centre_frequency'].get_widget(),
ipw.HBox([
ipw.VBox([self._widgets[_freq_planner_props[i]].get_widget() for i in range(0,int(len(_freq_planner_props)/2))]),
ipw.VBox([self._widgets[_freq_planner_props[i]].get_widget() for i in range(int(len(_freq_planner_props)/2),len(_freq_planner_props))])
])
]),
ipw.VBox([self._widgets['spectrogram_performance'].get_widget(),
self._widgets['colour_map'].get_widget(),
self._widgets['zmin'].get_widget(),
self._widgets['zmax'].get_widget()]),
ipw.VBox([self._window_plot,
self._widgets['window'].get_widget()]),
ipw.VBox([self._widgets['nyquist_stopband'].get_widget(),
self._widgets['height'].get_widget(),
self._widgets['width'].get_widget(),
self._widgets['update_frequency'].get_widget()])
])})
""" Frequency Planner Widgets
ipw.VBox([self._widgets['ddc_centre_frequency'].get_widget(),
self._widgets['ddc_plan_hd2_db'].get_widget(),
self._widgets['ddc_plan_hd3_db'].get_widget(),
self._widgets['ddc_plan_pll_mix_db'].get_widget(),
self._widgets['ddc_plan_off_spur_db'].get_widget(),
self._widgets['ddc_plan_tis_spur_db'].get_widget(),
self._widgets['ddc_plan_nsd_db'].get_widget(),
ipw.HBox([
ipw.VBox([self._widgets[_freq_planner_props[i]].get_widget() for i in range(0,int(len(_freq_planner_props)/2))]),
ipw.VBox([self._widgets[_freq_planner_props[i]].get_widget() for i in range(int(len(_freq_planner_props)/2),len(_freq_planner_props))])
])
]),
"""
self._accordions['properties'].set_title(0, 'System')
self._accordions['properties'].set_title(1, 'Receiver')
self._accordions['properties'].set_title(2, 'Spectrum Analyzer')
self._accordions['properties'].set_title(3, 'Frequency Planner')
self._accordions['properties'].set_title(4, 'Spectrogram')
self._accordions['properties'].set_title(5, 'Window Settings')
self._accordions['properties'].set_title(6, 'Plot Settings')
self._update_config(self._config)
def _update_config(self, config_dict):
for key in config_dict.keys():
if key not in self._config:
                raise KeyError('Key {} not in dictionary.'.format(key))
self._config.update(config_dict)
self._update_que.append(config_dict.keys())
if not self._running_update:
self._running_update = True
self._update_frontend()
def _update_frontend(self):
if self._update_que:
plot_running = self._config['spectrum_enable']
self.analyser.spectrum_enable = False
while self.analyser.dma_status != 32:
time.sleep(0.1)
while self._running_update:
keys = self._update_que.pop(0)
for key in keys:
if key in self._config:
if key in ['centre_frequency', 'decimation_factor', 'quality']:
self._widgets['waterfall_enable'].value = False
self.analyser.waterfall_enable = False
setattr(self.analyser, key, self._config[key])
self._widgets[key].value = self._config[key]
if key in ['plotly_theme', 'line_colour', 'decimation_factor',
'spectrum_enable', 'waterfall_enable']:
self._update_widgets(key)
if key in ['fftsize', 'window']:
self._update_figurewidgets(key)
self._update_textwidgets()
time.sleep(0.2)
if not self._update_que:
self.analyser.spectrum_enable = plot_running
self._running_update = False
self._running_update = False
def _update_textwidgets(self):
self._widgets['sample_frequency_label'].value = str((self.analyser.sample_frequency/ \
self.analyser.decimation_factor)*1e-6)
self._widgets['resolution_bandwidth_label'].value = str(((self.analyser.sample_frequency/ \
self.analyser.decimation_factor)/self.analyser.fftsize)*1e-3)
def _update_figurewidgets(self, key):
if key in ['fftsize']:
self._window_plot.data[0].x = np.arange(self.analyser.fftsize)
self._window_plot.data[0].y = self.analyser.spectrum_window
elif key in ['window']:
self._window_plot.data[0].y = self.analyser.spectrum_window
def _update_widgets(self, key):
if key in ['line_colour']:
self._window_plot.data[0].line.color = self._config['line_colour']
self._widgets['dma_enable'].button_colour = self._config['line_colour']
self._widgets['spectrum_enable'].button_colour = self._config['line_colour']
self._widgets['waterfall_enable'].button_colour = self._config['line_colour']
elif key in ['plotly_theme']:
self._window_plot.layout.template = self._config['plotly_theme']
elif key in ['decimation_factor']:
step_list = [10, 1, 1, 1, 0.1, 0.1, 0.1, 0.01, 0.01, 0.01, 0.001]
self._widgets['centre_frequency'].step = step_list[int(np.log2(self._config['decimation_factor']) - 1)]
elif key in ['spectrum_enable']:
if self._config['spectrum_enable']:
self._widgets['dma_enable'].configure_state(True)
else:
if not self._config['waterfall_enable']:
self._widgets['dma_enable'].configure_state(False)
elif key in ['waterfall_enable']:
if self._config['waterfall_enable']:
self._widgets['dma_enable'].configure_state(True)
else:
if not self._config['spectrum_enable']:
self._widgets['dma_enable'].configure_state(False)
def spectrum_analyser(self, config=None):
if config is not None:
self.config = config
return ipw.VBox([ipw.HBox([ipw.VBox([self.analyser.spectrum(),
self.analyser.waterfall(),
ipw.HBox([self._widgets['sample_frequency_label'].get_widget(),
ipw.Label(value=' | '),
self._widgets['resolution_bandwidth_label'].get_widget()],
layout=ipw.Layout(justify_content='flex-end'))
]),
self._accordions['properties']
])
])
|
py | 1a32aca416a52445a0cd13b7d89392948ebcf337 | #!/usr/bin/env python
import sys
from nfbuildwindows import NFBuildWindows
def main():
library_target = 'NFDecoder'
nfbuild = NFBuildWindows()
nfbuild.build_print("Installing Dependencies")
nfbuild.installDependencies(android=True)
# Make our main build artifacts
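    # Two passes follow: the project is regenerated with different architecture flags before the
    # library target is rebuilt each time.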
nfbuild.build_print("C++ Build Start (x86)")
nfbuild.makeBuildDirectory()
nfbuild.generateProject(android=True, android_arm=False)
targets = [library_target]
for target in targets:
nfbuild.buildTarget(target)
nfbuild.build_print("C++ Build Start (arm64)")
nfbuild.makeBuildDirectory()
nfbuild.generateProject(android=False, android_arm=True)
targets = [library_target]
for target in targets:
nfbuild.buildTarget(target)
if __name__ == "__main__":
main()
|
py | 1a32acaa75f77356e6bf7432fc61c9646cf4740a | import random
from collections import deque
from typing import List, Tuple
from IPython.display import clear_output
import matplotlib.pyplot as plt
import numpy as np
def epsilon(current_episode, num_episodes):
"""
    Epsilon decays as the current episode number grows because we want the agent to
    explore more in earlier episodes (when it has not learned anything yet) and to
    explore less in later episodes (when it has learned something), i.e. we assume
    that the episode number is directly related to learning progress. Note that the
    active schedule below ignores num_episodes; only the commented-out linear
    schedule uses it.
"""
# return 1 - (current_episode/num_episodes)
return .5 * .9**current_episode
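    # With this schedule (illustrative values): epsilon(0) = 0.5, epsilon(1) = 0.45,
    # epsilon(10) ~= 0.17, so exploration decays geometrically by 10% per episode.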
def update_q_prime(Qprincipal, Qtarget):
for v, v_ in zip(Qprincipal.model.parameters(), Qtarget.model.parameters()):
v_.data.copy_(v.data)
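# update_q_prime copies the principal (online) network's weights into the target network, the
# periodic hard-synchronisation step used in DQN-style training.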
def plot_episode_rewards(values, title=''):
""" Plot the reward curve and histogram of results over time."""
# Update the window after each episode
clear_output(wait=True)
# Define the figure
f, ax = plt.subplots(nrows=1, ncols=2, figsize=(12,5))
f.suptitle(title)
ax[0].plot(values, label='score per run')
ax[0].axhline(195, c='red', ls='--', label='goal')
ax[0].set_xlabel('Episodes')
ax[0].set_ylabel('Reward')
x = range(len(values))
ax[0].legend()
# Calculate the trend
try:
z = np.polyfit(x, values, 1)
p = np.poly1d(z)
ax[0].plot(x, p(x), "--", label='trend')
    except Exception:
        # np.polyfit needs at least two points; skip the trend line until enough data exists.
        print('')
# Plot the histogram of results
ax[1].hist(values[-50:])
ax[1].axvline(195, c='red', label='goal')
ax[1].set_xlabel('Scores per Last 50 Episodes')
ax[1].set_ylabel('Frequency')
ax[1].legend()
plt.show()
class Experience(Tuple):
"""A tuple containing (state, action, reward, done, next_state).
state (Tensor)
action (int)
reward (float)
done (bool)
next_state (Tensor)
"""
class ReplayBuffer(object):
def __init__(self, maxlength: int):
"""
maxlength: max number of tuples to store in the buffer
if there are more tuples than maxlength, pop out the oldest tuples
"""
self.buffer = deque()
self.number: int = 0
self.maxlength: int = maxlength
def __len__(self) -> int:
return self.number
def append(self, experience: Experience):
"""
        Append a new experience tuple to the buffer.
        experience: a tuple of the form (state, action, reward, done, next_state)
"""
self.buffer.append(experience)
self.number += 1
def pop(self):
"""
pop out the oldest tuples if self.number > self.maxlength
"""
while self.number > self.maxlength:
self.buffer.popleft()
self.number -= 1
def sample(self, batchsize: int) -> List[Experience]:
"""Samples 'batchsize' experience tuples
Args:
batchsize (int)
Returns:
(List[Experience])
"""
minibatch: List[Experience] = random.sample(self.buffer, batchsize)
return minibatch
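    # Illustrative usage (the variable names here are assumptions, not part of this module):
    #   buffer = ReplayBuffer(maxlength=10000)
    #   buffer.append(Experience((state, action, reward, done, next_state)))
    #   buffer.pop()                      # trim the oldest entries if over capacity
    #   minibatch = buffer.sample(batchsize=32)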
|
py | 1a32ad3e207f61f694f57613b1892f69f611da0a | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd
"""Unittests for monorail.tracker.issuedetailezt."""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import logging
import mock
import mox
import time
import unittest
import settings
from businesslogic import work_env
from proto import features_pb2
from features import hotlist_views
from features import send_notifications
from framework import authdata
from framework import exceptions
from framework import framework_views
from framework import framework_helpers
from framework import urls
from framework import permissions
from framework import profiler
from framework import sorting
from framework import template_helpers
from proto import project_pb2
from proto import tracker_pb2
from proto import user_pb2
from services import service_manager
from services import issue_svc
from services import tracker_fulltext
from testing import fake
from testing import testing_helpers
from tracker import issuedetailezt
from tracker import tracker_constants
from tracker import tracker_helpers
class GetAdjacentIssueTest(unittest.TestCase):
def setUp(self):
self.cnxn = 'fake cnxn'
self.services = service_manager.Services(
config=fake.ConfigService(),
issue=fake.IssueService(),
user=fake.UserService(),
project=fake.ProjectService(),
issue_star=fake.IssueStarService(),
spam=fake.SpamService())
self.services.project.TestAddProject('proj', project_id=789)
self.mr = testing_helpers.MakeMonorailRequest()
self.mr.auth.user_id = 111
self.mr.auth.effective_ids = {111}
self.mr.me_user_id = 111
self.work_env = work_env.WorkEnv(
self.mr, self.services, 'Testing phase')
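  # The tests below stub WorkEnv.FindIssuePositionInSearch (or GetIssuePositionInHotlist) to
  # return [prev_iid, index, next_iid, total] and check that GetAdjacentIssue resolves to the
  # expected neighbouring issue.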
def testGetAdjacentIssue_PrevIssue(self):
cur_issue = fake.MakeTestIssue(789, 2, 'sum', 'New', 111, issue_id=78902)
next_issue = fake.MakeTestIssue(789, 3, 'sum', 'New', 111, issue_id=78903)
prev_issue = fake.MakeTestIssue(789, 1, 'sum', 'New', 111, issue_id=78901)
self.services.issue.TestAddIssue(cur_issue)
self.services.issue.TestAddIssue(next_issue)
self.services.issue.TestAddIssue(prev_issue)
with self.work_env as we:
we.FindIssuePositionInSearch = mock.Mock(
return_value=[78901, 1, 78903, 3])
actual_issue = issuedetailezt.GetAdjacentIssue(
self.mr, we, cur_issue)
self.assertEqual(prev_issue, actual_issue)
we.FindIssuePositionInSearch.assert_called_once_with(cur_issue)
def testGetAdjacentIssue_NextIssue(self):
cur_issue = fake.MakeTestIssue(789, 2, 'sum', 'New', 111, issue_id=78902)
next_issue = fake.MakeTestIssue(789, 3, 'sum', 'New', 111, issue_id=78903)
prev_issue = fake.MakeTestIssue(789, 1, 'sum', 'New', 111, issue_id=78901)
self.services.issue.TestAddIssue(cur_issue)
self.services.issue.TestAddIssue(next_issue)
self.services.issue.TestAddIssue(prev_issue)
with self.work_env as we:
we.FindIssuePositionInSearch = mock.Mock(
return_value=[78901, 1, 78903, 3])
actual_issue = issuedetailezt.GetAdjacentIssue(
self.mr, we, cur_issue, next_issue=True)
self.assertEqual(next_issue, actual_issue)
we.FindIssuePositionInSearch.assert_called_once_with(cur_issue)
def testGetAdjacentIssue_NotFound(self):
cur_issue = fake.MakeTestIssue(789, 2, 'sum', 'New', 111, issue_id=78902)
prev_issue = fake.MakeTestIssue(789, 1, 'sum', 'New', 111, issue_id=78901)
self.services.issue.TestAddIssue(cur_issue)
self.services.issue.TestAddIssue(prev_issue)
with self.work_env as we:
we.FindIssuePositionInSearch = mock.Mock(
return_value=[78901, 1, 78903, 3])
with self.assertRaises(exceptions.NoSuchIssueException):
issuedetailezt.GetAdjacentIssue(
self.mr, we, cur_issue, next_issue=True)
we.FindIssuePositionInSearch.assert_called_once_with(cur_issue)
def testGetAdjacentIssue_Hotlist(self):
cur_issue = fake.MakeTestIssue(789, 2, 'sum', 'New', 111, issue_id=78902)
next_issue = fake.MakeTestIssue(789, 3, 'sum', 'New', 111, issue_id=78903)
prev_issue = fake.MakeTestIssue(789, 1, 'sum', 'New', 111, issue_id=78901)
self.services.issue.TestAddIssue(cur_issue)
self.services.issue.TestAddIssue(next_issue)
self.services.issue.TestAddIssue(prev_issue)
hotlist = fake.Hotlist('name', 678, owner_ids=[111])
with self.work_env as we:
we.GetIssuePositionInHotlist = mock.Mock(
return_value=[78901, 1, 78903, 3])
actual_issue = issuedetailezt.GetAdjacentIssue(
self.mr, we, cur_issue, hotlist=hotlist, next_issue=True)
self.assertEqual(next_issue, actual_issue)
we.GetIssuePositionInHotlist.assert_called_once_with(
cur_issue, hotlist, self.mr.can, self.mr.sort_spec,
self.mr.group_by_spec)
class FlipperRedirectTest(unittest.TestCase):
def setUp(self):
self.services = service_manager.Services(
config=fake.ConfigService(),
features=fake.FeaturesService(),
issue=fake.IssueService(),
user=fake.UserService(),
project=fake.ProjectService())
self.project = self.services.project.TestAddProject(
'proj', project_id=987, committer_ids=[111])
self.next_servlet = issuedetailezt.FlipperNext(
'req', 'res', services=self.services)
self.prev_servlet = issuedetailezt.FlipperPrev(
'req', 'res', services=self.services)
self.list_servlet = issuedetailezt.FlipperList(
'req', 'res', services=self.services)
mr = testing_helpers.MakeMonorailRequest(project=self.project)
mr.local_id = 123
mr.me_user_id = 111
self.next_servlet.mr = mr
self.prev_servlet.mr = mr
self.list_servlet.mr = mr
self.fake_issue_1 = fake.MakeTestIssue(987, 123, 'summary', 'New', 111,
project_name='rutabaga')
self.services.issue.TestAddIssue(self.fake_issue_1)
self.fake_issue_2 = fake.MakeTestIssue(987, 456, 'summary', 'New', 111,
project_name='rutabaga')
self.services.issue.TestAddIssue(self.fake_issue_2)
self.fake_issue_3 = fake.MakeTestIssue(987, 789, 'summary', 'New', 111,
project_name='potato')
self.services.issue.TestAddIssue(self.fake_issue_3)
self.next_servlet.redirect = mock.Mock()
self.prev_servlet.redirect = mock.Mock()
self.list_servlet.redirect = mock.Mock()
@mock.patch('tracker.issuedetailezt.GetAdjacentIssue')
def testFlipperNext(self, patchGetAdjacentIssue):
patchGetAdjacentIssue.return_value = self.fake_issue_2
self.next_servlet.mr.GetIntParam = mock.Mock(return_value=None)
self.next_servlet.get(project_name='proj', viewed_username=None)
self.next_servlet.mr.GetIntParam.assert_called_once_with('hotlist_id')
patchGetAdjacentIssue.assert_called_once()
self.next_servlet.redirect.assert_called_once_with(
'/p/rutabaga/issues/detail?id=456')
@mock.patch('tracker.issuedetailezt.GetAdjacentIssue')
def testFlipperNext_Hotlist(self, patchGetAdjacentIssue):
patchGetAdjacentIssue.return_value = self.fake_issue_3
self.next_servlet.mr.GetIntParam = mock.Mock(return_value=123)
# TODO(jeffcarp): Mock hotlist_id param on path here.
self.next_servlet.get(project_name='proj', viewed_username=None)
self.next_servlet.mr.GetIntParam.assert_called_with('hotlist_id')
self.next_servlet.redirect.assert_called_once_with(
'/p/potato/issues/detail?id=789')
@mock.patch('tracker.issuedetailezt.GetAdjacentIssue')
def testFlipperPrev(self, patchGetAdjacentIssue):
patchGetAdjacentIssue.return_value = self.fake_issue_2
self.next_servlet.mr.GetIntParam = mock.Mock(return_value=None)
self.prev_servlet.get(project_name='proj', viewed_username=None)
self.prev_servlet.mr.GetIntParam.assert_called_with('hotlist_id')
patchGetAdjacentIssue.assert_called_once()
self.prev_servlet.redirect.assert_called_once_with(
'/p/rutabaga/issues/detail?id=456')
@mock.patch('tracker.issuedetailezt.GetAdjacentIssue')
def testFlipperPrev_Hotlist(self, patchGetAdjacentIssue):
patchGetAdjacentIssue.return_value = self.fake_issue_3
self.prev_servlet.mr.GetIntParam = mock.Mock(return_value=123)
# TODO(jeffcarp): Mock hotlist_id param on path here.
self.prev_servlet.get(project_name='proj', viewed_username=None)
self.prev_servlet.mr.GetIntParam.assert_called_with('hotlist_id')
self.prev_servlet.redirect.assert_called_once_with(
'/p/potato/issues/detail?id=789')
@mock.patch('tracker.issuedetailezt._ComputeBackToListURL')
def testFlipperList(self, patch_ComputeBackToListURL):
patch_ComputeBackToListURL.return_value = '/p/test/issues/list'
self.list_servlet.mr.GetIntParam = mock.Mock(return_value=None)
self.list_servlet.get()
self.list_servlet.mr.GetIntParam.assert_called_with('hotlist_id')
patch_ComputeBackToListURL.assert_called_once()
self.list_servlet.redirect.assert_called_once_with(
'/p/test/issues/list')
@mock.patch('tracker.issuedetailezt._ComputeBackToListURL')
def testFlipperList_Hotlist(self, patch_ComputeBackToListURL):
patch_ComputeBackToListURL.return_value = '/p/test/issues/list'
self.list_servlet.mr.GetIntParam = mock.Mock(return_value=123)
self.list_servlet.get()
self.list_servlet.mr.GetIntParam.assert_called_with('hotlist_id')
self.list_servlet.redirect.assert_called_once_with(
'/p/test/issues/list')
class ShouldShowFlipperTest(unittest.TestCase):
def setUp(self):
self.cnxn = 'fake cnxn'
def VerifyShouldShowFlipper(
self, expected, query, sort_spec, can, create_issues=0):
"""Instantiate a _Flipper and check if makes a pipeline or not."""
services = service_manager.Services(
config=fake.ConfigService(),
issue=fake.IssueService(),
project=fake.ProjectService(),
user=fake.UserService())
project = services.project.TestAddProject(
'proj', project_id=987, committer_ids=[111])
mr = testing_helpers.MakeMonorailRequest(project=project)
mr.query = query
mr.sort_spec = sort_spec
mr.can = can
mr.project_name = project.project_name
mr.project = project
for idx in range(create_issues):
_local_id, _ = services.issue.CreateIssue(
self.cnxn, services, project.project_id,
'summary_%d' % idx, 'status', 111, [], [], [], [], 111,
'description_%d' % idx)
self.assertEqual(expected, issuedetailezt._ShouldShowFlipper(mr, services))
def testShouldShowFlipper_RegularSizedProject(self):
# If the user is looking for a specific issue, no flipper.
self.VerifyShouldShowFlipper(
False, '123', '', tracker_constants.OPEN_ISSUES_CAN)
self.VerifyShouldShowFlipper(False, '123', '', 5)
self.VerifyShouldShowFlipper(
False, '123', 'priority', tracker_constants.OPEN_ISSUES_CAN)
# If the user did a search or sort or all in a small can, show flipper.
self.VerifyShouldShowFlipper(
True, 'memory leak', '', tracker_constants.OPEN_ISSUES_CAN)
self.VerifyShouldShowFlipper(
True, 'id=1,2,3', '', tracker_constants.OPEN_ISSUES_CAN)
    # Any can other than 1 or 2 is doing a query and so it should have a
    # fairly narrow result set size. 5 is issues starred by me.
self.VerifyShouldShowFlipper(True, '', '', 5)
self.VerifyShouldShowFlipper(
True, '', 'status', tracker_constants.OPEN_ISSUES_CAN)
# In a project without a huge number of issues, still show the flipper even
# if there was no specific query.
self.VerifyShouldShowFlipper(
True, '', '', tracker_constants.OPEN_ISSUES_CAN)
def testShouldShowFlipper_LargeSizedProject(self):
settings.threshold_to_suppress_prev_next = 1
# In a project that has tons of issues, save time by not showing the
# flipper unless there was a specific query, sort, or can.
self.VerifyShouldShowFlipper(
False, '', '', tracker_constants.ALL_ISSUES_CAN, create_issues=3)
self.VerifyShouldShowFlipper(
False, '', '', tracker_constants.OPEN_ISSUES_CAN, create_issues=3)
|
py | 1a32ad7e8b148eb1790f256ae1f907cf87f917a6 | """ The list of words used by DPG """
words = [ "and", "ask", "ass", "ape", "ate", "axe", "air", "aim", "ana", "awe", "act", "add", "age", "all", "ant",
"bat", "ban", "bar", "bed", "bee", "bet", "bit", "bug", "bob", "bot", "boy", "bud", "but",
"cab", "can", "cap", "cat", "car", "cog", "con", "cop", "cot", "cow", "coy", "cub", "cut",
"dad", "dam", "dan", "day", "den", "did", "dig", "dip", "doc", "dog", "don", "dot", "dry", "dug",
"ear", "eat", "egg", "ego", "elf", "elk", "elm", "end", "eye", "eve",
"fad", "fan", "far", "fat", "fax", "fig", "fit", "fix", "fly", "few", "foe", "fog", "for", "fur",
"gag", "gap", "gel", "gem", "get", "god", "goo", "got", "gum", "gun", "gut", "guy", "gym",
"hot", "how", "has", "had", "ham", "hat", "him", "her", "hit", "hop",
"ice", "icy", "ill", "ink", "inn", "ion", "its", "ivy",
"jam", "jar", "jaw", "jay", "jet", "jim", "joe", "jog", "jot", "joy", "jug",
"keg", "ken", "key", "kid", "kim", "kit", "kin",
"lab", "lad", "lap", "law", "lie", "lee", "let", "lip", "lob", "log", "lot", "low", "lug",
"mac", "mag", "map", "man", "mat", "max", "meg", "men", "met", "mom", "moo", "mop", "mow", "mud", "mug", "mut",
"nab", "nag", "nap", "net", "new", "nip", "nod", "not", "now", "nun", "nut",
"oak", "oat", "oar", "off", "oil", "old", "one", "our", "out", "own",
"pan", "pal", "pam", "pat", "pea", "pen", "pet", "pig", "pit", "pot",
"rag", "ray", "run", "ram", "ran", "rap", "rat", "rig", "rip", "rob", "ron", "rot",
"sad", "sag", "sam", "sat", "say", "see", "sex", "set", "she", "shy", "sin", "sir", "sit", "sky", "soy", "sun",
"tan", "tap", "tar", "tea", "ted", "too", "the", "tim", "tip", "toe", "tom", "toy",
"wag", "was", "wax", "way", "web", "wee", "wet", "why", "wig", "win", "wow", "won",
"yak", "yam", "yap", "yen", "yep", "yes", "yet", "yew", "you", "yum",
"zag", "zig", "zit", "zap", "zip", "zoo" ]
|
py | 1a32add1c2015636bafa91b86ed5f98205901d66 | import logging
from typing import List, Optional, Union, Tuple
from venidium.types.blockchain_format.program import Program, SerializedProgram
from venidium.types.generator_types import BlockGenerator, GeneratorArg, GeneratorBlockCacheInterface, CompressorArg
from venidium.util.ints import uint32, uint64
from venidium.wallet.puzzles.load_clvm import load_clvm
from venidium.wallet.puzzles.rom_bootstrap_generator import get_generator
GENERATOR_MOD = get_generator()
DECOMPRESS_BLOCK = load_clvm("block_program_zero.clvm", package_or_requirement="venidium.wallet.puzzles")
DECOMPRESS_PUZZLE = load_clvm("decompress_puzzle.clvm", package_or_requirement="venidium.wallet.puzzles")
# DECOMPRESS_CSE = load_clvm("decompress_coin_spend_entry.clvm", package_or_requirement="venidium.wallet.puzzles")
DECOMPRESS_CSE_WITH_PREFIX = load_clvm(
"decompress_coin_spend_entry_with_prefix.clvm", package_or_requirement="venidium.wallet.puzzles"
)
log = logging.getLogger(__name__)
def create_block_generator(
generator: SerializedProgram, block_heights_list: List[uint32], generator_block_cache: GeneratorBlockCacheInterface
) -> Optional[BlockGenerator]:
"""`create_block_generator` will returns None if it fails to look up any referenced block"""
generator_arg_list: List[GeneratorArg] = []
for i in block_heights_list:
previous_generator = generator_block_cache.get_generator_for_block_height(i)
if previous_generator is None:
log.error(f"Failed to look up generator for block {i}. Ref List: {block_heights_list}")
return None
generator_arg_list.append(GeneratorArg(i, previous_generator))
return BlockGenerator(generator, generator_arg_list)
def create_generator_args(generator_ref_list: List[SerializedProgram]) -> Program:
"""
`create_generator_args`: The format and contents of these arguments affect consensus.
"""
gen_ref_list = [bytes(g) for g in generator_ref_list]
return Program.to([gen_ref_list])
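# For reference (illustrative note, not part of the original module): given two
# serialized refs g1 and g2, the structure produced above is
# Program.to([[bytes(g1), bytes(g2)]]) -- a single argument wrapping the ref list.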
def create_compressed_generator(
original_generator: CompressorArg,
compressed_cse_list: List[List[Union[List[uint64], List[Union[bytes, None, Program]]]]],
) -> BlockGenerator:
"""
Bind the generator block program template to a particular reference block,
template bytes offsets, and SpendBundle.
"""
start = original_generator.start
end = original_generator.end
program = DECOMPRESS_BLOCK.curry(
DECOMPRESS_PUZZLE, DECOMPRESS_CSE_WITH_PREFIX, Program.to(start), Program.to(end), compressed_cse_list
)
generator_arg = GeneratorArg(original_generator.block_height, original_generator.generator)
return BlockGenerator(program, [generator_arg])
def setup_generator_args(self: BlockGenerator) -> Tuple[SerializedProgram, Program]:
args = create_generator_args(self.generator_refs())
return self.program, args
def run_generator(self: BlockGenerator, max_cost: int) -> Tuple[int, SerializedProgram]:
program, args = setup_generator_args(self)
return GENERATOR_MOD.run_safe_with_cost(max_cost, program, args)
def run_generator_unsafe(self: BlockGenerator, max_cost: int) -> Tuple[int, SerializedProgram]:
"""This mode is meant for accepting possibly soft-forked transactions into the mempool"""
program, args = setup_generator_args(self)
return GENERATOR_MOD.run_with_cost(max_cost, program, args)
|
py | 1a32adf81af18b1ad9e4600ca6dc544620746e91 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Worker process for running remote inference.
The worker wraps the inference model in an infinite loop: input features are
fetched via RPC at the top of the loop, and inference output is written back
at the bottom (again, via RPC).
"""
import abc
from contextlib import contextmanager
import sys
import time
import threading
from absl import flags
import grpc
import numpy as np
from proto import inference_service_pb2
from proto import inference_service_pb2_grpc
import tensorflow as tf
from tensorflow.python.training import saver
import dual_net
import features as features_lib
import go
from utils import dbg
flags.DEFINE_string("model", "", "Path to the TensorFlow model.")
flags.DEFINE_string("checkpoint_dir", "",
"Path to a directory containing TensorFlow model "
"checkpoints. The inference worker will monitor this "
"when a new checkpoint is found, load the model and use it "
"for futher inferences.")
flags.DEFINE_string("server_address", "localhost:50051",
"Inference server local address.")
flags.DEFINE_string("descriptor",
"proto/inference_service_py_pb2.pb.descriptor_set",
"Path to the InferenceService proto descriptor.")
flags.DEFINE_integer("parallel_tpus", 8,
"Number of TPU cores to run on in parallel.")
FLAGS = flags.FLAGS
# The default maximum receive RPC size is only 4MB, which isn't large enough
# for our messages.
GRPC_OPTIONS = [
("grpc.max_message_length", 50 * 1024 * 1024),
("grpc.max_receive_message_length", 50 * 1024 * 1024),
]
NUM_WORKER_THREADS = 2
class RwMutex(object):
"""A simple read/write mutex.
I'm surprised Python doesn't provide one of these by default.
"""
def __init__(self):
self._resource_lock = threading.Semaphore()
self._read_lock = threading.Semaphore()
self._read_count = 0
@contextmanager
def write_lock(self):
self._acquire_write()
try:
yield
finally:
self._release_write()
@contextmanager
def read_lock(self):
self._acquire_read()
try:
yield
finally:
self._release_read()
def _acquire_write(self):
self._resource_lock.acquire()
def _release_write(self):
self._resource_lock.release()
def _acquire_read(self):
with self._read_lock:
self._read_count += 1
if self._read_count == 1:
self._resource_lock.acquire()
def _release_read(self):
with self._read_lock:
self._read_count -= 1
if self._read_count == 0:
self._resource_lock.release()
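# Example usage (illustrative; the Session classes below use this pattern to
# guard self._sess during model reloads):
#
#   mutex = RwMutex()
#   with mutex.read_lock():
#       pass   # many readers (inference calls) may hold this concurrently
#   with mutex.write_lock():
#       pass   # exclusive access, e.g. while swapping in a new model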
def const_model_inference_fn(features):
"""Builds the model graph with weights marked as constant.
    This improves TPU inference performance because it prevents the weights
    from being transferred to the TPU on every call to Session.run().
Returns:
(policy_output, value_output, logits) tuple of tensors.
"""
def custom_getter(getter, name, *args, **kwargs):
with tf.control_dependencies(None):
return tf.guarantee_const(
getter(name, *args, **kwargs), name=name + "/GuaranteeConst")
with tf.variable_scope("", custom_getter=custom_getter):
return dual_net.model_inference_fn(features, False)
class Session(abc.ABC):
def __init__(self, sess):
self._sess = sess
# Event that gets set after a model is loaded.
# The worker threads wait for this event before starting inference.
self.model_available = threading.Event()
self._model_path = None
self._mutex = RwMutex()
def maybe_load_model(self, path):
"""Loads the given model if it's different from the current one."""
with self._mutex.read_lock():
if path == self._model_path:
return
with self._mutex.write_lock():
dbg(time.time(), "loading %s" % path)
self._locked_load_model(path)
self._model_path = path
dbg(time.time(), "loaded %s" % path)
self.model_available.set()
def run(self, raw_features):
"""Performs inference on the given raw features."""
features = self._prepare_features(raw_features)
with self._mutex.read_lock():
policy, value = self._locked_run(features)
local_model_path = self._model_path
return policy, value, local_model_path
def shutdown(self):
"""Shuts down the session."""
with self._mutex.write_lock():
self._locked_shutdown()
@abc.abstractmethod
def _locked_load_model(self, path):
"""Device-specific wrapper around a call to _load_graph.
Must be called with self._lock held for write.
"""
pass
@abc.abstractmethod
def _locked_run(self, raw_features):
"""Device-specific evaluation of the model with the given raw features.
Must be called with self._lock held for read.
"""
pass
@abc.abstractmethod
    def _locked_shutdown(self):
"""Device-specific shutdown.
Must be called with self._lock held for write.
"""
pass
@abc.abstractmethod
def _prepare_features(self, raw_features):
"""Device-specific preparation of raw features.
Does not require a lock to be held.
"""
pass
class BasicSession(Session):
def __init__(self):
Session.__init__(self, tf.Session(graph=tf.Graph()))
with self._sess.graph.as_default():
self._feature_placeholder = tf.placeholder(
tf.float32, [None, go.N, go.N,
features_lib.NEW_FEATURES_PLANES],
name='pos_tensor')
def _locked_shutdown(self):
pass
def _locked_load_model(self, path):
tf.reset_default_graph()
if path[-3:] == ".pb":
graph_def = tf.GraphDef()
with tf.gfile.FastGFile(path, 'rb') as f:
graph_def.ParseFromString(f.read())
with self._sess.graph.as_default():
self._outputs = tf.import_graph_def(
graph_def,
input_map={'pos_tensor': self._feature_placeholder},
return_elements=['policy_output:0', 'value_output:0'])
else:
with self._sess.graph.as_default():
self._outputs = dual_net.model_inference_fn(
self._feature_placeholder, training=False)
tf.train.Saver().restore(self._sess, path)
def _locked_run(self, features):
outputs = self._sess.run(self._outputs,
{self._feature_placeholder: features})
return outputs[0], outputs[1]
def _prepare_features(self, raw_features):
features = np.frombuffer(raw_features, dtype=np.int8)
features = features.reshape([-1, go.N, go.N,
features_lib.NEW_FEATURES_PLANES])
return features
class TpuSession(Session):
def __init__(self, tpu_name, parallel_tpus, batch_size):
tpu = [tpu_name] if tpu_name else None
tpu_grpc_url = tf.contrib.cluster_resolver.TPUClusterResolver(
tpu=tpu).get_master()
sess = tf.Session(tpu_grpc_url)
Session.__init__(self, sess)
self._parallel_tpus = parallel_tpus
self._batch_size = batch_size
# Create init & shutdown ops up front. This is probably not really
# necessary but it's what the sample code does.
self._tpu_init = tf.contrib.tpu.initialize_system()
self._tpu_shutdown = tf.contrib.tpu.shutdown_system()
self._feature_placeholders = []
with self._sess.graph.as_default():
for i in range(parallel_tpus):
features = tf.placeholder(
tf.float32, [None, go.N, go.N,
features_lib.NEW_FEATURES_PLANES],
name='pos_tensor')
self._feature_placeholders.append((features,))
self._outputs = tf.contrib.tpu.replicate(
const_model_inference_fn, self._feature_placeholders)
# tpu.replicate requires a list, but sess.run requires a tuple...
self._feature_placeholders = tuple(self._feature_placeholders)
def _locked_shutdown(self):
self._sess.run(self._tpu_shutdown)
def _locked_load_model(self, path):
if self._model_path:
dbg("shutting down tpu")
self._sess.run(self._tpu_shutdown)
with self._sess.graph.as_default():
tf.train.Saver().restore(self._sess, path)
dbg("initializing tpu")
self._sess.run(self._tpu_init)
def _locked_run(self, features):
outputs = self._sess.run(self._outputs,
{self._feature_placeholders: features})
policy = []
value = []
for x in outputs:
policy.extend(x[0])
value.extend(x[1])
return policy, value
def _prepare_features(self, raw_features):
num_board_features = go.N * go.N * features_lib.NEW_FEATURES_PLANES
num_features = self._batch_size * num_board_features
assert len(raw_features) == num_features * self._parallel_tpus
features = []
for i in range(self._parallel_tpus):
begin = i * num_features
x = np.frombuffer(
raw_features, dtype=np.int8, count=num_features, offset=begin)
x = x.reshape([self._batch_size, go.N, go.N,
features_lib.NEW_FEATURES_PLANES])
features.append(x)
return features
class Worker(object):
def __init__(self):
self.parallel_inferences = FLAGS.parallel_tpus if FLAGS.use_tpu else 1
self._get_server_config()
if FLAGS.use_tpu:
self.sess = TpuSession(
FLAGS.tpu_name, self.parallel_inferences, self.batch_size)
else:
self.sess = BasicSession()
if FLAGS.model:
self.sess.maybe_load_model(FLAGS.model)
def run(self):
self._running = True
try:
self._run_threads()
finally:
self._running = False
dbg("shutting down session")
self.sess.shutdown()
dbg("all done!")
def _get_server_config(self):
while True:
try:
channel = grpc.insecure_channel(FLAGS.server_address)
self.stub = inference_service_pb2_grpc.InferenceServiceStub(
channel)
config = self.stub.GetConfig(
inference_service_pb2.GetConfigRequest())
break
except grpc.RpcError:
dbg("Waiting for server")
time.sleep(1)
if config.board_size != go.N:
raise RuntimeError("Board size mismatch: server=%d, worker=%d" % (
config.board_size, go.N))
positions_per_inference = (config.games_per_inference *
config.virtual_losses)
if positions_per_inference % self.parallel_inferences != 0:
raise RuntimeError(
"games_per_inference * virtual_losses must be divisible by "
"parallel_tpus")
self.batch_size = positions_per_inference // self.parallel_inferences
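        # Worked example with hypothetical numbers: games_per_inference=8 and
        # virtual_losses=8 give positions_per_inference=64; with
        # parallel_inferences=2 each replica then receives a batch of 32.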
dbg("parallel_inferences = %d" % self.parallel_inferences)
dbg("games_per_inference = %d" % config.games_per_inference)
dbg("virtual_losses = %d" % config.virtual_losses)
dbg("positions_per_inference = %d" % positions_per_inference)
dbg("batch_size = %d" % self.batch_size)
def _run_threads(self):
"""Run inference threads and optionally a thread that updates the model.
Synchronization between the inference threads and the model update
thread is performed using a RwLock that protects access to self.sess.
The inference threads enter the critical section using a read lock, so
they can both run inference concurrently. The model update thread enters
the critical section using a write lock for exclusive access.
"""
threads = []
# Start the worker threads before the checkpoint thread: if the parent
# process dies, the worker thread RPCs will fail and the thread will
# exit. This gives us a chance below to set self._running to False,
# telling the checkpoint thread to exit.
for i in range(NUM_WORKER_THREADS):
threads.append(threading.Thread(
target=self._worker_thread, args=[i]))
if FLAGS.checkpoint_dir:
threads.append(threading.Thread(target=self._checkpoint_thread))
for t in threads:
t.start()
for i, t in enumerate(threads):
t.join()
dbg("joined thread %d" % i)
# Once the first thread has joined, tell the remaining ones to stop.
self._running = False
def _checkpoint_thread(self):
dbg("starting model loader thread")
while self._running:
freshest = saver.latest_checkpoint(FLAGS.checkpoint_dir)
if freshest:
self.sess.maybe_load_model(freshest)
# Wait a few seconds before checking again.
time.sleep(5)
def _worker_thread(self, thread_id):
dbg("waiting for model")
while self._running and not self.sess.model_available.wait(1):
pass
dbg("running worker", thread_id)
while self._running:
features_response = self.stub.GetFeatures(
inference_service_pb2.GetFeaturesRequest())
policy, value, model_path = self.sess.run(
features_response.features)
put_outputs_request = inference_service_pb2.PutOutputsRequest(
batch_id=features_response.batch_id,
policy=np.concatenate(policy), value=value,
model_path=model_path)
self.stub.PutOutputs(put_outputs_request)
dbg("stopping worker", thread_id)
def main():
tf.logging.set_verbosity(tf.logging.DEBUG)
worker = Worker()
worker.run()
if __name__ == "__main__":
flags.FLAGS(sys.argv, known_only=True)
main()
|
py | 1a32aeaa69b41cf0693416173fc788d7d7177059 | ##
##
# File auto-generated by PythonFileGenerator
__all__ = [
'ClusterActivationNotification',
'SiteActivationNotification'
]
from .ClusterActivationNotification import ClusterActivationNotification
from .SiteActivationNotification import SiteActivationNotification
|
py | 1a32aefa5b795e61bec8c03086f4c8e59a728974 | """
us_tx_jails.py - Retrieve a list of TDCJ Jails in Texas.
These data were scraped from https://www.tdcj.texas.gov/unit_directory/
on Feb 7, 2020.
Copyright (c) 2020 by Thomas J. Daley, J.D. All Rights Reserved.
"""
from docassemble.base.util import Person, Address
from docassemble.base.logger import logmessage
JAILS = {
"Allred": {
"unit_name": "James V. Allred Unit",
"telephone": "(940) 855-7477 (**069)",
"address": "2101 FM 369 North",
"city": " Iowa Park",
"state": "TX",
"zip": "76367",
},
"Bell": {
"unit_name": "Oliver J. Bell Unit",
"telephone": "(281) 592-9559",
"address": "P.O. Box 1678",
"city": " Cleveland",
"state": "TX",
"zip": "77328",
},
"Beto": {
"unit_name": "George Beto Unit",
"telephone": "(903) 928-2217 (**022)",
"address": "1391 FM 3328",
"city": " Tennessee Colony",
"state": "TX",
"zip": "75880",
},
"Boyd": {
"unit_name": "William R. Boyd Unit",
"telephone": "(254) 739-5555 (**051)",
"address": "200 Spur 113",
"city": " Teague",
"state": "TX",
"zip": "75860-20",
},
"Bradshaw": {
"unit_name": "James Bradshaw State Jail",
"telephone": "(903) 655-0880",
"address": "P.O. Box 9000",
"city": " Henderson",
"state": "TX",
"zip": "75653",
},
"Bridgeport": {
"unit_name": "Bridgeport Correctional Center",
"telephone": "(940) 683-3010 (**674)",
"address": "4000 North Tenth Street",
"city": " Bridgeport",
"state": "TX",
"zip": "76426",
},
"Briscoe": {
"unit_name": "Dolph Briscoe Unit",
"telephone": "(830) 965-4444 (**052)",
"address": "1459 West Highway 85",
"city": " Dilley",
"state": "TX",
"zip": "78017",
},
"Byrd": {
"unit_name": 'James "Jay H. Byrd Unit',
"telephone": "(936) 295-5768 (**008)",
"address": "21 FM 247",
"city": " Huntsville",
"state": "TX",
"zip": "77320",
},
"Clemens": {
"unit_name": "Clemens Unit",
"telephone": "(979) 798-2188 (**005)",
"address": "11034 Hwy 36",
"city": " Brazoria",
"state": "TX",
"zip": "77422",
},
"Clements": {
"unit_name": "William P. Clements Unit",
"telephone": "(806) 381-7080 (**037)",
"address": "9601 Spur 591",
"city": " Amarillo",
"state": "TX",
"zip": "79107-96",
},
"Coffield": {
"unit_name": "H. H. Coffield Unit",
"telephone": "(903) 928-2211 (**006)",
"address": "2661 FM 2054",
"city": " Tennessee Colony",
"state": "TX",
"zip": "75884",
},
"Cole": {
"unit_name": "Buster Cole State Jail",
"telephone": "(903) 583-1100 (**102)",
"address": "3801 Silo Road",
"city": " Bonham",
"state": "TX",
"zip": "75418",
},
"Connally": {
"unit_name": "John B. Connally Unit",
"telephone": "(830) 583-4003 (**068)",
"address": "899 FM 632",
"city": " Kenedy",
"state": "TX",
"zip": "78119",
},
"Cotulla": {
"unit_name": "Cotulla Transfer Facility",
"telephone": "(830) 879-3077 (**061)",
"address": "610 FM 624",
"city": " Cotulla",
"state": "TX",
"zip": "78014",
},
"Crain": {
"unit_name": "Christina Melton Crain Unit",
"telephone": "(254) 865-8431 (**024)",
"address": "1401 State School Road",
"city": " Gatesville",
"state": "TX",
"zip": "76599-29",
},
"Dalhart": {
"unit_name": "Dalhart Unit",
"telephone": "(806) 249-8655 (**072)",
"address": "11950 FM 998",
"city": " Dalhart",
"state": "TX",
"zip": "79022",
},
"Daniel": {
"unit_name": "Price Daniel Unit",
"telephone": "(325) 573-1114 (**038)",
"address": "938 South FM 1673",
"city": " Snyder",
"state": "TX",
"zip": "79549",
},
"Darrington": {
"unit_name": "Darrington Unit",
"telephone": "(281) 595-3465 (**007)",
"address": "59 Darrington Road",
"city": " Rosharon",
"state": "TX",
"zip": "77583",
},
"Diboll": {
"unit_name": "Diboll Correctional Center",
"telephone": "(936) 829-2295",
"address": "1604 South First Street",
"city": " Diboll",
"state": "TX",
"zip": "75941",
},
"Dominguez": {
"unit_name": "Fabian Dale Dominguez State Jail",
"telephone": "(210) 675-6620 (**098)",
"address": "6535 Cagnon Road",
"city": " San Antonio",
"state": "TX",
"zip": "78252-22",
},
"Duncan": {
"unit_name": "Rufus H. Duncan Geriatric Facility",
"telephone": "(936) 829-2616 (**063)",
"address": "1502 South First Street",
"city": " Diboll",
"state": "TX",
"zip": "75941",
},
"East Texas": {
"unit_name": "East Texas Multi-Use Facility",
"telephone": "(903) 655-3300",
"address": "900 Industrial Drive",
"city": " Henderson",
"state": "TX",
"zip": "75652",
},
"Eastham": {
"unit_name": "Eastham Unit",
"telephone": "(936) 636-7321 (**009)",
"address": "2665 Prison Road #1",
"city": " Lovelady",
"state": "TX",
"zip": "75851",
},
"Ellis": {
"unit_name": "O.B. Ellis Unit",
"telephone": "(936) 295-5756 (**010)",
"address": "1697 FM 980",
"city": " Huntsville",
"state": "TX",
"zip": "77343",
},
"Estelle": {
"unit_name": 'W. J. "Jim" Estelle Unit',
"telephone": "(936) 291-4200 (**032)",
"address": "264 FM 3478",
"city": " Huntsville",
"state": "TX",
"zip": "77320-33",
},
"Estes": {
"unit_name": 'Sanders "Sandy" Estes Unit"',
"telephone": "(972) 366-3334 (**670)",
"address": "1100 Hwy 1807",
"city": " Venus",
"state": "TX",
"zip": "76084",
},
"Ferguson": {
"unit_name": "Jim Ferguson Unit",
"telephone": "(936) 348-3751 (**011)",
"address": "12120 Savage Drive",
"city": " Midway",
"state": "TX",
"zip": "75852",
},
"Formby": {
"unit_name": "Formby State Jail",
"telephone": "(806) 296-2448 (**106)",
"address": "998 County Road AA",
"city": " Plainview",
"state": "TX",
"zip": "79072",
},
"Fort Stockton": {
"unit_name": "Fort Stockton Transfer Facility",
"telephone": "(432) 336-7676 (**062)",
"address": "1536 IH-10 East",
"city": " Fort Stockton",
"state": "TX",
"zip": "79735",
},
"Garza East": {
"unit_name": "Garza East Transfer Facility",
"telephone": "(361) 358-9880 (**096)",
"address": "4304 Highway 202",
"city": " Beeville",
"state": "TX",
"zip": "78102",
},
"Garza West": {
"unit_name": "Garza West Transfer Facility",
"telephone": "(361) 358-9890 (**095)",
"address": "4250 Highway 202",
"city": " Beeville",
"state": "TX",
"zip": "78102",
},
"Gist": {
"unit_name": "Larry Gist State Jail",
"telephone": "(409) 727-8400 (**097)",
"address": "3295 FM 3514",
"city": " Beaumont",
"state": "TX",
"zip": "77705",
},
"Glossbrenner": {
"unit_name": "Ernestine Glossbrenner Unit",
"telephone": "(361) 279-2705 (**088)",
"address": "5100 South FM 1329",
"city": " San Diego",
"state": "TX",
"zip": "78384",
},
"Goodman": {
"unit_name": "Glen Ray Goodman Transfer Facility",
"telephone": "(409) 383-0012 (**086)",
"address": "349 Private Road 8430",
"city": " Jasper",
"state": "TX",
"zip": "75951",
},
"Goree": {
"unit_name": "Thomas Goree Unit",
"telephone": "(936) 295-6331 (**012)",
"address": "7405 Hwy 75 South",
"city": " Huntsville",
"state": "TX",
"zip": "77344",
},
"Gurney": {
"unit_name": "Joe F. Gurney Transfer Facility",
"telephone": "(903) 928-3118 (**094)",
"address": "1385 FM 3328",
"city": " Palestine",
"state": "TX",
"zip": "75803",
},
"Halbert": {
"unit_name": "Ellen Halbert Unit",
"telephone": "(512) 756-6171 (**084)",
"address": "800 Ellen Halbert Drive",
"city": " Burnet",
"state": "TX",
"zip": "78611",
},
"Hamilton": {
"unit_name": "J. W. Hamilton Unit",
"telephone": "(979) 779-1633 (**077)",
"address": "200 Lee Morrison Lane",
"city": " Bryan",
"state": "TX",
"zip": "77807",
},
"Havins": {
"unit_name": "Thomas R. Havins Unit",
"telephone": "(325) 643-5575 (**082)",
"address": "500 FM 45 East",
"city": " Brownwood",
"state": "TX",
"zip": "76801",
},
"Henley": {
"unit_name": "Dempsie Henley State Jail",
"telephone": "(936) 258-2476 (**083)",
"address": "7581 Hwy 321",
"city": " Dayton",
"state": "TX",
"zip": "77535",
},
"Hightower": {
"unit_name": "L.V. Hightower Unit",
"telephone": "(936) 258-8013 (**041)",
"address": "902 FM 686",
"city": " Dayton",
"state": "TX",
"zip": "77535",
},
"Hilltop": {
"unit_name": "Hilltop Unit",
"telephone": "(254) 865-8901 (**031)",
"address": "1500 State School Road",
"city": " Gatesville",
"state": "TX",
"zip": "76598-2",
},
"Hobby": {
"unit_name": "William P. Hobby Unit",
"telephone": "(254) 883-5561 (**039)",
"address": "742 FM 712",
"city": " Marlin",
"state": "TX",
"zip": "76661",
},
"Hodge": {
"unit_name": "Jerry H. Hodge Unit",
"telephone": "(903) 683-5781 (**075)",
"address": "379 FM 2972 West",
"city": " Rusk",
"state": "TX",
"zip": "75785-36",
},
"Holliday": {
"unit_name": "Reverend C.A. Holliday Transfer Facility",
"telephone": "(936) 295-8200 (**092)",
"address": "295 IH-45 North",
"city": " Huntsville",
"state": "TX",
"zip": "77320-84",
},
"Hospital Galveston": {
"unit_name": "Hospital Galveston",
"telephone": "(409) 772-2875 (**023)",
"address": "P.O. Box 48 Substation #1",
"city": "Galveston",
"state": "TX",
"zip": "77555",
},
"Hughes": {
"unit_name": "Alfred D. Hughes Unit",
"telephone": "(254) 865-6663 (**042)",
"address": "Route 2 Box 4400",
"city": " Gatesville",
"state": "TX",
"zip": "76597",
},
"Huntsville": {
"unit_name": "Huntsville Unit",
"telephone": "(936) 437-1555 (**013)",
"address": "815 12th Street",
"city": " Huntsville",
"state": "TX",
"zip": "77348",
},
"Hutchins": {
"unit_name": "Hutchins State Jail",
"telephone": "(972) 225-1304 (**099)",
"address": "1500 East Langdon Road",
"city": " Dallas",
"state": "TX",
"zip": "75241",
},
"Jester I": {
"unit_name": "Beauford H. Jester I Unit",
"telephone": "(281) 277-3030 (**014)",
"address": "1 Jester Road",
"city": " Richmond",
"state": "TX",
"zip": "77406",
},
"Jester III": {
"unit_name": "Beauford H. Jester III Unit",
"telephone": "(281) 277-7000 (**030)",
"address": "3 Jester Road",
"city": " Richmond",
"state": "TX",
"zip": "77406",
},
"Jester IV": {
"unit_name": "Beauford H. Jester IV Unit",
"telephone": "(281) 277-3700 (**033)",
"address": "4 Jester Road",
"city": " Richmond",
"state": "TX",
"zip": "77406",
},
"Johnston": {
"unit_name": "Clyde M. Johnston Unit",
"telephone": "(903) 342-6166 (**089)",
"address": "703 Airport Road",
"city": " Winnsboro",
"state": "TX",
"zip": "75494",
},
"Jordan": {
"unit_name": "Rufe Jordan Unit / Baten Intermediate Sanction Facility",
"telephone": "(806) 665-7070 (**056)",
"address": "1992 Helton Road",
"city": " Pampa",
"state": "TX",
"zip": "79065",
},
"Kyle": {
"unit_name": "Kyle Correctional Center",
"telephone": "(512) 268-0079 (**633)",
"address": "23001 IH-35",
"city": " Kyle",
"state": "TX",
"zip": "78640",
},
"LeBlanc": {
"unit_name": "Richard P. LeBlanc Unit",
"telephone": "(409) 724-1515 (**076)",
"address": "3695 FM 3514",
"city": " Beaumont",
"state": "TX",
"zip": "77705",
},
"Lewis": {
"unit_name": "Gib Lewis Unit",
"telephone": "(409) 283-8181 (**040)",
"address": "777 FM 3497",
"city": " Woodville",
"state": "TX",
"zip": "75990",
},
"Lindsey": {
"unit_name": "John R. Lindsey State Jail",
"telephone": "(940) 567-2272",
"address": "1620 FM 3344",
"city": " Jacksboro",
"state": "TX",
"zip": "76458",
},
"Lockhart": {
"unit_name": "Lockhart Correctional Facility",
"telephone": "(512) 398-3480 (**109)",
"address": "1400 Industrial Blvd",
"city": " Lockhart",
"state": "TX",
"zip": "78644",
},
"Lopez": {
"unit_name": "Reynoldo V. Lopez State Jail",
"telephone": "(956) 316-3810 (**103)",
"address": "1203 El Cibolo Road",
"city": " Edinburg",
"state": "TX",
"zip": "78542",
},
"Luther": {
"unit_name": "O.L. Luther Unit",
"telephone": "(936) 825-7547 (**029)",
"address": "1800 Luther Drive",
"city": " Navasota",
"state": "TX",
"zip": "77868",
},
"Lychner": {
"unit_name": "Pam Lychner State Jail",
"telephone": "(281) 454-5036 (**100)",
"address": "2350 Atascocita Road",
"city": " Humble",
"state": "TX",
"zip": "77396",
},
"Lynaugh": {
"unit_name": "James Lynaugh Unit",
"telephone": "(432) 395-2938 (**073)",
"address": "1098 South Highway 2037",
"city": " Fort Stockton",
"state": "TX",
"zip": "79735",
},
"Marlin": {
"unit_name": "Marlin Transfer Facility",
"telephone": "(254) 883-3858 (**064)",
"address": "2893 State Hwy 6",
"city": " Marlin",
"state": "TX",
"zip": "76661-65",
},
"McConnell": {
"unit_name": "William G. McConnell Unit",
"telephone": "(361) 362-2300 (**048)",
"address": "3001 South Emily Drive",
"city": " Beeville",
"state": "TX",
"zip": "78102",
},
"Michael": {
"unit_name": "Mark W. Michael Unit",
"telephone": "(903) 928-2311 (**036)",
"address": "2664 FM 2054",
"city": " Tennessee Colony",
"state": "TX",
"zip": "75886",
},
"Middleton": {
"unit_name": "John Middleton Transfer Facility",
"telephone": "(325) 548-9075 (**093)",
"address": "13055 FM 3522",
"city": " Abilene",
"state": "TX",
"zip": "79601",
},
"Montford/West Texas Hospital": {
"unit_name": "John Montford Unit",
"telephone": "(806) 745-1021 (**090)",
"address": "8602 Peach Street",
"city": " Lubbock",
"state": "TX",
"zip": "79404",
},
"Moore, B.": {
"unit_name": "Billy Moore Correctional Center",
"telephone": "(903) 834-6186",
"address": "8500 North FM 3053",
"city": " Overton",
"state": "TX",
"zip": "75684",
},
"Moore, C.": {
"unit_name": "Choice Moore Transfer Facility",
"telephone": "(903) 583-4464 (**079)",
"address": "1700 North FM 87",
"city": " Bonham",
"state": "TX",
"zip": "75418",
},
"Mountain View": {
"unit_name": "Mountain View Unit",
"telephone": "(254) 865-7226 (**016)",
"address": "2305 Ransom Road",
"city": " Gatesville",
"state": "TX",
"zip": "76528",
},
"Murray": {
"unit_name": "Dr. Lane Murray Unit",
"telephone": "(254) 865-2000 (**105)",
"address": "1916 North Hwy 36 Bypass",
"city": " Gatesville",
"state": "TX",
"zip": "76596",
},
"Neal": {
"unit_name": "Nathaniel J. Neal Unit",
"telephone": "(806) 383-1175 (**070)",
"address": "9055 Spur 591",
"city": " Amarillo",
"state": "TX",
"zip": "79107-96",
},
"Ney": {
"unit_name": "Joe Ney State Jail",
"telephone": "(830) 426-8030 (**085)",
"address": "114 Private Road 4303",
"city": " Hondo",
"state": "TX",
"zip": "78861-3",
},
"Pack": {
"unit_name": "Wallace Pack Unit",
"telephone": "(936) 825-3728 (**026)",
"address": "2400 Wallace Pack Road",
"city": " Navasota",
"state": "TX",
"zip": "77868",
},
"Plane/Santa Maria Baby Bonding": {
"unit_name": "Lucile Plane State Jail",
"telephone": "(936) 258-2476 (**101)",
"address": "904 FM 686",
"city": " Dayton",
"state": "TX",
"zip": "77535",
},
"Polunsky": {
"unit_name": "Allan B. Polunsky Unit",
"telephone": "(936) 967-8082 (**054)",
"address": "3872 FM 350 South",
"city": " Livingston",
"state": "TX",
"zip": "77351",
},
"Powledge": {
"unit_name": "Louis C. Powledge Unit",
"telephone": "(903) 723-5074 (**028)",
"address": "1400 FM 3452",
"city": " Palestine",
"state": "TX",
"zip": "75803",
},
"Ramsey": {
"unit_name": "W. F. Ramsey Unit",
"telephone": "(281) 595-3491 (**017)",
"address": "1100 FM 655",
"city": " Rosharon",
"state": "TX",
"zip": "77583",
},
"Roach": {
"unit_name": "T.L. Roach Unit",
"telephone": "(940) 937-6364 (**050)",
"address": "15845 FM 164",
"city": " Childress",
"state": "TX",
"zip": "79201",
},
"Robertson": {
"unit_name": "French Robertson Unit",
"telephone": "(325) 548-9035 (**047)",
"address": "12071 FM 3522",
"city": " Abilene",
"state": "TX",
"zip": "79601",
},
"San Saba": {
"unit_name": "San Saba Transfer Facility",
"telephone": "(325) 372-4255 (**65)",
"address": "206 South Wallace Creek Road",
"city": " San Saba",
"state": "TX",
"zip": "76877",
},
"Sanchez": {
"unit_name": "Rogelio Sanchez State Jail",
"telephone": "(915) 856-0046 (**108)",
"address": "3901 State Jail Road",
"city": " El Paso",
"state": "TX",
"zip": "79938-84",
},
"Sayle": {
"unit_name": "Walker Sayle Unit",
"telephone": "(254) 559-1581 (**080)",
"address": "4176 FM 1800",
"city": " Breckenridge",
"state": "TX",
"zip": "76424-73",
},
"Scott": {
"unit_name": "Wayne Scott Unit",
"telephone": "(979) 849-9306 (**019)",
"address": "6999 Retrieve",
"city": " Angleton",
"state": "TX",
"zip": "77515",
},
"Segovia": {
"unit_name": "Manuel A. Segovia Unit",
"telephone": "(956) 316-2400 (**078)",
"address": "1201 E. El Cibolo Road",
"city": " Edinburg",
"state": "TX",
"zip": "78542",
},
"Skyview": {
"unit_name": "Skyview Unit",
"telephone": "(903) 683-5781 (**034)",
"address": "379 FM 2972 West",
"city": " Rusk",
"state": "TX",
"zip": "75785-36",
},
"Smith": {
"unit_name": "Preston E. Smith Unit",
"telephone": "(806) 872-6741 (**053)",
"address": "1313 CR 19",
"city": " Lamesa",
"state": "TX",
"zip": "79331-18",
},
"Stevenson": {
"unit_name": "Clarence N. Stevenson Unit",
"telephone": "(361) 275-2075 (**071)",
"address": "1525 FM 766",
"city": " Cuero",
"state": "TX",
"zip": "77954",
},
"Stiles": {
"unit_name": "Mark W. Stiles Unit",
"telephone": "(409) 722-5255 (**049)",
"address": "3060 FM 3514",
"city": " Beaumont",
"state": "TX",
"zip": "77705",
},
"Stringfellow": {
"unit_name": 'A.M. "Mac" Stringfellow Unit',
"telephone": "(281) 595-3413 (**018)",
"address": "1200 FM 655",
"city": " Rosharon",
"state": "TX",
"zip": "77583",
},
"Telford": {
"unit_name": "Barry B. Telford Unit",
"telephone": "(903) 628-3171 (**067)",
"address": "3899 Hwy 98",
"city": " New Boston",
"state": "TX",
"zip": "75570",
},
"Terrell": {
"unit_name": "C.T. Terrell Unit",
"telephone": "(281) 595-3481 (**027)",
"address": "1300 FM 655",
"city": " Rosharon",
"state": "TX",
"zip": "77583",
},
"Torres": {
"unit_name": "Ruben M. Torres Unit",
"telephone": "(830) 426-5325 (**055)",
"address": "125 Private Road 4303",
"city": " Hondo",
"state": "TX",
"zip": "78861",
},
"Travis County": {
"unit_name": "Travis County State Jail",
"telephone": "(512) 926-4482 (**118)",
"address": "8101 FM 969",
"city": " Austin",
"state": "TX",
"zip": "78724",
},
"Tulia": {
"unit_name": "Tulia Transfer Facility",
"telephone": "(806) 995-4109 (**066)",
"address": "4000 Highway 86 West",
"city": " Tulia",
"state": "TX",
"zip": "79088",
},
"Vance": {
"unit_name": "Carol S. Vance Unit",
"telephone": "(281) 277-3030 (**015)",
"address": "2 Jester Road",
"city": " Richmond",
"state": "TX",
"zip": "77406",
},
"Wallace/San Angelo Work Camp": {
"unit_name": "Daniel Webster Wallace Unit",
"telephone": "(325) 728-2162 (**074)",
"address": "1675 South FM 3525",
"city": " Colorado City",
"state": "TX",
"zip": "79512",
},
"Wheeler": {
"unit_name": "J.B. Wheeler State Jail",
"telephone": "(806) 293-1081 (**087)",
"address": "986 County Road AA",
"city": " Plainview",
"state": "TX",
"zip": "79072",
},
"Willacy County": {
"unit_name": "Willacy County State Jail",
"telephone": "(956) 689-4900",
"address": "1695 South Buffalo Drive",
"city": " Raymondville",
"state": "TX",
"zip": "78580",
},
"Woodman": {
"unit_name": "Linda Woodman State Jail",
"telephone": "(254) 865-9398 (**107)",
"address": "1210 Coryell City Road",
"city": " Gatesville",
"state": "TX",
"zip": "76528",
},
"Wynne": {
"unit_name": "John M. Wynne Unit",
"telephone": "(936) 295-9126 (**020)",
"address": "810 FM 2821",
"city": " Huntsville",
"state": "TX",
"zip": "77349",
},
"Young": {
"unit_name": "Carole S. Young Medical Facility",
"telephone": "(409) 948-0001 (**129)",
"address": "5509 Attwater Ave.",
"city": " Dickinson",
"state": "TX",
"zip": "77539",
},
}
class UsTxJails(object):
"""
A database of Texas Department of Corrections Facilities.
"""
def get_jails(self) -> list:
"""
Returns:
(list): A list of jails operated by the state
"""
the_list = [self.make_jail(jail) for short_name, jail in JAILS.items()]
return the_list
def get_jail(self, short_name: str) -> Person:
"""
Return a given jail.
Args:
short_name (str): Key into the JAILS dict
Returns:
(Person): Name and address of the requested jail facility
"""
jail = JAILS.get(short_name, None)
if not jail:
return None
return self.make_jail(jail)
def make_jail(self, jail: dict) -> Person:
"""
Make a jail from a dict.
"""
address = Address(
address = jail['address'],
city = jail['city'],
state = jail['state'],
zip = jail['zip']
)
result = Person(address=address, name=str(jail['unit_name']))
return result
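# Illustrative usage (not part of the original module):
#
#   jails = UsTxJails()
#   allred = jails.get_jail("Allred")   # Person with name and Address, or None
#   all_units = jails.get_jails()       # list of Person objects, one per unit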
|
py | 1a32afcd24234f926a0cb7047654dd666e5727c8 | from __future__ import annotations
import functools
import operator
from abc import abstractmethod
from typing import (
Callable,
Dict,
NamedTuple,
Protocol,
Tuple,
TypeVar,
Union,
overload,
runtime_checkable,
)
from torch import Tensor
from torch import device as Device
from torch import dtype as DType
from . import constants
T = TypeVar("T", covariant=True)
V = TypeVar("V", contravariant=True)
@runtime_checkable
class Runnable(Protocol[T]):
@abstractmethod
def run(self) -> T:
...
@runtime_checkable
class TensorMixin(Protocol):
@overload
@abstractmethod
def size(self) -> Tuple[int, ...]:
...
@overload
@abstractmethod
def size(self, dim: int) -> int:
...
@abstractmethod
def size(self, dim: int | None = None) -> int | Tuple[int, ...]:
...
def numel(self) -> int:
return functools.reduce(operator.mul, self.size(), 1)
def dim(self) -> int:
return len(self.size())
@abstractmethod
def dtype(self) -> DType:
...
@abstractmethod
def device(self) -> str | Device:
...
class BatchNoBatch(NamedTuple):
batch: int
no_batch: int
class BatchInfo(NamedTuple):
index: int
value: int
def map(self, func: Callable[[int], int]) -> BatchInfo:
index = func(self.index)
return BatchInfo(index, self.value)
@runtime_checkable
class RunnableTensor(Runnable[Tensor], TensorMixin, Protocol):
@abstractmethod
def batch(self) -> BatchInfo | None:
...
@abstractmethod
def take_batch(self, low: int, high: int) -> Tensor:
...
@abstractmethod
def visit(self, nodes: Dict[int, TensorLike]) -> None:
...
def buffer(self) -> Dict[int, TensorLike]:
nodes = {}
self.visit(nodes)
return nodes
def buffer_numel(self) -> BatchNoBatch:
buffer = self.buffer().values()
return BatchNoBatch(
sum(t.numel() for t in buffer if bat(t) is not None),
sum(t.numel() for t in buffer if bat(t) is None),
)
def buffer_memory(self) -> BatchNoBatch:
buffer = self.buffer().values()
return BatchNoBatch(
sum(mem(t) for t in buffer if bat(t) is not None),
sum(mem(t) for t in buffer if bat(t) is None),
)
def memory(self) -> int:
return mem(self)
def dtyp(tensor: TensorLike) -> DType:
if isinstance(tensor, Tensor):
return tensor.dtype
return tensor.dtype()
def dev(tensor: TensorLike) -> str | Device:
if isinstance(tensor, Tensor):
return tensor.device
return tensor.device()
def mem(tensor: TensorLike) -> int:
dt = dtyp(tensor)
numel = tensor.numel()
return constants.MEMORY_BYTES[dt] * numel
def bat(tensor: TensorLike) -> BatchInfo | None:
if isinstance(tensor, RunnableTensor):
return tensor.batch()
return None
TensorLike = Union[Tensor, RunnableTensor]
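# Illustrative example (assumes constants.MEMORY_BYTES maps torch.float32 to 4):
#
#   import torch
#   t = torch.zeros(256, 1024, dtype=torch.float32)
#   mem(t)    # 256 * 1024 * 4 = 1,048,576 bytes
#   bat(t)    # None: a plain Tensor carries no BatchInfo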
|
py | 1a32b020673b7b4d09fa91acc41e9717c592520d | import argparse
def parse_args():
parser = argparse.ArgumentParser(
description="Get parameters for the ABM Simulation"
)
# Name and seed
parser.add_argument("--name", help="experiment name", required=True)
parser.add_argument("--seed", help="seed for reproducibility", type=int, default=42)
return parser.parse_args()
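# Example invocation (illustrative; the script filename is hypothetical):
#   python abm_simulation.py --name baseline-run --seed 7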
if __name__ == "__main__":
config = parse_args()
|
py | 1a32b0acff8c82d48cd95f575d1dad710e5fc0dd | from pydigree.common import spans
from pydigree.genotypes import Alleles
try:
import line_profiler
except ImportError:
print("No line profiler, skipping test.")
import sys
sys.exit(0)
test_data = Alleles([0] * 10000)
func = spans
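# Lay down 100-element runs of 1s every 200 positions, giving `spans`
# roughly 50 non-zero segments to detect while being profiled.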
for start in range(1, 10000, 200):
test_data[start:(start+100)] = 1
profile = line_profiler.LineProfiler(func)
profile.runcall(func, test_data)
profile.print_stats() |
py | 1a32b1d60e24425ae8ab02d277373372e2c3244a | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = [
'AssignmentPrincipalResponse',
'CanonicalProfileDefinitionResponse',
'CanonicalProfileDefinitionResponseProperties',
'ConnectorMappingAvailabilityResponse',
'ConnectorMappingCompleteOperationResponse',
'ConnectorMappingErrorManagementResponse',
'ConnectorMappingFormatResponse',
'ConnectorMappingPropertiesResponse',
'ConnectorMappingStructureResponse',
'DataSourcePrecedenceResponse',
'HubBillingInfoFormatResponse',
'KpiAliasResponse',
'KpiExtractResponse',
'KpiGroupByMetadataResponse',
'KpiParticipantProfilesMetadataResponse',
'KpiThresholdsResponse',
'ParticipantProfilePropertyReferenceResponse',
'ParticipantPropertyReferenceResponse',
'PredictionDistributionDefinitionResponse',
'PredictionDistributionDefinitionResponseDistributions',
'PredictionResponseGrades',
'PredictionResponseMappings',
'PredictionResponseSystemGeneratedEntities',
'ProfileEnumValidValuesFormatResponse',
'PropertyDefinitionResponse',
'RelationshipLinkFieldMappingResponse',
'RelationshipTypeFieldMappingResponse',
'RelationshipTypeMappingResponse',
'ResourceSetDescriptionResponse',
'StrongIdResponse',
'TypePropertiesMappingResponse',
]
@pulumi.output_type
class AssignmentPrincipalResponse(dict):
"""
The AssignmentPrincipal
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "principalId":
suggest = "principal_id"
elif key == "principalType":
suggest = "principal_type"
elif key == "principalMetadata":
suggest = "principal_metadata"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AssignmentPrincipalResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AssignmentPrincipalResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AssignmentPrincipalResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
principal_id: str,
principal_type: str,
principal_metadata: Optional[Mapping[str, str]] = None):
"""
The AssignmentPrincipal
:param str principal_id: The principal id being assigned to.
:param str principal_type: The Type of the principal ID.
:param Mapping[str, str] principal_metadata: Other metadata for the principal.
"""
pulumi.set(__self__, "principal_id", principal_id)
pulumi.set(__self__, "principal_type", principal_type)
if principal_metadata is not None:
pulumi.set(__self__, "principal_metadata", principal_metadata)
@property
@pulumi.getter(name="principalId")
def principal_id(self) -> str:
"""
The principal id being assigned to.
"""
return pulumi.get(self, "principal_id")
@property
@pulumi.getter(name="principalType")
def principal_type(self) -> str:
"""
The Type of the principal ID.
"""
return pulumi.get(self, "principal_type")
@property
@pulumi.getter(name="principalMetadata")
def principal_metadata(self) -> Optional[Mapping[str, str]]:
"""
Other metadata for the principal.
"""
return pulumi.get(self, "principal_metadata")
@pulumi.output_type
class CanonicalProfileDefinitionResponse(dict):
"""
Definition of canonical profile.
"""
def __init__(__self__, *,
canonical_profile_id: Optional[int] = None,
properties: Optional[Sequence['outputs.CanonicalProfileDefinitionResponseProperties']] = None):
"""
Definition of canonical profile.
:param int canonical_profile_id: Canonical profile ID.
:param Sequence['CanonicalProfileDefinitionResponseProperties'] properties: Properties of the canonical profile.
"""
if canonical_profile_id is not None:
pulumi.set(__self__, "canonical_profile_id", canonical_profile_id)
if properties is not None:
pulumi.set(__self__, "properties", properties)
@property
@pulumi.getter(name="canonicalProfileId")
def canonical_profile_id(self) -> Optional[int]:
"""
Canonical profile ID.
"""
return pulumi.get(self, "canonical_profile_id")
@property
@pulumi.getter
def properties(self) -> Optional[Sequence['outputs.CanonicalProfileDefinitionResponseProperties']]:
"""
Properties of the canonical profile.
"""
return pulumi.get(self, "properties")
@pulumi.output_type
class CanonicalProfileDefinitionResponseProperties(dict):
"""
The definition of a canonical profile property.
"""
def __init__(__self__, *,
profile_name: Optional[str] = None,
profile_property_name: Optional[str] = None,
rank: Optional[int] = None,
type: Optional[str] = None,
value: Optional[str] = None):
"""
The definition of a canonical profile property.
:param str profile_name: Profile name.
:param str profile_property_name: Property name of profile.
:param int rank: The rank.
:param str type: Type of canonical property value.
:param str value: Value of the canonical property.
"""
if profile_name is not None:
pulumi.set(__self__, "profile_name", profile_name)
if profile_property_name is not None:
pulumi.set(__self__, "profile_property_name", profile_property_name)
if rank is not None:
pulumi.set(__self__, "rank", rank)
if type is not None:
pulumi.set(__self__, "type", type)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="profileName")
def profile_name(self) -> Optional[str]:
"""
Profile name.
"""
return pulumi.get(self, "profile_name")
@property
@pulumi.getter(name="profilePropertyName")
def profile_property_name(self) -> Optional[str]:
"""
Property name of profile.
"""
return pulumi.get(self, "profile_property_name")
@property
@pulumi.getter
def rank(self) -> Optional[int]:
"""
The rank.
"""
return pulumi.get(self, "rank")
@property
@pulumi.getter
def type(self) -> Optional[str]:
"""
Type of canonical property value.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def value(self) -> Optional[str]:
"""
Value of the canonical property.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class ConnectorMappingAvailabilityResponse(dict):
"""
Connector mapping property availability.
"""
def __init__(__self__, *,
interval: int,
frequency: Optional[str] = None):
"""
Connector mapping property availability.
:param int interval: The interval of the given frequency to use.
:param str frequency: The frequency to update.
"""
pulumi.set(__self__, "interval", interval)
if frequency is not None:
pulumi.set(__self__, "frequency", frequency)
@property
@pulumi.getter
def interval(self) -> int:
"""
The interval of the given frequency to use.
"""
return pulumi.get(self, "interval")
@property
@pulumi.getter
def frequency(self) -> Optional[str]:
"""
The frequency to update.
"""
return pulumi.get(self, "frequency")
@pulumi.output_type
class ConnectorMappingCompleteOperationResponse(dict):
"""
The complete operation.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "completionOperationType":
suggest = "completion_operation_type"
elif key == "destinationFolder":
suggest = "destination_folder"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ConnectorMappingCompleteOperationResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ConnectorMappingCompleteOperationResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ConnectorMappingCompleteOperationResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
completion_operation_type: Optional[str] = None,
destination_folder: Optional[str] = None):
"""
The complete operation.
:param str completion_operation_type: The type of completion operation.
:param str destination_folder: The destination folder where files will be moved to once the import is done.
"""
if completion_operation_type is not None:
pulumi.set(__self__, "completion_operation_type", completion_operation_type)
if destination_folder is not None:
pulumi.set(__self__, "destination_folder", destination_folder)
@property
@pulumi.getter(name="completionOperationType")
def completion_operation_type(self) -> Optional[str]:
"""
The type of completion operation.
"""
return pulumi.get(self, "completion_operation_type")
@property
@pulumi.getter(name="destinationFolder")
def destination_folder(self) -> Optional[str]:
"""
The destination folder where files will be moved to once the import is done.
"""
return pulumi.get(self, "destination_folder")
@pulumi.output_type
class ConnectorMappingErrorManagementResponse(dict):
"""
The error management.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "errorManagementType":
suggest = "error_management_type"
elif key == "errorLimit":
suggest = "error_limit"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ConnectorMappingErrorManagementResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ConnectorMappingErrorManagementResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ConnectorMappingErrorManagementResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
error_management_type: str,
error_limit: Optional[int] = None):
"""
The error management.
:param str error_management_type: The type of error management to use for the mapping.
:param int error_limit: The error limit allowed while importing data.
"""
pulumi.set(__self__, "error_management_type", error_management_type)
if error_limit is not None:
pulumi.set(__self__, "error_limit", error_limit)
@property
@pulumi.getter(name="errorManagementType")
def error_management_type(self) -> str:
"""
The type of error management to use for the mapping.
"""
return pulumi.get(self, "error_management_type")
@property
@pulumi.getter(name="errorLimit")
def error_limit(self) -> Optional[int]:
"""
The error limit allowed while importing data.
"""
return pulumi.get(self, "error_limit")
@pulumi.output_type
class ConnectorMappingFormatResponse(dict):
"""
Connector mapping property format.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "formatType":
suggest = "format_type"
elif key == "acceptLanguage":
suggest = "accept_language"
elif key == "arraySeparator":
suggest = "array_separator"
elif key == "columnDelimiter":
suggest = "column_delimiter"
elif key == "quoteCharacter":
suggest = "quote_character"
elif key == "quoteEscapeCharacter":
suggest = "quote_escape_character"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ConnectorMappingFormatResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ConnectorMappingFormatResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ConnectorMappingFormatResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
format_type: str,
accept_language: Optional[str] = None,
array_separator: Optional[str] = None,
column_delimiter: Optional[str] = None,
quote_character: Optional[str] = None,
quote_escape_character: Optional[str] = None):
"""
Connector mapping property format.
        :param str format_type: The type of the mapping format.
:param str accept_language: The oData language.
:param str array_separator: Character separating array elements.
:param str column_delimiter: The character that signifies a break between columns.
:param str quote_character: Quote character, used to indicate enquoted fields.
:param str quote_escape_character: Escape character for quotes, can be the same as the quoteCharacter.
"""
pulumi.set(__self__, "format_type", format_type)
if accept_language is not None:
pulumi.set(__self__, "accept_language", accept_language)
if array_separator is not None:
pulumi.set(__self__, "array_separator", array_separator)
if column_delimiter is not None:
pulumi.set(__self__, "column_delimiter", column_delimiter)
if quote_character is not None:
pulumi.set(__self__, "quote_character", quote_character)
if quote_escape_character is not None:
pulumi.set(__self__, "quote_escape_character", quote_escape_character)
@property
@pulumi.getter(name="formatType")
def format_type(self) -> str:
"""
        The type of the mapping format.
"""
return pulumi.get(self, "format_type")
@property
@pulumi.getter(name="acceptLanguage")
def accept_language(self) -> Optional[str]:
"""
The oData language.
"""
return pulumi.get(self, "accept_language")
@property
@pulumi.getter(name="arraySeparator")
def array_separator(self) -> Optional[str]:
"""
Character separating array elements.
"""
return pulumi.get(self, "array_separator")
@property
@pulumi.getter(name="columnDelimiter")
def column_delimiter(self) -> Optional[str]:
"""
The character that signifies a break between columns.
"""
return pulumi.get(self, "column_delimiter")
@property
@pulumi.getter(name="quoteCharacter")
def quote_character(self) -> Optional[str]:
"""
Quote character, used to indicate enquoted fields.
"""
return pulumi.get(self, "quote_character")
@property
@pulumi.getter(name="quoteEscapeCharacter")
def quote_escape_character(self) -> Optional[str]:
"""
Escape character for quotes, can be the same as the quoteCharacter.
"""
return pulumi.get(self, "quote_escape_character")
@pulumi.output_type
class ConnectorMappingPropertiesResponse(dict):
"""
The connector mapping properties.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "completeOperation":
suggest = "complete_operation"
elif key == "errorManagement":
suggest = "error_management"
elif key == "fileFilter":
suggest = "file_filter"
elif key == "folderPath":
suggest = "folder_path"
elif key == "hasHeader":
suggest = "has_header"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ConnectorMappingPropertiesResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ConnectorMappingPropertiesResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ConnectorMappingPropertiesResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
availability: 'outputs.ConnectorMappingAvailabilityResponse',
complete_operation: 'outputs.ConnectorMappingCompleteOperationResponse',
error_management: 'outputs.ConnectorMappingErrorManagementResponse',
format: 'outputs.ConnectorMappingFormatResponse',
structure: Sequence['outputs.ConnectorMappingStructureResponse'],
file_filter: Optional[str] = None,
folder_path: Optional[str] = None,
has_header: Optional[bool] = None):
"""
The connector mapping properties.
:param 'ConnectorMappingAvailabilityResponse' availability: The availability of mapping property.
:param 'ConnectorMappingCompleteOperationResponse' complete_operation: The operation after import is done.
:param 'ConnectorMappingErrorManagementResponse' error_management: The error management setting for the mapping.
:param 'ConnectorMappingFormatResponse' format: The format of mapping property.
:param Sequence['ConnectorMappingStructureResponse'] structure: Ingestion mapping information at property level.
:param str file_filter: The file filter for the mapping.
:param str folder_path: The folder path for the mapping.
:param bool has_header: If the file contains a header or not.
"""
pulumi.set(__self__, "availability", availability)
pulumi.set(__self__, "complete_operation", complete_operation)
pulumi.set(__self__, "error_management", error_management)
pulumi.set(__self__, "format", format)
pulumi.set(__self__, "structure", structure)
if file_filter is not None:
pulumi.set(__self__, "file_filter", file_filter)
if folder_path is not None:
pulumi.set(__self__, "folder_path", folder_path)
if has_header is not None:
pulumi.set(__self__, "has_header", has_header)
@property
@pulumi.getter
def availability(self) -> 'outputs.ConnectorMappingAvailabilityResponse':
"""
The availability of mapping property.
"""
return pulumi.get(self, "availability")
@property
@pulumi.getter(name="completeOperation")
def complete_operation(self) -> 'outputs.ConnectorMappingCompleteOperationResponse':
"""
The operation after import is done.
"""
return pulumi.get(self, "complete_operation")
@property
@pulumi.getter(name="errorManagement")
def error_management(self) -> 'outputs.ConnectorMappingErrorManagementResponse':
"""
The error management setting for the mapping.
"""
return pulumi.get(self, "error_management")
@property
@pulumi.getter
def format(self) -> 'outputs.ConnectorMappingFormatResponse':
"""
The format of mapping property.
"""
return pulumi.get(self, "format")
@property
@pulumi.getter
def structure(self) -> Sequence['outputs.ConnectorMappingStructureResponse']:
"""
Ingestion mapping information at property level.
"""
return pulumi.get(self, "structure")
@property
@pulumi.getter(name="fileFilter")
def file_filter(self) -> Optional[str]:
"""
The file filter for the mapping.
"""
return pulumi.get(self, "file_filter")
@property
@pulumi.getter(name="folderPath")
def folder_path(self) -> Optional[str]:
"""
The folder path for the mapping.
"""
return pulumi.get(self, "folder_path")
@property
@pulumi.getter(name="hasHeader")
def has_header(self) -> Optional[bool]:
"""
If the file contains a header or not.
"""
return pulumi.get(self, "has_header")
@pulumi.output_type
class ConnectorMappingStructureResponse(dict):
"""
Connector mapping property structure.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "columnName":
suggest = "column_name"
elif key == "propertyName":
suggest = "property_name"
elif key == "customFormatSpecifier":
suggest = "custom_format_specifier"
elif key == "isEncrypted":
suggest = "is_encrypted"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ConnectorMappingStructureResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ConnectorMappingStructureResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ConnectorMappingStructureResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
column_name: str,
property_name: str,
custom_format_specifier: Optional[str] = None,
is_encrypted: Optional[bool] = None):
"""
Connector mapping property structure.
:param str column_name: The column name of the import file.
:param str property_name: The property name of the mapping entity.
:param str custom_format_specifier: Custom format specifier for input parsing.
:param bool is_encrypted: Indicates if the column is encrypted.
"""
pulumi.set(__self__, "column_name", column_name)
pulumi.set(__self__, "property_name", property_name)
if custom_format_specifier is not None:
pulumi.set(__self__, "custom_format_specifier", custom_format_specifier)
if is_encrypted is not None:
pulumi.set(__self__, "is_encrypted", is_encrypted)
@property
@pulumi.getter(name="columnName")
def column_name(self) -> str:
"""
The column name of the import file.
"""
return pulumi.get(self, "column_name")
@property
@pulumi.getter(name="propertyName")
def property_name(self) -> str:
"""
The property name of the mapping entity.
"""
return pulumi.get(self, "property_name")
@property
@pulumi.getter(name="customFormatSpecifier")
def custom_format_specifier(self) -> Optional[str]:
"""
Custom format specifier for input parsing.
"""
return pulumi.get(self, "custom_format_specifier")
@property
@pulumi.getter(name="isEncrypted")
def is_encrypted(self) -> Optional[bool]:
"""
Indicates if the column is encrypted.
"""
return pulumi.get(self, "is_encrypted")
@pulumi.output_type
class DataSourcePrecedenceResponse(dict):
"""
The data source precedence is a way to know the precedence of each data source.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "dataSourceReferenceId":
suggest = "data_source_reference_id"
elif key == "dataSourceType":
suggest = "data_source_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in DataSourcePrecedenceResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
DataSourcePrecedenceResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
DataSourcePrecedenceResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
data_source_reference_id: str,
data_source_type: str,
id: int,
name: str,
status: str,
precedence: Optional[int] = None):
"""
The data source precedence is a way to know the precedence of each data source.
:param str data_source_reference_id: The data source reference id.
:param str data_source_type: The data source type.
:param int id: The data source ID.
        :param str name: The data source name.
        :param str status: The data source status.
        :param int precedence: The precedence value.
"""
pulumi.set(__self__, "data_source_reference_id", data_source_reference_id)
pulumi.set(__self__, "data_source_type", data_source_type)
pulumi.set(__self__, "id", id)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "status", status)
if precedence is not None:
pulumi.set(__self__, "precedence", precedence)
@property
@pulumi.getter(name="dataSourceReferenceId")
def data_source_reference_id(self) -> str:
"""
The data source reference id.
"""
return pulumi.get(self, "data_source_reference_id")
@property
@pulumi.getter(name="dataSourceType")
def data_source_type(self) -> str:
"""
The data source type.
"""
return pulumi.get(self, "data_source_type")
@property
@pulumi.getter
def id(self) -> int:
"""
The data source ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
        The data source name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def status(self) -> str:
"""
The data source status.
"""
return pulumi.get(self, "status")
@property
@pulumi.getter
def precedence(self) -> Optional[int]:
"""
        The precedence value.
"""
return pulumi.get(self, "precedence")
@pulumi.output_type
class HubBillingInfoFormatResponse(dict):
"""
Hub billing info.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "maxUnits":
suggest = "max_units"
elif key == "minUnits":
suggest = "min_units"
elif key == "skuName":
suggest = "sku_name"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in HubBillingInfoFormatResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
HubBillingInfoFormatResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
HubBillingInfoFormatResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
max_units: Optional[int] = None,
min_units: Optional[int] = None,
sku_name: Optional[str] = None):
"""
Hub billing info.
        :param int max_units: The maximum number of units that can be used. One unit is 10,000 Profiles and 100,000 Interactions.
        :param int min_units: The minimum number of units that will be billed. One unit is 10,000 Profiles and 100,000 Interactions.
:param str sku_name: The sku name.
"""
if max_units is not None:
pulumi.set(__self__, "max_units", max_units)
if min_units is not None:
pulumi.set(__self__, "min_units", min_units)
if sku_name is not None:
pulumi.set(__self__, "sku_name", sku_name)
@property
@pulumi.getter(name="maxUnits")
def max_units(self) -> Optional[int]:
"""
        The maximum number of units that can be used. One unit is 10,000 Profiles and 100,000 Interactions.
"""
return pulumi.get(self, "max_units")
@property
@pulumi.getter(name="minUnits")
def min_units(self) -> Optional[int]:
"""
        The minimum number of units that will be billed. One unit is 10,000 Profiles and 100,000 Interactions.
"""
return pulumi.get(self, "min_units")
@property
@pulumi.getter(name="skuName")
def sku_name(self) -> Optional[str]:
"""
The sku name.
"""
return pulumi.get(self, "sku_name")
@pulumi.output_type
class KpiAliasResponse(dict):
"""
The KPI alias.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "aliasName":
suggest = "alias_name"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in KpiAliasResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
KpiAliasResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
KpiAliasResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
alias_name: str,
expression: str):
"""
The KPI alias.
:param str alias_name: KPI alias name.
:param str expression: The expression.
"""
pulumi.set(__self__, "alias_name", alias_name)
pulumi.set(__self__, "expression", expression)
@property
@pulumi.getter(name="aliasName")
def alias_name(self) -> str:
"""
KPI alias name.
"""
return pulumi.get(self, "alias_name")
@property
@pulumi.getter
def expression(self) -> str:
"""
The expression.
"""
return pulumi.get(self, "expression")
@pulumi.output_type
class KpiExtractResponse(dict):
"""
The KPI extract.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "extractName":
suggest = "extract_name"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in KpiExtractResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
KpiExtractResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
KpiExtractResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
expression: str,
extract_name: str):
"""
The KPI extract.
:param str expression: The expression.
:param str extract_name: KPI extract name.
"""
pulumi.set(__self__, "expression", expression)
pulumi.set(__self__, "extract_name", extract_name)
@property
@pulumi.getter
def expression(self) -> str:
"""
The expression.
"""
return pulumi.get(self, "expression")
@property
@pulumi.getter(name="extractName")
def extract_name(self) -> str:
"""
KPI extract name.
"""
return pulumi.get(self, "extract_name")
@pulumi.output_type
class KpiGroupByMetadataResponse(dict):
"""
The KPI GroupBy field metadata.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "displayName":
suggest = "display_name"
elif key == "fieldName":
suggest = "field_name"
elif key == "fieldType":
suggest = "field_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in KpiGroupByMetadataResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
KpiGroupByMetadataResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
KpiGroupByMetadataResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
display_name: Optional[Mapping[str, str]] = None,
field_name: Optional[str] = None,
field_type: Optional[str] = None):
"""
The KPI GroupBy field metadata.
:param Mapping[str, str] display_name: The display name.
:param str field_name: The name of the field.
:param str field_type: The type of the field.
"""
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if field_name is not None:
pulumi.set(__self__, "field_name", field_name)
if field_type is not None:
pulumi.set(__self__, "field_type", field_type)
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[Mapping[str, str]]:
"""
The display name.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="fieldName")
def field_name(self) -> Optional[str]:
"""
The name of the field.
"""
return pulumi.get(self, "field_name")
@property
@pulumi.getter(name="fieldType")
def field_type(self) -> Optional[str]:
"""
The type of the field.
"""
return pulumi.get(self, "field_type")
@pulumi.output_type
class KpiParticipantProfilesMetadataResponse(dict):
"""
The KPI participant profile metadata.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "typeName":
suggest = "type_name"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in KpiParticipantProfilesMetadataResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
KpiParticipantProfilesMetadataResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
KpiParticipantProfilesMetadataResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
type_name: str):
"""
The KPI participant profile metadata.
:param str type_name: Name of the type.
"""
pulumi.set(__self__, "type_name", type_name)
@property
@pulumi.getter(name="typeName")
def type_name(self) -> str:
"""
Name of the type.
"""
return pulumi.get(self, "type_name")
@pulumi.output_type
class KpiThresholdsResponse(dict):
"""
Defines the KPI Threshold limits.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "increasingKpi":
suggest = "increasing_kpi"
elif key == "lowerLimit":
suggest = "lower_limit"
elif key == "upperLimit":
suggest = "upper_limit"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in KpiThresholdsResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
KpiThresholdsResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
KpiThresholdsResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
increasing_kpi: bool,
lower_limit: float,
upper_limit: float):
"""
Defines the KPI Threshold limits.
:param bool increasing_kpi: Whether or not the KPI is an increasing KPI.
:param float lower_limit: The lower threshold limit.
:param float upper_limit: The upper threshold limit.
"""
pulumi.set(__self__, "increasing_kpi", increasing_kpi)
pulumi.set(__self__, "lower_limit", lower_limit)
pulumi.set(__self__, "upper_limit", upper_limit)
@property
@pulumi.getter(name="increasingKpi")
def increasing_kpi(self) -> bool:
"""
Whether or not the KPI is an increasing KPI.
"""
return pulumi.get(self, "increasing_kpi")
@property
@pulumi.getter(name="lowerLimit")
def lower_limit(self) -> float:
"""
The lower threshold limit.
"""
return pulumi.get(self, "lower_limit")
@property
@pulumi.getter(name="upperLimit")
def upper_limit(self) -> float:
"""
The upper threshold limit.
"""
return pulumi.get(self, "upper_limit")
@pulumi.output_type
class ParticipantProfilePropertyReferenceResponse(dict):
"""
The participant profile property reference.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "interactionPropertyName":
suggest = "interaction_property_name"
elif key == "profilePropertyName":
suggest = "profile_property_name"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ParticipantProfilePropertyReferenceResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ParticipantProfilePropertyReferenceResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ParticipantProfilePropertyReferenceResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
interaction_property_name: str,
profile_property_name: str):
"""
The participant profile property reference.
:param str interaction_property_name: The source interaction property that maps to the target profile property.
:param str profile_property_name: The target profile property that maps to the source interaction property.
"""
pulumi.set(__self__, "interaction_property_name", interaction_property_name)
pulumi.set(__self__, "profile_property_name", profile_property_name)
@property
@pulumi.getter(name="interactionPropertyName")
def interaction_property_name(self) -> str:
"""
The source interaction property that maps to the target profile property.
"""
return pulumi.get(self, "interaction_property_name")
@property
@pulumi.getter(name="profilePropertyName")
def profile_property_name(self) -> str:
"""
The target profile property that maps to the source interaction property.
"""
return pulumi.get(self, "profile_property_name")
@pulumi.output_type
class ParticipantPropertyReferenceResponse(dict):
"""
The participant property reference.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "sourcePropertyName":
suggest = "source_property_name"
elif key == "targetPropertyName":
suggest = "target_property_name"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ParticipantPropertyReferenceResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ParticipantPropertyReferenceResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ParticipantPropertyReferenceResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
source_property_name: str,
target_property_name: str):
"""
The participant property reference.
:param str source_property_name: The source property that maps to the target property.
:param str target_property_name: The target property that maps to the source property.
"""
pulumi.set(__self__, "source_property_name", source_property_name)
pulumi.set(__self__, "target_property_name", target_property_name)
@property
@pulumi.getter(name="sourcePropertyName")
def source_property_name(self) -> str:
"""
The source property that maps to the target property.
"""
return pulumi.get(self, "source_property_name")
@property
@pulumi.getter(name="targetPropertyName")
def target_property_name(self) -> str:
"""
The target property that maps to the source property.
"""
return pulumi.get(self, "target_property_name")
@pulumi.output_type
class PredictionDistributionDefinitionResponse(dict):
"""
The definition of the prediction distribution.
"""
def __init__(__self__, *,
distributions: Optional[Sequence['outputs.PredictionDistributionDefinitionResponseDistributions']] = None,
total_negatives: Optional[float] = None,
total_positives: Optional[float] = None):
"""
The definition of the prediction distribution.
:param Sequence['PredictionDistributionDefinitionResponseDistributions'] distributions: Distributions of the prediction.
:param float total_negatives: Total negatives in the distribution.
        :param float total_positives: Total positives in the distribution.
"""
if distributions is not None:
pulumi.set(__self__, "distributions", distributions)
if total_negatives is not None:
pulumi.set(__self__, "total_negatives", total_negatives)
if total_positives is not None:
pulumi.set(__self__, "total_positives", total_positives)
@property
@pulumi.getter
def distributions(self) -> Optional[Sequence['outputs.PredictionDistributionDefinitionResponseDistributions']]:
"""
Distributions of the prediction.
"""
return pulumi.get(self, "distributions")
@property
@pulumi.getter(name="totalNegatives")
def total_negatives(self) -> Optional[float]:
"""
Total negatives in the distribution.
"""
return pulumi.get(self, "total_negatives")
@property
@pulumi.getter(name="totalPositives")
def total_positives(self) -> Optional[float]:
"""
        Total positives in the distribution.
"""
return pulumi.get(self, "total_positives")
@pulumi.output_type
class PredictionDistributionDefinitionResponseDistributions(dict):
"""
The definition of a prediction distribution.
"""
def __init__(__self__, *,
negatives: Optional[float] = None,
negatives_above_threshold: Optional[float] = None,
positives: Optional[float] = None,
positives_above_threshold: Optional[float] = None,
score_threshold: Optional[int] = None):
"""
The definition of a prediction distribution.
:param float negatives: Number of negatives.
:param float negatives_above_threshold: Number of negatives above threshold.
:param float positives: Number of positives.
:param float positives_above_threshold: Number of positives above threshold.
:param int score_threshold: Score threshold.
"""
if negatives is not None:
pulumi.set(__self__, "negatives", negatives)
if negatives_above_threshold is not None:
pulumi.set(__self__, "negatives_above_threshold", negatives_above_threshold)
if positives is not None:
pulumi.set(__self__, "positives", positives)
if positives_above_threshold is not None:
pulumi.set(__self__, "positives_above_threshold", positives_above_threshold)
if score_threshold is not None:
pulumi.set(__self__, "score_threshold", score_threshold)
@property
@pulumi.getter
def negatives(self) -> Optional[float]:
"""
Number of negatives.
"""
return pulumi.get(self, "negatives")
@property
@pulumi.getter(name="negativesAboveThreshold")
def negatives_above_threshold(self) -> Optional[float]:
"""
Number of negatives above threshold.
"""
return pulumi.get(self, "negatives_above_threshold")
@property
@pulumi.getter
def positives(self) -> Optional[float]:
"""
Number of positives.
"""
return pulumi.get(self, "positives")
@property
@pulumi.getter(name="positivesAboveThreshold")
def positives_above_threshold(self) -> Optional[float]:
"""
Number of positives above threshold.
"""
return pulumi.get(self, "positives_above_threshold")
@property
@pulumi.getter(name="scoreThreshold")
def score_threshold(self) -> Optional[int]:
"""
Score threshold.
"""
return pulumi.get(self, "score_threshold")
@pulumi.output_type
class PredictionResponseGrades(dict):
"""
The definition of a prediction grade.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "gradeName":
suggest = "grade_name"
elif key == "maxScoreThreshold":
suggest = "max_score_threshold"
elif key == "minScoreThreshold":
suggest = "min_score_threshold"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in PredictionResponseGrades. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
PredictionResponseGrades.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
PredictionResponseGrades.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
grade_name: Optional[str] = None,
max_score_threshold: Optional[int] = None,
min_score_threshold: Optional[int] = None):
"""
The definition of a prediction grade.
:param str grade_name: Name of the grade.
:param int max_score_threshold: Maximum score threshold.
:param int min_score_threshold: Minimum score threshold.
"""
if grade_name is not None:
pulumi.set(__self__, "grade_name", grade_name)
if max_score_threshold is not None:
pulumi.set(__self__, "max_score_threshold", max_score_threshold)
if min_score_threshold is not None:
pulumi.set(__self__, "min_score_threshold", min_score_threshold)
@property
@pulumi.getter(name="gradeName")
def grade_name(self) -> Optional[str]:
"""
Name of the grade.
"""
return pulumi.get(self, "grade_name")
@property
@pulumi.getter(name="maxScoreThreshold")
def max_score_threshold(self) -> Optional[int]:
"""
Maximum score threshold.
"""
return pulumi.get(self, "max_score_threshold")
@property
@pulumi.getter(name="minScoreThreshold")
def min_score_threshold(self) -> Optional[int]:
"""
Minimum score threshold.
"""
return pulumi.get(self, "min_score_threshold")
@pulumi.output_type
class PredictionResponseMappings(dict):
"""
Definition of the link mapping of prediction.
"""
def __init__(__self__, *,
grade: str,
reason: str,
score: str):
"""
Definition of the link mapping of prediction.
:param str grade: The grade of the link mapping.
:param str reason: The reason of the link mapping.
:param str score: The score of the link mapping.
"""
pulumi.set(__self__, "grade", grade)
pulumi.set(__self__, "reason", reason)
pulumi.set(__self__, "score", score)
@property
@pulumi.getter
def grade(self) -> str:
"""
The grade of the link mapping.
"""
return pulumi.get(self, "grade")
@property
@pulumi.getter
def reason(self) -> str:
"""
The reason of the link mapping.
"""
return pulumi.get(self, "reason")
@property
@pulumi.getter
def score(self) -> str:
"""
The score of the link mapping.
"""
return pulumi.get(self, "score")
@pulumi.output_type
class PredictionResponseSystemGeneratedEntities(dict):
"""
System generated entities.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "generatedInteractionTypes":
suggest = "generated_interaction_types"
elif key == "generatedKpis":
suggest = "generated_kpis"
elif key == "generatedLinks":
suggest = "generated_links"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in PredictionResponseSystemGeneratedEntities. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
PredictionResponseSystemGeneratedEntities.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
PredictionResponseSystemGeneratedEntities.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
generated_interaction_types: Optional[Sequence[str]] = None,
generated_kpis: Optional[Mapping[str, str]] = None,
generated_links: Optional[Sequence[str]] = None):
"""
System generated entities.
:param Sequence[str] generated_interaction_types: Generated interaction types.
:param Mapping[str, str] generated_kpis: Generated KPIs.
:param Sequence[str] generated_links: Generated links.
"""
if generated_interaction_types is not None:
pulumi.set(__self__, "generated_interaction_types", generated_interaction_types)
if generated_kpis is not None:
pulumi.set(__self__, "generated_kpis", generated_kpis)
if generated_links is not None:
pulumi.set(__self__, "generated_links", generated_links)
@property
@pulumi.getter(name="generatedInteractionTypes")
def generated_interaction_types(self) -> Optional[Sequence[str]]:
"""
Generated interaction types.
"""
return pulumi.get(self, "generated_interaction_types")
@property
@pulumi.getter(name="generatedKpis")
def generated_kpis(self) -> Optional[Mapping[str, str]]:
"""
Generated KPIs.
"""
return pulumi.get(self, "generated_kpis")
@property
@pulumi.getter(name="generatedLinks")
def generated_links(self) -> Optional[Sequence[str]]:
"""
Generated links.
"""
return pulumi.get(self, "generated_links")
@pulumi.output_type
class ProfileEnumValidValuesFormatResponse(dict):
"""
Valid enum values in case of an enum property.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "localizedValueNames":
suggest = "localized_value_names"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ProfileEnumValidValuesFormatResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ProfileEnumValidValuesFormatResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ProfileEnumValidValuesFormatResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
localized_value_names: Optional[Mapping[str, str]] = None,
value: Optional[int] = None):
"""
Valid enum values in case of an enum property.
:param Mapping[str, str] localized_value_names: Localized names of the enum member.
:param int value: The integer value of the enum member.
"""
if localized_value_names is not None:
pulumi.set(__self__, "localized_value_names", localized_value_names)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="localizedValueNames")
def localized_value_names(self) -> Optional[Mapping[str, str]]:
"""
Localized names of the enum member.
"""
return pulumi.get(self, "localized_value_names")
@property
@pulumi.getter
def value(self) -> Optional[int]:
"""
The integer value of the enum member.
"""
return pulumi.get(self, "value")
@pulumi.output_type
class PropertyDefinitionResponse(dict):
"""
Property definition.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "dataSourcePrecedenceRules":
suggest = "data_source_precedence_rules"
elif key == "fieldName":
suggest = "field_name"
elif key == "fieldType":
suggest = "field_type"
elif key == "arrayValueSeparator":
suggest = "array_value_separator"
elif key == "enumValidValues":
suggest = "enum_valid_values"
elif key == "isArray":
suggest = "is_array"
elif key == "isAvailableInGraph":
suggest = "is_available_in_graph"
elif key == "isEnum":
suggest = "is_enum"
elif key == "isFlagEnum":
suggest = "is_flag_enum"
elif key == "isImage":
suggest = "is_image"
elif key == "isLocalizedString":
suggest = "is_localized_string"
elif key == "isName":
suggest = "is_name"
elif key == "isRequired":
suggest = "is_required"
elif key == "maxLength":
suggest = "max_length"
elif key == "propertyId":
suggest = "property_id"
elif key == "schemaItemPropLink":
suggest = "schema_item_prop_link"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in PropertyDefinitionResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
PropertyDefinitionResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
PropertyDefinitionResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
data_source_precedence_rules: Sequence['outputs.DataSourcePrecedenceResponse'],
field_name: str,
field_type: str,
array_value_separator: Optional[str] = None,
enum_valid_values: Optional[Sequence['outputs.ProfileEnumValidValuesFormatResponse']] = None,
is_array: Optional[bool] = None,
is_available_in_graph: Optional[bool] = None,
is_enum: Optional[bool] = None,
is_flag_enum: Optional[bool] = None,
is_image: Optional[bool] = None,
is_localized_string: Optional[bool] = None,
is_name: Optional[bool] = None,
is_required: Optional[bool] = None,
max_length: Optional[int] = None,
property_id: Optional[str] = None,
schema_item_prop_link: Optional[str] = None):
"""
Property definition.
:param Sequence['DataSourcePrecedenceResponse'] data_source_precedence_rules: This is specific to interactions modeled as activities. Data sources are used to determine where data is stored and also in precedence rules.
:param str field_name: Name of the property.
:param str field_type: Type of the property.
:param str array_value_separator: Array value separator for properties with isArray set.
:param Sequence['ProfileEnumValidValuesFormatResponse'] enum_valid_values: Describes valid values for an enum property.
        :param bool is_array: Indicates if the property is actually an array of the fieldType above on the data API.
:param bool is_available_in_graph: Whether property is available in graph or not.
:param bool is_enum: Indicates if the property is an enum.
        :param bool is_flag_enum: Indicates if the property is a flag enum.
:param bool is_image: Whether the property is an Image.
:param bool is_localized_string: Whether the property is a localized string.
        :param bool is_name: Whether the property is a name or part of a name.
        :param bool is_required: Whether a property value is required on instances. The IsRequired field applies only to Interactions; Profile instances do not check for required fields.
:param int max_length: Max length of string. Used only if type is string.
:param str property_id: The ID associated with the property.
:param str schema_item_prop_link: URL encoded schema.org item prop link for the property.
"""
pulumi.set(__self__, "data_source_precedence_rules", data_source_precedence_rules)
pulumi.set(__self__, "field_name", field_name)
pulumi.set(__self__, "field_type", field_type)
if array_value_separator is not None:
pulumi.set(__self__, "array_value_separator", array_value_separator)
if enum_valid_values is not None:
pulumi.set(__self__, "enum_valid_values", enum_valid_values)
if is_array is not None:
pulumi.set(__self__, "is_array", is_array)
if is_available_in_graph is not None:
pulumi.set(__self__, "is_available_in_graph", is_available_in_graph)
if is_enum is not None:
pulumi.set(__self__, "is_enum", is_enum)
if is_flag_enum is not None:
pulumi.set(__self__, "is_flag_enum", is_flag_enum)
if is_image is not None:
pulumi.set(__self__, "is_image", is_image)
if is_localized_string is not None:
pulumi.set(__self__, "is_localized_string", is_localized_string)
if is_name is not None:
pulumi.set(__self__, "is_name", is_name)
if is_required is not None:
pulumi.set(__self__, "is_required", is_required)
if max_length is not None:
pulumi.set(__self__, "max_length", max_length)
if property_id is not None:
pulumi.set(__self__, "property_id", property_id)
if schema_item_prop_link is not None:
pulumi.set(__self__, "schema_item_prop_link", schema_item_prop_link)
@property
@pulumi.getter(name="dataSourcePrecedenceRules")
def data_source_precedence_rules(self) -> Sequence['outputs.DataSourcePrecedenceResponse']:
"""
This is specific to interactions modeled as activities. Data sources are used to determine where data is stored and also in precedence rules.
"""
return pulumi.get(self, "data_source_precedence_rules")
@property
@pulumi.getter(name="fieldName")
def field_name(self) -> str:
"""
Name of the property.
"""
return pulumi.get(self, "field_name")
@property
@pulumi.getter(name="fieldType")
def field_type(self) -> str:
"""
Type of the property.
"""
return pulumi.get(self, "field_type")
@property
@pulumi.getter(name="arrayValueSeparator")
def array_value_separator(self) -> Optional[str]:
"""
Array value separator for properties with isArray set.
"""
return pulumi.get(self, "array_value_separator")
@property
@pulumi.getter(name="enumValidValues")
def enum_valid_values(self) -> Optional[Sequence['outputs.ProfileEnumValidValuesFormatResponse']]:
"""
Describes valid values for an enum property.
"""
return pulumi.get(self, "enum_valid_values")
@property
@pulumi.getter(name="isArray")
def is_array(self) -> Optional[bool]:
"""
        Indicates if the property is actually an array of the fieldType above on the data API.
"""
return pulumi.get(self, "is_array")
@property
@pulumi.getter(name="isAvailableInGraph")
def is_available_in_graph(self) -> Optional[bool]:
"""
Whether property is available in graph or not.
"""
return pulumi.get(self, "is_available_in_graph")
@property
@pulumi.getter(name="isEnum")
def is_enum(self) -> Optional[bool]:
"""
Indicates if the property is an enum.
"""
return pulumi.get(self, "is_enum")
@property
@pulumi.getter(name="isFlagEnum")
def is_flag_enum(self) -> Optional[bool]:
"""
        Indicates if the property is a flag enum.
"""
return pulumi.get(self, "is_flag_enum")
@property
@pulumi.getter(name="isImage")
def is_image(self) -> Optional[bool]:
"""
Whether the property is an Image.
"""
return pulumi.get(self, "is_image")
@property
@pulumi.getter(name="isLocalizedString")
def is_localized_string(self) -> Optional[bool]:
"""
Whether the property is a localized string.
"""
return pulumi.get(self, "is_localized_string")
@property
@pulumi.getter(name="isName")
def is_name(self) -> Optional[bool]:
"""
        Whether the property is a name or part of a name.
"""
return pulumi.get(self, "is_name")
@property
@pulumi.getter(name="isRequired")
def is_required(self) -> Optional[bool]:
"""
        Whether a property value is required on instances. The IsRequired field applies only to Interactions; Profile instances do not check for required fields.
"""
return pulumi.get(self, "is_required")
@property
@pulumi.getter(name="maxLength")
def max_length(self) -> Optional[int]:
"""
Max length of string. Used only if type is string.
"""
return pulumi.get(self, "max_length")
@property
@pulumi.getter(name="propertyId")
def property_id(self) -> Optional[str]:
"""
The ID associated with the property.
"""
return pulumi.get(self, "property_id")
@property
@pulumi.getter(name="schemaItemPropLink")
def schema_item_prop_link(self) -> Optional[str]:
"""
URL encoded schema.org item prop link for the property.
"""
return pulumi.get(self, "schema_item_prop_link")
@pulumi.output_type
class RelationshipLinkFieldMappingResponse(dict):
"""
The fields mapping for Relationships.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "interactionFieldName":
suggest = "interaction_field_name"
elif key == "relationshipFieldName":
suggest = "relationship_field_name"
elif key == "linkType":
suggest = "link_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RelationshipLinkFieldMappingResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RelationshipLinkFieldMappingResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RelationshipLinkFieldMappingResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
interaction_field_name: str,
relationship_field_name: str,
link_type: Optional[str] = None):
"""
The fields mapping for Relationships.
:param str interaction_field_name: The field name on the Interaction Type.
:param str relationship_field_name: The field name on the Relationship metadata.
:param str link_type: Link type.
"""
pulumi.set(__self__, "interaction_field_name", interaction_field_name)
pulumi.set(__self__, "relationship_field_name", relationship_field_name)
if link_type is not None:
pulumi.set(__self__, "link_type", link_type)
@property
@pulumi.getter(name="interactionFieldName")
def interaction_field_name(self) -> str:
"""
The field name on the Interaction Type.
"""
return pulumi.get(self, "interaction_field_name")
@property
@pulumi.getter(name="relationshipFieldName")
def relationship_field_name(self) -> str:
"""
The field name on the Relationship metadata.
"""
return pulumi.get(self, "relationship_field_name")
@property
@pulumi.getter(name="linkType")
def link_type(self) -> Optional[str]:
"""
Link type.
"""
return pulumi.get(self, "link_type")
@pulumi.output_type
class RelationshipTypeFieldMappingResponse(dict):
"""
Map a field of profile to its corresponding StrongId in Related Profile.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "profileFieldName":
suggest = "profile_field_name"
elif key == "relatedProfileKeyProperty":
suggest = "related_profile_key_property"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RelationshipTypeFieldMappingResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RelationshipTypeFieldMappingResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RelationshipTypeFieldMappingResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
profile_field_name: str,
related_profile_key_property: str):
"""
Map a field of profile to its corresponding StrongId in Related Profile.
:param str profile_field_name: Specifies the fieldName in profile.
:param str related_profile_key_property: Specifies the KeyProperty (from StrongId) of the related profile.
"""
pulumi.set(__self__, "profile_field_name", profile_field_name)
pulumi.set(__self__, "related_profile_key_property", related_profile_key_property)
@property
@pulumi.getter(name="profileFieldName")
def profile_field_name(self) -> str:
"""
Specifies the fieldName in profile.
"""
return pulumi.get(self, "profile_field_name")
@property
@pulumi.getter(name="relatedProfileKeyProperty")
def related_profile_key_property(self) -> str:
"""
Specifies the KeyProperty (from StrongId) of the related profile.
"""
return pulumi.get(self, "related_profile_key_property")
@pulumi.output_type
class RelationshipTypeMappingResponse(dict):
"""
Maps fields in Profile to their corresponding StrongIds in Related Profile.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "fieldMappings":
suggest = "field_mappings"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RelationshipTypeMappingResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RelationshipTypeMappingResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RelationshipTypeMappingResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
field_mappings: Sequence['outputs.RelationshipTypeFieldMappingResponse']):
"""
Maps fields in Profile to their corresponding StrongIds in Related Profile.
        :param Sequence['RelationshipTypeFieldMappingResponse'] field_mappings: Maps a profile property with the StrongId of the related profile. This is an array to support StrongIds that are composite keys as well.
"""
pulumi.set(__self__, "field_mappings", field_mappings)
@property
@pulumi.getter(name="fieldMappings")
def field_mappings(self) -> Sequence['outputs.RelationshipTypeFieldMappingResponse']:
"""
        Maps a profile property with the StrongId of the related profile. This is an array to support StrongIds that are composite keys as well.
"""
return pulumi.get(self, "field_mappings")
@pulumi.output_type
class ResourceSetDescriptionResponse(dict):
"""
The resource set description.
"""
def __init__(__self__, *,
elements: Optional[Sequence[str]] = None,
exceptions: Optional[Sequence[str]] = None):
"""
The resource set description.
:param Sequence[str] elements: The elements included in the set.
:param Sequence[str] exceptions: The elements that are not included in the set, in case elements contains '*' indicating 'all'.
"""
if elements is not None:
pulumi.set(__self__, "elements", elements)
if exceptions is not None:
pulumi.set(__self__, "exceptions", exceptions)
@property
@pulumi.getter
def elements(self) -> Optional[Sequence[str]]:
"""
The elements included in the set.
"""
return pulumi.get(self, "elements")
@property
@pulumi.getter
def exceptions(self) -> Optional[Sequence[str]]:
"""
The elements that are not included in the set, in case elements contains '*' indicating 'all'.
"""
return pulumi.get(self, "exceptions")
@pulumi.output_type
class StrongIdResponse(dict):
"""
Property/Properties which represent a unique ID.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "keyPropertyNames":
suggest = "key_property_names"
elif key == "strongIdName":
suggest = "strong_id_name"
elif key == "displayName":
suggest = "display_name"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in StrongIdResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
StrongIdResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
StrongIdResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
key_property_names: Sequence[str],
strong_id_name: str,
description: Optional[Mapping[str, str]] = None,
display_name: Optional[Mapping[str, str]] = None):
"""
Property/Properties which represent a unique ID.
:param Sequence[str] key_property_names: The properties which make up the unique ID.
:param str strong_id_name: The Name identifying the strong ID.
:param Mapping[str, str] description: Localized descriptions.
:param Mapping[str, str] display_name: Localized display name.
"""
pulumi.set(__self__, "key_property_names", key_property_names)
pulumi.set(__self__, "strong_id_name", strong_id_name)
if description is not None:
pulumi.set(__self__, "description", description)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
@property
@pulumi.getter(name="keyPropertyNames")
def key_property_names(self) -> Sequence[str]:
"""
The properties which make up the unique ID.
"""
return pulumi.get(self, "key_property_names")
@property
@pulumi.getter(name="strongIdName")
def strong_id_name(self) -> str:
"""
The Name identifying the strong ID.
"""
return pulumi.get(self, "strong_id_name")
@property
@pulumi.getter
def description(self) -> Optional[Mapping[str, str]]:
"""
Localized descriptions.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[Mapping[str, str]]:
"""
Localized display name.
"""
return pulumi.get(self, "display_name")
@pulumi.output_type
class TypePropertiesMappingResponse(dict):
"""
Metadata for a Link's property mapping.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "sourcePropertyName":
suggest = "source_property_name"
elif key == "targetPropertyName":
suggest = "target_property_name"
elif key == "linkType":
suggest = "link_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in TypePropertiesMappingResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
TypePropertiesMappingResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
TypePropertiesMappingResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
source_property_name: str,
target_property_name: str,
link_type: Optional[str] = None):
"""
Metadata for a Link's property mapping.
:param str source_property_name: Property name on the source Entity Type.
:param str target_property_name: Property name on the target Entity Type.
:param str link_type: Link type.
"""
pulumi.set(__self__, "source_property_name", source_property_name)
pulumi.set(__self__, "target_property_name", target_property_name)
if link_type is not None:
pulumi.set(__self__, "link_type", link_type)
@property
@pulumi.getter(name="sourcePropertyName")
def source_property_name(self) -> str:
"""
Property name on the source Entity Type.
"""
return pulumi.get(self, "source_property_name")
@property
@pulumi.getter(name="targetPropertyName")
def target_property_name(self) -> str:
"""
Property name on the target Entity Type.
"""
return pulumi.get(self, "target_property_name")
@property
@pulumi.getter(name="linkType")
def link_type(self) -> Optional[str]:
"""
Link type.
"""
return pulumi.get(self, "link_type")
|
py | 1a32b1d64d5769000be80a47743e98198a16f32b | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Aug 18 2017
@author: Alvaro Radigales
A simple Python implementation of the Lanchester Linear Law. Force
strength for each time pulse of the simulation is stored in a NumPy
array, and later plotted using MatPlotLib.
"""
import numpy
import matplotlib.pyplot as plot
from math import ceil
# The length of the time step will not alter the end result.
# Use only to determine the resolution of the graph.
timeStart = 0.0
timeEnd = 10.0
timeStep = 0.01
steps = int((timeEnd - timeStart) / timeStep)
# Initialise numpy arrays covering each step of the simulation.
blue = numpy.zeros(steps)
red = numpy.zeros(steps)
time = numpy.zeros(steps)
# To remove the frontage constraint, change the frontage variable to
# the smaller remaining force, both in its declaration and in the loop.
blue[0] = 42
red[0] = 30
frontage = 5
blueLethality = 1
redLethality = 1
time[0] = timeStart
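# The loop below is a forward-Euler discretization of the Lanchester Linear Law:
# with an engaged frontage f, losses accrue as
#     dBlue/dt = -f * redLethality, dRed/dt = -f * blueLethality,
# and f is capped each step so it never exceeds either side's remaining strength.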
for i in range(steps - 1):
frontage = min(frontage, ceil(red[i]), ceil(blue[i]))
blue[i+1] = max(0, blue[i] - timeStep * (frontage * redLethality))
red[i+1] = max(0, red[i] - timeStep * (frontage * blueLethality))
time[i+1] = time[i] + timeStep
# Remaining forces at the end of the simulation, for plot label purposes.
blueRemaining = int(blue[len(blue)-1])
redRemaining = int(red[len(red)-1])
# Plot code.
plot.figure()
plot.step(time, blue, '-b', where = 'post', label = 'Blue army')
plot.step(time, red, '-r', where = 'post', label = 'Red army')
plot.ylabel('Strength')
plot.xlabel('Time')
plot.legend()
plot.annotate(blueRemaining,
xy=(timeEnd, blue[len(blue)-1]),
xytext=(-15,10),
textcoords='offset points')
plot.annotate(redRemaining,
xy=(timeEnd, red[len(red)-1]),
xytext=(-15,10),
textcoords='offset points')
plot.show() |
py | 1a32b1fcbb1c9940433d71d90e8c69b39988a486 | """Loguru utils"""
# List of files in `fairseq_cli` that use logging. Any files other than these
# attempting to use logging will have their logger with the same file name
# (i.e., `__name__`).
name_list = ["eval_lm", "generate", "hydra_train", "interactive", "preprocess",
"train", "validate"]
def loguru_name_patcher(record):
filename = record["file"].name # filename, e.g., `train.py`
name = ".".join(filename.split(".")[:-1]) # remove the ".py" part
if name in name_list:
name = f"fairseq_cli.{name}" # legacy name, e.g., `fairseq_cli.train`
record["extra"].update(name=name)
def loguru_reset_logger(logger):
"""Remove all handlers"""
    handlers = logger._core.handlers
    for _ in range(len(handlers)):
        logger.remove()
class LoguruLevels:
TRACE = 5
DEBUG = 10
INFO = 20
SUCCESS = 25
WARNING = 30
ERROR = 40
CRITICAL = 50
def loguru_set_level(logger, level):
"""Set level of all handlers of the provided logger. Note that this
implementation is very non-standard. Avoid using in any case."""
for handler in logger._core.handlers.values():
handler._levelno = level
def get_effective_level(logger):
"""Get effective level of the logger by finding the smallest level among
all handlers."""
levels = []
for handler in logger._core.handlers.values():
levels.append(handler.levelno)
return min(levels)
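# Example (illustrative): raising every handler of an existing logger to WARNING
# with the helpers above, then reading the result back:
#
#     loguru_set_level(logger, LoguruLevels.WARNING)
#     get_effective_level(logger)  # expected to report LoguruLevels.WARNING (30)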
def loguru_emit_some_handlers(logger, handler_ids, message, name,
level_id="INFO"):
"""Emit message using specific handlers while ignoring others. Currently
only supports for non-colorized messages.
Parameters
----------
logger : loguru._logger.Logger
Loguru logger.
handler_ids : list of int
List of handler IDs to deal with.
message : str
Message to emit.
name : str
Logger name.
level_id : str
Level name.
"""
from loguru._recattrs import RecordLevel
from loguru._datetime import aware_now
core = logger._core
level_name, level_no, _, level_icon = core.levels[level_id]
for handler_id in handler_ids:
handler = core.handlers[handler_id]
log_record = {
"message": message,
"level": RecordLevel(level_name, level_no, level_icon),
"exception": None,
"time": aware_now(),
"extra": {"name": name},
}
handler.emit(log_record, level_id, from_decorator=False, is_raw=False,
colored_message=None)
|
py | 1a32b42dad5832ff1fc3bf73796d36e5c11cc019 | import pytest
import fsspec
pytest.importorskip("distributed")
@pytest.fixture()
def cli(tmpdir):
import dask.distributed
client = dask.distributed.Client(n_workers=1)
def setup():
m = fsspec.filesystem("memory")
with m.open('afile', 'wb') as f:
f.write(b'data')
client.run(setup)
try:
yield client
finally:
client.close()
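# Note: the fixture above starts a one-worker distributed cluster and writes
# 'afile' into the worker's in-memory filesystem, so the assertions below only
# pass if the "dask" filesystem forwards ls/cat to the worker rather than
# consulting the local process.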
def test_basic(cli):
fs = fsspec.filesystem('dask', remote_protocol='memory')
assert fs.ls('') == ['afile']
assert fs.cat('afile') == b'data'
|
py | 1a32b52dd26333e774be5de99dec594b6cbc3684 | # Copyright (c) 2015-2016, 2018-2020 Claudiu Popa <[email protected]>
# Copyright (c) 2015-2016 Ceridwen <[email protected]>
# Copyright (c) 2015 Florian Bruhin <[email protected]>
# Copyright (c) 2016 Derek Gustafson <[email protected]>
# Copyright (c) 2018 hippo91 <[email protected]>
# Copyright (c) 2018 Bryce Guinta <[email protected]>
# Copyright (c) 2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2021 Marc Mueller <[email protected]>
# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
# For details: https://github.com/PyCQA/astroid/blob/main/LICENSE
"""
Inference objects are a way to represent composite AST nodes,
which are used only as inference results, so they can't be found in the
original AST tree. For instance, inferring the following frozenset use
leads to an inferred FrozenSet:
Call(func=Name('frozenset'), args=Tuple(...))
"""
from astroid import bases, decorators, node_classes, scoped_nodes, util
from astroid.const import BUILTINS
from astroid.exceptions import (
AttributeInferenceError,
InferenceError,
MroError,
SuperError,
)
from astroid.manager import AstroidManager
objectmodel = util.lazy_import("interpreter.objectmodel")
class FrozenSet(node_classes._BaseContainer):
"""class representing a FrozenSet composite node"""
def pytype(self):
return "%s.frozenset" % BUILTINS
def _infer(self, context=None):
yield self
@decorators.cachedproperty
def _proxied(self): # pylint: disable=method-hidden
ast_builtins = AstroidManager().builtins_module
return ast_builtins.getattr("frozenset")[0]
class Super(node_classes.NodeNG):
"""Proxy class over a super call.
This class offers almost the same behaviour as Python's super,
which is MRO lookups for retrieving attributes from the parents.
The *mro_pointer* is the place in the MRO from where we should
start looking, not counting it. *mro_type* is the object which
provides the MRO, it can be both a type or an instance.
*self_class* is the class where the super call is, while
*scope* is the function where the super call is.
"""
# pylint: disable=unnecessary-lambda
special_attributes = util.lazy_descriptor(lambda: objectmodel.SuperModel())
def __init__(self, mro_pointer, mro_type, self_class, scope):
self.type = mro_type
self.mro_pointer = mro_pointer
self._class_based = False
self._self_class = self_class
self._scope = scope
super().__init__()
def _infer(self, context=None):
yield self
def super_mro(self):
"""Get the MRO which will be used to lookup attributes in this super."""
if not isinstance(self.mro_pointer, scoped_nodes.ClassDef):
raise SuperError(
"The first argument to super must be a subtype of "
"type, not {mro_pointer}.",
super_=self,
)
if isinstance(self.type, scoped_nodes.ClassDef):
# `super(type, type)`, most likely in a class method.
self._class_based = True
mro_type = self.type
else:
mro_type = getattr(self.type, "_proxied", None)
if not isinstance(mro_type, (bases.Instance, scoped_nodes.ClassDef)):
raise SuperError(
"The second argument to super must be an "
"instance or subtype of type, not {type}.",
super_=self,
)
if not mro_type.newstyle:
raise SuperError("Unable to call super on old-style classes.", super_=self)
mro = mro_type.mro()
if self.mro_pointer not in mro:
raise SuperError(
"The second argument to super must be an "
"instance or subtype of type, not {type}.",
super_=self,
)
index = mro.index(self.mro_pointer)
return mro[index + 1 :]
@decorators.cachedproperty
def _proxied(self):
ast_builtins = AstroidManager().builtins_module
return ast_builtins.getattr("super")[0]
def pytype(self):
return "%s.super" % BUILTINS
def display_type(self):
return "Super of"
@property
def name(self):
"""Get the name of the MRO pointer."""
return self.mro_pointer.name
def qname(self):
return "super"
def igetattr(self, name, context=None):
"""Retrieve the inferred values of the given attribute name."""
if name in self.special_attributes:
yield self.special_attributes.lookup(name)
return
try:
mro = self.super_mro()
# Don't let invalid MROs or invalid super calls
# leak out as is from this function.
except SuperError as exc:
raise AttributeInferenceError(
(
"Lookup for {name} on {target!r} because super call {super!r} "
"is invalid."
),
target=self,
attribute=name,
context=context,
super_=exc.super_,
) from exc
except MroError as exc:
raise AttributeInferenceError(
(
"Lookup for {name} on {target!r} failed because {cls!r} has an "
"invalid MRO."
),
target=self,
attribute=name,
context=context,
mros=exc.mros,
cls=exc.cls,
) from exc
found = False
for cls in mro:
if name not in cls.locals:
continue
found = True
for inferred in bases._infer_stmts([cls[name]], context, frame=self):
if not isinstance(inferred, scoped_nodes.FunctionDef):
yield inferred
continue
# We can obtain different descriptors from a super depending
# on what we are accessing and where the super call is.
if inferred.type == "classmethod":
yield bases.BoundMethod(inferred, cls)
elif self._scope.type == "classmethod" and inferred.type == "method":
yield inferred
elif self._class_based or inferred.type == "staticmethod":
yield inferred
elif isinstance(inferred, Property):
function = inferred.function
try:
yield from function.infer_call_result(
caller=self, context=context
)
except InferenceError:
yield util.Uninferable
elif bases._is_property(inferred):
# TODO: support other descriptors as well.
try:
yield from inferred.infer_call_result(self, context)
except InferenceError:
yield util.Uninferable
else:
yield bases.BoundMethod(inferred, cls)
if not found:
raise AttributeInferenceError(target=self, attribute=name, context=context)
def getattr(self, name, context=None):
return list(self.igetattr(name, context=context))
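# Illustrative sketch of what Super models (an assumed example, not part of the
# original module). For source code such as:
#
#     class A:
#         def f(self): ...
#     class B(A):
#         def f(self):
#             return super().f()
#
# the super() call is represented as Super(mro_pointer=B, mro_type=<instance of B>);
# super_mro() starts the lookup at the class after B in B's MRO (here A), and
# igetattr("f") yields A.f wrapped as a bound method.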
class ExceptionInstance(bases.Instance):
"""Class for instances of exceptions
It has special treatment for some of the exception's attributes,
which are transformed at runtime into certain concrete objects, such as
in the case of .args.
"""
@decorators.cachedproperty
def special_attributes(self):
qname = self.qname()
instance = objectmodel.BUILTIN_EXCEPTIONS.get(
qname, objectmodel.ExceptionInstanceModel
)
return instance()(self)
class DictInstance(bases.Instance):
"""Special kind of instances for dictionaries
This instance knows the underlying object model of the dictionaries, which means
that methods such as .values or .items can be properly inferred.
"""
# pylint: disable=unnecessary-lambda
special_attributes = util.lazy_descriptor(lambda: objectmodel.DictModel())
# Custom objects tailored for dictionaries, which are used to
# disambiguate between the types of Python 2 dict's method returns
# and Python 3 (where they return set-like objects).
class DictItems(bases.Proxy):
__str__ = node_classes.NodeNG.__str__
__repr__ = node_classes.NodeNG.__repr__
class DictKeys(bases.Proxy):
__str__ = node_classes.NodeNG.__str__
__repr__ = node_classes.NodeNG.__repr__
class DictValues(bases.Proxy):
__str__ = node_classes.NodeNG.__str__
__repr__ = node_classes.NodeNG.__repr__
class PartialFunction(scoped_nodes.FunctionDef):
"""A class representing partial function obtained via functools.partial"""
def __init__(
self, call, name=None, doc=None, lineno=None, col_offset=None, parent=None
):
super().__init__(name, doc, lineno, col_offset, parent)
self.filled_positionals = len(call.positional_arguments[1:])
self.filled_args = call.positional_arguments[1:]
self.filled_keywords = call.keyword_arguments
def infer_call_result(self, caller=None, context=None):
if context:
current_passed_keywords = {
keyword for (keyword, _) in context.callcontext.keywords
}
for keyword, value in self.filled_keywords.items():
if keyword not in current_passed_keywords:
context.callcontext.keywords.append((keyword, value))
call_context_args = context.callcontext.args or []
context.callcontext.args = self.filled_args + call_context_args
return super().infer_call_result(caller=caller, context=context)
def qname(self):
return self.__class__.__name__
# TODO: Hack to solve the circular import problem between node_classes and objects
# This is not needed in 2.0, which has a cleaner design overall
node_classes.Dict.__bases__ = (node_classes.NodeNG, DictInstance)
class Property(scoped_nodes.FunctionDef):
"""Class representing a Python property"""
def __init__(
self, function, name=None, doc=None, lineno=None, col_offset=None, parent=None
):
self.function = function
super().__init__(name, doc, lineno, col_offset, parent)
# pylint: disable=unnecessary-lambda
special_attributes = util.lazy_descriptor(lambda: objectmodel.PropertyModel())
type = "property"
def pytype(self):
return "%s.property" % BUILTINS
def infer_call_result(self, caller=None, context=None):
raise InferenceError("Properties are not callable")
def infer(self, context=None, **kwargs):
return iter((self,))
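# Illustrative sketch (an assumed example, not part of the original module):
# inferring a frozenset() call through the public astroid API is expected to
# produce the FrozenSet object defined above.
#
#     import astroid
#     node = astroid.extract_node("frozenset((1, 2, 3))")
#     inferred = next(node.infer())
#     # inferred is an objects.FrozenSet whose pytype() ends with ".frozenset"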
|
py | 1a32b5a8fd12b569b9f297c385b75190207f093a | import time
import random
format = "R01G02T%02d%02d+%s+"
types = ["WT","DT","LT","PT", "XX"]
def slot(min, sec, label):
for m in range(min,-1, -1):
for s in range(sec,-1, -1):
print format % (m, s, label)
time.sleep(1)
while 1:
slot(0,30,"PT")
slot(0,30, "NF")
slot(0,30, "WT")
slot(0,30, "LT")
slot(0,30, "DT")
|
py | 1a32b671b2f4531a89f5c60ff6aeca97b940b93a | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, List, Optional, TypeVar
from msrest import Serializer
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
location: str,
publisher_name: str,
offer: str,
skus: str,
version: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2018-10-01") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}") # pylint: disable=line-too-long
path_format_arguments = {
"location": _SERIALIZER.url("location", location, 'str'),
"publisherName": _SERIALIZER.url("publisher_name", publisher_name, 'str'),
"offer": _SERIALIZER.url("offer", offer, 'str'),
"skus": _SERIALIZER.url("skus", skus, 'str'),
"version": _SERIALIZER.url("version", version, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_list_request(
location: str,
publisher_name: str,
offer: str,
skus: str,
subscription_id: str,
*,
expand: Optional[str] = None,
top: Optional[int] = None,
orderby: Optional[str] = None,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2018-10-01") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions") # pylint: disable=line-too-long
path_format_arguments = {
"location": _SERIALIZER.url("location", location, 'str'),
"publisherName": _SERIALIZER.url("publisher_name", publisher_name, 'str'),
"offer": _SERIALIZER.url("offer", offer, 'str'),
"skus": _SERIALIZER.url("skus", skus, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
if expand is not None:
_query_parameters['$expand'] = _SERIALIZER.query("expand", expand, 'str')
if top is not None:
_query_parameters['$top'] = _SERIALIZER.query("top", top, 'int')
if orderby is not None:
_query_parameters['$orderby'] = _SERIALIZER.query("orderby", orderby, 'str')
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_list_offers_request(
location: str,
publisher_name: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2018-10-01") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers") # pylint: disable=line-too-long
path_format_arguments = {
"location": _SERIALIZER.url("location", location, 'str'),
"publisherName": _SERIALIZER.url("publisher_name", publisher_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_list_publishers_request(
location: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2018-10-01") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers") # pylint: disable=line-too-long
path_format_arguments = {
"location": _SERIALIZER.url("location", location, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
def build_list_skus_request(
location: str,
publisher_name: str,
offer: str,
subscription_id: str,
**kwargs: Any
) -> HttpRequest:
api_version = kwargs.pop('api_version', "2018-10-01") # type: str
accept = "application/json"
# Construct URL
_url = kwargs.pop("template_url", "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus") # pylint: disable=line-too-long
path_format_arguments = {
"location": _SERIALIZER.url("location", location, 'str'),
"publisherName": _SERIALIZER.url("publisher_name", publisher_name, 'str'),
"offer": _SERIALIZER.url("offer", offer, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
}
_url = _format_url_section(_url, **path_format_arguments)
# Construct parameters
_query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
_query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
_header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
_header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=_url,
params=_query_parameters,
headers=_header_parameters,
**kwargs
)
class VirtualMachineImagesOperations(object):
"""VirtualMachineImagesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.compute.v2018_10_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get(
self,
location: str,
publisher_name: str,
offer: str,
skus: str,
version: str,
**kwargs: Any
) -> "_models.VirtualMachineImage":
"""Gets a virtual machine image.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name: A valid image publisher.
:type publisher_name: str
:param offer: A valid image publisher offer.
:type offer: str
:param skus: A valid image SKU.
:type skus: str
:param version: A valid image SKU version.
:type version: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: VirtualMachineImage, or the result of cls(response)
:rtype: ~azure.mgmt.compute.v2018_10_01.models.VirtualMachineImage
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.VirtualMachineImage"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2018-10-01") # type: str
request = build_get_request(
location=location,
publisher_name=publisher_name,
offer=offer,
skus=skus,
version=version,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('VirtualMachineImage', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions/{version}"} # type: ignore
@distributed_trace
def list(
self,
location: str,
publisher_name: str,
offer: str,
skus: str,
expand: Optional[str] = None,
top: Optional[int] = None,
orderby: Optional[str] = None,
**kwargs: Any
) -> List["_models.VirtualMachineImageResource"]:
"""Gets a list of all virtual machine image versions for the specified location, publisher, offer,
and SKU.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name: A valid image publisher.
:type publisher_name: str
:param offer: A valid image publisher offer.
:type offer: str
:param skus: A valid image SKU.
:type skus: str
:param expand: The expand expression to apply on the operation. Default value is None.
:type expand: str
:param top: Default value is None.
:type top: int
:param orderby: Default value is None.
:type orderby: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineImageResource, or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2018_10_01.models.VirtualMachineImageResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.VirtualMachineImageResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2018-10-01") # type: str
request = build_list_request(
location=location,
publisher_name=publisher_name,
offer=offer,
skus=skus,
subscription_id=self._config.subscription_id,
api_version=api_version,
expand=expand,
top=top,
orderby=orderby,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus/{skus}/versions"} # type: ignore
@distributed_trace
def list_offers(
self,
location: str,
publisher_name: str,
**kwargs: Any
) -> List["_models.VirtualMachineImageResource"]:
"""Gets a list of virtual machine image offers for the specified location and publisher.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name: A valid image publisher.
:type publisher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineImageResource, or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2018_10_01.models.VirtualMachineImageResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.VirtualMachineImageResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2018-10-01") # type: str
request = build_list_offers_request(
location=location,
publisher_name=publisher_name,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_offers.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_offers.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers"} # type: ignore
@distributed_trace
def list_publishers(
self,
location: str,
**kwargs: Any
) -> List["_models.VirtualMachineImageResource"]:
"""Gets a list of virtual machine image publishers for the specified Azure location.
:param location: The name of a supported Azure region.
:type location: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineImageResource, or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2018_10_01.models.VirtualMachineImageResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.VirtualMachineImageResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2018-10-01") # type: str
request = build_list_publishers_request(
location=location,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_publishers.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_publishers.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers"} # type: ignore
@distributed_trace
def list_skus(
self,
location: str,
publisher_name: str,
offer: str,
**kwargs: Any
) -> List["_models.VirtualMachineImageResource"]:
"""Gets a list of virtual machine image SKUs for the specified location, publisher, and offer.
:param location: The name of a supported Azure region.
:type location: str
:param publisher_name: A valid image publisher.
:type publisher_name: str
:param offer: A valid image publisher offer.
:type offer: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: list of VirtualMachineImageResource, or the result of cls(response)
:rtype: list[~azure.mgmt.compute.v2018_10_01.models.VirtualMachineImageResource]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[List["_models.VirtualMachineImageResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = kwargs.pop('api_version', "2018-10-01") # type: str
request = build_list_skus_request(
location=location,
publisher_name=publisher_name,
offer=offer,
subscription_id=self._config.subscription_id,
api_version=api_version,
template_url=self.list_skus.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run( # pylint: disable=protected-access
request,
stream=False,
**kwargs
)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('[VirtualMachineImageResource]', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list_skus.metadata = {'url': "/subscriptions/{subscriptionId}/providers/Microsoft.Compute/locations/{location}/publishers/{publisherName}/artifacttypes/vmimage/offers/{offer}/skus"} # type: ignore
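# Illustrative usage sketch (an assumed example, not part of the generated
# file): this operation group is normally reached through the versioned
# compute management client rather than instantiated directly, e.g.
#
#     # client = ComputeManagementClient(credential, subscription_id)
#     versions = client.virtual_machine_images.list(
#         location="westus2",
#         publisher_name="Canonical",
#         offer="UbuntuServer",
#         skus="18.04-LTS",
#         top=5,
#     )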
|
py | 1a32b6f71ddcf87bacce11b337f4f6b09170a71f | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RNp(RPackage):
"""This package provides a variety of nonparametric (and semiparametric)
kernel methods that seamlessly handle a mix of continuous, unordered, and
ordered factor data types. We would like to gratefully acknowledge support
from the Natural Sciences and Engineering Research Council of Canada
(NSERC:www.nserc.ca), the Social Sciences and Humanities Research Council
of Canada (SSHRC:www.sshrc.ca), and the Shared Hierarchical Academic
Research Computing Network (SHARCNET:www.sharcnet.ca)."""
homepage = "https://github.com/JeffreyRacine/R-Package-np/"
url = "https://cran.r-project.org/src/contrib/np_0.60-2.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/np"
version('0.60-2', 'e094d52ddff7280272b41e6cb2c74389')
depends_on('r-boot', type=('build', 'run'))
depends_on('r-cubature', type=('build', 'run'))
|
py | 1a32b74d8195f15f1dfd0539520f9a05fabf5eff | import pytest
from mock import Mock, patch
from service import get_maps
@patch('service.map_service_common.get_all_maps')
def test_get_maps(get_all_maps_mock):
get_all_maps_mock.return_value = {}
response = get_maps.lambda_handler({}, None)
valid_response = {'statusCode': 200, 'body': '{"Maps": {}}', 'headers': {'Access-Control-Allow-Origin': '*'}}
get_all_maps_mock.assert_called()
assert response == valid_response
@patch('service.map_service_common.get_all_maps')
def test_get_maps_dynamo_failure(get_all_maps_mock):
with pytest.raises(IOError):
get_all_maps_mock.side_effect = Mock(side_effect=IOError('Dynamo Exception'))
get_maps.lambda_handler({}, None)
|
py | 1a32b761f54543810989b90cc761016884577745 | import os
import json
import random
META = "../../important_data/"
N = 10000
def load_metadata(filename):
print("Start reading " + filename)
with open(os.path.join(META, filename)) as f:
data = json.load(f)
return data
def remove_version_ending(arxiv_id):
return arxiv_id.rsplit("v", 1)[0]
def format_name_to_id(name):
return ''.join(name.split()).lower()
def path_to_id(name):
""" Convert filepath name of ArXiv file to ArXiv ID """
if '.' in name: # new ID
return name
split = name.split("_")
return "/".join(split)
valid_aids = set(list(map(remove_version_ending, load_metadata("sampled_aids_100k.json"))))
arxiv_metadata = load_metadata("arxiv_id_to_doi_title.json")
# Get ground truth
print("Get Ground truth")
# Filter MAG data to only include IDs from the sampled 100k that have a DOI
mag_gt_overall = {key: value for key, value in load_metadata("aid_to_ref_magids.json").items()
if ((key in valid_aids) and (arxiv_metadata[key][0]))}
# Add empty references
for key in load_metadata("aids_without_ref.json"):
if (key in valid_aids) and (arxiv_metadata[key][0]):
mag_gt_overall[key] = []
# Sample 10k
random.seed("Random Seed for similar results upon rerunning the code")
gt_keys = set(random.sample(list(mag_gt_overall.keys()), N))
# Build sampled_gt
mag_data = load_metadata("magid_to_data.json")
# Get (title, arxiv_id) for all reference entries from the MAG data
gt_aid_to_data = {key: [(mag_data[ref_mag_id][2], mag_data[ref_mag_id][3]) for ref_mag_id in mag_gt_overall[key]]
for key in gt_keys}
gt_len = len(gt_aid_to_data)
gt_overall_references = sum([len(value) for key, value in gt_aid_to_data.items()])
# Get Bierbaum Work
print("Get Bierbaum data")
bierbaum_overall = load_metadata("merged_internal-citations.json")
bierbaum_compare_with_version = {key: value for key, value in bierbaum_overall.items() if
remove_version_ending(key) in gt_keys}
bierbaum_compare = {}
for key, value in bierbaum_compare_with_version.items():
aid_without_version = remove_version_ending(key)
arxiv_citations_without_version = [remove_version_ending(tmp_aid) for tmp_aid in value]
bierbaum_compare[aid_without_version] = arxiv_citations_without_version
# Compare Bierbaum Work
print("Compare Bierbaum data")
bb_len = len(bierbaum_compare)
bb_overall_references = sum([len(value) for key, value in bierbaum_compare.items()])
bb_hit = 0
bb_miss = 0
bb_self = 0
# Compare based only on arXiv IDs, as Bierbaum's work only has arXiv IDs
arxiv_metadata_keys = set(arxiv_metadata.keys())
for arxiv_id, references in bierbaum_compare.items():
# Get values of Ground truth
gt_references = gt_aid_to_data[arxiv_id]
gt_arxiv_ids = set([data[1] for data in gt_references if data[1]])
gt_titles = set([format_name_to_id(data[0]) for data in gt_references])
# Check compliance
for ref_aid in references:
# Skip in case of self reference
if ref_aid == arxiv_id:
bb_self += 1
continue
if ref_aid in gt_arxiv_ids:
bb_hit += 1
continue
if (ref_aid in arxiv_metadata_keys) and (format_name_to_id(arxiv_metadata[ref_aid][1]) in gt_titles):
bb_hit += 1
continue
# Unable to match the reference found by bierbaum to a reference in the ground truth
bb_miss += 1
# Compare parser results
print("Get Parser data")
parsed_100k = load_metadata("parsed_sampled_100k.json")
parsed_sampled_without_version = {}
for key, p_references in parsed_100k.items():
# Remove version
aid_no_version = remove_version_ending(key)
# Make _ to / as its coming from file names
aid_fixed = path_to_id(aid_no_version)
if aid_fixed in gt_keys:
parsed_sampled_without_version[aid_fixed] = [format_name_to_id(ref["title"]) for ref in p_references]
print("Compare Parser data")
p_len = len(parsed_sampled_without_version)
p_overall_references = sum([len(value) for key, value in parsed_sampled_without_version.items()])
p_hit = 0
p_miss = 0
p_self = 0
# Compare based on titles, as the parser output only contains reference titles
for arxiv_id, references in parsed_sampled_without_version.items():
# Get values of Ground truth
gt_references = gt_aid_to_data[arxiv_id]
gt_titles = set([format_name_to_id(data[0]) for data in gt_references])
# Check compliance
for ref_title in references:
# Skip in case of self reference
if (arxiv_id in arxiv_metadata_keys) and (ref_title == format_name_to_id(arxiv_metadata[arxiv_id][1])):
p_self += 1
continue
if ref_title in gt_titles:
p_hit += 1
continue
# Unable to match the reference found by the parser to a reference in the ground truth
p_miss += 1
print("\n[Ground Truth (GT) MAG] Entries: {}; Overall references {}".format(gt_len, gt_overall_references))
print(("[Bierbaum] Entries: {} (%-of-GT: {:.2%}); Overall references {} (%-of-GT: {:.2%}); " +
"Found {} of GT references ({:.2%}). Found {} references not in GT (%-of-Bierbaum-Refs: {:.2%}). Self-references: {}").format(
bb_len, bb_len / gt_len, bb_overall_references, bb_overall_references / gt_overall_references,
bb_hit, bb_hit / gt_overall_references, bb_miss, bb_miss / bb_overall_references, bb_self)
)
print(("[Parser] Entries: {} (%-of-GT: {:.2%}); Overall references {} (%-of-GT: {:.2%}); " +
"Found {} of GT references ({:.2%}). Found {} references not in GT (%-of-Parser-Refs: {:.2%}). Self-references: {}").format(
p_len, p_len / gt_len, p_overall_references, p_overall_references / gt_overall_references,
p_hit, p_hit / gt_overall_references, p_miss, p_miss / p_overall_references, p_self)
)
|
py | 1a32b7c6c59975846de1a35c9d2df8c8d7fdbe06 | # mininode.py - Bitcoin P2P network half-a-node
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# This python code was modified from ArtForz' public domain half-a-node, as
# found in the mini-node branch of http://github.com/jgarzik/pynode.
#
# NodeConn: an object which manages p2p connectivity to a bitcoin node
# NodeConnCB: a base class that describes the interface for receiving
# callbacks with network messages from a NodeConn
# CBlock, CTransaction, CBlockHeader, CTxIn, CTxOut, etc....:
# data structures that should map to corresponding structures in
# bitcoin/primitives
# msg_block, msg_tx, msg_headers, etc.:
# data structures that represent network messages
# ser_*, deser_*: functions that handle serialization/deserialization
import struct
import socket
import asyncore
import binascii
import time
import sys
import random
import cStringIO
import hashlib
from threading import RLock
from threading import Thread
import logging
import copy
from pyblake2 import blake2b
from .equihash import (
gbp_basic,
gbp_validate,
hash_nonce,
vcoin_person,
)
OVERWINTER_PROTO_VERSION = 170003
BIP0031_VERSION = 60000
SPROUT_PROTO_VERSION = 170002 # past bip-31 for ping/pong
SAPLING_PROTO_VERSION = 170006
MY_SUBVERSION = "/python-mininode-tester:0.0.1/"
OVERWINTER_VERSION_GROUP_ID = 0x03C48270
MAX_INV_SZ = 50000
COIN = 100000000 # 1 zel in zatoshis
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
# access to any data shared with the NodeConnCB or NodeConn.
mininode_lock = RLock()
# Serialization/deserialization tools
def sha256(s):
return hashlib.new('sha256', s).digest()
def hash256(s):
return sha256(sha256(s))
def deser_string(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
return f.read(nit)
def ser_string(s):
if len(s) < 253:
return chr(len(s)) + s
elif len(s) < 0x10000:
return chr(253) + struct.pack("<H", len(s)) + s
elif len(s) < 0x100000000L:
return chr(254) + struct.pack("<I", len(s)) + s
return chr(255) + struct.pack("<Q", len(s)) + s
def deser_uint256(f):
r = 0L
for i in xrange(8):
t = struct.unpack("<I", f.read(4))[0]
r += t << (i * 32)
return r
def ser_uint256(u):
rs = ""
for i in xrange(8):
rs += struct.pack("<I", u & 0xFFFFFFFFL)
u >>= 32
return rs
def uint256_from_str(s):
r = 0L
t = struct.unpack("<IIIIIIII", s[:32])
for i in xrange(8):
r += t[i] << (i * 32)
return r
def uint256_from_compact(c):
nbytes = (c >> 24) & 0xFF
v = (c & 0xFFFFFFL) << (8 * (nbytes - 3))
return v
def deser_vector(f, c):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = c()
t.deserialize(f)
r.append(t)
return r
def ser_vector(l):
r = ""
if len(l) < 253:
r = chr(len(l))
elif len(l) < 0x10000:
r = chr(253) + struct.pack("<H", len(l))
elif len(l) < 0x100000000L:
r = chr(254) + struct.pack("<I", len(l))
else:
r = chr(255) + struct.pack("<Q", len(l))
for i in l:
r += i.serialize()
return r
def deser_uint256_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = deser_uint256(f)
r.append(t)
return r
def ser_uint256_vector(l):
r = ""
if len(l) < 253:
r = chr(len(l))
elif len(l) < 0x10000:
r = chr(253) + struct.pack("<H", len(l))
elif len(l) < 0x100000000L:
r = chr(254) + struct.pack("<I", len(l))
else:
r = chr(255) + struct.pack("<Q", len(l))
for i in l:
r += ser_uint256(i)
return r
def deser_string_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = deser_string(f)
r.append(t)
return r
def ser_string_vector(l):
r = ""
if len(l) < 253:
r = chr(len(l))
elif len(l) < 0x10000:
r = chr(253) + struct.pack("<H", len(l))
elif len(l) < 0x100000000L:
r = chr(254) + struct.pack("<I", len(l))
else:
r = chr(255) + struct.pack("<Q", len(l))
for sv in l:
r += ser_string(sv)
return r
def deser_int_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = struct.unpack("<i", f.read(4))[0]
r.append(t)
return r
def ser_int_vector(l):
r = ""
if len(l) < 253:
r = chr(len(l))
elif len(l) < 0x10000:
r = chr(253) + struct.pack("<H", len(l))
elif len(l) < 0x100000000L:
r = chr(254) + struct.pack("<I", len(l))
else:
r = chr(255) + struct.pack("<Q", len(l))
for i in l:
r += struct.pack("<i", i)
return r
def deser_char_vector(f):
nit = struct.unpack("<B", f.read(1))[0]
if nit == 253:
nit = struct.unpack("<H", f.read(2))[0]
elif nit == 254:
nit = struct.unpack("<I", f.read(4))[0]
elif nit == 255:
nit = struct.unpack("<Q", f.read(8))[0]
r = []
for i in xrange(nit):
t = struct.unpack("<B", f.read(1))[0]
r.append(t)
return r
def ser_char_vector(l):
r = ""
if len(l) < 253:
r = chr(len(l))
elif len(l) < 0x10000:
r = chr(253) + struct.pack("<H", len(l))
elif len(l) < 0x100000000L:
r = chr(254) + struct.pack("<I", len(l))
else:
r = chr(255) + struct.pack("<Q", len(l))
for i in l:
r += chr(i)
return r
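# Illustrative self-check for the serializers above (an assumed example, not
# part of the original module): values should survive a serialize/deserialize
# round trip.
def _selftest_serialization_roundtrip():
    value = 0x1234567890abcdefL
    f = cStringIO.StringIO(ser_uint256(value))
    assert deser_uint256(f) == value
    f = cStringIO.StringIO(ser_string("hello"))
    assert deser_string(f) == "hello"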
# Objects that map to bitcoind objects, which can be serialized/deserialized
class CAddress(object):
def __init__(self):
self.nServices = 1
self.pchReserved = "\x00" * 10 + "\xff" * 2
self.ip = "0.0.0.0"
self.port = 0
def deserialize(self, f):
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.pchReserved = f.read(12)
self.ip = socket.inet_ntoa(f.read(4))
self.port = struct.unpack(">H", f.read(2))[0]
def serialize(self):
r = ""
r += struct.pack("<Q", self.nServices)
r += self.pchReserved
r += socket.inet_aton(self.ip)
r += struct.pack(">H", self.port)
return r
def __repr__(self):
return "CAddress(nServices=%i ip=%s port=%i)" % (self.nServices,
self.ip, self.port)
class CInv(object):
typemap = {
0: "Error",
1: "TX",
2: "Block"}
def __init__(self, t=0, h=0L):
self.type = t
self.hash = h
def deserialize(self, f):
self.type = struct.unpack("<i", f.read(4))[0]
self.hash = deser_uint256(f)
def serialize(self):
r = ""
r += struct.pack("<i", self.type)
r += ser_uint256(self.hash)
return r
def __repr__(self):
return "CInv(type=%s hash=%064x)" \
% (self.typemap[self.type], self.hash)
class CBlockLocator(object):
def __init__(self):
self.nVersion = SPROUT_PROTO_VERSION
self.vHave = []
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.vHave = deser_uint256_vector(f)
def serialize(self):
r = ""
r += struct.pack("<i", self.nVersion)
r += ser_uint256_vector(self.vHave)
return r
def __repr__(self):
return "CBlockLocator(nVersion=%i vHave=%s)" \
% (self.nVersion, repr(self.vHave))
G1_PREFIX_MASK = 0x02
G2_PREFIX_MASK = 0x0a
class ZCProof(object):
def __init__(self):
self.g_A = None
self.g_A_prime = None
self.g_B = None
self.g_B_prime = None
self.g_C = None
self.g_C_prime = None
self.g_K = None
self.g_H = None
def deserialize(self, f):
def deser_g1(f):
leadingByte = struct.unpack("<B", f.read(1))[0]
return {
'y_lsb': leadingByte & 1,
'x': f.read(32),
}
def deser_g2(f):
leadingByte = struct.unpack("<B", f.read(1))[0]
return {
'y_gt': leadingByte & 1,
'x': f.read(64),
}
self.g_A = deser_g1(f)
self.g_A_prime = deser_g1(f)
self.g_B = deser_g2(f)
self.g_B_prime = deser_g1(f)
self.g_C = deser_g1(f)
self.g_C_prime = deser_g1(f)
self.g_K = deser_g1(f)
self.g_H = deser_g1(f)
def serialize(self):
def ser_g1(p):
return chr(G1_PREFIX_MASK | p['y_lsb']) + p['x']
def ser_g2(p):
return chr(G2_PREFIX_MASK | p['y_gt']) + p['x']
r = ""
r += ser_g1(self.g_A)
r += ser_g1(self.g_A_prime)
r += ser_g2(self.g_B)
r += ser_g1(self.g_B_prime)
r += ser_g1(self.g_C)
r += ser_g1(self.g_C_prime)
r += ser_g1(self.g_K)
r += ser_g1(self.g_H)
return r
def __repr__(self):
return "ZCProof(g_A=%s g_A_prime=%s g_B=%s g_B_prime=%s g_C=%s g_C_prime=%s g_K=%s g_H=%s)" \
% (repr(self.g_A), repr(self.g_A_prime),
repr(self.g_B), repr(self.g_B_prime),
repr(self.g_C), repr(self.g_C_prime),
repr(self.g_K), repr(self.g_H))
ZC_NUM_JS_INPUTS = 2
ZC_NUM_JS_OUTPUTS = 2
ZC_NOTEPLAINTEXT_LEADING = 1
ZC_V_SIZE = 8
ZC_RHO_SIZE = 32
ZC_R_SIZE = 32
ZC_MEMO_SIZE = 512
ZC_NOTEPLAINTEXT_SIZE = (
ZC_NOTEPLAINTEXT_LEADING +
ZC_V_SIZE +
ZC_RHO_SIZE +
ZC_R_SIZE +
ZC_MEMO_SIZE
)
NOTEENCRYPTION_AUTH_BYTES = 16
ZC_NOTECIPHERTEXT_SIZE = (
ZC_NOTEPLAINTEXT_SIZE +
NOTEENCRYPTION_AUTH_BYTES
)
class JSDescription(object):
def __init__(self):
self.vpub_old = 0
self.vpub_new = 0
self.anchor = 0
self.nullifiers = [0] * ZC_NUM_JS_INPUTS
self.commitments = [0] * ZC_NUM_JS_OUTPUTS
self.onetimePubKey = 0
self.randomSeed = 0
self.macs = [0] * ZC_NUM_JS_INPUTS
self.proof = None
self.ciphertexts = [None] * ZC_NUM_JS_OUTPUTS
def deserialize(self, f):
self.vpub_old = struct.unpack("<q", f.read(8))[0]
self.vpub_new = struct.unpack("<q", f.read(8))[0]
self.anchor = deser_uint256(f)
self.nullifiers = []
for i in range(ZC_NUM_JS_INPUTS):
self.nullifiers.append(deser_uint256(f))
self.commitments = []
for i in range(ZC_NUM_JS_OUTPUTS):
self.commitments.append(deser_uint256(f))
self.onetimePubKey = deser_uint256(f)
self.randomSeed = deser_uint256(f)
self.macs = []
for i in range(ZC_NUM_JS_INPUTS):
self.macs.append(deser_uint256(f))
self.proof = ZCProof()
self.proof.deserialize(f)
self.ciphertexts = []
for i in range(ZC_NUM_JS_OUTPUTS):
self.ciphertexts.append(f.read(ZC_NOTECIPHERTEXT_SIZE))
def serialize(self):
r = ""
r += struct.pack("<q", self.vpub_old)
r += struct.pack("<q", self.vpub_new)
r += ser_uint256(self.anchor)
for i in range(ZC_NUM_JS_INPUTS):
r += ser_uint256(self.nullifiers[i])
for i in range(ZC_NUM_JS_OUTPUTS):
r += ser_uint256(self.commitments[i])
r += ser_uint256(self.onetimePubKey)
r += ser_uint256(self.randomSeed)
for i in range(ZC_NUM_JS_INPUTS):
r += ser_uint256(self.macs[i])
r += self.proof.serialize()
for i in range(ZC_NUM_JS_OUTPUTS):
r += self.ciphertexts[i]
return r
def __repr__(self):
return "JSDescription(vpub_old=%i.%08i vpub_new=%i.%08i anchor=%064x onetimePubKey=%064x randomSeed=%064x proof=%s)" \
% (self.vpub_old, self.vpub_new, self.anchor,
self.onetimePubKey, self.randomSeed, repr(self.proof))
class COutPoint(object):
def __init__(self, hash=0, n=0):
self.hash = hash
self.n = n
def deserialize(self, f):
self.hash = deser_uint256(f)
self.n = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = ""
r += ser_uint256(self.hash)
r += struct.pack("<I", self.n)
return r
def __repr__(self):
return "COutPoint(hash=%064x n=%i)" % (self.hash, self.n)
class CTxIn(object):
def __init__(self, outpoint=None, scriptSig="", nSequence=0):
if outpoint is None:
self.prevout = COutPoint()
else:
self.prevout = outpoint
self.scriptSig = scriptSig
self.nSequence = nSequence
def deserialize(self, f):
self.prevout = COutPoint()
self.prevout.deserialize(f)
self.scriptSig = deser_string(f)
self.nSequence = struct.unpack("<I", f.read(4))[0]
def serialize(self):
r = ""
r += self.prevout.serialize()
r += ser_string(self.scriptSig)
r += struct.pack("<I", self.nSequence)
return r
def __repr__(self):
return "CTxIn(prevout=%s scriptSig=%s nSequence=%i)" \
% (repr(self.prevout), binascii.hexlify(self.scriptSig),
self.nSequence)
class CTxOut(object):
def __init__(self, nValue=0, scriptPubKey=""):
self.nValue = nValue
self.scriptPubKey = scriptPubKey
def deserialize(self, f):
self.nValue = struct.unpack("<q", f.read(8))[0]
self.scriptPubKey = deser_string(f)
def serialize(self):
r = ""
r += struct.pack("<q", self.nValue)
r += ser_string(self.scriptPubKey)
return r
def __repr__(self):
return "CTxOut(nValue=%i.%08i scriptPubKey=%s)" \
% (self.nValue // 100000000, self.nValue % 100000000,
binascii.hexlify(self.scriptPubKey))
class CTransaction(object):
def __init__(self, tx=None):
if tx is None:
self.fOverwintered = False
self.nVersion = 1
self.nVersionGroupId = 0
self.vin = []
self.vout = []
self.nLockTime = 0
self.nExpiryHeight = 0
self.vJoinSplit = []
self.joinSplitPubKey = None
self.joinSplitSig = None
self.sha256 = None
self.hash = None
else:
self.fOverwintered = tx.fOverwintered
self.nVersion = tx.nVersion
self.nVersionGroupId = tx.nVersionGroupId
self.vin = copy.deepcopy(tx.vin)
self.vout = copy.deepcopy(tx.vout)
self.nLockTime = tx.nLockTime
self.nExpiryHeight = tx.nExpiryHeight
self.vJoinSplit = copy.deepcopy(tx.vJoinSplit)
self.joinSplitPubKey = tx.joinSplitPubKey
self.joinSplitSig = tx.joinSplitSig
self.sha256 = None
self.hash = None
def deserialize(self, f):
header = struct.unpack("<I", f.read(4))[0]
self.fOverwintered = bool(header >> 31)
self.nVersion = header & 0x7FFFFFFF
self.nVersionGroupId = (struct.unpack("<I", f.read(4))[0]
if self.fOverwintered else 0)
isOverwinterV3 = (self.fOverwintered and
self.nVersionGroupId == OVERWINTER_VERSION_GROUP_ID and
self.nVersion == 3)
self.vin = deser_vector(f, CTxIn)
self.vout = deser_vector(f, CTxOut)
self.nLockTime = struct.unpack("<I", f.read(4))[0]
if isOverwinterV3:
self.nExpiryHeight = struct.unpack("<I", f.read(4))[0]
if self.nVersion >= 2:
self.vJoinSplit = deser_vector(f, JSDescription)
if len(self.vJoinSplit) > 0:
self.joinSplitPubKey = deser_uint256(f)
self.joinSplitSig = f.read(64)
self.sha256 = None
self.hash = None
def serialize(self):
header = (int(self.fOverwintered)<<31) | self.nVersion
isOverwinterV3 = (self.fOverwintered and
self.nVersionGroupId == OVERWINTER_VERSION_GROUP_ID and
self.nVersion == 3)
r = ""
r += struct.pack("<I", header)
if self.fOverwintered:
r += struct.pack("<I", self.nVersionGroupId)
r += ser_vector(self.vin)
r += ser_vector(self.vout)
r += struct.pack("<I", self.nLockTime)
if isOverwinterV3:
r += struct.pack("<I", self.nExpiryHeight)
if self.nVersion >= 2:
r += ser_vector(self.vJoinSplit)
if len(self.vJoinSplit) > 0:
r += ser_uint256(self.joinSplitPubKey)
r += self.joinSplitSig
return r
def rehash(self):
self.sha256 = None
self.calc_sha256()
def calc_sha256(self):
if self.sha256 is None:
self.sha256 = uint256_from_str(hash256(self.serialize()))
self.hash = hash256(self.serialize())[::-1].encode('hex_codec')
def is_valid(self):
self.calc_sha256()
for tout in self.vout:
if tout.nValue < 0 or tout.nValue > 21000000L * 100000000L:
return False
return True
def __repr__(self):
r = ("CTransaction(fOverwintered=%r nVersion=%i nVersionGroupId=0x%08x "
"vin=%s vout=%s nLockTime=%i nExpiryHeight=%i"
% (self.fOverwintered, self.nVersion, self.nVersionGroupId,
repr(self.vin), repr(self.vout), self.nLockTime, self.nExpiryHeight))
if self.nVersion >= 2:
r += " vJoinSplit=%s" % repr(self.vJoinSplit)
if len(self.vJoinSplit) > 0:
r += " joinSplitPubKey=%064x joinSplitSig=%064x" \
% (self.joinSplitPubKey, self.joinSplitSig)
r += ")"
return r
class CBlockHeader(object):
def __init__(self, header=None):
if header is None:
self.set_null()
else:
self.nVersion = header.nVersion
self.hashPrevBlock = header.hashPrevBlock
self.hashMerkleRoot = header.hashMerkleRoot
self.hashReserved = header.hashReserved
self.nTime = header.nTime
self.nBits = header.nBits
self.nNonce = header.nNonce
self.nSolution = header.nSolution
self.sha256 = header.sha256
self.hash = header.hash
self.calc_sha256()
def set_null(self):
self.nVersion = 4
self.hashPrevBlock = 0
self.hashMerkleRoot = 0
self.hashReserved = 0
self.nTime = 0
self.nBits = 0
self.nNonce = 0
self.nSolution = []
self.sha256 = None
self.hash = None
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.hashPrevBlock = deser_uint256(f)
self.hashMerkleRoot = deser_uint256(f)
self.hashReserved = deser_uint256(f)
self.nTime = struct.unpack("<I", f.read(4))[0]
self.nBits = struct.unpack("<I", f.read(4))[0]
self.nNonce = deser_uint256(f)
self.nSolution = deser_char_vector(f)
self.sha256 = None
self.hash = None
def serialize(self):
r = ""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += ser_uint256(self.hashReserved)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += ser_uint256(self.nNonce)
r += ser_char_vector(self.nSolution)
return r
def calc_sha256(self):
if self.sha256 is None:
r = ""
r += struct.pack("<i", self.nVersion)
r += ser_uint256(self.hashPrevBlock)
r += ser_uint256(self.hashMerkleRoot)
r += ser_uint256(self.hashReserved)
r += struct.pack("<I", self.nTime)
r += struct.pack("<I", self.nBits)
r += ser_uint256(self.nNonce)
r += ser_char_vector(self.nSolution)
self.sha256 = uint256_from_str(hash256(r))
self.hash = hash256(r)[::-1].encode('hex_codec')
def rehash(self):
self.sha256 = None
self.calc_sha256()
return self.sha256
def __repr__(self):
return "CBlockHeader(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x hashReserved=%064x nTime=%s nBits=%08x nNonce=%064x nSolution=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot, self.hashReserved,
time.ctime(self.nTime), self.nBits, self.nNonce, repr(self.nSolution))
class CBlock(CBlockHeader):
def __init__(self, header=None):
super(CBlock, self).__init__(header)
self.vtx = []
def deserialize(self, f):
super(CBlock, self).deserialize(f)
self.vtx = deser_vector(f, CTransaction)
def serialize(self):
r = ""
r += super(CBlock, self).serialize()
r += ser_vector(self.vtx)
return r
def calc_merkle_root(self):
hashes = []
for tx in self.vtx:
tx.calc_sha256()
hashes.append(ser_uint256(tx.sha256))
while len(hashes) > 1:
newhashes = []
for i in xrange(0, len(hashes), 2):
i2 = min(i+1, len(hashes)-1)
newhashes.append(hash256(hashes[i] + hashes[i2]))
hashes = newhashes
return uint256_from_str(hashes[0])
def is_valid(self, n=48, k=5):
# H(I||...
digest = blake2b(digest_size=(512/n)*n/8, person=vcoin_person(n, k))
digest.update(super(CBlock, self).serialize()[:108])
hash_nonce(digest, self.nNonce)
if not gbp_validate(self.nSolution, digest, n, k):
return False
self.calc_sha256()
target = uint256_from_compact(self.nBits)
if self.sha256 > target:
return False
for tx in self.vtx:
if not tx.is_valid():
return False
if self.calc_merkle_root() != self.hashMerkleRoot:
return False
return True
def solve(self, n=48, k=5):
target = uint256_from_compact(self.nBits)
# H(I||...
digest = blake2b(digest_size=(512/n)*n/8, person=vcoin_person(n, k))
digest.update(super(CBlock, self).serialize()[:108])
self.nNonce = 0
while True:
# H(I||V||...
curr_digest = digest.copy()
hash_nonce(curr_digest, self.nNonce)
# (x_1, x_2, ...) = A(I, V, n, k)
solns = gbp_basic(curr_digest, n, k)
for soln in solns:
assert(gbp_validate(curr_digest, soln, n, k))
self.nSolution = soln
self.rehash()
if self.sha256 <= target:
return
self.nNonce += 1
def __repr__(self):
return "CBlock(nVersion=%i hashPrevBlock=%064x hashMerkleRoot=%064x hashReserved=%064x nTime=%s nBits=%08x nNonce=%064x nSolution=%s vtx=%s)" \
% (self.nVersion, self.hashPrevBlock, self.hashMerkleRoot,
self.hashReserved, time.ctime(self.nTime), self.nBits,
self.nNonce, repr(self.nSolution), repr(self.vtx))
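# Typical flow for the classes above (an illustrative comment, not part of the
# original module): a test block is built by appending CTransaction objects to
# CBlock.vtx, pointing hashPrevBlock at the current tip, assigning
# hashMerkleRoot = calc_merkle_root(), and calling solve() to find an Equihash
# solution and nonce that satisfy nBits; is_valid() then re-checks the
# solution, the proof-of-work target, the transactions and the merkle root.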
class CUnsignedAlert(object):
def __init__(self):
self.nVersion = 1
self.nRelayUntil = 0
self.nExpiration = 0
self.nID = 0
self.nCancel = 0
self.setCancel = []
self.nMinVer = 0
self.nMaxVer = 0
self.setSubVer = []
self.nPriority = 0
self.strComment = ""
self.strStatusBar = ""
self.strReserved = ""
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
self.nRelayUntil = struct.unpack("<q", f.read(8))[0]
self.nExpiration = struct.unpack("<q", f.read(8))[0]
self.nID = struct.unpack("<i", f.read(4))[0]
self.nCancel = struct.unpack("<i", f.read(4))[0]
self.setCancel = deser_int_vector(f)
self.nMinVer = struct.unpack("<i", f.read(4))[0]
self.nMaxVer = struct.unpack("<i", f.read(4))[0]
self.setSubVer = deser_string_vector(f)
self.nPriority = struct.unpack("<i", f.read(4))[0]
self.strComment = deser_string(f)
self.strStatusBar = deser_string(f)
self.strReserved = deser_string(f)
def serialize(self):
r = ""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<q", self.nRelayUntil)
r += struct.pack("<q", self.nExpiration)
r += struct.pack("<i", self.nID)
r += struct.pack("<i", self.nCancel)
r += ser_int_vector(self.setCancel)
r += struct.pack("<i", self.nMinVer)
r += struct.pack("<i", self.nMaxVer)
r += ser_string_vector(self.setSubVer)
r += struct.pack("<i", self.nPriority)
r += ser_string(self.strComment)
r += ser_string(self.strStatusBar)
r += ser_string(self.strReserved)
return r
def __repr__(self):
return "CUnsignedAlert(nVersion %d, nRelayUntil %d, nExpiration %d, nID %d, nCancel %d, nMinVer %d, nMaxVer %d, nPriority %d, strComment %s, strStatusBar %s, strReserved %s)" \
% (self.nVersion, self.nRelayUntil, self.nExpiration, self.nID,
self.nCancel, self.nMinVer, self.nMaxVer, self.nPriority,
self.strComment, self.strStatusBar, self.strReserved)
class CAlert(object):
def __init__(self):
self.vchMsg = ""
self.vchSig = ""
def deserialize(self, f):
self.vchMsg = deser_string(f)
self.vchSig = deser_string(f)
def serialize(self):
r = ""
r += ser_string(self.vchMsg)
r += ser_string(self.vchSig)
return r
def __repr__(self):
return "CAlert(vchMsg.sz %d, vchSig.sz %d)" \
% (len(self.vchMsg), len(self.vchSig))
# Objects that correspond to messages on the wire
class msg_version(object):
command = "version"
def __init__(self, protocol_version=SPROUT_PROTO_VERSION):
self.nVersion = protocol_version
self.nServices = 1
self.nTime = time.time()
self.addrTo = CAddress()
self.addrFrom = CAddress()
self.nNonce = random.getrandbits(64)
self.strSubVer = MY_SUBVERSION
self.nStartingHeight = -1
def deserialize(self, f):
self.nVersion = struct.unpack("<i", f.read(4))[0]
if self.nVersion == 10300:
self.nVersion = 300
self.nServices = struct.unpack("<Q", f.read(8))[0]
self.nTime = struct.unpack("<q", f.read(8))[0]
self.addrTo = CAddress()
self.addrTo.deserialize(f)
if self.nVersion >= 106:
self.addrFrom = CAddress()
self.addrFrom.deserialize(f)
self.nNonce = struct.unpack("<Q", f.read(8))[0]
self.strSubVer = deser_string(f)
if self.nVersion >= 209:
self.nStartingHeight = struct.unpack("<i", f.read(4))[0]
else:
self.nStartingHeight = None
else:
self.addrFrom = None
self.nNonce = None
self.strSubVer = None
self.nStartingHeight = None
def serialize(self):
r = ""
r += struct.pack("<i", self.nVersion)
r += struct.pack("<Q", self.nServices)
r += struct.pack("<q", self.nTime)
r += self.addrTo.serialize()
r += self.addrFrom.serialize()
r += struct.pack("<Q", self.nNonce)
r += ser_string(self.strSubVer)
r += struct.pack("<i", self.nStartingHeight)
return r
def __repr__(self):
return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i)' \
% (self.nVersion, self.nServices, time.ctime(self.nTime),
repr(self.addrTo), repr(self.addrFrom), self.nNonce,
self.strSubVer, self.nStartingHeight)
class msg_verack(object):
command = "verack"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return ""
def __repr__(self):
return "msg_verack()"
class msg_addr(object):
command = "addr"
def __init__(self):
self.addrs = []
def deserialize(self, f):
self.addrs = deser_vector(f, CAddress)
def serialize(self):
return ser_vector(self.addrs)
def __repr__(self):
return "msg_addr(addrs=%s)" % (repr(self.addrs))
class msg_alert(object):
command = "alert"
def __init__(self):
self.alert = CAlert()
def deserialize(self, f):
self.alert = CAlert()
self.alert.deserialize(f)
def serialize(self):
r = ""
r += self.alert.serialize()
return r
def __repr__(self):
return "msg_alert(alert=%s)" % (repr(self.alert), )
class msg_inv(object):
command = "inv"
def __init__(self, inv=None):
if inv is None:
self.inv = []
else:
self.inv = inv
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_inv(inv=%s)" % (repr(self.inv))
class msg_getdata(object):
command = "getdata"
def __init__(self):
self.inv = []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_getdata(inv=%s)" % (repr(self.inv))
class msg_notfound(object):
command = "notfound"
def __init__(self):
self.inv = []
def deserialize(self, f):
self.inv = deser_vector(f, CInv)
def serialize(self):
return ser_vector(self.inv)
def __repr__(self):
return "msg_notfound(inv=%s)" % (repr(self.inv))
class msg_getblocks(object):
command = "getblocks"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0L
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = ""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getblocks(locator=%s hashstop=%064x)" \
% (repr(self.locator), self.hashstop)
class msg_tx(object):
command = "tx"
def __init__(self, tx=CTransaction()):
self.tx = tx
def deserialize(self, f):
self.tx.deserialize(f)
def serialize(self):
return self.tx.serialize()
def __repr__(self):
return "msg_tx(tx=%s)" % (repr(self.tx))
class msg_block(object):
command = "block"
def __init__(self, block=None):
if block is None:
self.block = CBlock()
else:
self.block = block
def deserialize(self, f):
self.block.deserialize(f)
def serialize(self):
return self.block.serialize()
def __repr__(self):
return "msg_block(block=%s)" % (repr(self.block))
class msg_getaddr(object):
command = "getaddr"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return ""
def __repr__(self):
return "msg_getaddr()"
class msg_ping_prebip31(object):
command = "ping"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return ""
def __repr__(self):
return "msg_ping() (pre-bip31)"
class msg_ping(object):
command = "ping"
def __init__(self, nonce=0L):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = ""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_ping(nonce=%08x)" % self.nonce
class msg_pong(object):
command = "pong"
def __init__(self, nonce=0L):
self.nonce = nonce
def deserialize(self, f):
self.nonce = struct.unpack("<Q", f.read(8))[0]
def serialize(self):
r = ""
r += struct.pack("<Q", self.nonce)
return r
def __repr__(self):
return "msg_pong(nonce=%08x)" % self.nonce
class msg_mempool(object):
command = "mempool"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return ""
def __repr__(self):
return "msg_mempool()"
# getheaders message has
# number of entries
# vector of hashes
# hash_stop (hash of last desired block header, 0 to get as many as possible)
class msg_getheaders(object):
command = "getheaders"
def __init__(self):
self.locator = CBlockLocator()
self.hashstop = 0L
def deserialize(self, f):
self.locator = CBlockLocator()
self.locator.deserialize(f)
self.hashstop = deser_uint256(f)
def serialize(self):
r = ""
r += self.locator.serialize()
r += ser_uint256(self.hashstop)
return r
def __repr__(self):
return "msg_getheaders(locator=%s, stop=%064x)" \
% (repr(self.locator), self.hashstop)
# headers message has
# <count> <vector of block headers>
class msg_headers(object):
command = "headers"
def __init__(self):
self.headers = []
def deserialize(self, f):
# comment in bitcoind indicates these should be deserialized as blocks
blocks = deser_vector(f, CBlock)
for x in blocks:
self.headers.append(CBlockHeader(x))
def serialize(self):
blocks = [CBlock(x) for x in self.headers]
return ser_vector(blocks)
def __repr__(self):
return "msg_headers(headers=%s)" % repr(self.headers)
class msg_reject(object):
command = "reject"
def __init__(self):
self.message = ""
self.code = ""
self.reason = ""
self.data = 0L
def deserialize(self, f):
self.message = deser_string(f)
self.code = struct.unpack("<B", f.read(1))[0]
self.reason = deser_string(f)
if (self.message == "block" or self.message == "tx"):
self.data = deser_uint256(f)
def serialize(self):
r = ser_string(self.message)
r += struct.pack("<B", self.code)
r += ser_string(self.reason)
if (self.message == "block" or self.message == "tx"):
r += ser_uint256(self.data)
return r
def __repr__(self):
return "msg_reject: %s %d %s [%064x]" \
% (self.message, self.code, self.reason, self.data)
class msg_filteradd(object):
command = "filteradd"
def __init__(self):
self.data = ""
def deserialize(self, f):
self.data = deser_string(f)
def serialize(self):
return ser_string(self.data)
def __repr__(self):
return "msg_filteradd(data=%s)" % (repr(self.data))
class msg_filterclear(object):
command = "filterclear"
def __init__(self):
pass
def deserialize(self, f):
pass
def serialize(self):
return ""
def __repr__(self):
return "msg_filterclear()"
# This is what a callback should look like for NodeConn
# Reimplement the on_* functions to provide handling for events
class NodeConnCB(object):
def __init__(self):
self.verack_received = False
# Derived classes should call this function once to set the message map
# which associates the derived classes' functions to incoming messages
def create_callback_map(self):
self.cbmap = {
"version": self.on_version,
"verack": self.on_verack,
"addr": self.on_addr,
"alert": self.on_alert,
"inv": self.on_inv,
"getdata": self.on_getdata,
"notfound": self.on_notfound,
"getblocks": self.on_getblocks,
"tx": self.on_tx,
"block": self.on_block,
"getaddr": self.on_getaddr,
"ping": self.on_ping,
"pong": self.on_pong,
"headers": self.on_headers,
"getheaders": self.on_getheaders,
"reject": self.on_reject,
"mempool": self.on_mempool
}
def deliver(self, conn, message):
with mininode_lock:
try:
self.cbmap[message.command](conn, message)
except:
print "ERROR delivering %s (%s)" % (repr(message),
sys.exc_info()[0])
def on_version(self, conn, message):
if message.nVersion >= 209:
conn.send_message(msg_verack())
conn.ver_send = min(SPROUT_PROTO_VERSION, message.nVersion)
if message.nVersion < 209:
conn.ver_recv = conn.ver_send
def on_verack(self, conn, message):
conn.ver_recv = conn.ver_send
self.verack_received = True
def on_inv(self, conn, message):
want = msg_getdata()
for i in message.inv:
if i.type != 0:
want.inv.append(i)
if len(want.inv):
conn.send_message(want)
def on_addr(self, conn, message): pass
def on_alert(self, conn, message): pass
def on_getdata(self, conn, message): pass
def on_notfound(self, conn, message): pass
def on_getblocks(self, conn, message): pass
def on_tx(self, conn, message): pass
def on_block(self, conn, message): pass
def on_getaddr(self, conn, message): pass
def on_headers(self, conn, message): pass
def on_getheaders(self, conn, message): pass
def on_ping(self, conn, message):
if conn.ver_send > BIP0031_VERSION:
conn.send_message(msg_pong(message.nonce))
def on_reject(self, conn, message): pass
def on_close(self, conn): pass
def on_mempool(self, conn): pass
def on_pong(self, conn, message): pass
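# Illustrative sketch, not part of the original framework: a test typically
# subclasses NodeConnCB, calls create_callback_map() once, and overrides only
# the on_* handlers it needs. The class below is an assumed example.
class ExampleCountingCB(NodeConnCB):
    def __init__(self):
        NodeConnCB.__init__(self)
        self.create_callback_map()
        self.blocks_received = 0
        self.txs_received = 0
    def on_block(self, conn, message):
        # Counts block messages routed here by deliver()
        self.blocks_received += 1
    def on_tx(self, conn, message):
        self.txs_received += 1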
# The actual NodeConn class
# This class provides an interface for a p2p connection to a specified node
class NodeConn(asyncore.dispatcher):
messagemap = {
"version": msg_version,
"verack": msg_verack,
"addr": msg_addr,
"alert": msg_alert,
"inv": msg_inv,
"getdata": msg_getdata,
"notfound": msg_notfound,
"getblocks": msg_getblocks,
"tx": msg_tx,
"block": msg_block,
"getaddr": msg_getaddr,
"ping": msg_ping,
"pong": msg_pong,
"headers": msg_headers,
"getheaders": msg_getheaders,
"reject": msg_reject,
"mempool": msg_mempool
}
MAGIC_BYTES = {
"mainnet": "\x24\xe9\x27\x64", # mainnet
"testnet3": "\xfa\x1a\xf9\xbf", # testnet3
"regtest": "\xaa\xe8\x3f\x5f" # regtest
}
def __init__(self, dstaddr, dstport, rpc, callback, net="regtest", protocol_version=SPROUT_PROTO_VERSION):
asyncore.dispatcher.__init__(self, map=mininode_socket_map)
self.log = logging.getLogger("NodeConn(%s:%d)" % (dstaddr, dstport))
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.sendbuf = ""
self.recvbuf = ""
self.ver_send = 209
self.ver_recv = 209
self.last_sent = 0
self.state = "connecting"
self.network = net
self.cb = callback
self.disconnect = False
# stuff version msg into sendbuf
vt = msg_version(protocol_version)
vt.addrTo.ip = self.dstaddr
vt.addrTo.port = self.dstport
vt.addrFrom.ip = "0.0.0.0"
vt.addrFrom.port = 0
self.send_message(vt, True)
print 'MiniNode: Connecting to Bitcoin Node IP # ' + dstaddr + ':' \
+ str(dstport) + ' using version ' + str(protocol_version)
try:
self.connect((dstaddr, dstport))
except:
self.handle_close()
self.rpc = rpc
def show_debug_msg(self, msg):
self.log.debug(msg)
def handle_connect(self):
self.show_debug_msg("MiniNode: Connected & Listening: \n")
self.state = "connected"
def handle_close(self):
self.show_debug_msg("MiniNode: Closing Connection to %s:%d... "
% (self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = ""
self.sendbuf = ""
try:
self.close()
except:
pass
self.cb.on_close(self)
def handle_read(self):
try:
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
self.got_data()
except:
pass
def readable(self):
return True
def writable(self):
with mininode_lock:
length = len(self.sendbuf)
return (length > 0)
def handle_write(self):
with mininode_lock:
try:
sent = self.send(self.sendbuf)
except:
self.handle_close()
return
self.sendbuf = self.sendbuf[sent:]
def got_data(self):
while True:
if len(self.recvbuf) < 4:
return
if self.recvbuf[:4] != self.MAGIC_BYTES[self.network]:
raise ValueError("got garbage %s" % repr(self.recvbuf))
if self.ver_recv < 209:
if len(self.recvbuf) < 4 + 12 + 4:
return
command = self.recvbuf[4:4+12].split("\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = None
if len(self.recvbuf) < 4 + 12 + 4 + msglen:
return
msg = self.recvbuf[4+12+4:4+12+4+msglen]
self.recvbuf = self.recvbuf[4+12+4+msglen:]
else:
if len(self.recvbuf) < 4 + 12 + 4 + 4:
return
command = self.recvbuf[4:4+12].split("\x00", 1)[0]
msglen = struct.unpack("<i", self.recvbuf[4+12:4+12+4])[0]
checksum = self.recvbuf[4+12+4:4+12+4+4]
if len(self.recvbuf) < 4 + 12 + 4 + 4 + msglen:
return
msg = self.recvbuf[4+12+4+4:4+12+4+4+msglen]
th = sha256(msg)
h = sha256(th)
if checksum != h[:4]:
raise ValueError("got bad checksum " + repr(self.recvbuf))
self.recvbuf = self.recvbuf[4+12+4+4+msglen:]
if command in self.messagemap:
f = cStringIO.StringIO(msg)
t = self.messagemap[command]()
t.deserialize(f)
self.got_message(t)
else:
self.show_debug_msg("Unknown command: '" + command + "' " +
repr(msg))
def send_message(self, message, pushbuf=False):
if self.state != "connected" and not pushbuf:
return
self.show_debug_msg("Send %s" % repr(message))
command = message.command
data = message.serialize()
tmsg = self.MAGIC_BYTES[self.network]
tmsg += command
tmsg += "\x00" * (12 - len(command))
tmsg += struct.pack("<I", len(data))
if self.ver_send >= 209:
th = sha256(data)
h = sha256(th)
tmsg += h[:4]
tmsg += data
with mininode_lock:
self.sendbuf += tmsg
self.last_sent = time.time()
def got_message(self, message):
if message.command == "version":
if message.nVersion <= BIP0031_VERSION:
self.messagemap['ping'] = msg_ping_prebip31
if self.last_sent + 30 * 60 < time.time():
self.send_message(self.messagemap['ping']())
self.show_debug_msg("Recv %s" % repr(message))
self.cb.deliver(self, message)
def disconnect_node(self):
self.disconnect = True
class NetworkThread(Thread):
def run(self):
while mininode_socket_map:
# We check for whether to disconnect outside of the asyncore
            # loop to work around the behavior of asyncore when using
# select
disconnected = []
for fd, obj in mininode_socket_map.items():
if obj.disconnect:
disconnected.append(obj)
[ obj.handle_close() for obj in disconnected ]
asyncore.loop(0.1, use_poll=True, map=mininode_socket_map, count=1)
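# Typical wiring of the classes above (a sketch; the address, port, and the
# absence of an rpc handle are assumptions, not values used by this file):
#   cb = ExampleCountingCB()                 # any NodeConnCB subclass
#   conn = NodeConn('127.0.0.1', 18444, rpc=None, callback=cb)
#   NetworkThread().start()                  # drives asyncore for all NodeConns
#   ...                                      # exchange messages via conn.send_message()
#   conn.disconnect_node()                   # NetworkThread will close the socket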
# An exception we can raise if we detect a potential disconnect
# (p2p or rpc) before the test is complete
class EarlyDisconnectError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
|
py | 1a32b7fbf3c450655f5a3bd5a4e6407bbe56c86d | # Copyright 2021 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from ament_cmake_python import find_packages_data
class TestFindPackagesData(unittest.TestCase):
def test_all_packages_data_is_found(self):
data = find_packages_data()
assert set(data) == {'foo', 'foo.bar', 'baz'}
assert set(data['foo']) == {'data', 'data.txt'}
assert set(data['foo.bar']) == {
'data.txt',
os.path.join('resources', 'fizz.txt'),
os.path.join('resources', 'buzz.txt')
}
assert set(data['baz']) == {'data.bin', 'data'}
def test_whole_package_data_is_included(self):
data = find_packages_data(
include=('foo', 'foo.*'))
assert set(data) == {'foo', 'foo.bar'}
assert set(data['foo']) == {'data', 'data.txt'}
assert set(data['foo.bar']) == {
'data.txt',
os.path.join('resources', 'fizz.txt'),
os.path.join('resources', 'buzz.txt')
}
def test_whole_package_data_is_excluded(self):
data = find_packages_data(
include=('foo', 'foo.*'),
exclude=('foo.bar',))
assert set(data) == {'foo'}
assert set(data['foo']) == {'data', 'data.txt'}
def test_partial_package_data_is_excluded(self):
data = find_packages_data(
include=('foo', 'foo.*'),
exclude={'foo.bar': ['resources/*']})
assert set(data) == {'foo', 'foo.bar'}
assert set(data['foo']) == {'data', 'data.txt'}
assert set(data['foo.bar']) == {'data.txt'}
def test_partial_package_data_is_included(self):
data = find_packages_data(
include={
'foo': ['*.txt'],
'foo.*': ['resources/*.txt']
},
)
assert set(data) == {'foo', 'foo.bar'}
assert set(data['foo']) == {'data.txt'}
assert set(data['foo.bar']) == {
os.path.join('resources', 'fizz.txt'),
os.path.join('resources', 'buzz.txt')
}
def test_nested_packages_data_is_found(self):
data = find_packages_data(where='nested/pkgs')
assert set(data) == {'fizz', 'fizz.buzz'}
assert set(data['fizz']) == {
os.path.join('data', 'buzz.bin')
}
assert set(data['fizz.buzz']) == {'data.txt'}
if __name__ == '__main__':
unittest.main()
|
py | 1a32b86407ddaf240907aa9427ac218f5280be6d | """Classes for providing extra information about an :class:`ihm.Entity`"""
# Handle different naming of urllib in Python 2/3
try:
import urllib.request as urllib2
except ImportError:
import urllib2
import sys
class Reference(object):
"""Base class for extra information about an :class:`ihm.Entity`.
This class is not used directly; instead, use a subclass such as
:class:`Sequence` or :class:`UniProtSequence`. These objects are
then typically passed to the :class:`ihm.Entity` constructor."""
pass
class Sequence(Reference):
"""Point to the sequence of an :class:`ihm.Entity` in a sequence database;
convenience subclasses are provided for common sequence databases such
as :class:`UniProtSequence`.
These objects are typically passed to the :class:`ihm.Entity`
constructor.
See also :attr:`alignments` to describe the correspondence between
the database and entity sequences.
:param str db_name: The name of the database.
:param str db_code: The name of the sequence in the database.
:param str accession: The database accession.
:param str sequence: The complete sequence, as a string of
one-letter codes.
:param str details: Longer text describing the sequence.
"""
def __init__(self, db_name, db_code, accession, sequence, details=None):
self.db_name, self.db_code, self.accession = db_name, db_code, accession
self.sequence, self.details = sequence, details
#: All alignments between the reference and entity sequences, as
#: :class:`Alignment` objects. If none are provided, a simple 1:1
#: alignment is assumed.
self.alignments = []
def _get_alignments(self):
if self.alignments:
return self.alignments
elif not hasattr(self, '_default_alignment'):
self._default_alignment = Alignment()
return [self._default_alignment]
class UniProtSequence(Sequence):
"""Point to the sequence of an :class:`ihm.Entity` in UniProt.
These objects are typically passed to the :class:`ihm.Entity`
constructor.
:param str db_code: The UniProt name (e.g. NUP84_YEAST)
:param str accession: The UniProt accession (e.g. P52891)
See :class:`Sequence` for a description of the remaining parameters.
"""
_db_name = 'UNP'
def __init__(self, db_code, accession, sequence, details=None):
super(UniProtSequence, self).__init__(
self._db_name, db_code, accession, sequence, details)
def __str__(self):
return "<ihm.reference.UniProtSequence(%s)>" % self.accession
@classmethod
def from_accession(cls, accession):
"""Create :class:`UniProtSequence` from just an accession.
This is done by querying the UniProt web API, so requires network
access.
:param str accession: The UniProt accession (e.g. P52891)
"""
# urlopen returns bytes
if sys.version_info[0] >= 3:
def decode(t):
return t.decode('ascii')
else:
decode = lambda t: t
url = 'https://www.uniprot.org/uniprot/%s.fasta' % accession
with urllib2.urlopen(url) as fh:
header = decode(fh.readline())
spl = header.split('|')
if len(spl) < 3 or spl[0] != '>sp':
raise ValueError("Cannot parse UniProt header %s" % header)
cd = spl[2].split(None, 1)
code = cd[0]
details = cd[1].rstrip('\r\n') if len(cd) > 1 else None
seq = decode(fh.read()).replace('\n', '')
return cls(code, accession, seq, details)
class Alignment(object):
"""A sequence range that aligns between the database and the entity.
This describes part of the sequence in the sequence database
(:class:`Sequence`) and in the :class:`ihm.Entity`. The two ranges
must be the same length and have the same primary sequence (any
differences must be described with :class:`SeqDif` objects).
:param int db_begin: The first residue in the database sequence
that is used (defaults to the entire sequence).
:param int db_end: The last residue in the database sequence
that is used (or None, the default, to use the entire sequence).
:param int entity_begin: The first residue in the :class:`~ihm.Entity`
sequence that is taken from the reference (defaults to the entire
entity sequence).
:param int entity_end: The last residue in the :class:`~ihm.Entity`
sequence that is taken from the reference (or None, the default,
to use the entire sequence).
:param seq_dif: Single-point mutations made to the sequence.
:type seq_dif: Sequence of :class:`SeqDif` objects.
"""
def __init__(self, db_begin=1, db_end=None, entity_begin=1,
entity_end=None, seq_dif=[]):
self.db_begin, self.db_end = db_begin, db_end
self.entity_begin, self.entity_end = entity_begin, entity_end
self.seq_dif = []
self.seq_dif.extend(seq_dif)
class SeqDif(object):
"""Annotate a sequence difference between a reference and entity sequence.
See :class:`Alignment`.
:param int seq_id: The residue index in the entity sequence.
:param db_monomer: The monomer type (as a :class:`~ihm.ChemComp` object)
in the reference sequence.
:type db_monomer: :class:`ihm.ChemComp`
:param monomer: The monomer type (as a :class:`~ihm.ChemComp` object)
in the entity sequence.
:type monomer: :class:`ihm.ChemComp`
:param str details: Descriptive text for the sequence difference.
"""
def __init__(self, seq_id, db_monomer, monomer, details=None):
self.seq_id, self.db_monomer, self.monomer = seq_id, db_monomer, monomer
self.details = details
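# Minimal usage sketch (illustrative only; the sequence variable, the residue
# ranges, and the ihm.Entity 'references' argument shown here are assumptions
# based on the docstrings above, not code from this module):
#   ref = UniProtSequence('NUP84_YEAST', 'P52891', sequence, details='Nup84')
#   ref.alignments.append(Alignment(db_begin=1, db_end=80,
#                                   entity_begin=1, entity_end=80))
#   entity = ihm.Entity(sequence[:80], references=[ref])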
|
py | 1a32b87cdef7ae1f0c3b1478a5a2c17718f8525b | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import socket
import time
import traceback
from telemetry import decorators
from telemetry.internal.backends.chrome_inspector import inspector_websocket
from telemetry.internal.backends.chrome_inspector import websocket
from tracing.trace_data import trace_data as trace_data_module
class TracingUnsupportedException(Exception):
pass
class TracingTimeoutException(Exception):
pass
class TracingUnrecoverableException(Exception):
pass
class TracingHasNotRunException(Exception):
pass
class TracingUnexpectedResponseException(Exception):
pass
class ClockSyncResponseException(Exception):
pass
class _DevToolsStreamReader(object):
def __init__(self, inspector_socket, stream_handle):
self._inspector_websocket = inspector_socket
self._handle = stream_handle
self._trace_file_handle = None
self._callback = None
def Read(self, callback):
# Do not allow the instance of this class to be reused, as
# we only read data sequentially at the moment, so a stream
# can only be read once.
assert not self._callback
self._trace_file_handle = trace_data_module.TraceFileHandle()
self._trace_file_handle.Open()
self._callback = callback
self._ReadChunkFromStream()
# The below is not a typo -- queue one extra read ahead to avoid latency.
self._ReadChunkFromStream()
def _ReadChunkFromStream(self):
# Limit max block size to avoid fragmenting memory in sock.recv(),
# (see https://github.com/liris/websocket-client/issues/163 for details)
req = {'method': 'IO.read', 'params': {
'handle': self._handle, 'size': 32768}}
self._inspector_websocket.AsyncRequest(req, self._GotChunkFromStream)
def _GotChunkFromStream(self, response):
# Quietly discard responses from reads queued ahead after EOF.
if self._trace_file_handle is None:
return
if 'error' in response:
raise TracingUnrecoverableException(
'Reading trace failed: %s' % response['error']['message'])
result = response['result']
    # Convert the trace data that's received as UTF-32 to its native encoding
    # of UTF-8 in order to reduce its size.
self._trace_file_handle.AppendTraceData(result['data'].encode('utf8'))
if not result.get('eof', False):
self._ReadChunkFromStream()
return
req = {'method': 'IO.close', 'params': {'handle': self._handle}}
self._inspector_websocket.SendAndIgnoreResponse(req)
self._trace_file_handle.Close()
self._callback(self._trace_file_handle)
self._trace_file_handle = None
class TracingBackend(object):
_TRACING_DOMAIN = 'Tracing'
def __init__(self, inspector_socket, is_tracing_running=False,
support_modern_devtools_tracing_start_api=False):
self._inspector_websocket = inspector_socket
self._inspector_websocket.RegisterDomain(
self._TRACING_DOMAIN, self._NotificationHandler)
self._is_tracing_running = is_tracing_running
self._start_issued = False
self._can_collect_data = False
self._has_received_all_tracing_data = False
self._support_modern_devtools_tracing_start_api = (
support_modern_devtools_tracing_start_api)
self._trace_data_builder = None
@property
def is_tracing_running(self):
return self._is_tracing_running
def StartTracing(self, chrome_trace_config, timeout=10):
"""When first called, starts tracing, and returns True.
If called during tracing, tracing is unchanged, and it returns False.
"""
if self.is_tracing_running:
return False
assert not self._can_collect_data, 'Data not collected from last trace.'
# Reset collected tracing data from previous tracing calls.
if not self.IsTracingSupported():
raise TracingUnsupportedException(
'Chrome tracing not supported for this app.')
params = {'transferMode': 'ReturnAsStream'}
if self._support_modern_devtools_tracing_start_api:
params['traceConfig'] = (
chrome_trace_config.GetChromeTraceConfigForDevTools())
else:
if chrome_trace_config.requires_modern_devtools_tracing_start_api:
raise TracingUnsupportedException(
'Trace options require modern Tracing.start DevTools API, '
'which is NOT supported by the browser')
params['categories'], params['options'] = (
chrome_trace_config.GetChromeTraceCategoriesAndOptionsForDevTools())
req = {'method': 'Tracing.start', 'params': params}
logging.info('Start Tracing Request: %r', req)
response = self._inspector_websocket.SyncRequest(req, timeout)
if 'error' in response:
raise TracingUnexpectedResponseException(
'Inspector returned unexpected response for '
'Tracing.start:\n' + json.dumps(response, indent=2))
self._is_tracing_running = True
self._start_issued = True
return True
def RecordClockSyncMarker(self, sync_id):
assert self.is_tracing_running, 'Tracing must be running to clock sync.'
req = {
'method': 'Tracing.recordClockSyncMarker',
'params': {
'syncId': sync_id
}
}
rc = self._inspector_websocket.SyncRequest(req, timeout=2)
if 'error' in rc:
raise ClockSyncResponseException(rc['error']['message'])
def StopTracing(self):
"""Stops tracing and pushes results to the supplied TraceDataBuilder.
If this is called after tracing has been stopped, trace data from the last
tracing run is pushed.
"""
if not self.is_tracing_running:
raise TracingHasNotRunException()
else:
if not self._start_issued:
        # Tracing is running but start was not issued, so startup tracing must
# be in effect. Issue another Tracing.start to update the transfer mode.
# TODO(caseq): get rid of it when streaming is the default.
params = {
'transferMode': 'ReturnAsStream',
'traceConfig': {}
}
req = {'method': 'Tracing.start', 'params': params}
self._inspector_websocket.SendAndIgnoreResponse(req)
req = {'method': 'Tracing.end'}
self._inspector_websocket.SendAndIgnoreResponse(req)
self._is_tracing_running = False
self._start_issued = False
self._can_collect_data = True
def DumpMemory(self, timeout=30):
"""Dumps memory.
Returns:
GUID of the generated dump if successful, None otherwise.
Raises:
TracingTimeoutException: If more than |timeout| seconds has passed
since the last time any data is received.
TracingUnrecoverableException: If there is a websocket error.
TracingUnexpectedResponseException: If the response contains an error
or does not contain the expected result.
"""
request = {
'method': 'Tracing.requestMemoryDump'
}
try:
response = self._inspector_websocket.SyncRequest(request, timeout)
except websocket.WebSocketTimeoutException:
raise TracingTimeoutException(
'Exception raised while sending a Tracing.requestMemoryDump '
'request:\n' + traceback.format_exc())
except (socket.error, websocket.WebSocketException,
inspector_websocket.WebSocketDisconnected):
raise TracingUnrecoverableException(
'Exception raised while sending a Tracing.requestMemoryDump '
'request:\n' + traceback.format_exc())
if ('error' in response or
'result' not in response or
'success' not in response['result'] or
'dumpGuid' not in response['result']):
raise TracingUnexpectedResponseException(
'Inspector returned unexpected response for '
'Tracing.requestMemoryDump:\n' + json.dumps(response, indent=2))
result = response['result']
return result['dumpGuid'] if result['success'] else None
def CollectTraceData(self, trace_data_builder, timeout=60):
if not self._can_collect_data:
raise Exception('Cannot collect before tracing is finished.')
self._CollectTracingData(trace_data_builder, timeout)
self._can_collect_data = False
def _CollectTracingData(self, trace_data_builder, timeout):
"""Collects tracing data. Assumes that Tracing.end has already been sent.
Args:
trace_data_builder: An instance of TraceDataBuilder to put results into.
timeout: The timeout in seconds.
Raises:
TracingTimeoutException: If more than |timeout| seconds has passed
since the last time any data is received.
TracingUnrecoverableException: If there is a websocket error.
"""
self._has_received_all_tracing_data = False
start_time = time.time()
self._trace_data_builder = trace_data_builder
try:
while True:
try:
self._inspector_websocket.DispatchNotifications(timeout)
start_time = time.time()
except websocket.WebSocketTimeoutException:
pass
except (socket.error, websocket.WebSocketException):
raise TracingUnrecoverableException(
'Exception raised while collecting tracing data:\n' +
traceback.format_exc())
if self._has_received_all_tracing_data:
break
elapsed_time = time.time() - start_time
if elapsed_time > timeout:
raise TracingTimeoutException(
'Only received partial trace data due to timeout after %s '
'seconds. If the trace data is big, you may want to increase '
'the timeout amount.' % elapsed_time)
finally:
self._trace_data_builder = None
def _NotificationHandler(self, res):
if 'Tracing.dataCollected' == res.get('method'):
value = res.get('params', {}).get('value')
self._trace_data_builder.AddTraceFor(
trace_data_module.CHROME_TRACE_PART, value)
elif 'Tracing.tracingComplete' == res.get('method'):
stream_handle = res.get('params', {}).get('stream')
if not stream_handle:
self._has_received_all_tracing_data = True
return
reader = _DevToolsStreamReader(self._inspector_websocket, stream_handle)
reader.Read(self._ReceivedAllTraceDataFromStream)
def _ReceivedAllTraceDataFromStream(self, trace_handle):
self._trace_data_builder.AddTraceFor(
trace_data_module.CHROME_TRACE_PART, trace_handle)
self._has_received_all_tracing_data = True
def Close(self):
self._inspector_websocket.UnregisterDomain(self._TRACING_DOMAIN)
self._inspector_websocket = None
@decorators.Cache
def IsTracingSupported(self):
req = {'method': 'Tracing.hasCompleted'}
res = self._inspector_websocket.SyncRequest(req, timeout=10)
return not res.get('response')
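# Typical call sequence for this backend (a sketch; `inspector_socket` and
# `config` stand for an inspector websocket and a ChromeTraceConfig created
# elsewhere, and TraceDataBuilder is assumed to come from trace_data_module):
#   backend = TracingBackend(inspector_socket)
#   backend.StartTracing(config)
#   ...                                        # exercise the page under test
#   backend.StopTracing()
#   builder = trace_data_module.TraceDataBuilder()
#   backend.CollectTraceData(builder)
#   backend.Close()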
|
py | 1a32b88261831a9f0445bf22acab03b4c2f98915 | import graphene
from graphql_jwt.exceptions import PermissionDenied
from ...core.permissions import WebhookPermissions
from ...webhook import models, payloads
from ...webhook.event_types import WebhookEventType
from ..utils import sort_queryset
from .sorters import WebhookSortField
from .types import Webhook, WebhookEvent
def resolve_webhooks(info, sort_by=None, **_kwargs):
service_account = info.context.service_account
if service_account:
qs = models.Webhook.objects.filter(service_account=service_account)
else:
user = info.context.user
if not user.has_perm(WebhookPermissions.MANAGE_WEBHOOKS):
raise PermissionDenied()
qs = models.Webhook.objects.all()
return sort_queryset(qs, sort_by, WebhookSortField)
def resolve_webhook(info, webhook_id):
service_account = info.context.service_account
if service_account:
_, webhook_id = graphene.Node.from_global_id(webhook_id)
return service_account.webhooks.filter(id=webhook_id).first()
user = info.context.user
if user.has_perm(WebhookPermissions.MANAGE_WEBHOOKS):
return graphene.Node.get_node_from_global_id(info, webhook_id, Webhook)
raise PermissionDenied()
def resolve_webhook_events():
return [
WebhookEvent(event_type=event_type[0])
for event_type in WebhookEventType.CHOICES
]
def resolve_sample_payload(info, event_name):
service_account = info.context.service_account
required_permission = WebhookEventType.PERMISSIONS.get(event_name)
if required_permission:
if service_account and service_account.has_perm(required_permission):
return payloads.generate_sample_payload(event_name)
if info.context.user.has_perm(required_permission):
return payloads.generate_sample_payload(event_name)
raise PermissionDenied()
|
py | 1a32b9a3eb0199af17fdd77d79acc6e8732721ef | import factory
USERS = 1000
GROUPS = 5
EVENTS = 5
NOTIFICATIONS = 5000
OCCURENCES = 2000
AUDITLOGS = 1000
if __name__ == '__main__': # noqa: C901
import os
import random
from django.core.wsgi import get_wsgi_application
os.environ['DJANGO_SETTINGS_MODULE'] = 'bitcaster.config.settings'
application = get_wsgi_application()
from bitcaster.models import Organization, Subscription, Notification, Event
from bitcaster.utils.tests import factories
org = Organization.objects.first()
app = factories.ApplicationFactory(organization=org, name='Dummy1')
users = []
for i in range(0, USERS):
print('.', end='')
        # NOTE: the address domain was redacted upstream; example.com is a placeholder
        user = factories.UserFactory(email='user%s@example.com' % i)
org_member = factories.OrganizationMemberFactory(organization=org, user=user)
users.append(org_member)
factories.ApplicationMemberFactory(application=app, org_member=org_member)
assert Organization.objects.count() == 1
if not users:
users = list(org.memberships.all())
channels = list(org.channels.all())
for i in range(0, GROUPS):
group = factories.OrganizationGroupFactory(organization=org, name='Group-%s' % i)
assert Organization.objects.count() == 1
if not group.members.exists():
members = random.sample(users, random.randint(5, 15))
for m in members:
group.members.add(m)
for i in range(0, EVENTS):
event = factories.EventFactory(application=app, enabled=True, name='Event-%s' % i)
assert Organization.objects.count() == 1
chs = random.sample(channels, random.randint(1, 5))
for ch in chs:
event.channels.add(ch)
factories.MessageFactory(event=event,
channel=ch, body='test')
members = random.sample(users, random.randint(5, 300))
for m in members:
ch = random.choice(chs)
factories.AddressFactory(user=m.user, address='123')
assert Organization.objects.count() == 1
factories.SubscriptionFactory(subscriber=m.user,
trigger_by=m.user,
channel=ch,
event=event)
assert Organization.objects.count() == 1
fld = Notification._meta.get_field('timestamp')
fld.auto_now_add = False
Notification.timestamp.auto_now_add = False
events = list(app.events.all())
subscriptions = list(Subscription.objects.filter(event__application=app))
# Notification Log
for i in range(NOTIFICATIONS):
subscription = random.choice(subscriptions)
factories.NotificationFactory(id=i,
application=app,
event=subscription.event,
subscription=subscription,
channel=subscription.channel)
assert Organization.objects.count() == 1
# Notification Log
for i in range(OCCURENCES):
factories.OccurenceFactory(id=i,
organization=org,
application=app,
event=factory.LazyAttribute(lambda a: Event.objects.order_by('?').first()))
# Audit Log
for i in range(AUDITLOGS):
e = factories.AuditLogEntryFactory(id=i,
organization=org,
)
|
py | 1a32bba38cd5789f71c42d6b2e9589368c27d81d | from rpaths import Path
import subprocess
import unittest
class TestTimeDelta(unittest.TestCase):
def call_function(self, delta):
p = subprocess.Popen(
['/bin/sh', '-s', '%d' % delta],
cwd=(Path(__file__).parent.parent /
'tej/remotes/default/commands').path,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout, stderr = p.communicate(b"""
#!/bin/sh
set -e
. "lib/utils.sh"
format_timedelta "$1"
""")
self.assertEqual(p.wait(), 0)
return stdout
def test_format(self):
self.assertEqual(self.call_function(0),
b'0:00\n')
self.assertEqual(self.call_function(4),
b'0:04\n')
self.assertEqual(self.call_function(42),
b'0:42\n')
self.assertEqual(self.call_function(120),
b'2:00\n')
self.assertEqual(self.call_function(124),
b'2:04\n')
self.assertEqual(self.call_function(154),
b'2:34\n')
self.assertEqual(self.call_function(3599),
b'59:59\n')
self.assertEqual(self.call_function(3600),
b'1:00:00\n')
self.assertEqual(self.call_function(3602),
b'1:00:02\n')
self.assertEqual(self.call_function(3660),
b'1:01:00\n')
self.assertEqual(self.call_function(5400),
b'1:30:00\n')
self.assertEqual(self.call_function(3722),
b'1:02:02\n')
self.assertEqual(self.call_function(3762),
b'1:02:42\n')
self.assertEqual(self.call_function(9762),
b'2:42:42\n')
self.assertEqual(self.call_function(25200),
b'7:00:00\n')
|
py | 1a32bbf503ec88197be787ac4bcbc6141c6208a1 | #!/usr/bin/env python
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from hashlib import sha1
from optparse import OptionParser
from os import link, makedirs, path, remove
import shutil
from subprocess import check_call, CalledProcessError
from sys import stderr
from util import hash_file, resolve_url
from zipfile import ZipFile, BadZipfile, LargeZipFile
GERRIT_HOME = path.expanduser('~/.gerritcodereview')
# TODO(davido): Rename in bazel-cache
CACHE_DIR = path.join(GERRIT_HOME, 'buck-cache', 'downloaded-artifacts')
LOCAL_PROPERTIES = 'local.properties'
def safe_mkdirs(d):
if path.isdir(d):
return
try:
makedirs(d)
except OSError as err:
if not path.isdir(d):
raise err
def download_properties(root_dir):
""" Get the download properties.
First tries to find the properties file in the given root directory,
and if not found there, tries in the Gerrit settings folder in the
user's home directory.
Returns a set of download properties, which may be empty.
"""
p = {}
local_prop = path.join(root_dir, LOCAL_PROPERTIES)
if not path.isfile(local_prop):
local_prop = path.join(GERRIT_HOME, LOCAL_PROPERTIES)
if path.isfile(local_prop):
try:
with open(local_prop) as fd:
for line in fd:
if line.startswith('download.'):
d = [e.strip() for e in line.split('=', 1)]
name, url = d[0], d[1]
p[name[len('download.'):]] = url
except OSError:
pass
return p
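# Sketch of the file format this parser expects (the key and URL below are
# placeholders, not values required by Gerrit): each "download.<NAME> = <url>"
# line becomes an entry in the returned dict, which resolve_url() uses when
# rewriting the requested download URL to point at a mirror.
#
#   download.GERRIT = http://mirror.example.com/gerrit/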
def cache_entry(args):
if args.v:
h = args.v
else:
h = sha1(args.u.encode('utf-8')).hexdigest()
name = '%s-%s' % (path.basename(args.o), h)
return path.join(CACHE_DIR, name)
opts = OptionParser()
opts.add_option('-o', help='local output file')
opts.add_option('-u', help='URL to download')
opts.add_option('-v', help='expected content SHA-1')
opts.add_option('-x', action='append', help='file to delete from ZIP')
opts.add_option('--exclude_java_sources', action='store_true')
opts.add_option('--unsign', action='store_true')
args, _ = opts.parse_args()
root_dir = args.o
while root_dir and path.dirname(root_dir) != root_dir:
root_dir, n = path.split(root_dir)
if n == 'WORKSPACE':
break
redirects = download_properties(root_dir)
cache_ent = cache_entry(args)
src_url = resolve_url(args.u, redirects)
if not path.exists(cache_ent):
try:
safe_mkdirs(path.dirname(cache_ent))
except OSError as err:
print('error creating directory %s: %s' %
(path.dirname(cache_ent), err), file=stderr)
exit(1)
print('Download %s' % src_url, file=stderr)
try:
check_call(['curl', '--proxy-anyauth', '-ksSfLo', cache_ent, src_url])
except OSError as err:
print('could not invoke curl: %s\nis curl installed?' % err, file=stderr)
exit(1)
except CalledProcessError as err:
print('error using curl: %s' % err, file=stderr)
exit(1)
if args.v:
have = hash_file(sha1(), cache_ent).hexdigest()
if args.v != have:
print((
'%s:\n' +
'expected %s\n' +
'received %s\n') % (src_url, args.v, have), file=stderr)
try:
remove(cache_ent)
except OSError as err:
if path.exists(cache_ent):
print('error removing %s: %s' % (cache_ent, err), file=stderr)
exit(1)
exclude = []
if args.x:
exclude += args.x
if args.exclude_java_sources:
try:
with ZipFile(cache_ent, 'r') as zf:
for n in zf.namelist():
if n.endswith('.java'):
exclude.append(n)
except (BadZipfile, LargeZipFile) as err:
print('error opening %s: %s' % (cache_ent, err), file=stderr)
exit(1)
if args.unsign:
try:
with ZipFile(cache_ent, 'r') as zf:
for n in zf.namelist():
if (n.endswith('.RSA')
or n.endswith('.SF')
or n.endswith('.LIST')):
exclude.append(n)
except (BadZipfile, LargeZipFile) as err:
print('error opening %s: %s' % (cache_ent, err), file=stderr)
exit(1)
safe_mkdirs(path.dirname(args.o))
if exclude:
try:
shutil.copyfile(cache_ent, args.o)
except (shutil.Error, IOError) as err:
print('error copying to %s: %s' % (args.o, err), file=stderr)
exit(1)
try:
check_call(['zip', '-d', args.o] + exclude)
except CalledProcessError as err:
print('error removing files from zip: %s' % err, file=stderr)
exit(1)
else:
try:
link(cache_ent, args.o)
except OSError as err:
try:
shutil.copyfile(cache_ent, args.o)
except (shutil.Error, IOError) as err:
print('error copying to %s: %s' % (args.o, err), file=stderr)
exit(1)
|
py | 1a32bc1be2a6f281a7b726d52e862e9032647ba8 |
# Testing the basic intent functionality of ONOS
# TODO: Replace the CLI calls with REST API equivalents as they become available.
# - May need to write functions in the onosrestdriver.py file to do this
# TODO: Complete implementation of case 3000, 4000, and 6000 as REST API allows
# -Currently there is no support in the REST API for Multi to Single and Single to Multi point intents
# As such, these cases are incomplete and should not be enabled in the .params file
import time
import json
class FUNCintentRest:
def __init__( self ):
self.default = ''
def CASE1( self, main ):
import time
import imp
import re
"""
- Construct tests variables
- GIT ( optional )
- Checkout ONOS master branch
- Pull latest ONOS code
- Building ONOS ( optional )
- Install ONOS package
- Build ONOS package
"""
main.case( "Constructing test variables and building ONOS package" )
main.step( "Constructing test variables" )
main.caseExplanation = "This test case is mainly for loading " +\
"from params file, and pull and build the " +\
" latest ONOS package"
stepResult = main.FALSE
# Test variables
try:
main.testOnDirectory = re.sub( "(/tests)$", "", main.testDir )
main.apps = main.params[ 'ENV' ][ 'cellApps' ]
gitBranch = main.params[ 'GIT' ][ 'branch' ]
main.dependencyPath = main.testOnDirectory + \
main.params[ 'DEPENDENCY' ][ 'path' ]
main.topology = main.params[ 'DEPENDENCY' ][ 'topology' ]
main.scale = ( main.params[ 'SCALE' ][ 'size' ] ).split( "," )
if main.ONOSbench.maxNodes:
main.maxNodes = int( main.ONOSbench.maxNodes )
else:
main.maxNodes = 0
wrapperFile1 = main.params[ 'DEPENDENCY' ][ 'wrapper1' ]
wrapperFile2 = main.params[ 'DEPENDENCY' ][ 'wrapper2' ]
wrapperFile3 = main.params[ 'DEPENDENCY' ][ 'wrapper3' ]
main.startUpSleep = int( main.params[ 'SLEEP' ][ 'startup' ] )
main.checkIntentSleep = int( main.params[ 'SLEEP' ][ 'checkintent' ] )
            main.removeIntentSleep = int( main.params[ 'SLEEP' ][ 'removeintent' ] )
main.rerouteSleep = int( main.params[ 'SLEEP' ][ 'reroute' ] )
main.fwdSleep = int( main.params[ 'SLEEP' ][ 'fwd' ] )
main.addIntentSleep = int( main.params[ 'SLEEP' ][ 'addIntent' ] )
main.checkTopoAttempts = int( main.params[ 'SLEEP' ][ 'topoAttempts' ] )
gitPull = main.params[ 'GIT' ][ 'pull' ]
main.numSwitch = int( main.params[ 'MININET' ][ 'switch' ] )
main.numLinks = int( main.params[ 'MININET' ][ 'links' ] )
main.cellData = {} # for creating cell file
main.hostsData = {}
main.CLIs = []
main.CLIs2 = []
main.ONOSip = []
main.scapyHostNames = main.params[ 'SCAPY' ][ 'HOSTNAMES' ].split( ',' )
main.scapyHosts = [] # List of scapy hosts for iterating
main.assertReturnString = '' # Assembled assert return string
main.ONOSip = main.ONOSbench.getOnosIps()
print main.ONOSip
# Assigning ONOS cli handles to a list
try:
for i in range( 1, main.maxNodes + 1 ):
main.CLIs.append( getattr( main, 'ONOSrest' + str( i ) ) )
main.CLIs2.append( getattr( main, 'ONOScli' + str( i ) ) )
except AttributeError:
main.log.warn( "A " + str( main.maxNodes ) + " node cluster " +
"was defined in env variables, but only " +
str( len( main.CLIs ) ) +
" nodes were defined in the .topo file. " +
"Using " + str( len( main.CLIs ) ) +
" nodes for the test." )
# -- INIT SECTION, ONLY RUNS ONCE -- #
main.startUp = imp.load_source( wrapperFile1,
main.dependencyPath +
wrapperFile1 +
".py" )
main.intentFunction = imp.load_source( wrapperFile2,
main.dependencyPath +
wrapperFile2 +
".py" )
main.topo = imp.load_source( wrapperFile3,
main.dependencyPath +
wrapperFile3 +
".py" )
copyResult1 = main.ONOSbench.scp( main.Mininet1,
main.dependencyPath +
main.topology,
main.Mininet1.home + "custom/",
direction="to" )
if main.CLIs and main.CLIs2:
stepResult = main.TRUE
else:
main.log.error( "Did not properly created list of ONOS CLI handle" )
stepResult = main.FALSE
except Exception as e:
main.log.exception(e)
main.cleanup()
main.exit()
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="Successfully construct " +
"test variables ",
onfail="Failed to construct test variables" )
if gitPull == 'True':
main.step( "Building ONOS in " + gitBranch + " branch" )
onosBuildResult = main.startUp.onosBuild( main, gitBranch )
stepResult = onosBuildResult
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="Successfully compiled " +
"latest ONOS",
onfail="Failed to compile " +
"latest ONOS" )
else:
main.log.warn( "Did not pull new code so skipping mvn " +
"clean install" )
main.ONOSbench.getVersion( report=True )
def CASE2( self, main ):
"""
- Set up cell
- Create cell file
- Set cell file
- Verify cell file
- Kill ONOS process
- Uninstall ONOS cluster
- Verify ONOS start up
- Install ONOS cluster
- Connect to cli
"""
# main.scale[ 0 ] determines the current number of ONOS controller
main.numCtrls = int( main.scale[ 0 ] )
main.case( "Starting up " + str( main.numCtrls ) +
" node(s) ONOS cluster" )
main.caseExplanation = "Set up ONOS with " + str( main.numCtrls ) +\
" node(s) ONOS cluster"
#kill off all onos processes
main.log.info( "Safety check, killing all ONOS processes" +
" before initiating environment setup" )
time.sleep( main.startUpSleep )
main.step( "Uninstalling ONOS package" )
onosUninstallResult = main.TRUE
for ip in main.ONOSip:
onosUninstallResult = onosUninstallResult and \
main.ONOSbench.onosUninstall( nodeIp=ip )
stepResult = onosUninstallResult
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="Successfully uninstalled ONOS package",
onfail="Failed to uninstall ONOS package" )
time.sleep( main.startUpSleep )
for i in range( main.maxNodes ):
main.ONOSbench.onosDie( main.ONOSip[ i ] )
print "NODE COUNT = ", main.numCtrls
tempOnosIp = []
for i in range( main.numCtrls ):
tempOnosIp.append( main.ONOSip[i] )
main.ONOSbench.createCellFile( main.ONOSbench.ip_address,
"temp", main.Mininet1.ip_address,
main.apps, tempOnosIp )
main.step( "Apply cell to environment" )
cellResult = main.ONOSbench.setCell( "temp" )
verifyResult = main.ONOSbench.verifyCell()
stepResult = cellResult and verifyResult
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="Successfully applied cell to " + \
"environment",
onfail="Failed to apply cell to environment " )
main.step( "Creating ONOS package" )
packageResult = main.ONOSbench.onosPackage()
stepResult = packageResult
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="Successfully created ONOS package",
onfail="Failed to create ONOS package" )
time.sleep( main.startUpSleep )
main.step( "Installing ONOS package" )
onosInstallResult = main.TRUE
for i in range( main.numCtrls ):
onosInstallResult = onosInstallResult and \
main.ONOSbench.onosInstall( node=main.ONOSip[ i ] )
stepResult = onosInstallResult
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="Successfully installed ONOS package",
onfail="Failed to install ONOS package" )
time.sleep( main.startUpSleep )
main.step( "Starting ONOS service" )
stopResult = main.TRUE
startResult = main.TRUE
onosIsUp = main.TRUE
for i in range( main.numCtrls ):
onosIsUp = onosIsUp and main.ONOSbench.isup( main.ONOSip[ i ] )
if onosIsUp == main.TRUE:
main.log.report( "ONOS instance is up and ready" )
else:
main.log.report( "ONOS instance may not be up, stop and " +
"start ONOS again " )
for i in range( main.numCtrls ):
stopResult = stopResult and \
main.ONOSbench.onosStop( main.ONOSip[ i ] )
for i in range( main.numCtrls ):
startResult = startResult and \
main.ONOSbench.onosStart( main.ONOSip[ i ] )
stepResult = onosIsUp and stopResult and startResult
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="ONOS service is ready",
onfail="ONOS service did not start properly" )
# Start an ONOS cli to provide functionality that is not currently
# supported by the Rest API remove this when Leader Checking is supported
# by the REST API
main.step( "Start ONOS cli" )
cliResult = main.TRUE
for i in range( main.numCtrls ):
cliResult = cliResult and \
main.CLIs2[ i ].startOnosCli( main.ONOSip[ i ] )
stepResult = cliResult
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="Successfully start ONOS cli",
onfail="Failed to start ONOS cli" )
# Remove the first element in main.scale list
main.scale.remove( main.scale[ 0 ] )
main.intentFunction.report( main )
def CASE8( self, main ):
# OLD FUNCintentRest CASE 8
# This remains here for archiving and reference purposes and will be
# removed when the new FUNCintentRest is verified to work.
# """
# Compare Topo
# """
# import json
# main.case( "Compare ONOS Topology view to Mininet topology" )
# main.caseExplanation = "Compare topology elements between Mininet" +\
# " and ONOS"
# main.step( "Gathering topology information" )
# # TODO: add a paramaterized sleep here
# devicesResults = main.TRUE # Overall Boolean for device correctness
# linksResults = main.TRUE # Overall Boolean for link correctness
# hostsResults = main.TRUE # Overall Boolean for host correctness
# devices = main.topo.getAllDevices( main )
# hosts = main.topo.getAllHosts( main )
# ports = main.topo.getAllPorts( main )
# links = main.topo.getAllLinks( main )
# clusters = main.topo.getAllClusters( main )
# mnSwitches = main.Mininet1.getSwitches()
# mnLinks = main.Mininet1.getLinks()
# mnHosts = main.Mininet1.getHosts()
# main.step( "Comparing MN topology to ONOS topology" )
# for controller in range( main.numCtrls ):
# controllerStr = str( controller + 1 )
# if devices[ controller ] and ports[ controller ] and\
# "Error" not in devices[ controller ] and\
# "Error" not in ports[ controller ]:
# currentDevicesResult = main.Mininet1.compareSwitches(
# mnSwitches,
# json.loads( devices[ controller ] ),
# json.loads( ports[ controller ] ) )
# else:
# currentDevicesResult = main.FALSE
# utilities.assert_equals( expect=main.TRUE,
# actual=currentDevicesResult,
# onpass="ONOS" + controllerStr +
# " Switches view is correct",
# onfail="ONOS" + controllerStr +
# " Switches view is incorrect" )
# if links[ controller ] and "Error" not in links[ controller ]:
# currentLinksResult = main.Mininet1.compareLinks(
# mnSwitches, mnLinks,
# json.loads( links[ controller ] ) )
# else:
# currentLinksResult = main.FALSE
# utilities.assert_equals( expect=main.TRUE,
# actual=currentLinksResult,
# onpass="ONOS" + controllerStr +
# " links view is correct",
# onfail="ONOS" + controllerStr +
# " links view is incorrect" )
# if hosts[ controller ] or "Error" not in hosts[ controller ]:
# currentHostsResult = main.Mininet1.compareHosts(
# mnHosts,
# json.loads( hosts[ controller ] ) )
# else:
# currentHostsResult = main.FALSE
# utilities.assert_equals( expect=main.TRUE,
# actual=currentHostsResult,
# onpass="ONOS" + controllerStr +
# " hosts exist in Mininet",
# onfail="ONOS" + controllerStr +
# " hosts don't match Mininet" )
# NEW FUNCintentRest Case 8 as based off of the CASE 8 from FUNCintent
"""
Compare ONOS Topology to Mininet Topology
"""
import json
main.case( "Compare ONOS Topology view to Mininet topology" )
main.caseExplanation = "Compare topology elements between Mininet" +\
" and ONOS"
main.log.info( "Gathering topology information from Mininet" )
devicesResults = main.FALSE # Overall Boolean for device correctness
linksResults = main.FALSE # Overall Boolean for link correctness
hostsResults = main.FALSE # Overall Boolean for host correctness
deviceFails = [] # Nodes where devices are incorrect
linkFails = [] # Nodes where links are incorrect
hostFails = [] # Nodes where hosts are incorrect
attempts = main.checkTopoAttempts # Remaining Attempts
mnSwitches = main.Mininet1.getSwitches()
mnLinks = main.Mininet1.getLinks()
mnHosts = main.Mininet1.getHosts()
main.step( "Comparing Mininet topology to ONOS topology" )
while ( attempts >= 0 ) and\
( not devicesResults or not linksResults or not hostsResults ):
time.sleep( 2 )
if not devicesResults:
devices = main.topo.getAllDevices( main )
ports = main.topo.getAllPorts( main )
devicesResults = main.TRUE
deviceFails = [] # Reset for each failed attempt
if not linksResults:
links = main.topo.getAllLinks( main )
linksResults = main.TRUE
linkFails = [] # Reset for each failed attempt
if not hostsResults:
hosts = main.topo.getAllHosts( main )
hostsResults = main.TRUE
hostFails = [] # Reset for each failed attempt
# Check for matching topology on each node
for controller in range( main.numCtrls ):
controllerStr = str( controller + 1 ) # ONOS node number
# Compare Devices
if devices[ controller ] and ports[ controller ] and\
"Error" not in devices[ controller ] and\
"Error" not in ports[ controller ]:
try:
deviceData = json.loads( devices[ controller ] )
portData = json.loads( ports[ controller ] )
except (TypeError,ValueError):
main.log.error( "Could not load json: {0} or {1}".format( str( devices[ controller ] ), str( ports[ controller ] ) ) )
currentDevicesResult = main.FALSE
else:
currentDevicesResult = main.Mininet1.compareSwitches(
mnSwitches,deviceData,portData )
else:
currentDevicesResult = main.FALSE
if not currentDevicesResult:
deviceFails.append( controllerStr )
devicesResults = devicesResults and currentDevicesResult
# Compare Links
if links[ controller ] and "Error" not in links[ controller ]:
try:
linkData = json.loads( links[ controller ] )
except (TypeError,ValueError):
main.log.error("Could not load json:" + str( links[ controller ] ) )
currentLinksResult = main.FALSE
else:
currentLinksResult = main.Mininet1.compareLinks(
mnSwitches, mnLinks,linkData )
else:
currentLinksResult = main.FALSE
if not currentLinksResult:
linkFails.append( controllerStr )
linksResults = linksResults and currentLinksResult
# Compare Hosts
if hosts[ controller ] and "Error" not in hosts[ controller ]:
try:
hostData = json.loads( hosts[ controller ] )
except (TypeError,ValueError):
main.log.error("Could not load json:" + str( hosts[ controller ] ) )
currentHostsResult = main.FALSE
else:
currentHostsResult = main.Mininet1.compareHosts(
mnHosts,hostData )
else:
currentHostsResult = main.FALSE
if not currentHostsResult:
hostFails.append( controllerStr )
hostsResults = hostsResults and currentHostsResult
# Decrement Attempts Remaining
attempts -= 1
utilities.assert_equals( expect=[],
actual=deviceFails,
onpass="ONOS correctly discovered all devices",
onfail="ONOS incorrectly discovered devices on nodes: " +
str( deviceFails ) )
utilities.assert_equals( expect=[],
actual=linkFails,
onpass="ONOS correctly discovered all links",
onfail="ONOS incorrectly discovered links on nodes: " +
str( linkFails ) )
utilities.assert_equals( expect=[],
actual=hostFails,
onpass="ONOS correctly discovered all hosts",
onfail="ONOS incorrectly discovered hosts on nodes: " +
str( hostFails ) )
topoResults = hostsResults and linksResults and devicesResults
utilities.assert_equals( expect=main.TRUE,
actual=topoResults,
onpass="ONOS correctly discovered the topology",
onfail="ONOS incorrectly discovered the topology" )
def CASE9( self, main ):
'''
Report errors/warnings/exceptions
'''
main.log.info( "Error report: \n" )
main.ONOSbench.logReport( globalONOSip[0],
[ "INFO", "FOLLOWER", "WARN", "flow", "ERROR" , "Except" ],
"s" )
#main.ONOSbench.logReport( globalONOSip[1], [ "INFO" ], "d" )
def CASE10( self, main ):
"""
Start Mininet topology with OF 1.0 switches
"""
main.OFProtocol = "1.0"
main.log.report( "Start Mininet topology with OF 1.0 switches" )
main.case( "Start Mininet topology with OF 1.0 switches" )
main.caseExplanation = "Start mininet topology with OF 1.0 " +\
"switches to test intents, exits out if " +\
"topology did not start correctly"
main.step( "Starting Mininet topology with OF 1.0 switches" )
args = "--switch ovs,protocols=OpenFlow10"
topoResult = main.Mininet1.startNet( topoFile=main.dependencyPath +
main.topology,
args=args )
stepResult = topoResult
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="Successfully loaded topology",
onfail="Failed to load topology" )
# Exit if topology did not load properly
if not topoResult:
main.cleanup()
main.exit()
def CASE11( self, main ):
"""
Start Mininet topology with OF 1.3 switches
"""
main.OFProtocol = "1.3"
main.log.report( "Start Mininet topology with OF 1.3 switches" )
main.case( "Start Mininet topology with OF 1.3 switches" )
main.caseExplanation = "Start mininet topology with OF 1.3 " +\
"switches to test intents, exits out if " +\
"topology did not start correctly"
main.step( "Starting Mininet topology with OF 1.3 switches" )
args = "--switch ovs,protocols=OpenFlow13"
topoResult = main.Mininet1.startNet( topoFile=main.dependencyPath +
main.topology,
args=args )
stepResult = topoResult
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="Successfully loaded topology",
onfail="Failed to load topology" )
# Exit if topology did not load properly
if not topoResult:
main.cleanup()
main.exit()
def CASE12( self, main ):
"""
Assign mastership to controllers
"""
import re
main.case( "Assign switches to controllers" )
main.step( "Assigning switches to controllers" )
main.caseExplanation = "Assign OF " + main.OFProtocol +\
" switches to ONOS nodes"
assignResult = main.TRUE
switchList = []
        # Create a list of switch names; used with getSwController() below
for i in range( 1, ( main.numSwitch + 1 ) ):
switchList.append( 's' + str( i ) )
tempONOSip = []
for i in range( main.numCtrls ):
tempONOSip.append( main.ONOSip[ i ] )
assignResult = main.Mininet1.assignSwController( sw=switchList,
ip=tempONOSip,
port='6653' )
if not assignResult:
main.cleanup()
main.exit()
for i in range( 1, ( main.numSwitch + 1 ) ):
response = main.Mininet1.getSwController( "s" + str( i ) )
print( "Response is " + str( response ) )
if re.search( "tcp:" + main.ONOSip[ 0 ], response ):
assignResult = assignResult and main.TRUE
else:
assignResult = main.FALSE
stepResult = assignResult
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="Successfully assigned switches" +
"to controller",
onfail="Failed to assign switches to " +
"controller" )
def CASE13( self,main ):
"""
Create Scapy components
"""
main.case( "Create scapy components" )
main.step( "Create scapy components" )
import json
scapyResult = main.TRUE
for hostName in main.scapyHostNames:
main.Scapy1.createHostComponent( hostName )
main.scapyHosts.append( getattr( main, hostName ) )
main.step( "Start scapy components" )
for host in main.scapyHosts:
host.startHostCli()
host.startScapy()
host.updateSelf()
main.log.debug( host.name )
main.log.debug( host.hostIp )
main.log.debug( host.hostMac )
utilities.assert_equals( expect=main.TRUE,
actual=scapyResult,
onpass="Successfully created Scapy Components",
onfail="Failed to discover Scapy Components" )
def CASE14( self, main ):
"""
        Discover all hosts and store their data in a dictionary
"""
main.case( "Discover all hosts" )
stepResult = main.TRUE
        main.step( "Discover all ipv4 hosts" )
hostList = []
# List of host with default vlan
defaultHosts = [ "h1", "h3", "h8", "h9", "h11", "h16", "h17", "h19", "h24" ]
# Lists of host with unique vlan
vlanHosts1 = [ "h4", "h12", "h20" ]
vlanHosts2 = [ "h5", "h13", "h21" ]
vlanHosts3 = [ "h6", "h14", "h22" ]
vlanHosts4 = [ "h7", "h15", "h23" ]
hostList.append( defaultHosts )
hostList.append( vlanHosts1 )
hostList.append( vlanHosts2 )
hostList.append( vlanHosts3 )
hostList.append( vlanHosts4 )
stepResult = main.intentFunction.getHostsData( main, hostList )
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="Successfully discovered hosts",
onfail="Failed to discover hosts" )
def CASE15( self, main ):
"""
        Discover all hosts with scapy ARP packets and store their data in a dictionary
"""
main.case( "Discover all hosts using scapy" )
main.step( "Send packets from each host to the first host and confirm onos discovery" )
import collections
if len( main.scapyHosts ) < 1:
main.log.error( "No scapy hosts have been created" )
main.skipCase()
# Send ARP packets from each scapy host component
main.intentFunction.sendDiscoveryArp( main, main.scapyHosts )
stepResult = utilities.retry( f=main.intentFunction.confirmHostDiscovery,
retValue=main.FALSE, args=[ main ],
attempts=main.checkTopoAttempts, sleep=2 )
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="ONOS correctly discovered all hosts",
onfail="ONOS incorrectly discovered hosts" )
main.step( "Populate hostsData" )
stepResult = main.intentFunction.populateHostData( main )
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="Successfully populated hostsData",
onfail="Failed to populate hostsData" )
def CASE16( self, main ):
"""
Balance Masters
"""
main.case( "Balance mastership of switches" )
main.step( "Balancing mastership of switches" )
balanceResult = main.FALSE
balanceResult = utilities.retry( f=main.CLIs2[ 0 ].balanceMasters, retValue=main.FALSE, args=[] )
utilities.assert_equals( expect=main.TRUE,
                                 actual=balanceResult,
onpass="Successfully balanced mastership of switches",
onfail="Failed to balance mastership of switches" )
def CASE17( self, main ):
"""
Stop mininet and remove scapy hosts
"""
main.log.report( "Stop Mininet and Scapy" )
main.case( "Stop Mininet and Scapy" )
main.caseExplanation = "Stopping the current mininet topology " +\
"to start up fresh"
main.step( "Stopping and Removing Scapy Host Components" )
scapyResult = main.TRUE
for host in main.scapyHosts:
scapyResult = scapyResult and host.stopScapy()
main.log.info( "Stopped Scapy Host: {0}".format( host.name ) )
for host in main.scapyHosts:
scapyResult = scapyResult and main.Scapy1.removeHostComponent( host.name )
main.log.info( "Removed Scapy Host Component: {0}".format( host.name ) )
main.scapyHosts = []
main.scapyHostIPs = []
utilities.assert_equals( expect=main.TRUE,
actual=scapyResult,
onpass="Successfully stopped scapy and removed host components",
onfail="Failed to stop mininet and scapy" )
main.step( "Stopping Mininet Topology" )
mininetResult = main.Mininet1.stopNet( )
utilities.assert_equals( expect=main.TRUE,
                                 actual=mininetResult,
                                 onpass="Successfully stopped mininet",
onfail="Failed to stop mininet" )
# Exit if topology did not load properly
if not (mininetResult and scapyResult ):
main.cleanup()
main.exit()
def CASE1000( self, main ):
"""
        Add host intents between 2 hosts:
- Discover hosts
- Add host intents
- Check intents
- Verify flows
- Ping hosts
- Reroute
- Link down
- Verify flows
- Check topology
- Ping hosts
- Link up
- Verify flows
- Check topology
- Ping hosts
- Remove intents
"""
import time
import json
import re
# Assert variables - These variable's name|format must be followed
# if you want to use the wrapper function
assert main, "There is no main"
assert main.CLIs, "There is no main.CLIs"
assert main.Mininet1, "Mininet handle should be named Mininet1"
        assert main.numSwitch, "Place the total number of switches in the topology in \
main.numSwitch"
# Save leader candidates
intentLeadersOld = main.CLIs2[ 0 ].leaderCandidates()
main.case( "Host Intents Test - " + str( main.numCtrls ) +
" NODE(S) - OF " + main.OFProtocol )
main.caseExplanation = "This test case tests Host intents using " +\
str( main.numCtrls ) + " node(s) cluster;\n" +\
"Different type of hosts will be tested in " +\
"each step such as IPV4, Dual stack, VLAN " +\
"etc;\nThe test will use OF " + main.OFProtocol\
+ " OVS running in Mininet"
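        # Note: each step below bounces the s5-s2 link and re-checks the rerouted
        # topology; the expectedLink=18 values assume the test topology reports
        # 18 unidirectional links once that single link is removed.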
main.step( "IPV4: Add and test host intents between h1 and h9" )
main.assertReturnString = "Assertion result for IPV4 host intent with mac addresses\n"
host1 = { "name":"h1","id":"00:00:00:00:00:01/-1" }
host2 = { "name":"h9","id":"00:00:00:00:00:09/-1" }
testResult = main.FALSE
installResult = main.intentFunction.installHostIntent( main,
name='IPV4',
onosNode='0',
host1=host1,
host2=host2 )
if installResult:
testResult = main.intentFunction.testHostIntent( main,
name='IPV4',
intentId = installResult,
onosNode='0',
host1=host1,
host2=host2,
sw1='s5',
sw2='s2',
expectedLink = 18 )
utilities.assert_equals( expect=main.TRUE,
actual=testResult,
onpass=main.assertReturnString,
onfail=main.assertReturnString )
main.step( "DUALSTACK1: Add host intents between h3 and h11" )
main.assertReturnString = "Assertion Result for dualstack IPV4 with MAC addresses\n"
host1 = { "name":"h3", "id":"00:00:00:00:00:03/-1" }
host2 = { "name":"h11","id":"00:00:00:00:00:0B/-1"}
testResult = main.FALSE
installResult = main.intentFunction.installHostIntent( main,
name='DUALSTACK1',
onosNode='0',
host1=host1,
host2=host2 )
if installResult:
testResult = main.intentFunction.testHostIntent( main,
name='DUALSTACK1',
intentId = installResult,
onosNode='0',
host1=host1,
host2=host2,
sw1='s5',
sw2='s2',
expectedLink = 18 )
utilities.assert_equals( expect=main.TRUE,
actual=testResult,
onpass=main.assertReturnString,
onfail=main.assertReturnString)
main.step( "DUALSTACK2: Add host intents between h1 and h11" )
main.assertReturnString = "Assertion Result for dualstack2 host intent\n"
host1 = { "name":"h1" }
host2 = { "name":"h11" }
testResult = main.FALSE
installResult = main.intentFunction.installHostIntent( main,
name='DUALSTACK2',
onosNode='0',
host1=host1,
host2=host2 )
if installResult:
testResult = main.intentFunction.testHostIntent( main,
name='DUALSTACK2',
intentId = installResult,
onosNode='0',
host1=host1,
host2=host2,
sw1='s5',
sw2='s2',
expectedLink = 18 )
utilities.assert_equals( expect=main.TRUE,
actual=testResult,
onpass=main.assertReturnString,
onfail=main.assertReturnString )
main.step( "1HOP: Add host intents between h1 and h3" )
main.assertReturnString = "Assertion Result for 1HOP for IPV4 same switch\n"
host1 = { "name":"h1" }
host2 = { "name":"h3" }
testResult = main.FALSE
installResult = main.intentFunction.installHostIntent( main,
name='1HOP',
onosNode='0',
host1=host1,
host2=host2 )
if installResult:
testResult = main.intentFunction.testHostIntent( main,
name='1HOP',
intentId = installResult,
onosNode='0',
host1=host1,
host2=host2,
sw1='s5',
sw2='s2',
expectedLink = 18 )
utilities.assert_equals( expect=main.TRUE,
actual=testResult,
onpass=main.assertReturnString,
onfail=main.assertReturnString )
main.step( "VLAN1: Add vlan host intents between h4 and h12" )
main.assertReturnString = "Assertion Result vlan IPV4\n"
host1 = { "name":"h4","id":"00:00:00:00:00:04/100" }
host2 = { "name":"h12","id":"00:00:00:00:00:0C/100" }
testResult = main.FALSE
installResult = main.intentFunction.installHostIntent( main,
name='VLAN1',
onosNode='0',
host1=host1,
host2=host2 )
if installResult:
testResult = main.intentFunction.testHostIntent( main,
name='VLAN1',
intentId = installResult,
onosNode='0',
host1=host1,
host2=host2,
sw1='s5',
sw2='s2',
expectedLink = 18 )
utilities.assert_equals( expect=main.TRUE,
actual=testResult,
onpass=main.assertReturnString,
onfail=main.assertReturnString )
main.step( "VLAN2: Add inter vlan host intents between h13 and h20" )
main.assertReturnString = "Assertion Result different VLAN negative test\n"
host1 = { "name":"h13" }
host2 = { "name":"h20" }
testResult = main.FALSE
installResult = main.intentFunction.installHostIntent( main,
name='VLAN2',
onosNode='0',
host1=host1,
host2=host2 )
if installResult:
testResult = main.intentFunction.testHostIntent( main,
name='VLAN2',
intentId = installResult,
onosNode='0',
host1=host1,
host2=host2,
sw1='s5',
sw2='s2',
expectedLink = 18 )
utilities.assert_equals( expect=main.TRUE,
actual=testResult,
onpass=main.assertReturnString,
onfail=main.assertReturnString )
# Change the following to use the REST API when leader checking is
# supported by it
main.step( "Confirm that ONOS leadership is unchanged")
intentLeadersNew = main.CLIs2[ 0 ].leaderCandidates()
main.intentFunction.checkLeaderChange( intentLeadersOld,
intentLeadersNew )
utilities.assert_equals( expect=main.TRUE,
actual=testResult,
onpass="ONOS Leaders Unchanged",
onfail="ONOS Leader Mismatch")
main.intentFunction.report( main )
def CASE2000( self, main ):
"""
Add point intents between 2 hosts:
- Get device ids | ports
- Add point intents
- Check intents
- Verify flows
- Ping hosts
- Reroute
- Link down
- Verify flows
- Check topology
- Ping hosts
- Link up
- Verify flows
- Check topology
- Ping hosts
- Remove intents
"""
import time
import json
import re
# Assert variables - These variable's name|format must be followed
# if you want to use the wrapper function
assert main, "There is no main"
assert main.CLIs, "There is no main.CLIs"
assert main.Mininet1, "Mininet handle should be named Mininet1"
        assert main.numSwitch, "Place the total number of switches in the topology in \
main.numSwitch"
main.case( "Point Intents Test - " + str( main.numCtrls ) +
" NODE(S) - OF " + main.OFProtocol )
main.caseExplanation = "This test case will test point to point" +\
" intents using " + str( main.numCtrls ) +\
" node(s) cluster;\n" +\
"Different type of hosts will be tested in " +\
"each step such as IPV4, Dual stack, VLAN etc" +\
";\nThe test will use OF " + main.OFProtocol +\
" OVS running in Mininet"
# No option point intents
main.step( "NOOPTION: Add point intents between h1 and h9" )
main.assertReturnString = "Assertion Result for NOOPTION point intent\n"
senders = [
{ "name":"h1","device":"of:0000000000000005/1" }
]
recipients = [
{ "name":"h9","device":"of:0000000000000006/1" }
]
testResult = main.FALSE
installResult = main.intentFunction.installPointIntent(
main,
name="NOOPTION",
senders=senders,
recipients=recipients )
if installResult:
testResult = main.intentFunction.testPointIntent(
main,
intentId=installResult,
name="NOOPTION",
senders=senders,
recipients=recipients,
sw1="s5",
sw2="s2",
expectedLink=18)
utilities.assert_equals( expect=main.TRUE,
actual=testResult,
onpass=main.assertReturnString,
onfail=main.assertReturnString )
main.step( "IPV4: Add point intents between h1 and h9" )
main.assertReturnString = "Assertion Result for IPV4 point intent\n"
senders = [
{ "name":"h1","device":"of:0000000000000005/1","mac":"00:00:00:00:00:01" }
]
recipients = [
{ "name":"h9","device":"of:0000000000000006/1","mac":"00:00:00:00:00:09" }
]
installResult = main.intentFunction.installPointIntent(
main,
name="IPV4",
senders=senders,
recipients=recipients,
ethType="IPV4" )
if installResult:
testResult = main.intentFunction.testPointIntent(
main,
intentId=installResult,
name="IPV4",
senders=senders,
recipients=recipients,
sw1="s5",
sw2="s2",
expectedLink=18)
utilities.assert_equals( expect=main.TRUE,
actual=testResult,
onpass=main.assertReturnString,
onfail=main.assertReturnString )
main.step( "IPV4_2: Add point intents between h1 and h9" )
main.assertReturnString = "Assertion Result for IPV4 no mac address point intents\n"
senders = [
{ "name":"h1","device":"of:0000000000000005/1" }
]
recipients = [
{ "name":"h9","device":"of:0000000000000006/1" }
]
installResult = main.intentFunction.installPointIntent(
main,
name="IPV4_2",
senders=senders,
recipients=recipients,
ethType="IPV4" )
if installResult:
testResult = main.intentFunction.testPointIntent(
main,
intentId=installResult,
name="IPV4_2",
senders=senders,
recipients=recipients,
sw1="s5",
sw2="s2",
expectedLink=18)
utilities.assert_equals( expect=main.TRUE,
actual=testResult,
onpass=main.assertReturnString,
onfail=main.assertReturnString )
main.step( "SDNIP-ICMP: Add point intents between h1 and h9" )
        main.assertReturnString = "Assertion Result for SDNIP-ICMP IPV4 using ICMP point intents\n"
senders = [
{ "name":"h1","device":"of:0000000000000005/1","mac":"00:00:00:00:00:01",
"ip":main.h1.hostIp }
]
recipients = [
{ "name":"h9","device":"of:0000000000000006/1","mac":"00:00:00:00:00:09",
"ip":main.h9.hostIp }
]
ipProto = main.params[ 'SDNIP' ][ 'icmpProto' ]
        # Unnecessary, not included in the selectors
tcpSrc = main.params[ 'SDNIP' ][ 'srcPort' ]
tcpDst = main.params[ 'SDNIP' ][ 'dstPort' ]
installResult = main.intentFunction.installPointIntent(
main,
name="SDNIP-ICMP",
senders=senders,
recipients=recipients,
ethType="IPV4",
ipProto=ipProto,
tcpSrc=tcpSrc,
tcpDst=tcpDst )
if installResult:
testResult = main.intentFunction.testPointIntent(
main,
intentId=installResult,
name="SDNIP_ICMP",
senders=senders,
recipients=recipients,
sw1="s5",
sw2="s2",
expectedLink=18)
utilities.assert_equals( expect=main.TRUE,
actual=testResult,
onpass=main.assertReturnString,
onfail=main.assertReturnString )
main.step( "SDNIP-TCP: Add point intents between h1 and h9" )
        main.assertReturnString = "Assertion Result for SDNIP-TCP IPV4 using TCP point intents\n"
mac1 = main.hostsData[ 'h1' ][ 'mac' ]
mac2 = main.hostsData[ 'h9' ][ 'mac' ]
ip1 = str( main.hostsData[ 'h1' ][ 'ipAddresses' ][ 0 ] ) + "/32"
ip2 = str( main.hostsData[ 'h9' ][ 'ipAddresses' ][ 0 ] ) + "/32"
ipProto = main.params[ 'SDNIP' ][ 'tcpProto' ]
tcp1 = main.params[ 'SDNIP' ][ 'srcPort' ]
tcp2 = main.params[ 'SDNIP' ][ 'dstPort' ]
stepResult = main.intentFunction.pointIntentTcp(
main,
name="SDNIP-TCP",
host1="h1",
host2="h9",
deviceId1="of:0000000000000005/1",
deviceId2="of:0000000000000006/1",
mac1=mac1,
mac2=mac2,
ethType="IPV4",
ipProto=ipProto,
ip1=ip1,
ip2=ip2,
tcp1=tcp1,
tcp2=tcp2 )
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass=main.assertReturnString,
onfail=main.assertReturnString )
main.step( "DUALSTACK1: Add point intents between h3 and h11" )
main.assertReturnString = "Assertion Result for Dualstack1 IPV4 with mac address point intents\n"
senders = [
{ "name":"h3","device":"of:0000000000000005/3","mac":"00:00:00:00:00:03" }
]
recipients = [
{ "name":"h11","device":"of:0000000000000006/3","mac":"00:00:00:00:00:0B" }
]
installResult = main.intentFunction.installPointIntent(
main,
name="DUALSTACK1",
senders=senders,
recipients=recipients,
ethType="IPV4" )
if installResult:
testResult = main.intentFunction.testPointIntent(
main,
intentId=installResult,
name="DUALSTACK1",
senders=senders,
recipients=recipients,
sw1="s5",
sw2="s2",
expectedLink=18)
utilities.assert_equals( expect=main.TRUE,
actual=testResult,
onpass=main.assertReturnString,
onfail=main.assertReturnString )
main.step( "VLAN: Add point intents between h5 and h21" )
main.assertReturnString = "Assertion Result for VLAN IPV4 with mac address point intents\n"
senders = [
{ "name":"h5","device":"of:0000000000000005/5","mac":"00:00:00:00:00:05" }
]
recipients = [
{ "name":"h21","device":"of:0000000000000007/5","mac":"00:00:00:00:00:15" }
]
installResult = main.intentFunction.installPointIntent(
main,
            name="VLAN",
senders=senders,
recipients=recipients,
ethType="IPV4" )
if installResult:
testResult = main.intentFunction.testPointIntent(
main,
intentId=installResult,
                name="VLAN",
senders=senders,
recipients=recipients,
sw1="s5",
sw2="s2",
expectedLink=18)
utilities.assert_equals( expect=main.TRUE,
actual=testResult,
onpass=main.assertReturnString,
onfail=main.assertReturnString )
main.step( "1HOP: Add point intents between h1 and h3" )
main.assertReturnString = "Assertion Result for 1HOP IPV4 with no mac address point intents\n"
senders = [
{ "name":"h1","device":"of:0000000000000005/1","mac":"00:00:00:00:00:01" }
]
recipients = [
{ "name":"h3","device":"of:0000000000000005/3","mac":"00:00:00:00:00:03" }
]
installResult = main.intentFunction.installPointIntent(
main,
name="1HOP IPV4",
senders=senders,
recipients=recipients,
ethType="IPV4" )
if installResult:
testResult = main.intentFunction.testPointIntent(
main,
intentId=installResult,
name="1HOP IPV4",
senders=senders,
recipients=recipients,
sw1="s5",
sw2="s2",
expectedLink=18)
utilities.assert_equals( expect=main.TRUE,
actual=testResult,
onpass=main.assertReturnString,
onfail=main.assertReturnString )
main.intentFunction.report( main )
def CASE3000( self, main ):
"""
Add single point to multi point intents
- Get device ids
- Add single point to multi point intents
- Check intents
- Verify flows
- Ping hosts
- Reroute
- Link down
- Verify flows
- Check topology
- Ping hosts
- Link up
- Verify flows
- Check topology
- Ping hosts
- Remove intents
"""
assert main, "There is no main"
assert main.CLIs, "There is no main.CLIs"
assert main.Mininet1, "Mininet handle should be named Mininet1"
        assert main.numSwitch, "Place the total number of switches in the topology in \
main.numSwitch"
main.case( "Single To Multi Point Intents Test - " +
str( main.numCtrls ) + " NODE(S) - OF " + main.OFProtocol )
main.caseExplanation = "This test case will test single point to" +\
" multi point intents using " +\
str( main.numCtrls ) + " node(s) cluster;\n" +\
"Different type of hosts will be tested in " +\
"each step such as IPV4, Dual stack, VLAN etc" +\
";\nThe test will use OF " + main.OFProtocol +\
" OVS running in Mininet"
main.step( "NOOPTION: Add single point to multi point intents" )
stepResult = main.TRUE
hostNames = [ 'h8', 'h16', 'h24' ]
devices = [ 'of:0000000000000005/8', 'of:0000000000000006/8', \
'of:0000000000000007/8' ]
macs = [ '00:00:00:00:00:08', '00:00:00:00:00:10', '00:00:00:00:00:18' ]
stepResult = main.intentFunction.singleToMultiIntent(
main,
name="NOOPTION",
hostNames=hostNames,
devices=devices,
sw1="s5",
sw2="s2",
expectedLink=18 )
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="NOOPTION: Successfully added single "
+ " point to multi point intents" +
" with no match action",
onfail="NOOPTION: Failed to add single point"
+ " point to multi point intents" +
" with no match action" )
main.step( "IPV4: Add single point to multi point intents" )
stepResult = main.TRUE
stepResult = main.intentFunction.singleToMultiIntent(
main,
name="IPV4",
hostNames=hostNames,
devices=devices,
ports=None,
ethType="IPV4",
macs=macs,
bandwidth="",
lambdaAlloc=False,
ipProto="",
ipAddresses="",
tcp="",
sw1="s5",
sw2="s2",
expectedLink=18 )
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="IPV4: Successfully added single "
+ " point to multi point intents" +
" with IPV4 type and MAC addresses",
onfail="IPV4: Failed to add single point"
+ " point to multi point intents" +
" with IPV4 type and MAC addresses" )
main.step( "IPV4_2: Add single point to multi point intents" )
stepResult = main.TRUE
hostNames = [ 'h8', 'h16', 'h24' ]
stepResult = main.intentFunction.singleToMultiIntent(
main,
            name="IPV4_2",
hostNames=hostNames,
ethType="IPV4",
lambdaAlloc=False )
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="IPV4_2: Successfully added single "
+ " point to multi point intents" +
" with IPV4 type and no MAC addresses",
onfail="IPV4_2: Failed to add single point"
+ " point to multi point intents" +
" with IPV4 type and no MAC addresses" )
main.step( "VLAN: Add single point to multi point intents" )
stepResult = main.TRUE
hostNames = [ 'h4', 'h12', 'h20' ]
devices = [ 'of:0000000000000005/4', 'of:0000000000000006/4', \
'of:0000000000000007/4' ]
macs = [ '00:00:00:00:00:04', '00:00:00:00:00:0C', '00:00:00:00:00:14' ]
stepResult = main.intentFunction.singleToMultiIntent(
main,
name="VLAN",
hostNames=hostNames,
devices=devices,
ports=None,
ethType="IPV4",
macs=macs,
bandwidth="",
lambdaAlloc=False,
ipProto="",
ipAddresses="",
tcp="",
sw1="s5",
sw2="s2",
expectedLink=18 )
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="VLAN: Successfully added single "
+ " point to multi point intents" +
" with IPV4 type and MAC addresses" +
" in the same VLAN",
onfail="VLAN: Failed to add single point"
+ " point to multi point intents" +
" with IPV4 type and MAC addresses" +
" in the same VLAN")
def CASE4000( self, main ):
"""
Add multi point to single point intents
- Get device ids
- Add multi point to single point intents
- Check intents
- Verify flows
- Ping hosts
- Reroute
- Link down
- Verify flows
- Check topology
- Ping hosts
- Link up
- Verify flows
- Check topology
- Ping hosts
- Remove intents
"""
assert main, "There is no main"
assert main.CLIs, "There is no main.CLIs"
assert main.Mininet1, "Mininet handle should be named Mininet1"
        assert main.numSwitch, "Place the total number of switches in the topology in \
main.numSwitch"
main.case( "Multi To Single Point Intents Test - " +
str( main.numCtrls ) + " NODE(S) - OF " + main.OFProtocol )
        main.caseExplanation = "This test case will test multi point to" +\
                               " single point intents using " +\
str( main.numCtrls ) + " node(s) cluster;\n" +\
"Different type of hosts will be tested in " +\
"each step such as IPV4, Dual stack, VLAN etc" +\
";\nThe test will use OF " + main.OFProtocol +\
" OVS running in Mininet"
main.step( "NOOPTION: Add multi point to single point intents" )
stepResult = main.TRUE
hostNames = [ 'h8', 'h16', 'h24' ]
devices = [ 'of:0000000000000005/8', 'of:0000000000000006/8', \
'of:0000000000000007/8' ]
macs = [ '00:00:00:00:00:08', '00:00:00:00:00:10', '00:00:00:00:00:18' ]
stepResult = main.intentFunction.multiToSingleIntent(
main,
name="NOOPTION",
hostNames=hostNames,
devices=devices,
sw1="s5",
sw2="s2",
expectedLink=18 )
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="NOOPTION: Successfully added multi "
+ " point to single point intents" +
" with no match action",
onfail="NOOPTION: Failed to add multi point" +
" to single point intents" +
" with no match action" )
main.step( "IPV4: Add multi point to single point intents" )
stepResult = main.TRUE
stepResult = main.intentFunction.multiToSingleIntent(
main,
name="IPV4",
hostNames=hostNames,
devices=devices,
ports=None,
ethType="IPV4",
macs=macs,
bandwidth="",
lambdaAlloc=False,
ipProto="",
ipAddresses="",
tcp="",
sw1="s5",
sw2="s2",
expectedLink=18 )
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="IPV4: Successfully added multi point"
+ " to single point intents" +
" with IPV4 type and MAC addresses",
onfail="IPV4: Failed to add multi point" +
" to single point intents" +
" with IPV4 type and MAC addresses" )
main.step( "IPV4_2: Add multi point to single point intents" )
stepResult = main.TRUE
hostNames = [ 'h8', 'h16', 'h24' ]
stepResult = main.intentFunction.multiToSingleIntent(
main,
            name="IPV4_2",
hostNames=hostNames,
ethType="IPV4",
lambdaAlloc=False )
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="IPV4_2: Successfully added multi point"
+ " to single point intents" +
" with IPV4 type and no MAC addresses",
onfail="IPV4_2: Failed to add multi point" +
" to single point intents" +
" with IPV4 type and no MAC addresses" )
main.step( "VLAN: Add multi point to single point intents" )
stepResult = main.TRUE
hostNames = [ 'h5', 'h13', 'h21' ]
devices = [ 'of:0000000000000005/5', 'of:0000000000000006/5', \
'of:0000000000000007/5' ]
macs = [ '00:00:00:00:00:05', '00:00:00:00:00:0D', '00:00:00:00:00:15' ]
stepResult = main.intentFunction.multiToSingleIntent(
main,
name="VLAN",
hostNames=hostNames,
devices=devices,
ports=None,
ethType="IPV4",
macs=macs,
bandwidth="",
lambdaAlloc=False,
ipProto="",
ipAddresses="",
tcp="",
sw1="s5",
sw2="s2",
expectedLink=18 )
utilities.assert_equals( expect=main.TRUE,
actual=stepResult,
onpass="VLAN: Successfully added multi point"
+ " to single point intents" +
" with IPV4 type and MAC addresses" +
" in the same VLAN",
onfail="VLAN: Failed to add multi point" +
" to single point intents" )
def CASE5000( self, main ):
# """
# Will add description in next patch set
# """
# assert main, "There is no main"
# assert main.CLIs, "There is no main.CLIs"
# assert main.Mininet1, "Mininet handle should be named Mininet1"
# assert main.numSwitch, "Placed the total number of switch topology in \
# main.numSwitch"
# main.case( "Test host mobility with host intents " )
# main.step( " Testing host mobility by moving h1 from s5 to s6" )
# h1PreMove = main.hostsData[ "h1" ][ "location" ][ 0:19 ]
# main.log.info( "Moving h1 from s5 to s6")
# main.Mininet1.moveHost( "h1","s5","s6" )
# main.intentFunction.getHostsData( main )
# h1PostMove = main.hostsData[ "h1" ][ "location" ][ 0:19 ]
# utilities.assert_equals( expect="of:0000000000000006",
# actual=h1PostMove,
# onpass="Mobility: Successfully moved h1 to s6",
# onfail="Mobility: Failed to moved h1 to s6" +
# " to single point intents" +
# " with IPV4 type and MAC addresses" +
# " in the same VLAN" )
# main.step( "IPV4: Add host intents between h1 and h9" )
# stepResult = main.TRUE
# stepResult = main.intentFunction.hostIntent( main,
# onosNode='0',
# name='IPV4',
# host1='h1',
# host2='h9',
# host1Id='00:00:00:00:00:01/-1',
# host2Id='00:00:00:00:00:09/-1' )
# utilities.assert_equals( expect=main.TRUE,
# actual=stepResult,
# onpass="IPV4: Host intent test successful " +
# "between two IPV4 hosts",
# onfail="IPV4: Host intent test failed " +
# "between two IPV4 hosts")
"""
Tests Host Mobility
Modifies the topology location of h1
"""
assert main, "There is no main"
assert main.CLIs, "There is no main.CLIs"
assert main.Mininet1, "Mininet handle should be named Mininet1"
        assert main.numSwitch, "Place the total number of switches in the topology in \
main.numSwitch"
main.case( "Test host mobility with host intents " )
main.step( "Testing host mobility by moving h1 from s5 to s6" )
h1PreMove = main.hostsData[ "h1" ][ "location" ][ 0:19 ]
main.log.info( "Moving h1 from s5 to s6")
main.Mininet1.moveHost( "h1","s5","s6" )
# Send discovery ping from moved host
# Moving the host brings down the default interfaces and creates a new one.
# Scapy is restarted on this host to detect the new interface
main.h1.stopScapy()
main.h1.startScapy()
# Discover new host location in ONOS and populate host data.
# Host 1 IP and MAC should be unchanged
main.intentFunction.sendDiscoveryArp( main, [ main.h1 ] )
main.intentFunction.populateHostData( main )
h1PostMove = main.hostsData[ "h1" ][ "location" ][ 0:19 ]
utilities.assert_equals( expect="of:0000000000000006",
actual=h1PostMove,
onpass="Mobility: Successfully moved h1 to s6",
                                 onfail="Mobility: Failed to move h1 to s6" )
main.step( "IPV4: Add host intents between h1 and h9" )
main.assertReturnString = "Assert result for IPV4 host intent between h1, moved, and h9\n"
host1 = { "name":"h1","id":"00:00:00:00:00:01/-1" }
host2 = { "name":"h9","id":"00:00:00:00:00:09/-1" }
installResult = main.intentFunction.installHostIntent( main,
                                                               name='Host Mobility IPV4',
onosNode='0',
host1=host1,
host2=host2)
if installResult:
testResult = main.intentFunction.testHostIntent( main,
name='Host Mobility IPV4',
intentId = installResult,
onosNode='0',
host1=host1,
host2=host2,
sw1="s6",
sw2="s2",
expectedLink=18 )
utilities.assert_equals( expect=main.TRUE,
actual=testResult,
onpass=main.assertReturnString,
onfail=main.assertReturnString )
main.intentFunction.report( main )
|
py | 1a32bcf9e733abe7a49cd10963bf802c836bbb5b | #! /usr/bin/env python
# -*- encoding: UTF-8 -*-
"""Example: Use getFootSteps Method"""
import qi
import argparse
import sys
import time
def main(session):
"""
This example uses the getFootSteps method.
"""
# Get the services ALMotion & ALRobotPosture.
motion_service = session.service("ALMotion")
posture_service = session.service("ALRobotPosture")
# Wake up robot
motion_service.wakeUp()
# Send NAO to Pose Init
posture_service.goToPosture("StandInit", 0.5)
#####################################
# A small example using getFootSteps
#####################################
# First call of move API
# Use _async=True argument to not be blocking here.
motion_service.moveTo(0.3, 0.0, 0.5, _async=True)
# wait that the move process start running
time.sleep(1.0)
# get the foot steps vector
footSteps = motion_service.getFootSteps()
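    # getFootSteps() is expected to return a vector laid out as:
    #   footSteps[0] -> [leftFootWorldPosition, rightFootWorldPosition]
    #   footSteps[1] -> the unchangeable (already executing) foot steps
    #   footSteps[2] -> the changeable (still modifiable) foot steps
    # (layout inferred from how the result is unpacked below)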
# print the result
leftFootWorldPosition = footSteps[0][0]
print "leftFootWorldPosition:"
print leftFootWorldPosition
print ""
rightFootWorldPosition = footSteps[0][1]
print "rightFootWorldPosition:"
print rightFootWorldPosition
print ""
footStepsUnchangeable = footSteps[1]
print "Unchangeable:"
print footStepsUnchangeable
print ""
footStepsChangeable = footSteps[2]
print "Changeable:"
print footStepsChangeable
print ""
motion_service.waitUntilMoveIsFinished()
# Go to rest position
motion_service.rest()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--ip", type=str, default="127.0.0.1",
help="Robot IP address. On robot or Local Naoqi: use '127.0.0.1'.")
parser.add_argument("--port", type=int, default=9559,
help="Naoqi port number")
args = parser.parse_args()
session = qi.Session()
try:
session.connect("tcp://" + args.ip + ":" + str(args.port))
except RuntimeError:
print ("Can't connect to Naoqi at ip \"" + args.ip + "\" on port " + str(args.port) +".\n"
"Please check your script arguments. Run with -h option for help.")
sys.exit(1)
main(session)
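# Example invocation (script name assumed): python almotion_getfootsteps.py --ip <robot-ip> --port 9559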
|
py | 1a32c030dc0b9c4e7069d638671045e0bad37f2f | import re
from parso.python import tree
from jedi._compatibility import zip_longest
from jedi import debug
from jedi.evaluate import analysis
from jedi.evaluate.lazy_context import LazyKnownContext, LazyKnownContexts, \
LazyTreeContext, get_merged_lazy_context
from jedi.evaluate.filters import ParamName
from jedi.evaluate.base_context import NO_CONTEXTS
from jedi.evaluate.context import iterable
from jedi.evaluate.param import get_executed_params, ExecutedParam
def try_iter_content(types, depth=0):
"""Helper method for static analysis."""
if depth > 10:
# It's possible that a loop has references on itself (especially with
# CompiledObject). Therefore don't loop infinitely.
return
for typ in types:
try:
f = typ.py__iter__
except AttributeError:
pass
else:
for lazy_context in f():
try_iter_content(lazy_context.infer(), depth + 1)
def repack_with_argument_clinic(string, keep_arguments_param=False):
"""
Transforms a function or method with arguments to the signature that is
given as an argument clinic notation.
Argument clinic is part of CPython and used for all the functions that are
implemented in C (Python 3.7):
str.split.__text_signature__
# Results in: '($self, /, sep=None, maxsplit=-1)'
"""
clinic_args = list(_parse_argument_clinic(string))
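    # Rough usage sketch (the clinic string and handler name below are
    # illustrative, not taken from this file):
    #
    #     @repack_with_argument_clinic('obj, name[, default], /')
    #     def getattr_handler(obj_contexts, name_contexts, default_contexts):
    #         ...
    #
    # Each clinic parameter is delivered as an already-inferred ContextSet
    # (NO_CONTEXTS for an omitted optional parameter) instead of the caller's
    # raw Arguments object.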
def decorator(func):
def wrapper(*args, **kwargs):
if keep_arguments_param:
arguments = kwargs['arguments']
else:
arguments = kwargs.pop('arguments')
try:
args += tuple(_iterate_argument_clinic(arguments, clinic_args))
except ValueError:
return NO_CONTEXTS
else:
return func(*args, **kwargs)
return wrapper
return decorator
def _iterate_argument_clinic(arguments, parameters):
"""Uses a list with argument clinic information (see PEP 436)."""
iterator = arguments.unpack()
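    # `parameters` is the list of (name, optional, allow_kwargs) tuples produced
    # by _parse_argument_clinic, e.g. 'iterator[, default], /' parses to
    # [('iterator', False, False), ('default', True, False)].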
for i, (name, optional, allow_kwargs) in enumerate(parameters):
key, argument = next(iterator, (None, None))
if key is not None:
debug.warning('Keyword arguments in argument clinic are currently not supported.')
raise ValueError
if argument is None and not optional:
debug.warning('TypeError: %s expected at least %s arguments, got %s',
name, len(parameters), i)
raise ValueError
context_set = NO_CONTEXTS if argument is None else argument.infer()
if not context_set and not optional:
# For the stdlib we always want values. If we don't get them,
# that's ok, maybe something is too hard to resolve, however,
# we will not proceed with the evaluation of that function.
debug.warning('argument_clinic "%s" not resolvable.', name)
raise ValueError
yield context_set
def _parse_argument_clinic(string):
allow_kwargs = False
optional = False
while string:
# Optional arguments have to begin with a bracket. And should always be
# at the end of the arguments. This is therefore not a proper argument
        # clinic implementation. `range()` for example allows an optional start
# value at the beginning.
match = re.match('(?:(?:(\[),? ?|, ?|)(\w+)|, ?/)\]*', string)
string = string[len(match.group(0)):]
if not match.group(2): # A slash -> allow named arguments
allow_kwargs = True
continue
optional = optional or bool(match.group(1))
word = match.group(2)
yield (word, optional, allow_kwargs)
class AbstractArguments(object):
context = None
argument_node = None
trailer = None
def eval_all(self, funcdef=None):
"""
Evaluates all arguments as a support for static analysis
(normally Jedi).
"""
for key, lazy_context in self.unpack():
types = lazy_context.infer()
try_iter_content(types)
def get_calling_nodes(self):
return []
def unpack(self, funcdef=None):
raise NotImplementedError
def get_executed_params(self, execution_context):
return get_executed_params(execution_context, self)
class AnonymousArguments(AbstractArguments):
def get_executed_params(self, execution_context):
from jedi.evaluate.dynamic import search_params
return search_params(
execution_context.evaluator,
execution_context,
execution_context.tree_node
)
def __repr__(self):
return '%s()' % self.__class__.__name__
class TreeArguments(AbstractArguments):
def __init__(self, evaluator, context, argument_node, trailer=None):
"""
The argument_node is either a parser node or a list of evaluated
objects. Those evaluated objects may be lists of evaluated objects
themselves (one list for the first argument, one for the second, etc).
:param argument_node: May be an argument_node or a list of nodes.
"""
self.argument_node = argument_node
self.context = context
self._evaluator = evaluator
self.trailer = trailer # Can be None, e.g. in a class definition.
def _split(self):
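        # Yields (star_count, node) pairs: star_count is 0 for a plain argument,
        # 1 for *args unpacking and 2 for **kwargs unpacking.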
if self.argument_node is None:
return
# Allow testlist here as well for Python2's class inheritance
# definitions.
if not (self.argument_node.type in ('arglist', 'testlist') or (
# in python 3.5 **arg is an argument, not arglist
(self.argument_node.type == 'argument') and
self.argument_node.children[0] in ('*', '**'))):
yield 0, self.argument_node
return
iterator = iter(self.argument_node.children)
for child in iterator:
if child == ',':
continue
elif child in ('*', '**'):
yield len(child.value), next(iterator)
elif child.type == 'argument' and \
child.children[0] in ('*', '**'):
assert len(child.children) == 2
yield len(child.children[0].value), child.children[1]
else:
yield 0, child
def unpack(self, funcdef=None):
named_args = []
for star_count, el in self._split():
if star_count == 1:
arrays = self.context.eval_node(el)
iterators = [_iterate_star_args(self.context, a, el, funcdef)
for a in arrays]
for values in list(zip_longest(*iterators)):
# TODO zip_longest yields None, that means this would raise
# an exception?
yield None, get_merged_lazy_context(
[v for v in values if v is not None]
)
elif star_count == 2:
arrays = self.context.eval_node(el)
for dct in arrays:
for key, values in _star_star_dict(self.context, dct, el, funcdef):
yield key, values
else:
if el.type == 'argument':
c = el.children
if len(c) == 3: # Keyword argument.
named_args.append((c[0].value, LazyTreeContext(self.context, c[2]),))
else: # Generator comprehension.
# Include the brackets with the parent.
comp = iterable.GeneratorComprehension(
self._evaluator, self.context, self.argument_node.parent)
yield None, LazyKnownContext(comp)
else:
yield None, LazyTreeContext(self.context, el)
# Reordering var_args is necessary, because star args sometimes appear
# after named argument, but in the actual order it's prepended.
for named_arg in named_args:
yield named_arg
def as_tree_tuple_objects(self):
for star_count, argument in self._split():
if argument.type == 'argument':
argument, default = argument.children[::2]
else:
default = None
yield argument, default, star_count
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self.argument_node)
def get_calling_nodes(self):
from jedi.evaluate.dynamic import DynamicExecutedParams
old_arguments_list = []
arguments = self
while arguments not in old_arguments_list:
if not isinstance(arguments, TreeArguments):
break
old_arguments_list.append(arguments)
for name, default, star_count in reversed(list(arguments.as_tree_tuple_objects())):
if not star_count or not isinstance(name, tree.Name):
continue
names = self._evaluator.goto(arguments.context, name)
if len(names) != 1:
break
if not isinstance(names[0], ParamName):
break
param = names[0].get_param()
if isinstance(param, DynamicExecutedParams):
# For dynamic searches we don't even want to see errors.
return []
if not isinstance(param, ExecutedParam):
break
if param.var_args is None:
break
arguments = param.var_args
break
if arguments.argument_node is not None:
return [arguments.argument_node]
if arguments.trailer is not None:
return [arguments.trailer]
return []
class ValuesArguments(AbstractArguments):
def __init__(self, values_list):
self._values_list = values_list
def unpack(self, funcdef=None):
for values in self._values_list:
yield None, LazyKnownContexts(values)
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._values_list)
def _iterate_star_args(context, array, input_node, funcdef=None):
try:
iter_ = array.py__iter__
except AttributeError:
if funcdef is not None:
# TODO this funcdef should not be needed.
m = "TypeError: %s() argument after * must be a sequence, not %s" \
% (funcdef.name.value, array)
analysis.add(context, 'type-error-star', input_node, message=m)
else:
for lazy_context in iter_():
yield lazy_context
def _star_star_dict(context, array, input_node, funcdef):
from jedi.evaluate.context.instance import CompiledInstance
if isinstance(array, CompiledInstance) and array.name.string_name == 'dict':
# For now ignore this case. In the future add proper iterators and just
# make one call without crazy isinstance checks.
return {}
elif isinstance(array, iterable.Sequence) and array.array_type == 'dict':
return array.exact_key_items()
else:
if funcdef is not None:
m = "TypeError: %s argument after ** must be a mapping, not %s" \
% (funcdef.name.value, array)
analysis.add(context, 'type-error-star-star', input_node, message=m)
return {}
|
py | 1a32c057399a84dedcf6a5b350eeeec81371a657 | import json
class PEXEL:
def __init__(self, annotations_file):
print("Loading captions from pexels dataset ...")
self.annotations_file = annotations_file
self.dataset = dict()
self.anns = dict()
        if annotations_file is not None:
self.dataset = json.load(open(annotations_file, 'r'))
self.createIndex()
def createIndex(self):
anns = {}
for entry in self.dataset:
anns[int(entry['_id'])] = entry['annotation']
self.anns = anns
print('pexels: loaded {} captions'.format(len(anns)))
def getImgPath(self, id):
return 'img_{}.jpg'.format(id)
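# Minimal usage sketch (the annotations path is hypothetical):
#     pexel = PEXEL('pexels/annotations.json')
#     caption = pexel.anns[12345]          # caption keyed by image id
#     filename = pexel.getImgPath(12345)   # -> 'img_12345.jpg'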
|
py | 1a32c12c7689394022da8a298a27e92eb79e5ac7 | # Copyright 2015 The Shaderc Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import expect
import os.path
from glslc_test_framework import inside_glslc_testsuite
from placeholder import FileShader, TempFileName
@inside_glslc_testsuite('OptionDashO')
class TestOptionDashOConcatenatedArg(expect.SuccessfulReturn,
expect.CorrectObjectFilePreamble):
"""Tests that we can concatenate -o and the output filename."""
shader = FileShader('#version 140\nvoid main() {}', '.vert')
glslc_args = ['-ofoo', shader]
def check_output_foo(self, status):
output_name = os.path.join(status.directory, 'foo')
return self.verify_object_file_preamble(output_name)
@inside_glslc_testsuite('OptionDashO')
class ManyOutputFilesWithDashO(expect.ErrorMessage):
"""Tests -o and -c with several files generates an error."""
shader1 = FileShader('', '.vert')
shader2 = FileShader('', '.frag')
glslc_args = ['-o', 'foo', '-c', shader1, shader2]
expected_error = [
'glslc: error: cannot specify -o when '
'generating multiple output files\n']
@inside_glslc_testsuite('OptionDashO')
class OutputFileLocation(expect.SuccessfulReturn,
expect.CorrectObjectFilePreamble):
"""Tests that the -o flag puts a file in a new location."""
shader = FileShader('#version 310 es\nvoid main() {}', '.frag')
glslc_args = [shader, '-o', TempFileName('a.out')]
def check_output_a_out(self, status):
output_name = os.path.join(status.directory, 'a.out')
return self.verify_object_file_preamble(output_name)
@inside_glslc_testsuite('OptionDashO')
class DashOMissingArgumentIsAnError(expect.ErrorMessage):
"""Tests that -o without an argument is an error."""
glslc_args = ['-o']
expected_error = ['glslc: error: argument to \'-o\' is missing ' +
'(expected 1 value)\n']
|
py | 1a32c166af9e57e7fe68c3c39230344b9dfa131e | cmd.do('list = setting.get_name_list();[print("%s => %s" % (name, setting.get_setting_text(name))) for name in list];')
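# Dumps every PyMOL setting as "name => value" lines; meant to be run from PyMOL's command line via cmd.do.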
|
py | 1a32c1974d6b0b324cc34912e841502bc76bfbbd | # Generated by Django 2.0.3 on 2018-06-03 04:53
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('questionnaire', '0015_auto_20180602_2026'),
]
operations = [
migrations.RenameField(
model_name='questionnaireblockgroupmembershowcondition',
old_name='need_to_be_member',
new_name='group',
),
migrations.RenameField(
model_name='questionnaireblockvariantcheckedshowcondition',
old_name='need_to_be_checked',
new_name='variant',
),
]
|
py | 1a32c24c2207027c751a4f49641d6e408b4e788e | from .engine import TradingEngine # noqa: F401
|
py | 1a32c2606880d97edf6d14de0d558f02fdc16751 | import os
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from app import app, server
style = {'maxWidth': '960px', 'margin': 'auto'}
app.layout = html.Div([
dcc.Tabs(id='tabs', value='tab-intro', children=[
dcc.Tab(label='Intro', value='tab-intro'),
dcc.Tab(label='Examples', value='tab-examples'),
dcc.Tab(label='Map', value='tab-map')
]),
html.Div(id='tabs-content')
], style=style)
from tabs import intro, map, examples
@app.callback(Output('tabs-content', 'children'),
[Input('tabs', 'value')])
def render_content(tab):
if tab == 'tab-intro': return intro.layout
elif tab == 'tab-examples': return examples.layout
elif tab == 'tab-map': return map.layout
if __name__ == '__main__':
app.run_server(debug=True) |
py | 1a32c27e1ee728816d74e0edc97ea6801e4a18eb |
from datetime import datetime
from typing import Dict, Optional
from dateutil.tz import tzutc
from ..util import Estimator
from ..const import SOURCE_GOOGLE_DRIVE, SOURCE_HA
from ..logger import getLogger
logger = getLogger(__name__)
PROP_KEY_SLUG = "snapshot_slug"
PROP_KEY_DATE = "snapshot_date"
PROP_KEY_NAME = "snapshot_name"
PROP_TYPE = "type"
PROP_VERSION = "version"
PROP_PROTECTED = "protected"
PROP_RETAINED = "retained"
DRIVE_KEY_TEXT = "Google Drive's snapshot metadata"
HA_KEY_TEXT = "Home Assistant's snapshot metadata"
class AbstractSnapshot():
def __init__(self, name: str, slug: str, source: str, date: str, size: int, version: str, snapshotType: str, protected: bool, retained: bool = False, uploadable: bool = False, details={}):
self._options = None
self._name = name
self._slug = slug
self._source = source
self._date = date
self._size = size
self._retained = retained
self._uploadable = uploadable
self._details = details
self._version = version
self._snapshotType = snapshotType
self._protected = protected
def setOptions(self, options):
self._options = options
def getOptions(self):
return self._options
def name(self) -> str:
return self._name
def slug(self) -> str:
return self._slug
def size(self) -> int:
return self._size
def sizeInt(self) -> int:
try:
return int(self.size())
except ValueError:
return 0
def date(self) -> datetime:
return self._date
def source(self) -> str:
return self._source
def retained(self) -> str:
return self._retained
def version(self):
return self._version
def snapshotType(self):
return self._snapshotType
def protected(self):
return self._protected
def setRetained(self, retained):
self._retained = retained
def uploadable(self) -> bool:
return self._uploadable
def considerForPurge(self) -> bool:
return not self.retained()
def setUploadable(self, uploadable):
self._uploadable = uploadable
def details(self):
return self._details
def status(self):
return None
class Snapshot(object):
"""
Represents a Home Assistant snapshot stored on Google Drive, locally in
Home Assistant, or a pending snapshot we expect to see show up later
"""
def __init__(self, snapshot: Optional[AbstractSnapshot] = None):
self.sources: Dict[str, AbstractSnapshot] = {}
self._purgeNext: Dict[str, bool] = {}
self._options = None
self._status_override = None
self._status_override_args = None
if snapshot is not None:
self.addSource(snapshot)
def setOptions(self, options):
self._options = options
def getOptions(self):
return self._options
def updatePurge(self, source: str, purge: bool):
self._purgeNext[source] = purge
def addSource(self, snapshot: AbstractSnapshot):
self.sources[snapshot.source()] = snapshot
if snapshot.getOptions() and not self.getOptions():
self.setOptions(snapshot.getOptions())
def removeSource(self, source):
if source in self.sources:
del self.sources[source]
if source in self._purgeNext:
del self._purgeNext[source]
def getPurges(self):
return self._purgeNext
def getSource(self, source: str):
return self.sources.get(source, None)
def name(self):
for snapshot in self.sources.values():
return snapshot.name()
return "error"
def slug(self) -> str:
for snapshot in self.sources.values():
return snapshot.slug()
return "error"
def size(self) -> int:
for snapshot in self.sources.values():
return snapshot.size()
return 0
def sizeInt(self) -> int:
for snapshot in self.sources.values():
return snapshot.sizeInt()
return 0
def snapshotType(self) -> str:
for snapshot in self.sources.values():
return snapshot.snapshotType()
return "error"
def version(self) -> str:
for snapshot in self.sources.values():
            return snapshot.version()
return "?"
def details(self):
for snapshot in self.sources.values():
return snapshot.details()
return "?"
def protected(self) -> bool:
for snapshot in self.sources.values():
return snapshot.protected()
return False
def date(self) -> datetime:
for snapshot in self.sources.values():
return snapshot.date()
return datetime.now(tzutc())
def sizeString(self) -> str:
size_string = self.size()
if type(size_string) == str:
return size_string
return Estimator.asSizeString(size_string)
def status(self) -> str:
if self._status_override is not None:
return self._status_override.format(*self._status_override_args)
for snapshot in self.sources.values():
status = snapshot.status()
if status:
return status
inDrive = self.getSource(SOURCE_GOOGLE_DRIVE) is not None
inHa = self.getSource(SOURCE_HA) is not None
if inDrive and inHa:
return "Backed Up"
if inDrive:
return "Drive Only"
if inHa:
return "HA Only"
return "Deleted"
def isDeleted(self) -> bool:
return len(self.sources) == 0
def overrideStatus(self, format, *args) -> None:
self._status_override = format
self._status_override_args = args
def clearStatus(self):
self._status_override = None
self._status_override_args = None
def __str__(self) -> str:
return "<Slug: {0} {1} {2}>".format(self.slug(), " ".join(self.sources), self.date().isoformat())
def __format__(self, format_spec: str) -> str:
return self.__str__()
def __repr__(self) -> str:
return self.__str__()
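# Rough usage sketch (the source objects are AbstractSnapshot instances built
# elsewhere, e.g. from Home Assistant and Google Drive metadata; names below
# are hypothetical):
#     snap = Snapshot()
#     snap.addSource(ha_snapshot)       # source() == SOURCE_HA
#     snap.addSource(drive_snapshot)    # source() == SOURCE_GOOGLE_DRIVE
#     snap.status()                     # -> "Backed Up" once both sources exist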
|
py | 1a32c2e78670cf2750c4ac9ea861f4e177e34d9c | #!/usr/bin/env python
#
# Copyright 2007 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for google.appengine.runtime.context."""
import os
from unittest import mock
from google.appengine.runtime import context
from google.appengine.runtime.context import ctx_test_util
from absl.testing import absltest
@ctx_test_util.isolated_context
class ContextTest(absltest.TestCase):
def setUp(self):
super().setUp()
orig_val = context.READ_FROM_OS_ENVIRON
def restore():
context.READ_FROM_OS_ENVIRON = orig_val
self.addCleanup(restore)
def testBooleanConversionOnWrite(self):
context.init_from_wsgi_environ({
'HTTP_X_APPENGINE_USER_IS_ADMIN': '1',
})
self.assertEqual(context.gae_headers.USER_IS_ADMIN.get(), True)
def testBooleanConversionOnRead(self):
context.READ_FROM_OS_ENVIRON = False
context.gae_headers.USER_IS_ADMIN.set(True)
self.assertEqual(context.get('USER_IS_ADMIN'), '1')
@mock.patch.dict(os.environ)
@mock.patch.object(context, 'READ_FROM_OS_ENVIRON')
def testReadFrom(self, mock_read_from_os_environ):
del mock_read_from_os_environ
context.gae_headers.USER_ID.set('value in context')
os.environ['USER_ID'] = 'value in os.environ'
with self.subTest('contextvars'):
context.READ_FROM_OS_ENVIRON = False
self.assertEqual(context.get('USER_ID'), 'value in context')
with self.subTest('os.environ'):
context.READ_FROM_OS_ENVIRON = True
self.assertEqual(context.get('USER_ID'), 'value in os.environ')
if __name__ == '__main__':
absltest.main()
|
py | 1a32c31639824cc735976337b519959149c52ceb | """
Copyright: Wenyi Tang 2017-2018
Author: Wenyi Tang
Email: [email protected]
Created Date: June 8th 2018
Updated Date: June 8th 2018
Image Super-Resolution via Deep Recursive Residual Network (CVPR 2017)
See http://cvlab.cse.msu.edu/pdfs/Tai_Yang_Liu_CVPR2017.pdf
"""
from ..Framework.SuperResolution import SuperResolution
from ..Util.Utility import *
class DRRN(SuperResolution):
"""Image Super-Resolution via Deep Recursive Residual Network
Args:
residual_unit: number of residual blocks in one recursion
recursive_block: number of recursions
grad_clip: gradient clip ratio according to the paper
custom_upsample: use --add_custom_callbacks=upsample during fitting, or
use `bicubic_rescale`. TODO: REMOVE IN FUTURE.
"""
def __init__(self, residual_unit=3, recursive_block=3,
custom_upsample=False,
grad_clip=0.01, name='drrn', **kwargs):
self.ru = residual_unit
self.rb = recursive_block
self.grad_clip = grad_clip
self.do_up = not custom_upsample
self.name = name
super(DRRN, self).__init__(**kwargs)
def display(self):
super(DRRN, self).display()
tf.logging.info('Recursive Blocks: %d' % self.rb)
tf.logging.info('Residual Units: %d' % self.ru)
def _shared_resblock(self, inputs, **kwargs):
x = self.relu_conv2d(inputs, 128, 3)
for _ in range(self.ru):
x = self.resblock(x, 128, 3, reuse=tf.AUTO_REUSE, name='Res')
return x
def build_graph(self):
super(DRRN, self).build_graph()
with tf.variable_scope(self.name):
x = self.inputs_preproc[-1]
if self.do_up:
x = bicubic_rescale(self.inputs_preproc[-1], self.scale)
bic = x
for _ in range(self.rb):
x = self._shared_resblock(x)
x = self.conv2d(x, self.channel, 3)
self.outputs.append(x + bic)
def build_loss(self):
with tf.name_scope('loss'):
y_true = self.label[-1]
y_pred = self.outputs[-1]
mse = tf.losses.mean_squared_error(y_true, y_pred)
loss = tf.add_n([mse] + tf.losses.get_regularization_losses())
opt = tf.train.AdamOptimizer(self.learning_rate)
if self.grad_clip > 0:
grads_and_vars = []
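        # Clip each gradient to [-grad_clip/lr, grad_clip/lr]; this follows the
        # gradient-clipping scheme described in the DRRN paper, where the clip
        # range is theta/mu with mu the current learning rate.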
for grad, var in opt.compute_gradients(loss):
grads_and_vars.append((
tf.clip_by_value(
grad,
-self.grad_clip / self.learning_rate,
self.grad_clip / self.learning_rate),
var))
op = opt.apply_gradients(grads_and_vars, self.global_steps)
else:
op = opt.minimize(loss, self.global_steps)
self.loss.append(op)
self.train_metric['loss'] = loss
self.metrics['mse'] = mse
self.metrics['psnr'] = tf.reduce_mean(
tf.image.psnr(y_true, y_pred, 255))
self.metrics['ssim'] = tf.reduce_mean(
tf.image.ssim(y_true, y_pred, 255))
def build_saver(self):
self.savers[self.name] = tf.train.Saver(tf.global_variables(self.name),
max_to_keep=1)
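# Minimal usage sketch (constructor keywords such as `scale` and `channel` come
# from the SuperResolution base class and are assumed here):
#     model = DRRN(residual_unit=3, recursive_block=3, scale=4, channel=1)
#     model.build_graph()
#     model.build_loss()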
|
py | 1a32c324dad9862fb504757442989e480fc9c2f2 | from turtle import Turtle, Screen
turtle = Turtle()
screen = Screen()
for a in range(4):
turtle.fd(100)
turtle.right(90)
screen.exitonclick() |
py | 1a32c339f16e335f67a778abac8be68559694e03 | """
Modules to test data gathering. This is quite complex testing as it requires
mock projects, sites, evals metadata and evals data to appropriately test
everything. Recall that evals data is simply spectra data reduced to the
evaluation frequencies. It is, however, separated into its own data folder to
avoid in potential future ambiguity in which data is being fetched.
The test works towards gathering data for three sites
- site1: measurements meas1, meas2, meas3
- site2: measurements run1, run2
- site3: measurements data1
The intention is to setup intersite processing with a remote reference. Each
site will have two channels, namely,
- site1: Ex, Ey (output site)
- site2: Hx, Hy (input site)
- site3: Hx, Hy (remote/cross site)
There are only two frequencies per decimation level, but multiple windows
"""
from typing import Dict
from pathlib import Path
import numpy as np
import pandas as pd
import pytest
from resistics.errors import ChannelNotFoundError
from resistics.project import ProjectMetadata, Project
from resistics.project import get_meas_evals_path
from resistics.decimate import DecimationSetup
from resistics.spectra import SpectraData, SpectraDataReader, SpectraMetadata
import resistics.gather as gather
from resistics.transfunc import TransferFunction, ImpedanceTensor
from testing_data_evals import get_evals_metadata_site1, get_evals_data_site1
from testing_data_evals import get_evals_metadata_site2, get_evals_data_site2
from testing_data_evals import get_evals_metadata_site3, get_evals_data_site3
from testing_data_evals import SITE1_COMBINED_DATA, SITE2_COMBINED_DATA
from testing_data_evals import SITE3_COMBINED_DATA
from testing_data_evals import SITE2_RUN2_QUICK_OUT, SITE2_RUN2_QUICK_IN
from testing_data_evals import SITE2_RUN2_QUICK_CROSS
TEST_PROJECT_PATH = Path(".")
TEST_CONFIG_NAME = "test"
def get_evals_metadata(site_name: str, meas_name: str) -> SpectraMetadata:
"""Get example evals metadata for testing"""
if site_name == "site1":
return get_evals_metadata_site1(meas_name)
if site_name == "site2":
return get_evals_metadata_site2(meas_name)
if site_name == "site3":
return get_evals_metadata_site3(meas_name)
raise ValueError(f"Site {site_name} not known")
def get_evals_metadata_by_path(spectra_path: Path):
"""Get example evals metadata for testing"""
# site 1
for meas_name in ["meas1", "meas2", "meas3"]:
if spectra_path == get_meas_evals_path(
TEST_PROJECT_PATH, "site1", meas_name, TEST_CONFIG_NAME
):
return get_evals_metadata("site1", meas_name)
# site 2
for meas_name in ["run1", "run2"]:
if spectra_path == get_meas_evals_path(
TEST_PROJECT_PATH, "site2", meas_name, TEST_CONFIG_NAME
):
return get_evals_metadata("site2", meas_name)
# site 3
for meas_name in ["data1"]:
if spectra_path == get_meas_evals_path(
TEST_PROJECT_PATH, "site3", meas_name, TEST_CONFIG_NAME
):
return get_evals_metadata("site3", meas_name)
raise ValueError("Spectra path not as expected")
def get_evals_data(site_name: str, meas_name: str) -> SpectraData:
"""Get example evals data for testing"""
if site_name == "site1":
return get_evals_data_site1(meas_name)
if site_name == "site2":
return get_evals_data_site2(meas_name)
if site_name == "site3":
return get_evals_data_site3(meas_name)
raise ValueError(f"Site {site_name} not known")
def get_evals_data_by_path(evals_path: Path):
"""Get example evals data for testing"""
# site 1
for meas_name in ["meas1", "meas2", "meas3"]:
if evals_path == get_meas_evals_path(
TEST_PROJECT_PATH, "site1", meas_name, TEST_CONFIG_NAME
):
return get_evals_data("site1", meas_name)
# site 2
for meas_name in ["run1", "run2"]:
if evals_path == get_meas_evals_path(
TEST_PROJECT_PATH, "site2", meas_name, TEST_CONFIG_NAME
):
return get_evals_data("site2", meas_name)
# site 3
for meas_name in ["data1"]:
if evals_path == get_meas_evals_path(
TEST_PROJECT_PATH, "site3", meas_name, TEST_CONFIG_NAME
):
return get_evals_data("site3", meas_name)
raise ValueError("Spectra path not as expected")
def get_test_project(project_path) -> Project:
"""Get a testing project"""
metadata = ProjectMetadata(ref_time="2021-01-01 00:00:00")
dir_path = project_path
begin_time = "2021-01-01 01:00:00"
end_time = "2021-01-01 05:00:00"
proj = Project(
dir_path=dir_path, begin_time=begin_time, end_time=end_time, metadata=metadata
)
return proj
class MockMeas:
def __init__(self, name: str):
"""Initialise with the measurement name"""
self.name = name
class MockSite:
def __init__(self, name: str):
"""Initialise"""
self.name = name
def __getitem__(self, meas_name: str) -> Dict[str, str]:
"""Get a mock measurement"""
return MockMeas(meas_name)
def get_measurements(self, fs: float):
"""Get the measurements for the site"""
if self.name == "site1":
return ["meas1", "meas2", "meas3"]
if self.name == "site2":
return ["run1", "run2"]
if self.name == "site3":
return ["data1"]
@pytest.fixture
def mock_project_site(monkeypatch):
"""Mock getting of site from project"""
def mock_project_get_site(*args, **kwargs):
"""Mock for getting a site"""
site_name = args[1]
return MockSite(site_name)
monkeypatch.setattr(Project, "get_site", mock_project_get_site)
@pytest.fixture
def mock_spec_reader_metadata_only(monkeypatch):
"""Mock fixture for reading spectra metadata"""
def mock_spectra_data_reader_run(*args, **kwargs):
"""Mock for reading spectra metadata"""
evals_path = args[1]
if "metadata_only" in kwargs:
return get_evals_metadata_by_path(evals_path)
else:
return get_evals_data_by_path(evals_path)
monkeypatch.setattr(SpectraDataReader, "run", mock_spectra_data_reader_run)
def get_selection():
"""Get a selection as it is used in multiple places"""
proj = get_test_project(TEST_PROJECT_PATH)
site_names = ["site1", "site2", "site3"]
dec_params = DecimationSetup(n_levels=4, per_level=2).run(128)
selection = gather.Selector().run(TEST_CONFIG_NAME, proj, site_names, dec_params)
return selection
def test_get_site_evals_metadata(mock_project_site, mock_spec_reader_metadata_only):
"""Test gathering of spectra metadata"""
proj = get_test_project(TEST_PROJECT_PATH)
meas_metadata = gather.get_site_evals_metadata(TEST_CONFIG_NAME, proj, "site1", 128)
assert len(meas_metadata) == 3
for meas_name, metadata in meas_metadata.items():
assert get_evals_metadata("site1", meas_name) == metadata
def test_get_site_level_wins():
"""Test getting site level windows for decimation level 0"""
meas_metadata = {}
for meas_name in ["meas1", "meas2", "meas3"]:
meas_metadata[meas_name] = get_evals_metadata("site1", meas_name)
table = gather.get_site_level_wins(meas_metadata, 0)
# fmt:off
index = [4, 5, 6, 7, 8, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 41, 42, 43, 44, 45, 46, 47]
data = ["meas1", "meas1", "meas1", "meas1", "meas1"]
data += ["meas2", "meas2", "meas2", "meas2", "meas2", "meas2", "meas2", "meas2", "meas2", "meas2", "meas2", "meas2"]
data += ["meas3", "meas3", "meas3", "meas3", "meas3", "meas3", "meas3"]
# fmt:on
pd.testing.assert_series_equal(table, pd.Series(data=data, index=index))
def test_get_site_wins(mock_project_site, mock_spec_reader_metadata_only):
"""Test getting site windows for all decimation levels"""
proj = get_test_project(TEST_PROJECT_PATH)
tables = gather.get_site_wins(TEST_CONFIG_NAME, proj, "site1", 128)
# level 0
# fmt:off
index = [4, 5, 6, 7, 8, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 41, 42, 43, 44, 45, 46, 47]
data = ["meas1", "meas1", "meas1", "meas1", "meas1"]
data += ["meas2", "meas2", "meas2", "meas2", "meas2", "meas2", "meas2", "meas2", "meas2", "meas2", "meas2", "meas2"]
data += ["meas3", "meas3", "meas3", "meas3", "meas3", "meas3", "meas3"]
# fmt:on
pd.testing.assert_series_equal(tables[0], pd.Series(data=data, index=index))
# level 1
# fmt:off
index = [2, 3, 4, 5, 10, 11, 12, 13, 14, 15, 16, 17, 18, 38, 39, 40, 41]
data = ["meas1", "meas1", "meas1", "meas1"]
data += ["meas2", "meas2", "meas2", "meas2", "meas2", "meas2", "meas2", "meas2", "meas2"]
data += ["meas3", "meas3", "meas3", "meas3"]
# fmt:on
pd.testing.assert_series_equal(tables[1], pd.Series(data=data, index=index))
# level 2
# fmt:off
index = [1, 2, 3, 8, 9, 10, 11, 12, 13, 14, 35, 36]
data = ["meas1", "meas1", "meas1"]
data += ["meas2", "meas2", "meas2", "meas2", "meas2", "meas2", "meas2"]
data += ["meas3", "meas3"]
# fmt:on
pd.testing.assert_series_equal(tables[2], pd.Series(data=data, index=index))
def test_selector(mock_project_site, mock_spec_reader_metadata_only):
"""Test the Selector"""
selection = get_selection()
selector_site_names = [x.name for x in selection.sites]
expected_site_names = ["site1", "site2", "site3"]
assert set(expected_site_names) == set(selector_site_names)
assert selection.n_levels == 2
assert selection.get_n_evals() == 4
assert selection.get_n_wins(0, 0) == 15
assert selection.get_n_wins(0, 1) == 15
assert selection.get_n_wins(1, 0) == 5
assert selection.get_n_wins(1, 1) == 5
assert selection.get_measurements(MockSite("site1")) == ["meas1", "meas2"]
assert selection.get_measurements(MockSite("site2")) == ["run1", "run2"]
assert selection.get_measurements(MockSite("site3")) == ["data1"]
# level 0
index = [4, 5, 6, 7, 8, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]
data = {}
# fmt:off
data["site1"] = ["meas1", "meas1", "meas1", "meas1", "meas1", "meas2", "meas2", "meas2", "meas2", "meas2", "meas2", "meas2", "meas2", "meas2", "meas2"]
data["site2"] = ["run1", "run1", "run1", "run1", "run1", "run2", "run2", "run2", "run2", "run2", "run2", "run2", "run2", "run2", "run2"]
data["site3"] = ["data1", "data1", "data1", "data1", "data1", "data1", "data1", "data1", "data1", "data1", "data1", "data1", "data1", "data1", "data1"]
data[0] = [True, True, True, True, True, True, True, True, True, True, True, True, True, True, True]
data[1] = [True, True, True, True, True, True, True, True, True, True, True, True, True, True, True]
# fmt:on
pd.testing.assert_frame_equal(
pd.DataFrame(data=data, index=index), selection.tables[0]
)
eval_wins = pd.DataFrame(data=data, index=index).drop([0, 1], axis=1)
pd.testing.assert_frame_equal(eval_wins, selection.get_eval_wins(0, 0))
pd.testing.assert_frame_equal(eval_wins, selection.get_eval_wins(0, 1))
# level 1
index = [3, 10, 11, 12, 13]
data = {}
data["site1"] = ["meas1", "meas2", "meas2", "meas2", "meas2"]
data["site2"] = ["run1", "run2", "run2", "run2", "run2"]
data["site3"] = ["data1", "data1", "data1", "data1", "data1"]
data[0] = [True, True, True, True, True]
data[1] = [True, True, True, True, True]
pd.testing.assert_frame_equal(
pd.DataFrame(data=data, index=index), selection.tables[1]
)
eval_wins = pd.DataFrame(data=data, index=index).drop([0, 1], axis=1)
pd.testing.assert_frame_equal(eval_wins, selection.get_eval_wins(1, 0))
pd.testing.assert_frame_equal(eval_wins, selection.get_eval_wins(1, 1))
def test_projectgather_get_empty_data(
mock_project_site, mock_spec_reader_metadata_only
):
"""Test creating empty data"""
selection = get_selection()
chans = ["Hx", "Hy"]
n_chans = len(chans)
# now test gatherer._get_empty_data
gatherer = gather.ProjectGather()
empty_data = gatherer._get_empty_data(selection, chans)
assert len(empty_data) == 4
assert sorted(list(empty_data.keys())) == [0, 1, 2, 3]
# check size of arrays
for eval_idx, data in empty_data.items():
# two decimation levels
level = eval_idx // 2
eval_level_idx = eval_idx - level * 2
n_wins = selection.get_n_wins(level, eval_level_idx)
assert data.shape == (n_wins, n_chans)
assert data.dtype == np.complex128
def test_projectgather_get_indices_site1_meas1(
mock_project_site, mock_spec_reader_metadata_only
):
"""Test project gathering of data"""
# get required data
site = MockSite("site1")
meas_name = "meas1"
metadata = get_evals_metadata(site.name, meas_name)
selection = get_selection()
# now test gatherer._get_indices
gatherer = gather.ProjectGather()
# site 1, meas1, level 0
eval_wins = selection.get_eval_wins(0, 0)
level_metadata = metadata.levels_metadata[0]
spectra_indices, combined_indices = gatherer._get_indices(
eval_wins, site, meas_name, level_metadata
)
np.testing.assert_array_equal(spectra_indices, np.array([0, 1, 2, 3, 4]))
np.testing.assert_array_equal(combined_indices, np.array([0, 1, 2, 3, 4]))
# site 1, meas1, level 1
eval_wins = selection.get_eval_wins(1, 0)
level_metadata = metadata.levels_metadata[1]
spectra_indices, combined_indices = gatherer._get_indices(
eval_wins, site, meas_name, level_metadata
)
np.testing.assert_array_equal(spectra_indices, np.array([1]))
np.testing.assert_array_equal(combined_indices, np.array([0]))
def test_projectgather_get_indices_site2_run2(
mock_project_site, mock_spec_reader_metadata_only
):
"""Test project gathering of data"""
# get required data
site = MockSite("site2")
meas_name = "run2"
metadata = get_evals_metadata(site.name, meas_name)
selection = get_selection()
# now test gatherer._get_indices
gatherer = gather.ProjectGather()
    # site 2, run2, level 0
eval_wins = selection.get_eval_wins(0, 0)
level_metadata = metadata.levels_metadata[0]
spectra_indices, combined_indices = gatherer._get_indices(
eval_wins, site, meas_name, level_metadata
)
expected_spectra = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    expected_combined = [5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
    np.testing.assert_array_equal(spectra_indices, np.array(expected_spectra))
    np.testing.assert_array_equal(combined_indices, np.array(expected_combined))
    # site 2, run2, level 1
eval_wins = selection.get_eval_wins(1, 0)
level_metadata = metadata.levels_metadata[1]
spectra_indices, combined_indices = gatherer._get_indices(
eval_wins, site, meas_name, level_metadata
)
np.testing.assert_array_equal(spectra_indices, np.array([1, 2, 3, 4]))
np.testing.assert_array_equal(combined_indices, np.array([1, 2, 3, 4]))
def test_projectgather_get_indices_site3_data1(
mock_project_site, mock_spec_reader_metadata_only
):
"""Test project gathering of data"""
# get required data
site = MockSite("site3")
meas_name = "data1"
metadata = get_evals_metadata(site.name, meas_name)
selection = get_selection()
# now test gatherer._get_indices
gatherer = gather.ProjectGather()
# site 3, data1, level 0
eval_wins = selection.get_eval_wins(0, 0)
level_metadata = metadata.levels_metadata[0]
spectra_indices, combined_indices = gatherer._get_indices(
eval_wins, site, meas_name, level_metadata
)
expected_spectra = [0, 1, 2, 3, 4, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]
expected_combined = np.arange(15)
np.testing.assert_array_equal(spectra_indices, np.array(expected_spectra))
np.testing.assert_array_equal(combined_indices, expected_combined)
# site 3, data1, level 1
eval_wins = selection.get_eval_wins(1, 0)
level_metadata = metadata.levels_metadata[1]
spectra_indices, combined_indices = gatherer._get_indices(
eval_wins, site, meas_name, level_metadata
)
np.testing.assert_array_equal(spectra_indices, np.array([0, 7, 8, 9, 10]))
np.testing.assert_array_equal(combined_indices, np.array([0, 1, 2, 3, 4]))
def test_projectgather_get_site_data_site1(
mock_project_site, mock_spec_reader_metadata_only
):
"""Test combining data for site1"""
# get required data
proj = get_test_project(TEST_PROJECT_PATH)
site_name = "site1"
selection = get_selection()
    # now test gatherer._get_site_data
gatherer = gather.ProjectGather()
combined_data = gatherer._get_site_data(
TEST_CONFIG_NAME, proj, selection, site_name, ["Ex", "Ey"]
)
assert len(combined_data.data) == 4
assert combined_data.metadata.chans == ["Ex", "Ey"]
assert combined_data.metadata.n_evals == 4
assert combined_data.metadata.measurements == ["meas1", "meas2"]
np.testing.assert_equal(combined_data.data[0], SITE1_COMBINED_DATA[0])
np.testing.assert_equal(combined_data.data[1], SITE1_COMBINED_DATA[1])
np.testing.assert_equal(combined_data.data[2], SITE1_COMBINED_DATA[2])
np.testing.assert_equal(combined_data.data[3], SITE1_COMBINED_DATA[3])
def test_projectgather_get_site_data_site2(
mock_project_site, mock_spec_reader_metadata_only
):
"""Test combining data for site2"""
# get required data
proj = get_test_project(TEST_PROJECT_PATH)
site_name = "site2"
selection = get_selection()
    # now test gatherer._get_site_data
gatherer = gather.ProjectGather()
combined_data = gatherer._get_site_data(
TEST_CONFIG_NAME, proj, selection, site_name, ["Hx", "Hy"]
)
assert len(combined_data.data) == 4
assert combined_data.metadata.chans == ["Hx", "Hy"]
assert combined_data.metadata.n_evals == 4
assert combined_data.metadata.measurements == ["run1", "run2"]
np.testing.assert_equal(combined_data.data[0], SITE2_COMBINED_DATA[0])
np.testing.assert_equal(combined_data.data[1], SITE2_COMBINED_DATA[1])
np.testing.assert_equal(combined_data.data[2], SITE2_COMBINED_DATA[2])
np.testing.assert_equal(combined_data.data[3], SITE2_COMBINED_DATA[3])
def test_projectgather_get_site_data_site3(
mock_project_site, mock_spec_reader_metadata_only
):
"""Test combining data for site1"""
# get required data
proj = get_test_project(TEST_PROJECT_PATH)
site_name = "site3"
selection = get_selection()
    # now test gatherer._get_site_data
gatherer = gather.ProjectGather()
combined_data = gatherer._get_site_data(
TEST_CONFIG_NAME, proj, selection, site_name, ["Hx", "Hy"]
)
assert len(combined_data.data) == 4
assert combined_data.metadata.chans == ["Hx", "Hy"]
assert combined_data.metadata.n_evals == 4
assert combined_data.metadata.measurements == ["data1"]
np.testing.assert_equal(combined_data.data[0], SITE3_COMBINED_DATA[0])
np.testing.assert_equal(combined_data.data[1], SITE3_COMBINED_DATA[1])
np.testing.assert_equal(combined_data.data[2], SITE3_COMBINED_DATA[2])
np.testing.assert_equal(combined_data.data[3], SITE3_COMBINED_DATA[3])
def test_projectgather_run(mock_project_site, mock_spec_reader_metadata_only):
"""Test gathering data for all sites"""
# get required data
proj = get_test_project(TEST_PROJECT_PATH)
tf = ImpedanceTensor()
selection = get_selection()
gathered_data = gather.ProjectGather().run(
TEST_CONFIG_NAME,
proj,
selection,
tf,
out_name="site1",
in_name="site2",
cross_name="site3",
)
# output data
assert gathered_data.out_data.metadata.site_name == "site1"
assert gathered_data.out_data.metadata.chans == ["Ex", "Ey"]
assert gathered_data.out_data.metadata.fs == 128
assert gathered_data.out_data.metadata.n_evals == 4
np.testing.assert_equal(gathered_data.out_data.data[0], SITE1_COMBINED_DATA[0])
np.testing.assert_equal(gathered_data.out_data.data[1], SITE1_COMBINED_DATA[1])
np.testing.assert_equal(gathered_data.out_data.data[2], SITE1_COMBINED_DATA[2])
np.testing.assert_equal(gathered_data.out_data.data[3], SITE1_COMBINED_DATA[3])
# input data
assert gathered_data.in_data.metadata.site_name == "site2"
assert gathered_data.in_data.metadata.chans == ["Hx", "Hy"]
assert gathered_data.in_data.metadata.fs == 128
assert gathered_data.in_data.metadata.n_evals == 4
np.testing.assert_equal(gathered_data.in_data.data[0], SITE2_COMBINED_DATA[0])
np.testing.assert_equal(gathered_data.in_data.data[1], SITE2_COMBINED_DATA[1])
np.testing.assert_equal(gathered_data.in_data.data[2], SITE2_COMBINED_DATA[2])
np.testing.assert_equal(gathered_data.in_data.data[3], SITE2_COMBINED_DATA[3])
# cross data
assert gathered_data.cross_data.metadata.site_name == "site3"
assert gathered_data.cross_data.metadata.chans == ["Hx", "Hy"]
assert gathered_data.cross_data.metadata.fs == 128
assert gathered_data.cross_data.metadata.n_evals == 4
np.testing.assert_equal(gathered_data.cross_data.data[0], SITE3_COMBINED_DATA[0])
np.testing.assert_equal(gathered_data.cross_data.data[1], SITE3_COMBINED_DATA[1])
np.testing.assert_equal(gathered_data.cross_data.data[2], SITE3_COMBINED_DATA[2])
np.testing.assert_equal(gathered_data.cross_data.data[3], SITE3_COMBINED_DATA[3])
def test_quickgather_run():
"""Test quick gathering with some spectra data"""
dir_path = Path("test")
dec_params = DecimationSetup(n_levels=4, per_level=2).run(128)
tf = TransferFunction(out_chans=["Hy"], in_chans=["Hx"], cross_chans=["Ex", "Ey"])
eval_data = get_evals_data("site2", "run2")
with pytest.raises(ChannelNotFoundError):
        # there are no electric channels (Ex, Ey) in the data
gathered_data = gather.QuickGather().run(dir_path, dec_params, tf, eval_data)
tf = TransferFunction(in_chans=["Hx"], out_chans=["Hy"], cross_chans=["Hx", "Hy"])
gathered_data = gather.QuickGather().run(dir_path, dec_params, tf, eval_data)
# output data
assert gathered_data.out_data.metadata.site_name == "test"
assert gathered_data.out_data.metadata.chans == ["Hy"]
assert gathered_data.out_data.metadata.fs == 128
assert gathered_data.out_data.metadata.n_evals == 4
np.testing.assert_equal(gathered_data.out_data.data[0], SITE2_RUN2_QUICK_OUT[0])
np.testing.assert_equal(gathered_data.out_data.data[1], SITE2_RUN2_QUICK_OUT[1])
np.testing.assert_equal(gathered_data.out_data.data[2], SITE2_RUN2_QUICK_OUT[2])
np.testing.assert_equal(gathered_data.out_data.data[3], SITE2_RUN2_QUICK_OUT[3])
# input data
assert gathered_data.in_data.metadata.site_name == "test"
assert gathered_data.in_data.metadata.chans == ["Hx"]
assert gathered_data.in_data.metadata.fs == 128
assert gathered_data.in_data.metadata.n_evals == 4
np.testing.assert_equal(gathered_data.in_data.data[0], SITE2_RUN2_QUICK_IN[0])
np.testing.assert_equal(gathered_data.in_data.data[1], SITE2_RUN2_QUICK_IN[1])
np.testing.assert_equal(gathered_data.in_data.data[2], SITE2_RUN2_QUICK_IN[2])
np.testing.assert_equal(gathered_data.in_data.data[3], SITE2_RUN2_QUICK_IN[3])
# cross data
assert gathered_data.cross_data.metadata.site_name == "test"
assert gathered_data.cross_data.metadata.chans == ["Hx", "Hy"]
assert gathered_data.cross_data.metadata.fs == 128
assert gathered_data.cross_data.metadata.n_evals == 4
np.testing.assert_equal(gathered_data.cross_data.data[0], SITE2_RUN2_QUICK_CROSS[0])
np.testing.assert_equal(gathered_data.cross_data.data[1], SITE2_RUN2_QUICK_CROSS[1])
np.testing.assert_equal(gathered_data.cross_data.data[2], SITE2_RUN2_QUICK_CROSS[2])
np.testing.assert_equal(gathered_data.cross_data.data[3], SITE2_RUN2_QUICK_CROSS[3])
|
py | 1a32c3623e132c9ad235341f526b60314b31e412 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2021 CERN.
#
# CDS-ILS is free software; you can redistribute it and/or modify it under
# the terms of the MIT License; see LICENSE file for more details.
"""Test loan migration."""
import os
import pytest
from invenio_app_ils.circulation.indexer import LoanIndexer
from invenio_circulation.api import Loan
from invenio_circulation.pidstore.pids import CIRCULATION_LOAN_PID_TYPE
from invenio_circulation.proxies import current_circulation
from invenio_search import current_search
from cds_ils.migrator.errors import LoanMigrationError
from cds_ils.migrator.loans.api import import_loans_from_json
from tests.migrator.utils import reindex_record
def test_import_loan_returned(test_data_migration, patrons, es_clear):
filepath = os.path.join(os.path.dirname(__file__), "data", "loans.json")
with open(filepath) as fp:
import_loans_from_json(fp)
reindex_record(CIRCULATION_LOAN_PID_TYPE, Loan, LoanIndexer())
current_search.flush_and_refresh(index="*")
loan_search = current_circulation.loan_search_cls
search = (
loan_search()
.filter("term", document_pid="docid-1")
.filter("term", state="ITEM_RETURNED")
)
results = search.execute()
assert results.hits.total.value == 1
assert results[0].start_date == "2010-09-29T00:00:00"
search = (
loan_search()
.filter("term", document_pid="docid-1")
.filter("term", state="ITEM_ON_LOAN")
.filter("term", item_pid__value="itemid-1")
)
results = search.execute()
assert results.hits.total.value == 1
assert results[0].start_date == "2009-07-07T00:00:00"
def test_import_invalid_loan(testdata):
filepath = os.path.join(
os.path.dirname(__file__), "data", "loan_ongoing_anonymous_user.json"
)
with open(filepath) as fp:
with pytest.raises(LoanMigrationError):
import_loans_from_json(fp, raise_exceptions=True)
|
py | 1a32c43c84d2bbd166c94b5569589bae045fec9c | '''
Tenable.io
==========
.. autoclass:: TenableIO
:members:
.. toctree::
:hidden:
:glob:
cs/index
access_groups_v2
access_groups
agent_config
agent_exclusions
agent_groups
agents
assets
audit_log
credentials
editor
exclusions
exports
files
filters
folders
groups
networks
permissions
plugins
policies
remediation_scans
scanner_groups
scanners
scans
server
session
tags
target_groups
users
workbenches
'''
from typing import Dict, List, Optional
from requests import Response
from tenable.base.platform import APIPlatform
from .access_groups import AccessGroupsAPI
from .access_groups_v2 import AccessGroupsV2API
from .agent_config import AgentConfigAPI
from .agent_exclusions import AgentExclusionsAPI
from .agent_groups import AgentGroupsAPI
from .agents import AgentsAPI
from .assets import AssetsAPI
from .audit_log import AuditLogAPI
from .credentials import CredentialsAPI
from .editor import EditorAPI
from .exclusions import ExclusionsAPI
from .exports import ExportsAPI
from .files import FileAPI
from .filters import FiltersAPI
from .folders import FoldersAPI
from .groups import GroupsAPI
from .networks import NetworksAPI
from .permissions import PermissionsAPI
from .plugins import PluginsAPI
from .policies import PoliciesAPI
from .scanner_groups import ScannerGroupsAPI
from .scanners import ScannersAPI
from .scans import ScansAPI
from .remediation_scans import RemediationScansAPI
from .server import ServerAPI
from .session import SessionAPI
from .tags import TagsAPI
from .target_groups import TargetGroupsAPI
from .users import UsersAPI
from .workbenches import WorkbenchesAPI
class TenableIO(APIPlatform): # noqa: PLR0904
'''
The Tenable.io object is the primary interaction point for users to
interface with Tenable.io via the pyTenable library. All of the API
endpoint classes that have been written will be grafted onto this class.
Args:
access_key (str, optional):
            The user's API access key for Tenable.io. If an access key isn't
specified, then the library will attempt to read the environment
variable ``TIO_ACCESS_KEY`` to acquire the key.
secret_key (str, optional):
            The user's API secret key for Tenable.io. If a secret key isn't
specified, then the library will attempt to read the environment
variable ``TIO_SECRET_KEY`` to acquire the key.
url (str, optional):
The base URL that the paths will be appended onto. The default
is ``https://cloud.tenable.com``
retries (int, optional):
The number of retries to make before failing a request. The
default is ``5``.
backoff (float, optional):
If a 429 response is returned, how much do we want to backoff
if the response didn't send a Retry-After header. The default
backoff is ``1`` second.
vendor (str, optional):
The vendor name for the User-Agent string.
product (str, optional):
The product name for the User-Agent string.
build (str, optional):
The version or build identifier for the User-Agent string.
timeout (int, optional):
The connection timeout parameter informing the library how long to
wait in seconds for a stalled response before terminating the
connection. If unspecified, the default is 120 seconds.
Examples:
Basic Example:
>>> from tenable.io import TenableIO
>>> tio = TenableIO('ACCESS_KEY', 'SECRET_KEY')
Example with proper identification:
>>> tio = TenableIO('ACCESS_KEY', 'SECRET_KEY',
>>> vendor='Company Name',
>>> product='My Awesome Widget',
>>> build='1.0.0')
Example with proper identification leveraging environment variables for
access and secret keys:
>>> tio = TenableIO(
>>> vendor='Company Name', product='Widget', build='1.0.0')
'''
_env_base = 'TIO'
_tzcache = None
_url = 'https://cloud.tenable.com'
_timeout = 120
def __init__(self,
access_key: Optional[str] = None,
secret_key: Optional[str] = None,
**kwargs
):
if access_key:
kwargs['access_key'] = access_key
if secret_key:
kwargs['secret_key'] = secret_key
super().__init__(**kwargs)
def _retry_request(self,
response: Response,
retries: int,
**kwargs) -> Dict:
'''
If the call is retried, we will need to set some additional headers
'''
kwargs['headers'] = kwargs.get('headers', {})
# if the request uuid exists in the response, then we will send the
# uuid back so that there is solid request chain in the Tenable.io
# platform logs.
request_uuid = response.headers.get('X-Tio-Last-Request-Uuid')
if request_uuid:
kwargs['headers']['X-Tio-Last-Request-Uuid'] = request_uuid
# We also need to return the number of times that we have attempted to
# retry this call.
kwargs['headers']['X-Tio-Retry-Count'] = str(retries)
# Return the keyword arguments back to the caller.
return kwargs
@property
def _tz(self):
'''
As we will be using the timezone listing in a lot of parameter
checking, we should probably cache the response as a private attribute
to speed up checking times.
'''
if not self._tzcache:
self._tzcache = self.scans.timezones()
return self._tzcache
@property
def access_groups(self):
'''
The interface object for the
:doc:`Tenable.io Access Groups APIs <access_groups>`.
'''
return AccessGroupsAPI(self)
@property
def access_groups_v2(self):
'''
The interface object for the
:doc:`Tenable.io Access Groups v2 APIs <access_groups_v2>`.
'''
return AccessGroupsV2API(self)
@property
def agent_config(self):
'''
The interface object for the
:doc:`Tenable.io Agent Config APIs <agent_config>`.
'''
return AgentConfigAPI(self)
@property
def agent_groups(self):
'''
The interface object for the
:doc:`Tenable.io Agent Groups APIs <agent_groups>`.
'''
return AgentGroupsAPI(self)
@property
def agent_exclusions(self):
'''
The interface object for the
:doc:`Tenable.io Agent Exclusions APIs <agent_exclusions>`.
'''
return AgentExclusionsAPI(self)
@property
def agents(self):
'''
The interface object for the
:doc:`Tenable.io Agents APIs <agents>`.
'''
return AgentsAPI(self)
@property
def assets(self):
'''
The interface object for the
:doc:`Tenable.io assets APIs <assets>`.
'''
return AssetsAPI(self)
@property
def audit_log(self):
'''
The interface object for the
:doc:`Tenable.io Audit Log APIs <audit_log>`.
'''
return AuditLogAPI(self)
@property
def credentials(self):
'''
The interface object for the
:doc:`Tenable.io Credentials APIs <credentials>`.
'''
return CredentialsAPI(self)
@property
def editor(self):
'''
The interface object for the
:doc:`Tenable.io Editor APIs <editor>`.
'''
return EditorAPI(self)
@property
def exclusions(self):
'''
The interface object for the
:doc:`Tenable.io Exclusions APIs <exclusions>`.
'''
return ExclusionsAPI(self)
@property
def exports(self):
'''
The interface object for the
:doc:`Tenable.io Exports APIs <exports>`.
'''
return ExportsAPI(self)
@property
def files(self):
'''
The interface object for the
:doc:`Tenable.io Files APIs <files>`.
'''
return FileAPI(self)
@property
def filters(self):
'''
The interface object for the
:doc:`Tenable.io Filters APIs <filters>`.
'''
return FiltersAPI(self)
@property
def folders(self):
'''
The interface object for the
:doc:`Tenable.io Folders APIs <folders>`.
'''
return FoldersAPI(self)
@property
def groups(self):
'''
The interface object for the
:doc:`Tenable.io Groups APIs <groups>`.
'''
return GroupsAPI(self)
@property
def networks(self):
'''
The interface object for the
:doc:`Tenable.io Networks APIs <networks>`.
'''
return NetworksAPI(self)
@property
def permissions(self):
'''
The interface object for the
:doc:`Tenable.io Permissions APIs <permissions>`.
'''
return PermissionsAPI(self)
@property
def plugins(self):
'''
The interface object for the
:doc:`Tenable.io Plugins APIs <plugins>`.
'''
return PluginsAPI(self)
@property
def policies(self):
'''
The interface object for the
:doc:`Tenable.io Policies APIs <policies>`.
'''
return PoliciesAPI(self)
@property
def scanner_groups(self):
'''
The interface object for the
:doc:`Tenable.io Scanner Groups APIs <scanner_groups>`.
'''
return ScannerGroupsAPI(self)
@property
def scanners(self):
'''
The interface object for the
:doc:`Tenable.io Scanners APIs <scanners>`.
'''
return ScannersAPI(self)
@property
def scans(self):
'''
The interface object for the
:doc:`Tenable.io Scans APIs <scans>`.
'''
return ScansAPI(self)
@property
def remediationscans(self):
'''
The interface object for the
:doc:`Tenable.io Remediation Scans APIs <remediation_scans>`.
'''
return RemediationScansAPI(self)
@property
def server(self):
'''
The interface object for the
:doc:`Tenable.io Server APIs <server>`.
'''
return ServerAPI(self)
@property
def session(self):
'''
The interface object for the
:doc:`Tenable.io Session APIs <session>`.
'''
return SessionAPI(self)
@property
def tags(self):
'''
The interface object for the
:doc:`Tenable.io Tags APIs <tags>`.
'''
return TagsAPI(self)
@property
def target_groups(self):
'''
The interface object for the
:doc:`Tenable.io Target Groups APIs <target_groups>`.
'''
return TargetGroupsAPI(self)
@property
def users(self):
'''
The interface object for the
:doc:`Tenable.io Users APIs <users>`.
'''
return UsersAPI(self)
@property
def workbenches(self):
'''
The interface object for the
:doc:`Tenable.io Workbenches APIs <workbenches>`.
'''
return WorkbenchesAPI(self)
|
py | 1a32c482c404b4448fbd922eada6984f21502a89 | try:
import ustruct as struct
except:
import struct
print(struct.calcsize("<bI"))
print(struct.unpack("<bI", b"\x80\0\0\x01\0"))
print(struct.calcsize(">bI"))
print(struct.unpack(">bI", b"\x80\0\0\x01\0"))
# 32-bit little-endian specific
#print(struct.unpack("bI", b"\x80\xaa\x55\xaa\0\0\x01\0"))
print(struct.pack("<i", 1))
print(struct.pack(">i", 1))
print(struct.pack("<h", 1))
print(struct.pack(">h", 1))
print(struct.pack("<b", 1))
print(struct.pack(">b", 1))
print(struct.pack("<bI", -128, 256))
print(struct.pack(">bI", -128, 256))
print(struct.calcsize("100sI"))
print(struct.calcsize("97sI"))
print(struct.unpack("<6sH", b"foo\0\0\0\x12\x34"))
print(struct.pack("<6sH", b"foo", 10000))
s = struct.pack("BHBI", 10, 100, 200, 300)
v = struct.unpack("BHBI", s)
print(v == (10, 100, 200, 300))
# check maximum pack on 32-bit machine
print(struct.pack("<I", 2**32 - 1))
print(struct.pack("<I", 0xffffffff))
# long long ints
print(struct.pack("<Q", 2**64 - 1))
print(struct.pack(">Q", 2**64 - 1))
print(struct.pack("<Q", 0xffffffffffffffff))
print(struct.pack(">Q", 0xffffffffffffffff))
print(struct.pack("<q", -1))
print(struct.pack(">q", -1))
print(struct.pack("<Q", 1234567890123456789))
print(struct.pack("<q", -1234567890123456789))
print(struct.pack(">Q", 1234567890123456789))
print(struct.pack(">q", -1234567890123456789))
print(struct.unpack("<Q", b"\x12\x34\x56\x78\x90\x12\x34\x56"))
print(struct.unpack(">Q", b"\x12\x34\x56\x78\x90\x12\x34\x56"))
print(struct.unpack("<q", b"\x12\x34\x56\x78\x90\x12\x34\xf6"))
print(struct.unpack(">q", b"\xf2\x34\x56\x78\x90\x12\x34\x56"))
# check maximum unpack
print(struct.unpack("<I", b"\xff\xff\xff\xff"))
print(struct.unpack("<Q", b"\xff\xff\xff\xff\xff\xff\xff\xff"))
# network byte order
print(struct.pack('!i', 123))
# first arg must be a string
try:
struct.pack(1, 2)
except TypeError:
print('TypeError')
|
py | 1a32c60d5739934e4de9fdb01d123aeafa5104fc | _base_ = [
'../../_base_/models/faster_rcnn_r50_dc5.py',
'../../_base_/datasets/imagenet_vid_fgfa_style.py',
'../../_base_/default_runtime.py'
]
model = dict(
type='SELSA',
detector=dict(
roi_head=dict(
type='SelsaRoIHead',
bbox_roi_extractor=dict(
type='TemporalRoIAlign',
num_most_similar_points=2,
num_temporal_attention_blocks=4,
roi_layer=dict(
type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=512,
featmap_strides=[16]),
bbox_head=dict(
type='SelsaBBoxHead',
num_shared_fcs=3,
aggregator=dict(
type='SelsaAggregator',
in_channels=1024,
num_attention_blocks=16)))))
# dataset settings
data = dict(
val=dict(
ref_img_sampler=dict(
_delete_=True,
num_ref_imgs=14,
frame_range=[-7, 7],
method='test_with_adaptive_stride')),
test=dict(
ref_img_sampler=dict(
_delete_=True,
num_ref_imgs=14,
frame_range=[-7, 7],
method='test_with_adaptive_stride')))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[2, 5])
# runtime settings
total_epochs = 7
evaluation = dict(metric=['bbox'], interval=7)
|
py | 1a32c690bf64efe9e393d682ff5fb0adfd598222 | from __future__ import absolute_import, division, print_function, unicode_literals
import logging
import numpy as np
import os
from madminer.analysis import DataAnalyzer
from madminer.utils.various import math_commands, weighted_quantile, sanitize_array, mdot
from madminer.utils.various import less_logging
from madminer.ml import ParameterizedRatioEstimator, ScoreEstimator, Ensemble, load_estimator
logger = logging.getLogger(__name__)
class FisherInformation(DataAnalyzer):
"""
Functions to calculate expected Fisher information matrices.
    After initializing a `FisherInformation` instance with the filename of a MadMiner file, different information matrices
can be calculated:
* `FisherInformation.truth_information()` calculates the full truth-level Fisher information.
This is the information in an idealized measurement where all parton-level particles with their charges, flavours,
and four-momenta can be accessed with perfect accuracy.
* `FisherInformation.full_information()` calculates the full Fisher information in
realistic detector-level observations, estimated with neural networks. In addition to the MadMiner file, this
requires a trained SALLY or SALLINO estimator as well as an unweighted evaluation sample.
* `FisherInformation.rate_information()` calculates the Fisher information in the total cross
section.
* `FisherInformation.histo_information()` calculates the Fisher information in the histogram of
one (parton-level or detector-level) observable.
* `FisherInformation.histo_information_2d()` calculates the Fisher information in a two-dimensional
histogram of two (parton-level or detector-level) observables.
* `FisherInformation.histogram_of_information()` calculates the full truth-level Fisher information in
different slices of one observable (the "distribution of the Fisher information").
Finally, don't forget that in the presence of nuisance parameters the constraint terms also affect the Fisher
information. This term is given by `FisherInformation.calculate_fisher_information_nuisance_constraints()`.
Parameters
----------
filename : str
Path to MadMiner file (for instance the output of `madminer.delphes.DelphesProcessor.save()`).
include_nuisance_parameters : bool, optional
If True, nuisance parameters are taken into account. Default value: True.
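    Examples
    --------
    A minimal usage sketch (the filename and the two-parameter point below are illustrative
    placeholders, not files or values shipped with this module):
    >>> import numpy as np
    >>> fisher = FisherInformation("madminer_example.h5")
    >>> info, cov = fisher.truth_information(theta=np.zeros(2), luminosity=300000.0)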
"""
def __init__(self, filename, include_nuisance_parameters=True):
super(FisherInformation, self).__init__(filename, False, include_nuisance_parameters)
def truth_information(
self, theta, luminosity=300000.0, cuts=None, efficiency_functions=None, include_nuisance_parameters=True
):
"""
Calculates the full Fisher information at parton / truth level. This is the information in an idealized
measurement where all parton-level particles with their charges, flavours, and four-momenta can be accessed with
perfect accuracy, i.e. the latent variables `z_parton` can be measured directly.
Parameters
----------
theta : ndarray
Parameter point `theta` at which the Fisher information matrix `I_ij(theta)` is evaluated.
luminosity : float
Luminosity in pb^-1.
cuts : None or list of str, optional
Cuts. Each entry is a parseable Python expression that returns a bool (True if the event should pass a cut,
False otherwise). Default value: None.
efficiency_functions : list of str or None
Efficiencies. Each entry is a parseable Python expression that returns a float for the efficiency of one
component. Default value: None.
include_nuisance_parameters : bool, optional
If True, nuisance parameters are taken into account. Default value: True.
Returns
-------
fisher_information : ndarray
Expected full truth-level Fisher information matrix with shape `(n_parameters, n_parameters)`.
fisher_information_uncertainty : ndarray
Covariance matrix of the Fisher information matrix with shape
`(n_parameters, n_parameters, n_parameters, n_parameters)`, calculated with plain Gaussian error
propagation.
"""
# Input
if cuts is None:
cuts = []
if efficiency_functions is None:
efficiency_functions = []
include_nuisance_parameters = include_nuisance_parameters and (self.nuisance_parameters is not None)
# Loop over batches
n_all_parameters = self.n_parameters
if include_nuisance_parameters:
n_all_parameters += self.n_nuisance_parameters
fisher_info = np.zeros((n_all_parameters, n_all_parameters))
covariance = np.zeros((n_all_parameters, n_all_parameters, n_all_parameters, n_all_parameters))
for observations, weights in self.event_loader():
# Cuts
cut_filter = [self._pass_cuts(obs_event, cuts) for obs_event in observations]
observations = observations[cut_filter]
weights = weights[cut_filter]
# Efficiencies
efficiencies = np.array(
[self._eval_efficiency(obs_event, efficiency_functions) for obs_event in observations]
)
weights *= efficiencies[:, np.newaxis]
# Fisher information
this_fisher_info, this_covariance = self._calculate_fisher_information(
theta,
weights,
luminosity,
sum_events=True,
calculate_uncertainty=True,
include_nuisance_parameters=include_nuisance_parameters,
)
fisher_info += this_fisher_info
covariance += this_covariance
return fisher_info, covariance
def full_information(
self,
theta,
model_file,
unweighted_x_sample_file=None,
luminosity=300000.0,
include_xsec_info=True,
mode="score",
calculate_covariance=True,
batch_size=100000,
test_split=0.2,
):
"""
Calculates the full Fisher information in realistic detector-level observations, estimated with neural networks.
In addition to the MadMiner file, this requires a trained SALLY or SALLINO estimator.
Nuisance parameter are taken into account automatically if the SALLY / SALLINO model was trained with them.
Parameters
----------
theta : ndarray
Parameter point `theta` at which the Fisher information matrix `I_ij(theta)` is evaluated.
model_file : str
Filename of a trained local score regression model that was trained on samples from `theta` (see
`madminer.ml.Estimator`).
unweighted_x_sample_file : str or None
Filename of an unweighted x sample that is sampled according to theta and obeys the cuts
(see `madminer.sampling.SampleAugmenter.extract_samples_train_local()`). If None, the Fisher information
is instead calculated on the full, weighted samples (the data in the MadMiner file). Default value: None.
luminosity : float, optional
Luminosity in pb^-1. Default value: 300000.
include_xsec_info : bool, optional
Whether the rate information is included in the returned Fisher information. Default value: True.
mode : {"score", "information"}, optional
How the ensemble uncertainty on the kinematic Fisher information is calculated. If mode is "information",
the Fisher information for each estimator is calculated individually and only then
are the sample mean and covariance calculated. If mode is "score", the sample mean is
calculated for the score for each event. Default value: "score".
calculate_covariance : bool, optional
If True, the covariance between the different estimators is calculated. Default value: True.
batch_size : int, optional
Batch size. Default value: 100000.
test_split : float or None, optional
If unweighted_x_sample_file is None, this determines the fraction of weighted events used for evaluation.
If None, all events are used (this will probably include events used during training!). Default value: 0.2.
Returns
-------
fisher_information : ndarray or list of ndarray
Estimated expected full detector-level Fisher information matrix with shape `(n_parameters, n_parameters)`.
If more then one value ensemble_vote_expectation_weight is given, this is a list with results for all
entries in ensemble_vote_expectation_weight.
fisher_information_uncertainty : ndarray or list of ndarray or None
Covariance matrix of the Fisher information matrix with shape
`(n_parameters, n_parameters, n_parameters, n_parameters)`. If more then one value
ensemble_vote_expectation_weight is given, this is a list with results for all entries in
ensemble_vote_expectation_weight.
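        Examples
        --------
        An illustrative call (the model path is a placeholder for a trained SALLY-type score
        estimator, not a file provided here):
        >>> info, cov = fisher.full_information(
        ...     theta=np.zeros(2), model_file="models/sally", luminosity=300000.0
        ... )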
"""
# Check input
if mode not in ["score", "information", "modified_score"]:
raise ValueError("Unknown mode {}, has to be 'score', 'modified_score', or 'information'!".format(mode))
# Load Estimator model
if os.path.isdir(model_file) and os.path.exists(model_file + "/ensemble.json"):
model_is_ensemble = True
model = Ensemble()
model.load(model_file)
if isinstance(model.estimators[0], ParameterizedRatioEstimator):
model_type = "Parameterized Ratio Ensemble"
elif isinstance(model.estimators[0], ScoreEstimator):
model_type = "Score Ensemble"
else:
raise RuntimeError("Ensemble is not a score or parameterized_ratio type!")
else:
model_is_ensemble = False
model = load_estimator(model_file)
if isinstance(model, ParameterizedRatioEstimator):
model_type = "Parameterized Ratio Estimator"
elif isinstance(model, ScoreEstimator):
model_type = "Score Estimator"
else:
raise RuntimeError("Estimator is not a score or parameterized_ratio type!")
# Nuisance parameters?
if model.n_parameters == self.n_parameters:
logger.info(
"Found %s parameters in %s model, matching %s physical parameters in MadMiner file",
model.n_parameters,
model_type,
self.n_parameters,
)
include_nuisance_parameters = False
elif model.n_parameters == self.n_parameters + self.n_nuisance_parameters:
logger.info(
"Found %s parameters in %s model, matching %s physical parameters + %s nuisance parameters"
+ " in MadMiner file",
model.n_parameters,
model_type,
self.n_parameters,
self.n_nuisance_parameters,
)
include_nuisance_parameters = True
else:
            raise RuntimeError(
                "Inconsistent numbers of parameters! Found {} in {} model, {} physical parameters "
                "in MadMiner file, and {} nuisance parameters in MadMiner file.".format(
                    model.n_parameters, model_type, self.n_parameters, self.n_nuisance_parameters
                )
            )
if include_nuisance_parameters:
logger.debug("Including nuisance parameters")
else:
logger.debug("Not including nuisance parameters")
# Total xsec
total_xsec = self._calculate_xsec(theta=theta)
logger.debug("Total cross section: %s pb", total_xsec)
# Rate part of Fisher information
fisher_info_rate = 0.0
rate_covariance = 0.0
if include_xsec_info:
logger.info("Evaluating rate Fisher information")
fisher_info_rate, rate_covariance = self.rate_information(
theta=theta, luminosity=luminosity, include_nuisance_parameters=include_nuisance_parameters
)
# Evaluation from weighted events
if unweighted_x_sample_file is None:
# Which events to sum over
if test_split is None or test_split <= 0.0 or test_split >= 1.0:
start_event = 0
else:
start_event = int(round((1.0 - test_split) * self.n_samples, 0)) + 1
if start_event > 0:
total_sum_weights_theta = self._calculate_xsec(theta=theta, start_event=start_event)
else:
total_sum_weights_theta = total_xsec
# Theta morphing matrix
theta_matrix = self._get_theta_benchmark_matrix(theta)
# Prepare output
fisher_info_kin = None
covariance = None
# Number of batches
n_batches = int(np.ceil((self.n_samples - start_event) / batch_size))
n_batches_verbose = max(int(round(n_batches / 10, 0)), 1)
for i_batch, (observations, weights_benchmarks) in enumerate(
self.event_loader(
batch_size=batch_size, start=start_event, include_nuisance_parameters=include_nuisance_parameters
)
):
if (i_batch + 1) % n_batches_verbose == 0:
logger.info("Evaluating kinematic Fisher information on batch %s / %s", i_batch + 1, n_batches)
else:
logger.debug("Evaluating kinematic Fisher information on batch %s / %s", i_batch + 1, n_batches)
weights_theta = mdot(theta_matrix, weights_benchmarks)
# Calculate Fisher info on this batch
if model_is_ensemble:
with less_logging():
this_fisher_info, this_covariance = model.calculate_fisher_information(
x=observations,
theta=theta,
obs_weights=weights_theta,
n_events=luminosity * total_xsec * np.sum(weights_theta) / total_sum_weights_theta,
calculate_covariance=calculate_covariance,
mode=mode,
)
else:
with less_logging():
this_fisher_info = model.calculate_fisher_information(
x=observations,
theta=theta,
weights=weights_theta,
n_events=luminosity * total_xsec * np.sum(weights_theta) / total_sum_weights_theta,
)
this_covariance = None
# Sum up results
if fisher_info_kin is None:
fisher_info_kin = this_fisher_info
elif isinstance(fisher_info_kin, list):
for i in range(len(fisher_info_kin)):
fisher_info_kin[i] += this_fisher_info[i]
else:
fisher_info_kin += this_fisher_info
if this_covariance is not None:
if covariance is None:
covariance = this_covariance
elif isinstance(covariance, list):
for i in range(len(covariance)):
covariance[i] += this_covariance[i]
else:
covariance += this_covariance
# Evaluation from unweighted event sample
else:
with less_logging():
if model_is_ensemble:
fisher_info_kin, covariance = model.calculate_fisher_information(
x=unweighted_x_sample_file,
theta=theta,
n_events=luminosity * total_xsec,
mode=mode,
calculate_covariance=calculate_covariance,
)
else:
fisher_info_kin = model.calculate_fisher_information(
x=unweighted_x_sample_file, n_events=luminosity * total_xsec, theta=theta
)
covariance = None
# Returns
if model_is_ensemble:
return fisher_info_rate + fisher_info_kin, rate_covariance + covariance
return fisher_info_rate + fisher_info_kin, rate_covariance
def rate_information(
self, theta, luminosity, cuts=None, efficiency_functions=None, include_nuisance_parameters=True
):
"""
Calculates the Fisher information in a measurement of the total cross section (without any kinematic
information).
Parameters
----------
theta : ndarray
Parameter point `theta` at which the Fisher information matrix `I_ij(theta)` is evaluated.
luminosity : float
Luminosity in pb^-1.
cuts : None or list of str, optional
Cuts. Each entry is a parseable Python expression that returns a bool (True if the event should pass a cut,
False otherwise). Default value: None.
efficiency_functions : list of str or None
Efficiencies. Each entry is a parseable Python expression that returns a float for the efficiency of one
component. Default value: None.
include_nuisance_parameters : bool, optional
If True, nuisance parameters are taken into account. Default value: True.
Returns
-------
fisher_information : ndarray
Expected Fisher information in the total cross section with shape `(n_parameters, n_parameters)`.
fisher_information_uncertainty : ndarray
Covariance matrix of the Fisher information matrix with shape
`(n_parameters, n_parameters, n_parameters, n_parameters)`, calculated with plain Gaussian error
propagation.
"""
include_nuisance_parameters = include_nuisance_parameters and (self.nuisance_parameters is not None)
# Get weights at benchmarks
weights_benchmarks, weights_benchmark_uncertainties = self._calculate_xsec(
cuts=cuts,
efficiency_functions=efficiency_functions,
return_benchmark_xsecs=True,
return_error=True,
include_nuisance_parameters=include_nuisance_parameters,
)
weights_benchmarks = weights_benchmarks.reshape((1, -1))
weights_benchmark_uncertainties = weights_benchmark_uncertainties.reshape((1, -1))
# Get Fisher information
fisher_info, covariance = self._calculate_fisher_information(
theta=theta,
weights_benchmarks=weights_benchmarks,
luminosity=luminosity,
sum_events=True,
calculate_uncertainty=True,
weights_benchmark_uncertainties=weights_benchmark_uncertainties,
include_nuisance_parameters=include_nuisance_parameters,
)
return fisher_info, covariance
def histo_information(
self,
theta,
luminosity,
observable,
bins,
histrange=None,
cuts=None,
efficiency_functions=None,
n_events_dynamic_binning=None,
):
"""
Calculates the Fisher information in the one-dimensional histogram of an (parton-level or detector-level,
depending on how the observations in the MadMiner file were calculated) observable.
Parameters
----------
theta : ndarray
Parameter point `theta` at which the Fisher information matrix `I_ij(theta)` is evaluated.
luminosity : float
Luminosity in pb^-1.
observable : str
Expression for the observable to be histogrammed. The str will be parsed by Python's `eval()` function
and can use the names of the observables in the MadMiner files.
bins : int or ndarray
If int: number of bins in the histogram, excluding overflow bins. Otherwise, defines the bin boundaries
(excluding overflow bins).
histrange : tuple of float or None, optional
Minimum and maximum value of the histogram in the form `(min, max)`. Overflow bins are always added. If
None and bins is an int, variable-width bins with equal cross section are constructed automatically.
Default value: None.
cuts : None or list of str, optional
Cuts. Each entry is a parseable Python expression that returns a bool (True if the event should pass a cut,
False otherwise). Default value: None.
efficiency_functions : list of str or None
Efficiencies. Each entry is a parseable Python expression that returns a float for the efficiency of one
component. Default value: None.
n_events_dynamic_binning : int or None, optional
Number of events used to calculate the dynamic binning (if histrange is None). If None, all events are used.
Note that these events are not shuffled, so if the events in the MadMiner file are sorted, using a value
different from None can cause issues. Default value: None.
Returns
-------
fisher_information : ndarray
Expected Fisher information in the histogram with shape `(n_parameters, n_parameters)`.
fisher_information_uncertainty : ndarray
Covariance matrix of the Fisher information matrix with shape
`(n_parameters, n_parameters, n_parameters, n_parameters)`, calculated with plain Gaussian error
propagation.
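        Examples
        --------
        A sketch of a one-dimensional binned analysis ("pt_j1" stands in for any observable
        name defined in the MadMiner file):
        >>> info, cov = fisher.histo_information(
        ...     theta=np.zeros(2), luminosity=300000.0, observable="pt_j1", bins=20, histrange=(0.0, 500.0)
        ... )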
"""
# Input
if cuts is None:
cuts = []
if efficiency_functions is None:
efficiency_functions = []
# Binning
bin_boundaries, n_bins_total = self._calculate_binning(
bins, cuts, efficiency_functions, histrange, n_events_dynamic_binning, observable, theta
)
# Loop over batches
weights_benchmarks = np.zeros((n_bins_total, self.n_benchmarks))
weights_squared_benchmarks = np.zeros((n_bins_total, self.n_benchmarks))
for observations, weights in self.event_loader():
# Cuts
cut_filter = [self._pass_cuts(obs_event, cuts) for obs_event in observations]
observations = observations[cut_filter]
weights = weights[cut_filter]
# Efficiencies
efficiencies = np.array(
[self._eval_efficiency(obs_event, efficiency_functions) for obs_event in observations]
)
weights *= efficiencies[:, np.newaxis]
# Evaluate histogrammed observable
histo_observables = np.asarray([self._eval_observable(obs_event, observable) for obs_event in observations])
# Find bins
i_bins = np.searchsorted(bin_boundaries, histo_observables)
assert ((0 <= i_bins) & (i_bins < n_bins_total)).all(), "Wrong bin {}".format(i_bins)
# Add up
for i in range(n_bins_total):
if len(weights[i_bins == i]) > 0:
weights_benchmarks[i] += np.sum(weights[i_bins == i], axis=0)
weights_squared_benchmarks[i] += np.sum(weights[i_bins == i] ** 2, axis=0)
weights_benchmark_uncertainties = weights_squared_benchmarks ** 0.5
# Check cross sections per bin
self._check_binning_stats(weights_benchmarks, weights_benchmark_uncertainties, theta)
# Calculate Fisher information in histogram
fisher_info, covariance = self._calculate_fisher_information(
theta,
weights_benchmarks,
luminosity,
sum_events=True,
weights_benchmark_uncertainties=weights_benchmark_uncertainties,
calculate_uncertainty=True,
)
return fisher_info, covariance
def histo_information_2d(
self,
theta,
luminosity,
observable1,
bins1,
observable2,
bins2,
histrange1=None,
histrange2=None,
cuts=None,
efficiency_functions=None,
n_events_dynamic_binning=None,
):
"""
Calculates the Fisher information in a two-dimensional histogram of two (parton-level or detector-level,
depending on how the observations in the MadMiner file were calculated) observables.
Parameters
----------
theta : ndarray
Parameter point `theta` at which the Fisher information matrix `I_ij(theta)` is evaluated.
luminosity : float
Luminosity in pb^-1.
observable1 : str
Expression for the first observable to be histogrammed. The str will be parsed by Python's `eval()` function
and can use the names of the observables in the MadMiner files.
bins1 : int or ndarray
            If int: number of bins along the first axis of the histogram, excluding overflow bins.
Otherwise, defines the bin boundaries along the first axis in the histogram (excluding overflow bins).
observable2 : str
            Expression for the second observable to be histogrammed. The str will be parsed by Python's `eval()` function
and can use the names of the observables in the MadMiner files.
bins2 : int or ndarray
            If int: number of bins along the second axis of the histogram, excluding overflow bins.
Otherwise, defines the bin boundaries along the second axis in the histogram (excluding overflow bins).
histrange1 : tuple of float or None, optional
Minimum and maximum value of the first axis of the histogram in the form `(min, max)`. Overflow bins are
always added. If None, variable-width bins with equal cross section are constructed automatically. Default
value: None.
histrange2 : tuple of float or None, optional
            Minimum and maximum value of the second axis of the histogram in the form `(min, max)`. Overflow bins are
always added. If None, variable-width bins with equal cross section are constructed automatically. Default
value: None.
cuts : None or list of str, optional
Cuts. Each entry is a parseable Python expression that returns a bool (True if the event should pass a cut,
False otherwise). Default value: None.
efficiency_functions : list of str or None
Efficiencies. Each entry is a parseable Python expression that returns a float for the efficiency of one
component. Default value: None.
n_events_dynamic_binning : int or None, optional
Number of events used to calculate the dynamic binning (if histrange is None). If None, all events are used.
Note that these events are not shuffled, so if the events in the MadMiner file are sorted, using a value
different from None can cause issues. Default value: None.
Returns
-------
fisher_information : ndarray
Expected Fisher information in the histogram with shape `(n_parameters, n_parameters)`.
fisher_information_uncertainty : ndarray
Covariance matrix of the Fisher information matrix with shape
`(n_parameters, n_parameters, n_parameters, n_parameters)`, calculated with plain Gaussian error
propagation.
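Examples
--------
A minimal usage sketch (illustration only: the observable names are placeholders that
must correspond to observables defined in the MadMiner file, and `fisher` is assumed
to be an instance of this class):
>>> fi, fi_cov = fisher.histo_information_2d(
...     theta=np.array([0.0, 0.0]),
...     luminosity=300000.0,
...     observable1="pt_j1",
...     bins1=[0.0, 100.0, 200.0, 400.0],
...     observable2="delta_phi_jj",
...     bins2=5,
...     histrange2=(0.0, 3.1),
... )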
"""
# Input
if cuts is None:
cuts = []
if efficiency_functions is None:
efficiency_functions = []
# Binning
bin1_boundaries, n_bins1_total = self._calculate_binning(
bins1, cuts, efficiency_functions, histrange1, n_events_dynamic_binning, observable1, theta
)
bin2_boundaries, n_bins2_total = self._calculate_binning(
bins2, cuts, efficiency_functions, histrange2, n_events_dynamic_binning, observable2, theta
)
# Loop over batches
weights_benchmarks = np.zeros((n_bins1_total, n_bins2_total, self.n_benchmarks))
weights_squared_benchmarks = np.zeros((n_bins1_total, n_bins2_total, self.n_benchmarks))
for observations, weights in self.event_loader():
# Cuts
cut_filter = [self._pass_cuts(obs_event, cuts) for obs_event in observations]
observations = observations[cut_filter]
weights = weights[cut_filter]
# Efficiencies
efficiencies = np.array(
[self._eval_efficiency(obs_event, efficiency_functions) for obs_event in observations]
)
weights *= efficiencies[:, np.newaxis]
# Evaluate histogrammed observable
histo1_observables = np.asarray(
[self._eval_observable(obs_event, observable1) for obs_event in observations]
)
histo2_observables = np.asarray(
[self._eval_observable(obs_event, observable2) for obs_event in observations]
)
# Find bins
i_bins1 = np.searchsorted(bin1_boundaries, histo1_observables)
i_bins2 = np.searchsorted(bin2_boundaries, histo2_observables)
assert ((0 <= i_bins1) & (i_bins1 < n_bins1_total)).all(), "Wrong bin {}".format(i_bins1)
assert ((0 <= i_bins2) & (i_bins2 < n_bins2_total)).all(), "Wrong bin {}".format(i_bins2)
# Add up
for i in range(n_bins1_total):
for j in range(n_bins2_total):
if len(weights[(i_bins1 == i) & (i_bins2 == j)]) > 0:
weights_benchmarks[i, j] += np.sum(weights[(i_bins1 == i) & (i_bins2 == j)], axis=0)
weights_squared_benchmarks[i, j] += np.sum(
weights[(i_bins1 == i) & (i_bins2 == j)] ** 2, axis=0
)
weights_benchmark_uncertainties = weights_squared_benchmarks ** 0.5
# Calculate Fisher information in histogram
weights_benchmarks = weights_benchmarks.reshape(-1, self.n_benchmarks)
weights_benchmark_uncertainties = weights_benchmark_uncertainties.reshape(-1, self.n_benchmarks)
self._check_binning_stats(
weights_benchmarks, weights_benchmark_uncertainties, theta, n_bins_last_axis=n_bins2_total
)
fisher_info, covariance = self._calculate_fisher_information(
theta,
weights_benchmarks,
luminosity,
sum_events=True,
weights_benchmark_uncertainties=weights_benchmark_uncertainties,
calculate_uncertainty=True,
)
return fisher_info, covariance
def histogram_of_information(
self,
theta,
observable,
nbins,
histrange,
model_file=None,
luminosity=300000.0,
cuts=None,
efficiency_functions=None,
batch_size=100000,
test_split=0.2,
):
"""
Calculates the full and rate-only Fisher information in slices of one observable. For the full
information, it will return the truth-level information if model_file is None, and otherwise the
detector-level information based on the SALLY-type score estimator saved in model_file.
Parameters
----------
theta : ndarray
Parameter point `theta` at which the Fisher information matrix `I_ij(theta)` is evaluated.
observable : str
Expression for the observable to be sliced. The str will be parsed by Python's `eval()` function
and can use the names of the observables in the MadMiner files.
nbins : int
Number of bins in the slicing, excluding overflow bins.
histrange : tuple of float
Minimum and maximum value of the slicing in the form `(min, max)`. Overflow bins are always added.
model_file : str or None, optional
If None, the truth-level Fisher information is calculated. If str, filename of a trained local score
regression model that was trained on samples from `theta` (see `madminer.ml.Estimator`). Default value:
None.
luminosity : float, optional
Luminosity in pb^-1. Default value: 300000.
cuts : None or list of str, optional
Cuts. Each entry is a parseable Python expression that returns a bool (True if the event should pass a cut,
False otherwise). Default value: None.
efficiency_functions : list of str or None
Efficiencies. Each entry is a parseable Python expression that returns a float for the efficiency of one
component. Default value: None.
batch_size : int, optional
If model_file is not None: Batch size. Default value: 100000.
test_split : float or None, optional
If model_file is not None: this determines the fraction of weighted events used for evaluation.
If None, all events are used (this will probably include events used during training!). Default value: 0.2.
Returns
-------
bin_boundaries : ndarray
Observable slice boundaries.
sigma_bins : ndarray
Cross section in pb in each of the slices.
fisher_infos_rate : ndarray
Expected rate-only Fisher information for each slice. Has shape `(n_slices, n_parameters, n_parameters)`.
fisher_infos_full : ndarray
Expected full Fisher information for each slice. Has shape
`(n_slices, n_parameters, n_parameters)`.
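Examples
--------
A sketch of the truth-level case (illustration only; the observable name is a
placeholder and `fisher` is assumed to be an instance of this class):
>>> bins, sigmas, infos_rate, infos_full = fisher.histogram_of_information(
...     theta=np.array([0.0, 0.0]),
...     observable="pt_j1",
...     nbins=10,
...     histrange=(0.0, 500.0),
...     luminosity=300000.0,
... )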
"""
# Input
if cuts is None:
cuts = []
if efficiency_functions is None:
efficiency_functions = []
# Theta morphing matrix
theta_matrix = self._get_theta_benchmark_matrix(theta)
# Number of bins
n_bins_total = nbins + 2
bin_boundaries = np.linspace(histrange[0], histrange[1], num=nbins + 1)
# Prepare output
weights_benchmarks_bins = np.zeros((n_bins_total, self.n_benchmarks))
fisher_info_full_bins = np.zeros((n_bins_total, self.n_parameters, self.n_parameters))
# Main loop: truth-level case
if model_file is None:
for observations, weights in self.event_loader():
# Cuts
cut_filter = [self._pass_cuts(obs_event, cuts) for obs_event in observations]
observations = observations[cut_filter]
weights = weights[cut_filter]
# Efficiencies
efficiencies = np.array(
[self._eval_efficiency(obs_event, efficiency_functions) for obs_event in observations]
)
weights *= efficiencies[:, np.newaxis]
# Fisher info per event
fisher_info_events = self._calculate_fisher_information(theta, weights, luminosity, sum_events=False)
# Evaluate histogrammed observable
histo_observables = np.asarray(
[self._eval_observable(obs_event, observable) for obs_event in observations]
)
# Get rid of nuisance parameters
fisher_info_events = fisher_info_events[:, : self.n_parameters, : self.n_parameters]
# Find bins
bins = np.searchsorted(bin_boundaries, histo_observables)
assert ((0 <= bins) & (bins < n_bins_total)).all(), "Wrong bin {}".format(bins)
# Add up
for i in range(n_bins_total):
if len(weights[bins == i]) > 0:
weights_benchmarks_bins[i] += np.sum(weights[bins == i], axis=0)
fisher_info_full_bins[i] += np.sum(fisher_info_events[bins == i], axis=0)
# ML case
else:
# Load SALLY model
if os.path.isdir(model_file) and os.path.exists(model_file + "/ensemble.json"):
model_is_ensemble = True
model = Ensemble()
model.load(model_file)
else:
model_is_ensemble = False
model = ScoreEstimator()
model.load(model_file)
# Nuisance parameters?
if model.n_parameters == self.n_parameters:
logger.debug(
"Found %s parameters in SALLY model, matching %s physical parameters in MadMiner file",
model.n_parameters,
self.n_parameters,
)
include_nuisance_parameters = False
elif model.n_parameters == self.n_parameters + self.n_nuisance_parameters:
logger.debug(
"Found %s parameters in SALLY model, matching %s physical parameters + %s nuisance parameters"
+ " in MadMiner file",
model.n_parameters,
self.n_parameters,
self.n_nuisance_parameters,
)
include_nuisance_parameters = True
else:
raise RuntimeError(
"Inconsistent numbers of parameters! Found %s in SALLY model, %s physical parameters in "
"MadMiner file, and %s nuisance parameters in MadMiner file."
% (model.n_parameters, self.n_parameters, self.n_nuisance_parameters)
)
# Total xsec
total_xsec = self._calculate_xsec(theta=theta)
logger.debug("Total cross section: %s pb", total_xsec)
# Which events to sum over
if test_split is None or test_split <= 0.0 or test_split >= 1.0:
start_event = 0
else:
start_event = int(round((1.0 - test_split) * self.n_samples, 0)) + 1
if start_event > 0:
total_sum_weights_theta = self._calculate_xsec(theta=theta, start_event=start_event)
else:
total_sum_weights_theta = total_xsec
# Number of batches
n_batches = int(np.ceil((self.n_samples - start_event) / batch_size))
n_batches_verbose = max(int(round(n_batches / 10, 0)), 1)
# ML main loop
for i_batch, (observations, weights_benchmarks) in enumerate(
self.event_loader(
batch_size=batch_size, start=start_event, include_nuisance_parameters=include_nuisance_parameters
)
):
if (i_batch + 1) % n_batches_verbose == 0:
logger.info("Evaluating kinematic Fisher information on batch %s / %s", i_batch + 1, n_batches)
else:
logger.debug("Evaluating kinematic Fisher information on batch %s / %s", i_batch + 1, n_batches)
# Cuts
cut_filter = [self._pass_cuts(obs_event, cuts) for obs_event in observations]
observations = observations[cut_filter]
weights_benchmarks = weights_benchmarks[cut_filter]
# Efficiencies
efficiencies = np.array(
[self._eval_efficiency(obs_event, efficiency_functions) for obs_event in observations]
)
weights_benchmarks *= efficiencies[:, np.newaxis]
# Rescale for test_split
if test_split is not None:
correction = np.array([1.0 / test_split for obs_event in observations])
weights_benchmarks *= correction[:, np.newaxis]
weights_theta = mdot(theta_matrix, weights_benchmarks)
# Calculate Fisher info on this batch
if model_is_ensemble:
fisher_info_events, _ = model.calculate_fisher_information(
x=observations,
obs_weights=weights_theta,
n_events=luminosity * np.sum(weights_theta),
mode="score",
calculate_covariance=False,
sum_events=False,
)
else:
fisher_info_events = model.calculate_fisher_information(
x=observations,
weights=weights_theta,
n_events=luminosity * np.sum(weights_theta),
sum_events=False,
)
# Get rid of nuisance parameters
if include_nuisance_parameters:
fisher_info_events = fisher_info_events[:, : self.n_parameters, : self.n_parameters]
# Evaluate histogrammed observable
histo_observables = np.asarray(
[self._eval_observable(obs_event, observable) for obs_event in observations]
)
# Find bins
bins = np.searchsorted(bin_boundaries, histo_observables)
assert ((0 <= bins) & (bins < n_bins_total)).all(), "Wrong bin {}".format(bins)
# Add up
for i in range(n_bins_total):
if len(weights_benchmarks[bins == i]) > 0:
weights_benchmarks_bins[i] += np.sum(weights_benchmarks[bins == i], axis=0)
fisher_info_full_bins[i] += np.sum(fisher_info_events[bins == i], axis=0)
# Calculate xsecs in bins
sigma_bins = mdot(theta_matrix, weights_benchmarks_bins) # (n_bins,)
# Calculate rate-only Fisher informations in bins
fisher_info_rate_bins = self._calculate_fisher_information(
theta, weights_benchmarks_bins, luminosity, sum_events=False
)
# Get rid of nuisance parameters
fisher_info_rate_bins = fisher_info_rate_bins[:, : self.n_parameters, : self.n_parameters]
# If ML: xsec info is still missing !
if model_file is not None:
fisher_info_full_bins += fisher_info_rate_bins
return bin_boundaries, sigma_bins, fisher_info_rate_bins, fisher_info_full_bins
def histogram_of_sigma_dsigma(self, theta, observable, nbins, histrange, cuts=None, efficiency_functions=None):
"""
Fills events into histograms and calculates the cross section and first derivative for each bin
Parameters
----------
theta : ndarray
Parameter point `theta` at which the Fisher information matrix `I_ij(theta)` is evaluated.
observable : str
Expression for the observable to be sliced. The str will be parsed by Python's `eval()` function
and can use the names of the observables in the MadMiner files.
nbins : int
Number of bins in the slicing, excluding overflow bins.
histrange : tuple of float
Minimum and maximum value of the slicing in the form `(min, max)`. Overflow bins are always added.
cuts : None or list of str, optional
Cuts. Each entry is a parseable Python expression that returns a bool (True if the event should pass a cut,
False otherwise). Default value: None.
efficiency_functions : list of str or None
Efficiencies. Each entry is a parseable Python expression that returns a float for the efficiency of one
component. Default value: None.
Returns
-------
bin_boundaries : ndarray
Observable slice boundaries.
sigma_bins : ndarray
Cross section in pb in each of the slices.
dsigma_bins : ndarray
Derivative of the cross section with respect to theta in each of the slices, in pb. Has shape `(n_parameters, n_slices)`.
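Examples
--------
Illustration only (the observable name is a placeholder; `fisher` is assumed to be an
instance of this class):
>>> bins, sigma, dsigma = fisher.histogram_of_sigma_dsigma(
...     theta=np.array([0.0, 0.0]),
...     observable="pt_j1",
...     nbins=10,
...     histrange=(0.0, 500.0),
... )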
"""
# Input
if cuts is None:
cuts = []
if efficiency_functions is None:
efficiency_functions = []
# Binning
dynamic_binning = histrange is None
if dynamic_binning:
n_bins_total = nbins
bin_boundaries = self._calculate_dynamic_binning(observable, theta, nbins, None, cuts, efficiency_functions)
else:
n_bins_total = nbins + 2
bin_boundaries = np.linspace(histrange[0], histrange[1], num=nbins + 1)
# Prepare output
weights_benchmarks_bins = np.zeros((n_bins_total, self.n_benchmarks))
# Main loop: truth-level case
for observations, weights in self.event_loader():
# Cuts
cut_filter = [self._pass_cuts(obs_event, cuts) for obs_event in observations]
observations = observations[cut_filter]
weights = weights[cut_filter]
# Efficiencies
efficiencies = np.array(
[self._eval_efficiency(obs_event, efficiency_functions) for obs_event in observations]
)
weights *= efficiencies[:, np.newaxis]
# Evaluate histogrammed observable
histo_observables = np.asarray([self._eval_observable(obs_event, observable) for obs_event in observations])
# Find bins
bins = np.searchsorted(bin_boundaries, histo_observables)
assert ((0 <= bins) & (bins < n_bins_total)).all(), "Wrong bin {}".format(bins)
# Add up
for i in range(n_bins_total):
if len(weights[bins == i]) > 0:
weights_benchmarks_bins[i] += np.sum(weights[bins == i], axis=0)
# Get morphing matrices
theta_matrix = self._get_theta_benchmark_matrix(theta, zero_pad=False) # (n_benchmarks_phys,)
dtheta_matrix = self._get_dtheta_benchmark_matrix(theta, zero_pad=False) # (n_parameters, n_benchmarks_phys)
# Calculate xsecs in bins
sigma_bins = mdot(theta_matrix, weights_benchmarks_bins) # (n_bins,)
dsigma_bins = mdot(dtheta_matrix, weights_benchmarks_bins) # (n_parameters,n_bins,)
return bin_boundaries, sigma_bins, dsigma_bins
def nuisance_constraint_information(self):
""" Builds the Fisher information term representing the Gaussian constraints on the nuisance parameters """
diagonal = np.array([0.0 for _ in range(self.n_parameters)] + [1.0 for _ in range(self.n_nuisance_parameters)])
return np.diag(diagonal)
def _check_binning_stats(
self, weights_benchmarks, weights_benchmark_uncertainties, theta, report=5, n_bins_last_axis=None
):
theta_matrix = self._get_theta_benchmark_matrix(theta, zero_pad=False) # (n_benchmarks_phys,)
sigma = mdot(theta_matrix, weights_benchmarks) # Shape (n_bins,)
sigma_uncertainties = mdot(theta_matrix, weights_benchmark_uncertainties) # Shape (n_bins,)
rel_uncertainties = sigma_uncertainties / np.maximum(sigma, 1.0e-12)
order = np.argsort(rel_uncertainties)[::-1]
logger.info("Bins with largest statistical uncertainties on rates:")
for i_bin in order[:report]:
bin_nd = i_bin + 1
if n_bins_last_axis is not None:
bin_nd = (i_bin // n_bins_last_axis + 1, i_bin % n_bins_last_axis + 1)
logger.info(
" Bin %s: (%.5f +/- %.5f) fb (%.0f %%)",
bin_nd,
1000.0 * sigma[i_bin],
1000.0 * sigma_uncertainties[i_bin],
100.0 * rel_uncertainties[i_bin],
)
def _calculate_binning(
self, bins, cuts, efficiency_functions, histrange, n_events_dynamic_binning, observable, theta
):
dynamic_binning = histrange is None and isinstance(bins, int)
if dynamic_binning:
n_bins_total = bins
bin_boundaries = self._calculate_dynamic_binning(
observable, theta, bins, n_events_dynamic_binning, cuts, efficiency_functions
)
logger.debug("Automatic dynamic binning: bin boundaries %s", bin_boundaries)
elif isinstance(bins, int):
n_bins_total = bins + 2
bin_boundaries = np.linspace(histrange[0], histrange[1], num=bins + 1)
else:
bin_boundaries = bins
n_bins_total = len(bins) + 1
return bin_boundaries, n_bins_total
def _calculate_fisher_information(
self,
theta,
weights_benchmarks,
luminosity=300000.0,
include_nuisance_parameters=True,
sum_events=False,
calculate_uncertainty=False,
weights_benchmark_uncertainties=None,
):
"""
Low-level function that calculates a list of full Fisher information matrices for a given parameter point and
benchmark weights. Do not use this function directly, instead use the other `FisherInformation` functions.
Parameters
----------
theta : ndarray
Parameter point.
weights_benchmarks : ndarray
Benchmark weights. Shape (n_events, n_benchmark).
luminosity : float, optional
Luminosity in pb^-1. Default value: 300000.
include_nuisance_parameters : bool, optional
If True, nuisance parameters are taken into account. Default value: True.
sum_events : bool, optional
If True, returns the summed Fisher information. Otherwise, a list of Fisher
information matrices for each event. Default value: False.
calculate_uncertainty : bool, optional
Whether an uncertainty of the result is calculated. Note that this uncertainty is currently only
implemented for the "physical" part of the Fisher information, not for the nuisance parameters. Default
value: False.
weights_benchmark_uncertainties : ndarray or None, optional
If calculate_uncertainty is True, weights_benchmark_uncertainties sets the uncertainties on each entry of
weights_benchmarks. If None, weights_benchmark_uncertainties = weights_benchmarks is assumed.
Returns
-------
fisher_information : ndarray
If sum_events is True, the return value is an nxn matrix, the total Fisher information
summed over all events. Otherwise, a n_events x n_parameters x n_parameters tensor is returned that
includes the Fisher information matrices for each event separately.
fisher_information_uncertainty : ndarray
Only returned if calculate_uncertainty is True. Covariance matrix of the Fisher information. Note that this
does not take into account any uncertainty on the nuisance parameter part of the Fisher information, and
correlations between events are neglected. Note that independent of sum_events, the covariance matrix is
always summed over the events.
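Notes
-----
For each event `n`, the "physical" block computed below is
`I_ij(n) = luminosity * dsigma_i(n) * dsigma_j(n) / sigma(n)`,
where `sigma(n) = theta_matrix . weights_benchmarks(n)` and
`dsigma_i(n) = dtheta_matrix_i . weights_benchmarks(n)`; this is exactly what the
einsum expression "n,in,jn->nij" evaluates.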
"""
include_nuisance_parameters = include_nuisance_parameters and self.include_nuisance_parameters
# Get morphing matrices
theta_matrix = self._get_theta_benchmark_matrix(theta, zero_pad=False) # (n_benchmarks_phys,)
dtheta_matrix = self._get_dtheta_benchmark_matrix(theta, zero_pad=False) # (n_parameters, n_benchmarks_phys)
# Get differential xsec per event, and the derivative wrt to theta
sigma = mdot(theta_matrix, weights_benchmarks) # Shape (n_events,)
total_xsec = np.sum(sigma)
inv_sigma = sanitize_array(1.0 / sigma) # Shape (n_events,)
dsigma = mdot(dtheta_matrix, weights_benchmarks) # Shape (n_parameters, n_events)
# Calculate physics Fisher info for this event
fisher_info_phys = luminosity * np.einsum("n,in,jn->nij", inv_sigma, dsigma, dsigma)
# Nuisance parameter Fisher info
if include_nuisance_parameters:
nuisance_a = self.nuisance_morpher.calculate_a(weights_benchmarks) # Shape (n_nuisance_params, n_events)
# grad_i dsigma(x), where i is a nuisance parameter, is given by
# sigma[np.newaxis, :] * a
fisher_info_nuisance = luminosity * np.einsum("n,in,jn->nij", sigma, nuisance_a, nuisance_a)
fisher_info_mix = luminosity * np.einsum("in,jn->nij", dsigma, nuisance_a)
fisher_info_mix_transposed = luminosity * np.einsum("in,jn->nji", dsigma, nuisance_a)
n_all_parameters = self.n_parameters + self.n_nuisance_parameters
fisher_info = np.zeros((fisher_info_phys.shape[0], n_all_parameters, n_all_parameters))
fisher_info[:, : self.n_parameters, : self.n_parameters] = fisher_info_phys
fisher_info[:, : self.n_parameters, self.n_parameters :] = fisher_info_mix
fisher_info[:, self.n_parameters :, : self.n_parameters] = fisher_info_mix_transposed
fisher_info[:, self.n_parameters :, self.n_parameters :] = fisher_info_nuisance
else:
n_all_parameters = self.n_parameters
fisher_info = fisher_info_phys
# Error propagation
if calculate_uncertainty:
if weights_benchmarks.shape[1] > self.n_benchmarks_phys:
weights_benchmarks_phys = weights_benchmarks[:, np.logical_not(self.benchmark_is_nuisance)]
else:
weights_benchmarks_phys = weights_benchmarks
n_events = weights_benchmarks_phys.shape[0]
# Input uncertainties
if weights_benchmark_uncertainties is None:
weights_benchmark_uncertainties = weights_benchmarks_phys # Shape (n_events, n_benchmarks_phys)
# Build covariance matrix of inputs
# We assume full correlation between weights_benchmarks[i, b1] and weights_benchmarks[i, b2]
covariance_inputs = np.zeros((n_events, self.n_benchmarks_phys, self.n_benchmarks_phys))
for i in range(n_events):
for b1 in range(self.n_benchmarks_phys):
for b2 in range(self.n_benchmarks_phys):
if b1 == b2: # Diagonal
covariance_inputs[i, b1, b2] = weights_benchmark_uncertainties[i, b1] ** 2
else: # Off-diagonal, same event
covariance_inputs[i, b1, b2] = (
weights_benchmark_uncertainties[i, b1] * weights_benchmark_uncertainties[i, b2]
)
# Jacobian
temp1 = np.einsum("ib,jn,n->ijnb", dtheta_matrix, dsigma, inv_sigma)
temp2 = np.einsum("jb,in,n->ijnb", dtheta_matrix, dsigma, inv_sigma)
temp3 = np.einsum("b,in,jn,n,n->ijnb", theta_matrix, dsigma, dsigma, inv_sigma, inv_sigma)
temp1, temp2, temp3 = sanitize_array(temp1), sanitize_array(temp2), sanitize_array(temp3)
jacobian = luminosity * (temp1 + temp2 + temp3) # (n_parameters, n_parameters, n_events, n_benchmarks_phys)
# Covariance of information
covariance_information_phys = np.einsum("ijnb,nbc,klnc->ijkl", jacobian, covariance_inputs, jacobian)
if include_nuisance_parameters:
covariance_information = np.zeros(
(n_all_parameters, n_all_parameters, n_all_parameters, n_all_parameters)
)
covariance_information[
: self.n_parameters, : self.n_parameters, : self.n_parameters, : self.n_parameters
] = covariance_information_phys
else:
covariance_information = covariance_information_phys
if sum_events:
return np.sum(fisher_info, axis=0), covariance_information
return fisher_info, covariance_information
if sum_events:
return np.sum(fisher_info, axis=0)
return fisher_info
def _pass_cuts(self, observations, cuts=None):
"""
Checks if an event, specified by a list of observations, passes a set of cuts.
Parameters
----------
observations : list of float
list of float. Values of the observables for a single event.
cuts : list of str or None, optional
Each entry is a parseable Python expression that returns a bool (True if the event should pass a cut,
False otherwise). Default value: None.
Returns
-------
passes : bool
True if the event passes all cuts, False otherwise.
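Examples
--------
Each cut is evaluated with the observable names available as variables. For instance
(the observable names are placeholders for whatever is defined in the MadMiner file):
>>> passes = fisher._pass_cuts(obs_event, cuts=["pt_j1 > 100.", "n_jets >= 2"])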
"""
# Check inputs
if cuts is None:
cuts = []
assert len(observations) == len(self.observables), "Mismatch between observables and observations"
# Variables that can be used in cuts
variables = math_commands()
for observable_name, observable_value in zip(self.observables, observations):
variables[observable_name] = observable_value
# Check cuts
for cut in cuts:
if not bool(eval(cut, variables)):
return False
return True
def _eval_efficiency(self, observations, efficiency_functions=None):
"""
Calculates the efficiency for an event.
Parameters
----------
observations : list of float
Values of the observables.
efficiency_functions : list of str or None
Each entry is a parseable Python expression that returns a float for the efficiency of one component.
Default value: None.
Returns
-------
efficiency : float
Efficiency (0. <= efficiency <= 1.), product of the results of the calls to all entries in
efficiency_functions.
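Examples
--------
For instance (the expressions are placeholders), `efficiency_functions=["0.9", "1. - 0.1*(pt_j1 > 200.)"]`
would multiply a flat 90% efficiency with an observable-dependent factor.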
"""
# Check inputs
if efficiency_functions is None:
efficiency_functions = []
assert len(observations) == len(self.observables), "Mismatch between observables and observations"
# Variables that can be used in efficiency functions
variables = math_commands()
for observable_name, observable_value in zip(self.observables, observations):
variables[observable_name] = observable_value
# Evaluate efficiency functions
efficiency = 1.0
for efficiency_function in efficiency_functions:
efficiency *= float(eval(efficiency_function, variables))
return efficiency
def _eval_observable(self, observations, observable_definition):
"""
Calculates an observable expression for an event.
Parameters
----------
observations : ndarray
Values of the observables for an event, should have shape `(n_observables,)`.
observable_definition : str
A parseable Python expression that returns the value of the observable to be calculated.
Returns
-------
observable_value : float
Value of the observable defined in observable_definition.
"""
assert len(observations) == len(self.observables), "Mismatch between observables and observations"
# Variables that can be used in observable expressions
variables = math_commands()
for observable_name, observable_value in zip(self.observables, observations):
variables[observable_name] = observable_value
# Evaluate the observable expression
return float(eval(observable_definition, variables))
def _calculate_xsec(
self,
theta=None,
cuts=None,
efficiency_functions=None,
return_benchmark_xsecs=False,
return_error=False,
include_nuisance_parameters=True,
start_event=0,
):
"""
Calculates the total cross section for a parameter point.
Parameters
----------
theta : ndarray or None, optional
The parameter point. If None, return_benchmark_xsecs should be True. Default value: None.
cuts : list of str or None, optional
Cuts. Each entry is a parseable Python expression that returns a bool (True if the event should pass a cut,
False otherwise). Default value: None.
efficiency_functions : list of str or None
Efficiencies. Each entry is a parseable Python expression that returns a float for the efficiency of one
component. Default value: None.
return_benchmark_xsecs : bool, optional
If True, this function returns the benchmark xsecs. Otherwise, it returns the xsec at theta. Default value:
False.
return_error : bool, optional
If True, this function also returns the square root of the summed squared weights.
include_nuisance_parameters : bool, optional
If True and if return_benchmark_xsecs is True, the nuisance benchmarks are included in the output. Default
value: True.
start_event : int, optional
Index of first event in MadMiner file to consider. Default value: 0.
Returns
-------
xsec : ndarray or float
If return_benchmark_xsecs is True, an ndarray of benchmark xsecs in pb is returned. Otherwise, the cross
section at theta in pb is returned.
xsec_uncertainty : ndarray or float
Only returned if return_error is True. Uncertainty (square root of the summed squared weights) on xsec.
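Examples
--------
Sketches of the two modes (parameter values are placeholders; `fisher` is assumed to
be an instance of this class):
>>> xsec = fisher._calculate_xsec(theta=np.array([0.0, 0.0]))
>>> xsecs, errors = fisher._calculate_xsec(return_benchmark_xsecs=True, return_error=True)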
"""
logger.debug("Calculating total cross section for theta = %s", theta)
# Input
if cuts is None:
cuts = []
if efficiency_functions is None:
efficiency_functions = []
assert (theta is not None) or return_benchmark_xsecs, "Please supply theta or set return_benchmark_xsecs=True"
# Total xsecs for benchmarks
xsecs_benchmarks = None
xsecs_uncertainty_benchmarks = None
for observations, weights in self.event_loader(
start=start_event, include_nuisance_parameters=include_nuisance_parameters
):
# Cuts
cut_filter = [self._pass_cuts(obs_event, cuts) for obs_event in observations]
observations = observations[cut_filter]
weights = weights[cut_filter]
# Efficiencies
efficiencies = np.array(
[self._eval_efficiency(obs_event, efficiency_functions) for obs_event in observations]
)
weights *= efficiencies[:, np.newaxis]
# xsecs
if xsecs_benchmarks is None:
xsecs_benchmarks = np.sum(weights, axis=0)
xsecs_uncertainty_benchmarks = np.sum(weights ** 2, axis=0)
else:
xsecs_benchmarks += np.sum(weights, axis=0)
xsecs_uncertainty_benchmarks += np.sum(weights ** 2, axis=0)
assert xsecs_benchmarks is not None, "No events passed cuts"
xsecs_uncertainty_benchmarks = xsecs_uncertainty_benchmarks ** 0.5
logger.debug("Benchmarks xsecs [pb]: %s", xsecs_benchmarks)
if return_benchmark_xsecs:
if return_error:
return xsecs_benchmarks, xsecs_uncertainty_benchmarks
return xsecs_benchmarks
# Translate to xsec for theta
theta_matrix = self._get_theta_benchmark_matrix(theta)
xsec = mdot(theta_matrix, xsecs_benchmarks)
xsec_error = mdot(theta_matrix, xsecs_uncertainty_benchmarks)
logger.debug("Theta matrix: %s", theta_matrix)
logger.debug("Cross section at theta: %s pb", xsec)
if return_error:
return xsec, xsec_error
return xsec
def _calculate_dynamic_binning(
self, observable, theta, n_bins, n_events=None, cuts=None, efficiency_functions=None
):
if cuts is None:
cuts = []
if efficiency_functions is None:
efficiency_functions = []
# Quantile values
quantile_values = np.linspace(0.0, 1.0, n_bins + 1)
# Get data
x_pilot, weights_pilot = next(self.event_loader(batch_size=n_events))
# Cuts
cut_filter = [self._pass_cuts(x, cuts) for x in x_pilot]
x_pilot = x_pilot[cut_filter]
weights_pilot = weights_pilot[cut_filter]
# Efficiencies
efficiencies = np.array([self._eval_efficiency(x, efficiency_functions) for x in x_pilot])
weights_pilot *= efficiencies[:, np.newaxis]
# Evaluate histogrammed observable
histo_observables_pilot = np.asarray([self._eval_observable(x, observable) for x in x_pilot])
# Weights at theta
theta_matrix = self._get_theta_benchmark_matrix(theta)
weight_theta_pilot = mdot(theta_matrix, weights_pilot)
# Bin boundaries
bin_boundaries = weighted_quantile(histo_observables_pilot, quantile_values, weight_theta_pilot)
bin_boundaries = bin_boundaries[1:-1]
return bin_boundaries
# Aliases for backward compatibility
calculate_fisher_information_full_truth = truth_information
calculate_fisher_information_full_detector = full_information
calculate_fisher_information_rate = rate_information
calculate_fisher_information_hist1d = histo_information
calculate_fisher_information_hist2d = histo_information_2d
histogram_of_fisher_information = histogram_of_information
calculate_fisher_information_nuisance_constraints = nuisance_constraint_information
|
py | 1a32c695156c4ae0788e9414d27d3cc793cd81d7 | # Suite B tests
# Copyright (c) 2014-2015, Jouni Malinen <[email protected]>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import time
import logging
logger = logging.getLogger()
import hostapd
from utils import HwsimSkip
def check_suite_b_capa(dev):
if "GCMP" not in dev[0].get_capability("pairwise"):
raise HwsimSkip("GCMP not supported")
if "BIP-GMAC-128" not in dev[0].get_capability("group_mgmt"):
raise HwsimSkip("BIP-GMAC-128 not supported")
if "WPA-EAP-SUITE-B" not in dev[0].get_capability("key_mgmt"):
raise HwsimSkip("WPA-EAP-SUITE-B not supported")
check_suite_b_tls_lib(dev)
def check_suite_b_tls_lib(dev):
tls = dev[0].request("GET tls_library")
if not tls.startswith("OpenSSL"):
raise HwsimSkip("TLS library not supported for Suite B: " + tls);
supported = False
for ver in [ '1.0.2', '1.1.0' ]:
if "build=OpenSSL " + ver in tls and "run=OpenSSL " + ver in tls:
supported = True
break
if not supported:
raise HwsimSkip("OpenSSL version not supported for Suite B: " + tls)
def test_suite_b(dev, apdev):
"""WPA2/GCMP connection at Suite B 128-bit level"""
check_suite_b_capa(dev)
dev[0].flush_scan_cache()
params = { "ssid": "test-suite-b",
"wpa": "2",
"wpa_key_mgmt": "WPA-EAP-SUITE-B",
"rsn_pairwise": "GCMP",
"group_mgmt_cipher": "BIP-GMAC-128",
"ieee80211w": "2",
"ieee8021x": "1",
"openssl_ciphers": "SUITEB128",
#"dh_file": "auth_serv/dh.conf",
"eap_server": "1",
"eap_user_file": "auth_serv/eap_user.conf",
"ca_cert": "auth_serv/ec-ca.pem",
"server_cert": "auth_serv/ec-server.pem",
"private_key": "auth_serv/ec-server.key" }
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
dev[0].connect("test-suite-b", key_mgmt="WPA-EAP-SUITE-B", ieee80211w="2",
openssl_ciphers="SUITEB128",
eap="TLS", identity="tls user",
ca_cert="auth_serv/ec-ca.pem",
client_cert="auth_serv/ec-user.pem",
private_key="auth_serv/ec-user.key",
pairwise="GCMP", group="GCMP", scan_freq="2412")
tls_cipher = dev[0].get_status_field("EAP TLS cipher")
if tls_cipher != "ECDHE-ECDSA-AES128-GCM-SHA256":
raise Exception("Unexpected TLS cipher: " + tls_cipher)
bss = dev[0].get_bss(apdev[0]['bssid'])
if 'flags' not in bss:
raise Exception("Could not get BSS flags from BSS table")
if "[WPA2-EAP-SUITE-B-GCMP]" not in bss['flags']:
raise Exception("Unexpected BSS flags: " + bss['flags'])
dev[0].request("DISCONNECT")
dev[0].wait_disconnected(timeout=20)
dev[0].dump_monitor()
dev[0].request("RECONNECT")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED",
"CTRL-EVENT-CONNECTED"], timeout=20)
if ev is None:
raise Exception("Roaming with the AP timed out")
if "CTRL-EVENT-EAP-STARTED" in ev:
raise Exception("Unexpected EAP exchange")
def suite_b_as_params():
params = {}
params['ssid'] = 'as'
params['beacon_int'] = '2000'
params['radius_server_clients'] = 'auth_serv/radius_clients.conf'
params['radius_server_auth_port'] = '18129'
params['eap_server'] = '1'
params['eap_user_file'] = 'auth_serv/eap_user.conf'
params['ca_cert'] = 'auth_serv/ec-ca.pem'
params['server_cert'] = 'auth_serv/ec-server.pem'
params['private_key'] = 'auth_serv/ec-server.key'
params['openssl_ciphers'] = 'SUITEB128'
return params
def test_suite_b_radius(dev, apdev):
"""WPA2/GCMP (RADIUS) connection at Suite B 128-bit level"""
check_suite_b_capa(dev)
dev[0].flush_scan_cache()
params = suite_b_as_params()
hostapd.add_ap(apdev[1]['ifname'], params)
params = { "ssid": "test-suite-b",
"wpa": "2",
"wpa_key_mgmt": "WPA-EAP-SUITE-B",
"rsn_pairwise": "GCMP",
"group_mgmt_cipher": "BIP-GMAC-128",
"ieee80211w": "2",
"ieee8021x": "1",
'auth_server_addr': "127.0.0.1",
'auth_server_port': "18129",
'auth_server_shared_secret': "radius",
'nas_identifier': "nas.w1.fi" }
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
dev[0].connect("test-suite-b", key_mgmt="WPA-EAP-SUITE-B", ieee80211w="2",
openssl_ciphers="SUITEB128",
eap="TLS", identity="tls user",
ca_cert="auth_serv/ec-ca.pem",
client_cert="auth_serv/ec-user.pem",
private_key="auth_serv/ec-user.key",
pairwise="GCMP", group="GCMP", scan_freq="2412")
def check_suite_b_192_capa(dev):
if "GCMP-256" not in dev[0].get_capability("pairwise"):
raise HwsimSkip("GCMP-256 not supported")
if "BIP-GMAC-256" not in dev[0].get_capability("group_mgmt"):
raise HwsimSkip("BIP-GMAC-256 not supported")
if "WPA-EAP-SUITE-B-192" not in dev[0].get_capability("key_mgmt"):
raise HwsimSkip("WPA-EAP-SUITE-B-192 not supported")
check_suite_b_tls_lib(dev)
def test_suite_b_192(dev, apdev):
"""WPA2/GCMP-256 connection at Suite B 192-bit level"""
check_suite_b_192_capa(dev)
dev[0].flush_scan_cache()
params = { "ssid": "test-suite-b",
"wpa": "2",
"wpa_key_mgmt": "WPA-EAP-SUITE-B-192",
"rsn_pairwise": "GCMP-256",
"group_mgmt_cipher": "BIP-GMAC-256",
"ieee80211w": "2",
"ieee8021x": "1",
"openssl_ciphers": "SUITEB192",
"eap_server": "1",
"eap_user_file": "auth_serv/eap_user.conf",
"ca_cert": "auth_serv/ec2-ca.pem",
"server_cert": "auth_serv/ec2-server.pem",
"private_key": "auth_serv/ec2-server.key" }
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
dev[0].connect("test-suite-b", key_mgmt="WPA-EAP-SUITE-B-192",
ieee80211w="2",
openssl_ciphers="SUITEB192",
eap="TLS", identity="tls user",
ca_cert="auth_serv/ec2-ca.pem",
client_cert="auth_serv/ec2-user.pem",
private_key="auth_serv/ec2-user.key",
pairwise="GCMP-256", group="GCMP-256", scan_freq="2412")
tls_cipher = dev[0].get_status_field("EAP TLS cipher")
if tls_cipher != "ECDHE-ECDSA-AES256-GCM-SHA384":
raise Exception("Unexpected TLS cipher: " + tls_cipher)
bss = dev[0].get_bss(apdev[0]['bssid'])
if 'flags' not in bss:
raise Exception("Could not get BSS flags from BSS table")
if "[WPA2-EAP-SUITE-B-192-GCMP-256]" not in bss['flags']:
raise Exception("Unexpected BSS flags: " + bss['flags'])
dev[0].request("DISCONNECT")
dev[0].wait_disconnected(timeout=20)
dev[0].dump_monitor()
dev[0].request("RECONNECT")
ev = dev[0].wait_event(["CTRL-EVENT-EAP-STARTED",
"CTRL-EVENT-CONNECTED"], timeout=20)
if ev is None:
raise Exception("Roaming with the AP timed out")
if "CTRL-EVENT-EAP-STARTED" in ev:
raise Exception("Unexpected EAP exchange")
def test_suite_b_192_radius(dev, apdev):
"""WPA2/GCMP-256 (RADIUS) connection at Suite B 192-bit level"""
check_suite_b_192_capa(dev)
dev[0].flush_scan_cache()
params = suite_b_as_params()
params['ca_cert'] = 'auth_serv/ec2-ca.pem'
params['server_cert'] = 'auth_serv/ec2-server.pem'
params['private_key'] = 'auth_serv/ec2-server.key'
params['openssl_ciphers'] = 'SUITEB192'
hostapd.add_ap(apdev[1]['ifname'], params)
params = { "ssid": "test-suite-b",
"wpa": "2",
"wpa_key_mgmt": "WPA-EAP-SUITE-B-192",
"rsn_pairwise": "GCMP-256",
"group_mgmt_cipher": "BIP-GMAC-256",
"ieee80211w": "2",
"ieee8021x": "1",
'auth_server_addr': "127.0.0.1",
'auth_server_port': "18129",
'auth_server_shared_secret': "radius",
'nas_identifier': "nas.w1.fi" }
hapd = hostapd.add_ap(apdev[0]['ifname'], params)
dev[0].connect("test-suite-b", key_mgmt="WPA-EAP-SUITE-B-192",
ieee80211w="2",
openssl_ciphers="SUITEB192",
eap="TLS", identity="tls user",
ca_cert="auth_serv/ec2-ca.pem",
client_cert="auth_serv/ec2-user.pem",
private_key="auth_serv/ec2-user.key",
pairwise="GCMP-256", group="GCMP-256", scan_freq="2412")
|
py | 1a32c6c7f9fa4b99a25541f74fd763fb8b32a26e | #coding:utf-8
#
# id: bugs.core_0014
# title: Trigger do it wrong
# description: Computed by columns inside triggers always=NULL
# tracker_id: CORE-0014
# min_versions: ['2.5.0']
# versions: 2.5
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
test_script_1 = """
-- Works OK on 1.5.6 and up to 4.0.0.
create domain dom_datum_vreme as timestamp not null;
create domain dom_dokid as varchar(20) not null;
create domain dom_jid as integer;
create domain dom_kolicina as integer default 0;
create domain dom_naziv as varchar(100) not null;
create domain dom_novac as double precision default 0;
create domain dom_rabat as float default 0;
create domain dom_status as integer default 0;
commit;
create table ulaz_master
(
ulzid dom_jid not null,
datum dom_datum_vreme not null,
broj_racuna dom_dokid not null,
dobid dom_jid not null,
dobavljac dom_naziv not null,
napid dom_jid not null,
nacin_placanja dom_naziv not null,
datumprispeca timestamp, -- dom_datum_vreme null ,
vrednost dom_novac default 0,
rvrednost dom_novac default 0,
status dom_status default 0,
constraint pk_ulaz_master primary key (ulzid)
);
create table ulaz_detalji
(
ulzid dom_jid not null,
artid dom_jid not null,
artikal dom_naziv not null,
kolicina dom_kolicina default 0 not null,
cena dom_novac default 0 not null,
rabat dom_rabat default 0 not null,
ukupno computed by (kolicina * cena),
vratio dom_kolicina default 0,
constraint pk_ulaz_detalji primary key (ulzid, artid)
);
set term ^;
create trigger trig_ulaz_detalji_ai for ulaz_detalji
active after insert position 0
as
begin
update ulaz_master u set u.vrednost = u.vrednost + new.ukupno
where u.ulzid = new.ulzid;
update ulaz_master u set u.rvrednost = u.rvrednost + (1 - new.rabat/100) * new.ukupno
where u.ulzid = new.ulzid;
end
^
set term ;^
commit;
-- this trigger sets fields to null on rc8.
-- on rc6 it works as it should.
insert into ulaz_master(ulzid, datum, broj_racuna, dobid, dobavljac, napid, nacin_placanja)
values(1000, '19.03.2016 12:01:03', 'qwerty123', 78966, 'foo-bar', 32101, 'asd-fgh-jkl' );
/*
create domain dom_datum_vreme as timestamp not null;
create domain dom_dokid as varchar(20) not null;
create domain dom_jid as integer;
create domain dom_kolicina as integer default 0;
create domain dom_naziv as varchar(100) not null;
create domain dom_novac as double precision default 0;
create domain dom_rabat as float default 0;
create domain dom_status as integer default 0;
datum dom_datum_vreme not null,
broj_racuna dom_dokid not null,
dobid dom_jid not null,
dobavljac dom_naziv not null,
napid dom_jid not null,
nacin_placanja dom_naziv not null,
*/
set list on;
set count on;
select
ulzid, datum, broj_racuna, dobid, dobavljac, napid, nacin_placanja, datumprispeca
,cast(vrednost as numeric(10,2)) as vrednost
,cast(rvrednost as numeric(10,2)) as rvrednost
,status
from ulaz_master;
insert into
ulaz_detalji(ulzid, artid, artikal, kolicina, cena, rabat, vratio)
values(1000, 1000, 'liste', 19, 7, 30, 0);
select
ulzid, artid, artikal, kolicina,
cast(cena as numeric(12,2)) as cena,
rabat,
cast(ukupno as numeric(12,2)) as ukupno,
vratio
from ulaz_detalji;
select
ulzid, datum, broj_racuna, dobid, dobavljac, napid, nacin_placanja, datumprispeca
,cast(vrednost as numeric(10,2)) as vrednost
,cast(rvrednost as numeric(10,2)) as rvrednost
,status
from ulaz_master;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
ULZID 1000
DATUM 2016-03-19 12:01:03.0000
BROJ_RACUNA qwerty123
DOBID 78966
DOBAVLJAC foo-bar
NAPID 32101
NACIN_PLACANJA asd-fgh-jkl
DATUMPRISPECA <null>
VREDNOST 0.00
RVREDNOST 0.00
STATUS 0
Records affected: 1
Records affected: 1
ULZID 1000
ARTID 1000
ARTIKAL liste
KOLICINA 19
CENA 7.00
RABAT 30
UKUPNO 133.00
VRATIO 0
Records affected: 1
ULZID 1000
DATUM 2016-03-19 12:01:03.0000
BROJ_RACUNA qwerty123
DOBID 78966
DOBAVLJAC foo-bar
NAPID 32101
NACIN_PLACANJA asd-fgh-jkl
DATUMPRISPECA <null>
VREDNOST 133.00
RVREDNOST 93.10
STATUS 0
Records affected: 1
"""
@pytest.mark.version('>=2.5')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_expected_stdout == act_1.clean_stdout
|
py | 1a32c70bd1cb2b6de541d19cd2d4344950263c81 | # Generated by Django 3.0 on 2020-03-19 01:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('generative_model', '0002_auto_20200318_0449'),
]
operations = [
migrations.RenameField(
model_name='generativemodel',
old_name='sdf',
new_name='pdb',
),
migrations.AddField(
model_name='generativemodel',
name='iterations',
field=models.IntegerField(default=3),
),
migrations.AddField(
model_name='generativemodel',
name='residue_name',
field=models.CharField(default='LIG', max_length=3),
),
]
|
py | 1a32c727dba2591f40e976b3820ecec21000954d | import math
import itertools
def menu():
while True:
try:
print(menuStr)
print(stars)
choose = int(input("For Encrypter = 1\nFor Decrypter = 2\nFor Exit = 0\nChoose : "))
print(stars)
break
except:
print("Girdi de bir problem var.")
pass
if choose == 1:
print("Encrypter opening...")
print(stars)
myEncrypter()
elif choose == 2:
print("Decrypter opening...")
myDecrypter()
elif choose == 0:
print("Cya dude.")
else:
print("Hata menüde.")
menu()
def myEncrypter():
inputWord = str(input("Input : "))
print(stars)
iList = list(inputWord)
aList = []
tempList = []
outputList = []
t=0
for item in iList:
try:
aList.append(((alphabet.index(item)+1)*2)%29)
except:
print("Şuan sadece alfabe ile işlem yapabilirsiniz.")
iList.clear()
myEncrypter()
for x in aList:
tempList = list(str(math.sin(x)))
tempList.reverse()
for items in itertools.islice(tempList,0,3):
outputList.append(items)
for a in outputList:
if t == 0 :print("Sonucunuz: ",end="")
t+=1
print(a,end="")
def myDecrypter():
print("b")
alphabet = ['a', 'b', 'c', 'ç', 'd', 'e', 'f', 'g', 'ğ', 'h', 'ı', 'i', 'j',
'k', 'l', 'm', 'n', 'o', 'ö', 'p', 'r', 's', 'ş', 't', 'u', 'ü',
'v', 'y', 'z']
menuStr = """
_______________________________________________________________________________________
_____ _
_ __ ___ _ _| ____|_ __ ___ _ __ _ _ _ __ | |_ ___ _ __
| '_ ` _ \| | | | _| | '_ \ / __| '__| | | | '_ \| __/ _ \ '__|
| | | | | | |_| | |___| | | | (__| | | |_| | |_) | || __/ |
|_| |_| |_|\__, |_____|_| |_|\___|_| \__, | .__/ \__\___|_|
|___/ |___/|_| v.0.0.2
_________________________________________________________________________________________
"""
stars = "_" * 89
menu() |
py | 1a32c7c33fb38f7161db3e9c8eaa1cbc24b5131a | __author__ = 'hisham'
info = {
"title": "Feature List",
"description": "Map and configurable feature list side by side. Map and feature list are connected in zoom and pan. Use for store locations or similar apps.",
"author": 'Cartologic',
"home_page": 'http://cartologic.com/cartoview/apps/feature_list',
"help_url": "http://cartologic.com/cartoview/apps/feature_list/help/",
"tags": [
'map',
'viewer',
'feature_list',
'features',
'list'],
"licence": 'BSD',
"author_website": "http://www.cartologic.com",
"single_instance": False}
def install():
pass
def uninstall():
pass
|
py | 1a32c7cf858e7ec3d8b83fbe23815676d192ee60 | # pylint: disable=W0401
from .base import *
from .test_views import *
from .test_filters import *
from .test_translation import *
from .test_trek_relationship import *
from .test_models import *
from .test_admin import * |
py | 1a32c9736dc17aed0969063be582a9ac42080b96 | import unittest
import pandas as pd
import numpy as np
from copy import deepcopy
from darts.dataprocessing.transformers import BoxCox, Mapper
from darts.utils.timeseries_generation import sine_timeseries, linear_timeseries
from darts import TimeSeries
class BoxCoxTestCase(unittest.TestCase):
sine_series = sine_timeseries(length=50, value_y_offset=5, value_frequency=0.05)
lin_series = linear_timeseries(start_value=1, end_value=10, length=50)
multi_series = sine_series.stack(lin_series)
def test_boxbox_lambda(self):
boxcox = BoxCox(lmbda=0.3)
boxcox.fit(self.multi_series)
self.assertEqual(boxcox._fitted_params, [[0.3, 0.3]])
boxcox = BoxCox(lmbda=[0.3, 0.4])
boxcox.fit(self.multi_series)
self.assertEqual(boxcox._fitted_params, [[0.3, 0.4]])
with self.assertRaises(ValueError):
boxcox = BoxCox(lmbda=[0.2, 0.4, 0.5])
boxcox.fit(self.multi_series)
boxcox = BoxCox(optim_method="mle")
boxcox.fit(self.multi_series)
lmbda1 = boxcox._fitted_params[0].tolist()
boxcox = BoxCox(optim_method="pearsonr")
boxcox.fit(self.multi_series)
lmbda2 = boxcox._fitted_params[0].tolist()
self.assertNotEqual(lmbda1, lmbda2)
def test_boxcox_transform(self):
log_mapper = Mapper(lambda x: np.log(x))
boxcox = BoxCox(lmbda=0)
transformed1 = log_mapper.transform(self.sine_series)
transformed2 = boxcox.fit(self.sine_series).transform(self.sine_series)
np.testing.assert_almost_equal(
transformed1.all_values(copy=False),
transformed2.all_values(copy=False),
decimal=4,
)
def test_boxcox_inverse(self):
boxcox = BoxCox()
transformed = boxcox.fit_transform(self.multi_series)
back = boxcox.inverse_transform(transformed)
pd.testing.assert_frame_equal(
self.multi_series.pd_dataframe(), back.pd_dataframe(), check_exact=False
)
def test_boxcox_multi_ts(self):
test_cases = [
([[0.2, 0.4], [0.3, 0.6]]), # full lambda
(0.4), # single value
None, # None
]
for lmbda in test_cases:
box_cox = BoxCox(lmbda=lmbda)
transformed = box_cox.fit_transform([self.multi_series, self.multi_series])
back = box_cox.inverse_transform(transformed)
pd.testing.assert_frame_equal(
self.multi_series.pd_dataframe(),
back[0].pd_dataframe(),
check_exact=False,
)
pd.testing.assert_frame_equal(
self.multi_series.pd_dataframe(),
back[1].pd_dataframe(),
check_exact=False,
)
def test_boxcox_multiple_calls_to_fit(self):
"""
This test checks whether calling the scaler twice is calculating new lambdas instead of
keeping the old ones
"""
box_cox = BoxCox()
box_cox.fit(self.sine_series)
lambda1 = deepcopy(box_cox._fitted_params)[0].tolist()
box_cox.fit(self.lin_series)
lambda2 = deepcopy(box_cox._fitted_params)[0].tolist()
self.assertNotEqual(
lambda1, lambda2, "Lambdas should change when the transformer is retrained"
)
def test_multivariate_stochastic_series(self):
transformer = BoxCox()
vals = np.random.rand(10, 5, 10)
series = TimeSeries.from_values(vals)
new_series = transformer.fit_transform(series)
series_back = transformer.inverse_transform(new_series)
# Test inverse transform
np.testing.assert_allclose(series.all_values(), series_back.all_values())
|
py | 1a32ca380b2192e95523f14f1d91018c381666d1 | #!/usr/bin/python3
from brownie import *
from scripts.deployment.deploy_protocol import deployProtocol
from scripts.deployment.deploy_loanToken import deployLoanTokens
from scripts.deployment.deploy_tokens import deployTokens, readTokens, deployWRBTC  # deployWRBTC is called below; assumed to come from deploy_tokens
from scripts.deployment.deploy_multisig import deployMultisig
import shared
import json
from munch import Munch
'''
Deploys all of the contracts.
1. deploys the tokens or reads exsiting token contracts.
if configData contains token addresses, use the given addresses
else, deploy new tokens
2. deploys the base protocol contracts.
3. deploys, configures and tests the loan token contracts.
4. writes the relevant contract addresses into swap_test.json.
'''
def main():
global configData
#owners = [accounts[0], accounts[1], accounts[2]]
requiredConf=2
configData = {} # deploy new tokens
'''
configData = {
'WRBTC': '0x69FE5cEC81D5eF92600c1A0dB1F11986AB3758Ab',
'SUSD': '0xCb46C0DdC60d18eFEB0e586c17AF6Ea36452DaE0',
'medianizer': '0x2d39Cc54dc44FF27aD23A91a9B5fd750dae4B218'
}
'''
thisNetwork = network.show_active()
if thisNetwork == "development":
acct = accounts[0]
elif thisNetwork == "testnet" or thisNetwork == "rsk-mainnet":
acct = accounts.load("rskdeployer")
else:
raise Exception("network not supported")
if('WRBTC' in configData and 'SUSD' in configData):
tokens = readTokens(acct, configData['WRBTC'], configData['SUSD'])
elif('SUSD' in configData):
tokens = deployWRBTC(acct, configData['SUSD'])
else:
tokens = deployTokens(acct)
if(not 'medianizer' in configData):
medianizer = deployMoCMockup(acct)
configData['medianizer'] = medianizer.address
if(not 'mocState' in configData):
mocState = deployBProPriceFeedMockup(acct)
configData['mocState'] = mocState.address
(sovryn, feeds) = deployProtocol(acct, tokens, configData['medianizer'])
(loanTokenSUSD, loanTokenWRBTC, loanTokenSettingsSUSD,
loanTokenSettingsWRBTC) = deployLoanTokens(acct, sovryn, tokens)
#deployMultisig(sovryn, acct, owners, requiredConf)
configData["sovrynProtocol"] = sovryn.address
configData["PriceFeeds"] = feeds.address
configData["WRBTC"] = tokens.wrbtc.address
configData["SUSD"] = tokens.susd.address
configData["loanTokenSettingsSUSD"] = loanTokenSettingsSUSD.address
configData["loanTokenSUSD"] = loanTokenSUSD.address
configData["loanTokenSettingsWRBTC"] = loanTokenSettingsWRBTC.address
configData["loanTokenRBTC"] = loanTokenWRBTC.address
with open('./scripts/swapTest/swap_test.json', 'w') as configFile:
json.dump(configData, configFile)
def deployMoCMockup(acct):
priceFeedMockup = acct.deploy(PriceFeedsMoCMockup)
priceFeedMockup.setHas(True)
priceFeedMockup.setValue(10000e18)
return priceFeedMockup
def deployBProPriceFeedMockup(acct):
bproPriceFeedMockup = acct.deploy(BProPriceFeedMockup)
bproPriceFeedMockup.setValue(20000e18)
return bproPriceFeedMockup |
py | 1a32cb7e536194f03f5af7ed75eec272d6d93adf | # Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinderclient.v1 import client as cinder_client_v1
from cinderclient.v2 import client as cinder_client_v2
from requests_mock.contrib import fixture
from testtools import matchers
from nova import context
from nova import exception
from nova import test
from nova.volume import cinder
_image_metadata = {
'kernel_id': 'fake',
'ramdisk_id': 'fake'
}
class BaseCinderTestCase(object):
def setUp(self):
super(BaseCinderTestCase, self).setUp()
cinder.reset_globals()
self.requests = self.useFixture(fixture.Fixture())
self.api = cinder.API()
self.context = context.RequestContext('username',
'project_id',
auth_token='token',
service_catalog=self.CATALOG)
def flags(self, *args, **kwargs):
super(BaseCinderTestCase, self).flags(*args, **kwargs)
cinder.reset_globals()
def create_client(self):
return cinder.cinderclient(self.context)
def test_context_with_catalog(self):
self.assertEqual(self.URL, self.create_client().client.get_endpoint())
def test_cinder_http_retries(self):
retries = 42
self.flags(http_retries=retries, group='cinder')
self.assertEqual(retries, self.create_client().client.connect_retries)
def test_cinder_api_insecure(self):
# The True/False negation is awkward, but better for the client
# to pass us insecure=True and we check verify_cert == False
self.flags(insecure=True, group='cinder')
self.assertFalse(self.create_client().client.session.verify)
def test_cinder_http_timeout(self):
timeout = 123
self.flags(timeout=timeout, group='cinder')
self.assertEqual(timeout, self.create_client().client.session.timeout)
def test_cinder_api_cacert_file(self):
cacert = "/etc/ssl/certs/ca-certificates.crt"
self.flags(cafile=cacert, group='cinder')
self.assertEqual(cacert, self.create_client().client.session.verify)
class CinderTestCase(BaseCinderTestCase, test.NoDBTestCase):
"""Test case for cinder volume v1 api."""
URL = "http://localhost:8776/v1/project_id"
CATALOG = [{
"type": "volumev2",
"name": "cinderv2",
"endpoints": [{"publicURL": URL}]
}]
def create_client(self):
c = super(CinderTestCase, self).create_client()
self.assertIsInstance(c, cinder_client_v1.Client)
return c
def stub_volume(self, **kwargs):
volume = {
'display_name': None,
'display_description': None,
"attachments": [],
"availability_zone": "cinder",
"created_at": "2012-09-10T00:00:00.000000",
"id": '00000000-0000-0000-0000-000000000000',
"metadata": {},
"size": 1,
"snapshot_id": None,
"status": "available",
"volume_type": "None",
"bootable": "true"
}
volume.update(kwargs)
return volume
def test_cinder_endpoint_template(self):
endpoint = 'http://other_host:8776/v1/%(project_id)s'
self.flags(endpoint_template=endpoint, group='cinder')
self.assertEqual('http://other_host:8776/v1/project_id',
self.create_client().client.endpoint_override)
def test_get_non_existing_volume(self):
self.requests.get(self.URL + '/volumes/nonexisting',
status_code=404)
self.assertRaises(exception.VolumeNotFound, self.api.get, self.context,
'nonexisting')
def test_volume_with_image_metadata(self):
v = self.stub_volume(id='1234', volume_image_metadata=_image_metadata)
m = self.requests.get(self.URL + '/volumes/5678', json={'volume': v})
volume = self.api.get(self.context, '5678')
self.assertThat(m.last_request.path,
matchers.EndsWith('/volumes/5678'))
self.assertIn('volume_image_metadata', volume)
self.assertEqual(_image_metadata, volume['volume_image_metadata'])
class CinderV2TestCase(BaseCinderTestCase, test.NoDBTestCase):
"""Test case for cinder volume v2 api."""
URL = "http://localhost:8776/v2/project_id"
CATALOG = [{
"type": "volumev2",
"name": "cinder",
"endpoints": [{"publicURL": URL}]
}]
def setUp(self):
super(CinderV2TestCase, self).setUp()
cinder.CONF.set_override('catalog_info',
'volumev2:cinder:publicURL', group='cinder')
self.addCleanup(cinder.CONF.reset)
def create_client(self):
c = super(CinderV2TestCase, self).create_client()
self.assertIsInstance(c, cinder_client_v2.Client)
return c
def stub_volume(self, **kwargs):
volume = {
'name': None,
'description': None,
"attachments": [],
"availability_zone": "cinderv2",
"created_at": "2013-08-10T00:00:00.000000",
"id": '00000000-0000-0000-0000-000000000000',
"metadata": {},
"size": 1,
"snapshot_id": None,
"status": "available",
"volume_type": "None",
"bootable": "true"
}
volume.update(kwargs)
return volume
def test_cinder_endpoint_template(self):
endpoint = 'http://other_host:8776/v2/%(project_id)s'
self.flags(endpoint_template=endpoint, group='cinder')
self.assertEqual('http://other_host:8776/v2/project_id',
self.create_client().client.endpoint_override)
def test_get_non_existing_volume(self):
self.requests.get(self.URL + '/volumes/nonexisting',
status_code=404)
self.assertRaises(exception.VolumeNotFound, self.api.get, self.context,
'nonexisting')
def test_volume_with_image_metadata(self):
v = self.stub_volume(id='1234', volume_image_metadata=_image_metadata)
self.requests.get(self.URL + '/volumes/5678', json={'volume': v})
volume = self.api.get(self.context, '5678')
self.assertIn('volume_image_metadata', volume)
self.assertEqual(_image_metadata, volume['volume_image_metadata'])
|
py | 1a32cc48698e063422822614e8e31777ec7c3f68 | from selenium import webdriver
chrome_options = webdriver.ChromeOptions()
chrome_options.add_argument("headless")
chrome_options.add_argument("no-sandbox")
chrome_options.add_argument("disable-dev-shm-usage")
driver = webdriver.Chrome(options=chrome_options)
base_url = "https://www.google.com/"
driver.get(base_url)
source = driver.page_source
if "I'm Feeling Lucky" in source:
print("Test passed")
else:
print("Test failed")
driver.close()
py | 1a32cc92e68fa84098c8459efe92559cd0639bfb | #!/usr/bin/env python3
import json
import os
import platform
import struct
import sys
import subprocess
def main():
message = get_message()
url = message.get("url")
args = ["mpv", "--", url] # need to remove terminal because it need to capture the output of yt-dlp
kwargs = {}
# https://developer.mozilla.org/en-US/docs/Mozilla/Add-ons/WebExtensions/Native_messaging#Closing_the_native_app
if platform.system() == "Windows":
kwargs["creationflags"] = subprocess.CREATE_BREAKAWAY_FROM_JOB
# HACK(ww): On macOS, graphical applications inherit their path from `launchd`
# rather than the default path list in `/etc/paths`. `launchd` doesn't include
# `/usr/local/bin` in its default list, which means that any installations
# of MPV and/or youtube-dl under that prefix aren't visible when spawning
# from, say, Firefox. The real fix is to modify `launchd.conf`, but that's
# invasive and maybe not what users want in the general case.
# Hence this nasty hack.
if platform.system() == "Darwin":
path = os.environ.get("PATH")
os.environ["PATH"] = f"/usr/local/bin:{path}"
    process = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs)
pOut, pErr = process.communicate() # @see https://docs.python.org/3/library/subprocess.html#subprocess.Popen.communicate
# Need to respond something to avoid "Error: An unexpected error occurred"
# in Browser Console.
if "ERROR" not in str(pOut) :
send_message("ok")
else :
send_message(pOut.decode("utf-8"))
# https://developer.mozilla.org/en-US/Add-ons/WebExtensions/Native_messaging#App_side
def get_message():
raw_length = sys.stdin.buffer.read(4)
if not raw_length:
return {}
length = struct.unpack("@I", raw_length)[0]
message = sys.stdin.buffer.read(length).decode("utf-8")
return json.loads(message)
def send_message(message):
content = json.dumps(message).encode("utf-8")
length = struct.pack("@I", len(content))
sys.stdout.buffer.write(length)
sys.stdout.buffer.write(content)
sys.stdout.buffer.flush()
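# Hedged illustration (not part of the original script): native messaging frames
# every message as a 4-byte native-order length prefix followed by UTF-8 JSON.
# For example, send_message("ok") writes
#     content = json.dumps("ok").encode("utf-8")   # b'"ok"', 4 bytes
#     length  = struct.pack("@I", len(content))    # b'\x04\x00\x00\x00' on a little-endian host
# and get_message() reads back the same framing from stdin.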
if __name__ == "__main__":
main()
|
py | 1a32ccbce61149b0a9320d807a510298c503ebde | """ @ saving utils
"""
import math
import torch
from torchvision import utils
import matplotlib.pyplot as plt
from PIL import Image
from torchvision import transforms
def numpy_grid(x, pad=0, nrow=None, uint8=True):
""" thin wrap to make_grid to return frames ready to save to file
args
pad (int [0]) same as utils.make_grid(padding)
        nrow (int [None]) # defaults to horizontally biased rectangle closest to square
uint8 (bool [True]) convert to img in range 0-255 uint8
"""
x = x.clone().detach().cpu()
nrow = nrow or int(math.sqrt(x.shape[0]))
x = ((utils.make_grid(x, nrow=nrow, padding=pad).permute(1,2,0) - x.min())/(x.max()-x.min())).numpy()
if uint8:
x = (x*255).astype("uint8")
return x
def to_image(image, save=True, show=True, pad=1):
""" util tensor to image, show, save
"""
image = numpy_grid(image, pad=pad)
if save:
im = Image.fromarray(image)
im.save(save)
print(f"saved image {save}")
if show:
plt.imshow(image)
plt.axis("off")
plt.show()
return image
def strf(x):
""" format time output
strf = lambda x: f"{int(x//86400)}D{int((x//3600)%24):02d}:{int((x//60)%60):02d}:{int(x%60):02d}s"
"""
days = int(x//86400)
hours = int((x//3600)%24)
minutes = int((x//60)%60)
seconds = int(x%60)
out = f"{minutes:02d}:{seconds:02d}"
if hours or days:
out = f"{hours:02d}:{out}"
if days:
out = f"{days}_{out}"
return out
# pylint: disable=no-member
def open_image(path, channels=3, image_size=128):
""" open img with same transforms as ddpm dataset
"""
if isinstance(path, (list, tuple)):
return torch.cat([open_image(p, channels=channels, image_size=image_size)
for p in path])
img = Image.open(path)
if channels == 1:
img = img.convert('L')
else:
img = img.convert('RGB')
transform = transforms.Compose([
transforms.Resize(image_size),
transforms.RandomHorizontalFlip(),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Lambda(lambda t: (t * 2) - 1)
])
return transform(img)[None,...]
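# Hedged usage sketch (not from the original module; "sample.png" and "grid.png"
# are placeholder paths): the helpers above are typically chained as
#     x = open_image("sample.png", image_size=128)   # [1, 3, 128, 128] tensor in [-1, 1]
#     to_image(x, save="grid.png", show=False)       # writes a rescaled uint8 grid
# Note that `save` is used directly as the output path, so pass a filename or a
# falsy value; numpy_grid() can also be called on any batch tensor to get a numpy image.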
|
py | 1a32cce4e4027d0364e58dbf562cabc6edc382a3 | from __future__ import absolute_import, unicode_literals
import os
import pytest
import kombu
from .common import (
BasicFunctionality, BaseExchangeTypes,
BaseTimeToLive, BasePriority, BaseFailover
)
def get_connection(hostname, port, vhost):
return kombu.Connection('pyamqp://{}:{}'.format(hostname, port))
def get_failover_connection(hostname, port, vhost):
return kombu.Connection(
'pyamqp://localhost:12345;pyamqp://{}:{}'.format(hostname, port)
)
@pytest.fixture()
def connection(request):
return get_connection(
hostname=os.environ.get('RABBITMQ_HOST', 'localhost'),
port=os.environ.get('RABBITMQ_5672_TCP', '5672'),
vhost=getattr(
request.config, "slaveinput", {}
).get("slaveid", None),
)
@pytest.fixture()
def failover_connection(request):
return get_failover_connection(
hostname=os.environ.get('RABBITMQ_HOST', 'localhost'),
port=os.environ.get('RABBITMQ_5672_TCP', '5672'),
vhost=getattr(
request.config, "slaveinput", {}
).get("slaveid", None),
)
@pytest.mark.env('py-amqp')
@pytest.mark.flaky(reruns=5, reruns_delay=2)
class test_PyAMQPBasicFunctionality(BasicFunctionality):
pass
@pytest.mark.env('py-amqp')
@pytest.mark.flaky(reruns=5, reruns_delay=2)
class test_PyAMQPBaseExchangeTypes(BaseExchangeTypes):
pass
@pytest.mark.env('py-amqp')
@pytest.mark.flaky(reruns=5, reruns_delay=2)
class test_PyAMQPTimeToLive(BaseTimeToLive):
pass
@pytest.mark.env('py-amqp')
@pytest.mark.flaky(reruns=5, reruns_delay=2)
class test_PyAMQPPriority(BasePriority):
pass
@pytest.mark.env('py-amqp')
@pytest.mark.flaky(reruns=5, reruns_delay=2)
class test_PyAMQPFailover(BaseFailover):
pass
|
py | 1a32cda512ca87f0794d66ca1617d840568dd523 | """
Functions for applying functions that act on arrays to xarray's labeled data.
"""
from __future__ import annotations
import functools
import itertools
import operator
import warnings
from collections import Counter
from typing import (
TYPE_CHECKING,
AbstractSet,
Any,
Callable,
Hashable,
Iterable,
Mapping,
Sequence,
overload,
)
import numpy as np
from . import dtypes, duck_array_ops, utils
from .alignment import align, deep_align
from .common import zeros_like
from .duck_array_ops import datetime_to_numeric
from .indexes import Index, filter_indexes_from_coords
from .merge import merge_attrs, merge_coordinates_without_align
from .options import OPTIONS, _get_keep_attrs
from .pycompat import is_duck_dask_array
from .utils import is_dict_like
from .variable import Variable
if TYPE_CHECKING:
from .coordinates import Coordinates
from .dataarray import DataArray
from .dataset import Dataset
from .types import T_Xarray
_NO_FILL_VALUE = utils.ReprObject("<no-fill-value>")
_DEFAULT_NAME = utils.ReprObject("<default-name>")
_JOINS_WITHOUT_FILL_VALUES = frozenset({"inner", "exact"})
def _first_of_type(args, kind):
"""Return either first object of type 'kind' or raise if not found."""
for arg in args:
if isinstance(arg, kind):
return arg
raise ValueError("This should be unreachable.")
def _all_of_type(args, kind):
"""Return all objects of type 'kind'"""
return [arg for arg in args if isinstance(arg, kind)]
class _UFuncSignature:
"""Core dimensions signature for a given function.
Based on the signature provided by generalized ufuncs in NumPy.
Attributes
----------
input_core_dims : tuple[tuple]
Core dimension names on each input variable.
output_core_dims : tuple[tuple]
Core dimension names on each output variable.
"""
__slots__ = (
"input_core_dims",
"output_core_dims",
"_all_input_core_dims",
"_all_output_core_dims",
"_all_core_dims",
)
def __init__(self, input_core_dims, output_core_dims=((),)):
self.input_core_dims = tuple(tuple(a) for a in input_core_dims)
self.output_core_dims = tuple(tuple(a) for a in output_core_dims)
self._all_input_core_dims = None
self._all_output_core_dims = None
self._all_core_dims = None
@property
def all_input_core_dims(self):
if self._all_input_core_dims is None:
self._all_input_core_dims = frozenset(
dim for dims in self.input_core_dims for dim in dims
)
return self._all_input_core_dims
@property
def all_output_core_dims(self):
if self._all_output_core_dims is None:
self._all_output_core_dims = frozenset(
dim for dims in self.output_core_dims for dim in dims
)
return self._all_output_core_dims
@property
def all_core_dims(self):
if self._all_core_dims is None:
self._all_core_dims = self.all_input_core_dims | self.all_output_core_dims
return self._all_core_dims
@property
def dims_map(self):
return {
core_dim: f"dim{n}" for n, core_dim in enumerate(sorted(self.all_core_dims))
}
@property
def num_inputs(self):
return len(self.input_core_dims)
@property
def num_outputs(self):
return len(self.output_core_dims)
def __eq__(self, other):
try:
return (
self.input_core_dims == other.input_core_dims
and self.output_core_dims == other.output_core_dims
)
except AttributeError:
return False
def __ne__(self, other):
return not self == other
def __repr__(self):
return "{}({!r}, {!r})".format(
type(self).__name__,
list(self.input_core_dims),
list(self.output_core_dims),
)
def __str__(self):
lhs = ",".join("({})".format(",".join(dims)) for dims in self.input_core_dims)
rhs = ",".join("({})".format(",".join(dims)) for dims in self.output_core_dims)
return f"{lhs}->{rhs}"
def to_gufunc_string(self, exclude_dims=frozenset()):
"""Create an equivalent signature string for a NumPy gufunc.
Unlike __str__, handles dimensions that don't map to Python
identifiers.
Also creates unique names for input_core_dims contained in exclude_dims.
"""
input_core_dims = [
[self.dims_map[dim] for dim in core_dims]
for core_dims in self.input_core_dims
]
output_core_dims = [
[self.dims_map[dim] for dim in core_dims]
for core_dims in self.output_core_dims
]
# enumerate input_core_dims contained in exclude_dims to make them unique
if exclude_dims:
exclude_dims = [self.dims_map[dim] for dim in exclude_dims]
counter = Counter()
def _enumerate(dim):
if dim in exclude_dims:
n = counter[dim]
counter.update([dim])
dim = f"{dim}_{n}"
return dim
input_core_dims = [
[_enumerate(dim) for dim in arg] for arg in input_core_dims
]
alt_signature = type(self)(input_core_dims, output_core_dims)
return str(alt_signature)
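# Hedged illustration (not part of the original source): a reduction over "time"
# corresponds to _UFuncSignature([["time"]], [[]]); str() renders it as
# "(time)->()", while to_gufunc_string() renames dimensions to positional gufunc
# names, e.g. "(dim0)->()", so the signature can be handed to dask.array.apply_gufunc.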
def result_name(objects: list) -> Any:
# use the same naming heuristics as pandas:
# https://github.com/blaze/blaze/issues/458#issuecomment-51936356
names = {getattr(obj, "name", _DEFAULT_NAME) for obj in objects}
names.discard(_DEFAULT_NAME)
if len(names) == 1:
(name,) = names
else:
name = None
return name
def _get_coords_list(args) -> list[Coordinates]:
coords_list = []
for arg in args:
try:
coords = arg.coords
except AttributeError:
pass # skip this argument
else:
coords_list.append(coords)
return coords_list
def build_output_coords_and_indexes(
args: list,
signature: _UFuncSignature,
exclude_dims: AbstractSet = frozenset(),
combine_attrs: str = "override",
) -> tuple[list[dict[Any, Variable]], list[dict[Any, Index]]]:
"""Build output coordinates and indexes for an operation.
Parameters
----------
args : list
List of raw operation arguments. Any valid types for xarray operations
are OK, e.g., scalars, Variable, DataArray, Dataset.
signature : _UfuncSignature
Core dimensions signature for the operation.
exclude_dims : set, optional
Dimensions excluded from the operation. Coordinates along these
dimensions are dropped.
Returns
-------
Dictionaries of Variable and Index objects with merged coordinates.
"""
coords_list = _get_coords_list(args)
if len(coords_list) == 1 and not exclude_dims:
# we can skip the expensive merge
(unpacked_coords,) = coords_list
merged_vars = dict(unpacked_coords.variables)
merged_indexes = dict(unpacked_coords.xindexes)
else:
merged_vars, merged_indexes = merge_coordinates_without_align(
coords_list, exclude_dims=exclude_dims, combine_attrs=combine_attrs
)
output_coords = []
output_indexes = []
for output_dims in signature.output_core_dims:
dropped_dims = signature.all_input_core_dims - set(output_dims)
if dropped_dims:
filtered_coords = {
k: v for k, v in merged_vars.items() if dropped_dims.isdisjoint(v.dims)
}
filtered_indexes = filter_indexes_from_coords(
merged_indexes, set(filtered_coords)
)
else:
filtered_coords = merged_vars
filtered_indexes = merged_indexes
output_coords.append(filtered_coords)
output_indexes.append(filtered_indexes)
return output_coords, output_indexes
def apply_dataarray_vfunc(
func,
*args,
signature,
join="inner",
exclude_dims=frozenset(),
keep_attrs="override",
):
"""Apply a variable level function over DataArray, Variable and/or ndarray
objects.
"""
from .dataarray import DataArray
if len(args) > 1:
args = deep_align(
args, join=join, copy=False, exclude=exclude_dims, raise_on_invalid=False
)
objs = _all_of_type(args, DataArray)
if keep_attrs == "drop":
name = result_name(args)
else:
first_obj = _first_of_type(args, DataArray)
name = first_obj.name
result_coords, result_indexes = build_output_coords_and_indexes(
args, signature, exclude_dims, combine_attrs=keep_attrs
)
data_vars = [getattr(a, "variable", a) for a in args]
result_var = func(*data_vars)
if signature.num_outputs > 1:
out = tuple(
DataArray(
variable, coords=coords, indexes=indexes, name=name, fastpath=True
)
for variable, coords, indexes in zip(
result_var, result_coords, result_indexes
)
)
else:
(coords,) = result_coords
(indexes,) = result_indexes
out = DataArray(
result_var, coords=coords, indexes=indexes, name=name, fastpath=True
)
attrs = merge_attrs([x.attrs for x in objs], combine_attrs=keep_attrs)
if isinstance(out, tuple):
for da in out:
da.attrs = attrs
else:
out.attrs = attrs
return out
def ordered_set_union(all_keys: list[Iterable]) -> Iterable:
return {key: None for keys in all_keys for key in keys}.keys()
def ordered_set_intersection(all_keys: list[Iterable]) -> Iterable:
intersection = set(all_keys[0])
for keys in all_keys[1:]:
intersection.intersection_update(keys)
return [key for key in all_keys[0] if key in intersection]
def assert_and_return_exact_match(all_keys):
first_keys = all_keys[0]
for keys in all_keys[1:]:
if keys != first_keys:
raise ValueError(
"exact match required for all data variable names, "
f"but {keys!r} != {first_keys!r}"
)
return first_keys
_JOINERS: dict[str, Callable] = {
"inner": ordered_set_intersection,
"outer": ordered_set_union,
"left": operator.itemgetter(0),
"right": operator.itemgetter(-1),
"exact": assert_and_return_exact_match,
}
def join_dict_keys(objects: Iterable[Mapping | Any], how: str = "inner") -> Iterable:
joiner = _JOINERS[how]
all_keys = [obj.keys() for obj in objects if hasattr(obj, "keys")]
return joiner(all_keys)
def collect_dict_values(
objects: Iterable[Mapping | Any], keys: Iterable, fill_value: object = None
) -> list[list]:
return [
[obj.get(key, fill_value) if is_dict_like(obj) else obj for obj in objects]
for key in keys
]
def _as_variables_or_variable(arg):
try:
return arg.variables
except AttributeError:
try:
return arg.variable
except AttributeError:
return arg
def _unpack_dict_tuples(
result_vars: Mapping[Any, tuple[Variable, ...]], num_outputs: int
) -> tuple[dict[Hashable, Variable], ...]:
out: tuple[dict[Hashable, Variable], ...] = tuple({} for _ in range(num_outputs))
for name, values in result_vars.items():
for value, results_dict in zip(values, out):
results_dict[name] = value
return out
def apply_dict_of_variables_vfunc(
func, *args, signature, join="inner", fill_value=None
):
"""Apply a variable level function over dicts of DataArray, DataArray,
Variable and ndarray objects.
"""
args = [_as_variables_or_variable(arg) for arg in args]
names = join_dict_keys(args, how=join)
grouped_by_name = collect_dict_values(args, names, fill_value)
result_vars = {}
for name, variable_args in zip(names, grouped_by_name):
result_vars[name] = func(*variable_args)
if signature.num_outputs > 1:
return _unpack_dict_tuples(result_vars, signature.num_outputs)
else:
return result_vars
def _fast_dataset(
variables: dict[Hashable, Variable],
coord_variables: Mapping[Hashable, Variable],
indexes: dict[Hashable, Index],
) -> Dataset:
"""Create a dataset as quickly as possible.
Beware: the `variables` dict is modified INPLACE.
"""
from .dataset import Dataset
variables.update(coord_variables)
coord_names = set(coord_variables)
return Dataset._construct_direct(variables, coord_names, indexes=indexes)
def apply_dataset_vfunc(
func,
*args,
signature,
join="inner",
dataset_join="exact",
fill_value=_NO_FILL_VALUE,
exclude_dims=frozenset(),
keep_attrs="override",
):
"""Apply a variable level function over Dataset, dict of DataArray,
DataArray, Variable and/or ndarray objects.
"""
from .dataset import Dataset
if dataset_join not in _JOINS_WITHOUT_FILL_VALUES and fill_value is _NO_FILL_VALUE:
raise TypeError(
"to apply an operation to datasets with different "
"data variables with apply_ufunc, you must supply the "
"dataset_fill_value argument."
)
objs = _all_of_type(args, Dataset)
if len(args) > 1:
args = deep_align(
args, join=join, copy=False, exclude=exclude_dims, raise_on_invalid=False
)
list_of_coords, list_of_indexes = build_output_coords_and_indexes(
args, signature, exclude_dims, combine_attrs=keep_attrs
)
args = [getattr(arg, "data_vars", arg) for arg in args]
result_vars = apply_dict_of_variables_vfunc(
func, *args, signature=signature, join=dataset_join, fill_value=fill_value
)
if signature.num_outputs > 1:
out = tuple(
_fast_dataset(*args)
for args in zip(result_vars, list_of_coords, list_of_indexes)
)
else:
(coord_vars,) = list_of_coords
(indexes,) = list_of_indexes
out = _fast_dataset(result_vars, coord_vars, indexes=indexes)
attrs = merge_attrs([x.attrs for x in objs], combine_attrs=keep_attrs)
if isinstance(out, tuple):
for ds in out:
ds.attrs = attrs
else:
out.attrs = attrs
return out
def _iter_over_selections(obj, dim, values):
"""Iterate over selections of an xarray object in the provided order."""
from .groupby import _dummy_copy
dummy = None
for value in values:
try:
obj_sel = obj.sel(**{dim: value})
except (KeyError, IndexError):
if dummy is None:
dummy = _dummy_copy(obj)
obj_sel = dummy
yield obj_sel
def apply_groupby_func(func, *args):
"""Apply a dataset or datarray level function over GroupBy, Dataset,
DataArray, Variable and/or ndarray objects.
"""
from .groupby import GroupBy, peek_at
from .variable import Variable
groupbys = [arg for arg in args if isinstance(arg, GroupBy)]
assert groupbys, "must have at least one groupby to iterate over"
first_groupby = groupbys[0]
if any(not first_groupby._group.equals(gb._group) for gb in groupbys[1:]):
raise ValueError(
"apply_ufunc can only perform operations over "
"multiple GroupBy objects at once if they are all "
"grouped the same way"
)
grouped_dim = first_groupby._group.name
unique_values = first_groupby._unique_coord.values
iterators = []
for arg in args:
if isinstance(arg, GroupBy):
iterator = (value for _, value in arg)
elif hasattr(arg, "dims") and grouped_dim in arg.dims:
if isinstance(arg, Variable):
raise ValueError(
"groupby operations cannot be performed with "
"xarray.Variable objects that share a dimension with "
"the grouped dimension"
)
iterator = _iter_over_selections(arg, grouped_dim, unique_values)
else:
iterator = itertools.repeat(arg)
iterators.append(iterator)
applied = (func(*zipped_args) for zipped_args in zip(*iterators))
applied_example, applied = peek_at(applied)
combine = first_groupby._combine
if isinstance(applied_example, tuple):
combined = tuple(combine(output) for output in zip(*applied))
else:
combined = combine(applied)
return combined
def unified_dim_sizes(
variables: Iterable[Variable], exclude_dims: AbstractSet = frozenset()
) -> dict[Hashable, int]:
dim_sizes: dict[Hashable, int] = {}
for var in variables:
if len(set(var.dims)) < len(var.dims):
raise ValueError(
"broadcasting cannot handle duplicate "
f"dimensions on a variable: {list(var.dims)}"
)
for dim, size in zip(var.dims, var.shape):
if dim not in exclude_dims:
if dim not in dim_sizes:
dim_sizes[dim] = size
elif dim_sizes[dim] != size:
raise ValueError(
"operands cannot be broadcast together "
"with mismatched lengths for dimension "
f"{dim}: {dim_sizes[dim]} vs {size}"
)
return dim_sizes
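# Hedged example (not from the original source): for variables with dims
# ("x", "y") of shape (2, 3) and ("y",) of shape (3,), unified_dim_sizes returns
# {"x": 2, "y": 3}; a further variable with "y" of size 4 would trigger the
# mismatched-lengths ValueError above.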
SLICE_NONE = slice(None)
def broadcast_compat_data(
variable: Variable,
broadcast_dims: tuple[Hashable, ...],
core_dims: tuple[Hashable, ...],
) -> Any:
data = variable.data
old_dims = variable.dims
new_dims = broadcast_dims + core_dims
if new_dims == old_dims:
# optimize for the typical case
return data
set_old_dims = set(old_dims)
missing_core_dims = [d for d in core_dims if d not in set_old_dims]
if missing_core_dims:
raise ValueError(
"operand to apply_ufunc has required core dimensions {}, but "
"some of these dimensions are absent on an input variable: {}".format(
list(core_dims), missing_core_dims
)
)
set_new_dims = set(new_dims)
unexpected_dims = [d for d in old_dims if d not in set_new_dims]
if unexpected_dims:
raise ValueError(
"operand to apply_ufunc encountered unexpected "
f"dimensions {unexpected_dims!r} on an input variable: these are core "
"dimensions on other input or output variables"
)
# for consistency with numpy, keep broadcast dimensions to the left
old_broadcast_dims = tuple(d for d in broadcast_dims if d in set_old_dims)
reordered_dims = old_broadcast_dims + core_dims
if reordered_dims != old_dims:
order = tuple(old_dims.index(d) for d in reordered_dims)
data = duck_array_ops.transpose(data, order)
if new_dims != reordered_dims:
key_parts: list[slice | None] = []
for dim in new_dims:
if dim in set_old_dims:
key_parts.append(SLICE_NONE)
elif key_parts:
# no need to insert new axes at the beginning that are already
# handled by broadcasting
key_parts.append(np.newaxis)
data = data[tuple(key_parts)]
return data
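# Hedged illustration (not from the original source): with broadcast_dims=("t",)
# and core_dims=("x",), a Variable with dims ("x", "t") is transposed to
# ("t", "x") so its core dimension ends up last; a Variable with dims ("x",)
# comes back unchanged, since missing broadcast dimensions on the left are
# handled by NumPy broadcasting.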
def _vectorize(func, signature, output_dtypes, exclude_dims):
if signature.all_core_dims:
func = np.vectorize(
func,
otypes=output_dtypes,
signature=signature.to_gufunc_string(exclude_dims),
)
else:
func = np.vectorize(func, otypes=output_dtypes)
return func
def apply_variable_ufunc(
func,
*args,
signature,
exclude_dims=frozenset(),
dask="forbidden",
output_dtypes=None,
vectorize=False,
keep_attrs="override",
dask_gufunc_kwargs=None,
):
"""Apply a ndarray level function over Variable and/or ndarray objects."""
from .variable import Variable, as_compatible_data
dim_sizes = unified_dim_sizes(
(a for a in args if hasattr(a, "dims")), exclude_dims=exclude_dims
)
broadcast_dims = tuple(
dim for dim in dim_sizes if dim not in signature.all_core_dims
)
output_dims = [broadcast_dims + out for out in signature.output_core_dims]
input_data = [
broadcast_compat_data(arg, broadcast_dims, core_dims)
if isinstance(arg, Variable)
else arg
for arg, core_dims in zip(args, signature.input_core_dims)
]
if any(is_duck_dask_array(array) for array in input_data):
if dask == "forbidden":
raise ValueError(
"apply_ufunc encountered a dask array on an "
"argument, but handling for dask arrays has not "
"been enabled. Either set the ``dask`` argument "
"or load your data into memory first with "
"``.load()`` or ``.compute()``"
)
elif dask == "parallelized":
numpy_func = func
if dask_gufunc_kwargs is None:
dask_gufunc_kwargs = {}
else:
dask_gufunc_kwargs = dask_gufunc_kwargs.copy()
allow_rechunk = dask_gufunc_kwargs.get("allow_rechunk", None)
if allow_rechunk is None:
for n, (data, core_dims) in enumerate(
zip(input_data, signature.input_core_dims)
):
if is_duck_dask_array(data):
# core dimensions cannot span multiple chunks
for axis, dim in enumerate(core_dims, start=-len(core_dims)):
if len(data.chunks[axis]) != 1:
raise ValueError(
f"dimension {dim} on {n}th function argument to "
"apply_ufunc with dask='parallelized' consists of "
"multiple chunks, but is also a core dimension. To "
"fix, either rechunk into a single dask array chunk along "
f"this dimension, i.e., ``.chunk(dict({dim}=-1))``, or "
"pass ``allow_rechunk=True`` in ``dask_gufunc_kwargs`` "
"but beware that this may significantly increase memory usage."
)
dask_gufunc_kwargs["allow_rechunk"] = True
output_sizes = dask_gufunc_kwargs.pop("output_sizes", {})
if output_sizes:
output_sizes_renamed = {}
for key, value in output_sizes.items():
if key not in signature.all_output_core_dims:
raise ValueError(
f"dimension '{key}' in 'output_sizes' must correspond to output_core_dims"
)
output_sizes_renamed[signature.dims_map[key]] = value
dask_gufunc_kwargs["output_sizes"] = output_sizes_renamed
for key in signature.all_output_core_dims:
if key not in signature.all_input_core_dims and key not in output_sizes:
raise ValueError(
f"dimension '{key}' in 'output_core_dims' needs corresponding (dim, size) in 'output_sizes'"
)
def func(*arrays):
import dask.array as da
res = da.apply_gufunc(
numpy_func,
signature.to_gufunc_string(exclude_dims),
*arrays,
vectorize=vectorize,
output_dtypes=output_dtypes,
**dask_gufunc_kwargs,
)
return res
elif dask == "allowed":
pass
else:
raise ValueError(
"unknown setting for dask array handling in "
"apply_ufunc: {}".format(dask)
)
else:
if vectorize:
func = _vectorize(
func, signature, output_dtypes=output_dtypes, exclude_dims=exclude_dims
)
result_data = func(*input_data)
if signature.num_outputs == 1:
result_data = (result_data,)
elif (
not isinstance(result_data, tuple) or len(result_data) != signature.num_outputs
):
raise ValueError(
"applied function does not have the number of "
"outputs specified in the ufunc signature. "
"Result is not a tuple of {} elements: {!r}".format(
signature.num_outputs, result_data
)
)
objs = _all_of_type(args, Variable)
attrs = merge_attrs(
[obj.attrs for obj in objs],
combine_attrs=keep_attrs,
)
output = []
for dims, data in zip(output_dims, result_data):
data = as_compatible_data(data)
if data.ndim != len(dims):
raise ValueError(
"applied function returned data with unexpected "
f"number of dimensions. Received {data.ndim} dimension(s) but "
f"expected {len(dims)} dimensions with names: {dims!r}"
)
var = Variable(dims, data, fastpath=True)
for dim, new_size in var.sizes.items():
if dim in dim_sizes and new_size != dim_sizes[dim]:
raise ValueError(
"size of dimension {!r} on inputs was unexpectedly "
"changed by applied function from {} to {}. Only "
"dimensions specified in ``exclude_dims`` with "
"xarray.apply_ufunc are allowed to change size.".format(
dim, dim_sizes[dim], new_size
)
)
var.attrs = attrs
output.append(var)
if signature.num_outputs == 1:
return output[0]
else:
return tuple(output)
def apply_array_ufunc(func, *args, dask="forbidden"):
"""Apply a ndarray level function over ndarray objects."""
if any(is_duck_dask_array(arg) for arg in args):
if dask == "forbidden":
raise ValueError(
"apply_ufunc encountered a dask array on an "
"argument, but handling for dask arrays has not "
"been enabled. Either set the ``dask`` argument "
"or load your data into memory first with "
"``.load()`` or ``.compute()``"
)
elif dask == "parallelized":
raise ValueError(
"cannot use dask='parallelized' for apply_ufunc "
"unless at least one input is an xarray object"
)
elif dask == "allowed":
pass
else:
raise ValueError(f"unknown setting for dask array handling: {dask}")
return func(*args)
def apply_ufunc(
func: Callable,
*args: Any,
input_core_dims: Sequence[Sequence] = None,
output_core_dims: Sequence[Sequence] | None = ((),),
exclude_dims: AbstractSet = frozenset(),
vectorize: bool = False,
join: str = "exact",
dataset_join: str = "exact",
dataset_fill_value: object = _NO_FILL_VALUE,
keep_attrs: bool | str | None = None,
kwargs: Mapping | None = None,
dask: str = "forbidden",
output_dtypes: Sequence | None = None,
output_sizes: Mapping[Any, int] | None = None,
meta: Any = None,
dask_gufunc_kwargs: dict[str, Any] | None = None,
) -> Any:
"""Apply a vectorized function for unlabeled arrays on xarray objects.
The function will be mapped over the data variable(s) of the input
arguments using xarray's standard rules for labeled computation, including
alignment, broadcasting, looping over GroupBy/Dataset variables, and
merging of coordinates.
Parameters
----------
func : callable
Function to call like ``func(*args, **kwargs)`` on unlabeled arrays
(``.data``) that returns an array or tuple of arrays. If multiple
arguments with non-matching dimensions are supplied, this function is
expected to vectorize (broadcast) over axes of positional arguments in
the style of NumPy universal functions [1]_ (if this is not the case,
set ``vectorize=True``). If this function returns multiple outputs, you
must set ``output_core_dims`` as well.
*args : Dataset, DataArray, DataArrayGroupBy, DatasetGroupBy, Variable, numpy.ndarray, dask.array.Array or scalar
Mix of labeled and/or unlabeled arrays to which to apply the function.
input_core_dims : sequence of sequence, optional
List of the same length as ``args`` giving the list of core dimensions
on each input argument that should not be broadcast. By default, we
assume there are no core dimensions on any input arguments.
For example, ``input_core_dims=[[], ['time']]`` indicates that all
dimensions on the first argument and all dimensions other than 'time'
on the second argument should be broadcast.
Core dimensions are automatically moved to the last axes of input
variables before applying ``func``, which facilitates using NumPy style
generalized ufuncs [2]_.
output_core_dims : list of tuple, optional
List of the same length as the number of output arguments from
``func``, giving the list of core dimensions on each output that were
not broadcast on the inputs. By default, we assume that ``func``
outputs exactly one array, with axes corresponding to each broadcast
dimension.
Core dimensions are assumed to appear as the last dimensions of each
output in the provided order.
exclude_dims : set, optional
Core dimensions on the inputs to exclude from alignment and
broadcasting entirely. Any input coordinates along these dimensions
will be dropped. Each excluded dimension must also appear in
``input_core_dims`` for at least one argument. Only dimensions listed
here are allowed to change size between input and output objects.
vectorize : bool, optional
If True, then assume ``func`` only takes arrays defined over core
dimensions as input and vectorize it automatically with
:py:func:`numpy.vectorize`. This option exists for convenience, but is
almost always slower than supplying a pre-vectorized function.
Using this option requires NumPy version 1.12 or newer.
join : {"outer", "inner", "left", "right", "exact"}, default: "exact"
Method for joining the indexes of the passed objects along each
dimension, and the variables of Dataset objects with mismatched
data variables:
- 'outer': use the union of object indexes
- 'inner': use the intersection of object indexes
- 'left': use indexes from the first object with each dimension
- 'right': use indexes from the last object with each dimension
- 'exact': raise `ValueError` instead of aligning when indexes to be
aligned are not equal
dataset_join : {"outer", "inner", "left", "right", "exact"}, default: "exact"
Method for joining variables of Dataset objects with mismatched
data variables.
- 'outer': take variables from both Dataset objects
- 'inner': take only overlapped variables
- 'left': take only variables from the first object
- 'right': take only variables from the last object
- 'exact': data variables on all Dataset objects must match exactly
dataset_fill_value : optional
Value used in place of missing variables on Dataset inputs when the
datasets do not share the exact same ``data_vars``. Required if
``dataset_join not in {'inner', 'exact'}``, otherwise ignored.
keep_attrs : bool, optional
Whether to copy attributes from the first argument to the output.
kwargs : dict, optional
Optional keyword arguments passed directly on to call ``func``.
dask : {"forbidden", "allowed", "parallelized"}, default: "forbidden"
How to handle applying to objects containing lazy data in the form of
dask arrays:
- 'forbidden' (default): raise an error if a dask array is encountered.
- 'allowed': pass dask arrays directly on to ``func``. Prefer this option if
``func`` natively supports dask arrays.
- 'parallelized': automatically parallelize ``func`` if any of the
inputs are a dask array by using :py:func:`dask.array.apply_gufunc`. Multiple output
arguments are supported. Only use this option if ``func`` does not natively
support dask arrays (e.g. converts them to numpy arrays).
dask_gufunc_kwargs : dict, optional
Optional keyword arguments passed to :py:func:`dask.array.apply_gufunc` if
dask='parallelized'. Possible keywords are ``output_sizes``, ``allow_rechunk``
and ``meta``.
output_dtypes : list of dtype, optional
Optional list of output dtypes. Only used if ``dask='parallelized'`` or
``vectorize=True``.
output_sizes : dict, optional
Optional mapping from dimension names to sizes for outputs. Only used
if dask='parallelized' and new dimensions (not found on inputs) appear
on outputs. ``output_sizes`` should be given in the ``dask_gufunc_kwargs``
        parameter. It will be removed as a direct parameter in a future version.
meta : optional
Size-0 object representing the type of array wrapped by dask array. Passed on to
:py:func:`dask.array.apply_gufunc`. ``meta`` should be given in the
        ``dask_gufunc_kwargs`` parameter. It will be removed as a direct parameter
        in a future version.
Returns
-------
Single value or tuple of Dataset, DataArray, Variable, dask.array.Array or
numpy.ndarray, the first type on that list to appear on an input.
Notes
-----
This function is designed for the more common case where ``func`` can work on numpy
arrays. If ``func`` needs to manipulate a whole xarray object subset to each block
it is possible to use :py:func:`xarray.map_blocks`.
Note that due to the overhead :py:func:`xarray.map_blocks` is considerably slower than ``apply_ufunc``.
Examples
--------
Calculate the vector magnitude of two arguments:
>>> def magnitude(a, b):
... func = lambda x, y: np.sqrt(x**2 + y**2)
... return xr.apply_ufunc(func, a, b)
...
You can now apply ``magnitude()`` to :py:class:`DataArray` and :py:class:`Dataset`
objects, with automatically preserved dimensions and coordinates, e.g.,
>>> array = xr.DataArray([1, 2, 3], coords=[("x", [0.1, 0.2, 0.3])])
>>> magnitude(array, -array)
<xarray.DataArray (x: 3)>
array([1.41421356, 2.82842712, 4.24264069])
Coordinates:
* x (x) float64 0.1 0.2 0.3
    Plain scalars, numpy arrays and a mix of these with xarray objects are also
supported:
>>> magnitude(3, 4)
5.0
>>> magnitude(3, np.array([0, 4]))
array([3., 5.])
>>> magnitude(array, 0)
<xarray.DataArray (x: 3)>
array([1., 2., 3.])
Coordinates:
* x (x) float64 0.1 0.2 0.3
Other examples of how you could use ``apply_ufunc`` to write functions to
(very nearly) replicate existing xarray functionality:
Compute the mean (``.mean``) over one dimension:
>>> def mean(obj, dim):
... # note: apply always moves core dimensions to the end
... return apply_ufunc(
... np.mean, obj, input_core_dims=[[dim]], kwargs={"axis": -1}
... )
...
Inner product over a specific dimension (like :py:func:`dot`):
>>> def _inner(x, y):
... result = np.matmul(x[..., np.newaxis, :], y[..., :, np.newaxis])
... return result[..., 0, 0]
...
>>> def inner_product(a, b, dim):
... return apply_ufunc(_inner, a, b, input_core_dims=[[dim], [dim]])
...
Stack objects along a new dimension (like :py:func:`concat`):
>>> def stack(objects, dim, new_coord):
... # note: this version does not stack coordinates
... func = lambda *x: np.stack(x, axis=-1)
... result = apply_ufunc(
... func,
... *objects,
... output_core_dims=[[dim]],
... join="outer",
... dataset_fill_value=np.nan
... )
... result[dim] = new_coord
... return result
...
If your function is not vectorized but can be applied only to core
dimensions, you can use ``vectorize=True`` to turn into a vectorized
function. This wraps :py:func:`numpy.vectorize`, so the operation isn't
terribly fast. Here we'll use it to calculate the distance between
empirical samples from two probability distributions, using a scipy
function that needs to be applied to vectors:
>>> import scipy.stats
>>> def earth_mover_distance(first_samples, second_samples, dim="ensemble"):
... return apply_ufunc(
... scipy.stats.wasserstein_distance,
... first_samples,
... second_samples,
... input_core_dims=[[dim], [dim]],
... vectorize=True,
... )
...
Most of NumPy's builtin functions already broadcast their inputs
appropriately for use in ``apply_ufunc``. You may find helper functions such as
:py:func:`numpy.broadcast_arrays` helpful in writing your function. ``apply_ufunc`` also
works well with :py:func:`numba.vectorize` and :py:func:`numba.guvectorize`.
See Also
--------
numpy.broadcast_arrays
numba.vectorize
numba.guvectorize
dask.array.apply_gufunc
xarray.map_blocks
:ref:`dask.automatic-parallelization`
User guide describing :py:func:`apply_ufunc` and :py:func:`map_blocks`.
References
----------
.. [1] https://numpy.org/doc/stable/reference/ufuncs.html
.. [2] https://numpy.org/doc/stable/reference/c-api/generalized-ufuncs.html
"""
from .dataarray import DataArray
from .groupby import GroupBy
from .variable import Variable
if input_core_dims is None:
input_core_dims = ((),) * (len(args))
elif len(input_core_dims) != len(args):
raise ValueError(
f"input_core_dims must be None or a tuple with the length same to "
f"the number of arguments. "
f"Given {len(input_core_dims)} input_core_dims: {input_core_dims}, "
f" but number of args is {len(args)}."
)
if kwargs is None:
kwargs = {}
signature = _UFuncSignature(input_core_dims, output_core_dims)
if exclude_dims:
if not isinstance(exclude_dims, set):
raise TypeError(
f"Expected exclude_dims to be a 'set'. Received '{type(exclude_dims).__name__}' instead."
)
if not exclude_dims <= signature.all_core_dims:
raise ValueError(
f"each dimension in `exclude_dims` must also be a "
f"core dimension in the function signature. "
f"Please make {(exclude_dims - signature.all_core_dims)} a core dimension"
)
# handle dask_gufunc_kwargs
if dask == "parallelized":
if dask_gufunc_kwargs is None:
dask_gufunc_kwargs = {}
else:
dask_gufunc_kwargs = dask_gufunc_kwargs.copy()
# todo: remove warnings after deprecation cycle
if meta is not None:
warnings.warn(
"``meta`` should be given in the ``dask_gufunc_kwargs`` parameter."
" It will be removed as direct parameter in a future version.",
FutureWarning,
stacklevel=2,
)
dask_gufunc_kwargs.setdefault("meta", meta)
if output_sizes is not None:
warnings.warn(
"``output_sizes`` should be given in the ``dask_gufunc_kwargs`` "
"parameter. It will be removed as direct parameter in a future "
"version.",
FutureWarning,
stacklevel=2,
)
dask_gufunc_kwargs.setdefault("output_sizes", output_sizes)
if kwargs:
func = functools.partial(func, **kwargs)
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
if isinstance(keep_attrs, bool):
keep_attrs = "override" if keep_attrs else "drop"
variables_vfunc = functools.partial(
apply_variable_ufunc,
func,
signature=signature,
exclude_dims=exclude_dims,
keep_attrs=keep_attrs,
dask=dask,
vectorize=vectorize,
output_dtypes=output_dtypes,
dask_gufunc_kwargs=dask_gufunc_kwargs,
)
# feed groupby-apply_ufunc through apply_groupby_func
if any(isinstance(a, GroupBy) for a in args):
this_apply = functools.partial(
apply_ufunc,
func,
input_core_dims=input_core_dims,
output_core_dims=output_core_dims,
exclude_dims=exclude_dims,
join=join,
dataset_join=dataset_join,
dataset_fill_value=dataset_fill_value,
keep_attrs=keep_attrs,
dask=dask,
vectorize=vectorize,
output_dtypes=output_dtypes,
dask_gufunc_kwargs=dask_gufunc_kwargs,
)
return apply_groupby_func(this_apply, *args)
# feed datasets apply_variable_ufunc through apply_dataset_vfunc
elif any(is_dict_like(a) for a in args):
return apply_dataset_vfunc(
variables_vfunc,
*args,
signature=signature,
join=join,
exclude_dims=exclude_dims,
dataset_join=dataset_join,
fill_value=dataset_fill_value,
keep_attrs=keep_attrs,
)
# feed DataArray apply_variable_ufunc through apply_dataarray_vfunc
elif any(isinstance(a, DataArray) for a in args):
return apply_dataarray_vfunc(
variables_vfunc,
*args,
signature=signature,
join=join,
exclude_dims=exclude_dims,
keep_attrs=keep_attrs,
)
# feed Variables directly through apply_variable_ufunc
elif any(isinstance(a, Variable) for a in args):
return variables_vfunc(*args)
else:
# feed anything else through apply_array_ufunc
return apply_array_ufunc(func, *args, dask=dask)
def cov(da_a, da_b, dim=None, ddof=1):
"""
Compute covariance between two DataArray objects along a shared dimension.
Parameters
----------
da_a : DataArray
Array to compute.
da_b : DataArray
Array to compute.
dim : str, optional
The dimension along which the covariance will be computed
ddof : int, optional
If ddof=1, covariance is normalized by N-1, giving an unbiased estimate,
else normalization is by N.
Returns
-------
covariance : DataArray
See Also
--------
pandas.Series.cov : corresponding pandas function
xarray.corr : respective function to calculate correlation
Examples
--------
>>> from xarray import DataArray
>>> da_a = DataArray(
... np.array([[1, 2, 3], [0.1, 0.2, 0.3], [3.2, 0.6, 1.8]]),
... dims=("space", "time"),
... coords=[
... ("space", ["IA", "IL", "IN"]),
... ("time", pd.date_range("2000-01-01", freq="1D", periods=3)),
... ],
... )
>>> da_a
<xarray.DataArray (space: 3, time: 3)>
array([[1. , 2. , 3. ],
[0.1, 0.2, 0.3],
[3.2, 0.6, 1.8]])
Coordinates:
* space (space) <U2 'IA' 'IL' 'IN'
* time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03
>>> da_b = DataArray(
... np.array([[0.2, 0.4, 0.6], [15, 10, 5], [3.2, 0.6, 1.8]]),
... dims=("space", "time"),
... coords=[
... ("space", ["IA", "IL", "IN"]),
... ("time", pd.date_range("2000-01-01", freq="1D", periods=3)),
... ],
... )
>>> da_b
<xarray.DataArray (space: 3, time: 3)>
array([[ 0.2, 0.4, 0.6],
[15. , 10. , 5. ],
[ 3.2, 0.6, 1.8]])
Coordinates:
* space (space) <U2 'IA' 'IL' 'IN'
* time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03
>>> xr.cov(da_a, da_b)
<xarray.DataArray ()>
array(-3.53055556)
>>> xr.cov(da_a, da_b, dim="time")
<xarray.DataArray (space: 3)>
array([ 0.2 , -0.5 , 1.69333333])
Coordinates:
* space (space) <U2 'IA' 'IL' 'IN'
"""
from .dataarray import DataArray
if any(not isinstance(arr, DataArray) for arr in [da_a, da_b]):
raise TypeError(
"Only xr.DataArray is supported."
"Given {}.".format([type(arr) for arr in [da_a, da_b]])
)
return _cov_corr(da_a, da_b, dim=dim, ddof=ddof, method="cov")
def corr(da_a, da_b, dim=None):
"""
Compute the Pearson correlation coefficient between
two DataArray objects along a shared dimension.
Parameters
----------
da_a : DataArray
Array to compute.
da_b : DataArray
Array to compute.
dim : str, optional
The dimension along which the correlation will be computed
Returns
-------
correlation: DataArray
See Also
--------
pandas.Series.corr : corresponding pandas function
xarray.cov : underlying covariance function
Examples
--------
>>> from xarray import DataArray
>>> da_a = DataArray(
... np.array([[1, 2, 3], [0.1, 0.2, 0.3], [3.2, 0.6, 1.8]]),
... dims=("space", "time"),
... coords=[
... ("space", ["IA", "IL", "IN"]),
... ("time", pd.date_range("2000-01-01", freq="1D", periods=3)),
... ],
... )
>>> da_a
<xarray.DataArray (space: 3, time: 3)>
array([[1. , 2. , 3. ],
[0.1, 0.2, 0.3],
[3.2, 0.6, 1.8]])
Coordinates:
* space (space) <U2 'IA' 'IL' 'IN'
* time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03
>>> da_b = DataArray(
... np.array([[0.2, 0.4, 0.6], [15, 10, 5], [3.2, 0.6, 1.8]]),
... dims=("space", "time"),
... coords=[
... ("space", ["IA", "IL", "IN"]),
... ("time", pd.date_range("2000-01-01", freq="1D", periods=3)),
... ],
... )
>>> da_b
<xarray.DataArray (space: 3, time: 3)>
array([[ 0.2, 0.4, 0.6],
[15. , 10. , 5. ],
[ 3.2, 0.6, 1.8]])
Coordinates:
* space (space) <U2 'IA' 'IL' 'IN'
* time (time) datetime64[ns] 2000-01-01 2000-01-02 2000-01-03
>>> xr.corr(da_a, da_b)
<xarray.DataArray ()>
array(-0.57087777)
>>> xr.corr(da_a, da_b, dim="time")
<xarray.DataArray (space: 3)>
array([ 1., -1., 1.])
Coordinates:
* space (space) <U2 'IA' 'IL' 'IN'
"""
from .dataarray import DataArray
if any(not isinstance(arr, DataArray) for arr in [da_a, da_b]):
raise TypeError(
"Only xr.DataArray is supported."
"Given {}.".format([type(arr) for arr in [da_a, da_b]])
)
return _cov_corr(da_a, da_b, dim=dim, method="corr")
def _cov_corr(da_a, da_b, dim=None, ddof=0, method=None):
"""
    Internal method for xr.cov() and xr.corr() so we only have to
    sanitize the input arrays once and don't repeat code.
"""
# 1. Broadcast the two arrays
da_a, da_b = align(da_a, da_b, join="inner", copy=False)
# 2. Ignore the nans
valid_values = da_a.notnull() & da_b.notnull()
da_a = da_a.where(valid_values)
da_b = da_b.where(valid_values)
valid_count = valid_values.sum(dim) - ddof
# 3. Detrend along the given dim
demeaned_da_a = da_a - da_a.mean(dim=dim)
demeaned_da_b = da_b - da_b.mean(dim=dim)
# 4. Compute covariance along the given dim
# N.B. `skipna=False` is required or there is a bug when computing
# auto-covariance. E.g. Try xr.cov(da,da) for
# da = xr.DataArray([[1, 2], [1, np.nan]], dims=["x", "time"])
cov = (demeaned_da_a * demeaned_da_b).sum(dim=dim, skipna=True, min_count=1) / (
valid_count
)
if method == "cov":
return cov
else:
# compute std + corr
da_a_std = da_a.std(dim=dim)
da_b_std = da_b.std(dim=dim)
corr = cov / (da_a_std * da_b_std)
return corr
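# Hedged summary (not from the original source) of the computation above:
#     cov(a, b)  = sum((a - mean(a)) * (b - mean(b))) / valid_count
#     corr(a, b) = cov(a, b) / (std(a) * std(b))
# where NaNs are masked on both arrays, statistics are taken over `dim`, and
# valid_count is the number of jointly valid samples minus `ddof`.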
def cross(
a: DataArray | Variable, b: DataArray | Variable, *, dim: Hashable
) -> DataArray | Variable:
"""
Compute the cross product of two (arrays of) vectors.
The cross product of `a` and `b` in :math:`R^3` is a vector
perpendicular to both `a` and `b`. The vectors in `a` and `b` are
defined by the values along the dimension `dim` and can have sizes
1, 2 or 3. Where the size of either `a` or `b` is
    1 or 2, the remaining components of the input vector are assumed to
be zero and the cross product calculated accordingly. In cases where
both input vectors have dimension 2, the z-component of the cross
product is returned.
Parameters
----------
a, b : DataArray or Variable
Components of the first and second vector(s).
dim : hashable
The dimension along which the cross product will be computed.
Must be available in both vectors.
Examples
--------
Vector cross-product with 3 dimensions:
>>> a = xr.DataArray([1, 2, 3])
>>> b = xr.DataArray([4, 5, 6])
>>> xr.cross(a, b, dim="dim_0")
<xarray.DataArray (dim_0: 3)>
array([-3, 6, -3])
Dimensions without coordinates: dim_0
Vector cross-product with 2 dimensions, returns in the perpendicular
direction:
>>> a = xr.DataArray([1, 2])
>>> b = xr.DataArray([4, 5])
>>> xr.cross(a, b, dim="dim_0")
<xarray.DataArray ()>
array(-3)
Vector cross-product with 3 dimensions but zeros at the last axis
yields the same results as with 2 dimensions:
>>> a = xr.DataArray([1, 2, 0])
>>> b = xr.DataArray([4, 5, 0])
>>> xr.cross(a, b, dim="dim_0")
<xarray.DataArray (dim_0: 3)>
array([ 0, 0, -3])
Dimensions without coordinates: dim_0
One vector with dimension 2:
>>> a = xr.DataArray(
... [1, 2],
... dims=["cartesian"],
... coords=dict(cartesian=(["cartesian"], ["x", "y"])),
... )
>>> b = xr.DataArray(
... [4, 5, 6],
... dims=["cartesian"],
... coords=dict(cartesian=(["cartesian"], ["x", "y", "z"])),
... )
>>> xr.cross(a, b, dim="cartesian")
<xarray.DataArray (cartesian: 3)>
array([12, -6, -3])
Coordinates:
* cartesian (cartesian) <U1 'x' 'y' 'z'
One vector with dimension 2 but coords in other positions:
>>> a = xr.DataArray(
... [1, 2],
... dims=["cartesian"],
... coords=dict(cartesian=(["cartesian"], ["x", "z"])),
... )
>>> b = xr.DataArray(
... [4, 5, 6],
... dims=["cartesian"],
... coords=dict(cartesian=(["cartesian"], ["x", "y", "z"])),
... )
>>> xr.cross(a, b, dim="cartesian")
<xarray.DataArray (cartesian: 3)>
array([-10, 2, 5])
Coordinates:
* cartesian (cartesian) <U1 'x' 'y' 'z'
Multiple vector cross-products. Note that the direction of the
cross product vector is defined by the right-hand rule:
>>> a = xr.DataArray(
... [[1, 2, 3], [4, 5, 6]],
... dims=("time", "cartesian"),
... coords=dict(
... time=(["time"], [0, 1]),
... cartesian=(["cartesian"], ["x", "y", "z"]),
... ),
... )
>>> b = xr.DataArray(
... [[4, 5, 6], [1, 2, 3]],
... dims=("time", "cartesian"),
... coords=dict(
... time=(["time"], [0, 1]),
... cartesian=(["cartesian"], ["x", "y", "z"]),
... ),
... )
>>> xr.cross(a, b, dim="cartesian")
<xarray.DataArray (time: 2, cartesian: 3)>
array([[-3, 6, -3],
[ 3, -6, 3]])
Coordinates:
* time (time) int64 0 1
* cartesian (cartesian) <U1 'x' 'y' 'z'
Cross can be called on Datasets by converting to DataArrays and later
back to a Dataset:
>>> ds_a = xr.Dataset(dict(x=("dim_0", [1]), y=("dim_0", [2]), z=("dim_0", [3])))
>>> ds_b = xr.Dataset(dict(x=("dim_0", [4]), y=("dim_0", [5]), z=("dim_0", [6])))
>>> c = xr.cross(
... ds_a.to_array("cartesian"), ds_b.to_array("cartesian"), dim="cartesian"
... )
>>> c.to_dataset(dim="cartesian")
<xarray.Dataset>
Dimensions: (dim_0: 1)
Dimensions without coordinates: dim_0
Data variables:
x (dim_0) int64 -3
y (dim_0) int64 6
z (dim_0) int64 -3
See Also
--------
numpy.cross : Corresponding numpy function
"""
if dim not in a.dims:
raise ValueError(f"Dimension {dim!r} not on a")
elif dim not in b.dims:
raise ValueError(f"Dimension {dim!r} not on b")
if not 1 <= a.sizes[dim] <= 3:
raise ValueError(
f"The size of {dim!r} on a must be 1, 2, or 3 to be "
f"compatible with a cross product but is {a.sizes[dim]}"
)
elif not 1 <= b.sizes[dim] <= 3:
raise ValueError(
f"The size of {dim!r} on b must be 1, 2, or 3 to be "
f"compatible with a cross product but is {b.sizes[dim]}"
)
all_dims = list(dict.fromkeys(a.dims + b.dims))
if a.sizes[dim] != b.sizes[dim]:
# Arrays have different sizes. Append zeros where the smaller
        # array is missing a value; the zeros will not affect np.cross:
if (
not isinstance(a, Variable) # Only used to make mypy happy.
and dim in getattr(a, "coords", {})
and not isinstance(b, Variable) # Only used to make mypy happy.
and dim in getattr(b, "coords", {})
):
# If the arrays have coords we know which indexes to fill
# with zeros:
a, b = align(
a,
b,
fill_value=0,
join="outer",
exclude=set(all_dims) - {dim},
)
elif min(a.sizes[dim], b.sizes[dim]) == 2:
# If the array doesn't have coords we can only infer
# that it has composite values if the size is at least 2.
# Once padded, rechunk the padded array because apply_ufunc
# requires core dimensions not to be chunked:
if a.sizes[dim] < b.sizes[dim]:
a = a.pad({dim: (0, 1)}, constant_values=0)
# TODO: Should pad or apply_ufunc handle correct chunking?
a = a.chunk({dim: -1}) if is_duck_dask_array(a.data) else a
else:
b = b.pad({dim: (0, 1)}, constant_values=0)
# TODO: Should pad or apply_ufunc handle correct chunking?
b = b.chunk({dim: -1}) if is_duck_dask_array(b.data) else b
else:
raise ValueError(
f"{dim!r} on {'a' if a.sizes[dim] == 1 else 'b'} is incompatible:"
" dimensions without coordinates must have have a length of 2 or 3"
)
c = apply_ufunc(
np.cross,
a,
b,
input_core_dims=[[dim], [dim]],
output_core_dims=[[dim] if a.sizes[dim] == 3 else []],
dask="parallelized",
output_dtypes=[np.result_type(a, b)],
)
c = c.transpose(*all_dims, missing_dims="ignore")
return c
def dot(*arrays, dims=None, **kwargs):
"""Generalized dot product for xarray objects. Like np.einsum, but
provides a simpler interface based on array dimensions.
Parameters
----------
*arrays : DataArray or Variable
Arrays to compute.
dims : ..., str or tuple of str, optional
Which dimensions to sum over. Ellipsis ('...') sums over all dimensions.
If not specified, then all the common dimensions are summed over.
**kwargs : dict
Additional keyword arguments passed to numpy.einsum or
dask.array.einsum
Returns
-------
DataArray
Examples
--------
>>> da_a = xr.DataArray(np.arange(3 * 2).reshape(3, 2), dims=["a", "b"])
>>> da_b = xr.DataArray(np.arange(3 * 2 * 2).reshape(3, 2, 2), dims=["a", "b", "c"])
>>> da_c = xr.DataArray(np.arange(2 * 3).reshape(2, 3), dims=["c", "d"])
>>> da_a
<xarray.DataArray (a: 3, b: 2)>
array([[0, 1],
[2, 3],
[4, 5]])
Dimensions without coordinates: a, b
>>> da_b
<xarray.DataArray (a: 3, b: 2, c: 2)>
array([[[ 0, 1],
[ 2, 3]],
<BLANKLINE>
[[ 4, 5],
[ 6, 7]],
<BLANKLINE>
[[ 8, 9],
[10, 11]]])
Dimensions without coordinates: a, b, c
>>> da_c
<xarray.DataArray (c: 2, d: 3)>
array([[0, 1, 2],
[3, 4, 5]])
Dimensions without coordinates: c, d
>>> xr.dot(da_a, da_b, dims=["a", "b"])
<xarray.DataArray (c: 2)>
array([110, 125])
Dimensions without coordinates: c
>>> xr.dot(da_a, da_b, dims=["a"])
<xarray.DataArray (b: 2, c: 2)>
array([[40, 46],
[70, 79]])
Dimensions without coordinates: b, c
>>> xr.dot(da_a, da_b, da_c, dims=["b", "c"])
<xarray.DataArray (a: 3, d: 3)>
array([[ 9, 14, 19],
[ 93, 150, 207],
[273, 446, 619]])
Dimensions without coordinates: a, d
>>> xr.dot(da_a, da_b)
<xarray.DataArray (c: 2)>
array([110, 125])
Dimensions without coordinates: c
>>> xr.dot(da_a, da_b, dims=...)
<xarray.DataArray ()>
array(235)
"""
from .dataarray import DataArray
from .variable import Variable
if any(not isinstance(arr, (Variable, DataArray)) for arr in arrays):
raise TypeError(
"Only xr.DataArray and xr.Variable are supported."
"Given {}.".format([type(arr) for arr in arrays])
)
if len(arrays) == 0:
raise TypeError("At least one array should be given.")
if isinstance(dims, str):
dims = (dims,)
common_dims = set.intersection(*[set(arr.dims) for arr in arrays])
all_dims = []
for arr in arrays:
all_dims += [d for d in arr.dims if d not in all_dims]
einsum_axes = "abcdefghijklmnopqrstuvwxyz"
dim_map = {d: einsum_axes[i] for i, d in enumerate(all_dims)}
if dims is ...:
dims = all_dims
elif dims is None:
        # find dimensions that occur more than once
dim_counts = Counter()
for arr in arrays:
dim_counts.update(arr.dims)
dims = tuple(d for d, c in dim_counts.items() if c > 1)
dims = tuple(dims) # make dims a tuple
# dimensions to be parallelized
broadcast_dims = tuple(d for d in all_dims if d in common_dims and d not in dims)
input_core_dims = [
[d for d in arr.dims if d not in broadcast_dims] for arr in arrays
]
output_core_dims = [tuple(d for d in all_dims if d not in dims + broadcast_dims)]
# construct einsum subscripts, such as '...abc,...ab->...c'
# Note: input_core_dims are always moved to the last position
subscripts_list = [
"..." + "".join(dim_map[d] for d in ds) for ds in input_core_dims
]
subscripts = ",".join(subscripts_list)
subscripts += "->..." + "".join(dim_map[d] for d in output_core_dims[0])
join = OPTIONS["arithmetic_join"]
# using "inner" emulates `(a * b).sum()` for all joins (except "exact")
if join != "exact":
join = "inner"
# subscripts should be passed to np.einsum as arg, not as kwargs. We need
# to construct a partial function for apply_ufunc to work.
func = functools.partial(duck_array_ops.einsum, subscripts, **kwargs)
result = apply_ufunc(
func,
*arrays,
input_core_dims=input_core_dims,
output_core_dims=output_core_dims,
join=join,
dask="allowed",
)
return result.transpose(*all_dims, missing_dims="ignore")
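# Hedged illustration (not from the original source): for da_a with dims
# ("a", "b"), da_b with dims ("a", "b", "c") and dims=["a", "b"], the subscripts
# built above are "...ab,...abc->...c"; each input's non-broadcast dimensions are
# treated as its core dimensions, and apply_ufunc hands the einsum partial the
# arrays with those dimensions moved last.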
def where(cond, x, y, keep_attrs=None):
"""Return elements from `x` or `y` depending on `cond`.
Performs xarray-like broadcasting across input arguments.
All dimension coordinates on `x` and `y` must be aligned with each
other and with `cond`.
Parameters
----------
cond : scalar, array, Variable, DataArray or Dataset
When True, return values from `x`, otherwise returns values from `y`.
x : scalar, array, Variable, DataArray or Dataset
values to choose from where `cond` is True
y : scalar, array, Variable, DataArray or Dataset
values to choose from where `cond` is False
keep_attrs : bool or str or callable, optional
How to treat attrs. If True, keep the attrs of `x`.
Returns
-------
Dataset, DataArray, Variable or array
In priority order: Dataset, DataArray, Variable or array, whichever
type appears as an input argument.
Examples
--------
>>> x = xr.DataArray(
... 0.1 * np.arange(10),
... dims=["lat"],
... coords={"lat": np.arange(10)},
... name="sst",
... )
>>> x
<xarray.DataArray 'sst' (lat: 10)>
array([0. , 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
Coordinates:
* lat (lat) int64 0 1 2 3 4 5 6 7 8 9
>>> xr.where(x < 0.5, x, x * 100)
<xarray.DataArray 'sst' (lat: 10)>
array([ 0. , 0.1, 0.2, 0.3, 0.4, 50. , 60. , 70. , 80. , 90. ])
Coordinates:
* lat (lat) int64 0 1 2 3 4 5 6 7 8 9
>>> y = xr.DataArray(
... 0.1 * np.arange(9).reshape(3, 3),
... dims=["lat", "lon"],
... coords={"lat": np.arange(3), "lon": 10 + np.arange(3)},
... name="sst",
... )
>>> y
<xarray.DataArray 'sst' (lat: 3, lon: 3)>
array([[0. , 0.1, 0.2],
[0.3, 0.4, 0.5],
[0.6, 0.7, 0.8]])
Coordinates:
* lat (lat) int64 0 1 2
* lon (lon) int64 10 11 12
>>> xr.where(y.lat < 1, y, -1)
<xarray.DataArray (lat: 3, lon: 3)>
array([[ 0. , 0.1, 0.2],
[-1. , -1. , -1. ],
[-1. , -1. , -1. ]])
Coordinates:
* lat (lat) int64 0 1 2
* lon (lon) int64 10 11 12
>>> cond = xr.DataArray([True, False], dims=["x"])
>>> x = xr.DataArray([1, 2], dims=["y"])
>>> xr.where(cond, x, 0)
<xarray.DataArray (x: 2, y: 2)>
array([[1, 2],
[0, 0]])
Dimensions without coordinates: x, y
See Also
--------
numpy.where : corresponding numpy function
Dataset.where, DataArray.where :
equivalent methods
"""
if keep_attrs is None:
keep_attrs = _get_keep_attrs(default=False)
if keep_attrs is True:
# keep the attributes of x, the second parameter, by default to
# be consistent with the `where` method of `DataArray` and `Dataset`
keep_attrs = lambda attrs, context: getattr(x, "attrs", {})
# alignment for three arguments is complicated, so don't support it yet
return apply_ufunc(
duck_array_ops.where,
cond,
x,
y,
join="exact",
dataset_join="exact",
dask="allowed",
keep_attrs=keep_attrs,
)
@overload
def polyval(coord: DataArray, coeffs: DataArray, degree_dim: Hashable) -> DataArray:
...
@overload
def polyval(coord: T_Xarray, coeffs: Dataset, degree_dim: Hashable) -> Dataset:
...
@overload
def polyval(coord: Dataset, coeffs: T_Xarray, degree_dim: Hashable) -> Dataset:
...
def polyval(
coord: T_Xarray, coeffs: T_Xarray, degree_dim: Hashable = "degree"
) -> T_Xarray:
"""Evaluate a polynomial at specific values
Parameters
----------
coord : DataArray or Dataset
Values at which to evaluate the polynomial.
coeffs : DataArray or Dataset
Coefficients of the polynomial.
degree_dim : Hashable, default: "degree"
Name of the polynomial degree dimension in `coeffs`.
Returns
-------
DataArray or Dataset
Evaluated polynomial.
See Also
--------
xarray.DataArray.polyfit
numpy.polynomial.polynomial.polyval
"""
if degree_dim not in coeffs._indexes:
raise ValueError(
f"Dimension `{degree_dim}` should be a coordinate variable with labels."
)
if not np.issubdtype(coeffs[degree_dim].dtype, int):
raise ValueError(
f"Dimension `{degree_dim}` should be of integer dtype. Received {coeffs[degree_dim].dtype} instead."
)
max_deg = coeffs[degree_dim].max().item()
coeffs = coeffs.reindex(
{degree_dim: np.arange(max_deg + 1)}, fill_value=0, copy=False
)
coord = _ensure_numeric(coord)
# using Horner's method
# https://en.wikipedia.org/wiki/Horner%27s_method
res = coeffs.isel({degree_dim: max_deg}, drop=True) + zeros_like(coord)
for deg in range(max_deg - 1, -1, -1):
res *= coord
res += coeffs.isel({degree_dim: deg}, drop=True)
return res
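# Illustrative sketch (editorial addition, not part of the original module): a minimal
# use of the ``polyval`` defined above. The helper is defined but never called here, and
# it assumes xarray is importable in the runtime environment.
def _polyval_usage_sketch():
    import xarray as xr
    coord = xr.DataArray([0.0, 1.0, 2.0], dims="x")
    # coefficients c0 + c1 * x, indexed by an integer "degree" coordinate as required above
    coeffs = xr.DataArray([1.0, 2.0], dims="degree", coords={"degree": [0, 1]})
    return xr.polyval(coord, coeffs)  # Horner's method above gives [1.0, 3.0, 5.0]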
def _ensure_numeric(data: T_Xarray) -> T_Xarray:
"""Converts all datetime64 variables to float64
Parameters
----------
data : DataArray or Dataset
Variables with possible datetime dtypes.
Returns
-------
DataArray or Dataset
Variables with datetime64 dtypes converted to float64.
"""
from .dataset import Dataset
def to_floatable(x: DataArray) -> DataArray:
if x.dtype.kind in "mM":
return x.copy(
data=datetime_to_numeric(
x.data,
offset=np.datetime64("1970-01-01"),
datetime_unit="ns",
),
)
return x
if isinstance(data, Dataset):
return data.map(to_floatable)
else:
return to_floatable(data)
def _calc_idxminmax(
*,
array,
func: Callable,
dim: Hashable = None,
skipna: bool = None,
fill_value: Any = dtypes.NA,
keep_attrs: bool = None,
):
"""Apply common operations for idxmin and idxmax."""
# This function doesn't make sense for scalars so don't try
if not array.ndim:
raise ValueError("This function does not apply for scalars")
if dim is not None:
pass # Use the dim if available
elif array.ndim == 1:
# it is okay to guess the dim if there is only 1
dim = array.dims[0]
else:
# The dim is not specified and ambiguous. Don't guess.
raise ValueError("Must supply 'dim' argument for multidimensional arrays")
if dim not in array.dims:
raise KeyError(f'Dimension "{dim}" not in dimension')
if dim not in array.coords:
raise KeyError(f'Dimension "{dim}" does not have coordinates')
# These are dtypes with NaN values argmin and argmax can handle
na_dtypes = "cfO"
if skipna or (skipna is None and array.dtype.kind in na_dtypes):
# Need to skip NaN values since argmin and argmax can't handle them
allna = array.isnull().all(dim)
array = array.where(~allna, 0)
# This will run argmin or argmax.
indx = func(array, dim=dim, axis=None, keep_attrs=keep_attrs, skipna=skipna)
# Handle dask arrays.
if is_duck_dask_array(array.data):
import dask.array
chunks = dict(zip(array.dims, array.chunks))
dask_coord = dask.array.from_array(array[dim].data, chunks=chunks[dim])
res = indx.copy(data=dask_coord[indx.data.ravel()].reshape(indx.shape))
# we need to attach back the dim name
res.name = dim
else:
res = array[dim][(indx,)]
# The dim is gone but we need to remove the corresponding coordinate.
del res.coords[dim]
if skipna or (skipna is None and array.dtype.kind in na_dtypes):
# Put the NaN values back in after removing them
res = res.where(~allna, fill_value)
# Copy attributes from argmin/argmax, if any
res.attrs = indx.attrs
return res
def unify_chunks(*objects: T_Xarray) -> tuple[T_Xarray, ...]:
"""
Given any number of Dataset and/or DataArray objects, returns
new objects with unified chunk size along all chunked dimensions.
Returns
-------
unified (DataArray or Dataset) – Tuple of objects with the same type as
*objects with consistent chunk sizes for all dask-array variables
See Also
--------
dask.array.core.unify_chunks
"""
from .dataarray import DataArray
# Convert all objects to datasets
datasets = [
obj._to_temp_dataset() if isinstance(obj, DataArray) else obj.copy()
for obj in objects
]
# Get arguments to pass into dask.array.core.unify_chunks
unify_chunks_args = []
sizes: dict[Hashable, int] = {}
for ds in datasets:
for v in ds._variables.values():
if v.chunks is not None:
# Check that sizes match across different datasets
for dim, size in v.sizes.items():
try:
if sizes[dim] != size:
raise ValueError(
f"Dimension {dim!r} size mismatch: {sizes[dim]} != {size}"
)
except KeyError:
sizes[dim] = size
unify_chunks_args += [v._data, v._dims]
# No dask arrays: Return inputs
if not unify_chunks_args:
return objects
# Run dask.array.core.unify_chunks
from dask.array.core import unify_chunks
_, dask_data = unify_chunks(*unify_chunks_args)
dask_data_iter = iter(dask_data)
out = []
for obj, ds in zip(objects, datasets):
for k, v in ds._variables.items():
if v.chunks is not None:
ds._variables[k] = v.copy(data=next(dask_data_iter))
out.append(obj._from_temp_dataset(ds) if isinstance(obj, DataArray) else ds)
return tuple(out)
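# Illustrative sketch (editorial addition): aligning dask chunks with ``unify_chunks``.
# Assumes dask is installed; the helper is defined but never executed at import time.
def _unify_chunks_usage_sketch():
    import numpy as np
    import xarray as xr
    a = xr.DataArray(np.zeros((4, 4)), dims=("x", "y")).chunk({"x": 2})
    b = xr.DataArray(np.zeros((4, 4)), dims=("x", "y")).chunk({"x": 4})
    a2, b2 = xr.unify_chunks(a, b)
    return a2.chunks, b2.chunks  # both arrays now share a common chunking along "x"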
|
py | 1a32ce48df83198856eb71892f2de080e9c01dcb | import datetime
import json
import logging
import os
import re
import shutil
import cherrypy
import core
from core import plugins, snatcher
from core.library import Metadata, Manage
from core.downloaders import PutIO
logging = logging.getLogger(__name__)
class Postprocessing(object):
def __init__(self):
shutil.copystat = self.null
def null(*args, **kwargs):
return
@cherrypy.expose
def putio_process(self, *args, **transfer_data):
''' Method to handle postprocessing callbacks from Put.io
Gets called from Put.IO when download completes via POST request including download
metadata as transfer_data kwargs.
Sample kwargs:
{
"apikey": "APIKEY",
"percent_done": "100",
"peers_getting_from_us": "0",
"completion_percent": "0",
"seconds_seeding": "0",
"current_ratio": "0.00",
"created_torrent": "False",
"size": "507637",
"up_speed": "0",
"callback_url": "http://MYDDNS/watcher/postprocessing/putio_process?apikey=APIKEY",
"source": "<full magnet uri including trackers>",
"peers_connected": "0",
"down_speed": "0",
"is_private": "False",
"id": "45948956", # Download ID
"simulated": "True",
"type": "TORRENT",
"save_parent_id": "536510251",
"file_id": "536514172", # Put.io file ID #
"download_id": "21596709",
"torrent_link": "https://api.put.io/v2/transfers/<transferid>/torrent",
"finished_at": "2018-04-09 04:13:58",
"status": "COMPLETED",
"downloaded": "0",
"extract": "False",
"name": "<download name>",
"status_message": "Completed",
"created_at": "2018-04-09 04:13:57",
"uploaded": "0",
"peers_sending_to_us": "0"
}
'''
logging.info('########################################')
logging.info('PUT.IO Post-processing request received.')
logging.info('########################################')
conf = core.CONFIG['Downloader']['Torrent']['PutIO']
data = {'downloadid': str(transfer_data['id'])}
if transfer_data['source'].startswith('magnet'):
data['guid'] = transfer_data['source'].split('btih:')[1].split('&')[0]
else:
data['guid'] = None
data.update(self.get_movie_info(data))
if conf['downloadwhencomplete']:
logging.info('Downloading Put.IO files and processing locally.')
download = PutIO.download(transfer_data['file_id'])
if not download['response']:
logging.error('PutIO processing failed.')
return
data['path'] = download['path']
data['original_file'] = self.get_movie_file(data['path'])
data.update(self.complete(data))
if data['status'] == 'finished' and conf['deleteafterdownload']:
data['tasks']['delete_putio'] = PutIO.delete(transfer_data['file_id'])
else:
logging.info('Marking guid as Finished.')
guid_result = {}
if data['guid']:
if Manage.searchresults(data['guid'], 'Finished'):
guid_result['update_SEARCHRESULTS'] = True
else:
guid_result['update_SEARCHRESULTS'] = False
if Manage.markedresults(data['guid'], 'Finished', imdbid=data['imdbid']):
guid_result['update_MARKEDRESULTS'] = True
else:
guid_result['update_MARKEDRESULTS'] = False
# create result entry for guid
data['tasks'][data['guid']] = guid_result
# update MOVIES table
if data.get('imdbid'):
db_update = {'finished_file': 'https://app.put.io/files/{}'.format(transfer_data['file_id']), 'status': 'finished'}
core.sql.update_multiple_values('MOVIES', db_update, 'imdbid', data['imdbid'])
title = data['data'].get('title')
year = data['data'].get('year')
imdbid = data['data'].get('imdbid')
resolution = data['data'].get('resolution')
rated = data['data'].get('rated')
original_file = data['data'].get('original_file')
finished_file = data['data'].get('finished_file')
downloadid = data['data'].get('downloadid')
finished_date = data['data'].get('finished_date')
quality = data['data'].get('quality')
plugins.finished(title, year, imdbid, resolution, rated, original_file, finished_file, downloadid, finished_date, quality)
logging.info('#################################')
logging.info('Post-processing complete.')
logging.info(data)
logging.info('#################################')
@cherrypy.expose
@cherrypy.tools.json_out()
def default(self, **data):
''' Handles post-processing requests.
**data: keyword params send through POST request payload
Required kw params:
apikey (str): Watcher api key
mode (str): post-processing mode (complete, failed)
guid (str): download link of file. Can be url or magnet link.
path (str): absolute path to downloaded files. Can be single file or dir
Optional kw params:
imdbid (str): imdb identification number (tt123456)
downloadid (str): id number from downloader
While processing many variables are produced to track files through renaming, moving, etc
Perhaps the most important name is data['finished_file'], which is the current name/location
of the file being processed. This is updated when renamed, moved, etc.
Returns dict of post-processing tasks and data
'''
logging.info('#################################')
logging.info('Post-processing request received.')
logging.info('#################################')
# check for required keys
for key in ('apikey', 'mode', 'guid', 'path'):
if key not in data:
logging.warning('Missing key {}'.format(key))
return {'response': False, 'error': 'missing key: {}'.format(key)}
# check if api key is correct
if data['apikey'] != core.CONFIG['Server']['apikey']:
            logging.warning('Incorrect API key.')
return {'response': False, 'error': 'incorrect api key'}
# check if mode is valid
if data['mode'] not in ('failed', 'complete'):
logging.warning('Invalid mode value: {}.'.format(data['mode']))
return {'response': False, 'error': 'invalid mode value'}
logging.debug(data)
# modify path based on remote mapping
data['path'] = self.map_remote(data['path'])
# get the actual movie file name
data['original_file'] = self.get_movie_file(data['path'], check_size=False if data['mode'] == 'failed' else True)
data['parent_dir'] = os.path.basename(os.path.dirname(data['original_file'])) if data.get('original_file') else ''
if not data['original_file']:
logging.warning('Movie file not found')
data['mode'] = 'failed'
# Get possible local data or get TMDB data to merge with self.params.
logging.info('Gathering release information.')
data.update(self.get_movie_info(data))
# At this point we have all of the information we're going to get.
if data['mode'] == 'failed':
logging.warning('Post-processing as Failed.')
response = self.failed(data)
elif data['mode'] == 'complete':
logging.info('Post-processing as Complete.')
if 'task' not in data:
directory = core.CONFIG['Postprocessing']['Scanner']['directory']
if data['path'] == directory:
core.sql.save_postprocessed_path(data['original_file'])
else:
core.sql.save_postprocessed_path(data['path'])
response = self.complete(data)
response['data'].pop('backlog', '')
response['data'].pop('predb', '')
response['data'].pop('source', '')
title = response['data'].get('title')
year = response['data'].get('year')
imdbid = response['data'].get('imdbid')
resolution = response['data'].get('resolution')
rated = response['data'].get('rated')
original_file = response['data'].get('original_file')
finished_file = response['data'].get('finished_file')
downloadid = response['data'].get('downloadid')
finished_date = response['data'].get('finished_date')
quality = response['data'].get('quality')
plugins.finished(title, year, imdbid, resolution, rated, original_file, finished_file, downloadid, finished_date, quality)
else:
logging.warning('Invalid mode value: {}.'.format(data['mode']))
return {'response': False, 'error': 'invalid mode value'}
logging.info('#################################')
logging.info('Post-processing complete.')
logging.info(json.dumps(response, indent=2, sort_keys=True))
logging.info('#################################')
return response
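    # Illustrative sketch (editorial addition): how a download client might call the
    # endpoint handled by ``default`` above. The host, port and mount path are
    # assumptions; the required keys mirror the docstring.
    #
    #   import requests
    #   requests.post('http://localhost:9090/postprocessing/', data={
    #       'apikey': 'APIKEY',
    #       'mode': 'complete',
    #       'guid': '<nzb guid or torrent hash>',
    #       'path': '/downloads/Movie.Title.2017.1080p',
    #   })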
def get_movie_file(self, path, check_size=True):
''' Looks for the filename of the movie being processed
path (str): url-passed path to download dir
If path is a file, just returns path.
If path is a directory, recursively finds the largest file in that dir.
Returns str absolute path of movie file
'''
logging.info('Finding movie file.')
if os.path.isfile(path):
logging.info('Post-processing file {}.'.format(path))
return path
else:
# Find the biggest file in the dir. Assume that this is the movie.
biggestfile = None
try:
s = 0
for root, dirs, filenames in os.walk(path):
for file in filenames:
f = os.path.join(root, file)
logging.debug('Found file {} in postprocessing dir.'.format(f))
size = os.path.getsize(f)
if size > s:
biggestfile = f
s = size
except Exception as e: # noqa
logging.warning('Unable to find file to process.', exc_info=True)
return None
if biggestfile:
minsize = core.CONFIG['Postprocessing']['Scanner']['minsize'] * 1048576
                if check_size and os.path.getsize(biggestfile) < minsize:
logging.info('Largest file in directory {} is {}, but is smaller than the minimum size of {} bytes'.format(path, biggestfile, minsize))
return None
logging.info('Largest file in directory {} is {}, processing this file.'.format(path, biggestfile.replace(path, '')))
else:
logging.warning('Unable to determine largest file. Postprocessing may fail at a later point.')
return biggestfile
def get_movie_info(self, data):
''' Gets score, imdbid, and other information to help process
data (dict): url-passed params with any additional info
Uses guid to look up local details.
If that fails, uses downloadid.
If that fails, searches tmdb for imdbid
If everything fails returns empty dict {}
Returns dict of any gathered information
'''
# try to get searchresult imdbid using guid first then downloadid
result = None
if data.get('guid'):
logging.info('Searching local database for guid.')
result = core.sql.get_single_search_result('guid', data['guid'])
if result:
logging.info('Local release info found by guid.')
else:
logging.info('Unable to find local release info by guid.')
if not result: # not found from guid
logging.info('Guid not found.')
if data.get('downloadid'):
logging.info('Searching local database for downloadid.')
result = core.sql.get_single_search_result('downloadid', str(data['downloadid']))
if result:
logging.info('Local release info found by downloadid.')
if result['guid'] != data['guid']:
logging.info('Guid for downloadid does not match local data. Adding guid2 to processing data.')
data['guid2'] = result['guid']
else:
logging.info('Unable to find local release info by downloadid.')
if not result: # not found from guid or downloadid
fname = os.path.basename(data.get('path'))
if fname:
logging.info('Searching local database for release name {}'.format(fname))
result = core.sql.get_single_search_result('title', fname)
if result:
logging.info('Found match for {} in releases.'.format(fname))
else:
logging.info('Unable to find local release info by release name, trying fuzzy search.')
result = core.sql.get_single_search_result('title', re.sub(r'[\[\]\(\)\-.:]', '_', fname), like=True)
if result:
logging.info('Found match for {} in releases.'.format(fname))
else:
logging.info('Unable to find local release info by release name.')
# if we found it, get local movie info
if result:
logging.info('Searching local database by imdbid.')
local = core.sql.get_movie_details('imdbid', result['imdbid'])
if local:
logging.info('Movie data found locally by imdbid.')
data.update(local)
data['guid'] = result['guid']
data['finished_score'] = result['score']
data['resolution'] = result['resolution']
data['downloadid'] = result['downloadid']
else:
logging.info('Unable to find movie in local db.')
# Still no luck? Try to get the info from TMDB
else:
logging.info('Unable to find local data for release. Using only data found from file.')
if data and data.get('original_file'):
mdata = Metadata.from_file(data['original_file'], imdbid=data.get('imdbid'))
mdata.update(data)
if not mdata.get('quality'):
                mdata['quality'] = 'Default'
return mdata
elif data:
return data
else:
return {}
def failed(self, data):
''' Post-process a failed download
data (dict): of gathered data from downloader and localdb/tmdb
In SEARCHRESULTS marks guid as Bad
In MARKEDRESULTS:
Creates or updates entry for guid and optional guid2 with status=Bad
Updates MOVIES status
If Clean Up is enabled will delete path and contents.
If Auto Grab is enabled will grab next best release.
Returns dict of post-processing results
'''
config = core.CONFIG['Postprocessing']
# dict we will json.dump and send back to downloader
result = {}
result['status'] = 'finished'
result['data'] = data
result['tasks'] = {}
# mark guid in both results tables
logging.info('Marking guid as Bad.')
guid_result = {'url': data['guid']}
if data['guid']: # guid can be empty string
if Manage.searchresults(data['guid'], 'Bad'):
guid_result['update_SEARCHRESULTS'] = True
else:
guid_result['update_SEARCHRESULTS'] = False
if Manage.markedresults(data['guid'], 'Bad', imdbid=data['imdbid']):
guid_result['update_MARKEDRESULTS'] = True
else:
guid_result['update_MARKEDRESULTS'] = False
# create result entry for guid
result['tasks']['guid'] = guid_result
# if we have a guid2, do it all again
if 'guid2' in data.keys():
logging.info('Marking guid2 as Bad.')
guid2_result = {'url': data['guid2']}
if Manage.searchresults(data['guid2'], 'Bad'):
                guid2_result['update_SEARCHRESULTS'] = True
else:
                guid2_result['update_SEARCHRESULTS'] = False
if Manage.markedresults(data['guid2'], 'Bad', imdbid=data['imdbid'], ):
guid2_result['update_MARKEDRESULTS'] = True
else:
guid2_result['update_MARKEDRESULTS'] = False
# create result entry for guid2
result['tasks']['guid2'] = guid2_result
# set movie status
if data['imdbid']:
logging.info('Setting MOVIE status.')
r = Manage.movie_status(data['imdbid'])
else:
logging.info('Imdbid not supplied or found, unable to update Movie status.')
r = ''
result['tasks']['update_movie_status'] = r
# delete failed files
if config['cleanupfailed']:
result['tasks']['cleanup'] = {'enabled': True, 'path': data['path']}
logging.info('Deleting leftover files from failed download.')
if self.cleanup(data['path']) is True:
result['tasks']['cleanup']['response'] = True
else:
result['tasks']['cleanup']['response'] = False
else:
result['tasks']['cleanup'] = {'enabled': False}
# grab the next best release
if core.CONFIG['Search']['autograb']:
result['tasks']['autograb'] = {'enabled': True}
logging.info('Grabbing the next best release.')
if data.get('imdbid') and data.get('quality'):
best_release = snatcher.get_best_release(data)
if best_release and snatcher.download(best_release):
r = True
else:
r = False
else:
r = False
result['tasks']['autograb']['response'] = r
else:
result['tasks']['autograb'] = {'enabled': False}
# all done!
result['status'] = 'finished'
return result
def complete(self, data):
''' Post-processes a complete, successful download
data (dict): all gathered file information and metadata
data must include the following keys:
path (str): path to downloaded item. Can be file or directory
guid (str): nzb guid or torrent hash
downloadid (str): download id from download client
All params can be empty strings if unknown
In SEARCHRESULTS marks guid as Finished
In MARKEDRESULTS:
            Creates or updates entry for guid and optional guid2 with status=Finished
In MOVIES updates finished_score and finished_date
Updates MOVIES status
Checks to see if we found a movie file. If not, ends here.
If Renamer is enabled, renames movie file according to core.CONFIG
If Mover is enabled, moves file to location in core.CONFIG, then...
If Clean Up enabled, deletes path after Mover finishes.
Clean Up will not execute without Mover success.
Returns dict of post-processing results
'''
config = core.CONFIG['Postprocessing']
# dict we will json.dump and send back to downloader
result = {}
result['status'] = 'incomplete'
result['data'] = data
result['data']['finished_date'] = str(datetime.date.today())
result['tasks'] = {}
# mark guid in both results tables
logging.info('Marking guid as Finished.')
data['guid'] = data['guid'].lower()
guid_result = {}
if data['guid'] and data.get('imdbid'):
if Manage.searchresults(data['guid'], 'Finished', movie_info=data):
guid_result['update_SEARCHRESULTS'] = True
else:
guid_result['update_SEARCHRESULTS'] = False
if Manage.markedresults(data['guid'], 'Finished', imdbid=data['imdbid']):
guid_result['update_MARKEDRESULTS'] = True
else:
guid_result['update_MARKEDRESULTS'] = False
# create result entry for guid
result['tasks'][data['guid']] = guid_result
# if we have a guid2, do it all again
if data.get('guid2') and data.get('imdbid'):
logging.info('Marking guid2 as Finished.')
guid2_result = {}
if Manage.searchresults(data['guid2'], 'Finished', movie_info=data):
guid2_result['update_SEARCHRESULTS'] = True
else:
guid2_result['update_SEARCHRESULTS'] = False
if Manage.markedresults(data['guid2'], 'Finished', imdbid=data['imdbid']):
guid2_result['update_MARKEDRESULTS'] = True
else:
guid2_result['update_MARKEDRESULTS'] = False
# create result entry for guid2
result['tasks'][data['guid2']] = guid2_result
# set movie status and add finished date/score
if data.get('imdbid'):
if core.sql.row_exists('MOVIES', imdbid=data['imdbid']):
data['category'] = core.sql.get_movie_details('imdbid', data['imdbid'])['category']
else:
logging.info('{} not found in library, adding now.'.format(data.get('title')))
data['status'] = 'Disabled'
Manage.add_movie(data)
logging.info('Setting MOVIE status.')
r = Manage.movie_status(data['imdbid'])
db_update = {'finished_date': result['data']['finished_date'], 'finished_score': result['data'].get('finished_score')}
core.sql.update_multiple_values('MOVIES', db_update, 'imdbid', data['imdbid'])
else:
logging.info('Imdbid not supplied or found, unable to update Movie status.')
r = ''
result['tasks']['update_movie_status'] = r
data.update(Metadata.convert_to_db(data))
# mover. sets ['finished_file']
if config['moverenabled']:
result['tasks']['mover'] = {'enabled': True}
response = self.mover(data)
if not response:
result['tasks']['mover']['response'] = False
else:
data['finished_file'] = response
result['tasks']['mover']['response'] = True
else:
logging.info('Mover disabled.')
data['finished_file'] = data.get('original_file')
result['tasks']['mover'] = {'enabled': False}
# renamer
if config['renamerenabled']:
result['tasks']['renamer'] = {'enabled': True}
new_file_name = self.renamer(data)
if new_file_name == '':
result['tasks']['renamer']['response'] = False
else:
path = os.path.split(data['finished_file'])[0]
data['finished_file'] = os.path.join(path, new_file_name)
result['tasks']['renamer']['response'] = True
else:
logging.info('Renamer disabled.')
result['tasks']['renamer'] = {'enabled': False}
        if data.get('imdbid') and data['imdbid'] != 'N/A':
core.sql.update('MOVIES', 'finished_file', result['data'].get('finished_file'), 'imdbid', data['imdbid'])
# Delete leftover dir. Skip if file links are enabled or if mover disabled/failed
if config['cleanupenabled']:
result['tasks']['cleanup'] = {'enabled': True}
if config['movermethod'] in ('copy', 'hardlink', 'symboliclink'):
logging.info('File copy or linking enabled -- skipping Cleanup.')
result['tasks']['cleanup']['response'] = None
return result
elif os.path.isfile(data['path']):
logging.info('Download is file, not directory -- skipping Cleanup.')
result['tasks']['cleanup']['response'] = None
return result
# fail if mover disabled or failed
if config['moverenabled'] is False or result['tasks']['mover']['response'] is False:
logging.info('Mover either disabled or failed -- skipping Cleanup.')
result['tasks']['cleanup']['response'] = None
else:
if self.cleanup(data['path']):
r = True
else:
r = False
result['tasks']['cleanup']['response'] = r
else:
result['tasks']['cleanup'] = {'enabled': False}
# all done!
result['status'] = 'finished'
return result
def map_remote(self, path):
''' Alters directory based on remote mappings settings
path (str): path from download client
Replaces the base of the file tree with the 'local' mapping.
Ie, '/home/user/downloads/Watcher' becomes '//server/downloads/Watcher'
'path' can be file or directory, it doesn't matter.
If more than one match is found, defaults to the longest path.
remote: local = '/home/users/downloads/': '//server/downloads/'
'/home/users/downloads/Watcher/': '//server/downloads/Watcher/'
In this case, a supplied remote '/home/users/downloads/Watcher/' will match a
startswith() for both supplied settings. So we will default to the longest path.
Returns str new path
'''
maps = core.CONFIG['Postprocessing']['RemoteMapping']
matches = []
for remote in maps.keys():
if path.startswith(remote):
matches.append(remote)
if not matches:
return path
else:
match = max(matches, key=len)
new_path = path.replace(match, maps[match])
logging.info('Changing remote path from {} to {}'.format(path, new_path))
return new_path
def compile_path(self, string, data, is_file=False):
''' Compiles string to file/path names
        string (str): brace-formatted string to substitute values (ie '/movies/{title}/')
data (dict): of values to sub into string
        is_file (bool): True if path is a file, False if a directory
Takes a renamer/mover path and adds values.
ie '{title} {year} {resolution}' -> 'Movie 2017 1080P'
Subs double spaces. Trims trailing spaces. Removes any invalid characters.
Can return blank string ''
Sends string to self.sanitize() to remove illegal characters
Returns str new path
'''
new_string = string
for k, v in data.items():
k = '{' + k + '}'
if k in new_string:
new_string = new_string.replace(k, (v or ''))
while ' ' in new_string:
new_string = new_string.replace(' ', ' ')
if not is_file:
new_string = self.map_remote(new_string).strip()
logging.debug('Created path "{}" from "{}"'.format(new_string, string))
return self.sanitize(new_string, is_file=is_file)
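    # Illustrative sketch (editorial addition): with data such as
    #   {'title': 'Some Movie', 'year': '2017', 'resolution': '1080P'}
    # the template '{title} ({year}) {resolution}' compiles to 'Some Movie (2017) 1080P'
    # before sanitize() strips any illegal characters.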
def renamer(self, data):
''' Renames movie file based on renamerstring.
data (dict): movie information.
Renames movie file based on params in core.CONFIG
Returns str new file name (blank string on failure)
'''
logging.info('## Renaming Downloaded Files')
config = core.CONFIG['Postprocessing']
renamer_string = config['renamerstring']
# check to see if we have a valid renamerstring
if re.match(r'{(.*?)}', renamer_string) is None:
logging.info('Invalid renamer string {}'.format(renamer_string))
return ''
# existing absolute path
path = os.path.split(data['finished_file'])[0]
# get the extension
ext = os.path.splitext(data['finished_file'])[1]
# get the new file name
new_name = self.compile_path(renamer_string, data, is_file=True)
if not new_name:
logging.info('New file name would be blank. Cancelling renamer.')
return ''
if core.CONFIG['Postprocessing']['replacespaces']:
new_name = new_name.replace(' ', '.')
new_name = new_name + ext
logging.info('Renaming {} to {}'.format(os.path.basename(data.get('original_file')), new_name))
try:
os.rename(data['finished_file'], os.path.join(path, new_name))
except (SystemExit, KeyboardInterrupt):
raise
except Exception as e: # noqa
logging.error('Renamer failed: Could not rename file.', exc_info=True)
return ''
# return the new name so the mover knows what our file is
return new_name
def recycle(self, recycle_bin, abs_filepath):
''' Sends file to recycle bin dir
recycle_bin (str): absolute path to recycle bin directory
abs_filepath (str): absolute path of file to recycle
        Creates recycle_bin dir if necessary.
Moves file to recycle bin. If a file with the same name already
exists, overwrites existing file.
Returns bool
'''
file_dir, file_name = os.path.split(abs_filepath)
if not os.path.isdir(recycle_bin):
logging.info('Creating recycle bin directory {}'.format(recycle_bin))
try:
os.makedirs(recycle_bin)
except Exception as e:
logging.error('Recycling failed: Could not create Recycle Bin directory {}.'.format(recycle_bin), exc_info=True)
return False
logging.info('Recycling {} to recycle bin {}'.format(abs_filepath, recycle_bin))
try:
if os.path.isfile(os.path.join(recycle_bin, file_name)):
os.remove(os.path.join(recycle_bin, file_name))
shutil.move(abs_filepath, recycle_bin)
return True
except Exception as e: # noqa
logging.error('Recycling failed: Could not move file.', exc_info=True)
return False
def remove_additional_files(self, movie_file):
        ''' Removes additional associated files of movie_file
movie_file (str): absolute file path of old movie file
Removes any file in original_file's directory that share the same file name
Does not cause mover failure on error.
Returns bool
'''
logging.info('## Removing additional files for {}'.format(movie_file))
path, file_name = os.path.split(movie_file)
fname = os.path.splitext(file_name)[0]
for i in os.listdir(path):
name = os.path.splitext(i)[0]
no_lang_name = None
# check if filename ends with .<2-char-lang-code>
if re.search(r'\.[a-z]{2}$', name, re.I):
no_lang_name = os.path.splitext(name)[0]
if name == fname or no_lang_name == fname:
logging.info('Removing additional file {}'.format(i))
try:
os.remove(os.path.join(path, i))
except Exception as e: # noqa
logging.warning('Unable to remove {}'.format(i), exc_info=True)
return False
return True
def mover(self, data):
'''Moves movie file to path constructed by moverstring
data (dict): movie information.
Moves file to location specified in core.CONFIG
If target file already exists either:
Delete it prior to copying new file in (since os.rename in windows doesn't overwrite)
OR:
            Create Recycle Bin directory (if necessary) and move the old file there.
Copies and renames additional files
Returns str new file location (blank string on failure)
'''
logging.info('## Moving Downloaded Files')
config = core.CONFIG['Postprocessing']
if config['recyclebinenabled']:
recycle_bin = self.compile_path(config['recyclebindirectory'], data)
category = data.get('category', None)
if category in core.CONFIG['Categories']:
moverpath = core.CONFIG['Categories'][category]['moverpath']
else:
moverpath = config['moverpath']
target_folder = os.path.normpath(self.compile_path(moverpath, data))
target_folder = os.path.join(target_folder, '')
# if the new folder doesn't exist, make it
try:
if not os.path.exists(target_folder):
os.makedirs(target_folder)
except Exception as e:
logging.error('Mover failed: Could not create directory {}.'.format(target_folder), exc_info=True)
return ''
current_file_path = data['original_file']
current_path, file_name = os.path.split(current_file_path)
# If finished_file exists, recycle or remove
if data.get('finished_file'):
old_movie = data['finished_file']
logging.info('Checking if old file {} exists.'.format(old_movie))
if os.path.isfile(old_movie):
if config['recyclebinenabled']:
logging.info('Old movie file found, recycling.')
if not self.recycle(recycle_bin, old_movie):
return ''
else:
logging.info('Deleting old file {}'.format(old_movie))
try:
os.remove(old_movie)
except Exception as e:
logging.error('Mover failed: Could not delete file.', exc_info=True)
return ''
if config['removeadditionalfiles']:
self.remove_additional_files(old_movie)
# Check if the target file name exists in target dir, recycle or remove
if os.path.isfile(os.path.join(target_folder, file_name)):
existing_movie_file = os.path.join(target_folder, file_name)
logging.info('Existing file {} found in {}'.format(file_name, target_folder))
if config['recyclebinenabled']:
if not self.recycle(recycle_bin, existing_movie_file):
return ''
else:
logging.info('Deleting old file {}'.format(existing_movie_file))
try:
os.remove(existing_movie_file)
except Exception as e:
logging.error('Mover failed: Could not delete file.', exc_info=True)
return ''
if config['removeadditionalfiles']:
self.remove_additional_files(existing_movie_file)
# Finally the actual move process
new_file_location = os.path.join(target_folder, os.path.basename(data['original_file']))
if config['movermethod'] == 'hardlink':
logging.info('Creating hardlink from {} to {}.'.format(data['original_file'], new_file_location))
try:
os.link(data['original_file'], new_file_location)
except Exception as e:
logging.error('Mover failed: Unable to create hardlink.', exc_info=True)
return ''
elif config['movermethod'] == 'copy':
logging.info('Copying {} to {}.'.format(data['original_file'], new_file_location))
try:
shutil.copy(data['original_file'], new_file_location)
except Exception as e:
logging.error('Mover failed: Unable to copy movie.', exc_info=True)
return ''
else:
logging.info('Moving {} to {}'.format(current_file_path, new_file_location))
try:
shutil.copyfile(current_file_path, new_file_location)
os.unlink(current_file_path)
except Exception as e:
logging.error('Mover failed: Could not move file.', exc_info=True)
return ''
if config['movermethod'] == 'symboliclink':
if core.PLATFORM == 'windows':
logging.warning('Attempting to create symbolic link on Windows. This will fail without SeCreateSymbolicLinkPrivilege.')
logging.info('Creating symbolic link from {} to {}'.format(new_file_location, data['original_file']))
try:
os.symlink(new_file_location, data['original_file'])
except Exception as e:
logging.error('Mover failed: Unable to create symbolic link.', exc_info=True)
return ''
keep_extensions = [i.strip() for i in config['moveextensions'].split(',') if i != '']
if len(keep_extensions) > 0:
logging.info('Moving additional files with extensions {}.'.format(','.join(keep_extensions)))
compiled_name = self.compile_path(config['renamerstring'], data)
for root, dirs, filenames in os.walk(data['path']):
for name in filenames:
old_abs_path = os.path.join(root, name)
fname, ext = os.path.splitext(name) # ('filename', '.ext')
# check if filename ends with .<2-char-lang-code>
if re.search(r'\.[a-z]{2}$', fname, re.I):
fname, lang = os.path.splitext(fname)
target_ext = lang + ext
else:
target_ext = ext
if config['renamerenabled']:
fname = compiled_name
target_file = '{}{}'.format(os.path.join(target_folder, fname), target_ext)
if ext.replace('.', '') in keep_extensions:
append = 0
while os.path.isfile(target_file):
append += 1
new_filename = '{}({})'.format(fname, str(append))
target_file = '{}{}'.format(os.path.join(target_folder, new_filename), target_ext)
try:
logging.info('Moving {} to {}'.format(old_abs_path, target_file))
shutil.copyfile(old_abs_path, target_file)
except Exception as e: # noqa
logging.error('Moving additional files failed: Could not copy {}.'.format(old_abs_path), exc_info=True)
return new_file_location
def cleanup(self, path):
''' Deletes specified path
        path (str): path to remove
path can be file or dir
Returns bool
'''
# if its a dir
if os.path.isdir(path):
try:
shutil.rmtree(path)
return True
except Exception as e:
logging.error('Could not delete path.', exc_info=True)
return False
elif os.path.isfile(path):
# if its a file
try:
os.remove(path)
return True
except Exception as e: # noqa
logging.error('Could not delete path.', exc_info=True)
return False
else:
# if it is somehow neither
return False
def sanitize(self, string, is_file=False):
''' Sanitize file names and paths
string (str): to sanitize
Removes all illegal characters or replaces them based on
user's config.
Returns str
'''
config = core.CONFIG['Postprocessing']
repl = config['replaceillegal']
if is_file:
string = re.sub(r'[\/"*?<>|:]+', repl, string)
else:
string = re.sub(r'["*?<>|]+', repl, string)
drive, path = os.path.splitdrive(string)
path = path.replace(':', repl)
return ''.join([drive, path])
|
py | 1a32d005d51058c9df8a6acb2b4f9abb70a76f1d | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-09-21 20:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('restaurant', '0004_auto_20180921_2121'),
]
operations = [
migrations.RemoveField(
model_name='restaurant',
name='inserted_at',
),
migrations.AddField(
model_name='restaurant',
name='longitude',
field=models.CharField(blank=True, max_length=255, null=True),
),
]
|
py | 1a32d08583b1d561a6b4dfbccf5f95dc17525439 | import json
import datetime
import http.client
from time import time
from urllib.parse import quote
from urllib.parse import unquote # Only to remember
########################################################################################################################
##################################################### ENVIRONMENTS #####################################################
########################################################################################################################
#local
conn = http.client.HTTPConnection("localhost:9090")
########################################################################################################################
######################################################## USERS #########################################################
########################################################################################################################
conn.request("GET", "/mini-amazon/api/users", headers={'Content-type': 'application/json'})
#conn.request("GET", "/mini-amazon/api/users/mrblack4", headers={'Content-type': 'application/json'})
# create_user_post = {
# "username": "mryellow",
# "status":"active",
# "stars": 3.2
# }
# json_data_post = json.dumps(create_user_post)
# conn.request("POST", "/mini-amazon/api/users", json_data_post, headers={'Content-type': 'application/json'})
#conn.request("DELETE", "/mini-amazon/api/users/mrgreen", headers={'Content-type': 'application/json'})
start = datetime.datetime.now()
res = conn.getresponse()
end = datetime.datetime.now()
data = res.read()
elapsed = end - start
print(data.decode("utf-8"))
print("\"" + str(res.status) + "\"")
print("\"" + str(res.reason) + "\"")
print("\"elapsed seconds: " + str(elapsed) + "\"")
|
py | 1a32d2729f3294a33234a9a27c5c610eb4835537 | from .PyPCUtils import * |
py | 1a32d29acddbcd143fa533e26d94f6b80df387e1 | import sys
from collections import defaultdict, Counter
import math
## Reading the arguements
train_file = sys.argv[1] # split.train
### Reading the train file to get all the filenames
with open(train_file) as f1:
filenames = []
conservative_files_train = []
liberal_files_train = []
for line in f1:
line = line.replace("\n","")
filenames.append(line)
if "con" in line:
conservative_files_train.append(line) # Conservative filenames
else:
liberal_files_train.append(line) # Liberal filenames
###
### Calculating Class Prior Probabilities
Prob_conservative = math.log(len(conservative_files_train)) - (math.log(len(liberal_files_train) + len(conservative_files_train)))
Prob_liberal = math.log(len(liberal_files_train)) - (math.log(len(liberal_files_train) + len(conservative_files_train)))
###
### Generating the vocabulary
vocabulary = []
conservative_vocabulary = []
liberal_vocabulary = []
for train_file_name in filenames:
ftrain = open(train_file_name)
for line in ftrain:
line = line.replace("\n","")
line = line.lower() # Ignoring Case
if "con" in train_file_name:
conservative_vocabulary.append(line) # Conservative Vocabulary
else:
liberal_vocabulary.append(line) # Liberal Vocabulary
vocabulary.append(line)
###
distinct_vocabulary = set(vocabulary) # Distinct Vocabulary
Total_distinct_words = len(distinct_vocabulary) # Total distinct words in the vocabulary
### Creating default Dictionaries for 1. words and their total occurences
## 2. words and their Probabilities
## Liberal Class
Prob_words_dict_liberal = defaultdict(int)
nliberal = len(liberal_vocabulary)
## Conservative Class
Prob_words_dict_conservative = defaultdict(int)
nconservative = len(conservative_vocabulary)
###
### Filling the data in both the above dictionaries ---------------- Table 6.2 Tom Mitchell Book
## Liberal Class
# Count
Distinct_words_dict_liberal = Counter(liberal_vocabulary)
# Probability
for word in distinct_vocabulary:
Prob_words_dict_liberal[word] = math.exp((math.log(Distinct_words_dict_liberal[word] + 1)) - \
math.log(nliberal + Total_distinct_words))
## Conservative Class
# Count
Distinct_words_dict_conservative = Counter(conservative_vocabulary)
# Probability
for word in distinct_vocabulary:
Prob_words_dict_conservative[word] = math.exp((math.log(Distinct_words_dict_conservative[word] + 1)) - \
math.log(nconservative + Total_distinct_words))
###
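# Editorial note (addition): the two probability loops above implement Laplace
# ("add-one") smoothing,
#   P(w | class) = (count(w, class) + 1) / (n_class + |V|),
# where n_class is the number of word tokens in that class and |V| is the size of the
# shared vocabulary. The log-odds ratio computed next is log(P(w|lib) / P(w|cons)).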
# Finding log-odds ratio
lib_cons = {} # lib/cons
cons_lib = {} # cons/lib
for word in distinct_vocabulary:
lib_cons[word] = math.log(Prob_words_dict_liberal[word]/Prob_words_dict_conservative[word])
cons_lib[word] = math.log(Prob_words_dict_conservative[word]/Prob_words_dict_liberal[word])
count = 0
# similar to topwords.py but instead of probabilities, we have log odd ratio
# Printing top 20 words for both the classes
# lib_cons
for w in sorted(lib_cons, key = lib_cons.get,reverse=True):
    print(w, round(lib_cons[w], 4))  # Printing
count += 1
if count == 20:
break
print ""
count = 0
# cons_lib
for w in sorted(cons_lib, key = cons_lib.get,reverse=True):
    print(w, round(cons_lib[w], 4))  # Printing
count += 1
if count == 20:
break |
py | 1a32d38849a30eeea3bb173d8a9f8aef31b7c719 | import frappe
import unittest
test_records = frappe.get_test_records('Opencart Product')
class TestOpencartProduct(unittest.TestCase):
pass
|
py | 1a32d4c361e2cbb58ad22707c7397a6b6b10321b | """
Support for WeMo device discovery.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/wemo/
"""
import logging
from homeassistant.components.discovery import SERVICE_WEMO
from homeassistant.helpers import discovery
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
REQUIREMENTS = ['pywemo==0.4.3']
DOMAIN = 'wemo'
# Mapping from Wemo model_name to component.
WEMO_MODEL_DISPATCH = {
'Bridge': 'light',
'Insight': 'switch',
'Maker': 'switch',
'Sensor': 'binary_sensor',
'Socket': 'switch',
'LightSwitch': 'switch'
}
SUBSCRIPTION_REGISTRY = None
KNOWN_DEVICES = []
_LOGGER = logging.getLogger(__name__)
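# Example configuration.yaml entry for adding devices that discovery misses
# (editorial addition; the IP address is only a placeholder):
#
#   wemo:
#     static:
#       - 192.168.1.23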
# pylint: disable=unused-argument, too-many-function-args
def setup(hass, config):
"""Common setup for WeMo devices."""
import pywemo
global SUBSCRIPTION_REGISTRY
SUBSCRIPTION_REGISTRY = pywemo.SubscriptionRegistry()
SUBSCRIPTION_REGISTRY.start()
def stop_wemo(event):
"""Shutdown Wemo subscriptions and subscription thread on exit."""
_LOGGER.info("Shutting down subscriptions.")
SUBSCRIPTION_REGISTRY.stop()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_wemo)
def discovery_dispatch(service, discovery_info):
"""Dispatcher for WeMo discovery events."""
# name, model, location, mac
_, model_name, _, _, serial = discovery_info
# Only register a device once
if serial in KNOWN_DEVICES:
return
_LOGGER.debug('Discovered unique device %s', serial)
KNOWN_DEVICES.append(serial)
component = WEMO_MODEL_DISPATCH.get(model_name, 'switch')
discovery.load_platform(hass, component, DOMAIN, discovery_info,
config)
discovery.listen(hass, SERVICE_WEMO, discovery_dispatch)
_LOGGER.info("Scanning for WeMo devices.")
devices = [(device.host, device) for device in pywemo.discover_devices()]
# Add static devices from the config file.
devices.extend((address, None)
for address in config.get(DOMAIN, {}).get('static', []))
for address, device in devices:
port = pywemo.ouimeaux_device.probe_wemo(address)
if not port:
_LOGGER.warning('Unable to probe wemo at %s', address)
continue
_LOGGER.info('Adding wemo at %s:%i', address, port)
url = 'http://%s:%i/setup.xml' % (address, port)
if device is None:
device = pywemo.discovery.device_from_description(url, None)
discovery_info = (device.name, device.model_name, url, device.mac,
device.serialnumber)
discovery.discover(hass, SERVICE_WEMO, discovery_info)
return True
|
py | 1a32d50e2d829f7a466690e306d348b2ab9c0e55 | #!/usr/bin/env python
import asyncio
import websockets
import time
import threading
connections=set()
def mandar():
global connections
vPrint=True
while True:
if len(connections)>0:
print(connections)
mensaje=input("Esto es un mensaje : ")
if mensaje!="u":
websockets.broadcast(connections,mensaje)
vPrint=True
elif vPrint:
print("Conexiones: ")
print(connections)
vPrint=False
async def handler(websocket):
global connections
connections.add(websocket)
await websocket.wait_closed()
connections.remove(websocket)
async def main():
threading.Thread (target=mandar).start()
async with websockets.serve(handler, "172.24.50.15", 8765):
await asyncio.Future() # run forever..
if __name__ == "__main__":
asyncio.run(main()) |
py | 1a32d61a13a77f39597cadc0bb4c570664f8f93b | import random
def int2bin_str(d):
return bin(d)[2:]
def txt2int(s):
result = ''
for c in s:
result += '{:03d}'.format(ord(c))
if result.startswith('0'):
result = '999' + result
return int(result)
def int2txt(d):
s = str(d)
if len(s) % 3 != 0:
print('bad int')
return
result = ''
for i in range(0, len(s), 3):
sub = s[i : i + 3]
if sub == '999': continue
result += chr(int(sub))
return result
def gen_one_time_pad(M2):
result = ''
bits = ['0', '1']
for _ in range(len(M2)):
result += random.choice(bits)
return result
def one_time_pad(x, y):
result = ''
for i in range(len(x)):
if x[i] != y[i]:
result += '1'
else:
result += '0'
return result
def extended_euc(a, b):
if a == 0: return b
if b == 0: return a
r0 = a
r1 = b
s0 = 1
s1 = 0
t0 = 0
t1 = 1
q1 = r0 // r1
r2 = r0 - q1 * r1
s2 = s0 - q1 * s1
t2 = t0 - q1 * t1
while r2 != 0:
r0, r1 = r1, r2
s0, s1 = s1, s2
t0, t1 = t1, t2
q1 = r0 // r1
r2 = r0 - q1 * r1
s2 = s0 - q1 * s1
t2 = t0 - q1 * t1
return (r1, s1, t1)
def discrete_exponentiation(N, b, e):
d = {1: b}
last_e = 1
next_e = 2
while next_e <= e:
d[next_e] = (d[last_e] ** 2) % N
last_e = next_e
next_e *= 2
binary = list(bin(e))
result = 1
curr = 1
while binary[-1] != 'b':
if binary.pop() == '1':
result = (result * d[curr]) % N
curr *= 2
return result
def gen_pub_pri_key(P, Q):
N = P * Q
phi = (P - 1) * (Q - 1)
D = random.randrange(phi)
(one, E, t) = extended_euc(D, phi)
while (one != 1):
D = random.randrange(phi)
(one, E, t) = extended_euc(D, phi)
    return (N, D, E % phi)  # normalize E: extended_euc can return a negative coefficient
def pub_key_encr(N, E, M):
(valid, s, t) = extended_euc(M, N)
if not (valid == 1 and M < N):
print('bad message')
return
raised_to_public = discrete_exponentiation(N, M, E)
return raised_to_public
def pri_key_decr(N, D, code):
return discrete_exponentiation(N, code, D)
def closure_PGP(N, E, plaintext):
M_bin = int2bin_str(txt2int(plaintext))
pad_bin = gen_one_time_pad(M_bin)
padded_bin = one_time_pad(M_bin, pad_bin)
padded_dec = int(padded_bin, 2)
pad_dec = int(pad_bin, 2)
pad_encr = pub_key_encr(N, E, pad_dec)
return (padded_dec, pad_encr)
def closure_PGP_decode(N, D, X):
(padded_dec, pad_encr) = X
pad_dec = pri_key_decr(N, D, pad_encr)
pad_bin = int2bin_str(pad_dec)
padded_bin = int2bin_str(padded_dec)
while len(padded_bin) < len(pad_bin):
padded_bin = '0' + padded_bin
while len(pad_bin) < len(padded_bin):
pad_bin = '0' + pad_bin
M_bin = one_time_pad(padded_bin, pad_bin)
M_dec = int(M_bin, 2)
plaintext = int2txt(M_dec)
return plaintext
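# Illustrative sketch (editorial addition): a round-trip with the helpers above.
# The primes are toy values chosen only to keep the demo fast (far too small for real
# security), and the message is kept short so the random pad stays below N. In the rare
# case the pad shares a factor with N, closure_PGP would need to be retried.
if __name__ == '__main__':
    P, Q = 10007, 10009
    N, D, E = gen_pub_pri_key(P, Q)
    packet = closure_PGP(N, E, 'hi')
    print(closure_PGP_decode(N, D, packet)) # expected output: hi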
|
py | 1a32d63efa7f37856f609301f0552ca0d2b09059 | """
Building and world design commands
"""
import re
from django.core.paginator import Paginator
from django.conf import settings
from django.db.models import Q, Min, Max
from evennia import InterruptCommand
from evennia.scripts.models import ScriptDB
from evennia.objects.models import ObjectDB
from evennia.locks.lockhandler import LockException
from evennia.commands.cmdhandler import get_and_merge_cmdsets
from evennia.utils import create, utils, search, logger, funcparser
from evennia.utils.dbserialize import deserialize
from evennia.utils.utils import (
inherits_from,
class_from_module,
get_all_typeclasses,
variable_from_module,
dbref, crop,
interactive,
list_to_string,
display_len,
format_grid,
)
from evennia.utils.eveditor import EvEditor
from evennia.utils.evmore import EvMore
from evennia.utils.evtable import EvTable
from evennia.prototypes import spawner, prototypes as protlib, menus as olc_menus
from evennia.utils.ansi import raw as ansi_raw
COMMAND_DEFAULT_CLASS = class_from_module(settings.COMMAND_DEFAULT_CLASS)
_FUNCPARSER = None
_ATTRFUNCPARSER = None
# limit symbol import for API
__all__ = (
"ObjManipCommand",
"CmdSetObjAlias",
"CmdCopy",
"CmdCpAttr",
"CmdMvAttr",
"CmdCreate",
"CmdDesc",
"CmdDestroy",
"CmdDig",
"CmdTunnel",
"CmdLink",
"CmdUnLink",
"CmdSetHome",
"CmdListCmdSets",
"CmdName",
"CmdOpen",
"CmdSetAttribute",
"CmdTypeclass",
"CmdWipe",
"CmdLock",
"CmdExamine",
"CmdFind",
"CmdTeleport",
"CmdScripts",
"CmdObjects",
"CmdTag",
"CmdSpawn",
)
# used by set
from ast import literal_eval as _LITERAL_EVAL
LIST_APPEND_CHAR = "+"
# used by find
CHAR_TYPECLASS = settings.BASE_CHARACTER_TYPECLASS
ROOM_TYPECLASS = settings.BASE_ROOM_TYPECLASS
EXIT_TYPECLASS = settings.BASE_EXIT_TYPECLASS
_DEFAULT_WIDTH = settings.CLIENT_DEFAULT_WIDTH
_PROTOTYPE_PARENTS = None
class ObjManipCommand(COMMAND_DEFAULT_CLASS):
"""
This is a parent class for some of the defining objmanip commands
since they tend to have some more variables to define new objects.
Each object definition can have several components. First is
always a name, followed by an optional alias list and finally an
some optional data, such as a typeclass or a location. A comma ','
separates different objects. Like this:
name1;alias;alias;alias:option, name2;alias;alias ...
Spaces between all components are stripped.
A second situation is attribute manipulation. Such commands
are simpler and offer combinations
objname/attr/attr/attr, objname/attr, ...
"""
# OBS - this is just a parent - it's not intended to actually be
# included in a commandset on its own!
def parse(self):
"""
We need to expand the default parsing to get all
the cases, see the module doc.
"""
# get all the normal parsing done (switches etc)
super().parse()
obj_defs = ([], []) # stores left- and right-hand side of '='
obj_attrs = ([], []) # "
for iside, arglist in enumerate((self.lhslist, self.rhslist)):
# lhslist/rhslist is already split by ',' at this point
for objdef in arglist:
aliases, option, attrs = [], None, []
if ":" in objdef:
objdef, option = [part.strip() for part in objdef.rsplit(":", 1)]
if ";" in objdef:
objdef, aliases = [part.strip() for part in objdef.split(";", 1)]
aliases = [alias.strip() for alias in aliases.split(";") if alias.strip()]
if "/" in objdef:
objdef, attrs = [part.strip() for part in objdef.split("/", 1)]
attrs = [part.strip().lower() for part in attrs.split("/") if part.strip()]
# store data
obj_defs[iside].append({"name": objdef, "option": option, "aliases": aliases})
obj_attrs[iside].append({"name": objdef, "attrs": attrs})
# store for future access
self.lhs_objs = obj_defs[0]
self.rhs_objs = obj_defs[1]
self.lhs_objattr = obj_attrs[0]
self.rhs_objattr = obj_attrs[1]
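    # Illustrative sketch (editorial addition): for the input
    #   box;crate:containers.Box = here
    # parse() above yields
    #   self.lhs_objs    == [{"name": "box", "option": "containers.Box", "aliases": ["crate"]}]
    #   self.rhs_objs    == [{"name": "here", "option": None, "aliases": []}]
    #   self.lhs_objattr == [{"name": "box", "attrs": []}]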
class CmdSetObjAlias(COMMAND_DEFAULT_CLASS):
"""
adding permanent aliases for object
Usage:
alias <obj> [= [alias[,alias,alias,...]]]
alias <obj> =
alias/category <obj> = [alias[,alias,...]:<category>
Switches:
category - requires ending input with :category, to store the
given aliases with the given category.
Assigns aliases to an object so it can be referenced by more
than one name. Assign empty to remove all aliases from object. If
assigning a category, all aliases given will be using this category.
Observe that this is not the same thing as personal aliases
created with the 'nick' command! Aliases set with alias are
changing the object in question, making those aliases usable
by everyone.
"""
key = "@alias"
aliases = "setobjalias"
switch_options = ("category",)
locks = "cmd:perm(setobjalias) or perm(Builder)"
help_category = "Building"
def func(self):
"""Set the aliases."""
caller = self.caller
if not self.lhs:
string = "Usage: alias <obj> [= [alias[,alias ...]]]"
self.caller.msg(string)
return
objname = self.lhs
# Find the object to receive aliases
obj = caller.search(objname)
if not obj:
return
if self.rhs is None:
# no =, so we just list aliases on object.
aliases = obj.aliases.all(return_key_and_category=True)
if aliases:
caller.msg(
"Aliases for %s: %s"
% (
obj.get_display_name(caller),
", ".join(
"'%s'%s"
% (alias, "" if category is None else "[category:'%s']" % category)
for (alias, category) in aliases
),
)
)
else:
caller.msg("No aliases exist for '%s'." % obj.get_display_name(caller))
return
if not (obj.access(caller, "control") or obj.access(caller, "edit")):
caller.msg("You don't have permission to do that.")
return
if not self.rhs:
# we have given an empty =, so delete aliases
old_aliases = obj.aliases.all()
if old_aliases:
caller.msg(
"Cleared aliases from %s: %s"
% (obj.get_display_name(caller), ", ".join(old_aliases))
)
obj.aliases.clear()
else:
caller.msg("No aliases to clear.")
return
category = None
if "category" in self.switches:
if ":" in self.rhs:
rhs, category = self.rhs.rsplit(":", 1)
category = category.strip()
else:
caller.msg(
"If specifying the /category switch, the category must be given "
"as :category at the end."
)
else:
rhs = self.rhs
# merge the old and new aliases (if any)
old_aliases = obj.aliases.get(category=category, return_list=True)
new_aliases = [alias.strip().lower() for alias in rhs.split(",") if alias.strip()]
# make the aliases only appear once
old_aliases.extend(new_aliases)
aliases = list(set(old_aliases))
# save back to object.
obj.aliases.add(aliases, category=category)
# we need to trigger this here, since this will force
# (default) Exits to rebuild their Exit commands with the new
# aliases
obj.at_cmdset_get(force_init=True)
# report all aliases on the object
caller.msg(
"Alias(es) for '%s' set to '%s'%s."
% (
obj.get_display_name(caller),
str(obj.aliases),
" (category: '%s')" % category if category else "",
)
)
class CmdCopy(ObjManipCommand):
"""
copy an object and its properties
Usage:
copy <original obj> [= <new_name>][;alias;alias..]
[:<new_location>] [,<new_name2> ...]
Create one or more copies of an object. If you don't supply any targets,
one exact copy of the original object will be created with the name *_copy.
"""
key = "@copy"
locks = "cmd:perm(copy) or perm(Builder)"
help_category = "Building"
def func(self):
"""Uses ObjManipCommand.parse()"""
caller = self.caller
args = self.args
if not args:
caller.msg(
"Usage: copy <obj> [=<new_name>[;alias;alias..]]"
"[:<new_location>] [, <new_name2>...]"
)
return
if not self.rhs:
# this has no target =, so an identical new object is created.
from_obj_name = self.args
from_obj = caller.search(from_obj_name)
if not from_obj:
return
to_obj_name = "%s_copy" % from_obj_name
to_obj_aliases = ["%s_copy" % alias for alias in from_obj.aliases.all()]
copiedobj = ObjectDB.objects.copy_object(
from_obj, new_key=to_obj_name, new_aliases=to_obj_aliases
)
if copiedobj:
string = "Identical copy of %s, named '%s' was created." % (
from_obj_name,
to_obj_name,
)
else:
string = "There was an error copying %s."
else:
# we have specified =. This might mean many object targets
from_obj_name = self.lhs_objs[0]["name"]
from_obj = caller.search(from_obj_name)
if not from_obj:
return
for objdef in self.rhs_objs:
# loop through all possible copy-to targets
to_obj_name = objdef["name"]
to_obj_aliases = objdef["aliases"]
to_obj_location = objdef["option"]
if to_obj_location:
to_obj_location = caller.search(to_obj_location, global_search=True)
if not to_obj_location:
return
copiedobj = ObjectDB.objects.copy_object(
from_obj,
new_key=to_obj_name,
new_location=to_obj_location,
new_aliases=to_obj_aliases,
)
if copiedobj:
string = "Copied %s to '%s' (aliases: %s)." % (
from_obj_name,
to_obj_name,
to_obj_aliases,
)
else:
string = "There was an error copying %s to '%s'." % (from_obj_name, to_obj_name)
# we are done, echo to user
caller.msg(string)
class CmdCpAttr(ObjManipCommand):
"""
copy attributes between objects
Usage:
cpattr[/switch] <obj>/<attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
cpattr[/switch] <obj>/<attr> = <obj1> [,<obj2>,<obj3>,...]
cpattr[/switch] <attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
cpattr[/switch] <attr> = <obj1>[,<obj2>,<obj3>,...]
Switches:
move - delete the attribute from the source object after copying.
Example:
cpattr coolness = Anna/chillout, Anna/nicety, Tom/nicety
->
copies the coolness attribute (defined on yourself), to attributes
on Anna and Tom.
Copy the attribute from one object to one or more attributes on another object.
If you don't supply a source object, yourself is used.
"""
key = "@cpattr"
switch_options = ("move",)
locks = "cmd:perm(cpattr) or perm(Builder)"
help_category = "Building"
def check_from_attr(self, obj, attr, clear=False):
"""
Hook for overriding on subclassed commands. Checks to make sure a
caller can copy the attr from the object in question. If not, return a
false value and the command will abort. An error message should be
provided by this function.
If clear is True, user is attempting to move the attribute.
"""
return True
def check_to_attr(self, obj, attr):
"""
Hook for overriding on subclassed commands. Checks to make sure a
caller can write to the specified attribute on the specified object.
If not, return a false value and the attribute will be skipped. An
error message should be provided by this function.
"""
return True
def check_has_attr(self, obj, attr):
"""
Hook for overriding on subclassed commands. Do any preprocessing
required and verify an object has an attribute.
"""
if not obj.attributes.has(attr):
self.caller.msg("%s doesn't have an attribute %s." % (obj.name, attr))
return False
return True
def get_attr(self, obj, attr):
"""
Hook for overriding on subclassed commands. Do any preprocessing
required and get the attribute from the object.
"""
return obj.attributes.get(attr)
def func(self):
"""
Do the copying.
"""
caller = self.caller
if not self.rhs:
string = """Usage:
cpattr[/switch] <obj>/<attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
cpattr[/switch] <obj>/<attr> = <obj1> [,<obj2>,<obj3>,...]
cpattr[/switch] <attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
cpattr[/switch] <attr> = <obj1>[,<obj2>,<obj3>,...]"""
caller.msg(string)
return
lhs_objattr = self.lhs_objattr
to_objs = self.rhs_objattr
from_obj_name = lhs_objattr[0]["name"]
from_obj_attrs = lhs_objattr[0]["attrs"]
if not from_obj_attrs:
# this means the from_obj_name is actually an attribute
# name on self.
from_obj_attrs = [from_obj_name]
from_obj = self.caller
else:
from_obj = caller.search(from_obj_name)
if not from_obj or not to_objs:
caller.msg("You have to supply both source object and target(s).")
return
# copy to all to_objects
if "move" in self.switches:
clear = True
else:
clear = False
if not self.check_from_attr(from_obj, from_obj_attrs[0], clear=clear):
return
for attr in from_obj_attrs:
if not self.check_has_attr(from_obj, attr):
return
if (len(from_obj_attrs) != len(set(from_obj_attrs))) and clear:
self.caller.msg("|RCannot have duplicate source names when moving!")
return
result = []
for to_obj in to_objs:
to_obj_name = to_obj["name"]
to_obj_attrs = to_obj["attrs"]
to_obj = caller.search(to_obj_name)
if not to_obj:
result.append("\nCould not find object '%s'" % to_obj_name)
continue
for inum, from_attr in enumerate(from_obj_attrs):
try:
to_attr = to_obj_attrs[inum]
except IndexError:
# if there are too few attributes given
# on the to_obj, we copy the original name instead.
to_attr = from_attr
if not self.check_to_attr(to_obj, to_attr):
continue
value = self.get_attr(from_obj, from_attr)
to_obj.attributes.add(to_attr, value)
if clear and not (from_obj == to_obj and from_attr == to_attr):
from_obj.attributes.remove(from_attr)
result.append(
"\nMoved %s.%s -> %s.%s. (value: %s)"
% (from_obj.name, from_attr, to_obj_name, to_attr, repr(value))
)
else:
result.append(
"\nCopied %s.%s -> %s.%s. (value: %s)"
% (from_obj.name, from_attr, to_obj_name, to_attr, repr(value))
)
caller.msg("".join(result))
class CmdMvAttr(ObjManipCommand):
"""
move attributes between objects
Usage:
mvattr[/switch] <obj>/<attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
mvattr[/switch] <obj>/<attr> = <obj1> [,<obj2>,<obj3>,...]
mvattr[/switch] <attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
mvattr[/switch] <attr> = <obj1>[,<obj2>,<obj3>,...]
Switches:
copy - Don't delete the original after moving.
Move an attribute from one object to one or more attributes on another
object. If you don't supply a source object, yourself is used.
"""
key = "@mvattr"
switch_options = ("copy",)
locks = "cmd:perm(mvattr) or perm(Builder)"
help_category = "Building"
def func(self):
"""
Do the moving
"""
if not self.rhs:
string = """Usage:
mvattr[/switch] <obj>/<attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
mvattr[/switch] <obj>/<attr> = <obj1> [,<obj2>,<obj3>,...]
mvattr[/switch] <attr> = <obj1>/<attr1> [,<obj2>/<attr2>,<obj3>/<attr3>,...]
mvattr[/switch] <attr> = <obj1>[,<obj2>,<obj3>,...]"""
self.caller.msg(string)
return
# simply use cpattr for all the functionality
if "copy" in self.switches:
self.execute_cmd("cpattr %s" % self.args)
else:
self.execute_cmd("cpattr/move %s" % self.args)
class CmdCreate(ObjManipCommand):
"""
create new objects
Usage:
create[/drop] <objname>[;alias;alias...][:typeclass], <objname>...
switch:
drop - automatically drop the new object into your current
location (this is not echoed). This also sets the new
object's home to the current location rather than to you.
Creates one or more new objects. If typeclass is given, the object
is created as a child of this typeclass. The typeclass script is
assumed to be located under types/ and any further
directory structure is given in Python notation. So if you have a
correct typeclass 'RedButton' defined in
types/examples/red_button.py, you could create a new
object of this type like this:
create/drop button;red : examples.red_button.RedButton
"""
key = "@create"
switch_options = ("drop",)
locks = "cmd:perm(create) or perm(Builder)"
help_category = "Building"
# lockstring of newly created objects, for easy overloading.
# Will be formatted with the {id} of the creating object.
new_obj_lockstring = "control:id({id}) or perm(Admin);delete:id({id}) or perm(Admin)"
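# Illustrative note (added commentary, not in the original source): with a
# caller whose id is 5, the formatted lockstring above would read
#   "control:id(5) or perm(Admin);delete:id(5) or perm(Admin)"
# i.e. only the creator or an Admin may control or delete the new object.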
def func(self):
"""
Creates the object.
"""
caller = self.caller
if not self.args:
string = "Usage: create[/drop] <newname>[;alias;alias...] [:typeclass.path]"
caller.msg(string)
return
# create the objects
for objdef in self.lhs_objs:
string = ""
name = objdef["name"]
aliases = objdef["aliases"]
typeclass = objdef["option"]
# create object (if not a valid typeclass, the default
# object typeclass will automatically be used)
lockstring = self.new_obj_lockstring.format(id=caller.id)
obj = create.create_object(
typeclass,
name,
caller,
home=caller,
aliases=aliases,
locks=lockstring,
report_to=caller,
)
if not obj:
continue
if aliases:
string = "You create a new %s: %s (aliases: %s)."
string = string % (obj.typename, obj.name, ", ".join(aliases))
else:
string = "You create a new %s: %s."
string = string % (obj.typename, obj.name)
# set a default desc
if not obj.db.desc:
obj.db.desc = "You see nothing special."
if "drop" in self.switches:
if caller.location:
obj.home = caller.location
obj.move_to(caller.location, quiet=True)
if string:
caller.msg(string)
def _desc_load(caller):
return caller.db.evmenu_target.db.desc or ""
def _desc_save(caller, buf):
"""
Save line buffer to the desc prop. This should
return True if successful and also report its status to the user.
"""
caller.db.evmenu_target.db.desc = buf
caller.msg("Saved.")
return True
def _desc_quit(caller):
caller.attributes.remove("evmenu_target")
caller.msg("Exited editor.")
class CmdDesc(COMMAND_DEFAULT_CLASS):
"""
describe an object or the current room.
Usage:
desc [<obj> =] <description>
Switches:
edit - Open up a line editor for more advanced editing.
Sets the "desc" attribute on an object. If an object is not given,
describe the current room.
"""
key = "@desc"
switch_options = ("edit",)
locks = "cmd:perm(desc) or perm(Builder)"
help_category = "Building"
def edit_handler(self):
if self.rhs:
self.msg("|rYou may specify a value, or use the edit switch, but not both.|n")
return
if self.args:
obj = self.caller.search(self.args)
else:
obj = self.caller.location or self.msg("|rYou can't describe oblivion.|n")
if not obj:
return
if not (obj.access(self.caller, "control") or obj.access(self.caller, "edit")):
self.caller.msg("You don't have permission to edit the description of %s." % obj.key)
return
self.caller.db.evmenu_target = obj
# launch the editor
EvEditor(
self.caller,
loadfunc=_desc_load,
savefunc=_desc_save,
quitfunc=_desc_quit,
key="desc",
persistent=True,
)
return
def func(self):
"""Define command"""
caller = self.caller
if not self.args and "edit" not in self.switches:
caller.msg("Usage: desc [<obj> =] <description>")
return
if "edit" in self.switches:
self.edit_handler()
return
if "=" in self.args:
# We have an =
obj = caller.search(self.lhs)
if not obj:
return
desc = self.rhs or ""
else:
obj = caller.location or self.msg("|rYou don't have a location to describe.|n")
if not obj:
return
desc = self.args
if obj.access(self.caller, "control") or obj.access(self.caller, "edit"):
obj.db.desc = desc
caller.msg("The description was set on %s." % obj.get_display_name(caller))
else:
caller.msg("You don't have permission to edit the description of %s." % obj.key)
class CmdDestroy(COMMAND_DEFAULT_CLASS):
"""
permanently delete objects
Usage:
destroy[/switches] [obj, obj2, obj3, [dbref-dbref], ...]
Switches:
override - The destroy command will usually avoid accidentally
destroying account objects. This switch overrides this safety.
force - destroy without confirmation.
Examples:
destroy house, roof, door, 44-78
destroy 5-10, flower, 45
destroy/force north
Destroys one or many objects. If dbrefs are used, a range to delete can be
given, e.g. 4-10; the end points of the range are also deleted. This command
displays a confirmation before destroying, to make sure of your choice.
You can specify the /force switch to bypass this confirmation.
"""
key = "@destroy"
aliases = ["@delete", "@del"]
switch_options = ("override", "force")
locks = "cmd:perm(destroy) or perm(Builder)"
help_category = "Building"
confirm = True # set to False to always bypass confirmation
default_confirm = "yes" # what to assume if just pressing enter (yes/no)
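# Illustrative note (added commentary, not in the original source): with the
# defaults above, destroying a single object named Box would prompt something
# like
#   "Are you sure you want to destroy Box [yes]/no?"
# and pressing return without typing anything counts as the default_confirm
# answer ("yes").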
def func(self):
"""Implements the command."""
caller = self.caller
delete = True
if not self.args or not self.lhslist:
caller.msg("Usage: destroy[/switches] [obj, obj2, obj3, [dbref-dbref],...]")
delete = False
def delobj(obj):
# helper function for deleting a single object
string = ""
if not obj.pk:
string = "\nObject %s was already deleted." % obj.db_key
else:
objname = obj.name
if not (obj.access(caller, "control") or obj.access(caller, "delete")):
return "\nYou don't have permission to delete %s." % objname
if obj.account and "override" not in self.switches:
return (
"\nObject %s is controlled by an active account. Use /override to delete anyway."
% objname
)
if obj.dbid == int(settings.DEFAULT_HOME.lstrip("#")):
return (
"\nYou are trying to delete |c%s|n, which is set as DEFAULT_HOME. "
"Re-point settings.DEFAULT_HOME to another "
"object before continuing." % objname
)
had_exits = hasattr(obj, "exits") and obj.exits
had_objs = hasattr(obj, "contents") and any(
obj
for obj in obj.contents
if not (hasattr(obj, "exits") and obj not in obj.exits)
)
# do the deletion
okay = obj.delete()
if not okay:
string += (
"\nERROR: %s not deleted, probably because delete() returned False."
% objname
)
else:
string += "\n%s was destroyed." % objname
if had_exits:
string += " Exits to and from %s were destroyed as well." % objname
if had_objs:
string += " Objects inside %s were moved to their homes." % objname
return string
objs = []
for objname in self.lhslist:
if not delete:
continue
if "-" in objname:
# might be a range of dbrefs
dmin, dmax = [utils.dbref(part, reqhash=False) for part in objname.split("-", 1)]
if dmin and dmax:
for dbref in range(int(dmin), int(dmax + 1)):
obj = caller.search("#" + str(dbref))
if obj:
objs.append(obj)
continue
else:
obj = caller.search(objname)
else:
obj = caller.search(objname)
if obj is None:
self.caller.msg(
" (Objects to destroy must either be local or specified with a unique #dbref.)"
)
elif obj not in objs:
objs.append(obj)
if objs and ("force" not in self.switches and type(self).confirm):
confirm = "Are you sure you want to destroy "
if len(objs) == 1:
confirm += objs[0].get_display_name(caller)
elif len(objs) < 5:
confirm += ", ".join([obj.get_display_name(caller) for obj in objs])
else:
confirm += ", ".join(["#{}".format(obj.id) for obj in objs])
confirm += " [yes]/no?" if self.default_confirm == "yes" else " yes/[no]"
answer = ""
answer = yield (confirm)
answer = self.default_confirm if answer == "" else answer
if answer and answer not in ("yes", "y", "no", "n"):
caller.msg(
"Canceled: Either accept the default by pressing return or specify yes/no."
)
delete = False
elif answer.strip().lower() in ("n", "no"):
caller.msg("Canceled: No object was destroyed.")
delete = False
if delete:
results = []
for obj in objs:
results.append(delobj(obj))
if results:
caller.msg("".join(results).strip())
class CmdDig(ObjManipCommand):
"""
build new rooms and connect them to the current location
Usage:
dig[/switches] <roomname>[;alias;alias...][:typeclass]
[= <exit_to_there>[;alias][:typeclass]]
[, <exit_to_here>[;alias][:typeclass]]
Switches:
tel or teleport - move yourself to the new room
Examples:
dig kitchen = north;n, south;s
dig house:myrooms.MyHouseTypeclass
dig sheer cliff;cliff;sheer = climb up, climb down
This command is a convenient way to build rooms quickly; it creates the
new room and you can optionally set up exits back and forth between your
current room and the new one. You can add as many aliases as you
like to the name of the room and the exits in question; an example
would be 'north;no;n'.
"""
key = "@dig"
switch_options = ("teleport",)
locks = "cmd:perm(dig) or perm(Builder)"
help_category = "Building"
# lockstring of newly created rooms, for easy overloading.
# Will be formatted with the {id} of the creating object.
new_room_lockstring = (
"control:id({id}) or perm(Admin); "
"delete:id({id}) or perm(Admin); "
"edit:id({id}) or perm(Admin)"
)
def func(self):
"""Do the digging. Inherits variables from ObjManipCommand.parse()"""
caller = self.caller
if not self.lhs:
string = "Usage: dig[/teleport] <roomname>[;alias;alias...]" "[:parent] [= <exit_there>"
string += "[;alias;alias..][:parent]] "
string += "[, <exit_back_here>[;alias;alias..][:parent]]"
caller.msg(string)
return
room = self.lhs_objs[0]
if not room["name"]:
caller.msg("You must supply a new room name.")
return
location = caller.location
# Create the new room
typeclass = room["option"]
if not typeclass:
typeclass = settings.BASE_ROOM_TYPECLASS
# create room
new_room = create.create_object(
typeclass, room["name"], aliases=room["aliases"], report_to=caller
)
lockstring = self.new_room_lockstring.format(id=caller.id)
new_room.locks.add(lockstring)
alias_string = ""
if new_room.aliases.all():
alias_string = " (%s)" % ", ".join(new_room.aliases.all())
room_string = "Created room %s(%s)%s of type %s." % (
new_room,
new_room.dbref,
alias_string,
typeclass,
)
# create exit to room
exit_to_string = ""
exit_back_string = ""
if self.rhs_objs:
to_exit = self.rhs_objs[0]
if not to_exit["name"]:
exit_to_string = "\nNo exit created to new room."
elif not location:
exit_to_string = "\nYou cannot create an exit from a None-location."
else:
# Build the exit to the new room from the current one
typeclass = to_exit["option"]
if not typeclass:
typeclass = settings.BASE_EXIT_TYPECLASS
new_to_exit = create.create_object(
typeclass,
to_exit["name"],
location,
aliases=to_exit["aliases"],
locks=lockstring,
destination=new_room,
report_to=caller,
)
alias_string = ""
if new_to_exit.aliases.all():
alias_string = " (%s)" % ", ".join(new_to_exit.aliases.all())
exit_to_string = "\nCreated Exit from %s to %s: %s(%s)%s."
exit_to_string = exit_to_string % (
location.name,
new_room.name,
new_to_exit,
new_to_exit.dbref,
alias_string,
)
# Create exit back from new room
if len(self.rhs_objs) > 1:
# Building the exit back to the current room
back_exit = self.rhs_objs[1]
if not back_exit["name"]:
exit_back_string = "\nNo back exit created."
elif not location:
exit_back_string = "\nYou cannot create an exit back to a None-location."
else:
typeclass = back_exit["option"]
if not typeclass:
typeclass = settings.BASE_EXIT_TYPECLASS
new_back_exit = create.create_object(
typeclass,
back_exit["name"],
new_room,
aliases=back_exit["aliases"],
locks=lockstring,
destination=location,
report_to=caller,
)
alias_string = ""
if new_back_exit.aliases.all():
alias_string = " (%s)" % ", ".join(new_back_exit.aliases.all())
exit_back_string = "\nCreated Exit back from %s to %s: %s(%s)%s."
exit_back_string = exit_back_string % (
new_room.name,
location.name,
new_back_exit,
new_back_exit.dbref,
alias_string,
)
caller.msg("%s%s%s" % (room_string, exit_to_string, exit_back_string))
if new_room and "teleport" in self.switches:
caller.move_to(new_room)
class CmdTunnel(COMMAND_DEFAULT_CLASS):
"""
create new rooms in cardinal directions only
Usage:
tunnel[/switch] <direction>[:typeclass] [= <roomname>[;alias;alias;...][:typeclass]]
Switches:
oneway - do not create an exit back to the current location
tel - teleport to the newly created room
Example:
tunnel n
tunnel n = house;mike's place;green building
This is a simple way to build using pre-defined directions:
|wn,ne,e,se,s,sw,w,nw|n (north, northeast etc)
|wu,d|n (up and down)
|wi,o|n (in and out)
The full names (north, in, southwest, etc) will always be put as
main name for the exit, using the abbreviation as an alias (so an
exit will always be able to be used with both "north" as well as
"n" for example). Opposite directions will automatically be
created back from the new room unless the /oneway switch is given.
For more flexibility and power in creating rooms, use dig.
"""
key = "@tunnel"
aliases = ["@tun"]
switch_options = ("oneway", "tel")
locks = "cmd: perm(tunnel) or perm(Builder)"
help_category = "Building"
# store the direction, full name and its opposite
directions = {
"n": ("north", "s"),
"ne": ("northeast", "sw"),
"e": ("east", "w"),
"se": ("southeast", "nw"),
"s": ("south", "n"),
"sw": ("southwest", "ne"),
"w": ("west", "e"),
"nw": ("northwest", "se"),
"u": ("up", "d"),
"d": ("down", "u"),
"i": ("in", "o"),
"o": ("out", "i"),
}
def func(self):
"""Implements the tunnel command"""
if not self.args or not self.lhs:
string = (
"Usage: tunnel[/switch] <direction>[:typeclass] [= <roomname>"
"[;alias;alias;...][:typeclass]]"
)
self.caller.msg(string)
return
# If we get a typeclass, we need to get just the exitname
exitshort = self.lhs.split(":")[0]
if exitshort not in self.directions:
string = "tunnel can only understand the following directions: %s." % ",".join(
sorted(self.directions.keys())
)
string += "\n(use dig for more freedom)"
self.caller.msg(string)
return
# retrieve all input and parse it
exitname, backshort = self.directions[exitshort]
backname = self.directions[backshort][0]
# if we received a typeclass for the exit, add it to the alias (short name)
if ":" in self.lhs:
# limit to only the first : character
exit_typeclass = ":" + self.lhs.split(":", 1)[-1]
# exitshort and backshort are the last part of the exit strings,
# so we add our typeclass argument after
exitshort += exit_typeclass
backshort += exit_typeclass
roomname = "Some place"
if self.rhs:
roomname = self.rhs # this may include aliases; that's fine.
telswitch = ""
if "tel" in self.switches:
telswitch = "/teleport"
backstring = ""
if "oneway" not in self.switches:
backstring = ", %s;%s" % (backname, backshort)
# build the string we will use to call dig
digstring = "dig%s %s = %s;%s%s" % (telswitch, roomname, exitname, exitshort, backstring)
self.execute_cmd(digstring)
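# Illustrative note (added commentary, not in the original source): a call such
# as
#   tunnel/tel n = house;home
# would be expanded by the code above into
#   dig/teleport house;home = north;n, south;s
# i.e. the cardinal abbreviation becomes the exit alias and the opposite
# direction is used for the automatic return exit (unless /oneway is given).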
class CmdLink(COMMAND_DEFAULT_CLASS):
"""
link existing rooms together with exits
Usage:
link[/switches] <object> = <target>
link[/switches] <object> =
link[/switches] <object>
Switch:
twoway - connect two exits. For this to work, BOTH <object>
and <target> must be exit objects.
If <object> is an exit, set its destination to <target>. Two-way operation
instead sets the destination to the *locations* of the respective given
arguments.
The second form (a lone =) sets the destination to None (same as
the unlink command) and the third form (without =) just shows the
currently set destination.
"""
key = "@link"
locks = "cmd:perm(link) or perm(Builder)"
help_category = "Building"
def func(self):
"""Perform the link"""
caller = self.caller
if not self.args:
caller.msg("Usage: link[/twoway] <object> = <target>")
return
object_name = self.lhs
# try to search locally first
results = caller.search(object_name, quiet=True)
if len(results) > 1: # local results was a multimatch. Inform them to be more specific
_AT_SEARCH_RESULT = variable_from_module(*settings.SEARCH_AT_RESULT.rsplit(".", 1))
return _AT_SEARCH_RESULT(results, caller, query=object_name)
elif len(results) == 1: # A unique local match
obj = results[0]
else: # No matches. Search globally
obj = caller.search(object_name, global_search=True)
if not obj:
return
if self.rhs:
# this means a target name was given
target = caller.search(self.rhs, global_search=True)
if not target:
return
if target == obj:
self.caller.msg("Cannot link an object to itself.")
return
string = ""
note = "Note: %s(%s) did not have a destination set before. Make sure you linked the right thing."
if not obj.destination:
string = note % (obj.name, obj.dbref)
if "twoway" in self.switches:
if not (target.location and obj.location):
string = "To create a two-way link, %s and %s must both have a location" % (
obj,
target,
)
string += " (i.e. they cannot be rooms, but should be exits)."
self.caller.msg(string)
return
if not target.destination:
string += note % (target.name, target.dbref)
obj.destination = target.location
target.destination = obj.location
string += "\nLink created %s (in %s) <-> %s (in %s) (two-way)." % (
obj.name,
obj.location,
target.name,
target.location,
)
else:
obj.destination = target
string += "\nLink created %s -> %s (one way)." % (obj.name, target)
elif self.rhs is None:
# this means that no = was given (otherwise rhs
# would have been an empty string). So we inspect
# the home/destination on object
dest = obj.destination
if dest:
string = "%s is an exit to %s." % (obj.name, dest.name)
else:
string = "%s is not an exit. Its home location is %s." % (obj.name, obj.home)
else:
# We gave the command link 'obj = ' which means we want to
# clear destination.
if obj.destination:
obj.destination = None
string = "Former exit %s no longer links anywhere." % obj.name
else:
string = "%s had no destination to unlink." % obj.name
# give feedback
caller.msg(string.strip())
class CmdUnLink(CmdLink):
"""
remove exit-connections between rooms
Usage:
unlink <Object>
Unlinks an object, for example an exit, disconnecting
it from whatever it was connected to.
"""
# this is just a child of CmdLink
key = "unlink"
locks = "cmd:perm(unlink) or perm(Builder)"
help_category = "Building"
def func(self):
"""
All we need to do here is to set the right command
and call func in CmdLink
"""
caller = self.caller
if not self.args:
caller.msg("Usage: unlink <object>")
return
# This mimics 'link <obj> = ' which is the same as unlink
self.rhs = ""
# call the link functionality
super().func()
class CmdSetHome(CmdLink):
"""
set an object's home location
Usage:
sethome <obj> [= <home_location>]
sethome <obj>
The "home" location is a "safety" location for objects; they
will be moved there if their current location ceases to exist. All
objects should always have a home location for this reason.
It is also a convenient target of the "home" command.
If no location is given, just view the object's home location.
"""
key = "@sethome"
locks = "cmd:perm(sethome) or perm(Builder)"
help_category = "Building"
def func(self):
"""implement the command"""
if not self.args:
string = "Usage: sethome <obj> [= <home_location>]"
self.caller.msg(string)
return
obj = self.caller.search(self.lhs, global_search=True)
if not obj:
return
if not self.rhs:
# just view
home = obj.home
if not home:
string = "This object has no home location set!"
else:
string = "%s's current home is %s(%s)." % (obj, home, home.dbref)
else:
# set a home location
new_home = self.caller.search(self.rhs, global_search=True)
if not new_home:
return
old_home = obj.home
obj.home = new_home
if old_home:
string = "Home location of %s was changed from %s(%s) to %s(%s)." % (
obj,
old_home,
old_home.dbref,
new_home,
new_home.dbref,
)
else:
string = "Home location of %s was set to %s(%s)." % (obj, new_home, new_home.dbref)
self.caller.msg(string)
class CmdListCmdSets(COMMAND_DEFAULT_CLASS):
"""
list command sets defined on an object
Usage:
cmdsets <obj>
This displays all cmdsets assigned
to a user. Defaults to yourself.
"""
key = "@cmdsets"
locks = "cmd:perm(listcmdsets) or perm(Builder)"
help_category = "Building"
def func(self):
"""list the cmdsets"""
caller = self.caller
if self.arglist:
obj = caller.search(self.arglist[0])
if not obj:
return
else:
obj = caller
string = "%s" % obj.cmdset
caller.msg(string)
class CmdName(ObjManipCommand):
"""
change the name and/or aliases of an object
Usage:
name <obj> = <newname>;alias1;alias2
Rename an object to something new. Use *obj to
rename an account.
"""
key = "@name"
aliases = ["@rename"]
locks = "cmd:perm(rename) or perm(Builder)"
help_category = "Building"
def func(self):
"""change the name"""
caller = self.caller
if not self.args:
caller.msg("Usage: name <obj> = <newname>[;alias;alias;...]")
return
obj = None
if self.lhs_objs:
objname = self.lhs_objs[0]["name"]
if objname.startswith("*"):
# account mode
obj = caller.account.search(objname.lstrip("*"))
if obj:
if self.rhs_objs[0]["aliases"]:
caller.msg("Accounts can't have aliases.")
return
newname = self.rhs
if not newname:
caller.msg("No name defined!")
return
if not (obj.access(caller, "control") or obj.access(caller, "edit")):
caller.msg("You don't have right to edit this account %s." % obj)
return
obj.username = newname
obj.save()
caller.msg("Account's name changed to '%s'." % newname)
return
# object search, also with *
obj = caller.search(objname)
if not obj:
return
if self.rhs_objs:
newname = self.rhs_objs[0]["name"]
aliases = self.rhs_objs[0]["aliases"]
else:
newname = self.rhs
aliases = None
if not newname and not aliases:
caller.msg("No names or aliases defined!")
return
if not (obj.access(caller, "control") or obj.access(caller, "edit")):
caller.msg("You don't have the right to edit %s." % obj)
return
# change the name and set aliases:
if newname:
obj.key = newname
astring = ""
if aliases:
[obj.aliases.add(alias) for alias in aliases]
astring = " (%s)" % (", ".join(aliases))
# fix for exits - we need their exit-command to change name too
if obj.destination:
obj.flush_from_cache(force=True)
caller.msg("Object's name changed to '%s'%s." % (newname, astring))
class CmdOpen(ObjManipCommand):
"""
open a new exit from the current room
Usage:
open <new exit>[;alias;alias..][:typeclass] [,<return exit>[;alias;..][:typeclass]]] = <destination>
Handles the creation of exits. If a destination is given, the exit
will point there. The <return exit> argument sets up an exit at the
destination leading back to the current room. Destination name
can be given both as a #dbref and a name, if that name is globally
unique.
"""
key = "@open"
locks = "cmd:perm(open) or perm(Builder)"
help_category = "Building"
new_obj_lockstring = "control:id({id}) or perm(Admin);delete:id({id}) or perm(Admin)"
# a custom member method to chug out exits and do checks
def create_exit(self, exit_name, location, destination, exit_aliases=None, typeclass=None):
"""
Helper function to avoid code duplication.
At this point we know destination is a valid location
"""
caller = self.caller
string = ""
# check if this exit object already exists at the location.
# we need to ignore errors (so no automatic feedback) since we
# have to know the result of the search to decide what to do.
exit_obj = caller.search(exit_name, location=location, quiet=True, exact=True)
if len(exit_obj) > 1:
# give error message and return
caller.search(exit_name, location=location, exact=True)
return None
if exit_obj:
exit_obj = exit_obj[0]
if not exit_obj.destination:
# we are trying to link a non-exit
string = "'%s' already exists and is not an exit!\nIf you want to convert it "
string += (
"to an exit, you must assign an object to the 'destination' property first."
)
caller.msg(string % exit_name)
return None
# we are re-linking an old exit.
old_destination = exit_obj.destination
if old_destination:
string = "Exit %s already exists." % exit_name
if old_destination.id != destination.id:
# reroute the old exit.
exit_obj.destination = destination
if exit_aliases:
[exit_obj.aliases.add(alias) for alias in exit_aliases]
string += " Rerouted its old destination '%s' to '%s' and changed aliases." % (
old_destination.name,
destination.name,
)
else:
string += " It already points to the correct place."
else:
# exit does not exist before. Create a new one.
lockstring = self.new_obj_lockstring.format(id=caller.id)
if not typeclass:
typeclass = settings.BASE_EXIT_TYPECLASS
exit_obj = create.create_object(
typeclass,
key=exit_name,
location=location,
aliases=exit_aliases,
locks=lockstring,
report_to=caller,
)
if exit_obj:
# storing a destination is what makes it an exit!
exit_obj.destination = destination
string = (
""
if not exit_aliases
else " (aliases: %s)" % (", ".join([str(e) for e in exit_aliases]))
)
string = "Created new Exit '%s' from %s to %s%s." % (
exit_name,
location.name,
destination.name,
string,
)
else:
string = "Error: Exit '%s' not created." % exit_name
# emit results
caller.msg(string)
return exit_obj
def parse(self):
super().parse()
self.location = self.caller.location
if not self.args or not self.rhs:
self.caller.msg("Usage: open <new exit>[;alias...][:typeclass]"
"[,<return exit>[;alias..][:typeclass]]] "
"= <destination>")
raise InterruptCommand
if not self.location:
self.caller.msg("You cannot create an exit from a None-location.")
raise InterruptCommand
self.destination = self.caller.search(self.rhs, global_search=True)
if not self.destination:
raise InterruptCommand
self.exit_name = self.lhs_objs[0]["name"]
self.exit_aliases = self.lhs_objs[0]["aliases"]
self.exit_typeclass = self.lhs_objs[0]["option"]
def func(self):
"""
This is where the processing starts.
Uses the ObjManipCommand.parser() for pre-processing
as well as the self.create_exit() method.
"""
# Create exit
ok = self.create_exit(self.exit_name, self.location, self.destination,
self.exit_aliases, self.exit_typeclass)
if not ok:
# an error; the exit was not created, so we quit.
return
# Create back exit, if any
if len(self.lhs_objs) > 1:
back_exit_name = self.lhs_objs[1]["name"]
back_exit_aliases = self.lhs_objs[1]["aliases"]
back_exit_typeclass = self.lhs_objs[1]["option"]
self.create_exit(back_exit_name, self.destination, self.location, back_exit_aliases,
back_exit_typeclass)
def _convert_from_string(cmd, strobj):
"""
Converts a single object in *string form* to its equivalent python
type.
Python earlier than 2.6:
Handles floats, ints, and limited nested lists and dicts
(can't handle lists in a dict, for example, this is mainly due to
the complexity of parsing this rather than any technical difficulty -
if there is a need for set-ing such complex structures on the
command line we might consider adding it).
Python 2.6 and later:
Supports all Python structures through literal_eval as long as they
are valid Python syntax. If they are not (such as [test, test2], i.e.
without the quotes around the strings), the entire structure will
be converted to a string and a warning will be given.
We need to convert like this since all data being sent over the
telnet connection by the Account is text - but we will want to
store it as the "real" python type so we can do convenient
comparisons later (e.g. obj.db.value = 2, if value is stored as a
string this will always fail).
"""
# Use literal_eval to parse python structure exactly.
try:
return _LITERAL_EVAL(strobj)
except (SyntaxError, ValueError):
# treat as string
strobj = utils.to_str(strobj)
string = (
'|RNote: name "|r%s|R" was converted to a string. '
"Make sure this is acceptable." % strobj
)
cmd.caller.msg(string)
return strobj
except Exception as err:
string = "|RUnknown error in evaluating Attribute: {}".format(err)
return string
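# Illustrative note (added commentary, not in the original source): given the
# literal_eval-based conversion above,
#   "[1, 2, 3]"   -> the list [1, 2, 3]
#   "{'a': 1}"    -> the dict {'a': 1}
#   "3.5"         -> the float 3.5
#   "hello there" -> kept as the string "hello there" (with a warning),
# since the last value is not valid Python literal syntax.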
class CmdSetAttribute(ObjManipCommand):
"""
set attribute on an object or account
Usage:
set[/switch] <obj>/<attr>[:category] = <value>
set[/switch] <obj>/<attr>[:category] = # delete attribute
set[/switch] <obj>/<attr>[:category] # view attribute
set[/switch] *<account>/<attr>[:category] = <value>
Switch:
edit: Open the line editor (string values only)
script: If we're trying to set an attribute on a script
channel: If we're trying to set an attribute on a channel
account: If we're trying to set an attribute on an account
room: Setting an attribute on a room (global search)
exit: Setting an attribute on an exit (global search)
char: Setting an attribute on a character (global search)
character: Alias for char, as above.
Example:
set self/foo = "bar"
set/delete self/foo
set self/foo = $dbref(#53)
Sets attributes on objects. The second example form above clears a
previously set attribute while the third form inspects the current value of
the attribute (if any). The last one (with the star) is a shortcut for
operating on a player Account rather than an Object.
If you want <value> to be an object, use $dbref(#dbref) or
$search(key) to assign it. You need control or edit access to
the object you are adding.
The most common data to save with this command are strings and
numbers. You can however also set Python primitives such as lists,
dictionaries and tuples on objects (this might be important for
the functionality of certain custom objects). This is indicated
by you starting your value with one of |c'|n, |c"|n, |c(|n, |c[|n
or |c{ |n.
Once you have stored a Python primitive as noted above, you can include
|c[<key>]|n in <attr> to reference nested values in e.g. a list or dict.
Remember that if you use Python primitives like this, you must
write proper Python syntax too - notably you must include quotes
around your strings or you will get an error.
"""
key = "@set"
locks = "cmd:perm(set) or perm(Builder)"
help_category = "Building"
nested_re = re.compile(r"\[.*?\]")
not_found = object()
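# Illustrative note (added commentary, not in the original source): nested_re
# matches each bracketed segment of an attribute reference, e.g.
#   nested_re.findall("inventory['weapons'][0]") -> ["['weapons']", "[0]"]
# which split_nested_attr() below uses to peel nested keys off the attr name.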
def check_obj(self, obj):
"""
This may be overridden by subclasses in case restrictions need to be
placed on whether certain objects can have attributes set by certain
accounts.
This function is expected to display its own error message.
Returning False will abort the command.
"""
return True
def check_attr(self, obj, attr_name, category):
"""
This may be overridden by subclasses in case restrictions need to be
placed on what attributes can be set by who beyond the normal lock.
This functions is expected to display its own error message. It is
run once for every attribute that is checked, blocking only those
attributes which are not permitted and letting the others through.
"""
return attr_name
def split_nested_attr(self, attr):
"""
Yields tuples of (possible attr name, nested keys on that attr).
For performance, this is biased to the deepest match, but allows compatibility
with older attrs that might have been named with `[]`'s.
> list(split_nested_attr("nested['asdf'][0]"))
[
('nested', ['asdf', 0]),
("nested['asdf']", [0]),
("nested['asdf'][0]", []),
]
"""
quotes = "\"'"
def clean_key(val):
val = val.strip("[]")
if val[0] in quotes:
return val.strip(quotes)
if val[0] == LIST_APPEND_CHAR:
# List insert/append syntax
return val
try:
return int(val)
except ValueError:
return val
parts = self.nested_re.findall(attr)
base_attr = ""
if parts:
base_attr = attr[: attr.find(parts[0])]
for index, part in enumerate(parts):
yield (base_attr, [clean_key(p) for p in parts[index:]])
base_attr += part
yield (attr, [])
def do_nested_lookup(self, value, *keys):
result = value
for key in keys:
try:
result = result.__getitem__(key)
except (IndexError, KeyError, TypeError):
return self.not_found
return result
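# Illustrative note (added commentary, not in the original source):
# do_nested_lookup walks successive keys into a stored container, e.g.
#   do_nested_lookup({"weapons": ["sword", "bow"]}, "weapons", 1) -> "bow"
# and returns self.not_found (rather than raising) if any step fails.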
def view_attr(self, obj, attr, category):
"""
Look up the value of an attribute and return a string displaying it.
"""
nested = False
for key, nested_keys in self.split_nested_attr(attr):
nested = True
if obj.attributes.has(key):
val = obj.attributes.get(key)
val = self.do_nested_lookup(val, *nested_keys)
if val is not self.not_found:
return f"\nAttribute {obj.name}/|w{attr}|n [category:{category}] = {val}"
error = f"\nAttribute {obj.name}/|w{attr} [category:{category}] does not exist."
if nested:
error += " (Nested lookups attempted)"
return error
def rm_attr(self, obj, attr, category):
"""
Remove an attribute from the object, or a nested data structure, and report back.
"""
nested = False
for key, nested_keys in self.split_nested_attr(attr):
nested = True
if obj.attributes.has(key, category):
if nested_keys:
del_key = nested_keys[-1]
val = obj.attributes.get(key, category=category)
deep = self.do_nested_lookup(val, *nested_keys[:-1])
if deep is not self.not_found:
try:
del deep[del_key]
except (IndexError, KeyError, TypeError):
continue
return f"\nDeleted attribute {obj.name}/|w{attr}|n [category:{category}]."
else:
exists = obj.attributes.has(key, category)
if exists:
obj.attributes.remove(attr, category=category)
return f"\nDeleted attribute {obj.name}/|w{attr}|n [category:{category}]."
else:
return (f"\nNo attribute {obj.name}/|w{attr}|n [category: {category}] "
"was found to delete.")
error = f"\nNo attribute {obj.name}/|w{attr}|n [category: {category}] was found to delete."
if nested:
error += " (Nested lookups attempted)"
return error
def set_attr(self, obj, attr, value, category):
done = False
for key, nested_keys in self.split_nested_attr(attr):
if obj.attributes.has(key, category) and nested_keys:
acc_key = nested_keys[-1]
lookup_value = obj.attributes.get(key, category)
deep = self.do_nested_lookup(lookup_value, *nested_keys[:-1])
if deep is not self.not_found:
# To support appending and inserting to lists
# a key that starts with LIST_APPEND_CHAR will insert a new item at that
# location, and move the other elements down.
# Using LIST_APPEND_CHAR alone will append to the list
if isinstance(acc_key, str) and acc_key[0] == LIST_APPEND_CHAR:
try:
if len(acc_key) > 1:
where = int(acc_key[1:])
deep.insert(where, value)
else:
deep.append(value)
except (ValueError, AttributeError):
pass
else:
value = lookup_value
attr = key
done = True
break
# List magic failed, just use like a key/index
try:
deep[acc_key] = value
except TypeError as err:
# Tuples can't be modified
return f"\n{err} - {deep}"
value = lookup_value
attr = key
done = True
break
verb = "Modified" if obj.attributes.has(attr) else "Created"
try:
if not done:
obj.attributes.add(attr, value, category)
return f"\n{verb} attribute {obj.name}/|w{attr}|n [category:{category}] = {value}"
except SyntaxError:
# this means literal_eval tried to parse a faulty string
return (
"\n|RCritical Python syntax error in your value. Only "
"primitive Python structures are allowed.\nYou also "
"need to use correct Python syntax. Remember especially "
"to put quotes around all strings inside lists and "
"dicts.|n"
)
@interactive
def edit_handler(self, obj, attr, caller):
"""Activate the line editor"""
def load(caller):
"""Called for the editor to load the buffer"""
try:
old_value = obj.attributes.get(attr, raise_exception=True)
except AttributeError:
# we set empty buffer on nonexisting Attribute because otherwise
# we'd always have the string "None" in the buffer to start with
old_value = ''
return str(old_value) # we already confirmed we are ok with this
def save(caller, buf):
"""Called when editor saves its buffer."""
obj.attributes.add(attr, buf)
caller.msg("Saved Attribute %s." % attr)
# check non-strings before activating editor
try:
old_value = obj.attributes.get(attr, raise_exception=True)
if not isinstance(old_value, str):
answer = yield(
f"|rWarning: Attribute |w{attr}|r is of type |w{type(old_value).__name__}|r. "
"\nTo continue editing, it must be converted to (and saved as) a string. "
"Continue? [Y]/N?")
if answer.lower() in ('n', 'no'):
self.caller.msg("Aborted edit.")
return
except AttributeError:
pass
# start the editor
EvEditor(self.caller, load, save, key=f"{obj}/{attr}")
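# Note (added commentary, not in the original source): the bare `yield` used
# for the type-conversion prompt above works because edit_handler is wrapped
# in the @interactive decorator, which lets the command pause and wait for
# the player's answer before continuing.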
def search_for_obj(self, objname):
"""
Searches for an object matching objname. The object may be of different typeclasses.
Args:
objname: Name of the object we're looking for
Returns:
A typeclassed object, or None if nothing is found.
"""
from evennia.utils.utils import variable_from_module
_AT_SEARCH_RESULT = variable_from_module(*settings.SEARCH_AT_RESULT.rsplit(".", 1))
caller = self.caller
if objname.startswith("*") or "account" in self.switches:
found_obj = caller.search_account(objname.lstrip("*"))
elif "script" in self.switches:
found_obj = _AT_SEARCH_RESULT(search.search_script(objname), caller)
elif "channel" in self.switches:
found_obj = _AT_SEARCH_RESULT(search.search_channel(objname), caller)
else:
global_search = True
if "char" in self.switches or "character" in self.switches:
typeclass = settings.BASE_CHARACTER_TYPECLASS
elif "room" in self.switches:
typeclass = settings.BASE_ROOM_TYPECLASS
elif "exit" in self.switches:
typeclass = settings.BASE_EXIT_TYPECLASS
else:
global_search = False
typeclass = None
found_obj = caller.search(objname, global_search=global_search, typeclass=typeclass)
return found_obj
def func(self):
"""Implement the set attribute - a limited form of py."""
caller = self.caller
if not self.args:
caller.msg("Usage: set obj/attr[:category] = value. Use empty value to clear.")
return
# get values prepared by the parser
value = self.rhs
objname = self.lhs_objattr[0]["name"]
attrs = self.lhs_objattr[0]["attrs"]
category = self.lhs_objs[0].get("option") # None if unset
obj = self.search_for_obj(objname)
if not obj:
return
if not self.check_obj(obj):
return
result = []
if "edit" in self.switches:
# edit in the line editor
if not (obj.access(self.caller, "control") or obj.access(self.caller, "edit")):
caller.msg("You don't have permission to edit %s." % obj.key)
return
if len(attrs) > 1:
caller.msg("The Line editor can only be applied " "to one attribute at a time.")
return
if not attrs:
caller.msg("Use `set/edit <objname>/<attr>` to define the Attribute to edit.\nTo "
"edit the current room description, use `set/edit here/desc` (or "
"use the `desc` command).")
return
self.edit_handler(obj, attrs[0], caller)
return
if not value:
if self.rhs is None:
# no = means we inspect the attribute(s)
if not attrs:
attrs = [attr.key for attr in obj.attributes.get(category=None)]
for attr in attrs:
if not self.check_attr(obj, attr, category):
continue
result.append(self.view_attr(obj, attr, category))
# we view it without parsing markup.
self.caller.msg("".join(result).strip(), options={"raw": True})
return
else:
# deleting the attribute(s)
if not (obj.access(self.caller, "control") or obj.access(self.caller, "edit")):
caller.msg("You don't have permission to edit %s." % obj.key)
return
for attr in attrs:
if not self.check_attr(obj, attr, category):
continue
result.append(self.rm_attr(obj, attr, category))
else:
# setting attribute(s). Make sure to convert to real Python type before saving.
# add support for $dbref() and $search() in set argument
global _ATTRFUNCPARSER
if not _ATTRFUNCPARSER:
_ATTRFUNCPARSER = funcparser.FuncParser(
{"dbref": funcparser.funcparser_callable_search,
"search": funcparser.funcparser_callable_search}
)
if not (obj.access(self.caller, "control") or obj.access(self.caller, "edit")):
caller.msg("You don't have permission to edit %s." % obj.key)
return
for attr in attrs:
if not self.check_attr(obj, attr, category):
continue
# from evennia import set_trace;set_trace()
parsed_value = _ATTRFUNCPARSER.parse(value, return_str=False, caller=caller)
if hasattr(parsed_value, "access"):
# if this is an object we must have the right to read it, if so,
# we will not convert it to a string
if not (parsed_value.access(caller, "control")
or parsed_value.access(self.caller, "edit")):
caller.msg("You don't have permission to set "
f"object with identifier '{value}'.")
continue
value = parsed_value
else:
value = _convert_from_string(self, value)
result.append(self.set_attr(obj, attr, value, category))
# send feedback
caller.msg("".join(result).strip("\n"))
class CmdTypeclass(COMMAND_DEFAULT_CLASS):
"""
set or change an object's typeclass
Usage:
typeclass[/switch] <object> [= typeclass.path]
typeclass/prototype <object> = prototype_key
typeclasses or typeclass/list/show [typeclass.path]
swap - this is a shorthand for using /force/reset flags.
update - this is a shorthand for using the /force/update flags.
Switch:
show, examine - display the current typeclass of object (default) or, if
given a typeclass path, show the docstring of that typeclass.
update - *only* re-run at_object_creation on this object
meaning locks or other properties set later may remain.
reset - clean out *all* the attributes and properties on the
object - basically making this a new clean object. This will also
reset cmdsets!
force - change to the typeclass also if the object
already has a typeclass of the same name.
list - show available typeclasses. Only typeclasses in modules actually
imported or used from somewhere in the code will show up here
(those typeclasses are still available if you know the path)
prototype - clean and overwrite the object with the specified
prototype key - effectively making a whole new object.
Example:
type button = examples.red_button.RedButton
type/prototype button=a red button
If the typeclass_path is not given, the current object's typeclass is
assumed.
View or set an object's typeclass. If setting, the creation hooks of the
new typeclass will be run on the object. If you have clashing properties on
the old class, use /reset. By default you are protected from changing to a
typeclass of the same name as the one you already have - use /force to
override this protection.
The given typeclass must be identified by its location using python
dot-notation pointing to the correct module and class. If no typeclass is
given (or a wrong typeclass is given), errors in the path or new typeclass
will lead to the old typeclass being kept. The location of the typeclass
module is searched from the default typeclass directory, as defined in the
server settings.
"""
key = "@typeclass"
aliases = ["@type", "@parent", "@swap", "@update", "@typeclasses"]
switch_options = ("show", "examine", "update", "reset", "force", "list", "prototype")
locks = "cmd:perm(typeclass) or perm(Builder)"
help_category = "Building"
def _generic_search(self, query, typeclass_path):
caller = self.caller
if typeclass_path:
# make sure we search the right database table
try:
new_typeclass = class_from_module(typeclass_path)
except ImportError:
# this could be a prototype and not a typeclass at all
return caller.search(query)
dbclass = new_typeclass.__dbclass__
if caller.__dbclass__ == dbclass:
# object or account match
obj = caller.search(query)
if not obj:
return
elif (self.account and self.account.__dbclass__ == dbclass):
# applying account while caller is object
caller.msg(f"Trying to search {new_typeclass} with query '{self.lhs}'.")
obj = self.account.search(query)
if not obj:
return
elif hasattr(caller, "puppet") and caller.puppet.__dbclass__ == dbclass:
# applying object while caller is account
caller.msg(f"Trying to search {new_typeclass} with query '{self.lhs}'.")
obj = caller.puppet.search(query)
if not obj:
return
else:
# other mismatch between caller and specified typeclass
caller.msg(f"Trying to search {new_typeclass} with query '{self.lhs}'.")
obj = new_typeclass.search(query)
if not obj:
if isinstance(obj, list):
caller.msg(f"Could not find {new_typeclass} with query '{self.lhs}'.")
return
else:
# no rhs, use caller's typeclass
obj = caller.search(query)
if not obj:
return
return obj
def func(self):
"""Implements command"""
caller = self.caller
if "list" in self.switches or self.cmdname in ('typeclasses', '@typeclasses'):
tclasses = get_all_typeclasses()
contribs = [key for key in sorted(tclasses) if key.startswith("evennia.contrib")] or [
"<None loaded>"
]
core = [
key for key in sorted(tclasses) if key.startswith("evennia") and key not in contribs
] or ["<None loaded>"]
game = [key for key in sorted(tclasses) if not key.startswith("evennia")] or [
"<None loaded>"
]
string = (
"|wCore typeclasses|n\n"
" {core}\n"
"|wLoaded Contrib typeclasses|n\n"
" {contrib}\n"
"|wGame-dir typeclasses|n\n"
" {game}"
).format(
core="\n ".join(core), contrib="\n ".join(contribs), game="\n ".join(game)
)
EvMore(caller, string, exit_on_lastpage=True)
return
if not self.args:
caller.msg("Usage: %s <object> [= typeclass]" % self.cmdstring)
return
if "show" in self.switches or "examine" in self.switches:
oquery = self.lhs
obj = caller.search(oquery, quiet=True)
if not obj:
# no object found to examine, see if it's a typeclass-path instead
tclasses = get_all_typeclasses()
matches = [
(key, tclass) for key, tclass in tclasses.items() if key.endswith(oquery)
]
nmatches = len(matches)
if nmatches > 1:
caller.msg(
"Multiple typeclasses found matching {}:\n {}".format(
oquery, "\n ".join(tup[0] for tup in matches)
)
)
elif not matches:
caller.msg("No object or typeclass path found to match '{}'".format(oquery))
else:
# one match found
caller.msg(
"Docstring for typeclass '{}':\n{}".format(oquery, matches[0][1].__doc__)
)
else:
# do the search again to get the error handling in case of multi-match
obj = caller.search(oquery)
if not obj:
return
caller.msg(
"{}'s current typeclass is '{}.{}'".format(
obj.name, obj.__class__.__module__, obj.__class__.__name__
)
)
return
obj = self._generic_search(self.lhs, self.rhs)
if not obj:
return
if not hasattr(obj, "__dbclass__"):
string = "%s is not a typed object." % obj.name
caller.msg(string)
return
new_typeclass = self.rhs or obj.path
prototype = None
if "prototype" in self.switches:
key = self.rhs
prototype = protlib.search_prototype(key=key)
if len(prototype) > 1:
caller.msg(
"More than one match for {}:\n{}".format(
key, "\n".join(proto.get("prototype_key", "") for proto in prototype)
)
)
return
elif prototype:
# one match
prototype = prototype[0]
else:
# no match
caller.msg("No prototype '{}' was found.".format(key))
return
new_typeclass = prototype["typeclass"]
self.switches.append("force")
if "show" in self.switches or "examine" in self.switches:
string = "%s's current typeclass is %s." % (obj.name, obj.__class__)
caller.msg(string)
return
if self.cmdstring in ("swap", "@swap"):
self.switches.append("force")
self.switches.append("reset")
elif self.cmdstring in ("update", "@update"):
self.switches.append("force")
self.switches.append("update")
if not (obj.access(caller, "control") or obj.access(caller, "edit")):
caller.msg("You are not allowed to do that.")
return
if not hasattr(obj, "swap_typeclass"):
caller.msg("This object cannot have a type at all!")
return
is_same = obj.is_typeclass(new_typeclass, exact=True)
if is_same and "force" not in self.switches:
string = (f"{obj.name} already has the typeclass '{new_typeclass}'. "
"Use /force to override.")
else:
update = "update" in self.switches
reset = "reset" in self.switches
hooks = "at_object_creation" if update and not reset else "all"
old_typeclass_path = obj.typeclass_path
# special prompt for the user in cases where we want
# to confirm changes.
if "prototype" in self.switches:
diff, _ = spawner.prototype_diff_from_object(prototype, obj)
txt = spawner.format_diff(diff)
prompt = (
"Applying prototype '%s' over '%s' will cause the follow changes:\n%s\n"
% (prototype["key"], obj.name, txt)
)
if not reset:
prompt += "\n|yWARNING:|n Use the /reset switch to apply the prototype over a blank state."
prompt += "\nAre you sure you want to apply these changes [yes]/no?"
answer = yield (prompt)
if answer and answer in ("no", "n"):
caller.msg("Canceled: No changes were applied.")
return
# we let this raise exception if needed
obj.swap_typeclass(
new_typeclass, clean_attributes=reset, clean_cmdsets=reset, run_start_hooks=hooks
)
if "prototype" in self.switches:
modified = spawner.batch_update_objects_with_prototype(
prototype, objects=[obj], caller=self.caller)
prototype_success = modified > 0
if not prototype_success:
caller.msg("Prototype %s failed to apply." % prototype["key"])
if is_same:
string = "%s updated its existing typeclass (%s).\n" % (obj.name, obj.path)
else:
string = "%s changed typeclass from %s to %s.\n" % (
obj.name,
old_typeclass_path,
obj.typeclass_path,
)
if update:
string += "Only the at_object_creation hook was run (update mode)."
else:
string += "All object creation hooks were run."
if reset:
string += " All old attributes where deleted before the swap."
else:
string += " Attributes set before swap were not removed."
if "prototype" in self.switches and prototype_success:
string += (
" Prototype '%s' was successfully applied over the object type."
% prototype["key"]
)
caller.msg(string)
class CmdWipe(ObjManipCommand):
"""
clear all attributes from an object
Usage:
wipe <object>[/<attr>[/<attr>...]]
Example:
wipe box
wipe box/colour
Wipes all of an object's attributes, or optionally only those
matching the given attribute-wildcard search string.
"""
key = "@wipe"
locks = "cmd:perm(wipe) or perm(Builder)"
help_category = "Building"
def func(self):
"""
inp is the dict produced in ObjManipCommand.parse()
"""
caller = self.caller
if not self.args:
caller.msg("Usage: wipe <object>[/<attr>/<attr>...]")
return
# get the attributes set by our custom parser
objname = self.lhs_objattr[0]["name"]
attrs = self.lhs_objattr[0]["attrs"]
obj = caller.search(objname)
if not obj:
return
if not (obj.access(caller, "control") or obj.access(caller, "edit")):
caller.msg("You are not allowed to do that.")
return
if not attrs:
# wipe everything
obj.attributes.clear()
string = "Wiped all attributes on %s." % obj.name
else:
for attrname in attrs:
obj.attributes.remove(attrname)
string = "Wiped attributes %s on %s."
string = string % (",".join(attrs), obj.name)
caller.msg(string)
class CmdLock(ObjManipCommand):
"""
assign a lock definition to an object
Usage:
lock <object or *account>[ = <lockstring>]
or
lock[/switch] <object or *account>/<access_type>
Switch:
del - delete given access type
view - view lock associated with given access type (default)
If no lockstring is given, shows all locks on
object.
Lockstring is of the form
access_type:[NOT] func1(args)[ AND|OR][ NOT] func2(args) ...]
where func1, func2, ... are valid lockfuncs, with or without arguments.
Separator expressions need not be capitalized.
For example:
'get: id(25) or perm(Admin)'
The 'get' lock access_type is checked e.g. by the 'get' command.
An object locked with this example lock will only be possible to pick up
by Admins or by an object with id=25.
You can add several access_types after one another by separating
them by ';', i.e:
'get:id(25); delete:perm(Builder)'
"""
key = "@lock"
aliases = ["@locks"]
locks = "cmd: perm(locks) or perm(Builder)"
help_category = "Building"
def func(self):
"""Sets up the command"""
caller = self.caller
if not self.args:
string = (
"Usage: lock <object>[ = <lockstring>] or lock[/switch] " "<object>/<access_type>"
)
caller.msg(string)
return
if "/" in self.lhs:
# call of the form lock obj/access_type
objname, access_type = [p.strip() for p in self.lhs.split("/", 1)]
obj = None
if objname.startswith("*"):
obj = caller.search_account(objname.lstrip("*"))
if not obj:
obj = caller.search(objname)
if not obj:
return
has_control_access = obj.access(caller, "control")
if access_type == "control" and not has_control_access:
# only allow to change 'control' access if you have 'control' access already
caller.msg("You need 'control' access to change this type of lock.")
return
if not (has_control_access or obj.access(caller, "edit")):
caller.msg("You are not allowed to do that.")
return
lockdef = obj.locks.get(access_type)
if lockdef:
if "del" in self.switches:
obj.locks.delete(access_type)
string = "deleted lock %s" % lockdef
else:
string = lockdef
else:
string = "%s has no lock of access type '%s'." % (obj, access_type)
caller.msg(string)
return
if self.rhs:
# we have a = separator, so we are assigning a new lock
if self.switches:
swi = ", ".join(self.switches)
caller.msg(
"Switch(es) |w%s|n can not be used with a "
"lock assignment. Use e.g. "
"|wlock/del objname/locktype|n instead." % swi
)
return
objname, lockdef = self.lhs, self.rhs
obj = None
if objname.startswith("*"):
obj = caller.search_account(objname.lstrip("*"))
if not obj:
obj = caller.search(objname)
if not obj:
return
if not (obj.access(caller, "control") or obj.access(caller, "edit")):
caller.msg("You are not allowed to do that.")
return
ok = False
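            # strip any quotes the user may have wrapped around the lockstring,
            # e.g. lock box = "get:perm(Builder)" is stored as get:perm(Builder)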
lockdef = re.sub(r"\'|\"", "", lockdef)
try:
ok = obj.locks.add(lockdef)
except LockException as e:
caller.msg(str(e))
if "cmd" in lockdef.lower() and inherits_from(
obj, "evennia.objects.objects.DefaultExit"
):
# special fix to update Exits since "cmd"-type locks won't
# update on them unless their cmdsets are rebuilt.
obj.at_init()
if ok:
caller.msg("Added lock '%s' to %s." % (lockdef, obj))
return
# if we get here, we are just viewing all locks on obj
obj = None
if self.lhs.startswith("*"):
obj = caller.search_account(self.lhs.lstrip("*"))
if not obj:
obj = caller.search(self.lhs)
if not obj:
return
if not (obj.access(caller, "control") or obj.access(caller, "edit")):
caller.msg("You are not allowed to do that.")
return
caller.msg("\n".join(obj.locks.all()))
class CmdExamine(ObjManipCommand):
"""
get detailed information about an object
Usage:
examine [<object>[/attrname]]
examine [*<account>[/attrname]]
Switch:
account - examine an Account (same as adding *)
object - examine an Object (useful when OOC)
script - examine a Script
channel - examine a Channel
The examine command shows detailed game info about an
object and optionally a specific attribute on it.
If object is not specified, the current location is examined.
Append a * before the search string to examine an account.
"""
key = "@examine"
aliases = ["@ex", "@exam"]
locks = "cmd:perm(examine) or perm(Builder)"
help_category = "Building"
arg_regex = r"(/\w+?(\s|$))|\s|$"
switch_options = ["account", "object", "script", "channel"]
object_type = "object"
detail_color = "|c"
header_color = "|w"
quell_color = "|r"
separator = "-"
def msg(self, text):
"""
Central point for sending messages to the caller. This tags
the message as 'examine' for eventual custom markup in the client.
Attributes:
text (str): The text to send.
"""
self.caller.msg(text=(text, {"type": "examine"}))
def format_key(self, obj):
return f"{obj.name} ({obj.dbref})"
def format_aliases(self, obj):
if hasattr(obj, "aliases") and obj.aliases.all():
return ", ".join(utils.make_iter(str(obj.aliases)))
def format_typeclass(self, obj):
if hasattr(obj, "typeclass_path"):
return f"{obj.typename} ({obj.typeclass_path})"
def format_sessions(self, obj):
if hasattr(obj, "sessions"):
sessions = obj.sessions.all()
if sessions:
return ", ".join(f"#{sess.sessid}" for sess in obj.sessions.all())
def format_email(self, obj):
if hasattr(obj, "email") and obj.email:
return f"{self.detail_color}{obj.email}|n"
def format_account_key(self, account):
return f"{self.detail_color}{account.name}|n ({account.dbref})"
def format_account_typeclass(self, account):
return f"{account.typename} ({account.typeclass_path})"
def format_account_permissions(self, account):
perms = account.permissions.all()
if account.is_superuser:
perms = ["<Superuser>"]
elif not perms:
perms = ["<None>"]
perms = ", ".join(perms)
if account.attributes.has("_quell"):
perms += f" {self.quell_color}(quelled)|n"
return perms
def format_location(self, obj):
if hasattr(obj, "location") and obj.location:
return f"{obj.location.key} (#{obj.location.id})"
def format_home(self, obj):
if hasattr(obj, "home") and obj.home:
return f"{obj.home.key} (#{obj.home.id})"
def format_destination(self, obj):
if hasattr(obj, "destination") and obj.destination:
return f"{obj.destination.key} (#{obj.destination.id})"
def format_permissions(self, obj):
perms = obj.permissions.all()
if perms:
perms_string = ", ".join(perms)
if obj.is_superuser:
perms_string += " <Superuser>"
return perms_string
def format_locks(self, obj):
locks = str(obj.locks)
if locks:
return utils.fill(
"; ".join([lock for lock in locks.split(";")]), indent=2
)
return "Default"
def format_scripts(self, obj):
if hasattr(obj, "scripts") and hasattr(obj.scripts, "all") and obj.scripts.all():
return f"{obj.scripts}"
def format_single_tag(self, tag):
if tag.db_category:
return f"{tag.db_key}[{tag.db_category}]"
else:
return f"{tag.db_key}"
def format_tags(self, obj):
if hasattr(obj, "tags"):
tags = sorted(obj.tags.all(return_objs=True))
if tags:
formatted_tags = [self.format_single_tag(tag) for tag in tags]
return utils.fill(", ".join(formatted_tags), indent=2)
def format_single_cmdset_options(self, cmdset):
def _truefalse(string, value):
if value is None:
return ""
if value:
return f"{string}: T"
return f"{string}: F"
return ", ".join(
_truefalse(opt, getattr(cmdset, opt))
for opt in ("no_exits", "no_objs", "no_channels", "duplicates")
if getattr(cmdset, opt) is not None
)
def format_single_cmdset(self, cmdset):
options = self.format_single_cmdset_options(cmdset)
return f"{cmdset.path} [{cmdset.key}] ({cmdset.mergetype}, prio {cmdset.priority}{options}"
def format_stored_cmdsets(self, obj):
if hasattr(obj, "cmdset"):
stored_cmdset_strings = []
stored_cmdsets = sorted(obj.cmdset.all(), key=lambda x: x.priority, reverse=True)
for cmdset in stored_cmdsets:
if cmdset.key != "_EMPTY_CMDSET":
stored_cmdset_strings.append(self.format_single_cmdset(cmdset))
return "\n " + "\n ".join(stored_cmdset_strings)
def format_merged_cmdsets(self, obj, current_cmdset):
if not hasattr(obj, "cmdset"):
return None
all_cmdsets = [(cmdset.key, cmdset) for cmdset in current_cmdset.merged_from]
# we always at least try to add account- and session sets since these are ignored
# if we merge on the object level.
if hasattr(obj, "account") and obj.account:
# get Attribute-cmdsets if they exist
all_cmdsets.extend([(cmdset.key, cmdset) for cmdset in obj.account.cmdset.all()])
if obj.sessions.count():
# if there are more sessions than one on objects it's because of multisession mode
# we only show the first session's cmdset here (it is -in principle- possible
# that different sessions have different cmdsets but for admins who want such
# madness it is better that they overload with their own CmdExamine to handle it).
all_cmdsets.extend([(cmdset.key, cmdset)
for cmdset in obj.account.sessions.all()[0].cmdset.all()])
else:
try:
# we have to protect this since many objects don't have sessions.
all_cmdsets.extend([(cmdset.key, cmdset)
for cmdset in obj.get_session(obj.sessions.get()).cmdset.all()])
except (TypeError, AttributeError):
# an error means we are merging an object without a session
pass
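        # dict() keyed on cmdset.key removes duplicate cmdsets (later entries win);
        # we then keep only the cmdset objects themselves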
all_cmdsets = [cmdset for cmdset in dict(all_cmdsets).values()]
all_cmdsets.sort(key=lambda x: x.priority, reverse=True)
merged_cmdset_strings = []
for cmdset in all_cmdsets:
if cmdset.key != "_EMPTY_CMDSET":
merged_cmdset_strings.append(self.format_single_cmdset(cmdset))
return "\n " + "\n ".join(merged_cmdset_strings)
def format_current_cmds(self, obj, current_cmdset):
current_commands = sorted([cmd.key for cmd in current_cmdset if cmd.access(obj, "cmd")])
return "\n" + utils.fill(", ".join(current_commands), indent=2)
def _get_attribute_value_type(self, attrvalue):
typ = ""
if not isinstance(attrvalue, str):
try:
name = attrvalue.__class__.__name__
except AttributeError:
try:
name = attrvalue.__name__
except AttributeError:
name = attrvalue
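            # list/dict-type Attribute values are stored wrapped in _Saver* helper
            # classes; deserialize them so we can report the underlying Python type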
if str(name).startswith("_Saver"):
try:
typ = str(type(deserialize(attrvalue)))
                except Exception:
                    # deserialization failed - fall back to reporting the raw wrapped type
                    typ = str(type(attrvalue))
else:
typ = str(type(attrvalue))
return typ
def format_single_attribute_detail(self, obj, attr):
global _FUNCPARSER
if not _FUNCPARSER:
_FUNCPARSER = funcparser.FuncParser(settings.FUNCPARSER_OUTGOING_MESSAGES_MODULES)
key, category, value = attr.db_key, attr.db_category, attr.value
typ = self._get_attribute_value_type(value)
typ = f" |B[type: {typ}]|n" if typ else ""
value = utils.to_str(value)
value = _FUNCPARSER.parse(ansi_raw(value), escape=True)
return (f"Attribute {obj.name}/{self.header_color}{key}|n "
f"[category={category}]{typ}:\n\n{value}")
def format_single_attribute(self, attr):
global _FUNCPARSER
if not _FUNCPARSER:
_FUNCPARSER = funcparser.FuncParser(settings.FUNCPARSER_OUTGOING_MESSAGES_MODULES)
key, category, value = attr.db_key, attr.db_category, attr.value
typ = self._get_attribute_value_type(value)
typ = f" |B[type: {typ}]|n" if typ else ""
value = utils.to_str(value)
value = _FUNCPARSER.parse(ansi_raw(value), escape=True)
value = utils.crop(value)
if category:
return f"{self.header_color}{key}|n[{category}]={value}{typ}"
else:
return f"{self.header_color}{key}|n={value}{typ}"
def format_attributes(self, obj):
output = "\n " + "\n ".join(
sorted(self.format_single_attribute(attr)
for attr in obj.db_attributes.all())
)
if output.strip():
# we don't want just an empty line
return output
def format_nattributes(self, obj):
try:
ndb_attr = obj.nattributes.all(return_tuples=True)
except Exception:
return
if ndb_attr and ndb_attr[0]:
return "\n " + " \n".join(
sorted(self.format_single_attribute(attr)
for attr, value in ndb_attr)
)
def format_exits(self, obj):
if hasattr(obj, "exits"):
exits = ", ".join(f"{exit.name}({exit.dbref})" for exit in obj.exits)
return exits if exits else None
def format_chars(self, obj):
if hasattr(obj, "contents"):
chars = ", ".join(f"{obj.name}({obj.dbref})" for obj in obj.contents
if obj.account)
return chars if chars else None
def format_things(self, obj):
if hasattr(obj, "contents"):
things = ", ".join(f"{obj.name}({obj.dbref})" for obj in obj.contents
if not obj.account and not obj.destination)
return things if things else None
def format_script_desc(self, obj):
if hasattr(obj, "db_desc") and obj.db_desc:
return crop(obj.db_desc, 20)
def format_script_is_persistent(self, obj):
if hasattr(obj, "db_persistent"):
return "T" if obj.db_persistent else "F"
def format_script_timer_data(self, obj):
if hasattr(obj, "db_interval") and obj.db_interval > 0:
start_delay = "T" if obj.db_start_delay else "F"
next_repeat = obj.time_until_next_repeat()
active = "|grunning|n" if obj.db_is_active and next_repeat else "|rinactive|n"
interval = obj.db_interval
next_repeat = "N/A" if next_repeat is None else f"{next_repeat}s"
repeats = ""
if obj.db_repeats:
remaining_repeats = obj.remaining_repeats()
remaining_repeats = 0 if remaining_repeats is None else remaining_repeats
repeats = f" - {remaining_repeats}/{obj.db_repeats} remain"
return (f"{active} - interval: {interval}s "
f"(next: {next_repeat}{repeats}, start_delay: {start_delay})")
def format_channel_sub_totals(self, obj):
if hasattr(obj, "db_account_subscriptions"):
account_subs = obj.db_account_subscriptions.all()
object_subs = obj.db_object_subscriptions.all()
online = len(obj.subscriptions.online())
ntotal = account_subs.count() + object_subs.count()
return f"{ntotal} ({online} online)"
def format_channel_account_subs(self, obj):
if hasattr(obj, "db_account_subscriptions"):
account_subs = obj.db_account_subscriptions.all()
if account_subs:
return "\n " + "\n ".join(
format_grid([sub.key for sub in account_subs], sep=' ', width=_DEFAULT_WIDTH))
def format_channel_object_subs(self, obj):
if hasattr(obj, "db_object_subscriptions"):
object_subs = obj.db_object_subscriptions.all()
if object_subs:
return "\n " + "\n ".join(
format_grid([sub.key for sub in object_subs], sep=' ', width=_DEFAULT_WIDTH))
def get_formatted_obj_data(self, obj, current_cmdset):
"""
Calls all other `format_*` methods.
"""
objdata = {}
objdata["Name/key"] = self.format_key(obj)
objdata["Aliases"] = self.format_aliases(obj)
objdata["Typeclass"] = self.format_typeclass(obj)
objdata["Sessions"] = self.format_sessions(obj)
objdata["Email"] = self.format_email(obj)
if hasattr(obj, "has_account") and obj.has_account:
objdata["Account"] = self.format_account_key(obj.account)
objdata[" Account Typeclass"] = self.format_account_typeclass(obj.account)
objdata[" Account Permissions"] = self.format_account_permissions(obj.account)
objdata["Location"] = self.format_location(obj)
objdata["Home"] = self.format_home(obj)
objdata["Destination"] = self.format_destination(obj)
objdata["Permissions"] = self.format_permissions(obj)
objdata["Locks"] = self.format_locks(obj)
if (current_cmdset
and not (len(obj.cmdset.all()) == 1
and obj.cmdset.current.key == "_EMPTY_CMDSET")):
objdata["Stored Cmdset(s)"] = self.format_stored_cmdsets(obj)
objdata["Merged Cmdset(s)"] = self.format_merged_cmdsets(obj, current_cmdset)
objdata[f"Commands vailable to {obj.key} (result of Merged Cmdset(s))"] = (
self.format_current_cmds(obj, current_cmdset))
if self.object_type == "script":
objdata["Description"] = self.format_script_desc(obj)
objdata["Persistent"] = self.format_script_is_persistent(obj)
objdata["Script Repeat"] = self.format_script_timer_data(obj)
objdata["Scripts"] = self.format_scripts(obj)
objdata["Tags"] = self.format_tags(obj)
objdata["Persistent Attributes"] = self.format_attributes(obj)
objdata["Non-Persistent Attributes"] = self.format_nattributes(obj)
objdata["Exits"] = self.format_exits(obj)
objdata["Characters"] = self.format_chars(obj)
objdata["Content"] = self.format_things(obj)
if self.object_type == "channel":
objdata["Subscription Totals"] = self.format_channel_sub_totals(obj)
objdata["Account Subscriptions"] = self.format_channel_account_subs(obj)
objdata["Object Subscriptions"] = self.format_channel_object_subs(obj)
return objdata
def format_output(self, obj, current_cmdset):
"""
Formats the full examine page return.
"""
objdata = self.get_formatted_obj_data(obj, current_cmdset)
# format output
main_str = []
max_width = -1
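        # track the widest rendered line so the separator rule below can match the content width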
for header, block in objdata.items():
if block is not None:
blockstr = f"{self.header_color}{header}|n: {block}"
max_width = max(max_width, max(display_len(line) for line in blockstr.split("\n")))
main_str.append(blockstr)
main_str = "\n".join(main_str)
max_width = max(0, min(self.client_width(), max_width))
sep = self.separator * max_width
return f"{sep}\n{main_str}\n{sep}"
def _search_by_object_type(self, obj_name, objtype):
"""
Route to different search functions depending on the object type being
examined. This also handles error reporting for multimatches/no matches.
Args:
obj_name (str): The search query.
objtype (str): One of 'object', 'account', 'script' or 'channel'.
Returns:
any: `None` if no match or multimatch, otherwise a single result.
"""
obj = None
if objtype == "object":
obj = self.caller.search(obj_name)
elif objtype == "account":
try:
obj = self.caller.search_account(obj_name.lstrip("*"))
except AttributeError:
# this means we are calling examine from an account object
obj = self.caller.search(
obj_name.lstrip("*"), search_object="object" in self.switches
)
else:
obj = getattr(search, f"search_{objtype}")(obj_name)
if not obj:
self.caller.msg(f"No {objtype} found with key {obj_name}.")
obj = None
elif len(obj) > 1:
err = "Multiple {objtype} found with key {obj_name}:\n{matches}"
                self.caller.msg(err.format(
                    objtype=objtype,
                    obj_name=obj_name,
matches=", ".join(f"{ob.key}(#{ob.id})" for ob in obj)
))
obj = None
else:
obj = obj[0]
return obj
def parse(self):
super().parse()
self.examine_objs = []
if not self.args:
# If no arguments are provided, examine the invoker's location.
if hasattr(self.caller, "location"):
self.examine_objs.append((self.caller.location, None))
else:
self.msg("You need to supply a target to examine.")
raise InterruptCommand
else:
for objdef in self.lhs_objattr:
# note that we check the objtype for every repeat; this will always
                # be the same result, but it makes for cleaner code and multi-examine
# is not so common anyway.
obj = None
obj_name = objdef["name"] # name
obj_attrs = objdef["attrs"] # /attrs
# identify object type, in prio account - script - channel
object_type = "object"
if (utils.inherits_from(self.caller, "evennia.accounts.accounts.DefaultAccount")
or "account" in self.switches or obj_name.startswith("*")):
object_type = "account"
elif "script" in self.switches:
object_type = "script"
elif "channel" in self.switches:
object_type = "channel"
self.object_type = object_type
obj = self._search_by_object_type(obj_name, object_type)
if obj:
self.examine_objs.append((obj, obj_attrs))
def func(self):
"""Process command"""
for obj, obj_attrs in self.examine_objs:
# these are parsed out in .parse already
if not obj.access(self.caller, "examine"):
# If we don't have special info access, just look
# at the object instead.
self.msg(self.caller.at_look(obj))
continue
if obj_attrs:
# we are only interested in specific attributes
attrs = [attr for attr in obj.db_attributes.all() if attr.db_key in obj_attrs]
if not attrs:
self.msg("No attributes found on {obj.name}.")
else:
out_strings = []
for attr in attrs:
out_strings.append(self.format_single_attribute_detail(obj, attr))
out_str = "\n".join(out_strings)
max_width = max(display_len(line) for line in out_strings)
max_width = max(0, min(max_width, self.client_width()))
sep = self.separator * max_width
self.msg(f"{sep}\n{out_str}")
return
# examine the obj itself
if self.object_type in ("object", "account"):
# for objects and accounts we need to set up an asynchronous
# fetch of the cmdset and not proceed with the examine display
# until the fetch is complete
session = None
if obj.sessions.count():
mergemode = "session"
session = obj.sessions.get()[0]
elif self.object_type == "account":
mergemode = "account"
else:
mergemode = "object"
account = None
objct = None
if self.object_type == "account":
account = obj
else:
account = obj.account
objct = obj
# this is usually handled when a command runs, but when we examine
# we may have leftover inherited cmdsets directly after a move etc.
obj.cmdset.update()
# using callback to print results whenever function returns.
def _get_cmdset_callback(current_cmdset):
self.msg(self.format_output(obj, current_cmdset).strip())
get_and_merge_cmdsets(
obj, session, account, objct, mergemode, self.raw_string
).addCallback(_get_cmdset_callback)
else:
# for objects without cmdsets we can proceed to examine immediately
self.msg(self.format_output(obj, None).strip())
class CmdFind(COMMAND_DEFAULT_CLASS):
"""
search the database for objects
Usage:
find[/switches] <name or dbref or *account> [= dbrefmin[-dbrefmax]]
locate - this is a shorthand for using the /loc switch.
Switches:
room - only look for rooms (location=None)
exit - only look for exits (destination!=None)
char - only look for characters (BASE_CHARACTER_TYPECLASS)
exact - only exact matches are returned.
loc - display object location if exists and match has one result
startswith - search for names starting with the string, rather than containing
Searches the database for an object of a particular name or exact #dbref.
Use *accountname to search for an account. The switches allows for
limiting object matches to certain game entities. Dbrefmin and dbrefmax
limits matches to within the given dbrefs range, or above/below if only
one is given.
"""
key = "@find"
aliases = ["@search", "@locate"]
switch_options = ("room", "exit", "char", "exact", "loc", "startswith")
locks = "cmd:perm(find) or perm(Builder)"
help_category = "Building"
def func(self):
"""Search functionality"""
caller = self.caller
switches = self.switches
if not self.args or (not self.lhs and not self.rhs):
caller.msg("Usage: find <string> [= low [-high]]")
return
if "locate" in self.cmdstring: # Use option /loc as a default for locate command alias
switches.append("loc")
searchstring = self.lhs
try:
# Try grabbing the actual min/max id values by database aggregation
qs = ObjectDB.objects.values("id").aggregate(low=Min("id"), high=Max("id"))
low, high = sorted(qs.values())
if not (low and high):
raise ValueError(
f"{self.__class__.__name__}: Min and max ID not returned by aggregation; falling back to queryset slicing."
)
except Exception as e:
logger.log_trace(e)
# If that doesn't work for some reason (empty DB?), guess the lower
# bound and do a less-efficient query to find the upper.
low, high = 1, ObjectDB.objects.all().order_by("-id").first().id
if self.rhs:
try:
# Check that rhs is either a valid dbref or dbref range
bounds = tuple(
                    sorted(dbref(x, False) for x in re.split(r"[-\s]+", self.rhs.strip()))
)
# dbref() will return either a valid int or None
assert bounds
# None should not exist in the bounds list
assert None not in bounds
low = bounds[0]
if len(bounds) > 1:
high = bounds[-1]
except AssertionError:
caller.msg("Invalid dbref range provided (not a number).")
return
except IndexError as e:
logger.log_err(
f"{self.__class__.__name__}: Error parsing upper and lower bounds of query."
)
logger.log_trace(e)
        low, high = min(low, high), max(low, high)
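        # decide the kind of search: an exact #dbref, a *account lookup, or a general key/alias query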
is_dbref = utils.dbref(searchstring)
is_account = searchstring.startswith("*")
restrictions = ""
if self.switches:
restrictions = ", %s" % (", ".join(self.switches))
if is_dbref or is_account:
if is_dbref:
# a dbref search
result = caller.search(searchstring, global_search=True, quiet=True)
string = "|wExact dbref match|n(#%i-#%i%s):" % (low, high, restrictions)
else:
# an account search
searchstring = searchstring.lstrip("*")
result = caller.search_account(searchstring, quiet=True)
string = "|wMatch|n(#%i-#%i%s):" % (low, high, restrictions)
if "room" in switches:
result = result if inherits_from(result, ROOM_TYPECLASS) else None
if "exit" in switches:
result = result if inherits_from(result, EXIT_TYPECLASS) else None
if "char" in switches:
result = result if inherits_from(result, CHAR_TYPECLASS) else None
if not result:
string += "\n |RNo match found.|n"
elif not low <= int(result[0].id) <= high:
string += "\n |RNo match found for '%s' in #dbref interval.|n" % searchstring
else:
result = result[0]
string += "\n|g %s - %s|n" % (result.get_display_name(caller), result.path)
if "loc" in self.switches and not is_account and result.location:
string += " (|wlocation|n: |g{}|n)".format(
result.location.get_display_name(caller)
)
else:
# Not an account/dbref search but a wider search; build a queryset.
# Searches for key and aliases
if "exact" in switches:
keyquery = Q(db_key__iexact=searchstring, id__gte=low, id__lte=high)
aliasquery = Q(
db_tags__db_key__iexact=searchstring,
db_tags__db_tagtype__iexact="alias",
id__gte=low,
id__lte=high,
)
elif "startswith" in switches:
keyquery = Q(db_key__istartswith=searchstring, id__gte=low, id__lte=high)
aliasquery = Q(
db_tags__db_key__istartswith=searchstring,
db_tags__db_tagtype__iexact="alias",
id__gte=low,
id__lte=high,
)
else:
keyquery = Q(db_key__icontains=searchstring, id__gte=low, id__lte=high)
aliasquery = Q(
db_tags__db_key__icontains=searchstring,
db_tags__db_tagtype__iexact="alias",
id__gte=low,
id__lte=high,
)
# Keep the initial queryset handy for later reuse
result_qs = ObjectDB.objects.filter(keyquery | aliasquery).distinct()
nresults = result_qs.count()
# Use iterator to minimize memory ballooning on large result sets
results = result_qs.iterator()
# Check and see if type filtering was requested; skip it if not
if any(x in switches for x in ("room", "exit", "char")):
obj_ids = set()
for obj in results:
if (
("room" in switches and inherits_from(obj, ROOM_TYPECLASS))
or ("exit" in switches and inherits_from(obj, EXIT_TYPECLASS))
or ("char" in switches and inherits_from(obj, CHAR_TYPECLASS))
):
obj_ids.add(obj.id)
# Filter previous queryset instead of requesting another
filtered_qs = result_qs.filter(id__in=obj_ids).distinct()
nresults = filtered_qs.count()
# Use iterator again to minimize memory ballooning
results = filtered_qs.iterator()
# still results after type filtering?
if nresults:
if nresults > 1:
header = f"{nresults} Matches"
else:
header = "One Match"
string = f"|w{header}|n(#{low}-#{high}{restrictions}):"
res = None
for res in results:
string += f"\n |g{res.get_display_name(caller)} - {res.path}|n"
if (
"loc" in self.switches
and nresults == 1
and res
and getattr(res, "location", None)
):
string += f" (|wlocation|n: |g{res.location.get_display_name(caller)}|n)"
else:
string = f"|wNo Matches|n(#{low}-#{high}{restrictions}):"
string += f"\n |RNo matches found for '{searchstring}'|n"
# send result
caller.msg(string.strip())
class ScriptEvMore(EvMore):
"""
Listing 1000+ Scripts can be very slow and memory-consuming. So
    we use this custom EvMore child to build an EvTable only for
each page of the list.
"""
def init_pages(self, scripts):
"""Prepare the script list pagination"""
script_pages = Paginator(scripts, max(1, int(self.height / 2)))
super().init_pages(script_pages)
def page_formatter(self, scripts):
"""Takes a page of scripts and formats the output
into an EvTable."""
if not scripts:
return "<No scripts>"
table = EvTable(
"|wdbref|n",
"|wobj|n",
"|wkey|n",
"|wintval|n",
"|wnext|n",
"|wrept|n",
"|wtypeclass|n",
"|wdesc|n",
align="r",
border="tablecols",
width=self.width,
)
for script in scripts:
nextrep = script.time_until_next_repeat()
if nextrep is None:
nextrep = script.db._paused_time
nextrep = f"PAUSED {int(nextrep)}s" if nextrep else "--"
else:
nextrep = f"{nextrep}s"
maxrepeat = script.repeats
remaining = script.remaining_repeats() or 0
if maxrepeat:
rept = "%i/%i" % (maxrepeat - remaining, maxrepeat)
else:
rept = "-/-"
table.add_row(
f"#{script.id}",
f"{script.obj.key}({script.obj.dbref})"
if (hasattr(script, "obj") and script.obj)
else "<Global>",
script.key,
script.interval if script.interval > 0 else "--",
nextrep,
rept,
script.typeclass_path.rsplit(".", 1)[-1],
crop(script.desc, width=20),
)
return str(table)
class CmdScripts(COMMAND_DEFAULT_CLASS):
"""
List and manage all running scripts. Allows for creating new global
scripts.
Usage:
script[/switches] [script-#dbref, key, script.path or <obj>]
script[/start||stop] <obj> = <script.path or script-key>
Switches:
start - start/unpause an existing script's timer.
stop - stops an existing script's timer
pause - pause a script's timer
delete - deletes script. This will also stop the timer as needed
Examples:
script - list scripts
script myobj - list all scripts on object
script foo.bar.Script - create a new global Script
script scriptname - examine named existing global script
script myobj = foo.bar.Script - create and assign script to object
script/stop myobj = scriptname - stop script on object
script/pause foo.Bar.Script - pause global script
script/delete myobj - delete ALL scripts on object
script/delete #dbref[-#dbref] - delete script or range by dbref
When given with an `<obj>` as left-hand-side, this creates and
assigns a new script to that object. Without an `<obj>`, this
manages and inspects global scripts
If no switches are given, this command just views all active
scripts. The argument can be either an object, at which point it
will be searched for all scripts defined on it, or a script name
or #dbref. For using the /stop switch, a unique script #dbref is
required since whole classes of scripts often have the same name.
Use the `script` build-level command for managing scripts attached to
objects.
"""
key = "@scripts"
aliases = ["@script"]
switch_options = ("create", "start", "stop", "pause", "delete")
locks = "cmd:perm(scripts) or perm(Builder)"
help_category = "System"
excluded_typeclass_paths = ["evennia.prototypes.prototypes.DbPrototype"]
switch_mapping = {
"create": "|gCreated|n",
"start": "|gStarted|n",
"stop": "|RStopped|n",
"pause": "|Paused|n",
"delete": "|rDeleted|n"
}
def _search_script(self, args):
# test first if this is a script match
scripts = ScriptDB.objects.get_all_scripts(key=args)
if scripts:
return scripts
# try typeclass path
scripts = ScriptDB.objects.filter(db_typeclass_path__iendswith=args)
if scripts:
return scripts
if "-" in args:
# may be a dbref-range
val1, val2 = (dbref(part.strip()) for part in args.split('-', 1))
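            # dbref() gives an int or None, so only build the inclusive id range if both bounds parsed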
if val1 and val2:
scripts = ScriptDB.objects.filter(id__in=(range(val1, val2 + 1)))
if scripts:
return scripts
def func(self):
"""implement method"""
caller = self.caller
if not self.args:
# show all scripts
scripts = ScriptDB.objects.all()
if not scripts:
caller.msg("No scripts found.")
return
ScriptEvMore(caller, scripts.order_by("id"), session=self.session)
return
# find script or object to operate on
scripts, obj = None, None
if self.rhs:
obj_query = self.lhs
script_query = self.rhs
else:
obj_query = script_query = self.args
scripts = self._search_script(script_query)
objects = ObjectDB.objects.object_search(obj_query)
obj = objects[0] if objects else None
if not self.switches:
# creation / view mode
if obj:
# we have an object
if self.rhs:
# creation mode
if obj.scripts.add(self.rhs, autostart=True):
caller.msg(
f"Script |w{self.rhs}|n successfully added and "
f"started on {obj.get_display_name(caller)}.")
else:
caller.msg(f"Script {self.rhs} could not be added and/or started "
f"on {obj.get_display_name(caller)} (or it started and "
"immediately shut down).")
else:
# just show all scripts on object
scripts = ScriptDB.objects.filter(db_obj=obj)
if scripts:
ScriptEvMore(caller, scripts.order_by("id"), session=self.session)
else:
caller.msg(f"No scripts defined on {obj}")
elif scripts:
# show found script(s)
ScriptEvMore(caller, scripts.order_by("id"), session=self.session)
else:
# create global script
try:
new_script = create.create_script(self.args)
except ImportError:
logger.log_trace()
new_script = None
if new_script:
caller.msg(f"Global Script Created - "
f"{new_script.key} ({new_script.typeclass_path})")
ScriptEvMore(caller, [new_script], session=self.session)
else:
caller.msg(f"Global Script |rNOT|n Created |r(see log)|n - "
f"arguments: {self.args}")
elif scripts or obj:
# modification switches - must operate on existing scripts
if not scripts:
scripts = ScriptDB.objects.filter(db_obj=obj)
if scripts.count() > 1:
ret = yield(f"Multiple scripts found: {scripts}. Are you sure you want to "
"operate on all of them? [Y]/N? ")
if ret.lower() in ('n', 'no'):
caller.msg("Aborted.")
return
for script in scripts:
script_key = script.key
script_typeclass_path = script.typeclass_path
scripttype = f"Script on {obj}" if obj else "Global Script"
for switch in self.switches:
verb = self.switch_mapping[switch]
msgs = []
try:
getattr(script, switch)()
except Exception:
logger.log_trace()
msgs.append(f"{scripttype} |rNOT|n {verb} |r(see log)|n - "
f"{script_key} ({script_typeclass_path})|n")
else:
msgs.append(f"{scripttype} {verb} - "
f"{script_key} ({script_typeclass_path})")
caller.msg("\n".join(msgs))
if "delete" not in self.switches:
ScriptEvMore(caller, [script], session=self.session)
else:
caller.msg("No scripts found.")
class CmdObjects(COMMAND_DEFAULT_CLASS):
"""
statistics on objects in the database
Usage:
objects [<nr>]
    Gives statistics on objects in the database as well as
    a list of the <nr> latest objects in the database. If not
    given, <nr> defaults to 10.
"""
key = "@objects"
locks = "cmd:perm(listobjects) or perm(Builder)"
help_category = "System"
def func(self):
"""Implement the command"""
caller = self.caller
nlim = int(self.args) if self.args and self.args.isdigit() else 10
nobjs = ObjectDB.objects.count()
Character = class_from_module(settings.BASE_CHARACTER_TYPECLASS)
nchars = Character.objects.all_family().count()
Room = class_from_module(settings.BASE_ROOM_TYPECLASS)
nrooms = Room.objects.all_family().count()
Exit = class_from_module(settings.BASE_EXIT_TYPECLASS)
nexits = Exit.objects.all_family().count()
nother = nobjs - nchars - nrooms - nexits
nobjs = nobjs or 1 # fix zero-div error with empty database
# total object sum table
totaltable = self.styled_table(
"|wtype|n", "|wcomment|n", "|wcount|n", "|w%|n", border="table", align="l"
)
totaltable.align = "l"
totaltable.add_row(
"Characters",
"(BASE_CHARACTER_TYPECLASS + children)",
nchars,
"%.2f" % ((float(nchars) / nobjs) * 100),
)
totaltable.add_row(
"Rooms",
"(BASE_ROOM_TYPECLASS + children)",
nrooms,
"%.2f" % ((float(nrooms) / nobjs) * 100),
)
totaltable.add_row(
"Exits",
"(BASE_EXIT_TYPECLASS + children)",
nexits,
"%.2f" % ((float(nexits) / nobjs) * 100),
)
totaltable.add_row("Other", "", nother, "%.2f" % ((float(nother) / nobjs) * 100))
# typeclass table
typetable = self.styled_table(
"|wtypeclass|n", "|wcount|n", "|w%|n", border="table", align="l"
)
typetable.align = "l"
dbtotals = ObjectDB.objects.get_typeclass_totals()
for stat in dbtotals:
typetable.add_row(
stat.get("typeclass", "<error>"),
stat.get("count", -1),
"%.2f" % stat.get("percent", -1),
)
# last N table
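        # ordering ascending by creation date and slicing from the end yields the nlim newest objects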
objs = ObjectDB.objects.all().order_by("db_date_created")[max(0, nobjs - nlim): ]
latesttable = self.styled_table(
"|wcreated|n", "|wdbref|n", "|wname|n", "|wtypeclass|n", align="l", border="table"
)
latesttable.align = "l"
for obj in objs:
latesttable.add_row(
utils.datetime_format(obj.date_created), obj.dbref, obj.key, obj.path
)
string = "\n|wObject subtype totals (out of %i Objects):|n\n%s" % (nobjs, totaltable)
string += "\n|wObject typeclass distribution:|n\n%s" % typetable
string += "\n|wLast %s Objects created:|n\n%s" % (min(nobjs, nlim), latesttable)
caller.msg(string)
class CmdTeleport(COMMAND_DEFAULT_CLASS):
"""
teleport object to another location
Usage:
tel/switch [<object> to||=] <target location>
Examples:
tel Limbo
tel/quiet box = Limbo
tel/tonone box
Switches:
quiet - don't echo leave/arrive messages to the source/target
locations for the move.
intoexit - if target is an exit, teleport INTO
the exit object instead of to its destination
tonone - if set, teleport the object to a None-location. If this
switch is set, <target location> is ignored.
Note that the only way to retrieve
an object from a None location is by direct #dbref
reference. A puppeted object cannot be moved to None.
loc - teleport object to the target's location instead of its contents
Teleports an object somewhere. If no object is given, you yourself are
teleported to the target location.
To lock an object from being teleported, set its `teleport` lock, it will be
checked with the caller. To block
a destination from being teleported to, set the destination's `teleport_here`
lock - it will be checked with the thing being teleported. Admins and
higher permissions can always teleport.
"""
key = "@teleport"
aliases = "@tel"
switch_options = ("quiet", "intoexit", "tonone", "loc")
rhs_split = ("=", " to ") # Prefer = delimiter, but allow " to " usage.
locks = "cmd:perm(teleport) or perm(Builder)"
help_category = "Building"
def parse(self):
"""
Breaking out searching here to make this easier to override.
"""
super().parse()
self.obj_to_teleport = self.caller
self.destination = None
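        # with '=' (or ' to ') the lhs is the object to move and the rhs the destination;
        # with only a lhs given, the caller teleports themselves to that target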
if self.rhs:
self.obj_to_teleport = self.caller.search(self.lhs, global_search=True)
if not self.obj_to_teleport:
self.caller.msg("Did not find object to teleport.")
raise InterruptCommand
self.destination = self.caller.search(self.rhs, global_search=True)
elif self.lhs:
self.destination = self.caller.search(self.lhs, global_search=True)
def func(self):
"""Performs the teleport"""
caller = self.caller
obj_to_teleport = self.obj_to_teleport
destination = self.destination
if "tonone" in self.switches:
# teleporting to None
if destination:
# in this case lhs is always the object to teleport
obj_to_teleport = destination
if obj_to_teleport.has_account:
caller.msg(
"Cannot teleport a puppeted object "
"(%s, puppeted by %s) to a None-location."
% (obj_to_teleport.key, obj_to_teleport.account)
)
return
caller.msg("Teleported %s -> None-location." % obj_to_teleport)
if obj_to_teleport.location and "quiet" not in self.switches:
obj_to_teleport.location.msg_contents(
"%s teleported %s into nothingness." % (caller, obj_to_teleport), exclude=caller
)
obj_to_teleport.location = None
return
if not self.args:
caller.msg("Usage: teleport[/switches] [<obj> =] <target or (X,Y,Z)>||home")
return
if not destination:
caller.msg("Destination not found.")
return
if "loc" in self.switches:
destination = destination.location
if not destination:
caller.msg("Destination has no location.")
return
if obj_to_teleport == destination:
caller.msg("You can't teleport an object inside of itself!")
return
if obj_to_teleport == destination.location:
caller.msg("You can't teleport an object inside something it holds!")
return
if obj_to_teleport.location and obj_to_teleport.location == destination:
caller.msg("%s is already at %s." % (obj_to_teleport, destination))
return
# check any locks
if not (caller.permissions.check("Admin") or obj_to_teleport.access(caller, "teleport")):
caller.msg(f"{obj_to_teleport} 'teleport'-lock blocks you from teleporting "
"it anywhere.")
return
if not (caller.permissions.check("Admin")
or destination.access(obj_to_teleport, "teleport_here")):
caller.msg(f"{destination} 'teleport_here'-lock blocks {obj_to_teleport} from "
"moving there.")
return
# try the teleport
if not obj_to_teleport.location:
# teleporting from none-location
obj_to_teleport.location = destination
caller.msg(f"Teleported {obj_to_teleport} None -> {destination}")
elif obj_to_teleport.move_to(
destination, quiet="quiet" in self.switches,
emit_to_obj=caller, use_destination="intoexit" not in self.switches):
if obj_to_teleport == caller:
caller.msg(f"Teleported to {destination}.")
else:
caller.msg(f"Teleported {obj_to_teleport} -> {destination}.")
else:
caller.msg("Teleportation failed.")
class CmdTag(COMMAND_DEFAULT_CLASS):
"""
handles the tags of an object
Usage:
tag[/del] <obj> [= <tag>[:<category>]]
        tag/search <tag>[:<category>]
Switches:
search - return all objects with a given Tag
del - remove the given tag. If no tag is specified,
clear all tags on object.
Manipulates and lists tags on objects. Tags allow for quick
grouping of and searching for objects. If only <obj> is given,
list all tags on the object. If /search is used, list objects
with the given tag.
The category can be used for grouping tags themselves, but it
    should be used with restraint - tags on their own are usually
    enough for most grouping schemes.
"""
key = "@tag"
aliases = ["@tags"]
options = ("search", "del")
locks = "cmd:perm(tag) or perm(Builder)"
help_category = "Building"
arg_regex = r"(/\w+?(\s|$))|\s|$"
def func(self):
"""Implement the tag functionality"""
if not self.args:
self.caller.msg("Usage: tag[/switches] <obj> [= <tag>[:<category>]]")
return
if "search" in self.switches:
# search by tag
tag = self.args
category = None
if ":" in tag:
tag, category = [part.strip() for part in tag.split(":", 1)]
objs = search.search_tag(tag, category=category)
nobjs = len(objs)
if nobjs > 0:
catstr = (
" (category: '|w%s|n')" % category
if category
else ("" if nobjs == 1 else " (may have different tag categories)")
)
matchstr = ", ".join(o.get_display_name(self.caller) for o in objs)
string = "Found |w%i|n object%s with tag '|w%s|n'%s:\n %s" % (
nobjs,
"s" if nobjs > 1 else "",
tag,
catstr,
matchstr,
)
else:
string = "No objects found with tag '%s%s'." % (
tag,
" (category: %s)" % category if category else "",
)
self.caller.msg(string)
return
if "del" in self.switches:
# remove one or all tags
obj = self.caller.search(self.lhs, global_search=True)
if not obj:
return
if self.rhs:
# remove individual tag
tag = self.rhs
category = None
if ":" in tag:
tag, category = [part.strip() for part in tag.split(":", 1)]
if obj.tags.get(tag, category=category):
obj.tags.remove(tag, category=category)
string = "Removed tag '%s'%s from %s." % (
tag,
" (category: %s)" % category if category else "",
obj,
)
else:
string = "No tag '%s'%s to delete on %s." % (
tag,
" (category: %s)" % category if category else "",
obj,
)
else:
# no tag specified, clear all tags
old_tags = [
"%s%s" % (tag, " (category: %s)" % category if category else "")
for tag, category in obj.tags.all(return_key_and_category=True)
]
if old_tags:
obj.tags.clear()
string = "Cleared all tags from %s: %s" % (obj, ", ".join(sorted(old_tags)))
else:
string = "No Tags to clear on %s." % obj
self.caller.msg(string)
return
# no search/deletion
if self.rhs:
# = is found; command args are of the form obj = tag
obj = self.caller.search(self.lhs, global_search=True)
if not obj:
return
tag = self.rhs
category = None
if ":" in tag:
tag, category = [part.strip() for part in tag.split(":", 1)]
# create the tag
obj.tags.add(tag, category=category)
string = "Added tag '%s'%s to %s." % (
tag,
" (category: %s)" % category if category else "",
obj,
)
self.caller.msg(string)
else:
# no = found - list tags on object
obj = self.caller.search(self.args, global_search=True)
if not obj:
return
tagtuples = obj.tags.all(return_key_and_category=True)
ntags = len(tagtuples)
tags = [tup[0] for tup in tagtuples]
categories = [" (category: %s)" % tup[1] if tup[1] else "" for tup in tagtuples]
if ntags:
string = "Tag%s on %s: %s" % (
"s" if ntags > 1 else "",
obj,
", ".join(sorted("'%s'%s" % (tags[i], categories[i]) for i in range(ntags))),
)
else:
string = "No tags attached to %s." % obj
self.caller.msg(string)
# helper functions for spawn
class CmdSpawn(COMMAND_DEFAULT_CLASS):
"""
spawn objects from prototype
Usage:
spawn[/noloc] <prototype_key>
spawn[/noloc] <prototype_dict>
        spawn/search [prototype_key][;tag[,tag]]
spawn/list [tag, tag, ...]
spawn/list modules - list only module-based prototypes
spawn/show [<prototype_key>]
spawn/update <prototype_key>
spawn/save <prototype_dict>
spawn/edit [<prototype_key>]
olc - equivalent to spawn/edit
Switches:
noloc - allow location to be None if not specified explicitly. Otherwise,
location will default to caller's current location.
search - search prototype by name or tags.
list - list available prototypes, optionally limit by tags.
show, examine - inspect prototype by key. If not given, acts like list.
raw - show the raw dict of the prototype as a one-line string for manual editing.
save - save a prototype to the database. It will be listable by /list.
delete - remove a prototype from database, if allowed to.
update - find existing objects with the same prototype_key and update
them with latest version of given prototype. If given with /save,
will auto-update all objects with the old version of the prototype
without asking first.
edit, menu, olc - create/manipulate prototype in a menu interface.
Example:
spawn GOBLIN
spawn {"key":"goblin", "typeclass":"monster.Monster", "location":"#2"}
spawn/save {"key": "grunt", prototype: "goblin"};;mobs;edit:all()
\f
Dictionary keys:
|wprototype_parent |n - name of parent prototype to use. Required if typeclass is
not set. Can be a path or a list for multiple inheritance (inherits
left to right). If set one of the parents must have a typeclass.
|wtypeclass |n - string. Required if prototype_parent is not set.
|wkey |n - string, the main object identifier
|wlocation |n - this should be a valid object or #dbref
|whome |n - valid object or #dbref
|wdestination|n - only valid for exits (object or dbref)
|wpermissions|n - string or list of permission strings
|wlocks |n - a lock-string
|waliases |n - string or list of strings.
|wndb_|n<name> - value of a nattribute (ndb_ is stripped)
|wprototype_key|n - name of this prototype. Unique. Used to store/retrieve from db
and update existing prototyped objects if desired.
|wprototype_desc|n - desc of this prototype. Used in listings
|wprototype_locks|n - locks of this prototype. Limits who may use prototype
|wprototype_tags|n - tags of this prototype. Used to find prototype
any other keywords are interpreted as Attributes and their values.
The available prototypes are defined globally in modules set in
settings.PROTOTYPE_MODULES. If spawn is used without arguments it
displays a list of available prototypes.
"""
key = "@spawn"
aliases = ["@olc"]
switch_options = (
"noloc",
"search",
"list",
"show",
"raw",
"examine",
"save",
"delete",
"menu",
"olc",
"update",
"edit",
)
locks = "cmd:perm(spawn) or perm(Builder)"
help_category = "Building"
def _search_prototype(self, prototype_key, quiet=False):
"""
Search for prototype and handle no/multi-match and access.
        Returns a single found prototype or None - in the
        latter case the caller has already been informed of the
        search error and no further action is needed.
"""
prototypes = protlib.search_prototype(prototype_key)
nprots = len(prototypes)
# handle the search result
err = None
if not prototypes:
err = f"No prototype named '{prototype_key}' was found."
elif nprots > 1:
err = "Found {} prototypes matching '{}':\n {}".format(
nprots,
prototype_key,
", ".join(proto.get("prototype_key", "") for proto in prototypes),
)
else:
# we have a single prototype, check access
prototype = prototypes[0]
if not self.caller.locks.check_lockstring(
self.caller, prototype.get("prototype_locks", ""), access_type="spawn", default=True
):
err = "You don't have access to use this prototype."
if err:
# return None on any error
if not quiet:
self.caller.msg(err)
return
return prototype
def _parse_prototype(self, inp, expect=dict):
"""
Parse a prototype dict or key from the input and convert it safely
into a dict if appropriate.
Args:
inp (str): The input from user.
expect (type, optional):
Returns:
prototype (dict, str or None): The parsed prototype. If None, the error
was already reported.
"""
eval_err = None
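        # _LITERAL_EVAL (presumably ast.literal_eval) only accepts Python literals
        # such as dicts, lists and strings, so no arbitrary code can run here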
try:
prototype = _LITERAL_EVAL(inp)
except (SyntaxError, ValueError) as err:
# treat as string
eval_err = err
prototype = utils.to_str(inp)
finally:
# it's possible that the input was a prototype-key, in which case
# it's okay for the LITERAL_EVAL to fail. Only if the result does not
# match the expected type do we have a problem.
if not isinstance(prototype, expect):
if eval_err:
string = (
f"{inp}\n{eval_err}\n|RCritical Python syntax error in argument. Only primitive "
"Python structures are allowed. \nMake sure to use correct "
"Python syntax. Remember especially to put quotes around all "
"strings inside lists and dicts.|n For more advanced uses, embed "
"funcparser callables ($funcs) in the strings."
)
else:
string = "Expected {}, got {}.".format(expect, type(prototype))
self.caller.msg(string)
return
if expect == dict:
# an actual prototype. We need to make sure it's safe,
# so don't allow exec.
# TODO: Exec support is deprecated. Remove completely for 1.0.
if "exec" in prototype and not self.caller.check_permstring("Developer"):
self.caller.msg(
"Spawn aborted: You are not allowed to " "use the 'exec' prototype key."
)
return
try:
                # we homogenize the prototype first, to be more lenient with free-form
protlib.validate_prototype(protlib.homogenize_prototype(prototype))
except RuntimeError as err:
self.caller.msg(str(err))
return
return prototype
def _get_prototype_detail(self, query=None, prototypes=None):
"""
Display the detailed specs of one or more prototypes.
Args:
query (str, optional): If this is given and `prototypes` is not, search for
the prototype(s) by this query. This may be a partial query which
may lead to multiple matches, all being displayed.
prototypes (list, optional): If given, ignore `query` and only show these
prototype-details.
Returns:
display (str, None): A formatted string of one or more prototype details.
If None, the caller was already informed of the error.
"""
if not prototypes:
# we need to query. Note that if query is None, all prototypes will
# be returned.
prototypes = protlib.search_prototype(key=query)
if prototypes:
return "\n".join(protlib.prototype_to_str(prot) for prot in prototypes)
elif query:
self.caller.msg(f"No prototype named '{query}' was found.")
else:
self.caller.msg("No prototypes found.")
def _list_prototypes(self, key=None, tags=None):
"""Display prototypes as a list, optionally limited by key/tags. """
protlib.list_prototypes(self.caller, key=key, tags=tags, session=self.session)
@interactive
def _update_existing_objects(self, caller, prototype_key, quiet=False):
"""
Update existing objects (if any) with this prototype-key to the latest
prototype version.
Args:
caller (Object): This is necessary for @interactive to work.
prototype_key (str): The prototype to update.
quiet (bool, optional): If set, don't report to user if no
old objects were found to update.
Returns:
n_updated (int): Number of updated objects.
"""
prototype = self._search_prototype(prototype_key)
if not prototype:
return
existing_objects = protlib.search_objects_with_prototype(prototype_key)
if not existing_objects:
if not quiet:
caller.msg("No existing objects found with an older version of this prototype.")
return
if existing_objects:
n_existing = len(existing_objects)
slow = " (note that this may be slow)" if n_existing > 10 else ""
string = (
f"There are {n_existing} existing object(s) with an older version "
f"of prototype '{prototype_key}'. Should it be re-applied to them{slow}? [Y]/N"
)
answer = yield (string)
if answer.lower() in ["n", "no"]:
caller.msg(
"|rNo update was done of existing objects. "
"Use spawn/update <key> to apply later as needed.|n"
)
return
try:
n_updated = spawner.batch_update_objects_with_prototype(
prototype, objects=existing_objects, caller=caller,
)
            except Exception:
                logger.log_trace()
                # fall back to zero if the batch update raised, so the report below still works
                n_updated = 0
            caller.msg(f"{n_updated} objects were updated.")
return
    def _parse_key_desc_tags(self, argstring, desc=True):
        """
        Parse ;-separated input list.
        """
        # the local name `desc` is reused for the parsed value below, so keep
        # the caller's flag under a separate name first
        want_desc = desc
        key, desc, tags = "", "", []
        if ";" in argstring:
            parts = [part.strip().lower() for part in argstring.split(";")]
            if len(parts) > 1 and want_desc:
key = parts[0]
desc = parts[1]
tags = parts[2:]
else:
key = parts[0]
tags = parts[1:]
else:
key = argstring.strip().lower()
return key, desc, tags
def func(self):
"""Implements the spawner"""
caller = self.caller
noloc = "noloc" in self.switches
# run the menu/olc
if (
self.cmdstring == "olc"
or "menu" in self.switches
or "olc" in self.switches
or "edit" in self.switches
):
# OLC menu mode
prototype = None
if self.lhs:
prototype_key = self.lhs
prototype = self._search_prototype(prototype_key)
if not prototype:
return
olc_menus.start_olc(caller, session=self.session, prototype=prototype)
return
if "search" in self.switches:
# query for a key match. The arg is a search query or nothing.
if not self.args:
# an empty search returns the full list
self._list_prototypes()
return
# search for key;tag combinations
key, _, tags = self._parse_key_desc_tags(self.args, desc=False)
self._list_prototypes(key, tags)
return
if "raw" in self.switches:
# query for key match and return the prototype as a safe one-liner string.
if not self.args:
caller.msg("You need to specify a prototype-key to get the raw data for.")
prototype = self._search_prototype(self.args)
if not prototype:
return
caller.msg(str(prototype))
return
if "show" in self.switches or "examine" in self.switches:
# show a specific prot detail. The argument is a search query or empty.
if not self.args:
# we don't show the list of all details, that's too spammy.
caller.msg("You need to specify a prototype-key to show.")
return
detail_string = self._get_prototype_detail(self.args)
if not detail_string:
return
caller.msg(detail_string)
return
if "list" in self.switches:
# for list, all optional arguments are tags.
tags = self.lhslist
err = self._list_prototypes(tags=tags)
if err:
caller.msg(
"No prototypes found with prototype-tag(s): {}".format(
list_to_string(tags, "or")
)
)
return
if "save" in self.switches:
# store a prototype to the database store
if not self.args:
caller.msg(
"Usage: spawn/save [<key>[;desc[;tag,tag[,...][;lockstring]]]] = <prototype_dict>"
)
return
if self.rhs:
# input on the form key = prototype
prototype_key, prototype_desc, prototype_tags = self._parse_key_desc_tags(self.lhs)
prototype_key = None if not prototype_key else prototype_key
prototype_desc = None if not prototype_desc else prototype_desc
prototype_tags = None if not prototype_tags else prototype_tags
prototype_input = self.rhs.strip()
else:
prototype_key = prototype_desc = None
prototype_tags = None
prototype_input = self.lhs.strip()
# handle parsing
prototype = self._parse_prototype(prototype_input)
if not prototype:
return
prot_prototype_key = prototype.get("prototype_key")
if not (prototype_key or prot_prototype_key):
caller.msg(
"A prototype_key must be given, either as `prototype_key = <prototype>` "
"or as a key 'prototype_key' inside the prototype structure."
)
return
if prototype_key is None:
prototype_key = prot_prototype_key
if prot_prototype_key != prototype_key:
caller.msg("(Replacing `prototype_key` in prototype with given key.)")
prototype["prototype_key"] = prototype_key
if prototype_desc is not None and prot_prototype_key != prototype_desc:
caller.msg("(Replacing `prototype_desc` in prototype with given desc.)")
prototype["prototype_desc"] = prototype_desc
if prototype_tags is not None and prototype.get("prototype_tags") != prototype_tags:
caller.msg("(Replacing `prototype_tags` in prototype with given tag(s))")
prototype["prototype_tags"] = prototype_tags
string = ""
# check for existing prototype (exact match)
old_prototype = self._search_prototype(prototype_key, quiet=True)
diff = spawner.prototype_diff(old_prototype, prototype, homogenize=True)
diffstr = spawner.format_diff(diff)
new_prototype_detail = self._get_prototype_detail(prototypes=[prototype])
if old_prototype:
if not diffstr:
string = f"|yAlready existing Prototype:|n\n{new_prototype_detail}\n"
question = (
"\nThere seems to be no changes. Do you still want to (re)save? [Y]/N"
)
else:
string = (
f'|yExisting prototype "{prototype_key}" found. Change:|n\n{diffstr}\n'
f"|yNew changed prototype:|n\n{new_prototype_detail}"
)
question = (
"\n|yDo you want to apply the change to the existing prototype?|n [Y]/N"
)
else:
string = f"|yCreating new prototype:|n\n{new_prototype_detail}"
question = "\nDo you want to continue saving? [Y]/N"
answer = yield (string + question)
if answer.lower() in ["n", "no"]:
caller.msg("|rSave cancelled.|n")
return
# all seems ok. Try to save.
try:
prot = protlib.save_prototype(prototype)
if not prot:
caller.msg("|rError saving:|R {}.|n".format(prototype_key))
return
except protlib.PermissionError as err:
caller.msg("|rError saving:|R {}|n".format(err))
return
caller.msg("|gSaved prototype:|n {}".format(prototype_key))
# check if we want to update existing objects
self._update_existing_objects(self.caller, prototype_key, quiet=True)
return
if not self.args:
            # all switches beyond this point get a common non-arg return
ncount = len(protlib.search_prototype())
caller.msg(
"Usage: spawn <prototype-key> or {{key: value, ...}}"
f"\n ({ncount} existing prototypes. Use /list to inspect)"
)
return
if "delete" in self.switches:
# remove db-based prototype
prototype_detail = self._get_prototype_detail(self.args)
if not prototype_detail:
return
string = f"|rDeleting prototype:|n\n{prototype_detail}"
question = "\nDo you want to continue deleting? [Y]/N"
answer = yield (string + question)
if answer.lower() in ["n", "no"]:
caller.msg("|rDeletion cancelled.|n")
return
try:
success = protlib.delete_prototype(self.args)
except protlib.PermissionError as err:
retmsg = f"|rError deleting:|R {err}|n"
else:
retmsg = (
"Deletion successful"
if success
else "Deletion failed (does the prototype exist?)"
)
caller.msg(retmsg)
return
if "update" in self.switches:
# update existing prototypes
prototype_key = self.args.strip().lower()
self._update_existing_objects(self.caller, prototype_key)
return
        # If we get to this point, no switches were used and we are trying a
        # direct creation of an object from a given prototype or -key
prototype = self._parse_prototype(
self.args, expect=dict if self.args.strip().startswith("{") else str
)
if not prototype:
# this will only let through dicts or strings
return
key = "<unnamed>"
if isinstance(prototype, str):
# A prototype key we are looking to apply
prototype_key = prototype
prototype = self._search_prototype(prototype_key)
if not prototype:
return
# proceed to spawning
try:
for obj in spawner.spawn(prototype, caller=self.caller):
self.caller.msg("Spawned %s." % obj.get_display_name(self.caller))
if not prototype.get("location") and not noloc:
# we don't hardcode the location in the prototype (unless the user
# did so manually) - that would lead to it having to be 'removed' every
# time we try to update objects with this prototype in the future.
obj.location = caller.location
except RuntimeError as err:
caller.msg(err)
|
py | 1a32d705e4eb9fd556d57f6c1aed9ef238141835 |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class apply_exp_traffic_class_map_name(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-qos-mpls - based on the path /qos-mpls/map-apply/apply-exp-traffic-class-map-name. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__map_name_cmd1','__all_zero_map_cmd1','__default_map_cmd1','__All_cmd1',)
_yang_name = 'apply-exp-traffic-class-map-name'
_rest_name = 'exp-traffic-class'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__default_map_cmd1 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="default-map-cmd1", rest_name="default-map", parent=self, choice=(u'apply-exp-traffic-class', u'ca-default-map-cmd1'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map EXP value to internal traffic-class and drop-prec based on default map', u'alt-name': u'default-map', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)
self.__all_zero_map_cmd1 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="all-zero-map-cmd1", rest_name="all-zero-map", parent=self, choice=(u'apply-exp-traffic-class', u'ca-all-zero-map-cmd1'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map EXP value to internal traffic-class 0 and drop-prec 0', u'alt-name': u'all-zero-map', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)
self.__All_cmd1 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="All-cmd1", rest_name="All", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply globally on all interface', u'alt-name': u'All'}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)
self.__map_name_cmd1 = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="map-name-cmd1", rest_name="map-name-cmd1", parent=self, choice=(u'apply-exp-traffic-class', u'ca-map-name-cmd1'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'<MAP NAME>;;Name for the MAP(Max 64)', u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='map-name-type', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'qos-mpls', u'map-apply', u'apply-exp-traffic-class-map-name']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'qos-mpls', u'map-apply', u'exp-traffic-class']
def _get_map_name_cmd1(self):
"""
Getter method for map_name_cmd1, mapped from YANG variable /qos_mpls/map_apply/apply_exp_traffic_class_map_name/map_name_cmd1 (map-name-type)
"""
return self.__map_name_cmd1
def _set_map_name_cmd1(self, v, load=False):
"""
Setter method for map_name_cmd1, mapped from YANG variable /qos_mpls/map_apply/apply_exp_traffic_class_map_name/map_name_cmd1 (map-name-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_map_name_cmd1 is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_map_name_cmd1() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="map-name-cmd1", rest_name="map-name-cmd1", parent=self, choice=(u'apply-exp-traffic-class', u'ca-map-name-cmd1'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'<MAP NAME>;;Name for the MAP(Max 64)', u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='map-name-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """map_name_cmd1 must be of a type compatible with map-name-type""",
'defined-type': "brocade-apply-qos-mpls:map-name-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="map-name-cmd1", rest_name="map-name-cmd1", parent=self, choice=(u'apply-exp-traffic-class', u'ca-map-name-cmd1'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'<MAP NAME>;;Name for the MAP(Max 64)', u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='map-name-type', is_config=True)""",
})
self.__map_name_cmd1 = t
if hasattr(self, '_set'):
self._set()
def _unset_map_name_cmd1(self):
self.__map_name_cmd1 = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'[a-zA-Z]{1}([-a-zA-Z0-9_]{0,63})'}), is_leaf=True, yang_name="map-name-cmd1", rest_name="map-name-cmd1", parent=self, choice=(u'apply-exp-traffic-class', u'ca-map-name-cmd1'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'<MAP NAME>;;Name for the MAP(Max 64)', u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='map-name-type', is_config=True)
def _get_all_zero_map_cmd1(self):
"""
Getter method for all_zero_map_cmd1, mapped from YANG variable /qos_mpls/map_apply/apply_exp_traffic_class_map_name/all_zero_map_cmd1 (empty)
"""
return self.__all_zero_map_cmd1
def _set_all_zero_map_cmd1(self, v, load=False):
"""
Setter method for all_zero_map_cmd1, mapped from YANG variable /qos_mpls/map_apply/apply_exp_traffic_class_map_name/all_zero_map_cmd1 (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_all_zero_map_cmd1 is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_all_zero_map_cmd1() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="all-zero-map-cmd1", rest_name="all-zero-map", parent=self, choice=(u'apply-exp-traffic-class', u'ca-all-zero-map-cmd1'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map EXP value to internal traffic-class 0 and drop-prec 0', u'alt-name': u'all-zero-map', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """all_zero_map_cmd1 must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="all-zero-map-cmd1", rest_name="all-zero-map", parent=self, choice=(u'apply-exp-traffic-class', u'ca-all-zero-map-cmd1'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map EXP value to internal traffic-class 0 and drop-prec 0', u'alt-name': u'all-zero-map', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)""",
})
self.__all_zero_map_cmd1 = t
if hasattr(self, '_set'):
self._set()
def _unset_all_zero_map_cmd1(self):
self.__all_zero_map_cmd1 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="all-zero-map-cmd1", rest_name="all-zero-map", parent=self, choice=(u'apply-exp-traffic-class', u'ca-all-zero-map-cmd1'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map EXP value to internal traffic-class 0 and drop-prec 0', u'alt-name': u'all-zero-map', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)
def _get_default_map_cmd1(self):
"""
Getter method for default_map_cmd1, mapped from YANG variable /qos_mpls/map_apply/apply_exp_traffic_class_map_name/default_map_cmd1 (empty)
"""
return self.__default_map_cmd1
def _set_default_map_cmd1(self, v, load=False):
"""
Setter method for default_map_cmd1, mapped from YANG variable /qos_mpls/map_apply/apply_exp_traffic_class_map_name/default_map_cmd1 (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_default_map_cmd1 is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_default_map_cmd1() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="default-map-cmd1", rest_name="default-map", parent=self, choice=(u'apply-exp-traffic-class', u'ca-default-map-cmd1'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map EXP value to internal traffic-class and drop-prec based on default map', u'alt-name': u'default-map', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """default_map_cmd1 must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="default-map-cmd1", rest_name="default-map", parent=self, choice=(u'apply-exp-traffic-class', u'ca-default-map-cmd1'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map EXP value to internal traffic-class and drop-prec based on default map', u'alt-name': u'default-map', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)""",
})
self.__default_map_cmd1 = t
if hasattr(self, '_set'):
self._set()
def _unset_default_map_cmd1(self):
self.__default_map_cmd1 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="default-map-cmd1", rest_name="default-map", parent=self, choice=(u'apply-exp-traffic-class', u'ca-default-map-cmd1'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Map EXP value to internal traffic-class and drop-prec based on default map', u'alt-name': u'default-map', u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)
def _get_All_cmd1(self):
"""
Getter method for All_cmd1, mapped from YANG variable /qos_mpls/map_apply/apply_exp_traffic_class_map_name/All_cmd1 (empty)
"""
return self.__All_cmd1
def _set_All_cmd1(self, v, load=False):
"""
Setter method for All_cmd1, mapped from YANG variable /qos_mpls/map_apply/apply_exp_traffic_class_map_name/All_cmd1 (empty)
If this variable is read-only (config: false) in the
source YANG file, then _set_All_cmd1 is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_All_cmd1() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGBool, is_leaf=True, yang_name="All-cmd1", rest_name="All", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply globally on all interface', u'alt-name': u'All'}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """All_cmd1 must be of a type compatible with empty""",
'defined-type': "empty",
'generated-type': """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="All-cmd1", rest_name="All", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply globally on all interface', u'alt-name': u'All'}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)""",
})
self.__All_cmd1 = t
if hasattr(self, '_set'):
self._set()
def _unset_All_cmd1(self):
self.__All_cmd1 = YANGDynClass(base=YANGBool, is_leaf=True, yang_name="All-cmd1", rest_name="All", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Apply globally on all interface', u'alt-name': u'All'}}, namespace='urn:brocade.com:mgmt:brocade-apply-qos-mpls', defining_module='brocade-apply-qos-mpls', yang_type='empty', is_config=True)
map_name_cmd1 = __builtin__.property(_get_map_name_cmd1, _set_map_name_cmd1)
all_zero_map_cmd1 = __builtin__.property(_get_all_zero_map_cmd1, _set_all_zero_map_cmd1)
default_map_cmd1 = __builtin__.property(_get_default_map_cmd1, _set_default_map_cmd1)
All_cmd1 = __builtin__.property(_get_All_cmd1, _set_All_cmd1)
__choices__ = {u'apply-exp-traffic-class': {u'ca-default-map-cmd1': [u'default_map_cmd1'], u'ca-map-name-cmd1': [u'map_name_cmd1'], u'ca-all-zero-map-cmd1': [u'all_zero_map_cmd1']}}
_pyangbind_elements = {'map_name_cmd1': map_name_cmd1, 'all_zero_map_cmd1': all_zero_map_cmd1, 'default_map_cmd1': default_map_cmd1, 'All_cmd1': All_cmd1, }
|
py | 1a32d7137b654d451e0fd2940731354e0967eaa0 |
from sklearn.datasets import load_boston
import pandas as pd
import numpy as np
import miceforest as mf
from datetime import datetime
from miceforest.mean_matching_functions import mean_match_kdtree_classification
from matplotlib.pyplot import close
# Make random state and load data
# Define data
random_state = np.random.RandomState(5)
boston = pd.DataFrame(load_boston(return_X_y=True)[0])
boston.columns = [str(i) for i in boston.columns]
boston["3"] = boston["3"].map({0: 'a', 1: 'b'}).astype('category')
boston["8"] = boston["8"].astype("category")
boston_amp = mf.ampute_data(boston, perc=0.25, random_state=random_state)
def test_defaults_pandas():
new_data = boston_amp.loc[range(10), :].copy()
kernel = mf.ImputationKernel(
data=boston_amp,
datasets=2,
mean_match_function=mean_match_kdtree_classification
)
kernel.mice(iterations=2)
kernel2 = mf.ImputationKernel(
data=boston_amp,
datasets=1,
mean_match_function=mean_match_kdtree_classification
)
kernel2.mice(iterations=2)
# Test appending and then test kernel.
kernel.append(kernel2)
# Test mice after appendage
kernel.mice(1)
kernel.complete_data(0, inplace=True)
assert all(kernel.working_data.isnull().sum() == 0)
assert kernel.models[0][0][3].params['objective'] == 'regression'
assert kernel.models[0][3][3].params['objective'] == 'binary'
assert kernel.models[0][8][3].params['objective'] == 'multiclass'
# Make sure we didn't touch the original data
assert all(boston_amp.isnull().sum() > 0)
imp_ds = kernel.impute_new_data(new_data)
imp_ds.complete_data(2,inplace=True)
assert all(imp_ds.working_data.isnull().sum(0) == 0)
assert new_data.isnull().sum().sum() > 0
def test_complex_pandas():
working_set = boston_amp.copy()
# Switch our category columns to integer codes.
# Replace -1 with np.NaN or lightgbm will complain.
working_set["3"] = working_set["3"].cat.codes
working_set["8"] = working_set["8"].cat.codes
working_set["3"].replace(-1,np.NaN, inplace=True)
working_set["8"].replace(-1, np.NaN, inplace=True)
new_data = working_set.loc[range(10), :].copy()
# Customize everything.
vs = {"1": ["2","3","4","5"], "2": ["6","7"], "3": ["1","2","8"], "4": ["8","9","10"]}
mmc = {"1": 4, "2": 0.01, "3": 0}
ds = {"2": 100, "3": 0.5}
io = ["2", "3", "1"]
imputed_var_names = io
non_imputed_var_names = [str(x) for x in range(13) if str(x) not in io]
def mmf(
mmc,
model,
candidate_features,
bachelor_features,
candidate_values,
random_state
):
if mmc > 0:
imp_values = random_state.choice(candidate_values, size=bachelor_features.shape[0])
else:
bachelor_preds = model.predict(bachelor_features)
imp_values = bachelor_preds
return imp_values
kernel = mf.ImputationKernel(
data=working_set,
datasets=2,
variable_schema=vs,
imputation_order=io,
train_nonmissing=True,
mean_match_candidates=mmc,
data_subset=ds,
mean_match_function=mmf,
categorical_feature=[3,8],
copy_data=False
)
kernel2 = mf.ImputationKernel(
data=working_set,
datasets=1,
variable_schema=vs,
imputation_order=io,
train_nonmissing=True,
mean_match_candidates=mmc,
data_subset=ds,
mean_match_function=mmf,
categorical_feature=[3,8],
copy_data=False
)
assert kernel.mean_match_candidates == {1: 4, 2: 3, 3: 0, 4: 5}, "mean_match_candidates initialization failed"
assert kernel.data_subset == {1: 380, 2: 100, 3: 190, 4: 380}, "mean_match_subset initialization failed"
assert kernel.iteration_count() == 0, "iteration initialization failed"
assert kernel.categorical_variables == [3, 8], "categorical recognition failed."
nround = 2
kernel.mice(nround - 1, variable_parameters={"1": {"n_estimators": 15}}, n_estimators=10, verbose=True)
kernel2.mice(nround - 1, variable_parameters={"1": {"n_estimators": 15}}, n_estimators=10, verbose=True)
kernel.append(kernel2)
assert kernel.models[0][1][nround - 1].params['num_iterations'] == 15
assert kernel.models[0][2][nround - 1].params['num_iterations'] == 10
kernel.mice(1, variable_parameters={1: {"n_estimators": 15}}, n_estimators=10, verbose=True)
assert kernel.iteration_count() == nround, "iteration counting is incorrect."
assert kernel.models[0][1][nround].params['num_iterations'] == 15
assert kernel.models[0][2][nround].params['num_iterations'] == 10
# Make sure we only impute variables in variable_schema
compdat = kernel.complete_data(0)
assert all(compdat[imputed_var_names].isnull().sum() == 0)
assert all(compdat[non_imputed_var_names].isnull().sum() > 0)
# Test the ability to tune parameters with custom setup
optimization_steps = 2
op, ol = kernel.tune_parameters(
dataset=0,
optimization_steps=optimization_steps,
variable_parameters={1: {"bagging_fraction": 0.9, "feature_fraction_bynode": (0.85, 0.9)}},
bagging_fraction=0.8,
feature_fraction_bynode=(0.70,0.75),
verbose=True
)
assert op[1]["bagging_fraction"] == 0.9
assert op[2]["bagging_fraction"] == 0.8
assert (op[1]["feature_fraction_bynode"] >= 0.85) and (op[1]["feature_fraction_bynode"] <= 0.9)
assert (op[2]["feature_fraction_bynode"] >= 0.70) and (op[2]["feature_fraction_bynode"] <= 0.75)
kernel.mice(1, variable_parameters=op, verbose=True)
model_2_params = kernel.models[0][2][nround + 1].params
model_1_params = kernel.models[0][1][nround + 1].params
assert model_2_params["bagging_fraction"] == 0.8
assert model_1_params["bagging_fraction"] == 0.9
assert (model_2_params["feature_fraction_bynode"] >= 0.70) and (model_2_params["feature_fraction_bynode"] <= 0.75)
assert (model_1_params["feature_fraction_bynode"] >= 0.85) and (model_1_params["feature_fraction_bynode"] <= 0.9)
new_imp_dat = kernel.impute_new_data(new_data=new_data, verbose=True)
new_imp_complete = new_imp_dat.complete_data(0)
assert all(new_imp_complete[["1","2","3","4"]].isnull().sum() == 0)
# Plotting on multiple imputed dataset
new_imp_dat.plot_mean_convergence()
close()
new_imp_dat.plot_imputed_distributions()
close()
# Plotting on Multiple Imputed Kernel
kernel.plot_feature_importance(0)
close()
kernel.plot_mean_convergence()
close()
kernel.plot_imputed_distributions()
close()
def test_defaults_numpy():
working_set = boston_amp.copy()
working_set["3"] = working_set["3"].cat.codes
working_set["8"] = working_set["8"].cat.codes
working_set["3"].replace(-1,np.NaN, inplace=True)
working_set["8"].replace(-1, np.NaN, inplace=True)
new_data = working_set.loc[range(10), :].copy()
working_set = working_set.values
new_data = new_data.values
s = datetime.now()
kernel = mf.ImputationKernel(
data=working_set,
datasets=3,
categorical_feature=[3,8],
mean_match_function=mean_match_kdtree_classification
)
kernel.mice(iterations=1, verbose=True)
# Complete data with copy.
comp_dat = kernel.complete_data(0, inplace=False)
# We didn't complete data in place. Make sure we created
# a copy, and did not affect internal data or original data.
assert all(np.isnan(comp_dat).sum(0) == 0)
assert all(np.isnan(kernel.working_data).sum(0) > 0)
assert all(np.isnan(working_set).sum(0) > 0)
# Complete data in place
kernel.complete_data(0, inplace=True)
# We completed data in place. Make sure we only affected
# the kernel.working_data and not the original data.
assert all(np.isnan(kernel.working_data).sum(0) == 0)
assert all(np.isnan(working_set).sum(0) > 0)
imp_ds = kernel.impute_new_data(new_data)
imp_ds.complete_data(0,inplace=True)
assert all(np.isnan(imp_ds.working_data).sum(0) == 0)
assert np.isnan(new_data).sum() > 0
print(datetime.now() - s)
def test_complex_numpy():
working_set = boston_amp.copy()
# Switch our category columns to integer codes.
# Replace -1 with np.NaN or lightgbm will complain.
working_set["3"] = working_set["3"].cat.codes
working_set["8"] = working_set["8"].cat.codes
working_set["3"].replace(-1,np.NaN, inplace=True)
working_set["8"].replace(-1, np.NaN, inplace=True)
new_data = working_set.loc[range(100), :].copy()
working_set = working_set.values
new_data = new_data.values
# Specify that models should be built for variables 1, 2, 3, 4
vs = {1: [2,3,4,5], 2: [6,7], 3: [1,2,8], 4: [8,9,10]}
mmc = {1: 4, 2: 0.01, 3: 0}
ds = {2: 100, 3: 0.5}
# Only variables 1, 2, 3 should be imputed using mice.
io = [2,3,1]
niv = np.setdiff1d(np.arange(working_set.shape[1]), io)
nivs = np.setdiff1d(np.arange(working_set.shape[1]), list(vs))
def mmf(
mmc,
model,
candidate_features,
bachelor_features,
candidate_values,
random_state
):
if mmc > 0:
imp_values = random_state.choice(candidate_values, size=bachelor_features.shape[0])
else:
bachelor_preds = model.predict(bachelor_features)
imp_values = bachelor_preds
return imp_values
kernel = mf.ImputationKernel(
data=working_set,
datasets=2,
variable_schema=vs,
imputation_order=io,
train_nonmissing=True,
mean_match_candidates=mmc,
data_subset=ds,
mean_match_function=mmf,
categorical_feature=[3,8],
copy_data=False
)
kernel2 = mf.ImputationKernel(
data=working_set,
datasets=1,
variable_schema=vs,
imputation_order=io,
train_nonmissing=True,
mean_match_candidates=mmc,
data_subset=ds,
mean_match_function=mmf,
categorical_feature=[3,8],
copy_data=False
)
assert kernel.mean_match_candidates == {2: 3, 3: 0, 1: 4, 4: 5}, "mean_match_candidates initialization failed"
assert kernel.data_subset == {2: 100, 3: 190, 1: 380, 4: 380}, "mean_match_subset initialization failed"
assert kernel.iteration_count() == 0, "iteration initialization failed"
assert kernel.categorical_variables == [3, 8], "categorical recognition failed."
nround = 2
kernel.mice(nround - 1, variable_parameters={1: {"n_estimators": 15}}, n_estimators=10, verbose=True)
kernel2.mice(nround - 1, variable_parameters={1: {"n_estimators": 15}}, n_estimators=10, verbose=True)
kernel.append(kernel2)
assert kernel.models[0][1][nround - 1].params['num_iterations'] == 15
assert kernel.models[0][2][nround - 1].params['num_iterations'] == 10
kernel.mice(1, variable_parameters={1: {"n_estimators": 15}}, n_estimators=10, verbose=True)
assert kernel.iteration_count() == nround, "iteration counting is incorrect."
assert kernel.models[0][1][nround].params['num_iterations'] == 15
assert kernel.models[0][2][nround].params['num_iterations'] == 10
# Complete data with copy. Make sure only correct datasets and variables were affected.
compdat = kernel.complete_data(0, inplace=False)
assert all(np.isnan(compdat[:,io]).sum(0) == 0)
assert all(np.isnan(compdat[:,niv]).sum(0) > 0)
    # Should have no effect on working_data
assert all(np.isnan(kernel.working_data).sum(0) > 0)
    # Should have no effect on working_set
assert all(np.isnan(working_set).sum(0) > 0)
# Now complete the data in place
kernel.complete_data(0, inplace=True)
    # Should have an effect on working_data and the original data
assert all(np.isnan(kernel.working_data[:, io]).sum(0) == 0)
assert all(np.isnan(working_set[:, io]).sum(0) == 0)
assert all(np.isnan(kernel.working_data[:, niv]).sum(0) > 0)
assert all(np.isnan(working_set[:, niv]).sum(0) > 0)
# Test the ability to tune parameters with custom setup
optimization_steps = 2
op, ol = kernel.tune_parameters(
dataset=0,
optimization_steps=optimization_steps,
variable_parameters={1: {"bagging_fraction": 0.9, "feature_fraction_bynode": (0.85, 0.9)}},
bagging_fraction=0.8,
feature_fraction_bynode=(0.70,0.75),
verbose=True
)
assert op[1]["bagging_fraction"] == 0.9
assert op[2]["bagging_fraction"] == 0.8
assert (op[1]["feature_fraction_bynode"] >= 0.85) and (op[1]["feature_fraction_bynode"] <= 0.9)
assert (op[2]["feature_fraction_bynode"] >= 0.70) and (op[2]["feature_fraction_bynode"] <= 0.75)
kernel.mice(1, variable_parameters=op, verbose=True)
model_2_params = kernel.models[0][2][nround + 1].params
model_1_params = kernel.models[0][1][nround + 1].params
assert model_2_params["bagging_fraction"] == 0.8
assert model_1_params["bagging_fraction"] == 0.9
assert (model_2_params["feature_fraction_bynode"] >= 0.70) and (model_2_params["feature_fraction_bynode"] <= 0.75)
assert (model_1_params["feature_fraction_bynode"] >= 0.85) and (model_1_params["feature_fraction_bynode"] <= 0.9)
new_imp_dat = kernel.impute_new_data(new_data=new_data, copy_data=True, verbose=True)
# Not in place
new_imp_complete = new_imp_dat.complete_data(0, inplace=False)
assert all(np.isnan(new_imp_complete[:, list(vs)]).sum(0) == 0)
assert all(np.isnan(new_imp_complete[:, nivs]).sum(0) > 0)
    # Should have no effect on working_data or the original data
assert all(np.isnan(new_imp_dat.working_data).sum(0) > 0)
assert all(np.isnan(new_data[:, list(vs)]).sum(0) > 0)
# complete data in place
new_imp_dat.complete_data(0, inplace=True)
assert all(np.isnan(new_imp_dat.working_data[:, list(vs)]).sum(0) == 0)
assert all(np.isnan(new_data[:, nivs]).sum(0) > 0)
# Alter in place
new_imp_dat = kernel.impute_new_data(new_data=new_data, copy_data=False, verbose=True)
# Before completion, nan's should still exist in data:
assert all(np.isnan(new_data).sum(0) > 0)
assert all(np.isnan(new_imp_dat.working_data).sum(0) > 0)
# Complete data not in place
new_imp_complete = new_imp_dat.complete_data(0, inplace=False)
assert all(np.isnan(new_imp_complete[:, nivs]).sum(0) > 0)
assert all(np.isnan(new_imp_complete[:, list(vs)]).sum(0) == 0)
assert all(np.isnan(new_data).sum(0) > 0)
assert all(np.isnan(new_imp_dat.working_data).sum(0) > 0)
# Complete data in place
new_imp_dat.complete_data(0, inplace=True)
assert all(np.isnan(new_data[:, nivs]).sum(0) > 0)
assert all(np.isnan(new_data[:, list(vs)]).sum(0) == 0)
assert all(np.isnan(new_imp_dat.working_data[:, nivs]).sum(0) > 0)
assert all(np.isnan(new_imp_dat.working_data[:, list(vs)]).sum(0) == 0)
# Plotting on multiple imputed dataset
new_imp_dat.plot_mean_convergence()
close()
new_imp_dat.plot_imputed_distributions()
close()
# Plotting on Multiple Imputed Kernel
kernel.plot_feature_importance(0)
close()
kernel.plot_mean_convergence()
close()
kernel.plot_imputed_distributions()
close() |
py | 1a32d8b1d7727c8277a29e60b7c97c20cdd077de | class _Creep:
body = []
memory = {'class' : 'AbstractBaseCreep'} # Override this in subclasses
name = None
def __init__(self, spawner):
self.spawner = spawner
def spawn(self):
resp = self.spawner.canCreateCreep(self.body, self.name)
if resp == OK:
print("Spawning new " + self.type)
print("Body: " + self.body)
print("Memory: " + self.memory)
self.spawner.createCreep(self.body, self.name, self.memory)
else:
print("Tried to spawn a " + self.type + " but got code " + resp)
class BasicHarvester(_Creep):
body = [WORK, MOVE, CARRY]
memory = {'class': 'BasicHarvester'}
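# Usage sketch (assumption: this runs inside the Screeps game loop, where the
# WORK/MOVE/CARRY constants, OK, and the spawn structure object are provided by
# the runtime): BasicHarvester(Game.spawns['Spawn1']).spawn()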
|
py | 1a32d98c93d4d0da2db69cbf3f1b069c88ee0d95 | # https://gist.github.com/seanchen1991/a151368df32b8e7ae6e7fde715e44b78
# reduce takes a data structure and either finds a key piece of data or
# be able to restructure the data structure
# 1. Reduce usually takes a linear data structure (99% we use reduce on an array)
# 2. Reduce "aggregates" all of the data in the data structure into one final value
# 3. Reduce is extremely flexible with how the reduction actually happens
# 3a. Reduce doesn't care how the reduction happens
# 4. The "reducer" function that's passed in as input is how we specify how the reduction happens
# what's the deal with the anonymous function's parameters?
# 1. An aggregator value
# 2. The current list node
# 3. An optional value the aggregator is initialized with
# what happens on a single call of the anonymous function?
# how many times does the reducer function get called? Once for every element in our data structure
# does the anonymous function do the work of iterating over our data structure?
# no, the anonymous function itself doesn't do the work of iterating
# Steps for our reduce function
# How do all of these calls get aggregated into a single value?
# The anonymous function needs to update the aggregated value somehow
# where is the state set and how is it defaulted to the head?
def linked_list_reduce(head, reducer, init=None):
# where/when do we initialize the state?
# initialize state before we start looping
state = None
# what do we do when the init value is set?
    if init is not None:
        state = init
    # what do we do when the init value is None?
    else:
# set the state to be the first value in our list
state = head.value
# move the head to the next list node
head = head.next
# 1. Loop over the data structure
current = head
while current:
# 2. Call our anonymous function on the current iteration value
# 3. Update our state to be the result of the anonymous function
state = reducer(state, current.value)
# update our current pointer
current = current.next
# 4. Return the final aggregated state
return state
class Node:
def __init__(self, value, next=None):
self.value = value
self.next = next
l1 = Node(4)
l2 = Node(7)
l3 = Node(15)
l4 = Node(29)
l5 = Node(5)
l1.next = l2
l2.next = l3
l3.next = l4
l4.next = l5
def reducer(x, y): return x - y
print(linked_list_reduce(l1, reducer, 100))
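# A second usage sketch (illustrative): when no init value is given, the head's
# value (4) seeds the aggregation, so summing the list yields 4 + 7 + 15 + 29 + 5 = 60.
print(linked_list_reduce(l1, lambda acc, value: acc + value))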
|
py | 1a32db2aa650bfd268f4126b10c5d1b410bd40be | """Plot a Lyapunov contour"""
from typing import cast, List, Tuple, Optional, TYPE_CHECKING
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import pandas as pd
import seaborn as sns
import torch
import tqdm
from neural_clbf.experiments import Experiment
from neural_clbf.systems import ObservableSystem # noqa
if TYPE_CHECKING:
from neural_clbf.controllers import Controller, NeuralObsBFController # noqa
class LFContourExperiment(Experiment):
"""An experiment for plotting the contours of learned LFs"""
def __init__(
self,
name: str,
domain: Optional[List[Tuple[float, float]]] = None,
n_grid: int = 50,
x_axis_index: int = 0,
y_axis_index: int = 1,
x_axis_label: str = "$x$",
y_axis_label: str = "$y$",
default_state: Optional[torch.Tensor] = None,
):
"""Initialize an experiment for plotting the value of the LF over selected
state dimensions.
args:
name: the name of this experiment
domain: a list of two tuples specifying the plotting range,
one for each state dimension.
n_grid: the number of points in each direction at which to compute h
x_axis_index: the index of the state variable to plot on the x axis
y_axis_index: the index of the state variable to plot on the y axis
x_axis_label: the label for the x axis
y_axis_label: the label for the y axis
default_state: 1 x dynamics_model.n_dims tensor of default state
values. The values at x_axis_index and y_axis_index will be
overwritten by the grid values.
"""
super(LFContourExperiment, self).__init__(name)
# Default to plotting over [-1, 1] in all directions
if domain is None:
domain = [(-1.0, 1.0), (-1.0, 1.0)]
self.domain = domain
self.n_grid = n_grid
self.x_axis_index = x_axis_index
self.y_axis_index = y_axis_index
self.x_axis_label = x_axis_label
self.y_axis_label = y_axis_label
self.default_state = default_state
@torch.no_grad()
def run(self, controller_under_test: "Controller") -> pd.DataFrame:
"""
Run the experiment, likely by evaluating the controller, but the experiment
has freedom to call other functions of the controller as necessary (if these
functions are not supported by all controllers, then experiments will be
responsible for checking compatibility with the provided controller)
args:
controller_under_test: the controller with which to run the experiment
returns:
a pandas DataFrame containing the results of the experiment, in tidy data
format (i.e. each row should correspond to a single observation from the
experiment).
"""
# Sanity check: can only be called on a NeuralObsBFController
if not (hasattr(controller_under_test, "h")):
raise ValueError("Controller under test must be a NeuralObsBFController")
controller_under_test = cast("NeuralObsBFController", controller_under_test)
# Set up a dataframe to store the results
results_df = pd.DataFrame()
# Set up the plotting grid
device = "cpu"
if hasattr(controller_under_test, "device"):
device = controller_under_test.device # type: ignore
x_vals = torch.linspace(
self.domain[0][0], self.domain[0][1], self.n_grid, device=device
)
y_vals = torch.linspace(
self.domain[1][0], self.domain[1][1], self.n_grid, device=device
)
# Default to all zeros if no default provided
if self.default_state is None:
default_state = torch.zeros(1, controller_under_test.dynamics_model.n_dims)
else:
default_state = self.default_state
default_state = default_state.type_as(x_vals)
# Make a copy of the default state, which we'll modify on every loop
x = (
default_state.clone()
.detach()
.reshape(1, controller_under_test.dynamics_model.n_dims)
)
# Loop through the grid
prog_bar_range = tqdm.trange(self.n_grid, desc="Plotting LF", leave=True)
for i in prog_bar_range:
for j in range(self.n_grid):
# Adjust x to be at the current grid point
x[0, self.x_axis_index] = x_vals[i]
x[0, self.y_axis_index] = y_vals[j]
# Get the value of the LF at this point
V = controller_under_test.V(x)
# TODO @dawsonc measure violation
# Store the results
results_df = results_df.append(
{
self.x_axis_label: x_vals[i].cpu().numpy().item(),
self.y_axis_label: y_vals[j].cpu().numpy().item(),
"V": V.cpu().numpy().item(),
},
ignore_index=True,
)
return results_df
def plot(
self,
controller_under_test: "Controller",
results_df: pd.DataFrame,
display_plots: bool = False,
) -> List[Tuple[str, figure]]:
"""
Plot the results, and return the plot handles. Optionally
display the plots.
args:
controller_under_test: the controller with which to run the experiment
display_plots: defaults to False. If True, display the plots (blocks until
the user responds).
returns: a list of tuples containing the name of each figure and the figure
object.
"""
# Set the color scheme
sns.set_theme(context="talk", style="white")
# Plot a contour of h
fig, ax = plt.subplots(1, 1)
fig.set_size_inches(12, 8)
contours = ax.tricontourf(
results_df[self.x_axis_label],
results_df[self.y_axis_label],
results_df["V"],
cmap=sns.color_palette("rocket", as_cmap=True),
)
plt.colorbar(contours, ax=ax, orientation="vertical")
# Make the legend
ax.set_xlabel(self.x_axis_label)
ax.set_ylabel(self.y_axis_label)
fig_handle = ("V Contour", fig)
if display_plots:
plt.show()
return []
else:
return [fig_handle]
|
py | 1a32dc3a18f4210b48ab65873270ae64e91aba03 | # Copyright 2021 the authors.
# This file is part of Hy, which is free software licensed under the Expat
# license. See the LICENSE.
from __future__ import unicode_literals
from contextlib import contextmanager
from math import isnan, isinf
from hy import _initialize_env_var
from hy.errors import HyWrapperError
from fractions import Fraction
from colorama import Fore
PRETTY = True
COLORED = _initialize_env_var('HY_COLORED_AST_OBJECTS', False)
@contextmanager
def pretty(pretty=True):
"""
Context manager to temporarily enable
or disable pretty-printing of Hy model reprs.
"""
global PRETTY
old, PRETTY = PRETTY, pretty
try:
yield
finally:
PRETTY = old
class _ColoredModel:
"""
Mixin that provides a helper function for models that have color.
"""
def _colored(self, text):
if COLORED:
return self.color + text + Fore.RESET
else:
return text
class Object(object):
"""
Generic Hy Object model. This is helpful to inject things into all the
Hy lexing Objects at once.
The position properties (`start_line`, `end_line`, `start_column`,
`end_column`) are each 1-based and inclusive. For example, a symbol
`abc` starting at the first column would have `start_column` 1 and
`end_column` 3.
"""
properties = ["module", "_start_line", "end_line", "_start_column",
"end_column"]
def replace(self, other, recursive=False):
if isinstance(other, Object):
for attr in self.properties:
if not hasattr(self, attr) and hasattr(other, attr):
setattr(self, attr, getattr(other, attr))
else:
raise TypeError("Can't replace a non Hy object '{}' with a Hy object '{}'".format(repr(other), repr(self)))
return self
@property
def start_line(self):
return getattr(self, "_start_line", 1)
@start_line.setter
def start_line(self, value):
self._start_line = value
@property
def start_column(self):
return getattr(self, "_start_column", 1)
@start_column.setter
def start_column(self, value):
self._start_column = value
def __repr__(self):
return (f"hy.models.{self.__class__.__name__}"
f"({super(Object, self).__repr__()})")
_wrappers = {}
def wrap_value(x):
"""Wrap `x` into the corresponding Hy type.
This allows replace_hy_obj to convert a non Hy object to a Hy object.
This also allows a macro to return an unquoted expression transparently.
"""
new = _wrappers.get(type(x), lambda y: y)(x)
if not isinstance(new, Object):
raise HyWrapperError("Don't know how to wrap {!r}: {!r}".format(type(x), x))
if isinstance(x, Object):
new = new.replace(x, recursive=False)
return new
def replace_hy_obj(obj, other):
return wrap_value(obj).replace(other)
def repr_indent(obj):
return repr(obj).replace("\n", "\n ")
class String(Object, str):
"""
Generic Hy String object. Helpful to store string literals from Hy
scripts. It's either a ``str`` or a ``unicode``, depending on the
Python version.
"""
def __new__(cls, s=None, brackets=None):
value = super(String, cls).__new__(cls, s)
value.brackets = brackets
return value
_wrappers[str] = String
class Bytes(Object, bytes):
"""
Generic Hy Bytes object. It's either a ``bytes`` or a ``str``, depending
on the Python version.
"""
pass
_wrappers[bytes] = Bytes
class Symbol(Object, str):
"""
Hy Symbol. Basically a string.
"""
def __new__(cls, s=None):
return super(Symbol, cls).__new__(cls, s)
_wrappers[bool] = lambda x: Symbol("True") if x else Symbol("False")
_wrappers[type(None)] = lambda foo: Symbol("None")
class Keyword(Object):
"""Generic Hy Keyword object."""
__slots__ = ['name']
def __init__(self, value):
self.name = value
def __repr__(self):
return f"hy.models.{self.__class__.__name__}({self.name!r})"
def __str__(self):
return ":%s" % self.name
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
if not isinstance(other, Keyword):
return NotImplemented
return self.name == other.name
def __ne__(self, other):
if not isinstance(other, Keyword):
return NotImplemented
return self.name != other.name
def __bool__(self):
return bool(self.name)
_sentinel = object()
def __call__(self, data, default=_sentinel):
from hy.lex import mangle
try:
return data[mangle(self.name)]
except KeyError:
if default is Keyword._sentinel:
raise
return default
# __getstate__ and __setstate__ are required for Pickle protocol
# 0, because we have __slots__.
def __getstate__(self):
return {k: getattr(self, k)
for k in self.properties + self.__slots__
if hasattr(self, k)}
def __setstate__(self, state):
for k, v in state.items():
setattr(self, k, v)
def strip_digit_separators(number):
# Don't strip a _ or , if it's the first character, as _42 and
# ,42 aren't valid numbers
return (number[0] + number[1:].replace("_", "").replace(",", "")
if isinstance(number, str) and len(number) > 1
else number)
class Integer(Object, int):
"""
Internal representation of a Hy Integer. May raise a ValueError as if
int(foo) was called, given Integer(foo).
"""
def __new__(cls, number, *args, **kwargs):
if isinstance(number, str):
number = strip_digit_separators(number)
bases = {"0x": 16, "0o": 8, "0b": 2}
for leader, base in bases.items():
if number.startswith(leader):
# We've got a string, known leader, set base.
number = int(number, base=base)
break
else:
# We've got a string, no known leader; base 10.
number = int(number, base=10)
else:
# We've got a non-string; convert straight.
number = int(number)
return super(Integer, cls).__new__(cls, number)
_wrappers[int] = Integer
def check_inf_nan_cap(arg, value):
if isinstance(arg, str):
if isinf(value) and "i" in arg.lower() and "Inf" not in arg:
raise ValueError('Inf must be capitalized as "Inf"')
if isnan(value) and "NaN" not in arg:
raise ValueError('NaN must be capitalized as "NaN"')
class Float(Object, float):
"""
Internal representation of a Hy Float. May raise a ValueError as if
float(foo) was called, given Float(foo).
"""
def __new__(cls, num, *args, **kwargs):
value = super(Float, cls).__new__(cls, strip_digit_separators(num))
check_inf_nan_cap(num, value)
return value
_wrappers[float] = Float
class Complex(Object, complex):
"""
Internal representation of a Hy Complex. May raise a ValueError as if
complex(foo) was called, given Complex(foo).
"""
def __new__(cls, real, imag=0, *args, **kwargs):
if isinstance(real, str):
value = super(Complex, cls).__new__(
cls, strip_digit_separators(real)
)
p1, _, p2 = real.lstrip("+-").replace("-", "+").partition("+")
check_inf_nan_cap(p1, value.imag if "j" in p1 else value.real)
if p2:
check_inf_nan_cap(p2, value.imag)
return value
return super(Complex, cls).__new__(cls, real, imag)
_wrappers[complex] = Complex
class Sequence(Object, tuple, _ColoredModel):
"""
An abstract type for sequence-like models to inherit from.
"""
def replace(self, other, recursive=True):
if recursive:
for x in self:
replace_hy_obj(x, other)
Object.replace(self, other)
return self
def __add__(self, other):
return self.__class__(super(Sequence, self).__add__(
tuple(other) if isinstance(other, list) else other))
def __getslice__(self, start, end):
return self.__class__(super(Sequence, self).__getslice__(start, end))
def __getitem__(self, item):
ret = super(Sequence, self).__getitem__(item)
if isinstance(item, slice):
return self.__class__(ret)
return ret
color = None
def __repr__(self):
return str(self) if PRETTY else super(Sequence, self).__repr__()
def __str__(self):
with pretty():
if self:
return self._colored("hy.models.{}{}\n {}{}".format(
self._colored(self.__class__.__name__),
self._colored("(["),
self._colored(",\n ").join(map(repr_indent, self)),
self._colored("])"),
))
else:
return self._colored(f"hy.models.{self.__class__.__name__}()")
class FComponent(Sequence):
"""
Analogue of ast.FormattedValue.
The first node in the contained sequence is the value being formatted,
the rest of the sequence contains the nodes in the format spec (if any).
"""
def __new__(cls, s=None, conversion=None):
value = super().__new__(cls, s)
value.conversion = conversion
return value
def replace(self, other, recursive=True):
super().replace(other, recursive)
if hasattr(other, "conversion"):
self.conversion = other.conversion
return self
class FString(Sequence):
"""
Generic Hy F-String object, for smarter f-string handling.
Mimics ast.JoinedStr, but using String and FComponent.
"""
def __new__(cls, s=None, brackets=None):
value = super().__new__(cls, s)
value.brackets = brackets
return value
class List(Sequence):
color = Fore.CYAN
def recwrap(f):
return lambda l: f(wrap_value(x) for x in l)
_wrappers[FComponent] = recwrap(FComponent)
_wrappers[FString] = recwrap(FString)
_wrappers[List] = recwrap(List)
_wrappers[list] = recwrap(List)
_wrappers[tuple] = recwrap(List)
class Dict(Sequence, _ColoredModel):
"""
Dict (just a representation of a dict)
"""
color = Fore.GREEN
def __str__(self):
with pretty():
if self:
pairs = []
for k, v in zip(self[::2],self[1::2]):
k, v = repr_indent(k), repr_indent(v)
pairs.append(
("{0}{c}\n {1}\n "
if '\n' in k+v
else "{0}{c} {1}").format(k, v, c=self._colored(',')))
if len(self) % 2 == 1:
pairs.append("{} {}\n".format(
repr_indent(self[-1]), self._colored("# odd")))
return "{}\n {}{}".format(
self._colored("hy.models.Dict(["),
"{c}\n ".format(c=self._colored(',')).join(pairs),
self._colored("])"))
else:
return self._colored("hy.models.Dict()")
def keys(self):
return list(self[0::2])
def values(self):
return list(self[1::2])
def items(self):
return list(zip(self.keys(), self.values()))
_wrappers[Dict] = recwrap(Dict)
_wrappers[dict] = lambda d: Dict(wrap_value(x) for x in sum(d.items(), ()))
class Expression(Sequence):
"""
Hy S-Expression. Basically just a list.
"""
color = Fore.YELLOW
_wrappers[Expression] = recwrap(Expression)
_wrappers[Fraction] = lambda e: Expression(
[Symbol("fraction"), wrap_value(e.numerator), wrap_value(e.denominator)])
class Set(Sequence):
"""
Hy set (just a representation of a set)
"""
color = Fore.RED
_wrappers[Set] = recwrap(Set)
_wrappers[set] = recwrap(Set)
|
py | 1a32dc731e9120e8dd027660b6fed3fbab34677d | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyPoster(PythonPackage):
"""Streaming HTTP uploads and multipart/form-data encoding."""
homepage = "https://pypi.org/project/poster/"
url = "https://atlee.ca/software/poster/dist/0.8.1/poster-0.8.1.tar.gz"
version('0.8.1', '2db12704538781fbaa7e63f1505d6fc8')
depends_on('py-setuptools', type='build')
# https://bitbucket.org/chrisatlee/poster/issues/24/not-working-with-python3
# https://bitbucket.org/chrisatlee/poster/issues/25/poster-connot-work-in-python35
# Patch created using 2to3
patch('python3.patch', when='^python@3:')
|
py | 1a32dd34b5f942f811eba6a86511de5dd1ccfa0a | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Huawei Technologies Co., Ltd. 2019. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Inter-process communication using HCOM."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import json
from tensorflow.python.platform import tf_logging as logging
from npu_bridge.estimator.npu import util as util_lib
class JobInfo:
"""Job information send by CSA."""
def __init__(self,
job_id=0,
job_config=None,
heartbeat_time=-1,
region_id=None,
ak=None,
sk=None,
endpoint_url=None,
device_info=None,
rank_table_file=None,
restart_flag=0,
local_app_dir=None,
local_data_dir=None,
local_checkpoint_dir=None,
local_log_dir=None,
local_result_dir=None,
local_boot_file=None,
rank_size=1
):
"""
Constructs a JobInfo.
Args:
job_id: the unique identifier.
heartbeat_time: the frequency that framework records the heartbeat.
job_config: the configuration of the training task. It's a json string.
region_id: the region id to access the OBS.
ak: the ak to access the OBS.
sk: the sk to access the OBS.
endpoint_url: the host name to access the OBS.
device_info: the device information of the training task. It's a json string.
rank_table_file: the communication routing information.
restart_flag: the abnormal re-issued ID (0: Normally issued; 1: Abnormally re-issued).
local_app_dir: the local path of the user script downloaded from OBS, for example: userfile/code/
local_data_dir: the local path of the user data downloaded from OBS, for example: userfile/data/
local_checkpoint_dir: the local path of the checkpoint file downloaded from OBS, for example: checkpoint/
local_log_dir: the user-created log path, for example: userfile/log/
local_result_dir: the user-created output file path, for example: userfile/result/
local_boot_file: the local path of the user startup script, for example: userfile/code/boot.py
rank_size: Rank size.
"""
self._job_id = job_id
self._job_config = job_config
self._heartbeat_time = heartbeat_time
self._region_id = region_id
self._ak = ak
self._sk = sk
self._endpoint_url = endpoint_url
self._device_info = device_info
self._rank_table_file = rank_table_file
self._restart_flag = restart_flag
self._local_app_dir = local_app_dir
self._local_data_dir = local_data_dir
self._local_checkpoint_dir = local_checkpoint_dir
self._local_log_dir = local_log_dir
self._local_result_dir = local_result_dir
self._local_boot_file = local_boot_file
self._rank_size = rank_size
class JobConfig():
"""Job configuration."""
def __init__(self, learning_rate=None, batch_size=None):
"""
Constructs a JobConfig.
Args:
learning_rate: A Tensor or a floating point value. The learning rate to use.
batch_size: Integer, size of batches to return.
"""
self._learning_rate = learning_rate
self._batch_size = batch_size
class DeviceInfo():
"""Device information."""
def __init__(self,
index="0",
server_id="123456",
dev_index=1):
"""
Constructs a DeviceInfo.
Args:
index: the unique identifier.
server_id: the server resource unique identifier, obtained from resource management.
dev_index: the device serial number in AI server.
"""
self._index = index
self._server_id = server_id
self._dev_index = dev_index
self._root_rank = 0
def is_master_node(self):
"""Determines whether the current node is the primary node."""
return self._index == self._root_rank
class NPUBasics(object):
"""Wrapper class for the basic NPU API."""
__instance = None
__has_init = False
def __new__(cls, file_name):
if not cls.__instance:
cls.__instance = object.__new__(cls)
return cls.__instance
def __init__(self, file_name):
if not self.__has_init:
self._job_info = self._read_job_info(file_name)
self.__has_init = True
@property
def jobinfo(self):
"""Return property"""
return self._job_info
def size(self):
"""A function that returns the number of Tensorflow processes.
Returns:
An integer scalar containing the number of Tensorflow processes.
"""
return self._job_info._rank_size
def _read_job_info(self, file_name):
"""Read the job information.
Args:
file_name: it's a json file which contains the job info from CSA.
Returns:
The job information.
"""
try:
with open(file_name, 'r', encoding='UTF-8') as f:
content = f.read()
data = json.loads(content, encoding='UTF-8')
# 1. Get the device_info and check it.
device_info = data.get('device_info')
util_lib.check_not_none(device_info, 'device_info')
index = device_info.get('Index', None)
util_lib.check_nonnegative_integer(index, 'Index')
# 2. Get the rank_table_file and check it.
rank_table_file = data.get('rank_table_file', None)
util_lib.check_not_none(rank_table_file, 'rank_table_file')
# 3. Get the rank_size and check it.
rank_size = data.get('rank_size', None)
util_lib.check_positive_integer(rank_size, 'rank_size')
# 4. Get the local_checkpoint_dir and check it.
local_checkpoint_dir = data.get('local_checkpoint_dir', None)
# 5. Init the JobInfo.
device_info = DeviceInfo(index=str(index))
job_info = JobInfo(device_info=device_info, rank_table_file=rank_table_file,
local_checkpoint_dir=local_checkpoint_dir, rank_size=rank_size)
return job_info
except IOError:
logging.warning('Warning:job config file does not exist')
job_id = os.getenv('JOB_ID', "")
if job_id == "":
logging.error('Error:can not get job config from env')
return None
heartbeat = os.getenv('HEARTBEAT', "")
rank_table_file = os.getenv('RANK_TABLE_FILE', "")
identity = os.getenv('POD_NAME', "")
if identity == "":
identity = os.getenv('RANK_ID', "")
checkpoint_dir = os.getenv('LOCAL_CHECKPOINT_DIR', "")
# cann't get rank_size from env, set to default 1
rank_size = os.getenv('RANK_SIZE', '1')
if rank_size.isdigit() is False:
print("set rank_size to default 1")
rank_size = 1
device_info = DeviceInfo(index=str(identity))
job_info = JobInfo(job_id=job_id,
heartbeat_time=heartbeat,
device_info=device_info,
rank_table_file=rank_table_file,
local_checkpoint_dir=checkpoint_dir,
rank_size=int(rank_size)
)
return job_info
|
py | 1a32dd7b802426f43cc728b12299aba9597a207a | """
##############################################################################
#
# Utility functions for resolving file paths for Maya's file texture node.
# These utilities are used for dealing with UV tiling and frame numbering in
# the file name and can be used to get the current pattern/preset and list
# of matching files.
#
##############################################################################
"""
def getFilePatternString(filePath, useFrameExtension, uvTilingMode):
"""
Given a path to a file and hints about UV tiling and frame extension usage,
convert the path to a version with appropriate tags marking the UV tile
and frame number.
"""
pass
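# Illustrative sketch (hypothetical file name; the actual result depends on Maya's
# implementation behind this stub): a UDIM-tiled texture such as
#   "sourceimages/diffuse.1001.exr"
# would be expected to come back tagged as
#   "sourceimages/diffuse.<UDIM>.exr"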
def _splitPath(filePath):
"""
##############################################################################
# Private Utilities
##############################################################################
"""
pass
def computeUVForFiles(filePaths, filePattern):
"""
Given a collection of paths to a file and the UV pattern it matches compute
the 0-based UV tile indicated by the file name. If a filePath or the pattern
are poorly formed then (0,0) is returned for that path.
"""
pass
def findAllFilesForPattern(pattern, frameNumber):
"""
Given a path, possibly containing tags in the file name, find all files in
the same directory that match the tags. If none found, just return pattern
that we looked for.
"""
pass
def _patternToRegex(pattern):
pass
def computeUVForFile(filePath, filePattern):
"""
Given a path to a file and the UV pattern it matches compute the 0-based UV
tile indicated by the file name. If the filePath or pattern are poorly
formed then (0,0) is returned.
"""
pass
_frameExtensionRegex = None
_oneBasedRegex = None
_zeroBasedRegex = None
_VTag = '<V>'
_udimRegex = None
_uTag = '<u>'
_UTag = '<U>'
_frameTag = '<f>'
_taggedOneBasedRegex = None
_udimTag = '<UDIM>'
_taggedZeroBasedRegex = None
_vTag = '<v>'
|
py | 1a32ddfb5e0db4331307723aea5f2f59565f6b9c | # -*- coding: utf-8 -*-
# question 3
def count_letters(word,find):
"""
Example function with types documented in the docstring.
Ce code doit retourner le nombre d'occurences d'un caractère passé en paramètre dans un mot donné également
Parameters
----------
param1 : str
Le 1er paramètre est une chaine de caractères
param2 : char
Le 2ème paramètre est un caractère
Returns
-------
int
Nombre d'occurences du caractère
Exemples
--------
>>> count_letters(abracadabra,a)
5
>>> count_letters(momomotus,u)
1
"""
count=0
for i in range(len(word)):
if word.find(find,i)!=0:
count+=1
return count
# example
print(count_letters('abracadabra','a'))
# question 5
import string
def remove_punctuation(phrase):
"""
Example function with types documented in the docstring.
Ce code doit renvoyer une chaine de caractère issue d'un extrait dont la ponctuation a été supprimée
Parameters
----------
param1 : str
Le 1er paramètre est une chaine de caractères
Returns
-------
str
Chaine de caractères passée en paramètre sans la ponctuation
"""
phrase_sans_punct = ""
for letter in phrase:
if letter not in string.punctuation:
phrase_sans_punct += letter
return phrase_sans_punct
#exemple
print(remove_punctuation("Chaine de caractères, passée en paramètre, sans la ponctuation !!!"))
# question 7
def reverse(word):
    """
    Example function with types documented in the docstring.
    Return the word passed as a parameter, reversed.
    Parameters
    ----------
    param1 : str
        The first parameter is a string (a word).
    Returns
    -------
    str
        The input word spelled backwards.
    """
    new_word = ""
    for i in range(len(word)):
        new_word = new_word + word[-(i + 1)]
    return new_word
# question 8
def mirror(word):
    """
    Example function with types documented in the docstring.
    Return the word passed as a parameter followed by its mirror image.
    Parameters
    ----------
    param1 : str
        The first parameter is a string (a word).
    Returns
    -------
    str
        The input word written as if read in a mirror (word + reversed word).
    """
    new_word = word + reverse(word)
    return new_word
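# example calls for reverse and mirror, in the same style as the examples above
print(reverse('python'))  # nohtyp
print(mirror('abc'))      # abccba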
|
py | 1a32df27c997ddde7bb6c83607ecc66618e8b63f | """Arnoldi algorithm.
Computes V and H such that :math:`AV_n = V_{n+1}\\underline{H}_n`. If the Krylov
subspace becomes A-invariant then V and H are truncated such that :math:`AV_n = V_n
H_n`.
:param A: a linear operator that works with the @-operator
:param v: the initial vector.
:param ortho: (optional) orthogonalization algorithm: may be one of
* ``'mgs'``: modified Gram-Schmidt (default).
* ``'dmgs'``: double Modified Gram-Schmidt.
* ``'lanczos'``: Lanczos short recurrence.
* ``'householder'``: Householder.
:param M: (optional) a self-adjoint and positive-definite preconditioner. If
``M`` is provided, then also a second basis :math:`P_n` is constructed such that
:math:`V_n=MP_n`. This is of importance in preconditioned methods. ``M`` has to
be ``None`` if ``ortho=='householder'`` (see ``B``).
:param inner: (optional) defines the inner product to use. See
:py:meth:`inner`.
``inner`` has to be ``None`` if ``ortho=='householder'``. It's unclear how a
variant of the Householder QR algorithm can be used with a non-Euclidean inner
product. Compare <https://math.stackexchange.com/q/433644/36678>.
"""
import numpy as np
from ._helpers import Identity, aslinearoperator, get_default_inner
from .errors import ArgumentError
from .householder import Householder
class ArnoldiHouseholder:
def __init__(self, A, v):
self.inner = get_default_inner(v.shape)
# save parameters
self.A = A
self.v = v
self.dtype = np.find_common_type([A.dtype, v.dtype], [])
# number of iterations
self.iter = 0
# Arnoldi basis
self.V = []
# flag indicating if Krylov subspace is invariant
self.is_invariant = False
self.houses = [Householder(v)]
self.vnorm = np.linalg.norm(v, 2)
# TODO set self.is_invariant = True for self.vnorm == 0
self.V.append(v / np.where(self.vnorm != 0.0, self.vnorm, 1.0))
# if self.vnorm > 0:
# self.V[0] = v / self.vnorm
# else:
# self.is_invariant = True
def __iter__(self):
return self
def __next__(self):
"""Carry out one iteration of Arnoldi."""
if self.is_invariant:
raise ArgumentError(
"Krylov subspace was found to be invariant in the previous iteration."
)
k = self.iter
Av = self.A @ self.V[k]
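        # apply the Householder reflections from previous iterations; the leading
        # entries of the transformed Av then carry the next Hessenberg column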
for j in range(k + 1):
Av[j:] = self.houses[j] @ Av[j:]
Av[j] *= np.conj(self.houses[j].alpha)
N = self.v.shape[0]
if k < N - 1:
house = Householder(Av[k + 1 :])
self.houses.append(house)
Av[k + 1 :] = (house @ Av[k + 1 :]) * np.conj(house.alpha)
h = Av[: k + 2]
h[-1] = np.abs(h[-1])
if h[-1] <= 1.0e-14:
self.is_invariant = True
v = None
else:
vnew = np.zeros_like(self.v)
vnew[k + 1] = 1
for j in range(k + 1, -1, -1):
vnew[j:] = self.houses[j] @ vnew[j:]
v = vnew * self.houses[-1].alpha
self.V.append(v)
else:
h = np.zeros([len(Av) + 1] + list(self.v.shape[1:]), Av.dtype)
h[:-1] = Av
self.is_invariant = True
v = None
self.iter += 1
return v, h
class ArnoldiMGS:
def __init__(
self,
A,
v,
num_reorthos: int = 1,
M=None,
Mv=None,
Mv_norm=None,
inner=None,
):
self.inner = get_default_inner(v.shape) if inner is None else inner
# save parameters
self.A = A
self.v = v
self.num_reorthos = num_reorthos
self.M = Identity() if M is None else aslinearoperator(M)
self.dtype = np.find_common_type([A.dtype, self.M.dtype, v.dtype], [])
# number of iterations
self.iter = 0
# Arnoldi basis
self.V = []
self.P = []
# flag indicating if Krylov subspace is invariant
self.is_invariant = False
p = v
if Mv is None:
v = self.M @ p
else:
v = Mv
if Mv_norm is None:
            # use self.inner so the default inner product works when `inner` is None
            self.vnorm = np.sqrt(self.inner(p, v))
else:
self.vnorm = Mv_norm
self.P.append(p / np.where(self.vnorm != 0.0, self.vnorm, 1.0))
# TODO set self.is_invariant = True for self.vnorm == 0
self.V.append(v / np.where(self.vnorm != 0.0, self.vnorm, 1.0))
# if self.vnorm > 0:
# self.V[0] = v / self.vnorm
# else:
# self.is_invariant = True
def next_mgs(self, k, Av):
# modified Gram-Schmidt orthogonalization
for j in range(k + 1):
alpha = self.inner(self.V[j], Av)
self.h[j] += alpha
Av -= alpha * self.P[j]
def __iter__(self):
return self
def __next__(self):
if self.is_invariant:
raise ArgumentError(
"Krylov subspace was found to be invariant in the previous iteration."
)
k = self.iter
# the matrix-vector multiplication
Av = self.A @ self.V[k]
self.h = np.zeros([k + 2] + list(self.v.shape[1:]), dtype=self.dtype)
# determine vectors for orthogonalization
for _ in range(self.num_reorthos):
self.next_mgs(k, Av)
MAv = self.M @ Av
self.h[k + 1] = np.sqrt(self.inner(Av, MAv))
if np.all(self.h[k + 1] <= 1.0e-14):
self.is_invariant = True
v = None
else:
Hk1k = np.where(self.h[k + 1] != 0.0, self.h[k + 1], 1.0)
self.P.append(Av / Hk1k)
v = MAv / Hk1k
if v is not None:
self.V.append(v)
# increase iteration counter
self.iter += 1
return v, self.h
class ArnoldiLanczos:
def __init__(self, A, v, M=None, Mv=None, Mv_norm=None, inner=None):
self.A = A
self.M = Identity() if M is None else aslinearoperator(M)
self.inner = get_default_inner(v.shape) if inner is None else inner
self.dtype = np.find_common_type([A.dtype, self.M.dtype, v.dtype], [])
# number of iterations
self.num_iter = 0
# stores the three tridiagonal entries of the Hessenberg matrix
self.h = np.zeros([3] + list(v.shape[1:]), dtype=self.dtype)
# flag indicating if Krylov subspace is invariant
self.is_invariant = False
p = v
v = self.M @ p if Mv is None else Mv
        # use self.inner so the default inner product works when `inner` is None
        self.vnorm = np.sqrt(self.inner(p, v)) if Mv_norm is None else Mv_norm
# self.P.append(p / np.where(self.vnorm != 0.0, self.vnorm, 1.0))
# # TODO set self.is_invariant = True for self.vnorm == 0
# self.V.append(v / np.where(self.vnorm != 0.0, self.vnorm, 1.0))
self.p_old = None
self.p = p / np.where(self.vnorm != 0.0, self.vnorm, 1.0)
self.v = v / np.where(self.vnorm != 0.0, self.vnorm, 1.0)
# if self.vnorm > 0:
# self.V[0] = v / self.vnorm
# else:
# self.is_invariant = True
def __next__(self):
"""Carry out one iteration of Arnoldi."""
if self.is_invariant:
raise ArgumentError(
"Krylov subspace was found to be invariant in the previous iteration."
)
Av = self.A @ self.v
if self.num_iter > 0:
# copy the old lower-diagonal entry to the upper diagonal
self.h[0] = self.h[2]
Av -= self.h[0] * self.p_old
# orthogonalize
alpha = self.inner(self.v, Av)
# if self.ortho == "lanczos":
# # check if alpha is real
# if abs(alpha.imag) > 1e-10:
# warnings.warn(
# f"Iter {self.iter}: "
# f"abs(alpha.imag) = {abs(alpha.imag)} > 1e-10. "
# "Is your operator self-adjoint "
# "in the provided inner product?"
# )
# alpha = alpha.real
self.h[1] = alpha
Av -= alpha * self.p
MAv = self.M @ Av
self.h[2] = np.sqrt(self.inner(Av, MAv))
if np.all(self.h[2] <= 1.0e-14):
self.is_invariant = True
self.v = None
self.p = None
else:
Hk1k = np.where(self.h[2] != 0.0, self.h[2], 1.0)
self.p_old = self.p
self.p = Av / Hk1k
self.v = MAv / Hk1k
# increase iteration counter
self.num_iter += 1
return self.v, self.h, self.p
def arnoldi_res(A, V, H, inner=None):
"""Measure Arnoldi residual.
:param A: a linear operator that can be used with scipy's aslinearoperator with
``shape==(N,N)``.
:param V: Arnoldi basis matrix with ``shape==(N,n)``.
:param H: Hessenberg matrix: either :math:`\\underline{H}_{n-1}` with
``shape==(n,n-1)`` or :math:`H_n` with ``shape==(n,n)`` (if the Arnoldi basis
spans an A-invariant subspace).
:param inner: (optional) the inner product to use, see :py:meth:`inner`.
:returns: either :math:`\\|AV_{n-1} - V_n \\underline{H}_{n-1}\\|` or
:math:`\\|A V_n - V_n H_n\\|` (in the invariant case).
"""
invariant = H.shape[0] == H.shape[1]
V1 = V if invariant else V[:, :-1]
    res = A @ V1 - np.dot(V, H)
return np.sqrt(inner(res, res))
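# Minimal usage sketch (not part of the library): build a small Krylov basis
# with ArnoldiMGS and collect the Hessenberg columns. `A_demo`, `v_demo` and
# `n_steps` are illustrative names only; this assumes numpy and the helper
# utilities imported above behave as the constructors expect.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    A_demo = rng.standard_normal((10, 10))
    v_demo = rng.standard_normal(10)
    arnoldi = ArnoldiMGS(A_demo, v_demo)
    n_steps = 4
    h_columns = []
    for _ in range(n_steps):
        v_next, h_col = next(arnoldi)
        # h_col is the next column of the extended Hessenberg matrix; it has
        # k+2 entries in step k, so that A V_n = V_{n+1} \underline{H}_n holds
        h_columns.append(h_col)
        if v_next is None:
            # the Krylov subspace became A-invariant
            break
    V_basis = np.column_stack(arnoldi.V)
    print(V_basis.shape, [h.shape for h in h_columns])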
|
py | 1a32df6a557c2c4666f1377e87a61433ba9fcd93 |
import json
import os
from azure.media.videoanalyzeredge import *
from azure.iot.hub import IoTHubRegistryManager #run pip install azure-iot-hub to get this package
from azure.iot.hub.models import CloudToDeviceMethod, CloudToDeviceMethodResult
from datetime import time
device_id = "lva-sample-device"
module_id = "mediaEdge"
connection_string = "connectionString"
live_pipeline_name = "pipelineInstance1"
pipeline_topology_name = "pipelineTopology1"
url = "rtsp://sample-url-from-camera"
def build_pipeline_topology():
pipeline_topology_properties = PipelineTopologyProperties()
pipeline_topology_properties.description = "Continuous video recording to an Azure Media Services Asset"
user_name_param = ParameterDeclaration(name="rtspUserName",type="String",default="testusername")
password_param = ParameterDeclaration(name="rtspPassword",type="SecretString",default="testpassword")
url_param = ParameterDeclaration(name="rtspUrl",type="String",default="rtsp://www.sample.com")
hub_param = ParameterDeclaration(name="hubSinkOutputName",type="String")
source = RtspSource(name="rtspSource", endpoint=UnsecuredEndpoint(url="${rtspUrl}",credentials=UsernamePasswordCredentials(username="${rtspUserName}",password="${rtspPassword}")))
node = NodeInput(node_name="rtspSource")
sink = IotHubMessageSink("msgSink", node, "${hubSinkOutputName}")
pipeline_topology_properties.parameters = [user_name_param, password_param, url_param, hub_param]
pipeline_topology_properties.sources = [source]
pipeline_topology_properties.sinks = [sink]
pipeline_topology = PipelineTopology(name=pipeline_topology_name,properties=pipeline_topology_properties)
return pipeline_topology
def build_live_pipeline():
url_param = ParameterDefinition(name="rtspUrl", value=url)
pass_param = ParameterDefinition(name="rtspPassword", value='testpass')
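    # pass_param is shown for illustration only; rtspPassword has a default in the
    # topology above, so it is not appended to the live pipeline parameters below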
live_pipeline_properties = LivePipelineProperties(description="Sample description", topology_name=pipeline_topology_name, parameters=[url_param])
live_pipeline = LivePipeline(name=live_pipeline_name, properties=live_pipeline_properties)
return live_pipeline
def invoke_method_helper(method):
direct_method = CloudToDeviceMethod(method_name=method.method_name, payload=method.serialize())
registry_manager = IoTHubRegistryManager(connection_string)
    payload = registry_manager.invoke_device_module_method(device_id, module_id, direct_method).payload
if payload is not None and 'error' in payload:
print(payload['error'])
return None
return payload
def main():
pipeline_topology = build_pipeline_topology()
live_pipeline = build_live_pipeline()
try:
set_pipeline_top_response = invoke_method_helper(PipelineTopologySetRequest(pipeline_topology=pipeline_topology))
print(set_pipeline_top_response)
list_pipeline_top_response = invoke_method_helper(PipelineTopologyListRequest())
if list_pipeline_top_response:
list_pipeline_top_result = PipelineTopologyCollection.deserialize(list_pipeline_top_response)
get_pipeline_top_response = invoke_method_helper(PipelineTopologyGetRequest(name=pipeline_topology_name))
if get_pipeline_top_response:
get_pipeline_top_result = PipelineTopology.deserialize(get_pipeline_top_response)
set_live_pipeline_response = invoke_method_helper(LivePipelineSetRequest(live_pipeline=live_pipeline))
activate_pipeline_response = invoke_method_helper(LivePipelineActivateRequest(name=live_pipeline_name))
get_pipeline_response = invoke_method_helper(LivePipelineGetRequest(name=live_pipeline_name))
if get_pipeline_response:
get_pipeline_result = LivePipeline.deserialize(get_pipeline_response)
deactivate_pipeline_response = invoke_method_helper(LivePipelineDeactivateRequest(name=live_pipeline_name))
delete_pipeline_response = invoke_method_helper(LivePipelineDeleteRequest(name=live_pipeline_name))
delete_pipeline_response = invoke_method_helper(PipelineTopologyDeleteRequest(name=pipeline_topology_name))
except Exception as ex:
print(ex)
if __name__ == "__main__":
    main()
|
py | 1a32dfe4f92b50f9b45a699e17cc0cfc23723e21 | from .image import *
from .lines import *
from .mark import *
from .space import *
from .text import *
|