ext | sha | content
---|---|---|
py | b400374753de2ae56fb3e46409ae1664c903b0ec | from MessagePresenter import MessagePresenter
import random
class BroadcastPresenter(MessagePresenter):
"""
A proxy for a group of message presenters -- the messages are forwarded to all presenters.
"""
def __init__(self,
presenters=[], # the backend presenters to wrap
*args,**kwargs
):
MessagePresenter.__init__(self,*args,**kwargs)
self.presenters = presenters
def submit(self,message,lockduration=None,clearafter=None):
"""Submit a new message."""
self.marker(220)
for p in self.presenters:
p.submit(message,lockduration,clearafter)
return True
def clear(self):
"""Clear the content."""
for p in self.presenters:
p.clear()
def precache(self,message):
"""Pre-cache a message."""
for p in self.presenters:
p.precache(message)
def unlock(self):
"""Manually unlock the presenter (always succeeds)."""
for p in self.presenters:
p.unlock()
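# Illustrative usage (not part of the original module; presenter_a and presenter_b
# are assumed to be MessagePresenter instances constructed elsewhere):
#
#   broadcast = BroadcastPresenter(presenters=[presenter_a, presenter_b])
#   broadcast.submit("Hello", lockduration=2, clearafter=5)  # forwarded to both presenters
#   broadcast.clear()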
|
py | b400394f610d7b8cdff2f2306eaf910cc399634e | # Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# The Universal Permissive License (UPL), Version 1.0
#
# Subject to the condition set forth below, permission is hereby granted to any
# person obtaining a copy of this software, associated documentation and/or
# data (collectively the "Software"), free of charge and under any and all
# copyright rights in the Software, and any and all patent rights owned or
# freely licensable by each licensor hereunder covering either (i) the
# unmodified Software as contributed to or provided by such licensor, or (ii)
# the Larger Works (as defined below), to deal in both
#
# (a) the Software, and
#
# (b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
# one is included with the Software (each a "Larger Work" to which the Software
# is contributed by such licensors),
#
# without restriction, including without limitation the rights to copy, create
# derivative works of, display, perform, and distribute the Software and make,
# use, sell, offer for sale, import, export, have made, and have sold the
# Software and the Larger Work(s), and to sublicense the foregoing rights on
# either these or other terms.
#
# This license is subject to the following condition:
#
# The above copyright notice and either this complete permission notice or at a
# minimum a reference to the UPL must be included in all copies or substantial
# portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import _imp
import posix
import sys
def load(suffix=""):
module_name = "_frozen_importlib%s" % suffix
filename = sys.graal_python_stdlib_home + ("/importlib/_bootstrap%s.py" % suffix)
return __import__(filename, module_name)
load("_external")
importlib = load()
importlib._install(sys, _imp)
importlib._install_external_importers()
sys.modules["builtins"].__import__ = __builtin__(importlib.__import__)
# Insert our meta finder for caching
_imp.CachedImportFinder.ModuleSpec = importlib.ModuleSpec
sys.meta_path.insert(0, _imp.CachedImportFinder)
|
py | b400395bab69b1bbb79ca3a4d3533e1c422e7a7b | from .methods import concat_dispatch
from .core import get_parallel_type, meta_nonempty, make_meta
######################################
# cuDF: Pandas Dataframes on the GPU #
######################################
@concat_dispatch.register_lazy('cudf')
@get_parallel_type.register_lazy('cudf')
@meta_nonempty.register_lazy('cudf')
@make_meta.register_lazy('cudf')
def _register_cudf():
import cudf
import dask_cudf
get_parallel_type.register(cudf.DataFrame, lambda _: dask_cudf.DataFrame)
get_parallel_type.register(cudf.Series, lambda _: dask_cudf.Series)
get_parallel_type.register(cudf.Index, lambda _: dask_cudf.Index)
@meta_nonempty.register((cudf.DataFrame, cudf.Series, cudf.Index))
def _(x):
y = meta_nonempty(x.to_pandas()) # TODO: add iloc[:5]
return cudf.from_pandas(y)
@make_meta.register((cudf.Series, cudf.DataFrame))
def _(x):
return x.head(0)
@make_meta.register(cudf.Index)
def _(x):
return x[:0]
concat_dispatch.register(
(cudf.DataFrame, cudf.Series, cudf.Index),
cudf.concat
)
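# Sketch of what the lazy registrations above enable once cudf/dask_cudf are
# installed (illustrative only, values are placeholders):
#
#   import cudf
#   gdf = cudf.DataFrame({"a": [1, 2, 3]})
#   make_meta(gdf)           # -> empty cudf.DataFrame via the head(0) handler above
#   get_parallel_type(gdf)   # -> dask_cudf.DataFrame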
|
py | b40039bbe3929ad9d7c6c6c64fdd5cecd09767f0 | #!/usr/bin/env python
#############################################################################
##
## Copyright (C) 2013 Riverbank Computing Limited.
## Copyright (C) 2013 Digia Plc and/or its subsidiary(-ies).
## All rights reserved.
##
## This file is part of the examples of PyQt.
##
## $QT_BEGIN_LICENSE:BSD$
## You may use this file under the terms of the BSD license as follows:
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are
## met:
## * Redistributions of source code must retain the above copyright
## notice, this list of conditions and the following disclaimer.
## * Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in
## the documentation and/or other materials provided with the
## distribution.
## * Neither the name of Nokia Corporation and its Subsidiary(-ies) nor
## the names of its contributors may be used to endorse or promote
## products derived from this software without specific prior written
## permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
## OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
## SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
## LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
## DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
## THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
## (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
## OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
## $QT_END_LICENSE$
##
#############################################################################
import sys
from PyQt5.QtCore import (pyqtProperty, pyqtSignal, Q_CLASSINFO,
QCoreApplication, QDate, QObject, QTime, QUrl)
from PyQt5.QtGui import QColor
from PyQt5.QtQml import (qmlAttachedPropertiesObject, qmlRegisterType,
QQmlComponent, QQmlEngine, QQmlListProperty)
QML = b'''
import People 1.0
import QtQuick 2.0
BirthdayParty {
onPartyStarted: console.log("This party started rockin' at " + time);
host: Boy {
name: "Bob Jones"
shoe { size: 12; color: "white"; brand: "Nike"; price: 90.0 }
}
Boy {
name: "Leo Hodges"
BirthdayParty.rsvp: "2009-07-06"
shoe { size: 10; color: "black"; brand: "Reebok"; price: 59.95 }
}
Boy {
name: "Jack Smith"
shoe { size: 8; color: "blue"; brand: "Puma"; price: 19.95 }
}
Girl {
name: "Anne Brown"
BirthdayParty.rsvp: "2009-07-01"
shoe.size: 7
shoe.color: "red"
shoe.brand: "Marc Jacobs"
shoe.price: 699.99
}
}
'''
class ShoeDescription(QObject):
def __init__(self, parent=None):
super(ShoeDescription, self).__init__(parent)
self._size = 0
self._color = QColor()
self._brand = ''
self._price = 0.0
@pyqtProperty(int)
def size(self):
return self._size
@size.setter
def size(self, size):
self._size = size
@pyqtProperty(QColor)
def color(self):
return self._color
@color.setter
def color(self, color):
self._color = color
@pyqtProperty(str)
def brand(self):
return self._brand
@brand.setter
def brand(self, brand):
self._brand = brand
@pyqtProperty(float)
def price(self):
return self._price
@price.setter
def price(self, price):
self._price = price
class Person(QObject):
def __init__(self, parent=None):
super(Person, self).__init__(parent)
self._name = ''
self._shoe = ShoeDescription()
@pyqtProperty(str)
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@pyqtProperty(ShoeDescription)
def shoe(self):
return self._shoe
class Boy(Person):
pass
class Girl(Person):
pass
class BirthdayPartyAttached(QObject):
def __init__(self, parent):
super(BirthdayPartyAttached, self).__init__(parent)
self._rsvp = QDate()
@pyqtProperty(QDate)
def rsvp(self):
return self._rsvp
@rsvp.setter
def rsvp(self, rsvp):
self._rsvp = rsvp
class BirthdayParty(QObject):
Q_CLASSINFO('DefaultProperty', 'guests')
partyStarted = pyqtSignal(QTime, arguments=['time'])
def __init__(self, parent=None):
super(BirthdayParty, self).__init__(parent)
self._host = None
self._guests = []
@pyqtProperty(Person)
def host(self):
return self._host
@host.setter
def host(self, host):
self._host = host
@pyqtProperty(QQmlListProperty)
def guests(self):
return QQmlListProperty(Person, self, self._guests)
def guestCount(self):
return len(self._guests)
def guest(self, idx):
return self._guests[idx]
def startParty(self):
self.partyStarted.emit(QTime.currentTime())
app = QCoreApplication(sys.argv)
qmlRegisterType(BirthdayPartyAttached)
qmlRegisterType(BirthdayParty, "People", 1, 0, "BirthdayParty",
attachedProperties=BirthdayPartyAttached)
qmlRegisterType(ShoeDescription)
qmlRegisterType(Person)
qmlRegisterType(Boy, "People", 1, 0, "Boy")
qmlRegisterType(Girl, "People", 1, 0, "Girl")
engine = QQmlEngine()
component = QQmlComponent(engine)
component.setData(QML, QUrl())
party = component.create()
if party is not None and party.host is not None:
print("\"%s\" is having a birthday!" % party.host.name)
if isinstance(party.host, Boy):
print("He is inviting:")
else:
print("She is inviting:")
for ii in range(party.guestCount()):
guest = party.guest(ii)
attached = qmlAttachedPropertiesObject(BirthdayParty, guest, False)
if attached is not None:
rsvpDate = attached.property('rsvp')
else:
rsvpDate = QDate()
if rsvpDate.isNull():
print(" \"%s\" RSVP date: Hasn't RSVP'd" % guest.name)
else:
print(" \"%s\" RSVP date: %s" % (guest.name, rsvpDate.toString()))
party.startParty()
else:
for e in component.errors():
print("Error:", e.toString());
|
py | b40039fd97023ef97c9b7b86df77b8c6cc88bd9c | import platform
from deriva.core import get_credential, BaseCLI, DerivaServer, __version__
def rollback_annotation(host, catalog_id, snaptime=None, prot='https', credential=None):
"""
Rollback the entire annotation hierarchy for the specified catalog to a given point in time specified by snaptime.
"""
if not credential:
credential = get_credential(host)
server = DerivaServer(prot, host, credentials=credential)
good_catalog = server.connect_ermrest(catalog_id, snaptime)
good_config = good_catalog.getCatalogConfig()
live_catalog = server.connect_ermrest(catalog_id)
live_config = live_catalog.getCatalogConfig()
# copy over annotations
# catalog-level
live_config.annotations.clear()
live_config.annotations.update(good_config.annotations)
for sname, live_schema in live_config.schemas.items():
good_schema = good_config.schemas[sname]
# schema-level
live_schema.annotations.clear()
live_schema.annotations.update(good_schema.annotations)
for tname, live_table in live_schema.tables.items():
if tname not in good_schema.tables:
print('Warning: skipping live table %s.%s which lacks known-good table' % (sname, tname))
continue
good_table = good_schema.tables[tname]
# table-level
live_table.annotations.clear()
live_table.annotations.update(good_table.annotations)
for live_column in live_table.column_definitions:
cname = live_column.name
good_column = good_table.column_definitions[cname]
# column-level
live_column.annotations.clear()
live_column.annotations.update(good_column.annotations)
for live_key in live_table.keys:
constr_name = tuple(live_key.names[0])
good_key = live_table.keys[constr_name]
# key-level
live_key.annotations.clear()
live_key.annotations.update(good_key.annotations)
for live_fkey in live_table.foreign_keys:
constr_name = tuple(live_fkey.names[0])
good_fkey = live_table.foreign_keys[constr_name]
# fkey-level
live_fkey.annotations.clear()
live_fkey.annotations.update(good_fkey.annotations)
live_config.apply(live_catalog)
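# Illustrative call outside the CLI (the host name is a placeholder; the snapshot ID
# format matches the example given in the --snapshot help text below):
#
#   rollback_annotation('example.derivacloud.org', 1, snaptime='2QG-VWP6-0YG0')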
def main():
cli = BaseCLI("annotation rollback tool", None, version=__version__, hostname_required=True)
cli.parser.add_argument("--catalog", default=1, metavar="<1>", help="Catalog number. Default: 1")
cli.parser.add_argument("--snapshot", metavar="<snapshot ID", help="Catalog snapshot ID. Example: 2QG-VWP6-0YG0")
args = cli.parse_cli()
credential = get_credential(args.host, args.credential_file)
rollback_annotation(args.host, args.catalog, snaptime=args.snapshot, credential=credential)
if __name__ == '__main__':
sys.exit(main())
|
py | b4003a612dd67be0a2ab70388d47fded99560d39 | """
Author: XuLin Yang
Student id: 904904
Date: 2020-4-22 00:10:42
Description: preprocess population age data
"""
import json
from pprint import pprint
GREATER_ADELAIDE_LGA_CODES = [40150, 43650, 40310, 42030, 45680, 40120, 44550, 45340, 44340, 44060, 42600, 40700, 47980, 48410, 40070, 41060, 45290, 40910,
48260, 46510, 47700, 47140, 45890]
GREATER_MELBOURNE_LGA_CODES = [25340, 21450, 21610, 22170, 22670, 23430, 20910, 22310, 24970, 25900, 26350, 23670, 27450, 27350, 21110, 26980, 24410, 21890,
20660, 24210, 25710, 27070, 24850, 25620, 24600, 25250, 23270, 24130, 25150, 27260, 24650, 23110, 21180, 23270, 25060, 24330]
GREATER_BRISBANE_LGA_CODES = [36580, 35010, 31000, 36250, 34590, 36510, 34580, 33960]
GREATER_SYDNEY_LGA_CODES = [18550, 13100, 14000, 16370, 18000, 14500, 17420, 13800, 10750, 16350, 10900, 14900, 18400, 16100, 11500, 11450, 17150, 10750, 12850,
13950, 16250, 10350, 16700, 14100, 14700, 18250, 15950, 15350, 15150, 10200, 11520, 14800, 17200, 18500, 18050, 16550, 11100, 16650,
14450, 14150, 17100, 11550, 11300, 10150, 15200]
GREATER_ADELAIDE_LGA_NAME = "Greater_Adelaide"
GREATER_MELBOURNE_LGA_NAME = "Greater_Melbourne"
GREATER_BRISBANE_LGA_NAME = "Greater_Brisbane"
GREATER_SYDNEY_LGA_NAME = "Greater_Sydney"
CODE_TO_NAME = {}
GREATER_AREA = {
GREATER_ADELAIDE_LGA_NAME: GREATER_ADELAIDE_LGA_CODES,
GREATER_MELBOURNE_LGA_NAME: GREATER_MELBOURNE_LGA_CODES,
GREATER_BRISBANE_LGA_NAME: GREATER_BRISBANE_LGA_CODES,
GREATER_SYDNEY_LGA_NAME: GREATER_SYDNEY_LGA_CODES,
}
for i, j in GREATER_AREA.items():
for k in j:
CODE_TO_NAME[k] = i
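# For example, after the loop above CODE_TO_NAME[40150] == GREATER_ADELAIDE_LGA_NAME
# and CODE_TO_NAME[21450] == GREATER_MELBOURNE_LGA_NAME.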
def parse_data(file_path: str, meta_path: str):
attribute_info = {}
with open(meta_path) as file:
meta_data = json.load(file)
# pprint(meta_data)
for info in meta_data['selectedAttributes'][1:]:
attribute_info[info['name']] = {'title': info['title'].replace("??? ", "").replace(" ", "_").replace("-", "_to_"),
'description': info['description'].replace("??? ", "")}
with open('result-meta.json', 'w') as outfile:
json.dump(attribute_info, outfile)
result = {
GREATER_ADELAIDE_LGA_NAME: {"percentage_foreigner": 0},
GREATER_MELBOURNE_LGA_NAME: {"percentage_foreigner": 0},
GREATER_BRISBANE_LGA_NAME: {"percentage_foreigner": 0},
GREATER_SYDNEY_LGA_NAME: {"percentage_foreigner": 0},
}
result_counted = {
GREATER_ADELAIDE_LGA_NAME: 0,
GREATER_MELBOURNE_LGA_NAME: 0,
GREATER_BRISBANE_LGA_NAME: 0,
GREATER_SYDNEY_LGA_NAME: 0,
}
with open(file_path) as file:
json_data = json.load(file)
for row in json_data['features']:
lga = int(row['properties']['lga_code18'])
if lga not in CODE_TO_NAME:
continue
area = CODE_TO_NAME[lga]
result_counted[area] += 1
# each column in the table
for key, value in row['properties'].items():
if key == "citizenship_status_p_brn_ovs_aus_citizen_pr100":
if value:
result[area]["percentage_foreigner"] += 100 - value
else:
print("Warning: [unwanted key]", key, value)
# row with error
if not lga:
print("Error: [row with empty lga]", str(row))
continue
for area in result:
result[area]["percentage_foreigner"] /= result_counted[area]
with open('result-' + file_path, 'w') as outfile:
json.dump(result, outfile)
# print all lga_code
# pprint(result.keys())
# print all population count
# pprint(result['20110'][count_str].keys())
# print all population percent
# pprint(result['20110'][percent_str].keys())
if __name__ == "__main__":
meta_file = "meta_data.json"
with open("data_file.json") as file:
files = json.load(file)
for f in files:
parse_data(f, meta_file)
|
py | b4003ad4947b4370e00e8f288d6601130f194798 | """
The function in this file is based on and/or taken from https://github.com/lpbsscientist/YeaZ-GUI,
which is under the MIT license.
"""
# Import tensorflow differently depending on version
from tensorflow import __version__ as tf_version
from tensorflow.keras.layers import (
Conv2D,
Dropout,
Input,
MaxPooling2D,
UpSampling2D,
concatenate,
)
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
# from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler
if int(tf_version[0]) <= 1:
from tensorflow import ConfigProto, InteractiveSession
else:
from tensorflow.compat.v1 import ConfigProto, InteractiveSession
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
def unet(pretrained_weights=None, input_size=(256, 256, 1)):
inputs = Input(input_size)
conv1 = Conv2D(64, 3, activation="relu", padding="same", kernel_initializer="he_normal")(inputs)
conv1 = Conv2D(64, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(128, 3, activation="relu", padding="same", kernel_initializer="he_normal")(pool1)
conv2 = Conv2D(128, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(256, 3, activation="relu", padding="same", kernel_initializer="he_normal")(pool2)
conv3 = Conv2D(256, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(512, 3, activation="relu", padding="same", kernel_initializer="he_normal")(pool3)
conv4 = Conv2D(512, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
conv5 = Conv2D(1024, 3, activation="relu", padding="same", kernel_initializer="he_normal")(
pool4
)
conv5 = Conv2D(1024, 3, activation="relu", padding="same", kernel_initializer="he_normal")(
conv5
)
drop5 = Dropout(0.5)(conv5)
up6 = Conv2D(512, 2, activation="relu", padding="same", kernel_initializer="he_normal")(
UpSampling2D(size=(2, 2))(drop5)
)
merge6 = concatenate([drop4, up6], axis=3)
conv6 = Conv2D(512, 3, activation="relu", padding="same", kernel_initializer="he_normal")(
merge6
)
conv6 = Conv2D(512, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv6)
up7 = Conv2D(256, 2, activation="relu", padding="same", kernel_initializer="he_normal")(
UpSampling2D(size=(2, 2))(conv6)
)
merge7 = concatenate([conv3, up7], axis=3)
conv7 = Conv2D(256, 3, activation="relu", padding="same", kernel_initializer="he_normal")(
merge7
)
conv7 = Conv2D(256, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv7)
up8 = Conv2D(128, 2, activation="relu", padding="same", kernel_initializer="he_normal")(
UpSampling2D(size=(2, 2))(conv7)
)
merge8 = concatenate([conv2, up8], axis=3)
conv8 = Conv2D(128, 3, activation="relu", padding="same", kernel_initializer="he_normal")(
merge8
)
conv8 = Conv2D(128, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv8)
up9 = Conv2D(64, 2, activation="relu", padding="same", kernel_initializer="he_normal")(
UpSampling2D(size=(2, 2))(conv8)
)
merge9 = concatenate([conv1, up9], axis=3)
conv9 = Conv2D(64, 3, activation="relu", padding="same", kernel_initializer="he_normal")(merge9)
conv9 = Conv2D(64, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv9)
conv9 = Conv2D(2, 3, activation="relu", padding="same", kernel_initializer="he_normal")(conv9)
conv10 = Conv2D(1, 1, activation="sigmoid")(conv9)
model = Model(inputs=inputs, outputs=conv10)
model.compile(optimizer=Adam(lr=1e-4), loss="binary_crossentropy", metrics=["accuracy"])
if pretrained_weights:
model.load_weights(pretrained_weights)
return model
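# Minimal usage sketch (illustrative only; the weights filename is a placeholder):
#
#   model = unet(input_size=(256, 256, 1))
#   model.summary()
#   # model = unet(pretrained_weights="unet_weights.h5")  # resume from saved weights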
|
py | b4003b45fdb17dc9dc8199a9d1a0309423623e50 | import torch
import numpy as np
from scipy.special import softmax
from pytorch_tabnet.utils import filter_weights
from pytorch_tabnet.metrics import check_metrics
from pytorch_tabnet.abstract_model import TabModel
from pytorch_tabnet.multiclass_utils import infer_output_dim, check_output_dim
from torch.nn.utils import clip_grad_norm_
from pytorch_tabnet import tab_network
from pytorch_tabnet.utils import (
PredictDataset,
create_explain_matrix,
validate_eval_set,
create_dataloaders,
)
from sklearn.utils import check_array
from torch.utils.data import DataLoader
class TabNetClassifier(TabModel):
def __post_init__(self):
super(TabNetClassifier, self).__post_init__()
self._task = 'classification'
self._default_loss = torch.nn.functional.cross_entropy
self._default_metric = 'accuracy'
def weight_updater(self, weights):
"""
Updates weights dictionary according to target_mapper.
Parameters
----------
weights : bool or dict
Given weights for balancing training.
Returns
-------
bool or dict
Same bool if weights are bool, updated dict otherwise.
"""
if isinstance(weights, int):
return weights
elif isinstance(weights, dict):
return {self.target_mapper[key]: value for key, value in weights.items()}
else:
return weights
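# For example, with target_mapper == {'cat': 0, 'dog': 1}, a weights dict of
# {'cat': 2.0, 'dog': 1.0} is remapped to {0: 2.0, 1: 1.0}; an int weight
# (e.g. 0 or 1) is returned unchanged.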
def prepare_target(self, y):
return np.vectorize(self.target_mapper.get)(y)
def compute_loss(self, y_pred, y_true):
return self.loss_fn(y_pred, y_true.long())
def update_fit_params(
self,
X_train,
y_train,
eval_set,
weights,
):
output_dim, train_labels = infer_output_dim(y_train)
for X, y in eval_set:
check_output_dim(train_labels, y)
self.output_dim = output_dim
self._default_metric = ('auc' if self.output_dim == 2 else 'accuracy')
self.classes_ = train_labels
self.target_mapper = {
class_label: index for index, class_label in enumerate(self.classes_)
}
self.preds_mapper = {
str(index): class_label for index, class_label in enumerate(self.classes_)
}
self.updated_weights = self.weight_updater(weights)
def stack_batches(self, list_y_true, list_y_score):
y_true = np.hstack(list_y_true)
y_score = np.vstack(list_y_score)
y_score = softmax(y_score, axis=1)
return y_true, y_score
def predict_func(self, outputs):
outputs = np.argmax(outputs, axis=1)
return np.vectorize(self.preds_mapper.get)(outputs.astype(str))
def predict_proba(self, X):
"""
Make predictions for classification on a batch (valid)
Parameters
----------
X : a :tensor: `torch.Tensor`
Input data
Returns
-------
res : np.ndarray
"""
self.network.eval()
dataloader = DataLoader(
PredictDataset(X),
batch_size=self.batch_size,
shuffle=False,
)
results = []
for batch_nb, data in enumerate(dataloader):
data = data.to(self.device).float()
output, M_loss = self.network(data)
predictions = torch.nn.Softmax(dim=1)(output).cpu().detach().numpy()
results.append(predictions)
res = np.vstack(results)
return res
class TabNetRegressor(TabModel):
def __post_init__(self):
super(TabNetRegressor, self).__post_init__()
self._task = 'regression'
self._default_loss = torch.nn.functional.mse_loss
self._default_metric = 'mse'
def prepare_target(self, y):
return y
def compute_loss(self, y_pred, y_true):
return self.loss_fn(y_pred, y_true)
def update_fit_params(
self,
X_train,
y_train,
eval_set,
weights
):
if len(y_train.shape) != 2:
msg = "Targets should be 2D : (n_samples, n_regression) " + \
f"but y_train.shape={y_train.shape} given.\n" + \
"Use reshape(-1, 1) for single regression."
raise ValueError(msg)
self.output_dim = y_train.shape[1]
self.preds_mapper = None
self.updated_weights = weights
filter_weights(self.updated_weights)
def predict_func(self, outputs):
return outputs
def stack_batches(self, list_y_true, list_y_score):
y_true = np.vstack(list_y_true)
y_score = np.vstack(list_y_score)
return y_true, y_score
|
py | b4003ce54ed82b45c0cc065a056904df60595b78 | __author__ = "Joseph Mullen"
|
py | b4003d00e003110f46f278f8ed5d73625b9e020e | # See readme.md for instructions on running this code.
from __future__ import print_function
import os
import logging
import ssl
import sys
try:
import requests
except ImportError as e:
logging.error("Dependency missing!!\n{}".format(e))
sys.exit(0)
HELP_MESSAGE = '''
This bot allows users to translate a sentence into
'Yoda speak'.
Users should preface messages with '@mention-bot'.
Before running this, make sure to get a Mashape Api token.
Instructions are in the 'readme.md' file.
Store it in the 'yoda_bot.config' file.
The 'yoda_bot.config' file should be located at '~/yoda_bot.config'.
Example input:
@mention-bot You will learn how to speak like me someday.
'''
class ApiKeyError(Exception):
'''raise this when there is an error with the Mashape Api Key'''
class YodaSpeakHandler(object):
'''
This bot will allow users to translate a sentence into 'Yoda speak'.
It looks for messages starting with '@mention-bot'.
'''
def usage(self):
return '''
This bot will allow users to translate a sentence into
'Yoda speak'.
Users should preface messages with '@mention-bot'.
Before running this, make sure to get a Mashape Api token.
Instructions are in the 'readme.md' file.
Store it in the 'yoda_bot.config' file.
The 'yoda_bot.config' file should be located at '~/yoda_bot.config'.
Example input:
@mention-bot You will learn how to speak like me someday.
'''
def handle_message(self, message, client, state_handler):
original_content = message['content']
stream = message['display_recipient']
subject = message['subject']
handle_input(client, original_content, stream, subject)
handler_class = YodaSpeakHandler
def send_to_yoda_api(sentence, api_key):
# function for sending sentence to api
response = requests.get("https://yoda.p.mashape.com/yoda?sentence=" + sentence,
headers={
"X-Mashape-Key": api_key,
"Accept": "text/plain"
}
)
if response.status_code == 200:
return response.text
if response.status_code == 403:
raise ApiKeyError
else:
# the error body from the API is expected to be JSON with a 'message' field
error_message = response.json()['message']
logging.error(error_message)
error_code = response.status_code
error_message = error_message + ' Error code: ' + str(error_code) +\
' Did you follow the instructions in the `readme.md` file?'
return error_message
def format_input(original_content):
# gets rid of whitespace around the edges, so that they aren't a problem in the future
message_content = original_content.strip()
# replaces all spaces with '+' to be in the format the api requires
sentence = message_content.replace(' ', '+')
return sentence
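# For example, format_input('  You will learn  ') returns 'You+will+learn', which is
# the query form expected by the Yoda API call above.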
def handle_input(client, original_content, stream, subject):
if is_help(original_content):
send_message(client, HELP_MESSAGE, stream, subject)
else:
sentence = format_input(original_content)
try:
reply_message = send_to_yoda_api(sentence, get_api_key())
except (ssl.SSLError, TypeError):
reply_message = 'The service is temporarily unavailable, please try again.'
logging.error(reply_message)
except ApiKeyError:
reply_message = 'Invalid Api Key. Did you follow the instructions in the ' \
'`readme.md` file?'
logging.error(reply_message)
send_message(client, reply_message, stream, subject)
def get_api_key():
# function for getting Mashape api key
home = os.path.expanduser('~')
with open(home + '/yoda_bot.config') as api_key_file:
api_key = api_key_file.read().strip()
return api_key
def send_message(client, message, stream, subject):
# function for sending a message
client.send_message(dict(
type='stream',
to=stream,
subject=subject,
content=message
))
def is_help(original_content):
# gets rid of whitespace around the edges, so that they aren't a problem in the future
message_content = original_content.strip()
if message_content == 'help':
return True
else:
return False
|
py | b4003d09751dbd09c83e9e2c665bae37aede148f | import json
import time
from elasticsearch import Elasticsearch
from elasticsearch.helpers import bulk
# Use tensorflow 1 behavior to match the Universal Sentence Encoder
# examples (https://tfhub.dev/google/universal-sentence-encoder/2).
import tensorflow.compat.v1 as tf
import tensorflow_hub as hub
##### INDEXING #####
def index_data():
print("Creating the 'posts' index.")
client.indices.delete(index=INDEX_NAME, ignore=[404])
with open(INDEX_FILE) as index_file:
source = index_file.read().strip()
client.indices.create(index=INDEX_NAME, body=source)
docs = []
count = 0
with open(DATA_FILE) as data_file:
for line in data_file:
line = line.strip()
doc = json.loads(line)
if doc["type"] != "question":
continue
docs.append(doc)
count += 1
if count % BATCH_SIZE == 0:
index_batch(docs)
docs = []
print("Indexed {} documents.".format(count))
if docs:
index_batch(docs)
print("Indexed {} documents.".format(count))
client.indices.refresh(index=INDEX_NAME)
print("Done indexing.")
def index_batch(docs):
titles = [doc["title"] for doc in docs]
title_vectors = embed_text(titles)
requests = []
for i, doc in enumerate(docs):
request = doc
request["_op_type"] = "index"
request["_index"] = INDEX_NAME
request["title_vector"] = title_vectors[i]
requests.append(request)
bulk(client, requests)
##### SEARCHING #####
def run_query_loop():
while True:
try:
handle_query()
except KeyboardInterrupt:
return
def handle_query():
query = input("Enter query: ")
embedding_start = time.time()
query_vector = embed_text([query])[0]
embedding_time = time.time() - embedding_start
script_query = {
"script_score": {
"query": {"match_all": {}},
"script": {
"source": "cosineSimilarity(params.query_vector, doc['title_vector']) + 1.0",
"params": {"query_vector": query_vector}
}
}
}
search_start = time.time()
response = client.search(
index=INDEX_NAME,
body={
"size": SEARCH_SIZE,
"query": script_query,
"_source": {"includes": ["title", "body"]}
}
)
search_time = time.time() - search_start
print()
print("{} total hits.".format(response["hits"]["total"]["value"]))
print("embedding time: {:.2f} ms".format(embedding_time * 1000))
print("search time: {:.2f} ms".format(search_time * 1000))
for hit in response["hits"]["hits"]:
print("id: {}, score: {}".format(hit["_id"], hit["_score"]))
print(hit["_source"])
print()
##### EMBEDDING #####
def embed_text(text):
vectors = session.run(embeddings, feed_dict={text_ph: text})
return [vector.tolist() for vector in vectors]
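# Illustrative: embed_text(["What is a python generator?"]) returns a list containing
# one embedding (512-dimensional for this Universal Sentence Encoder module), which is
# what gets stored as "title_vector" during indexing above.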
##### MAIN SCRIPT #####
if __name__ == '__main__':
INDEX_NAME = "posts"
INDEX_FILE = "data/posts/index.json"
DATA_FILE = "data/posts/posts.json"
BATCH_SIZE = 1000
SEARCH_SIZE = 5
print("Downloading pre-trained embeddings from tensorflow hub...")
embed = hub.Module("https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/3")
text_ph = tf.placeholder(tf.string)
embeddings = embed(text_ph)
print("Done.")
print("Creating tensorflow session...")
session = tf.Session()
session.run(tf.global_variables_initializer())
session.run(tf.tables_initializer())
print("Done.")
client = Elasticsearch()
index_data()
run_query_loop()
print("Closing tensorflow session...")
session.close()
print("Done.") |
py | b4003d9016ce9c3143f28eb5f79b9c51fe5ff8f6 | # coding=utf-8
"""Tests for models synccopy"""
# stdlib imports
try:
import unittest.mock as mock
except ImportError: # noqa
import mock
# non-stdlib imports
import bitstring
import pytest
# local imports
import blobxfer.models.azure as azmodels
import blobxfer.models.options as options
# module under test
import blobxfer.models.synccopy as synccopy
def test_specification():
spec = synccopy.Specification(
synccopy_options=options.SyncCopy(
access_tier=None,
delete_extraneous_destination=False,
delete_only=False,
dest_mode=azmodels.StorageModes.Auto,
mode=azmodels.StorageModes.Auto,
overwrite=True,
recursive=True,
rename=False,
server_side_copy=True,
strip_components=0,
),
skip_on_options=options.SkipOn(
filesize_match=True,
lmt_ge=False,
md5_match=True,
)
)
spec.add_azure_source_path(mock.MagicMock())
assert len(spec.sources) == 1
spec.add_azure_destination_path(mock.MagicMock())
assert len(spec.destinations) == 1
def test_descriptor():
opts = mock.MagicMock()
opts.dest_mode = azmodels.StorageModes.Auto
opts.mode = azmodels.StorageModes.Auto
opts.server_side_copy = False
src_ase = azmodels.StorageEntity('cont')
src_ase._mode = azmodels.StorageModes.Block
src_ase._name = 'name'
src_ase._size = 32
src_ase._encryption = None
src_ase._is_arbitrary_url = False
dst_ase = azmodels.StorageEntity('cont2')
dst_ase._mode = azmodels.StorageModes.Block
dst_ase._name = 'name'
dst_ase._size = 32
dst_ase._encryption = None
dst_ase.replica_targets = [mock.MagicMock()]
d = synccopy.Descriptor(src_ase, dst_ase, None, opts, mock.MagicMock())
assert d._offset == 0
assert d._chunk_num == 0
assert not d._finalized
assert d._src_block_list is None
assert d.src_entity == src_ase
assert d.dst_entity == dst_ase
assert not d.all_operations_completed
assert d.is_resumable
assert d.last_block_num == -1
assert not d.remote_is_file
assert not d.remote_is_page_blob
assert not d.remote_is_append_blob
assert d.is_one_shot_block_blob
assert not d.requires_put_block_list
dst_ase = azmodels.StorageEntity('cont2')
dst_ase._mode = azmodels.StorageModes.Page
dst_ase._name = 'name'
dst_ase._size = 32
dst_ase._encryption = None
dst_ase.replica_targets = None
d = synccopy.Descriptor(src_ase, dst_ase, None, opts, mock.MagicMock())
assert not d.is_one_shot_block_blob
assert not d.requires_put_block_list
opts.server_side_copy = True
dst_ase._mode = azmodels.StorageModes.Page
with pytest.raises(ValueError):
d = synccopy.Descriptor(src_ase, dst_ase, None, opts, mock.MagicMock())
dst_ase = azmodels.StorageEntity('cont2')
dst_ase._mode = azmodels.StorageModes.Block
dst_ase._name = 'name'
dst_ase._size = 0
dst_ase._encryption = None
dst_ase.replica_targets = None
d = synccopy.Descriptor(src_ase, dst_ase, None, opts, mock.MagicMock())
assert d.is_one_shot_block_blob
assert not d.requires_put_block_list
dst_ase._size = 32
d = synccopy.Descriptor(src_ase, dst_ase, None, opts, mock.MagicMock())
assert not d.is_one_shot_block_blob
assert d.requires_put_block_list
def test_descriptor_complete_offset_upload():
opts = mock.MagicMock()
opts.dest_mode = azmodels.StorageModes.Auto
opts.mode = azmodels.StorageModes.Auto
src_ase = azmodels.StorageEntity('cont')
src_ase._mode = azmodels.StorageModes.Block
src_ase._name = 'name'
src_ase._size = 32
src_ase._encryption = None
dst_ase = azmodels.StorageEntity('cont2')
dst_ase._mode = azmodels.StorageModes.Block
dst_ase._name = 'name'
dst_ase._size = 32
dst_ase._encryption = None
dst_ase.replica_targets = [mock.MagicMock()]
d = synccopy.Descriptor(src_ase, dst_ase, None, opts, mock.MagicMock())
d.complete_offset_upload(0)
assert d._outstanding_ops == 1
d.complete_offset_upload(0)
assert d._outstanding_ops == 0
assert 0 not in d._replica_counters
def test_descriptor_compute_chunk_size():
opts = mock.MagicMock()
opts.dest_mode = azmodels.StorageModes.Auto
opts.mode = azmodels.StorageModes.Auto
opts.server_side_copy = False
src_ase = azmodels.StorageEntity('cont')
src_ase._mode = azmodels.StorageModes.Block
src_ase._name = 'name'
src_ase._size = 32
src_ase._encryption = None
src_ase._is_arbitrary_url = False
dst_ase = azmodels.StorageEntity('cont2')
dst_ase._mode = azmodels.StorageModes.Block
dst_ase._name = 'name'
dst_ase._size = 32
dst_ase._encryption = None
dst_ase.replica_targets = [mock.MagicMock()]
d = synccopy.Descriptor(src_ase, dst_ase, None, opts, mock.MagicMock())
assert d._compute_chunk_size() == \
synccopy._DEFAULT_AUTO_CHUNKSIZE_BYTES
dst_ase._mode = azmodels.StorageModes.Page
d = synccopy.Descriptor(src_ase, dst_ase, None, opts, mock.MagicMock())
assert d._compute_chunk_size() == \
synccopy._MAX_NONBLOCK_BLOB_CHUNKSIZE_BYTES
dst_ase._mode = azmodels.StorageModes.Block
d = synccopy.Descriptor(src_ase, dst_ase, [], opts, mock.MagicMock())
assert d._compute_chunk_size() == d.src_entity.size
b = mock.MagicMock()
b.size = 1
d = synccopy.Descriptor(src_ase, dst_ase, [b], opts, mock.MagicMock())
assert d._compute_chunk_size() == 1
d = synccopy.Descriptor(src_ase, dst_ase, [b, b], opts, mock.MagicMock())
assert d._compute_chunk_size() == -1
def test_descriptor_compute_total_chunks():
opts = mock.MagicMock()
opts.dest_mode = azmodels.StorageModes.Auto
opts.mode = azmodels.StorageModes.Auto
src_ase = azmodels.StorageEntity('cont')
src_ase._mode = azmodels.StorageModes.Block
src_ase._name = 'name'
src_ase._size = 32
src_ase._encryption = None
dst_ase = azmodels.StorageEntity('cont2')
dst_ase._mode = azmodels.StorageModes.Block
dst_ase._name = 'name'
dst_ase._size = 32
dst_ase._encryption = None
dst_ase.replica_targets = [mock.MagicMock()]
d = synccopy.Descriptor(src_ase, dst_ase, None, opts, mock.MagicMock())
assert d._compute_total_chunks(0) == 1
def test_resume():
opts = mock.MagicMock()
opts.dest_mode = azmodels.StorageModes.Auto
opts.mode = azmodels.StorageModes.Auto
src_ase = azmodels.StorageEntity('cont')
src_ase._mode = azmodels.StorageModes.Block
src_ase._name = 'name'
src_ase._size = 32
src_ase._encryption = None
dst_ase = azmodels.StorageEntity('cont2')
dst_ase._mode = azmodels.StorageModes.Block
dst_ase._name = 'name'
dst_ase._size = 32
dst_ase._encryption = None
dst_ase.replica_targets = [mock.MagicMock()]
# test no resume
d = synccopy.Descriptor(src_ase, dst_ase, None, opts, None)
assert d._resume() is None
# check if path exists in resume db
resume = mock.MagicMock()
resume.get_record.return_value = None
d = synccopy.Descriptor(src_ase, dst_ase, None, opts, resume)
assert d._resume() is None
# check same lengths
bad = mock.MagicMock()
bad.length = 0
resume.get_record.return_value = bad
assert d._resume() is None
# check completed resume
comp = mock.MagicMock()
comp.length = 32
comp.completed = True
comp.total_chunks = 1
comp.chunk_size = 32
comp.completed_chunks = 1
resume.get_record.return_value = comp
dst_ase.replica_targets = None
d._completed_chunks = mock.MagicMock()
assert d._resume() == 32
dst_ase.replica_targets = [dst_ase]
d = synccopy.Descriptor(src_ase, dst_ase, None, opts, resume)
d._completed_chunks = mock.MagicMock()
assert d._resume() == 64
# check resume no md5
nc = mock.MagicMock()
nc.offset = 16
nc.length = 32
nc.completed = False
nc.total_chunks = 2
nc.chunk_size = 16
cc = bitstring.BitArray(length=nc.total_chunks)
cc.set(True, 0)
nc.completed_chunks = cc.int
resume.get_record.return_value = nc
dst_ase.replica_targets = None
d = synccopy.Descriptor(src_ase, dst_ase, None, opts, resume)
assert d._resume() == 16
def test_descriptor_next_offsets():
opts = mock.MagicMock()
opts.dest_mode = azmodels.StorageModes.Auto
opts.mode = azmodels.StorageModes.Auto
src_ase = azmodels.StorageEntity('cont')
src_ase._mode = azmodels.StorageModes.Block
src_ase._name = 'name'
src_ase._size = 32
src_ase._encryption = None
dst_ase = azmodels.StorageEntity('cont2')
dst_ase._mode = azmodels.StorageModes.Block
dst_ase._name = 'name'
dst_ase._size = 32
dst_ase._encryption = None
dst_ase.replica_targets = [mock.MagicMock()]
# test normal
d = synccopy.Descriptor(src_ase, dst_ase, None, opts, None)
d._resume = mock.MagicMock()
d._resume.return_value = None
offsets, rb = d.next_offsets()
assert rb is None
assert offsets.chunk_num == 0
assert offsets.num_bytes == 32
assert offsets.range_start == 0
assert offsets.range_end == 31
assert d._offset == 32
assert d._chunk_num == 1
# test nothing left
offsets, rb = d.next_offsets()
assert rb is None
assert offsets is None
# test neg chunk size with block list
b = mock.MagicMock()
b.size = 10
d = synccopy.Descriptor(src_ase, dst_ase, [b], opts, None)
d._resume = mock.MagicMock()
d._resume.return_value = None
d._chunk_size = -1
offsets, rb = d.next_offsets()
assert rb is None
assert offsets.chunk_num == 0
assert offsets.num_bytes == 10
assert offsets.range_start == 0
assert offsets.range_end == 9
assert d._offset == 10
assert d._chunk_num == 1
# test small chunk size
d = synccopy.Descriptor(src_ase, dst_ase, None, opts, None)
d._resume = mock.MagicMock()
d._resume.return_value = None
d._chunk_size = 32
offsets, rb = d.next_offsets()
assert rb is None
assert offsets.chunk_num == 0
assert offsets.num_bytes == 32
assert offsets.range_start == 0
assert offsets.range_end == 31
assert d._offset == 32
assert d._chunk_num == 1
|
py | b4003da88cdecd720939f3254ca2c1415f72e68f | # Copyright 2014 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from requestbuilder import Arg
from euca2ools.commands.cloudformation import CloudFormationRequest
class GetTemplate(CloudFormationRequest):
DESCRIPTION = "Show a stack's template"
ARGS = [Arg('StackName', metavar='STACK', help='''name or ID of the
stack (names cannot be used for deleted stacks) (required)''')]
LIST_TAGS = ['Stacks']
def print_result(self, result):
print result.get('TemplateBody')
|
py | b4003ed39c792b744658ffacc58c9b9ab80bab5f | # Generated by Django 2.2.5 on 2019-09-18 19:57
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0041_group_collection_permissions_verbose_name_plural'),
('wagtailimages', '0001_squashed_0021'),
('core', '0040_alter_field_theme_on_researcherthemerelationship'),
]
operations = [
migrations.CreateModel(
name='TextSearchPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('introduction', models.TextField(blank=True, help_text='Text to describe the page')),
('image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
]
|
py | b4003ed3bb51edc2758975aebf205bd266a77179 | import json
from preprocess import *
import numpy as np
from torch.utils.data import Dataset, DataLoader
from ChatDataset import ChatDataset
from NN import NN
import torch
import torch.nn as nn
with open('intent.json', 'r') as f:
data = json.load(f)
words = []
intents = []
xy = []
# PREPROCESS (Tokenize + Stemming + Stop Word Removal)
for intent in data['intents']:
tag = intent['intent']
intents.append(tag)
for text in intent['text']:
w = tokenize(text)
words.extend(w)
xy.append((w, tag))
stop_words = ['?', ",", ".", "!", "a", "the"]
words = [stem(w) for w in words if w not in stop_words]
words = sorted(set(words))
intents = sorted(set(intents))
x_train = []
y_train = []
for (s, i) in xy:
bag = bagOfWords(s, words)
x_train.append(bag)
tags = intents.index(i) # Have numbers for intents
y_train.append(tags)
x_train = np.array(x_train)
y_train = np.array(y_train)
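# Illustrative (assuming bagOfWords from preprocess returns a fixed-length vector over
# `words`): for words == ['hi', 'how', 'are', 'you'], bagOfWords(['hi'], words) would be
# [1, 0, 0, 0], and each y_train entry is the index of the matching intent in `intents`.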
# Hyperparameters
batch_size = 8
input_size = len(words)
hidden_size = 8
output_size = len(intents)
learning_rate = .01
num_epochs = 1000
dataset = ChatDataset(x_train, y_train)
train_loader = DataLoader(dataset = dataset, batch_size = batch_size, shuffle = True, num_workers = 0)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = NN(input_size, hidden_size, output_size).to(device)
# crossentropy loss
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Training Loop
for epoch in range(num_epochs):
for (wrds, labls) in train_loader:
wrds = wrds.to(device)
labls = labls.to(device)
# forward
outputs = model(wrds)
labls = labls.long()
loss = criterion(outputs, labls)
# backprop
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (epoch+1) % 100 == 0:
print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')
print(f'final loss: {loss.item():.4f}')
data = {
"model_state": model.state_dict(),
"input_size": input_size,
"hidden_size": hidden_size,
"output_size": output_size,
"all_words": words,
"tags": intents
}
FILE = "data.pth"
torch.save(data, FILE)
print(f'training complete. file saved to {FILE}')
|
py | b4003edec909f4e779c9f04d2942aa41f2e0db6d | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(name='django-docrootcms-tagulous',
version='0.12',
description='Fork of the django-tagulous patched to work with Django 3',
long_description=long_description,
long_description_content_type="text/markdown",
url='http://github.com/sstacha/django-docrootcms-tagulous',
author='Steve Stacha',
author_email='[email protected]',
license='MIT',
packages=setuptools.find_packages(),
include_package_data=True,
zip_safe=False,
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Framework :: Django",
"Framework :: Django :: 3.0",
],
python_requires='>=3.6',
install_requires=[
'django',
],
)
|
py | b4003f67be4250de5aa1212751ec07cf8777e23e | # -*- coding: utf-8 -*-
"""
Discord API Wrapper
~~~~~~~~~~~~~~~~~~~
A basic wrapper for the Discord API.
:copyright: (c) 2015-2020 Rapptz
:license: MIT, see LICENSE for more details.
"""
__title__ = 'discord'
__author__ = 'Rapptz'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015-2020 Rapptz'
__version__ = '1.5.0'
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
from collections import namedtuple
import logging
from .client import Client
from .appinfo import AppInfo
from .user import User, ClientUser, Profile
from .emoji import Emoji
from .partial_emoji import PartialEmoji
from .activity import *
from .channel import *
from .guild import Guild
from .flags import *
from .relationship import Relationship
from .member import Member, VoiceState
from .message import Message, MessageReference, Attachment
from .asset import Asset
from .errors import *
from .calls import CallMessage, GroupCall
from .permissions import Permissions, PermissionOverwrite
from .role import Role
from .file import File
from .colour import Color, Colour
from .integrations import Integration, IntegrationAccount
from .invite import Invite, PartialInviteChannel, PartialInviteGuild
from .template import Template
from .widget import Widget, WidgetMember, WidgetChannel
from .object import Object
from .reaction import Reaction
from . import utils, opus, abc, rtp
from .enums import *
from .embeds import Embed
from .mentions import AllowedMentions
from .shard import AutoShardedClient, ShardInfo
from .player import *
from .reader import *
from .webhook import *
from .voice_client import VoiceClient, VoiceProtocol
from .audit_logs import AuditLogChanges, AuditLogEntry, AuditLogDiff
from .raw_models import *
from .team import *
from .speakingstate import SpeakingState
VersionInfo = namedtuple('VersionInfo', 'major minor micro releaselevel serial')
version_info = VersionInfo(major=1, minor=5, micro=0, releaselevel='final', serial=0)
try:
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
|
py | b4003fcf90f0769798fd3f7da69db90e7dff9776 | # Generated by Django 4.0.2 on 2022-02-23 20:44
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Todo',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=350)),
('complete', models.BooleanField(default=False)),
],
),
]
|
py | b400403eb0bbcb330bb589474180d318584d7068 | # -*- encoding: utf-8 -*-
import dsl
from shapely.wkt import loads as wkt_loads
from . import FixtureTest
class BasicOutdoorPois(FixtureTest):
def test_bbq(self):
self.generate_fixtures(dsl.way(1387024181, wkt_loads(
'POINT (-122.043111437058 37.9185297983412)'), {u'source': u'openstreetmap.org', u'amenity': u'bbq'}))
self.assert_has_feature(
16, 10550, 25297, 'pois',
{'kind': 'bbq', 'min_zoom': 18})
def test_bicycle_repair_station(self):
# Node: Valencia Cyclery (3443701422)
self.generate_fixtures(dsl.way(3443701422, wkt_loads('POINT (-122.420883736413 37.7559216043955)'), {u'website': u'http://www.valenciacyclery.com', u'addr:housenumber': u'1077', u'amenity': u'bicycle_repair_station', u'fee': u'yes',
u'name': u'Valencia Cyclery', u'source': u'openstreetmap.org', u'addr:postcode': u'94110', u'service:bicycle:pump': u'yes', u'addr:state': u'CA', u'phone': u'4155506601', u'addr:street': u'Valencia Street', u'addr:city': u'San Francisco'}))
self.assert_has_feature(
16, 10481, 25335, 'pois',
{'id': 3443701422, 'kind': 'bicycle_repair_station',
'min_zoom': 18})
def test_dive_centre(self):
self.generate_fixtures(dsl.way(2910259124, wkt_loads('POINT (-120.682905520547 35.24858062361431)'), {u'shop': u'scuba_diving', u'website': u'http://www.depthperceptions.net/', u'amenity': u'dive_centre',
u'addr:city': u'San Luis Obispo', u'addr:postcode': u'93405', u'name': u'Depth Perceptions', u'source': u'openstreetmap.org', u'addr:housenumber': u'12322', u'addr:street': u'Los Osos Valley Road'}))
self.assert_has_feature(
16, 10798, 25903, 'pois',
{'kind': 'dive_centre', 'min_zoom': 16})
def test_life_ring(self):
self.generate_fixtures(dsl.way(2844159164, wkt_loads(
'POINT (-79.42727771203799 43.75376437111998)'), {u'source': u'openstreetmap.org', u'amenity': u'life_ring'}))
self.assert_has_feature(
16, 18308, 23892, 'pois',
{'kind': 'life_ring', 'min_zoom': 18})
def test_lifeguard_tower(self):
self.generate_fixtures(dsl.way(4083762008, wkt_loads('POINT (-120.64533707705 35.14172262312749)'),
{u'source': u'openstreetmap.org', u'name': u'Pismo Lifeguard Tower 4', u'emergency': u'lifeguard_tower'}))
self.assert_has_feature(
16, 10805, 25927, 'pois',
{'kind': 'lifeguard_tower', 'min_zoom': 17})
def test_picnic_table(self):
self.generate_fixtures(dsl.way(696801847, wkt_loads('POINT (-121.785774381382 38.54985189949681)'), {
u'source': u'openstreetmap.org', u'amenity': u'picnic_table', u'tourism': u'picnic_site', u'name': u'Picnic Tables'}))
self.assert_has_feature(
16, 10597, 25151, 'pois',
{'kind': 'picnic_table', 'min_zoom': 18})
def test_shower(self):
self.generate_fixtures(dsl.way(1128776802, wkt_loads(
'POINT (-122.504145615657 37.59650072368881)'), {u'source': u'openstreetmap.org', u'amenity': u'shower'}))
self.assert_has_feature(
16, 10466, 25372, 'pois',
{'kind': 'shower', 'min_zoom': 18})
def test_waste_disposal(self):
self.generate_fixtures(dsl.way(2287784170, wkt_loads('POINT (-122.244492793011 38.10185261522749)'),
{u'source': u'openstreetmap.org', u'amenity': u'waste_disposal', u'waste': u'trash'}))
self.assert_has_feature(
16, 10514, 25255, 'pois',
{'kind': 'waste_disposal', 'min_zoom': 18})
def test_watering_place(self):
self.generate_fixtures(dsl.way(2640323071, wkt_loads('POINT (-122.306777034078 37.94991920607908)'),
{u'source': u'openstreetmap.org', u'amenity': u'watering_place'}))
self.assert_has_feature(
16, 10502, 25290, 'pois',
{'kind': 'watering_place', 'min_zoom': 18})
def test_water_point(self):
self.generate_fixtures(dsl.way(3954505509, wkt_loads('POINT (-124.110018304493 43.92884831271299)'),
{u'source': u'openstreetmap.org', u'amenity': u'water_point'}))
self.assert_has_feature(
16, 10174, 23848, 'pois',
{'kind': 'water_point', 'min_zoom': 18})
self.generate_fixtures(dsl.way(3984333433, wkt_loads('POINT (-112.165685191532 37.63484157316341)'),
{u'source': u'openstreetmap.org', u'amenity': u'water_point'}))
self.assert_has_feature(
16, 12348, 25363, 'pois',
{'kind': 'water_point', 'min_zoom': 18})
def test_pylon(self):
self.generate_fixtures(dsl.way(1978323412, wkt_loads(
'POINT (-120.241505998775 39.19449800774999)'), {u'source': u'openstreetmap.org', u'aerialway': u'pylon'}))
self.assert_has_feature(
16, 10878, 25000, 'pois',
{'kind': 'pylon', 'min_zoom': 17})
def test_power_pole(self):
self.generate_fixtures(dsl.way(2398019418, wkt_loads(
'POINT (-121.955094595351 37.76424341812228)'), {u'source': u'openstreetmap.org', u'power': u'pole'}))
self.assert_has_feature(
16, 10566, 25333, 'pois',
{'kind': 'power_pole', 'min_zoom': 18})
def test_power_tower(self):
self.generate_fixtures(dsl.way(1378418272, wkt_loads(
'POINT (-122.429615181311 37.6809742037058)'), {u'source': u'openstreetmap.org', u'power': u'tower'}))
self.assert_has_feature(
16, 10480, 25352, 'pois',
{'kind': 'power_tower', 'min_zoom': 16})
def test_petroleum_well(self):
self.generate_fixtures(dsl.way(2890101480, wkt_loads('POINT (-119.13405572999 34.17119825946398)'),
{u'source': u'openstreetmap.org', u'man_made': u'petroleum_well', u'method': u'pumpjack'}))
self.assert_has_feature(
16, 11080, 26141, 'pois',
{'kind': 'petroleum_well', 'min_zoom': 17})
|
py | b400417d706b64d041b809b565da199b03bce5bd | #!/usr/bin/env python
"""
Basic script for steering the observationSim code.
@author J. Chiang
"""
#
# $Header: /nfs/slac/g/glast/ground/cvs/observationSim/python/test.py,v 1.5 2003/10/27 20:49:53 jchiang Exp $
#
import os, sys, string, numarray
#
# LAT response and observationSim packages
#
latResponseRoot = os.getenv("LATRESPONSEROOT")
sys.path.append(latResponseRoot + "/python")
import observationSim, latResponse
observationSimRoot = os.getenv("OBSERVATIONSIMROOT")
caldbPath = latResponseRoot + "/data/CALDB"
def run_test(argv):
"""
The steering function. argv is a tuple containing the arguments
(rootname, counts, <source_names>).
"""
#
# One needs to provide the full path to the xml files
#
xml_files = latResponse.StringVector([observationSimRoot
+ "/xml/source_library.xml",
observationSimRoot
+ "/xml/3EG_catalog_32MeV.xml",
observationSimRoot
+ "/xml/test_sources_v2.xml"])
if (len(argv) == 2 and argv[1] == "-h"):
print "usage: test.py rootname counts [source_names]"
return 0
if (len(argv) > 1):
root = argv[1]
else:
root = "test"
if (len(argv) > 2):
count = int(string.atof(argv[2]))
else:
count = 1000
source_names = latResponse.StringVector()
useSimTime = 0 # Generate a number of counts by default
if (len(argv) > 3):
for name in argv[3:]:
if (name == '-t'): # Detect flag to interpret variable
useSimTime = 1 # count as seconds of simulation time
else:
source_names.append(name)
if len(source_names) == 0:
source_names.append("all_3EG_sources")
# source_names.append("anticenter")
my_simulator = observationSim.Simulator(source_names, xml_files)
irfsFactory = latResponse.IrfsFactory()
respVector = latResponse.IrfVector()
# respVector.append(irfsFactory.create("Glast25::Combined"))
respVector.append(irfsFactory.create("Glast25::Front"))
respVector.append(irfsFactory.create("Glast25::Back"))
# respVector.append(irfsFactory.create("Glast25::FlatAeff"))
useGoodi = 0
events = observationSim.EventContainer(root + "_events", useGoodi)
scData = observationSim.ScDataContainer(root + "_scData", useGoodi)
spacecraft = observationSim.LatSc();
#
# Break up the count into bite-sized chunks to allow for
# intermediate plotting of the results by HippoDraw
#
if useSimTime:
elapsed_time = 0.
time_step = 2.*60. # Update every two minutes of simulation time
while (elapsed_time < count - time_step):
my_simulator.generate_events(time_step, events, scData,
respVector, spacecraft)
elapsed_time += time_step
print "elapsed time: ", elapsed_time
print "events so far: ", events.numEvents()
my_simulator.generate_events(count-elapsed_time, events, scData,
respVector, spacecraft)
else:
num_made = 0
numstep = 30 # Work in chunks of 30 events
while (num_made < count - numstep):
sys.stderr.write("%i " % num_made)
my_simulator.genNumEvents(num_made + numstep, events, scData,
respVector, spacecraft)
num_made += numstep
my_simulator.genNumEvents(count, events, scData,
respVector, spacecraft)
return root
if __name__ == "__main__":
root = run_test(sys.argv)
|
py | b40041a436ea1a4c2903136da5367cd0307f6c1a | import sys
from abci.wire import hex2bytes, decode_big_endian, encode_big_endian
from abci.server import ABCIServer
from abci.reader import BytesBuffer
class CounterApplication():
def __init__(self):
sys.exit("The python example is out of date. Upgrading the Python examples is currently left as an exercise to you.")
self.hashCount = 0
self.txCount = 0
self.serial = False
def echo(self, msg):
return msg, 0
def info(self):
return ["hashes:%d, txs:%d" % (self.hashCount, self.txCount)], 0
def set_option(self, key, value):
if key == "serial" and value == "on":
self.serial = True
return 0
def deliver_tx(self, txBytes):
if self.serial:
txByteArray = bytearray(txBytes)
if len(txBytes) >= 2 and txBytes[:2] == "0x":
txByteArray = hex2bytes(txBytes[2:])
txValue = decode_big_endian(
BytesBuffer(txByteArray), len(txBytes))
if txValue != self.txCount:
return None, 6
self.txCount += 1
return None, 0
def check_tx(self, txBytes):
if self.serial:
txByteArray = bytearray(txBytes)
if len(txBytes) >= 2 and txBytes[:2] == "0x":
txByteArray = hex2bytes(txBytes[2:])
txValue = decode_big_endian(
BytesBuffer(txByteArray), len(txBytes))
if txValue < self.txCount:
return 6
return 0
def commit(self):
self.hashCount += 1
if self.txCount == 0:
return "", 0
h = encode_big_endian(self.txCount, 8)
h.reverse()
return h.decode(), 0
def add_listener(self):
return 0
def rm_listener(self):
return 0
def event(self):
return
if __name__ == '__main__':
l = len(sys.argv)
if l == 1:
port = 46658
elif l == 2:
port = int(sys.argv[1])
else:
print("too many arguments")
quit()
print('ABCI Demo APP (Python)')
app = CounterApplication()
server = ABCIServer(app, port)
server.main_loop()
|
py | b40041afe06846790bd0cb2407d93173caa093ab | """
This module is a Python wrapper around the FFTW library. The core of the
wrapper is located in the two submodules :mod:`janus.fft.serial` and
:mod:`janus.fft.parallel`, which should be imported explicitly.
.. _planner-flags:
Planner flags
-------------
From the FFTW manual (Sec. `Planner Flags <http://www.fftw.org/fftw3_doc/Planner-Flags.html#Planner-Flags>`_)
All of the planner routines in FFTW accept an integer ``flags`` argument,
which is a bitwise ``OR`` (‘|’) of zero or more of the flag constants
defined below. These flags control the rigor (and time) of the planning
process, and can also impose (or lift) restrictions on the type of
transform algorithm that is employed.
Only the names of the flags are reproduced below. The reader should refer to
the FFTW manual for a complete description.
Planning-rigor flags
^^^^^^^^^^^^^^^^^^^^
.. data:: FFTW_ESTIMATE
Use simple heuristic to pick a plan.
.. data:: FFTW_MEASURE
More accurate selection of a plan (default planning option).
.. data:: FFTW_PATIENT
Even more accurate selection of a plan.
.. data:: FFTW_EXHAUSTIVE
Even more accurate selection of a plan.
.. data:: FFTW_WISDOM_ONLY
Should be used only to check whether wisdom *is* available.
Algorithm-restriction flags
^^^^^^^^^^^^^^^^^^^^^^^^^^^
These flags are exposed for future extensions of the module. They are not used
for the time being.
.. data:: FFTW_DESTROY_INPUT
Unused.
.. data:: FFTW_PRESERVE_INPUT
Unused.
.. data:: FFTW_UNALIGNED
Unused.
"""
from .serial._serial_fft import FFTW_ESTIMATE
from .serial._serial_fft import FFTW_MEASURE
from .serial._serial_fft import FFTW_PATIENT
from .serial._serial_fft import FFTW_EXHAUSTIVE
from .serial._serial_fft import FFTW_WISDOM_ONLY
from .serial._serial_fft import FFTW_DESTROY_INPUT
from .serial._serial_fft import FFTW_PRESERVE_INPUT
from .serial._serial_fft import FFTW_UNALIGNED
from .serial._serial_fft import FFTW_CONSERVE_MEMORY
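# Illustrative sketch (not part of the original module): as the documentation
# above states, the planner flags are integer constants that can be combined
# with a bitwise OR before being passed to whichever planning routine accepts
# a ``flags`` argument.
_EXAMPLE_FLAGS = FFTW_MEASURE | FFTW_DESTROY_INPUT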
|
py | b40041ef7ccd7c57562b4606ddb93851518395b2 | import datetime
import json
import time
from functools import reduce
from itertools import chain
from typing import Dict, Optional
import pyorient
from ledger.util import F
from plenum.common.error import fault
from plenum.common.log import getlogger
from plenum.common.txn import TXN_TYPE, TYPE, IP, PORT, KEYS, NAME, VERSION, \
DATA, RAW, ENC, HASH, ORIGIN, VERKEY
from plenum.common.types import f
from plenum.common.util import error
from plenum.persistence.orientdb_graph_store import OrientDbGraphStore
from plenum.server.node import Node
from sovrin.common.txn import NYM, TXN_ID, TARGET_NYM, SPONSOR, \
STEWARD, ROLE, REF, TXN_TIME, ATTRIB, CLAIM_DEF, ATTR_NAMES, ISSUER_KEY, TGB, \
TRUSTEE
from sovrin.server.auth import Authoriser
logger = getlogger()
MIN_TXN_TIME = time.mktime(datetime.datetime(2000, 1, 1).timetuple())
class Vertices:
Nym = NYM
Attribute = "Attribute"
ClaimDef = "ClaimDef"
IssuerKey = "IssuerKey"
_Properties = {
Nym: (NYM, VERKEY, TXN_ID, ROLE, F.seqNo.name),
Attribute: (RAW, ENC, HASH),
ClaimDef: (TYPE, ATTR_NAMES),
IssuerKey: (REF, DATA)
}
@classmethod
def properties(cls, vertexName: str):
return cls._Properties.get(vertexName, ())
class Edges:
AddsNym = "AddsNym"
AddsAttribute = "AddsAttribute"
HasAttribute = "HasAttribute"
# TODO: Create OwnsAttribute in case user takes control of his identity
# TODO: Create KnowsAttribute in case the attribute is shared (disclosed)
# with someone
AliasOf = "AliasOf"
AddsClaimDef = "AddsClaimDef"
HasIssuerKey = "HasIssuerKey"
txnEdges = {
NYM: Edges.AddsNym,
ATTRIB: Edges.AddsAttribute,
CLAIM_DEF: Edges.AddsClaimDef,
ISSUER_KEY: Edges.HasIssuerKey
}
txnEdgeProps = [F.seqNo.name, TXN_TIME, f.REQ_ID.nm, f.IDENTIFIER.nm,
TARGET_NYM, NAME, VERSION, TXN_ID]
def getEdgeByTxnType(txnType: str): return txnEdges.get(txnType)
def getTxnTypeFromEdge(edgeClass: str):
for typ, edge in txnEdges.items():
if edge == edgeClass:
return typ
class IdentityGraph(OrientDbGraphStore):
@property
def classesNeeded(self):
return [
(Vertices.Nym, self.createNymClass),
(Vertices.Attribute, self.createAttributeClass),
(Vertices.ClaimDef, self.createClaimDefClass),
(Vertices.IssuerKey, self.createIssuerKeyClass),
(Edges.AddsNym, self.createAddsNymClass),
(Edges.AliasOf, self.createAliasOfClass),
(Edges.AddsAttribute, self.createAddsAttributeClass),
(Edges.HasAttribute, self.createHasAttributeClass),
(Edges.AddsClaimDef, self.createAddsClaimDefClass),
(Edges.HasIssuerKey, self.createHasIssuerClass)
]
# Creates a vertex class which has a property called `nym` with a unique
# index on it
def createUniqueNymVertexClass(self, className, properties: Dict=None):
d = {NYM: "string",
VERKEY: "string"}
if properties:
properties.update(d)
else:
properties = d
self.createVertexClass(className, properties)
self.store.createUniqueIndexOnClass(className, NYM)
# Creates an edge class which has a property called `txnId` with a unique
# index on it
def createUniqueTxnIdEdgeClass(self, className, properties: Dict=None):
defaultProperties = {
TXN_ID: "string",
TXN_TIME: "datetime"
}
if properties:
properties.update(defaultProperties)
else:
properties = defaultProperties
self.createEdgeClass(className, properties=properties)
self.store.createUniqueIndexOnClass(className, TXN_ID)
def createEdgeClassWithTxnData(self, className, properties: Dict = None):
defaultProperties = {
TXN_ID: "string",
TXN_TIME: "datetime",
TXN_TYPE: "string",
f.REQ_ID.nm: "long",
f.IDENTIFIER.nm: "string",
F.seqNo.name: "string",
}
properties.update(defaultProperties)
self.createUniqueTxnIdEdgeClass(className, properties)
# self.client.command("create index CliIdReq on {} ({}, {})"
# " unique".
# format(className, f.REQ_ID.nm, f.IDENTIFIER.nm))
def createNymClass(self):
self.createUniqueNymVertexClass(Vertices.Nym,
properties={ROLE: "string"})
def createAttributeClass(self):
self.createVertexClass(Vertices.Attribute,
properties={"data": "string"})
def createClaimDefClass(self):
self.createVertexClass(Vertices.ClaimDef, properties={
ATTR_NAMES: "string",
TYPE: "string",
})
def createIssuerKeyClass(self):
self.createVertexClass(Vertices.IssuerKey, properties={
REF: "string",
DATA: "string", # JSON
})
def createAddsNymClass(self):
self.createEdgeClassWithTxnData(Edges.AddsNym,
properties={ROLE: "string"})
self.addEdgeConstraint(Edges.AddsNym, iN=Vertices.Nym, out=Vertices.Nym)
def createAliasOfClass(self):
self.createUniqueTxnIdEdgeClass(Edges.AliasOf,
properties={REF: "string"})
        # if not, then `iN` needs to be a USER
self.addEdgeConstraint(Edges.AliasOf, iN=Vertices.Nym,
out=Vertices.Nym)
def createAddsAttributeClass(self):
self.createEdgeClassWithTxnData(Edges.AddsAttribute,
properties={TARGET_NYM: "string"})
# Not specifying `out` here as both Sponsor and Agent can add attributes
self.addEdgeConstraint(Edges.AddsAttribute, iN=Vertices.Attribute)
def createHasAttributeClass(self):
self.createUniqueTxnIdEdgeClass(Edges.HasAttribute)
self.addEdgeConstraint(Edges.HasAttribute, iN=Vertices.Attribute)
def createAddsClaimDefClass(self):
# TODO: Add compound index on the name and version
self.createUniqueTxnIdEdgeClass(Edges.AddsClaimDef, properties={
NAME: "string",
VERSION: "string"
})
self.addEdgeConstraint(Edges.AddsClaimDef, iN=Vertices.ClaimDef)
def createHasIssuerClass(self):
self.createUniqueTxnIdEdgeClass(Edges.HasIssuerKey)
        self.addEdgeConstraint(Edges.HasIssuerKey, out=Vertices.Nym)
def getEdgeByTxnId(self, edgeClassName, txnId):
return self.getEntityByUniqueAttr(edgeClassName, TXN_ID, txnId)
def getAddsNymEdge(self, nym):
return self.getEntityByUniqueAttr(Edges.AddsNym, NYM, nym)
def addNym(self, txnId, nym, verkey, role, frm=None, reference=None,
seqNo=None):
kwargs = {
NYM: nym,
            TXN_ID: txnId,  # Need to have txnId as a property for cases
            # where we don't know the sponsor of this nym, or it's a genesis nym
}
        # Need to have role as a property of the vertex; it makes querying
        # roles by vertex faster. Also used for genesis txns
if role:
kwargs[ROLE] = role
if verkey:
kwargs[VERKEY] = verkey
if not frm:
# In case of genesis transaction
kwargs[F.seqNo.name] = seqNo
self.createVertex(Vertices.Nym, **kwargs)
if not frm:
logger.debug("frm not available while adding nym")
else:
frmV = "(select from {} where {} = '{}')".format(Vertices.Nym,
NYM,
frm)
toV = "(select from {} where {} = '{}')".format(Vertices.Nym,
NYM,
nym)
self.createEdge(Edges.AddsNym, frmV, toV, **kwargs)
if reference:
nymEdge = self.getEdgeByTxnId(Edges.AddsNym, txnId=reference)
referredNymRid = nymEdge.oRecordData['in'].get()
kwargs = {
REF: reference,
TXN_ID: txnId
}
self.createEdge(Edges.AliasOf, referredNymRid, toV, **kwargs)
def addAttribute(self, frm, txnId, raw=None, enc=None, hash=None, to=None):
# Only one of `raw`, `enc`, `hash` should be provided so 2 should be
# `None`
if (raw, enc, hash).count(None) != 2:
error("One and only one of raw, enc and hash should be provided")
if raw:
attrVertex = self.createVertex(Vertices.Attribute, raw=raw)
elif enc:
attrVertex = self.createVertex(Vertices.Attribute, enc=enc)
elif hash:
attrVertex = self.createVertex(Vertices.Attribute, hash=hash)
frm = "(select from {} where {} = '{}')".format(Vertices.Nym, NYM,
frm)
kwargs = {
TARGET_NYM: to,
TXN_ID: txnId,
}
self.createEdge(Edges.AddsAttribute, frm, attrVertex._rid, **kwargs)
if to:
to = "(select from {} where {} = '{}')".format(Vertices.Nym, NYM,
to)
kwargs = {
TXN_ID: txnId
}
self.createEdge(Edges.HasAttribute, to, attrVertex._rid, **kwargs)
def addClaimDef(self, frm, txnId, name, version, attrNames,
typ: Optional[str]=None):
kwargs = {
TYPE: typ,
ATTR_NAMES: attrNames
}
vertex = self.createVertex(Vertices.ClaimDef, **kwargs)
frm = "(select from {} where {} = '{}')".format(Vertices.Nym, NYM,
frm)
kwargs = {
TXN_ID: txnId,
NAME: name,
VERSION: version
}
self.createEdge(Edges.AddsClaimDef, frm, vertex._rid, **kwargs)
def addIssuerKey(self, frm, txnId, data, reference):
kwargs = {
DATA: json.dumps(data),
REF: reference
}
vertex = self.createVertex(Vertices.IssuerKey, **kwargs)
frm = "(select from {} where {} = '{}')".format(Vertices.Nym, NYM,
frm)
kwargs = {
TXN_ID: txnId,
}
self.createEdge(Edges.HasIssuerKey, frm, vertex._rid, **kwargs)
def updateNym(self, txnId, nym, verkey, seqNo, role):
kwargs = {
TXN_ID: txnId,
F.seqNo.name: seqNo,
ROLE: role,
}
if verkey is not None:
kwargs[VERKEY] = verkey
self.updateEntityWithUniqueId(Vertices.Nym, NYM, nym, **kwargs)
def getRawAttrs(self, frm, *attrNames):
cmd = 'select expand(outE("{}").inV("{}")) from {} where {}="{}"'.\
format(Edges.HasAttribute, Vertices.Attribute, Vertices.Nym,
NYM, frm)
allAttrsRecords = self.client.command(cmd)
attrVIds = [a._rid for a in allAttrsRecords]
seqNos = {}
if attrVIds:
edgeRecs = self.client.command("select expand(inE('{}')) from [{}]"
.format(Edges.AddsAttribute,
", ".join(attrVIds)))
seqNos = {str(rec._in): int(rec.oRecordData.get(F.seqNo.name))
for rec in edgeRecs}
result = {}
for attrRec in allAttrsRecords:
raw = json.loads(attrRec.oRecordData.get(RAW))
key, value = raw.popitem()
if len(attrNames) == 0 or key in attrNames:
result[key] = [value, seqNos[attrRec._rid]]
return result
def getClaimDef(self, frm, name, version):
# TODO: Can this query be made similar to get attribute?
cmd = "select outV('{}')[{}='{}'], expand(inV('{}')) from {} where " \
"name = '{}' and version = '{}'".format(Vertices.Nym, NYM, frm,
Vertices.ClaimDef,
Edges.AddsClaimDef, name,
version)
claimDefs = self.client.command(cmd)
if claimDefs:
claimDef = claimDefs[0].oRecordData
edgeData = self.client.command(
"select expand(inE('{}')) from {}".format(
Edges.AddsClaimDef, claimDefs[0]._rid))[0].oRecordData
return {
NAME: name,
VERSION: version,
TYPE: claimDef.get(TYPE),
F.seqNo.name: edgeData.get(F.seqNo.name),
ATTR_NAMES: claimDef.get(ATTR_NAMES),
ORIGIN: frm,
}
return None
def getIssuerKeys(self, frm, ref):
cmd = "select expand(outE('{}').inV('{}')) from {} where " \
"{} = '{}'".format(Edges.HasIssuerKey, Vertices.IssuerKey,
Vertices.Nym, NYM, frm)
haystack = self.client.command(cmd)
needle = None
if haystack:
for rec in haystack:
if rec.oRecordData.get(REF) == str(ref):
needle = rec
break
if needle:
edgeData = self.client.command(
"select expand(inE('{}')) from {}".format(
Edges.HasIssuerKey, needle._rid))[0].oRecordData
return {
ORIGIN: frm,
REF: ref,
F.seqNo.name: edgeData.get(F.seqNo.name),
DATA: json.loads(needle.oRecordData.get(DATA))
}
return None
def getNym(self, nym, role=None):
"""
Get a nym, if role is provided then get nym with that role
:param nym:
:param role:
:return:
"""
if not role:
return self.getEntityByUniqueAttr(Vertices.Nym, NYM, nym)
else:
return self.getEntityByAttrs(Vertices.Nym, {
NYM: nym,
ROLE: role
})
def getTrustee(self, nym):
return self.getNym(nym, TRUSTEE)
def getTGB(self, nym):
return self.getNym(nym, TGB)
def getSteward(self, nym):
return self.getNym(nym, STEWARD)
def getSponsor(self, nym):
return self.getNym(nym, SPONSOR)
# def getUser(self, nym):
# return self.getNym(nym, USER)
def hasTrustee(self, nym):
return bool(self.getTrustee(nym))
def hasTGB(self, nym):
return bool(self.getTGB(nym))
def hasSteward(self, nym):
return bool(self.getSteward(nym))
def hasSponsor(self, nym):
return bool(self.getSponsor(nym))
# def hasUser(self, nym):
# return bool(self.getUser(nym))
def hasNym(self, nym):
return bool(self.getNym(nym))
def getRole(self, nym):
nymV = self.getNym(nym)
if not nymV:
raise ValueError("Nym {} does not exist".format(nym))
else:
return nymV.oRecordData.get(ROLE)
def getSponsorFor(self, nym):
sponsor = self.client.command("select expand (out) from {} where "
"{} = '{}'".format(Edges.AddsNym,
NYM, nym))
return None if not sponsor else sponsor[0].oRecordData.get(NYM)
def countStewards(self):
return self.countEntitiesByAttrs(Vertices.Nym, {ROLE: STEWARD})
def getAddNymTxn(self, nym):
nymEdge = self.getAddsNymEdge(nym)
if not nymEdge:
            # For the special case where steward(s) are added through genesis
            # transactions, so they won't have an edge
nymV = self.getNym(nym)
if not nymV:
return None
else:
return {
TXN_ID: nymV.oRecordData.get(TXN_ID),
TARGET_NYM: nym,
ROLE: nymV.oRecordData.get(ROLE),
VERKEY: nymV.oRecordData.get(VERKEY)
}
else:
edgeData = nymEdge.oRecordData
result = {
TXN_ID: edgeData.get(TXN_ID),
ROLE: edgeData.get(ROLE)
}
frm, to = self.store.getByRecordIds(edgeData['out'].get(),
edgeData['in'].get())
result[f.IDENTIFIER.nm] = frm.oRecordData.get(NYM)
result[TARGET_NYM] = to.oRecordData.get(NYM)
verkey = to.oRecordData.get(VERKEY)
if verkey is not None:
result[VERKEY] = verkey
return result
def getAddAttributeTxnIds(self, nym):
attrEdges = self.client.command("select {} from {} where {} = '{}'".
format(TXN_ID, Edges.AddsAttribute,
TARGET_NYM, nym)) or []
return [edge.oRecordData[TXN_ID] for edge in attrEdges]
def getTxn(self, identifier, reqId, **kwargs):
typ = kwargs[TXN_TYPE]
edgeClass = getEdgeByTxnType(typ)
edgeProps = ", ".join("@this.{} as __e_{}".format(name, name) for name in
txnEdgeProps)
vertexProps = ", ".join("in.{} as __v_{}".format(name, name) for name in
chain.from_iterable(
Vertices._Properties.values()))
txnId = Node.genTxnId(identifier, reqId)
cmd = "select {}, {} from {} where {} = '{}'". \
format(edgeProps, vertexProps, edgeClass, f.TXN_ID.nm, txnId)
result = self.client.command(cmd)
return None if not result \
else self.makeResult(typ, self.cleanKeyNames(result[0].oRecordData))
def getResultForTxnIds(self, *txnIds, seqNo=None) -> dict:
txnIds = set(txnIds)
txnIdsStr = ",".join(["'{}'".format(tid) for tid in txnIds])
def getTxnsFromEdge(edgeClass):
# TODO: Need to do this to get around a bug in pyorient,
# https://github.com/mogui/pyorient/issues/207
edgeProps = ", ".join("@this.{} as __e_{}".format(name, name)
for name in txnEdgeProps)
vertexProps = ", ".join("in.{} as __v_{}".format(name, name)
for name in chain.from_iterable(
Vertices._Properties.values()))
cmd = "select {}, {} from {} where {} in [{}]".\
format(edgeProps, vertexProps, edgeClass, TXN_ID, txnIdsStr)
if seqNo:
cmd += " and {} > {}".format(F.seqNo.name, seqNo)
result = self.client.command(cmd)
if not result:
return {}
else:
out = {}
for r in result:
if r.oRecordData:
oRecordData = self.cleanKeyNames(r.oRecordData)
out[oRecordData[F.seqNo.name]] = self.makeResult(
NYM, oRecordData)
return out
result = reduce(lambda d1, d2: {**d1, **d2},
map(getTxnsFromEdge, list(txnEdges.values())))
if len(txnIds) > len(result):
# Some transactions missing so look for transactions without edges
result.update(self.getTxnsWithoutEdge(*(txnIds.difference(
{r.get(TXN_ID) for r in result.values()})), seqNo=seqNo))
return result
def getTxnsWithoutEdge(self, *txnIds, seqNo=None):
        # For getting transactions which have no edges in the graph, as in the
        # case of genesis transactions; currently looking only for `NYM`
        # transactions
txnIdsStr = ",".join(["'{}'".format(tid) for tid in txnIds])
cmd = "select from {} where {} in [{}]".format(Vertices.Nym, TXN_ID,
txnIdsStr)
if seqNo:
cmd += " and {} > {}".format(F.seqNo.name, seqNo)
result = self.client.command(cmd)
if not result:
return {}
else:
out = {}
for r in result:
if r.oRecordData:
r.oRecordData[TARGET_NYM] = r.oRecordData.pop(NYM)
out[r.oRecordData[F.seqNo.name]] = self.makeResult(
NYM, r.oRecordData)
return out
def _updateTxnIdEdgeWithTxn(self, txnId, edgeClass, txn, properties=None):
properties = properties or txnEdgeProps
updates = ', '.join(["{}={}".format(prop, txn[prop])
if isinstance(txn[prop], (int, float)) else
"{}='{}'".format(prop, txn[prop])
for prop in properties if (prop in txn and
txn[prop] is not None)])
updateCmd = "update {} set {} where {}='{}'". \
format(edgeClass, updates, TXN_ID, txnId)
logger.debug("updating edge {} with command {}".format(edgeClass,
updateCmd))
self.client.command(updateCmd)
def addNymTxnToGraph(self, txn):
origin = txn.get(f.IDENTIFIER.nm)
role = txn.get(ROLE)
if not Authoriser.isValidRole(role):
raise ValueError("Unknown role {} for nym, cannot add nym to graph"
.format(role))
nym = txn[TARGET_NYM]
verkey = txn.get(VERKEY)
try:
txnId = txn[TXN_ID]
seqNo = txn.get(F.seqNo.name)
            # Since the NYM vertex has a unique index on the identifier
            # (CID or DID), a unique constraint violation would occur if the
            # nym already exists. Instead of catching an exception, a call to
            # hasNym or getNym could be made first, but NYM update txns are
            # less common than NYM adding txns, so we avoid the cost of the
            # extra db query
try:
self.addNym(txnId, nym, verkey, role,
frm=origin, reference=txn.get(REF),
seqNo=seqNo)
except pyorient.PyOrientORecordDuplicatedException:
self.updateNym(txnId, nym, verkey, seqNo, role)
else:
# Only update edge in case of new NYM transaction
self._updateTxnIdEdgeWithTxn(txnId, Edges.AddsNym, txn)
except pyorient.PyOrientORecordDuplicatedException:
logger.debug("The nym {} was already added to graph".
format(nym))
except pyorient.PyOrientCommandException as ex:
fault(ex, "An exception was raised while adding "
"nym {}: {}".format(nym, ex))
def addAttribTxnToGraph(self, txn):
origin = txn.get(f.IDENTIFIER.nm)
txnId = txn[TXN_ID]
try:
self.addAttribute(frm=origin, txnId=txnId, raw=txn.get(RAW),
enc=txn.get(ENC), hash=txn.get(HASH),
to=txn.get(TARGET_NYM))
self._updateTxnIdEdgeWithTxn(txnId, Edges.AddsAttribute, txn)
except pyorient.PyOrientCommandException as ex:
fault(ex, "An exception was raised while adding attribute: {}".
format(ex))
def addClaimDefTxnToGraph(self, txn):
origin = txn.get(f.IDENTIFIER.nm)
txnId = txn[TXN_ID]
data = txn.get(DATA)
try:
self.addClaimDef(
frm=origin,
txnId=txnId,
name=data.get(NAME),
version=data.get(VERSION),
attrNames=data.get(ATTR_NAMES),
typ=data.get(TYPE))
self._updateTxnIdEdgeWithTxn(txnId, Edges.AddsClaimDef, txn)
except Exception as ex:
fault(ex, "Error adding cred def to orientdb")
def addIssuerKeyTxnToGraph(self, txn):
origin = txn.get(f.IDENTIFIER.nm)
txnId = txn[TXN_ID]
data = txn.get(DATA)
try:
self.addIssuerKey(
frm=origin,
txnId=txnId,
data=data,
reference=txn.get(REF),
)
self._updateTxnIdEdgeWithTxn(txnId, Edges.HasIssuerKey, txn)
except Exception as ex:
fault(ex, "Error adding issuer key to orientdb")
pass
def countTxns(self):
seqNos = set()
for txnEdgeClass in (list(txnEdges.values())+[Vertices.Nym]):
cmd = "select distinct({}) as seqNo from {}". \
format(F.seqNo.name, txnEdgeClass)
result = self.client.command(cmd)
seqNos.update({r.oRecordData.get('seqNo') for r in result})
return len(seqNos)
@staticmethod
def cleanKeyNames(oRecordData):
        # Removing `__e_` and `__v_` from key names of oRecordData.
        # They are added so that select queries, which can contain duplicate
        # key names, still work
return {k[4:] if (k.startswith("__e_") or k.startswith("__v_")) else k:
v for k, v in oRecordData.items()}
@staticmethod
def makeResult(txnType, oRecordData):
try:
int(oRecordData.get(F.seqNo.name))
except TypeError as ex:
logger.debug(
"Cannot convert {} to integer. Provided oRecordData {} for type"
" {}".format(oRecordData.get(F.seqNo.name), oRecordData,
txnType))
return {}
result = {
F.seqNo.name: int(oRecordData.get(F.seqNo.name)),
TXN_TYPE: txnType,
TXN_ID: oRecordData.get(TXN_ID),
f.REQ_ID.nm: oRecordData.get(f.REQ_ID.nm),
f.IDENTIFIER.nm: oRecordData.get(f.IDENTIFIER.nm),
}
if TXN_TIME in oRecordData:
txnTime = oRecordData.get(TXN_TIME)
if isinstance(txnTime, datetime.datetime):
try:
txnTimeStamp = int(time.mktime(txnTime.timetuple()))
except (OverflowError, ValueError) as ex:
logger.warn("TXN_TIME cannot convert datetime '{}' "
"to timestamp, reject it".format(txnTime))
else:
# TODO The right thing to do is check the time of the PRE-PREPARE.
# https://github.com/evernym/sovrin-priv/pull/20#discussion_r80387554
now = time.time()
if MIN_TXN_TIME < txnTimeStamp < now:
result[TXN_TIME] = txnTimeStamp
else:
logger.warn("TXN_TIME {} is not in the range ({}, {}), "
"reject it".format(txnTimeStamp,
MIN_TXN_TIME, now))
if TARGET_NYM in oRecordData:
result[TARGET_NYM] = oRecordData[TARGET_NYM]
if txnType == NYM:
result[ROLE] = oRecordData.get(ROLE)
if txnType == ATTRIB:
for n in [RAW, ENC, HASH]:
if n in oRecordData:
result[n] = oRecordData[n]
break
if txnType == CLAIM_DEF:
result[DATA] = {}
for n in [IP, PORT, KEYS, TYPE, NAME, VERSION]:
if n in oRecordData:
result[DATA][n] = oRecordData[n]
return result
|
py | b400437d205f749be5aadb1b42986a859ae95d4d | # Generated by Django 3.2.5 on 2021-10-14 06:31
import re
from django.db import migrations
def build_refs(apps, schema_editor):
"""
    Rebuild the integer "reference" fields for existing PurchaseOrder and SalesOrder objects
"""
PurchaseOrder = apps.get_model('order', 'purchaseorder')
for order in PurchaseOrder.objects.all():
ref = 0
result = re.match(r"^(\d+)", order.reference)
if result and len(result.groups()) == 1:
try:
ref = int(result.groups()[0])
except:
ref = 0
order.reference_int = ref
order.save()
SalesOrder = apps.get_model('order', 'salesorder')
for order in SalesOrder.objects.all():
ref = 0
result = re.match(r"^(\d+)", order.reference)
if result and len(result.groups()) == 1:
try:
ref = int(result.groups()[0])
except:
ref = 0
order.reference_int = ref
order.save()
def unbuild_refs(apps, schema_editor):
"""
Provided only for reverse migration compatibility
"""
pass
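# Illustrative note (not part of the original migration): the regex above
# extracts a leading integer prefix from the free-text reference, e.g.
#   re.match(r"^(\d+)", "0042-ACME").groups() == ('0042',)  ->  reference_int = 42
# while references without a numeric prefix fall back to reference_int = 0.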
class Migration(migrations.Migration):
dependencies = [
('order', '0051_auto_20211014_0623'),
]
operations = [
migrations.RunPython(
build_refs,
reverse_code=unbuild_refs
)
]
|
py | b400450aad0fbb6a9dfe592ab8be4d47077173b9 | from django.conf.urls import url
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns=[
url('^$',views.welcome,name = 'welcome'),
url(r'^accounts/profile/$',views.profile,name = 'profile'),
url(r'^profile/(\d+)',views.new_post,name = 'visitprofile'),
url(r'^search/', views.search_results, name='search_results'),
url(r'^new/post$', views.new_post, name='new_post')
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT) |
py | b40045746b05ca3dbed771c7d8b023adb411df64 | from typing import (Any,
Iterable)
from hypothesis import given
from lz.iterating import (capacity,
trailer)
from lz.replication import duplicate
from tests import strategies
from tests.utils import iterable_ends_with
@given(strategies.non_empty_iterables, strategies.non_negative_indices)
def test_capacity(non_empty_iterable: Iterable[Any],
size: int) -> None:
original, target = duplicate(non_empty_iterable)
trail = trailer(size)
result = trail(target)
if capacity(original) < size:
assert capacity(result) < size
else:
assert capacity(result) == size
@given(strategies.non_empty_iterables, strategies.non_negative_indices)
def test_elements(non_empty_iterable: Iterable[Any],
size: int) -> None:
original, target = duplicate(non_empty_iterable)
trail = trailer(size)
result = trail(target)
assert iterable_ends_with(original, result)
|
py | b40047129582cd66845b614d0a5ca82bb4dce75d | import sacred
from sacred import Experiment
from mot_neural_solver.utils.evaluation import MOTMetricsLogger
from mot_neural_solver.utils.misc import make_deterministic, get_run_str_and_save_dir, ModelCheckpoint
from mot_neural_solver.path_cfg import OUTPUT_PATH
import os.path as osp
from mot_neural_solver.pl_module.pl_module import MOTNeuralSolver
from pytorch_lightning import Trainer
from pytorch_lightning.loggers import TensorBoardLogger
#from pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint
from sacred import SETTINGS
SETTINGS.CONFIG.READ_ONLY_CONFIG=False
ex = Experiment()
ex.add_config('configs/tracking_cfg.yaml')
ex.add_config({'run_id': 'train_w_default_config',
'add_date': True,
'cross_val_split': None})
@ex.config
def cfg(cross_val_split, eval_params, dataset_params, graph_model_params, data_splits):
# Training requires the use of precomputed embeddings
    assert dataset_params['precomputed_embeddings'], "Training without precomputed embeddings is not supported"
if 'tracktor' not in dataset_params['det_file_name']:
eval_params['add_tracktor_detects'] = False
# Make sure that the edges encoder MLP input dim. matches the number of edge features used.
graph_model_params['encoder_feats_dict']['edge_in_dim'] = len(dataset_params['edge_feats_to_use'])
    # Determine which data splits will be used for training / validation
if cross_val_split is not None:
assert cross_val_split in (1, 2, 3), f"{cross_val_split} is not a valid cross validation split"
data_splits['train'] =['mot15_train_gt', f'mot17_split_{cross_val_split}_train_gt']
data_splits['val'] = [f'split_{cross_val_split}_val']
# If we're training on all the available training data, disable validation
if data_splits['train'] =='all_train' or data_splits['val'] is None:
data_splits['val'] = []
eval_params['val_percent_check'] = 0
@ex.automain
def main(_config, _run, prepr_w_tracktor=True):
if prepr_w_tracktor == False:
_config['dataset_params']['det_file_name'] = 'frcnn_prepr_det'
_config['eval_params']['add_tracktor_detects'] = False
sacred.commands.print_config(_run)
make_deterministic(12345)
model = MOTNeuralSolver(hparams = dict(_config))
run_str, save_dir = get_run_str_and_save_dir(_config['run_id'], _config['cross_val_split'], _config['add_date'])
if _config['train_params']['tensorboard']:
logger = TensorBoardLogger(OUTPUT_PATH, name='experiments', version=run_str)
else:
logger = None
ckpt_callback = ModelCheckpoint(save_epoch_start = _config['train_params']['save_epoch_start'],
save_every_epoch = _config['train_params']['save_every_epoch'])
trainer = Trainer(gpus=2,
callbacks=[MOTMetricsLogger(compute_oracle_results = _config['eval_params']['normalize_mot_metrics']), ckpt_callback],
weights_summary = None,
checkpoint_callback=False,
max_epochs=_config['train_params']['num_epochs'],
val_percent_check = _config['eval_params']['val_percent_check'],
check_val_every_n_epoch=_config['eval_params']['check_val_every_n_epoch'],
nb_sanity_val_steps=0,
logger =logger,
default_save_path=osp.join(OUTPUT_PATH, 'experiments', run_str))
trainer.fit(model)
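# Usage note (illustrative, not from the original script): experiments started
# through ex.automain accept config overrides on the command line with sacred's
# "with" syntax, e.g.
#   python train.py with run_id=my_run cross_val_split=1
# (the script name and the values shown here are assumptions for demonstration).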
|
py | b40047b63d4b2d1286784a94ef7bbaabc13cef99 | import numpy as np
class SolverWrapper(object):
"""The base class for all solver wrappers.
Solver wrappers implement a core method needed by all
Reachability-based algorithms: The method
`solve_stagewise_optim`. This methods solves a Linear/Quadratic
Program subject to linear constraints at a given stage, and
possibly with additional auxiliary constraints.
    Note that some solver wrappers only handle Linear Programs, while
    some handle both.
Certain solver wrappers need to be setup and close down before and
after usage. For instance, the wrappers for mosek and qpOASES with
warmstart capability. To provide this functionality, this class
contains two abstract methods `setup_solver` and `close_solver`,
which should be called before and after any call to
`solve_stagewise_optim`, so that necessary setups can be made.
Attributes
----------
constraints : list of `Constraint`
Constraints on the robot system.
path : Interpolator
The geometric path to be time-parametrized.
path_discretization: array
The discretization grid use to discretize the geometric path.
"""
def __init__(self, constraint_list, path, path_discretization):
# Main attributes
self.constraints = constraint_list
self.path = path
self.path_discretization = np.array(path_discretization)
# End main attributes
        self.N = len(path_discretization) - 1  # Number of stages. Number of points is N + 1
self.deltas = self.path_discretization[1:] - self.path_discretization[:-1]
assert path.get_path_interval()[0] == path_discretization[0]
assert path.get_path_interval()[1] == path_discretization[-1]
for i in range(self.N):
assert path_discretization[i + 1] > path_discretization[i]
self.params = [c.compute_constraint_params(self.path, self.path_discretization)
for c in self.constraints]
self.nV = 2 + sum([c.get_no_extra_vars() for c in self.constraints])
def get_no_stages(self):
"""Return the number of stages.
The number of gridpoints equals N + 1, where N is the number
of stages.
"""
return self.N
def get_no_vars(self):
""" Return total number of variables, including u, x.
"""
return self.nV
def get_deltas(self):
return self.deltas
def solve_stagewise_optim(self, i, H, g, x_min, x_max, x_next_min, x_next_max):
"""Solve a stage-wise quadratic (or linear) optimization problem.
The quadratic optimization problem is described below:
.. math::
\\text{min } & 0.5 [u, x, v] H [u, x, v]^\\top + [u, x, v] g \\\\
\\text{s.t. } & [u, x] \\text{ is feasible at stage } i \\\\
& x_{min} \leq x \leq x_{max} \\\\
& x_{next, min} \leq x + 2 \Delta_i u \leq x_{next, max},
where `v` is an auxiliary variable, only exist if there are
non-canonical constraints. The linear program is the
quadratic problem without the quadratic term.
Parameters
----------
i: int
The stage index.
H: (d,d)array or None
The coefficient of the quadratic objective function. If is
None, neglect the quadratic term.
g: (d,)array
The linear term.
x_min: float
If not specified, set to NaN.
x_max: float
If not specified, set to NaN.
x_next_min: float
If not specified, set to NaN.
x_next_max: float
If not specified, set to NaN.
Returns
-------
double array, or list
If successes, return an array containing the optimal
variable. Since NaN is also a valid double, this list
contains NaN if the optimization problem is infeasible.
"""
raise NotImplementedError
def setup_solver(self):
pass
def close_solver(self):
pass
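# Illustrative sketch (not part of toppra): the smallest possible subclass,
# showing only the interface contract described above -- return an array of
# length get_no_vars(), with NaN entries where a value cannot be determined.
# H and g are ignored here; a real wrapper would assemble the stage-i LP/QP
# from self.params and hand it to an actual solver backend.
class _NaiveSolverWrapper(SolverWrapper):
    def solve_stagewise_optim(self, i, H, g, x_min, x_max, x_next_min, x_next_max):
        result = np.full(self.get_no_vars(), np.nan)
        if not np.isnan(x_min):
            # pick the smallest admissible squared path velocity
            result[1] = x_min
            if not np.isnan(x_next_min):
                # choose u so that x + 2 * delta_i * u reaches x_next_min
                result[0] = (x_next_min - x_min) / (2 * self.deltas[i])
        return result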
|
py | b400483f943711b0f3c708fafcfac07c57b88c92 | """ Start with a copy of your program from Exercise 8-9.
Write a function called make_great() that modifies the
list of magicians by adding the phrase the Great to each
magician’s name. Call show_magicians() to see that the list
has actually been modified. """
def show_magicians(some_names):
for name in some_names:
print(f'- {name.title()}')
def make_great(some_names):
    great_magicians = []
    while some_names:
        new = some_names.pop()
        great = new + ' the GREAT!'
        great_magicians.append(great)
    # Put the great names back so the original list is actually modified
    some_names.extend(great_magicians)
    for name in some_names:
        print(f'- {name.title()}')
lista_de_magos = ['james', 'cynthya', 'carolina']
show_magicians(lista_de_magos)
make_great(lista_de_magos)
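# As the exercise docstring asks, call show_magicians() once more to confirm
# that the list itself was modified in place (this final check is an
# illustrative addition, not part of the original solution file).
show_magicians(lista_de_magos) |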
py | b400484186baf6eb2e8f03bef21ab94e6feac3bc | # ------------------------------------------------------------------------------
# CodeHawk Binary Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016-2020 Kestrel Technology LLC
# Copyright (c) 2020 Henny Sipma
# Copyright (c) 2021 Aarno Labs LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
from typing import List, Sequence, TYPE_CHECKING
from chb.app.InstrXData import InstrXData
import chb.simulation.SimUtil as SU
import chb.simulation.SimValue as SV
import chb.util.fileutil as UF
from chb.util.IndexedTable import IndexedTableValue
from chb.x86.simulation.X86SimulationState import X86SimulationState
from chb.x86.X86DictionaryRecord import x86registry
from chb.x86.X86Opcode import X86Opcode
from chb.x86.X86Operand import X86Operand
if TYPE_CHECKING:
from chb.x86.X86Dictionary import X86Dictionary
from chb.x86.simulation.X86SimulationState import X86SimulationState
@x86registry.register_tag("jmp", X86Opcode)
class X86Jump(X86Opcode):
"""JMP dst
args[0]: index of dst in x86dictionary
"""
def __init__(
self,
x86d: "X86Dictionary",
ixval: IndexedTableValue) -> None:
X86Opcode.__init__(self, x86d, ixval)
@property
def target_address(self) -> X86Operand:
return self.x86d.operand(self.args[0])
@property
def operands(self) -> Sequence[X86Operand]:
return [self.target_address]
def annotation(self, xdata: InstrXData) -> str:
tgtaddr = self.bd.address(xdata.args[0])
return 'goto ' + str(tgtaddr)
def simulate(self, iaddr: str, simstate: "X86SimulationState") -> None:
raise SU.CHBSimJumpException(iaddr, str(self.target_address))
|
py | b40048c819555aa47a16bae0e3fa3edfac91e1ed | # Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import logging
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
from azure.mgmt.resourcegraph.models import QueryRequest
from c7n.actions import ActionRegistry
from c7n.exceptions import PolicyValidationError
from c7n.filters import FilterRegistry
from c7n.manager import ResourceManager
from c7n.query import MaxResourceLimit, sources
from c7n.utils import local_session
from c7n_azure.actions.logic_app import LogicAppAction
from c7n_azure.actions.notify import Notify
from c7n_azure.constants import DEFAULT_RESOURCE_AUTH_ENDPOINT
from c7n_azure.filters import ParentFilter
from c7n_azure.provider import resources
from c7n_azure.utils import generate_key_vault_url, serialize
log = logging.getLogger('custodian.azure.query')
class ResourceQuery:
def __init__(self, session_factory):
self.session_factory = session_factory
def filter(self, resource_manager, **params):
m = resource_manager.resource_type
enum_op, list_op, extra_args = m.enum_spec
if extra_args:
params.update(extra_args)
params.update(m.extra_args(resource_manager))
try:
op = getattr(getattr(resource_manager.get_client(), enum_op), list_op)
result = op(**params)
if isinstance(result, Iterable):
return [r.serialize(True) for r in result]
elif hasattr(result, 'value'):
return [r.serialize(True) for r in result.value]
except Exception as e:
log.error("Failed to query resource.\n"
"Type: azure.{0}.\n"
"Error: {1}".format(resource_manager.type, e))
raise
raise TypeError("Enumerating resources resulted in a return"
"value which could not be iterated.")
@staticmethod
def resolve(resource_type):
if not isinstance(resource_type, type):
raise ValueError(resource_type)
else:
m = resource_type
return m
@sources.register('describe-azure')
class DescribeSource:
resource_query_factory = ResourceQuery
def __init__(self, manager):
self.manager = manager
self.query = self.resource_query_factory(self.manager.session_factory)
def validate(self):
pass
def get_resources(self, query):
return self.query.filter(self.manager)
def get_permissions(self):
return ()
def augment(self, resources):
return resources
@sources.register('resource-graph')
class ResourceGraphSource:
def __init__(self, manager):
self.manager = manager
def validate(self):
if not hasattr(self.manager.resource_type, 'resource_type'):
raise PolicyValidationError(
"%s is not supported with the Azure Resource Graph source."
% self.manager.data['resource'])
def get_resources(self, _):
log.warning('The Azure Resource Graph source '
'should not be used in production scenarios at this time.')
session = self.manager.get_session()
client = session.client('azure.mgmt.resourcegraph.ResourceGraphClient')
# empty scope will return all resource
query_scope = ""
if self.manager.resource_type.resource_type != 'armresource':
query_scope = "where type =~ '%s'" % self.manager.resource_type.resource_type
query = QueryRequest(
query=query_scope,
subscriptions=[session.get_subscription_id()]
)
res = client.resources(query)
cols = [c['name'] for c in res.data['columns']]
data = [dict(zip(cols, r)) for r in res.data['rows']]
return data
def get_permissions(self):
return ()
def augment(self, resources):
return resources
class ChildResourceQuery(ResourceQuery):
"""A resource query for resources that must be queried with parent information.
Several resource types can only be queried in the context of their
    parents' identifiers, e.g. SQL and Cosmos databases
"""
def filter(self, resource_manager, **params):
"""Query a set of resources."""
m = self.resolve(resource_manager.resource_type) # type: ChildTypeInfo
parents = resource_manager.get_parent_manager()
# Have to query separately for each parent's children.
results = []
for parent in parents.resources():
try:
vault_url = None
if m.keyvault_child:
vault_url = generate_key_vault_url(parent['name'])
subset = resource_manager.enumerate_resources(
parent, m, vault_url=vault_url, **params)
if subset:
# If required, append parent resource ID to all child resources
if m.annotate_parent:
for r in subset:
r[m.parent_key] = parent[parents.resource_type.id]
results.extend(subset)
except Exception as e:
log.warning('Child enumeration failed for {0}. {1}'
.format(parent[parents.resource_type.id], e))
if m.raise_on_exception:
raise e
return results
@sources.register('describe-child-azure')
class ChildDescribeSource(DescribeSource):
resource_query_factory = ChildResourceQuery
class TypeMeta(type):
def __repr__(cls):
return "<Type info service:%s client: %s>" % (
cls.service,
cls.client)
class TypeInfo(metaclass=TypeMeta):
doc_groups = None
"""api client construction information"""
service = ''
client = ''
resource = DEFAULT_RESOURCE_AUTH_ENDPOINT
# Default id field, resources should override if different (used for meta filters, report etc)
id = 'id'
@classmethod
def extra_args(cls, resource_manager):
return {}
class ChildTypeInfo(TypeInfo, metaclass=TypeMeta):
"""api client construction information for child resources"""
parent_manager_name = ''
annotate_parent = True
raise_on_exception = True
parent_key = 'c7n:parent-id'
keyvault_child = False
@classmethod
def extra_args(cls, parent_resource):
return {}
class QueryMeta(type):
"""metaclass to have consistent action/filter registry for new resources."""
def __new__(cls, name, parents, attrs):
if 'filter_registry' not in attrs:
attrs['filter_registry'] = FilterRegistry(
'%s.filters' % name.lower())
if 'action_registry' not in attrs:
attrs['action_registry'] = ActionRegistry(
'%s.actions' % name.lower())
return super(QueryMeta, cls).__new__(cls, name, parents, attrs)
class QueryResourceManager(ResourceManager, metaclass=QueryMeta):
class resource_type(TypeInfo):
pass
def __init__(self, data, options):
super(QueryResourceManager, self).__init__(data, options)
self.source = self.get_source(self.source_type)
self._session = None
def augment(self, resources):
return resources
def get_permissions(self):
return ()
def get_source(self, source_type):
return sources.get(source_type)(self)
def get_session(self):
if self._session is None:
self._session = local_session(self.session_factory)
return self._session
def get_client(self, service=None, vault_url=None):
if not service:
return self.get_session().client(
"%s.%s" % (self.resource_type.service, self.resource_type.client),
vault_url=vault_url)
return self.get_session().client(service, vault_url=vault_url)
def get_cache_key(self, query):
return {'source_type': self.source_type,
'query': query,
'resource': str(self.__class__.__name__)}
@classmethod
def get_model(cls):
return ResourceQuery.resolve(cls.resource_type)
@property
def source_type(self):
return self.data.get('source', 'describe-azure')
def resources(self, query=None, augment=True):
cache_key = self.get_cache_key(query)
resources = None
if self._cache.load():
resources = self._cache.get(cache_key)
if resources is not None:
self.log.debug("Using cached %s: %d" % (
"%s.%s" % (self.__class__.__module__,
self.__class__.__name__),
len(resources)))
if resources is None:
with self.ctx.tracer.subsegment('resource-fetch'):
resources = self.source.get_resources(query)
if augment:
with self.ctx.tracer.subsegment('resource-augment'):
resources = self.augment(resources)
self._cache.save(cache_key, resources)
with self.ctx.tracer.subsegment('filter'):
resource_count = len(resources)
resources = self.filter_resources(resources)
# Check if we're out of a policies execution limits.
if self.data == self.ctx.policy.data:
self.check_resource_limit(len(resources), resource_count)
return resources
def check_resource_limit(self, selection_count, population_count):
"""Check if policy's execution affects more resources then its limit.
"""
p = self.ctx.policy
max_resource_limits = MaxResourceLimit(p, selection_count, population_count)
return max_resource_limits.check_resource_limits()
def get_resources(self, resource_ids, **params):
resource_client = self.get_client()
m = self.resource_type
get_client, get_op, extra_args = m.get_spec
if extra_args:
params.update(extra_args)
op = getattr(getattr(resource_client, get_client), get_op)
data = [
op(rid, **params)
for rid in resource_ids
]
return [r.serialize(True) for r in data]
@staticmethod
def register_actions_and_filters(registry, resource_class):
resource_class.action_registry.register('notify', Notify)
if 'logic-app' not in resource_class.action_registry:
resource_class.action_registry.register('logic-app', LogicAppAction)
def validate(self):
self.source.validate()
class ChildResourceManager(QueryResourceManager, metaclass=QueryMeta):
child_source = 'describe-child-azure'
parent_manager = None
@property
def source_type(self):
source = self.data.get('source', self.child_source)
if source == 'describe':
source = self.child_source
return source
def get_parent_manager(self):
if not self.parent_manager:
self.parent_manager = self.get_resource_manager(self.resource_type.parent_manager_name)
return self.parent_manager
def get_session(self):
if self._session is None:
session = super(ChildResourceManager, self).get_session()
if self.resource_type.resource != DEFAULT_RESOURCE_AUTH_ENDPOINT:
session = session.get_session_for_resource(self.resource_type.resource)
self._session = session
return self._session
def enumerate_resources(self, parent_resource, type_info, vault_url=None, **params):
client = self.get_client(vault_url=vault_url)
enum_op, list_op, extra_args = self.resource_type.enum_spec
# There are 2 types of extra_args:
# - static values stored in 'extra_args' dict (e.g. some type)
        # - dynamic values retrieved via the 'extra_args' method (e.g. parent name)
if extra_args:
params.update({key: extra_args[key](parent_resource) for key in extra_args.keys()})
params.update(type_info.extra_args(parent_resource))
# Some resources might not have enum_op piece (non-arm resources)
if enum_op:
op = getattr(getattr(client, enum_op), list_op)
else:
op = getattr(client, list_op)
result = op(**params)
if isinstance(result, Iterable):
# KeyVault items don't have `serialize` method now
return [(r.serialize(True) if hasattr(r, 'serialize') else serialize(r))
for r in result]
elif hasattr(result, 'value'):
return [r.serialize(True) for r in result.value]
raise TypeError("Enumerating resources resulted in a return"
"value which could not be iterated.")
@staticmethod
def register_child_specific(registry, resource_class):
if not issubclass(resource_class, ChildResourceManager):
return
# If Child Resource doesn't annotate parent, there is no way to filter based on
# parent properties.
if resource_class.resource_type.annotate_parent:
resource_class.filter_registry.register('parent', ParentFilter)
resources.subscribe(QueryResourceManager.register_actions_and_filters)
resources.subscribe(ChildResourceManager.register_child_specific)
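# Illustrative sketch (not part of this module): how a resource type plugs into
# the machinery above. ResourceQuery.filter() unpacks resource_type.enum_spec as
# (enum_op, list_op, extra_args) and builds the SDK client from
# resource_type.service and resource_type.client. Every name below (service
# path, client class, operations group and list method) is an assumption used
# purely for demonstration; real resources also register themselves with the
# provider registry rather than being defined inline like this.
class _DemoResource(QueryResourceManager):
    class resource_type(TypeInfo):
        service = 'azure.mgmt.example'            # assumed SDK module path
        client = 'ExampleManagementClient'        # assumed client class name
        enum_spec = ('widgets', 'list', None)     # operations group, list op, extra args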
|
py | b4004997f60444559ef105be029e1c4ba607b943 | """Certbot display."""
import os
import textwrap
import dialog
import zope.interface
from certbot import interfaces
from certbot import errors
from certbot.display import completer
WIDTH = 72
HEIGHT = 20
DSELECT_HELP = (
"Use the arrow keys or Tab to move between window elements. Space can be "
"used to complete the input path with the selected element in the "
"directory window. Pressing enter will select the currently highlighted "
"button.")
"""Help text on how to use dialog's dselect."""
# Display exit codes
OK = "ok"
"""Display exit code indicating user acceptance."""
CANCEL = "cancel"
"""Display exit code for a user canceling the display."""
HELP = "help"
"""Display exit code when for when the user requests more help."""
def _wrap_lines(msg):
"""Format lines nicely to 80 chars.
:param str msg: Original message
:returns: Formatted message respecting newlines in message
:rtype: str
"""
lines = msg.splitlines()
fixed_l = []
for line in lines:
fixed_l.append(textwrap.fill(
line,
80,
break_long_words=False,
break_on_hyphens=False))
return os.linesep.join(fixed_l)
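# Example (illustrative, not part of the original module): each newline-separated
# chunk is wrapped independently, so
#   _wrap_lines("short line\n" + "word " * 30)
# keeps "short line" on its own line and wraps the long second chunk at 80 columns.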
@zope.interface.implementer(interfaces.IDisplay)
class NcursesDisplay(object):
"""Ncurses-based display."""
def __init__(self, width=WIDTH, height=HEIGHT):
super(NcursesDisplay, self).__init__()
self.dialog = dialog.Dialog()
self.width = width
self.height = height
def notification(self, message, height=10, pause=False):
# pylint: disable=unused-argument
"""Display a notification to the user and wait for user acceptance.
.. todo:: It probably makes sense to use one of the transient message
types for pause. It isn't straightforward how best to approach
the matter though given the context of our messages.
http://pythondialog.sourceforge.net/doc/widgets.html#displaying-transient-messages
:param str message: Message to display
:param int height: Height of the dialog box
:param bool pause: Not applicable to NcursesDisplay
"""
self.dialog.msgbox(message, height, width=self.width)
def menu(self, message, choices, ok_label="OK", cancel_label="Cancel",
help_label="", **unused_kwargs):
"""Display a menu.
:param str message: title of menu
:param choices: menu lines, len must be > 0
:type choices: list of tuples (`tag`, `item`) tags must be unique or
list of items (tags will be enumerated)
:param str ok_label: label of the OK button
:param str help_label: label of the help button
:param dict unused_kwargs: absorbs default / cli_args
:returns: tuple of the form (`code`, `index`) where
`code` - int display exit code
`int` - index of the selected item
:rtype: tuple
"""
menu_options = {
"choices": choices,
"ok_label": ok_label,
"cancel_label": cancel_label,
"help_button": bool(help_label),
"help_label": help_label,
"width": self.width,
"height": self.height,
"menu_height": self.height - 6,
}
# Can accept either tuples or just the actual choices
if choices and isinstance(choices[0], tuple):
# pylint: disable=star-args
code, selection = self.dialog.menu(message, **menu_options)
# Return the selection index
for i, choice in enumerate(choices):
if choice[0] == selection:
return code, i
return code, -1
else:
# "choices" is not formatted the way the dialog.menu expects...
menu_options["choices"] = [
(str(i), choice) for i, choice in enumerate(choices, 1)
]
# pylint: disable=star-args
code, index = self.dialog.menu(message, **menu_options)
if code == CANCEL:
return code, -1
return code, int(index) - 1
def input(self, message, **unused_kwargs):
"""Display an input box to the user.
:param str message: Message to display that asks for input.
:param dict _kwargs: absorbs default / cli_args
:returns: tuple of the form (`code`, `string`) where
`code` - int display exit code
`string` - input entered by the user
"""
sections = message.split("\n")
# each section takes at least one line, plus extras if it's longer than self.width
wordlines = [1 + (len(section) / self.width) for section in sections]
height = 6 + sum(wordlines) + len(sections)
return self.dialog.inputbox(message, width=self.width, height=height)
def yesno(self, message, yes_label="Yes", no_label="No", **unused_kwargs):
"""Display a Yes/No dialog box.
Yes and No label must begin with different letters.
:param str message: message to display to user
:param str yes_label: label on the "yes" button
:param str no_label: label on the "no" button
:param dict _kwargs: absorbs default / cli_args
:returns: if yes_label was selected
:rtype: bool
"""
return self.dialog.DIALOG_OK == self.dialog.yesno(
message, self.height, self.width,
yes_label=yes_label, no_label=no_label)
def checklist(self, message, tags, default_status=True, **unused_kwargs):
"""Displays a checklist.
:param message: Message to display before choices
:param list tags: where each is of type :class:`str` len(tags) > 0
:param bool default_status: If True, items are in a selected state by
default.
:param dict _kwargs: absorbs default / cli_args
:returns: tuple of the form (`code`, `list_tags`) where
`code` - int display exit code
`list_tags` - list of str tags selected by the user
"""
choices = [(tag, "", default_status) for tag in tags]
return self.dialog.checklist(
message, width=self.width, height=self.height, choices=choices)
def directory_select(self, message, **unused_kwargs):
"""Display a directory selection screen.
:param str message: prompt to give the user
:returns: tuple of the form (`code`, `string`) where
`code` - int display exit code
`string` - input entered by the user
"""
root_directory = os.path.abspath(os.sep)
return self.dialog.dselect(
filepath=root_directory, width=self.width,
height=self.height, help_button=True, title=message)
@zope.interface.implementer(interfaces.IDisplay)
class FileDisplay(object):
"""File-based display."""
def __init__(self, outfile):
super(FileDisplay, self).__init__()
self.outfile = outfile
def notification(self, message, height=10, pause=True):
# pylint: disable=unused-argument
"""Displays a notification and waits for user acceptance.
:param str message: Message to display
:param int height: No effect for FileDisplay
:param bool pause: Whether or not the program should pause for the
user's confirmation
"""
side_frame = "-" * 79
message = _wrap_lines(message)
self.outfile.write(
"{line}{frame}{line}{msg}{line}{frame}{line}".format(
line=os.linesep, frame=side_frame, msg=message))
if pause:
raw_input("Press Enter to Continue")
def menu(self, message, choices, ok_label="", cancel_label="",
help_label="", **unused_kwargs):
# pylint: disable=unused-argument
"""Display a menu.
.. todo:: This doesn't enable the help label/button (I wasn't sold on
any interface I came up with for this). It would be a nice feature
:param str message: title of menu
:param choices: Menu lines, len must be > 0
:type choices: list of tuples (tag, item) or
list of descriptions (tags will be enumerated)
:param dict _kwargs: absorbs default / cli_args
:returns: tuple of (`code`, `index`) where
`code` - str display exit code
`index` - int index of the user's selection
:rtype: tuple
"""
self._print_menu(message, choices)
code, selection = self._get_valid_int_ans(len(choices))
return code, selection - 1
def input(self, message, **unused_kwargs):
# pylint: disable=no-self-use
"""Accept input from the user.
:param str message: message to display to the user
:param dict _kwargs: absorbs default / cli_args
:returns: tuple of (`code`, `input`) where
`code` - str display exit code
`input` - str of the user's input
:rtype: tuple
"""
ans = raw_input(
textwrap.fill(
"%s (Enter 'c' to cancel): " % message,
80,
break_long_words=False,
break_on_hyphens=False))
if ans == "c" or ans == "C":
return CANCEL, "-1"
else:
return OK, ans
def yesno(self, message, yes_label="Yes", no_label="No", **unused_kwargs):
"""Query the user with a yes/no question.
Yes and No label must begin with different letters, and must contain at
least one letter each.
:param str message: question for the user
:param str yes_label: Label of the "Yes" parameter
:param str no_label: Label of the "No" parameter
:param dict _kwargs: absorbs default / cli_args
:returns: True for "Yes", False for "No"
:rtype: bool
"""
side_frame = ("-" * 79) + os.linesep
message = _wrap_lines(message)
self.outfile.write("{0}{frame}{msg}{0}{frame}".format(
os.linesep, frame=side_frame, msg=message))
while True:
ans = raw_input("{yes}/{no}: ".format(
yes=_parens_around_char(yes_label),
no=_parens_around_char(no_label)))
# Couldn't get pylint indentation right with elif
# elif doesn't matter in this situation
if (ans.startswith(yes_label[0].lower()) or
ans.startswith(yes_label[0].upper())):
return True
if (ans.startswith(no_label[0].lower()) or
ans.startswith(no_label[0].upper())):
return False
def checklist(self, message, tags, default_status=True, **unused_kwargs):
# pylint: disable=unused-argument
"""Display a checklist.
:param str message: Message to display to user
:param list tags: `str` tags to select, len(tags) > 0
:param bool default_status: Not used for FileDisplay
:param dict _kwargs: absorbs default / cli_args
:returns: tuple of (`code`, `tags`) where
`code` - str display exit code
`tags` - list of selected tags
:rtype: tuple
"""
while True:
self._print_menu(message, tags)
code, ans = self.input("Select the appropriate numbers separated "
"by commas and/or spaces")
if code == OK:
indices = separate_list_input(ans)
selected_tags = self._scrub_checklist_input(indices, tags)
if selected_tags:
return code, selected_tags
else:
self.outfile.write(
"** Error - Invalid selection **%s" % os.linesep)
else:
return code, []
def directory_select(self, message, **unused_kwargs):
"""Display a directory selection screen.
:param str message: prompt to give the user
:returns: tuple of the form (`code`, `string`) where
            `code` - str display exit code
`string` - input entered by the user
"""
with completer.Completer():
return self.input(message)
def _scrub_checklist_input(self, indices, tags):
# pylint: disable=no-self-use
"""Validate input and transform indices to appropriate tags.
:param list indices: input
:param list tags: Original tags of the checklist
:returns: valid tags the user selected
:rtype: :class:`list` of :class:`str`
"""
# They should all be of type int
try:
indices = [int(index) for index in indices]
except ValueError:
return []
# Remove duplicates
indices = list(set(indices))
# Check all input is within range
for index in indices:
if index < 1 or index > len(tags):
return []
# Transform indices to appropriate tags
return [tags[index - 1] for index in indices]
def _print_menu(self, message, choices):
"""Print a menu on the screen.
:param str message: title of menu
:param choices: Menu lines
:type choices: list of tuples (tag, item) or
list of descriptions (tags will be enumerated)
"""
# Can take either tuples or single items in choices list
if choices and isinstance(choices[0], tuple):
choices = ["%s - %s" % (c[0], c[1]) for c in choices]
# Write out the message to the user
self.outfile.write(
"{new}{msg}{new}".format(new=os.linesep, msg=message))
side_frame = ("-" * 79) + os.linesep
self.outfile.write(side_frame)
# Write out the menu choices
for i, desc in enumerate(choices, 1):
self.outfile.write(
textwrap.fill(
"{num}: {desc}".format(num=i, desc=desc),
80,
break_long_words=False,
break_on_hyphens=False))
# Keep this outside of the textwrap
self.outfile.write(os.linesep)
self.outfile.write(side_frame)
def _get_valid_int_ans(self, max_):
"""Get a numerical selection.
        :param int max_: The maximum entry (len of choices), must be positive
:returns: tuple of the form (`code`, `selection`) where
            `code` - str display exit code ('ok' or 'cancel')
`selection` - int user's selection
:rtype: tuple
"""
selection = -1
if max_ > 1:
input_msg = ("Select the appropriate number "
"[1-{max_}] then [enter] (press 'c' to "
"cancel): ".format(max_=max_))
else:
input_msg = ("Press 1 [enter] to confirm the selection "
"(press 'c' to cancel): ")
while selection < 1:
ans = raw_input(input_msg)
if ans.startswith("c") or ans.startswith("C"):
return CANCEL, -1
try:
selection = int(ans)
if selection < 1 or selection > max_:
selection = -1
raise ValueError
except ValueError:
self.outfile.write(
"{0}** Invalid input **{0}".format(os.linesep))
return OK, selection
@zope.interface.implementer(interfaces.IDisplay)
class NoninteractiveDisplay(object):
"""An iDisplay implementation that never asks for interactive user input"""
def __init__(self, outfile):
super(NoninteractiveDisplay, self).__init__()
self.outfile = outfile
def _interaction_fail(self, message, cli_flag, extra=""):
"Error out in case of an attempt to interact in noninteractive mode"
msg = "Missing command line flag or config entry for this setting:\n"
msg += message
if extra:
msg += "\n" + extra
if cli_flag:
msg += "\n\n(You can set this with the {0} flag)".format(cli_flag)
raise errors.MissingCommandlineFlag(msg)
def notification(self, message, height=10, pause=False):
# pylint: disable=unused-argument
"""Displays a notification without waiting for user acceptance.
:param str message: Message to display to stdout
:param int height: No effect for NoninteractiveDisplay
:param bool pause: The NoninteractiveDisplay waits for no keyboard
"""
side_frame = "-" * 79
message = _wrap_lines(message)
self.outfile.write(
"{line}{frame}{line}{msg}{line}{frame}{line}".format(
line=os.linesep, frame=side_frame, msg=message))
def menu(self, message, choices, ok_label=None, cancel_label=None,
help_label=None, default=None, cli_flag=None):
# pylint: disable=unused-argument,too-many-arguments
"""Avoid displaying a menu.
:param str message: title of menu
:param choices: Menu lines, len must be > 0
:type choices: list of tuples (tag, item) or
list of descriptions (tags will be enumerated)
:param int default: the default choice
:param dict kwargs: absorbs various irrelevant labelling arguments
:returns: tuple of (`code`, `index`) where
`code` - str display exit code
`index` - int index of the user's selection
:rtype: tuple
:raises errors.MissingCommandlineFlag: if there was no default
"""
if default is None:
self._interaction_fail(message, cli_flag, "Choices: " + repr(choices))
return OK, default
def input(self, message, default=None, cli_flag=None):
"""Accept input from the user.
:param str message: message to display to the user
:returns: tuple of (`code`, `input`) where
`code` - str display exit code
`input` - str of the user's input
:rtype: tuple
:raises errors.MissingCommandlineFlag: if there was no default
"""
if default is None:
self._interaction_fail(message, cli_flag)
else:
return OK, default
def yesno(self, message, yes_label=None, no_label=None, default=None, cli_flag=None):
# pylint: disable=unused-argument
"""Decide Yes or No, without asking anybody
:param str message: question for the user
:param dict kwargs: absorbs yes_label, no_label
:raises errors.MissingCommandlineFlag: if there was no default
:returns: True for "Yes", False for "No"
:rtype: bool
"""
if default is None:
self._interaction_fail(message, cli_flag)
else:
return default
def checklist(self, message, tags, default=None, cli_flag=None, **kwargs):
# pylint: disable=unused-argument
"""Display a checklist.
:param str message: Message to display to user
:param list tags: `str` tags to select, len(tags) > 0
:param dict kwargs: absorbs default_status arg
:returns: tuple of (`code`, `tags`) where
`code` - str display exit code
`tags` - list of selected tags
:rtype: tuple
"""
if default is None:
self._interaction_fail(message, cli_flag, "? ".join(tags))
else:
return OK, default
def directory_select(self, message, default=None, cli_flag=None):
"""Simulate prompting the user for a directory.
This function returns default if it is not ``None``, otherwise,
an exception is raised explaining the problem. If cli_flag is
not ``None``, the error message will include the flag that can
be used to set this value with the CLI.
:param str message: prompt to give the user
:param default: default value to return (if one exists)
:param str cli_flag: option used to set this value with the CLI
:returns: tuple of the form (`code`, `string`) where
            `code` - str display exit code
`string` - input entered by the user
"""
return self.input(message, default, cli_flag)
def separate_list_input(input_):
"""Separate a comma or space separated list.
:param str input_: input from the user
:returns: strings
:rtype: list
"""
no_commas = input_.replace(",", " ")
# Each string is naturally unicode, this causes problems with M2Crypto SANs
# TODO: check if above is still true when M2Crypto is gone ^
return [str(string) for string in no_commas.split()]
def _parens_around_char(label):
"""Place parens around first character of label.
:param str label: Must contain at least one character
"""
return "({first}){rest}".format(first=label[0], rest=label[1:])
|
py | b4004997faab0ad9eadfaf0090feda3476ae1b32 |
import json
# The JSON file is expected to contain a list of movie records, each with a
# list of directors under 'Director' and a list of languages under 'Language'.
with open('/home/navgurukul20/Desktop/VIKASH_K/Files_requests_API+WebS/Task5_6.json', 'r') as files:
    v3 = json.load(files)
dic = {}
def analyse_movies_directors(directors, languages):
    """Count, for every director, how many movies exist in each language."""
    for director in directors:
        language_counts = dic.setdefault(director, {})
        for language in languages:
            language_counts[language] = language_counts.get(language, 0) + 1
for details in v3:
    analyse_movies_directors(details['Director'], details['Language'])
print(dic)
|
py | b4004a69b62aaa0acaea62de31538733b0041cf1 | import logging
from django.core.management.base import BaseCommand
from metadata.handlers import populate_values_list_cache
logger = logging.getLogger(__name__)
class Command(BaseCommand):
def handle(self, *args, **options):
logger.info('Populating values list cache')
populate_values_list_cache()
|
py | b4004a7c33f56d3f843bc31761d9e6518ec9111e | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2020
# Leandro Toledo de Souza <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
"""This module contains the BasePersistence class."""
from abc import ABC, abstractmethod
class BasePersistence(ABC):
"""Interface class for adding persistence to your bot.
Subclass this object for different implementations of a persistent bot.
All relevant methods must be overwritten. This means:
* If :attr:`store_bot_data` is ``True`` you must overwrite :meth:`get_bot_data` and
:meth:`update_bot_data`.
* If :attr:`store_chat_data` is ``True`` you must overwrite :meth:`get_chat_data` and
:meth:`update_chat_data`.
* If :attr:`store_user_data` is ``True`` you must overwrite :meth:`get_user_data` and
:meth:`update_user_data`.
* If you want to store conversation data with :class:`telegram.ext.ConversationHandler`, you
must overwrite :meth:`get_conversations` and :meth:`update_conversation`.
* :meth:`flush` will be called when the bot is shutdown.
Attributes:
        store_user_data (:obj:`bool`): Optional. Whether user_data should be saved by this
persistence class.
store_chat_data (:obj:`bool`): Optional. Whether chat_data should be saved by this
persistence class.
store_bot_data (:obj:`bool`): Optional. Whether bot_data should be saved by this
persistence class.
Args:
store_user_data (:obj:`bool`, optional): Whether user_data should be saved by this
persistence class. Default is ``True``.
store_chat_data (:obj:`bool`, optional): Whether chat_data should be saved by this
persistence class. Default is ``True`` .
store_bot_data (:obj:`bool`, optional): Whether bot_data should be saved by this
persistence class. Default is ``True`` .
"""
def __init__(self, store_user_data=True, store_chat_data=True, store_bot_data=True):
self.store_user_data = store_user_data
self.store_chat_data = store_chat_data
self.store_bot_data = store_bot_data
@abstractmethod
def get_user_data(self):
""""Will be called by :class:`telegram.ext.Dispatcher` upon creation with a
persistence object. It should return the user_data if stored, or an empty
``defaultdict(dict)``.
Returns:
:obj:`defaultdict`: The restored user data.
"""
@abstractmethod
def get_chat_data(self):
""""Will be called by :class:`telegram.ext.Dispatcher` upon creation with a
persistence object. It should return the chat_data if stored, or an empty
``defaultdict(dict)``.
Returns:
:obj:`defaultdict`: The restored chat data.
"""
@abstractmethod
def get_bot_data(self):
""""Will be called by :class:`telegram.ext.Dispatcher` upon creation with a
persistence object. It should return the bot_data if stored, or an empty
``dict``.
Returns:
:obj:`defaultdict`: The restored bot data.
"""
@abstractmethod
def get_conversations(self, name):
""""Will be called by :class:`telegram.ext.Dispatcher` when a
:class:`telegram.ext.ConversationHandler` is added if
:attr:`telegram.ext.ConversationHandler.persistent` is ``True``.
It should return the conversations for the handler with `name` or an empty ``dict``
Args:
name (:obj:`str`): The handlers name.
Returns:
:obj:`dict`: The restored conversations for the handler.
"""
@abstractmethod
def update_conversation(self, name, key, new_state):
"""Will be called when a :attr:`telegram.ext.ConversationHandler.update_state`
        is called. This allows the storage of the new state in the persistence.
Args:
name (:obj:`str`): The handlers name.
key (:obj:`tuple`): The key the state is changed for.
new_state (:obj:`tuple` | :obj:`any`): The new state for the given key.
"""
@abstractmethod
def update_user_data(self, user_id, data):
"""Will be called by the :class:`telegram.ext.Dispatcher` after a handler has
handled an update.
Args:
user_id (:obj:`int`): The user the data might have been changed for.
data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.user_data` [user_id].
"""
@abstractmethod
def update_chat_data(self, chat_id, data):
"""Will be called by the :class:`telegram.ext.Dispatcher` after a handler has
handled an update.
Args:
chat_id (:obj:`int`): The chat the data might have been changed for.
data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.chat_data` [chat_id].
"""
@abstractmethod
def update_bot_data(self, data):
"""Will be called by the :class:`telegram.ext.Dispatcher` after a handler has
handled an update.
Args:
data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.bot_data` .
"""
def flush(self):
"""Will be called by :class:`telegram.ext.Updater` upon receiving a stop signal. Gives the
persistence a chance to finish up saving or close a database connection gracefully. If this
is not of any importance just pass will be sufficient.
"""
pass
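# Illustrative sketch, not part of this module: a minimal in-memory
# implementation of the interface above, showing which methods a subclass has
# to provide. The class name is hypothetical; a real persistence class would
# also write the data somewhere durable in the update_* hooks and in flush().
from collections import defaultdict
class _InMemoryPersistenceSketch(BasePersistence):
    """Keeps all persisted data in process memory (illustration only)."""
    def __init__(self):
        super().__init__(store_user_data=True, store_chat_data=True,
                         store_bot_data=True)
        self._user_data = defaultdict(dict)
        self._chat_data = defaultdict(dict)
        self._bot_data = {}
        self._conversations = {}
    def get_user_data(self):
        return self._user_data
    def get_chat_data(self):
        return self._chat_data
    def get_bot_data(self):
        return self._bot_data
    def get_conversations(self, name):
        return self._conversations.setdefault(name, {})
    def update_conversation(self, name, key, new_state):
        self._conversations.setdefault(name, {})[key] = new_state
    def update_user_data(self, user_id, data):
        self._user_data[user_id] = data
    def update_chat_data(self, chat_id, data):
        self._chat_data[chat_id] = data
    def update_bot_data(self, data):
        self._bot_data = data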
|
py | b4004ba3b421d0674edca434fa747e564593a297 |
class ProgramMemory(object):
def __init__(self, size: int, programCounter):
self._operations = [None] * size
self._programCounter = programCounter
@property
def operations(self) -> list:
return self._operations
@property
def programCounter(self):
return self._programCounter
def nextOp(self):
return self.operations[self.programCounter.address]
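# Illustrative usage sketch, not part of the original file. It assumes only
# what `nextOp` implies: the program counter object exposes an integer
# `address` attribute. `_DemoProgramCounter` is a hypothetical stand-in.
class _DemoProgramCounter(object):
    def __init__(self, address: int = 0):
        self.address = address
if __name__ == "__main__":
    pc = _DemoProgramCounter()
    memory = ProgramMemory(size=4, programCounter=pc)
    memory.operations[0] = "NOP"      # store an operation at address 0
    print(memory.nextOp())            # -> NOP
    pc.address = 1
    print(memory.nextOp())            # -> None (nothing stored there yet)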
|
py | b4004c6de9d286fece926354243de2d9255e048f | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
import sys
from cryptography import utils
from cryptography.exceptions import (
AlreadyFinalized, InvalidKey, UnsupportedAlgorithm, _Reasons
)
from cryptography.hazmat.backends.interfaces import ScryptBackend
from cryptography.hazmat.primitives import constant_time
from cryptography.hazmat.primitives.kdf import KeyDerivationFunction
# This is used by the scrypt tests to skip tests that require more memory
# than the MEM_LIMIT
_MEM_LIMIT = sys.maxsize // 2
@utils.register_interface(KeyDerivationFunction)
class Scrypt(object):
def __init__(self, salt, length, n, r, p, backend):
if not isinstance(backend, ScryptBackend):
raise UnsupportedAlgorithm(
"Backend object does not implement ScryptBackend.",
_Reasons.BACKEND_MISSING_INTERFACE
)
self._length = length
utils._check_bytes("salt", salt)
if n < 2 or (n & (n - 1)) != 0:
raise ValueError("n must be greater than 1 and be a power of 2.")
if r < 1:
raise ValueError("r must be greater than or equal to 1.")
if p < 1:
raise ValueError("p must be greater than or equal to 1.")
self._used = False
self._salt = salt
self._n = n
self._r = r
self._p = p
self._backend = backend
def derive(self, key_material):
if self._used:
raise AlreadyFinalized("Scrypt instances can only be used once.")
self._used = True
utils._check_byteslike("key_material", key_material)
return self._backend.derive_scrypt(
key_material, self._salt, self._length, self._n, self._r, self._p
)
def verify(self, key_material, expected_key):
derived_key = self.derive(key_material)
if not constant_time.bytes_eq(derived_key, expected_key):
raise InvalidKey("Keys do not match.")
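# Illustrative usage sketch, not part of this module. It assumes the default
# backend implements ScryptBackend (true for the OpenSSL backend built against
# a recent OpenSSL). Because an instance can only be used once, the
# verification step builds a second Scrypt object with the same parameters.
if __name__ == "__main__":
    import os
    from cryptography.hazmat.backends import default_backend
    salt = os.urandom(16)
    kdf = Scrypt(salt=salt, length=32, n=2 ** 14, r=8, p=1,
                 backend=default_backend())
    key = kdf.derive(b"my great password")
    Scrypt(salt=salt, length=32, n=2 ** 14, r=8, p=1,
           backend=default_backend()).verify(b"my great password", key)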
|
py | b4004d8de0af0341340c99c0d67a4e12508b4a89 | # ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.3'
# jupytext_version: 0.8.6
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
from fastai.basics import *
# In this part of the lecture we explain Stochastic Gradient Descent (SGD) which is an **optimization** method commonly used in neural networks. We will illustrate the concepts with concrete examples.
# # Linear Regression problem
# The goal of linear regression is to fit a line to a set of points.
n=100
x = torch.ones(n,2)
x[:,0].uniform_(-1.,1)
x[:5]
a = tensor(3.,2); a
y = x@a + torch.rand(n)
plt.scatter(x[:,0], y);
# You want to find **parameters** (weights) `a` such that you minimize the *error* between the points and the line `x@a`. Note that here `a` is unknown. For a regression problem the most common *error function* or *loss function* is the **mean squared error**.
def mse(y_hat, y): return ((y_hat-y)**2).mean()
# Suppose we believe `a = (-1.0,1.0)` then we can compute `y_hat` which is our *prediction* and then compute our error.
a = tensor(-1.,1)
y_hat = x@a
mse(y_hat, y)
plt.scatter(x[:,0],y)
plt.scatter(x[:,0],y_hat);
# So far we have specified the *model* (linear regression) and the *evaluation criteria* (or *loss function*). Now we need to handle *optimization*; that is, how do we find the best values for `a`? How do we find the best *fitting* linear regression.
# # Gradient Descent
# We would like to find the values of `a` that minimize `mse_loss`.
#
# **Gradient descent** is an algorithm that minimizes functions. Given a function defined by a set of parameters, gradient descent starts with an initial set of parameter values and iteratively moves toward a set of parameter values that minimize the function. This iterative minimization is achieved by taking steps in the negative direction of the function gradient.
#
# Here is gradient descent implemented in [PyTorch](http://pytorch.org/).
a = nn.Parameter(a); a
def update():
y_hat = x@a
loss = mse(y, y_hat)
if t % 10 == 0: print(loss)
loss.backward()
with torch.no_grad():
a.sub_(lr * a.grad)
a.grad.zero_()
lr = 1e-1
for t in range(100): update()
plt.scatter(x[:,0],y)
plt.scatter(x[:,0],x@a);
# ## Animate it!
from matplotlib import animation, rc
rc('animation', html='jshtml')
# +
a = nn.Parameter(tensor(-1.,1))
fig = plt.figure()
plt.scatter(x[:,0], y, c='orange')
line, = plt.plot(x[:,0], x@a)
plt.close()
def animate(i):
update()
line.set_ydata(x@a)
return line,
animation.FuncAnimation(fig, animate, np.arange(0, 100), interval=20)
# -
# In practice, we don't calculate on the whole file at once, but we use *mini-batches*.
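# As a sketch (not part of the original lesson), here is what one epoch of
# mini-batch SGD could look like for the same problem, reusing `x`, `y`, `a`,
# `mse` and `lr` from above: each step computes the gradient on a random batch
# of `bs` points instead of all `n` points.
bs = 25
def update_minibatch():
    idx = torch.randperm(n)
    for i in range(0, n, bs):
        batch = idx[i:i + bs]
        loss = mse(x[batch] @ a, y[batch])
        loss.backward()
        with torch.no_grad():
            a.sub_(lr * a.grad)
            a.grad.zero_()
for t in range(10): update_minibatch()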
# ## Vocab
# - Learning rate
# - Epoch
# - Minibatch
# - SGD
# - Model / Architecture
# - Parameters
# - Loss function
#
# For classification problems, we use *cross entropy loss*, also known as *negative log likelihood loss*. This penalizes incorrect confident predictions, and correct unconfident predictions.
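# A tiny numeric illustration of that last point (added sketch, not in the
# original notebook): writing the loss as -log of the probability assigned to
# the correct class, a confident wrong prediction is punished far more than an
# unconfident correct one.
def cross_entropy_single(pred, target): return -torch.log(pred[target])
confident_wrong = tensor([0.9, 0.1])      # 90% on class 0, true class is 1
unconfident_right = tensor([0.4, 0.6])    # 60% on class 1, true class is 1
cross_entropy_single(confident_wrong, 1), cross_entropy_single(unconfident_right, 1)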
|
py | b4004f2e29e4550d99801b16a9857d3026785b9a | """
WSGI config for CSI2999 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'CSI2999.settings')
application = get_wsgi_application()
|
py | b4004f53a936c7e7509707419cda6395c15aac56 | import datetime
import math
import os
import tarfile
from rasa_nlu.model import Interpreter
import json
import warnings
class TestIntent:
def __init__(self):
warnings.filterwarnings("ignore")
path = '../rasa_default/models'
files = os.listdir(path)
paths = [os.path.join(path, basename) for basename in files if ('.tar' in basename)]
file_name = max(paths, key=os.path.getctime)
print(file_name)
tf = tarfile.open(file_name)
tf.extractall()
self.interpreter = Interpreter.load('./nlu')
with open('../questions.json') as f:
self.questions = json.loads(f.read())['set']
with open('query_result.json', 'w') as f:
f.write('@' + str(datetime.datetime.now()) + '\n')
with open('entity_recognition.json', 'w') as f:
f.write('')
self.entities_results = []
self.direct_hit_rate = 0
self.indirect_hit_rate = 0
self.question_number = 0
def test_question(self, question, label):
self.question_number = self.question_number + 1
# print(question)
result = self.interpreter.parse(question)
intent_ranking_first_3 = result['intent_ranking'][:3]
if intent_ranking_first_3[0]['name'] == label:
self.direct_hit_rate = self.direct_hit_rate + 1
else:
print('------ a wrong question -------')
print(question)
for intent in intent_ranking_first_3[-1:]:
if label == intent['name']:
self.indirect_hit_rate = self.indirect_hit_rate + 1
with open('query_result.json', 'a') as f:
f.write('--------------------------\n')
f.write(question + ' ! ' + label + '\n')
f.write(json.dumps(result, indent=4) + '\n')
# TODO: make a file recording all the entity recognition.
with open('entity_recognition.json', 'a') as f1:
f1.write('-------------\n')
f1.write(question + '\n')
f1.write(json.dumps(result['entities'], indent=4))
new_entities_result = \
{'question': question, 'result': [{'value': x['value'], 'entity': x['entity']} for x in result['entities']]}
self.entities_results.append(new_entities_result)
def iterate_questions(self):
for category in self.questions:
label = category['label']
for question in category['questions']:
self.test_question(question, label)
def run(self):
self.iterate_questions()
print('direct hit', self.direct_hit_rate, 'out of ', self.question_number)
print('indirect hit', self.indirect_hit_rate, 'out of ', self.question_number)
print('success rate', round(self.direct_hit_rate / self.question_number * 100, 2), '%')
a_test = TestIntent()
a_test.run()
with open('entity_recognition.json', 'w') as f:
f.write(json.dumps(a_test.entities_results))
|
py | b4004f5f9b9787c09c84f4b5956c65f557364691 | """
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.densenet import DenseNet121, DenseNet169, DenseNet201
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 256
args_lr = 0.004
args_model = 'densenet169'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_no_threshold/' + job_name + '*'
total_epochs = 46
starting_epoch = 0
# first step is to update the PID
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
if '121' in args_model:
base_model = DenseNet121(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '169' in args_model:
base_model = DenseNet169(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
elif '201' in args_model:
base_model = DenseNet201(weights=None, include_top=False, input_shape=(32, 32, 3), pooling='avg')
model.add(base_model)
#model.add(layers.Flatten())
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(128, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
#model.add(layers.Dense(64, activation='relu'))
#model.add(layers.Dropout(0.5))
#model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
if epoch_waste_time > 0:
send_signal.send(args.node, 10002, message)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_no_threshold/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
message = job_name + ' checkpoint'
send_signal.send(args.node, 10002, message)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
first_epoch_start = 0
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch, first_epoch_start
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
if epoch == starting_epoch and args.resume:
first_epoch_start = time.time()
message = job_name + ' d_end'
send_signal.send(args.node, 10002, message)
elif epoch == starting_epoch:
first_epoch_start = time.time()
if epoch == starting_epoch:
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
def on_epoch_end(self, epoch, logs=None):
if epoch == starting_epoch:
first_epoch_time = int(time.time() - first_epoch_start)
message = job_name + ' 1st_epoch ' + str(first_epoch_time)
send_signal.send(args.node, 10002, message)
progress = round((epoch+1) / round(total_epochs/2), 2)
message = job_name + ' completion ' + str(progress)
send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
|
py | b4004fc236a99730143b22a4eae746e233143f3d | # Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import streamlit as st
single_file = st.file_uploader("Drop a file:", type=["txt"])
if single_file is None:
st.text("No upload")
else:
st.text(single_file.read())
multiple_files = st.file_uploader(
"Drop multiple files:", type=["txt"], accept_multiple_files=True
)
if multiple_files is None:
st.text("No upload")
else:
files = [file.read().decode() for file in multiple_files]
st.text("\n".join(files))
|
py | b4004fdc33b0d87ac8457548d7289a0945528a86 |
import numpy as np
from libgdf_cffi import ffi, libgdf
def new_column():
return ffi.new('gdf_column*')
def new_context():
return ffi.new('gdf_context*')
def unwrap_devary(devary):
return ffi.cast('void*', devary.device_ctypes_pointer.value)
def get_dtype(dtype):
return {
np.float64: libgdf.GDF_FLOAT64,
np.float32: libgdf.GDF_FLOAT32,
np.int64: libgdf.GDF_INT64,
np.int32: libgdf.GDF_INT32,
np.int16: libgdf.GDF_INT16,
np.int8: libgdf.GDF_INT8,
np.bool_: libgdf.GDF_INT8,
}[np.dtype(dtype).type]
def seed_rand():
# A constant seed for deterministic testing
np.random.seed(0xabcdef)
def gen_rand(dtype, size, **kwargs):
dtype = np.dtype(dtype)
if dtype.kind == 'f':
res = np.random.random(size=size).astype(dtype)
if kwargs.get('positive_only', False):
return res
else:
return (res * 2 - 1)
elif dtype == np.int8:
low = kwargs.get('low', -32)
high = kwargs.get('high', 32)
return np.random.randint(low=low, high=high, size=size).astype(dtype)
elif dtype.kind == 'i':
low = kwargs.get('low', -10000)
high = kwargs.get('high', 10000)
return np.random.randint(low=low, high=high, size=size).astype(dtype)
elif dtype.kind == 'b':
low = kwargs.get('low', 0)
high = kwargs.get('high', 1)
return np.random.randint(low=low, high=high, size=size).astype(np.bool)
raise NotImplementedError('dtype.kind={}'.format(dtype.kind))
def fix_zeros(arr, val=1):
arr[arr == 0] = val
def buffer_as_bits(data):
    """Expand a byte buffer into a list of booleans, least-significant bit first."""
    def fix_binary(x):
        # bin() yields e.g. '0b101'; drop the '0b' prefix, left-pad to 8 bits
        # and reverse so that index 0 is the least significant bit of the byte.
        x = x[2:]
        diff = 8 - len(x)
        return ('0' * diff + x)[::-1]
    binaries = ''.join(fix_binary(bin(x)) for x in bytearray(data))
    return list(map(lambda x: x == '1', binaries))
|
py | b400502ef9ccd624299171391d43972556bdf2f4 | from jsonrpc import ServiceProxy
import sys
import string
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
if rpcpass == "":
access = ServiceProxy("http://127.0.0.1:7644")
else:
access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:7644")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
            print access.getwork(data)
except:
            print access.getwork()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported" |
py | b4005036d0e00076f2ddc09bc84224a21e253efc | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
module: rds_boto3
short_description: Create and delete AWS VPN Virtual Gateways.
description:
- Creates RDS instances
- Modifies RDS instances
- Snapshots RDS instances
- Restores RDS instances
- Deletes RDS snapshots
- Deletes RDS instances
- Reboots RDS instances
version_added: "2.2"
requirements: [ boto3 ]
options:
command:
description:
- specifies the action to take
- absent to remove resource
required: true
default: None
choices: [ 'create', 'delete', 'modify', 'snapshot', 'reboot', 'restore' ]
db_instance_identifier:
description:
- Database instance identifier. Required except when using command=delete on just a snapshot
required: false
default: null
engine:
description:
- The type of database. Used only when command=create.
required: false
default: null
choices: [ 'mariadb', 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres', 'aurora']
allocated_storage:
description:
- Size in gigabytes of the initial storage for the DB instance. Used only when command=create or command=modify.
required: false
default: null
db_instance_class:
description:
- The instance type of the database. Must be specified when command=create. Optional when command=modify or command=restore. If not specified then the replica inherits the same instance type as the source instance.
required: false
default: null
master_username:
description:
- Master database username. Used only when command=create.
required: false
default: null
master_user_password:
description:
- Password for the master database username. Used only when command=create or command=modify.
required: false
default: null
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: true
db_name:
description:
- Name of a database to create within the instance. If not specified then no database is created. Used only when command=create.
required: false
default: null
engine_version:
description:
- Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used.
required: false
default: null
db_parameter_group_name:
description:
- Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. Used only when command=create or command=modify.
required: false
default: null
license_model:
description:
- The license model for this DB instance. Used only when command=create or command=restore.
required: false
default: null
choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ]
multi_az:
description:
- Specifies if this is a Multi-availability-zone deployment. Can not be used in conjunction with zone parameter. Used only when command=create or command=modify.
choices: [ "yes", "no" ]
required: false
default: null
iops:
description:
- Specifies the number of IOPS for the instance. Used only when command=create or command=modify. Must be a multiple between 3 and 10 of the storage amount for the DB instance
required: false
default: null
vpc_security_group_ids:
description:
- Comma separated list of one or more security groups. Used only when command=create or command=modify.
required: false
default: null
port:
description:
- Port number that the DB instance uses for connections. Used only when command=create
required: false
default: 3306 for mysql, 1521 for Oracle, 1433 for SQL Server, 5432 for PostgreSQL.
auto_minor_version_upgrade:
description:
- Indicates that minor version upgrades should be applied automatically. Used only when command=create
required: false
default: no
choices: [ "yes", "no" ]
allow_major_version_upgrade:
description:
- Indicates that major version upgrades should be applied automatically. Used only when command=create
required: false
default: no
choices: [ "yes", "no" ]
option_group_name:
description:
- The name of the option group to use. If not specified then the default option group is used. Used only when command=create or command=modify.
required: false
default: null
preferred_maintenance_window:
description:
- "Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is assigned. Used only when command=create or command=modify."
required: false
default: null
preferred_backup_window:
description:
- Backup window in format of hh24:mi-hh24:mi. If not specified then a random backup window is assigned. Used only when command=create or command=modify.
required: false
default: null
backup_retention_period:
description:
- "Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only when command=create or command=modify."
required: false
default: null
availability_zone:
description:
- availability zone in which to launch the instance. Used only when command=create or command=restore.
required: false
default: null
db_subnet_group_name:
description:
- VPC subnet group. If specified then a VPC instance is created. Used only when command=create.
required: false
default: null
db_snapshot_identifier:
description:
- Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. Used with command=delete or command=snapshot.
required: false
default: null
wait:
description:
- When command=create, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for the database to be terminated.
required: false
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 320
apply_immediately:
description:
- Used only when command=modify. If enabled, the modifications will be applied as soon as possible rather than waiting for the next preferred maintenance window.
default: no
choices: [ "yes", "no" ]
force_failover:
description:
- Used only when command=reboot. If enabled, the reboot is done using a MultiAZ failover.
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "2.0"
new_db_instance_identifier:
description:
- Name to rename an instance to. Used only when command=modify.
required: false
default: null
character_set_name:
description:
- Associate the DB instance with a specified character set. Used with command=create.
required: false
default: null
publicly_accessible:
description:
- explicitly set whether the resource should be publicly accessible or not. Used with command=create and command=modify.
required: false
default: null
storage_encrypted:
description:
- Specifies whether the DB instance is encrypted
required: false
default: false
kms_key_id:
description:
- The KMS key identifier for an encrypted DB instance. The KMS key identifier is the Amazon Resource Name (ARN) for the KMS encryption key.
required: false
default: default encryption key for the account
skip_final_snapshot:
description:
- Boolean value that determines whether a final DB snapshot is created before the DB instance is deleted. Used when commad=delete.
required: false
default: false
final_db_snapshot_identifier:
description:
- The db_snapshot_identifier of the new DBSnapshot created when skip_final_snapshot is set to false.
required: false
copy_tags_to_snapshot:
description:
- True to copy all tags from the DB instance to snapshots of the DB instance; otherwise false.
required: false
storage_type:
description:
- Specifies the storage type to be associated with the DB instance.
required: false
monitoring_interval:
description:
- The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance.
required: false
db_cluster_identifier:
description:
- The identifier of the DB cluster that the instance will belong to.
required: false
tags:
description:
- tags dict to apply to a resource. Used with command=create, command=restore.
required: false
default: null
author: Nick Aslanidis (@naslanidis)
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Create an RDS Instance
- name: Create an RDS instance
rds_boto3:
region: ap-southeast-2
profile: production
command: create
db_instance_identifier: test-instance
engine: MySQL
db_instance_class: db.t2.medium
master_username: testuser
master_user_password: password
allocated_storage: 10
db_parameter_group_name: test-param-group
option_group_name: test-option-group
vpc_security_group_ids:
- sg-d1881234
db_subnet_group_name: test-subnet
tags:
Role: Mysql
wait: yes
wait_timeout: 720
register: new_rds_instance
# Create an RDS Aurora Instance
- name: Create an Aurora RDS instance with encryption enabled
rds_boto3:
region: ap-southeast-2
profile: production
command: create
db_instance_identifier: test-aurora-instance-a
db_cluster_identifier: test-aurora-cluster
engine: aurora
engine_version: 5.6.10a
db_instance_class: db.r3.large
db_subnet_group_name: test-subnet
availability_zone: ap-southeast-2a
publicly_accessible: false
storage_encrypted: true
tags:
Role: Aurora
wait: yes
wait_timeout: 720
register: new_aurora_rds_instance
'''
try:
import json
import time
import botocore
import boto3
import q
HAS_BOTO3 = True
except ImportError:
HAS_BOTO3 = False
def wait_for_status(client, module, status):
polling_increment_secs = 5
max_retries = (module.params.get('wait_timeout') / polling_increment_secs)
status_achieved = False
for x in range(0, max_retries):
if module.params.get('command') == 'snapshot':
try:
response = get_db_snapshot(client, module)
if module.params.get('command') == 'delete' and response == None:
status_achieved = True
break
else:
if response['DBSnapshots'][0]['Status'] == status:
status_achieved = True
break
else:
time.sleep(polling_increment_secs)
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e))
else:
try:
response = get_db_instance(client, module)
if module.params.get('command') == 'delete' and response == None:
status_achieved = True
break
else:
if response['DBInstances'][0]['DBInstanceStatus'] == status:
status_achieved = True
break
else:
time.sleep(polling_increment_secs)
except botocore.exceptions.ClientError as e:
module.fail_json(msg=str(e))
result = response
return status_achieved, result
def get_db_instance(client, module, instance_name=None):
params = dict()
if instance_name:
params['DBInstanceIdentifier'] = instance_name
else:
params['DBInstanceIdentifier'] = module.params.get('db_instance_identifier')
try:
response = client.describe_db_instances(**params)
except botocore.exceptions.ClientError as e:
if 'DBInstanceNotFound' not in e.message:
module.fail_json(msg=str(e))
else:
response = None
return response
def get_db_snapshot(client, module, snapshot_name=None):
params = dict()
if module.params.get('db_instance_identifier'):
params['DBInstanceIdentifier'] = module.params.get('db_instance_identifier')
if snapshot_name:
params['DBSnapshotIdentifier'] = snapshot_name
else:
params['DBSnapshotIdentifier'] = module.params.get('db_snapshot_identifier')
try:
response = client.describe_db_snapshots(**params)
except botocore.exceptions.ClientError as e:
if 'DBSnapshotNotFound' not in e.message:
module.fail_json(msg=str(e))
else:
response = None
return response
def create_db_instance(client, module):
#DBClusterIdentifier is only for Aurora instances. If one is supplied, only a subset of params are supported.
if module.params.get('db_cluster_identifier'):
required_vars = ['db_instance_identifier','db_instance_class', 'engine']
valid_vars = ['db_cluster_identifier', 'availability_zone', 'db_subnet_group_name',
'preferred_maintenance_window', 'db_parameter_group_name',
'engine_version', 'auto_minor_version_upgrade', 'license_model', 'option_group_name',
'publicly_accessible', 'character_set_name', 'storage_encrypted', 'kms_key_id', 'tags']
else:
required_vars = ['db_instance_identifier', 'engine', 'db_instance_class', 'master_username', 'master_user_password']
valid_vars = ['db_name', 'allocated_storage', 'vpc_security_group_ids', 'availability_zone', 'db_subnet_group_name',
'preferred_maintenance_window', 'db_parameter_group_name', 'backup_retention_period', 'preferred_backup_window',
'port', 'multi_az', 'engine_version', 'auto_minor_version_upgrade', 'license_model', 'iops',
'option_group_name', 'publicly_accessible', 'character_set_name', 'storage_encrypted', 'kms_key_id', 'tags',
'db_cluster_identifier']
params = validate_parameters(required_vars, valid_vars, module)
#check if the db instance already exists
db_instance = get_db_instance(client, module)
if db_instance:
response = db_instance
changed=False
else:
try:
response = client.create_db_instance(**params)
changed = True
except botocore.exceptions.ClientError as e:
e = get_exception()
module.fail_json(msg=str(e))
if module.params.get('wait'):
status_achieved, response = wait_for_status(client, module, 'available')
if not status_achieved:
module.fail_json(msg='Error waiting for RDS instance creation - please check the AWS console')
result = response
return changed, result
def modify_db_instance(client, module):
changed = False
required_vars = ['db_instance_identifier']
valid_vars = ['allocated_storage', 'db_instance_class', 'vpc_security_group_ids', 'apply_immediately', 'master_user_password', 'preferred_maintenance_window',
'db_parameter_group_name', 'backup_retention_period', 'preferred_backup_window', 'port', 'multi_az', 'engine_version', 'auto_minor_version_upgrade',
                   'allow_major_version_upgrade', 'license_model', 'iops', 'option_group_name', 'publicly_accessible', 'new_db_instance_identifier', 'storage_type',
'copy_tags_to_snapshot', 'monitoring_interval']
NewDBInstanceIdentifier = module.params.get('new_db_instance_identifier')
params = validate_parameters(required_vars, valid_vars, module)
# change the Ports key to 'DBPortNumber'. For some reason this argument is different in the modify and create API action specs
if module.params.get('port'):
params.pop('Port')
params['DBPortNumber'] = module.params.get('port')
#get current instance so we can see if anything was actually modified and set changed accordingly
before_modify_instance = get_db_instance(client, module)
try:
response = client.modify_db_instance(**params)
except botocore.exceptions.ClientError as e:
e = get_exception()
module.fail_json(msg=str(e))
if params.get('apply_immediately'):
if NewDBInstanceIdentifier:
# Wait until the new instance name is valid
new_instance = None
while not new_instance:
new_instance = get_db_instance(client, module, NewDBInstanceIdentifier)
time.sleep(5)
#lookup instance again to see if anything was modified
after_modify_instance = get_db_instance(client, module)
if cmp(before_modify_instance['DBInstances'], after_modify_instance['DBInstances']) != 0:
changed = True
else:
changed = False
if module.params.get('wait'):
if changed:
#wait for status modifying, then wait for status available.
#Note: aurora doesn't transition modifying state
if before_modify_instance['DBInstances'][0]['Engine'] == 'aurora':
status_achieved, response = wait_for_status(client, module, 'available')
else:
status_achieved, response = wait_for_status(client, module, 'modifying')
status_achieved, response = wait_for_status(client, module, 'available')
if not status_achieved:
module.fail_json(msg='Error modifying RDS instance - please check the AWS console')
else:
response = get_db_instance(client, module)
result = response
return changed, result
def snapshot_db_instance(client, module):
changed = False
required_vars = ['db_snapshot_identifier','db_instance_identifier']
valid_vars = ['tags']
params = validate_parameters(required_vars, valid_vars, module)
db_snapshot = get_db_snapshot(client, module)
if db_snapshot['DBSnapshots']:
response = db_snapshot
changed=False
else:
try:
response = client.create_db_snapshot(**params)
changed = True
except botocore.exceptions.ClientError as e:
e = get_exception()
module.fail_json(msg=str(e))
if module.params.get('wait'):
status_achieved, response = wait_for_status(client, module, 'available')
if not status_achieved:
module.fail_json(msg='Error waiting for RDS snapshot creation - please check the AWS console')
result = response
return changed, result
def delete_db_instance_or_snapshot(client, module):
changed = False
if module.params.get('db_snapshot_identifier'):
required_vars =['db_snapshot_identifier']
valid_vars = []
params = validate_parameters(required_vars, valid_vars, module)
else:
required_vars =['db_instance_identifier']
valid_vars = ['skip_final_snapshot','final_db_snapshot_identifier']
params = validate_parameters(required_vars, valid_vars, module)
if module.params.get('db_snapshot_identifier'):
#check if the db instance exists before attempting to delete it
db_snapshot = get_db_snapshot(client, module, module.params.get('db_snapshot_identifier'))
if not db_snapshot:
response = None
changed=False
else:
try:
response = client.delete_db_snapshot(**params)
changed = True
except botocore.exceptions.ClientError as e:
e = get_exception()
module.fail_json(msg=str(e))
else:
#check if the db instance exists before attempting to delete it
db_instance = get_db_instance(client, module)
if not db_instance:
response = None
changed=False
else:
try:
response = client.delete_db_instance(**params)
changed = True
except botocore.exceptions.ClientError as e:
e = get_exception()
module.fail_json(msg=str(e))
if module.params.get('wait'):
#wait for status deleting, then wait for status deleted
status_achieved, response = wait_for_status(client, module, 'deleting')
status_achieved, response = wait_for_status(client, module, 'deleted')
if not status_achieved:
module.fail_json(msg='Error deleting RDS instance - please check the AWS console')
result = response
return changed, result
def restore_db_instance_from_snapshot(client, module):
changed = False
required_vars = ['db_instance_identifier', 'db_snapshot_identifier']
valid_vars = ['db_instance_class', 'port', 'availability_zone', 'db_subnet_group_name', 'multi_az', 'publicly_accessible',
'auto_minor_version_upgrade', 'db_name', 'engine', 'iops', 'option_group_name', 'tags', 'storage_type', 'copy_tags_to_snapshot']
params = validate_parameters(required_vars, valid_vars, module)
#check if instance already exists. If so, do nothing and return the instance
db_instance = get_db_instance(client, module)
if db_instance:
response = db_instance
changed=False
else:
try:
response = client.restore_db_instance_from_db_snapshot(**params)
changed = True
except botocore.exceptions.ClientError as e:
e = get_exception()
module.fail_json(msg=str(e))
if module.params.get('wait'):
status_achieved, response = wait_for_status(client, module, 'available')
if not status_achieved:
module.fail_json(msg='Error waiting for RDS instance creation - please check the AWS console')
result = response
return changed, result
def reboot_db_instance(client, module):
changed = False
required_vars = ['db_instance_identifier']
valid_vars = ['force_failover']
params = validate_parameters(required_vars, valid_vars, module)
try:
response = client.reboot_db_instance(**params)
changed = True
except botocore.exceptions.ClientError as e:
e = get_exception()
module.fail_json(msg=str(e))
if module.params.get('wait'):
status_achieved, response = wait_for_status(client, module, 'available')
if not status_achieved:
module.fail_json(msg='Error waiting for RDS instance reboot - please check the AWS console')
result = response
return changed, result
def validate_parameters(required_vars, valid_vars, module):
params = {}
command = module.params.get('command')
# convert snake_case args to the CamelCase params required by the API
camel_params = {
'availability_zone': 'AvailabilityZone',
'backup_retention_period': 'BackupRetentionPeriod',
'character_set_name': 'CharacterSetName',
'db_name': 'DBName',
'db_instance_identifier': 'DBInstanceIdentifier',
'db_parameter_group_name': 'DBParameterGroupName',
'db_snapshot_identifier': 'DBSnapshotIdentifier',
'vpc_security_group_ids': 'VpcSecurityGroupIds',
'new_db_instance_identifier': 'NewDBInstanceIdentifier',
'db_subnet_group_name': 'DBSubnetGroupName',
'engine': 'Engine',
'engine_version': 'EngineVersion',
'port': 'Port',
'master_username': 'MasterUsername',
'master_user_password': 'MasterUserPassword',
'option_group_name': 'OptionGroupName',
'preferred_maintenance_window': 'PreferredMaintenanceWindow',
'preferred_backup_window': 'PreferredBackupWindow',
'tags': 'Tags',
'storage_encrypted': 'StorageEncrypted',
'kms_key_id': 'KmsKeyId',
'skip_final_snapshot': 'SkipFinalSnapshot',
'final_db_snapshot_identifier': 'FinalDBSnapshotIdentifier',
'apply_immediately': 'ApplyImmediately',
'allocated_storage': 'AllocatedStorage',
'db_instance_class': 'DBInstanceClass',
'copy_tags_to_snapshot': 'CopyTagsToSnapshot',
'monitoring_interval': 'MonitoringInterval',
'multi_az': 'MultiAZ',
'license_model': 'LicenseModel',
'auto_minor_version_upgrade': 'AutoMinorVersionUpgrade',
'allow_major_version_upgrade': 'AllowMajorVersionUpgrade',
'iops': 'Iops',
'storage_type': 'StorageType',
'publicly_accessible': 'PubliclyAccessible',
'storage_encrypted': 'StorageEncrypted',
'force_failover': 'ForceFailover',
'db_cluster_identifier': 'DBClusterIdentifier'}
for (k, v) in camel_params.iteritems():
if module.params.get(k) and k not in required_vars:
if k in valid_vars:
params[v] = module.params[k]
else:
module.fail_json(msg="Parameter %s is not valid for %s command" % (k, command))
if k in required_vars:
if module.params.get(k):
params[v] = module.params[k]
else:
module.fail_json(msg="Parameter %s required for %s command" % (k, command))
tag_array = []
if module.params.get('tags'):
for tag, value in module.params.get('tags').iteritems():
tag_array.append({'Key': tag, 'Value': value})
params['Tags'] = tag_array
return params
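# Hedged illustration (not called anywhere in this module): how the snake_case ->
# CamelCase mapping and the tag conversion in validate_parameters are meant to
# behave. The parameter values below are made up for the example.
def _example_validate_parameters():
    module_params = {'db_instance_identifier': 'test-db',
                     'allocated_storage': 20,
                     'tags': {'env': 'test', 'team': 'dbops'}}
    camel_params = {'db_instance_identifier': 'DBInstanceIdentifier',
                    'allocated_storage': 'AllocatedStorage'}
    params = dict((camel_params[k], v) for k, v in module_params.items()
                  if k in camel_params and v is not None)
    params['Tags'] = [{'Key': k, 'Value': v} for k, v in module_params['tags'].items()]
    # params can now be expanded into the boto3 RDS client call, e.g.
    # client.create_db_instance(**params)
    return params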
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
region=dict(type='str', required=True),
command=dict(type='str', choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore'], required=True),
db_name=dict(type='str', required=False),
db_instance_identifier=dict(type='str', required=False),
allocated_storage=dict(type='int', required=False),
db_instance_class=dict(type='str', required=False),
engine=dict(type='str', choices=['mariadb', 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres', 'aurora'], required=False),
master_username=dict(type='str', required=False),
master_user_password=dict(type='str', no_log=True, required=False),
vpc_security_group_ids=dict(type='list', required=False),
apply_immediately=dict(type='bool', default=False),
availability_zone=dict(type='str', required=False),
db_subnet_group_name=dict(type='str', required=False),
preferred_maintenance_window=dict(type='str', required=False),
db_parameter_group_name=dict(type='str', required=False),
backup_retention_period=dict(type='int', required=False),
preferred_backup_window=dict(type='str', required=False),
copy_tags_to_snapshot=dict(type='bool', required=False),
monitoring_interval=dict(type='int', required=False),
port=dict(type='int', required=False),
multi_az=dict(type='bool', default=False),
engine_version=dict(type='str', required=False),
auto_minor_version_upgrade=dict(type='bool', required=False),
allow_major_version_upgrade=dict(type='bool', required=False),
license_model=dict(type='str', choices=['license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license'], required=False),
iops=dict(type='int', required=False),
option_group_name=dict(type='str', required=False),
publicly_accessible=dict(type='bool', required=False),
character_set_name = dict(type='str', required=False),
tags=dict(type='dict', default=None, required=False),
storage_encrypted=dict(type='bool', required=False),
kms_key_id=dict(type='str', required=False),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=320, required=False),
new_db_instance_identifier=dict(type='str', required=False),
storage_type=dict(type='str', required=False),
db_snapshot_identifier=dict(type='str', required=False),
skip_final_snapshot=dict(type='bool', default=False),
final_db_snapshot_identifier=dict(type='str', required=False),
force_failover=dict(type='bool', default=False),
db_cluster_identifier=dict(type='str', required=False)
)
)
module = AnsibleModule(argument_spec=argument_spec)
if not HAS_BOTO3:
module.fail_json(msg='json and boto3 are required.')
try:
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
client = boto3_conn(module, conn_type='client', resource='rds', region=region, endpoint=ec2_url, **aws_connect_kwargs)
except botocore.exceptions.NoCredentialsError:
e = get_exception()
module.fail_json(msg="Can't authorize connection - "+str(e))
invocations = {
'create': create_db_instance,
'modify': modify_db_instance,
'snapshot': snapshot_db_instance,
'delete': delete_db_instance_or_snapshot,
'restore': restore_db_instance_from_snapshot,
'reboot': reboot_db_instance
}
changed, results = invocations[module.params.get('command')](client, module)
module.exit_json(changed=changed, rds=results)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == '__main__':
main()
|
py | b400507d905d3c83885d989ea0157053c5080092 | from .publish import * # noqa
from .subscribe import * # noqa
__all__ = ['Publish', 'publish', 'Subscribe', 'subscribe']
|
py | b40050d0ada7be85f9c90f775526f1ec4755887b | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : fedhf\core\coordinator\base_coordinator.py
# @Time : 2022-05-03 15:41:08
# @Author : Bingjie Yan
# @Email : [email protected]
# @License : Apache License 2.0
from abc import ABC, abstractmethod
import threading
from fedhf.api import Logger
from fedhf.core import build_server, build_client
from fedhf.component import build_sampler
from fedhf.dataset import ClientDataset, build_dataset
class AbsCoordinator(ABC):
def __init__(self) -> None:
super().__init__()
@abstractmethod
def prepare(self) -> None:
raise NotImplementedError
@abstractmethod
def main(self) -> None:
raise NotImplementedError
@abstractmethod
def finish(self) -> None:
raise NotImplementedError
@abstractmethod
def run(self) -> None:
raise NotImplementedError
class SimulatedBaseCoordinator(AbsCoordinator):
def __init__(self, args) -> None:
super().__init__()
self.args = args
self.logger = Logger(self.args)
def prepare(self) -> None:
self.dataset = build_dataset(self.args.dataset)(self.args)
self.sampler = build_sampler(self.args.sampler)(self.args)
if self.args.test:
# reduce data for test
self.data = [
ClientDataset(self.dataset.trainset,
range(i * self.args.batch_size, (i + 1) * self.args.batch_size))
for i in range(self.args.num_clients)
]
else:
self.data = self.sampler.sample(self.dataset.trainset)
self.client_list = [i for i in range(self.args.num_clients)]
self.server = build_server(self.args.deploy_mode)(self.args)
def main(self) -> None:
pass
def finish(self) -> None:
self.server.model.save()
try:
if self.args.evaluate_on_client:
self.logger.info("Evaluate on client")
for client_id in self.client_list:
client = build_client(self.args.deploy_mode)(self.args, client_id)
result = client.evaluate(data=self.data[client_id], model=self.server.model)
self.logger.info(f'Client {client_id} result: {result}')
result = self.server.evaluate(self.dataset.testset)
self.logger.info(f'Server result: {result}')
self.logger.info(f'Final server model version: {self.server.model.get_model_version()}')
except KeyboardInterrupt:
self.logger.info(f'Interrupted by user.')
self.logger.info(f'All finished.')
def run(self) -> None:
self.prepare()
self.main()
self.finish()
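# Hedged sketch (not part of fedhf): a concrete coordinator usually subclasses
# SimulatedBaseCoordinator and fills in main() with the federated training loop;
# run() then drives prepare() -> main() -> finish(). The client.train call and
# its keyword arguments are assumptions made for illustration, mirroring the
# client.evaluate(...) call used in finish() above.
class ExampleCoordinator(SimulatedBaseCoordinator):
    def main(self) -> None:
        for client_id in self.client_list:
            client = build_client(self.args.deploy_mode)(self.args, client_id)
            # assumed training entry point, by analogy with client.evaluate(...)
            client.train(data=self.data[client_id], model=self.server.model)
# usage (assuming an `args` object carrying the fields referenced above):
#     ExampleCoordinator(args).run()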
|
py | b40051dc98a460390c9882f11878fa207f0451da | from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (assert_almost_equal,
assert_array_equal,
assert_equal,
assert_)
from scipy.spatial.distance import directed_hausdorff
from scipy.spatial import distance
from scipy._lib._util import check_random_state
class TestHausdorff(object):
# Test various properties of the directed Hausdorff code.
def setup_method(self):
np.random.seed(1234)
random_angles = np.random.random(100) * np.pi * 2
random_columns = np.column_stack(
(random_angles, random_angles, np.zeros(100)))
random_columns[..., 0] = np.cos(random_columns[..., 0])
random_columns[..., 1] = np.sin(random_columns[..., 1])
random_columns_2 = np.column_stack(
(random_angles, random_angles, np.zeros(100)))
random_columns_2[1:, 0] = np.cos(random_columns_2[1:, 0]) * 2.0
random_columns_2[1:, 1] = np.sin(random_columns_2[1:, 1]) * 2.0
# move one point farther out so we don't have two perfect circles
random_columns_2[0, 0] = np.cos(random_columns_2[0, 0]) * 3.3
random_columns_2[0, 1] = np.sin(random_columns_2[0, 1]) * 3.3
self.path_1 = random_columns
self.path_2 = random_columns_2
self.path_1_4d = np.insert(self.path_1, 3, 5, axis=1)
self.path_2_4d = np.insert(self.path_2, 3, 27, axis=1)
def test_symmetry(self):
# Ensure that the directed (asymmetric) Hausdorff distance is
# actually asymmetric
forward = directed_hausdorff(self.path_1, self.path_2)[0]
reverse = directed_hausdorff(self.path_2, self.path_1)[0]
assert_(forward != reverse)
def test_brute_force_comparison_forward(self):
# Ensure that the algorithm for directed_hausdorff gives the
# same result as the simple / brute force approach in the
# forward direction.
actual = directed_hausdorff(self.path_1, self.path_2)[0]
# brute force over rows:
expected = max(np.amin(distance.cdist(self.path_1, self.path_2),
axis=1))
assert_almost_equal(actual, expected, decimal=9)
def test_brute_force_comparison_reverse(self):
# Ensure that the algorithm for directed_hausdorff gives the
# same result as the simple / brute force approach in the
# reverse direction.
actual = directed_hausdorff(self.path_2, self.path_1)[0]
# brute force over columns:
expected = max(np.amin(distance.cdist(self.path_1, self.path_2),
axis=0))
assert_almost_equal(actual, expected, decimal=9)
def test_degenerate_case(self):
# The directed Hausdorff distance must be zero if both input
# data arrays match.
actual = directed_hausdorff(self.path_1, self.path_1)[0]
assert_almost_equal(actual, 0.0, decimal=9)
def test_2d_data_forward(self):
# Ensure that 2D data is handled properly for a simple case
# relative to brute force approach.
actual = directed_hausdorff(self.path_1[..., :2],
self.path_2[..., :2])[0]
expected = max(np.amin(distance.cdist(self.path_1[..., :2],
self.path_2[..., :2]),
axis=1))
assert_almost_equal(actual, expected, decimal=9)
def test_4d_data_reverse(self):
# Ensure that 4D data is handled properly for a simple case
# relative to brute force approach.
actual = directed_hausdorff(self.path_2_4d, self.path_1_4d)[0]
# brute force over columns:
expected = max(np.amin(distance.cdist(self.path_1_4d, self.path_2_4d),
axis=0))
assert_almost_equal(actual, expected, decimal=9)
def test_indices(self):
# Ensure that correct point indices are returned -- they should
# correspond to the Hausdorff pair
path_simple_1 = np.array([[-1,-12],[0,0], [1,1], [3,7], [1,2]])
path_simple_2 = np.array([[0,0], [1,1], [4,100], [10,9]])
actual = directed_hausdorff(path_simple_2, path_simple_1)[1:]
expected = (2, 3)
assert_array_equal(actual, expected)
def test_random_state(self):
# ensure that the global random state is not modified because
# the directed Hausdorff algorithm uses randomization
rs = check_random_state(None)
old_global_state = rs.get_state()
directed_hausdorff(self.path_1, self.path_2)
rs2 = check_random_state(None)
new_global_state = rs2.get_state()
assert_equal(new_global_state, old_global_state)
def test_random_state_None_int(self):
# check that seed values of None or int do not alter global
# random state
for seed in [None, 27870671]:
rs = check_random_state(None)
old_global_state = rs.get_state()
directed_hausdorff(self.path_1, self.path_2, seed)
rs2 = check_random_state(None)
new_global_state = rs2.get_state()
assert_equal(new_global_state, old_global_state)
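# Small standalone illustration (separate from the test cases above): the
# directed Hausdorff distance from u to v is the largest nearest-neighbour
# distance from points of u to the set v, which is why it is asymmetric.
def example_directed_hausdorff():
    u = np.array([[0.0, 0.0], [1.0, 0.0]])
    v = np.array([[0.0, 0.0], [0.0, 3.0]])
    d_uv = directed_hausdorff(u, v)[0]  # 1.0: (1, 0) is 1.0 away from its nearest point in v
    d_vu = directed_hausdorff(v, u)[0]  # 3.0: (0, 3) is 3.0 away from its nearest point in u
    return d_uv, d_vu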
|
py | b400523eab818847e9da42f7ee43160ea3da75f3 | # https://github.com/JonathanNickerson/talon_voice_user_scripts
import time
import talon.clip as clip
from talon.voice import Key, press, Str, Context
from ..utils import (
parse_words,
join_words,
is_not_vim,
numeral_list,
extract_num_from_m,
)
ctx = Context("generic_editor", func=is_not_vim)
ctx.set_list("n", numeral_list)
def find_next(m):
press("cmd-f")
Str(str(m.dgndictation[0]._words[0]))(None)
press("escape")
def find_previous(m):
press("left")
press("cmd-f")
Str(str(m.dgndictation[0]._words[0]))(None)
press("cmd-shift-g")
press("escape")
# jcooper-korg from talon slack
def select_text_to_left_of_cursor(m, cursorKey, clipboardSelectKey="shift-home"):
key = join_words(parse_words(m)).lower()
with clip.capture() as clipboardText:
press(clipboardSelectKey, wait=20000)
press("cmd-c", wait=20000)
press("right", wait=20000)
searchText = clipboardText.get().lower()
result = searchText.rfind(key)
if result == -1:
return False
# cursor over to the found key text and select the matching text
for i in range(result, len(searchText) - len(key)):
press(cursorKey, wait=0)
for i in range(0, len(key)):
press("shift-left", wait=0)
return True
# jcooper-korg from talon slack
def select_text_to_right_of_cursor(m, cursorKey, clipboardSelectKey="shift-end"):
key = join_words(parse_words(m)).lower()
with clip.capture() as clipboardText:
press(clipboardSelectKey, wait=20000)
press("cmd-c", wait=20000)
press("left", wait=20000)
searchText = clipboardText.get().lower()
result = searchText.find(key)
if result == -1:
return False
# cursor over to the found key text and select the matching text
for i in range(0, result):
press(cursorKey, wait=0)
for i in range(0, len(key)):
press("shift-right", wait=0)
return True
# jcooper-korg from talon slack
def select_text_on_same_line(m):
key = join_words(parse_words(m)).lower()
# first check to the left of the cursor
if (
select_text_to_left_of_cursor(
m, cursorKey="left", clipboardSelectKey="shift-ctrl-a"
)
== False
):
# if nothing found, then check to the right of the cursor
select_text_to_right_of_cursor(
m, cursorKey="right", clipboardSelectKey="shift-ctrl-e"
)
alphanumeric = "abcdefghijklmnopqrstuvwxyz0123456789_"
def big_word_neck(m):
return word_neck(m, valid_characters=set(alphanumeric) | set("/\\-_.>=<"))
def small_word_neck(m):
return word_neck(m, valid_characters=set(alphanumeric) - set("_"))
def word_neck(m, valid_characters=alphanumeric):
word_index = extract_num_from_m(m, 1)
old = clip.get()
press("shift-right", wait=2000)
press("cmd-c", wait=2000)
press("shift-left", wait=2000)
current_highlight = clip.get()
if len(current_highlight) > 1:
press("right", wait=2000)
press("shift-end", wait=2000)
time.sleep(0.25)
press("cmd-c", wait=2000)
press("left", wait=2000)
time.sleep(0.25)
text_right = clip.get().lower()
clip.set(old)
is_word = [character in valid_characters for character in text_right]
word_count = 1
i = 0
while i < (len(is_word) - 1) and not is_word[i]:
i += 1
# print("a start", i)
while i < (len(is_word) - 1) and word_count < word_index:
# print(i, is_word[i], word_count, word_index)
if not is_word[i] and is_word[i + 1]:
word_count += 1
i += 1
# warning: this is a hack, sorry
# print("i", i)
if i == 1 and is_word[0]:
i = 0
start_position = i
# print(text_right[start_position:])
while i < len(is_word) and is_word[i]:
i += 1
end_position = i
# print(start_position, end_position)
# cursor over to the found word
for i in range(0, start_position):
press("right", wait=0)
# now select the word
for i in range(0, end_position - start_position):
press("shift-right")
def big_word_prev(m):
return word_prev(m, valid_characters=set(alphanumeric) | set("/\\-_.>=<"))
def small_word_prev(m):
return word_prev(m, valid_characters=set(alphanumeric) - set("_"))
def word_prev(m, valid_characters=alphanumeric):
word_index = extract_num_from_m(m, 1)
old = clip.get()
press("shift-right", wait=2000)
press("cmd-c", wait=2000)
press("shift-left", wait=2000)
current_highlight = clip.get()
if len(current_highlight) > 1:
press("left", wait=2000)
press("shift-home", wait=2000)
time.sleep(0.25)
press("cmd-c", wait=2000)
press("right", wait=2000)
time.sleep(0.25)
text_right = clip.get().lower()
clip.set(old)
text_right = list(reversed(text_right))
is_word = [character in valid_characters for character in text_right]
word_count = 1
i = 0
while i < (len(is_word) - 1) and not is_word[i]:
i += 1
while i < (len(is_word) - 1) and word_count < word_index:
# print(i, is_word[i], word_count, word_index)
if not is_word[i] and is_word[i + 1]:
word_count += 1
i += 1
start_position = i
# print(text_right[start_position:])
while i < len(is_word) and is_word[i]:
i += 1
end_position = i
# print(start_position, end_position, text_right[start_position:end_position])
# cursor over to the found word
for i in range(0, start_position):
press("left", wait=0)
# now select the word
for i in range(0, end_position - start_position):
press("shift-left")
def word_number(m):
# lefty
press("cmd-left")
word_neck(m)
ctx.keymap(
{
# meta
"(save it | sage)": Key("cmd-s"),
"(undo it | dizzle)": Key("cmd-z"),
"(redo it | rizzle)": Key("cmd-shift-z"),
# clipboard
"(clip cut | snatch)": Key("cmd-x"),
"(clip copy | stoosh)": Key("cmd-c"),
"(clip paste | spark)": Key("cmd-v"),
"(clip paste preserve formatting | match spark)": Key("cmd-shift-alt-v"),
# motions
"([go] word left | fame)": Key("alt-left"),
"([go] word right | fish)": Key("alt-right"),
"([go] line after end)": Key("cmd-right space"),
"([go] line start | lefty)": Key("cmd-left"),
"([go] line end | ricky)": Key("cmd-right"),
"([go] line before end | smear)": Key("cmd-right left"),
# insertions
"([insert] line break | sky turn)": Key("shift-enter"),
"([insert] new line below | slap)": Key("cmd-right enter"),
"([insert] new line above | shocker)": Key("ctrl-a cmd-left enter up"),
"([insert] duplicate line | jolt)": Key(
"ctrl-a cmd-left shift-down cmd-c down cmd-v"
),
# deleting
"(delete around this | slurp)": Key("backspace delete"),
"(delete line left | snip left)": Key("shift-cmd-left delete"),
"(delete line right | snip right)": Key("shift-cmd-right delete"),
"(delete [this] line)": Key("shift-cmd-right delete delete ctrl-a cmd-left"),
"(delete word left | trough | steffi | carmex)": Key("alt-backspace"),
"(delete word right | stippy | kite)": Key("alt-delete"),
"(delete [this] word | slurpies)": Key("alt-backspace alt-delete"),
# selecting
"(crew | find right) <dgndictation> [over]": lambda m: select_text_to_right_of_cursor(
m, cursorKey="right"
),
"(select | sell) (crew | find right) <dgndictation> [over]": lambda m: select_text_to_right_of_cursor(
m, cursorKey="shift-right"
),
"(trail | find left) <dgndictation> [over]": lambda m: select_text_to_left_of_cursor(
m, cursorKey="left"
),
"(select | sell) (trail | find left) <dgndictation> [over]": lambda m: select_text_to_left_of_cursor(
m, cursorKey="shift-left"
),
"(find on line | kerleck) <dgndictation> [over]": select_text_on_same_line,
"((select | sell) this word | word this)": Key("alt-right shift-alt-left"),
"((select | sell) this line | shackle)": Key("cmd-right shift-cmd-left"),
"((select | sell) above | shift home)": Key("shift-home"),
"((select | sell) up | shreep)": Key("shift-up"),
"((select | sell) down | shroom)": Key("shift-down"),
"((select | sell) way down | shroomway)": Key("cmd-shift-down"),
"((select | sell) way up | shreepway)": Key("cmd-shift-up"),
"((select | sell) all | olly | ali)": Key("cmd-a"),
"((select | sell) left | shrim | shlicky)": Key("shift-left"),
"((select | sell) right | shrish | shricky)": Key("shift-right"),
"((select | sell) word number {generic_editor.n}* above | wordpreev {generic_editor.n}*)": word_prev,
"big word preev {generic_editor.n}*": big_word_prev,
"big word neck {generic_editor.n}*": big_word_neck,
"small word preev {generic_editor.n}*": small_word_prev,
"small word neck {generic_editor.n}*": small_word_neck,
"( (select | sell) word number {generic_editor.n}* below | wordneck {generic_editor.n}*)": word_neck,
"word {generic_editor.n}": word_number,
"((select | sell) word left | scram)": Key("alt-shift-left"),
"((select | sell) word right | scrish)": Key("alt-shift-right"),
"((select | sell) line left | lecksy)": Key("cmd-shift-left"),
"((select | sell) line right | ricksy)": Key("cmd-shift-right"),
}
)
|
py | b400529a51aea64252003e59dae0ace187563737 | from model.contact import Contact
import random
import re
def test_some_contact_deletion(app, db, check_ui):
if len(db.get_contact_list()) == 0:
app.contact.create(Contact(first_name="Created for deletion"))
old_contacts = db.get_contact_list()
contact = random.choice(old_contacts)
app.contact.delete_contact_by_id(contact.contact_id)
new_contacts = db.get_contact_list()
old_contacts.remove(contact)
assert old_contacts == new_contacts
if check_ui:
def clean(cn):
return Contact(contact_id=cn.contact_id, first_name=re.sub(r'\s+', ' ', cn.first_name.strip()),
last_name=re.sub(r'\s+', ' ', cn.last_name.strip()))
ui_contacts = sorted(app.contact.get_simple_contact_list(), key=Contact.id_or_max)
db_contacts = sorted(map(clean, db.get_contact_list()), key=Contact.id_or_max)
assert ui_contacts == db_contacts
|
py | b400532de22a18fa51c748d9bf985d1e75734b74 | # -*- coding: utf-8 -*-
"""
.. module:: pytfa
:platform: Unix, Windows
:synopsis: Thermodynamics-based Flux Analysis
.. moduleauthor:: pyTFA team
Constraints declarations
"""
from ..utils.str import camel2underscores
###################################################
### CONSTRAINTS ###
###################################################
class GenericConstraint:
"""
Class to represent a generic constraint. The purpose is that the interface
is instantiated on initialization, to follow the type of interface used
by the problem, and avoid incompatibilities in optlang
Attributes:
:id: Used for DictList comprehension. Usually points back at a
enzyme or reaction id for ease of linking. Should be unique given
a constraint type.
:name: Should be a concatenation of the id and a prefix that is
specific to the variable type. will be used to address the constraint at
the solver level, and hence should be unique in the whole cobra_model
:expr: the expression of the constraint (sympy.Expression subtype)
:cobra_model: the cobra_model hook.
:constraint: links directly to the cobra_model representation of the constraint
"""
@property
def __attrname__(self):
"""
Name the attribute the instances will have
Example: GenericConstraint -> generic_constraint
:return:
"""
return camel2underscores(self.__class__.__name__)
def __init__(self, id_, expr, model, queue=False, **kwargs):
"""
:param id_: will be used to identify the variable
(name will be a concat of this and a prefix)
:param model: the cobra.Model object
:param queue: whether or not to queue the variable for update object
:param kwargs: stuff you want to pass to the variable constructor
"""
self._id = id_
self._model = model
self.kwargs = kwargs
self._name = self.make_name()
self.get_interface(expr, queue)
self.prefix = ''
def get_interface(self, expr, queue):
"""
Called upon completion of __init__, initializes the value of self.var,
which is returned upon call, and stores the actual interfaced variable.
:return: instance of Variable from the problem
"""
if not self.name in self.model.constraints:
constraint = self.model.problem.Constraint(expression = expr,
name = self.name,
**self.kwargs)
if not queue:
self.model.add_cons_vars(constraint)
else:
self.model._cons_queue.append(constraint)
else:
self.constraint = self.model.constraints.get(self.name)
def make_name(self):
"""
Needs to be overridden by the subclass, concats the id with a
prefix
:return: None
"""
return self.prefix + self.id
@property
def expr(self):
return self.constraint.expression
@expr.setter
def expr(self,value):
self.constraint.expression = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def id(self):
"""
for cobra.thermo.DictList compatibility
:return:
"""
return self._id
@property
def constraint(self):
return self.model.constraints[self.name]
@constraint.setter
def constraint(self, value):
self.model.constraints[self.name] = value
@property
def model(self):
return self._model
def __repr__(self):
return self.name + ': ' + self.constraint.expression.__repr__()
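# Minimal sketch (not part of pyTFA): a new constraint type only needs to pick a
# unique prefix; make_name() then derives the solver-level name as prefix + id,
# exactly as the subclasses below do. The prefix and bound keywords here are
# hypothetical.
class ExampleCustomConstraint(GenericConstraint):
    """
    EC_<id>: <expr>, with bounds passed through **kwargs (e.g. lb=0, ub=0)
    """
    prefix = 'EC_'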
class ReactionConstraint(GenericConstraint):
"""
Class to represent a constraint attached to a reaction
"""
def __init__(self, reaction, expr, **kwargs):
self.reaction = reaction
model = reaction.model
GenericConstraint.__init__(self,
id_=self.id,
expr=expr,
model=model,
**kwargs)
@property
def id(self):
return self.reaction.id
@property
def model(self):
return self.reaction.model
class MetaboliteConstraint(GenericConstraint):
"""
Class to represent a constraint attached to a metabolite
"""
def __init__(self, metabolite, expr, **kwargs):
self.metabolite = metabolite
model = metabolite.model
GenericConstraint.__init__(self,
id_=self.id,
expr=expr,
model=model,
**kwargs)
@property
def id(self):
return self.metabolite.id
@property
def model(self):
return self.metabolite.model
class NegativeDeltaG(ReactionConstraint):
"""
Class to represent thermodynamics constraints.
G: - DGR_rxn + DGoRerr_Rxn + RT * StoichCoefProd1 * LC_prod1
+ RT * StoichCoefProd2 * LC_prod2
+ RT * StoichCoefSub1 * LC_subs1
+ RT * StoichCoefSub2 * LC_subs2
- ...
= 0
"""
prefix = 'G_'
class ForwardDeltaGCoupling(ReactionConstraint):
"""
Class to represent thermodynamics coupling: DeltaG of reactions has to be
DGR < 0 for the reaction to proceed forwards
Looks like:
FU_rxn: 1000 FU_rxn + DGR_rxn < 1000
"""
def __init__(self, reaction, expr, **kwargs):
ReactionConstraint.__init__(self, reaction, expr, **kwargs)
prefix = 'FU_'
class BackwardDeltaGCoupling(ReactionConstraint):
"""
Class to represent thermodynamics coupling: DeltaG of reactions has to be
DGR > 0 for the reaction to proceed backwards
Looks like:
BU_rxn: 1000 BU_rxn - DGR_rxn < 1000
"""
def __init__(self, reaction, expr, **kwargs):
ReactionConstraint.__init__(self, reaction, expr, **kwargs)
prefix = 'BU_'
class ForwardDirectionCoupling(ReactionConstraint):
"""
Class to represent a forward directionality coupling with thermodynamics on
reaction variables
Looks like :
UF_rxn: F_rxn - M FU_rxn < 0
"""
def __init__(self, reaction, expr, **kwargs):
ReactionConstraint.__init__(self, reaction, expr, **kwargs)
prefix = 'UF_'
class BackwardDirectionCoupling(ReactionConstraint):
"""
Class to represent a backward directionality coupling with thermodynamics on
reaction variables
Looks like :
UR_rxn: R_rxn - M RU_rxn < 0
"""
def __init__(self, reaction, expr, **kwargs):
ReactionConstraint.__init__(self, reaction, expr, **kwargs)
prefix = 'UR_'
class SimultaneousUse(ReactionConstraint):
"""
Class to represent a simultaneous use constraint on reaction variables
Looks like:
SU_rxn: FU_rxn + BU_rxn <= 1
"""
prefix = 'SU_'
class DisplacementCoupling(ReactionConstraint):
"""
Class to represent the coupling to the thermodynamic displacement
Looks like:
Ln(Gamma) - (1/RT)*DGR_rxn = 0
"""
prefix = 'DC_'
class ForbiddenProfile(GenericConstraint):
"""
Class to represent a forbidden net flux directionality profile
Looks like:
FU_rxn_1 + BU_rxn_2 + ... + FU_rxn_n <= n-1
"""
def __init__(self, model, expr, id_, **kwargs):
GenericConstraint.__init__(self,
id_=id_,
expr=expr,
model=model,
**kwargs)
prefix = 'FP_' |
py | b400533eb5a9314b50f7cf77b407e6947dfeaa84 | from typing import Optional, List, Union
from discord import (
Message,
Embed,
Attachment,
AllowedMentions,
InvalidArgument,
File,
MessageFlags,
)
from discord.http import Route, HTTPClient
from discord.abc import Messageable, Snowflake
from discord.ext.commands import Context
from .utils import _get_components_json, _form_files
from .component import (
Select,
SelectOption,
_get_component_type,
ActionRow,
Component,
Button,
)
__all__ = ("ComponentMessage",)
class ComponentMessage(Message):
__slots__ = tuple(list(Message.__slots__) + ["components", "ephemeral"])
def __init__(self, *, state, channel, data, ephemeral=False):
super().__init__(state=state, channel=channel, data=data)
self.ephemeral = ephemeral
components = []
for i in data["components"]:
components.append(ActionRow())
for j in i["components"]:
components[-1].append(_get_component_type(j["type"]).from_json(j))
self.components: List[ActionRow] = components
def get_component(self, custom_id: str) -> Optional[Component]:
for row in self.components:
for component in row.components:
if component.custom_id == custom_id:
return component
async def disable_components(self) -> None:
await self.edit(
components=[row.disable_components() for row in self.components],
)
async def click_component(self, custom_id: str) -> None:
component = self.get_component(custom_id)
if component is None:
raise LookupError("Component not found")
if isinstance(component, Button):
return await self._state.http.click_button(self, component)
else:
raise TypeError(f"{component} is not a button")
async def select_option(self, select: Select, option: SelectOption) -> None:
return await self._state.http.select_option(self, select, option)
async def edit(
self,
content: Optional[str] = None,
embed: Optional[Embed] = None,
embeds: List[Embed] = None,
suppress: bool = None,
attachments: List[Attachment] = None,
delete_after: Optional[float] = None,
allowed_mentions: Optional[AllowedMentions] = None,
components: List[Union[ActionRow, Component, List[Component]]] = None,
**fields,
):
if self.ephemeral:
return
state = self._state
data = {}
if content is not None:
data["content"] = content
if embed is not None and embeds is not None:
raise InvalidArgument(
"cannot pass both embed and embeds parameter to edit()"
)
if embed is not None:
data["embeds"] = [embed.to_dict()]
if embeds is not None:
data["embeds"] = [e.to_dict() for e in embeds]
if suppress is not None:
flags = MessageFlags._from_value(0)
flags.suppress_embeds = True
data["flags"] = flags.value
if allowed_mentions is None:
if (
state.allowed_mentions is not None
and self.author.id == self._state.self_id
):
data["allowed_mentions"] = state.allowed_mentions.to_dict()
else:
if state.allowed_mentions is not None:
data["allowed_mentions"] = state.allowed_mentions.merge(
allowed_mentions
).to_dict()
else:
data["allowed_mentions"] = allowed_mentions.to_dict()
if attachments is not None:
data["attachments"] = [a.to_dict() for a in attachments]
if components is not None:
data["components"] = _get_components_json(components)
if data:
await state.http.request(
Route(
"PATCH",
"/channels/{channel_id}/messages/{message_id}",
channel_id=self.channel.id,
message_id=self.id,
),
json=data,
)
if delete_after is not None:
await self.delete(delay=delete_after)
async def delete(self, *args, **kwargs):
if self.ephemeral:
return
return await super().delete(*args, **kwargs)
def new_override(cls, *args, **kwargs):
if isinstance(cls, Message):
return object.__new__(ComponentMessage)
else:
return object.__new__(cls)
Message.__new__ = new_override
def send_files(
self,
channel_id: Snowflake,
*,
files,
content=None,
tts=False,
embed=None,
embeds=None,
stickers=None,
nonce=None,
allowed_mentions=None,
message_reference=None,
components=None,
):
data = {"tts": tts}
if content is not None:
data["content"] = content
if embed is not None:
data["embeds"] = [embed]
if embeds is not None:
data["embeds"] = embeds
if nonce is not None:
data["nonce"] = nonce
if allowed_mentions is not None:
data["allowed_mentions"] = allowed_mentions
if message_reference is not None:
data["message_reference"] = message_reference
if stickers is not None:
data["sticker_ids"] = stickers
if components is not None:
data["components"] = components
form = _form_files(data, files, use_form=False)
return self.request(
Route("POST", "/channels/{channel_id}/messages", channel_id=channel_id),
form=form,
files=files,
)
def click_button(self, msg: Message, button: Button):
route = Route("POST", "/interactions")
data = {"component_type": 2, "custom_id": str(button.id)}
values = {
"application_id": str(msg.author.id),
"channel_id": str(msg.channel.id),
"type": "3",
"data": data,
"guild_id": str(msg.guild.id),
"message_flags": 1,
"message_id": str(msg.id),
"session_id": str(msg.channel._state._get_websocket().session_id),
}
return self.request(route, json=values)
def select_option(self, msg: Message, select: Select, option: SelectOption):
route = Route("POST", "/interactions")
data = {
"component_type": 3,
"custom_id": str(select.id),
"values": [str(option.value)],
"type": 3,
}
values = {
"application_id": str(msg.author.id),
"channel_id": str(msg.channel.id),
"type": "3",
"data": data,
"guild_id": str(msg.guild.id),
"message_flags": 0,
"message_id": str(msg.id),
}
return self.request(route, json=values)
def send_message(
self,
channel_id,
content,
*,
tts=False,
embed=None,
embeds=None,
nonce=None,
allowed_mentions=None,
message_reference=None,
stickers=None,
components=None,
):
payload = {"tts": tts}
if content is not None:
payload["content"] = content
if embed is not None:
payload["embeds"] = [embed]
if embeds is not None:
payload["embeds"] = embeds
if nonce is not None:
payload["nonce"] = nonce
if allowed_mentions is not None:
payload["allowed_mentions"] = allowed_mentions
if message_reference is not None:
payload["message_reference"] = message_reference
if stickers is not None:
payload["sticker_ids"] = stickers
if components is not None:
payload["components"] = components
return self.request(
Route("POST", "/channels/{channel_id}/messages", channel_id=channel_id),
json=payload,
)
HTTPClient.send_files = send_files
HTTPClient.send_message = send_message
HTTPClient.click_button = click_button
HTTPClient.select_option = select_option
async def send(
self,
content=None,
*,
tts=False,
embed=None,
embeds=None,
file=None,
files=None,
stickers=None,
delete_after=None,
nonce=None,
allowed_mentions=None,
reference=None,
mention_author=None,
components=None,
):
state = self._state
channel = await self._get_channel()
content = str(content) if content is not None else None
if embed is not None and embeds is not None:
raise InvalidArgument("cannot pass both embed and embeds parameter to send()")
if embed is not None:
embeds = [embed.to_dict()]
elif embeds is not None:
if len(embeds) > 10:
raise InvalidArgument(
"embeds parameter must be a list of up to 10 elements"
)
embeds = [embed.to_dict() for embed in embeds]
if stickers is not None:
stickers = [sticker.id for sticker in stickers]
if allowed_mentions is not None:
if state.allowed_mentions is not None:
allowed_mentions = state.allowed_mentions.merge(allowed_mentions).to_dict()
else:
allowed_mentions = allowed_mentions.to_dict()
else:
allowed_mentions = state.allowed_mentions and state.allowed_mentions.to_dict()
if mention_author is not None:
allowed_mentions = allowed_mentions or AllowedMentions().to_dict()
allowed_mentions["replied_user"] = bool(mention_author)
if reference is not None:
try:
reference = reference.to_message_reference_dict()
except AttributeError:
raise InvalidArgument(
"reference parameter must be Message or MessageReference"
) from None
if components is not None:
components = _get_components_json(components)
if file is not None and files is not None:
raise InvalidArgument("cannot pass both file and files parameter to send()")
if file is not None:
if not isinstance(file, File):
raise InvalidArgument("file parameter must be File")
try:
data = await state.http.send_files(
channel.id,
files=[file],
allowed_mentions=allowed_mentions,
content=content,
tts=tts,
embed=embed,
embeds=embeds,
nonce=nonce,
message_reference=reference,
stickers=stickers,
components=components,
)
finally:
file.close()
elif files is not None:
if len(files) > 10:
raise InvalidArgument("files parameter must be a list of up to 10 elements")
elif not all(isinstance(file, File) for file in files):
raise InvalidArgument("files parameter must be a list of File")
try:
data = await state.http.send_files(
channel.id,
files=files,
content=content,
tts=tts,
embed=embed,
embeds=embeds,
nonce=nonce,
allowed_mentions=allowed_mentions,
message_reference=reference,
stickers=stickers,
components=components,
)
finally:
for f in files:
f.close()
else:
data = await state.http.send_message(
channel.id,
content,
tts=tts,
embed=embed,
embeds=embeds,
nonce=nonce,
allowed_mentions=allowed_mentions,
message_reference=reference,
stickers=stickers,
components=components,
)
ret = ComponentMessage(state=state, channel=channel, data=data)
if delete_after is not None:
await ret.delete(delay=delete_after)
return ret
async def send_override(context_or_channel, *args, **kwargs):
if isinstance(context_or_channel, Context):
channel = context_or_channel.channel
else:
channel = context_or_channel
return await send(channel, *args, **kwargs)
async def fetch_message(context_or_channel, id: int):
if isinstance(context_or_channel, Context):
channel = context_or_channel.channel
else:
channel = context_or_channel
state = channel._state
data = await state.http.get_message(channel.id, id)
return ComponentMessage(state=state, channel=channel, data=data)
Messageable.send = send_override
Messageable.fetch_message = fetch_message
|
py | b40053aa364bc7b980bf3d99848ad21d8afa2bb5 | #!/usr/bin/env python
""" Check all the customer pods status on every compute node, send status code "1" if all pods on a compute node are not running status """
# Adding the ignore because it does not like the naming of the script
# to be different than the class name
# pylint: disable=invalid-name
# pylint: disable=wrong-import-position
# pylint: disable=broad-except
# pylint: disable=line-too-long
import argparse
import time
import logging
logging.basicConfig(
format='%(asctime)s - %(relativeCreated)6d - %(levelname)-8s - %(message)s',
)
logger = logging.getLogger()
logger.setLevel(logging.WARN)
# Our jenkins server does not include these rpms.
# In the future we might move this to a container where these
# libs might exist
#pylint: disable=import-error
from openshift_tools.monitoring.ocutil import OCUtil
from openshift_tools.monitoring.metric_sender import MetricSender
ocutil = OCUtil()
def runOCcmd_yaml(cmd, base_cmd='oc'):
""" log commands through ocutil """
logger.info(base_cmd + " " + cmd)
ocy_time = time.time()
ocy_result = ocutil.run_user_cmd_yaml(cmd, base_cmd=base_cmd, )
logger.info("oc command took %s seconds", str(time.time() - ocy_time))
return ocy_result
def parse_args():
""" parse the args from the cli """
parser = argparse.ArgumentParser(description='Check all the nodes pods Status')
parser.add_argument('-s', '--skip_namespaces', nargs='+', help='namespaces exception list that we should avoid to check', required=True)
parser.add_argument('-v', '--verbose', action='count', default=0,
help='verbosity level, specify multiple')
args = parser.parse_args()
if args.verbose > 0:
logger.setLevel(logging.INFO)
if args.verbose > 1:
logger.setLevel(logging.DEBUG)
return args
def check_node_pods_status(nsList):
"""get all the info of all node """
result_status = 0
nsFilter = ""
for ns in nsList:
nsFilter += ",metadata.namespace!="+ns
node_info = runOCcmd_yaml("get node ")
for item in node_info['items']:
nodeName = item['metadata']['name']
logger.info("Checking node: %s", item['metadata']['name'])
node_not_running_pods = runOCcmd_yaml("get pods --all-namespaces --field-selector='spec.nodeName="+nodeName+",status.phase!=Running"+nsFilter+"'")
node_pods = runOCcmd_yaml("get pods --all-namespaces --field-selector='spec.nodeName="+nodeName+nsFilter+"'")
if len(node_not_running_pods['items']) == len(node_pods['items']):
result_status = 1
logger.warn("Node: %s, all pods are not running", item['metadata']['name'])
return result_status
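# Illustrative helper (not used by the check above): the field-selector strings
# that check_node_pods_status builds for one node, given namespaces to skip.
# The node name and namespaces are placeholders.
def example_field_selectors(node_name="compute-1", skip_namespaces=("default", "openshift-infra")):
    ns_filter = "".join(",metadata.namespace!=" + ns for ns in skip_namespaces)
    not_running_selector = "spec.nodeName=" + node_name + ",status.phase!=Running" + ns_filter
    all_pods_selector = "spec.nodeName=" + node_name + ns_filter
    return not_running_selector, all_pods_selector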
def main():
""" check all the node pods tatus see if any node have problem """
args = parse_args()
logger.debug("args: ")
logger.debug(args)
nsList = args.skip_namespaces
pods_status = check_node_pods_status(nsList)
#send the value to zabbix
mts = MetricSender(verbose=args.verbose)
mts.add_metric({'openshift.nodes.pods.status': pods_status})
mts.send_metrics()
if __name__ == "__main__":
main()
|
py | b40053c8538b7ab4246cc3f6c0401a67a17257e0 | from django.db import models
from localflavor.us.models import USStateField
class Category(models.Model):
title = models.CharField(max_length=30)
def __str__(self):
return self.title
class Meta:
ordering = ['title']
verbose_name_plural = "categories"
class CamManager(models.Manager):
def belongs_to_category(self, cat=None):
'''
Returns a queryset of all Cams associated with specified Category.
'''
qs = super(CamManager, self).get_queryset()
if cat:
qs = qs.filter(category=cat)
return qs
else:
return qs
class Cam(models.Model):
title = models.CharField(max_length=100)
url = models.URLField()
description = models.TextField(blank=True)
category = models.ForeignKey(Category, on_delete=models.CASCADE)
state = USStateField(default="CO")
objects = CamManager()
def __str__(self):
return self.title
class Meta:
ordering = ['title', 'url']
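# Illustrative usage of the manager above (requires a configured Django project
# and migrations for these models; the title and URL are made up).
def example_category_filter():
    traffic, _ = Category.objects.get_or_create(title="Traffic")
    Cam.objects.get_or_create(title="I-70 Eisenhower Tunnel",
                              url="https://example.com/cam",
                              category=traffic, state="CO")
    # only cams in that category; calling with no argument returns all cams
    return Cam.objects.belongs_to_category(traffic)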
|
py | b4005537f60e526404521fcd1dd490391dd3f489 | from collections import defaultdict, deque
class Solution:
def minJumps(self, arr) -> int:
# value: [indices]
graph = defaultdict(set)
for index, value in enumerate(arr):
graph[value].add(index)
queue = deque([0])
visited = [False for i in range(len(arr))]
target = len(arr) - 1
steps = 0
while queue:
level_size = len(queue)
for i in range(level_size):
curr = queue.popleft()
if curr == target:
return steps
visited[curr] = True
for neighbor in graph[arr[curr]]:
if not visited[neighbor]:
queue.append(neighbor)
graph[arr[curr]].clear()
for neighbor in [curr-1, curr+1]:
if 0 <= neighbor <= target and not visited[neighbor]:
queue.append(neighbor)
steps += 1
return steps
class SolutionBidirectionalBFS:
def minJumps(self, arr) -> int:
# value: [indices]
graph = defaultdict(set)
for index, value in enumerate(arr):
graph[value].add(index)
visited = [False for i in range(len(arr))]
target = len(arr) - 1
head_queue = set([0])
tail_queue = set([target])
steps = 0
# bidirectional BFS
# alternating the queue in the forward and backward directions
# pick the queue with less elements to advance
while head_queue:
if len(tail_queue) < len(head_queue):
head_queue, tail_queue = tail_queue, head_queue
next_queue = set()
while head_queue:
curr = head_queue.pop()
if curr in tail_queue:
return steps
visited[curr] = True
for neighbor in graph[arr[curr]]:
if not visited[neighbor]:
next_queue.add(neighbor)
graph[arr[curr]].clear()
for neighbor in [curr-1, curr+1]:
if 0 <= neighbor <= target and not visited[neighbor]:
next_queue.add(neighbor)
head_queue = next_queue
steps += 1
return steps
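# Quick sanity check for both implementations, using the classic example where
# the minimum is 3 jumps (index 0 -> 4 -> 3 -> 9).
if __name__ == "__main__":
    sample = [100, -23, -23, 404, 100, 23, 23, 23, 3, 404]
    print(Solution().minJumps(sample))                   # 3
    print(SolutionBidirectionalBFS().minJumps(sample))   # 3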
|
py | b400565cd0f2f6a3ecd2534fb42a0f284ab2f2d4 | """ This submodule contains most of the user-facing code, as the objects
`Sky` and `Instrument` can be used for most of the functionality of the
code, without delving into the different `Model` objects.
Objects:
Sky
"""
import toml
from . import units as u
from .utils import bandpass_unit_conversion
from . import data
from .models import Model
from .models import *
def remove_class_from_dict(d):
"""Return a copy of dictionary without the key "class" """
return {k: d[k] for k in d.keys() if k != "class"}
def create_components_from_config(config, nside, map_dist=None):
output_components = []
for model_name, model_config in config.items():
try:
class_name = model_config["class"]
except KeyError: # multiple components
partial_components = []
for each_config in model_config.values():
class_name = each_config["class"]
component_class = globals()[class_name]
partial_components.append(
component_class(
**remove_class_from_dict(each_config),
nside=nside,
map_dist=map_dist
)
)
output_component = Sky(
component_objects=partial_components, nside=nside, map_dist=map_dist
)
else:
component_class = globals()[class_name]
output_component = component_class(
**remove_class_from_dict(model_config), nside=nside, map_dist=map_dist
)
output_components.append(output_component)
return output_components
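# Hedged sketch of the dictionary shape this function expects (never executed
# here): "class" must name a Model subclass importable from pysm.models and the
# remaining keys are that class's constructor arguments; a nested dict without a
# top-level "class" groups several sub-components into one Sky. The entry below
# is hypothetical.
_example_component_config = {
    "mycomponent": {
        "class": "ModifiedBlackBody",       # any Model subclass name
        "map_I": "path/to/template.fits",   # constructor kwargs for that class
        "freq_ref_I": "545 GHz",
    },
}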
try:
import importlib.resources as pkg_resources
except ImportError:
# Try backported to PY<37 `importlib_resources`.
import importlib_resources as pkg_resources
PRESET_MODELS = toml.loads(pkg_resources.read_text(data, "presets.cfg"))
class Sky(Model):
"""Sky is the main interface to PySM
It accepts the configuration of the desired components in 3 different
ways: `preset_strings`, `component_config` or `component_objects`,
see details below.
Once a Sky object is created, all the sky components are initialized,
i.e. loading the input templates.
Then bandpass-integrated maps can be computed calling the
`get_emission` method.
Check the :func:`~pysm.apply_smoothing_and_coord_transform` function
for applying a beam and transform coordinates to the map arrays
from `get_emission`.
See the tutorials section of the documentation for examples.
Attributes
----------
components: list(pysm.Model object)
List of `pysm.Model` objects.
"""
def __init__(
self,
nside=None,
preset_strings=None,
component_config=None,
component_objects=None,
output_unit=u.uK_RJ,
map_dist=None,
):
"""Initialize Sky
Parameters
----------
nside : int
Requested output NSIDE, inputs will be degraded
using :func:`healpy.ud_grade`
preset_strings : list of str
List of strings identifiers for the models included in PySM 3,
these are exactly the same models included in PySM 2, e.g.
`["d2", "s1", "a1"]`, see the documentation for details about the
available models.
component_config : dict or TOML filename
Modify the configuration of one of the included components or create
a new component based on a Python dictionary or a TOML filename,
see for example the TOML configuration file for the `presets.cfg`
file in the `data` folder of the package.
component_objects : list of Model subclasses
List of component objects already initialized, typically subclasses of PySM.Model
This is the most flexible way to provide a custom model to PySM
output_unit : astropy Unit or string
Astropy unit, e.g. "K_CMB", "MJ/sr"
map_dist: pysm.MapDistribution
Distribution object used for parallel computing with MPI
"""
if nside is None and not component_objects: # not None and not []
raise Exception("Need to specify nside in Sky")
elif nside is None:
nside = component_objects[0].nside
elif component_objects:
for comp in component_objects:
assert (
nside == comp.nside
), "Component objects should have same NSIDE of Sky"
super().__init__(nside=nside, map_dist=map_dist)
self.components = component_objects if component_objects is not None else []
# otherwise instantiate the sky object from list of predefined models,
# identified by their strings. These are defined in `pysm.presets`.
if component_config is None:
component_config = {}
elif not isinstance(component_config, dict):
component_config = toml.load(component_config)
if preset_strings is not None:
assert isinstance(preset_strings, list), "preset_strings should be a list"
for string in preset_strings:
component_config[string] = PRESET_MODELS[string]
if len(component_config) > 0:
self.components += create_components_from_config(
component_config, nside=nside, map_dist=map_dist
)
self.output_unit = u.Unit(output_unit)
def add_component(self, component):
self.components.append(component)
def get_emission(self, freq, weights=None):
""" This function returns the emission at a frequency, set of
frequencies, or over a bandpass.
"""
output = self.components[0].get_emission(freq, weights=weights)
for comp in self.components[1:]:
output += comp.get_emission(freq, weights=weights)
return output * bandpass_unit_conversion(freq, weights, self.output_unit)
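# Minimal usage sketch, following the docstrings above: build a low-resolution
# sky from preset component strings and evaluate it at a single frequency
# (requires the bundled template data to be reachable/cached).
if __name__ == "__main__":
    sky = Sky(nside=128, preset_strings=["d1", "s1"])
    m = sky.get_emission(100 * u.GHz)   # (3, npix) IQU map in uK_RJ
    print(m[0, :3])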
|
py | b4005671e2567eb5a050d656e557b1207db80653 | #!/usr/bin/env nix-shell
#!nix-shell -p python3Packages.docopt python3Packages.pyqt5 python3Packages.notify2 python3Packages.requests qt5.qtbase -i python3 # noqa
""" usage: chapter-marker [options] TITLEFILE [SHOW]
options:
--settings-dir=DIR The base directory of the chapter-marker [Default: ~/.local/share/chapter-marker]
--disable-notification If set notify2 is disables which can help with dbus issues
starts chapter-marker with the given TITLE and starts with the top entry of TITLEFILE
if the chapter marker file for this SHOW already exists it will be backed up.
"""
import logging
import pickle
import sys
from datetime import datetime, timedelta
from os.path import expanduser, join
from docopt import docopt
from pynput import keyboard
from PyQt5 import QtGui, QtWidgets
from PyQt5.QtCore import QObject, pyqtSignal
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QMenu, QSystemTrayIcon
from . import resources # noqa F401
try:
import notify2
except ImportError:
import fakenotify2 as notify2
now = datetime.now
log = logging.getLogger("chapter-tray")
logging.basicConfig(level=logging.INFO)
def current_show():
import urllib.request
url = "https://pad.binaergewitter.de/"
ret = urllib.request.urlopen(url)
return ret.geturl().split("/")[-1]
class KeyBoardManager(QObject):
j_signal = pyqtSignal()
u_signal = pyqtSignal()
def start(self):
self.hotkeys = keyboard.GlobalHotKeys({"<ctrl>+j": self.j_signal.emit, "<ctrl>+u": self.u_signal.emit})
self.hotkeys.start()
class LeftClickMenu(QtWidgets.QMenu):
def __init__(self, parent=None):
QtWidgets.QMenu.__init__(self, "File", parent)
self.dateAction = QtWidgets.QAction(QIcon(":/icons/start-date.png"), "Start Date", self)
self.addAction(self.dateAction)
self.currentChapterAction = QtWidgets.QAction(QIcon(":/icons/current-chapter.png"), "Current Chapter", self)
self.addAction(self.currentChapterAction)
self.nextChapterAction = QtWidgets.QAction(QIcon(":/icons/next-chapter.png"), "Next Chapter", self)
self.addAction(self.nextChapterAction)
class ChapterEntry:
def __init__(self, title, is_comment=False, delta=None):
self.title = title
self.is_comment = is_comment
self.delta = delta # timedelta
def __str__(self):
if self.is_comment:
return "# " + self.title
elif self.delta is None:
return f"not-started {self.title}"
else:
m, s = divmod(self.delta.seconds, 60)
h, m = divmod(m, 60)
millis = round(self.delta.microseconds / 1000)
return f"{h:02}:{m:02}:{s:02}.{millis:03} {self.title}"
def to_simple_element(self):
raise NotImplementedError("sorry")
states = ["preshow", "show", "postshow"]
class ChapterMarkFile:
active_chapter = 0
def __init__(self, show: str, titles: list, location: str):
log.info(f"Initialize ChapterMarkFile for show {show} at {location}")
self.initial_titles = titles.copy()
self.show = show
self.location = location
# if location is empty:
self.state = "preshow" #
self.timers = {} # timer for preshow, show und postshow
self.initialize_chapters(titles)
def reset(self):
""" clean up everything and start a new show """
self.active_chapter = 0
self.__init__(self.show, self.initial_titles, self.location)
def set_state(self, state):
self.state = state
self.timers[state] = now()
def initialize_chapters(self, titles):
# put "hallihallo" at the top
titles.insert(0, "Hallihallo und Herzlich Willkommen")
self.storage = [ChapterEntry(title) for title in titles]
self.active_chapter = 0
self.add_comment(f"Preshow für '{self.show}' gestartet um {now().replace(microsecond=0)}")
self.set_state("preshow")
def add_comment(self, text, before=True):
loc = self.active_chapter if before else self.active_chapter + 1
log.info(f"Comment {'before' if before else 'after'}: {text}")
notify2.Notification(text).show()
self.storage.insert(loc, ChapterEntry(text, is_comment=True))
self.active_chapter += 1
def begin(self):
# initialize the time tracker
self.started = True
self.start_date = now()
self.set_state("show")
duration = self.timers["show"] - self.timers["preshow"]
m, s = divmod(duration.seconds, 60)
h, m = divmod(m, 60)
self.add_comment(f"Preshow ende um {self.start_date.replace(microsecond=0) } ({h:02}:{m:02}:{s:02} Vorgeplänkel)")
self.get_current().delta = timedelta(seconds=0)
def end(self):
log.info(f"Start Postshow at {now()}")
self.set_state("postshow")
duration = self.timers["postshow"] - self.timers["show"]
m, s = divmod(duration.seconds, 60)
h, m = divmod(m, 60)
self.add_comment(f"Postshow beginnt um {now()} ({h:02}:{m:02}:{s:02} Show)", before=False)
log.debug(self)
def get_current(self):
return self.storage[self.active_chapter]
def get_next(self):
return self.storage[self.active_chapter + 1]
def last_chapter(self):
return self.active_chapter == len(self.storage) - 1
def begin_next(self) -> bool:
""" moves to the next chapter, returns false if this was not possible, else true"""
if self.last_chapter():
log.info("cannot go beyond last chapter")
return False
elif self.state != "show":
log.info("Show has not started yet, start show first!")
return False
self.active_chapter += 1
log.debug(f"Current Chapter: {self.active_chapter}, Total Chapters: {len(self.storage)}")
active = self.get_current()
active.delta = now() - self.timers["show"]
if self.last_chapter():
log.info("at last chapter")
self.end()
# print(self)
return True
def persist(self):
dbpath = join(self.location, self.show + ".db")
with open(dbpath, "wb+") as f:
log.info(f"writing chaptermark state to {dbpath}")
pickle.dump(self, f)
chapterpath = join(self.location, self.show + "_chapters.txt")
with open(chapterpath, "w+") as f:
log.info(f"writing real chaptermarks to {chapterpath}")
f.write(str(self))
log.info("Also writing the last state to stdout:")
print(self)
def load(self, path=None):
# Load existing chapter-files
if not path:
path = join(self.location, self.show + ".db")
with open(path, "rb") as f:
return pickle.load(f)
def __str__(self):
return "\n".join([str(s) for s in self.storage])
class SystemTrayIcon(QSystemTrayIcon):
def __init__(self, parent, show, titles, settingsdir):
QSystemTrayIcon.__init__(self, QIcon(":/icons/main.png"), parent)
self.left_menu = LeftClickMenu()
self.markers = ChapterMarkFile(show, titles, settingsdir)
log.debug(self.markers)
# left click
self.activated.connect(self.left_click)
# Right Click
menu = QMenu(parent=None)
self.setContextMenu(menu)
setting_action = menu.addAction(QIcon(":/icons/save.png"), "save")
setting_action.triggered.connect(self.save)
setting_action = menu.addAction("---")
setting_action = menu.addAction(QIcon(":/icons/main.png"), "Reset and Restart")
setting_action.triggered.connect(self.reset)
setting_action = menu.addAction(QIcon(":/icons/exit.png"), "exit")
setting_action.triggered.connect(self.exit)
manager = KeyBoardManager(self)
manager.j_signal.connect(self.next_chapter)
manager.start()
def exit(self):
log.info("Persisting Chaptermarks")
self.markers.persist()
sys.exit()
def refresh_menu(self):
self.left_menu.dateAction.setText(f"{self.markers.state} since {self.markers.timers[self.markers.state].replace(microsecond=0)}")
self.left_menu.currentChapterAction.setText(f"Current: {self.markers.get_current()}")
try:
self.left_menu.nextChapterAction.setText(f"Next: {self.markers.get_next().title}")
except: # noqa E722
self.left_menu.nextChapterAction.setText("No next Chapter")
def next_chapter(self):
log.info("Markers Status:")
log.info(self.markers)
if self.markers.state == "preshow":
self.markers.begin()
text = f"start show {self.markers.show} with follwing chapter marks planned:\n{self.markers}"
notify2.Notification(text).show()
log.info(text)
elif self.markers.begin_next():
notify2.Notification(f"Next Chapter: {self.markers.get_current().title}").show()
log.info(f"next chapter {self.markers.get_current().title}")
else:
log.info("Cannot move to next chapter")
notify2.Notification("Cannot move to next Chapter").show()
def left_click(self, value):
self.refresh_menu()
if value == self.Trigger: # left click!
self.left_menu.exec_(QtGui.QCursor.pos())
def reset(self):
log.warn("performing complete rewind of chaptermarks")
self.markers.persist()
self.markers.reset()
def save(self):
self.markers.persist()
def main():
args = docopt(__doc__)
settingsdir = expanduser(args["--settings-dir"])
disable_notifications = args["--disable-notification"]
if not disable_notifications:
notify2.init("chapter-marker")
else:
class FakeNotify2:
def Notification(self, text): # noqa N802
return self
def show(self):
pass
notify2.Notification = FakeNotify2().Notification
show = args["SHOW"]
if not show:
show = current_show()
titles = [line.strip() for line in open(args["TITLEFILE"]).readlines()]
app = QtWidgets.QApplication([]) # can also take sys.argv
tray = SystemTrayIcon(app, show, titles, settingsdir)
tray.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
py | b40056727a369269bb4c543a99154233c46f793e | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import six
http_client = six.moves.http_client
__all__ = [
'HTTP_SUCCESS',
'parse_content_type_header'
]
HTTP_SUCCESS = [http_client.OK, http_client.CREATED, http_client.ACCEPTED,
http_client.NON_AUTHORITATIVE_INFORMATION, http_client.NO_CONTENT,
http_client.RESET_CONTENT, http_client.PARTIAL_CONTENT,
http_client.MULTI_STATUS, http_client.IM_USED,
]
def parse_content_type_header(content_type):
"""
Parse and normalize request content type and return a tuple with the content type and the
options.
:rtype: ``tuple``
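
Illustrative example (editor's addition, based on the parsing logic below):

>>> parse_content_type_header('application/json; charset=utf-8')
('application/json', {'charset': 'utf-8'})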
"""
if ';' in content_type:
split = content_type.split(';')
media = split[0]
options = {}
for pair in split[1:]:
split_pair = pair.split('=', 1)
if len(split_pair) != 2:
continue
key = split_pair[0].strip()
value = split_pair[1].strip()
options[key] = value
else:
media = content_type
options = {}
result = (media, options)
return result
|
py | b400572183f99d11a07a048d70cc659c3f8039ea | import atexit
import io
import sys
import copy
import math
_INPUT_LINES = sys.stdin.read().splitlines()
input = iter(_INPUT_LINES).__next__
_OUTPUT_BUFFER = io.StringIO()
sys.stdout = _OUTPUT_BUFFER
@atexit.register
def write():
sys.__stdout__.write(_OUTPUT_BUFFER.getvalue())
def decrypt(v):
temp = copy.deepcopy(v)
temp.sort()
convert = {}
letter = ord('A')
for i in temp:
if i not in convert:
convert[i] = str(chr(letter))
letter += 1
ans = ''
for i in v:
ans += convert[i]
return ans
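# Editor's note on the approach (a sketch, inferred from the code below):
# each value v[i] is the product of two consecutive hidden primes. Once two
# adjacent products differ, gcd(v[i], v[i-1]) recovers the prime they share;
# the remaining primes are then peeled off by division, walking left and
# right from that index. decrypt() finally maps the sorted distinct primes
# to letters 'A', 'B', 'C', ... to rebuild the plaintext.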
def solve(l, v):
ans = [0 for i in range(l + 1)]
for i in range(1, l):
if v[i] != v[i - 1]:
idx = i
ans[i] = math.gcd(v[i], v[i - 1])
break
i = idx
while i > 0:
i -= 1
ans[i] = v[i] // ans[i + 1]
i = idx
while i < l:
i += 1
ans[i] = v[i - 1] // ans[i - 1]
return decrypt(ans)
def main():
t = int(input())
for test in range(1, t + 1):
_, l = list(map(int, input().split(' ')))
v = list(map(int, input().split(' ')))
print('Case #' + str(test) + ': ' + solve(l, v))
main() |
py | b40057a96a973a16375afdbca262c813b9954b29 | import os
from django.conf import settings
from django.conf.urls import include
from django.conf.urls.i18n import i18n_patterns
from django.contrib.auth.views import (
LoginView,
PasswordResetCompleteView,
PasswordResetConfirmView,
PasswordResetDoneView,
)
from django.urls import path
from django.utils.module_loading import import_string
from django.views.generic import RedirectView, TemplateView
from zerver.forms import LoggingSetPasswordForm
from zerver.lib.integrations import WEBHOOK_INTEGRATIONS
from zerver.lib.rest import rest_path
from zerver.tornado.views import cleanup_event_queue, get_events, get_events_internal, notify
from zerver.views.alert_words import add_alert_words, list_alert_words, remove_alert_words
from zerver.views.archive import archive, get_web_public_topics_backend
from zerver.views.attachments import list_by_user, remove
from zerver.views.auth import (
api_fetch_api_key,
api_get_server_settings,
json_fetch_api_key,
log_into_subdomain,
login_page,
logout_then_login,
password_reset,
remote_user_jwt,
remote_user_sso,
saml_sp_metadata,
show_deactivation_notice,
start_remote_user_sso,
start_social_login,
start_social_signup,
)
from zerver.views.camo import handle_camo_url
from zerver.views.compatibility import check_global_compatibility
from zerver.views.custom_profile_fields import (
create_realm_custom_profile_field,
delete_realm_custom_profile_field,
list_realm_custom_profile_fields,
remove_user_custom_profile_data,
reorder_realm_custom_profile_fields,
update_realm_custom_profile_field,
update_user_custom_profile_data,
)
from zerver.views.digest import digest_page
from zerver.views.documentation import IntegrationView, MarkdownDirectoryView, integration_doc
from zerver.views.drafts import create_drafts, delete_draft, edit_draft, fetch_drafts
from zerver.views.email_mirror import email_mirror_message
from zerver.views.events_register import events_register_backend
from zerver.views.home import accounts_accept_terms, desktop_home, home
from zerver.views.hotspots import mark_hotspot_as_read
from zerver.views.invite import (
generate_multiuse_invite_backend,
get_user_invites,
invite_users_backend,
resend_user_invite_email,
revoke_multiuse_invite,
revoke_user_invite,
)
from zerver.views.message_edit import (
delete_message_backend,
get_message_edit_history,
json_fetch_raw_message,
update_message_backend,
)
from zerver.views.message_fetch import get_messages_backend, messages_in_narrow_backend
from zerver.views.message_flags import (
mark_all_as_read,
mark_stream_as_read,
mark_topic_as_read,
update_message_flags,
)
from zerver.views.message_send import render_message_backend, send_message_backend, zcommand_backend
from zerver.views.muting import mute_user, unmute_user, update_muted_topic
from zerver.views.portico import (
app_download_link_redirect,
apps_view,
hello_view,
landing_view,
plans_view,
privacy_view,
team_view,
terms_view,
)
from zerver.views.presence import (
get_presence_backend,
get_statuses_for_realm,
update_active_status_backend,
update_user_status_backend,
)
from zerver.views.push_notifications import (
add_android_reg_id,
add_apns_device_token,
remove_android_reg_id,
remove_apns_device_token,
)
from zerver.views.reactions import add_reaction, remove_reaction
from zerver.views.realm import (
check_subdomain_available,
deactivate_realm,
realm_reactivation,
update_realm,
)
from zerver.views.realm_domains import (
create_realm_domain,
delete_realm_domain,
list_realm_domains,
patch_realm_domain,
)
from zerver.views.realm_emoji import delete_emoji, list_emoji, upload_emoji
from zerver.views.realm_export import delete_realm_export, export_realm, get_realm_exports
from zerver.views.realm_icon import delete_icon_backend, get_icon_backend, upload_icon
from zerver.views.realm_linkifiers import (
create_linkifier,
delete_linkifier,
list_linkifiers,
update_linkifier,
)
from zerver.views.realm_logo import delete_logo_backend, get_logo_backend, upload_logo
from zerver.views.realm_playgrounds import add_realm_playground, delete_realm_playground
from zerver.views.registration import (
accounts_home,
accounts_home_from_multiuse_invite,
accounts_register,
check_prereg_key_and_redirect,
create_realm,
find_account,
realm_redirect,
)
from zerver.views.report import (
report_csp_violations,
report_error,
report_narrow_times,
report_send_times,
report_unnarrow_times,
)
from zerver.views.storage import get_storage, remove_storage, update_storage
from zerver.views.streams import (
add_default_stream,
add_subscriptions_backend,
create_default_stream_group,
deactivate_stream_backend,
delete_in_topic,
get_streams_backend,
get_subscribers_backend,
get_topics_backend,
json_get_stream_id,
list_subscriptions_backend,
remove_default_stream,
remove_default_stream_group,
remove_subscriptions_backend,
update_default_stream_group_info,
update_default_stream_group_streams,
update_stream_backend,
update_subscription_properties_backend,
update_subscriptions_backend,
update_subscriptions_property,
)
from zerver.views.submessage import process_submessage
from zerver.views.thumbnail import backend_serve_thumbnail
from zerver.views.tutorial import set_tutorial_status
from zerver.views.typing import send_notification_backend
from zerver.views.unsubscribe import email_unsubscribe
from zerver.views.upload import (
serve_file_backend,
serve_file_url_backend,
serve_local_file_unauthed,
upload_file_backend,
)
from zerver.views.user_groups import (
add_user_group,
delete_user_group,
edit_user_group,
get_user_group,
update_user_group_backend,
)
from zerver.views.user_settings import (
change_enter_sends,
confirm_email_change,
delete_avatar_backend,
json_change_notify_settings,
json_change_settings,
regenerate_api_key,
set_avatar_backend,
update_display_settings_backend,
)
from zerver.views.users import (
add_bot_backend,
avatar,
create_user_backend,
deactivate_bot_backend,
deactivate_user_backend,
deactivate_user_own_backend,
get_bots_backend,
get_members_backend,
get_profile_backend,
get_subscription_backend,
get_user_by_email,
patch_bot_backend,
reactivate_user_backend,
regenerate_bot_api_key,
update_user_backend,
)
from zerver.views.video_calls import (
complete_zoom_user,
deauthorize_zoom_user,
get_bigbluebutton_url,
join_bigbluebutton,
make_zoom_video_call,
register_zoom_user,
)
from zerver.views.zephyr import webathena_kerberos_login
from zproject import dev_urls
from zproject.legacy_urls import legacy_urls
if settings.TWO_FACTOR_AUTHENTICATION_ENABLED:
from two_factor.gateways.twilio.urls import urlpatterns as tf_twilio_urls
from two_factor.urls import urlpatterns as tf_urls
# NB: There are several other pieces of code which route requests by URL:
#
# - legacy_urls.py contains API endpoints written before the redesign
# and should not be added to.
#
# - runtornado.py has its own URL list for Tornado views. See the
# invocation of web.Application in that file.
#
# - The Nginx config knows which URLs to route to Django or Tornado.
#
# - Likewise for the local dev server in tools/run-dev.py.
# These endpoints constitute the currently designed API (V1), which uses:
# * REST verbs
# * Basic auth (username:password is email:apiKey)
# * Take and return json-formatted data
#
# If you're adding a new endpoint to the code that requires authentication,
# please add it here.
# See rest_dispatch in zerver.lib.rest for an explanation of auth methods used
#
# All of these paths are accessed by either a /json or /api/v1 prefix;
# e.g. `PATCH /json/realm` or `PATCH /api/v1/realm`.
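# Editor's note -- an illustrative reading of one entry (not an exhaustive
# description): a single rest_path such as
#   rest_path("messages/<int:message_id>", GET=json_fetch_raw_message, ...)
# is therefore reachable both as GET /json/messages/42 (session-cookie auth,
# used by the webapp) and as GET /api/v1/messages/42 (basic auth email:apiKey,
# used by API clients), with each HTTP verb dispatched to the view named in
# the corresponding keyword argument.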
v1_api_and_json_patterns = [
# realm-level calls
rest_path("realm", PATCH=update_realm),
path("realm/subdomain/<subdomain>", check_subdomain_available),
# realm/domains -> zerver.views.realm_domains
rest_path("realm/domains", GET=list_realm_domains, POST=create_realm_domain),
rest_path("realm/domains/<domain>", PATCH=patch_realm_domain, DELETE=delete_realm_domain),
# realm/emoji -> zerver.views.realm_emoji
rest_path("realm/emoji", GET=list_emoji),
rest_path(
"realm/emoji/<emoji_name>",
POST=upload_emoji,
DELETE=(delete_emoji, {"intentionally_undocumented"}),
),
# this endpoint throws a status code 400 JsonableError when it should be a 404.
# realm/icon -> zerver.views.realm_icon
rest_path("realm/icon", POST=upload_icon, DELETE=delete_icon_backend, GET=get_icon_backend),
# realm/logo -> zerver.views.realm_logo
rest_path("realm/logo", POST=upload_logo, DELETE=delete_logo_backend, GET=get_logo_backend),
# realm/filters and realm/linkifiers -> zerver.views.realm_linkifiers
rest_path("realm/linkifiers", GET=list_linkifiers),
rest_path("realm/filters", POST=create_linkifier),
rest_path("realm/filters/<int:filter_id>", DELETE=delete_linkifier, PATCH=update_linkifier),
# realm/playgrounds -> zerver.views.realm_playgrounds
rest_path("realm/playgrounds", POST=add_realm_playground),
rest_path("realm/playgrounds/<int:playground_id>", DELETE=delete_realm_playground),
# realm/profile_fields -> zerver.views.custom_profile_fields
rest_path(
"realm/profile_fields",
GET=list_realm_custom_profile_fields,
PATCH=reorder_realm_custom_profile_fields,
POST=create_realm_custom_profile_field,
),
rest_path(
"realm/profile_fields/<int:field_id>",
PATCH=update_realm_custom_profile_field,
DELETE=delete_realm_custom_profile_field,
),
# realm/deactivate -> zerver.views.deactivate_realm
rest_path("realm/deactivate", POST=deactivate_realm),
# users -> zerver.views.users
rest_path("users", GET=get_members_backend, POST=create_user_backend),
rest_path("users/me", GET=get_profile_backend, DELETE=deactivate_user_own_backend),
rest_path("users/<int:user_id>/reactivate", POST=reactivate_user_backend),
rest_path(
"users/<int:user_id>",
GET=get_members_backend,
PATCH=update_user_backend,
DELETE=deactivate_user_backend,
),
rest_path("users/<int:user_id>/subscriptions/<int:stream_id>", GET=get_subscription_backend),
rest_path("users/<email>", GET=get_user_by_email),
rest_path("bots", GET=get_bots_backend, POST=add_bot_backend),
rest_path("bots/<int:bot_id>/api_key/regenerate", POST=regenerate_bot_api_key),
rest_path("bots/<int:bot_id>", PATCH=patch_bot_backend, DELETE=deactivate_bot_backend),
# invites -> zerver.views.invite
rest_path("invites", GET=get_user_invites, POST=invite_users_backend),
rest_path("invites/<int:prereg_id>", DELETE=revoke_user_invite),
rest_path("invites/<int:prereg_id>/resend", POST=resend_user_invite_email),
# invites/multiuse -> zerver.views.invite
rest_path("invites/multiuse", POST=generate_multiuse_invite_backend),
# invites/multiuse -> zerver.views.invite
rest_path("invites/multiuse/<int:invite_id>", DELETE=revoke_multiuse_invite),
# mark messages as read (in bulk)
rest_path("mark_all_as_read", POST=mark_all_as_read),
rest_path("mark_stream_as_read", POST=mark_stream_as_read),
rest_path("mark_topic_as_read", POST=mark_topic_as_read),
rest_path("zcommand", POST=zcommand_backend),
# Endpoints for syncing drafts.
rest_path(
"drafts",
GET=(fetch_drafts, {"intentionally_undocumented"}),
POST=(create_drafts, {"intentionally_undocumented"}),
),
rest_path(
"drafts/<int:draft_id>",
PATCH=(edit_draft, {"intentionally_undocumented"}),
DELETE=(delete_draft, {"intentionally_undocumented"}),
),
# messages -> zerver.views.message*
# GET returns messages, possibly filtered, POST sends a message
rest_path(
"messages",
GET=(get_messages_backend, {"allow_anonymous_user_web"}),
POST=(send_message_backend, {"allow_incoming_webhooks"}),
),
rest_path(
"messages/<int:message_id>",
GET=json_fetch_raw_message,
PATCH=update_message_backend,
DELETE=delete_message_backend,
),
rest_path("messages/render", POST=render_message_backend),
rest_path("messages/flags", POST=update_message_flags),
rest_path("messages/<int:message_id>/history", GET=get_message_edit_history),
rest_path("messages/matches_narrow", GET=messages_in_narrow_backend),
rest_path("users/me/subscriptions/properties", POST=update_subscription_properties_backend),
rest_path("users/me/subscriptions/<int:stream_id>", PATCH=update_subscriptions_property),
rest_path("submessage", POST=process_submessage),
# New endpoint for handling reactions.
# reactions -> zerver.view.reactions
# POST adds a reaction to a message
# DELETE removes a reaction from a message
rest_path("messages/<int:message_id>/reactions", POST=add_reaction, DELETE=remove_reaction),
# attachments -> zerver.views.attachments
rest_path("attachments", GET=list_by_user),
rest_path("attachments/<int:attachment_id>", DELETE=remove),
# typing -> zerver.views.typing
# POST sends a typing notification event to recipients
rest_path("typing", POST=send_notification_backend),
# user_uploads -> zerver.views.upload
rest_path("user_uploads", POST=upload_file_backend),
rest_path(
"user_uploads/<realm_id_str>/<path:filename>",
GET=(serve_file_url_backend, {"override_api_url_scheme"}),
),
# bot_storage -> zerver.views.storage
rest_path("bot_storage", PUT=update_storage, GET=get_storage, DELETE=remove_storage),
# Endpoint used by mobile devices to register their push
# notification credentials
rest_path(
"users/me/apns_device_token", POST=add_apns_device_token, DELETE=remove_apns_device_token
),
rest_path("users/me/android_gcm_reg_id", POST=add_android_reg_id, DELETE=remove_android_reg_id),
# users/*/presence -> zerver.views.presence
rest_path("users/me/presence", POST=update_active_status_backend),
# It's important that this sit after users/me/presence so that
# Django's URL resolution order doesn't break the
# /users/me/presence endpoint.
rest_path("users/<user_id_or_email>/presence", GET=get_presence_backend),
rest_path("realm/presence", GET=get_statuses_for_realm),
rest_path("users/me/status", POST=update_user_status_backend),
# user_groups -> zerver.views.user_groups
rest_path("user_groups", GET=get_user_group),
rest_path("user_groups/create", POST=add_user_group),
rest_path("user_groups/<int:user_group_id>", PATCH=edit_user_group, DELETE=delete_user_group),
rest_path("user_groups/<int:user_group_id>/members", POST=update_user_group_backend),
# users/me -> zerver.views.user_settings
rest_path("users/me/api_key/regenerate", POST=regenerate_api_key),
rest_path(
"users/me/enter-sends",
POST=(
change_enter_sends,
# This endpoint should be folded into user settings
{"intentionally_undocumented"},
),
),
rest_path("users/me/avatar", POST=set_avatar_backend, DELETE=delete_avatar_backend),
# users/me/hotspots -> zerver.views.hotspots
rest_path(
"users/me/hotspots",
POST=(
mark_hotspot_as_read,
# This endpoint is low priority for documentation as
# it is part of the webapp-specific tutorial.
{"intentionally_undocumented"},
),
),
# users/me/tutorial_status -> zerver.views.tutorial
rest_path(
"users/me/tutorial_status",
POST=(
set_tutorial_status,
# This is a relic of an old Zulip tutorial model and
# should be deleted.
{"intentionally_undocumented"},
),
),
# settings -> zerver.views.user_settings
rest_path("settings", PATCH=json_change_settings),
rest_path("settings/display", PATCH=update_display_settings_backend),
rest_path("settings/notifications", PATCH=json_change_notify_settings),
# users/me/alert_words -> zerver.views.alert_words
rest_path(
"users/me/alert_words",
GET=list_alert_words,
POST=add_alert_words,
DELETE=remove_alert_words,
),
# users/me/custom_profile_data -> zerver.views.custom_profile_data
rest_path(
"users/me/profile_data",
PATCH=update_user_custom_profile_data,
DELETE=remove_user_custom_profile_data,
),
rest_path(
"users/me/<int:stream_id>/topics", GET=(get_topics_backend, {"allow_anonymous_user_web"})
),
# streams -> zerver.views.streams
# (this API is only used externally)
rest_path("streams", GET=get_streams_backend),
# GET returns `stream_id`, stream name should be encoded in the URL query (in `stream` param)
rest_path("get_stream_id", GET=json_get_stream_id),
# GET returns "stream info" (undefined currently?), HEAD returns whether stream exists (200 or 404)
rest_path("streams/<int:stream_id>/members", GET=get_subscribers_backend),
rest_path(
"streams/<int:stream_id>", PATCH=update_stream_backend, DELETE=deactivate_stream_backend
),
# Delete topic in stream
rest_path("streams/<int:stream_id>/delete_topic", POST=delete_in_topic),
rest_path("default_streams", POST=add_default_stream, DELETE=remove_default_stream),
rest_path("default_stream_groups/create", POST=create_default_stream_group),
rest_path(
"default_stream_groups/<int:group_id>",
PATCH=update_default_stream_group_info,
DELETE=remove_default_stream_group,
),
rest_path(
"default_stream_groups/<int:group_id>/streams", PATCH=update_default_stream_group_streams
),
# GET lists your streams, POST bulk adds, PATCH bulk modifies/removes
rest_path(
"users/me/subscriptions",
GET=list_subscriptions_backend,
POST=add_subscriptions_backend,
PATCH=update_subscriptions_backend,
DELETE=remove_subscriptions_backend,
),
# muting -> zerver.views.muting
rest_path("users/me/subscriptions/muted_topics", PATCH=update_muted_topic),
rest_path("users/me/muted_users/<int:muted_user_id>", POST=mute_user, DELETE=unmute_user),
# used to register for an event queue in tornado
rest_path("register", POST=events_register_backend),
# events -> zerver.tornado.views
rest_path("events", GET=get_events, DELETE=cleanup_event_queue),
# report -> zerver.views.report
#
# These endpoints are for internal error/performance reporting
# from the browser to the webapp, and we don't expect to ever
# include in our API documentation.
rest_path(
"report/error",
# Logged-out browsers can hit this endpoint, for portico page JS exceptions.
POST=(report_error, {"allow_anonymous_user_web", "intentionally_undocumented"}),
),
rest_path("report/send_times", POST=(report_send_times, {"intentionally_undocumented"})),
rest_path(
"report/narrow_times",
POST=(report_narrow_times, {"allow_anonymous_user_web", "intentionally_undocumented"}),
),
rest_path(
"report/unnarrow_times",
POST=(report_unnarrow_times, {"allow_anonymous_user_web", "intentionally_undocumented"}),
),
# Used to generate a Zoom video call URL
rest_path("calls/zoom/create", POST=make_zoom_video_call),
# Used to generate a Big Blue Button video call URL
rest_path("calls/bigbluebutton/create", GET=get_bigbluebutton_url),
# export/realm -> zerver.views.realm_export
rest_path("export/realm", POST=export_realm, GET=get_realm_exports),
rest_path("export/realm/<int:export_id>", DELETE=delete_realm_export),
]
integrations_view = IntegrationView.as_view()
# These views serve pages (HTML). As such, their internationalization
# must depend on the URL.
#
# If you're adding a new page to the website (as opposed to a new
# endpoint for use by code), you should add it here.
i18n_urls = [
path("", home, name="home"),
# We have a desktop-specific landing page in case we change our /
# to not log in in the future. We don't want to require a new
# desktop app build for everyone in that case
path("desktop_home/", desktop_home),
# Backwards-compatibility (legacy) Google auth URL for the mobile
# apps; see https://github.com/zulip/zulip/issues/13081 for
# background. We can remove this once older versions of the
# mobile app are no longer present in the wild.
path("accounts/login/google/", start_social_login, {"backend": "google"}),
path("accounts/login/start/sso/", start_remote_user_sso, name="start-login-sso"),
path("accounts/login/sso/", remote_user_sso, name="login-sso"),
path("accounts/login/jwt/", remote_user_jwt),
path("accounts/login/social/<backend>", start_social_login, name="login-social"),
path("accounts/login/social/<backend>/<extra_arg>", start_social_login, name="login-social"),
path("accounts/register/social/<backend>", start_social_signup, name="signup-social"),
path(
"accounts/register/social/<backend>/<extra_arg>", start_social_signup, name="signup-social"
),
path("accounts/login/subdomain/<token>", log_into_subdomain),
# We have two entries for accounts/login; only the first one is
# used for URL resolution. The second here is to allow
# reverse("login") in templates to
# return `/accounts/login/`.
path("accounts/login/", login_page, {"template_name": "zerver/login.html"}, name="login_page"),
path("accounts/login/", LoginView.as_view(template_name="zerver/login.html"), name="login"),
path("accounts/logout/", logout_then_login),
path("accounts/webathena_kerberos_login/", webathena_kerberos_login),
path("accounts/password/reset/", password_reset, name="password_reset"),
path(
"accounts/password/reset/done/",
PasswordResetDoneView.as_view(template_name="zerver/reset_emailed.html"),
),
path(
"accounts/password/reset/<uidb64>/<token>/",
PasswordResetConfirmView.as_view(
success_url="/accounts/password/done/",
template_name="zerver/reset_confirm.html",
form_class=LoggingSetPasswordForm,
),
name="password_reset_confirm",
),
path(
"accounts/password/done/",
PasswordResetCompleteView.as_view(template_name="zerver/reset_done.html"),
),
path("accounts/deactivated/", show_deactivation_notice),
# Displays digest email content in browser.
path("digest/", digest_page),
# Registration views, require a confirmation ID.
path("accounts/home/", accounts_home),
path(
"accounts/send_confirm/<email>",
TemplateView.as_view(template_name="zerver/accounts_send_confirm.html"),
name="signup_send_confirm",
),
path(
"accounts/new/send_confirm/<email>",
TemplateView.as_view(template_name="zerver/accounts_send_confirm.html"),
{"realm_creation": True},
name="new_realm_send_confirm",
),
path("accounts/register/", accounts_register, name="accounts_register"),
path(
"accounts/do_confirm/<confirmation_key>",
check_prereg_key_and_redirect,
name="check_prereg_key_and_redirect",
),
path(
"accounts/confirm_new_email/<confirmation_key>",
confirm_email_change,
name="confirm_email_change",
),
# Email unsubscription endpoint. Allows for unsubscribing from various types of emails,
# including the welcome emails (day 1 & 2), missed PMs, etc.
path(
"accounts/unsubscribe/<email_type>/<confirmation_key>",
email_unsubscribe,
name="unsubscribe",
),
# Portico-styled page used to provide email confirmation of terms acceptance.
path("accounts/accept_terms/", accounts_accept_terms, name="accept_terms"),
# Find your account
path("accounts/find/", find_account, name="find_account"),
# Go to organization subdomain
path("accounts/go/", realm_redirect, name="realm_redirect"),
# Realm creation
path("new/", create_realm),
path("new/<creation_key>", create_realm, name="create_realm"),
# Realm reactivation
path("reactivate/<confirmation_key>", realm_reactivation, name="realm_reactivation"),
# Global public streams (Zulip's way of doing archives)
path("archive/streams/<int:stream_id>/topics/<topic_name>", archive),
path("archive/streams/<int:stream_id>/topics", get_web_public_topics_backend),
# Login/registration
path("register/", accounts_home, name="register"),
path("login/", login_page, {"template_name": "zerver/login.html"}, name="login_page"),
path("join/<confirmation_key>/", accounts_home_from_multiuse_invite, name="join"),
# Used to generate a Zoom video call URL
path("calls/zoom/register", register_zoom_user),
path("calls/zoom/complete", complete_zoom_user),
path("calls/zoom/deauthorize", deauthorize_zoom_user),
# Used to join a Big Blue Button video call
path("calls/bigbluebutton/join", join_bigbluebutton),
# API and integrations documentation
path("integrations/doc-html/<integration_name>", integration_doc),
path("integrations/", integrations_view),
path("integrations/<path:path>", integrations_view),
# Landing page, features pages, signup form, etc.
path("hello/", hello_view),
path("new-user/", RedirectView.as_view(url="/hello", permanent=True)),
path("features/", landing_view, {"template_name": "zerver/features.html"}),
path("plans/", plans_view, name="plans"),
path("apps/", apps_view),
path("apps/download/<platform>", app_download_link_redirect),
path("apps/<platform>", apps_view),
path("team/", team_view),
path("history/", landing_view, {"template_name": "zerver/history.html"}),
path("why-zulip/", landing_view, {"template_name": "zerver/why-zulip.html"}),
path("for/open-source/", landing_view, {"template_name": "zerver/for-open-source.html"}),
path("for/research/", landing_view, {"template_name": "zerver/for-research.html"}),
path("for/companies/", landing_view, {"template_name": "zerver/for-companies.html"}),
path(
"for/working-groups-and-communities/",
landing_view,
{"template_name": "zerver/for-working-groups-and-communities.html"},
),
path("security/", landing_view, {"template_name": "zerver/security.html"}),
# Terms of Service and privacy pages.
path("terms/", terms_view),
path("privacy/", privacy_view),
]
# Make a copy of i18n_urls so that they appear without prefix for english
urls = list(i18n_urls)
# Include the dual-use patterns twice
urls += [
path("api/v1/", include(v1_api_and_json_patterns)),
path("json/", include(v1_api_and_json_patterns)),
]
# user_uploads -> zerver.views.upload.serve_file_backend
#
# This URL is an exception to the URL naming schemes for endpoints. It
# supports both API and session cookie authentication, using a single
# URL for both (not 'api/v1/' or 'json/' prefix). This is required to
# easily support the mobile apps fetching uploaded files without
# having to rewrite URLs, and is implemented using the
# 'override_api_url_scheme' flag passed to rest_dispatch
urls += [
path(
"user_uploads/temporary/<token>/<filename>",
serve_local_file_unauthed,
name="local_file_unauthed",
),
rest_path(
"user_uploads/<realm_id_str>/<path:filename>",
GET=(serve_file_backend, {"override_api_url_scheme"}),
),
# This endpoint serves thumbnailed versions of images using thumbor;
# it requires an exception for the same reason.
rest_path("thumbnail", GET=(backend_serve_thumbnail, {"override_api_url_scheme"})),
# Avatars have the same constraint because their URLs are included
# in API data structures used by both the mobile and web clients.
rest_path("avatar/<email_or_id>", GET=(avatar, {"override_api_url_scheme"})),
rest_path(
"avatar/<email_or_id>/medium", {"medium": True}, GET=(avatar, {"override_api_url_scheme"})
),
]
# This URL serves as a way to receive CSP violation reports from the users.
# We use this endpoint to just log these reports.
urls += [
path("report/csp_violations", report_csp_violations),
]
# This URL serves as a way to provide backward compatibility to messages
# rendered at the time Zulip used camo for doing http -> https conversion for
# such links with images previews. Now thumbor can be used for serving such
# images.
urls += [
path("external_content/<digest>/<received_url>", handle_camo_url),
]
# Incoming webhook URLs
# We don't create URLs for particular Git integrations here
# because of generic one below
for incoming_webhook in WEBHOOK_INTEGRATIONS:
if incoming_webhook.url_object:
urls.append(incoming_webhook.url_object)
# Desktop-specific authentication URLs
urls += [
rest_path("json/fetch_api_key", POST=json_fetch_api_key),
]
# Mobile-specific authentication URLs
urls += [
# Used as a global check by all mobile clients, which currently send
# requests to https://zulip.com/compatibility almost immediately after
# starting up.
path("compatibility", check_global_compatibility),
]
v1_api_mobile_patterns = [
# This json format view used by the mobile apps lists which
# authentication backends the server allows as well as details
# like the requested subdomain's realm icon (if known) and
# server-specific compatibility.
path("server_settings", api_get_server_settings),
# This json format view used by the mobile apps accepts a username
# password/pair and returns an API key.
path("fetch_api_key", api_fetch_api_key),
]
# View for uploading messages from email mirror
urls += [
path("email_mirror_message", email_mirror_message),
]
# Include URL configuration files for site-specified extra installed
# Django apps
for app_name in settings.EXTRA_INSTALLED_APPS:
app_dir = os.path.join(settings.DEPLOY_ROOT, app_name)
if os.path.exists(os.path.join(app_dir, "urls.py")):
urls += [path("", include(f"{app_name}.urls"))]
i18n_urls += import_string(f"{app_name}.urls.i18n_urlpatterns")
# Tornado views
urls += [
# Used internally for communication between Django and Tornado processes
#
# Since these views don't use rest_dispatch, they cannot have
# asynchronous Tornado behavior.
path("notify_tornado", notify),
path("api/v1/events/internal", get_events_internal),
]
# Python Social Auth
urls += [path("", include("social_django.urls", namespace="social"))]
urls += [path("saml/metadata.xml", saml_sp_metadata)]
# User documentation site
help_documentation_view = MarkdownDirectoryView.as_view(
template_name="zerver/documentation_main.html", path_template="/zerver/help/%s.md"
)
api_documentation_view = MarkdownDirectoryView.as_view(
template_name="zerver/documentation_main.html", path_template="/zerver/api/%s.md"
)
urls += [
# Redirects due to us having moved the docs:
path(
"help/delete-a-stream", RedirectView.as_view(url="/help/archive-a-stream", permanent=True)
),
path("api/delete-stream", RedirectView.as_view(url="/api/archive-stream", permanent=True)),
path(
"help/configure-missed-message-emails",
RedirectView.as_view(url="/help/configure-message-notification-emails", permanent=True),
),
path("help/", help_documentation_view),
path("help/<path:article>", help_documentation_view),
path("api/", api_documentation_view),
path("api/<slug:article>", api_documentation_view),
]
# Two-factor URLs
if settings.TWO_FACTOR_AUTHENTICATION_ENABLED:
urls += [path("", include(tf_urls)), path("", include(tf_twilio_urls))]
if settings.DEVELOPMENT:
urls += dev_urls.urls
i18n_urls += dev_urls.i18n_urls
v1_api_mobile_patterns += dev_urls.v1_api_mobile_patterns
urls += [
path("api/v1/", include(v1_api_mobile_patterns)),
]
# The sequence is important; if i18n URLs don't come first then
# reverse URL mapping points to i18n URLs which causes the frontend
# tests to fail
urlpatterns = i18n_patterns(*i18n_urls) + urls + legacy_urls
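# (Editor's note: the i18n pages are thus reachable both with a language-code
# prefix, via i18n_patterns(*i18n_urls), and without one, because `urls`
# re-includes the same i18n_urls as noted in the "Make a copy" comment above.)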
|
py | b40057e6c7e31542eccf1f8d7fc284d9c4e06164 | '''
Numerical example for the Continuous No-Regret Algorithm with quadratic loss functions
@author: Maximilian Balandat, Walid Krichene
@date: Dec 20, 2014
'''
from ContNoRegret.Domains import S
from ContNoRegret.Distributions import Uniform
from ContNoRegret.LossFunctions import GaussianLossFunction
from ContNoRegret.HedgeAlgorithm import GaussianNoRegretProblem
from ContNoRegret.utils import create_random_Sigmas, compute_etaopt, plot_results
from scipy.stats import expon
# set up some basic parameters
T = 10000
M = 10.0
Lbnd = 5.0 # Uniform bound on the Lipschitz constant
N = 2500
Ngrid = 200000
# # just a simple rectangle
# dom = Rectangle([-1.0, 1.0], [-1.0, 1.0])
# domain S
dom = S()
# create random means, uniformly over the domain
mus = Uniform(dom).sample(T)
# create random covariance matrices, based on the Lipschitz bound and the uniform bound M
covs = create_random_Sigmas(dom.n, T, Lbnd, M, expon())
# create list of loss functions (for now: Ignore uniform bound M!)
lossfuncs = [GaussianLossFunction(dom, mu, cov, M) for mu,cov in zip(mus,covs)]
# Create gauss problem for the largest horizon
gaussprob = GaussianNoRegretProblem(dom, lossfuncs, Lbnd, M)
# run the problem for different constant rates etas
etaopts = {}
Ts = [2500, 7500]
for T in Ts:
etaopts[T] = compute_etaopt(dom, M, T)
etas = [0.1, 0.2]
results = gaussprob.run_simulation(N, etaopts=etaopts, etas=etas, Ngrid=Ngrid)
plot_results(results, offset=1000, filename='figures/Gauss_etas_S')
slopes, slopes_bnd = results.estimate_loglog_slopes()
|
py | b40059912677f87fef84f755e99e49f81224dc97 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__all__ = ['singledispatch']
from functools import update_wrapper
from weakref import WeakKeyDictionary
from singledispatch_helpers import MappingProxyType, get_cache_token
################################################################################
### singledispatch() - single-dispatch generic function decorator
################################################################################
def _c3_merge(sequences):
"""Merges MROs in *sequences* to a single MRO using the C3 algorithm.
Adapted from http://www.python.org/download/releases/2.3/mro/.
"""
result = []
while True:
sequences = [s for s in sequences if s] # purge empty sequences
if not sequences:
return result
for s1 in sequences: # find merge candidates among seq heads
candidate = s1[0]
for s2 in sequences:
if candidate in s2[1:]:
candidate = None
break # reject the current head, it appears later
else:
break
if not candidate:
raise RuntimeError("Inconsistent hierarchy")
result.append(candidate)
# remove the chosen candidate
for seq in sequences:
if seq[0] == candidate:
del seq[0]
def _c3_mro(cls, abcs=None):
"""Computes the method resolution order using extended C3 linearization.
If no *abcs* are given, the algorithm works exactly like the built-in C3
linearization used for method resolution.
If given, *abcs* is a list of abstract base classes that should be inserted
into the resulting MRO. Unrelated ABCs are ignored and don't end up in the
result. The algorithm inserts ABCs where their functionality is introduced,
i.e. issubclass(cls, abc) returns True for the class itself but returns
False for all its direct base classes. Implicit ABCs for a given class
(either registered or inferred from the presence of a special method like
__len__) are inserted directly after the last ABC explicitly listed in the
MRO of said class. If two implicit ABCs end up next to each other in the
resulting MRO, their ordering depends on the order of types in *abcs*.
"""
for i, base in enumerate(reversed(cls.__bases__)):
if hasattr(base, '__abstractmethods__'):
boundary = len(cls.__bases__) - i
break # Bases up to the last explicit ABC are considered first.
else:
boundary = 0
abcs = list(abcs) if abcs else []
explicit_bases = list(cls.__bases__[:boundary])
abstract_bases = []
other_bases = list(cls.__bases__[boundary:])
for base in abcs:
if issubclass(cls, base) and not any(
issubclass(b, base) for b in cls.__bases__
):
# If *cls* is the class that introduces behaviour described by
# an ABC *base*, insert said ABC to its MRO.
abstract_bases.append(base)
for base in abstract_bases:
abcs.remove(base)
explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases]
abstract_c3_mros = [_c3_mro(base, abcs=abcs) for base in abstract_bases]
other_c3_mros = [_c3_mro(base, abcs=abcs) for base in other_bases]
return _c3_merge(
[[cls]] +
explicit_c3_mros + abstract_c3_mros + other_c3_mros +
[explicit_bases] + [abstract_bases] + [other_bases]
)
def _compose_mro(cls, types):
"""Calculates the method resolution order for a given class *cls*.
Includes relevant abstract base classes (with their respective bases) from
the *types* iterable. Uses a modified C3 linearization algorithm.
"""
bases = set(cls.__mro__)
# Remove entries which are already present in the __mro__ or unrelated.
def is_related(typ):
return (typ not in bases and hasattr(typ, '__mro__')
and issubclass(cls, typ))
types = [n for n in types if is_related(n)]
# Remove entries which are strict bases of other entries (they will end up
# in the MRO anyway).
def is_strict_base(typ):
for other in types:
if typ != other and typ in other.__mro__:
return True
return False
types = [n for n in types if not is_strict_base(n)]
# Subclasses of the ABCs in *types* which are also implemented by
# *cls* can be used to stabilize ABC ordering.
type_set = set(types)
mro = []
for typ in types:
found = []
for sub in typ.__subclasses__():
if sub not in bases and issubclass(cls, sub):
found.append([s for s in sub.__mro__ if s in type_set])
if not found:
mro.append(typ)
continue
# Favor subclasses with the biggest number of useful bases
found.sort(key=len, reverse=True)
for sub in found:
for subcls in sub:
if subcls not in mro:
mro.append(subcls)
return _c3_mro(cls, abcs=mro)
def _find_impl(cls, registry):
"""Returns the best matching implementation from *registry* for type *cls*.
Where there is no registered implementation for a specific type, its method
resolution order is used to find a more generic implementation.
Note: if *registry* does not contain an implementation for the base
*object* type, this function may return None.
"""
mro = _compose_mro(cls, registry.keys())
match = None
for t in mro:
if match is not None:
# If *match* is an implicit ABC but there is another unrelated,
# equally matching implicit ABC, refuse the temptation to guess.
if (t in registry and t not in cls.__mro__
and match not in cls.__mro__
and not issubclass(match, t)):
raise RuntimeError("Ambiguous dispatch: {0} or {1}".format(
match, t))
break
if t in registry:
match = t
return registry.get(match)
def singledispatch(func):
"""Single-dispatch generic function decorator.
Transforms a function into a generic function, which can have different
behaviours depending upon the type of its first argument. The decorated
function acts as the default implementation, and additional
implementations can be registered using the register() attribute of the
generic function.
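
Illustrative usage (editor's sketch; mirrors the standard functools API):

>>> @singledispatch
... def describe(arg):
...     return 'something else'
>>> @describe.register(int)
... def _(arg):
...     return 'an int'
>>> describe(3)
'an int'
>>> describe('hello')
'something else'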
"""
registry = {}
dispatch_cache = WeakKeyDictionary()
def ns(): pass
ns.cache_token = None
def dispatch(cls):
"""generic_func.dispatch(cls) -> <function implementation>
Runs the dispatch algorithm to return the best available implementation
for the given *cls* registered on *generic_func*.
"""
if ns.cache_token is not None:
current_token = get_cache_token()
if ns.cache_token != current_token:
dispatch_cache.clear()
ns.cache_token = current_token
try:
impl = dispatch_cache[cls]
except KeyError:
try:
impl = registry[cls]
except KeyError:
impl = _find_impl(cls, registry)
dispatch_cache[cls] = impl
return impl
def register(cls, func=None):
"""generic_func.register(cls, func) -> func
Registers a new implementation for the given *cls* on a *generic_func*.
"""
if func is None:
return lambda f: register(cls, f)
registry[cls] = func
if ns.cache_token is None and hasattr(cls, '__abstractmethods__'):
ns.cache_token = get_cache_token()
dispatch_cache.clear()
return func
def wrapper(*args, **kw):
return dispatch(args[0].__class__)(*args, **kw)
registry[object] = func
wrapper.register = register
wrapper.dispatch = dispatch
wrapper.registry = MappingProxyType(registry)
wrapper._clear_cache = dispatch_cache.clear
update_wrapper(wrapper, func)
return wrapper
|
py | b4005b54dab9a525dfeb30f82e8fb21c870b098e | """Module containing the Base Client for Jina."""
import abc
import argparse
import inspect
import os
from abc import ABC
from typing import TYPE_CHECKING, AsyncIterator, Callable, Iterator, Optional, Union
from jina.excepts import BadClientInput
from jina.helper import T, parse_client, typename
from jina.logging.logger import JinaLogger
from jina.logging.predefined import default_logger
if TYPE_CHECKING:
from jina.clients.request import GeneratorSourceType
from jina.types.request import Request, Response
InputType = Union[GeneratorSourceType, Callable[..., GeneratorSourceType]]
CallbackFnType = Optional[Callable[[Response], None]]
class BaseClient(ABC):
"""A base client for connecting to the Flow Gateway.
:param args: the Namespace from argparse
:param kwargs: additional parameters that can be accepted by client parser
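
A hypothetical concrete subclass (editor's sketch; ``MyClient`` and the
keyword arguments are illustrative, not part of this module) is constructed
either from a parsed ``argparse.Namespace`` or from keyword arguments handed
to ``parse_client``::

    client = MyClient(host='localhost', port=12345)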
"""
def __init__(
self,
args: Optional['argparse.Namespace'] = None,
**kwargs,
):
if args and isinstance(args, argparse.Namespace):
self.args = args
else:
self.args = parse_client(kwargs)
self.logger = JinaLogger(self.__class__.__name__, **vars(self.args))
if not self.args.proxy and os.name != 'nt':
# (Han 2020 12.12): gRPC channel is over HTTP2 and it does not work when we have proxy
# as many enterprise users are behind a proxy, a quick way to
# work around it is to temporarily unset the proxy. Please do NOT panic as it will NOT
# affect the user's OS-level envs.
os.unsetenv('http_proxy')
os.unsetenv('https_proxy')
self._inputs = None
@staticmethod
def check_input(inputs: Optional['InputType'] = None, **kwargs) -> None:
"""Validate the inputs and print the first request if success.
:param inputs: the inputs
:param kwargs: keyword arguments
"""
if inputs is None:
# empty inputs is considered as valid
return
if hasattr(inputs, '__call__'):
# it is a function
inputs = inputs()
kwargs['data'] = inputs
kwargs['exec_endpoint'] = '/'
if inspect.isasyncgenfunction(inputs) or inspect.isasyncgen(inputs):
raise BadClientInput(
'checking the validity of an async generator is not implemented yet'
)
try:
from jina.clients.request import request_generator
r = next(request_generator(**kwargs))
from jina.types.request import Request
if not isinstance(r, Request):
raise TypeError(f'{typename(r)} is not a valid Request')
except Exception as ex:
default_logger.error(f'inputs is not valid!')
raise BadClientInput from ex
def _get_requests(
self, **kwargs
) -> Union[Iterator['Request'], AsyncIterator['Request']]:
"""
Get request in generator.
:param kwargs: Keyword arguments.
:return: Iterator of request.
"""
_kwargs = vars(self.args)
_kwargs['data'] = self.inputs
# override by the caller-specific kwargs
_kwargs.update(kwargs)
if hasattr(self._inputs, '__len__'):
total_docs = len(self._inputs)
elif 'total_docs' in _kwargs:
total_docs = _kwargs['total_docs']
else:
total_docs = None
self._inputs_length = None
if total_docs:
self._inputs_length = max(1, total_docs / _kwargs['request_size'])
if inspect.isasyncgen(self.inputs):
from jina.clients.request.asyncio import request_generator
return request_generator(**_kwargs)
else:
from jina.clients.request import request_generator
return request_generator(**_kwargs)
@property
def inputs(self) -> 'InputType':
"""
An iterator of bytes; each element represents a Document's raw content
(``inputs`` as defined in the protobuf).
:return: inputs
"""
return self._inputs
@inputs.setter
def inputs(self, bytes_gen: 'InputType') -> None:
"""
Set the input data.
:param bytes_gen: input type
"""
if hasattr(bytes_gen, '__call__'):
self._inputs = bytes_gen()
else:
self._inputs = bytes_gen
@abc.abstractmethod
async def _get_results(
self,
inputs: 'InputType',
on_done: 'CallbackFnType',
on_error: Optional['CallbackFnType'] = None,
on_always: Optional['CallbackFnType'] = None,
**kwargs,
):
...
@property
def client(self: T) -> T:
"""Return the client object itself
:return: the Client object
"""
return self
|
py | b4005ca5fb2fee52986b20da0d04be53aaf7c6e1 | # -*- coding: utf-8 -*-
"""Core targets for this process.
"""
#
# standard library imports
#
import io
import json
import os
import shutil
import subprocess
from collections import OrderedDict # python 3.1
from datetime import datetime
from pathlib import Path # python 3.4
#
# third-party imports
#
from flask import Response, request, abort
from Bio import SeqIO, AlignIO, Phylo
#
# local imports
#
from . import app, rq
#
# Non-configurable global constants.
#
# File-name-related variables.
SEQUENCE_EXTENSIONS = OrderedDict([
('DNA', '.fna'),
('peptide', '.faa')
])
SEQUENCES_NAME = 'sequences'
ALIGNMENT_NAME = 'alignment'
RUN_LOG_NAME = 'run_log.txt'
STATUS_NAME = 'status.txt'
STOCKHOLM_NAME = 'alignment.stockholm'
RAW_TREE_NAME = 'tree_raw.nwk'
TREE_NAME = 'tree.nwk'
PHYLOXML_NAME = 'tree.xml'
SEQUENCE_DATA_NAME = 'sequence_data.json'
HMM_FILENAME = 'family.hmm'
HMMSTATS_NAME = 'hmmstats.json'
FAMILIES_NAME = 'families.json'
ALL_FILENAMES = ['', # don't allow null name
ALIGNMENT_NAME + SEQUENCE_EXTENSIONS['DNA'],
ALIGNMENT_NAME + SEQUENCE_EXTENSIONS['peptide'],
SEQUENCES_NAME + SEQUENCE_EXTENSIONS['DNA'],
SEQUENCES_NAME + SEQUENCE_EXTENSIONS['peptide'],
HMM_FILENAME,
STATUS_NAME,
SEQUENCE_DATA_NAME,
RUN_LOG_NAME,
STOCKHOLM_NAME,
HMMSTATS_NAME,
FAMILIES_NAME,
PHYLOXML_NAME] + ['FastTree', 'RAxML']
# MIME types.
NEWICK_MIMETYPE = 'application/newick'
JSON_MIMETYPE = 'application/json'
FASTA_MIMETYPE = 'application/fasta'
TEXT_MIMETYPE = 'text/plain'
# hmmalign stuff.
HMM_SWITCHES = {'peptide': 'amino',
'DNA': 'dna'}
#
# Helper function defs start here.
#
def create_fasta(familyname, data_name, superfamily=None):
"""Verify and characterize a FASTA file and save it to disk.
:param familyname: Name of the family directory under the DATA path.
:param data_name: Base name (without extension) for the saved FASTA file.
:param superfamily: Optional superfamily subdirectory name.
:return: JSON response characterizing the saved sequences.
"""
record_dict = None
infileext = None
if not superfamily:
path = Path(app.config['DATA']) / familyname
else:
if superfamily in ALL_FILENAMES:
abort(403)
path = Path(app.config['DATA']) / familyname / superfamily
# post data
if path.exists() and not path.is_dir():
app.logger.warning('Removing existing file in data path name.')
path.unlink()
if not path.is_dir():
app.logger.debug("Creating directory %s.", path)
path.mkdir()
for sequence_type in SEQUENCE_EXTENSIONS.keys():
if sequence_type in request.files:
fasta = request.files[sequence_type]
infileext = SEQUENCE_EXTENSIONS[sequence_type]
break
else:
fasta = None
app.logger.error('Unrecognized request for FASTA.')
abort(400)
try: # parse FASTA file
record_dict = SeqIO.to_dict(
SeqIO.parse(io.StringIO(fasta.read().decode('UTF-8')), 'fasta'))
if len(record_dict) < 1:
raise ValueError
except (ValueError, RuntimeError):
app.logger.error('Unparseable/empty FASTA requested for family "%s".',
familyname)
abort(406)
lengths = [len(rec.seq) for rec in record_dict.values()]
infilename = data_name + infileext
if superfamily: # Do superfamily processing
sub_path = Path(app.config['DATA']) / familyname / infilename
sub_parsed_fasta = SeqIO.parse(str(sub_path), 'fasta')
sub_record_dict = SeqIO.to_dict(sub_parsed_fasta)
for rec in record_dict.values():
if not rec.id.startswith(superfamily):
rec.id = superfamily + '.' + rec.id
rec.description = superfamily + '.' + rec.description
record_dict.update(sub_record_dict) # combine sequences
fasta_dict = {'sequences': len(record_dict),
'sub_sequences': len(sub_record_dict),
'max_length': max(lengths),
'min_length': min(lengths),
'total_length': sum(lengths),
'overwrite': False,
'superfamily_name': superfamily}
else:
fasta_dict = {'sequences': len(record_dict),
'max_length': max(lengths),
'min_length': min(lengths),
'total_length': sum(lengths),
'overwrite': False}
app.logger.debug('Saving FASTA file for family "%s".', familyname)
if (path / infilename).exists():
app.logger.warning('Overwriting existing FASTA file for family %s.',
familyname)
fasta_dict['overwrite'] = True
with open(str(path / infilename), 'w') as fasta_outfh:
for seq in record_dict.values():
SeqIO.write(seq, fasta_outfh, 'fasta')
with open(str(path / SEQUENCE_DATA_NAME), 'w') as sequence_data_fh:
json.dump(fasta_dict, sequence_data_fh)
return Response(json.dumps(fasta_dict), mimetype=JSON_MIMETYPE)
def write_status(path, code):
"""Write a numeric status to file.
:param path: Path to the status file.
:param code: Integer status code to write.
:return:
"""
with path.open(mode='wt') as status_fh:
status_fh.write("%d\n" % code)
def run_subprocess_with_status(out_path,
err_path,
cmdlist,
cwd,
status_path,
post_process,
post_args):
"""Run a subprocess, writing a status file.
:param post_process: Function called after processing
:param post_args: Arguments to post_process
:param out_path: Path to which stdout gets sent
:param err_path: Path to which stderr gets sent
:param cmdlist: List of commands to be sent
:param cwd: Path to working directory
:param status_path: Path to status log file
:return: Return code of subprocess
"""
environ = os.environ.copy()
#
# Modify the environment to select the number of threads, if requested.
#
if app.config['THREADS'] > 0:
environ['OMP_NUM_THREADS'] = str(app.config['THREADS'])
with out_path.open(mode='wb') as out_fh:
with err_path.open(mode='wt') as err_fh:
status = subprocess.run(cmdlist,
stdout=out_fh,
stderr=err_fh,
cwd=str(cwd),
env=environ)
write_status(status_path, status.returncode)
if post_process is not None:
post_process(out_path,
err_path,
cwd,
status,
*post_args)
return status.returncode
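# Editor's note -- an illustrative (hypothetical) call showing how queued jobs
# are expected to use this helper; the paths and command here are made up:
#   run_subprocess_with_status(out_path=Path('alignment.stockholm'),
#                              err_path=Path('run_log.txt'),
#                              cmdlist=['hmmalign', '--amino', 'family.hmm', 'sequences.faa'],
#                              cwd=Path('.'),
#                              status_path=Path('status.txt'),
#                              post_process=None,
#                              post_args=())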
def datetime_to_isoformat(time):
if time is None:
return 'None'
else:
return datetime.isoformat(time)
def job_data_as_response(job, q):
"""Return a JSON dictionary of job parameters.
:param job: Job object.
:param q: Queue object.
:return: Response of JSON data.
"""
job_ids = q.get_job_ids()
if job.id in job_ids:
queue_position = job_ids.index(job.id)
else:
queue_position = len(job_ids)
queue_time = 0
# needs testing before shipping
# if queue_position > 1:
# for otherjob in job_ids[:queue_position-1]:
# queue_time += q.fetch_job(otherjob).estimated_time
job_dict = {'id': job.id,
'description': job.description,
'status': job.status,
'tasktype': job.tasktype,
'taskname': job.taskname,
'family': job.family,
'superfamily': job.superfamily,
# booleans
'is_queued': job.is_queued,
'is_started': job.is_started,
'is_finished': job.is_finished,
'is_failed': job.is_failed,
# times
'created_at': datetime_to_isoformat(job.created_at),
'enqueued_at': datetime_to_isoformat(job.enqueued_at),
'ended_at': datetime_to_isoformat(job.ended_at),
'started_at': datetime_to_isoformat(job.started_at),
'estimated_job_time': job.estimated_time,
# queue data
'queue_name': q.name,
'queue_position': queue_position,
'estimated_queue_time': queue_time
}
return Response(json.dumps(job_dict), mimetype=JSON_MIMETYPE)
def estimate_job_time(task):
"""Placeholder for alignment calculation time estimate.
:param task:
:return:
"""
if task == 'alignment':
return 10
elif task == 'tree':
return 60
def convert_stockholm_to_fasta(out_path,
err_path,
cwd,
status,
fasta):
"""Convert a Stockholm-format alignment file to FASTA.
:param cwd:
:param err_path:
:param out_path: Path to which stdout was sent.
:param status: Status object from subprocess.
:param fasta: Path to FASTA file to be created.
:return: Return code of subprocess.
"""
del err_path, cwd
if status.returncode == 0:
alignment = AlignIO.read(out_path.open(mode='rU'), 'stockholm')
AlignIO.write(alignment, fasta.open(mode='w'), 'fasta')
def cleanup_tree(raw_path,
err_path,
cwd,
status,
clean_path,
make_rooted,
root_name,
xml_path):
"""Ladderize output tree.
:param cwd:
:param err_path:
:param clean_path:
:param make_rooted:
:param root_name:
:param xml_path:
:param raw_path: Path to which raw tree was sent.
:param status: Status object from subprocess.
:return: Return code of subprocess.
"""
del err_path, cwd
if status.returncode == 0:
tree = Phylo.read(raw_path.open(mode='rU'), 'newick')
if make_rooted:
tree.root_at_midpoint()
tree.ladderize()
tree.root.name = root_name
Phylo.write(tree, clean_path.open(mode='w'), 'newick')
Phylo.write(tree, xml_path.open(mode='w'), 'phyloxml')
def set_job_description(tasktype, taskname, job, family, superfamily):
"""Set the job description.
:param tasktype: Type of task (string).
:param taskname: Name of task (string).
:param job: rq job object.
:param family: Name of family.
:param superfamily: Name of superfamily.
:return:
"""
job.tasktype = tasktype
job.taskname = taskname
job.family = family
job.superfamily = superfamily
job.estimated_time = estimate_job_time(tasktype)
if superfamily is None:
job.description = '%s %s of family %s' % (job.taskname,
job.tasktype,
job.family)
else:
job.description = '%s %s of superfamily %s.%s' % (job.taskname,
job.tasktype,
job.family,
job.superfamily)
def queue_calculation(familyname,
calculation,
superfamily=None):
"""Submit alignment or tree-building jobs or both to queue.
:param familyname: Name of previously-created family.
:param calculation: Name of calculation to be done.
:param superfamily: Name of superfamily directory (optional).
:return: JobID
"""
#
# Assignments to make PEP8 happy
#
alignment_tool = None
tree_builder = None
hmm_seq_type = None
seqfile = None
seq_type = None
alignment_input_path = None
aligner_command = None
alignment_status_path = None
tree_command = None
treebuilder_status_path = None
stockholm_path = None
alignment_log_path = None
raw_tree_path = None
tree_log_path = None
tree_dir = None
tree_path = None
phyloxml_path = None
alignment_output_path = None
#
# Get calculation type(s).
#
calculation_components = calculation.split('_')
if len(calculation_components) == 2: # combined calculation
if calculation_components[0] in list(app.config['ALIGNERS'].keys()):
alignment_tool = calculation_components[0]
else:
app.logger.error('Unrecognized aligner %s.',
calculation_components[0])
abort(404)
if calculation_components[1] in list(
app.config['TREEBUILDERS'].keys()):
tree_builder = calculation_components[1]
else:
app.logger.error('Unrecognized tree builder %s.',
calculation_components[1])
abort(404)
elif calculation in list(app.config['ALIGNERS'].keys()):
alignment_tool = calculation
tree_builder = None
elif calculation in list(app.config['TREEBUILDERS'].keys()):
alignment_tool = None
tree_builder = calculation
else:
app.logger.error('Unrecognized calculation type %s.', calculation)
abort(404)
#
# Get paths to things we might need for either calculation.
#
if not superfamily:
alignment_dir = Path(app.config['DATA']) / familyname
hmm_path = Path(HMM_FILENAME)
else:
if superfamily in ALL_FILENAMES:
app.logger.error('superfamily name is a reserved name, "%s".',
superfamily)
abort(403)
alignment_dir = Path(
app.config['DATA']) / familyname / superfamily
hmm_path = Path('..') / HMM_FILENAME
#
# Check for prerequisites and determine sequence types.
#
if not alignment_dir.is_dir():
app.logger.error('Directory was not previously created for %s.',
alignment_dir)
abort(428)
if alignment_tool is not None: # will do an alignment.
stockholm_path = alignment_dir / STOCKHOLM_NAME
alignment_status_path = alignment_dir / STATUS_NAME
alignment_log_path = alignment_dir / RUN_LOG_NAME
for key in SEQUENCE_EXTENSIONS.keys():
if (alignment_dir / (
SEQUENCES_NAME + SEQUENCE_EXTENSIONS[key])).exists():
seqfile = SEQUENCES_NAME + SEQUENCE_EXTENSIONS[key]
alignment_output_path = alignment_dir / (
ALIGNMENT_NAME + SEQUENCE_EXTENSIONS[key])
hmm_seq_type = HMM_SWITCHES[key]
# These are only used if building a tree.
alignment_input_path = Path('..') / (
ALIGNMENT_NAME + SEQUENCE_EXTENSIONS[key])
seq_type = key
break
else:
app.logger.error('Unable to find sequences to align.')
abort(404)
if tree_builder is not None: # will build a tree.
tree_dir = alignment_dir / tree_builder
treebuilder_status_path = tree_dir / STATUS_NAME
raw_tree_path = tree_dir / RAW_TREE_NAME
tree_path = tree_dir / TREE_NAME
phyloxml_path = tree_dir / PHYLOXML_NAME
tree_log_path = tree_dir / RUN_LOG_NAME
if not tree_dir.exists():
tree_dir.mkdir()
if alignment_tool is None: # build tree with alignment already done
for key in SEQUENCE_EXTENSIONS.keys():
if (alignment_dir / (
ALIGNMENT_NAME +
SEQUENCE_EXTENSIONS[key])).exists():
alignment_input_path = Path('..') / (
ALIGNMENT_NAME + SEQUENCE_EXTENSIONS[key])
seq_type = key
break
else: # pragma: no cover
app.logger.error('Unable to find aligned sequences.')
abort(404)
#
# Marshal command-line arguments.
#
if alignment_tool == 'hmmalign':
aligner_command = ['time', 'nice', app.config['HMMALIGN_EXE']] + \
app.config['ALIGNERS'][alignment_tool] + \
['--' + hmm_seq_type, str(hmm_path), str(seqfile)]
if tree_builder == 'FastTree':
tree_command = ['time', 'nice', app.config['FASTTREE_EXE']] \
+ app.config['TREEBUILDERS'][tree_builder][seq_type] \
+ [str(alignment_input_path)]
elif tree_builder == 'RAxML': # pragma: no cover
tree_command = ['time', 'nice', app.config['RAXML_EXE']] \
+ app.config['TREEBUILDERS'][tree_builder][seq_type] \
+ ['-n',
'production',
'-T',
'%d' % app.config['THREADS'],
'-s', str(alignment_input_path)]
#
# Log command line and initialize status files.
#
if alignment_tool is not None:
app.logger.debug('Alignment command line is %s.', aligner_command)
write_status(alignment_status_path, -1)
if tree_builder is not None:
app.logger.debug('Tree builder command line is %s.', tree_command)
write_status(treebuilder_status_path, -1)
#
# Queue processes.
#
align_queue = rq.get_queue(app.config['ALIGNMENT_QUEUE'])
tree_queue = rq.get_queue(app.config['TREE_QUEUE'])
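    # When both an aligner and a tree builder were requested, the tree job is
    # enqueued with depends_on=align_job so that rq starts it only after the
    # alignment job has finished.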
if alignment_tool is not None and tree_builder is not None:
align_job = align_queue.enqueue(run_subprocess_with_status,
args=(stockholm_path,
alignment_log_path,
aligner_command,
alignment_dir,
alignment_status_path,
convert_stockholm_to_fasta,
(alignment_output_path,),
),
timeout=app.config[
'ALIGNMENT_QUEUE_TIMEOUT']
)
set_job_description('alignment', alignment_tool, align_job, familyname,
superfamily)
tree_job = tree_queue.enqueue(run_subprocess_with_status,
args=(raw_tree_path,
tree_log_path,
tree_command,
tree_dir,
treebuilder_status_path,
cleanup_tree,
(tree_path, True, familyname,
phyloxml_path)),
timeout=app.config['TREE_QUEUE_TIMEOUT'],
depends_on=align_job
)
set_job_description('tree', tree_builder, tree_job, familyname,
superfamily)
return job_data_as_response(tree_job, tree_queue)
elif alignment_tool is not None:
align_job = align_queue.enqueue(run_subprocess_with_status,
args=(stockholm_path,
alignment_log_path,
aligner_command,
alignment_dir,
alignment_status_path,
convert_stockholm_to_fasta,
(alignment_output_path,)
),
timeout=app.config[
'ALIGNMENT_QUEUE_TIMEOUT']
)
set_job_description('alignment', alignment_tool, align_job, familyname,
superfamily)
return job_data_as_response(align_job, align_queue)
elif tree_builder is not None:
tree_job = tree_queue.enqueue(run_subprocess_with_status,
args=(raw_tree_path,
tree_log_path,
tree_command,
tree_dir,
treebuilder_status_path,
cleanup_tree,
(tree_path, True, familyname,
phyloxml_path)),
timeout=app.config['TREE_QUEUE_TIMEOUT']
)
set_job_description('tree', tree_builder, tree_job, familyname,
superfamily)
return job_data_as_response(tree_job, tree_queue)
else:
abort(404)
@app.route('/trees/' + FAMILIES_NAME)
def return_families():
"""Return the list of gene familes.
:return: JSON list
"""
directory_list = sorted(os.listdir(path=app.config['DATA']))
return Response(json.dumps(directory_list), mimetype=JSON_MIMETYPE)
@app.route('/trees/<family>/alignment', methods=['POST', 'GET'])
def post_or_get_alignment(family):
"""POST or GET alignment.
:param family: Family name
:return: FASTA of alignment on GET
"""
test_path = None
if request.method == 'POST':
return create_fasta(family, ALIGNMENT_NAME)
elif request.method == 'GET':
alignment_path = Path(
app.config['DATA']) / family / ALIGNMENT_NAME
for ext in SEQUENCE_EXTENSIONS.keys():
test_path = alignment_path.with_suffix(SEQUENCE_EXTENSIONS[ext])
if test_path.exists():
break
else:
abort(404)
return Response(test_path.open().read(), mimetype=FASTA_MIMETYPE)
@app.route('/trees/<family>.<superfamily>/alignment', methods=['POST', 'GET'])
def post_or_get_alignment_superfamily(family, superfamily):
"""POST or GET alignment for a superfamily.
:param family: Existing family name
:param superfamily: Existing superfamily name
    :return: FASTA of alignment on GET
"""
test_path = None
if request.method == 'POST':
return create_fasta(family, ALIGNMENT_NAME, superfamily=superfamily)
elif request.method == 'GET':
alignment_path = Path(
app.config['DATA']) / family / superfamily / ALIGNMENT_NAME
for ext in SEQUENCE_EXTENSIONS.keys():
test_path = alignment_path.with_suffix(SEQUENCE_EXTENSIONS[ext])
if test_path.exists():
break
else:
abort(404)
return Response(test_path.open().read(), mimetype=FASTA_MIMETYPE)
@app.route('/trees/<family>/sequences', methods=['POST'])
def post_sequences(family):
"""POST a set of sequences that belong in a family.
:param family: New or existing family name.
:return:
"""
return create_fasta(family, SEQUENCES_NAME)
@app.route('/trees/<family>.<superfamily>', methods=['DELETE'])
def delete_superfamily(family, superfamily):
"""DELETE a superfamily.
:param family:
:param superfamily:
:return:
"""
if superfamily in ALL_FILENAMES:
abort(403)
path = Path(app.config['DATA']) / family / superfamily
if not path.exists():
abort(403)
shutil.rmtree(str(path))
return 'Deleted "%s.%s".' % (family, superfamily)
@app.route('/trees/<family>.<superfamily>/sequences', methods=['POST'])
def post_superfamily_sequences(family, superfamily):
"""POST a set of sequences for a superfamily.
:param family: Existing family name
:param superfamily: Name of superfamily to be created
:return:
"""
return create_fasta(family, SEQUENCES_NAME, superfamily=superfamily)
@app.route('/trees/<family>/HMM', methods=['PUT'])
def put_hmm(family):
"""PUT an hmm that belongs with the family.
:param family: name of existing family
:return:
"""
hmm_fh = None
hmmstats_output = None
hmm_path = Path(app.config['DATA']) / family / HMM_FILENAME
try:
hmm_fh = hmm_path.open('wb')
except IOError: # pragma: no cover
app.logger.error('Unable to create "%s".', str(hmm_path))
abort(400)
hmm_fh.write(request.data)
hmm_fh.close()
try: # get HMM stats with hmmstat
with open(os.devnull, 'w') as devnull:
hmmstats_output = subprocess.check_output(
['hmmstat', HMM_FILENAME],
universal_newlines=True,
stderr=devnull,
cwd=str(hmm_path.parent))
except subprocess.CalledProcessError:
app.logger.error('Not a valid HMM file for family %s, removing.',
family)
hmm_path.unlink()
abort(406)
hmmstats_dict = {}
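    # Parse the single data row of hmmstat output; the whitespace-separated
    # columns are idx, name, accession, nseq, eff_nseq, M, relent, info,
    # relE and compKL.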
for line in io.StringIO(hmmstats_output):
if line.startswith('#') or line.startswith('\n'):
continue
else:
fields = line.split()
try:
hmmstats_dict['idx'] = fields[0]
hmmstats_dict['name'] = fields[1]
hmmstats_dict['accession'] = fields[2]
hmmstats_dict['nseq'] = int(fields[3])
hmmstats_dict['eff_nseq'] = float(fields[4])
hmmstats_dict['M'] = int(fields[5])
hmmstats_dict['relent'] = float(fields[6])
hmmstats_dict['info'] = float(fields[7])
hmmstats_dict['relE'] = float(fields[8])
hmmstats_dict['compKL'] = float(fields[9])
except (TypeError, KeyError,
ValueError): # pragma: no cover
app.logger.error(
'hmmstats did not return expected stats, check version.')
abort(406)
with (Path(hmm_path.parent) / HMMSTATS_NAME).open(mode='w') as hmmstats_fh:
json.dump(hmmstats_dict, hmmstats_fh)
return Response(json.dumps(hmmstats_dict), mimetype=JSON_MIMETYPE)
def bind_calculation(method, superfamily=False):
"""A factory for uniquely-named functions with route decorators applied.
:param superfamily:
:param method: Name of resulting method.
:return: Route-decorated function.
"""
if not superfamily:
def _calculate(family):
return queue_calculation(family,
method)
_calculate.__name__ = 'calculate_' + method
_calculate = app.route('/trees/<family>/' + method)(_calculate)
return _calculate
else:
def _calculate(family, sup):
return queue_calculation(family,
method,
superfamily=sup)
_calculate.__name__ = 'calculate_' + method + '_superfamily'
_calculate = app.route('/trees/<family>.<sup>/' + method)(_calculate)
return _calculate
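# Register one route per aligner, per aligner + tree-builder combination, and
# per tree builder, for both family and superfamily URLs (for example
# /trees/<family>/hmmalign and /trees/<family>.<sup>/hmmalign_FastTree,
# assuming those tools appear in the ALIGNERS and TREEBUILDERS configuration).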
calculation_methods = []
for aligner in list(app.config['ALIGNERS'].keys()):
calculation_methods.append(bind_calculation(aligner))
calculation_methods.append(bind_calculation(aligner, superfamily=True))
for builder in list(app.config['TREEBUILDERS'].keys()):
calculation_methods.append(bind_calculation(aligner + '_' + builder))
calculation_methods.append(bind_calculation(aligner +
'_' + builder,
superfamily=True))
for builder in list(app.config['TREEBUILDERS'].keys()):
calculation_methods.append(bind_calculation(builder))
calculation_methods.append(bind_calculation(builder, superfamily=True))
@app.route('/trees/<familyname>/<method>/' + TREE_NAME)
def get_existing_tree(familyname, method):
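    """Return the Newick tree built for a family with a given method.
    :param familyname: Name of existing family.
    :param method: Name of tree-building method.
    :return: Newick tree file contents.
    """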
if method not in app.config['TREEBUILDERS']:
abort(404)
inpath = Path(app.config['DATA']) / familyname / method / TREE_NAME
if not inpath.exists():
abort(404)
return Response(inpath.open().read(), mimetype=NEWICK_MIMETYPE)
@app.route('/trees/<family>.<sup>/<method>/' + TREE_NAME)
def get_existing_tree_super(family, method, sup):
return get_existing_tree(family + '/' + sup, method)
@app.route('/trees/<familyname>/<method>/' + PHYLOXML_NAME)
def get_phyloxml_tree(familyname, method):
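    """Return the phyloXML tree built for a family with a given method.
    :param familyname: Name of existing family.
    :param method: Name of tree-building method.
    :return: phyloXML tree file contents.
    """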
if method not in app.config['TREEBUILDERS']:
abort(404)
inpath = Path(
app.config['DATA']) / familyname / method / PHYLOXML_NAME
if not inpath.exists():
abort(404)
return Response(inpath.open().read(), mimetype=NEWICK_MIMETYPE)
@app.route('/trees/<family>.<sup>/<method>/' + PHYLOXML_NAME)
def get_phyloxml_tree_super(family, method, sup):
return get_phyloxml_tree(family + '/' + sup, method)
@app.route('/trees/<familyname>/<method>/' + RUN_LOG_NAME)
def get_log(familyname, method):
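    """Return the run log of an alignment or tree-building calculation.
    :param familyname: Name of existing family.
    :param method: Name of aligner or tree builder.
    :return: Log file contents.
    """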
inpath = None
if method in list(app.config['TREEBUILDERS'].keys()):
inpath = Path(
app.config['DATA']) / familyname / method / RUN_LOG_NAME
elif method in list(app.config['ALIGNERS'].keys()):
inpath = Path(app.config['DATA']) / familyname / RUN_LOG_NAME
else:
abort(428)
if not inpath.exists():
abort(404)
return Response(inpath.open().read(), mimetype=TEXT_MIMETYPE)
@app.route('/trees/<family>.<sup>/<method>/' + RUN_LOG_NAME)
def get_log_super(family, method, sup):
return get_log(family + '/' + sup, method)
@app.route('/trees/<familyname>/<method>/status')
def get_status(familyname, method):
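    """Return the status of an alignment or tree-building calculation.
    :param familyname: Name of existing family.
    :param method: Name of aligner or tree builder.
    :return: Status file contents; the file is initialized to -1 when the
        job is queued.
    """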
inpath = None
if method in list(app.config['TREEBUILDERS'].keys()):
inpath = Path(
app.config['DATA']) / familyname / method / STATUS_NAME
elif method in list(app.config['ALIGNERS'].keys()):
inpath = Path(app.config['DATA']) / familyname / STATUS_NAME
else:
abort(428)
if not inpath.exists():
abort(404)
status = inpath.open().read()
return status
@app.route('/trees/<family>.<sup>/<method>/status')
def get_status_super(family, method, sup):
return get_status(family + '/' + sup, method)
|
py | b4005df7c22c65a6afcd611be0d77519f9bdf766 | from core.himesis import Himesis, HimesisPreConditionPatternLHS
import uuid
class HGlobalVarGetsCorrectFunctionAddress_CompleteLHS(HimesisPreConditionPatternLHS):
def __init__(self):
"""
Creates the himesis graph representing the AToM3 model HGlobalVarGetsCorrectFunctionAddress_CompleteLHS
"""
# Flag this instance as compiled now
self.is_compiled = True
super(HGlobalVarGetsCorrectFunctionAddress_CompleteLHS, self).__init__(name='HGlobalVarGetsCorrectFunctionAddress_CompleteLHS', num_nodes=0, edges=[])
# Add the edges
self.add_edges([])
# Set the graph attributes
self["mm__"] = ['MT_pre__FamiliesToPersonsMM', 'MoTifRule']
self["MT_constraint__"] = """return True"""
self["name"] = """"""
self["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'HGlobalVarGetsCorrectFunctionAddress_CompleteLHS')
self["equations"] = []
# Set the node attributes
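        # (In the node list below, vertices 0-7 are match classes, 8-25 are
        # apply classes, 26-34 match associations, 35-50 apply associations,
        # and 51-57 trace links.)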
# match class ComponentInstance(0.2.m.0ComponentInstance) node
self.add_node()
self.vs[0]["MT_pre__attr1"] = """return True"""
self.vs[0]["MT_label__"] = """1"""
self.vs[0]["mm__"] = """MT_pre__ComponentInstance"""
self.vs[0]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.m.0ComponentInstance')
# match class Operation(0.2.m.1Operation) node
self.add_node()
self.vs[1]["MT_pre__attr1"] = """return True"""
self.vs[1]["MT_label__"] = """2"""
self.vs[1]["mm__"] = """MT_pre__Operation"""
self.vs[1]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.m.1Operation')
# match class OperationTrigger(0.2.m.2OperationTrigger) node
self.add_node()
self.vs[2]["MT_pre__attr1"] = """return True"""
self.vs[2]["MT_label__"] = """3"""
self.vs[2]["mm__"] = """MT_pre__OperationTrigger"""
self.vs[2]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.m.2OperationTrigger')
# match class Executable(0.2.m.3Executable) node
self.add_node()
self.vs[3]["MT_pre__attr1"] = """return True"""
self.vs[3]["MT_label__"] = """4"""
self.vs[3]["mm__"] = """MT_pre__Executable"""
self.vs[3]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.m.3Executable')
# match class ProvidedPort(0.2.m.4ProvidedPort) node
self.add_node()
self.vs[4]["MT_pre__attr1"] = """return True"""
self.vs[4]["MT_label__"] = """5"""
self.vs[4]["mm__"] = """MT_pre__ProvidedPort"""
self.vs[4]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.m.4ProvidedPort')
# match class InstanceConfiguration(0.2.m.5InstanceConfiguration) node
self.add_node()
self.vs[5]["MT_pre__attr1"] = """return True"""
self.vs[5]["MT_label__"] = """6"""
self.vs[5]["mm__"] = """MT_pre__InstanceConfiguration"""
self.vs[5]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.m.5InstanceConfiguration')
# match class ClientServerInterface(0.2.m.6ClientServerInterface) node
self.add_node()
self.vs[6]["MT_pre__attr1"] = """return True"""
self.vs[6]["MT_label__"] = """7"""
self.vs[6]["mm__"] = """MT_pre__ClientServerInterface"""
self.vs[6]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.m.6ClientServerInterface')
# match class AtomicComponent(0.2.m.7AtomicComponent) node
self.add_node()
self.vs[7]["MT_pre__attr1"] = """return True"""
self.vs[7]["MT_label__"] = """8"""
self.vs[7]["mm__"] = """MT_pre__AtomicComponent"""
self.vs[7]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.m.7AtomicComponent')
# apply class StatementList(0.2.a.0StatementList) node
self.add_node()
self.vs[8]["MT_pre__attr1"] = """return True"""
self.vs[8]["MT_label__"] = """9"""
self.vs[8]["mm__"] = """MT_pre__StatementList"""
self.vs[8]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.0StatementList')
# apply class FunctionCall(0.2.a.1FunctionCall) node
self.add_node()
self.vs[9]["MT_pre__attr1"] = """return True"""
self.vs[9]["MT_label__"] = """10"""
self.vs[9]["mm__"] = """MT_pre__FunctionCall"""
self.vs[9]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.1FunctionCall')
# apply class GenericMemberRef(0.2.a.2GenericMemberRef) node
self.add_node()
self.vs[10]["MT_pre__attr1"] = """return True"""
self.vs[10]["MT_label__"] = """11"""
self.vs[10]["mm__"] = """MT_pre__GenericMemberRef"""
self.vs[10]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.2GenericMemberRef')
# apply class AssignmentExpr(0.2.a.3AssignmentExpr) node
self.add_node()
self.vs[11]["MT_pre__attr1"] = """return True"""
self.vs[11]["MT_label__"] = """12"""
self.vs[11]["mm__"] = """MT_pre__AssignmentExpr"""
self.vs[11]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.3AssignmentExpr')
# apply class ExpressionStatement(0.2.a.4ExpressionStatement) node
self.add_node()
self.vs[12]["MT_pre__attr1"] = """return True"""
self.vs[12]["MT_label__"] = """13"""
self.vs[12]["mm__"] = """MT_pre__ExpressionStatement"""
self.vs[12]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.4ExpressionStatement')
# apply class StatementList(0.2.a.5StatementList) node
self.add_node()
self.vs[13]["MT_pre__attr1"] = """return True"""
self.vs[13]["MT_label__"] = """14"""
self.vs[13]["mm__"] = """MT_pre__StatementList"""
self.vs[13]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.5StatementList')
# apply class FunctionPrototype(0.2.a.6FunctionPrototype) node
self.add_node()
self.vs[14]["MT_pre__attr1"] = """return True"""
self.vs[14]["MT_label__"] = """15"""
self.vs[14]["mm__"] = """MT_pre__FunctionPrototype"""
self.vs[14]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.6FunctionPrototype')
# apply class ExpressionStatement(0.2.a.7ExpressionStatement) node
self.add_node()
self.vs[15]["MT_pre__attr1"] = """return True"""
self.vs[15]["MT_label__"] = """16"""
self.vs[15]["mm__"] = """MT_pre__ExpressionStatement"""
self.vs[15]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.7ExpressionStatement')
# apply class Function(0.2.a.8Function) node
self.add_node()
self.vs[16]["MT_pre__attr1"] = """return True"""
self.vs[16]["MT_label__"] = """17"""
self.vs[16]["mm__"] = """MT_pre__Function"""
self.vs[16]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.8Function')
# apply class FunctionRefExpr(0.2.a.9FunctionRefExpr) node
self.add_node()
self.vs[17]["MT_pre__attr1"] = """return True"""
self.vs[17]["MT_label__"] = """18"""
self.vs[17]["mm__"] = """MT_pre__FunctionRefExpr"""
self.vs[17]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.9FunctionRefExpr')
# apply class GlobalVarRef(0.2.a.10GlobalVarRef) node
self.add_node()
self.vs[18]["MT_pre__attr1"] = """return True"""
self.vs[18]["MT_label__"] = """19"""
self.vs[18]["mm__"] = """MT_pre__GlobalVarRef"""
self.vs[18]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.10GlobalVarRef')
# apply class ReferenceExpr(0.2.a.11ReferenceExpression) node
self.add_node()
self.vs[19]["MT_pre__attr1"] = """return True"""
self.vs[19]["MT_label__"] = """20"""
self.vs[19]["mm__"] = """MT_pre__ReferenceExpr"""
self.vs[19]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.11ReferenceExpression')
# apply class GenericDotExpression(0.2.a.12GenericDotExpression) node
self.add_node()
self.vs[20]["MT_pre__attr1"] = """return True"""
self.vs[20]["MT_label__"] = """21"""
self.vs[20]["mm__"] = """MT_pre__GenericDotExpression"""
self.vs[20]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.12GenericDotExpression')
# apply class Function(0.2.a.13Function) node
self.add_node()
self.vs[21]["MT_pre__attr1"] = """return True"""
self.vs[21]["MT_label__"] = """22"""
self.vs[21]["mm__"] = """MT_pre__Function"""
self.vs[21]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.13Function')
# apply class FunctionPrototype(0.2.a.14FunctionPrototype) node
self.add_node()
self.vs[22]["MT_pre__attr1"] = """return True"""
self.vs[22]["MT_label__"] = """23"""
self.vs[22]["mm__"] = """MT_pre__FunctionPrototype"""
self.vs[22]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.14FunctionPrototype')
# apply class GlobalVariableDeclaration(0.2.a.15GlobalVariableDeclaration) node
self.add_node()
self.vs[23]["MT_pre__attr1"] = """return True"""
self.vs[23]["MT_label__"] = """24"""
self.vs[23]["mm__"] = """MT_pre__GlobalVariableDeclaration"""
self.vs[23]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.15GlobalVariableDeclaration')
# apply class StructDeclaration(0.2.a.16StructDeclaration) node
self.add_node()
self.vs[24]["MT_pre__attr1"] = """return True"""
self.vs[24]["MT_label__"] = """25"""
self.vs[24]["mm__"] = """MT_pre__StructDeclaration"""
self.vs[24]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.16StructDeclaration')
# apply class CFunctionPointerStructMember(0.2.a.17CFunctionPointerStructMember) node
self.add_node()
self.vs[25]["MT_pre__attr1"] = """return True"""
self.vs[25]["MT_label__"] = """26"""
self.vs[25]["mm__"] = """MT_pre__CFunctionPointerStructMember"""
self.vs[25]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.17CFunctionPointerStructMember')
# match association InstanceConfiguration--contents-->ComponentInstancenode
self.add_node()
self.vs[26]["MT_pre__attr1"] = """return attr_value == "contents" """
self.vs[26]["MT_label__"] = """27"""
self.vs[26]["mm__"] = """MT_pre__directLink_S"""
self.vs[26]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.m.5InstanceConfigurationassoc260.2.m.0ComponentInstance')
# match association AtomicComponent--contents-->ProvidedPortnode
self.add_node()
self.vs[27]["MT_pre__attr1"] = """return attr_value == "contents" """
self.vs[27]["MT_label__"] = """28"""
self.vs[27]["mm__"] = """MT_pre__directLink_S"""
self.vs[27]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.m.7AtomicComponentassoc270.2.m.4ProvidedPort')
# match association ComponentInstance--component-->AtomicComponentnode
self.add_node()
self.vs[28]["MT_pre__attr1"] = """return attr_value == "component" """
self.vs[28]["MT_label__"] = """29"""
self.vs[28]["mm__"] = """MT_pre__directLink_S"""
self.vs[28]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.m.0ComponentInstanceassoc280.2.m.7AtomicComponent')
# match association AtomicComponent--contents-->Executablenode
self.add_node()
self.vs[29]["MT_pre__attr1"] = """return attr_value == "contents" """
self.vs[29]["MT_label__"] = """30"""
self.vs[29]["mm__"] = """MT_pre__directLink_S"""
self.vs[29]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.m.7AtomicComponentassoc290.2.m.3Executable')
# match association Executable--trigger-->OperationTriggernode
self.add_node()
self.vs[30]["MT_pre__attr1"] = """return attr_value == "trigger" """
self.vs[30]["MT_label__"] = """31"""
self.vs[30]["mm__"] = """MT_pre__directLink_S"""
self.vs[30]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.m.3Executableassoc300.2.m.2OperationTrigger')
# match association OperationTrigger--calledOperation-->Operationnode
self.add_node()
self.vs[31]["MT_pre__attr1"] = """return attr_value == "calledOperation" """
self.vs[31]["MT_label__"] = """32"""
self.vs[31]["mm__"] = """MT_pre__directLink_S"""
self.vs[31]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.m.2OperationTriggerassoc310.2.m.1Operation')
# match association ProvidedPort--intf-->ClientServerInterfacenode
self.add_node()
self.vs[32]["MT_pre__attr1"] = """return attr_value == "intf" """
self.vs[32]["MT_label__"] = """33"""
self.vs[32]["mm__"] = """MT_pre__directLink_S"""
self.vs[32]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.m.4ProvidedPortassoc320.2.m.6ClientServerInterface')
# match association ClientServerInterface--contents-->Operationnode
self.add_node()
self.vs[33]["MT_pre__attr1"] = """return attr_value == "contents" """
self.vs[33]["MT_label__"] = """34"""
self.vs[33]["mm__"] = """MT_pre__directLink_S"""
self.vs[33]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.m.6ClientServerInterfaceassoc330.2.m.1Operation')
# match association OperationTrigger--providedPort-->ProvidedPortnode
self.add_node()
self.vs[34]["MT_pre__attr1"] = """return attr_value == "providedPort" """
self.vs[34]["MT_label__"] = """35"""
self.vs[34]["mm__"] = """MT_pre__directLink_S"""
self.vs[34]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.m.2OperationTriggerassoc340.2.m.4ProvidedPort')
# apply association FunctionCall--function-->FunctionPrototypenode
self.add_node()
self.vs[35]["MT_pre__attr1"] = """return attr_value == "function" """
self.vs[35]["MT_label__"] = """36"""
self.vs[35]["mm__"] = """MT_pre__directLink_T"""
self.vs[35]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.1FunctionCallassoc350.2.a.6FunctionPrototype')
# apply association Function--body-->StatementListnode
self.add_node()
self.vs[36]["MT_pre__attr1"] = """return attr_value == "body" """
self.vs[36]["MT_label__"] = """37"""
self.vs[36]["mm__"] = """MT_pre__directLink_T"""
self.vs[36]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.8Functionassoc360.2.a.5StatementList')
# apply association StatementList--statements-->ExpressionStatementnode
self.add_node()
self.vs[37]["MT_pre__attr1"] = """return attr_value == "statements" """
self.vs[37]["MT_label__"] = """38"""
self.vs[37]["mm__"] = """MT_pre__directLink_T"""
self.vs[37]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.5StatementListassoc370.2.a.7ExpressionStatement')
# apply association ExpressionStatement--expr-->FunctionCallnode
self.add_node()
self.vs[38]["MT_pre__attr1"] = """return attr_value == "expr" """
self.vs[38]["MT_label__"] = """39"""
self.vs[38]["mm__"] = """MT_pre__directLink_T"""
self.vs[38]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.7ExpressionStatementassoc380.2.a.1FunctionCall')
# apply association Function--body-->StatementListnode
self.add_node()
self.vs[39]["MT_pre__attr1"] = """return attr_value == "body" """
self.vs[39]["MT_label__"] = """40"""
self.vs[39]["mm__"] = """MT_pre__directLink_T"""
self.vs[39]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.13Functionassoc390.2.a.0StatementList')
# apply association StatementList--statements-->ExpressionStatementnode
self.add_node()
self.vs[40]["MT_pre__attr1"] = """return attr_value == "statements" """
self.vs[40]["MT_label__"] = """41"""
self.vs[40]["mm__"] = """MT_pre__directLink_T"""
self.vs[40]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.0StatementListassoc400.2.a.4ExpressionStatement')
# apply association ExpressionStatement--expr-->AssignmentExprnode
self.add_node()
self.vs[41]["MT_pre__attr1"] = """return attr_value == "expr" """
self.vs[41]["MT_label__"] = """42"""
self.vs[41]["mm__"] = """MT_pre__directLink_T"""
self.vs[41]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.4ExpressionStatementassoc410.2.a.3AssignmentExpr')
# apply association AssignmentExpr--left-->GenericDotExpressionnode
self.add_node()
self.vs[42]["MT_pre__attr1"] = """return attr_value == "left" """
self.vs[42]["MT_label__"] = """43"""
self.vs[42]["mm__"] = """MT_pre__directLink_T"""
self.vs[42]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.3AssignmentExprassoc420.2.a.12GenericDotExpression')
# apply association AssignmentExpr--right-->ReferenceExpressionnode
self.add_node()
self.vs[43]["MT_pre__attr1"] = """return attr_value == "right" """
self.vs[43]["MT_label__"] = """44"""
self.vs[43]["mm__"] = """MT_pre__directLink_T"""
self.vs[43]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.3AssignmentExprassoc430.2.a.11ReferenceExpression')
# apply association GenericDotExpression--expression-->GlobalVarRefnode
self.add_node()
self.vs[44]["MT_pre__attr1"] = """return attr_value == "expression" """
self.vs[44]["MT_label__"] = """45"""
self.vs[44]["mm__"] = """MT_pre__directLink_T"""
self.vs[44]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.12GenericDotExpressionassoc440.2.a.10GlobalVarRef')
# apply association GenericDotExpression--target-->GenericMemberRefnode
self.add_node()
self.vs[45]["MT_pre__attr1"] = """return attr_value == "target" """
self.vs[45]["MT_label__"] = """46"""
self.vs[45]["mm__"] = """MT_pre__directLink_T"""
self.vs[45]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.12GenericDotExpressionassoc450.2.a.2GenericMemberRef')
# apply association ReferenceExpression--expression-->FunctionRefExprnode
self.add_node()
self.vs[46]["MT_pre__attr1"] = """return attr_value == "expression" """
self.vs[46]["MT_label__"] = """47"""
self.vs[46]["mm__"] = """MT_pre__directLink_T"""
self.vs[46]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.11ReferenceExpressionassoc460.2.a.9FunctionRefExpr')
# apply association FunctionRefExpr--function-->FunctionPrototypenode
self.add_node()
self.vs[47]["MT_pre__attr1"] = """return attr_value == "function" """
self.vs[47]["MT_label__"] = """48"""
self.vs[47]["mm__"] = """MT_pre__directLink_T"""
self.vs[47]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.9FunctionRefExprassoc470.2.a.14FunctionPrototype')
# apply association StructDeclaration--members-->CFunctionPointerStructMembernode
self.add_node()
self.vs[48]["MT_pre__attr1"] = """return attr_value == "members" """
self.vs[48]["MT_label__"] = """49"""
self.vs[48]["mm__"] = """MT_pre__directLink_T"""
self.vs[48]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.16StructDeclarationassoc480.2.a.17CFunctionPointerStructMember')
# apply association GenericMemberRef--member-->CFunctionPointerStructMembernode
self.add_node()
self.vs[49]["MT_pre__attr1"] = """return attr_value == "member" """
self.vs[49]["MT_label__"] = """50"""
self.vs[49]["mm__"] = """MT_pre__directLink_T"""
self.vs[49]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.2GenericMemberRefassoc490.2.a.17CFunctionPointerStructMember')
# apply association GlobalVarRef--var-->GlobalVariableDeclarationnode
self.add_node()
self.vs[50]["MT_pre__attr1"] = """return attr_value == "var" """
self.vs[50]["MT_label__"] = """51"""
self.vs[50]["mm__"] = """MT_pre__directLink_T"""
self.vs[50]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.10GlobalVarRefassoc500.2.a.15GlobalVariableDeclaration')
# trace association Function--trace-->InstanceConfigurationnode
self.add_node()
self.vs[51]["MT_label__"] = """52"""
self.vs[51]["mm__"] = """MT_pre__trace_link"""
self.vs[51]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.8Functionassoc510.2.m.5InstanceConfiguration')
# trace association FunctionPrototype--trace-->ComponentInstancenode
self.add_node()
self.vs[52]["MT_label__"] = """53"""
self.vs[52]["mm__"] = """MT_pre__trace_link"""
self.vs[52]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.6FunctionPrototypeassoc520.2.m.0ComponentInstance')
# trace association Function--trace-->ComponentInstancenode
self.add_node()
self.vs[53]["MT_label__"] = """54"""
self.vs[53]["mm__"] = """MT_pre__trace_link"""
self.vs[53]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.13Functionassoc530.2.m.0ComponentInstance')
# trace association FunctionPrototype--trace-->Executablenode
self.add_node()
self.vs[54]["MT_label__"] = """55"""
self.vs[54]["mm__"] = """MT_pre__trace_link"""
self.vs[54]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.14FunctionPrototypeassoc540.2.m.3Executable')
# trace association GlobalVariableDeclaration--trace-->ComponentInstancenode
self.add_node()
self.vs[55]["MT_label__"] = """56"""
self.vs[55]["mm__"] = """MT_pre__trace_link"""
self.vs[55]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.15GlobalVariableDeclarationassoc550.2.m.0ComponentInstance')
# trace association StructDeclaration--trace-->ClientServerInterfacenode
self.add_node()
self.vs[56]["MT_label__"] = """57"""
self.vs[56]["mm__"] = """MT_pre__trace_link"""
self.vs[56]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.16StructDeclarationassoc560.2.m.6ClientServerInterface')
# trace association CFunctionPointerStructMember--trace-->Operationnode
self.add_node()
self.vs[57]["MT_label__"] = """58"""
self.vs[57]["mm__"] = """MT_pre__trace_link"""
self.vs[57]["GUID__"] = uuid.uuid3(uuid.NAMESPACE_DNS,'0.2.a.17CFunctionPointerStructMemberassoc570.2.m.1Operation')
self['equations'].append(((14,'name'),('concat',(('wildcard'),('constant','__wire')))))
self['equations'].append(((16,'name'),('concat',(('wildcard'),('constant','__init')))))
self['equations'].append(((21,'name'),('concat',(('wildcard'),('constant','__wire')))))
self['equations'].append(((22,'name'),('concat',(('wildcard'),(3,'name')))))
self['equations'].append(((23,'name'),('concat',(('wildcard'),('constant','__ops')))))
self['equations'].append(((24,'name'),('concat',(('wildcard'),('constant','__idata')))))
self['equations'].append(((25,'name'),(1,'name')))
# Add the edges
self.add_edges([
(5,26), # match class InstanceConfiguration(0.2.m.5InstanceConfiguration) -> association contents
(26,0), # association ComponentInstance -> match class ComponentInstance(0.2.m.0ComponentInstance)
(7,27), # match class AtomicComponent(0.2.m.7AtomicComponent) -> association contents
(27,4), # association ProvidedPort -> match class ProvidedPort(0.2.m.4ProvidedPort)
(0,28), # match class ComponentInstance(0.2.m.0ComponentInstance) -> association component
(28,7), # association AtomicComponent -> match class AtomicComponent(0.2.m.7AtomicComponent)
(7,29), # match class AtomicComponent(0.2.m.7AtomicComponent) -> association contents
(29,3), # association Executable -> match class Executable(0.2.m.3Executable)
(3,30), # match class Executable(0.2.m.3Executable) -> association trigger
(30,2), # association OperationTrigger -> match class OperationTrigger(0.2.m.2OperationTrigger)
(2,31), # match class OperationTrigger(0.2.m.2OperationTrigger) -> association calledOperation
(31,1), # association Operation -> match class Operation(0.2.m.1Operation)
(4,32), # match class ProvidedPort(0.2.m.4ProvidedPort) -> association intf
(32,6), # association ClientServerInterface -> match class ClientServerInterface(0.2.m.6ClientServerInterface)
(6,33), # match class ClientServerInterface(0.2.m.6ClientServerInterface) -> association contents
(33,1), # association Operation -> match class Operation(0.2.m.1Operation)
(2,34), # match class OperationTrigger(0.2.m.2OperationTrigger) -> association providedPort
(34,4), # association ProvidedPort -> match class ProvidedPort(0.2.m.4ProvidedPort)
(9,35), # apply class FunctionCall(0.2.a.1FunctionCall) -> association function
(35,14), # association FunctionPrototype -> apply class FunctionPrototype(0.2.a.6FunctionPrototype)
(16,36), # apply class Function(0.2.a.8Function) -> association body
(36,13), # association StatementList -> apply class StatementList(0.2.a.5StatementList)
(13,37), # apply class StatementList(0.2.a.5StatementList) -> association statements
(37,15), # association ExpressionStatement -> apply class ExpressionStatement(0.2.a.7ExpressionStatement)
(15,38), # apply class ExpressionStatement(0.2.a.7ExpressionStatement) -> association expr
(38,9), # association FunctionCall -> apply class FunctionCall(0.2.a.1FunctionCall)
(21,39), # apply class Function(0.2.a.13Function) -> association body
(39,8), # association StatementList -> apply class StatementList(0.2.a.0StatementList)
(8,40), # apply class StatementList(0.2.a.0StatementList) -> association statements
(40,12), # association ExpressionStatement -> apply class ExpressionStatement(0.2.a.4ExpressionStatement)
(12,41), # apply class ExpressionStatement(0.2.a.4ExpressionStatement) -> association expr
(41,11), # association AssignmentExpr -> apply class AssignmentExpr(0.2.a.3AssignmentExpr)
(11,42), # apply class AssignmentExpr(0.2.a.3AssignmentExpr) -> association left
(42,20), # association GenericDotExpression -> apply class GenericDotExpression(0.2.a.12GenericDotExpression)
(11,43), # apply class AssignmentExpr(0.2.a.3AssignmentExpr) -> association right
(43,19), # association ReferenceExpression -> apply class ReferenceExpression(0.2.a.11ReferenceExpression)
(20,44), # apply class GenericDotExpression(0.2.a.12GenericDotExpression) -> association expression
(44,18), # association GlobalVarRef -> apply class GlobalVarRef(0.2.a.10GlobalVarRef)
(20,45), # apply class GenericDotExpression(0.2.a.12GenericDotExpression) -> association target
(45,10), # association GenericMemberRef -> apply class GenericMemberRef(0.2.a.2GenericMemberRef)
(19,46), # apply class ReferenceExpression(0.2.a.11ReferenceExpression) -> association expression
(46,17), # association FunctionRefExpr -> apply class FunctionRefExpr(0.2.a.9FunctionRefExpr)
(17,47), # apply class FunctionRefExpr(0.2.a.9FunctionRefExpr) -> association function
(47,22), # association FunctionPrototype -> apply class FunctionPrototype(0.2.a.14FunctionPrototype)
(24,48), # apply class StructDeclaration(0.2.a.16StructDeclaration) -> association members
(48,25), # association CFunctionPointerStructMember -> apply class CFunctionPointerStructMember(0.2.a.17CFunctionPointerStructMember)
(10,49), # apply class GenericMemberRef(0.2.a.2GenericMemberRef) -> association member
(49,25), # association CFunctionPointerStructMember -> apply class CFunctionPointerStructMember(0.2.a.17CFunctionPointerStructMember)
(18,50), # apply class GlobalVarRef(0.2.a.10GlobalVarRef) -> association var
(50,23), # association GlobalVariableDeclaration -> apply class GlobalVariableDeclaration(0.2.a.15GlobalVariableDeclaration)
(16,51), # apply class Function(0.2.m.5InstanceConfiguration) -> backward_association
(51,5), # backward_associationInstanceConfiguration -> match_class InstanceConfiguration(0.2.m.5InstanceConfiguration)
(14,52), # apply class FunctionPrototype(0.2.m.0ComponentInstance) -> backward_association
(52,0), # backward_associationComponentInstance -> match_class ComponentInstance(0.2.m.0ComponentInstance)
(21,53), # apply class Function(0.2.m.0ComponentInstance) -> backward_association
(53,0), # backward_associationComponentInstance -> match_class ComponentInstance(0.2.m.0ComponentInstance)
(22,54), # apply class FunctionPrototype(0.2.m.3Executable) -> backward_association
(54,3), # backward_associationExecutable -> match_class Executable(0.2.m.3Executable)
(23,55), # apply class GlobalVariableDeclaration(0.2.m.0ComponentInstance) -> backward_association
(55,0), # backward_associationComponentInstance -> match_class ComponentInstance(0.2.m.0ComponentInstance)
(24,56), # apply class StructDeclaration(0.2.m.6ClientServerInterface) -> backward_association
(56,6), # backward_associationClientServerInterface -> match_class ClientServerInterface(0.2.m.6ClientServerInterface)
(25,57), # apply class CFunctionPointerStructMember(0.2.m.1Operation) -> backward_association
(57,1), # backward_associationOperation -> match_class Operation(0.2.m.1Operation)
])
# define evaluation methods for each match class.
def eval_attr11(self, attr_value, this):
return True
def eval_attr12(self, attr_value, this):
return True
def eval_attr13(self, attr_value, this):
return True
def eval_attr14(self, attr_value, this):
return True
def eval_attr15(self, attr_value, this):
return True
def eval_attr16(self, attr_value, this):
return True
def eval_attr17(self, attr_value, this):
return True
def eval_attr18(self, attr_value, this):
return True
# define evaluation methods for each apply class.
def eval_attr19(self, attr_value, this):
return True
def eval_attr110(self, attr_value, this):
return True
def eval_attr111(self, attr_value, this):
return True
def eval_attr112(self, attr_value, this):
return True
def eval_attr113(self, attr_value, this):
return True
def eval_attr114(self, attr_value, this):
return True
def eval_attr115(self, attr_value, this):
return True
def eval_attr116(self, attr_value, this):
return True
def eval_attr117(self, attr_value, this):
return True
def eval_attr118(self, attr_value, this):
return True
def eval_attr119(self, attr_value, this):
return True
def eval_attr120(self, attr_value, this):
return True
def eval_attr121(self, attr_value, this):
return True
def eval_attr122(self, attr_value, this):
return True
def eval_attr123(self, attr_value, this):
return True
def eval_attr124(self, attr_value, this):
return True
def eval_attr125(self, attr_value, this):
return True
def eval_attr126(self, attr_value, this):
return True
# define evaluation methods for each match association.
def eval_attr127(self, attr_value, this):
return attr_value == "contents"
def eval_attr128(self, attr_value, this):
return attr_value == "contents"
def eval_attr129(self, attr_value, this):
return attr_value == "component"
def eval_attr130(self, attr_value, this):
return attr_value == "contents"
def eval_attr131(self, attr_value, this):
return attr_value == "trigger"
def eval_attr132(self, attr_value, this):
return attr_value == "calledOperation"
def eval_attr133(self, attr_value, this):
return attr_value == "intf"
def eval_attr134(self, attr_value, this):
return attr_value == "contents"
def eval_attr135(self, attr_value, this):
return attr_value == "providedPort"
# define evaluation methods for each apply association.
def eval_attr136(self, attr_value, this):
return attr_value == "function"
def eval_attr137(self, attr_value, this):
return attr_value == "body"
def eval_attr138(self, attr_value, this):
return attr_value == "statements"
def eval_attr139(self, attr_value, this):
return attr_value == "expr"
def eval_attr140(self, attr_value, this):
return attr_value == "body"
def eval_attr141(self, attr_value, this):
return attr_value == "statements"
def eval_attr142(self, attr_value, this):
return attr_value == "expr"
def eval_attr143(self, attr_value, this):
return attr_value == "left"
def eval_attr144(self, attr_value, this):
return attr_value == "right"
def eval_attr145(self, attr_value, this):
return attr_value == "expression"
def eval_attr146(self, attr_value, this):
return attr_value == "target"
def eval_attr147(self, attr_value, this):
return attr_value == "expression"
def eval_attr148(self, attr_value, this):
return attr_value == "function"
def eval_attr149(self, attr_value, this):
return attr_value == "members"
def eval_attr150(self, attr_value, this):
return attr_value == "member"
def eval_attr151(self, attr_value, this):
return attr_value == "var"
def constraint(self, PreNode, graph):
return True
|
py | b4005e4dd93dd7bda5578885f88ec155db13bbf3 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from sqlalchemy import (
create_engine,
MetaData,
Table,
Column,
Integer,
)
from sqlalchemy.pool import NullPool
# XXX: It is advised to use another user that can connect to a default database,
# and has CREATE DATABASE permissions, rather than use a superuser.
DB_CONFIG_DICT = {
'user': 'root',
'password': 'F0rxlt',
'host': 'localhost',
'port': 3306,
}
DB_CONN_FORMAT = "mysql+pymysql://{user}:{password}@{host}:{port}/{database}"
DB_CONN_URI_DEFAULT = (DB_CONN_FORMAT.format(
database='mysql',
**DB_CONFIG_DICT))
engine_default = create_engine(DB_CONN_URI_DEFAULT)
NEW_DB_NAME = 'stock'
DB_CONN_URI_NEW = (DB_CONN_FORMAT.format(
database=NEW_DB_NAME,
**DB_CONFIG_DICT))
metadata = MetaData()
proj = Table('test', metadata, Column('id', Integer))
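# Module-level test flow: setup_module() creates the new database,
# test_create_table() creates a table inside it, and teardown_module()
# drops the database again.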
def setup_module():
conn = engine_default.connect()
conn.execute("COMMIT")
# Do not substitute user-supplied database names here.
conn.execute("CREATE DATABASE %s" % NEW_DB_NAME)
conn.close()
def test_create_table():
# Get a new engine for the just-created database and create a table.
engine_new = create_engine(DB_CONN_URI_NEW, poolclass=NullPool)
conn = engine_new.connect()
metadata.create_all(conn)
conn.close()
def teardown_module():
conn = engine_default.connect()
conn.execute("COMMIT")
# Do not substitute user-supplied database names here.
conn.execute("DROP DATABASE %s" % NEW_DB_NAME)
conn.close()
if __name__ == '__main__':
setup_module()
test_create_table()
# teardown_module()
|
py | b4005f97e72c113695ae2e2cd6642df3fe0e3914 | def compare(f1,f2,file,i):
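    """Recursively compare two nested dicts and report their differences.
    Keys present in f1 but missing from f2, and keys whose values differ,
    are printed; the file name is also appended to a module-level
    ``not_equal`` list, which is assumed to be defined by the caller.
    """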
for x in f1:
if x not in f2:
if file not in not_equal:
not_equal.append(file)
print(x,"not present in ",file,i)
elif isinstance(f1[x],dict) and isinstance(f2[x],dict):
compare(f1[x],f2[x],file,i)
elif f1[x]!=f2[x]:
print("value is not same for key:",x ,"in file ",file,i)
print("dev:",f1[x],"jsonmapper_sumter:",f2[x])
if file not in not_equal:
not_equal.append(file)
|
py | b40060b7e7e101e75ebce9923f14b32ff899342a | import Queue
import threading
import multiprocessing
from amuse.lab import *
###BOOKLISTSTART1###
code_queue = Queue.Queue()
def remote_worker_code():
code = code_queue.get()
evolve_single_star(code)
code_queue.task_done()
def evolve_with_different_stellar_model(codes):
for ci in codes:
code_queue.put(ci)
n_cpu = multiprocessing.cpu_count()
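    # Start one daemon worker thread per CPU core; each worker takes a single
    # stellar-evolution code from the queue and evolves a star with it.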
for i in range(n_cpu):
th = threading.Thread(target=remote_worker_code)
th.daemon = True
th.start()
code_queue.join() # block until all tasks are done
###BOOKLISTSTOP1###
###BOOKLISTSTART2###
def evolve_single_star(code):
stars = Particles(mass=10|units.MSun)
stellar = code()
stellar.particles.add_particles(stars)
channel = stellar.particles.new_channel_to(stars)
stellar.evolve_model(1|units.Myr)
channel.copy()
print "Star evolved to time=", stellar.model_time, \
" M=", stars.mass, "R=", stars.radius
stellar.stop()
###BOOKLISTSTOP2###
def new_option_parser():
from amuse.units.optparse import OptionParser
result = OptionParser()
result.add_option("-t", action="store_true",
dest="threaded", help="run threaded [%default]")
return result
if __name__ in ('__main__', '__plot__'):
o, arguments = new_option_parser().parse_args()
set_printing_strategy("custom",\
preferred_units = [units.MSun, units.RSun, units.Myr],\
precision = 6, prefix = "", separator = "[", suffix = "]")
codes = [SeBa, MESA, SSE, EVtwin]
if o.threaded:
print "Run threaded"
evolve_with_different_stellar_model(codes)
else:
print "Run sequentially"
for ci in codes:
evolve_single_star(ci)
|
py | b400618a6902958ad33463a5c86258e8e8451032 | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/xavier_ssd/TrekBot/TrekBot_WS/devel_isolated/zed_wrapper/include".split(';') if "/xavier_ssd/TrekBot/TrekBot_WS/devel_isolated/zed_wrapper/include" != "" else []
PROJECT_CATKIN_DEPENDS = "roscpp;rosconsole;sensor_msgs;stereo_msgs;image_transport;dynamic_reconfigure;tf2_ros;tf2_geometry_msgs;message_runtime".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "zed_wrapper"
PROJECT_SPACE_DIR = "/xavier_ssd/TrekBot/TrekBot_WS/devel_isolated/zed_wrapper"
PROJECT_VERSION = "2.6.0"
|
py | b400626b52250da0fff438cfe9f0f24f39d24487 | import jax.numpy as np
def ae(y_pred, y_true):
    ''' Description: absolute-error loss
        Args:
            y_pred : value predicted by the method
            y_true : ground truth value
    '''
    a = np.absolute(np.array([y_pred]) - np.array([y_true]))[0]
    if a.shape == (1,):  # y_pred is sometimes a (1,) vector rather than a scalar; unwrap so a scalar is returned
        return a[0]
return a |
py | b40062f6d58873b327f39f1bd0c6bcd6f12094e8 | # -*- coding: utf-8 -*-
#
# wafer documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'wafer'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'waferdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'wafer.tex',
u'wafer Documentation',
u"Urvi Gada", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'wafer', u'wafer Documentation',
[u"Urvi Gada"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'wafer', u'wafer Documentation',
u"Urvi Gada", 'wafer',
     'A wafer fault detection project using MLOps', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
|
py | b40063ba486ba2902cc27072b5cebaf8078df721 | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: platform.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='platform.proto',
package='org.dash.platform.dapi.v0',
syntax='proto3',
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x0eplatform.proto\x12\x19org.dash.platform.dapi.v0\"\x93\x01\n\x0fStoreTreeProofs\x12\x18\n\x10identities_proof\x18\x01 \x01(\x0c\x12/\n\'public_key_hashes_to_identity_ids_proof\x18\x02 \x01(\x0c\x12\x1c\n\x14\x64\x61ta_contracts_proof\x18\x03 \x01(\x0c\x12\x17\n\x0f\x64ocuments_proof\x18\x04 \x01(\x0c\"\x97\x01\n\x05Proof\x12\x17\n\x0froot_tree_proof\x18\x01 \x01(\x0c\x12\x45\n\x11store_tree_proofs\x18\x02 \x01(\x0b\x32*.org.dash.platform.dapi.v0.StoreTreeProofs\x12\x1b\n\x13signature_llmq_hash\x18\x03 \x01(\x0c\x12\x11\n\tsignature\x18\x04 \x01(\x0c\"D\n\x10ResponseMetadata\x12\x0e\n\x06height\x18\x01 \x01(\x03\x12 \n\x18\x63ore_chain_locked_height\x18\x02 \x01(\r\"L\n\x1dStateTransitionBroadcastError\x12\x0c\n\x04\x63ode\x18\x01 \x01(\r\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c\";\n\x1f\x42roadcastStateTransitionRequest\x12\x18\n\x10state_transition\x18\x01 \x01(\x0c\"\"\n BroadcastStateTransitionResponse\"/\n\x12GetIdentityRequest\x12\n\n\x02id\x18\x01 \x01(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\"\x97\x01\n\x13GetIdentityResponse\x12\x10\n\x08identity\x18\x01 \x01(\x0c\x12/\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.Proof\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\"3\n\x16GetDataContractRequest\x12\n\n\x02id\x18\x01 \x01(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\"\xa0\x01\n\x17GetDataContractResponse\x12\x15\n\rdata_contract\x18\x01 \x01(\x0c\x12/\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.Proof\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\"\x97\x01\n\x13GetDocumentsRequest\x12\x18\n\x10\x64\x61ta_contract_id\x18\x01 \x01(\x0c\x12\x15\n\rdocument_type\x18\x02 \x01(\t\x12\r\n\x05where\x18\x03 \x01(\x0c\x12\x10\n\x08order_by\x18\x04 \x01(\x0c\x12\r\n\x05limit\x18\x05 \x01(\r\x12\x10\n\x08start_at\x18\x07 \x01(\r\x12\r\n\x05prove\x18\x08 \x01(\x08\"\x99\x01\n\x14GetDocumentsResponse\x12\x11\n\tdocuments\x18\x01 \x03(\x0c\x12/\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.Proof\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\"Q\n%GetIdentitiesByPublicKeyHashesRequest\x12\x19\n\x11public_key_hashes\x18\x01 \x01(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\"\xac\x01\n&GetIdentitiesByPublicKeyHashesResponse\x12\x12\n\nidentities\x18\x01 \x03(\x0c\x12/\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.Proof\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\"R\n&GetIdentityIdsByPublicKeyHashesRequest\x12\x19\n\x11public_key_hashes\x18\x01 \x01(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\"\xaf\x01\n\'GetIdentityIdsByPublicKeyHashesResponse\x12\x14\n\x0cidentity_ids\x18\x01 \x01(\x0c\x12/\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.Proof\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadata\"S\n#WaitForStateTransitionResultRequest\x12\x1d\n\x15state_transition_hash\x18\x01 \x01(\x0c\x12\r\n\x05prove\x18\x02 \x01(\x08\"\xf0\x01\n$WaitForStateTransitionResultResponse\x12I\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x38.org.dash.platform.dapi.v0.StateTransitionBroadcastErrorH\x00\x12\x31\n\x05proof\x18\x02 \x01(\x0b\x32 .org.dash.platform.dapi.v0.ProofH\x00\x12=\n\x08metadata\x18\x03 \x01(\x0b\x32+.org.dash.platform.dapi.v0.ResponseMetadataB\x0b\n\tresponses\"P\n\x14\x43onsensusParamsBlock\x12\x11\n\tmax_bytes\x18\x01 \x01(\t\x12\x0f\n\x07max_gas\x18\x02 \x01(\t\x12\x14\n\x0ctime_iota_ms\x18\x03 
\x01(\t\"b\n\x17\x43onsensusParamsEvidence\x12\x1a\n\x12max_age_num_blocks\x18\x01 \x01(\t\x12\x18\n\x10max_age_duration\x18\x02 \x01(\t\x12\x11\n\tmax_bytes\x18\x03 \x01(\t\":\n\x19GetConsensusParamsRequest\x12\x0e\n\x06height\x18\x01 \x01(\x03\x12\r\n\x05prove\x18\x02 \x01(\x08\"\xa2\x01\n\x1aGetConsensusParamsResponse\x12>\n\x05\x62lock\x18\x01 \x01(\x0b\x32/.org.dash.platform.dapi.v0.ConsensusParamsBlock\x12\x44\n\x08\x65vidence\x18\x02 \x01(\x0b\x32\x32.org.dash.platform.dapi.v0.ConsensusParamsEvidence2\xf2\x08\n\x08Platform\x12\x93\x01\n\x18\x62roadcastStateTransition\x12:.org.dash.platform.dapi.v0.BroadcastStateTransitionRequest\x1a;.org.dash.platform.dapi.v0.BroadcastStateTransitionResponse\x12l\n\x0bgetIdentity\x12-.org.dash.platform.dapi.v0.GetIdentityRequest\x1a..org.dash.platform.dapi.v0.GetIdentityResponse\x12x\n\x0fgetDataContract\x12\x31.org.dash.platform.dapi.v0.GetDataContractRequest\x1a\x32.org.dash.platform.dapi.v0.GetDataContractResponse\x12o\n\x0cgetDocuments\x12..org.dash.platform.dapi.v0.GetDocumentsRequest\x1a/.org.dash.platform.dapi.v0.GetDocumentsResponse\x12\xa5\x01\n\x1egetIdentitiesByPublicKeyHashes\x12@.org.dash.platform.dapi.v0.GetIdentitiesByPublicKeyHashesRequest\x1a\x41.org.dash.platform.dapi.v0.GetIdentitiesByPublicKeyHashesResponse\x12\xa8\x01\n\x1fgetIdentityIdsByPublicKeyHashes\x12\x41.org.dash.platform.dapi.v0.GetIdentityIdsByPublicKeyHashesRequest\x1a\x42.org.dash.platform.dapi.v0.GetIdentityIdsByPublicKeyHashesResponse\x12\x9f\x01\n\x1cwaitForStateTransitionResult\x12>.org.dash.platform.dapi.v0.WaitForStateTransitionResultRequest\x1a?.org.dash.platform.dapi.v0.WaitForStateTransitionResultResponse\x12\x81\x01\n\x12getConsensusParams\x12\x34.org.dash.platform.dapi.v0.GetConsensusParamsRequest\x1a\x35.org.dash.platform.dapi.v0.GetConsensusParamsResponseb\x06proto3'
)
_STORETREEPROOFS = _descriptor.Descriptor(
name='StoreTreeProofs',
full_name='org.dash.platform.dapi.v0.StoreTreeProofs',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='identities_proof', full_name='org.dash.platform.dapi.v0.StoreTreeProofs.identities_proof', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='public_key_hashes_to_identity_ids_proof', full_name='org.dash.platform.dapi.v0.StoreTreeProofs.public_key_hashes_to_identity_ids_proof', index=1,
number=2, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='data_contracts_proof', full_name='org.dash.platform.dapi.v0.StoreTreeProofs.data_contracts_proof', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='documents_proof', full_name='org.dash.platform.dapi.v0.StoreTreeProofs.documents_proof', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=46,
serialized_end=193,
)
_PROOF = _descriptor.Descriptor(
name='Proof',
full_name='org.dash.platform.dapi.v0.Proof',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='root_tree_proof', full_name='org.dash.platform.dapi.v0.Proof.root_tree_proof', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='store_tree_proofs', full_name='org.dash.platform.dapi.v0.Proof.store_tree_proofs', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='signature_llmq_hash', full_name='org.dash.platform.dapi.v0.Proof.signature_llmq_hash', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='signature', full_name='org.dash.platform.dapi.v0.Proof.signature', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=196,
serialized_end=347,
)
_RESPONSEMETADATA = _descriptor.Descriptor(
name='ResponseMetadata',
full_name='org.dash.platform.dapi.v0.ResponseMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='height', full_name='org.dash.platform.dapi.v0.ResponseMetadata.height', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='core_chain_locked_height', full_name='org.dash.platform.dapi.v0.ResponseMetadata.core_chain_locked_height', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=349,
serialized_end=417,
)
_STATETRANSITIONBROADCASTERROR = _descriptor.Descriptor(
name='StateTransitionBroadcastError',
full_name='org.dash.platform.dapi.v0.StateTransitionBroadcastError',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='code', full_name='org.dash.platform.dapi.v0.StateTransitionBroadcastError.code', index=0,
number=1, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='message', full_name='org.dash.platform.dapi.v0.StateTransitionBroadcastError.message', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='data', full_name='org.dash.platform.dapi.v0.StateTransitionBroadcastError.data', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=419,
serialized_end=495,
)
_BROADCASTSTATETRANSITIONREQUEST = _descriptor.Descriptor(
name='BroadcastStateTransitionRequest',
full_name='org.dash.platform.dapi.v0.BroadcastStateTransitionRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='state_transition', full_name='org.dash.platform.dapi.v0.BroadcastStateTransitionRequest.state_transition', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=497,
serialized_end=556,
)
_BROADCASTSTATETRANSITIONRESPONSE = _descriptor.Descriptor(
name='BroadcastStateTransitionResponse',
full_name='org.dash.platform.dapi.v0.BroadcastStateTransitionResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=558,
serialized_end=592,
)
_GETIDENTITYREQUEST = _descriptor.Descriptor(
name='GetIdentityRequest',
full_name='org.dash.platform.dapi.v0.GetIdentityRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='org.dash.platform.dapi.v0.GetIdentityRequest.id', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='prove', full_name='org.dash.platform.dapi.v0.GetIdentityRequest.prove', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=594,
serialized_end=641,
)
_GETIDENTITYRESPONSE = _descriptor.Descriptor(
name='GetIdentityResponse',
full_name='org.dash.platform.dapi.v0.GetIdentityResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='identity', full_name='org.dash.platform.dapi.v0.GetIdentityResponse.identity', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='proof', full_name='org.dash.platform.dapi.v0.GetIdentityResponse.proof', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metadata', full_name='org.dash.platform.dapi.v0.GetIdentityResponse.metadata', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=644,
serialized_end=795,
)
_GETDATACONTRACTREQUEST = _descriptor.Descriptor(
name='GetDataContractRequest',
full_name='org.dash.platform.dapi.v0.GetDataContractRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='org.dash.platform.dapi.v0.GetDataContractRequest.id', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='prove', full_name='org.dash.platform.dapi.v0.GetDataContractRequest.prove', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=797,
serialized_end=848,
)
_GETDATACONTRACTRESPONSE = _descriptor.Descriptor(
name='GetDataContractResponse',
full_name='org.dash.platform.dapi.v0.GetDataContractResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='data_contract', full_name='org.dash.platform.dapi.v0.GetDataContractResponse.data_contract', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='proof', full_name='org.dash.platform.dapi.v0.GetDataContractResponse.proof', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metadata', full_name='org.dash.platform.dapi.v0.GetDataContractResponse.metadata', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=851,
serialized_end=1011,
)
_GETDOCUMENTSREQUEST = _descriptor.Descriptor(
name='GetDocumentsRequest',
full_name='org.dash.platform.dapi.v0.GetDocumentsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='data_contract_id', full_name='org.dash.platform.dapi.v0.GetDocumentsRequest.data_contract_id', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='document_type', full_name='org.dash.platform.dapi.v0.GetDocumentsRequest.document_type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='where', full_name='org.dash.platform.dapi.v0.GetDocumentsRequest.where', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='order_by', full_name='org.dash.platform.dapi.v0.GetDocumentsRequest.order_by', index=3,
number=4, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='limit', full_name='org.dash.platform.dapi.v0.GetDocumentsRequest.limit', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='start_at', full_name='org.dash.platform.dapi.v0.GetDocumentsRequest.start_at', index=5,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='prove', full_name='org.dash.platform.dapi.v0.GetDocumentsRequest.prove', index=6,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1014,
serialized_end=1165,
)
_GETDOCUMENTSRESPONSE = _descriptor.Descriptor(
name='GetDocumentsResponse',
full_name='org.dash.platform.dapi.v0.GetDocumentsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='documents', full_name='org.dash.platform.dapi.v0.GetDocumentsResponse.documents', index=0,
number=1, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='proof', full_name='org.dash.platform.dapi.v0.GetDocumentsResponse.proof', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metadata', full_name='org.dash.platform.dapi.v0.GetDocumentsResponse.metadata', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1168,
serialized_end=1321,
)
_GETIDENTITIESBYPUBLICKEYHASHESREQUEST = _descriptor.Descriptor(
name='GetIdentitiesByPublicKeyHashesRequest',
full_name='org.dash.platform.dapi.v0.GetIdentitiesByPublicKeyHashesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='public_key_hashes', full_name='org.dash.platform.dapi.v0.GetIdentitiesByPublicKeyHashesRequest.public_key_hashes', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='prove', full_name='org.dash.platform.dapi.v0.GetIdentitiesByPublicKeyHashesRequest.prove', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1323,
serialized_end=1404,
)
_GETIDENTITIESBYPUBLICKEYHASHESRESPONSE = _descriptor.Descriptor(
name='GetIdentitiesByPublicKeyHashesResponse',
full_name='org.dash.platform.dapi.v0.GetIdentitiesByPublicKeyHashesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='identities', full_name='org.dash.platform.dapi.v0.GetIdentitiesByPublicKeyHashesResponse.identities', index=0,
number=1, type=12, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='proof', full_name='org.dash.platform.dapi.v0.GetIdentitiesByPublicKeyHashesResponse.proof', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metadata', full_name='org.dash.platform.dapi.v0.GetIdentitiesByPublicKeyHashesResponse.metadata', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1407,
serialized_end=1579,
)
_GETIDENTITYIDSBYPUBLICKEYHASHESREQUEST = _descriptor.Descriptor(
name='GetIdentityIdsByPublicKeyHashesRequest',
full_name='org.dash.platform.dapi.v0.GetIdentityIdsByPublicKeyHashesRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='public_key_hashes', full_name='org.dash.platform.dapi.v0.GetIdentityIdsByPublicKeyHashesRequest.public_key_hashes', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='prove', full_name='org.dash.platform.dapi.v0.GetIdentityIdsByPublicKeyHashesRequest.prove', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1581,
serialized_end=1663,
)
_GETIDENTITYIDSBYPUBLICKEYHASHESRESPONSE = _descriptor.Descriptor(
name='GetIdentityIdsByPublicKeyHashesResponse',
full_name='org.dash.platform.dapi.v0.GetIdentityIdsByPublicKeyHashesResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='identity_ids', full_name='org.dash.platform.dapi.v0.GetIdentityIdsByPublicKeyHashesResponse.identity_ids', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='proof', full_name='org.dash.platform.dapi.v0.GetIdentityIdsByPublicKeyHashesResponse.proof', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metadata', full_name='org.dash.platform.dapi.v0.GetIdentityIdsByPublicKeyHashesResponse.metadata', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1666,
serialized_end=1841,
)
_WAITFORSTATETRANSITIONRESULTREQUEST = _descriptor.Descriptor(
name='WaitForStateTransitionResultRequest',
full_name='org.dash.platform.dapi.v0.WaitForStateTransitionResultRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='state_transition_hash', full_name='org.dash.platform.dapi.v0.WaitForStateTransitionResultRequest.state_transition_hash', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=b"",
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='prove', full_name='org.dash.platform.dapi.v0.WaitForStateTransitionResultRequest.prove', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1843,
serialized_end=1926,
)
_WAITFORSTATETRANSITIONRESULTRESPONSE = _descriptor.Descriptor(
name='WaitForStateTransitionResultResponse',
full_name='org.dash.platform.dapi.v0.WaitForStateTransitionResultResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='error', full_name='org.dash.platform.dapi.v0.WaitForStateTransitionResultResponse.error', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='proof', full_name='org.dash.platform.dapi.v0.WaitForStateTransitionResultResponse.proof', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='metadata', full_name='org.dash.platform.dapi.v0.WaitForStateTransitionResultResponse.metadata', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='responses', full_name='org.dash.platform.dapi.v0.WaitForStateTransitionResultResponse.responses',
index=0, containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[]),
],
serialized_start=1929,
serialized_end=2169,
)
_CONSENSUSPARAMSBLOCK = _descriptor.Descriptor(
name='ConsensusParamsBlock',
full_name='org.dash.platform.dapi.v0.ConsensusParamsBlock',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='max_bytes', full_name='org.dash.platform.dapi.v0.ConsensusParamsBlock.max_bytes', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='max_gas', full_name='org.dash.platform.dapi.v0.ConsensusParamsBlock.max_gas', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='time_iota_ms', full_name='org.dash.platform.dapi.v0.ConsensusParamsBlock.time_iota_ms', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2171,
serialized_end=2251,
)
_CONSENSUSPARAMSEVIDENCE = _descriptor.Descriptor(
name='ConsensusParamsEvidence',
full_name='org.dash.platform.dapi.v0.ConsensusParamsEvidence',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='max_age_num_blocks', full_name='org.dash.platform.dapi.v0.ConsensusParamsEvidence.max_age_num_blocks', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='max_age_duration', full_name='org.dash.platform.dapi.v0.ConsensusParamsEvidence.max_age_duration', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='max_bytes', full_name='org.dash.platform.dapi.v0.ConsensusParamsEvidence.max_bytes', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2253,
serialized_end=2351,
)
_GETCONSENSUSPARAMSREQUEST = _descriptor.Descriptor(
name='GetConsensusParamsRequest',
full_name='org.dash.platform.dapi.v0.GetConsensusParamsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='height', full_name='org.dash.platform.dapi.v0.GetConsensusParamsRequest.height', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='prove', full_name='org.dash.platform.dapi.v0.GetConsensusParamsRequest.prove', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2353,
serialized_end=2411,
)
_GETCONSENSUSPARAMSRESPONSE = _descriptor.Descriptor(
name='GetConsensusParamsResponse',
full_name='org.dash.platform.dapi.v0.GetConsensusParamsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='block', full_name='org.dash.platform.dapi.v0.GetConsensusParamsResponse.block', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='evidence', full_name='org.dash.platform.dapi.v0.GetConsensusParamsResponse.evidence', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2414,
serialized_end=2576,
)
_PROOF.fields_by_name['store_tree_proofs'].message_type = _STORETREEPROOFS
_GETIDENTITYRESPONSE.fields_by_name['proof'].message_type = _PROOF
_GETIDENTITYRESPONSE.fields_by_name['metadata'].message_type = _RESPONSEMETADATA
_GETDATACONTRACTRESPONSE.fields_by_name['proof'].message_type = _PROOF
_GETDATACONTRACTRESPONSE.fields_by_name['metadata'].message_type = _RESPONSEMETADATA
_GETDOCUMENTSRESPONSE.fields_by_name['proof'].message_type = _PROOF
_GETDOCUMENTSRESPONSE.fields_by_name['metadata'].message_type = _RESPONSEMETADATA
_GETIDENTITIESBYPUBLICKEYHASHESRESPONSE.fields_by_name['proof'].message_type = _PROOF
_GETIDENTITIESBYPUBLICKEYHASHESRESPONSE.fields_by_name['metadata'].message_type = _RESPONSEMETADATA
_GETIDENTITYIDSBYPUBLICKEYHASHESRESPONSE.fields_by_name['proof'].message_type = _PROOF
_GETIDENTITYIDSBYPUBLICKEYHASHESRESPONSE.fields_by_name['metadata'].message_type = _RESPONSEMETADATA
_WAITFORSTATETRANSITIONRESULTRESPONSE.fields_by_name['error'].message_type = _STATETRANSITIONBROADCASTERROR
_WAITFORSTATETRANSITIONRESULTRESPONSE.fields_by_name['proof'].message_type = _PROOF
_WAITFORSTATETRANSITIONRESULTRESPONSE.fields_by_name['metadata'].message_type = _RESPONSEMETADATA
_WAITFORSTATETRANSITIONRESULTRESPONSE.oneofs_by_name['responses'].fields.append(
_WAITFORSTATETRANSITIONRESULTRESPONSE.fields_by_name['error'])
_WAITFORSTATETRANSITIONRESULTRESPONSE.fields_by_name['error'].containing_oneof = _WAITFORSTATETRANSITIONRESULTRESPONSE.oneofs_by_name['responses']
_WAITFORSTATETRANSITIONRESULTRESPONSE.oneofs_by_name['responses'].fields.append(
_WAITFORSTATETRANSITIONRESULTRESPONSE.fields_by_name['proof'])
_WAITFORSTATETRANSITIONRESULTRESPONSE.fields_by_name['proof'].containing_oneof = _WAITFORSTATETRANSITIONRESULTRESPONSE.oneofs_by_name['responses']
_GETCONSENSUSPARAMSRESPONSE.fields_by_name['block'].message_type = _CONSENSUSPARAMSBLOCK
_GETCONSENSUSPARAMSRESPONSE.fields_by_name['evidence'].message_type = _CONSENSUSPARAMSEVIDENCE
DESCRIPTOR.message_types_by_name['StoreTreeProofs'] = _STORETREEPROOFS
DESCRIPTOR.message_types_by_name['Proof'] = _PROOF
DESCRIPTOR.message_types_by_name['ResponseMetadata'] = _RESPONSEMETADATA
DESCRIPTOR.message_types_by_name['StateTransitionBroadcastError'] = _STATETRANSITIONBROADCASTERROR
DESCRIPTOR.message_types_by_name['BroadcastStateTransitionRequest'] = _BROADCASTSTATETRANSITIONREQUEST
DESCRIPTOR.message_types_by_name['BroadcastStateTransitionResponse'] = _BROADCASTSTATETRANSITIONRESPONSE
DESCRIPTOR.message_types_by_name['GetIdentityRequest'] = _GETIDENTITYREQUEST
DESCRIPTOR.message_types_by_name['GetIdentityResponse'] = _GETIDENTITYRESPONSE
DESCRIPTOR.message_types_by_name['GetDataContractRequest'] = _GETDATACONTRACTREQUEST
DESCRIPTOR.message_types_by_name['GetDataContractResponse'] = _GETDATACONTRACTRESPONSE
DESCRIPTOR.message_types_by_name['GetDocumentsRequest'] = _GETDOCUMENTSREQUEST
DESCRIPTOR.message_types_by_name['GetDocumentsResponse'] = _GETDOCUMENTSRESPONSE
DESCRIPTOR.message_types_by_name['GetIdentitiesByPublicKeyHashesRequest'] = _GETIDENTITIESBYPUBLICKEYHASHESREQUEST
DESCRIPTOR.message_types_by_name['GetIdentitiesByPublicKeyHashesResponse'] = _GETIDENTITIESBYPUBLICKEYHASHESRESPONSE
DESCRIPTOR.message_types_by_name['GetIdentityIdsByPublicKeyHashesRequest'] = _GETIDENTITYIDSBYPUBLICKEYHASHESREQUEST
DESCRIPTOR.message_types_by_name['GetIdentityIdsByPublicKeyHashesResponse'] = _GETIDENTITYIDSBYPUBLICKEYHASHESRESPONSE
DESCRIPTOR.message_types_by_name['WaitForStateTransitionResultRequest'] = _WAITFORSTATETRANSITIONRESULTREQUEST
DESCRIPTOR.message_types_by_name['WaitForStateTransitionResultResponse'] = _WAITFORSTATETRANSITIONRESULTRESPONSE
DESCRIPTOR.message_types_by_name['ConsensusParamsBlock'] = _CONSENSUSPARAMSBLOCK
DESCRIPTOR.message_types_by_name['ConsensusParamsEvidence'] = _CONSENSUSPARAMSEVIDENCE
DESCRIPTOR.message_types_by_name['GetConsensusParamsRequest'] = _GETCONSENSUSPARAMSREQUEST
DESCRIPTOR.message_types_by_name['GetConsensusParamsResponse'] = _GETCONSENSUSPARAMSRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
StoreTreeProofs = _reflection.GeneratedProtocolMessageType('StoreTreeProofs', (_message.Message,), {
'DESCRIPTOR' : _STORETREEPROOFS,
'__module__' : 'platform_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.StoreTreeProofs)
})
_sym_db.RegisterMessage(StoreTreeProofs)
Proof = _reflection.GeneratedProtocolMessageType('Proof', (_message.Message,), {
'DESCRIPTOR' : _PROOF,
'__module__' : 'platform_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.Proof)
})
_sym_db.RegisterMessage(Proof)
ResponseMetadata = _reflection.GeneratedProtocolMessageType('ResponseMetadata', (_message.Message,), {
'DESCRIPTOR' : _RESPONSEMETADATA,
'__module__' : 'platform_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.ResponseMetadata)
})
_sym_db.RegisterMessage(ResponseMetadata)
StateTransitionBroadcastError = _reflection.GeneratedProtocolMessageType('StateTransitionBroadcastError', (_message.Message,), {
'DESCRIPTOR' : _STATETRANSITIONBROADCASTERROR,
'__module__' : 'platform_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.StateTransitionBroadcastError)
})
_sym_db.RegisterMessage(StateTransitionBroadcastError)
BroadcastStateTransitionRequest = _reflection.GeneratedProtocolMessageType('BroadcastStateTransitionRequest', (_message.Message,), {
'DESCRIPTOR' : _BROADCASTSTATETRANSITIONREQUEST,
'__module__' : 'platform_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.BroadcastStateTransitionRequest)
})
_sym_db.RegisterMessage(BroadcastStateTransitionRequest)
BroadcastStateTransitionResponse = _reflection.GeneratedProtocolMessageType('BroadcastStateTransitionResponse', (_message.Message,), {
'DESCRIPTOR' : _BROADCASTSTATETRANSITIONRESPONSE,
'__module__' : 'platform_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.BroadcastStateTransitionResponse)
})
_sym_db.RegisterMessage(BroadcastStateTransitionResponse)
GetIdentityRequest = _reflection.GeneratedProtocolMessageType('GetIdentityRequest', (_message.Message,), {
'DESCRIPTOR' : _GETIDENTITYREQUEST,
'__module__' : 'platform_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetIdentityRequest)
})
_sym_db.RegisterMessage(GetIdentityRequest)
GetIdentityResponse = _reflection.GeneratedProtocolMessageType('GetIdentityResponse', (_message.Message,), {
'DESCRIPTOR' : _GETIDENTITYRESPONSE,
'__module__' : 'platform_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetIdentityResponse)
})
_sym_db.RegisterMessage(GetIdentityResponse)
GetDataContractRequest = _reflection.GeneratedProtocolMessageType('GetDataContractRequest', (_message.Message,), {
'DESCRIPTOR' : _GETDATACONTRACTREQUEST,
'__module__' : 'platform_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetDataContractRequest)
})
_sym_db.RegisterMessage(GetDataContractRequest)
GetDataContractResponse = _reflection.GeneratedProtocolMessageType('GetDataContractResponse', (_message.Message,), {
'DESCRIPTOR' : _GETDATACONTRACTRESPONSE,
'__module__' : 'platform_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetDataContractResponse)
})
_sym_db.RegisterMessage(GetDataContractResponse)
GetDocumentsRequest = _reflection.GeneratedProtocolMessageType('GetDocumentsRequest', (_message.Message,), {
'DESCRIPTOR' : _GETDOCUMENTSREQUEST,
'__module__' : 'platform_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetDocumentsRequest)
})
_sym_db.RegisterMessage(GetDocumentsRequest)
GetDocumentsResponse = _reflection.GeneratedProtocolMessageType('GetDocumentsResponse', (_message.Message,), {
'DESCRIPTOR' : _GETDOCUMENTSRESPONSE,
'__module__' : 'platform_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetDocumentsResponse)
})
_sym_db.RegisterMessage(GetDocumentsResponse)
GetIdentitiesByPublicKeyHashesRequest = _reflection.GeneratedProtocolMessageType('GetIdentitiesByPublicKeyHashesRequest', (_message.Message,), {
'DESCRIPTOR' : _GETIDENTITIESBYPUBLICKEYHASHESREQUEST,
'__module__' : 'platform_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetIdentitiesByPublicKeyHashesRequest)
})
_sym_db.RegisterMessage(GetIdentitiesByPublicKeyHashesRequest)
GetIdentitiesByPublicKeyHashesResponse = _reflection.GeneratedProtocolMessageType('GetIdentitiesByPublicKeyHashesResponse', (_message.Message,), {
'DESCRIPTOR' : _GETIDENTITIESBYPUBLICKEYHASHESRESPONSE,
'__module__' : 'platform_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetIdentitiesByPublicKeyHashesResponse)
})
_sym_db.RegisterMessage(GetIdentitiesByPublicKeyHashesResponse)
GetIdentityIdsByPublicKeyHashesRequest = _reflection.GeneratedProtocolMessageType('GetIdentityIdsByPublicKeyHashesRequest', (_message.Message,), {
'DESCRIPTOR' : _GETIDENTITYIDSBYPUBLICKEYHASHESREQUEST,
'__module__' : 'platform_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetIdentityIdsByPublicKeyHashesRequest)
})
_sym_db.RegisterMessage(GetIdentityIdsByPublicKeyHashesRequest)
GetIdentityIdsByPublicKeyHashesResponse = _reflection.GeneratedProtocolMessageType('GetIdentityIdsByPublicKeyHashesResponse', (_message.Message,), {
'DESCRIPTOR' : _GETIDENTITYIDSBYPUBLICKEYHASHESRESPONSE,
'__module__' : 'platform_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetIdentityIdsByPublicKeyHashesResponse)
})
_sym_db.RegisterMessage(GetIdentityIdsByPublicKeyHashesResponse)
WaitForStateTransitionResultRequest = _reflection.GeneratedProtocolMessageType('WaitForStateTransitionResultRequest', (_message.Message,), {
'DESCRIPTOR' : _WAITFORSTATETRANSITIONRESULTREQUEST,
'__module__' : 'platform_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.WaitForStateTransitionResultRequest)
})
_sym_db.RegisterMessage(WaitForStateTransitionResultRequest)
WaitForStateTransitionResultResponse = _reflection.GeneratedProtocolMessageType('WaitForStateTransitionResultResponse', (_message.Message,), {
'DESCRIPTOR' : _WAITFORSTATETRANSITIONRESULTRESPONSE,
'__module__' : 'platform_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.WaitForStateTransitionResultResponse)
})
_sym_db.RegisterMessage(WaitForStateTransitionResultResponse)
ConsensusParamsBlock = _reflection.GeneratedProtocolMessageType('ConsensusParamsBlock', (_message.Message,), {
'DESCRIPTOR' : _CONSENSUSPARAMSBLOCK,
'__module__' : 'platform_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.ConsensusParamsBlock)
})
_sym_db.RegisterMessage(ConsensusParamsBlock)
ConsensusParamsEvidence = _reflection.GeneratedProtocolMessageType('ConsensusParamsEvidence', (_message.Message,), {
'DESCRIPTOR' : _CONSENSUSPARAMSEVIDENCE,
'__module__' : 'platform_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.ConsensusParamsEvidence)
})
_sym_db.RegisterMessage(ConsensusParamsEvidence)
GetConsensusParamsRequest = _reflection.GeneratedProtocolMessageType('GetConsensusParamsRequest', (_message.Message,), {
'DESCRIPTOR' : _GETCONSENSUSPARAMSREQUEST,
'__module__' : 'platform_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetConsensusParamsRequest)
})
_sym_db.RegisterMessage(GetConsensusParamsRequest)
GetConsensusParamsResponse = _reflection.GeneratedProtocolMessageType('GetConsensusParamsResponse', (_message.Message,), {
'DESCRIPTOR' : _GETCONSENSUSPARAMSRESPONSE,
'__module__' : 'platform_pb2'
# @@protoc_insertion_point(class_scope:org.dash.platform.dapi.v0.GetConsensusParamsResponse)
})
_sym_db.RegisterMessage(GetConsensusParamsResponse)
_PLATFORM = _descriptor.ServiceDescriptor(
name='Platform',
full_name='org.dash.platform.dapi.v0.Platform',
file=DESCRIPTOR,
index=0,
serialized_options=None,
create_key=_descriptor._internal_create_key,
serialized_start=2579,
serialized_end=3717,
methods=[
_descriptor.MethodDescriptor(
name='broadcastStateTransition',
full_name='org.dash.platform.dapi.v0.Platform.broadcastStateTransition',
index=0,
containing_service=None,
input_type=_BROADCASTSTATETRANSITIONREQUEST,
output_type=_BROADCASTSTATETRANSITIONRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='getIdentity',
full_name='org.dash.platform.dapi.v0.Platform.getIdentity',
index=1,
containing_service=None,
input_type=_GETIDENTITYREQUEST,
output_type=_GETIDENTITYRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='getDataContract',
full_name='org.dash.platform.dapi.v0.Platform.getDataContract',
index=2,
containing_service=None,
input_type=_GETDATACONTRACTREQUEST,
output_type=_GETDATACONTRACTRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='getDocuments',
full_name='org.dash.platform.dapi.v0.Platform.getDocuments',
index=3,
containing_service=None,
input_type=_GETDOCUMENTSREQUEST,
output_type=_GETDOCUMENTSRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='getIdentitiesByPublicKeyHashes',
full_name='org.dash.platform.dapi.v0.Platform.getIdentitiesByPublicKeyHashes',
index=4,
containing_service=None,
input_type=_GETIDENTITIESBYPUBLICKEYHASHESREQUEST,
output_type=_GETIDENTITIESBYPUBLICKEYHASHESRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='getIdentityIdsByPublicKeyHashes',
full_name='org.dash.platform.dapi.v0.Platform.getIdentityIdsByPublicKeyHashes',
index=5,
containing_service=None,
input_type=_GETIDENTITYIDSBYPUBLICKEYHASHESREQUEST,
output_type=_GETIDENTITYIDSBYPUBLICKEYHASHESRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='waitForStateTransitionResult',
full_name='org.dash.platform.dapi.v0.Platform.waitForStateTransitionResult',
index=6,
containing_service=None,
input_type=_WAITFORSTATETRANSITIONRESULTREQUEST,
output_type=_WAITFORSTATETRANSITIONRESULTRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
_descriptor.MethodDescriptor(
name='getConsensusParams',
full_name='org.dash.platform.dapi.v0.Platform.getConsensusParams',
index=7,
containing_service=None,
input_type=_GETCONSENSUSPARAMSREQUEST,
output_type=_GETCONSENSUSPARAMSRESPONSE,
serialized_options=None,
create_key=_descriptor._internal_create_key,
),
])
_sym_db.RegisterServiceDescriptor(_PLATFORM)
DESCRIPTOR.services_by_name['Platform'] = _PLATFORM
# @@protoc_insertion_point(module_scope)
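# Illustrative usage sketch -- not part of the protoc output. It only exercises the
# standard generated-message API (keyword-argument constructor, SerializeToString,
# Message.FromString); the 32-byte identity id below is a made-up placeholder.
if __name__ == '__main__':
  _req = GetIdentityRequest(id=b'\x00' * 32, prove=True)
  _wire = _req.SerializeToString()                 # proto3 binary encoding
  _roundtripped = GetIdentityRequest.FromString(_wire)
  assert _roundtripped.prove and _roundtripped.id == _req.id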
|
py | b400650973a2e479835e02c8b0c24cc201c1fb80 | #!/usr/bin/env impala-python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# This module is used to stress test Impala by running queries concurrently.
#
# Stress test outline (and notes):
# 1) Get a set of queries as requested by the user from the CLI options.
# 2) For each query, run it individually to find:
# a) Minimum mem limit to avoid spilling
# b) Minimum mem limit to successfully run the query (spilling allowed)
# c) Runtime when no mem was spilled
# d) Runtime when mem was spilled
# e) A row order independent hash of the result set.
# This is a slow process so the results will be written to disk for reuse.
# 3) Find the memory available to Impalad. This will be done by finding the minimum
# memory available across all impalads (-mem_limit startup option). Ideally, for
# maximum stress, all impalads will have the same memory configuration but this is
# not required.
# 4) Optionally, set an amount of memory that can be overcommitted. Overcommitting
# memory can increase memory pressure which can result in memory being spilled to
# disk or queries failing with out-of-memory.
# 5) Start submitting queries. There are two modes for throttling the number of
# concurrent queries, depending on --test-admission-control.
# a) test-admission-control=false: Submit queries until all available memory (as
# determined by items 3 and 4) is used. Before running the query a query mem
# limit is set between 2a and 2b. (There is a runtime option to increase the
# likelihood that a query will be given the full 2a limit to avoid spilling.)
# b) test-admission-control=true: Submit enough queries to achieve the desired
# level of overcommit, but expect that Impala's admission control will throttle
# queries. In this mode mem_limit is not set per query.
# 6) Randomly cancel queries to test cancellation. There is a runtime option to control
# the likelihood that a query will be randomly canceled.
# 7) If a query errored, verify that the error is expected. Errors are expected in the
# following cases:
# a) Memory-based admission control is not being tested (i.e.
# --test-admission-control=false), the error is an out-of-memory error and memory
# on the cluster is overcommitted.
# b) The error is an admission control rejection or timeout.
# 8) Verify the result set hash of successful queries if there are no DML queries in the
# current run.
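#
# A rough sketch (illustration only, with made-up names; the script's real logic may
# differ) of the per-query mem_limit choice described in 5a above: with some
# probability the query gets its full spill-free limit (2a), otherwise a value is
# picked between the bare minimum (2b) and the spill-free limit:
#
#   if random() < prob_spill_free_limit:
#     mem_limit_mb = spill_free_mb                          # item 2a: no spilling
#   else:
#     mem_limit_mb = randrange(min_mb, spill_free_mb + 1)   # spilling allowed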
from __future__ import print_function
import json
import logging
import os
import re
import signal
import sys
import threading
import traceback
from Queue import Empty # Must be before Queue below
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, Namespace, SUPPRESS
from collections import defaultdict
from contextlib import contextmanager
from copy import copy
from datetime import datetime
from multiprocessing import Lock, Process, Queue, Value
from random import choice, random, randrange, shuffle
from sys import exit, maxint
from tempfile import gettempdir
from textwrap import dedent
from threading import current_thread, Thread
from time import sleep, time
import tests.comparison.cli_options as cli_options
import tests.util.test_file_parser as test_file_parser
from tests.comparison.cluster import Timeout
from tests.comparison.db_types import Int, TinyInt, SmallInt, BigInt
from tests.comparison.model_translator import SqlWriter
from tests.comparison.query_generator import QueryGenerator
from tests.comparison.query_profile import DefaultProfile
from tests.util.parse_util import (
EXPECTED_TPCDS_QUERIES_COUNT, EXPECTED_TPCH_NESTED_QUERIES_COUNT,
EXPECTED_TPCH_STRESS_QUERIES_COUNT, match_memory_estimate, parse_mem_to_mb)
from tests.util.thrift_util import op_handle_to_query_id
LOG = logging.getLogger(os.path.splitext(os.path.basename(__file__))[0])
PROFILES_DIR = "profiles"
RESULT_HASHES_DIR = "result_hashes"
# The version of the file format containing the collected query runtime info.
RUNTIME_INFO_FILE_VERSION = 3
# Metrics collected during the stress running process.
NUM_QUERIES_DEQUEUED = "num_queries_dequeued"
# The number of queries that were submitted to a query runner.
NUM_QUERIES_SUBMITTED = "num_queries_submitted"
# The number of queries that have entered the RUNNING state (i.e. got through Impala's
# admission control and started executing) or were cancelled or hit an error.
NUM_QUERIES_STARTED_RUNNING_OR_CANCELLED = "num_queries_started_running_or_cancelled"
NUM_QUERIES_FINISHED = "num_queries_finished"
NUM_QUERIES_EXCEEDED_MEM_LIMIT = "num_queries_exceeded_mem_limit"
NUM_QUERIES_AC_REJECTED = "num_queries_ac_rejected"
NUM_QUERIES_AC_TIMEDOUT = "num_queries_ac_timedout"
NUM_QUERIES_CANCELLED = "num_queries_cancelled"
NUM_RESULT_MISMATCHES = "num_result_mismatches"
NUM_OTHER_ERRORS = "num_other_errors"
class StressArgConverter(object):
def __init__(self, args):
"""
Convert arguments as returned from argparse parse_args() into internal forms.
The purpose of this object is to do any conversions needed from the type given by
parse_args() into internal forms. For example, if a commandline option takes in a
complicated string that needs to be converted into a list or dictionary, this is the
place to do it. Access works the same as on the object returned by parse_args(),
i.e., object.option_attribute.
In most cases, simple arguments needn't be converted, because argparse already
handles the type conversion (e.g., "8" <str> to 8 <int>), and that is usually all
that's needed. If a property getter below doesn't exist, it means the
argument value is just passed along unconverted.
Params:
args: argparse.Namespace object (from argparse.ArgumentParser().parse_args())
"""
assert isinstance(args, Namespace), "expected Namespace, got " + str(type(args))
self._args = args
self._common_query_options = None
def __getattr__(self, attr):
# This "proxies through" all the attributes from the Namespace object that are not
# defined in this object via property getters below.
return getattr(self._args, attr)
@property
def common_query_options(self):
# Memoize this, as the integrity checking of --common-query-options need only
# happen once.
if self._common_query_options is not None:
return self._common_query_options
# The stress test sets these, so callers cannot override them.
IGNORE_QUERY_OPTIONS = frozenset([
'ABORT_ON_ERROR',
'MEM_LIMIT',
])
common_query_options = {}
if self._args.common_query_options is not None:
for query_option_and_value in self._args.common_query_options:
try:
query_option, value = query_option_and_value.split('=')
except ValueError:
LOG.error(
"Could not parse --common-query-options: '{common_query_options}'".format(
common_query_options=self._args.common_query_options))
exit(1)
query_option = query_option.upper()
if query_option in common_query_options:
LOG.error(
"Query option '{query_option}' already defined in --common-query-options: "
"'{common_query_options}'".format(
query_option=query_option,
common_query_options=self._args.common_query_options))
exit(1)
elif query_option in IGNORE_QUERY_OPTIONS:
LOG.warn(
"Ignoring '{query_option}' in common query options: '{opt}': "
"The stress test algorithm needs control of this option.".format(
query_option=query_option, opt=self._args.common_query_options))
else:
common_query_options[query_option] = value
LOG.debug("Common query option '{query_option}' set to '{value}'".format(
query_option=query_option, value=value))
self._common_query_options = common_query_options
return self._common_query_options
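  # Illustrative sketch (hypothetical option names, not part of the conversion logic):
  # given --common-query-options MT_DOP=4 DISABLE_CODEGEN=true, the property above
  # yields {'MT_DOP': '4', 'DISABLE_CODEGEN': 'true'}; an entry such as MEM_LIMIT=1g
  # would be dropped with a warning because the stress test controls MEM_LIMIT itself.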
@property
def runtime_info_path(self):
runtime_info_path = self._args.runtime_info_path
if "{cm_host}" in runtime_info_path:
runtime_info_path = runtime_info_path.format(cm_host=self._args.cm_host)
return runtime_info_path
def create_and_start_daemon_thread(fn, name):
thread = Thread(target=fn, name=name)
thread.error = None
thread.daemon = True
thread.start()
return thread
def increment(counter):
with counter.get_lock():
counter.value += 1
def print_stacks(*_):
"""Print the stacks of all threads from this script to stderr."""
thread_names = dict([(t.ident, t.name) for t in threading.enumerate()])
stacks = list()
for thread_id, stack in sys._current_frames().items():
stacks.append(
"\n# Thread: %s(%d)"
% (thread_names.get(thread_id, "No name"), thread_id))
for filename, lineno, name, line in traceback.extract_stack(stack):
stacks.append('File: "%s", line %d, in %s' % (filename, lineno, name))
if line:
stacks.append(" %s" % (line.strip(), ))
print("\n".join(stacks), file=sys.stderr)
# To help debug hangs, the stacks of all threads can be printed by sending signal USR1
# to each process.
signal.signal(signal.SIGUSR1, print_stacks)
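# For example (assuming this script runs as PID 12345), the stacks can be dumped with:
#   kill -USR1 12345
# Each query runner is a separate process, so the signal must be sent to every PID of
# interest.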
def print_crash_info_if_exists(impala, start_time):
"""If any impalads are found not running, they will assumed to have crashed and an
error message will be printed to stderr for each stopped impalad. Returns a value
that evaluates to True if any impalads are stopped.
"""
max_attempts = 5
for remaining_attempts in xrange(max_attempts - 1, -1, -1):
try:
crashed_impalads = impala.find_crashed_impalads(start_time)
break
except Timeout as e:
LOG.info(
"Timeout checking if impalads crashed: %s."
% e + (" Will retry." if remaining_attempts else ""))
else:
LOG.error(
"Aborting after %s failed attempts to check if impalads crashed", max_attempts)
raise e
for message in crashed_impalads.itervalues():
print(message, file=sys.stderr)
return crashed_impalads
class QueryReport(object):
"""Holds information about a single query run."""
def __init__(self, query):
self.query = query
self.result_hash = None
self.runtime_secs = None
self.mem_was_spilled = False
# not_enough_memory includes conditions like "Memory limit exceeded", admission
# control rejecting because not enough memory, etc.
self.not_enough_memory = False
# ac_rejected is true if the query was rejected by admission control.
# It is mutually exclusive with not_enough_memory - if the query is rejected by
# admission control because the memory limit is too low, it is counted as
# not_enough_memory.
# TODO: reconsider whether they should be mutually exclusive
self.ac_rejected = False
self.ac_timedout = False
self.other_error = None
self.timed_out = False
self.was_cancelled = False
self.profile = None
self.query_id = None
def __str__(self):
return dedent("""
<QueryReport
result_hash: %(result_hash)s
runtime_secs: %(runtime_secs)s
mem_was_spilled: %(mem_was_spilled)s
not_enough_memory: %(not_enough_memory)s
ac_rejected: %(ac_rejected)s
ac_timedout: %(ac_timedout)s
other_error: %(other_error)s
timed_out: %(timed_out)s
was_cancelled: %(was_cancelled)s
query_id: %(query_id)s
>
""".strip() % self.__dict__)
def has_query_error(self):
"""Return true if any kind of error status was returned from the query (i.e.
the query didn't run to completion, time out or get cancelled)."""
return (self.not_enough_memory or self.ac_rejected or self.ac_timedout
or self.other_error)
def write_query_profile(self, directory, prefix=None):
"""
Write out the query profile bound to this object to a given directory.
The file name is generated and will contain the query ID. Use the optional prefix
parameter to set a prefix on the filename.
Example return:
tpcds_300_decimal_parquet_q21_00000001_a38c8331_profile.txt
Parameters:
directory (str): Directory to write profile.
prefix (str): Prefix for filename.
"""
if not (self.profile and self.query_id):
return
if prefix is not None:
file_name = prefix + '_'
else:
file_name = ''
file_name += self.query.logical_query_id + '_'
file_name += self.query_id.replace(":", "_") + "_profile.txt"
profile_log_path = os.path.join(directory, file_name)
with open(profile_log_path, "w") as profile_log:
profile_log.write(self.profile)
class MemBroker(object):
"""Provides memory usage coordination for clients running in different processes.
The broker fulfills reservation requests by blocking as needed so total memory
used by clients never exceeds the total available memory (including an
'overcommitable' amount).
The lock built in to _available is also used to protect access to other members.
The state stored in this class is actually an encapsulation of part of the state
of the StressRunner class below. The state here is separated for clarity.
"""
def __init__(self, real_mem_mb, overcommitable_mem_mb):
"""'real_mem_mb' memory should be the amount of memory that each impalad is able
to use. 'overcommitable_mem_mb' is the amount of memory that will be dispensed
over the 'real' amount.
"""
self._total_mem_mb = real_mem_mb + overcommitable_mem_mb
self._available = Value("i", self._total_mem_mb)
self._max_overcommitment = overcommitable_mem_mb
# Each reservation will be assigned an id. Ids are monotonically increasing. When
# a reservation crosses the overcommitment threshold, the corresponding reservation
# id will be stored in '_last_overcommitted_reservation_id' so clients can check
# to see if memory was overcommitted since their reservation was made (this is a race
# but an incorrect result will be on the conservative side).
self._next_reservation_id = Value("L", 0)
self._last_overcommitted_reservation_id = Value("L", 0)
@property
def total_mem_mb(self):
return self._total_mem_mb
@property
def overcommitted_mem_mb(self):
return max(self._max_overcommitment - self._available.value, 0)
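  # Worked example of the bookkeeping above (illustrative numbers): with
  # real_mem_mb=100 and overcommitable_mem_mb=20, _total_mem_mb is 120 and
  # _max_overcommitment is 20. If 110 MB are currently reserved, _available is 10 and
  # overcommitted_mem_mb is max(20 - 10, 0) = 10; once reservations drop to 90 MB,
  # _available is 30 and overcommitted_mem_mb is max(20 - 30, 0) = 0.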
@property
def available_mem_mb(self):
return self._available.value
@property
def last_overcommitted_reservation_id(self):
return self._last_overcommitted_reservation_id.value
@contextmanager
def reserve_mem_mb(self, mem_mb):
"""Blocks until the requested amount of memory is available and taken for the caller.
This function should be used in a 'with' block. The taken memory will
automatically be released when the 'with' context exits. A numeric id is returned
so clients can compare against 'last_overcommitted_reservation_id' to see if
memory was overcommitted since the reservation was obtained.
with broker.reserve_mem_mb(100) as reservation_id:
# Run query using 100 MB of memory
if <query failed>:
# Immediately check broker.was_overcommitted(reservation_id) to see if
# memory was overcommitted.
"""
reservation_id = self._wait_until_reserved(mem_mb)
try:
yield reservation_id
finally:
self._release(mem_mb)
def _wait_until_reserved(self, req):
while True:
with self._available.get_lock():
if req <= self._available.value:
self._available.value -= req
LOG.debug(
"Reserved %s MB; %s MB available; %s MB overcommitted",
req, self._available.value, self.overcommitted_mem_mb)
reservation_id = self._next_reservation_id.value
increment(self._next_reservation_id)
if self.overcommitted_mem_mb > 0:
self._last_overcommitted_reservation_id.value = reservation_id
return reservation_id
sleep(0.1)
def _release(self, req):
with self._available.get_lock():
self._available.value += req
LOG.debug(
"Released %s MB; %s MB available; %s MB overcommitted",
req, self._available.value, self.overcommitted_mem_mb)
def was_overcommitted(self, reservation_id):
"""Returns True if memory was overcommitted since the given reservation was made.
For an accurate return value, this should be called just after the query ends
or while the query is still running.
"""
return reservation_id <= self._last_overcommitted_reservation_id.value
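# A minimal usage sketch of MemBroker (hypothetical values and placeholder helpers,
# mirroring how StressRunner uses it below):
#
#   broker = MemBroker(real_mem_mb=8192, overcommitable_mem_mb=1024)
#   with broker.reserve_mem_mb(512) as reservation_id:
#     run_the_query()  # placeholder for the real work
#     if query_failed_with_oom and broker.was_overcommitted(reservation_id):
#       pass  # the failure can be attributed to overcommitted memory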
class StressRunner(object):
"""This class contains functionality related to producing/consuming queries for the
purpose of stress testing Impala.
Queries will be executed in separate processes since python threading is limited
to the use of a single CPU.
"""
# This is the point at which the work queue will block because it is full.
WORK_QUEUE_CAPACITY = 10
def __init__(self):
self.use_kerberos = False
self.common_query_options = {}
self.test_admission_control = False
self._mem_broker = None
self._verify_results = True
self._select_probability = None
# Synchronized blocking work queue for producer/consumers.
self._query_queue = Queue(self.WORK_QUEUE_CAPACITY)
# The Value class provides cross-process shared memory.
self._mem_mb_needed_for_next_query = Value("i", 0)
# This lock provides a way to stop new queries from running. This lock must be
# acquired before writing to the NUM_QUERIES_SUBMITTED metric for the query_runner,
    # which is incremented before every query submission. Reading NUM_QUERIES_SUBMITTED is
# allowed without taking this lock.
self._submit_query_lock = Lock()
self.leak_check_interval_mins = None
self._next_leak_check_unix_time = Value("i", 0)
self._max_mem_mb_reported_usage = Value("i", -1) # -1 => Unknown
self._max_mem_mb_usage = Value("i", -1) # -1 => Unknown
self.cancel_probability = 0
self.spill_probability = 0
self.startup_queries_per_sec = 1.0
self.num_successive_errors_needed_to_abort = 1
self._num_successive_errors = Value("i", 0)
self.results_dir = gettempdir()
self._status_headers = [
"Done", "Active", "Executing", "Mem Lmt Ex", "AC Reject", "AC Timeout",
"Cancel", "Err", "Incorrect", "Next Qry Mem Lmt",
"Tot Qry Mem Lmt", "Tracked Mem", "RSS Mem"]
self._num_queries_to_run = None
self._query_producer_thread = None
# This lock is used to synchronize access to the '_query_runners' list and also to all
# the '_past_runners*' members.
self._query_runners_lock = Lock()
self._query_runners = []
# These are the cumulative values of all the queries that have started/finished/-
# dequeued, etc. on runners that have already died. Every time we notice that a query
# runner has died, we update these values.
self._past_runner_metrics = defaultdict(lambda: Value("i", 0))
self._query_consumer_thread = None
self._mem_polling_thread = None
def _record_runner_metrics_before_evict(self, query_runner):
""" Before removing 'query_runner' from the self._query_runners list, record its
metrics. Must only be called if 'query_runner' is to be removed from the list.
MUST hold '_query_runners_lock' before calling.
"""
for key, synchronized_val in query_runner._metrics.iteritems():
self._past_runner_metrics[key].value += synchronized_val.value
def _calc_total_runner_metrics(self):
""" Calculate the total of metrics across past and active query runners. """
totals = defaultdict(lambda: 0)
with self._query_runners_lock:
for key in self._past_runner_metrics:
totals[key] = self._past_runner_metrics[key].value
for query_runner in self._query_runners:
for key, synchronized_val in query_runner._metrics.iteritems():
totals[key] += synchronized_val.value
return totals
def _calc_total_runner_metric(self, key):
""" Calculate the total of metric 'key' across past and active query runners. """
with self._query_runners_lock:
return self._calc_total_runner_metric_no_lock(key)
def _calc_total_runner_metric_no_lock(self, key):
""" TODO: Get rid of this function after reformatting how we obtain query indices.
_query_runners_lock MUST be taken before calling this function.
"""
total = self._past_runner_metrics[key].value
for runner in self._query_runners:
total += runner._metrics[key].value
return total
def _total_num_queries_submitted(self):
return self._calc_total_runner_metric(NUM_QUERIES_SUBMITTED)
def _total_num_queries_active(self):
"""The number of queries that are currently active (i.e. submitted to a query runner
and haven't yet completed)."""
metrics = self._calc_total_runner_metrics()
num_running = metrics[NUM_QUERIES_SUBMITTED] - metrics[NUM_QUERIES_FINISHED]
assert num_running >= 0, "The number of running queries is negative"
return num_running
def _num_runners_remaining(self):
return len(self._query_runners)
def run_queries(
self, queries, impala, num_queries_to_run, mem_overcommit_pct, should_print_status,
verify_results, select_probability
):
"""Runs queries randomly chosen from 'queries' and stops after 'num_queries_to_run'
    queries have completed. 'select_probability' should be a float between 0 and 1; it
determines the likelihood of choosing a select query (as opposed to a DML query,
for example).
Before a query is run, a mem limit will be chosen. 'spill_probability' determines
the likelihood of choosing a mem limit that will cause spilling. To induce
spilling, a value is randomly chosen below the min memory needed to avoid spilling
but above the min memory needed with spilling. So the min/max query memory
requirements must be determined before calling this method.
If 'mem_overcommit_pct' is zero, an exception will be raised if any queries
fail for any reason other than cancellation (controlled by the 'cancel_probability'
property), since each query should have enough memory to run successfully. If
non-zero, failures due to insufficient memory will be ignored if memory was
overcommitted at any time during execution.
If a query completes without error, the result will be verified if 'verify_results'
is True. An error will be raised upon a result mismatch. 'verify_results' should be
false for the case where the expected results are not known in advance, if we are
running DML queries, for example.
"""
# TODO: The state from a previous run should be cleared out. This isn't really a
# problem now because the one caller (main()) never calls a second time.
if self.startup_queries_per_sec <= 0:
raise Exception("Startup queries per second must be positive")
if self.leak_check_interval_mins is not None and self.leak_check_interval_mins <= 0:
raise Exception("Memory leak check interval must be positive")
# If there is a crash, start looking for errors starting from this time.
self.start_time = datetime.now()
self._mem_broker = MemBroker(
impala.min_impalad_mem_mb,
int(impala.min_impalad_mem_mb * mem_overcommit_pct / 100))
self._verify_results = verify_results
self._select_probability = select_probability
# Print the status to show the state before starting.
if should_print_status:
self._print_status(print_header=True)
self._num_queries_to_run = num_queries_to_run
self._start_polling_mem_usage(impala)
self._start_producing_queries(queries)
self._start_consuming_queries(impala)
# Wait for everything to finish.
self._wait_for_test_to_finish(impala, should_print_status)
# And print the final state.
if should_print_status:
self._print_status()
self._check_for_test_failure()
self.print_duration()
def _start_producing_queries(self, queries):
def enqueue_queries():
# Generate a dict(query type -> list of queries).
queries_by_type = {}
for query in queries:
if query.query_type not in queries_by_type:
queries_by_type[query.query_type] = []
queries_by_type[query.query_type].append(query)
try:
for _ in xrange(self._num_queries_to_run):
# First randomly determine a query type, then choose a random query of that
# type.
if (
QueryType.SELECT in queries_by_type and
(len(queries_by_type.keys()) == 1 or random() < self._select_probability)
):
result = choice(queries_by_type[QueryType.SELECT])
else:
query_type = choice([
key for key in queries_by_type if key != QueryType.SELECT])
result = choice(queries_by_type[query_type])
self._query_queue.put(result)
except Exception as e:
LOG.error("Error producing queries: %s", e)
current_thread().error = e
raise e
LOG.info("Producing thread completed job. Exiting...")
self._query_producer_thread = create_and_start_daemon_thread(
enqueue_queries, "Query Producer")
def _start_consuming_queries(self, impala):
def start_additional_runners_if_needed():
try:
while self._total_num_queries_submitted() < self._num_queries_to_run:
# TODO: sleeping for the below amount leads to slower submission than the goal,
# because it does not factor in the time spent by this thread outside of the
# sleep() call.
sleep(1.0 / self.startup_queries_per_sec)
# Remember num dequeued/started are cumulative.
with self._submit_query_lock:
metrics = self._calc_total_runner_metrics()
num_dequeued = metrics[NUM_QUERIES_DEQUEUED]
num_submitted = metrics[NUM_QUERIES_SUBMITTED]
LOG.debug("Submitted {0} queries. Dequeued {1} queries".format(
num_submitted, num_dequeued))
if num_dequeued != num_submitted:
# Assume dequeued queries are stuck waiting for cluster resources so there
# is no point in starting an additional runner.
continue
num_coordinators = len(impala.impalads)
if self.max_coordinators > 0:
num_coordinators = min(num_coordinators, self.max_coordinators)
impalad = impala.impalads[len(self._query_runners) % num_coordinators]
query_runner = QueryRunner()
query_runner.impalad = impalad
query_runner.results_dir = self.results_dir
query_runner.use_kerberos = self.use_kerberos
query_runner.common_query_options = self.common_query_options
query_runner.test_admission_control = self.test_admission_control
query_runner.proc = \
Process(target=self._start_single_runner, args=(query_runner, ))
query_runner.proc.daemon = True
with self._query_runners_lock:
self._query_runners.append(query_runner)
query_runner.proc.start()
LOG.info("Consuming thread completed job. Exiting...")
except Exception as e:
LOG.error("Error consuming queries: %s", e)
current_thread().error = e
raise e
self._query_consumer_thread = create_and_start_daemon_thread(
start_additional_runners_if_needed, "Query Consumer")
def _start_polling_mem_usage(self, impala):
def poll_mem_usage():
if self.leak_check_interval_mins:
self._next_leak_check_unix_time.value = int(
time() + 60 * self.leak_check_interval_mins)
query_submission_is_locked = False
# Query submission will be unlocked after a memory report has been collected twice
# while no queries were running.
ready_to_unlock = None
try:
while self._total_num_queries_submitted() < self._num_queries_to_run:
if ready_to_unlock:
assert query_submission_is_locked, "Query submission not yet locked"
assert not self._total_num_queries_active(), "Queries are still running"
LOG.debug("Resuming query submission")
self._next_leak_check_unix_time.value = int(
time() + 60 * self.leak_check_interval_mins)
self._submit_query_lock.release()
query_submission_is_locked = False
ready_to_unlock = None
if (
not query_submission_is_locked and
self.leak_check_interval_mins and
time() > self._next_leak_check_unix_time.value
):
assert self._total_num_queries_active() <= self._num_runners_remaining(), \
"Each running query should belong to a runner"
LOG.debug("Stopping query submission")
self._submit_query_lock.acquire()
query_submission_is_locked = True
max_reported, max_actual = self._get_mem_usage_values()
if max_reported != -1 and max_actual != -1:
            # Values were already retrieved but haven't been used yet. Assume newer
# values aren't wanted and check again later.
sleep(1)
continue
try:
max_reported = max(impala.find_impalad_mem_mb_reported_usage())
except Timeout:
LOG.debug("Timeout collecting reported mem usage")
max_reported = -1
try:
max_actual = max(impala.find_impalad_mem_mb_actual_usage())
except Timeout:
LOG.debug("Timeout collecting reported actual usage")
max_actual = -1
self._set_mem_usage_values(max_reported, max_actual)
if query_submission_is_locked and not self._total_num_queries_active():
if ready_to_unlock is None:
ready_to_unlock = False
else:
ready_to_unlock = True
except Exception:
LOG.debug("Error collecting impalad mem usage", exc_info=True)
if query_submission_is_locked:
LOG.debug("Resuming query submission")
self._submit_query_lock.release()
self._mem_polling_thread = create_and_start_daemon_thread(
poll_mem_usage, "Mem Usage Poller")
def _get_mem_usage_values(self, reset=False):
reported = None
actual = None
with self._max_mem_mb_reported_usage.get_lock():
with self._max_mem_mb_usage.get_lock():
reported = self._max_mem_mb_reported_usage.value
actual = self._max_mem_mb_usage.value
if reset:
self._max_mem_mb_reported_usage.value = -1
self._max_mem_mb_usage.value = -1
return reported, actual
def _set_mem_usage_values(self, reported, actual):
with self._max_mem_mb_reported_usage.get_lock():
with self._max_mem_mb_usage.get_lock():
self._max_mem_mb_reported_usage.value = reported
self._max_mem_mb_usage.value = actual
def _start_single_runner(self, query_runner):
"""Consumer function to take a query of the queue and run it. This is intended to
run in a separate process so validating the result set can use a full CPU.
"""
LOG.debug("New query runner started")
# The query runner should already be set up. We just need to connect() before using
# the runner.
query_runner.connect()
while not self._query_queue.empty():
try:
query = self._query_queue.get(True, 1)
except Empty:
continue
except EOFError:
LOG.debug("Query running aborting due to closed query queue")
break
LOG.debug("Getting query_idx")
with self._query_runners_lock:
query_idx = self._calc_total_runner_metric_no_lock(NUM_QUERIES_DEQUEUED)
increment(query_runner._metrics[NUM_QUERIES_DEQUEUED])
LOG.debug("Query_idx: {0} | PID: {1}".format(query_idx, query_runner.proc.pid))
if not query.required_mem_mb_without_spilling:
mem_limit = query.required_mem_mb_with_spilling
solo_runtime = query.solo_runtime_secs_with_spilling
elif self.spill_probability < random():
mem_limit = query.required_mem_mb_without_spilling
solo_runtime = query.solo_runtime_secs_without_spilling
else:
mem_limit = randrange(
query.required_mem_mb_with_spilling,
query.required_mem_mb_without_spilling + 1)
solo_runtime = query.solo_runtime_secs_with_spilling
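      # Illustrative example of the branch above (made-up numbers): for a query with
      # required_mem_mb_with_spilling=200 and required_mem_mb_without_spilling=500, a
      # spill_probability of 0.3 means roughly 70% of runs use mem_limit=500 (no
      # spilling expected) and roughly 30% pick a random mem_limit in [200, 500], which
      # may force spilling.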
LOG.debug("Waiting for other query runners to start their queries")
while query_idx > self._total_num_queries_submitted():
sleep(0.1)
self._mem_mb_needed_for_next_query.value = mem_limit
LOG.debug("Requesting memory reservation")
with self._mem_broker.reserve_mem_mb(mem_limit) as reservation_id:
LOG.debug("Received memory reservation")
with self._submit_query_lock:
increment(query_runner._metrics[NUM_QUERIES_SUBMITTED])
should_cancel = self.cancel_probability > random()
if should_cancel:
timeout = randrange(1, max(int(solo_runtime), 2))
else:
# Let the query run as long as necessary - it is nearly impossible to pick a
# good value that won't have false positives under load - see IMPALA-8222.
timeout = maxint
report = query_runner.run_query(query, mem_limit, timeout_secs=timeout,
should_cancel=should_cancel)
LOG.debug("Got execution report for query")
if report.timed_out and should_cancel:
report.was_cancelled = True
query_runner.update_from_query_report(report)
if report.other_error:
error_msg = str(report.other_error)
# There is a possible race during cancellation. If a fetch request fails (for
# example due to hitting a mem limit), just before the cancellation request, the
# server may have already unregistered the query as part of the fetch failure.
# In that case the server gives an error response saying the handle is invalid.
if "Invalid query handle" in error_msg and report.timed_out:
self._num_successive_errors.value = 0
continue
# Occasionally the network connection will fail, and depending on when the
# failure occurred during run_query(), an attempt to get the profile may be
# made which results in "Invalid session id" since the server destroyed the
# session upon disconnect.
if "Invalid session id" in error_msg:
self._num_successive_errors.value = 0
continue
# The server may fail to respond to clients if the load is high. An error
# message with "connect()...Connection timed out" comes from the impalad so
# that will not be ignored.
if (
("Connection timed out" in error_msg and "connect()" not in error_msg) or
"ECONNRESET" in error_msg or
"couldn't get a client" in error_msg or
"timeout: timed out" in error_msg
):
self._num_successive_errors.value = 0
continue
increment(self._num_successive_errors)
increment(query_runner._metrics[NUM_OTHER_ERRORS])
self._write_query_profile(report, PROFILES_DIR, prefix='error')
raise Exception("Query {query} ID {id} failed: {mesg}".format(
query=query.logical_query_id,
id=report.query_id,
mesg=error_msg))
if (
report.not_enough_memory and (self.test_admission_control or
not self._mem_broker.was_overcommitted(reservation_id))
):
increment(self._num_successive_errors)
self._write_query_profile(
report, PROFILES_DIR, prefix='unexpected_mem_exceeded')
raise Exception("Unexpected mem limit exceeded; mem was not overcommitted. "
"Query ID: {0}".format(report.query_id))
if (
not report.timed_out and not report.has_query_error() and
(self._verify_results and report.result_hash != query.result_hash)
):
increment(self._num_successive_errors)
increment(query_runner._metrics[NUM_RESULT_MISMATCHES])
self._write_query_profile(report, PROFILES_DIR, prefix='incorrect_results')
raise Exception(dedent("""\
Result hash mismatch; expected {expected}, got {actual}
Query ID: {id}
Query: {query}""".format(expected=query.result_hash,
actual=report.result_hash,
id=report.query_id,
query=query.logical_query_id)))
if report.timed_out and not should_cancel:
self._write_query_profile(report, PROFILES_DIR, prefix='timed_out')
raise Exception(
"Query {query} unexpectedly timed out. Query ID: {id}".format(
query=query.logical_query_id, id=report.query_id))
self._num_successive_errors.value = 0
LOG.debug("Query runner completed...")
def _print_status_header(self):
print(" | ".join(self._status_headers))
def _print_status(self, print_header=False):
if print_header:
self._print_status_header()
metrics = self._calc_total_runner_metrics()
reported_mem, actual_mem = self._get_mem_usage_values(reset=True)
status_format = " | ".join(["%%%ss" % len(header) for header in self._status_headers])
print(status_format % (
# Done
metrics[NUM_QUERIES_FINISHED],
# Active
metrics[NUM_QUERIES_SUBMITTED] - metrics[NUM_QUERIES_FINISHED],
# Executing
metrics[NUM_QUERIES_STARTED_RUNNING_OR_CANCELLED] -
metrics[NUM_QUERIES_FINISHED],
# Mem Lmt Ex
metrics[NUM_QUERIES_EXCEEDED_MEM_LIMIT],
# AC Rejected
metrics[NUM_QUERIES_AC_REJECTED],
# AC Timed Out
metrics[NUM_QUERIES_AC_TIMEDOUT],
# Cancel
metrics[NUM_QUERIES_CANCELLED],
# Err
metrics[NUM_OTHER_ERRORS],
# Incorrect
metrics[NUM_RESULT_MISMATCHES],
# Next Qry Mem Lmt
self._mem_mb_needed_for_next_query.value,
# Total Qry Mem Lmt
self._mem_broker.total_mem_mb - self._mem_broker.available_mem_mb,
# Tracked Mem
"" if reported_mem == -1 else reported_mem,
# RSS Mem
"" if actual_mem == -1 else actual_mem))
def _write_query_profile(self, report, subdir, prefix=None):
report.write_query_profile(
os.path.join(self.results_dir, subdir),
prefix)
def _check_successive_errors(self):
if (self._num_successive_errors.value >= self.num_successive_errors_needed_to_abort):
print(
"Aborting due to %s successive errors encountered"
% self._num_successive_errors.value, file=sys.stderr)
self.print_duration()
sys.exit(1)
def _check_for_test_failure(self):
metrics = self._calc_total_runner_metrics()
if metrics[NUM_OTHER_ERRORS] > 0 or metrics[NUM_RESULT_MISMATCHES] > 0:
LOG.error("Failing the stress test due to unexpected errors, incorrect results, or "
"timed out queries. See the report line above for details.")
self.print_duration()
sys.exit(1)
def _wait_for_test_to_finish(self, impala, should_print_status):
last_report_secs = 0
lines_printed = 1
sleep_secs = 0.1
num_runners_remaining = self._num_runners_remaining()
while (
self._query_producer_thread.is_alive() or
self._query_consumer_thread.is_alive() or
num_runners_remaining
):
if self._query_producer_thread.error or self._query_consumer_thread.error:
# This is bad enough to abort early. A failure here probably means there's a
# bug in this script. The mem poller could be checked for an error too. It is
# not critical so is ignored.
LOG.error("Aborting due to error in producer/consumer")
sys.exit(1)
do_check_for_impala_crashes = False
with self._query_runners_lock:
for idx, runner in enumerate(self._query_runners):
if runner.proc.exitcode is not None:
if runner.proc.exitcode != 0:
# Since at least one query runner process failed, make sure to check for
# crashed impalads.
do_check_for_impala_crashes = True
# TODO: Handle case for num_queries_dequeued != num_queries_submitted
num_submitted = runner._metrics[NUM_QUERIES_SUBMITTED].value
num_started_or_cancelled = \
runner._metrics[NUM_QUERIES_STARTED_RUNNING_OR_CANCELLED]
num_finished = runner._metrics[NUM_QUERIES_FINISHED].value
if num_submitted != num_finished:
# The query runner process may have crashed before updating the number
# of finished queries but after it incremented the number of queries
# submitted.
assert num_submitted - num_finished == 1
increment(runner._metrics[NUM_QUERIES_FINISHED])
if num_submitted != num_started_or_cancelled:
assert num_submitted - num_started_or_cancelled == 1
increment(runner._metrics[NUM_QUERIES_STARTED_RUNNING_OR_CANCELLED])
# Since we know that the runner crashed while trying to run a query, we
# count it as an 'other error'
increment(runner._metrics[NUM_OTHER_ERRORS])
self._check_successive_errors()
assert runner._metrics[NUM_QUERIES_SUBMITTED].value == \
runner._metrics[NUM_QUERIES_FINISHED].value, \
str([(k, v.value) for k, v in runner._metrics.iteritems()])
# Make sure to record all the metrics before removing this runner from the
# list.
print("Query runner ({0}) exited with exit code {1}".format(
runner.proc.pid, runner.proc.exitcode))
self._record_runner_metrics_before_evict(self._query_runners[idx])
# Remove the query runner from the list.
del self._query_runners[idx]
if do_check_for_impala_crashes:
# Since we know that at least one query runner failed, check if any of the Impala
# daemons themselves crashed.
LOG.info("Checking for Impala crashes")
if print_crash_info_if_exists(impala, self.start_time):
self.print_duration()
sys.exit(runner.proc.exitcode)
do_check_for_impala_crashes = False
LOG.info("No Impala crashes detected")
sleep(sleep_secs)
num_runners_remaining = self._num_runners_remaining()
if should_print_status:
last_report_secs += sleep_secs
if last_report_secs > 5:
if (
not self._query_producer_thread.is_alive() or
not self._query_consumer_thread.is_alive() or
not num_runners_remaining
):
LOG.debug("Producer is alive: %s" % self._query_producer_thread.is_alive())
LOG.debug("Consumer is alive: %s" % self._query_consumer_thread.is_alive())
LOG.debug("Queue size: %s" % self._query_queue.qsize())
LOG.debug("Runners: %s" % num_runners_remaining)
last_report_secs = 0
lines_printed %= 50
self._print_status(print_header=(lines_printed == 0))
lines_printed += 1
def print_duration(self):
duration = datetime.now() - self.start_time
LOG.info("Test Duration: {0:.0f} seconds".format(duration.total_seconds()))
class QueryTimeout(Exception):
pass
class QueryType(object):
COMPUTE_STATS, DELETE, INSERT, SELECT, UPDATE, UPSERT = range(6)
class Query(object):
"""Contains a SQL statement along with expected runtime information."""
def __init__(self):
self.name = None
self.sql = None
# In order to be able to make good estimates for DML queries in the binary search,
    # we need to bring the table to a good initial state before executing the sql. Running
# set_up_sql accomplishes this task.
self.set_up_sql = None
self.db_name = None
self.result_hash = None
self.required_mem_mb_with_spilling = None
self.required_mem_mb_without_spilling = None
self.solo_runtime_profile_with_spilling = None
self.solo_runtime_profile_without_spilling = None
self.solo_runtime_secs_with_spilling = None
self.solo_runtime_secs_without_spilling = None
# Query options to set before running the query.
self.options = {}
# Determines the order in which we will populate query runtime info. Queries with the
# lowest population_order property will be handled first.
self.population_order = 0
# Type of query. Can have the following values: SELECT, COMPUTE_STATS, INSERT, UPDATE,
# UPSERT, DELETE.
self.query_type = QueryType.SELECT
self._logical_query_id = None
def __repr__(self):
return dedent("""
<Query
Mem: %(required_mem_mb_with_spilling)s
Mem no-spilling: %(required_mem_mb_without_spilling)s
Solo Runtime: %(solo_runtime_secs_with_spilling)s
Solo Runtime no-spilling: %(solo_runtime_secs_without_spilling)s
DB: %(db_name)s
Options: %(options)s
        Set up SQL: %(set_up_sql)s
        SQL: %(sql)s
Population order: %(population_order)r>
""".strip() % self.__dict__)
@property
def logical_query_id(self):
"""
    Return a meaningful unique str identifier for the query.
Example: "tpcds_300_decimal_parquet_q21"
"""
if self._logical_query_id is None:
self._logical_query_id = '{0}_{1}'.format(self.db_name, self.name)
return self._logical_query_id
def write_runtime_info_profiles(self, directory):
"""Write profiles for spilling and non-spilling into directory (str)."""
profiles_to_write = [
(self.logical_query_id + "_profile_with_spilling.txt",
self.solo_runtime_profile_with_spilling),
(self.logical_query_id + "_profile_without_spilling.txt",
self.solo_runtime_profile_without_spilling),
]
for filename, profile in profiles_to_write:
if profile is None:
LOG.debug("No profile recorded for {0}".format(filename))
continue
with open(os.path.join(directory, filename), "w") as fh:
fh.write(profile)
class QueryRunner(object):
"""Encapsulates functionality to run a query and provide a runtime report."""
SPILLED_PATTERNS = [re.compile("ExecOption:.*Spilled"), re.compile("SpilledRuns: [^0]")]
BATCH_SIZE = 1024
def __init__(self, stress_runner=None):
"""Creates a new instance. The caller must fill in the below fields. stress_runner
must be provided if this is running in the context of a stress run, so that statistics
can be updated."""
self.stress_runner = stress_runner
self.impalad = None
self.impalad_conn = None
self.use_kerberos = False
self.results_dir = gettempdir()
self.check_if_mem_was_spilled = False
    self.common_query_options = {}
    # Default so the attribute always exists even if the caller never overrides it.
    self.test_admission_control = False
self.proc = None
# All these values are shared values between processes. We want these to be accessible
# by the parent process that started this QueryRunner, for operational purposes.
self._metrics = {
NUM_QUERIES_DEQUEUED: Value("i", 0),
NUM_QUERIES_SUBMITTED: Value("i", 0),
NUM_QUERIES_STARTED_RUNNING_OR_CANCELLED: Value("i", 0),
NUM_QUERIES_FINISHED: Value("i", 0),
NUM_QUERIES_EXCEEDED_MEM_LIMIT: Value("i", 0),
NUM_QUERIES_AC_REJECTED: Value("i", 0),
NUM_QUERIES_AC_TIMEDOUT: Value("i", 0),
NUM_QUERIES_CANCELLED: Value("i", 0),
NUM_RESULT_MISMATCHES: Value("i", 0),
NUM_OTHER_ERRORS: Value("i", 0)}
def connect(self):
self.impalad_conn = self.impalad.impala.connect(impalad=self.impalad)
def disconnect(self):
if self.impalad_conn:
self.impalad_conn.close()
self.impalad_conn = None
def run_query(self, query, mem_limit_mb, run_set_up=False,
timeout_secs=maxint, should_cancel=False, retain_profile=False):
"""Run a query and return an execution report. If 'run_set_up' is True, set up sql
will be executed before the main query. This should be the case during the binary
search phase of the stress test.
If 'should_cancel' is True, don't get the query profile for timed out queries because
the query was purposely cancelled by setting the query timeout too short to complete,
rather than having some problem that needs to be investigated.
"""
if not self.impalad_conn:
raise Exception("connect() must first be called")
timeout_unix_time = time() + timeout_secs
report = QueryReport(query)
try:
with self.impalad_conn.cursor() as cursor:
start_time = time()
self._set_db_and_options(cursor, query, run_set_up, mem_limit_mb, timeout_secs)
error = None
try:
cursor.execute_async(
"/* Mem: %s MB. Coordinator: %s. */\n"
% (mem_limit_mb, self.impalad.host_name) + query.sql)
report.query_id = op_handle_to_query_id(cursor._last_operation.handle if
cursor._last_operation else None)
LOG.debug("Query id is %s", report.query_id)
if not self._wait_until_fetchable(cursor, report, timeout_unix_time,
should_cancel):
return report
if query.query_type == QueryType.SELECT:
try:
report.result_hash = self._hash_result(cursor, timeout_unix_time, query)
if retain_profile or \
query.result_hash and report.result_hash != query.result_hash:
fetch_and_set_profile(cursor, report)
except QueryTimeout:
self._cancel(cursor, report)
return report
else:
# If query is in error state, this will raise an exception
cursor._wait_to_finish()
except Exception as error:
report.query_id = op_handle_to_query_id(cursor._last_operation.handle if
cursor._last_operation else None)
LOG.debug("Error running query with id %s: %s", report.query_id, error)
self._check_for_memory_errors(report, cursor, error)
if report.has_query_error():
return report
report.runtime_secs = time() - start_time
if cursor.execution_failed() or self.check_if_mem_was_spilled:
fetch_and_set_profile(cursor, report)
report.mem_was_spilled = any([
pattern.search(report.profile) is not None
for pattern in QueryRunner.SPILLED_PATTERNS])
report.not_enough_memory = "Memory limit exceeded" in report.profile
except Exception as error:
# A mem limit error would have been caught above, no need to check for that here.
report.other_error = error
return report
def _set_db_and_options(self, cursor, query, run_set_up, mem_limit_mb, timeout_secs):
"""Set up a new cursor for running a query by switching to the correct database and
setting query options."""
if query.db_name:
LOG.debug("Using %s database", query.db_name)
cursor.execute("USE %s" % query.db_name)
if run_set_up and query.set_up_sql:
LOG.debug("Running set up query:\n%s", query.set_up_sql)
cursor.execute(query.set_up_sql)
for query_option, value in self.common_query_options.iteritems():
cursor.execute(
"SET {query_option}={value}".format(query_option=query_option, value=value))
for query_option, value in query.options.iteritems():
cursor.execute(
"SET {query_option}={value}".format(query_option=query_option, value=value))
cursor.execute("SET ABORT_ON_ERROR=1")
if self.test_admission_control:
LOG.debug(
"Running query without mem limit at %s with timeout secs %s:\n%s",
self.impalad.host_name, timeout_secs, query.sql)
else:
LOG.debug("Setting mem limit to %s MB", mem_limit_mb)
cursor.execute("SET MEM_LIMIT=%sM" % mem_limit_mb)
LOG.debug(
"Running query with %s MB mem limit at %s with timeout secs %s:\n%s",
mem_limit_mb, self.impalad.host_name, timeout_secs, query.sql)
def _wait_until_fetchable(self, cursor, report, timeout_unix_time, should_cancel):
"""Wait up until timeout_unix_time until the query results can be fetched (if it's
a SELECT query) or until it has finished executing (if it's a different query type
like DML). If the timeout expires we either cancel the query or report the timeout.
Return True in the first case or False in the second (timeout) case."""
# Loop until the query gets to the right state or a timeout expires.
sleep_secs = 0.1
secs_since_log = 0
# True if we incremented num_queries_started_running_or_cancelled for this query.
started_running_or_cancelled = False
while True:
query_state = cursor.status()
# Check if the query got past the PENDING/INITIALIZED states, either because
# it's executing or hit an error.
if (not started_running_or_cancelled and query_state not in ('PENDING_STATE',
'INITIALIZED_STATE')):
started_running_or_cancelled = True
increment(self._metrics[NUM_QUERIES_STARTED_RUNNING_OR_CANCELLED])
# Return if we're ready to fetch results (in the FINISHED state) or we are in
# another terminal state like EXCEPTION.
if query_state not in ('PENDING_STATE', 'INITIALIZED_STATE', 'RUNNING_STATE'):
return True
if time() > timeout_unix_time:
if not should_cancel:
fetch_and_set_profile(cursor, report)
self._cancel(cursor, report)
if not started_running_or_cancelled:
increment(self._metrics[NUM_QUERIES_STARTED_RUNNING_OR_CANCELLED])
return False
if secs_since_log > 5:
secs_since_log = 0
LOG.debug("Waiting for query to execute")
sleep(sleep_secs)
secs_since_log += sleep_secs
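  # The states polled above follow the lifecycle reported by the client library:
  # PENDING_STATE / INITIALIZED_STATE while queued or starting, RUNNING_STATE while
  # executing, and then some terminal state (FINISHED or an error state). Only the
  # non-terminal names are checked explicitly; any other state means "fetchable or
  # failed" and control returns to the caller.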
def update_from_query_report(self, report):
LOG.debug("Updating runtime stats (Query Runner PID: {0})".format(self.proc.pid))
increment(self._metrics[NUM_QUERIES_FINISHED])
if report.not_enough_memory:
increment(self._metrics[NUM_QUERIES_EXCEEDED_MEM_LIMIT])
if report.ac_rejected:
increment(self._metrics[NUM_QUERIES_AC_REJECTED])
if report.ac_timedout:
increment(self._metrics[NUM_QUERIES_AC_TIMEDOUT])
if report.was_cancelled:
increment(self._metrics[NUM_QUERIES_CANCELLED])
def _cancel(self, cursor, report):
report.timed_out = True
if not report.query_id:
return
try:
LOG.debug("Attempting cancellation of query with id %s", report.query_id)
cursor.cancel_operation()
LOG.debug("Sent cancellation request for query with id %s", report.query_id)
except Exception as e:
LOG.debug("Error cancelling query with id %s: %s", report.query_id, e)
try:
LOG.debug("Attempting to cancel query through the web server.")
self.impalad.cancel_query(report.query_id)
except Exception as e:
LOG.debug("Error cancelling query %s through the web server: %s",
report.query_id, e)
def _check_for_memory_errors(self, report, cursor, caught_exception):
"""To be called after a query failure to check for signs of failed due to a
mem limit or admission control rejection/timeout. The report will be updated
accordingly.
"""
fetch_and_set_profile(cursor, report)
caught_msg = str(caught_exception).lower().strip()
# Distinguish error conditions based on string fragments. The AC rejection and
# out-of-memory conditions actually overlap (since some memory checks happen in
# admission control) so check the out-of-memory conditions first.
if "memory limit exceeded" in caught_msg or \
"repartitioning did not reduce the size of a spilled partition" in caught_msg or \
"failed to get minimum memory reservation" in caught_msg or \
"minimum memory reservation is greater than" in caught_msg or \
"minimum memory reservation needed is greater than" in caught_msg:
report.not_enough_memory = True
return
if "rejected query from pool" in caught_msg:
report.ac_rejected = True
return
if "admission for query exceeded timeout" in caught_msg:
report.ac_timedout = True
return
LOG.debug("Non-mem limit error for query with id %s: %s", report.query_id,
caught_exception, exc_info=True)
report.other_error = caught_exception
def _hash_result(self, cursor, timeout_unix_time, query):
"""Returns a hash that is independent of row order. 'query' is only used for debug
    logging purposes (if the result is not as expected, a log file will be left for
    investigation).
"""
query_id = op_handle_to_query_id(cursor._last_operation.handle if
cursor._last_operation else None)
# A value of 1 indicates that the hash thread should continue to work.
should_continue = Value("i", 1)
def hash_result_impl():
result_log = None
try:
file_name = '_'.join([query.logical_query_id, query_id.replace(":", "_")])
if query.result_hash is None:
file_name += "_initial"
file_name += "_results.txt"
result_log = open(os.path.join(self.results_dir, RESULT_HASHES_DIR, file_name),
"w")
result_log.write(query.sql)
result_log.write("\n")
current_thread().result = 1
while should_continue.value:
LOG.debug(
"Fetching result for query with id %s",
op_handle_to_query_id(
cursor._last_operation.handle if cursor._last_operation else None))
rows = cursor.fetchmany(self.BATCH_SIZE)
if not rows:
LOG.debug(
"No more results for query with id %s",
op_handle_to_query_id(
cursor._last_operation.handle if cursor._last_operation else None))
return
for row in rows:
for idx, val in enumerate(row):
if val is None:
# The hash() of None can change from run to run since it's based on
# a memory address. A chosen value will be used instead.
val = 38463209
elif isinstance(val, float):
                # Floats returned by Impala may not be deterministic; the trailing
# insignificant digits may differ. Only the first 6 digits will be used
# after rounding.
sval = "%f" % val
dot_idx = sval.find(".")
val = round(val, 6 - dot_idx)
current_thread().result += (idx + 1) * hash(val)
# Modulo the result to keep it "small" otherwise the math ops can be slow
# since python does infinite precision math.
current_thread().result %= maxint
if result_log:
result_log.write(str(val))
result_log.write("\t")
result_log.write(str(current_thread().result))
result_log.write("\n")
except Exception as e:
current_thread().error = e
finally:
if result_log is not None:
result_log.close()
if (
current_thread().error is not None and
current_thread().result == query.result_hash
):
os.remove(result_log.name)
hash_thread = create_and_start_daemon_thread(
hash_result_impl, "Fetch Results %s" % query_id)
hash_thread.join(max(timeout_unix_time - time(), 0))
if hash_thread.is_alive():
should_continue.value = 0
raise QueryTimeout()
if hash_thread.error:
raise hash_thread.error
return hash_thread.result
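# Illustrative sketch of the row hashing in _hash_result() (hypothetical two-row result
# set): for rows (1, 'a') and (2, 'b'), the accumulated value is
#   1 * hash(1) + 2 * hash('a') + 1 * hash(2) + 2 * hash('b')
# with the running total reduced modulo maxint as it goes. Because addition is
# commutative, the same rows in any order produce the same hash, which is what makes
# the check robust to non-deterministic result ordering.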
def load_tpc_queries(workload):
"""Returns a list of TPC queries. 'workload' should either be 'tpch' or 'tpcds'."""
LOG.info("Loading %s queries", workload)
queries = []
for query_name, query_sql in test_file_parser.load_tpc_queries(workload,
include_stress_queries=True).iteritems():
query = Query()
query.name = query_name
query.sql = query_sql
queries.append(query)
return queries
def load_queries_from_test_file(file_path, db_name=None):
LOG.debug("Loading queries from %s", file_path)
test_cases = test_file_parser.parse_query_test_file(file_path)
queries = list()
for test_case in test_cases:
query = Query()
query.sql = test_file_parser.remove_comments(test_case["QUERY"])
query.db_name = db_name
queries.append(query)
return queries
def load_random_queries_and_populate_runtime_info(
query_generator, model_translator, tables, impala, converted_args
):
"""Returns a list of random queries. Each query will also have its runtime info
populated. The runtime info population also serves to validate the query.
"""
LOG.info("Generating random queries")
def generate_candidates():
while True:
query_model = query_generator.generate_statement(tables)
sql = model_translator.write_query(query_model)
query = Query()
query.sql = sql
query.db_name = converted_args.random_db
yield query
return populate_runtime_info_for_random_queries(
impala, generate_candidates(), converted_args)
def populate_runtime_info_for_random_queries(impala, candidate_queries, converted_args):
"""Returns a list of random queries. Each query will also have its runtime info
populated. The runtime info population also serves to validate the query.
"""
start_time = datetime.now()
queries = list()
# TODO(IMPALA-4632): Consider running reset_databases() here if we want to extend DML
# functionality to random stress queries as well.
for query in candidate_queries:
try:
populate_runtime_info(
query, impala, converted_args,
timeout_secs=converted_args.random_query_timeout_seconds)
queries.append(query)
except Exception as e:
# Ignore any non-fatal errors. These could be query timeouts or bad queries (
# query generator bugs).
if print_crash_info_if_exists(impala, start_time):
raise e
LOG.warn(
"Error running query (the test will continue)\n%s\n%s",
e, query.sql, exc_info=True)
if len(queries) == converted_args.random_query_count:
break
return queries
def populate_runtime_info(query, impala, converted_args, timeout_secs=maxint):
"""Runs the given query by itself repeatedly until the minimum memory is determined
with and without spilling. Potentially all fields in the Query class (except
'sql') will be populated by this method. 'required_mem_mb_without_spilling' and
the corresponding runtime field may still be None if the query could not be run
without spilling.
converted_args.samples and converted_args.max_conflicting_samples control the
reliability of the collected information. The problem is that memory spilling or usage
may differ (by a large amount) from run to run due to races during execution. The
parameters provide a way to express "X out of Y runs must have resulted in the same
outcome". Increasing the number of samples and decreasing the tolerance (max conflicts)
increases confidence but also increases the time to collect the data.
"""
LOG.info("Collecting runtime info for query %s: \n%s", query.name, query.sql)
samples = converted_args.samples
max_conflicting_samples = converted_args.max_conflicting_samples
results_dir = converted_args.results_dir
mem_limit_eq_threshold_mb = converted_args.mem_limit_eq_threshold_mb
mem_limit_eq_threshold_percent = converted_args.mem_limit_eq_threshold_percent
runner = QueryRunner()
runner.check_if_mem_was_spilled = True
runner.common_query_options = converted_args.common_query_options
runner.test_admission_control = converted_args.test_admission_control
runner.impalad = impala.impalads[0]
runner.results_dir = results_dir
runner.use_kerberos = converted_args.use_kerberos
runner.connect()
limit_exceeded_mem = 0
non_spill_mem = None
spill_mem = None
report = None
mem_limit = None
old_required_mem_mb_without_spilling = query.required_mem_mb_without_spilling
old_required_mem_mb_with_spilling = query.required_mem_mb_with_spilling
profile_error_prefix = query.logical_query_id + "_binsearch_error"
# TODO: This method is complicated enough now that breaking it out into a class may be
# helpful to understand the structure.
def update_runtime_info():
required_mem = min(mem_limit, impala.min_impalad_mem_mb)
if report.mem_was_spilled:
if (
query.required_mem_mb_with_spilling is None or
required_mem < query.required_mem_mb_with_spilling
):
query.required_mem_mb_with_spilling = required_mem
query.solo_runtime_secs_with_spilling = report.runtime_secs
query.solo_runtime_profile_with_spilling = report.profile
elif (
query.required_mem_mb_without_spilling is None or
required_mem < query.required_mem_mb_without_spilling
):
query.required_mem_mb_without_spilling = required_mem
query.solo_runtime_secs_without_spilling = report.runtime_secs
assert report.runtime_secs is not None, report
query.solo_runtime_profile_without_spilling = report.profile
def get_report(desired_outcome=None):
reports_by_outcome = defaultdict(list)
leading_outcome = None
for remaining_samples in xrange(samples - 1, -1, -1):
report = runner.run_query(query, mem_limit, run_set_up=True,
timeout_secs=timeout_secs, retain_profile=True)
if report.timed_out:
report.write_query_profile(
os.path.join(results_dir, PROFILES_DIR), profile_error_prefix)
raise QueryTimeout(
"query {0} timed out during binary search".format(query.logical_query_id))
if report.other_error:
report.write_query_profile(
os.path.join(results_dir, PROFILES_DIR), profile_error_prefix)
raise Exception(
"query {0} errored during binary search: {1}".format(
query.logical_query_id, str(report.other_error)))
LOG.debug("Spilled: %s" % report.mem_was_spilled)
if not report.has_query_error():
if query.result_hash is None:
query.result_hash = report.result_hash
elif query.result_hash != report.result_hash:
report.write_query_profile(
os.path.join(results_dir, PROFILES_DIR), profile_error_prefix)
raise Exception(
"Result hash mismatch for query %s; expected %s, got %s" %
(query.logical_query_id, query.result_hash, report.result_hash))
if report.not_enough_memory:
outcome = "EXCEEDED"
elif report.mem_was_spilled:
outcome = "SPILLED"
else:
outcome = "NOT_SPILLED"
reports_by_outcome[outcome].append(report)
if not leading_outcome:
leading_outcome = outcome
continue
if len(reports_by_outcome[outcome]) > len(reports_by_outcome[leading_outcome]):
leading_outcome = outcome
if len(reports_by_outcome[leading_outcome]) + max_conflicting_samples == samples:
break
if (
len(reports_by_outcome[leading_outcome]) + remaining_samples <
samples - max_conflicting_samples
):
return
if desired_outcome \
and len(reports_by_outcome[desired_outcome]) + remaining_samples \
< samples - max_conflicting_samples:
return
reports = reports_by_outcome[leading_outcome]
reports.sort(key=lambda r: r.runtime_secs)
return reports[len(reports) / 2]
if not any((old_required_mem_mb_with_spilling, old_required_mem_mb_without_spilling)):
mem_estimate = estimate_query_mem_mb_usage(query, runner)
LOG.info("Finding a starting point for binary search")
mem_limit = min(mem_estimate, impala.min_impalad_mem_mb) or impala.min_impalad_mem_mb
while True:
LOG.info("Next mem_limit: {0}".format(mem_limit))
report = get_report()
if not report or report.not_enough_memory:
if report and report.not_enough_memory:
limit_exceeded_mem = mem_limit
if mem_limit == impala.min_impalad_mem_mb:
LOG.warn(
"Query couldn't be run even when using all available memory\n%s", query.sql)
return
mem_limit = min(2 * mem_limit, impala.min_impalad_mem_mb)
continue
update_runtime_info()
if report.mem_was_spilled:
spill_mem = mem_limit
else:
non_spill_mem = mem_limit
break
LOG.info("Finding minimum memory required to avoid spilling")
lower_bound = max(limit_exceeded_mem, spill_mem)
upper_bound = min(non_spill_mem or maxint, impala.min_impalad_mem_mb)
while True:
if old_required_mem_mb_without_spilling:
mem_limit = old_required_mem_mb_without_spilling
old_required_mem_mb_without_spilling = None
else:
mem_limit = (lower_bound + upper_bound) / 2
LOG.info("Next mem_limit: {0}".format(mem_limit))
should_break = mem_limit / float(upper_bound) > 1 - mem_limit_eq_threshold_percent \
or upper_bound - mem_limit < mem_limit_eq_threshold_mb
report = get_report(desired_outcome=("NOT_SPILLED" if spill_mem else None))
if not report:
lower_bound = mem_limit
elif report.not_enough_memory:
lower_bound = mem_limit
limit_exceeded_mem = mem_limit
else:
update_runtime_info()
if report.mem_was_spilled:
lower_bound = mem_limit
spill_mem = min(spill_mem, mem_limit)
else:
upper_bound = mem_limit
non_spill_mem = mem_limit
if mem_limit == impala.min_impalad_mem_mb:
break
if should_break:
if non_spill_mem:
break
lower_bound = upper_bound = impala.min_impalad_mem_mb
# This value may be updated during the search for the absolute minimum.
LOG.info(
"Minimum memory to avoid spilling: %s MB" % query.required_mem_mb_without_spilling)
LOG.info("Finding absolute minimum memory required")
lower_bound = limit_exceeded_mem
upper_bound = min(
spill_mem or maxint, non_spill_mem or maxint, impala.min_impalad_mem_mb)
while True:
if old_required_mem_mb_with_spilling:
mem_limit = old_required_mem_mb_with_spilling
old_required_mem_mb_with_spilling = None
else:
mem_limit = (lower_bound + upper_bound) / 2
LOG.info("Next mem_limit: {0}".format(mem_limit))
should_break = mem_limit / float(upper_bound) > 1 - mem_limit_eq_threshold_percent \
or upper_bound - mem_limit < mem_limit_eq_threshold_mb
report = get_report(desired_outcome="SPILLED")
if not report or report.not_enough_memory:
lower_bound = mem_limit
else:
update_runtime_info()
upper_bound = mem_limit
if should_break:
if not query.required_mem_mb_with_spilling:
if upper_bound - mem_limit < mem_limit_eq_threshold_mb:
# IMPALA-6604: A fair amount of queries go down this path.
LOG.info(
"Unable to find a memory limit with spilling within the threshold of {0} "
"MB. Using the same memory limit for both.".format(
mem_limit_eq_threshold_mb))
query.required_mem_mb_with_spilling = query.required_mem_mb_without_spilling
query.solo_runtime_secs_with_spilling = query.solo_runtime_secs_without_spilling
query.solo_runtime_profile_with_spilling = \
query.solo_runtime_profile_without_spilling
break
LOG.info("Minimum memory is %s MB" % query.required_mem_mb_with_spilling)
if (
query.required_mem_mb_without_spilling is not None and
      query.required_mem_mb_with_spilling is not None and
query.required_mem_mb_without_spilling < query.required_mem_mb_with_spilling
):
# Query execution is not deterministic and sometimes a query will run without spilling
# at a lower mem limit than it did with spilling. In that case, just use the lower
# value.
LOG.info(
"A lower memory limit to avoid spilling was found while searching for"
" the absolute minimum memory.")
query.required_mem_mb_with_spilling = query.required_mem_mb_without_spilling
query.solo_runtime_secs_with_spilling = query.solo_runtime_secs_without_spilling
query.solo_runtime_profile_with_spilling = query.solo_runtime_profile_without_spilling
LOG.debug("Query after populating runtime info: %s", query)
def estimate_query_mem_mb_usage(query, query_runner):
"""Runs an explain plan then extracts and returns the estimated memory needed to run
the query.
"""
with query_runner.impalad_conn.cursor() as cursor:
LOG.debug("Using %s database", query.db_name)
if query.db_name:
cursor.execute('USE ' + query.db_name)
if query.query_type == QueryType.COMPUTE_STATS:
# Running "explain" on compute stats is not supported by Impala.
return
LOG.debug("Explaining query\n%s", query.sql)
cursor.execute('EXPLAIN ' + query.sql)
explain_rows = cursor.fetchall()
explain_lines = [row[0] for row in explain_rows]
mem_limit, units = match_memory_estimate(explain_lines)
return parse_mem_to_mb(mem_limit, units)
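# Illustrative sketch only (never called): the explain plan fetched above is expected to
# contain a per-host memory estimate line, which match_memory_estimate() and
# parse_mem_to_mb() extract. The exact line format used below is an assumption made for
# this example, not a statement about Impala's actual EXPLAIN output.
def _example_extract_mem_estimate_mb(explain_lines):
  import re
  for line in explain_lines:
    match = re.search(r"Memory=(\d+(?:\.\d+)?)\s*(MB|GB)", line)
    if match:
      value, units = float(match.group(1)), match.group(2)
      return value * 1024 if units == "GB" else value
  return None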
def save_runtime_info(path, query, impala):
"""Updates the file at 'path' with the given query information."""
store = None
if os.path.exists(path):
with open(path) as file:
store = json.load(file)
_check_store_version(store)
if not store:
store = {
"host_names": list(), "db_names": dict(), "version": RUNTIME_INFO_FILE_VERSION}
with open(path, "w+") as file:
store["host_names"] = sorted([i.host_name for i in impala.impalads])
queries = store["db_names"].get(query.db_name, dict())
query_by_options = queries.get(query.sql, dict())
query_by_options[str(sorted(query.options.items()))] = query
queries[query.sql] = query_by_options
store["db_names"][query.db_name] = queries
class JsonEncoder(json.JSONEncoder):
def default(self, obj):
data = dict(obj.__dict__)
# Queries are stored by sql, so remove the duplicate data. Also don't store
# profiles as JSON values, but instead separately.
for k in ("sql", "solo_runtime_profile_with_spilling",
"solo_runtime_profile_without_spilling"):
if k in data:
del data[k]
return data
json.dump(
store, file, cls=JsonEncoder, sort_keys=True, indent=2, separators=(',', ': '))
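# For reference, the file written above has roughly the following JSON shape (values are
# made up for illustration; the real version number comes from RUNTIME_INFO_FILE_VERSION):
#
#   {
#     "version": <RUNTIME_INFO_FILE_VERSION>,
#     "host_names": ["host-1.example.com", "host-2.example.com"],
#     "db_names": {
#       "tpch": {
#         "SELECT ...": {
#           "[('MT_DOP', '4')]": {"name": "...", "required_mem_mb_with_spilling": 512, ...}
#         }
#       }
#     }
#   }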
def load_runtime_info(path, impala=None):
"""Reads the query runtime information at 'path' and returns a
dict<db_name, dict<sql, Query>>. Returns an empty dict if the hosts in the 'impala'
instance do not match the data in 'path'.
"""
queries_by_db_and_sql = defaultdict(lambda: defaultdict(dict))
if not os.path.exists(path):
return queries_by_db_and_sql
with open(path) as file:
store = json.load(file)
_check_store_version(store)
if (
impala and
store.get("host_names") != sorted([i.host_name for i in impala.impalads])
):
return queries_by_db_and_sql
for db_name, queries_by_sql in store["db_names"].iteritems():
for sql, queries_by_options in queries_by_sql.iteritems():
for options, json_query in queries_by_options.iteritems():
query = Query()
query.__dict__.update(json_query)
query.sql = sql
queries_by_db_and_sql[db_name][sql][options] = query
return queries_by_db_and_sql
def _check_store_version(store):
"""Clears 'store' if the version is too old or raises an error if the version is too
new.
"""
if store["version"] < RUNTIME_INFO_FILE_VERSION:
LOG.warn("Runtime file info version is old and will be ignored")
store.clear()
elif store["version"] > RUNTIME_INFO_FILE_VERSION:
raise Exception(
"Unexpected runtime file info version %s expected %s"
% (store["version"], RUNTIME_INFO_FILE_VERSION))
def print_runtime_info_comparison(old_runtime_info, new_runtime_info):
# TODO: Provide a way to call this from the CLI. This was hard coded to run from main()
# when it was used.
print(",".join([
"Database", "Query",
"Old Mem MB w/Spilling",
"New Mem MB w/Spilling",
"Diff %",
"Old Runtime w/Spilling",
"New Runtime w/Spilling",
"Diff %",
"Old Mem MB wout/Spilling",
"New Mem MB wout/Spilling",
"Diff %",
"Old Runtime wout/Spilling",
"New Runtime wout/Spilling",
"Diff %"]))
for db_name, old_queries in old_runtime_info.iteritems():
new_queries = new_runtime_info.get(db_name)
if not new_queries:
continue
for sql, old_query in old_queries.iteritems():
new_query = new_queries.get(sql)
if not new_query:
continue
sys.stdout.write(old_query["db_name"])
sys.stdout.write(",")
sys.stdout.write(old_query["name"])
sys.stdout.write(",")
for attr in [
"required_mem_mb_with_spilling", "solo_runtime_secs_with_spilling",
"required_mem_mb_without_spilling", "solo_runtime_secs_without_spilling"
]:
old_value = old_query[attr]
sys.stdout.write(str(old_value))
sys.stdout.write(",")
new_value = new_query[attr]
sys.stdout.write(str(new_value))
sys.stdout.write(",")
if old_value and new_value is not None:
sys.stdout.write("%0.2f%%" % (100 * float(new_value - old_value) / old_value))
else:
sys.stdout.write("N/A")
sys.stdout.write(",")
print()
def generate_DML_queries(cursor, dml_mod_values):
"""Generate insert, upsert, update, delete DML statements.
For each table in the database that cursor is connected to, create 4 DML queries
(insert, upsert, update, delete) for each mod value in 'dml_mod_values'. This value
controls which rows will be affected. The generated queries assume that for each table
in the database, there exists a table with a '_original' suffix that is never modified.
This function has some limitations:
1. Only generates DML statements against Kudu tables, and ignores non-Kudu tables.
2. Requires that the type of the first column of the primary key is an integer type.
"""
LOG.info("Generating DML queries")
tables = [cursor.describe_table(t) for t in cursor.list_table_names()
if not t.endswith("_original")]
result = []
for table in tables:
if not table.primary_keys:
# Skip non-Kudu tables. If a table has no primary keys, then it cannot be a Kudu
# table.
LOG.debug("Skipping table '{0}' because it has no primary keys.".format(table.name))
continue
if len(table.primary_keys) > 1:
# TODO(IMPALA-4665): Add support for tables with multiple primary keys.
LOG.debug("Skipping table '{0}' because it has more than "
"1 primary key column.".format(table.name))
continue
primary_key = table.primary_keys[0]
if primary_key.exact_type not in (Int, TinyInt, SmallInt, BigInt):
# We want to be able to apply the modulo operation on the primary key. If the
# first primary key column happens to not be an integer, we will skip
# generating queries for this table.
LOG.debug("Skipping table '{0}' because the first column '{1}' in the "
"primary key is not an integer.".format(table.name, primary_key.name))
continue
for mod_value in dml_mod_values:
# Insert
insert_query = Query()
# Populate runtime info for Insert and Upsert queries before Update and Delete
# queries because tables remain in original state after running the Insert and
# Upsert queries. During the binary search in runtime info population for the
# Insert query, we first delete some rows and then reinsert them, so the table
# remains in the original state. For the delete, the order is reversed, so the table
# is not in the original state after running the delete (or update) query. This
# is why population_order is smaller for Insert and Upsert queries.
insert_query.population_order = 1
insert_query.query_type = QueryType.INSERT
insert_query.name = "insert_{0}".format(table.name)
insert_query.db_name = cursor.db_name
insert_query.sql = (
"INSERT INTO TABLE {0} SELECT * FROM {0}_original "
"WHERE {1} % {2} = 0").format(table.name, primary_key.name, mod_value)
# Upsert
upsert_query = Query()
upsert_query.population_order = 1
upsert_query.query_type = QueryType.UPSERT
upsert_query.name = "upsert_{0}".format(table.name)
upsert_query.db_name = cursor.db_name
upsert_query.sql = (
"UPSERT INTO TABLE {0} SELECT * "
"FROM {0}_original WHERE {1} % {2} = 0").format(
table.name, primary_key.name, mod_value)
# Update
update_query = Query()
update_query.population_order = 2
update_query.query_type = QueryType.UPDATE
update_query.name = "update_{0}".format(table.name)
update_query.db_name = cursor.db_name
update_list = ', '.join(
'a.{0} = b.{0}'.format(col.name)
for col in table.cols if not col.is_primary_key)
update_query.sql = (
"UPDATE a SET {update_list} FROM {table_name} a JOIN {table_name}_original b "
"ON a.{pk} = b.{pk} + 1 WHERE a.{pk} % {mod_value} = 0").format(
table_name=table.name, pk=primary_key.name, mod_value=mod_value,
update_list=update_list)
# Delete
delete_query = Query()
delete_query.population_order = 2
delete_query.query_type = QueryType.DELETE
delete_query.name = "delete_{0}".format(table.name)
delete_query.db_name = cursor.db_name
delete_query.sql = ("DELETE FROM {0} WHERE {1} % {2} = 0").format(
table.name, primary_key.name, mod_value)
if table.name + "_original" in set(table.name for table in tables):
insert_query.set_up_sql = "DELETE FROM {0} WHERE {1} % {2} = 0".format(
table.name, primary_key.name, mod_value)
upsert_query.set_up_sql = insert_query.set_up_sql
update_query.set_up_sql = (
"UPSERT INTO TABLE {0} SELECT * FROM {0}_original "
"WHERE {1} % {2} = 0").format(table.name, primary_key.name, mod_value)
delete_query.set_up_sql = update_query.set_up_sql
result.append(insert_query)
LOG.debug("Added insert query: {0}".format(insert_query))
result.append(update_query)
LOG.debug("Added update query: {0}".format(update_query))
result.append(upsert_query)
LOG.debug("Added upsert query: {0}".format(upsert_query))
result.append(delete_query)
LOG.debug("Added delete query: {0}".format(delete_query))
assert len(result) > 0, "No DML queries were added."
return result
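# For a hypothetical Kudu table "lineitem" with integer primary key "l_orderkey" and a mod
# value of 11, the statements generated above would look like (illustration only):
#
#   INSERT INTO TABLE lineitem SELECT * FROM lineitem_original WHERE l_orderkey % 11 = 0
#   UPSERT INTO TABLE lineitem SELECT * FROM lineitem_original WHERE l_orderkey % 11 = 0
#   UPDATE a SET a.l_comment = b.l_comment, ... FROM lineitem a JOIN lineitem_original b
#       ON a.l_orderkey = b.l_orderkey + 1 WHERE a.l_orderkey % 11 = 0
#   DELETE FROM lineitem WHERE l_orderkey % 11 = 0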
def generate_compute_stats_queries(cursor):
"""For each table in the database that cursor is connected to, generate several compute
stats queries. Each query will have a different value for the MT_DOP query option.
"""
LOG.info("Generating Compute Stats queries")
tables = [cursor.describe_table(t) for t in cursor.list_table_names()
if not t.endswith("_original")]
result = []
mt_dop_values = [str(2**k) for k in range(5)]
for table in tables:
for mt_dop_value in mt_dop_values:
compute_query = Query()
compute_query.population_order = 1
compute_query.query_type = QueryType.COMPUTE_STATS
compute_query.sql = "COMPUTE STATS {0}".format(table.name)
compute_query.options["MT_DOP"] = mt_dop_value
compute_query.db_name = cursor.db_name
compute_query.name = "compute_stats_{0}_mt_dop_{1}".format(
table.name, compute_query.options["MT_DOP"])
result.append(compute_query)
LOG.debug("Added compute stats query: {0}".format(compute_query))
return result
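# Example (illustration only): for a table named "store_sales" the loop above produces five
# queries, compute_stats_store_sales_mt_dop_1 through compute_stats_store_sales_mt_dop_16,
# all running "COMPUTE STATS store_sales" with MT_DOP set to 1, 2, 4, 8 and 16.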
def prepare_database(cursor):
"""For each table in the database that cursor is connected to, create an identical copy
with '_original' suffix. This function is idempotent.
Note: At this time we only support Kudu tables with a simple hash partitioning based on
the primary key. (SHOW CREATE TABLE would not work otherwise.)
"""
tables = dict((t, cursor.describe_table(t)) for t in cursor.list_table_names())
for table_name in tables:
if not table_name.endswith("_original") and table_name + "_original" not in tables:
LOG.debug("Creating original table: {0}".format(table_name))
cursor.execute("SHOW CREATE TABLE " + table_name)
create_sql = cursor.fetchone()[0]
search_pattern = r"CREATE TABLE (\w*)\.(.*) \("
replacement = "CREATE TABLE {tbl} (".format(tbl=table_name + "_original")
create_original_sql = re.sub(
search_pattern, replacement, create_sql, count=1)
LOG.debug("Create original SQL:\n{0}".format(create_original_sql))
cursor.execute(create_original_sql)
cursor.execute("INSERT INTO {0}_original SELECT * FROM {0}".format(table_name))
cursor.execute("COMPUTE STATS {0}".format(table_name + "_original"))
def reset_databases(cursor):
"""Reset the database to the initial state. This is done by overwriting tables which
don't have the _original suffix with data from tables with the _original suffix.
Note: At this time we only support Kudu tables with a simple hash partitioning based on
the primary key. (SHOW CREATE TABLE would not work otherwise.)
"""
LOG.info("Resetting {0} database".format(cursor.db_name))
tables = dict((t, cursor.describe_table(t)) for t in cursor.list_table_names())
for table_name in tables:
if not table_name.endswith("_original"):
if table_name + "_original" in tables:
cursor.execute("SHOW CREATE TABLE " + table_name)
create_table_command = cursor.fetchone()[0]
cursor.execute("DROP TABLE {0}".format(table_name))
cursor.execute(create_table_command)
cursor.execute("INSERT INTO {0} SELECT * FROM {0}_original".format(table_name))
cursor.execute("COMPUTE STATS {0}".format(table_name))
else:
LOG.debug("Table '{0}' cannot be reset because '{0}_original' does not"
" exist in '{1}' database.".format(table_name, cursor.db_name))
def populate_all_queries(
queries, impala, converted_args, queries_with_runtime_info_by_db_sql_and_options
):
"""Populate runtime info for all queries, ordered by the population_order property."""
result = []
queries_by_order = {}
for query in queries:
if query.population_order not in queries_by_order:
queries_by_order[query.population_order] = []
queries_by_order[query.population_order].append(query)
for population_order in sorted(queries_by_order.keys()):
for query in queries_by_order[population_order]:
if (
query.sql in
queries_with_runtime_info_by_db_sql_and_options[query.db_name] and
str(sorted(query.options.items())) in
queries_with_runtime_info_by_db_sql_and_options[query.db_name][query.sql]
):
LOG.debug("Reusing previous runtime data for query: " + query.sql)
result.append(queries_with_runtime_info_by_db_sql_and_options[
query.db_name][query.sql][str(sorted(query.options.items()))])
else:
populate_runtime_info(query, impala, converted_args)
save_runtime_info(converted_args.runtime_info_path, query, impala)
query.write_runtime_info_profiles(
os.path.join(converted_args.results_dir, PROFILES_DIR))
result.append(query)
return result
def fetch_and_set_profile(cursor, report):
"""Set the report's query profile using the given cursor.
Producing a query profile can be somewhat expensive. A v-tune profile of
impalad showed 10% of cpu time spent generating query profiles.
"""
if not report.profile and cursor._last_operation:
try:
report.profile = cursor.get_profile()
except Exception as e:
LOG.debug("Error getting profile for query with id %s: %s", report.query_id, e)
def print_version(cluster):
"""
Print the cluster impalad version info to the console sorted by hostname.
"""
def _sorter(i1, i2):
return cmp(i1.host_name, i2.host_name)
version_info = cluster.impala.get_version_info()
print("Cluster Impalad Version Info:")
for impalad in sorted(version_info.keys(), cmp=_sorter):
print("{0}: {1}".format(impalad.host_name, version_info[impalad]))
def main():
parser = ArgumentParser(
epilog=dedent("""
Before running this script a CM cluster must be setup and any needed data
such as TPC-H/DS must be loaded. The first time this script is run it will
find memory limits and runtimes for each query and save the data to disk (since
collecting the data is slow) at --runtime-info-path then run the stress test.
Later runs will reuse the saved memory limits and timings. If the cluster changes
significantly the memory limits should be re-measured (deleting the file at
--runtime-info-path will cause re-measuring to happen).""").strip(),
formatter_class=ArgumentDefaultsHelpFormatter)
cli_options.add_logging_options(parser)
cli_options.add_cluster_options(parser)
cli_options.add_kerberos_options(parser)
cli_options.add_ssl_options(parser)
parser.add_argument(
"--runtime-info-path",
default=os.path.join(gettempdir(), "{cm_host}_query_runtime_info.json"),
help="The path to store query runtime info at. '{cm_host}' will be replaced with"
" the actual host name from --cm-host.")
parser.add_argument(
"--samples", default=1, type=int,
help='Used when collecting "runtime info" - the number of samples to collect when'
' testing a particular mem limit value.')
parser.add_argument(
"--max-conflicting-samples", default=0, type=int,
help='Used when collecting "runtime info" - the number of samples outcomes that'
' can disagree when deciding to accept a particular mem limit. Ex, when trying to'
' determine the mem limit that avoids spilling with samples=5 and'
' max-conflicting-samples=1, then 4/5 queries must not spill at a particular mem'
' limit.')
parser.add_argument(
"--mem-limit-eq-threshold-percent", default=0.025,
type=float, help='Used when collecting "runtime info". If the difference between'
' two memory limits is less than this percentage, we consider the two limits to'
' be equal and stop the memory binary search.')
parser.add_argument(
"--mem-limit-eq-threshold-mb", default=50,
type=int, help='Used when collecting "runtime info". If the difference between'
' two memory limits is less than this value in MB, we consider the two limits to'
' be equal and stop the memory binary search.')
parser.add_argument(
"--results-dir", default=gettempdir(),
help="Directory under which the profiles and result_hashes directories are created."
" Query hash results are written in the result_hashes directory. If query results"
" do not match, a log file will be left in that dir. The log file is also created"
" during the first run when runtime info is collected for each query. Unexpected"
" query timeouts, exceeded memory, failures or result mismatches will result in a"
" profile written in the profiles directory.")
parser.add_argument(
"--no-status", action="store_true", help="Do not print the status table.")
parser.add_argument(
"--cancel-current-queries", action="store_true",
help="Cancel any queries running on the cluster before beginning.")
parser.add_argument(
"--filter-query-mem-ratio", type=float, default=0.333,
help="Queries that require this ratio of total available memory will be filtered.")
parser.add_argument(
"--startup-queries-per-second", type=float, default=2.0,
help="Adjust this depending on the cluster size and workload. This determines"
" the minimum amount of time between successive query submissions when"
" the workload is initially ramping up.")
parser.add_argument(
"--fail-upon-successive-errors", type=int, default=1,
help="Continue running until N query errors are encountered in a row. Set"
" this to a high number to only stop when something catastrophic happens. A"
" value of 1 stops upon the first error.")
parser.add_argument(
"--mem-limit-padding-pct", type=int, default=25,
help="Pad query mem limits found by solo execution with this percentage when"
" running concurrently. After padding queries will not be expected to fail"
" due to mem limit exceeded.")
parser.add_argument(
"--mem-limit-padding-abs", type=int, default=0,
help="Pad query mem limits found by solo execution with this value (in megabytes)"
" running concurrently. After padding queries will not be expected to fail"
" due to mem limit exceeded. This is useful if we want to be able to add the same"
" amount of memory to smaller queries as to the big ones.")
parser.add_argument(
"--timeout-multiplier", type=float, default=1.0,
help="Deprecated - has no effect.")
parser.add_argument("--max-queries", type=int, default=100)
parser.add_argument(
"--reset-databases-before-binary-search", action="store_true",
help="If True, databases will be reset to their original state before the binary"
" search.")
parser.add_argument(
"--reset-databases-after-binary-search", action="store_true",
help="If True, databases will be reset to their original state after the binary"
" search and before starting the stress test. The primary intent of this option is"
" to undo the changes made to the databases by the binary search. This option can"
" also be used to reset the databases before running other (non stress) tests on"
" the same data.")
parser.add_argument(
"--generate-dml-queries", action="store_true",
help="If True, DML queries will be generated for Kudu databases.")
parser.add_argument(
"--dml-mod-values", nargs="+", type=int, default=[11],
help="List of mod values to use for the DML queries. There will be 4 DML (delete,"
" insert, update, upsert) queries generated per mod value per table. The smaller"
" the value, the more rows the DML query would touch (the query should touch about"
" 1/mod_value rows.)")
parser.add_argument(
"--generate-compute-stats-queries", action="store_true",
help="If True, Compute Stats queries will be generated.")
parser.add_argument(
"--select-probability", type=float, default=0.5,
help="Probability of choosing a select query (as opposed to a DML query).")
parser.add_argument("--tpcds-db", help="If provided, TPC-DS queries will be used.")
parser.add_argument("--tpch-db", help="If provided, TPC-H queries will be used.")
parser.add_argument(
"--tpch-nested-db", help="If provided, nested TPC-H queries will be used.")
parser.add_argument(
"--tpch-kudu-db", help="If provided, TPC-H queries for Kudu will be used.")
parser.add_argument(
"--tpcds-kudu-db", help="If provided, TPC-DS queries for Kudu will be used.")
parser.add_argument(
"--random-db", help="If provided, random queries will be used.")
parser.add_argument(
"--random-query-count", type=int, default=50,
help="The number of random queries to generate.")
parser.add_argument(
"--random-query-timeout-seconds", type=int, default=(5 * 60),
help="A random query that runs longer than this time when running alone will"
" be discarded.")
parser.add_argument(
"--query-file-path", help="Use queries in the given file. The file"
" format must be the same as standard test case format. Queries are expected to "
" be randomly generated and will be validated before running in stress mode.")
parser.add_argument(
"--query-file-db",
help="The name of the database to use with the queries from --query-file-path.")
parser.add_argument("--mem-overcommit-pct", type=float, default=0)
parser.add_argument(
"--mem-spill-probability", type=float, default=0.33, dest="spill_probability",
help="The probability that a mem limit will be set low enough to induce spilling.")
parser.add_argument(
"--mem-leak-check-interval-mins", type=int, default=None,
help="Periodically stop query execution and check that memory levels have reset.")
parser.add_argument(
"--cancel-probability", type=float, default=0.1,
help="The probability a query will be cancelled.")
parser.add_argument("--nlj-filter", help=SUPPRESS) # Made a no-op by IMPALA-7440.
parser.add_argument(
"--common-query-options", default=None, nargs="*",
help="Space-delimited string of query options and values. This is a freeform "
"string with little regard to whether you've spelled the query options correctly "
"or set valid values. Example: --common-query-options "
"DISABLE_CODEGEN=true RUNTIME_FILTER_MODE=1")
parser.add_argument(
"--test-admission-control", type=bool, default=False,
help="If true, assume that the Impala cluster under test is using memory-based "
"admission control and should not admit queries that cannot be run to completion. "
"In this mode the stress runner does not set mem_limit on queries and "
"out-of-memory errors are not expected in this mode so will fail the stress test "
"if encountered. The stress runner still tracks the 'admitted' memory so that "
"it can try to submit more queries than there is available memory for.")
parser.add_argument(
"--max-coordinators", default=0, type=int, metavar="max coordinators",
help="If > 0, submit queries to at most this number of coordinators."
"This is useful in conjunction with --test-admission-control to test behaviour "
"with a smaller number of admission controller instances.")
args = parser.parse_args()
converted_args = StressArgConverter(args)
cli_options.configure_logging(
args.log_level, debug_log_file=args.debug_log_file, log_thread_name=True,
log_process_id=True)
LOG.debug("CLI args: %s" % (args, ))
if (
not args.tpcds_db and not args.tpch_db and not args.random_db and not
args.tpch_nested_db and not args.tpch_kudu_db and not
args.tpcds_kudu_db and not args.query_file_path
):
raise Exception(
"At least one of --tpcds-db, --tpch-db, --tpch-kudu-db,"
"--tpcds-kudu-db, --tpch-nested-db, --random-db, --query-file-path is required")
result_hashes_path = os.path.join(args.results_dir, RESULT_HASHES_DIR)
if not os.path.isdir(result_hashes_path):
os.makedirs(result_hashes_path)
results_dir_path = os.path.join(args.results_dir, PROFILES_DIR)
if not os.path.isdir(results_dir_path):
os.makedirs(results_dir_path)
cluster = cli_options.create_cluster(args)
impala = cluster.impala
if impala.find_stopped_impalads():
impala.restart()
print_version(cluster)
impala.find_and_set_path_to_running_impalad_binary()
if args.cancel_current_queries and impala.queries_are_running():
impala.cancel_queries()
sleep(10)
if impala.queries_are_running():
raise Exception("Queries are currently running on the cluster")
impala.min_impalad_mem_mb = min(impala.find_impalad_mem_mb_limit())
queries_with_runtime_info_by_db_sql_and_options = load_runtime_info(
converted_args.runtime_info_path, impala)
# Start loading the test queries.
queries = list()
# If random queries were requested, those will be handled later. Unlike random queries,
# the TPC queries are expected to always complete successfully.
if args.tpcds_db:
tpcds_queries = load_tpc_queries("tpcds")
assert len(tpcds_queries) == EXPECTED_TPCDS_QUERIES_COUNT
for query in tpcds_queries:
query.db_name = args.tpcds_db
queries.extend(tpcds_queries)
if args.generate_compute_stats_queries:
with impala.cursor(db_name=args.tpcds_db) as cursor:
queries.extend(generate_compute_stats_queries(cursor))
if args.tpch_db:
tpch_queries = load_tpc_queries("tpch")
assert len(tpch_queries) == EXPECTED_TPCH_STRESS_QUERIES_COUNT
for query in tpch_queries:
query.db_name = args.tpch_db
queries.extend(tpch_queries)
if args.generate_compute_stats_queries:
with impala.cursor(db_name=args.tpch_db) as cursor:
queries.extend(generate_compute_stats_queries(cursor))
if args.tpch_nested_db:
tpch_nested_queries = load_tpc_queries("tpch_nested")
assert len(tpch_nested_queries) == EXPECTED_TPCH_NESTED_QUERIES_COUNT
for query in tpch_nested_queries:
query.db_name = args.tpch_nested_db
queries.extend(tpch_nested_queries)
if args.generate_compute_stats_queries:
with impala.cursor(db_name=args.tpch_nested_db) as cursor:
queries.extend(generate_compute_stats_queries(cursor))
if args.tpch_kudu_db:
tpch_kudu_queries = load_tpc_queries("tpch")
assert len(tpch_kudu_queries) == EXPECTED_TPCH_STRESS_QUERIES_COUNT
for query in tpch_kudu_queries:
query.db_name = args.tpch_kudu_db
queries.extend(tpch_kudu_queries)
if args.generate_compute_stats_queries:
with impala.cursor(db_name=args.tpch_kudu_db) as cursor:
queries.extend(generate_compute_stats_queries(cursor))
if args.generate_dml_queries:
with impala.cursor(db_name=args.tpch_kudu_db) as cursor:
prepare_database(cursor)
queries.extend(generate_DML_queries(cursor, args.dml_mod_values))
if args.tpcds_kudu_db:
tpcds_kudu_queries = load_tpc_queries("tpcds")
assert len(tpcds_kudu_queries) == EXPECTED_TPCDS_QUERIES_COUNT
for query in tpcds_kudu_queries:
query.db_name = args.tpcds_kudu_db
queries.extend(tpcds_kudu_queries)
if args.generate_compute_stats_queries:
with impala.cursor(db_name=args.tpcds_kudu_db) as cursor:
queries.extend(generate_compute_stats_queries(cursor))
if args.generate_dml_queries:
with impala.cursor(db_name=args.tpcds_kudu_db) as cursor:
prepare_database(cursor)
queries.extend(generate_DML_queries(cursor, args.dml_mod_values))
if args.reset_databases_before_binary_search:
for database in set(query.db_name for query in queries):
with impala.cursor(db_name=database) as cursor:
reset_databases(cursor)
queries = populate_all_queries(
queries, impala, converted_args, queries_with_runtime_info_by_db_sql_and_options)
# A particular random query may either fail (due to a generator or Impala bug) or
# take a really long time to complete. So the queries need to be validated. Since the
# runtime info also needs to be collected, that will serve as validation.
if args.random_db:
query_generator = QueryGenerator(DefaultProfile())
with impala.cursor(db_name=args.random_db) as cursor:
tables = [cursor.describe_table(t) for t in cursor.list_table_names()]
queries.extend(load_random_queries_and_populate_runtime_info(
query_generator, SqlWriter.create(), tables, impala, converted_args))
if args.query_file_path:
file_queries = load_queries_from_test_file(
args.query_file_path, db_name=args.query_file_db)
shuffle(file_queries)
queries.extend(populate_runtime_info_for_random_queries(
impala, file_queries, converted_args))
# Apply tweaks to the query's runtime info as requested by CLI options.
for idx in xrange(len(queries) - 1, -1, -1):
query = queries[idx]
if query.required_mem_mb_with_spilling:
query.required_mem_mb_with_spilling += int(
query.required_mem_mb_with_spilling * args.mem_limit_padding_pct / 100.0) + \
args.mem_limit_padding_abs
if query.required_mem_mb_without_spilling:
query.required_mem_mb_without_spilling += int(
query.required_mem_mb_without_spilling * args.mem_limit_padding_pct / 100.0) + \
args.mem_limit_padding_abs
# Remove any queries that would use "too many" resources. This way a larger number
# of queries will run concurrently.
if query.required_mem_mb_without_spilling is not None and \
query.required_mem_mb_without_spilling / float(impala.min_impalad_mem_mb) \
> args.filter_query_mem_ratio:
LOG.debug(
"Filtering non-spilling query that exceeds "
"--filter-query-mem-ratio: " + query.sql)
query.required_mem_mb_without_spilling = None
if query.required_mem_mb_with_spilling is None \
or query.required_mem_mb_with_spilling / float(impala.min_impalad_mem_mb) \
> args.filter_query_mem_ratio:
LOG.debug("Filtering query that exceeds --filter-query-mem-ratio: " + query.sql)
del queries[idx]
if len(queries) == 0:
raise Exception("All queries were filtered")
print("Using %s queries" % len(queries))
# After the binary search phase finishes, it may be a good idea to reset the database
# again to start the stress test from a clean state.
if args.reset_databases_after_binary_search:
for database in set(query.db_name for query in queries):
with impala.cursor(db_name=database) as cursor:
reset_databases(cursor)
LOG.info("Number of queries in the list: {0}".format(len(queries)))
stress_runner = StressRunner()
stress_runner.results_dir = args.results_dir
stress_runner.startup_queries_per_sec = args.startup_queries_per_second
stress_runner.num_successive_errors_needed_to_abort = args.fail_upon_successive_errors
stress_runner.use_kerberos = args.use_kerberos
stress_runner.cancel_probability = args.cancel_probability
stress_runner.spill_probability = args.spill_probability
stress_runner.leak_check_interval_mins = args.mem_leak_check_interval_mins
stress_runner.common_query_options = converted_args.common_query_options
stress_runner.test_admission_control = converted_args.test_admission_control
stress_runner.max_coordinators = converted_args.max_coordinators
stress_runner.run_queries(
queries, impala, args.max_queries, args.mem_overcommit_pct,
should_print_status=not args.no_status,
verify_results=not args.generate_dml_queries,
select_probability=args.select_probability)
if __name__ == "__main__":
main()
|
py | b400659e1e132026cffbca2fd0ec15a09061e887 | import discord
from discord.ext import commands
def is_mod():
async def mod_predicate(ctx):
return ctx.author.guild_permissions.manage_guild or ctx.bot.config.roles.mod in (role.id for role in ctx.author.roles) or ctx.bot.config.roles.admin in (role.id for role in ctx.author.roles)
return commands.check(mod_predicate)
def is_admin():
async def admin_predicate(ctx):
return ctx.author.guild_permissions.administrator or ctx.bot.config.roles.admin in (role.id for role in ctx.author.roles)
return commands.check(admin_predicate)
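# Example usage of the checks in this module (command name and signature are illustrative):
#
#     @commands.command()
#     @is_mod()
#     async def warn(ctx, member: discord.Member, *, reason: str):
#         ...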
def guild_only():
async def guild_predicate(ctx):
if ctx.guild is None:
return False
return ctx.guild.id == ctx.bot.config.server
return commands.check(guild_predicate) |
py | b40065f626fc9940f121030531a509d8130731cf | extension = """
from gino.ext.starlette import Gino
from core.factories import settings
from ssl import create_default_context
if not settings.DEBUG:
ssl_object =create_default_context(cafile=settings.SSL_CERT_FILE)
db: Gino = Gino(dsn=settings.DATABASE_URL,ssl=ssl_object,pool_min_size=3,pool_max_size=20,retry_limit=1,retry_interval=1)
else:
db: Gino = Gino(dsn=settings.DATABASE_URL)
"""
|
py | b40066d4868562919656e3447f997471a00243be | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetApplicationTypeVersionResult',
'AwaitableGetApplicationTypeVersionResult',
'get_application_type_version',
]
@pulumi.output_type
class GetApplicationTypeVersionResult:
"""
An application type version resource for the specified application type name resource.
"""
def __init__(__self__, app_package_url=None, default_parameter_list=None, etag=None, location=None, name=None, provisioning_state=None, tags=None, type=None):
if app_package_url and not isinstance(app_package_url, str):
raise TypeError("Expected argument 'app_package_url' to be a str")
pulumi.set(__self__, "app_package_url", app_package_url)
if default_parameter_list and not isinstance(default_parameter_list, dict):
raise TypeError("Expected argument 'default_parameter_list' to be a dict")
pulumi.set(__self__, "default_parameter_list", default_parameter_list)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="appPackageUrl")
def app_package_url(self) -> str:
"""
The URL to the application package
"""
return pulumi.get(self, "app_package_url")
@property
@pulumi.getter(name="defaultParameterList")
def default_parameter_list(self) -> Mapping[str, str]:
"""
List of application type parameters that can be overridden when creating or updating the application.
"""
return pulumi.get(self, "default_parameter_list")
@property
@pulumi.getter
def etag(self) -> str:
"""
Azure resource etag.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
It will be deprecated in New API, resource location depends on the parent resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Azure resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The current deployment or provisioning state, which only appears in the response
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Azure resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Azure resource type.
"""
return pulumi.get(self, "type")
class AwaitableGetApplicationTypeVersionResult(GetApplicationTypeVersionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetApplicationTypeVersionResult(
app_package_url=self.app_package_url,
default_parameter_list=self.default_parameter_list,
etag=self.etag,
location=self.location,
name=self.name,
provisioning_state=self.provisioning_state,
tags=self.tags,
type=self.type)
def get_application_type_version(application_type_name: Optional[str] = None,
cluster_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
version: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetApplicationTypeVersionResult:
"""
Use this data source to access information about an existing resource.
:param str application_type_name: The name of the application type name resource.
:param str cluster_name: The name of the cluster resource.
:param str resource_group_name: The name of the resource group.
:param str version: The application type version.
"""
__args__ = dict()
__args__['applicationTypeName'] = application_type_name
__args__['clusterName'] = cluster_name
__args__['resourceGroupName'] = resource_group_name
__args__['version'] = version
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-nextgen:servicefabric/v20190301:getApplicationTypeVersion', __args__, opts=opts, typ=GetApplicationTypeVersionResult).value
return AwaitableGetApplicationTypeVersionResult(
app_package_url=__ret__.app_package_url,
default_parameter_list=__ret__.default_parameter_list,
etag=__ret__.etag,
location=__ret__.location,
name=__ret__.name,
provisioning_state=__ret__.provisioning_state,
tags=__ret__.tags,
type=__ret__.type)
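# Example usage (all argument values below are placeholders, not real Azure resources):
#
#     result = get_application_type_version(
#         application_type_name="myAppType",
#         cluster_name="myCluster",
#         resource_group_name="myResourceGroup",
#         version="1.0.0")
#     pulumi.export("appPackageUrl", result.app_package_url)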
|
py | b400671754183f8abbf5e628aa9f7b5d751386bc | #!/usr/bin/env python
"""
GeoClaw util Module `$CLAW/geoclaw/src/python/geoclaw/tide.py`
Module provides tide prediction functions.
:Functions:
- retrieve_constituents - retrieves harmonic constituents from NOAA gauge station
- retrieve_water_levels - retrieves observed water levels from NOAA's API
- retrieve_predicted_tide - retrieves predicted tide from NOAA's API
- datum_value - retrieves datum value for desired datum reference
- predict_tide - predicts tide with Pytides
- datetimes - prepares a collection of datetimes from beginning to end dates
- detide - detides observed water levels
- surge - predicts surge at NOAA gauge station
"""
from __future__ import absolute_import
from __future__ import print_function
from collections.abc import Iterable
from collections import OrderedDict, namedtuple
from scipy.optimize import leastsq, fsolve
from itertools import takewhile, count
from datetime import datetime, timedelta
from functools import reduce
from six.moves.urllib.parse import urlencode
from six.moves.urllib.request import urlopen
try:
from itertools import izip, ifilter
except ImportError: #Python3
izip = zip
ifilter = filter
try:
import requests
import json
import string
import lxml.html as lh
import pandas as pd
import operator as op
import numpy as np
import io
import os
import os.path
except ImportError as e:
print(e)
#%env CLAW=/Users/jonathansocoy/clawpack
d2r, r2d = np.pi/180.0, 180.0/np.pi
NOAA_API_URL = 'https://tidesandcurrents.noaa.gov/api/datagetter'
NOAA_home = 'https://tidesandcurrents.noaa.gov/harcon.html'
######################## Tide Prediction Functions ########################
def retrieve_constituents(station, time_zone='GMT', units='meters', cache_dir=None,
verbose=True):
"""Fetch constituent data for given NOAA tide station.
By default, retrieved data is cached in the geoclaw scratch directory
located at:
$CLAW/geoclaw/scratch
:Required Arguments:
- station (string): 7 character station ID
:Optional Arguments:
- time_zone (string): see NOAA API documentation for possible values
- units (string): see NOAA API documentation for possible values
- cache_dir (string): alternative directory to use for caching data
- verbose (bool): whether to output informational messages
:Returns:
- constituent_dict (dictionary): dictionary of tidal constituents for NOAA gauge station
"""
def units_num(units):
if (units == 'meters'):
return 0
elif (units == 'feet'):
return 1
def time_zone_num(time_zone):
if (time_zone == 'GMT'):
return 0
elif (time_zone == 'Local'):
return 1
def get_noaa_params(station, time_zone, units):
noaa_params = {
'unit': units_num(units),
'timezone': time_zone_num(time_zone),
'id': station
}
return noaa_params
# use geoclaw scratch directory for caching by default
if cache_dir is None:
if 'CLAW' not in os.environ:
raise ValueError('CLAW environment variable not set')
claw_dir = os.environ['CLAW']
cache_dir = os.path.join(claw_dir, 'geoclaw', 'scratch') #### cache_dir
def get_cache_path(station, time_zone, units):
filename = '{}_{}_constituents'.format(time_zone, units)
abs_cache_dir = os.path.abspath(cache_dir)
return os.path.join(abs_cache_dir, 'constituents', station, filename)
def save_to_cache(cache_path, data):
# make parent directories if they do not exist
parent_dir = os.path.dirname(cache_path)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
component_array = pd.DataFrame(data)
component_array.to_csv(cache_path, index=False)
def parse(cache_path):
# read data into structured array, skipping header row if present
data = pd.read_csv(cache_path)
component_array = pd.DataFrame(data)
component_dict = component_array.to_dict(orient='list')
return component_dict
#Requests URL
def fetch_data(station, time_zone, units):
noaa_params = get_noaa_params(station, time_zone, units)
cache_path = get_cache_path(station, time_zone, units)
# use cached data if available
if os.path.exists(cache_path):
if verbose:
print('Using cached constituent data for station {}'.format(station))
return parse(cache_path)
# otherwise, retrieve data from NOAA and cache it
if verbose:
print('Fetching constituent data from NOAA for station {}'.format(station))
#Forms URL
url = '{}?{}'.format(NOAA_home, urlencode(noaa_params))
page = requests.get(url)
doc = lh.fromstring(page.content)
tr_elements = doc.xpath('//tr')
col = [(t.text_content(), []) for t in tr_elements[0]]
for j in range(1, len(tr_elements)):
T, i = tr_elements[j], 0
for t in T.iterchildren():
col[i][1].append(t.text_content())
i+=1
constituent_dict = {title:column for (title,column) in col}
# if there were no errors, then cache response
save_to_cache(cache_path, constituent_dict)
return constituent_dict
try:
constituents = fetch_data(station, time_zone, units)
except:
print('*** Fetching NOAA Constituents failed, returning None')
constituents = None
return constituents
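# Example usage (the station ID below is a placeholder; any 7 character NOAA station ID
# can be used). The returned dictionary maps the column titles of NOAA's harmonic
# constituent table to lists of values; the 'Amplitude' and 'Phase' columns are the ones
# consumed by predict_tide() further below.
#
#     constituents = retrieve_constituents("8518750")
#     print(constituents['Amplitude'][:5], constituents['Phase'][:5])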
def fetch_noaa_tide_data(station, begin_date, end_date, datum='MTL', time_zone='GMT', units='metric', cache_dir=None, verbose=True):
"""Fetch water levels and tide predictions at given NOAA tide station.
The data is returned in 6 minute intervals between the specified begin and
end dates/times. A complete specification of the NOAA CO-OPS API for Data
Retrieval used to fetch the data can be found at:
https://tidesandcurrents.noaa.gov/api/
By default, retrieved data is cached in the geoclaw scratch directory
located at:
$CLAW/geoclaw/scratch
:Required Arguments:
- station (string): 7 character station ID
- begin_date (datetime): start of date/time range of retrieval
- end_date (datetime): end of date/time range of retrieval
:Optional Arguments:
- datum (string): see NOAA API documentation for possible values
- time_zone (string): see NOAA API documentation for possible values
- units (string): see NOAA API documentation for possible values
- cache_dir (string): alternative directory to use for caching data
- verbose (bool): whether to output informational messages
:Returns:
- date_time (numpy.ndarray): times corresponding to retrieved data
- water_level (numpy.ndarray): preliminary or verified water levels
- prediction (numpy.ndarray): tide predictions
"""
# use geoclaw scratch directory for caching by default
if cache_dir is None:
if 'CLAW' not in os.environ:
raise ValueError('CLAW environment variable not set')
claw_dir = os.environ['CLAW']
cache_dir = os.path.join(claw_dir, 'geoclaw', 'scratch')
def fetch(product, expected_header, col_idx, col_types):
noaa_params = get_noaa_params(product)
cache_path = get_cache_path(product)
# use cached data if available
if os.path.exists(cache_path):
if verbose:
print('Using cached {} data for station {}'.format(
product, station))
return parse(cache_path, col_idx, col_types, header=True)
# otherwise, retrieve data from NOAA and cache it
if verbose:
print('Fetching {} data from NOAA for station {}'.format(
product, station))
full_url = '{}?{}'.format(NOAA_API_URL, urlencode(noaa_params))
with urlopen(full_url) as response:
text = response.read().decode('utf-8')
with io.StringIO(text) as data:
# ensure that received header is correct
header = data.readline().strip()
if header != expected_header or 'Error' in text:
# if not, response contains error message
raise ValueError(text)
# if there were no errors, then cache response
save_to_cache(cache_path, text)
return parse(data, col_idx, col_types, header=False)
def get_noaa_params(product):
noaa_date_fmt = '%Y%m%d %H:%M'
noaa_params = {
'product': product,
'application': 'Clawpack',
'format': 'csv',
'station': station,
'begin_date': begin_date.strftime(noaa_date_fmt),
'end_date': end_date.strftime(noaa_date_fmt),
'time_zone': time_zone,
'datum': datum,
'units': units
}
return noaa_params
def get_cache_path(product):
cache_date_fmt = '%Y%m%d%H%M'
dates = '{}_{}'.format(begin_date.strftime(cache_date_fmt),
end_date.strftime(cache_date_fmt))
filename = '{}_{}_{}'.format(time_zone, datum, units)
abs_cache_dir = os.path.abspath(cache_dir)
return os.path.join(abs_cache_dir, product, station, dates, filename)
def save_to_cache(cache_path, data):
# make parent directories if they do not exist
parent_dir = os.path.dirname(cache_path)
if not os.path.exists(parent_dir):
os.makedirs(parent_dir)
# write data to cache file
with open(cache_path, 'w') as cache_file:
cache_file.write(data)
def parse(data, col_idx, col_types, header):
# read data into structured array, skipping header row if present
a = np.genfromtxt(data, usecols=col_idx, dtype=col_types,
skip_header=int(header), delimiter=',',
missing_values='')
# return tuple of columns
return tuple(a[col] for col in a.dtype.names)
# only need first two columns of data; first column contains date/time,
# and second column contains corresponding value
col_idx = (0, 1)
col_types = 'datetime64[m], float'
# fetch water levels and tide predictions
try:
date_time, water_level = fetch(
'water_level', 'Date Time, Water Level, Sigma, O or I (for verified), F, R, L, Quality',
col_idx, col_types)
except:
print('*** Fetching water_level failed, returning None')
date_time = None
water_level = None
try:
date_time2, prediction = fetch('predictions', 'Date Time, Prediction',
col_idx, col_types)
if date_time is None:
date_time = date_time2
except:
print('*** Fetching prediction failed, returning None')
date_time2 = None
prediction = None
# ensure that date/time ranges are the same
if (date_time is not None) and (date_time2 is not None):
if not np.array_equal(date_time, date_time2):
raise ValueError('Received data for different times')
return date_time, water_level, prediction
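# Example usage (station ID and dates are placeholders):
#
#     begin = datetime(2012, 10, 28)
#     end = datetime(2012, 10, 31)
#     date_time, water_level, prediction = fetch_noaa_tide_data("8518750", begin, end)
#
# Each returned array covers the requested window in 6 minute intervals; water_level or
# prediction is None if the corresponding NOAA request failed.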
def datum_value(station, datum, time_zone='GMT', units='metric'):
"""Fetch datum value for given NOAA tide station.
:Required Arguments:
- station (string): 7 character station ID
- datum (string): MSL, MTL
:Optional Arguments:
- time_zone (string): see NOAA API documentation for possible values
- units (string): see NOAA API documentation for possible values
:Returns:
- datum_value (float): value for requested datum reference
"""
def get_noaa_params(station, time_zone, units):
noaa_params = {
'product': 'datums',
'units': units,
'time_zone': time_zone,
'station': station,
'application': 'Clawpack',
'format':'json'
}
return noaa_params
#Scrapes MTL/MSL Datum Value
def get_datum(station, time_zone, units):
noaa_params = get_noaa_params(station, time_zone, units)
url = '{}?{}'.format(NOAA_API_URL, urlencode(noaa_params))
page_data = requests.get(url)
data = page_data.json()['datums']
datum_value = [d['v'] for d in data if d['n'] == datum]
return datum_value
try:
datum_value = float(get_datum(station, time_zone, units)[0])
except:
print('*** Fetching datum value failed, returning None')
datum_value = None
return datum_value
def predict_tide(station, begin_date, end_date, datum='MTL', time_zone='GMT', units='meters'):
"""Fetch datum value for given NOAA tide station.
:Required Arguments:
- station (string): 7 character station ID
- begin_date (datetime): start of date/time range of prediction
- end_date (datetime): end of date/time range of prediction
:Optional Arguments:
- datum (string): MTL for tide prediction
- time_zone (string): see NOAA API documentation for possible values
- units (string): see NOAA API documentation for possible values
:Returns:
- heights (float): tide heights
"""
#These are the NOAA constituents, in the order presented on NOAA's website.
constituents = [c for c in noaa if c != _Z0]
noaa_values = retrieve_constituents(station, time_zone, units)
noaa_amplitudes = [float(amplitude) for amplitude in noaa_values['Amplitude']]
noaa_phases = [float(phases) for phases in noaa_values['Phase']]
#We can add a constant offset - set to MTL
# MTL = datum_value(args[0], 'MTL')
desired_datum = datum_value(station, datum)
MSL = datum_value(station, 'MSL')
offset = MSL - desired_datum
constituents.append(_Z0)
noaa_phases.append(0)
noaa_amplitudes.append(offset)
#Build the model
assert(len(constituents) == len(noaa_phases) == len(noaa_amplitudes))
model = np.zeros(len(constituents), dtype = Tide.dtype)
model['constituent'] = constituents
model['amplitude'] = noaa_amplitudes
model['phase'] = noaa_phases
tide = Tide(model = model, radians = False)
#Time Calculations
delta = (end_date-begin_date)/timedelta(hours=1) + .1
times = Tide._times(begin_date, np.arange(0, delta, .1))
#Height Calculations
heights_arrays = [tide.at([times[i]]) for i in range(len(times))]
heights = [val for sublist in heights_arrays for val in sublist]
return heights
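# Example usage (station ID and dates are placeholders). The heights are produced on a
# 0.1 hour (6 minute) grid, the same grid returned by datetimes() below, so the two lists
# can be zipped together:
#
#     heights = predict_tide("8518750", datetime(2012, 10, 28), datetime(2012, 10, 31))
#     times = datetimes(datetime(2012, 10, 28), datetime(2012, 10, 31))
#     assert len(times) == len(heights)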
def datetimes(begin_date, end_date):
#Time Calculations
delta = (end_date-begin_date)/timedelta(hours=1) + .1
times = Tide._times(begin_date, np.arange(0, delta, .1))
return times
def detide(NOAA_observed_water_level, predicted_tide):
# NOAA observed water level - predicted tide
return [(NOAA_observed_water_level[i] - predicted_tide[i]) for i in range(len(NOAA_observed_water_level))]
#Surge Implementation
def surge(station, beg_date, end_date, landfall_date):
"""Fetch datum value for given NOAA tide station.
:Required Arguments:
- station (string): 7 character station ID
- begin_date (datetime): start of date/time range of prediction
- end_date (datetime): end of date/time range of prediction
- landfall_date (datetime): approximate time of landfall for reference
:Optional Arguments:
- datum (string): MTL for tide prediction and retrieval
- time_zone (string): see NOAA API documentation for possible values
:Returns:
- times (float): times with landfall event as reference
- surge (float): surge heights
"""
predicted_tide = predict_tide(station, beg_date, end_date)
NOAA_times, NOAA_observed_water_level, NOAA_predicted_tide = fetch_noaa_tide_data(station, beg_date, end_date)
#detides NOAA observed water levels with predicted tide
surge = detide(NOAA_observed_water_level, predicted_tide)
#modifies NOAA times to datetimes
times = [((pd.to_datetime(time).to_pydatetime())-landfall_date)/timedelta(days=1) for time in NOAA_times]
return times, surge
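# Example usage (station, dates and landfall time are placeholders for a storm of interest):
#
#     times, storm_surge = surge("8518750",
#                                datetime(2012, 10, 28), datetime(2012, 10, 31),
#                                datetime(2012, 10, 29, 23, 30))
#
# times is expressed in days relative to landfall (negative before, positive after) and
# storm_surge is the detided observed water level.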
######################## Nodal Corrections ########################
def f_unity(a):
return 1.0
#Schureman equations 73, 65
def f_Mm(a):
omega = d2r*a['omega'].value
i = d2r*a['i'].value
I = d2r*a['I'].value
mean = (2/3.0 - np.sin(omega)**2)*(1 - 3/2.0 * np.sin(i)**2)
return (2/3.0 - np.sin(I)**2) / mean
#Schureman equations 74, 66
def f_Mf(a):
omega = d2r*a['omega'].value
i = d2r*a['i'].value
I = d2r*a['I'].value
mean = np.sin(omega)**2 * np.cos(0.5*i)**4
return np.sin(I)**2 / mean
#Schureman equations 75, 67
def f_O1(a):
omega = d2r*a['omega'].value
i = d2r*a['i'].value
I = d2r*a['I'].value
mean = np.sin(omega) * np.cos(0.5*omega)**2 * np.cos(0.5*i)**4
return (np.sin(I) * np.cos(0.5*I)**2) / mean
#Schureman equations 76, 68
def f_J1(a):
omega = d2r*a['omega'].value
i = d2r*a['i'].value
I = d2r*a['I'].value
mean = np.sin(2*omega) * (1-3/2.0 * np.sin(i)**2)
return np.sin(2*I) / mean
#Schureman equations 77, 69
def f_OO1(a):
omega = d2r*a['omega'].value
i = d2r*a['i'].value
I = d2r*a['I'].value
mean = np.sin(omega) * np.sin(0.5*omega)**2 * np.cos(0.5*i)**4
return np.sin(I) * np.sin(0.5*I)**2 / mean
#Schureman equations 78, 70
def f_M2(a):
omega = d2r*a['omega'].value
i = d2r*a['i'].value
I = d2r*a['I'].value
mean = np.cos(0.5*omega)**4 * np.cos(0.5*i)**4
return np.cos(0.5*I)**4 / mean
#Schureman equations 227, 226, 68
#Should probably eventually include the derivations of the magic numbers (0.5023 etc).
def f_K1(a):
omega = d2r*a['omega'].value
i = d2r*a['i'].value
I = d2r*a['I'].value
nu = d2r*a['nu'].value
sin2Icosnu_mean = np.sin(2*omega) * (1-3/2.0 * np.sin(i)**2)
mean = 0.5023*sin2Icosnu_mean + 0.1681
return (0.2523*np.sin(2*I)**2 + 0.1689*np.sin(2*I)*np.cos(nu)+0.0283)**(0.5) / mean
#Schureman equations 215, 213, 204
#It can be (and has been) confirmed that the exponent for R_a reads 1/2 via Schureman Table 7
def f_L2(a):
P = d2r*a['P'].value
I = d2r*a['I'].value
R_a_inv = (1 - 12*np.tan(0.5*I)**2 * np.cos(2*P)+36*np.tan(0.5*I)**4)**(0.5)
return f_M2(a) * R_a_inv
#Schureman equations 235, 234, 71
#Again, magic numbers
def f_K2(a):
omega = d2r*a['omega'].value
i = d2r*a['i'].value
I = d2r*a['I'].value
nu = d2r*a['nu'].value
sinsqIcos2nu_mean = np.sin(omega)**2 * (1-3/2.0 * np.sin(i)**2)
mean = 0.5023*sinsqIcos2nu_mean + 0.0365
return (0.2533*np.sin(I)**4 + 0.0367*np.sin(I)**2 *np.cos(2*nu)+0.0013)**(0.5) / mean
#Schureman equations 206, 207, 195
def f_M1(a):
P = d2r*a['P'].value
I = d2r*a['I'].value
Q_a_inv = (0.25 + 1.5*np.cos(I)*np.cos(2*P)*np.cos(0.5*I)**(-0.5) + 2.25*np.cos(I)**2 * np.cos(0.5*I)**(-4))**(0.5)
return f_O1(a) * Q_a_inv
#See e.g. Schureman equation 149
def f_Modd(a, n):
return f_M2(a) ** (n / 2.0)
#Node factors u, see Table 2 of Schureman.
def u_zero(a):
return 0.0
def u_Mf(a):
return -2.0 * a['xi'].value
def u_O1(a):
return 2.0 * a['xi'].value - a['nu'].value
def u_J1(a):
return -a['nu'].value
def u_OO1(a):
return -2.0 * a['xi'].value - a['nu'].value
def u_M2(a):
return 2.0 * a['xi'].value - 2.0 * a['nu'].value
def u_K1(a):
return -a['nup'].value
#Schureman 214
def u_L2(a):
I = d2r*a['I'].value
P = d2r*a['P'].value
R = r2d*np.arctan(np.sin(2*P)/(1/6.0 * np.tan(0.5*I) **(-2) -np.cos(2*P)))
return 2.0 * a['xi'].value - 2.0 * a['nu'].value - R
def u_K2(a):
return -2.0 * a['nupp'].value
#Schureman 202
def u_M1(a):
I = d2r*a['I'].value
P = d2r*a['P'].value
Q = r2d*np.arctan((5*np.cos(I)-1)/(7*np.cos(I)+1)*np.tan(P))
return a['xi'].value - a['nu'].value + Q
def u_Modd(a, n):
return n/2.0 * u_M2(a)
######################## Constituents ########################
class BaseConstituent(object):
xdo_int = {
'A': 1, 'B': 2, 'C': 3, 'D': 4, 'E': 5, 'F': 6, 'G': 7, 'H': 8, 'I': 9,
'J': 10, 'K': 11, 'L': 12, 'M': 13, 'N': 14, 'O': 15, 'P': 16, 'Q': 17,
'R': -8, 'S': -7, 'T': -6, 'U': -5, 'V': -4, 'W': -3, 'X': -2, 'Y': -1,
'Z': 0
}
int_xdo = {v:k for k, v in xdo_int.items()}
def __init__(self, name, xdo='', coefficients=[], u=u_zero, f=f_unity):
if xdo == '':
self.coefficients = np.array(coefficients)
else:
self.coefficients = np.array(self.xdo_to_coefficients(xdo))
self.name = name
self.u = u
self.f = f
def xdo_to_coefficients(self, xdo):
return [self.xdo_int[l.upper()] for l in xdo if l in string.ascii_letters]
def coefficients_to_xdo(self, coefficients):
return ''.join([self.int_xdo[c] for c in coefficients])
def V(self, astro):
return np.dot(self.coefficients, self.astro_values(astro))
def xdo(self):
return self.coefficients_to_xdo(self.coefficients)
def speed(self, a):
return np.dot(self.coefficients, self.astro_speeds(a))
def astro_xdo(self, a):
return [a['T+h-s'], a['s'], a['h'], a['p'], a['N'], a['pp'], a['90']]
def astro_speeds(self, a):
return np.array([each.speed for each in self.astro_xdo(a)])
def astro_values(self, a):
return np.array([each.value for each in self.astro_xdo(a)])
#Consider two out of phase constituents which travel at the same speed to
#be identical
def __eq__(self, c):
return np.all(self.coefficients[:-1] == c.coefficients[:-1])
def __hash__(self):
return hash(tuple(self.coefficients[:-1]))
class CompoundConstituent(BaseConstituent):
def __init__(self, members = [], **kwargs):
self.members = members
if 'u' not in kwargs:
kwargs['u'] = self.u
if 'f' not in kwargs:
kwargs['f'] = self.f
super(CompoundConstituent,self).__init__(**kwargs)
self.coefficients = reduce(op.add,[c.coefficients * n for (c,n) in members])
def speed(self, a):
return reduce(op.add, [n * c.speed(a) for (c,n) in self.members])
def V(self, a):
return reduce(op.add, [n * c.V(a) for (c,n) in self.members])
def u(self, a):
return reduce(op.add, [n * c.u(a) for (c,n) in self.members])
def f(self, a):
return reduce(op.mul, [c.f(a) ** abs(n) for (c,n) in self.members])
###### Base Constituents
#Long Term
_Z0 = BaseConstituent(name = 'Z0', xdo = 'Z ZZZ ZZZ', u = u_zero, f = f_unity)
_Sa = BaseConstituent(name = 'Sa', xdo = 'Z ZAZ ZZZ', u = u_zero, f = f_unity)
_Ssa = BaseConstituent(name = 'Ssa', xdo = 'Z ZBZ ZZZ', u = u_zero, f = f_unity)
_Mm = BaseConstituent(name = 'Mm', xdo = 'Z AZY ZZZ', u = u_zero, f = f_Mm)
_Mf = BaseConstituent(name = 'Mf', xdo = 'Z BZZ ZZZ', u = u_Mf, f = f_Mf)
#Diurnals
_Q1 = BaseConstituent(name = 'Q1', xdo = 'A XZA ZZA', u = u_O1, f = f_O1)
_O1 = BaseConstituent(name = 'O1', xdo = 'A YZZ ZZA', u = u_O1, f = f_O1)
_K1 = BaseConstituent(name = 'K1', xdo = 'A AZZ ZZY', u = u_K1, f = f_K1)
_J1 = BaseConstituent(name = 'J1', xdo = 'A BZY ZZY', u = u_J1, f = f_J1)
#M1 is a tricky business for reasons of convention, rather than theory. The
#reasons for this are best summarised by Schureman paragraphs 126, 127 and in
#the comments found in congen_input.txt of xtides, so I won't go over all this
#again here.
_M1 = BaseConstituent(name = 'M1', xdo = 'A ZZZ ZZA', u = u_M1, f = f_M1)
_P1 = BaseConstituent(name = 'P1', xdo = 'A AXZ ZZA', u = u_zero, f = f_unity)
_S1 = BaseConstituent(name = 'S1', xdo = 'A AYZ ZZZ', u = u_zero, f = f_unity)
_OO1 = BaseConstituent(name = 'OO1', xdo = 'A CZZ ZZY', u = u_OO1, f = f_OO1)
#Semi-Diurnals
_2N2 = BaseConstituent(name = '2N2', xdo = 'B XZB ZZZ', u = u_M2, f = f_M2)
_N2 = BaseConstituent(name = 'N2', xdo = 'B YZA ZZZ', u = u_M2, f = f_M2)
_nu2 = BaseConstituent(name = 'nu2', xdo = 'B YBY ZZZ', u = u_M2, f = f_M2)
_M2 = BaseConstituent(name = 'M2', xdo = 'B ZZZ ZZZ', u = u_M2, f = f_M2)
_lambda2 = BaseConstituent(name = 'lambda2', xdo = 'B AXA ZZB', u = u_M2, f = f_M2)
_L2 = BaseConstituent(name = 'L2', xdo = 'B AZY ZZB', u = u_L2, f = f_L2)
_T2 = BaseConstituent(name = 'T2', xdo = 'B BWZ ZAZ', u = u_zero, f = f_unity)
_S2 = BaseConstituent(name = 'S2', xdo = 'B BXZ ZZZ', u = u_zero, f = f_unity)
_R2 = BaseConstituent(name = 'R2', xdo = 'B BYZ ZYB', u = u_zero, f = f_unity)
_K2 = BaseConstituent(name = 'K2', xdo = 'B BZZ ZZZ', u = u_K2, f = f_K2)
#Third-Diurnals
_M3 = BaseConstituent(name = 'M3', xdo = 'C ZZZ ZZZ', u = lambda a: u_Modd(a,3), f = lambda a: f_Modd(a,3))
###### Compound Constituents
#Long Term
_MSF = CompoundConstituent(name = 'MSF', members = [(_S2, 1), (_M2, -1)])
#Diurnal
_2Q1 = CompoundConstituent(name = '2Q1', members = [(_N2, 1), (_J1, -1)])
_rho1 = CompoundConstituent(name = 'rho1', members = [(_nu2, 1), (_K1, -1)])
#Semi-Diurnal
_mu2 = CompoundConstituent(name = 'mu2', members = [(_M2, 2), (_S2, -1)]) #2MS2
_2SM2 = CompoundConstituent(name = '2SM2', members = [(_S2, 2), (_M2, -1)])
#Third-Diurnal
_2MK3 = CompoundConstituent(name = '2MK3', members = [(_M2, 1), (_O1, 1)])
_MK3 = CompoundConstituent(name = 'MK3', members = [(_M2, 1), (_K1, 1)])
#Quarter-Diurnal
_MN4 = CompoundConstituent(name = 'MN4', members = [(_M2, 1), (_N2, 1)])
_M4 = CompoundConstituent(name = 'M4', members = [(_M2, 2)])
_MS4 = CompoundConstituent(name = 'MS4', members = [(_M2, 1), (_S2, 1)])
_S4 = CompoundConstituent(name = 'S4', members = [(_S2, 2)])
#Sixth-Diurnal
_M6 = CompoundConstituent(name = 'M6', members = [(_M2, 3)])
_S6 = CompoundConstituent(name = 'S6', members = [(_S2, 3)])
#Eighth-Diurnals
_M8 = CompoundConstituent(name = 'M8', members = [(_M2, 4)])
noaa = [
_M2, _S2, _N2, _K1, _M4, _O1, _M6, _MK3, _S4, _MN4, _nu2, _S6, _mu2,
_2N2, _OO1, _lambda2, _S1, _M1, _J1, _Mm, _Ssa, _Sa, _MSF, _Mf,
_rho1, _Q1, _T2, _R2, _2Q1, _P1, _2SM2, _M3, _L2, _2MK3, _K2,
_M8, _MS4
]
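#Illustrative check (assumes the astro() helper defined later in this module):
#printing each NOAA constituent's speed in degrees per hour. M2 should come out
#near 28.98 and S2 essentially at 30.0.
#    from datetime import datetime
#    a0 = astro(datetime(2000, 1, 1))
#    for c in noaa:
#        print(c.name, round(c.speed(a0), 4))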
####################### Tide ########################
class Tide(object):
dtype = np.dtype([
('constituent', object),
('amplitude', float),
('phase', float)])
def __init__(
self,
constituents = None,
amplitudes = None,
phases = None,
model = None,
radians = False
):
"""
Initialise a tidal model. Provide constituents, amplitudes and phases OR a model.
Arguments:
constituents -- list of constituents used in the model.
amplitudes -- list of amplitudes corresponding to constituents
phases -- list of phases corresponding to constituents
model -- an ndarray of type Tide.dtype representing the constituents, amplitudes and phases.
radians -- boolean representing whether phases are in radians (default False)
"""
if None not in [constituents, amplitudes, phases]:
if len(constituents) == len(amplitudes) == len(phases):
model = np.zeros(len(phases), dtype=Tide.dtype)
model['constituent'] = np.array(constituents)
model['amplitude'] = np.array(amplitudes)
model['phase'] = np.array(phases)
else:
raise ValueError("Constituents, amplitudes and phases should all be arrays of equal length.")
elif model is not None:
if not model.dtype == Tide.dtype:
raise ValueError("Model must be a numpy array with dtype == Tide.dtype")
else:
raise ValueError("Must be initialised with constituents, amplitudes and phases; or a model.")
if radians:
model['phase'] = r2d*model['phase']
self.model = model[:]
self.normalize()
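#Illustrative usage of the constructor above (made-up amplitudes and phases,
#not real data): a two-constituent model built directly from lists.
#    tide = Tide(constituents=[_M2, _S2],
#                amplitudes=[1.2, 0.4],
#                phases=[110.0, 150.0])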
def prepare(self, *args, **kwargs):
return Tide._prepare(self.model['constituent'], *args, **kwargs)
@staticmethod
def _prepare(constituents, t0, t = None, radians = True):
"""
Return constituent speed and equilibrium argument at a given time, and constituent node factors at given times.
Arguments:
constituents -- list of constituents to prepare
t0 -- time at which to evaluate speed and equilibrium argument for each constituent
t -- list of times at which to evaluate node factors for each constituent (default: t0)
radians -- whether to return the angular arguments in radians or degrees (default: True)
"""
#The equilibrium argument is constant and taken at the beginning of the
#time series (t0). The speed of the equilibrium argument changes very
#slowly, so again we take it to be constant over any length of data. The
#node factors change more rapidly.
if isinstance(t0, Iterable):
t0 = t0[0]
if t is None:
t = [t0]
if not isinstance(t, Iterable):
t = [t]
a0 = astro(t0)
a = [astro(t_i) for t_i in t]
#For convenience give u, V0 (but not speed!) in [0, 360)
V0 = np.array([c.V(a0) for c in constituents])[:, np.newaxis]
speed = np.array([c.speed(a0) for c in constituents])[:, np.newaxis]
u = [np.mod(np.array([c.u(a_i) for c in constituents])[:, np.newaxis], 360.0)
for a_i in a]
f = [np.mod(np.array([c.f(a_i) for c in constituents])[:, np.newaxis], 360.0)
for a_i in a]
if radians:
speed = d2r*speed
V0 = d2r*V0
u = [d2r*each for each in u]
return speed, u, f, V0
def at(self, t):
"""
Return the modelled tidal height at given times.
Arguments:
t -- array of times at which to evaluate the tidal height
"""
t0 = t[0]
hours = self._hours(t0, t)
partition = 240.0
t = self._partition(hours, partition)
times = self._times(t0, [(i + 0.5)*partition for i in range(len(t))])
speed, u, f, V0 = self.prepare(t0, times, radians = True)
H = self.model['amplitude'][:, np.newaxis]
p = d2r*self.model['phase'][:, np.newaxis]
return np.concatenate([
Tide._tidal_series(t_i, H, p, speed, u_i, f_i, V0)
for t_i, u_i, f_i in izip(t, u, f)
])
def highs(self, *args):
"""
Generator yielding only the high tides.
Arguments:
see Tide.extrema()
"""
for t in ifilter(lambda e: e[2] == 'H', self.extrema(*args)):
yield t
def lows(self, *args):
"""
Generator yielding only the low tides.
Arguments:
see Tide.extrema()
"""
for t in ifilter(lambda e: e[2] == 'L', self.extrema(*args)):
yield t
def form_number(self):
"""
Returns the model's form number, a helpful heuristic for classifying tides.
"""
k1, o1, m2, s2 = (
np.extract(self.model['constituent'] == c, self.model['amplitude'])
for c in [_K1, _O1, _M2, _S2]
)
return (k1+o1)/(m2+s2)
def classify(self):
"""
Classify the tide according to its form number
"""
form = self.form_number()
if 0 <= form <= 0.25:
return 'semidiurnal'
elif 0.25 < form <= 1.5:
return 'mixed (semidiurnal)'
elif 1.5 < form <= 3.0:
return 'mixed (diurnal)'
else:
return 'diurnal'
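#Worked example for the two methods above: a model with amplitudes
#K1=0.3, O1=0.2, M2=0.1, S2=0.05 gives a form number of
#(0.3+0.2)/(0.1+0.05) = 3.33, which classify() reports as 'diurnal'.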
def extrema(self, t0, t1 = None, partition = 2400.0):
"""
A generator for high and low tides.
Arguments:
t0 -- time after which extrema are sought
t1 -- optional time before which extrema are sought (if not given, the generator is infinite)
partition -- number of hours for which we consider the node factors to be constant (default: 2400.0)
"""
if t1:
#yield from in python 3.4
for e in takewhile(lambda t: t[0] < t1, self.extrema(t0)):
yield e
else:
#We assume that extrema are separated by at least delta hours
delta = np.amin([
90.0 / c.speed(astro(t0)) for c in self.model['constituent']
if not c.speed(astro(t0)) == 0
])
#We search for stationary points from offset hours before t0 to
#ensure we find any which might occur very soon after t0.
offset = 24.0
partitions = (Tide._times(t0, i*partition) for i in count()), (Tide._times(t0, i*partition) for i in count(1))
#We'll overestimate to be on the safe side;
#values outside (start,end) won't get yielded.
interval_count = int(np.ceil((partition + offset) / delta)) + 1
amplitude = self.model['amplitude'][:, np.newaxis]
phase = d2r*self.model['phase'][:, np.newaxis]
for start, end in izip(*partitions):
speed, [u], [f], V0 = self.prepare(start, Tide._times(start, 0.5*partition))
#These derivatives don't include the time dependence of u or f,
#but these change slowly.
def d(t):
return np.sum(-speed*amplitude*f*np.sin(speed*t + (V0 + u) - phase), axis=0)
def d2(t):
return np.sum(-speed**2.0 * amplitude*f*np.cos(speed*t + (V0 + u) - phase), axis=0)
#We'll overestimate to be on the safe side;
#values outside (start,end) won't get yielded.
intervals = (delta*i - offset for i in range(interval_count)), (delta*(i+1) - offset for i in range(interval_count))
for a, b in izip(*intervals):
if d(a)*d(b) < 0:
extrema = fsolve(d, (a + b) / 2.0, fprime = d2)[0]
time = Tide._times(start, extrema)
[height] = self.at([time])
hilo = 'H' if d2(extrema) < 0 else 'L'
if start < time < end:
yield (time, height, hilo)
@staticmethod
def _hours(t0, t):
"""
Return the hourly offset(s) of a (list of) time from a given time.
Arguments:
t0 -- time from which offsets are sought
t -- times to find hourly offsets from t0.
"""
if not isinstance(t, Iterable):
return Tide._hours(t0, [t])[0]
elif isinstance(t[0], datetime):
return np.array([(ti-t0).total_seconds() / 3600.0 for ti in t])
else:
return t
@staticmethod
def _partition(hours, partition = 3600.0):
"""
Partition a sorted list of numbers (or in this case hours).
Arguments:
hours -- sorted ndarray of hours.
partition -- maximum partition length (default: 3600.0)
"""
partition = float(partition)
relative = hours - hours[0]
total_partitions = np.ceil(relative[-1] / partition + 10*np.finfo(float).eps).astype('int')
return [hours[np.floor(np.divide(relative, partition)) == i] for i in range(total_partitions)]
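#Worked example (illustrative): Tide._partition(np.array([0.0, 1.0, 5.0, 7.0]), 4.0)
#returns [array([0., 1.]), array([5., 7.])], since floor(relative/4) is [0, 0, 1, 1].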
@staticmethod
def _times(t0, hours):
"""
Return a (list of) datetime(s) given an initial time and an (list of) hourly offset(s).
Arguments:
t0 -- initial time
hours -- hourly offsets from t0
"""
if not isinstance(hours, Iterable):
return Tide._times(t0, [hours])[0]
elif not isinstance(hours[0], datetime):
return np.array([t0 + timedelta(hours=h) for h in hours])
else:
return np.array(hours)
@staticmethod
def _tidal_series(t, amplitude, phase, speed, u, f, V0):
return np.sum(amplitude*f*np.cos(speed*t + (V0 + u) - phase), axis=0)
def normalize(self):
"""
Adapt self.model so that amplitudes are positive and phases are in [0,360) as per convention
"""
for i, (_, amplitude, phase) in enumerate(self.model):
if amplitude < 0:
self.model['amplitude'][i] = -amplitude
self.model['phase'][i] = phase + 180.0
self.model['phase'][i] = np.mod(self.model['phase'][i], 360.0)
@classmethod
def decompose(
cls,
heights,
t = None,
t0 = None,
interval = None,
constituents = noaa,
initial = None,
n_period = 2,
callback = None,
full_output = False
):
"""
Return an instance of Tide which has been fitted to a series of tidal observations.
Arguments:
It is not necessary to provide t0 or interval if t is provided.
heights -- ndarray of tidal observation heights
t -- ndarray of tidal observation times
t0 -- datetime representing the time at which heights[0] was recorded
interval -- hourly interval between readings
constituents -- list of constituents to use in the fit (default: noaa)
initial -- optional Tide instance to use as first guess for least squares solver
n_period -- only include constituents which complete at least this many periods (default: 2)
callback -- optional function to be called at each iteration of the solver
full_output -- whether to return the output of scipy's leastsq solver (default: False)
"""
if t is not None:
if isinstance(t[0], datetime):
hours = Tide._hours(t[0], t)
t0 = t[0]
elif t0 is not None:
hours = t
else:
raise ValueError("t can be an array of datetimes, or an array "
"of hours since t0 in which case t0 must be "
"specified.")
elif None not in [t0, interval]:
hours = np.arange(len(heights)) * interval
else:
raise ValueError("Must provide t(datetimes), or t(hours) and "
"t0(datetime), or interval(hours) and t0(datetime) "
"so that each height can be identified with an "
"instant in time.")
#Remove duplicate constituents (those which travel at exactly the same
#speed, irrespective of phase)
constituents = list(OrderedDict.fromkeys(constituents))
#No need for least squares to find the mean water level constituent z0,
#work relative to mean
constituents = [c for c in constituents if not c == _Z0]
z0 = np.mean(heights)
heights = heights - z0
#Only analyse frequencies which complete at least n_period cycles over
#the data period.
constituents = [
c for c in constituents
if 360.0 * n_period < hours[-1] * c.speed(astro(t0))
]
n = len(constituents)
sort = np.argsort(hours)
hours = hours[sort]
heights = heights[sort]
#We partition our time/height data into intervals over which we consider
#the values of u and f to assume a constant value (that is, their true
#value at the midpoint of the interval). Constituent
#speeds change much more slowly than the node factors, so we will
#consider these constant and equal to their speed at t0, regardless of
#the length of the time series.
partition = 240.0
t = Tide._partition(hours, partition)
times = Tide._times(t0, [(i + 0.5)*partition for i in range(len(t))])
speed, u, f, V0 = Tide._prepare(constituents, t0, times, radians = True)
#Residual to be minimised by variation of parameters (amplitudes, phases)
def residual(hp):
H, p = hp[:n, np.newaxis], hp[n:, np.newaxis]
s = np.concatenate([
Tide._tidal_series(t_i, H, p, speed, u_i, f_i, V0)
for t_i, u_i, f_i in izip(t, u, f)
])
res = heights - s
if callback:
callback(res)
return res
#Analytic Jacobian of the residual - this makes solving significantly
#faster than just using gradient approximation, especially with many
#measurements / constituents.
def D_residual(hp):
H, p = hp[:n, np.newaxis], hp[n:, np.newaxis]
ds_dH = np.concatenate([
f_i*np.cos(speed*t_i+u_i+V0-p)
for t_i, u_i, f_i in izip(t, u, f)],
axis = 1)
ds_dp = np.concatenate([
H*f_i*np.sin(speed*t_i+u_i+V0-p)
for t_i, u_i, f_i in izip(t, u, f)],
axis = 1)
return np.append(-ds_dH, -ds_dp, axis=0)
#Initial guess for solver, haven't done any analysis on this since the
#solver seems to converge well regardless of the initial guess. We do
#however scale the initial amplitude guess with some measure of the
#variation
amplitudes = np.ones(n) * (np.sqrt(np.dot(heights, heights)) / len(heights))
phases = np.ones(n)
if initial:
for (c0, amplitude, phase) in initial.model:
for i, c in enumerate(constituents):
if c0 == c:
amplitudes[i] = amplitude
phases[i] = d2r*phase
initial = np.append(amplitudes, phases)
lsq = leastsq(residual, initial, Dfun=D_residual, col_deriv=True, ftol=1e-7)
model = np.zeros(1+n, dtype=cls.dtype)
model[0] = (_Z0, z0, 0)
model[1:]['constituent'] = constituents[:]
model[1:]['amplitude'] = lsq[0][:n]
model[1:]['phase'] = lsq[0][n:]
if full_output:
return cls(model = model, radians = True), lsq
return cls(model = model, radians = True)
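#Illustrative end-to-end sketch (hypothetical observation array, not real data):
#fit a model to hourly water levels, then predict future heights with it.
#    from datetime import datetime, timedelta
#    t0 = datetime(2020, 1, 1)
#    times = [t0 + timedelta(hours=h) for h in range(24 * 30)]
#    tide = Tide.decompose(observed_heights, times)  # observed_heights: ndarray of levels
#    prediction = tide.at([t0 + timedelta(days=31), t0 + timedelta(days=31, hours=6)])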
################# Astronomical Values #######################
#Convert a sexagesimal angle into decimal degrees
def s2d(degrees, arcmins = 0, arcsecs = 0, mas = 0, muas = 0):
return (
degrees
+ (arcmins / 60.0)
+ (arcsecs / (60.0*60.0))
+ (mas / (60.0*60.0*1e3))
+ (muas / (60.0*60.0*1e6))
)
#Evaluate a polynomial at argument
def polynomial(coefficients, argument):
return sum([c * (argument ** i) for i,c in enumerate(coefficients)])
#Evaluate the first derivative of a polynomial at argument
def d_polynomial(coefficients, argument):
return sum([c * i * (argument ** (i-1)) for i,c in enumerate(coefficients)])
#Meeus formula 11.1
def T(t):
return (JD(t) - 2451545.0)/36525
#Meeus formula 7.1
def JD(t):
Y, M = t.year, t.month
D = (
t.day
+ t.hour / (24.0)
+ t.minute / (24.0*60.0)
+ t.second / (24.0*60.0*60.0)
+ t.microsecond / (24.0 * 60.0 * 60.0 * 1e6)
)
if M <= 2:
Y = Y - 1
M = M + 12
A = np.floor(Y / 100.0)
B = 2 - A + np.floor(A / 4.0)
return np.floor(365.25*(Y+4716)) + np.floor(30.6001*(M+1)) + D + B - 1524.5
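#Sanity check: for the standard epoch J2000.0, JD(datetime(2000, 1, 1, 12))
#evaluates to 2451545.0, so T(t) above is zero at that instant.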
#Meeus formula 21.3
terrestrial_obliquity_coefficients = (
s2d(23,26,21.448),
-s2d(0,0,4680.93),
-s2d(0,0,1.55),
s2d(0,0,1999.25),
-s2d(0,0,51.38),
-s2d(0,0,249.67),
-s2d(0,0,39.05),
s2d(0,0,7.12),
s2d(0,0,27.87),
s2d(0,0,5.79),
s2d(0,0,2.45)
)
#Adjust these coefficients for parameter T rather than U
terrestrial_obliquity_coefficients = [
c * (1e-2) ** i for i,c in enumerate(terrestrial_obliquity_coefficients)
]
#Not entirely sure about this interpretation, but this is the difference
#between Meeus formulae 24.2 and 24.3 and seems to work
solar_perigee_coefficients = (
280.46645 - 357.52910,
36000.76932 - 35999.05030,
0.0003032 + 0.0001559,
0.00000048
)
#Meeus formula 24.2
solar_longitude_coefficients = (
280.46645,
36000.76983,
0.0003032
)
#This value is taken from JPL Horizon and is essentially constant
lunar_inclination_coefficients = (
5.145,
)
#Meeus formula 45.1
lunar_longitude_coefficients = (
218.3164591,
481267.88134236,
-0.0013268,
1/538841.0,
-1/65194000.0
)
#Meeus formula 45.7
lunar_node_coefficients = (
125.0445550,
-1934.1361849,
0.0020762,
1/467410.0,
-1/60616000.0
)
#Meeus, unnumbered formula directly preceded by 45.7
lunar_perigee_coefficients = (
83.3532430,
4069.0137111,
-0.0103238,
-1/80053.0,
1/18999000.0
)
#Now follow some useful auxiliary values, we won't need their speed.
#See notes on Table 6 in Schureman for I, nu, xi, nu', 2nu''
def _I(N, i, omega):
N, i, omega = d2r * N, d2r*i, d2r*omega
cosI = np.cos(i)*np.cos(omega)-np.sin(i)*np.sin(omega)*np.cos(N)
return r2d*np.arccos(cosI)
def _xi(N, i, omega):
N, i, omega = d2r * N, d2r*i, d2r*omega
e1 = np.cos(0.5*(omega-i))/np.cos(0.5*(omega+i)) * np.tan(0.5*N)
e2 = np.sin(0.5*(omega-i))/np.sin(0.5*(omega+i)) * np.tan(0.5*N)
e1, e2 = np.arctan(e1), np.arctan(e2)
e1, e2 = e1 - 0.5*N, e2 - 0.5*N
return -(e1 + e2)*r2d
def _nu(N, i, omega):
N, i, omega = d2r * N, d2r*i, d2r*omega
e1 = np.cos(0.5*(omega-i))/np.cos(0.5*(omega+i)) * np.tan(0.5*N)
e2 = np.sin(0.5*(omega-i))/np.sin(0.5*(omega+i)) * np.tan(0.5*N)
e1, e2 = np.arctan(e1), np.arctan(e2)
e1, e2 = e1 - 0.5*N, e2 - 0.5*N
return (e1 - e2)*r2d
#Schureman equation 224
#Can we be more precise than B "the solar coefficient" = 0.1681?
def _nup(N, i, omega):
I = d2r * _I(N, i, omega)
nu = d2r * _nu(N, i, omega)
return r2d * np.arctan(np.sin(2*I)*np.sin(nu)/(np.sin(2*I)*np.cos(nu)+0.3347))
#Schureman equation 232
def _nupp(N, i, omega):
I = d2r * _I(N, i, omega)
nu = d2r * _nu(N, i, omega)
tan2nupp = (np.sin(I)**2*np.sin(2*nu))/(np.sin(I)**2*np.cos(2*nu)+0.0727)
return r2d * 0.5 * np.arctan(tan2nupp)
AstronomicalParameter = namedtuple('AstronomicalParameter', ['value', 'speed'])
def astro(t):
a = {}
#We can use polynomial fits from Meeus to obtain good approximations to
#some astronomical values (and therefore speeds).
polynomials = {
's': lunar_longitude_coefficients,
'h': solar_longitude_coefficients,
'p': lunar_perigee_coefficients,
'N': lunar_node_coefficients,
'pp': solar_perigee_coefficients,
'90': (90.0,),
'omega': terrestrial_obliquity_coefficients,
'i': lunar_inclination_coefficients
}
#Polynomials are in T, that is Julian Centuries; we want our speeds to be
#in the more convenient unit of degrees per hour.
dT_dHour = 1 / (24 * 365.25 * 100)
for name, coefficients in polynomials.items():
a[name] = AstronomicalParameter(
np.mod(polynomial(coefficients, T(t)), 360.0),
d_polynomial(coefficients, T(t)) * dT_dHour
)
#Some other parameters defined by Schureman which are dependent on the
#parameters N, i, omega for use in node factor calculations. We don't need
#their speeds.
args = list(each.value for each in [a['N'], a['i'], a['omega']])
for name, function in {
'I': _I,
'xi': _xi,
'nu': _nu,
'nup': _nup,
'nupp': _nupp
}.items():
a[name] = AstronomicalParameter(np.mod(function(*args), 360.0), None)
#We don't work directly with the T (hours) parameter, instead our spanning
#set for equilibrium arguments is given by T+h-s, s, h, p, N, pp, 90.
#This is in line with convention.
hour = AstronomicalParameter((JD(t) - np.floor(JD(t))) * 360.0, 15.0)
a['T+h-s'] = AstronomicalParameter(
hour.value + a['h'].value - a['s'].value,
hour.speed + a['h'].speed - a['s'].speed
)
#It is convenient to calculate Schureman's P here since several node
#factors need it, although it could be argued that these
#(along with I, xi, nu etc) belong somewhere else.
a['P'] = AstronomicalParameter(
np.mod(a['p'].value -a['xi'].value,360.0),
None
)
return a
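#Illustrative sketch of the dictionary returned by astro(): each entry maps one
#of Schureman's symbols to an AstronomicalParameter (value, speed) pair.
#    a = astro(datetime(2000, 1, 1))
#    a['s'].value   # mean longitude of the moon, degrees in [0, 360)
#    a['s'].speed   # its rate of change in degrees per hour
#    a['I'].value   # obliquity of the lunar orbit; speed is None (not needed)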
|
py | b40067251e52540f6232506e1ae914baf61024dc | #!/usr/bin/env python3
import os
import asyncio
from electrum_but.simple_config import SimpleConfig
from electrum_but import constants
from electrum_but.daemon import Daemon
from electrum_but.storage import WalletStorage
from electrum_but.wallet import Wallet, create_new_wallet
from electrum_but.wallet_db import WalletDB
from electrum_but.commands import Commands
from electrum_but.util import create_and_start_event_loop, log_exceptions
loop, stopping_fut, loop_thread = create_and_start_event_loop()
config = SimpleConfig({"testnet": True}) # to use ~/.electrum-but/testnet as datadir
constants.set_testnet() # to set testnet magic bytes
daemon = Daemon(config, listen_jsonrpc=False)
network = daemon.network
assert network.asyncio_loop.is_running()
# get wallet on disk
wallet_dir = os.path.dirname(config.get_wallet_path())
wallet_path = os.path.join(wallet_dir, "test_wallet")
if not os.path.exists(wallet_path):
create_new_wallet(path=wallet_path, config=config)
# open wallet
wallet = daemon.load_wallet(wallet_path, password=None, manual_upgrades=False)
wallet.start_network(network)
# you can use ~CLI commands by accessing command_runner
command_runner = Commands(config=config, daemon=daemon, network=network)
print("balance", network.run_from_another_thread(command_runner.getbalance(wallet=wallet)))
print("addr", network.run_from_another_thread(command_runner.getunusedaddress(wallet=wallet)))
print("gettx", network.run_from_another_thread(
command_runner.gettransaction("d8ee577f6b864071c6ccbac1e30d0d19edd6fa9a171be02b85a73fd533f2734d")))
# but you might as well interact with the underlying methods directly
print("balance", wallet.get_balance())
print("addr", wallet.get_unused_address())
print("gettx", network.run_from_another_thread(network.get_transaction("d8ee577f6b864071c6ccbac1e30d0d19edd6fa9a171be02b85a73fd533f2734d")))
stopping_fut.set_result(1) # to stop event loop
|
py | b4006746202ed35fba479fa19ad79e6d1c6c07d6 | class LayerNorm(Module):
__parameters__ = ["weight", "bias", ]
__buffers__ = []
weight : Tensor
bias : Tensor
training : bool
def forward(self: __torch__.multimodal.model.multimodal_transformer.___torch_mangle_9442.LayerNorm,
argument_1: Tensor) -> Tensor:
_0 = self.bias
_1 = self.weight
input = torch.to(argument_1, torch.device("cuda:0"), 6, False, False, None)
ret = torch.layer_norm(input, [768], _1, _0, 1.0000000000000001e-05, True)
query = torch.to(ret, torch.device("cuda:0"), 5, False, False, None)
return query
def forward1(self: __torch__.multimodal.model.multimodal_transformer.___torch_mangle_9442.LayerNorm,
argument_1: Tensor) -> Tensor:
_2 = self.bias
_3 = self.weight
input = torch.to(argument_1, torch.device("cuda:0"), 6, False, False, None)
ret = torch.layer_norm(input, [768], _3, _2, 1.0000000000000001e-05, True)
query = torch.to(ret, torch.device("cuda:0"), 5, False, False, None)
return query
|
py | b400676a03e6a8f91096ea6aa06d5740b51b635c | # -*- coding: utf-8 -*-
from __future__ import print_function
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/melodic/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/melodic/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in '/home/arijitnoobstar/UAVProjectileCatcher/devel;/opt/ros/melodic'.split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/arijitnoobstar/UAVProjectileCatcher/devel/.private/hector_xacro_tools/env.sh')
output_filename = '/home/arijitnoobstar/UAVProjectileCatcher/build/hector_xacro_tools/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
# print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
|
py | b40067e44a2dd554360099b39672aa0d8752c9af | import os
import weakref
import sys
import traceback as tb
import numpy as np
import pyqtgraph as pg
from PyQt5 import uic
from pyqtgraph.Qt import QtCore, QtGui
from pyqtgraph.graphicsItems.ViewBox import ViewBox
from pyqtgraph.graphicsItems.ViewBox.ViewBoxMenu import ViewBoxMenu
from inqbus.lidar.scc_gui.configs import main_config as mc
from inqbus.lidar.scc_gui import PROJECT_PATH
from inqbus.lidar.scc_gui.log import logger
from inqbus.lidar.scc_gui.configs.base_config import resource_path
path = os.path.abspath(__file__)
dir_path = os.path.dirname(path)
class RegionMenu(QtGui.QMenu):
pass
class LimitsViewBoxMenu(ViewBoxMenu):
def __init__(self, view):
QtGui.QMenu.__init__(self)
# keep weakref to view to avoid circular reference (don't know why, but
# this prevents the ViewBox from being collected)
self.view = weakref.ref(view)
self.valid = False # tells us whether the ui needs to be updated
# weakrefs to all views listed in the link combos
self.viewMap = weakref.WeakValueDictionary()
self.setTitle("ViewBox options")
self.viewAll = QtGui.QAction("View All", self)
self.viewAll.triggered.connect(self.autoRange)
self.addAction(self.viewAll)
self.axes = []
self.ctrl = []
self.widgetGroups = []
self.dv = QtGui.QDoubleValidator(self)
for axis in 'XY':
w = ui = uic.loadUi(
resource_path(
os.path.join(
PROJECT_PATH,
'UI/axisCtrlTemplate.ui')))
# x is a time axis so we use bins to set axis
if axis == 'X':
ui.unitCombo.addItems(['bins'])
# y is a distance axis with different units
if axis == 'Y':
ui.unitCombo.addItems(['bins', 'm', 'km'])
sub_a = QtGui.QWidgetAction(self)
sub_a.setDefaultWidget(w)
a = self.addMenu("%s Axis" % axis)
a.addAction(sub_a)
self.axes.append(a)
self.ctrl.append(ui)
connects = [
(ui.mouseCheck.toggled, 'MouseToggled'),
(ui.manualRadio.clicked, 'ManualClicked'),
(ui.minText.editingFinished, 'MinTextChanged'),
(ui.maxText.editingFinished, 'MaxTextChanged'),
(ui.unitCombo.currentIndexChanged, 'UnitComboChanged'),
(ui.minText_2.editingFinished, 'MinTextChanged_2'),
(ui.maxText_2.editingFinished, 'MaxTextChanged_2'),
(ui.cbLimits.stateChanged, 'cbLimitsChanged'),
(ui.autoRadio.clicked, 'AutoClicked'),
(ui.autoPercentSpin.valueChanged, 'AutoSpinChanged'),
(ui.linkCombo.currentIndexChanged, 'LinkComboChanged'),
(ui.autoPanCheck.toggled, 'AutoPanToggled'),
(ui.visibleOnlyCheck.toggled, 'VisibleOnlyToggled')
]
for sig, fn in connects:
sig.connect(getattr(self, axis.lower() + fn))
self.ctrl[0].invertCheck.toggled.connect(self.xInvertToggled)
self.ctrl[1].invertCheck.toggled.connect(self.yInvertToggled)
self.leftMenu = QtGui.QMenu("Mouse Mode")
group = QtGui.QActionGroup(self)
# This does not work! QAction _must_ be initialized with a permanent
# object as the parent or else it may be collected prematurely.
pan = QtGui.QAction("3 button", self.leftMenu)
zoom = QtGui.QAction("1 button", self.leftMenu)
self.leftMenu.addAction(pan)
self.leftMenu.addAction(zoom)
pan.triggered.connect(self.set3ButtonMode)
zoom.triggered.connect(self.set1ButtonMode)
pan.setCheckable(True)
zoom.setCheckable(True)
pan.setActionGroup(group)
zoom.setActionGroup(group)
self.mouseModes = [pan, zoom]
self.addMenu(self.leftMenu)
self.view().sigStateChanged.connect(self.viewStateChanged)
self.updateState()
def updateState(self):
# Something about the viewbox has changed; update the menu GUI
state = self.view().getState(copy=False)
if state['mouseMode'] == ViewBox.PanMode:
self.mouseModes[0].setChecked(True)
else:
self.mouseModes[1].setChecked(True)
limits = state['limits']
if limits['xLimits']:
if limits['xLimits'][0]:
self.ctrl[0].minText_2.setText("%0.5g" % limits['xLimits'][0])
if limits['xLimits'][1]:
self.ctrl[0].maxText_2.setText("%0.5g" % limits['xLimits'][1])
if limits['yLimits']:
if limits['yLimits'][0]:
self.ctrl[1].minText_2.setText("%0.5g" % limits['yLimits'][0])
if limits['yLimits'][1]:
self.ctrl[1].maxText_2.setText("%0.5g" % limits['yLimits'][1])
for i in [0, 1]: # x, y
tr = state['targetRange'][i]
tr_unit = self.convert_target_range_to_unit(tr, i)
self.ctrl[i].minText.setText("%0.5g" % tr_unit[0])
self.ctrl[i].maxText.setText("%0.5g" % tr_unit[1])
if state['autoRange'][i] is not False:
self.ctrl[i].autoRadio.setChecked(True)
if state['autoRange'][i] is not True:
self.ctrl[i].autoPercentSpin.setValue(
state['autoRange'][i] * 100)
else:
self.ctrl[i].manualRadio.setChecked(True)
self.ctrl[i].mouseCheck.setChecked(state['mouseEnabled'][i])
# Update combo to show currently linked view
c = self.ctrl[i].linkCombo
c.blockSignals(True)
try:
view = state['linkedViews'][i] # will always be string or None
if view is None:
view = ''
ind = c.findText(view)
if ind == -1:
ind = 0
c.setCurrentIndex(ind)
finally:
c.blockSignals(False)
self.valid = True
def xMinTextChanged_2(self):
# ToDo: Error! We are changing only one of many views
self.ctrl[0].cbLimits.setChecked(True)
self.view().setLimits(xMin=float(self.ctrl[0].minText_2.text()))
def yMinTextChanged_2(self):
self.ctrl[1].cbLimits.setChecked(True)
self.view().setLimits(yMin=float(self.ctrl[1].minText_2.text()))
def xMaxTextChanged_2(self):
self.ctrl[0].cbLimits.setChecked(True)
self.view().setLimits(xMax=float(self.ctrl[0].maxText_2.text()))
def yMaxTextChanged_2(self):
self.ctrl[1].cbLimits.setChecked(True)
self.view().setLimits(yMax=float(self.ctrl[1].maxText_2.text()))
def get_min_and_max(self, ctrl_index):
self.ctrl[ctrl_index].manualRadio.setChecked(True)
min = float(self.ctrl[ctrl_index].minText.text())
max = float(self.ctrl[ctrl_index].maxText.text())
# to fit units in y range, m/bins are equal
if self.ctrl[ctrl_index].unitCombo.currentText() == 'km':
min = self.m_2_bin(min * 1000.0)
max = self.m_2_bin(max * 1000.0)
elif self.ctrl[ctrl_index].unitCombo.currentText() == 'm':
min = self.m_2_bin(min) * 1.0
max = self.m_2_bin(max) * 1.0
if min is None:
min = self.m_2_bin(0.0) * 1.0
if max is None:
max = self.m_2_bin(mc.MAX_PLOT_ALTITUDE) * 1.0
return min, max
def convert_target_range_to_unit(self, target_range, ctrl_index):
unit = self.ctrl[ctrl_index].unitCombo.currentText()
if unit == 'm':
result = []
for x in target_range:
result.append(self.bin_2_m(x))
return result
elif unit == 'km':
result = []
for x in target_range:
result.append(self.bin_2_m(x) / 1000.0)
return result
else:
return target_range
def bin_2_m(self, bin):
view = self.view()
data = view.plot.height_axis.axis_data
size = data.size
bin = int(bin)
if bin > size:
return data[-1]
else:
return data[bin]
def m_2_bin(self, altitude_m):
try:
view = self.view()
data = view.plot.height_axis.axis_data
res = np.where(data > altitude_m)
if res[0].size == 0:
res = None
else:
res = res[0][0]
return res
except BaseException as e:
logger.error("Exception: %s" % sys.exc_info()[0])
logger.error("Traceback: %s" % tb.format_exc())
def block_signals_ranges(self, ctrl_index):
self.ctrl[ctrl_index].unitCombo.blockSignals(True)
self.ctrl[ctrl_index].minText.blockSignals(True)
self.ctrl[ctrl_index].maxText.blockSignals(True)
def unblock_signals_ranges(self, ctrl_index):
self.ctrl[ctrl_index].minText.blockSignals(False)
self.ctrl[ctrl_index].maxText.blockSignals(False)
self.ctrl[ctrl_index].unitCombo.blockSignals(False)
def updateXRange(self):
min, max = self.get_min_and_max(0)
self.block_signals_ranges(0)
self.view().setXRange(min, max, padding=0)
self.unblock_signals_ranges(0)
def updateYRange(self):
orig_min = self.ctrl[1].minText.text()
orig_max = self.ctrl[1].maxText.text()
orig_unit = self.ctrl[1].unitCombo.currentIndex()
min, max = self.get_min_and_max(1)
self.block_signals_ranges(1)
self.view().setYRange(min, max, padding=0)
self.ctrl[1].minText.setText(orig_min)
self.ctrl[1].maxText.setText(orig_max)
self.ctrl[1].unitCombo.setCurrentIndex(orig_unit)
self.unblock_signals_ranges(1)
def xMinTextChanged(self):
self.updateXRange()
def xMaxTextChanged(self):
self.updateXRange()
def yMinTextChanged(self):
self.updateYRange()
def yMaxTextChanged(self):
self.updateYRange()
def xUnitComboChanged(self):
self.updateXRange()
def yUnitComboChanged(self):
self.updateYRange()
def xcbLimitsChanged(self):
if self.ctrl[0].cbLimits.isChecked():
self.view().setLimits(
xMin=float(
self.ctrl[0].minText_2.text()), xMax=float(
self.ctrl[0].maxText_2.text()), )
else:
self.view().setLimits(xMin=None)
self.view().setLimits(xMax=None)
def ycbLimitsChanged(self):
if self.ctrl[1].cbLimits.isChecked():
self.view().setLimits(
yMin=float(
self.ctrl[1].minText_2.text()), yMax=float(
self.ctrl[1].maxText_2.text()), )
else:
self.view().setLimits(yMin=None)
self.view().setLimits(yMax=None)
class ProfileViewBox(pg.ViewBox):
"""
A viewbox that has truly fixed axes
"""
def __init__(self, plot, *args, **kwargs):
super(ProfileViewBox, self).__init__(*args, **kwargs)
# self.menu = LimitsViewBoxMenu(self)
self.plot = plot
def mouseClickEvent(self, ev):
if ev.button() == QtCore.Qt.RightButton and self.menuEnabled():
super(ProfileViewBox, self).mouseClickEvent(ev)
def mouseDoubleClickEvent(self, event):
self.mouse_double_click(event)
def mouse_double_click(self, ev):
ev.accept()
self.plot.add_cloud_region(ev.scenePos())
class QLFixedViewBox(pg.ViewBox):
"""
A viewbox that has truly fixed axes
"""
def __init__(self, plot, *args, **kwargs):
super(QLFixedViewBox, self).__init__(*args, **kwargs)
self.menu = LimitsViewBoxMenu(self)
self.plot = plot
def mouseClickEvent(self, ev):
if ev.button() == QtCore.Qt.RightButton and self.menuEnabled():
super(QLFixedViewBox, self).mouseClickEvent(ev)
def mouseDoubleClickEvent(self, event):
self.mouse_double_click(event)
def mouse_double_click(self, ev):
ev.accept()
self.plot.add_region(ev.scenePos())
def setRange(
self,
rect=None,
xRange=None,
yRange=None,
padding=None,
update=True,
disableAutoRange=True):
"""
Set the visible range of the ViewBox.
Must specify at least one of *rect*, *xRange*, or *yRange*.
================== =====================================================================
**Arguments:**
*rect* (QRectF) The full range that should be visible in the view box.
*xRange* (min,max) The range that should be visible along the x-axis.
*yRange* (min,max) The range that should be visible along the y-axis.
*padding* (float) Expand the view by a fraction of the requested range.
By default, this value is set between 0.02 and 0.1 depending on
the size of the ViewBox.
*update* (bool) If True, update the range of the ViewBox immediately.
Otherwise, the update is deferred until before the next render.
*disableAutoRange* (bool) If True, auto-ranging is disabled. Otherwise, it is left
unchanged.
================== =====================================================================
"""
changes = {} # axes
setRequested = [False, False]
if rect is not None:
changes = {0: [rect.left(), rect.right()], 1: [
rect.top(), rect.bottom()]}
setRequested = [True, True]
if xRange is not None:
changes[0] = xRange
setRequested[0] = True
if yRange is not None:
changes[1] = yRange
setRequested[1] = True
if len(changes) == 0:
print(rect)
raise Exception(
"Must specify at least one of rect, xRange, or yRange. (gave rect=%s)" % str(
type(rect)))
# Update axes one at a time
changed = [False, False]
for ax, range in changes.items():
mn = min(range)
mx = max(range)
# If we requested 0 range, try to preserve previous scale.
# Otherwise just pick an arbitrary scale.
if mn == mx:
dy = self.state['viewRange'][ax][1] - \
self.state['viewRange'][ax][0]
if dy == 0:
dy = 1
mn -= dy * 0.5
mx += dy * 0.5
xpad = 0.0
# Make sure no nan/inf get through
if not all(np.isfinite([mn, mx])):
raise Exception(
"Cannot set range [%s, %s]" %
(str(mn), str(mx)))
# Apply padding
if padding is None:
xpad = self.suggestPadding(ax)
else:
xpad = padding
p = (mx - mn) * xpad
mn -= p
mx += p
# Set target range
if self.state['targetRange'][ax] != [mn, mx]:
self.state['targetRange'][ax] = [mn, mx]
changed[ax] = True
# Update viewRange to match targetRange as closely as possible while
# accounting for aspect ratio constraint
lockX, lockY = setRequested
if lockX and lockY:
lockX = False
lockY = False
self.updateViewRange(lockX, lockY)
# Disable auto-range for each axis that was requested to be set
if disableAutoRange:
xOff = False if setRequested[0] else None
yOff = False if setRequested[1] else None
self.enableAutoRange(x=xOff, y=yOff)
changed.append(True)
# If nothing has changed, we are done.
if any(changed):
self.sigStateChanged.emit(self)
# Update target rect for debugging
if self.target.isVisible():
self.target.setRect(
self.mapRectFromItem(
self.childGroup,
self.targetRect()))
# If ortho axes have auto-visible-only, update them now
# Note that aspect ratio constraints and auto-visible probably do not
# work together..
if changed[0] and self.state['autoVisibleOnly'][1] and (
self.state['autoRange'][0] is not False):
self._autoRangeNeedsUpdate = True
elif changed[1] and self.state['autoVisibleOnly'][0] and (self.state['autoRange'][1] is not False):
self._autoRangeNeedsUpdate = True
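# Illustrative usage of the overridden setRange above (assumes an existing
# QLFixedViewBox instance `vb`); it accepts the same arguments as the
# pyqtgraph original:
#     vb.setRange(xRange=(0, 500), yRange=(0, 3000), padding=0.0)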
def updateViewRange(self, forceX=False, forceY=False):
# Update viewRange to match targetRange as closely as possible, given
# aspect ratio constraints. The *force* arguments are used to indicate
# which axis (if any) should be unchanged when applying constraints.
viewRange = [
self.state['targetRange'][0][:],
self.state['targetRange'][1][:]]
changed = [False, False]
#-------- Make correction for aspect ratio constraint ----------
# aspect is (widget w/h) / (view range w/h)
aspect = self.state['aspectLocked'] # size ratio / view ratio
tr = self.targetRect()
bounds = self.rect()
if aspect is not False and 0 not in [
aspect, tr.height(), bounds.height(), bounds.width()]:
# This is the view range aspect ratio we have requested
targetRatio = tr.width() / tr.height() if tr.height() != 0 else 1
# This is the view range aspect ratio we need to obey aspect
# constraint
viewRatio = (bounds.width() / bounds.height()
if bounds.height() != 0 else 1) / aspect
viewRatio = 1 if viewRatio == 0 else viewRatio
# Decide which range to keep unchanged
if forceX:
ax = 0
elif forceY:
ax = 1
else:
# if we are not required to keep a particular axis unchanged,
# then make the entire target range visible
ax = 0 if targetRatio > viewRatio else 1
if ax == 0:
# view range needs to be taller than target
dy = 0.5 * (tr.width() / viewRatio - tr.height())
if dy != 0:
changed[1] = True
viewRange[1] = [
self.state['targetRange'][1][0] - dy,
self.state['targetRange'][1][1] + dy]
else:
# view range needs to be wider than target
dx = 0.5 * (tr.height() * viewRatio - tr.width())
if dx != 0:
changed[0] = True
viewRange[0] = [
self.state['targetRange'][0][0] - dx,
self.state['targetRange'][0][1] + dx]
# ----------- Make corrections for view limits -----------
limits = (
self.state['limits']['xLimits'],
self.state['limits']['yLimits'])
minRng = [
self.state['limits']['xRange'][0],
self.state['limits']['yRange'][0]]
maxRng = [
self.state['limits']['xRange'][1],
self.state['limits']['yRange'][1]]
for axis in [0, 1]:
if limits[axis][0] is None and limits[axis][1] is None and minRng[axis] is None and maxRng[axis] is None:
continue
# max range cannot be larger than bounds, if they are given
if limits[axis][0] is not None and limits[axis][1] is not None:
if maxRng[axis] is not None:
maxRng[axis] = min(
maxRng[axis], limits[axis][1] - limits[axis][0])
else:
maxRng[axis] = limits[axis][1] - limits[axis][0]
# Apply xRange, yRange
diff = viewRange[axis][1] - viewRange[axis][0]
if maxRng[axis] is not None and diff > maxRng[axis]:
delta = maxRng[axis] - diff
changed[axis] = True
elif minRng[axis] is not None and diff < minRng[axis]:
delta = minRng[axis] - diff
changed[axis] = True
else:
delta = 0
viewRange[axis][0] -= delta / 2.
viewRange[axis][1] += delta / 2.
# Apply xLimits, yLimits
mn, mx = limits[axis]
if mn is not None and viewRange[axis][0] < mn:
delta = mn - viewRange[axis][0]
viewRange[axis][0] += delta
viewRange[axis][1] += delta
changed[axis] = True
elif mx is not None and viewRange[axis][1] > mx:
delta = mx - viewRange[axis][1]
viewRange[axis][0] += delta
viewRange[axis][1] += delta
changed[axis] = True
changed = [(viewRange[i][0] != self.state['viewRange'][i][0]) or (
viewRange[i][1] != self.state['viewRange'][i][1]) for i in (0, 1)]
self.state['viewRange'] = viewRange
# emit range change signals
if changed[0]:
self.sigXRangeChanged.emit(self, tuple(self.state['viewRange'][0]))
if changed[1]:
self.sigYRangeChanged.emit(self, tuple(self.state['viewRange'][1]))
if any(changed):
self.sigRangeChanged.emit(self, self.state['viewRange'])
self.update()
self._matrixNeedsUpdate = True
# Inform linked views that the range has changed
for ax in [0, 1]:
if not changed[ax]:
continue
link = self.linkedView(ax)
if link is not None:
link.linkedViewChanged(self, ax)
|
py | b400688412e219ea8bb2646a39a5e16be9e59ff7 | from flavorsync.parser.parser import Parser
from flask import json
class JSONParser(Parser):
def from_model(self, data):
return json.dumps(data.to_dict(), default=lambda o: o.__dict__) |
py | b40068df8019f35c5a095dd0574587a1ff9ab3b5 | import json
import re
import sys
from typing import Any, TypedDict
from typing_extensions import NotRequired
from urllib.request import urlopen
from .common_types import HtmlCode, PostInfo, Story, Url
def get_json(url: Url) -> Any:
return json.load(urlopen(url))
class UserInfo(TypedDict):
id: int
username: str
class CharacterInfo(TypedDict, total=False):
id: int
name: str
class IconInfo(TypedDict):
id: int
url: Url
keyword: str
class ThreadInfo(TypedDict):
"""Info about a whole thread (or story)."""
id: int
authors: list[UserInfo]
content: HtmlCode
created_at: str
num_replies: int
subject: str
character: NotRequired[CharacterInfo]
class RawPost(TypedDict):
id: int
content: HtmlCode
created_at: str
user: UserInfo
character: NotRequired[CharacterInfo]
icon: NotRequired[IconInfo]
def proc(main_post: int) -> Story:
comments_url = Url(f"https://www.glowfic.com/posts/{main_post:d}")
main_url = Url(f"https://www.glowfic.com/api/v1/posts/{main_post:d}")
thread: ThreadInfo = get_json(main_url)
title: str = thread["subject"]
authors = " & ".join(a["username"] for a in thread["authors"])
num_replies = thread["num_replies"]
permalink = Url(f"https://www.glowfic.com/posts/{main_post:d}")
all_posts: list[PostInfo] = []
all_posts.append(_dopost(thread, permalink, thread["authors"][0]))
index = 1
percent = 0
while True:
replies_url = Url(
f"https://www.glowfic.com/api/v1/posts/{main_post:d}/replies?page={index:d}"
)
posts: list[RawPost] = get_json(replies_url)
index += 1
if not posts:
break
for post in posts:
postid = post["id"]
permalink = Url(
f"https://www.glowfic.com/replies/{postid:d}#reply-{postid:d}"
)
all_posts.append(_dopost(post, permalink, post["user"]))
p = 100 * len(all_posts) // num_replies
if p != percent:
percent = p
sys.stderr.write("\r%3d %% " % percent)
sys.stderr.write("\r \r")
return {
"title": title,
"authors": authors,
"comments": comments_url,
"posts": all_posts,
}
def _dopost(post: ThreadInfo | RawPost, permalink: Url, author: UserInfo) -> PostInfo:
ret: PostInfo = {
"id": post["id"],
"permalink": permalink,
"author": author["username"],
"author_url": Url(f"https://www.glowfic.com/users/{author['id']:d}"),
"posted": post["created_at"],
"content": post["content"],
}
c = post.get("character")
if c and "name" in c and "id" in c:
ret["character"] = c["name"]
ret["character_url"] = Url(f"https://www.glowfic.com/characters/{c['id']:d}")
i: IconInfo | None = post.get("icon")
if i is not None:
ret["icon_url"] = i["url"]
return ret
def main(postid: int):
thread = proc(postid)
ofilename = re.sub(r"\W+", "_", thread["title"]) + ".json"
with open(ofilename, "w") as o:
json.dump(thread, o)
sys.stderr.write('wrote to "%s".\n' % ofilename)
if __name__ == "__main__":
for arg in sys.argv[1:]:
main(int(arg))
|
py | b4006a715198a5f7c21868fc3d479b5dcbc7c887 | """GeoServer."""
import random
import logging
from flask import Flask
from flask_caching import Cache
from flask_cors import CORS
from waitress import serve
from utils.sysx import log_metrics
from geo import alt, geodata
DEFAULT_CACHE_TIMEOUT = 120
logging.basicConfig(level=logging.INFO)
def _warmup():
logging.info(log_metrics())
random_latlng = [6 + random.random() * 3, 79.9 + random.random()]
geodata.get_latlng_regions(random_latlng)
region_id = 'LK-%d' % (random.randint(1, 9))
geodata.get_region_geo(region_id)
logging.info('GeoServer warmup complete.')
_warmup()
app = Flask(__name__)
CORS(app)
cache = Cache(config={'CACHE_TYPE': 'SimpleCache'})
cache.init_app(app)
@app.route('/status')
@cache.cached(timeout=DEFAULT_CACHE_TIMEOUT)
def status():
"""Index."""
return log_metrics() | {'server': 'geo_server'}
@app.route('/latlng_to_region/<string:latlng_str>')
@cache.cached(timeout=DEFAULT_CACHE_TIMEOUT)
def latlng_to_region(latlng_str):
"""Get region from latlng."""
lat, _, lng = latlng_str.partition(',')
lat_lng = (float)(lat), (float)(lng)
return geodata.get_latlng_regions(lat_lng)
@app.route('/region_geo/<string:region_id>')
@cache.cached(timeout=DEFAULT_CACHE_TIMEOUT)
def region_geo(region_id):
"""Get region."""
return geodata.get_region_geo(region_id)
@app.route('/altitude/<string:latlng_str>')
@cache.cached(timeout=DEFAULT_CACHE_TIMEOUT)
def altitude(latlng_str):
"""Get altitude for latlng."""
lat, _, lng = latlng_str.partition(',')
lat_lng = (float)(lat), (float)(lng)
return {
'altitude': alt.get_altitude(lat_lng),
}
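# Example requests against the routes defined above (host and port taken from
# the waitress setup below; the region id follows the 'LK-<n>' form used in _warmup):
# curl http://localhost:4002/status
# curl http://localhost:4002/latlng_to_region/6.92,79.86
# curl http://localhost:4002/region_geo/LK-1
# curl http://localhost:4002/altitude/6.92,79.86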
if __name__ == '__main__':
PORT = 4002
HOST = '0.0.0.0'
logging.info('Starting geo_server on %s:%d...' % (HOST, PORT))
serve(
app,
host=HOST,
port=PORT,
threads=8,
)
|
py | b4006a803d87d73f291a8ebf67e3cfa77b20473d | # -*- coding: utf-8 -*-
'''
@author: Angel Navia Vázquez
Dec. 2020
python3 pom4_deep_learning_preprocessing.py --dataset mnist_raw_matrix --verbose 1
'''
import argparse
import time
import json
import sys, os
import numpy as np
# Add higher directory to python modules path.
sys.path.append("../../../../")
try:
from MMLL.nodes.MasterNode import MasterNode
from MMLL.common.MMLL_tools import display
#from MMLL.comms.comms_pycloudmessenger import Comms_master as Comms
from MMLL.comms.comms_pycloudmessenger import Comms_master as Comms
except Exception as err:
if "No module named 'MMLL'" in str(err):
print('\n' + 80 * '#')
print('You need to install the MMLL library')
print('pip install git+https://github.com/Musketeer-H2020/MMLL.git')
print(80 * '#' + '\n')
raise
from demo_tools.task_manager_pycloudmessenger import Task_Manager
from demo_tools.mylogging.logger_v1 import Logger
from demo_tools.data_connectors.Load_from_file import Load_From_File as DC # Data connector
from demo_tools.evaluation_tools import eval_classification, create_folders
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--id', type=str, default=None, help='The external names of the workers')
parser.add_argument('--verbose', type=str, default='1', help='Print messages on screen when True')
parser.add_argument('--dataset', type=str, default=None, help='The dataset to be used')
FLAGS, unparsed = parser.parse_known_args()
if FLAGS.verbose == '1':
verbose = True
else:
verbose = False
# Create the directories for storing relevant outputs if they do not exist
create_folders("./results/")
# Logging is optional, if you do not want to log messages, simply set logger=None
logger = Logger('./results/logs/Master.log')
pom = 4
Nworkers = 5 + 1
model_type = 'LR'
dataset_name = FLAGS.dataset
display('===========================================', logger, True)
display('Creating Master... ', logger, True)
display('Please wait until Master is ready before launching the workers...', logger, True)
# ==================================================
# Note: this part creates the task and waits for the workers to join. This code is
# intended to be used only at the demos, in Musketeer this part must be done in the client.
credentials_filename = '../../musketeer.json'
tm = Task_Manager(credentials_filename)
# We need the aggregator to build comms object
aggregator = tm.create_master_random_taskname(pom, Nworkers, user_org='UC3M')
display('Workers can be launched now!', logger, True)
display('Waiting for the workers to join task name = %s' % tm.task_name, logger, True)
tm.wait_for_workers()
# ==================================================
#########################################
display('Creating MasterNode under POM4, communicating through pycloudmessenger', logger, True)
# Creating Comms object, needed by MMLL
comms = Comms(aggregator)
# Creating Masternode
mn = MasterNode(pom, comms, logger, verbose)
display('-------------------- Loading dataset %s --------------------------' % dataset_name, logger, True)
# Warning: this data connector is only designed for the demos. In Musketeer, appropriate data
# connectors must be provided
data_file = '../../../../input_data/' + dataset_name + '_demonstrator_data.pkl'
dc = DC(data_file)
#########################################
input_data_description = None
if dataset_name == "mnist_raw_matrix_binclass":
input_data_description = {
"NI": 1,
"input_types": [
{"type": "matrix", "name": "image"}]
}
target_data_description= {
"NT": 1,
"output_types": [
{"type": "bin", "name": "even/odd", "definition": "if a number is even or odd"}
]
}
#--------------- Creating a ML model (Master side) ---------------------
########################################
# Parameters depending on the model_type
########################################
if input_data_description is not None:
model_parameters = {}
model_parameters.update({'regularization': 0.001})
model_parameters.update({'Nmaxiter': 10})
model_parameters.update({'conv_stop': 0.005})
model_parameters.update({'input_data_description': input_data_description})
model_parameters.update({'aggregation_type': 'direct'})
#model_parameters.update({'aggregation_type': 'roundrobin'})
else:
display('\n' + '='*50 + '\nERROR: input_data_description is missing\n' + '='*50 + '\n', logger, True)
sys.exit()
mn.create_model_Master(model_type, model_parameters=model_parameters)
display('MMLL model %s is ready for training!' % model_type, logger, True)
display('Deep Learning image processing at workers', logger, True)
[dl_transformer, new_input_data_description, errors_deep_learning] = mn.deep_learning_transform_workers(input_data_description)
[Xval, yval] = dc.get_data_val()
display('------- Sample before Deep Learning Preprocessing ---\n', logger, True)
display(Xval[0, :], logger, True)
display(Xval.shape, logger, True)
Xval_DLP = dl_transformer.transform(Xval)
display('------- Sample after Deep Learning Preprocessing ---\n', logger, True)
display(Xval_DLP[0, :], logger, True)
display(Xval_DLP.shape, logger, True)
display('\n---------------------------------------------------------------------', logger, True)
display('----------------------- Deep Learning Preprocessing completed ---------', logger, True)
display('----------------------------------------------------------------------\n', logger, True)
display('Terminating all worker nodes.', logger, True)
mn.terminate_workers()
|
py | b4006c1a1a088a8339919eaafe70a743d2df34c7 | """
Django settings for hotel project.
Generated by 'django-admin startproject' using Django 3.1.2.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '^eoz2gbf#*2xx9+$fi_852po1d-%z--az$@&_me67bnwob05@7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'hotel_app',
'drf_yasg',
'djoser',
'corsheaders'
]
MIDDLEWARE = [
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'hotel.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'hotel.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.TokenAuthentication',
),
}
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
CORS_ORIGIN_ALLOW_ALL = True |
py | b4006d430557306402222ca6ed7ebd22830456e0 | import ipaddress
import os
import semver
import re
import shutil
import sys
import yaml
import tempfile
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
from invoke import run, task
from invoke.exceptions import Exit, UnexpectedExit
all_binaries = set(["controller",
"speaker",
"mirror-server"])
all_architectures = set(["amd64",
"arm",
"arm64",
"ppc64le",
"s390x"])
def _check_architectures(architectures):
out = set()
for arch in architectures:
if arch == "all":
out |= all_architectures
elif arch not in all_architectures:
print("unknown architecture {}".format(arch))
print("Supported architectures: {}".format(", ".join(sorted(all_architectures))))
sys.exit(1)
else:
out.add(arch)
if not out:
out.add("amd64")
return list(sorted(out))
def _check_binaries(binaries):
out = set()
for binary in binaries:
if binary == "all":
out |= all_binaries
elif binary not in all_binaries:
print("Unknown binary {}".format(binary))
print("Known binaries: {}".format(", ".join(sorted(all_binaries))))
sys.exit(1)
else:
out.add(binary)
if not out:
out.add("controller")
out.add("speaker")
return list(sorted(out))
def _docker_build_cmd():
cmd = os.getenv('DOCKER_BUILD_CMD')
if cmd:
out = cmd
else:
out = run("docker buildx ls >/dev/null"
"&& echo 'docker buildx build --load' "
"|| echo 'docker build'", hide=True).stdout.strip()
return out
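# Usage sketch (assumption: any builder CLI that accepts "docker build"-style
# arguments works here): the buildx probe above can be bypassed by exporting
# DOCKER_BUILD_CMD, e.g.
#   DOCKER_BUILD_CMD="podman build" inv build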
def _make_build_dirs():
for arch in all_architectures:
for binary in all_binaries:
dir = os.path.join("build", arch, binary)
if not os.path.exists(dir):
os.makedirs(dir, mode=0o750)
# Returns true if docker is a symbolic link to podman.
def _is_podman():
return 'podman' in os.path.realpath(shutil.which('docker'))
# Get the list of subnets for the kind network.
def _get_network_subnets():
if _is_podman():
cmd = ('podman network inspect kind -f "'
'{{ range (index .plugins 0).ipam.ranges}}'
'{{ (index . 0).subnet }} {{end}}"')
else:
cmd = ('docker network inspect kind -f "'
'{{ range .IPAM.Config}}{{.Subnet}} {{end}}"'
)
return run(cmd, echo=True).stdout.strip().split(' ')
# Get the list of allocated IPv4 and IPv6 addresses for the kind network.
def _get_subnets_allocated_ips():
v4_ips = []
v6_ips = []
if _is_podman():
cmd = 'podman ps -f network=kind --format "{{.ID}}"'
containers = run(cmd, echo=True).stdout.strip().split('\n')
# for each container, get the IP address and add it to the list of
# allocated IPs
for c in containers:
cmd = ("podman inspect {container} --format '"
"{{{{.NetworkSettings.Networks.kind.IPAddress}}}} "
"{{{{.NetworkSettings.Networks.kind.GlobalIPv6Address}}}}'"
).format(container=c)
v4, v6 = run(cmd, echo=True).stdout.strip().split(' ')
v4_ips.append(v4)
v6_ips.append(v6)
else:
v4_ips = run('docker network inspect kind -f '
'"{{range .Containers}}{{.IPv4Address}} {{end}}"',
echo=True).stdout.strip().split(' ')
v6_ips = run('docker network inspect kind -f '
'"{{range .Containers}}{{.IPv6Address}} {{end}}"',
echo=True).stdout.strip().split(' ')
return sorted(v4_ips), sorted(v6_ips)
@task(iterable=["binaries", "architectures"],
help={
"binaries": "binaries to build. One or more of {}, or 'all'".format(", ".join(sorted(all_binaries))),
"architectures": "architectures to build. One or more of {}, or 'all'".format(", ".join(sorted(all_architectures))),
"registry": "Docker registry under which to tag the images. Default 'quay.io'.",
"repo": "Docker repository under which to tag the images. Default 'metallb'.",
"tag": "Docker image tag prefix to use. Actual tag will be <tag>-<arch>. Default 'dev'.",
})
def build(ctx, binaries, architectures, registry="quay.io", repo="metallb", tag="dev"):
"""Build MetalLB docker images."""
binaries = _check_binaries(binaries)
architectures = _check_architectures(architectures)
docker_build_cmd = _docker_build_cmd()
_make_build_dirs()
commit = run("git describe --dirty --always", hide=True).stdout.strip()
branch = run("git rev-parse --abbrev-ref HEAD", hide=True).stdout.strip()
for arch in architectures:
env = {
"CGO_ENABLED": "0",
"GOOS": "linux",
"GOARCH": arch,
"GOARM": "6",
"GO111MODULE": "on",
}
if "speaker" in binaries:
shutil.copy("frr-reloader/frr-reloader.sh","build/{arch}/speaker/".format(arch=arch))
run("go build -v -o build/{arch}/speaker/frr-metrics -ldflags "
"'-X go.universe.tf/metallb/internal/version.gitCommit={commit} "
"-X go.universe.tf/metallb/internal/version.gitBranch={branch}' "
"frr-metrics/exporter.go".format(
arch=arch,
commit=commit,
branch=branch),
env=env,
echo=True,
)
for bin in binaries:
run("go build -v -o build/{arch}/{bin}/{bin} -ldflags "
"'-X go.universe.tf/metallb/internal/version.gitCommit={commit} "
"-X go.universe.tf/metallb/internal/version.gitBranch={branch}' "
"go.universe.tf/metallb/{bin}".format(
arch=arch,
bin=bin,
commit=commit,
branch=branch),
env=env,
echo=True)
run("{docker_build_cmd} "
"--platform linux/{arch} "
"-t {registry}/{repo}/{bin}:{tag}-{arch} "
"-f {bin}/Dockerfile build/{arch}/{bin}".format(
docker_build_cmd=docker_build_cmd,
registry=registry,
repo=repo,
bin=bin,
tag=tag,
arch=arch),
echo=True)
@task(iterable=["binaries", "architectures"],
help={
"binaries": "binaries to build. One or more of {}, or 'all'".format(", ".join(sorted(all_binaries))),
"architectures": "architectures to build. One or more of {}, or 'all'".format(", ".join(sorted(all_architectures))),
"registry": "Docker registry under which to tag the images. Default 'quay.io'.",
"repo": "Docker repository under which to tag the images. Default 'metallb'.",
"tag": "Docker image tag prefix to use. Actual tag will be <tag>-<arch>. Default 'dev'.",
})
def push(ctx, binaries, architectures, registry="quay.io", repo="metallb", tag="dev"):
"""Build and push docker images to registry."""
binaries = _check_binaries(binaries)
architectures = _check_architectures(architectures)
for arch in architectures:
for bin in binaries:
build(ctx, binaries=[bin], architectures=[arch], registry=registry, repo=repo, tag=tag)
run("docker push {registry}/{repo}/{bin}:{tag}-{arch}".format(
registry=registry,
repo=repo,
bin=bin,
arch=arch,
tag=tag),
echo=True)
@task(iterable=["binaries"],
help={
"binaries": "binaries to build. One or more of {}, or 'all'".format(", ".join(sorted(all_binaries))),
"registry": "Docker registry under which to tag the images. Default 'quay.io'.",
"repo": "Docker repository under which to tag the images. Default 'metallb'.",
"tag": "Docker image tag prefix to use. Actual tag will be <tag>-<arch>. Default 'dev'.",
})
def push_multiarch(ctx, binaries, registry="quay.io", repo="metallb", tag="dev"):
"""Build and push multi-architecture docker images to registry."""
binaries = _check_binaries(binaries)
architectures = _check_architectures(["all"])
push(ctx, binaries=binaries, architectures=architectures, registry=registry, repo=repo, tag=tag)
platforms = ",".join("linux/{}".format(arch) for arch in architectures)
for bin in binaries:
run("manifest-tool push from-args "
"--platforms {platforms} "
"--template {registry}/{repo}/{bin}:{tag}-ARCH "
"--target {registry}/{repo}/{bin}:{tag}".format(
platforms=platforms,
registry=registry,
repo=repo,
bin=bin,
tag=tag),
echo=True)
def validate_kind_version():
"""Validate minimum required version of kind."""
# If kind is not installed, "kind version" fails; we catch that below and exit
# with a clear message that running "kind" did not work.
min_version = "0.9.0"
try:
raw = run("kind version", echo=True)
except Exception:
raise Exit(message="Could not determine kind version (is kind installed?)")
actual_version = re.search(r"v(\d*\.\d*\.\d*)", raw.stdout).group(1)
delta = semver.compare(actual_version, min_version)
if delta < 0:
raise Exit(message="kind version >= {} required".format(min_version))
@task(help={
"architecture": "CPU architecture of the local machine. Default 'amd64'.",
"name": "name of the kind cluster to use.",
"protocol": "Pre-configure MetalLB with the specified protocol. "
"Unconfigured by default. Supported: 'bgp','layer2'",
"node_img": "Optional node image to use for the kind cluster (e.g. kindest/node:v1.18.19)."
"The node image drives the kubernetes version used in kind.",
"ip_family": "Optional ipfamily of the cluster."
"Default: ipv4, supported families are 'ipv6' and 'dual'.",
"bgp_type": "Type of BGP implementation to use."
"Supported: 'native' (default), 'frr'",
"frr_volume_dir": "FRR router config directory to be mounted inside frr container. "
"Default: ./dev-env/bgp/frr-volume",
"log_level": "Log level for the controller and the speaker."
"Default: info, Supported: 'all', 'debug', 'info', 'warn', 'error' or 'none'",
"helm_install": "Optional install MetalLB via helm chart instead of manifests."
"Default: False."
})
def dev_env(ctx, architecture="amd64", name="kind", protocol=None, frr_volume_dir="",
node_img=None, ip_family="ipv4", bgp_type="native", log_level="info",
helm_install=False):
"""Build and run MetalLB in a local Kind cluster.
If the cluster specified by --name (default "kind") doesn't exist,
it is created. Then, build MetalLB docker images from the
checkout, push them into kind, and deploy manifests/metallb.yaml
to run those images.
The optional node_img parameter will be used to determine the version of the cluster.
"""
validate_kind_version()
clusters = run("kind get clusters", hide=True).stdout.strip().splitlines()
mk_cluster = name not in clusters
if mk_cluster:
config = {
"apiVersion": "kind.x-k8s.io/v1alpha4",
"kind": "Cluster",
"nodes": [{"role": "control-plane"},
{"role": "worker"},
{"role": "worker"},
],
}
networking_config = {}
if ip_family != "ipv4":
networking_config["ipFamily"] = ip_family
if len(networking_config) > 0:
config["networking"] = networking_config
extra_options = ""
if node_img != None:
extra_options = "--image={}".format(node_img)
config = yaml.dump(config).encode("utf-8")
with tempfile.NamedTemporaryFile() as tmp:
tmp.write(config)
tmp.flush()
run("kind create cluster --name={} --config={} {}".format(name, tmp.name, extra_options), pty=True, echo=True)
binaries = ["controller", "speaker", "mirror-server"]
build(ctx, binaries, architectures=[architecture])
run("kind load docker-image --name={} quay.io/metallb/controller:dev-{}".format(name, architecture), echo=True)
run("kind load docker-image --name={} quay.io/metallb/speaker:dev-{}".format(name, architecture), echo=True)
run("kind load docker-image --name={} quay.io/metallb/mirror-server:dev-{}".format(name, architecture), echo=True)
if helm_install:
run("helm install metallb charts/metallb/ --set controller.image.tag=dev-{} "
"--set speaker.image.tag=dev-{} --set speaker.frr.enabled={}".format(architecture,
architecture, "true" if bgp_type == "frr" else "false"), echo=True)
else:
run("kubectl delete po -nmetallb-system --all", echo=True)
manifests_dir = os.getcwd() + "/manifests"
with tempfile.TemporaryDirectory() as tmpdir:
# Copy namespace manifest.
shutil.copy(manifests_dir + "/namespace.yaml", tmpdir)
# FIXME: This is a hack to get the correct manifest file.
manifest_filename = "metallb-frr.yaml" if bgp_type == "frr" else "metallb.yaml"
# open file and replace the protocol with the one specified by the user
with open(manifests_dir + "/" + manifest_filename) as f:
manifest = f.read()
for image in binaries:
manifest = re.sub("image: quay.io/metallb/{}:.*".format(image),
"image: quay.io/metallb/{}:dev-{}".format(image, architecture), manifest)
manifest = re.sub("--log-level=info", "--log-level={}".format(log_level), manifest)
with open(tmpdir + "/metallb.yaml", "w") as f:
f.write(manifest)
f.flush()
run("kubectl apply -f {}/namespace.yaml".format(tmpdir), echo=True)
run("kubectl apply -f {}/metallb.yaml".format(tmpdir), echo=True)
with open("e2etest/manifests/mirror-server.yaml") as f:
manifest = f.read()
manifest = manifest.replace(":main", ":dev-{}".format(architecture))
with tempfile.NamedTemporaryFile() as tmp:
tmp.write(manifest.encode("utf-8"))
tmp.flush()
run("kubectl apply -f {}".format(tmp.name), echo=True)
if protocol == "bgp":
print("Configuring MetalLB with a BGP test environment")
bgp_dev_env(ip_family, frr_volume_dir)
elif protocol == "layer2":
print("Configuring MetalLB with a layer 2 test environment")
layer2_dev_env()
else:
print("Leaving MetalLB unconfigured")
# Configure MetalLB in the dev-env for layer2 testing.
# Identify an unused network address range from the kind network and use it in the configmap.
def layer2_dev_env():
dev_env_dir = os.getcwd() + "/dev-env/layer2"
with open("%s/config.yaml.tmpl" % dev_env_dir, 'r') as f:
layer2_config = "# THIS FILE IS AUTOGENERATED\n" + f.read()
layer2_config = layer2_config.replace(
"SERVICE_V4_RANGE", get_available_ips(4)[0])
layer2_config = layer2_config.replace(
"SERVICE_V6_RANGE", get_available_ips(6)[0])
with open("%s/config.yaml" % dev_env_dir, 'w') as f:
f.write(layer2_config)
# Apply the MetalLB ConfigMap
run("kubectl apply -f %s/config.yaml" % dev_env_dir)
# Configure MetalLB in the dev-env for BGP testing. Start an frr based BGP
# router in a container and configure MetalLB to peer with it.
# See dev-env/bgp/README.md for some more information.
def bgp_dev_env(ip_family, frr_volume_dir):
dev_env_dir = os.getcwd() + "/dev-env/bgp"
if frr_volume_dir == "":
frr_volume_dir = dev_env_dir + "/frr-volume"
# TODO -- The IP address handling will need updates to add support for IPv6
# We need the IPs for each Node in the cluster to place them in the BGP
# router configuration file (bgpd.conf). Each Node will peer with this
# router.
node_ips = run("kubectl get nodes -o jsonpath='{.items[*].status.addresses"
"[?(@.type==\"InternalIP\")].address}{\"\\n\"}'", echo=True)
node_ips = node_ips.stdout.strip().split()
if len(node_ips) != 3:
raise Exit(message='Expected 3 nodes, got %d' % len(node_ips))
# Create a new directory that will be used as the config volume for frr.
try:
# sudo because past docker runs will have changed ownership of this dir
run('sudo rm -rf "%s"' % frr_volume_dir)
os.mkdir(frr_volume_dir)
except FileExistsError:
pass
except Exception as e:
raise Exit(message='Failed to create frr-volume directory: %s'
% str(e))
# These config files are static, so we copy them straight in.
copy_files = ('zebra.conf', 'daemons', 'vtysh.conf')
for f in copy_files:
shutil.copyfile("%s/frr/%s" % (dev_env_dir, f),
"%s/%s" % (frr_volume_dir, f))
# bgpd.conf is created from a template so that we can include the current
# Node IPs.
with open("%s/frr/bgpd.conf.tmpl" % dev_env_dir, 'r') as f:
bgpd_config = "! THIS FILE IS AUTOGENERATED\n" + f.read()
bgpd_config = bgpd_config.replace("PROTOCOL", ip_family)
for n in range(0, len(node_ips)):
bgpd_config = bgpd_config.replace("NODE%d_IP" % n, node_ips[n])
with open("%s/bgpd.conf" % frr_volume_dir, 'w') as f:
f.write(bgpd_config)
# Run a BGP router in a container for all of the speakers to peer with.
run('for frr in $(docker ps -a -f name=frr --format {{.Names}}) ; do '
' docker rm -f $frr ; '
'done', echo=True)
run("docker run -d --privileged --network kind --rm --ulimit core=-1 --name frr --volume %s:/etc/frr "
"quay.io/frrouting/frr:stable_7.5" % frr_volume_dir, echo=True)
if ip_family == "ipv4":
peer_address = run('docker inspect -f "{{ '
'range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}" frr', echo=True)
elif ip_family == "ipv6":
peer_address = run('docker inspect -f "{{ '
'range .NetworkSettings.Networks}}{{.GlobalIPv6Address}}{{end}}" frr', echo=True)
else:
raise Exit(message='Unsupported ip address family %s' % ip_family)
with open("%s/config.yaml.tmpl" % dev_env_dir, 'r') as f:
mlb_config = "# THIS FILE IS AUTOGENERATED\n" + f.read()
mlb_config = mlb_config.replace("IP_PEER_ADDRESS", peer_address.stdout.strip())
with open("%s/config.yaml" % dev_env_dir, 'w') as f:
f.write(mlb_config)
# Apply the MetalLB ConfigMap
run("kubectl apply -f %s/config.yaml" % dev_env_dir)
def get_available_ips(ip_family=None):
if ip_family is None or (ip_family != 4 and ip_family != 6):
raise Exit(message="Please provide network version: 4 or 6.")
v4, v6 = _get_subnets_allocated_ips()
for i in _get_network_subnets():
network = ipaddress.ip_network(i)
if network.version == ip_family:
used_list = v4 if ip_family == 4 else v6
last_used = ipaddress.ip_interface(used_list[-1])
for_containers = []
for i in range(2):
last_used = last_used + 1
for_containers.append(str(last_used))
# try to reserve a block of service IPs right after the last assigned node address in the kind network subnet;
# if the block falls outside the subnet, just quit (recreating the kind cluster might solve the situation)
service_ip_range_start = last_used + 1
service_ip_range_end = last_used + 11
if service_ip_range_start not in network:
raise Exit(message='network range %s is not in %s' % (service_ip_range_start, network))
if service_ip_range_end not in network:
raise Exit(message='network range %s is not in %s' % (service_ip_range_end, network))
return '%s-%s' % (service_ip_range_start.ip, service_ip_range_end.ip), ','.join(for_containers)
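# Return-shape sketch (addresses are assumptions): get_available_ips(4) yields a
# tuple of (service_range, ips_for_containers), e.g. a "<start>-<end>" string such
# as "172.18.0.7-172.18.0.17" plus a comma-separated pair of addresses reserved for
# extra containers, both carved out right after the last allocated kind node address.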
@task(help={
"name": "name of the kind cluster to delete.",
"frr_volume_dir": "FRR router config directory to be cleaned up. "
"Default: ./dev-env/bgp/frr-volume"
})
def dev_env_cleanup(ctx, name="kind", frr_volume_dir=""):
"""Remove traces of the dev env."""
validate_kind_version()
clusters = run("kind get clusters", hide=True).stdout.strip().splitlines()
if name in clusters:
run("kind delete cluster --name={}".format(name), hide=True)
else:
raise Exit(message="Unable to find cluster named: {}".format(name))
run('for frr in $(docker ps -a -f name=frr --format {{.Names}}) ; do '
' docker rm -f $frr ; '
'done', hide=True)
# cleanup bgp configs
dev_env_dir = os.getcwd() + "/dev-env/bgp"
if frr_volume_dir == "":
frr_volume_dir = dev_env_dir + "/frr-volume"
# sudo because past docker runs will have changed ownership of this dir
run('sudo rm -rf "%s"' % frr_volume_dir)
run('rm -f "%s"/config.yaml' % dev_env_dir)
# cleanup layer2 configs
dev_env_dir = os.getcwd() + "/dev-env/layer2"
run('rm -f "%s"/config.yaml' % dev_env_dir)
@task(help={
"version": "version of MetalLB to release.",
"skip-release-notes": "make the release even if there are no release notes.",
})
def release(ctx, version, skip_release_notes=False):
"""Tag a new release."""
status = run("git status --porcelain", hide=True).stdout.strip()
if status != "":
raise Exit(message="git checkout not clean, cannot release")
version = semver.parse_version_info(version)
is_patch_release = version.patch != 0
# Check that we have release notes for the desired version.
run("git checkout main", echo=True)
if not skip_release_notes:
with open("website/content/release-notes/_index.md") as release_notes:
if "## Version {}".format(version) not in release_notes.read():
raise Exit(message="no release notes for v{}".format(version))
# Move HEAD to the correct release branch - either a new one, or
# an existing one.
if is_patch_release:
run("git checkout v{}.{}".format(version.major, version.minor), echo=True)
else:
run("git checkout -b v{}.{}".format(version.major, version.minor), echo=True)
# Copy over release notes from main.
if not skip_release_notes:
run("git checkout main -- website/content/release-notes/_index.md", echo=True)
# Update links on the website to point to files at the version
# we're creating.
if is_patch_release:
previous_version = "v{}.{}.{}".format(version.major, version.minor, version.patch-1)
else:
previous_version = "main"
def _replace(pattern):
oldpat = pattern.format(previous_version)
newpat = pattern.format("v{}").format(version)
run("perl -pi -e 's#{}#{}#g' website/content/*.md website/content/*/*.md".format(oldpat, newpat),
echo=True)
_replace("/metallb/metallb/{}")
_replace("/metallb/metallb/tree/{}")
_replace("/metallb/metallb/blob/{}")
# Update the version listed on the website sidebar
run("perl -pi -e 's/MetalLB .*/MetalLB v{}/g' website/content/_header.md".format(version), echo=True)
# Update the manifests with the new version
run("perl -pi -e 's,image: quay.io/metallb/speaker:.*,image: quay.io/metallb/speaker:v{},g' manifests/metallb.yaml".format(version), echo=True)
run("perl -pi -e 's,image: quay.io/metallb/controller:.*,image: quay.io/metallb/controller:v{},g' manifests/metallb.yaml".format(version), echo=True)
# Update the versions in the helm chart (version and appVersion are always the same)
# helm chart versions follow Semantic Versioning, and thus exclude the leading 'v'
run("perl -pi -e 's,^version: .*,version: {},g' charts/metallb/Chart.yaml".format(version), echo=True)
run("perl -pi -e 's,^appVersion: .*,appVersion: v{},g' charts/metallb/Chart.yaml".format(version), echo=True)
run("perl -pi -e 's,^Current chart version is: .*,Current chart version is: `{}`,g' charts/metallb/README.md".format(version), echo=True)
# Update the version in kustomize instructions
#
# TODO: Check if kustomize instructions really need the version in the
# website or if there is a simpler way. For now, though, we just replace the
# only page that mentions the version on release.
run("perl -pi -e 's,github.com/metallb/metallb//manifests\?ref=.*,github.com/metallb/metallb//manifests\?ref=v{},g' website/content/installation/_index.md".format(version), echo=True)
# Update the version embedded in the binary
run("perl -pi -e 's/version\s+=.*/version = \"{}\"/g' internal/version/version.go".format(version), echo=True)
run("gofmt -w internal/version/version.go", echo=True)
run("git commit -a -m 'Automated update for release v{}'".format(version), echo=True)
run("git tag v{} -m 'See the release notes for details:\n\nhttps://metallb.universe.tf/release-notes/#version-{}-{}-{}'".format(version, version.major, version.minor, version.patch), echo=True)
run("git checkout main", echo=True)
@task
def test(ctx):
"""Run unit tests."""
run("go test -short ./...")
run("go test -short -race ./...")
@task
def checkpatch(ctx):
# Generate a diff of all changes on this branch from origin/main
# and look for any added lines with 2 spaces after a period.
try:
lines = run("git diff $(diff -u <(git rev-list --first-parent HEAD) "
" <(git rev-list --first-parent origin/main) "
" | sed -ne 's/^ //p' | head -1)..HEAD | "
" grep '+.*\.\ '")
if len(lines.stdout.strip()) > 0:
raise Exit(message="ERROR: Found changed lines with 2 spaces "
"after a period.")
except UnexpectedExit:
# Will exit non-zero if no double-space-after-period lines are found.
pass
@task(help={
"env": "Specify in which environment to run the linter . Default 'container'. Supported: 'container','host'"
})
def lint(ctx, env="container"):
"""Run linter.
By default, this will run a golangci-lint docker image against the code.
However, in some environments (such as the MetalLB CI), it may be more
convenient to install the golangci-lint binaries on the host. This can be
achieved by running `inv lint --env host`.
"""
version = "1.39.0"
golangci_cmd = "golangci-lint run --timeout 5m0s ./..."
if env == "container":
run("docker run --rm -v $(git rev-parse --show-toplevel):/app -w /app golangci/golangci-lint:v{} {}".format(version, golangci_cmd), echo=True)
elif env == "host":
run("curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v{}".format(version))
run(golangci_cmd)
else:
raise Exit(message="Unsupported linter environment: {}". format(env))
@task(help={
"name": "name of the kind cluster to test (only kind uses).",
"export": "where to export kind logs.",
"kubeconfig": "kubeconfig location. By default, use the kubeconfig from kind.",
"system_namespaces": "comma separated list of Kubernetes system namespaces",
"service_pod_port": "port number that service pods open.",
"skip_docker": "don't use docker command in BGP testing.",
"focus": "the list of arguments to pass into as -ginkgo.focus",
"skip": "the list of arguments to pass into as -ginkgo.skip",
"ipv4_service_range": "a range of IPv4 addresses for MetalLB to use when running in layer2 mode.",
"ipv6_service_range": "a range of IPv6 addresses for MetalLB to use when running in layer2 mode.",
"use_operator": "use operator to update MetalLB configuration",
})
def e2etest(ctx, name="kind", export=None, kubeconfig=None, system_namespaces="kube-system,metallb-system", service_pod_port=80, skip_docker=False, focus="", skip="", ipv4_service_range=None, ipv6_service_range=None, use_operator=False):
"""Run E2E tests against development cluster."""
if skip_docker:
opt_skip_docker = "--skip-docker"
else:
opt_skip_docker = ""
ginkgo_skip = ""
if skip:
ginkgo_skip = "--ginkgo.skip=\"" + skip + "\""
opt_use_operator = ""
if use_operator:
opt_use_operator = "--use-operator"
ginkgo_focus = ""
if focus:
ginkgo_focus = "--ginkgo.focus=\"" + focus + "\""
if kubeconfig is None:
validate_kind_version()
clusters = run("kind get clusters", hide=True).stdout.strip().splitlines()
if name in clusters:
kubeconfig_file = tempfile.NamedTemporaryFile()
kubeconfig = kubeconfig_file.name
run("kind export kubeconfig --name={} --kubeconfig={}".format(name, kubeconfig), pty=True, echo=True)
else:
raise Exit(message="Unable to find cluster named: {}".format(name))
else:
os.environ['KUBECONFIG'] = kubeconfig
namespaces = system_namespaces.replace(' ', '').split(',')
for ns in namespaces:
run("kubectl -n {} wait --for=condition=Ready --all pods --timeout 300s".format(ns), hide=True)
ips_for_containers_v4 = ""
if ipv4_service_range is None:
ipv4_service_range, ips = get_available_ips(4)
ips_for_containers_v4 = "--ips-for-containers-v4=" + ips
ips_for_containers_v6 = ""
if ipv6_service_range is None:
ipv6_service_range, ips = get_available_ips(6)
ips_for_containers_v6 = "--ips-for-containers-v6=" + ips
testrun = run("cd `git rev-parse --show-toplevel`/e2etest &&"
"KUBECONFIG={} go test -timeout 1h {} {} --provider=local --kubeconfig={} --service-pod-port={} {} {} -ipv4-service-range={} -ipv6-service-range={} {} {}".format(kubeconfig, ginkgo_focus, ginkgo_skip, kubeconfig, service_pod_port, ips_for_containers_v4, ips_for_containers_v6, ipv4_service_range, ipv6_service_range, opt_skip_docker, opt_use_operator), warn="True")
if export is not None:
run("kind export logs {}".format(export))
if testrun.failed:
raise Exit(message="E2E tests failed", code=testrun.return_code)
@task
def bumplicense(ctx):
"""Bumps the license header on all go files that have it missing"""
res = run("find . -name '*.go'")
for file in res.stdout.splitlines():
res = run("grep -q License {}".format(file), warn=True)
if not res.ok:
run(r"sed -i '1s/^/\/\/ SPDX-License-Identifier:Apache-2.0\n\n/' " + file)
@task
def verifylicense(ctx):
"""Verifies all files have the corresponding license"""
res = run("find . -name '*.go'", hide="out")
no_license = False
for file in res.stdout.splitlines():
res = run("grep -q License {}".format(file), warn=True)
if not res.ok:
no_license = True
print("{} is missing license".format(file))
if no_license:
raise Exit(message="#### Files with no license found.\n#### Please run ""inv bumplicense"" to add the license header")
|
py | b4006e5def0c0fca7e4c10499572cc0235b9ff80 | # coding: utf-8
"""
Strava API v3
The [Swagger Playground](https://developers.strava.com/playground) is the easiest way to familiarize yourself with the Strava API by submitting HTTP requests and observing the responses before you write any client code. It will show what a response will look like with different endpoints depending on the authorization scope you receive from your athletes. To use the Playground, go to https://www.strava.com/settings/api and change your “Authorization Callback Domain” to developers.strava.com. Please note, we only support Swagger 2.0. There is a known issue where you can only select one scope at a time. For more information, please check the section “client code” at https://developers.strava.com/docs. # noqa: E501
OpenAPI spec version: 3.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from setuptools import setup, find_packages # noqa: H301
NAME = "swagger-client"
VERSION = "1.0.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil"]
setup(
name=NAME,
version=VERSION,
description="Strava API v3",
author_email="",
url="",
keywords=["Swagger", "Strava API v3"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
The [Swagger Playground](https://developers.strava.com/playground) is the easiest way to familiarize yourself with the Strava API by submitting HTTP requests and observing the responses before you write any client code. It will show what a response will look like with different endpoints depending on the authorization scope you receive from your athletes. To use the Playground, go to https://www.strava.com/settings/api and change your “Authorization Callback Domain” to developers.strava.com. Please note, we only support Swagger 2.0. There is a known issue where you can only select one scope at a time. For more information, please check the section “client code” at https://developers.strava.com/docs. # noqa: E501
"""
)
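# Post-install sketch (assumptions: swagger-codegen's default "swagger_client"
# package layout and an OAuth access token obtained elsewhere; the class and method
# names below are illustrative -- check the generated docs/ folder for the real ones):
#   import swagger_client
#   config = swagger_client.Configuration()
#   config.access_token = "YOUR_ACCESS_TOKEN"
#   api = swagger_client.AthletesApi(swagger_client.ApiClient(config))
#   print(api.get_logged_in_athlete())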
|
py | b4006f00ba1179e2abca6e85930ae67eede414ba | sum = 2000 + 19
|
py | b4006f2be4301de73374ec795586f53008d27921 | #!/usr/bin/python3
import window as win
win.Window().mainloop()
|
py | b4007080bb1d3db4e992ad2ae916cfd15836f105 | import inspect
import unittest
from unittest.mock import MagicMock
from notebook.base.handlers import APIHandler
from jupyterlab_bigquery.create_handler.create_handler import _handler, Handlers
class TestHandlers(unittest.TestCase):
def testHandlerDecorator(self):
func = MagicMock()
@_handler("POST", "_test")
def f(args):
func(args)
# Ensure handler is a subclass of APIHandler
self.assertTrue(
APIHandler in inspect.getmro(Handlers.get().get_handlers()["_test"]))
def testHandlerDecoratorGet(self):
func = MagicMock()
@_handler("GET", "_test")
def f(args):
func(args)
Handlers.get().get_handlers()["_test"].get(MagicMock())
func.assert_called()
def testHandlerDecoratorPost(self):
func = MagicMock()
@_handler("POST", "_test")
def f(args):
func(args)
Handlers.get().get_handlers()["_test"].post(MagicMock())
func.assert_called()
if __name__ == "__main__":
unittest.main()
|
py | b40071d38655ebc3d1ef560d3881ee5d5e99f6cc | # -*- coding: utf-8 -*-
"""
countries api module.
"""
from pyrin.api.router.decorators import api
from pyrin.core.enumerations import HTTPMethodEnum
import charma.countries.services as country_services
@api('/countries/<uuid:id>', authenticated=False)
def get(id, **options):
"""
gets country with given id.
it raises an error if country does not exist.
:param uuid.UUID id: country id.
:raises CountryDoesNotExistError: country does not exist error.
:rtype: CountryEntity
"""
return country_services.get(id)
@api('/countries', methods=HTTPMethodEnum.POST, authenticated=False)
def create(name, **options):
"""
creates a new country.
:param str name: country name.
:raises ValidationError: validation error.
:returns: created country id.
:rtype: uuid.UUID
"""
return country_services.create(name, **options)
@api('/countries', authenticated=False)
def find(**filters):
"""
finds countries with given filters.
:keyword str name: country name.
:raises ValidationError: validation error.
:rtype: list[CountryEntity]
"""
return country_services.find(**filters)
@api('/countries/all', authenticated=False)
def get_all(**options):
"""
gets all countries.
:rtype: list[CountryEntity]
"""
return country_services.get_all()
@api('/countries/<uuid:id>', methods=HTTPMethodEnum.DELETE, authenticated=False)
def delete(id, **options):
"""
deletes a country with given id.
:param uuid.UUID id: country id.
:returns: count of deleted items.
:rtype: int
"""
return country_services.delete(id)
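# Route summary (sketch derived from the decorators above; host and auth setup are
# application-specific and not shown here):
#   GET    /countries/<uuid:id>   -> get(id)
#   POST   /countries             -> create(name), e.g. body {"name": "France"}
#   GET    /countries             -> find(**filters), e.g. ?name=France
#   GET    /countries/all         -> get_all()
#   DELETE /countries/<uuid:id>   -> delete(id)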
|
py | b4007242107ecddb8ee83d0a663377564ab22aba | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 5
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ProvidersAdsItem(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'account': 'str',
'allocate_gids': 'bool',
'allocate_uids': 'bool',
'assume_default_domain': 'bool',
'authentication': 'bool',
'check_online_interval': 'int',
'controller_time': 'int',
'create_home_directory': 'bool',
'dns_domain': 'str',
'domain_offline_alerts': 'bool',
'findable_groups': 'list[str]',
'findable_users': 'list[str]',
'groupnet': 'str',
'home_directory_template': 'str',
'ignore_all_trusts': 'bool',
'ignored_trusted_domains': 'list[str]',
'include_trusted_domains': 'list[str]',
'instance': 'str',
'kerberos_hdfs_spn': 'bool',
'kerberos_nfs_spn': 'bool',
'ldap_sign_and_seal': 'bool',
'login_shell': 'str',
'lookup_domains': 'list[str]',
'lookup_groups': 'bool',
'lookup_normalize_groups': 'bool',
'lookup_normalize_users': 'bool',
'lookup_users': 'bool',
'machine_name': 'str',
'machine_password_changes': 'bool',
'machine_password_lifespan': 'int',
'name': 'str',
'node_dc_affinity': 'str',
'node_dc_affinity_timeout': 'int',
'nss_enumeration': 'bool',
'organizational_unit': 'str',
'password': 'str',
'restrict_findable': 'bool',
'sfu_support': 'str',
'store_sfu_mappings': 'bool',
'unfindable_groups': 'list[str]',
'unfindable_users': 'list[str]',
'user': 'str'
}
attribute_map = {
'account': 'account',
'allocate_gids': 'allocate_gids',
'allocate_uids': 'allocate_uids',
'assume_default_domain': 'assume_default_domain',
'authentication': 'authentication',
'check_online_interval': 'check_online_interval',
'controller_time': 'controller_time',
'create_home_directory': 'create_home_directory',
'dns_domain': 'dns_domain',
'domain_offline_alerts': 'domain_offline_alerts',
'findable_groups': 'findable_groups',
'findable_users': 'findable_users',
'groupnet': 'groupnet',
'home_directory_template': 'home_directory_template',
'ignore_all_trusts': 'ignore_all_trusts',
'ignored_trusted_domains': 'ignored_trusted_domains',
'include_trusted_domains': 'include_trusted_domains',
'instance': 'instance',
'kerberos_hdfs_spn': 'kerberos_hdfs_spn',
'kerberos_nfs_spn': 'kerberos_nfs_spn',
'ldap_sign_and_seal': 'ldap_sign_and_seal',
'login_shell': 'login_shell',
'lookup_domains': 'lookup_domains',
'lookup_groups': 'lookup_groups',
'lookup_normalize_groups': 'lookup_normalize_groups',
'lookup_normalize_users': 'lookup_normalize_users',
'lookup_users': 'lookup_users',
'machine_name': 'machine_name',
'machine_password_changes': 'machine_password_changes',
'machine_password_lifespan': 'machine_password_lifespan',
'name': 'name',
'node_dc_affinity': 'node_dc_affinity',
'node_dc_affinity_timeout': 'node_dc_affinity_timeout',
'nss_enumeration': 'nss_enumeration',
'organizational_unit': 'organizational_unit',
'password': 'password',
'restrict_findable': 'restrict_findable',
'sfu_support': 'sfu_support',
'store_sfu_mappings': 'store_sfu_mappings',
'unfindable_groups': 'unfindable_groups',
'unfindable_users': 'unfindable_users',
'user': 'user'
}
def __init__(self, account=None, allocate_gids=None, allocate_uids=None, assume_default_domain=None, authentication=None, check_online_interval=None, controller_time=None, create_home_directory=None, dns_domain=None, domain_offline_alerts=None, findable_groups=None, findable_users=None, groupnet=None, home_directory_template=None, ignore_all_trusts=None, ignored_trusted_domains=None, include_trusted_domains=None, instance=None, kerberos_hdfs_spn=None, kerberos_nfs_spn=None, ldap_sign_and_seal=None, login_shell=None, lookup_domains=None, lookup_groups=None, lookup_normalize_groups=None, lookup_normalize_users=None, lookup_users=None, machine_name=None, machine_password_changes=None, machine_password_lifespan=None, name=None, node_dc_affinity=None, node_dc_affinity_timeout=None, nss_enumeration=None, organizational_unit=None, password=None, restrict_findable=None, sfu_support=None, store_sfu_mappings=None, unfindable_groups=None, unfindable_users=None, user=None): # noqa: E501
"""ProvidersAdsItem - a model defined in Swagger""" # noqa: E501
self._account = None
self._allocate_gids = None
self._allocate_uids = None
self._assume_default_domain = None
self._authentication = None
self._check_online_interval = None
self._controller_time = None
self._create_home_directory = None
self._dns_domain = None
self._domain_offline_alerts = None
self._findable_groups = None
self._findable_users = None
self._groupnet = None
self._home_directory_template = None
self._ignore_all_trusts = None
self._ignored_trusted_domains = None
self._include_trusted_domains = None
self._instance = None
self._kerberos_hdfs_spn = None
self._kerberos_nfs_spn = None
self._ldap_sign_and_seal = None
self._login_shell = None
self._lookup_domains = None
self._lookup_groups = None
self._lookup_normalize_groups = None
self._lookup_normalize_users = None
self._lookup_users = None
self._machine_name = None
self._machine_password_changes = None
self._machine_password_lifespan = None
self._name = None
self._node_dc_affinity = None
self._node_dc_affinity_timeout = None
self._nss_enumeration = None
self._organizational_unit = None
self._password = None
self._restrict_findable = None
self._sfu_support = None
self._store_sfu_mappings = None
self._unfindable_groups = None
self._unfindable_users = None
self._user = None
self.discriminator = None
if account is not None:
self.account = account
if allocate_gids is not None:
self.allocate_gids = allocate_gids
if allocate_uids is not None:
self.allocate_uids = allocate_uids
if assume_default_domain is not None:
self.assume_default_domain = assume_default_domain
if authentication is not None:
self.authentication = authentication
if check_online_interval is not None:
self.check_online_interval = check_online_interval
if controller_time is not None:
self.controller_time = controller_time
if create_home_directory is not None:
self.create_home_directory = create_home_directory
if dns_domain is not None:
self.dns_domain = dns_domain
if domain_offline_alerts is not None:
self.domain_offline_alerts = domain_offline_alerts
if findable_groups is not None:
self.findable_groups = findable_groups
if findable_users is not None:
self.findable_users = findable_users
if groupnet is not None:
self.groupnet = groupnet
if home_directory_template is not None:
self.home_directory_template = home_directory_template
if ignore_all_trusts is not None:
self.ignore_all_trusts = ignore_all_trusts
if ignored_trusted_domains is not None:
self.ignored_trusted_domains = ignored_trusted_domains
if include_trusted_domains is not None:
self.include_trusted_domains = include_trusted_domains
if instance is not None:
self.instance = instance
if kerberos_hdfs_spn is not None:
self.kerberos_hdfs_spn = kerberos_hdfs_spn
if kerberos_nfs_spn is not None:
self.kerberos_nfs_spn = kerberos_nfs_spn
if ldap_sign_and_seal is not None:
self.ldap_sign_and_seal = ldap_sign_and_seal
if login_shell is not None:
self.login_shell = login_shell
if lookup_domains is not None:
self.lookup_domains = lookup_domains
if lookup_groups is not None:
self.lookup_groups = lookup_groups
if lookup_normalize_groups is not None:
self.lookup_normalize_groups = lookup_normalize_groups
if lookup_normalize_users is not None:
self.lookup_normalize_users = lookup_normalize_users
if lookup_users is not None:
self.lookup_users = lookup_users
if machine_name is not None:
self.machine_name = machine_name
if machine_password_changes is not None:
self.machine_password_changes = machine_password_changes
if machine_password_lifespan is not None:
self.machine_password_lifespan = machine_password_lifespan
self.name = name
if node_dc_affinity is not None:
self.node_dc_affinity = node_dc_affinity
if node_dc_affinity_timeout is not None:
self.node_dc_affinity_timeout = node_dc_affinity_timeout
if nss_enumeration is not None:
self.nss_enumeration = nss_enumeration
if organizational_unit is not None:
self.organizational_unit = organizational_unit
self.password = password
if restrict_findable is not None:
self.restrict_findable = restrict_findable
if sfu_support is not None:
self.sfu_support = sfu_support
if store_sfu_mappings is not None:
self.store_sfu_mappings = store_sfu_mappings
if unfindable_groups is not None:
self.unfindable_groups = unfindable_groups
if unfindable_users is not None:
self.unfindable_users = unfindable_users
self.user = user
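# Construction sketch (values are placeholders): name, password and user are
# assigned unconditionally in __init__ above, so a minimal instance looks like
#   ProvidersAdsItem(name="ad.example.com", user="administrator", password="secret")
# every other field is optional and validated by its property setter.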
@property
def account(self):
"""Gets the account of this ProvidersAdsItem. # noqa: E501
Specifies the machine account name when creating a SAM account with Active Directory. The default cluster name is called 'default'. # noqa: E501
:return: The account of this ProvidersAdsItem. # noqa: E501
:rtype: str
"""
return self._account
@account.setter
def account(self, account):
"""Sets the account of this ProvidersAdsItem.
Specifies the machine account name when creating a SAM account with Active Directory. The default cluster name is called 'default'. # noqa: E501
:param account: The account of this ProvidersAdsItem. # noqa: E501
:type: str
"""
if account is not None and len(account) > 255:
raise ValueError("Invalid value for `account`, length must be less than or equal to `255`") # noqa: E501
if account is not None and len(account) < 0:
raise ValueError("Invalid value for `account`, length must be greater than or equal to `0`") # noqa: E501
self._account = account
@property
def allocate_gids(self):
"""Gets the allocate_gids of this ProvidersAdsItem. # noqa: E501
Allocates an ID for an unmapped Active Directory (ADS) group. ADS groups without GIDs can be proactively assigned a GID by the ID mapper. If the ID mapper option is disabled, GIDs are not proactively assigned, and when a primary group for a user does not include a GID, the system may allocate one. # noqa: E501
:return: The allocate_gids of this ProvidersAdsItem. # noqa: E501
:rtype: bool
"""
return self._allocate_gids
@allocate_gids.setter
def allocate_gids(self, allocate_gids):
"""Sets the allocate_gids of this ProvidersAdsItem.
Allocates an ID for an unmapped Active Directory (ADS) group. ADS groups without GIDs can be proactively assigned a GID by the ID mapper. If the ID mapper option is disabled, GIDs are not proactively assigned, and when a primary group for a user does not include a GID, the system may allocate one. # noqa: E501
:param allocate_gids: The allocate_gids of this ProvidersAdsItem. # noqa: E501
:type: bool
"""
self._allocate_gids = allocate_gids
@property
def allocate_uids(self):
"""Gets the allocate_uids of this ProvidersAdsItem. # noqa: E501
Allocates a user ID for an unmapped Active Directory (ADS) user. ADS users without UIDs can be proactively assigned a UID by the ID mapper. If the ID mapper option is disabled, UIDs are not proactively assigned, and when an identity for a user does not include a UID, the system may allocate one. # noqa: E501
:return: The allocate_uids of this ProvidersAdsItem. # noqa: E501
:rtype: bool
"""
return self._allocate_uids
@allocate_uids.setter
def allocate_uids(self, allocate_uids):
"""Sets the allocate_uids of this ProvidersAdsItem.
Allocates a user ID for an unmapped Active Directory (ADS) user. ADS users without UIDs can be proactively assigned a UID by the ID mapper. If the ID mapper option is disabled, UIDs are not proactively assigned, and when an identity for a user does not include a UID, the system may allocate one. # noqa: E501
:param allocate_uids: The allocate_uids of this ProvidersAdsItem. # noqa: E501
:type: bool
"""
self._allocate_uids = allocate_uids
@property
def assume_default_domain(self):
"""Gets the assume_default_domain of this ProvidersAdsItem. # noqa: E501
Enables lookup of unqualified user names in the primary domain. # noqa: E501
:return: The assume_default_domain of this ProvidersAdsItem. # noqa: E501
:rtype: bool
"""
return self._assume_default_domain
@assume_default_domain.setter
def assume_default_domain(self, assume_default_domain):
"""Sets the assume_default_domain of this ProvidersAdsItem.
Enables lookup of unqualified user names in the primary domain. # noqa: E501
:param assume_default_domain: The assume_default_domain of this ProvidersAdsItem. # noqa: E501
:type: bool
"""
self._assume_default_domain = assume_default_domain
@property
def authentication(self):
"""Gets the authentication of this ProvidersAdsItem. # noqa: E501
Enables authentication and identity management through the authentication provider. # noqa: E501
:return: The authentication of this ProvidersAdsItem. # noqa: E501
:rtype: bool
"""
return self._authentication
@authentication.setter
def authentication(self, authentication):
"""Sets the authentication of this ProvidersAdsItem.
Enables authentication and identity management through the authentication provider. # noqa: E501
:param authentication: The authentication of this ProvidersAdsItem. # noqa: E501
:type: bool
"""
self._authentication = authentication
@property
def check_online_interval(self):
"""Gets the check_online_interval of this ProvidersAdsItem. # noqa: E501
Specifies the time in seconds between provider online checks. # noqa: E501
:return: The check_online_interval of this ProvidersAdsItem. # noqa: E501
:rtype: int
"""
return self._check_online_interval
@check_online_interval.setter
def check_online_interval(self, check_online_interval):
"""Sets the check_online_interval of this ProvidersAdsItem.
Specifies the time in seconds between provider online checks. # noqa: E501
:param check_online_interval: The check_online_interval of this ProvidersAdsItem. # noqa: E501
:type: int
"""
if check_online_interval is not None and check_online_interval > 86400: # noqa: E501
raise ValueError("Invalid value for `check_online_interval`, must be a value less than or equal to `86400`") # noqa: E501
if check_online_interval is not None and check_online_interval < 60: # noqa: E501
raise ValueError("Invalid value for `check_online_interval`, must be a value greater than or equal to `60`") # noqa: E501
self._check_online_interval = check_online_interval
@property
def controller_time(self):
"""Gets the controller_time of this ProvidersAdsItem. # noqa: E501
Specifies the current time for the domain controllers. # noqa: E501
:return: The controller_time of this ProvidersAdsItem. # noqa: E501
:rtype: int
"""
return self._controller_time
@controller_time.setter
def controller_time(self, controller_time):
"""Sets the controller_time of this ProvidersAdsItem.
Specifies the current time for the domain controllers. # noqa: E501
:param controller_time: The controller_time of this ProvidersAdsItem. # noqa: E501
:type: int
"""
if controller_time is not None and controller_time > 4294967295: # noqa: E501
raise ValueError("Invalid value for `controller_time`, must be a value less than or equal to `4294967295`") # noqa: E501
if controller_time is not None and controller_time < 0: # noqa: E501
raise ValueError("Invalid value for `controller_time`, must be a value greater than or equal to `0`") # noqa: E501
self._controller_time = controller_time
@property
def create_home_directory(self):
"""Gets the create_home_directory of this ProvidersAdsItem. # noqa: E501
Automatically creates a home directory on the first login. # noqa: E501
:return: The create_home_directory of this ProvidersAdsItem. # noqa: E501
:rtype: bool
"""
return self._create_home_directory
@create_home_directory.setter
def create_home_directory(self, create_home_directory):
"""Sets the create_home_directory of this ProvidersAdsItem.
Automatically creates a home directory on the first login. # noqa: E501
:param create_home_directory: The create_home_directory of this ProvidersAdsItem. # noqa: E501
:type: bool
"""
self._create_home_directory = create_home_directory
@property
def dns_domain(self):
"""Gets the dns_domain of this ProvidersAdsItem. # noqa: E501
Specifies the DNS search domain. Set this parameter if the DNS search domain has a unique name or address. # noqa: E501
:return: The dns_domain of this ProvidersAdsItem. # noqa: E501
:rtype: str
"""
return self._dns_domain
@dns_domain.setter
def dns_domain(self, dns_domain):
"""Sets the dns_domain of this ProvidersAdsItem.
Specifies the DNS search domain. Set this parameter if the DNS search domain has a unique name or address. # noqa: E501
:param dns_domain: The dns_domain of this ProvidersAdsItem. # noqa: E501
:type: str
"""
if dns_domain is not None and len(dns_domain) > 255:
raise ValueError("Invalid value for `dns_domain`, length must be less than or equal to `255`") # noqa: E501
if dns_domain is not None and len(dns_domain) < 2:
raise ValueError("Invalid value for `dns_domain`, length must be greater than or equal to `2`") # noqa: E501
self._dns_domain = dns_domain
@property
def domain_offline_alerts(self):
"""Gets the domain_offline_alerts of this ProvidersAdsItem. # noqa: E501
Sends an alert if the domain goes offline. # noqa: E501
:return: The domain_offline_alerts of this ProvidersAdsItem. # noqa: E501
:rtype: bool
"""
return self._domain_offline_alerts
@domain_offline_alerts.setter
def domain_offline_alerts(self, domain_offline_alerts):
"""Sets the domain_offline_alerts of this ProvidersAdsItem.
Sends an alert if the domain goes offline. # noqa: E501
:param domain_offline_alerts: The domain_offline_alerts of this ProvidersAdsItem. # noqa: E501
:type: bool
"""
self._domain_offline_alerts = domain_offline_alerts
@property
def findable_groups(self):
"""Gets the findable_groups of this ProvidersAdsItem. # noqa: E501
Sets list of groups that can be resolved. # noqa: E501
:return: The findable_groups of this ProvidersAdsItem. # noqa: E501
:rtype: list[str]
"""
return self._findable_groups
@findable_groups.setter
def findable_groups(self, findable_groups):
"""Sets the findable_groups of this ProvidersAdsItem.
Sets list of groups that can be resolved. # noqa: E501
:param findable_groups: The findable_groups of this ProvidersAdsItem. # noqa: E501
:type: list[str]
"""
self._findable_groups = findable_groups
@property
def findable_users(self):
"""Gets the findable_users of this ProvidersAdsItem. # noqa: E501
Sets list of users that can be resolved. # noqa: E501
:return: The findable_users of this ProvidersAdsItem. # noqa: E501
:rtype: list[str]
"""
return self._findable_users
@findable_users.setter
def findable_users(self, findable_users):
"""Sets the findable_users of this ProvidersAdsItem.
Sets list of users that can be resolved. # noqa: E501
:param findable_users: The findable_users of this ProvidersAdsItem. # noqa: E501
:type: list[str]
"""
self._findable_users = findable_users
@property
def groupnet(self):
"""Gets the groupnet of this ProvidersAdsItem. # noqa: E501
Groupnet identifier. # noqa: E501
:return: The groupnet of this ProvidersAdsItem. # noqa: E501
:rtype: str
"""
return self._groupnet
@groupnet.setter
def groupnet(self, groupnet):
"""Sets the groupnet of this ProvidersAdsItem.
Groupnet identifier. # noqa: E501
:param groupnet: The groupnet of this ProvidersAdsItem. # noqa: E501
:type: str
"""
if groupnet is not None and len(groupnet) > 255:
raise ValueError("Invalid value for `groupnet`, length must be less than or equal to `255`") # noqa: E501
if groupnet is not None and len(groupnet) < 0:
raise ValueError("Invalid value for `groupnet`, length must be greater than or equal to `0`") # noqa: E501
self._groupnet = groupnet
@property
def home_directory_template(self):
"""Gets the home_directory_template of this ProvidersAdsItem. # noqa: E501
Specifies the path to the home directory template. # noqa: E501
:return: The home_directory_template of this ProvidersAdsItem. # noqa: E501
:rtype: str
"""
return self._home_directory_template
@home_directory_template.setter
def home_directory_template(self, home_directory_template):
"""Sets the home_directory_template of this ProvidersAdsItem.
Specifies the path to the home directory template. # noqa: E501
:param home_directory_template: The home_directory_template of this ProvidersAdsItem. # noqa: E501
:type: str
"""
if home_directory_template is not None and len(home_directory_template) > 4096:
raise ValueError("Invalid value for `home_directory_template`, length must be less than or equal to `4096`") # noqa: E501
if home_directory_template is not None and len(home_directory_template) < 0:
raise ValueError("Invalid value for `home_directory_template`, length must be greater than or equal to `0`") # noqa: E501
self._home_directory_template = home_directory_template
@property
def ignore_all_trusts(self):
"""Gets the ignore_all_trusts of this ProvidersAdsItem. # noqa: E501
If set to true, ignores all trusted domains. # noqa: E501
:return: The ignore_all_trusts of this ProvidersAdsItem. # noqa: E501
:rtype: bool
"""
return self._ignore_all_trusts
@ignore_all_trusts.setter
def ignore_all_trusts(self, ignore_all_trusts):
"""Sets the ignore_all_trusts of this ProvidersAdsItem.
If set to true, ignores all trusted domains. # noqa: E501
:param ignore_all_trusts: The ignore_all_trusts of this ProvidersAdsItem. # noqa: E501
:type: bool
"""
self._ignore_all_trusts = ignore_all_trusts
@property
def ignored_trusted_domains(self):
"""Gets the ignored_trusted_domains of this ProvidersAdsItem. # noqa: E501
Includes trusted domains when 'ignore_all_trusts' is set to false. # noqa: E501
:return: The ignored_trusted_domains of this ProvidersAdsItem. # noqa: E501
:rtype: list[str]
"""
return self._ignored_trusted_domains
@ignored_trusted_domains.setter
def ignored_trusted_domains(self, ignored_trusted_domains):
"""Sets the ignored_trusted_domains of this ProvidersAdsItem.
Includes trusted domains when 'ignore_all_trusts' is set to false. # noqa: E501
:param ignored_trusted_domains: The ignored_trusted_domains of this ProvidersAdsItem. # noqa: E501
:type: list[str]
"""
self._ignored_trusted_domains = ignored_trusted_domains
@property
def include_trusted_domains(self):
"""Gets the include_trusted_domains of this ProvidersAdsItem. # noqa: E501
Includes trusted domains when 'ignore_all_trusts' is set to true. # noqa: E501
:return: The include_trusted_domains of this ProvidersAdsItem. # noqa: E501
:rtype: list[str]
"""
return self._include_trusted_domains
@include_trusted_domains.setter
def include_trusted_domains(self, include_trusted_domains):
"""Sets the include_trusted_domains of this ProvidersAdsItem.
Includes trusted domains when 'ignore_all_trusts' is set to true. # noqa: E501
:param include_trusted_domains: The include_trusted_domains of this ProvidersAdsItem. # noqa: E501
:type: list[str]
"""
self._include_trusted_domains = include_trusted_domains
@property
def instance(self):
"""Gets the instance of this ProvidersAdsItem. # noqa: E501
Specifies Active Directory provider instance. # noqa: E501
:return: The instance of this ProvidersAdsItem. # noqa: E501
:rtype: str
"""
return self._instance
@instance.setter
def instance(self, instance):
"""Sets the instance of this ProvidersAdsItem.
Specifies Active Directory provider instance. # noqa: E501
:param instance: The instance of this ProvidersAdsItem. # noqa: E501
:type: str
"""
if instance is not None and len(instance) > 255:
raise ValueError("Invalid value for `instance`, length must be less than or equal to `255`") # noqa: E501
if instance is not None and len(instance) < 0:
raise ValueError("Invalid value for `instance`, length must be greater than or equal to `0`") # noqa: E501
self._instance = instance
@property
def kerberos_hdfs_spn(self):
"""Gets the kerberos_hdfs_spn of this ProvidersAdsItem. # noqa: E501
Determines if connecting through HDFS with Kerberos. # noqa: E501
:return: The kerberos_hdfs_spn of this ProvidersAdsItem. # noqa: E501
:rtype: bool
"""
return self._kerberos_hdfs_spn
@kerberos_hdfs_spn.setter
def kerberos_hdfs_spn(self, kerberos_hdfs_spn):
"""Sets the kerberos_hdfs_spn of this ProvidersAdsItem.
Determines if connecting through HDFS with Kerberos. # noqa: E501
:param kerberos_hdfs_spn: The kerberos_hdfs_spn of this ProvidersAdsItem. # noqa: E501
:type: bool
"""
self._kerberos_hdfs_spn = kerberos_hdfs_spn
@property
def kerberos_nfs_spn(self):
"""Gets the kerberos_nfs_spn of this ProvidersAdsItem. # noqa: E501
Determines if connecting through NFS with Kerberos. # noqa: E501
:return: The kerberos_nfs_spn of this ProvidersAdsItem. # noqa: E501
:rtype: bool
"""
return self._kerberos_nfs_spn
@kerberos_nfs_spn.setter
def kerberos_nfs_spn(self, kerberos_nfs_spn):
"""Sets the kerberos_nfs_spn of this ProvidersAdsItem.
Determines if connecting through NFS with Kerberos. # noqa: E501
:param kerberos_nfs_spn: The kerberos_nfs_spn of this ProvidersAdsItem. # noqa: E501
:type: bool
"""
self._kerberos_nfs_spn = kerberos_nfs_spn
@property
def ldap_sign_and_seal(self):
"""Gets the ldap_sign_and_seal of this ProvidersAdsItem. # noqa: E501
Enables encryption and signing on LDAP requests. # noqa: E501
:return: The ldap_sign_and_seal of this ProvidersAdsItem. # noqa: E501
:rtype: bool
"""
return self._ldap_sign_and_seal
@ldap_sign_and_seal.setter
def ldap_sign_and_seal(self, ldap_sign_and_seal):
"""Sets the ldap_sign_and_seal of this ProvidersAdsItem.
Enables encryption and signing on LDAP requests. # noqa: E501
:param ldap_sign_and_seal: The ldap_sign_and_seal of this ProvidersAdsItem. # noqa: E501
:type: bool
"""
self._ldap_sign_and_seal = ldap_sign_and_seal
@property
def login_shell(self):
"""Gets the login_shell of this ProvidersAdsItem. # noqa: E501
Specifies the login shell path. # noqa: E501
:return: The login_shell of this ProvidersAdsItem. # noqa: E501
:rtype: str
"""
return self._login_shell
@login_shell.setter
def login_shell(self, login_shell):
"""Sets the login_shell of this ProvidersAdsItem.
Specifies the login shell path. # noqa: E501
:param login_shell: The login_shell of this ProvidersAdsItem. # noqa: E501
:type: str
"""
if login_shell is not None and len(login_shell) > 4096:
raise ValueError("Invalid value for `login_shell`, length must be less than or equal to `4096`") # noqa: E501
if login_shell is not None and len(login_shell) < 0:
raise ValueError("Invalid value for `login_shell`, length must be greater than or equal to `0`") # noqa: E501
self._login_shell = login_shell
@property
def lookup_domains(self):
"""Gets the lookup_domains of this ProvidersAdsItem. # noqa: E501
Limits user and group lookups to the specified domains. # noqa: E501
:return: The lookup_domains of this ProvidersAdsItem. # noqa: E501
:rtype: list[str]
"""
return self._lookup_domains
@lookup_domains.setter
def lookup_domains(self, lookup_domains):
"""Sets the lookup_domains of this ProvidersAdsItem.
Limits user and group lookups to the specified domains. # noqa: E501
:param lookup_domains: The lookup_domains of this ProvidersAdsItem. # noqa: E501
:type: list[str]
"""
self._lookup_domains = lookup_domains
@property
def lookup_groups(self):
"""Gets the lookup_groups of this ProvidersAdsItem. # noqa: E501
Looks up AD groups in other providers before allocating a group ID. # noqa: E501
:return: The lookup_groups of this ProvidersAdsItem. # noqa: E501
:rtype: bool
"""
return self._lookup_groups
@lookup_groups.setter
def lookup_groups(self, lookup_groups):
"""Sets the lookup_groups of this ProvidersAdsItem.
Looks up AD groups in other providers before allocating a group ID. # noqa: E501
:param lookup_groups: The lookup_groups of this ProvidersAdsItem. # noqa: E501
:type: bool
"""
self._lookup_groups = lookup_groups
@property
def lookup_normalize_groups(self):
"""Gets the lookup_normalize_groups of this ProvidersAdsItem. # noqa: E501
Normalizes AD group names to lowercase before look up. # noqa: E501
:return: The lookup_normalize_groups of this ProvidersAdsItem. # noqa: E501
:rtype: bool
"""
return self._lookup_normalize_groups
@lookup_normalize_groups.setter
def lookup_normalize_groups(self, lookup_normalize_groups):
"""Sets the lookup_normalize_groups of this ProvidersAdsItem.
Normalizes AD group names to lowercase before look up. # noqa: E501
:param lookup_normalize_groups: The lookup_normalize_groups of this ProvidersAdsItem. # noqa: E501
:type: bool
"""
self._lookup_normalize_groups = lookup_normalize_groups
@property
def lookup_normalize_users(self):
"""Gets the lookup_normalize_users of this ProvidersAdsItem. # noqa: E501
        Normalizes AD user names to lowercase before look up. # noqa: E501
:return: The lookup_normalize_users of this ProvidersAdsItem. # noqa: E501
:rtype: bool
"""
return self._lookup_normalize_users
@lookup_normalize_users.setter
def lookup_normalize_users(self, lookup_normalize_users):
"""Sets the lookup_normalize_users of this ProvidersAdsItem.
        Normalizes AD user names to lowercase before look up. # noqa: E501
:param lookup_normalize_users: The lookup_normalize_users of this ProvidersAdsItem. # noqa: E501
:type: bool
"""
self._lookup_normalize_users = lookup_normalize_users
@property
def lookup_users(self):
"""Gets the lookup_users of this ProvidersAdsItem. # noqa: E501
Looks up AD users in other providers before allocating a user ID. # noqa: E501
:return: The lookup_users of this ProvidersAdsItem. # noqa: E501
:rtype: bool
"""
return self._lookup_users
@lookup_users.setter
def lookup_users(self, lookup_users):
"""Sets the lookup_users of this ProvidersAdsItem.
Looks up AD users in other providers before allocating a user ID. # noqa: E501
:param lookup_users: The lookup_users of this ProvidersAdsItem. # noqa: E501
:type: bool
"""
self._lookup_users = lookup_users
@property
def machine_name(self):
"""Gets the machine_name of this ProvidersAdsItem. # noqa: E501
        Specifies the name to join AD as. # noqa: E501
:return: The machine_name of this ProvidersAdsItem. # noqa: E501
:rtype: str
"""
return self._machine_name
@machine_name.setter
def machine_name(self, machine_name):
"""Sets the machine_name of this ProvidersAdsItem.
        Specifies the name to join AD as. # noqa: E501
:param machine_name: The machine_name of this ProvidersAdsItem. # noqa: E501
:type: str
"""
if machine_name is not None and len(machine_name) > 255:
raise ValueError("Invalid value for `machine_name`, length must be less than or equal to `255`") # noqa: E501
if machine_name is not None and len(machine_name) < 0:
raise ValueError("Invalid value for `machine_name`, length must be greater than or equal to `0`") # noqa: E501
self._machine_name = machine_name
@property
def machine_password_changes(self):
"""Gets the machine_password_changes of this ProvidersAdsItem. # noqa: E501
Enables periodic changes of the machine password for security. # noqa: E501
:return: The machine_password_changes of this ProvidersAdsItem. # noqa: E501
:rtype: bool
"""
return self._machine_password_changes
@machine_password_changes.setter
def machine_password_changes(self, machine_password_changes):
"""Sets the machine_password_changes of this ProvidersAdsItem.
Enables periodic changes of the machine password for security. # noqa: E501
:param machine_password_changes: The machine_password_changes of this ProvidersAdsItem. # noqa: E501
:type: bool
"""
self._machine_password_changes = machine_password_changes
@property
def machine_password_lifespan(self):
"""Gets the machine_password_lifespan of this ProvidersAdsItem. # noqa: E501
        Sets the maximum age of the machine password, in seconds. # noqa: E501
:return: The machine_password_lifespan of this ProvidersAdsItem. # noqa: E501
:rtype: int
"""
return self._machine_password_lifespan
@machine_password_lifespan.setter
def machine_password_lifespan(self, machine_password_lifespan):
"""Sets the machine_password_lifespan of this ProvidersAdsItem.
        Sets the maximum age of the machine password, in seconds. # noqa: E501
:param machine_password_lifespan: The machine_password_lifespan of this ProvidersAdsItem. # noqa: E501
:type: int
"""
if machine_password_lifespan is not None and machine_password_lifespan > 31536000: # noqa: E501
raise ValueError("Invalid value for `machine_password_lifespan`, must be a value less than or equal to `31536000`") # noqa: E501
if machine_password_lifespan is not None and machine_password_lifespan < 3600: # noqa: E501
raise ValueError("Invalid value for `machine_password_lifespan`, must be a value greater than or equal to `3600`") # noqa: E501
self._machine_password_lifespan = machine_password_lifespan
@property
def name(self):
"""Gets the name of this ProvidersAdsItem. # noqa: E501
Specifies the Active Directory provider name. # noqa: E501
:return: The name of this ProvidersAdsItem. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ProvidersAdsItem.
Specifies the Active Directory provider name. # noqa: E501
:param name: The name of this ProvidersAdsItem. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
if name is not None and len(name) > 64:
raise ValueError("Invalid value for `name`, length must be less than or equal to `64`") # noqa: E501
if name is not None and len(name) < 1:
raise ValueError("Invalid value for `name`, length must be greater than or equal to `1`") # noqa: E501
self._name = name
@property
def node_dc_affinity(self):
"""Gets the node_dc_affinity of this ProvidersAdsItem. # noqa: E501
Specifies the domain controller for which the node has affinity. # noqa: E501
:return: The node_dc_affinity of this ProvidersAdsItem. # noqa: E501
:rtype: str
"""
return self._node_dc_affinity
@node_dc_affinity.setter
def node_dc_affinity(self, node_dc_affinity):
"""Sets the node_dc_affinity of this ProvidersAdsItem.
Specifies the domain controller for which the node has affinity. # noqa: E501
:param node_dc_affinity: The node_dc_affinity of this ProvidersAdsItem. # noqa: E501
:type: str
"""
if node_dc_affinity is not None and len(node_dc_affinity) > 255:
raise ValueError("Invalid value for `node_dc_affinity`, length must be less than or equal to `255`") # noqa: E501
if node_dc_affinity is not None and len(node_dc_affinity) < 0:
raise ValueError("Invalid value for `node_dc_affinity`, length must be greater than or equal to `0`") # noqa: E501
self._node_dc_affinity = node_dc_affinity
@property
def node_dc_affinity_timeout(self):
"""Gets the node_dc_affinity_timeout of this ProvidersAdsItem. # noqa: E501
Specifies the timeout for the domain controller for which the local node has affinity. # noqa: E501
:return: The node_dc_affinity_timeout of this ProvidersAdsItem. # noqa: E501
:rtype: int
"""
return self._node_dc_affinity_timeout
@node_dc_affinity_timeout.setter
def node_dc_affinity_timeout(self, node_dc_affinity_timeout):
"""Sets the node_dc_affinity_timeout of this ProvidersAdsItem.
Specifies the timeout for the domain controller for which the local node has affinity. # noqa: E501
:param node_dc_affinity_timeout: The node_dc_affinity_timeout of this ProvidersAdsItem. # noqa: E501
:type: int
"""
if node_dc_affinity_timeout is not None and node_dc_affinity_timeout > 4294967295: # noqa: E501
raise ValueError("Invalid value for `node_dc_affinity_timeout`, must be a value less than or equal to `4294967295`") # noqa: E501
if node_dc_affinity_timeout is not None and node_dc_affinity_timeout < 0: # noqa: E501
raise ValueError("Invalid value for `node_dc_affinity_timeout`, must be a value greater than or equal to `0`") # noqa: E501
self._node_dc_affinity_timeout = node_dc_affinity_timeout
@property
def nss_enumeration(self):
"""Gets the nss_enumeration of this ProvidersAdsItem. # noqa: E501
Enables the Active Directory provider to respond to 'getpwent' and 'getgrent' requests. # noqa: E501
:return: The nss_enumeration of this ProvidersAdsItem. # noqa: E501
:rtype: bool
"""
return self._nss_enumeration
@nss_enumeration.setter
def nss_enumeration(self, nss_enumeration):
"""Sets the nss_enumeration of this ProvidersAdsItem.
Enables the Active Directory provider to respond to 'getpwent' and 'getgrent' requests. # noqa: E501
:param nss_enumeration: The nss_enumeration of this ProvidersAdsItem. # noqa: E501
:type: bool
"""
self._nss_enumeration = nss_enumeration
@property
def organizational_unit(self):
"""Gets the organizational_unit of this ProvidersAdsItem. # noqa: E501
Specifies the organizational unit. # noqa: E501
:return: The organizational_unit of this ProvidersAdsItem. # noqa: E501
:rtype: str
"""
return self._organizational_unit
@organizational_unit.setter
def organizational_unit(self, organizational_unit):
"""Sets the organizational_unit of this ProvidersAdsItem.
Specifies the organizational unit. # noqa: E501
:param organizational_unit: The organizational_unit of this ProvidersAdsItem. # noqa: E501
:type: str
"""
if organizational_unit is not None and len(organizational_unit) > 255:
raise ValueError("Invalid value for `organizational_unit`, length must be less than or equal to `255`") # noqa: E501
if organizational_unit is not None and len(organizational_unit) < 0:
raise ValueError("Invalid value for `organizational_unit`, length must be greater than or equal to `0`") # noqa: E501
self._organizational_unit = organizational_unit
@property
def password(self):
"""Gets the password of this ProvidersAdsItem. # noqa: E501
Specifies the password used during domain join. # noqa: E501
:return: The password of this ProvidersAdsItem. # noqa: E501
:rtype: str
"""
return self._password
@password.setter
def password(self, password):
"""Sets the password of this ProvidersAdsItem.
Specifies the password used during domain join. # noqa: E501
:param password: The password of this ProvidersAdsItem. # noqa: E501
:type: str
"""
if password is None:
raise ValueError("Invalid value for `password`, must not be `None`") # noqa: E501
if password is not None and len(password) > 255:
raise ValueError("Invalid value for `password`, length must be less than or equal to `255`") # noqa: E501
if password is not None and len(password) < 0:
raise ValueError("Invalid value for `password`, length must be greater than or equal to `0`") # noqa: E501
self._password = password
@property
def restrict_findable(self):
"""Gets the restrict_findable of this ProvidersAdsItem. # noqa: E501
        Checks the provider for filtered lists of findable and unfindable users and groups. # noqa: E501
:return: The restrict_findable of this ProvidersAdsItem. # noqa: E501
:rtype: bool
"""
return self._restrict_findable
@restrict_findable.setter
def restrict_findable(self, restrict_findable):
"""Sets the restrict_findable of this ProvidersAdsItem.
        Checks the provider for filtered lists of findable and unfindable users and groups. # noqa: E501
:param restrict_findable: The restrict_findable of this ProvidersAdsItem. # noqa: E501
:type: bool
"""
self._restrict_findable = restrict_findable
@property
def sfu_support(self):
"""Gets the sfu_support of this ProvidersAdsItem. # noqa: E501
Specifies whether to support RFC 2307 attributes on ADS domain controllers. # noqa: E501
:return: The sfu_support of this ProvidersAdsItem. # noqa: E501
:rtype: str
"""
return self._sfu_support
@sfu_support.setter
def sfu_support(self, sfu_support):
"""Sets the sfu_support of this ProvidersAdsItem.
Specifies whether to support RFC 2307 attributes on ADS domain controllers. # noqa: E501
:param sfu_support: The sfu_support of this ProvidersAdsItem. # noqa: E501
:type: str
"""
allowed_values = ["none", "rfc2307"] # noqa: E501
if sfu_support not in allowed_values:
raise ValueError(
"Invalid value for `sfu_support` ({0}), must be one of {1}" # noqa: E501
.format(sfu_support, allowed_values)
)
self._sfu_support = sfu_support
@property
def store_sfu_mappings(self):
"""Gets the store_sfu_mappings of this ProvidersAdsItem. # noqa: E501
Stores SFU mappings permanently in the ID mapper. # noqa: E501
:return: The store_sfu_mappings of this ProvidersAdsItem. # noqa: E501
:rtype: bool
"""
return self._store_sfu_mappings
@store_sfu_mappings.setter
def store_sfu_mappings(self, store_sfu_mappings):
"""Sets the store_sfu_mappings of this ProvidersAdsItem.
Stores SFU mappings permanently in the ID mapper. # noqa: E501
:param store_sfu_mappings: The store_sfu_mappings of this ProvidersAdsItem. # noqa: E501
:type: bool
"""
self._store_sfu_mappings = store_sfu_mappings
@property
def unfindable_groups(self):
"""Gets the unfindable_groups of this ProvidersAdsItem. # noqa: E501
Specifies groups that cannot be resolved by the provider. # noqa: E501
:return: The unfindable_groups of this ProvidersAdsItem. # noqa: E501
:rtype: list[str]
"""
return self._unfindable_groups
@unfindable_groups.setter
def unfindable_groups(self, unfindable_groups):
"""Sets the unfindable_groups of this ProvidersAdsItem.
Specifies groups that cannot be resolved by the provider. # noqa: E501
:param unfindable_groups: The unfindable_groups of this ProvidersAdsItem. # noqa: E501
:type: list[str]
"""
self._unfindable_groups = unfindable_groups
@property
def unfindable_users(self):
"""Gets the unfindable_users of this ProvidersAdsItem. # noqa: E501
Specifies users that cannot be resolved by the provider. # noqa: E501
:return: The unfindable_users of this ProvidersAdsItem. # noqa: E501
:rtype: list[str]
"""
return self._unfindable_users
@unfindable_users.setter
def unfindable_users(self, unfindable_users):
"""Sets the unfindable_users of this ProvidersAdsItem.
Specifies users that cannot be resolved by the provider. # noqa: E501
:param unfindable_users: The unfindable_users of this ProvidersAdsItem. # noqa: E501
:type: list[str]
"""
self._unfindable_users = unfindable_users
@property
def user(self):
"""Gets the user of this ProvidersAdsItem. # noqa: E501
Specifies the user name that has permission to join a machine to the given domain. # noqa: E501
:return: The user of this ProvidersAdsItem. # noqa: E501
:rtype: str
"""
return self._user
@user.setter
def user(self, user):
"""Sets the user of this ProvidersAdsItem.
Specifies the user name that has permission to join a machine to the given domain. # noqa: E501
:param user: The user of this ProvidersAdsItem. # noqa: E501
:type: str
"""
if user is None:
raise ValueError("Invalid value for `user`, must not be `None`") # noqa: E501
if user is not None and len(user) > 256:
raise ValueError("Invalid value for `user`, length must be less than or equal to `256`") # noqa: E501
if user is not None and len(user) < 0:
raise ValueError("Invalid value for `user`, length must be greater than or equal to `0`") # noqa: E501
self._user = user
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
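        # Walk the generated swagger_types map and serialize each attribute:
        # lists and dicts are converted element by element, nested models are
        # serialized via their own to_dict(), and plain values pass through.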
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ProvidersAdsItem):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
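# Illustrative usage sketch (not part of the generated model). It assumes an
# already-populated ProvidersAdsItem instance, e.g. one returned by a
# hypothetical generated API client; the constructor signature is defined
# earlier in this module:
#
#     ads = client.get_providers_ads().ads[0]   # hypothetical client call
#     print(ads.to_str())                       # pretty-printed model dump
#     payload = ads.to_dict()                   # plain dict; nested models are
#                                               # serialized recursively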
|
py | b400733e59c80b27bdb9fc3de6f3236f28f26da1 | import os
import tessy
from setuptools import setup
README = ""
if os.path.isfile("README.md"):
with open("README.md") as f:
README = f.read()
setup(
name="tessy",
version=tessy.VERSION,
author="K4rian",
author_email="[email protected]",
url="https://github.com/k4rian/tessy",
license="MIT",
description="A Python wrapper for Tesseract-OCR.",
long_description=README,
long_description_content_type="text/markdown",
keywords="python tesseract ocr",
packages=["tessy"],
include_package_data=True,
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Operating System :: Microsoft :: Windows",
"Operating System :: Unix",
"Operating System :: MacOS :: MacOS X",
"Programming Language :: Python",
"Programming Language :: Python :: 3.4",
"Topic :: Utilities",
],
)
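# Typical local installation of this package (standard setuptools workflow,
# not specific to tessy):
#     pip install .       # regular install
#     pip install -e .    # editable/development install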
|
py | b400745043ffa978afd25c72005d2a253f46fef3 | from asynctest import TestCase as AsyncTestCase
from asynctest import mock as async_mock
from .....admin.request_context import AdminRequestContext
from .....wallet.base import BaseWallet, DIDInfo
from .. import routes as test_module
TEST_DID = "LjgpST2rjsoxYegQDRm7EL"
class TestV20CredRoutes(AsyncTestCase):
async def setUp(self):
self.session_inject = {}
self.context = AdminRequestContext.test_context(self.session_inject)
self.request_dict = {
"context": self.context,
"outbound_message_router": async_mock.CoroutineMock(),
}
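        # Mimic an aiohttp web request: attribute access is mocked freely, while
        # item access (request["context"], request["outbound_message_router"])
        # is routed through request_dict via the __getitem__ lambda below.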
self.request = async_mock.MagicMock(
app={},
match_info={},
query={},
__getitem__=lambda _, k: self.request_dict[k],
)
async def test_validate_cred_filter_schema(self):
schema = test_module.V20CredFilterSchema()
schema.validate_fields({"indy": {"issuer_did": TEST_DID}})
schema.validate_fields(
{"indy": {"issuer_did": TEST_DID, "schema_version": "1.0"}}
)
schema.validate_fields(
{
"indy": {"issuer_did": TEST_DID},
"dif": {"some_dif_criterion": "..."},
}
)
schema.validate_fields(
{
"indy": {},
"dif": {"some_dif_criterion": "..."},
}
)
with self.assertRaises(test_module.ValidationError):
schema.validate_fields({})
with self.assertRaises(test_module.ValidationError):
schema.validate_fields(["hopeless", "stop"])
with self.assertRaises(test_module.ValidationError):
schema.validate_fields({"veres-one": {"no": "support"}})
async def test_credential_exchange_list(self):
self.request.query = {
"thread_id": "dummy",
"connection_id": "dummy",
"role": "dummy",
"state": "dummy",
}
with async_mock.patch.object(
test_module, "V20CredExRecord", autospec=True
) as mock_cx_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr:
mock_cx_rec.query = async_mock.CoroutineMock(return_value=[mock_cx_rec])
mock_cx_rec.serialize = async_mock.MagicMock(
return_value={"hello": "world"}
)
mock_cred_mgr.return_value.get_detail_record = async_mock.CoroutineMock(
return_value=None
)
with async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
await test_module.credential_exchange_list(self.request)
mock_response.assert_called_once_with(
{
"results": [
{
"cred_ex_record": mock_cx_rec.serialize.return_value,
"indy": None,
"dif": None,
}
]
}
)
async def test_credential_exchange_list_x(self):
self.request.query = {
"thread_id": "dummy",
"connection_id": "dummy",
"role": "dummy",
"state": "dummy",
}
with async_mock.patch.object(
test_module, "V20CredExRecord", autospec=True
) as mock_cx_rec:
mock_cx_rec.connection_id = "conn-123"
mock_cx_rec.thread_id = "conn-123"
mock_cx_rec.query = async_mock.CoroutineMock(
side_effect=test_module.StorageError()
)
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.credential_exchange_list(self.request)
async def test_credential_exchange_retrieve(self):
self.request.match_info = {"cred_ex_id": "dummy"}
with async_mock.patch.object(
test_module, "V20CredExRecord", autospec=True
) as mock_cx_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr:
mock_cx_rec.connection_id = "conn-123"
mock_cx_rec.thread_id = "conn-123"
mock_cx_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_cx_rec.retrieve_by_id.return_value = mock_cx_rec
mock_cx_rec.serialize = async_mock.MagicMock()
mock_cx_rec.serialize.return_value = {"hello": "world"}
mock_cred_mgr.return_value.get_detail_record = async_mock.CoroutineMock(
side_effect=[
async_mock.MagicMock( # indy
serialize=async_mock.MagicMock(return_value={"...": "..."})
),
None, # dif
]
)
with async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
await test_module.credential_exchange_retrieve(self.request)
mock_response.assert_called_once_with(
{
"cred_ex_record": mock_cx_rec.serialize.return_value,
"indy": {"...": "..."},
"dif": None,
}
)
async def test_credential_exchange_retrieve_not_found(self):
self.request.match_info = {"cred_ex_id": "dummy"}
with async_mock.patch.object(
test_module, "V20CredExRecord", autospec=True
) as mock_cx_rec:
mock_cx_rec.connection_id = "conn-123"
mock_cx_rec.thread_id = "conn-123"
mock_cx_rec.retrieve_by_id = async_mock.CoroutineMock(
side_effect=test_module.StorageNotFoundError()
)
with self.assertRaises(test_module.web.HTTPNotFound):
await test_module.credential_exchange_retrieve(self.request)
async def test_credential_exchange_retrieve_x(self):
self.request.match_info = {"cred_ex_id": "dummy"}
with async_mock.patch.object(
test_module, "V20CredExRecord", autospec=True
) as mock_cx_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr:
mock_cx_rec.connection_id = "conn-123"
mock_cx_rec.thread_id = "conn-123"
mock_cx_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_cx_rec.retrieve_by_id.return_value = mock_cx_rec
mock_cx_rec.serialize = async_mock.MagicMock(
side_effect=test_module.BaseModelError()
)
mock_cred_mgr.return_value.get_detail_record = async_mock.CoroutineMock(
return_value=None
)
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.credential_exchange_retrieve(self.request)
async def test_credential_exchange_create(self):
self.request.json = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_connection_record, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr, async_mock.patch.object(
test_module.V20CredPreview, "deserialize", autospec=True
) as mock_cred_preview_deser, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
mock_cred_mgr.return_value.create_offer = async_mock.CoroutineMock()
mock_cred_mgr.return_value.create_offer.return_value = (
async_mock.CoroutineMock(),
async_mock.CoroutineMock(),
)
mock_cx_rec = async_mock.MagicMock()
mock_cred_offer = async_mock.MagicMock()
mock_cred_mgr.return_value.prepare_send.return_value = (
mock_cx_rec,
mock_cred_offer,
)
await test_module.credential_exchange_create(self.request)
mock_response.assert_called_once_with(mock_cx_rec.serialize.return_value)
async def test_credential_exchange_create_x(self):
self.request.json = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_connection_record, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr, async_mock.patch.object(
test_module.V20CredPreview, "deserialize", autospec=True
) as mock_cred_preview_deser, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
mock_cred_mgr.return_value.create_offer = async_mock.CoroutineMock()
mock_cred_mgr.return_value.create_offer.return_value = (
async_mock.CoroutineMock(),
async_mock.CoroutineMock(),
)
mock_cred_mgr.return_value.prepare_send.side_effect = (
test_module.StorageError()
)
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.credential_exchange_create(self.request)
async def test_credential_exchange_create_no_preview(self):
connection_id = "connection-id"
self.request.json = async_mock.CoroutineMock(
return_value={"connection_id": connection_id}
)
with self.assertRaises(test_module.web.HTTPBadRequest) as context:
await test_module.credential_exchange_create(self.request)
assert "credential_preview" in str(context.exception)
async def test_credential_exchange_create_no_filter(self):
connection_id = "connection-id"
self.request.json = async_mock.CoroutineMock(
return_value={
"connection_id": connection_id,
"credential_preview": {
"attributes": [{"name": "hello", "value": "world"}]
},
}
)
with self.assertRaises(test_module.web.HTTPBadRequest) as context:
await test_module.credential_exchange_create(self.request)
assert "Missing filter" in str(context.exception)
async def test_credential_exchange_send(self):
self.request.json = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr, async_mock.patch.object(
test_module.V20CredPreview, "deserialize", autospec=True
) as mock_cred_preview_deser, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
mock_cred_mgr.return_value.create_offer = async_mock.CoroutineMock()
mock_cred_mgr.return_value.create_offer.return_value = (
async_mock.CoroutineMock(),
async_mock.CoroutineMock(),
)
mock_cx_rec = async_mock.MagicMock()
mock_cred_offer = async_mock.MagicMock()
mock_cred_mgr.return_value.prepare_send.return_value = (
mock_cx_rec,
mock_cred_offer,
)
await test_module.credential_exchange_send(self.request)
mock_response.assert_called_once_with(mock_cx_rec.serialize.return_value)
async def test_credential_exchange_send_no_proposal(self):
connection_id = "connection-id"
self.request.json = async_mock.CoroutineMock(
return_value={"connection_id": connection_id}
)
with self.assertRaises(test_module.web.HTTPBadRequest) as context:
await test_module.credential_exchange_send(self.request)
assert "credential_preview" in str(context.exception)
async def test_credential_exchange_send_no_conn_record(self):
connection_id = "connection-id"
preview_spec = {"attributes": [{"name": "attr", "value": "value"}]}
self.request.json = async_mock.CoroutineMock(
return_value={
"connection_id": connection_id,
"credential_preview": preview_spec,
}
)
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr:
# Emulate storage not found (bad connection id)
mock_conn_rec.retrieve_by_id = async_mock.CoroutineMock(
side_effect=test_module.StorageNotFoundError()
)
mock_cred_mgr.return_value.create_offer.return_value = (
async_mock.MagicMock(),
async_mock.MagicMock(),
)
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.credential_exchange_send(self.request)
async def test_credential_exchange_send_not_ready(self):
connection_id = "connection-id"
preview_spec = {"attributes": [{"name": "attr", "value": "value"}]}
self.request.json = async_mock.CoroutineMock(
return_value={
"connection_id": connection_id,
"credential_preview": preview_spec,
"filter": {"indy": {"schema_version": "1.0"}},
}
)
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr:
mock_conn_rec.retrieve_by_id.return_value.is_ready = False
mock_cred_mgr.return_value.create_offer.return_value = (
async_mock.MagicMock(),
async_mock.MagicMock(),
)
with self.assertRaises(test_module.web.HTTPForbidden):
await test_module.credential_exchange_send(self.request)
async def test_credential_exchange_send_x(self):
self.request.json = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr, async_mock.patch.object(
test_module.V20CredPreview, "deserialize", autospec=True
) as mock_cred_preview_deser:
mock_cred_mgr.return_value.create_offer = async_mock.CoroutineMock()
mock_cred_mgr.return_value.create_offer.return_value = (
async_mock.CoroutineMock(),
async_mock.CoroutineMock(),
)
mock_cred_mgr.return_value.prepare_send.side_effect = (
test_module.StorageError()
)
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.credential_exchange_send(self.request)
async def test_credential_exchange_send_proposal(self):
connection_id = "connection-id"
preview_spec = {"attributes": [{"name": "attr", "value": "value"}]}
self.request.json = async_mock.CoroutineMock(
return_value={
"connection_id": connection_id,
"credential_preview": preview_spec,
"filter": {"indy": {"schema_version": "1.0"}},
}
)
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr, async_mock.patch.object(
test_module.V20CredProposal, "deserialize", autospec=True
) as mock_cred_proposal_deser, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
mock_cx_rec = async_mock.MagicMock()
mock_cred_mgr.return_value.create_proposal.return_value = mock_cx_rec
await test_module.credential_exchange_send_proposal(self.request)
mock_response.assert_called_once_with(mock_cx_rec.serialize.return_value)
self.request["outbound_message_router"].assert_awaited_once_with(
mock_cred_proposal_deser.return_value, connection_id=connection_id
)
async def test_credential_exchange_send_proposal_no_filter(self):
connection_id = "connection-id"
self.request.json = async_mock.CoroutineMock(
return_value={
"comment": "comment",
"connection_id": connection_id,
}
)
with self.assertRaises(test_module.web.HTTPBadRequest) as context:
await test_module.credential_exchange_send_proposal(self.request)
assert "Missing filter" in str(context.exception)
async def test_credential_exchange_send_proposal_no_conn_record(self):
self.request.json = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr, async_mock.patch.object(
test_module.V20CredPreview, "deserialize", autospec=True
) as mock_preview_deser:
# Emulate storage not found (bad connection id)
mock_conn_rec.retrieve_by_id = async_mock.CoroutineMock(
side_effect=test_module.StorageNotFoundError()
)
mock_cred_mgr.return_value.create_proposal.return_value = (
async_mock.MagicMock()
)
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.credential_exchange_send_proposal(self.request)
async def test_credential_exchange_send_proposal_deser_x(self):
connection_id = "connection-id"
preview_spec = {"attributes": [{"name": "attr", "value": "value"}]}
self.request.json = async_mock.CoroutineMock(
return_value={
"connection_id": connection_id,
"credential_preview": preview_spec,
"filter": {"indy": {"schema_version": "1.0"}},
}
)
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr, async_mock.patch.object(
test_module.V20CredProposal, "deserialize", autospec=True
) as mock_cred_proposal_deser:
mock_cx_rec = async_mock.MagicMock()
mock_cred_mgr.return_value.create_proposal.return_value = mock_cx_rec
mock_cred_proposal_deser.side_effect = test_module.BaseModelError()
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.credential_exchange_send_proposal(self.request)
async def test_credential_exchange_send_proposal_not_ready(self):
self.request.json = async_mock.CoroutineMock()
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr, async_mock.patch.object(
test_module.V20CredPreview, "deserialize", autospec=True
) as mock_preview_deser:
# Emulate connection not ready
mock_conn_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_conn_rec.retrieve_by_id.return_value.is_ready = False
mock_cred_mgr.return_value.create_proposal.return_value = (
async_mock.MagicMock()
)
with self.assertRaises(test_module.web.HTTPForbidden):
await test_module.credential_exchange_send_proposal(self.request)
async def test_credential_exchange_create_free_offer(self):
self.request.json = async_mock.CoroutineMock(
return_value={
"auto_issue": False,
"connection_id": "dummy",
"credential_preview": {
"attributes": [{"name": "hello", "value": "world"}]
},
"filter": {"indy": {"schema_version": "1.0"}},
}
)
self.context.update_settings({"default_endpoint": "http://1.2.3.4:8081"})
self.session_inject[BaseWallet] = async_mock.MagicMock(
get_local_did=async_mock.CoroutineMock(
return_value=DIDInfo("did", "verkey", {"meta": "data"})
),
get_public_did=async_mock.CoroutineMock(
return_value=DIDInfo("public-did", "verkey", {"meta": "data"})
),
)
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr, async_mock.patch.object(
test_module, "serialize_outofband"
) as mock_seroob, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
mock_cred_mgr.return_value.create_offer = async_mock.CoroutineMock()
mock_cx_rec = async_mock.MagicMock()
mock_cred_mgr.return_value.create_offer.return_value = (
mock_cx_rec,
async_mock.MagicMock(),
)
mock_seroob.return_value = "abc123"
await test_module.credential_exchange_create_free_offer(self.request)
mock_response.assert_called_once_with(
{
"record": mock_cx_rec.serialize.return_value,
"oob_url": "abc123",
}
)
async def test_credential_exchange_create_free_offer_no_filter(self):
self.request.json = async_mock.CoroutineMock(
return_value={
"auto_issue": False,
"connection_id": "dummy",
"credential_preview": {
"attributes": [{"name": "hello", "value": "world"}]
},
}
)
with self.assertRaises(test_module.web.HTTPBadRequest) as context:
await test_module.credential_exchange_create_free_offer(self.request)
assert "Missing filter" in str(context.exception)
async def test_credential_exchange_create_free_offer_no_preview(self):
self.request.json = async_mock.CoroutineMock(
return_value={
"comment": "comment",
"filter": {"indy": {"schema_version": "1.0"}},
}
)
with self.assertRaises(test_module.web.HTTPBadRequest) as context:
await test_module.credential_exchange_create_free_offer(self.request)
assert "Missing credential_preview" in str(context.exception)
async def test_credential_exchange_create_free_offer_retrieve_conn_rec_x(self):
self.request.json = async_mock.CoroutineMock(
return_value={
"auto_issue": False,
"connection_id": "dummy",
"credential_preview": {
"attributes": [{"name": "hello", "value": "world"}]
},
"filter": {"indy": {"schema_version": "1.0"}},
}
)
self.session_inject[BaseWallet] = async_mock.MagicMock(
get_local_did=async_mock.CoroutineMock(
side_effect=test_module.WalletError()
),
)
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec:
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.credential_exchange_create_free_offer(self.request)
async def test_credential_exchange_create_free_offer_no_conn_id(self):
self.request.json = async_mock.CoroutineMock(
return_value={
"auto_issue": False,
"credential_preview": {
"attributes": [{"name": "hello", "value": "world"}]
},
"filter": {"indy": {"schema_version": "1.0"}},
}
)
self.context.update_settings({"default_endpoint": "http://1.2.3.4:8081"})
self.session_inject[BaseWallet] = async_mock.MagicMock(
get_public_did=async_mock.CoroutineMock(
return_value=DIDInfo("public-did", "verkey", {"meta": "data"})
),
get_local_did=async_mock.CoroutineMock(
return_value=DIDInfo("did", "verkey", {"meta": "data"})
),
)
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr, async_mock.patch.object(
test_module, "serialize_outofband"
) as mock_seroob, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
mock_cred_mgr.return_value.create_offer = async_mock.CoroutineMock()
mock_cx_rec = async_mock.MagicMock()
mock_cred_mgr.return_value.create_offer.return_value = (
mock_cx_rec,
async_mock.MagicMock(),
)
mock_seroob.return_value = "abc123"
await test_module.credential_exchange_create_free_offer(self.request)
mock_response.assert_called_once_with(
{
"record": mock_cx_rec.serialize.return_value,
"oob_url": "abc123",
}
)
async def test_credential_exchange_create_free_offer_no_conn_id_no_public_did(self):
self.request.json = async_mock.CoroutineMock(
return_value={
"auto_issue": False,
"credential_preview": {
"attributes": [{"name": "hello", "value": "world"}]
},
"filter": {"indy": {"schema_version": "1.0"}},
}
)
self.context.update_settings({"default_endpoint": "http://1.2.3.4:8081"})
self.session_inject[BaseWallet] = async_mock.MagicMock(
get_public_did=async_mock.CoroutineMock(return_value=None),
)
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.credential_exchange_create_free_offer(self.request)
async def test_credential_exchange_create_free_offer_no_endpoint(self):
self.request.json = async_mock.CoroutineMock(
return_value={
"auto_issue": False,
"credential_preview": {
"attributes": [{"name": "hello", "value": "world"}]
},
"filter": {"indy": {"schema_version": "1.0"}},
}
)
self.session_inject[BaseWallet] = async_mock.MagicMock(
get_public_did=async_mock.CoroutineMock(
return_value=DIDInfo("did", "verkey", {"meta": "data"})
),
)
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.credential_exchange_create_free_offer(self.request)
async def test_credential_exchange_create_free_offer_deser_x(self):
self.request.json = async_mock.CoroutineMock(
return_value={
"auto_issue": False,
"connection_id": "dummy",
"credential_preview": {
"attributes": [{"name": "hello", "value": "world"}]
},
"filter": {"indy": {"schema_version": "1.0"}},
}
)
self.context.update_settings({"default_endpoint": "http://1.2.3.4:8081"})
self.session_inject[BaseWallet] = async_mock.MagicMock(
get_local_did=async_mock.CoroutineMock(
return_value=DIDInfo("did", "verkey", {"meta": "data"})
),
get_public_did=async_mock.CoroutineMock(
return_value=DIDInfo("public-did", "verkey", {"meta": "data"})
),
)
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr:
mock_cred_mgr.return_value.create_offer = async_mock.CoroutineMock()
mock_cred_mgr.return_value.create_offer.side_effect = (
test_module.BaseModelError()
)
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.credential_exchange_create_free_offer(self.request)
async def test_credential_exchange_send_free_offer(self):
self.request.json = async_mock.CoroutineMock(
return_value={
"auto_issue": False,
"credential_preview": {
"attributes": [{"name": "hello", "value": "world"}]
},
"filter": {"indy": {"schema_version": "1.0"}},
}
)
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
mock_cred_mgr.return_value.create_offer = async_mock.CoroutineMock()
mock_cx_rec = async_mock.MagicMock()
mock_cred_mgr.return_value.create_offer.return_value = (
mock_cx_rec,
async_mock.MagicMock(),
)
await test_module.credential_exchange_send_free_offer(self.request)
mock_response.assert_called_once_with(mock_cx_rec.serialize.return_value)
async def test_credential_exchange_send_free_offer_no_filter(self):
self.request.json = async_mock.CoroutineMock(
return_value={
"comment": "comment",
"credential_preview": {
"attributes": [{"name": "hello", "value": "world"}]
},
}
)
with self.assertRaises(test_module.web.HTTPBadRequest) as context:
await test_module.credential_exchange_send_free_offer(self.request)
assert "Missing filter" in str(context.exception)
async def test_credential_exchange_send_free_offer_no_preview(self):
self.request.json = async_mock.CoroutineMock(
return_value={
"filter": {"indy": {"schema_version": "1.0"}},
}
)
with self.assertRaises(test_module.web.HTTPBadRequest) as context:
await test_module.credential_exchange_send_free_offer(self.request)
assert "Missing credential_preview" in str(context.exception)
async def test_credential_exchange_send_free_offer_no_conn_record(self):
self.request.json = async_mock.CoroutineMock(
return_value={
"auto_issue": False,
"credential_preview": {
"attributes": [{"name": "hello", "value": "world"}]
},
"filter": {"indy": {"schema_version": "1.0"}},
}
)
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr:
# Emulate storage not found (bad connection id)
mock_conn_rec.retrieve_by_id = async_mock.CoroutineMock(
side_effect=test_module.StorageNotFoundError()
)
mock_cred_mgr.return_value.create_offer = async_mock.CoroutineMock()
mock_cred_mgr.return_value.create_offer.return_value = (
async_mock.MagicMock(),
async_mock.MagicMock(),
)
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.credential_exchange_send_free_offer(self.request)
async def test_credential_exchange_send_free_offer_not_ready(self):
self.request.json = async_mock.CoroutineMock(
return_value={
"auto_issue": True,
"credential_preview": {
"attributes": [{"name": "hello", "value": "world"}]
},
"filter": {"indy": {"schema_version": "1.0"}},
}
)
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr:
# Emulate connection not ready
mock_conn_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_conn_rec.retrieve_by_id.return_value.is_ready = False
mock_cred_mgr.return_value.create_offer = async_mock.CoroutineMock()
mock_cred_mgr.return_value.create_offer.return_value = (
async_mock.MagicMock(),
async_mock.MagicMock(),
)
with self.assertRaises(test_module.web.HTTPForbidden):
await test_module.credential_exchange_send_free_offer(self.request)
async def test_credential_exchange_send_bound_offer(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"cred_ex_id": "dummy"}
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr, async_mock.patch.object(
test_module, "V20CredExRecord", autospec=True
) as mock_cls_cx_rec, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
mock_cls_cx_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_cls_cx_rec.retrieve_by_id.return_value.state = (
test_module.V20CredExRecord.STATE_PROPOSAL_RECEIVED
)
mock_cred_mgr.return_value.create_offer = async_mock.CoroutineMock()
mock_cx_rec = async_mock.MagicMock()
mock_cred_mgr.return_value.create_offer.return_value = (
mock_cx_rec,
async_mock.MagicMock(),
)
await test_module.credential_exchange_send_bound_offer(self.request)
mock_response.assert_called_once_with(mock_cx_rec.serialize.return_value)
async def test_credential_exchange_send_bound_offer_bad_cred_ex_id(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"cred_ex_id": "dummy"}
with async_mock.patch.object(
test_module, "V20CredExRecord", autospec=True
) as mock_cx_rec:
mock_cx_rec.connection_id = "conn-123"
mock_cx_rec.thread_id = "conn-123"
mock_cx_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_cx_rec.retrieve_by_id.side_effect = test_module.StorageNotFoundError()
with self.assertRaises(test_module.web.HTTPNotFound):
await test_module.credential_exchange_send_bound_offer(self.request)
async def test_credential_exchange_send_bound_offer_no_conn_record(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"cred_ex_id": "dummy"}
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr, async_mock.patch.object(
test_module, "V20CredExRecord", autospec=True
) as mock_cx_rec:
mock_cx_rec.connection_id = "conn-123"
mock_cx_rec.thread_id = "conn-123"
mock_cx_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_cx_rec.retrieve_by_id.return_value.state = (
test_module.V20CredExRecord.STATE_PROPOSAL_RECEIVED
)
# Emulate storage not found (bad connection id)
mock_conn_rec.retrieve_by_id = async_mock.CoroutineMock(
side_effect=test_module.StorageNotFoundError()
)
mock_cred_mgr.return_value.create_offer = async_mock.CoroutineMock()
mock_cred_mgr.return_value.create_offer.return_value = (
async_mock.MagicMock(),
async_mock.MagicMock(),
)
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.credential_exchange_send_bound_offer(self.request)
async def test_credential_exchange_send_bound_offer_bad_state(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"cred_ex_id": "dummy"}
with async_mock.patch.object(
test_module, "V20CredExRecord", autospec=True
) as mock_cx_rec:
mock_cx_rec.connection_id = "conn-123"
mock_cx_rec.thread_id = "conn-123"
mock_cx_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_cx_rec.retrieve_by_id.return_value.state = (
test_module.V20CredExRecord.STATE_DONE
)
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.credential_exchange_send_bound_offer(self.request)
async def test_credential_exchange_send_bound_offer_not_ready(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"cred_ex_id": "dummy"}
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr, async_mock.patch.object(
test_module, "V20CredExRecord", autospec=True
) as mock_cx_rec:
mock_cx_rec.connection_id = "conn-123"
mock_cx_rec.thread_id = "conn-123"
mock_cx_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_cx_rec.retrieve_by_id.return_value.state = (
test_module.V20CredExRecord.STATE_PROPOSAL_RECEIVED
)
# Emulate connection not ready
mock_conn_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_conn_rec.retrieve_by_id.return_value.is_ready = False
mock_cred_mgr.return_value.create_offer = async_mock.CoroutineMock()
mock_cred_mgr.return_value.create_offer.return_value = (
async_mock.MagicMock(),
async_mock.MagicMock(),
)
with self.assertRaises(test_module.web.HTTPForbidden):
await test_module.credential_exchange_send_bound_offer(self.request)
async def test_credential_exchange_send_request(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"cred_ex_id": "dummy"}
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr, async_mock.patch.object(
test_module, "V20CredExRecord", autospec=True
) as mock_cls_cx_rec, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
mock_cls_cx_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_cls_cx_rec.retrieve_by_id.return_value.state = (
test_module.V20CredExRecord.STATE_OFFER_RECEIVED
)
mock_cx_rec = async_mock.MagicMock()
mock_cred_mgr.return_value.create_request.return_value = (
mock_cx_rec,
async_mock.MagicMock(),
)
await test_module.credential_exchange_send_request(self.request)
mock_response.assert_called_once_with(mock_cx_rec.serialize.return_value)
async def test_credential_exchange_send_request_bad_cred_ex_id(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"cred_ex_id": "dummy"}
with async_mock.patch.object(
test_module, "V20CredExRecord", autospec=True
) as mock_cx_rec:
mock_cx_rec.connection_id = "conn-123"
mock_cx_rec.thread_id = "conn-123"
mock_cx_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_cx_rec.retrieve_by_id.side_effect = test_module.StorageNotFoundError()
with self.assertRaises(test_module.web.HTTPNotFound):
await test_module.credential_exchange_send_request(self.request)
async def test_credential_exchange_send_request_no_conn_record(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"cred_ex_id": "dummy"}
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr, async_mock.patch.object(
test_module, "V20CredExRecord", autospec=True
) as mock_cx_rec:
mock_cx_rec.connection_id = "conn-123"
mock_cx_rec.thread_id = "conn-123"
mock_cx_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_cx_rec.retrieve_by_id.return_value.state = (
test_module.V20CredExRecord.STATE_OFFER_RECEIVED
)
# Emulate storage not found (bad connection id)
mock_conn_rec.retrieve_by_id = async_mock.CoroutineMock(
side_effect=test_module.StorageNotFoundError()
)
mock_cred_mgr.return_value.create_offer = async_mock.CoroutineMock()
mock_cred_mgr.return_value.create_offer.return_value = (
async_mock.MagicMock(),
async_mock.MagicMock(),
)
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.credential_exchange_send_request(self.request)
async def test_credential_exchange_send_request_not_ready(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"cred_ex_id": "dummy"}
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr, async_mock.patch.object(
test_module, "V20CredExRecord", autospec=True
) as mock_cx_rec:
mock_cx_rec.connection_id = "conn-123"
mock_cx_rec.thread_id = "conn-123"
mock_cx_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_cx_rec.retrieve_by_id.return_value.state = (
test_module.V20CredExRecord.STATE_OFFER_RECEIVED
)
# Emulate connection not ready
mock_conn_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_conn_rec.retrieve_by_id.return_value.is_ready = False
mock_cred_mgr.return_value.create_offer = async_mock.CoroutineMock()
mock_cred_mgr.return_value.create_offer.return_value = (
async_mock.MagicMock(),
async_mock.MagicMock(),
)
with self.assertRaises(test_module.web.HTTPForbidden):
await test_module.credential_exchange_send_request(self.request)
async def test_credential_exchange_issue(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"cred_ex_id": "dummy"}
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr, async_mock.patch.object(
test_module, "V20CredExRecord", autospec=True
) as mock_cls_cx_rec, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
mock_cls_cx_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_cls_cx_rec.retrieve_by_id.return_value.state = (
test_module.V20CredExRecord.STATE_REQUEST_RECEIVED
)
mock_cx_rec = async_mock.MagicMock()
mock_cred_mgr.return_value.get_detail_record = async_mock.CoroutineMock(
side_effect=[
async_mock.MagicMock( # indy
serialize=async_mock.MagicMock(return_value={"...": "..."})
),
None, # dif
]
)
mock_cred_mgr.return_value.issue_credential.return_value = (
mock_cx_rec,
async_mock.MagicMock(),
)
await test_module.credential_exchange_issue(self.request)
mock_response.assert_called_once_with(
{
"cred_ex_record": mock_cx_rec.serialize.return_value,
"indy": {"...": "..."},
"dif": None,
}
)
async def test_credential_exchange_issue_bad_cred_ex_id(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"cred_ex_id": "dummy"}
with async_mock.patch.object(
test_module, "V20CredExRecord", autospec=True
) as mock_cx_rec:
mock_cx_rec.connection_id = "conn-123"
mock_cx_rec.thread_id = "conn-123"
mock_cx_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_cx_rec.retrieve_by_id.side_effect = test_module.StorageNotFoundError()
with self.assertRaises(test_module.web.HTTPNotFound):
await test_module.credential_exchange_issue(self.request)
async def test_credential_exchange_issue_no_conn_record(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"cred_ex_id": "dummy"}
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr, async_mock.patch.object(
test_module, "V20CredExRecord", autospec=True
) as mock_cx_rec:
mock_cx_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_cx_rec.retrieve_by_id.return_value.state = (
test_module.V20CredExRecord.STATE_REQUEST_RECEIVED
)
# Emulate storage not found (bad connection id)
mock_conn_rec.retrieve_by_id = async_mock.CoroutineMock(
side_effect=test_module.StorageNotFoundError()
)
mock_cred_mgr.return_value.issue_credential = async_mock.CoroutineMock()
mock_cred_mgr.return_value.issue_credential.return_value = (
async_mock.MagicMock(),
async_mock.MagicMock(),
)
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.credential_exchange_issue(self.request)
async def test_credential_exchange_issue_not_ready(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"cred_ex_id": "dummy"}
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr, async_mock.patch.object(
test_module, "V20CredExRecord", autospec=True
) as mock_cx_rec:
mock_cx_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_cx_rec.retrieve_by_id.return_value.state = (
test_module.V20CredExRecord.STATE_REQUEST_RECEIVED
)
# Emulate connection not ready
mock_conn_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_conn_rec.retrieve_by_id.return_value.is_ready = False
mock_cred_mgr.return_value.issue_credential = async_mock.CoroutineMock()
mock_cred_mgr.return_value.issue_credential.return_value = (
async_mock.MagicMock(),
async_mock.MagicMock(),
)
with self.assertRaises(test_module.web.HTTPForbidden):
await test_module.credential_exchange_issue(self.request)
async def test_credential_exchange_issue_rev_reg_full(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"cred_ex_id": "dummy"}
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr, async_mock.patch.object(
test_module, "V20CredExRecord", autospec=True
) as mock_cx_rec:
mock_cx_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_cx_rec.retrieve_by_id.return_value.state = (
test_module.V20CredExRecord.STATE_REQUEST_RECEIVED
)
mock_conn_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_conn_rec.retrieve_by_id.return_value.is_ready = True
mock_issue_cred = async_mock.CoroutineMock(
side_effect=test_module.IndyIssuerError()
)
mock_cred_mgr.return_value.issue_credential = mock_issue_cred
with self.assertRaises(test_module.web.HTTPBadRequest) as context:
await test_module.credential_exchange_issue(self.request)
async def test_credential_exchange_issue_deser_x(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"cred_ex_id": "dummy"}
mock_cx_rec = async_mock.MagicMock(
connection_id="dummy",
serialize=async_mock.MagicMock(side_effect=test_module.BaseModelError()),
)
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr, async_mock.patch.object(
test_module, "V20CredExRecord", autospec=True
) as mock_cls_cx_rec:
mock_cls_cx_rec.retrieve_by_id = async_mock.CoroutineMock(
return_value=mock_cx_rec
)
mock_cred_mgr.return_value.issue_credential.return_value = (
mock_cx_rec,
async_mock.MagicMock(),
)
mock_cred_mgr.return_value.get_detail_record = async_mock.CoroutineMock(
side_effect=[
async_mock.MagicMock( # indy
serialize=async_mock.MagicMock(return_value={"...": "..."})
),
None, # dif
]
)
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.credential_exchange_issue(self.request)
async def test_credential_exchange_store(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"cred_ex_id": "dummy"}
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr, async_mock.patch.object(
test_module, "V20CredExRecord", autospec=True
) as mock_cls_cx_rec, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
mock_cls_cx_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_cls_cx_rec.retrieve_by_id.return_value.state = (
test_module.V20CredExRecord.STATE_CREDENTIAL_RECEIVED
)
mock_cred_mgr.return_value.get_detail_record = async_mock.CoroutineMock(
side_effect=[
async_mock.MagicMock( # indy
serialize=async_mock.MagicMock(return_value={"...": "..."})
),
None, # dif
]
)
mock_cx_rec = async_mock.MagicMock()
mock_cred_mgr.return_value.store_credential.return_value = (
mock_cx_rec,
async_mock.MagicMock(),
)
await test_module.credential_exchange_store(self.request)
mock_response.assert_called_once_with(
{
"cred_ex_record": mock_cx_rec.serialize.return_value,
"indy": {"...": "..."},
"dif": None,
}
)
async def test_credential_exchange_store_bad_cred_id_json(self):
self.request.json = async_mock.CoroutineMock(
side_effect=test_module.JSONDecodeError("Nope", "Nope", 0)
)
self.request.match_info = {"cred_ex_id": "dummy"}
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr, async_mock.patch.object(
test_module, "V20CredExRecord", autospec=True
) as mock_cls_cx_rec, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
mock_cls_cx_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_cls_cx_rec.retrieve_by_id.return_value.state = (
test_module.V20CredExRecord.STATE_CREDENTIAL_RECEIVED
)
mock_cx_rec = async_mock.MagicMock()
mock_cred_mgr.return_value.get_detail_record = async_mock.CoroutineMock(
side_effect=[
async_mock.MagicMock( # indy
serialize=async_mock.MagicMock(return_value={"...": "..."})
),
None, # dif
]
)
mock_cred_mgr.return_value.store_credential.return_value = (
mock_cx_rec,
async_mock.MagicMock(),
)
await test_module.credential_exchange_store(self.request)
mock_response.assert_called_once_with(
{
"cred_ex_record": mock_cx_rec.serialize.return_value,
"indy": {"...": "..."},
"dif": None,
}
)
async def test_credential_exchange_store_bad_cred_ex_id(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"cred_ex_id": "dummy"}
with async_mock.patch.object(
test_module, "V20CredExRecord", autospec=True
) as mock_cx_rec:
mock_cx_rec.connection_id = "conn-123"
mock_cx_rec.thread_id = "conn-123"
mock_cx_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_cx_rec.retrieve_by_id.side_effect = test_module.StorageNotFoundError()
with self.assertRaises(test_module.web.HTTPNotFound):
await test_module.credential_exchange_store(self.request)
async def test_credential_exchange_store_no_conn_record(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"cred_ex_id": "dummy"}
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr, async_mock.patch.object(
test_module, "V20CredExRecord", autospec=True
) as mock_cx_rec:
mock_cx_rec.connection_id = "conn-123"
mock_cx_rec.thread_id = "conn-123"
mock_cx_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_cx_rec.retrieve_by_id.return_value.state = (
test_module.V20CredExRecord.STATE_CREDENTIAL_RECEIVED
)
# Emulate storage not found (bad connection id)
mock_conn_rec.retrieve_by_id = async_mock.CoroutineMock(
side_effect=test_module.StorageNotFoundError()
)
mock_cred_mgr.return_value.store_credential.return_value = (
mock_cx_rec,
async_mock.MagicMock(),
)
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.credential_exchange_store(self.request)
async def test_credential_exchange_store_not_ready(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"cred_ex_id": "dummy"}
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr, async_mock.patch.object(
test_module, "V20CredExRecord", autospec=True
) as mock_cx_rec:
mock_cx_rec.connection_id = "conn-123"
mock_cx_rec.thread_id = "conn-123"
mock_cx_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_cx_rec.retrieve_by_id.return_value.state = (
test_module.V20CredExRecord.STATE_CREDENTIAL_RECEIVED
)
# Emulate connection not ready
mock_conn_rec.retrieve_by_id = async_mock.CoroutineMock()
mock_conn_rec.retrieve_by_id.return_value.is_ready = False
mock_cred_mgr.return_value.store_credential.return_value = (
mock_cx_rec,
async_mock.MagicMock(),
)
with self.assertRaises(test_module.web.HTTPForbidden):
await test_module.credential_exchange_store(self.request)
async def test_credential_exchange_remove(self):
self.request.match_info = {"cred_ex_id": "dummy"}
with async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
mock_cred_mgr.return_value = async_mock.MagicMock(
delete_cred_ex_record=async_mock.CoroutineMock()
)
await test_module.credential_exchange_remove(self.request)
mock_response.assert_called_once_with({})
async def test_credential_exchange_remove_bad_cred_ex_id(self):
self.request.match_info = {"cred_ex_id": "dummy"}
with async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr:
mock_cred_mgr.return_value = async_mock.MagicMock(
delete_cred_ex_record=async_mock.CoroutineMock(
side_effect=test_module.StorageNotFoundError()
)
)
with self.assertRaises(test_module.web.HTTPNotFound):
await test_module.credential_exchange_remove(self.request)
async def test_credential_exchange_remove_x(self):
self.request.match_info = {"cred_ex_id": "dummy"}
with async_mock.patch.object(
test_module, "V20CredManager", autospec=True
) as mock_cred_mgr:
mock_cred_mgr.return_value = async_mock.MagicMock(
delete_cred_ex_record=async_mock.CoroutineMock(
side_effect=test_module.StorageError()
)
)
with self.assertRaises(test_module.web.HTTPBadRequest):
await test_module.credential_exchange_remove(self.request)
async def test_credential_exchange_problem_report(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"cred_ex_id": "dummy"}
with async_mock.patch.object(
test_module, "ConnRecord", autospec=True
) as mock_conn_rec, async_mock.patch.object(
test_module, "V20CredExRecord", autospec=True
) as mock_cx_rec, async_mock.patch.object(
test_module, "ProblemReport", autospec=True
) as mock_prob_report, async_mock.patch.object(
test_module.web, "json_response"
) as mock_response:
mock_cx_rec.retrieve_by_id = async_mock.CoroutineMock()
await test_module.credential_exchange_problem_report(self.request)
mock_response.assert_called_once_with({})
self.request["outbound_message_router"].assert_awaited_once_with(
mock_prob_report.return_value,
connection_id=mock_cx_rec.retrieve_by_id.return_value.connection_id,
)
async def test_credential_exchange_problem_report_bad_cred_id(self):
self.request.json = async_mock.CoroutineMock()
self.request.match_info = {"cred_ex_id": "dummy"}
with async_mock.patch.object(
test_module, "V20CredExRecord", autospec=True
) as mock_cx_rec:
mock_cx_rec.retrieve_by_id = async_mock.CoroutineMock(
side_effect=test_module.StorageNotFoundError()
)
with self.assertRaises(test_module.web.HTTPNotFound):
await test_module.credential_exchange_problem_report(self.request)
async def test_register(self):
mock_app = async_mock.MagicMock()
mock_app.add_routes = async_mock.MagicMock()
await test_module.register(mock_app)
mock_app.add_routes.assert_called_once()
async def test_post_process_routes(self):
mock_app = async_mock.MagicMock(_state={"swagger_dict": {}})
test_module.post_process_routes(mock_app)
assert "tags" in mock_app._state["swagger_dict"]
|
py | b400751cae93244c680a2e7d71bdf7e6e61a7efe | #!/usr/local/autopkg/python
"""
JamfComputerProfileUploader processor for uploading computer configuration profiles
to Jamf Pro using AutoPkg
by G Pugh
"""
import json
import re
import os
import plistlib
import subprocess
import uuid
from collections import namedtuple
from base64 import b64encode
from pathlib import Path
from shutil import rmtree
from time import sleep
from xml.sax.saxutils import escape
from autopkglib import Processor, ProcessorError # pylint: disable=import-error
class JamfComputerProfileUploader(Processor):
"""A processor for AutoPkg that will upload an item to a Jamf Cloud or on-prem server."""
input_variables = {
"JSS_URL": {
"required": True,
"description": "URL to a Jamf Pro server that the API user has write access "
"to, optionally set as a key in the com.github.autopkg "
"preference file.",
},
"API_USERNAME": {
"required": True,
"description": "Username of account with appropriate access to "
"jss, optionally set as a key in the com.github.autopkg "
"preference file.",
},
"API_PASSWORD": {
"required": True,
"description": "Password of api user, optionally set as a key in "
"the com.github.autopkg preference file.",
},
"profile_name": {
"required": False,
"description": "Configuration Profile name",
"default": "",
},
"payload": {
"required": False,
"description": "Path to Configuration Profile payload plist file",
},
"mobileconfig": {
"required": False,
"description": "Path to Configuration Profile mobileconfig file",
},
"identifier": {
"required": False,
"description": "Configuration Profile payload identifier",
},
"profile_template": {
"required": False,
"description": "Path to Configuration Profile XML template file",
},
"profile_category": {
"required": False,
"description": "a category to assign to the profile",
},
"organization": {
"required": False,
"description": "Organization to assign to the profile",
},
"profile_description": {
"required": False,
"description": "a description to assign to the profile",
},
"profile_computergroup": {
"required": False,
"description": "a computer group that will be scoped to the profile",
},
"unsign_profile": {
"required": False,
"description": (
"Unsign a mobileconfig file prior to uploading "
"if it is signed, if true."
),
"default": False,
},
"replace_profile": {
"required": False,
"description": "overwrite an existing Configuration Profile if True.",
"default": False,
},
}
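# Illustrative recipe override values for the inputs above (a hedged sketch;
# all values are placeholders, not taken from any real recipe):
#
#   JSS_URL:               https://example.jamfcloud.com
#   API_USERNAME:          autopkg-api
#   API_PASSWORD:          (set in the com.github.autopkg preference file)
#   profile_name:          Firefox Settings
#   payload:               FirefoxSettings.plist
#   identifier:            org.mozilla.firefox
#   profile_computergroup: Testing
#   replace_profile:       "True"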
output_variables = {
"jamfcomputerprofileuploader_summary_result": {
"description": "Description of interesting results.",
},
}
# do not edit directly - copy from template
def write_json_file(self, data, tmp_dir="/tmp/jamf_upload"):
"""dump some json to a temporary file"""
self.make_tmp_dir(tmp_dir)
tf = os.path.join(tmp_dir, f"jamf_upload_{str(uuid.uuid4())}.json")
with open(tf, "w") as fp:
json.dump(data, fp)
return tf
# do not edit directly - copy from template
def write_temp_file(self, data, tmp_dir="/tmp/jamf_upload"):
"""dump some text to a temporary file"""
self.make_tmp_dir(tmp_dir)
tf = os.path.join(tmp_dir, f"jamf_upload_{str(uuid.uuid4())}.txt")
with open(tf, "w") as fp:
fp.write(data)
return tf
# do not edit directly - copy from template
def make_tmp_dir(self, tmp_dir="/tmp/jamf_upload"):
"""make the tmp directory"""
if not os.path.exists(tmp_dir):
os.mkdir(tmp_dir)
return tmp_dir
# do not edit directly - copy from template
def clear_tmp_dir(self, tmp_dir="/tmp/jamf_upload"):
"""remove the tmp directory"""
if os.path.exists(tmp_dir):
rmtree(tmp_dir)
return tmp_dir
# do not edit directly - copy from template
def curl(self, method, url, auth, data="", additional_headers=""):
"""
build a curl command based on method (GET, PUT, POST, DELETE)
If the URL contains 'uapi' then token should be passed to the auth variable,
otherwise the enc_creds variable should be passed to the auth variable
"""
tmp_dir = self.make_tmp_dir()
headers_file = os.path.join(tmp_dir, "curl_headers_from_jamf_upload.txt")
output_file = os.path.join(tmp_dir, "curl_output_from_jamf_upload.txt")
cookie_jar = os.path.join(tmp_dir, "curl_cookies_from_jamf_upload.txt")
# build the curl command
curl_cmd = [
"/usr/bin/curl",
"--silent",
"--show-error",
"-X",
method,
"-D",
headers_file,
"--output",
output_file,
url,
]
# authorisation if using Jamf Pro API or Classic API
# if using uapi and we already have a token then we use the token for authorization
if "uapi" in url and "tokens" not in url:
curl_cmd.extend(["--header", f"authorization: Bearer {auth}"])
# basic auth to obtain a token, or for classic API
elif "uapi" in url or "JSSResource" in url or "dbfileupload" in url:
curl_cmd.extend(["--header", f"authorization: Basic {auth}"])
# set either Accept or Content-Type depending on method
if method == "GET" or method == "DELETE":
curl_cmd.extend(["--header", "Accept: application/json"])
# icon upload requires special method
elif method == "POST" and "fileuploads" in url:
curl_cmd.extend(["--header", "Content-type: multipart/form-data"])
curl_cmd.extend(["--form", f"name=@{data}"])
elif method == "POST" or method == "PUT":
if data:
if "uapi" in url or "JSSResource" in url or "dbfileupload" in url:
# jamf data upload requires upload-file argument
curl_cmd.extend(["--upload-file", data])
else:
# slack requires data argument
curl_cmd.extend(["--data", data])
# uapi and slack accepts json, classic API only accepts xml
if "JSSResource" in url:
curl_cmd.extend(["--header", "Content-type: application/xml"])
else:
curl_cmd.extend(["--header", "Content-type: application/json"])
else:
self.output(f"WARNING: HTTP method {method} not supported")
# write session for jamf requests
if "uapi" in url or "JSSResource" in url or "dbfileupload" in url:
try:
with open(headers_file, "r") as file:
headers = file.readlines()
existing_headers = [x.strip() for x in headers]
for header in existing_headers:
if "APBALANCEID" in header or "AWSALB" in header:
with open(cookie_jar, "w") as fp:
fp.write(header)
except IOError:
pass
# look for existing session
try:
with open(cookie_jar, "r") as file:
headers = file.readlines()
existing_headers = [x.strip() for x in headers]
for header in existing_headers:
if "APBALANCEID" in header or "AWSALB" in header:
cookie = header.split()[1].rstrip(";")
self.output(f"Existing cookie found: {cookie}", verbose_level=2)
curl_cmd.extend(["--cookie", cookie])
except IOError:
self.output(
"No existing cookie found - starting new session", verbose_level=2
)
# additional headers for advanced requests
if additional_headers:
curl_cmd.extend(additional_headers)
self.output(f"curl command: {' '.join(curl_cmd)}", verbose_level=3)
# now subprocess the curl command and build the r tuple which contains the
# headers, status code and outputted data
subprocess.check_output(curl_cmd)
r = namedtuple(
"r", ["headers", "status_code", "output"], defaults=(None, None, None)
)
try:
with open(headers_file, "r") as file:
headers = file.readlines()
r.headers = [x.strip() for x in headers]
for header in r.headers: # pylint: disable=not-an-iterable
if re.match(r"HTTP/(1.1|2)", header) and "Continue" not in header:
r.status_code = int(header.split()[1])
except IOError:
raise ProcessorError(f"WARNING: {headers_file} not found")
if os.path.exists(output_file) and os.path.getsize(output_file) > 0:
with open(output_file, "rb") as file:
if "uapi" in url:
r.output = json.load(file)
else:
r.output = file.read()
else:
self.output(f"No output from request ({output_file} not found or empty)")
return r()
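# Hedged usage sketch for curl() (values are hypothetical, not from the
# original processor). A Classic API GET with basic-auth credentials:
#
#   url = f"{jamf_url}/JSSResource/osxconfigurationprofiles"
#   r = self.curl("GET", url, enc_creds)
#   if r.status_code == 200:
#       profiles = json.loads(r.output)
#
# For a Jamf Pro (uapi) endpoint, a bearer token is passed as the auth
# argument instead of the base64-encoded credentials:
#
#   r = self.curl("GET", f"{jamf_url}/uapi/<endpoint>", token)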
# do not edit directly - copy from template
def status_check(self, r, endpoint_type, obj_name):
"""Return a message dependent on the HTTP response"""
if r.status_code == 200 or r.status_code == 201:
self.output(f"{endpoint_type} '{obj_name}' uploaded successfully")
return "break"
elif r.status_code == 409:
self.output(r.output, verbose_level=2)
raise ProcessorError(
f"WARNING: {endpoint_type} '{obj_name}' upload failed due to a conflict"
)
elif r.status_code == 401:
raise ProcessorError(
f"ERROR: {endpoint_type} '{obj_name}' upload failed due to permissions error"
)
else:
raise ProcessorError(f"ERROR: {endpoint_type} '{obj_name}' upload failed")
# do not edit directly - copy from template
def get_path_to_file(self, filename):
"""AutoPkg is not very good at finding dependent files. This function
will look inside the search directories for any supplied file"""
# if the supplied file is not a path, use the override directory or
# recipe dir if no override
recipe_dir = self.env.get("RECIPE_DIR")
filepath = os.path.join(recipe_dir, filename)
if os.path.exists(filepath):
self.output(f"File found at: {filepath}")
return filepath
# if not found, search parent directories to look for it
if self.env.get("PARENT_RECIPES"):
# also look in the repos containing the parent recipes.
parent_recipe_dirs = list(
{os.path.dirname(item) for item in self.env["PARENT_RECIPES"]}
)
matched_filepath = ""
for d in parent_recipe_dirs:
# check if we are in the root of a parent repo, if not, ascend to the root
# note that if the parents are not in a git repo, only the same
# directory as the recipe will be searched for templates
if not os.path.isdir(os.path.join(d, ".git")):
d = os.path.dirname(d)
for path in Path(d).rglob(filename):
matched_filepath = str(path)
break
if matched_filepath:
self.output(f"File found at: {matched_filepath}")
return matched_filepath
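# Example (hypothetical filename): resolving a bare template name against the
# override/recipe directory and any parent recipe repos:
#
#   template_path = self.get_path_to_file("ProfileTemplate.xml")
#   # returns an absolute path when found; otherwise the method returns None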
# do not edit directly - copy from template
def get_api_obj_id_from_name(self, jamf_url, object_name, object_type, enc_creds):
"""check if a Classic API object with the same name exists on the server"""
# define the relationship between the object types and their URL
object_types = {
"package": "packages",
"computer_group": "computergroups",
"policy": "policies",
"extension_attribute": "computerextensionattributes",
"restricted_software": "restrictedsoftware",
"os_x_configuration_profile": "osxconfigurationprofiles",
}
object_list_types = {
"computer_group": "computer_groups",
"extension_attribute": "computer_extension_attributes",
"os_x_configuration_profile": "os_x_configuration_profiles",
"package": "packages",
"policy": "policies",
"restricted_software": "restricted_software",
}
url = "{}/JSSResource/{}".format(jamf_url, object_types[object_type])
r = self.curl("GET", url, enc_creds)
if r.status_code == 200:
object_list = json.loads(r.output)
self.output(
object_list,
verbose_level=4,
)
obj_id = 0
for obj in object_list[object_list_types[object_type]]:
self.output(
obj,
verbose_level=3,
)
# we need to check for a case-insensitive match
if obj["name"].lower() == object_name.lower():
obj_id = obj["id"]
return obj_id
# do not edit directly - copy from template
def get_api_obj_value_from_id(
self, jamf_url, object_type, obj_id, obj_path, enc_creds
):
"""get the value of an item in a Classic API object"""
# define the relationship between the object types and their URL
# we could make this shorter with some regex but I think this way is clearer
object_types = {
"package": "packages",
"computer_group": "computergroups",
"policy": "policies",
"extension_attribute": "computerextensionattributes",
"restricted_software": "restrictedsoftware",
"os_x_configuration_profile": "osxconfigurationprofiles",
}
url = "{}/JSSResource/{}/id/{}".format(
jamf_url, object_types[object_type], obj_id
)
r = self.curl("GET", url, enc_creds)
if r.status_code == 200:
obj_content = json.loads(r.output)
self.output(obj_content, verbose_level=4)
# convert an xpath to json
xpath_list = obj_path.split("/")
value = obj_content[object_type]
for i in range(0, len(xpath_list)):
if xpath_list[i]:
try:
value = value[xpath_list[i]]
self.output(value, verbose_level=3)
except KeyError:
value = ""
break
if value:
self.output(
"Value of '{}': {}".format(obj_path, value), verbose_level=2
)
return value
# do not edit directly - copy from template
def substitute_assignable_keys(self, data, xml_escape=False):
"""substitutes any key in the inputted text using the %MY_KEY% nomenclature"""
# do up to five passes to ensure that all keys are substituted
loop = 5
while loop > 0:
loop = loop - 1
found_keys = re.findall(r"\%\w+\%", data)
if not found_keys:
break
found_keys = [i.replace("%", "") for i in found_keys]
for found_key in found_keys:
if self.env.get(found_key):
self.output(
f"Replacing any instances of '{found_key}' with "
f"'{str(self.env.get(found_key))}'",
verbose_level=2,
)
if xml_escape:
replacement_key = escape(self.env.get(found_key))
else:
replacement_key = self.env.get(found_key)
data = data.replace(f"%{found_key}%", replacement_key)
else:
self.output(
f"WARNING: '{found_key}' has no replacement object!",
)
raise ProcessorError("Unsubstitutable key in template found")
return data
def substitute_limited_assignable_keys(
self, data, cli_custom_keys, xml_escape=False
):
"""substitutes any key in the inputted text using the %MY_KEY% nomenclature.
Whenever %MY_KEY% is found in the provided data, it is replaced with the assigned
value of MY_KEY. Up to five passes are made to ensure that all keys are substituted.
Optionally, if the xml_escape key is set, the value is escaped for XML special characters.
This is designed primarily to account for ampersands in the substituted strings."""
loop = 5
while loop > 0:
loop = loop - 1
found_keys = re.findall(r"\%\w+\%", data)
if not found_keys:
break
found_keys = [i.replace("%", "") for i in found_keys]
for found_key in found_keys:
if cli_custom_keys[found_key]:
self.output(
f"Replacing any instances of '{found_key}' with "
f"'{str(cli_custom_keys[found_key])}'",
verbose_level=2,
)
if xml_escape:
replacement_key = escape(cli_custom_keys[found_key])
else:
replacement_key = cli_custom_keys[found_key]
data = data.replace(f"%{found_key}%", replacement_key)
else:
self.output(
f"WARNING: '{found_key}' has no replacement object!",
)
return data
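# Minimal sketch of the %KEY% substitution described in the docstring above
# (the template string and keys are illustrative only):
#
#   template = "<name>%mobileconfig_name%</name><category>%category%</category>"
#   keys = {"mobileconfig_name": "Firefox Settings", "category": "Browsers"}
#   self.substitute_limited_assignable_keys(template, keys, xml_escape=True)
#   # -> "<name>Firefox Settings</name><category>Browsers</category>"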
def pretty_print_xml(self, xml):
proc = subprocess.Popen(
["xmllint", "--format", "/dev/stdin"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
)
(output, _) = proc.communicate(xml)
return output
def get_existing_uuid(self, jamf_url, obj_id, enc_creds):
"""return the existing UUID to ensure we don't change it"""
# first grab the payload from the xml object
existing_plist = self.get_api_obj_value_from_id(
jamf_url,
"os_x_configuration_profile",
obj_id,
"general/payloads",
enc_creds,
)
# Jamf seems to sometimes export an empty key which plistlib considers invalid,
# so let's remove this
existing_plist = existing_plist.replace("<key/>", "")
# make the xml pretty so we can better see where any import problem lies
existing_plist = self.pretty_print_xml(bytes(existing_plist, "utf-8"))
self.output(
f"Existing payload (type: {type(existing_plist)}):", verbose_level=2
)
self.output(existing_plist.decode("UTF-8"), verbose_level=2)
# now extract the UUID from the existing payload
existing_payload = plistlib.loads(existing_plist)
self.output("Imported payload", verbose_level=2)
self.output(existing_payload, verbose_level=2)
existing_uuid = existing_payload["PayloadUUID"]
self.output(f"Existing UUID found: {existing_uuid}")
return existing_uuid
def generate_uuid(self):
"""generate a UUID for new profiles"""
return str(uuid.uuid4())
def make_mobileconfig_from_payload(
self,
payload_path,
payload_identifier,
mobileconfig_name,
organization,
description,
mobileconfig_uuid,
):
"""create a mobileconfig file using a payload file"""
# import plist as text and replace any substitutable keys
with open(payload_path, "rb") as file:
payload_text = file.read()
# substitute user-assignable keys (requires decode to string)
payload_text = self.substitute_assignable_keys(
(payload_text.decode()), xml_escape=True
)
# now convert to data (requires encode back to bytes...)
mcx_preferences = plistlib.loads(str.encode(payload_text))
self.output("Preferences contents:", verbose_level=2)
self.output(mcx_preferences, verbose_level=2)
# generate a random UUID for the payload
payload_uuid = self.generate_uuid()
# add the other keys required in the payload
payload_contents = {
"PayloadDisplayName": "Custom Settings",
"PayloadIdentifier": payload_uuid,
"PayloadOrganization": "JAMF Software",
"PayloadType": "com.apple.ManagedClient.preferences",
"PayloadUUID": payload_uuid,
"PayloadVersion": 1,
"PayloadContent": {
payload_identifier: {
"Forced": [{"mcx_preference_settings": mcx_preferences}]
}
},
}
self.output("Payload contents:", verbose_level=2)
self.output(payload_contents, verbose_level=2)
# now write the mobileconfig file
mobileconfig_data = {
"PayloadDescription": description,
"PayloadDisplayName": mobileconfig_name,
"PayloadOrganization": organization,
"PayloadRemovalDisallowed": True,
"PayloadScope": "System",
"PayloadType": "Configuration",
"PayloadVersion": 1,
"PayloadIdentifier": mobileconfig_uuid,
"PayloadUUID": mobileconfig_uuid,
"PayloadContent": [payload_contents],
}
self.output("Converting config data to plist")
mobileconfig_plist = plistlib.dumps(mobileconfig_data)
self.output("Mobileconfig contents:", verbose_level=2)
self.output(mobileconfig_plist.decode("UTF-8"), verbose_level=2)
return mobileconfig_plist
def unsign_signed_mobileconfig(self, mobileconfig_plist):
"""checks if profile is signed. This is necessary because Jamf cannot
upload a signed profile, so we either need to unsign it, or bail"""
output_path = os.path.join("/tmp", str(uuid.uuid4()))
cmd = [
"/usr/bin/security",
"cms",
"-D",
"-i",
mobileconfig_plist,
"-o",
output_path,
]
self.output(cmd, verbose_level=1)
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
_, err = proc.communicate()
if os.path.exists(output_path) and os.stat(output_path).st_size > 0:
self.output(f"Profile is signed. Unsigned profile at {output_path}")
return output_path
elif err:
self.output("Profile is not signed.")
self.output(err, verbose_level=2)
def upload_mobileconfig(
self,
jamf_url,
enc_creds,
mobileconfig_name,
description,
category,
mobileconfig_plist,
computergroup_name,
template_contents,
profile_uuid,
obj_id=None,
):
"""Update Configuration Profile metadata."""
# if we find an object ID we put, if not, we post
if obj_id:
url = f"{jamf_url}/JSSResource/osxconfigurationprofiles/id/{obj_id}"
else:
url = f"{jamf_url}/JSSResource/osxconfigurationprofiles/id/0"
# remove newlines, tabs, leading spaces, and XML-escape the payload
mobileconfig_plist = mobileconfig_plist.decode("UTF-8")
mobileconfig_list = mobileconfig_plist.rsplit("\n")
mobileconfig_list = [x.strip("\t") for x in mobileconfig_list]
mobileconfig_list = [x.strip(" ") for x in mobileconfig_list]
mobileconfig = "".join(mobileconfig_list)
# substitute user-assignable keys
replaceable_keys = {
"mobileconfig_name": mobileconfig_name,
"description": description,
"category": category,
"payload": mobileconfig,
"computergroup_name": computergroup_name,
"uuid": f"com.github.grahampugh.jamf-upload.{profile_uuid}",
}
# for key in replaceable_keys:
# self.output(f"TEMP: {replaceable_keys[key]}")
# substitute user-assignable keys (escaping for XML)
template_contents = self.substitute_limited_assignable_keys(
template_contents, replaceable_keys, xml_escape=True
)
self.output("Configuration Profile to be uploaded:", verbose_level=2)
self.output(template_contents, verbose_level=2)
self.output("Uploading Configuration Profile..")
# write the template to temp file
template_xml = self.write_temp_file(template_contents)
count = 0
while True:
count += 1
self.output(
f"Configuration Profile upload attempt {count}", verbose_level=1
)
method = "PUT" if obj_id else "POST"
r = self.curl(method, url, enc_creds, template_xml)
# check HTTP response
if (
self.status_check(r, "Configuration Profile", mobileconfig_name)
== "break"
):
break
if count > 5:
self.output(
"ERROR: Configuration Profile upload did not succeed after 5 attempts"
)
self.output(f"\nHTTP POST Response Code: {r.status_code}")
break
sleep(10)
return r
def main(self):
"""Do the main thing here"""
self.jamf_url = self.env.get("JSS_URL")
self.jamf_user = self.env.get("API_USERNAME")
self.jamf_password = self.env.get("API_PASSWORD")
self.profile_name = self.env.get("profile_name")
self.payload = self.env.get("payload")
self.mobileconfig = self.env.get("mobileconfig")
self.identifier = self.env.get("identifier")
self.template = self.env.get("profile_template")
self.profile_category = self.env.get("profile_category")
self.organization = self.env.get("organization")
self.profile_description = self.env.get("profile_description")
self.profile_computergroup = self.env.get("profile_computergroup")
self.unsign = self.env.get("unsign_profile")
# handle setting unsign in overrides
if not self.unsign or self.unsign == "False":
self.unsign = False
self.replace = self.env.get("replace_profile")
# handle setting replace in overrides
if not self.replace or self.replace == "False":
self.replace = False
# clear any pre-existing summary result
if "jamfcomputerprofileuploader_summary_result" in self.env:
del self.env["jamfcomputerprofileuploader_summary_result"]
profile_updated = False
# encode the username and password into a basic auth b64 encoded string
credentials = f"{self.jamf_user}:{self.jamf_password}"
enc_creds_bytes = b64encode(credentials.encode("utf-8"))
enc_creds = str(enc_creds_bytes, "utf-8")
# handle files with no path
if self.payload and "/" not in self.payload:
found_payload = self.get_path_to_file(self.payload)
if found_payload:
self.payload = found_payload
else:
raise ProcessorError(f"ERROR: Payload file {self.payload} not found")
if self.mobileconfig and "/" not in self.mobileconfig:
found_mobileconfig = self.get_path_to_file(self.mobileconfig)
if found_mobileconfig:
self.mobileconfig = found_mobileconfig
else:
raise ProcessorError(
f"ERROR: mobileconfig file {self.mobileconfig} not found"
)
if self.template and "/" not in self.template:
found_template = self.get_path_to_file(self.template)
if found_template:
self.template = found_template
else:
raise ProcessorError(
f"ERROR: XML template file {self.template} not found"
)
# if an unsigned mobileconfig file is supplied we can get the name, organization and
# description from it
if self.mobileconfig:
self.output(f"mobileconfig file supplied: {self.mobileconfig}")
# check if the file is signed
mobileconfig_file = self.unsign_signed_mobileconfig(self.mobileconfig)
# bail out if the profile is signed and unsign_profile was not set
if mobileconfig_file and not self.unsign:
raise ProcessorError(
"Signed profiles cannot be uploaded to Jamf Pro via the API. "
"Use the GUI to upload the signed profile, or use --unsign to upload "
"the profile with the signature removed."
)
# import mobileconfig
with open(self.mobileconfig, "rb") as file:
mobileconfig_contents = plistlib.load(file)
with open(self.mobileconfig, "rb") as file:
mobileconfig_plist = file.read()
try:
mobileconfig_name = mobileconfig_contents["PayloadDisplayName"]
self.output(f"Configuration Profile name: {mobileconfig_name}")
self.output("Mobileconfig contents:", verbose_level=2)
self.output(mobileconfig_plist.decode("UTF-8"), verbose_level=2)
except KeyError:
raise ProcessorError(
"ERROR: Invalid mobileconfig file supplied - cannot import"
)
try:
description = mobileconfig_contents["PayloadDescription"]
except KeyError:
description = ""
try:
organization = mobileconfig_contents["PayloadOrganization"]
except KeyError:
organization = ""
# otherwise we are dealing with a payload plist and we need a few other bits of info
else:
if not self.profile_name:
raise ProcessorError("ERROR: No profile name supplied - cannot import")
if not self.payload:
raise ProcessorError(
"ERROR: No path to payload file supplied - cannot import"
)
if not self.identifier:
raise ProcessorError(
"ERROR: No identifier for mobileconfig supplied - cannot import"
)
mobileconfig_name = self.profile_name
description = ""
organization = ""
# we provide a default template which has no category or scope
if not self.template:
self.template = "Jamf_Templates/ProfileTemplate-no-scope.xml"
# automatically provide a description and organisation from the mobileconfig
# if not provided in the options
if not self.profile_description:
if description:
self.profile_description = description
else:
self.profile_description = (
"Config profile created by AutoPkg and JamfComputerProfileUploader"
)
if not self.organization:
if organization:
self.organization = organization
else:
organization = "AutoPkg"
self.organization = organization
# import profile template
with open(self.template, "r") as file:
template_contents = file.read()
# check for existing Configuration Profile
self.output(f"Checking for existing '{mobileconfig_name}' on {self.jamf_url}")
obj_id = self.get_api_obj_id_from_name(
self.jamf_url, mobileconfig_name, "os_x_configuration_profile", enc_creds
)
if obj_id:
self.output(
f"Configuration Profile '{mobileconfig_name}' already exists: ID {obj_id}"
)
if self.replace:
# grab existing UUID from profile as it MUST match on the destination
existing_uuid = self.get_existing_uuid(self.jamf_url, obj_id, enc_creds)
if not self.mobileconfig:
# generate the mobileconfig from the supplied payload
mobileconfig_plist = self.make_mobileconfig_from_payload(
self.payload,
self.identifier,
mobileconfig_name,
self.organization,
self.profile_description,
existing_uuid,
)
# now upload the mobileconfig by generating an XML template
if mobileconfig_plist:
self.upload_mobileconfig(
self.jamf_url,
enc_creds,
mobileconfig_name,
self.profile_description,
self.profile_category,
mobileconfig_plist,
self.profile_computergroup,
template_contents,
existing_uuid,
obj_id,
)
profile_updated = True
else:
self.output("A mobileconfig was not generated so cannot upload.")
else:
self.output(
"Not replacing existing Configuration Profile. "
"Override the replace_profile key to True to enforce."
)
else:
self.output(
f"Configuration Profile '{mobileconfig_name}' not found - will create"
)
new_uuid = self.generate_uuid()
if not self.mobileconfig:
# generate the mobileconfig from the supplied payload
mobileconfig_plist = self.make_mobileconfig_from_payload(
self.payload,
self.identifier,
mobileconfig_name,
self.organization,
self.profile_description,
new_uuid,
)
# now upload the mobileconfig by generating an XML template
if mobileconfig_plist:
self.upload_mobileconfig(
self.jamf_url,
enc_creds,
mobileconfig_name,
self.profile_description,
self.profile_category,
mobileconfig_plist,
self.profile_computergroup,
template_contents,
new_uuid,
)
profile_updated = True
else:
raise ProcessorError(
"A mobileconfig was not generated so cannot upload."
)
# output the summary
self.env["profile_name"] = self.profile_name
self.env["profile_updated"] = profile_updated
if profile_updated:
self.env["jamfcomputerprofileuploader_summary_result"] = {
"summary_text": (
"The following configuration profiles were uploaded to "
"or updated in Jamf Pro:"
),
"report_fields": ["mobileconfig_name", "profile_category"],
"data": {
"mobileconfig_name": mobileconfig_name,
"profile_category": self.profile_category,
},
}
if __name__ == "__main__":
PROCESSOR = JamfComputerProfileUploader()
PROCESSOR.execute_shell()
|
py | b40075c4746ea0cc1a66b84ae0c7e106f4c6a4c2 | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'afisha.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
py | b40077f7f00a77373f3ff64f14cecf84fc55c162 | """
Quick plot for the outputs of transcritical flow without shock
"""
import anuga.utilities.plot_utils as util
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as pyplot
from analytical_without_shock import *
from numpy import ones
p_st = util.get_output('transcritical.sww')
p2_st=util.get_centroids(p_st)
v = p2_st.y[10]
v2=(p2_st.y==v)
h,z = analytic_sol(p2_st.x[v2])
tid = 100
#Plot the stages##############################################################
pyplot.clf()
pyplot.plot(p2_st.x[v2], p2_st.stage[tid,v2], 'b.-', label='numerical stage') # 0*T/6
pyplot.plot(p2_st.x[v2], h+z,'r-', label='analytical stage')
pyplot.plot(p2_st.x[v2], z,'k-', label='bed elevation')
pyplot.title('Stage at time %s secs'% p2_st.time[tid])
##pyplot.ylim(-5.0,5.0)
pyplot.legend(loc='best')
pyplot.xlabel('Xposition')
pyplot.ylabel('Stage')
pyplot.savefig('stage_plot.png')
#Plot the momentums##########################################################
pyplot.clf()
pyplot.plot(p2_st.x[v2], p2_st.xmom[tid,v2], 'b.-', label='numerical') # 0*T/6
pyplot.plot(p2_st.x[v2], 1.53*ones(len(p2_st.x[v2])),'r-', label='analytical')
pyplot.title('Xmomentum at time %s secs'% p2_st.time[tid])
pyplot.legend(loc='best')
pyplot.ylim([1.52,1.54])
pyplot.xlabel('Xposition')
pyplot.ylabel('Xmomentum')
pyplot.savefig('xmom_plot.png')
#Plot the velocities#########################################################
pyplot.clf()
pyplot.plot(p2_st.x[v2], p2_st.xvel[tid,v2], 'b.-', label='numerical') # 0*T/6
pyplot.plot(p2_st.x[v2], 1.53/h,'r-', label='analytical')
pyplot.title('Xvelocity at time %s secs'% p2_st.time[tid])
pyplot.legend(loc='best')
pyplot.xlabel('Xposition')
pyplot.ylabel('Xvelocity')
pyplot.savefig('xvel_plot.png')
|
py | b400788163c286228cb59434ddc2de79393be18a | import colander
from cornice_apispec.converters.schema import TypeConverter
class MyNestedSchema(colander.MappingSchema):
my_precious = colander.SchemaNode(colander.Boolean())
class BodySchema(colander.MappingSchema):
id = colander.SchemaNode(colander.String())
timestamp = colander.SchemaNode(colander.Int())
obj = MyNestedSchema()
ex = colander.SchemaNode(colander.String(), missing=colander.drop, example='example string')
class QuerySchema(colander.MappingSchema):
foo = colander.SchemaNode(colander.String(), validator=colander.Length(3),
missing=colander.drop)
class HeaderSchema(colander.MappingSchema):
bar = colander.SchemaNode(colander.String(), missing=colander.drop)
class PathSchema(colander.MappingSchema):
meh = colander.SchemaNode(colander.String(), default='default')
class GetRequestSchema(colander.MappingSchema):
querystring = QuerySchema()
class PutRequestSchema(colander.MappingSchema):
body = BodySchema()
querystring = QuerySchema()
header = HeaderSchema()
class ResponseSchema(colander.MappingSchema):
body = BodySchema()
header = HeaderSchema()
response_schemas = {
'200': ResponseSchema(description='Return ice cream'),
'404': ResponseSchema(description='Return sadness')
}
class DeclarativeSchema(colander.MappingSchema):
@colander.instantiate(description='my body')
class body(colander.MappingSchema):
id = colander.SchemaNode(colander.String())
class AnotherDeclarativeSchema(colander.MappingSchema):
@colander.instantiate(description='my another body')
class body(colander.MappingSchema):
timestamp = colander.SchemaNode(colander.Int())
class AnyType(colander.SchemaType):
"""A simple custom colander type."""
def deserialize(self, cstruct=colander.null):
return cstruct
class AnyTypeConverter(TypeConverter):
def __call__(self, schema_node):
return {}
|
py | b40078ecfb623d5b450734b2584e1fd94cb871b5 | from abc import ABC
from math import *
from pprint import pprint
from typing import (
List,
Optional,
Tuple,
)
import numpy as np
# sys.path.append('../examples/flatirons')
# import func_tools
# matplotlib.use('tkagg')
from ..data_logging.data_recorder import DataRecorder
from .ask_tell_optimizer import AskTellOptimizer
from .dimension.dimension_info import DimensionInfo
# import shapely
class SPSADimensionInfo:
def __init__(self, theta: float, a_scale: float, info: DimensionInfo):
self.theta: float = theta
self.a_scale: float = a_scale
self.info: DimensionInfo = info
class SPSAOptimizer(AskTellOptimizer, ABC):
"""
A prototype implementation of a simultaneous perturbation stochastic approximation optimizer
see https://www.jhuapl.edu/SPSA/
see https://www.jhuapl.edu/SPSA/PDF-SPSA/Spall_An_Overview.PDF
see https://en.wikipedia.org/wiki/Simultaneous_perturbation_stochastic_approximation
see https://www.jhuapl.edu/SPSA/PDF-SPSA/Spall_Implementation_of_the_Simultaneous.PDF
"""
def __init__(self,
a: float, # s.t. a / (A+1)^alpha ~= smallest desired change in elements of theta in early iterations
c: float = 1e-3, # ~= std dev of measurement noise
A: float = 1e2, # <= # iterations expected, typically 10%
alpha: float = .602, # or 1.0
gamma: float = .101, # or 1/6th
dimensions: Optional[List[DimensionInfo]] = None,
num_estimates: int = 1,
) -> None:
self._recorder: Optional[DataRecorder] = None
self._a = a
self._c = c
self._A = A
self._alpha = alpha
self._gamma = gamma
self._k: int = 1 # step counter
self._theta: Optional[np.ndarray] = None
self._a_scale: Optional[np.ndarray] = None
self._dimensions: Optional[List[DimensionInfo]] = dimensions
self._num_estimates: int = num_estimates
self._ck: float = self._c
def setup(self, dimensions: [SPSADimensionInfo], recorder: DataRecorder) -> None:
"""
Setup parameters given initial conditions of the candidate
:param dimensions: list of search dimensions
:param recorder: data recorder
"""
self._k = 1
self._theta = np.fromiter((t.theta for t in dimensions), float)
self._a_scale = np.fromiter((t.a_scale for t in dimensions), float)
self._dimensions = [t.info for t in dimensions]
self._ck: float = self._c
self._recorder = recorder
self._recorder.add_columns('generation', 'gradient_estimate')
def ask(self, num: Optional[int] = None) -> [any]:
"""
:param num: the number of search points to return. If undefined, the optimizer will choose how many to return.
:return: a list of search points generated by the optimizer
"""
num_estimates = ceil(num / 2) if num is not None else self._num_estimates
num_dimensions = self.get_num_dimensions()
ck = self._c / (self._k ** self._gamma)
self._ck = ck
result = []
for c in range(num_estimates):
positive_candidate = np.empty(num_dimensions)
negative_candidate = np.empty(num_dimensions)
for i in range(num_dimensions):
delta_i = self._dimensions[i].sample()
perturbation = ck * delta_i
positive_candidate[i] = self._theta[i] + perturbation
negative_candidate[i] = self._theta[i] - perturbation
result.append(positive_candidate)
result.append(negative_candidate)
pprint(result)
return result
def tell(self, evaluations: [Tuple[float, any]]) -> None:
"""
Updates the optimizer with the objective evaluations of a list of search points
:param evaluations: a list of tuples of (evaluation, search point)
"""
gradient_estimate = np.zeros(self.get_num_dimensions())
def convert_candidate(candidate):
if isinstance(candidate, np.ndarray):
return candidate
return np.fromiter(candidate, dtype='float64')
num_estimates = floor(len(evaluations) / 2)
for i in range(num_estimates):
base = i * 2
positive_candidate = evaluations[base]
negative_candidate = evaluations[base + 1]
# compute gradient estimate
performance_difference = positive_candidate[0] - negative_candidate[0]
difference = convert_candidate(positive_candidate[1]) - convert_candidate(
negative_candidate[1]) # = 2*ck*delta
delta = performance_difference / (num_estimates * difference)
if fabs(performance_difference) > 1e-12:
gradient_estimate += delta
ak = self._a / (self._k + self._A) ** self._alpha
# print(ak * self.a_scale)
update = ak * self._a_scale * gradient_estimate
self._theta += update
self._k += 1
self._recorder.accumulate(evaluations, gradient_estimate)
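# For reference, the two-sided gradient estimate computed above follows the
# standard SPSA form from the references in the class docstring:
#
#   g_k(theta_k)_i ~= (y(theta_k + c_k*Delta_k) - y(theta_k - c_k*Delta_k))
#                     / (2 * c_k * Delta_k_i)
#
# where the 2*c_k*Delta_k denominator is recovered from the difference of the
# two perturbed candidates, and the num_estimates factor averages over the
# estimate pairs requested in ask().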
def best_solution(self) -> (Optional[float], any):
"""
:return: the current best solution and (estimated) score
"""
return None, self._theta
def get_num_candidates(self) -> int:
"""
:return: Suggested number of candidates to ask for (for parallel asking), or None for no suggestion
"""
return self._num_estimates * 2
def get_candidate_block_size(self) -> int:
"""
:return: number of candidates requested should be a multiple of this quantity
"""
return 2
def get_num_dimensions(self) -> int:
"""
:return: number of dimensions being optimized over, or None if not implemented or applicable
"""
return len(self._dimensions)
|