ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M) |
---|---|---|
py
|
1a569b029e3fb6f1bd1f79f475cc18dede44a979
|
# -*- coding:utf-8 -*-
# Copyright (c) 2015, Roger Duran. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import division
import os
from . import base
class DF(base.ThreadedPollText):
"""Disk Free Widget
By default the widget only displays if the space is less than warn_space.
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
('partition', '/', 'the partition to check space'),
('warn_color', 'ff0000', 'Warning color'),
('warn_space', 2, 'Warning space in scale defined by the ``measure`` option.'),
('visible_on_warn', True, 'Only display if warning'),
('measure', "G", "Measurement (G, M, B)"),
        ('format', '{p} ({uf}{m}|{r:.0f}%)',
         'String format (p: partition, s: size, f: free space, '
         'uf: user free space, m: measure, r: ratio of used space in percent, i.e. 1 - uf/s)'),
('update_interval', 60, 'The update interval.'),
]
measures = {"G": 1024 * 1024 * 1024,
"M": 1024 * 1024,
"B": 1024}
def __init__(self, **config):
base.ThreadedPollText.__init__(self, **config)
self.add_defaults(DF.defaults)
self.user_free = 0
self.calc = self.measures[self.measure]
def draw(self):
if self.user_free <= self.warn_space:
self.layout.colour = self.warn_color
else:
self.layout.colour = self.foreground
base.ThreadedPollText.draw(self)
def poll(self):
statvfs = os.statvfs(self.partition)
size = statvfs.f_frsize * statvfs.f_blocks // self.calc
free = statvfs.f_frsize * statvfs.f_bfree // self.calc
self.user_free = statvfs.f_frsize * statvfs.f_bavail // self.calc
if self.visible_on_warn and self.user_free >= self.warn_space:
text = ""
else:
text = self.format.format(p=self.partition, s=size, f=free,
uf=self.user_free, m=self.measure,
r=(size - self.user_free) / size * 100)
return text
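# Hedged usage sketch (added; not part of the original widget module). In a
# qtile config this widget is normally placed inside a bar; the partition and
# threshold values below are illustrative assumptions only:
#
#     from libqtile import bar, widget
#
#     bar.Bar([
#         # always visible, switches to warn_color below 5 GiB free on /home
#         widget.DF(partition="/home", warn_space=5, visible_on_warn=False),
#     ], 24)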
|
py
|
1a569b0d2c08d2114b5e725b8dcfe1358c4803b7
|
# pylint: disable=no-self-use,invalid-name
from __future__ import with_statement
from __future__ import absolute_import
from __future__ import print_function
from collections import defaultdict
import pytest
import numpy
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token, Vocabulary
from allennlp.data.fields import TextField, SequenceLabelField
from allennlp.data.token_indexers import SingleIdTokenIndexer
class TestSequenceLabelField(AllenNlpTestCase):
def setUp(self):
super(TestSequenceLabelField, self).setUp()
self.text = TextField([Token(t) for t in [u"here", u"are", u"some", u"words", u"."]],
{u"words": SingleIdTokenIndexer(u"words")})
def test_tag_length_mismatch_raises(self):
with pytest.raises(ConfigurationError):
wrong_tags = [u"B", u"O", u"O"]
_ = SequenceLabelField(wrong_tags, self.text)
def test_count_vocab_items_correctly_indexes_tags(self):
tags = [u"B", u"I", u"O", u"O", u"O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace=u"labels")
counter = defaultdict(lambda: defaultdict(int))
sequence_label_field.count_vocab_items(counter)
assert counter[u"labels"][u"B"] == 1
assert counter[u"labels"][u"I"] == 1
assert counter[u"labels"][u"O"] == 3
assert set(counter.keys()) == set([u"labels"])
def test_index_converts_field_correctly(self):
vocab = Vocabulary()
b_index = vocab.add_token_to_namespace(u"B", namespace=u'*labels')
i_index = vocab.add_token_to_namespace(u"I", namespace=u'*labels')
o_index = vocab.add_token_to_namespace(u"O", namespace=u'*labels')
tags = [u"B", u"I", u"O", u"O", u"O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace=u"*labels")
sequence_label_field.index(vocab)
# pylint: disable=protected-access
assert sequence_label_field._indexed_labels == [b_index, i_index, o_index, o_index, o_index]
# pylint: enable=protected-access
def test_as_tensor_produces_integer_targets(self):
vocab = Vocabulary()
vocab.add_token_to_namespace(u"B", namespace=u'*labels')
vocab.add_token_to_namespace(u"I", namespace=u'*labels')
vocab.add_token_to_namespace(u"O", namespace=u'*labels')
tags = [u"B", u"I", u"O", u"O", u"O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace=u"*labels")
sequence_label_field.index(vocab)
padding_lengths = sequence_label_field.get_padding_lengths()
tensor = sequence_label_field.as_tensor(padding_lengths).detach().cpu().numpy()
numpy.testing.assert_array_almost_equal(tensor, numpy.array([0, 1, 2, 2, 2]))
def test_sequence_label_field_raises_on_incorrect_type(self):
with pytest.raises(ConfigurationError):
_ = SequenceLabelField([[], [], [], [], []], self.text)
def test_class_variables_for_namespace_warnings_work_correctly(self):
# pylint: disable=protected-access
tags = [u"B", u"I", u"O", u"O", u"O"]
assert u"text" not in SequenceLabelField._already_warned_namespaces
with self.assertLogs(logger=u"allennlp.data.fields.sequence_label_field", level=u"WARNING"):
_ = SequenceLabelField(tags, self.text, label_namespace=u"text")
        # We've warned once, so the namespace should now be recorded in the class variable.
assert u"text" in SequenceLabelField._already_warned_namespaces
with pytest.raises(AssertionError):
with self.assertLogs(logger=u"allennlp.data.fields.sequence_label_field", level=u"WARNING"):
_ = SequenceLabelField(tags, self.text, label_namespace=u"text")
# ... but a new namespace should still log a warning.
assert u"text2" not in SequenceLabelField._already_warned_namespaces
with self.assertLogs(logger=u"allennlp.data.fields.sequence_label_field", level=u"WARNING"):
_ = SequenceLabelField(tags, self.text, label_namespace=u"text2")
def test_printing_doesnt_crash(self):
tags = [u"B", u"I", u"O", u"O", u"O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace=u"labels")
print(sequence_label_field)
|
py
|
1a569b93d177298ff2ab7552d0d6096acb5148f8
|
class Memo:
def __init__(self, user_name, tel):
self.user_name = user_name
self.tel = tel
|
py
|
1a569bcc833e4c0cc691d9a79eb690432889c832
|
from __future__ import annotations
from collections import Counter
import re
import string
import numpy as np
import pandas as pd
import torch
from torch.nn.init import xavier_uniform_
from torch.nn import Module, Embedding, Sequential, ELU, Conv1d, Linear, CrossEntropyLoss
from torch.nn.functional import avg_pool1d, dropout, relu, softmax
from torch.optim import Adam
from nlp_pytorch.data.base_dataset import SplitDataset
from nlp_pytorch.data.vocab import Vocabulary
from nlp_pytorch.train import train, make_train_state
def preprocess_text(text):
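    # Added illustration: preprocess_text("Hello, World!") returns
    # "hello , world ! " -- lower-cased, with punctuation split out as separate
    # tokens and runs of any other non-letter characters collapsed to a single space.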
text = " ".join(word.lower() for word in text.split(" "))
text = re.sub(r"([.,!?])", r" \1 ", text)
text = re.sub(r"[^a-zA-Z.,!?]+", r" ", text)
return text
class NewsVectorizer(object):
def __init__(self, title_vocab, category_vocab, max_title):
self.title_vocab = title_vocab
self.category_vocab = category_vocab
self.max_vector_len = max_title + 2
def vectorize(self, title, vector_length: int = -1) -> np.array:
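        # Added illustration: a title such as "some words" is mapped to
        # [<BEGIN>, idx("some"), idx("words"), <END>, <MASK>, ..., <MASK>],
        # padded with the mask index out to max_vector_len.  Note that the
        # `vector_length` argument is computed below but never actually used.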
indices = [self.title_vocab.begin_seq_index]
indices.extend(self.title_vocab.lookup_token(token) for token in title.split(" "))
indices.append(self.title_vocab.end_seq_index)
if vector_length < 0:
vector_length = len(indices)
out_vector = np.zeros(self.max_vector_len, dtype=np.int64)
out_vector[: len(indices)] = indices
out_vector[len(indices) :] = self.title_vocab.mask_index
return out_vector
@classmethod
def from_dataframe(cls, news_df, cutoff=25):
category_vocab = Vocabulary()
for category in sorted(set(news_df.category)):
category_vocab.add_token(category)
max_title = 0
word_counts = Counter()
for title in news_df.title:
title_tokens = title.split(" ")
max_title = max(max_title, len(title_tokens))
for token in title_tokens:
if token not in string.punctuation:
word_counts[token] += 1
title_vocab = Vocabulary()
for word, word_count in word_counts.items():
if word_count >= cutoff:
title_vocab.add_token(word)
return cls(title_vocab, category_vocab, max_title)
class NewsDataset(SplitDataset):
def __init__(self, news_df: pd.DataFrame, vectorizer) -> None:
super().__init__(news_df, vectorizer)
@classmethod
def load_dataset_and_make_vectorizer(cls, csv_file: str) -> NewsDataset:
news_df = pd.read_csv(csv_file)
return cls(news_df, NewsVectorizer.from_dataframe(news_df))
def __getitem__(self, index: int):
row = self._target_df.iloc[index]
title_vector = self.vectorizer.vectorize(row.title)
category_index = self.vectorizer.category_vocab.lookup_token(row.category)
return {
"x_data": title_vector,
"y_target": category_index,
}
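# Added note: each line of a GloVe text file is expected to look like
# "word 0.1 -0.2 0.3 ..." (a token followed by its d float components);
# load_glove_from_file turns this into a word -> row-index map plus a
# (num_words, d) numpy matrix.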
def load_glove_from_file(glove_filepath):
word_to_index = {}
embeddings = []
with open(glove_filepath, "r") as fp:
for index, line in enumerate(fp):
line = line.split(" ")
word_to_index[line[0]] = index
embedding_i = np.array([float(val) for val in line[1:]])
embeddings.append(embedding_i)
return word_to_index, np.stack(embeddings)
def make_embedding_matrix(glove_filepath, words):
word_to_idx, glove_embeddings = load_glove_from_file(glove_filepath)
embedding_size = glove_embeddings.shape[1]
final_embeddings = np.zeros((len(words), embedding_size))
for i, word in enumerate(words):
if word in word_to_idx:
final_embeddings[i, :] = glove_embeddings[word_to_idx[word]]
else:
embedding_i = torch.ones(1, embedding_size)
xavier_uniform_(embedding_i)
final_embeddings[i, :] = embedding_i
return final_embeddings
class NewsClassifier(Module):
def __init__(
self,
embedding_size,
num_embeddings,
num_channels,
hidden_dim,
num_classes,
dropout_p,
pretrained_embeddings=None,
padding_idx=0,
):
super().__init__()
if pretrained_embeddings is None:
self.emb = Embedding(
embedding_dim=embedding_size, num_embeddings=num_embeddings, padding_idx=padding_idx
)
else:
self.emb = Embedding(
embedding_dim=embedding_size,
num_embeddings=num_embeddings,
padding_idx=padding_idx,
_weight=pretrained_embeddings,
)
self.convnet = Sequential(
Conv1d(in_channels=embedding_size, out_channels=num_channels, kernel_size=3),
ELU(),
Conv1d(in_channels=num_channels, out_channels=num_channels, kernel_size=3, stride=2),
ELU(),
Conv1d(in_channels=num_channels, out_channels=num_channels, kernel_size=3, stride=2),
ELU(),
Conv1d(in_channels=num_channels, out_channels=num_channels, kernel_size=3),
ELU(),
)
self._dropout_p = dropout_p
self.fc1 = Linear(num_channels, hidden_dim)
self.fc2 = Linear(hidden_dim, num_classes)
def forward(self, x_in, apply_activator: bool = False):
x_embedded = self.emb(x_in).permute(0, 2, 1)
features = self.convnet(x_embedded)
remaining_size = features.size(dim=2)
features = avg_pool1d(features, remaining_size).squeeze(dim=2)
features = dropout(features, p=self._dropout_p)
intermediate_vector = relu(dropout(self.fc1(features), p=self._dropout_p))
prediction_vector = self.fc2(intermediate_vector)
if apply_activator:
prediction_vector = softmax(prediction_vector, dim=1)
return prediction_vector
def predict_category(title, classifier, vectorizer, max_length, device):
    title = preprocess_text(title)
    vectorized_title = torch.tensor(vectorizer.vectorize(title, vector_length=max_length)).to(device)
    result = classifier(vectorized_title.unsqueeze(0), apply_activator=True)
    probability_values, indices = result.max(dim=1)
    predicted_category = vectorizer.category_vocab.lookup_index(indices.item())
    return {"category": predicted_category, "probability": probability_values.item()}
def compute_accuracy(y_pred, y_target):
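    # Added illustration: y_pred holds (batch, num_classes) scores and y_target
    # holds (batch,) class indices; if 3 out of 4 argmax predictions match the
    # targets, this returns 75.0 (a percentage).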
_, y_pred_indices = y_pred.max(dim=1)
n_correct = torch.eq(y_pred_indices, y_target).sum().item()
return n_correct / len(y_pred_indices) * 100
def main(num_epochs: int = 100, batch_size: int = 128):
args = {
"news_csv": "data/news_with_splits.csv",
"save_dir": "model_storage/yelp/",
"model_state_file": "model.pth",
"glove_filepath": "data/glove.6B.100d.txt",
"vectorizer_file": "vectorizer.json",
"use_glove": False,
"embedding_size": 100,
"hidden_dim": 100,
"num_channels": 100,
"learning_rate": 0.001,
"num_epochs": num_epochs,
"batch_size": batch_size,
"early_stopping_criteria": 5,
"frequency_cutoff": 25,
"dropout_p": 0.1,
"cuda": False,
}
train_state = make_train_state()
if torch.cuda.is_available():
args["cuda"] = True
args["device"] = torch.device("cuda:0" if args["cuda"] else "cpu")
print(args)
dataset = NewsDataset.load_dataset_and_make_vectorizer(args["news_csv"])
vectorizer = dataset.vectorizer
words = vectorizer.title_vocab._token_to_idx.keys()
embeddings = make_embedding_matrix(glove_filepath=args["glove_filepath"], words=words)
classifier = NewsClassifier(
embedding_size=args["embedding_size"],
num_embeddings=len(vectorizer.title_vocab),
num_channels=args["num_channels"],
hidden_dim=args["hidden_dim"],
        num_classes=len(vectorizer.category_vocab),
dropout_p=args["dropout_p"],
pretrained_embeddings=torch.from_numpy(embeddings),
)
classifier = classifier.to(args["device"])
classifier.double()
loss_func = CrossEntropyLoss()
optimizer = Adam(classifier.parameters(), lr=args["learning_rate"])
train(args, train_state, dataset, classifier, optimizer, loss_func, compute_accuracy)
return {
"train_state": train_state,
"args": args,
"dataset": dataset,
"classifier": classifier,
"loss_func": loss_func,
"optimizer": optimizer,
}
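# Hedged usage sketch (added): a short smoke-test run.  The epoch and batch
# values are illustrative, and the CSV/GloVe paths hard-coded in main() are
# assumed to exist locally.
if __name__ == "__main__":
    results = main(num_epochs=1, batch_size=32)
    print(results["train_state"])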
|
py
|
1a569c1e5940110d819bee5efc4f89db9475156e
|
import os
import logging
logging.basicConfig(level=logging.DEBUG, format='[%(asctime)s] - %(message)s')
ld = logging.debug
class PartialObservabilityProblem:
def __init__(self,
dataset_path,
num_nodes,
T,
num_examples_to_generate,
test_fraction,
validation_fraction,
hidden_power_bus_id_list,
hidden_voltage_bus_id_list,
target_power_bus_id_list,
target_voltage_bus_id_list,
reference_bus_id,
Ns,
Nv):
"""
A data structure that captures the complete description of the partial observability
problem in power grids.
:param dataset_path: a string specifying the path to the dataset containing the
power and voltage recordings (4 csv files).
:param num_nodes: number of buses in the grid
:param T: number of time-steps in the scope of a single train/test example.
The last time step is partially observable
:param num_examples_to_generate: number of examples to be drawn at random from the
power-voltage records.
:param test_fraction: fractional number in [0.0,1.0]. Out of the dataset provided,
which fraction of the examples will be put aside as a test
set.
:param validation_fraction: fractional number in [0.0,1.0]. Out of the training set
provided, which fraction of the examples will be put aside as
a validation set.
        :param hidden_power_bus_id_list: a list of bus ids whose power is not observable at
                                         the last time step.
        :param hidden_voltage_bus_id_list: a list of bus ids whose voltage is not observable at
                                         the last time step.
        :param target_power_bus_id_list: a list of bus ids whose power is to be predicted at
                                         the last time step.
        :param target_voltage_bus_id_list: a list of bus ids whose voltage is to be predicted at
                                         the last time step.
        :param reference_bus_id: an integer, id of the bus defined as the "slack" or reference bus.
:param Ns: Number of observable power measurements in the last time step
:param Nv: Number of observable voltage measurements in the last time step
"""
# TODO: add slack bus id as a data member.
self.dataset_path = dataset_path
self.num_nodes = num_nodes
self.T = T
self.num_examples_to_generate = num_examples_to_generate
self.test_fraction = test_fraction
self.validation_fraction = validation_fraction
self.hidden_power_bus_id_list = hidden_power_bus_id_list
self.hidden_voltage_bus_id_list = hidden_voltage_bus_id_list
self.visible_power_bus_id_list = list(sorted(set(range(num_nodes)) - set(hidden_power_bus_id_list)))# complementary to the hidden one
self.visible_voltage_bus_id_list = list(sorted(set(range(num_nodes)) - set(hidden_voltage_bus_id_list)))# complementary to the hidden one
self.target_power_bus_id_list = target_power_bus_id_list
self.target_voltage_bus_id_list = target_voltage_bus_id_list
self.reference_bus_id = reference_bus_id
self.Ns = Ns
self.Nv = Nv
# Measurement counts
self.num_phasors_per_bus = 2 # there are 2 phasors per bus (S,V)
self.num_measurements_per_phasor = 2 # phasors are complex (2 values)
self.num_hidden_power_measurements = len(self.hidden_power_bus_id_list) * self.num_measurements_per_phasor
self.num_hidden_voltage_measurements = len(self.hidden_voltage_bus_id_list) * self.num_measurements_per_phasor
self.num_hidden_measurements = self.num_hidden_power_measurements + self.num_hidden_voltage_measurements
self.num_target_power_measurements = len(self.target_power_bus_id_list) * self.num_measurements_per_phasor
self.num_target_voltage_measurements = len(self.target_voltage_bus_id_list) * self.num_measurements_per_phasor
self.num_target_measurements = self.num_target_voltage_measurements + self.num_target_power_measurements
        self.num_target_buses = self.num_target_measurements // self.num_phasors_per_bus if self.num_target_power_measurements == 0 else -1  # this value is only meaningful when no power measurements are sought.
self.num_visible_power_measurements = len(self.visible_power_bus_id_list) * self.num_measurements_per_phasor
self.num_visible_voltage_measurements = len(self.visible_voltage_bus_id_list) * self.num_measurements_per_phasor
self.num_all_measurements = self.num_nodes * self.num_phasors_per_bus * self.num_measurements_per_phasor
self.num_remaining_measurements = self.num_all_measurements - self.num_hidden_measurements # what is left after all the hidden measurements are removed.
assert(Ns * self.num_measurements_per_phasor == self.num_visible_power_measurements)
assert(Nv * self.num_measurements_per_phasor == self.num_visible_voltage_measurements)
assert(self.num_all_measurements == self.num_visible_voltage_measurements + self.num_visible_power_measurements + self.num_hidden_voltage_measurements + self.num_hidden_power_measurements)
def set_hidden_measurement_lists_from_Ns_Nv(num_nodes, Ns, Nv, list_bus_id_power_hiding_priority=None, list_bus_id_voltage_hiding_priority=None):
"""
Returns the list of the hidden power bus ids and a list of hidden voltage ids
:param num_nodes: number of buses in the grid
:param Ns: Number of observable power measurements in the last time step
:param Nv: Number of observable voltage measurements in the last time step
:param list_bus_id_power_hiding_priority: list of bus indices which was sorted according to the preferred
order of hiding. Index 0 of this list corresponds to the most likely bus to be hidden.
:param list_bus_id_voltage_hiding_priority: list of bus indices which was sorted according to the preferred
order of hiding. Index 0 of this list corresponds to the most likely bus to be hidden.
:return:
"""
if list_bus_id_power_hiding_priority is None:
list_bus_id_power_hiding_priority = list(range(num_nodes))
if list_bus_id_voltage_hiding_priority is None:
list_bus_id_voltage_hiding_priority = list(range(num_nodes))
hidden_power_bus_id_list = []
next_busid_to_hide = 0
for bus_id in range(Ns, num_nodes):
hidden_power_bus_id_list.append(list_bus_id_power_hiding_priority[next_busid_to_hide])
next_busid_to_hide += 1
hidden_voltage_bus_id_list = []
next_busid_to_hide = 0
for bus_id in range(Nv, num_nodes):
hidden_voltage_bus_id_list.append(list_bus_id_voltage_hiding_priority[next_busid_to_hide])
next_busid_to_hide += 1
hidden_power_bus_id_list.sort()
hidden_voltage_bus_id_list.sort()
return hidden_power_bus_id_list, hidden_voltage_bus_id_list
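# Added illustration: with num_nodes=4, Ns=1, Nv=0 and the default (identity)
# priority lists, the first num_nodes-Ns buses lose their power measurements
# and the first num_nodes-Nv buses lose their voltage measurements:
#     set_hidden_measurement_lists_from_Ns_Nv(4, 1, 0) -> ([0, 1, 2], [0, 1, 2, 3])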
def set_hidden_measurement_lists_from_observability(num_nodes, observability, list_bus_id_hiding_priority=None):
"""
Returns the list of the hidden power bus ids and a list of hidden voltage ids
:param num_nodes: number of buses in the grid
:param observability: a fractional number in [0.0, 1.0] which
sets the observability degree considered
in the problem.
:param list_bus_id_hiding_priority: list of bus indices which was sorted according to the preferred
order of hiding. Index 0 of this list corresponds to the most likely bus to be hidden.
:return:
"""
if list_bus_id_hiding_priority is None:
list_bus_id_hiding_priority = list(range(num_nodes))
observability_step_size = 1 / float(2 * num_nodes)
hidden_power_bus_id_list = []
next_busid_to_hide = 0
for observability_step in range(1,num_nodes+1):
threshold_for_current_measurement = observability_step * observability_step_size
if threshold_for_current_measurement >= observability:
hidden_power_bus_id_list.append(list_bus_id_hiding_priority[next_busid_to_hide])
next_busid_to_hide += 1
hidden_voltage_bus_id_list = []
next_busid_to_hide = 0
for observability_step in range(1,num_nodes+1):
threshold_for_current_measurement = 0.5 + observability_step * observability_step_size
if threshold_for_current_measurement >= observability:
hidden_voltage_bus_id_list.append(list_bus_id_hiding_priority[next_busid_to_hide])
next_busid_to_hide += 1
hidden_power_bus_id_list.sort()
hidden_voltage_bus_id_list.sort()
return hidden_power_bus_id_list, hidden_voltage_bus_id_list
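# Added illustration: for num_nodes=4 the observability step is 1/8, so with
# observability=0.45 only the last power threshold (0.5) and all four voltage
# thresholds (0.625..1.0) reach it:
#     set_hidden_measurement_lists_from_observability(4, 0.45) -> ([0], [0, 1, 2, 3])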
def make_str_for_pretty_print_int_list(lst):
"""
    Produce a string that neatly prints a list of integers.
    This is done by compacting the integers into contiguous ranges.
    For example, [0, 1, 2, 3, 4, 10, 11, 12] becomes "[0..4, 10..12]".
:param lst: list of integers
:return: string
"""
stri="["
prev=None
seq_start_num = None
for i,num in enumerate(lst):
if prev is None:
# Warmup
seq_start_num = num
stri = stri + str(num)
elif prev != num - 1:
if seq_start_num != prev:
# Previous sequence contained more than 1 number.
if seq_start_num == prev-1:
stri = stri + ", " + str(prev)
else:
stri = stri + ".." + str(prev)
# Start new sequence
stri = stri + ", " + str(num)
seq_start_num = num
elif i==len(lst)-1:
if seq_start_num != num:
# Previous sequence contained more than 1 number.
if seq_start_num == prev:
stri = stri + ", " + str(num)
else:
stri = stri + ".." + str(num)
prev = num
stri = stri +"]"
return stri
def create_partial_observability_problem(dataset_dir, dataset_name, T, Ns, Nv, verbose=True, reverse_bus_hiding_order=False):
"""
Constructs a setting of a partial observability problem.
    This function mainly determines the number of nodes and
    sets the concrete bus ids to be hidden, targeted, etc.,
    in accordance with the well-known data sets and with the
    requested numbers of observable power (Ns) and voltage (Nv)
    measurements at the last time step.
:param dataset_dir: a directory that contains all the datasets
:param dataset_name: a directory name of the dataset
:param T: Number of time steps to be observed at
:param Ns: Number of observable power measurements in the last time step
:param Nv: Number of observable voltage measurements in the last time step
:param verbose: boolean - if true then upon the creation of the pop
object - its attributes will be printed.
:return:
"""
# Common setting:
dataset_path = os.path.join(dataset_dir, dataset_name)
if dataset_name == 'solar_smooth_ord_60_downsampling_factor_60':
# 4-nodes grid with 10080 recorded time steps
num_nodes = 4
reference_bus_id = 3
num_examples_to_generate = 9000 # how many examples will be generated from the existing CSV files (generation is carried out via random T-long time series).
test_fraction = 0.1 # fraction of the generated examples that will become a test set. The splitting between the training and test time series is leakage-safe. Namely, no training time series overlaps with test time series.
        validation_fraction = 0.0  # fraction of the training examples that will become a validation set. Warning: the current way of splitting training into validation creates data leakage between training and validation, since their time series overlap!
# Set the observed bus id lists according to the "observability" parameter in a contiguous manner.
# TODO: Make sure that custom id list is synchronized with the following processing (in the neural net etc)
bus_hiding_priority_list = [0, 1, 2, 3]
bus_hiding_priority_list = list(reversed(bus_hiding_priority_list)) if reverse_bus_hiding_order else bus_hiding_priority_list
hidden_power_bus_id_list, hidden_voltage_bus_id_list = set_hidden_measurement_lists_from_Ns_Nv(num_nodes, Ns, Nv,
bus_hiding_priority_list,
bus_hiding_priority_list)
# Target bus ids:
# We assume that we only want to estimate all the voltage and none of the powers
# as the powers are easily recoverable once the voltages are estimated
target_power_bus_id_list = []
target_voltage_bus_id_list = list(range(num_nodes))
        # Example for observability=0.45 in this 4-node grid:
# hidden_power_bus_id_list = [0] # hidden from input in T-1 (last) time-step
# hidden_voltage_bus_id_list = [0, 1, 2, 3] # hidden from input in T-1 (last) time-step
# target_power_bus_id_list = []
# target_voltage_bus_id_list = [0, 1, 2, 3]
elif dataset_name == 'ieee37_smooth_ord_60_downsampling_factor_60':
# 36-nodes grid with 10080 recorded time steps
num_nodes = 36
reference_bus_id = 0
num_examples_to_generate = 9000 # how many examples will be generated from the existing CSV files (generation is carried out via random T-long time series).
test_fraction = 0.1 # fraction of the generated examples that will become a test set. The splitting between the training and test time series is leakage-safe. Namely, no training time series overlaps with test time series.
        validation_fraction = 0.0  # fraction of the training examples that will become a validation set. Warning: the current way of splitting training into validation creates data leakage between training and validation, since their time series overlap!
# Set the observed bus id lists according to the "observability" parameter in a contiguous manner.
# TODO: Make sure that custom id list is synchronized with the following processing (in the neural net etc)
bus_hiding_priority_list = list(reversed(range(num_nodes))) # This creates a topological ordering of the nodes, such that the reference bus (slack bus) is the last to be hidden.
bus_hiding_priority_list = list(reversed(bus_hiding_priority_list[:-1]))+[bus_hiding_priority_list[-1]] if reverse_bus_hiding_order else bus_hiding_priority_list
hidden_power_bus_id_list, hidden_voltage_bus_id_list = set_hidden_measurement_lists_from_Ns_Nv(num_nodes, Ns, Nv,
bus_hiding_priority_list,
bus_hiding_priority_list)
# Target bus ids:
# We assume that we only want to estimate all the voltage and none of the powers
# as the powers are easily recoverable once the voltages are estimated
target_power_bus_id_list = []
target_voltage_bus_id_list = list(range(num_nodes))
else:
raise NameError("Unknown dataset required \"{}\"".format(dataset_name))
pop = PartialObservabilityProblem(dataset_path, num_nodes, T, num_examples_to_generate, test_fraction,
validation_fraction, hidden_power_bus_id_list, hidden_voltage_bus_id_list,
target_power_bus_id_list, target_voltage_bus_id_list, reference_bus_id,
Ns, Nv)
if verbose:
ld("Created PartialObservabilityProblem scenario:")
ld(" Dataset name: {}".format(dataset_name))
ld(" num_nodes: {}".format(num_nodes))
ld(" T: {}".format(T))
ld(" (Ns) number of observable bus powers at time=T-1: {}".format(Ns))
ld(" (Nv) number of observable bus voltages at time=T-1: {}".format(Nv))
ld(" num_examples_to_generate: {}".format(num_examples_to_generate))
ld(" test_fraction: {}".format(test_fraction))
ld(" validation_fraction: {}".format(validation_fraction))
ld(" hidden_power_bus_id_list: {}".format(make_str_for_pretty_print_int_list(hidden_power_bus_id_list)))
ld(" hidden_voltage_bus_id_list: {}".format(make_str_for_pretty_print_int_list(hidden_voltage_bus_id_list)))
ld(" target_power_bus_id_list: {}".format(make_str_for_pretty_print_int_list(target_power_bus_id_list)))
ld(" target_voltage_bus_id_list: {}".format(make_str_for_pretty_print_int_list(target_voltage_bus_id_list)))
return pop
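# Hedged usage sketch (added): the dataset directory below is an illustrative
# assumption, and the T/Ns/Nv values are illustrative as well; the dataset name
# is the 4-bus scenario handled above.
if __name__ == "__main__":
    example_pop = create_partial_observability_problem(
        dataset_dir="datasets", dataset_name="solar_smooth_ord_60_downsampling_factor_60",
        T=5, Ns=2, Nv=1)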
|
py
|
1a569c98bbfc11dd512f93a2db536ed702492852
|
# coding: utf-8
"""
NiFi Rest Api
The Rest Api provides programmatic access to command and control a NiFi instance in real time. Start and stop processors, monitor queues, query provenance data, and more. Each endpoint below includes a description, definitions of the expected input and output, potential response codes, and the authorizations required to invoke each service.
OpenAPI spec version: 1.11.1-SNAPSHOT
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
# import models into sdk package
from .models.about_dto import AboutDTO
from .models.about_entity import AboutEntity
from .models.access_configuration_dto import AccessConfigurationDTO
from .models.access_configuration_entity import AccessConfigurationEntity
from .models.access_policy_dto import AccessPolicyDTO
from .models.access_policy_entity import AccessPolicyEntity
from .models.access_policy_summary_dto import AccessPolicySummaryDTO
from .models.access_policy_summary_entity import AccessPolicySummaryEntity
from .models.access_status_dto import AccessStatusDTO
from .models.access_status_entity import AccessStatusEntity
from .models.action_dto import ActionDTO
from .models.action_details_dto import ActionDetailsDTO
from .models.action_entity import ActionEntity
from .models.activate_controller_services_entity import ActivateControllerServicesEntity
from .models.affected_component_dto import AffectedComponentDTO
from .models.affected_component_entity import AffectedComponentEntity
from .models.allowable_value_dto import AllowableValueDTO
from .models.allowable_value_entity import AllowableValueEntity
from .models.attribute_dto import AttributeDTO
from .models.banner_dto import BannerDTO
from .models.banner_entity import BannerEntity
from .models.batch_settings_dto import BatchSettingsDTO
from .models.batch_size import BatchSize
from .models.bucket import Bucket
from .models.bucket_dto import BucketDTO
from .models.bucket_entity import BucketEntity
from .models.buckets_entity import BucketsEntity
from .models.bulletin_board_dto import BulletinBoardDTO
from .models.bulletin_board_entity import BulletinBoardEntity
from .models.bulletin_dto import BulletinDTO
from .models.bulletin_entity import BulletinEntity
from .models.bundle import Bundle
from .models.bundle_dto import BundleDTO
from .models.cluste_summary_entity import ClusteSummaryEntity
from .models.cluster_dto import ClusterDTO
from .models.cluster_entity import ClusterEntity
from .models.cluster_search_results_entity import ClusterSearchResultsEntity
from .models.cluster_summary_dto import ClusterSummaryDTO
from .models.component_details_dto import ComponentDetailsDTO
from .models.component_difference_dto import ComponentDifferenceDTO
from .models.component_history_dto import ComponentHistoryDTO
from .models.component_history_entity import ComponentHistoryEntity
from .models.component_reference_dto import ComponentReferenceDTO
from .models.component_reference_entity import ComponentReferenceEntity
from .models.component_restriction_permission_dto import ComponentRestrictionPermissionDTO
from .models.component_search_result_dto import ComponentSearchResultDTO
from .models.component_state_dto import ComponentStateDTO
from .models.component_state_entity import ComponentStateEntity
from .models.component_validation_result_dto import ComponentValidationResultDTO
from .models.component_validation_result_entity import ComponentValidationResultEntity
from .models.component_validation_results_entity import ComponentValidationResultsEntity
from .models.connectable_component import ConnectableComponent
from .models.connectable_dto import ConnectableDTO
from .models.connection_dto import ConnectionDTO
from .models.connection_entity import ConnectionEntity
from .models.connection_statistics_dto import ConnectionStatisticsDTO
from .models.connection_statistics_entity import ConnectionStatisticsEntity
from .models.connection_statistics_snapshot_dto import ConnectionStatisticsSnapshotDTO
from .models.connection_status_dto import ConnectionStatusDTO
from .models.connection_status_entity import ConnectionStatusEntity
from .models.connection_status_predictions_snapshot_dto import ConnectionStatusPredictionsSnapshotDTO
from .models.connection_status_snapshot_dto import ConnectionStatusSnapshotDTO
from .models.connection_status_snapshot_entity import ConnectionStatusSnapshotEntity
from .models.connections_entity import ConnectionsEntity
from .models.controller_bulletins_entity import ControllerBulletinsEntity
from .models.controller_configuration_dto import ControllerConfigurationDTO
from .models.controller_configuration_entity import ControllerConfigurationEntity
from .models.controller_dto import ControllerDTO
from .models.controller_entity import ControllerEntity
from .models.controller_service_api import ControllerServiceAPI
from .models.controller_service_api_dto import ControllerServiceApiDTO
from .models.controller_service_dto import ControllerServiceDTO
from .models.controller_service_entity import ControllerServiceEntity
from .models.controller_service_referencing_component_dto import ControllerServiceReferencingComponentDTO
from .models.controller_service_referencing_component_entity import ControllerServiceReferencingComponentEntity
from .models.controller_service_referencing_components_entity import ControllerServiceReferencingComponentsEntity
from .models.controller_service_run_status_entity import ControllerServiceRunStatusEntity
from .models.controller_service_status_dto import ControllerServiceStatusDTO
from .models.controller_service_types_entity import ControllerServiceTypesEntity
from .models.controller_services_entity import ControllerServicesEntity
from .models.controller_status_dto import ControllerStatusDTO
from .models.controller_status_entity import ControllerStatusEntity
from .models.copy_snippet_request_entity import CopySnippetRequestEntity
from .models.counter_dto import CounterDTO
from .models.counter_entity import CounterEntity
from .models.counters_dto import CountersDTO
from .models.counters_entity import CountersEntity
from .models.counters_snapshot_dto import CountersSnapshotDTO
from .models.create_active_request_entity import CreateActiveRequestEntity
from .models.create_template_request_entity import CreateTemplateRequestEntity
from .models.current_user_entity import CurrentUserEntity
from .models.difference_dto import DifferenceDTO
from .models.dimensions_dto import DimensionsDTO
from .models.documented_type_dto import DocumentedTypeDTO
from .models.drop_request_dto import DropRequestDTO
from .models.drop_request_entity import DropRequestEntity
from .models.explicit_restriction_dto import ExplicitRestrictionDTO
from .models.external_controller_service_reference import ExternalControllerServiceReference
from .models.flow_breadcrumb_dto import FlowBreadcrumbDTO
from .models.flow_breadcrumb_entity import FlowBreadcrumbEntity
from .models.flow_comparison_entity import FlowComparisonEntity
from .models.flow_configuration_dto import FlowConfigurationDTO
from .models.flow_configuration_entity import FlowConfigurationEntity
from .models.flow_dto import FlowDTO
from .models.flow_entity import FlowEntity
from .models.flow_file_dto import FlowFileDTO
from .models.flow_file_entity import FlowFileEntity
from .models.flow_file_summary_dto import FlowFileSummaryDTO
from .models.flow_snippet_dto import FlowSnippetDTO
from .models.funnel_dto import FunnelDTO
from .models.funnel_entity import FunnelEntity
from .models.funnels_entity import FunnelsEntity
from .models.garbage_collection_dto import GarbageCollectionDTO
from .models.history_dto import HistoryDTO
from .models.history_entity import HistoryEntity
from .models.input_ports_entity import InputPortsEntity
from .models.instantiate_template_request_entity import InstantiateTemplateRequestEntity
from .models.jaxb_link import JaxbLink
from .models.label_dto import LabelDTO
from .models.label_entity import LabelEntity
from .models.labels_entity import LabelsEntity
from .models.lineage_dto import LineageDTO
from .models.lineage_entity import LineageEntity
from .models.lineage_request_dto import LineageRequestDTO
from .models.lineage_results_dto import LineageResultsDTO
from .models.listing_request_dto import ListingRequestDTO
from .models.listing_request_entity import ListingRequestEntity
from .models.node_connection_statistics_snapshot_dto import NodeConnectionStatisticsSnapshotDTO
from .models.node_connection_status_snapshot_dto import NodeConnectionStatusSnapshotDTO
from .models.node_counters_snapshot_dto import NodeCountersSnapshotDTO
from .models.node_dto import NodeDTO
from .models.node_entity import NodeEntity
from .models.node_event_dto import NodeEventDTO
from .models.node_port_status_snapshot_dto import NodePortStatusSnapshotDTO
from .models.node_process_group_status_snapshot_dto import NodeProcessGroupStatusSnapshotDTO
from .models.node_processor_status_snapshot_dto import NodeProcessorStatusSnapshotDTO
from .models.node_remote_process_group_status_snapshot_dto import NodeRemoteProcessGroupStatusSnapshotDTO
from .models.node_search_result_dto import NodeSearchResultDTO
from .models.node_status_snapshots_dto import NodeStatusSnapshotsDTO
from .models.node_system_diagnostics_snapshot_dto import NodeSystemDiagnosticsSnapshotDTO
from .models.output_ports_entity import OutputPortsEntity
from .models.parameter_context_dto import ParameterContextDTO
from .models.parameter_context_entity import ParameterContextEntity
from .models.parameter_context_reference_dto import ParameterContextReferenceDTO
from .models.parameter_context_reference_entity import ParameterContextReferenceEntity
from .models.parameter_context_update_request_dto import ParameterContextUpdateRequestDTO
from .models.parameter_context_update_request_entity import ParameterContextUpdateRequestEntity
from .models.parameter_context_update_step_dto import ParameterContextUpdateStepDTO
from .models.parameter_context_validation_request_dto import ParameterContextValidationRequestDTO
from .models.parameter_context_validation_request_entity import ParameterContextValidationRequestEntity
from .models.parameter_context_validation_step_dto import ParameterContextValidationStepDTO
from .models.parameter_contexts_entity import ParameterContextsEntity
from .models.parameter_dto import ParameterDTO
from .models.parameter_entity import ParameterEntity
from .models.peer_dto import PeerDTO
from .models.peers_entity import PeersEntity
from .models.permissions import Permissions
from .models.permissions_dto import PermissionsDTO
from .models.port_dto import PortDTO
from .models.port_entity import PortEntity
from .models.port_run_status_entity import PortRunStatusEntity
from .models.port_status_dto import PortStatusDTO
from .models.port_status_entity import PortStatusEntity
from .models.port_status_snapshot_dto import PortStatusSnapshotDTO
from .models.port_status_snapshot_entity import PortStatusSnapshotEntity
from .models.position import Position
from .models.position_dto import PositionDTO
from .models.previous_value_dto import PreviousValueDTO
from .models.prioritizer_types_entity import PrioritizerTypesEntity
from .models.process_group_dto import ProcessGroupDTO
from .models.process_group_entity import ProcessGroupEntity
from .models.process_group_flow_dto import ProcessGroupFlowDTO
from .models.process_group_flow_entity import ProcessGroupFlowEntity
from .models.process_group_name_dto import ProcessGroupNameDTO
from .models.process_group_status_dto import ProcessGroupStatusDTO
from .models.process_group_status_entity import ProcessGroupStatusEntity
from .models.process_group_status_snapshot_dto import ProcessGroupStatusSnapshotDTO
from .models.process_group_status_snapshot_entity import ProcessGroupStatusSnapshotEntity
from .models.process_groups_entity import ProcessGroupsEntity
from .models.processor_config_dto import ProcessorConfigDTO
from .models.processor_dto import ProcessorDTO
from .models.processor_entity import ProcessorEntity
from .models.processor_run_status_entity import ProcessorRunStatusEntity
from .models.processor_status_dto import ProcessorStatusDTO
from .models.processor_status_entity import ProcessorStatusEntity
from .models.processor_status_snapshot_dto import ProcessorStatusSnapshotDTO
from .models.processor_status_snapshot_entity import ProcessorStatusSnapshotEntity
from .models.processor_types_entity import ProcessorTypesEntity
from .models.processors_entity import ProcessorsEntity
from .models.property_descriptor_dto import PropertyDescriptorDTO
from .models.property_descriptor_entity import PropertyDescriptorEntity
from .models.property_history_dto import PropertyHistoryDTO
from .models.provenance_dto import ProvenanceDTO
from .models.provenance_entity import ProvenanceEntity
from .models.provenance_event_dto import ProvenanceEventDTO
from .models.provenance_event_entity import ProvenanceEventEntity
from .models.provenance_link_dto import ProvenanceLinkDTO
from .models.provenance_node_dto import ProvenanceNodeDTO
from .models.provenance_options_dto import ProvenanceOptionsDTO
from .models.provenance_options_entity import ProvenanceOptionsEntity
from .models.provenance_request_dto import ProvenanceRequestDTO
from .models.provenance_results_dto import ProvenanceResultsDTO
from .models.provenance_searchable_field_dto import ProvenanceSearchableFieldDTO
from .models.queue_size_dto import QueueSizeDTO
from .models.registry_client_entity import RegistryClientEntity
from .models.registry_clients_entity import RegistryClientsEntity
from .models.registry_dto import RegistryDTO
from .models.relationship_dto import RelationshipDTO
from .models.remote_port_run_status_entity import RemotePortRunStatusEntity
from .models.remote_process_group_contents_dto import RemoteProcessGroupContentsDTO
from .models.remote_process_group_dto import RemoteProcessGroupDTO
from .models.remote_process_group_entity import RemoteProcessGroupEntity
from .models.remote_process_group_port_dto import RemoteProcessGroupPortDTO
from .models.remote_process_group_port_entity import RemoteProcessGroupPortEntity
from .models.remote_process_group_status_dto import RemoteProcessGroupStatusDTO
from .models.remote_process_group_status_entity import RemoteProcessGroupStatusEntity
from .models.remote_process_group_status_snapshot_dto import RemoteProcessGroupStatusSnapshotDTO
from .models.remote_process_group_status_snapshot_entity import RemoteProcessGroupStatusSnapshotEntity
from .models.remote_process_groups_entity import RemoteProcessGroupsEntity
from .models.reporting_task_dto import ReportingTaskDTO
from .models.reporting_task_entity import ReportingTaskEntity
from .models.reporting_task_run_status_entity import ReportingTaskRunStatusEntity
from .models.reporting_task_status_dto import ReportingTaskStatusDTO
from .models.reporting_task_types_entity import ReportingTaskTypesEntity
from .models.reporting_tasks_entity import ReportingTasksEntity
from .models.required_permission_dto import RequiredPermissionDTO
from .models.resource_dto import ResourceDTO
from .models.resources_entity import ResourcesEntity
from .models.revision_dto import RevisionDTO
from .models.schedule_components_entity import ScheduleComponentsEntity
from .models.search_result_group_dto import SearchResultGroupDTO
from .models.search_results_dto import SearchResultsDTO
from .models.search_results_entity import SearchResultsEntity
from .models.snippet_dto import SnippetDTO
from .models.snippet_entity import SnippetEntity
from .models.start_version_control_request_entity import StartVersionControlRequestEntity
from .models.state_entry_dto import StateEntryDTO
from .models.state_map_dto import StateMapDTO
from .models.status_descriptor_dto import StatusDescriptorDTO
from .models.status_history_dto import StatusHistoryDTO
from .models.status_history_entity import StatusHistoryEntity
from .models.status_snapshot_dto import StatusSnapshotDTO
from .models.storage_usage_dto import StorageUsageDTO
from .models.streaming_output import StreamingOutput
from .models.submit_replay_request_entity import SubmitReplayRequestEntity
from .models.system_diagnostics_dto import SystemDiagnosticsDTO
from .models.system_diagnostics_entity import SystemDiagnosticsEntity
from .models.system_diagnostics_snapshot_dto import SystemDiagnosticsSnapshotDTO
from .models.template_dto import TemplateDTO
from .models.template_entity import TemplateEntity
from .models.templates_entity import TemplatesEntity
from .models.tenant_dto import TenantDTO
from .models.tenant_entity import TenantEntity
from .models.tenants_entity import TenantsEntity
from .models.transaction_result_entity import TransactionResultEntity
from .models.update_controller_service_reference_request_entity import UpdateControllerServiceReferenceRequestEntity
from .models.user_dto import UserDTO
from .models.user_entity import UserEntity
from .models.user_group_dto import UserGroupDTO
from .models.user_group_entity import UserGroupEntity
from .models.user_groups_entity import UserGroupsEntity
from .models.users_entity import UsersEntity
from .models.variable_dto import VariableDTO
from .models.variable_entity import VariableEntity
from .models.variable_registry_dto import VariableRegistryDTO
from .models.variable_registry_entity import VariableRegistryEntity
from .models.variable_registry_update_request_dto import VariableRegistryUpdateRequestDTO
from .models.variable_registry_update_request_entity import VariableRegistryUpdateRequestEntity
from .models.variable_registry_update_step_dto import VariableRegistryUpdateStepDTO
from .models.version_control_component_mapping_entity import VersionControlComponentMappingEntity
from .models.version_control_information_dto import VersionControlInformationDTO
from .models.version_control_information_entity import VersionControlInformationEntity
from .models.version_info_dto import VersionInfoDTO
from .models.versioned_connection import VersionedConnection
from .models.versioned_controller_service import VersionedControllerService
from .models.versioned_flow import VersionedFlow
from .models.versioned_flow_coordinates import VersionedFlowCoordinates
from .models.versioned_flow_dto import VersionedFlowDTO
from .models.versioned_flow_entity import VersionedFlowEntity
from .models.versioned_flow_snapshot import VersionedFlowSnapshot
from .models.versioned_flow_snapshot_entity import VersionedFlowSnapshotEntity
from .models.versioned_flow_snapshot_metadata import VersionedFlowSnapshotMetadata
from .models.versioned_flow_snapshot_metadata_entity import VersionedFlowSnapshotMetadataEntity
from .models.versioned_flow_snapshot_metadata_set_entity import VersionedFlowSnapshotMetadataSetEntity
from .models.versioned_flow_update_request_dto import VersionedFlowUpdateRequestDTO
from .models.versioned_flow_update_request_entity import VersionedFlowUpdateRequestEntity
from .models.versioned_flows_entity import VersionedFlowsEntity
from .models.versioned_funnel import VersionedFunnel
from .models.versioned_label import VersionedLabel
from .models.versioned_parameter import VersionedParameter
from .models.versioned_parameter_context import VersionedParameterContext
from .models.versioned_port import VersionedPort
from .models.versioned_process_group import VersionedProcessGroup
from .models.versioned_processor import VersionedProcessor
from .models.versioned_property_descriptor import VersionedPropertyDescriptor
from .models.versioned_remote_group_port import VersionedRemoteGroupPort
from .models.versioned_remote_process_group import VersionedRemoteProcessGroup
# import apis into sdk package
from .apis.access_api import AccessApi
from .apis.connections_api import ConnectionsApi
from .apis.controller_api import ControllerApi
from .apis.controller_services_api import ControllerServicesApi
from .apis.counters_api import CountersApi
from .apis.data_transfer_api import DataTransferApi
from .apis.flow_api import FlowApi
from .apis.flowfile_queues_api import FlowfileQueuesApi
from .apis.funnel_api import FunnelApi
from .apis.input_ports_api import InputPortsApi
from .apis.labels_api import LabelsApi
from .apis.output_ports_api import OutputPortsApi
from .apis.parameter_contexts_api import ParameterContextsApi
from .apis.policies_api import PoliciesApi
from .apis.process_groups_api import ProcessGroupsApi
from .apis.processors_api import ProcessorsApi
from .apis.provenance_api import ProvenanceApi
from .apis.provenance_events_api import ProvenanceEventsApi
from .apis.remote_process_groups_api import RemoteProcessGroupsApi
from .apis.reporting_tasks_api import ReportingTasksApi
from .apis.resources_api import ResourcesApi
from .apis.site_to_site_api import SiteToSiteApi
from .apis.snippets_api import SnippetsApi
from .apis.system_diagnostics_api import SystemDiagnosticsApi
from .apis.templates_api import TemplatesApi
from .apis.tenants_api import TenantsApi
from .apis.versions_api import VersionsApi
# import ApiClient
from .api_client import ApiClient
from .configuration import Configuration
configuration = Configuration()
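# Hedged usage sketch (added; not generated code).  The host below is an
# illustrative assumption for a local NiFi instance, and the exact way an
# ApiClient is wired up depends on the swagger-codegen template version this
# package was generated with:
#
#     configuration.host = 'http://localhost:8080/nifi-api'
#     flow_api = FlowApi()           # Api classes fall back to a default ApiClient
#     about = flow_api.get_about_info()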
|
py
|
1a569cac826f9a213548719219fc8c4ca75a91d2
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/cloud/monitoring_v3/proto/uptime_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.cloud.monitoring_v3.proto import uptime_pb2 as google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/cloud/monitoring_v3/proto/uptime_service.proto',
package='google.monitoring.v3',
syntax='proto3',
serialized_pb=_b('\n5google/cloud/monitoring_v3/proto/uptime_service.proto\x12\x14google.monitoring.v3\x1a\x1cgoogle/api/annotations.proto\x1a-google/cloud/monitoring_v3/proto/uptime.proto\x1a\x1bgoogle/protobuf/empty.proto\x1a google/protobuf/field_mask.proto\"V\n\x1dListUptimeCheckConfigsRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x11\n\tpage_size\x18\x03 \x01(\x05\x12\x12\n\npage_token\x18\x04 \x01(\t\"\x94\x01\n\x1eListUptimeCheckConfigsResponse\x12\x45\n\x14uptime_check_configs\x18\x01 \x03(\x0b\x32\'.google.monitoring.v3.UptimeCheckConfig\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t\x12\x12\n\ntotal_size\x18\x03 \x01(\x05\"+\n\x1bGetUptimeCheckConfigRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"v\n\x1e\x43reateUptimeCheckConfigRequest\x12\x0e\n\x06parent\x18\x01 \x01(\t\x12\x44\n\x13uptime_check_config\x18\x02 \x01(\x0b\x32\'.google.monitoring.v3.UptimeCheckConfig\"\x97\x01\n\x1eUpdateUptimeCheckConfigRequest\x12/\n\x0bupdate_mask\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12\x44\n\x13uptime_check_config\x18\x03 \x01(\x0b\x32\'.google.monitoring.v3.UptimeCheckConfig\".\n\x1e\x44\x65leteUptimeCheckConfigRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"B\n\x19ListUptimeCheckIpsRequest\x12\x11\n\tpage_size\x18\x02 \x01(\x05\x12\x12\n\npage_token\x18\x03 \x01(\t\"t\n\x1aListUptimeCheckIpsResponse\x12=\n\x10uptime_check_ips\x18\x01 \x03(\x0b\x32#.google.monitoring.v3.UptimeCheckIp\x12\x17\n\x0fnext_page_token\x18\x02 \x01(\t2\xc7\x08\n\x12UptimeCheckService\x12\xb7\x01\n\x16ListUptimeCheckConfigs\x12\x33.google.monitoring.v3.ListUptimeCheckConfigsRequest\x1a\x34.google.monitoring.v3.ListUptimeCheckConfigsResponse\"2\x82\xd3\xe4\x93\x02,\x12*/v3/{parent=projects/*}/uptimeCheckConfigs\x12\xa6\x01\n\x14GetUptimeCheckConfig\x12\x31.google.monitoring.v3.GetUptimeCheckConfigRequest\x1a\'.google.monitoring.v3.UptimeCheckConfig\"2\x82\xd3\xe4\x93\x02,\x12*/v3/{name=projects/*/uptimeCheckConfigs/*}\x12\xc1\x01\n\x17\x43reateUptimeCheckConfig\x12\x34.google.monitoring.v3.CreateUptimeCheckConfigRequest\x1a\'.google.monitoring.v3.UptimeCheckConfig\"G\x82\xd3\xe4\x93\x02\x41\"*/v3/{parent=projects/*}/uptimeCheckConfigs:\x13uptime_check_config\x12\xd5\x01\n\x17UpdateUptimeCheckConfig\x12\x34.google.monitoring.v3.UpdateUptimeCheckConfigRequest\x1a\'.google.monitoring.v3.UptimeCheckConfig\"[\x82\xd3\xe4\x93\x02U2>/v3/{uptime_check_config.name=projects/*/uptimeCheckConfigs/*}:\x13uptime_check_config\x12\x9b\x01\n\x17\x44\x65leteUptimeCheckConfig\x12\x34.google.monitoring.v3.DeleteUptimeCheckConfigRequest\x1a\x16.google.protobuf.Empty\"2\x82\xd3\xe4\x93\x02,**/v3/{name=projects/*/uptimeCheckConfigs/*}\x12\x93\x01\n\x12ListUptimeCheckIps\x12/.google.monitoring.v3.ListUptimeCheckIpsRequest\x1a\x30.google.monitoring.v3.ListUptimeCheckIpsResponse\"\x1a\x82\xd3\xe4\x93\x02\x14\x12\x12/v3/uptimeCheckIpsB\xaa\x01\n\x18\x63om.google.monitoring.v3B\x12UptimeServiceProtoP\x01Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\xaa\x02\x1aGoogle.Cloud.Monitoring.V3\xca\x02\x1aGoogle\\Cloud\\Monitoring\\V3b\x06proto3')
,
dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2.DESCRIPTOR,google_dot_protobuf_dot_empty__pb2.DESCRIPTOR,google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR,])
_LISTUPTIMECHECKCONFIGSREQUEST = _descriptor.Descriptor(
name='ListUptimeCheckConfigsRequest',
full_name='google.monitoring.v3.ListUptimeCheckConfigsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='parent', full_name='google.monitoring.v3.ListUptimeCheckConfigsRequest.parent', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_size', full_name='google.monitoring.v3.ListUptimeCheckConfigsRequest.page_size', index=1,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_token', full_name='google.monitoring.v3.ListUptimeCheckConfigsRequest.page_token', index=2,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=219,
serialized_end=305,
)
_LISTUPTIMECHECKCONFIGSRESPONSE = _descriptor.Descriptor(
name='ListUptimeCheckConfigsResponse',
full_name='google.monitoring.v3.ListUptimeCheckConfigsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='uptime_check_configs', full_name='google.monitoring.v3.ListUptimeCheckConfigsResponse.uptime_check_configs', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='next_page_token', full_name='google.monitoring.v3.ListUptimeCheckConfigsResponse.next_page_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='total_size', full_name='google.monitoring.v3.ListUptimeCheckConfigsResponse.total_size', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=308,
serialized_end=456,
)
_GETUPTIMECHECKCONFIGREQUEST = _descriptor.Descriptor(
name='GetUptimeCheckConfigRequest',
full_name='google.monitoring.v3.GetUptimeCheckConfigRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.monitoring.v3.GetUptimeCheckConfigRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=458,
serialized_end=501,
)
_CREATEUPTIMECHECKCONFIGREQUEST = _descriptor.Descriptor(
name='CreateUptimeCheckConfigRequest',
full_name='google.monitoring.v3.CreateUptimeCheckConfigRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='parent', full_name='google.monitoring.v3.CreateUptimeCheckConfigRequest.parent', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='uptime_check_config', full_name='google.monitoring.v3.CreateUptimeCheckConfigRequest.uptime_check_config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=503,
serialized_end=621,
)
_UPDATEUPTIMECHECKCONFIGREQUEST = _descriptor.Descriptor(
name='UpdateUptimeCheckConfigRequest',
full_name='google.monitoring.v3.UpdateUptimeCheckConfigRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='update_mask', full_name='google.monitoring.v3.UpdateUptimeCheckConfigRequest.update_mask', index=0,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='uptime_check_config', full_name='google.monitoring.v3.UpdateUptimeCheckConfigRequest.uptime_check_config', index=1,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=624,
serialized_end=775,
)
_DELETEUPTIMECHECKCONFIGREQUEST = _descriptor.Descriptor(
name='DeleteUptimeCheckConfigRequest',
full_name='google.monitoring.v3.DeleteUptimeCheckConfigRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='google.monitoring.v3.DeleteUptimeCheckConfigRequest.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=777,
serialized_end=823,
)
_LISTUPTIMECHECKIPSREQUEST = _descriptor.Descriptor(
name='ListUptimeCheckIpsRequest',
full_name='google.monitoring.v3.ListUptimeCheckIpsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='page_size', full_name='google.monitoring.v3.ListUptimeCheckIpsRequest.page_size', index=0,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='page_token', full_name='google.monitoring.v3.ListUptimeCheckIpsRequest.page_token', index=1,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=825,
serialized_end=891,
)
_LISTUPTIMECHECKIPSRESPONSE = _descriptor.Descriptor(
name='ListUptimeCheckIpsResponse',
full_name='google.monitoring.v3.ListUptimeCheckIpsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='uptime_check_ips', full_name='google.monitoring.v3.ListUptimeCheckIpsResponse.uptime_check_ips', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='next_page_token', full_name='google.monitoring.v3.ListUptimeCheckIpsResponse.next_page_token', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=893,
serialized_end=1009,
)
_LISTUPTIMECHECKCONFIGSRESPONSE.fields_by_name['uptime_check_configs'].message_type = google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2._UPTIMECHECKCONFIG
_CREATEUPTIMECHECKCONFIGREQUEST.fields_by_name['uptime_check_config'].message_type = google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2._UPTIMECHECKCONFIG
_UPDATEUPTIMECHECKCONFIGREQUEST.fields_by_name['update_mask'].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_UPDATEUPTIMECHECKCONFIGREQUEST.fields_by_name['uptime_check_config'].message_type = google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2._UPTIMECHECKCONFIG
_LISTUPTIMECHECKIPSRESPONSE.fields_by_name['uptime_check_ips'].message_type = google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2._UPTIMECHECKIP
DESCRIPTOR.message_types_by_name['ListUptimeCheckConfigsRequest'] = _LISTUPTIMECHECKCONFIGSREQUEST
DESCRIPTOR.message_types_by_name['ListUptimeCheckConfigsResponse'] = _LISTUPTIMECHECKCONFIGSRESPONSE
DESCRIPTOR.message_types_by_name['GetUptimeCheckConfigRequest'] = _GETUPTIMECHECKCONFIGREQUEST
DESCRIPTOR.message_types_by_name['CreateUptimeCheckConfigRequest'] = _CREATEUPTIMECHECKCONFIGREQUEST
DESCRIPTOR.message_types_by_name['UpdateUptimeCheckConfigRequest'] = _UPDATEUPTIMECHECKCONFIGREQUEST
DESCRIPTOR.message_types_by_name['DeleteUptimeCheckConfigRequest'] = _DELETEUPTIMECHECKCONFIGREQUEST
DESCRIPTOR.message_types_by_name['ListUptimeCheckIpsRequest'] = _LISTUPTIMECHECKIPSREQUEST
DESCRIPTOR.message_types_by_name['ListUptimeCheckIpsResponse'] = _LISTUPTIMECHECKIPSRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ListUptimeCheckConfigsRequest = _reflection.GeneratedProtocolMessageType('ListUptimeCheckConfigsRequest', (_message.Message,), dict(
DESCRIPTOR = _LISTUPTIMECHECKCONFIGSREQUEST,
__module__ = 'google.cloud.monitoring_v3.proto.uptime_service_pb2'
,
__doc__ = """The protocol for the ``ListUptimeCheckConfigs`` request.
Attributes:
parent:
The project whose uptime check configurations are listed. The
format is ``projects/[PROJECT_ID]``.
page_size:
The maximum number of results to return in a single response.
The server may further constrain the maximum number of results
returned in a single page. If the page\_size is <=0, the
server will decide the number of results to be returned.
page_token:
If this field is not empty then it must contain the
``nextPageToken`` value returned by a previous call to this
method. Using this field causes the method to return more
results from the previous method call.
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.ListUptimeCheckConfigsRequest)
))
_sym_db.RegisterMessage(ListUptimeCheckConfigsRequest)
ListUptimeCheckConfigsResponse = _reflection.GeneratedProtocolMessageType('ListUptimeCheckConfigsResponse', (_message.Message,), dict(
DESCRIPTOR = _LISTUPTIMECHECKCONFIGSRESPONSE,
__module__ = 'google.cloud.monitoring_v3.proto.uptime_service_pb2'
,
__doc__ = """The protocol for the ``ListUptimeCheckConfigs`` response.
Attributes:
uptime_check_configs:
The returned uptime check configurations.
next_page_token:
This field represents the pagination token to retrieve the
next page of results. If the value is empty, it means no
further results for the request. To retrieve the next page of
results, the value of the next\_page\_token is passed to the
subsequent List method call (in the request message's
page\_token field).
total_size:
The total number of uptime check configurations for the
project, irrespective of any pagination.
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.ListUptimeCheckConfigsResponse)
))
_sym_db.RegisterMessage(ListUptimeCheckConfigsResponse)
GetUptimeCheckConfigRequest = _reflection.GeneratedProtocolMessageType('GetUptimeCheckConfigRequest', (_message.Message,), dict(
DESCRIPTOR = _GETUPTIMECHECKCONFIGREQUEST,
__module__ = 'google.cloud.monitoring_v3.proto.uptime_service_pb2'
,
__doc__ = """The protocol for the ``GetUptimeCheckConfig`` request.
Attributes:
name:
The uptime check configuration to retrieve. The format is ``pr
ojects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID]``.
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.GetUptimeCheckConfigRequest)
))
_sym_db.RegisterMessage(GetUptimeCheckConfigRequest)
CreateUptimeCheckConfigRequest = _reflection.GeneratedProtocolMessageType('CreateUptimeCheckConfigRequest', (_message.Message,), dict(
DESCRIPTOR = _CREATEUPTIMECHECKCONFIGREQUEST,
__module__ = 'google.cloud.monitoring_v3.proto.uptime_service_pb2'
,
__doc__ = """The protocol for the ``CreateUptimeCheckConfig`` request.
Attributes:
parent:
The project in which to create the uptime check. The format is
``projects/[PROJECT_ID]``.
uptime_check_config:
The new uptime check configuration.
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.CreateUptimeCheckConfigRequest)
))
_sym_db.RegisterMessage(CreateUptimeCheckConfigRequest)
UpdateUptimeCheckConfigRequest = _reflection.GeneratedProtocolMessageType('UpdateUptimeCheckConfigRequest', (_message.Message,), dict(
DESCRIPTOR = _UPDATEUPTIMECHECKCONFIGREQUEST,
__module__ = 'google.cloud.monitoring_v3.proto.uptime_service_pb2'
,
__doc__ = """The protocol for the ``UpdateUptimeCheckConfig`` request.
Attributes:
update_mask:
Optional. If present, only the listed fields in the current
uptime check configuration are updated with values from the
new configuration. If this field is empty, then the current
configuration is completely replaced with the new
configuration.
uptime_check_config:
Required. If an ``"updateMask"`` has been specified, this
field gives the values for the set of fields mentioned in the
``"updateMask"``. If an ``"updateMask"`` has not been given,
this uptime check configuration replaces the current
configuration. If a field is mentioned in ``"updateMask"`` but
        the corresponding field is omitted in this partial uptime check
configuration, it has the effect of deleting/clearing the
field from the configuration on the server. The following
fields can be updated: ``display_name``, ``http_check``,
``tcp_check``, ``timeout``, ``content_matchers``, and
``selected_regions``.
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.UpdateUptimeCheckConfigRequest)
))
_sym_db.RegisterMessage(UpdateUptimeCheckConfigRequest)
DeleteUptimeCheckConfigRequest = _reflection.GeneratedProtocolMessageType('DeleteUptimeCheckConfigRequest', (_message.Message,), dict(
DESCRIPTOR = _DELETEUPTIMECHECKCONFIGREQUEST,
__module__ = 'google.cloud.monitoring_v3.proto.uptime_service_pb2'
,
__doc__ = """The protocol for the ``DeleteUptimeCheckConfig`` request.
Attributes:
name:
The uptime check configuration to delete. The format is ``proj
ects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID]``.
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.DeleteUptimeCheckConfigRequest)
))
_sym_db.RegisterMessage(DeleteUptimeCheckConfigRequest)
ListUptimeCheckIpsRequest = _reflection.GeneratedProtocolMessageType('ListUptimeCheckIpsRequest', (_message.Message,), dict(
DESCRIPTOR = _LISTUPTIMECHECKIPSREQUEST,
__module__ = 'google.cloud.monitoring_v3.proto.uptime_service_pb2'
,
__doc__ = """The protocol for the ``ListUptimeCheckIps`` request.
Attributes:
page_size:
The maximum number of results to return in a single response.
The server may further constrain the maximum number of results
returned in a single page. If the page\_size is <=0, the
server will decide the number of results to be returned. NOTE:
this field is not yet implemented
page_token:
If this field is not empty then it must contain the
``nextPageToken`` value returned by a previous call to this
method. Using this field causes the method to return more
results from the previous method call. NOTE: this field is not
yet implemented
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.ListUptimeCheckIpsRequest)
))
_sym_db.RegisterMessage(ListUptimeCheckIpsRequest)
ListUptimeCheckIpsResponse = _reflection.GeneratedProtocolMessageType('ListUptimeCheckIpsResponse', (_message.Message,), dict(
DESCRIPTOR = _LISTUPTIMECHECKIPSRESPONSE,
__module__ = 'google.cloud.monitoring_v3.proto.uptime_service_pb2'
,
__doc__ = """The protocol for the ``ListUptimeCheckIps`` response.
Attributes:
uptime_check_ips:
The returned list of IP addresses (including region and
location) that the checkers run from.
next_page_token:
This field represents the pagination token to retrieve the
next page of results. If the value is empty, it means no
further results for the request. To retrieve the next page of
results, the value of the next\_page\_token is passed to the
subsequent List method call (in the request message's
page\_token field). NOTE: this field is not yet implemented
""",
# @@protoc_insertion_point(class_scope:google.monitoring.v3.ListUptimeCheckIpsResponse)
))
_sym_db.RegisterMessage(ListUptimeCheckIpsResponse)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030com.google.monitoring.v3B\022UptimeServiceProtoP\001Z>google.golang.org/genproto/googleapis/monitoring/v3;monitoring\252\002\032Google.Cloud.Monitoring.V3\312\002\032Google\\Cloud\\Monitoring\\V3'))
_UPTIMECHECKSERVICE = _descriptor.ServiceDescriptor(
name='UptimeCheckService',
full_name='google.monitoring.v3.UptimeCheckService',
file=DESCRIPTOR,
index=0,
options=None,
serialized_start=1012,
serialized_end=2107,
methods=[
_descriptor.MethodDescriptor(
name='ListUptimeCheckConfigs',
full_name='google.monitoring.v3.UptimeCheckService.ListUptimeCheckConfigs',
index=0,
containing_service=None,
input_type=_LISTUPTIMECHECKCONFIGSREQUEST,
output_type=_LISTUPTIMECHECKCONFIGSRESPONSE,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002,\022*/v3/{parent=projects/*}/uptimeCheckConfigs')),
),
_descriptor.MethodDescriptor(
name='GetUptimeCheckConfig',
full_name='google.monitoring.v3.UptimeCheckService.GetUptimeCheckConfig',
index=1,
containing_service=None,
input_type=_GETUPTIMECHECKCONFIGREQUEST,
output_type=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2._UPTIMECHECKCONFIG,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002,\022*/v3/{name=projects/*/uptimeCheckConfigs/*}')),
),
_descriptor.MethodDescriptor(
name='CreateUptimeCheckConfig',
full_name='google.monitoring.v3.UptimeCheckService.CreateUptimeCheckConfig',
index=2,
containing_service=None,
input_type=_CREATEUPTIMECHECKCONFIGREQUEST,
output_type=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2._UPTIMECHECKCONFIG,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002A\"*/v3/{parent=projects/*}/uptimeCheckConfigs:\023uptime_check_config')),
),
_descriptor.MethodDescriptor(
name='UpdateUptimeCheckConfig',
full_name='google.monitoring.v3.UptimeCheckService.UpdateUptimeCheckConfig',
index=3,
containing_service=None,
input_type=_UPDATEUPTIMECHECKCONFIGREQUEST,
output_type=google_dot_cloud_dot_monitoring__v3_dot_proto_dot_uptime__pb2._UPTIMECHECKCONFIG,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002U2>/v3/{uptime_check_config.name=projects/*/uptimeCheckConfigs/*}:\023uptime_check_config')),
),
_descriptor.MethodDescriptor(
name='DeleteUptimeCheckConfig',
full_name='google.monitoring.v3.UptimeCheckService.DeleteUptimeCheckConfig',
index=4,
containing_service=None,
input_type=_DELETEUPTIMECHECKCONFIGREQUEST,
output_type=google_dot_protobuf_dot_empty__pb2._EMPTY,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002,**/v3/{name=projects/*/uptimeCheckConfigs/*}')),
),
_descriptor.MethodDescriptor(
name='ListUptimeCheckIps',
full_name='google.monitoring.v3.UptimeCheckService.ListUptimeCheckIps',
index=5,
containing_service=None,
input_type=_LISTUPTIMECHECKIPSREQUEST,
output_type=_LISTUPTIMECHECKIPSRESPONSE,
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\024\022\022/v3/uptimeCheckIps')),
),
])
_sym_db.RegisterServiceDescriptor(_UPTIMECHECKSERVICE)
DESCRIPTOR.services_by_name['UptimeCheckService'] = _UPTIMECHECKSERVICE
# @@protoc_insertion_point(module_scope)
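# A minimal usage sketch for the generated request messages above (the
# project id is hypothetical; keyword construction and the serialization
# helpers come from the standard protobuf runtime):
#
#     request = ListUptimeCheckConfigsRequest(
#         parent='projects/my-project',
#         page_size=10,
#     )
#     payload = request.SerializeToString()
#     restored = ListUptimeCheckConfigsRequest.FromString(payload)
#     assert restored.page_size == 10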
|
py
|
1a569cc710f2e0b0dbcb6d2d85d711fc4f6bf988
|
import sys
import uuid
from dataclasses import dataclass
from dataclasses import field
from dataclasses import fields
from dataclasses import make_dataclass
from typing import Dict
from typing import get_type_hints
from typing import Iterator
from typing import List
from typing import Union
from unittest import mock
from unittest import TestCase
from xml.etree.ElementTree import QName
from tests.fixtures.artists import Artist
from tests.fixtures.books import BookForm
from tests.fixtures.models import ChoiceType
from tests.fixtures.models import Parent
from tests.fixtures.models import TypeA
from tests.fixtures.models import TypeB
from tests.fixtures.models import UnionType
from tests.fixtures.series import Country
from tests.fixtures.submodels import ChoiceTypeChild
from xsdata.exceptions import XmlContextError
from xsdata.formats.dataclass.compat import class_types
from xsdata.formats.dataclass.models.builders import XmlMetaBuilder
from xsdata.formats.dataclass.models.builders import XmlVarBuilder
from xsdata.formats.dataclass.models.elements import XmlType
from xsdata.models.datatype import XmlDate
from xsdata.utils import text
from xsdata.utils.constants import return_input
from xsdata.utils.constants import return_true
from xsdata.utils.namespaces import build_qname
from xsdata.utils.testing import FactoryTestCase
from xsdata.utils.testing import XmlMetaFactory
from xsdata.utils.testing import XmlVarFactory
class XmlMetaBuilderTests(FactoryTestCase):
def setUp(self):
super().setUp()
self.builder = XmlMetaBuilder(
class_type=class_types.get_type("dataclasses"),
element_name_generator=return_input,
attribute_name_generator=return_input,
)
@mock.patch.object(XmlMetaBuilder, "build_vars")
def test_build(self, mock_build_vars):
var = XmlVarFactory.create(
xml_type=XmlType.ELEMENT, name="foo", qname="{foo}bar", types=(int,)
)
mock_build_vars.return_value = [var]
result = self.builder.build(Artist, None)
expected = XmlMetaFactory.create(
clazz=Artist,
qname="{http://musicbrainz.org/ns/mmd-2.0#}artist",
elements={var.qname: [var]},
)
self.assertEqual(expected, result)
mock_build_vars.assert_called_once_with(
Artist, "http://musicbrainz.org/ns/mmd-2.0#", return_input, return_input
)
@mock.patch.object(XmlMetaBuilder, "build_vars", return_value=[])
def test_build_with_parent_namespace(self, mock_build_vars):
result = self.builder.build(Country, "http://xsdata")
self.assertEqual(build_qname("http://xsdata", "country"), result.qname)
mock_build_vars.assert_called_once_with(
Country, "http://xsdata", return_input, return_input
)
@mock.patch.object(XmlMetaBuilder, "build_vars", return_value=[])
def test_build_with_no_meta_name_and_name_generator(self, *args):
self.builder.element_name_generator = text.snake_case
result = self.builder.build(BookForm, None)
self.assertEqual("book_form", result.qname)
def test_build_block_meta_inheritance(self):
@dataclass
class Bar:
class Meta:
name = "bar"
@dataclass
class Foo(Bar):
pass
@dataclass
class Thug(Bar):
class Meta:
name = "thug"
result = self.builder.build(Foo, None)
self.assertEqual("Foo", result.qname)
result = self.builder.build(Thug, None)
self.assertEqual("thug", result.qname)
def test_build_with_no_dataclass_raises_exception(self, *args):
with self.assertRaises(XmlContextError) as cm:
self.builder.build(int, None)
self.assertEqual(f"Type '{int}' is not a dataclass.", str(cm.exception))
def test_build_locates_globalns_per_field(self):
actual = self.builder.build(ChoiceTypeChild, None)
self.assertEqual(1, len(actual.choices))
self.assertEqual(9, len(actual.choices[0].elements))
self.assertIsNone(self.builder.find_globalns(object, "foo"))
def test_build_inner_type_has_no_target_qname(self):
actual = self.builder.build(Parent.Inner, None)
self.assertIsNone(actual.target_qname)
def test_target_namespace(self):
class Meta:
namespace = "bar"
target_namespace = "foo"
self.assertEqual("foo", self.builder.target_namespace(None, Meta))
del Meta.target_namespace
self.assertEqual("bar", self.builder.target_namespace(None, Meta))
class Module:
__NAMESPACE__ = "gl"
self.assertEqual("gl", self.builder.target_namespace(Module, Meta))
def test_build_vars(self):
result = self.builder.build_vars(BookForm, None, text.pascal_case, str.upper)
self.assertIsInstance(result, Iterator)
expected = [
XmlVarFactory.create(
xml_type=XmlType.ELEMENT,
index=1,
name="author",
qname="Author",
types=(str,),
required=True,
),
XmlVarFactory.create(
xml_type=XmlType.ELEMENT,
index=2,
name="title",
qname="Title",
types=(str,),
required=True,
),
XmlVarFactory.create(
xml_type=XmlType.ELEMENT,
index=3,
name="genre",
qname="Genre",
types=(str,),
required=True,
),
XmlVarFactory.create(
xml_type=XmlType.ELEMENT,
index=4,
name="price",
qname="Price",
types=(float,),
required=True,
),
XmlVarFactory.create(
xml_type=XmlType.ELEMENT,
index=5,
name="pub_date",
qname="PubDate",
types=(XmlDate,),
required=True,
),
XmlVarFactory.create(
xml_type=XmlType.ELEMENT,
index=6,
name="review",
qname="Review",
types=(str,),
required=True,
),
XmlVarFactory.create(
xml_type=XmlType.ATTRIBUTE, index=7, name="id", qname="ID", types=(str,)
),
XmlVarFactory.create(
xml_type=XmlType.ATTRIBUTE,
index=8,
name="lang",
qname="LANG",
types=(str,),
init=False,
default="en",
),
]
result = list(result)
self.assertEqual(expected, result)
for var in result:
self.assertIsNone(var.clazz)
def test_build_vars_with_ignore_types(self):
result = self.builder.build_vars(TypeB, None, return_input, return_input)
self.assertIsInstance(result, Iterator)
actual = list(result)
self.assertEqual(2, len(actual))
def test_default_xml_type(self):
cls = make_dataclass("a", [("x", int)])
self.assertEqual(XmlType.TEXT, self.builder.default_xml_type(cls))
cls = make_dataclass("b", [("x", int), ("y", int)])
self.assertEqual(XmlType.ELEMENT, self.builder.default_xml_type(cls))
cls = make_dataclass(
"c", [("x", int), ("y", int, field(metadata=dict(type="Text")))]
)
self.assertEqual(XmlType.ELEMENT, self.builder.default_xml_type(cls))
cls = make_dataclass(
"d", [("x", int), ("y", int, field(metadata=dict(type="Element")))]
)
self.assertEqual(XmlType.TEXT, self.builder.default_xml_type(cls))
with self.assertRaises(XmlContextError) as cm:
cls = make_dataclass(
"e",
[
("x", int, field(metadata=dict(type="Text"))),
("y", int, field(metadata=dict(type="Text"))),
],
)
self.builder.default_xml_type(cls)
self.assertEqual(
"Dataclass `e` includes more than one text node!", str(cm.exception)
)
class XmlVarBuilderTests(TestCase):
def setUp(self) -> None:
self.builder = XmlVarBuilder(
class_type=class_types.get_type("dataclasses"),
parent_ns=None,
default_xml_type=XmlType.ELEMENT,
element_name_generator=return_input,
attribute_name_generator=return_input,
)
super().setUp()
self.maxDiff = None
def test_build_with_choice_field(self):
globalns = sys.modules[ChoiceType.__module__].__dict__
type_hints = get_type_hints(ChoiceType)
class_field = fields(ChoiceType)[0]
self.builder.parent_ns = "bar"
self.maxDiff = None
actual = self.builder.build(
66,
"choice",
type_hints["choice"],
class_field.metadata,
True,
list,
globalns,
)
expected = XmlVarFactory.create(
index=67,
name="choice",
types=(object,),
factory=list,
any_type=True,
default=list,
xml_type=XmlType.ELEMENTS,
elements={
"{bar}a": XmlVarFactory.create(
index=1,
name="choice",
qname="{bar}a",
types=(TypeA,),
clazz=TypeA,
factory=list,
namespaces=("bar",),
),
"{bar}b": XmlVarFactory.create(
index=2,
name="choice",
qname="{bar}b",
types=(TypeB,),
clazz=TypeB,
factory=list,
namespaces=("bar",),
),
"{bar}int": XmlVarFactory.create(
index=3,
name="choice",
qname="{bar}int",
types=(int,),
factory=list,
namespaces=("bar",),
),
"{bar}int2": XmlVarFactory.create(
index=4,
name="choice",
qname="{bar}int2",
types=(int,),
derived=True,
nillable=True,
factory=list,
namespaces=("bar",),
),
"{bar}float": XmlVarFactory.create(
index=5,
name="choice",
qname="{bar}float",
types=(float,),
factory=list,
namespaces=("bar",),
),
"{bar}qname": XmlVarFactory.create(
index=6,
name="choice",
qname="{bar}qname",
types=(QName,),
factory=list,
namespaces=("bar",),
),
"{bar}tokens": XmlVarFactory.create(
index=7,
name="choice",
qname="{bar}tokens",
types=(int,),
tokens_factory=list,
derived=True,
factory=list,
default=return_true,
namespaces=("bar",),
),
"{foo}union": XmlVarFactory.create(
index=8,
name="choice",
qname="{foo}union",
types=(UnionType,),
clazz=UnionType,
factory=list,
namespaces=("foo",),
),
"{bar}p": XmlVarFactory.create(
index=9,
name="choice",
qname="{bar}p",
types=(float,),
derived=True,
factory=list,
default=1.1,
namespaces=("bar",),
),
},
wildcards=[
XmlVarFactory.create(
index=10,
name="choice",
xml_type=XmlType.WILDCARD,
qname="{http://www.w3.org/1999/xhtml}any",
types=(object,),
factory=list,
default=None,
namespaces=("http://www.w3.org/1999/xhtml",),
),
],
)
self.assertEqual(expected, actual)
def test_build_validates_result(self):
with self.assertRaises(XmlContextError) as cm:
self.builder.build(
1, "foo", List[int], {"type": "Attributes"}, True, None, None
)
self.assertEqual(
"Xml type 'Attributes' does not support typing: typing.List[int]",
str(cm.exception),
)
def test_resolve_namespaces(self):
func = self.builder.resolve_namespaces
self.builder.parent_ns = "bar"
self.assertEqual(("foo",), func(XmlType.ELEMENT, "foo"))
self.assertEqual((), func(XmlType.ELEMENT, ""))
self.assertEqual(("bar",), func(XmlType.ELEMENT, None))
self.assertEqual((), func(XmlType.ATTRIBUTE, None))
self.assertEqual(("bar",), func(XmlType.WILDCARD, None))
self.assertEqual(("##any",), func(XmlType.WILDCARD, "##any"))
self.builder.parent_ns = ""
self.assertEqual(("##any",), func(XmlType.WILDCARD, "##targetNamespace"))
self.builder.parent_ns = None
self.assertEqual(("##any",), func(XmlType.WILDCARD, "##targetNamespace"))
self.builder.parent_ns = "p"
self.assertEqual(("p",), func(XmlType.WILDCARD, "##targetNamespace"))
self.assertEqual(("",), func(XmlType.WILDCARD, "##local"))
self.assertEqual(("!p",), func(XmlType.WILDCARD, "##other"))
self.assertEqual(
("", "!p"), tuple(sorted(func(XmlType.WILDCARD, "##other ##local")))
)
self.assertEqual(
("foo", "p"),
tuple(sorted(func(XmlType.WILDCARD, "##targetNamespace foo"))),
)
def test_analyze_types(self):
actual = self.builder.analyze_types(List[List[Union[str, int]]], None)
self.assertEqual((list, list, (int, str)), actual)
actual = self.builder.analyze_types(Union[str, int], None)
self.assertEqual((None, None, (int, str)), actual)
actual = self.builder.analyze_types(Dict[str, int], None)
self.assertEqual((dict, None, (int, str)), actual)
with self.assertRaises(XmlContextError) as cm:
self.builder.analyze_types(List[List[List[int]]], None)
self.assertEqual(
"Unsupported typing: typing.List[typing.List[typing.List[int]]]",
str(cm.exception),
)
def test_is_valid(self):
# Attributes need origin dict
self.assertFalse(
self.builder.is_valid(XmlType.ATTRIBUTES, None, None, (), False, True)
)
# Attributes don't support any origin
self.assertFalse(
self.builder.is_valid(XmlType.ATTRIBUTES, dict, list, (), False, True)
)
# Attributes don't support xs:NMTOKENS
self.assertFalse(
self.builder.is_valid(XmlType.ATTRIBUTES, dict, None, (), True, True)
)
self.assertTrue(
self.builder.is_valid(
XmlType.ATTRIBUTES, dict, None, (str, str), False, True
)
)
# xs:NMTOKENS need origin list
self.assertFalse(
self.builder.is_valid(XmlType.TEXT, dict, None, (), True, True)
)
# xs:NMTOKENS need origin list
self.assertFalse(self.builder.is_valid(XmlType.TEXT, set, None, (), True, True))
# Any type object is a superset, it's only supported alone
self.assertFalse(
self.builder.is_valid(
XmlType.ELEMENT, None, None, (object, int), False, True
)
)
# Type is not registered in converter.
self.assertFalse(
self.builder.is_valid(
XmlType.TEXT, None, None, (int, uuid.UUID), False, True
)
)
# init false vars are ignored!
self.assertTrue(
self.builder.is_valid(
XmlType.TEXT, None, None, (int, uuid.UUID), False, False
)
)
|
py
|
1a569cfeec8479c60cdd6296153784caf41c56ac
|
import flask
import git
import local_system
import update
api_blueprint = flask.Blueprint('api', __name__, url_prefix='/api')
@api_blueprint.route('/shutdown', methods=['POST'])
def shutdown_post():
try:
local_system.shutdown()
return _json_success()
except local_system.Error as e:
return _json_error(str(e)), 200
@api_blueprint.route('/restart', methods=['POST'])
def restart_post():
try:
local_system.restart()
return _json_success()
except local_system.Error as e:
return _json_error(str(e)), 200
@api_blueprint.route('/update', methods=['POST'])
def update_post():
"""Updates TinyPilot to the latest version available.
This is a slow endpoint, as it is expected to take 2~4 minutes to
complete.
Returns:
A JSON string with two keys: success and error.
success: true if successful.
error: null if successful, str otherwise.
Example of success:
{
'success': true,
'error': null,
}
Example of error:
{
'success': false,
'error': 'sudo: /opt/tinypilot-privileged/update: command not found'
}
"""
try:
update.update()
except update.Error as e:
return _json_error(str(e)), 200
return _json_success()
@api_blueprint.route('/version', methods=['GET'])
def version_get():
"""Retrieves the current installed version of TinyPilot.
Returns:
A JSON string with three keys when successful and two otherwise:
success, error and version (if successful).
success: true if successful.
error: null if successful, str otherwise.
version: str.
Example of success:
{
'success': true,
'error': null,
'version': 'bf07bfe72941457cf068ca0a44c6b0d62dd9ef05',
}
Example of error:
{
'success': false,
'error': 'git rev-parse HEAD failed.',
}
"""
try:
return _json_success({"version": git.local_head_commit_id()})
except git.Error as e:
return _json_error(str(e)), 200
@api_blueprint.route('/latestRelease', methods=['GET'])
def latest_release_get():
"""Retrieves the latest version of TinyPilot.
Returns:
A JSON string with three keys when successful and two otherwise:
success, error and version (if successful).
success: true if successful.
error: null if successful, str otherwise.
version: str.
Example of success:
{
'success': true,
'error': null,
'version': 'bf07bfe72941457cf068ca0a44c6b0d62dd9ef05',
}
Example of error:
{
'success': false,
'error': 'git rev-parse origin/master failed.',
}
"""
try:
return _json_success({"version": git.remote_head_commit_id()})
except git.Error as e:
return _json_error(str(e)), 200
def _json_success(fields=None):
    response = {
        'success': True,
        'error': None,
    }
    for k, v in (fields or {}).items():
        response[k] = v
    return flask.jsonify(response)
def _json_error(message):
return flask.jsonify({
'success': False,
'error': message,
})
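# A minimal client-side sketch of the JSON contract documented above; the
# host name is hypothetical and the `requests` package is assumed to be
# available. The endpoints above answer HTTP 200 and report failures through
# the 'success'/'error' fields rather than status codes.
if __name__ == '__main__':
    import requests

    data = requests.get('http://tinypilot.local/api/version').json()
    if data['success']:
        print('installed version:', data['version'])
    else:
        print('error:', data['error'])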
|
py
|
1a569d5d6dacd85a04cbbfd3b6aab798b02b1a74
|
import sys
# XXX: Handle this more automatically. See comments in the testrunner.
from gevent.testing.resources import exit_without_resource
exit_without_resource('subprocess')
from gevent.subprocess import Popen
from gevent.testing.util import alarm
alarm(3)
popen = Popen([sys.executable, '-c', 'pass'])
while popen.poll() is None:
pass
|
py
|
1a569db99e9f836a1ce7662efc1ecb8301d3dce7
|
# -*- coding: utf-8 -*-
import os
import types
import logging
from socket import AF_INET
from socket import AF_INET6
from socket import AF_UNSPEC
from itertools import chain
from functools import partial
from pr2modules import config
from pr2modules.config import AF_BRIDGE
from pr2modules.netlink import NLMSG_ERROR
from pr2modules.netlink import NLM_F_ATOMIC
from pr2modules.netlink import NLM_F_ROOT
from pr2modules.netlink import NLM_F_REPLACE
from pr2modules.netlink import NLM_F_REQUEST
from pr2modules.netlink import NLM_F_ACK
from pr2modules.netlink import NLM_F_DUMP
from pr2modules.netlink import NLM_F_CREATE
from pr2modules.netlink import NLM_F_EXCL
from pr2modules.netlink import NLM_F_APPEND
from pr2modules.netlink.rtnl import RTM_NEWADDR
from pr2modules.netlink.rtnl import RTM_GETADDR
from pr2modules.netlink.rtnl import RTM_DELADDR
from pr2modules.netlink.rtnl import RTM_NEWLINK
from pr2modules.netlink.rtnl import RTM_NEWLINKPROP
from pr2modules.netlink.rtnl import RTM_DELLINKPROP
from pr2modules.netlink.rtnl import RTM_GETLINK
from pr2modules.netlink.rtnl import RTM_DELLINK
from pr2modules.netlink.rtnl import RTM_NEWQDISC
from pr2modules.netlink.rtnl import RTM_GETQDISC
from pr2modules.netlink.rtnl import RTM_DELQDISC
from pr2modules.netlink.rtnl import RTM_NEWTFILTER
from pr2modules.netlink.rtnl import RTM_GETTFILTER
from pr2modules.netlink.rtnl import RTM_DELTFILTER
from pr2modules.netlink.rtnl import RTM_NEWTCLASS
from pr2modules.netlink.rtnl import RTM_GETTCLASS
from pr2modules.netlink.rtnl import RTM_DELTCLASS
from pr2modules.netlink.rtnl import RTM_NEWRULE
from pr2modules.netlink.rtnl import RTM_GETRULE
from pr2modules.netlink.rtnl import RTM_DELRULE
from pr2modules.netlink.rtnl import RTM_NEWROUTE
from pr2modules.netlink.rtnl import RTM_GETROUTE
from pr2modules.netlink.rtnl import RTM_DELROUTE
from pr2modules.netlink.rtnl import RTM_NEWNEIGH
from pr2modules.netlink.rtnl import RTM_GETNEIGH
from pr2modules.netlink.rtnl import RTM_DELNEIGH
from pr2modules.netlink.rtnl import RTM_SETLINK
from pr2modules.netlink.rtnl import RTM_GETNEIGHTBL
from pr2modules.netlink.rtnl import RTM_GETNSID
from pr2modules.netlink.rtnl import RTM_NEWNETNS
from pr2modules.netlink.rtnl import RTM_GETSTATS
from pr2modules.netlink.rtnl import TC_H_ROOT
from pr2modules.netlink.rtnl import rt_type
from pr2modules.netlink.rtnl import rt_scope
from pr2modules.netlink.rtnl import rt_proto
from pr2modules.netlink.rtnl.req import IPLinkRequest
from pr2modules.netlink.rtnl.req import IPBridgeRequest
from pr2modules.netlink.rtnl.req import IPBrPortRequest
from pr2modules.netlink.rtnl.req import IPRouteRequest
from pr2modules.netlink.rtnl.req import IPRuleRequest
from pr2modules.netlink.rtnl.req import IPAddrRequest
from pr2modules.netlink.rtnl.tcmsg import plugins as tc_plugins
from pr2modules.netlink.rtnl.tcmsg import tcmsg
from pr2modules.netlink.rtnl.rtmsg import rtmsg
from pr2modules.netlink.rtnl import ndmsg
from pr2modules.netlink.rtnl.ndtmsg import ndtmsg
from pr2modules.netlink.rtnl.fibmsg import fibmsg
from pr2modules.netlink.rtnl.ifinfmsg import ifinfmsg
from pr2modules.netlink.rtnl.ifinfmsg import IFF_NOARP
from pr2modules.netlink.rtnl.ifaddrmsg import ifaddrmsg
from pr2modules.netlink.rtnl.ifstatsmsg import ifstatsmsg
from pr2modules.netlink.rtnl.iprsocket import IPRSocket
from pr2modules.netlink.rtnl.iprsocket import IPBatchSocket
from pr2modules.netlink.rtnl.riprsocket import RawIPRSocket
from pr2modules.netlink.rtnl.nsidmsg import nsidmsg
from pr2modules.netlink.rtnl.nsinfmsg import nsinfmsg
from pr2modules.netlink.exceptions import SkipInode
from pr2modules.netlink.exceptions import NetlinkError
from pr2modules.common import AF_MPLS
from pr2modules.common import basestring
from pr2modules.common import getbroadcast
DEFAULT_TABLE = 254
log = logging.getLogger(__name__)
def transform_handle(handle):
if isinstance(handle, basestring):
(major, minor) = [int(x if x else '0', 16) for x in handle.split(':')]
handle = (major << 8 * 2) | minor
return handle
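# A few illustrative values for transform_handle(); the 'major:minor' form
# follows the usual tc handle notation:
#
#     transform_handle('1:')     -> 0x10000
#     transform_handle('1:0')    -> 0x10000
#     transform_handle('ffff:')  -> 0xffff0000
#     transform_handle(0x10001)  -> 0x10001   (integers pass through)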
class RTNL_API(object):
'''
    `RTNL_API` should not be instantiated by itself. It is intended
    to be used as a mixin class. The following classes use `RTNL_API`:
* `IPRoute` -- RTNL API to the current network namespace
* `NetNS` -- RTNL API to another network namespace
* `IPBatch` -- RTNL compiler
* `ShellIPR` -- RTNL via standard I/O, runs IPRoute in a shell
    It is an old-school API that provides access to rtnetlink as-is.
    It helps you retrieve and change almost all the data available
    through rtnetlink::
from pyroute2 import IPRoute
ipr = IPRoute()
# create an interface
ipr.link('add', ifname='brx', kind='bridge')
# lookup the index
dev = ipr.link_lookup(ifname='brx')[0]
# bring it down
ipr.link('set', index=dev, state='down')
# change the interface MAC address and rename it just for fun
ipr.link('set', index=dev,
address='00:11:22:33:44:55',
ifname='br-ctrl')
# add primary IP address
ipr.addr('add', index=dev,
address='10.0.0.1', mask=24,
broadcast='10.0.0.255')
# add secondary IP address
ipr.addr('add', index=dev,
address='10.0.0.2', mask=24,
broadcast='10.0.0.255')
# bring it up
ipr.link('set', index=dev, state='up')
'''
def __init__(self, *argv, **kwarg):
if 'netns_path' in kwarg:
self.netns_path = kwarg['netns_path']
else:
self.netns_path = config.netns_path
super(RTNL_API, self).__init__(*argv, **kwarg)
if not self.nlm_generator:
def _match(*argv, **kwarg):
return tuple(self._genmatch(*argv, **kwarg))
self._genmatch = self._match
self._match = _match
def _match(self, match, msgs):
# filtered results, the generator version
for msg in msgs:
if hasattr(match, '__call__'):
if match(msg):
yield msg
elif isinstance(match, dict):
matches = []
for key in match:
KEY = msg.name2nla(key)
if isinstance(match[key], types.FunctionType):
if msg.get(key) is not None:
matches.append(match[key](msg.get(key)))
elif msg.get_attr(KEY) is not None:
matches.append(match[key](msg.get_attr(KEY)))
else:
matches.append(False)
else:
matches.append(msg.get(key) == match[key] or
msg.get_attr(KEY) ==
match[key])
if all(matches):
yield msg
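    # A sketch of the two equivalent filter forms accepted by the dump
    # methods below (get_addr(), get_neighbours() etc.); the dict form
    # matches either the message field or the corresponding NLA:
    #
    #     ipr.get_addr(match={'index': 1})
    #     ipr.get_addr(match=lambda msg: msg['index'] == 1)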
# 8<---------------------------------------------------------------
#
def dump(self):
'''
Iterate all the objects -- links, routes, addresses etc.
'''
##
# Well, it's the Linux API, why OpenBSD / FreeBSD here?
#
# 'Cause when you run RemoteIPRoute, it uses this class,
# and the code may be run on BSD systems as well, though
# BSD systems have only subset of the API
#
if self.uname[0] == 'OpenBSD':
methods = (self.get_links,
self.get_addr,
self.get_neighbours,
self.get_routes)
else:
methods = (self.get_links,
self.get_addr,
self.get_neighbours,
self.get_routes,
self.get_vlans,
partial(self.fdb, 'dump'),
partial(self.get_rules, family=AF_INET),
partial(self.get_rules, family=AF_INET6))
for method in methods:
for msg in method():
yield msg
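    # Typical use of dump() -- take a snapshot of everything the API can
    # enumerate (a sketch; every yielded object is a parsed netlink message):
    #
    #     for msg in ipr.dump():
    #         print(msg['header']['type'])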
# 8<---------------------------------------------------------------
#
# Listing methods
#
def get_qdiscs(self, index=None):
'''
Get all queue disciplines for all interfaces or for specified
one.
'''
msg = tcmsg()
msg['family'] = AF_UNSPEC
ret = self.nlm_request(msg, RTM_GETQDISC)
if index is None:
return ret
else:
return [x for x in ret if x['index'] == index]
def get_filters(self, index=0, handle=0, parent=0):
'''
Get filters for specified interface, handle and parent.
'''
msg = tcmsg()
msg['family'] = AF_UNSPEC
msg['index'] = index
msg['handle'] = handle
msg['parent'] = parent
return self.nlm_request(msg, RTM_GETTFILTER)
def get_classes(self, index=0):
'''
Get classes for specified interface.
'''
msg = tcmsg()
msg['family'] = AF_UNSPEC
msg['index'] = index
return self.nlm_request(msg, RTM_GETTCLASS)
def get_vlans(self, **kwarg):
'''
Dump available vlan info on bridge ports
'''
# IFLA_EXT_MASK, extended info mask
#
# include/uapi/linux/rtnetlink.h
# 1 << 0 => RTEXT_FILTER_VF
# 1 << 1 => RTEXT_FILTER_BRVLAN
# 1 << 2 => RTEXT_FILTER_BRVLAN_COMPRESSED
# 1 << 3 => RTEXT_FILTER_SKIP_STATS
#
# maybe place it as mapping into ifinfomsg.py?
#
match = kwarg.get('match', None) or kwarg or None
return self.link('dump',
family=AF_BRIDGE,
ext_mask=2,
match=match)
def get_links(self, *argv, **kwarg):
'''
Get network interfaces.
        By default it returns all interfaces. The arguments vector
        can contain interface indices or the special keyword
        'all'::
ip.get_links()
ip.get_links('all')
ip.get_links(1, 2, 3)
interfaces = [1, 2, 3]
ip.get_links(*interfaces)
'''
result = []
links = argv or [0]
if links[0] == 'all': # compat syntax
links = [0]
if links[0] == 0:
cmd = 'dump'
else:
cmd = 'get'
for index in links:
kwarg['index'] = index
result.extend(self.link(cmd, **kwarg))
return result
def get_neighbours(self, family=AF_UNSPEC, match=None, **kwarg):
'''
Dump ARP cache records.
The `family` keyword sets the family for the request:
e.g. `AF_INET` or `AF_INET6` for arp cache, `AF_BRIDGE`
for fdb.
        If the other keyword arguments are not empty, they are used
        as a filter. Also, one can explicitly set the filter as a
        function with the `match` parameter.
Examples::
# get neighbours on the 3rd link:
ip.get_neighbours(ifindex=3)
# get a particular record by dst:
ip.get_neighbours(dst='172.16.0.1')
# get fdb records:
ip.get_neighbours(AF_BRIDGE)
# and filter them by a function:
ip.get_neighbours(AF_BRIDGE, match=lambda x: x['state'] == 2)
'''
return self.neigh('dump', family=family, match=match or kwarg)
def get_ntables(self, family=AF_UNSPEC):
'''
Get neighbour tables
'''
msg = ndtmsg()
msg['family'] = family
return self.nlm_request(msg, RTM_GETNEIGHTBL)
def get_addr(self, family=AF_UNSPEC, match=None, **kwarg):
'''
Dump addresses.
If family is not specified, both AF_INET and AF_INET6 addresses
will be dumped::
# get all addresses
ip.get_addr()
It is possible to apply filters on the results::
# get addresses for the 2nd interface
ip.get_addr(index=2)
# get addresses with IFA_LABEL == 'eth0'
ip.get_addr(label='eth0')
# get all the subnet addresses on the interface, identified
# by broadcast address (should be explicitly specified upon
# creation)
ip.get_addr(index=2, broadcast='192.168.1.255')
A custom predicate can be used as a filter::
ip.get_addr(match=lambda x: x['index'] == 1)
'''
return self.addr('dump', family=family, match=match or kwarg)
def get_rules(self, family=AF_UNSPEC, match=None, **kwarg):
'''
        Get rules. By default rules for all families are returned. To
        explicitly request only the IPv4 rules use `family=AF_INET`.
Example::
ip.get_rules() # get all the rules for all families
ip.get_rules(family=AF_INET6) # get only IPv6 rules
'''
return self.rule((RTM_GETRULE,
NLM_F_REQUEST | NLM_F_ROOT | NLM_F_ATOMIC),
family=family,
match=match or kwarg)
def get_routes(self, family=255, match=None, **kwarg):
'''
Get all routes. You can specify the table. There
are 255 routing classes (tables), and the kernel
returns all the routes on each request. So the
routine filters routes from full output.
Example::
ip.get_routes() # get all the routes for all families
ip.get_routes(family=AF_INET6) # get only IPv6 routes
ip.get_routes(table=254) # get routes from 254 table
The default family=255 is a hack. Despite the specs,
the kernel returns only IPv4 routes for AF_UNSPEC family.
But it returns all the routes for all the families if one
uses an invalid value here. Hack but true. And let's hope
the kernel team will not fix this bug.
'''
# get a particular route?
if isinstance(kwarg.get('dst'), basestring):
return self.route('get', dst=kwarg['dst'])
else:
return self.route('dump',
family=family,
match=match or kwarg)
# 8<---------------------------------------------------------------
# 8<---------------------------------------------------------------
#
# List NetNS info
#
def _dump_one_ns(self, path, registry):
item = nsinfmsg()
item['netnsid'] = 0xffffffff # default netnsid "unknown"
nsfd = 0
info = nsidmsg()
msg = nsidmsg()
try:
nsfd = os.open(path, os.O_RDONLY)
item['inode'] = os.fstat(nsfd).st_ino
#
# if the inode is registered, skip it
#
if item['inode'] in registry:
raise SkipInode()
registry.add(item['inode'])
#
# request NETNSA_NSID
#
# may not work on older kernels ( <4.20 ?)
#
msg['attrs'] = [('NETNSA_FD', nsfd)]
try:
for info in self.nlm_request(msg,
RTM_GETNSID,
NLM_F_REQUEST):
# response to nlm_request() is a list or a generator,
# that's why loop
item['netnsid'] = info.get_attr('NETNSA_NSID')
break
except Exception:
pass
item['attrs'] = [('NSINFO_PATH', path)]
except OSError:
raise SkipInode()
finally:
if nsfd > 0:
os.close(nsfd)
item['header']['type'] = RTM_NEWNETNS
item['header']['target'] = self.target
item['event'] = 'RTM_NEWNETNS'
return item
def _dump_dir(self, path, registry):
for name in os.listdir(path):
# strictly speaking, there is no need to use os.sep,
# since the code is not portable outside of Linux
nspath = '%s%s%s' % (path, os.sep, name)
try:
yield self._dump_one_ns(nspath, registry)
except SkipInode:
pass
def _dump_proc(self, registry):
for name in os.listdir('/proc'):
try:
int(name)
except ValueError:
continue
try:
yield self._dump_one_ns('/proc/%s/ns/net' % name, registry)
except SkipInode:
pass
def get_netns_info(self, list_proc=False):
'''
A prototype method to list available netns and associated
interfaces. A bit weird to have it here and not under
`pr2modules.netns`, but it uses RTNL to get all the info.
'''
#
# register all the ns inodes, not to repeat items in the output
#
registry = set()
#
# fetch veth peers
#
peers = {}
for peer in self.get_links():
netnsid = peer.get_attr('IFLA_LINK_NETNSID')
if netnsid is not None:
if netnsid not in peers:
peers[netnsid] = []
peers[netnsid].append(peer.get_attr('IFLA_IFNAME'))
#
# chain iterators:
#
# * one iterator for every item in self.path
# * one iterator for /proc/<pid>/ns/net
#
views = []
for path in self.netns_path:
views.append(self._dump_dir(path, registry))
if list_proc:
views.append(self._dump_proc(registry))
#
# iterate all the items
#
for view in views:
try:
for item in view:
#
# remove uninitialized 'value' field
#
del item['value']
#
# fetch peers for that ns
#
for peer in peers.get(item['netnsid'], []):
item['attrs'].append(('NSINFO_PEER', peer))
yield item
except OSError:
pass
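    # A short usage sketch for get_netns_info(); NSINFO_PATH and NSINFO_PEER
    # are the attributes assembled above:
    #
    #     for ns in ipr.get_netns_info(list_proc=True):
    #         print(ns.get_attr('NSINFO_PATH'),
    #               [v for (k, v) in ns['attrs'] if k == 'NSINFO_PEER'])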
# 8<---------------------------------------------------------------
# 8<---------------------------------------------------------------
#
# Shortcuts
#
def get_default_routes(self, family=AF_UNSPEC, table=DEFAULT_TABLE):
'''
Get default routes
'''
# according to iproute2/ip/iproute.c:print_route()
return [x for x in self.get_routes(family, table=table)
if (x.get_attr('RTA_DST', None) is None and
x['dst_len'] == 0)]
def link_lookup(self, **kwarg):
'''
        Lookup interface index (indices) by a first-level NLA
        value.
Example::
ip.link_lookup(address="52:54:00:9d:4e:3d")
ip.link_lookup(ifname="lo")
ip.link_lookup(operstate="UP")
        Please note that link_lookup() returns a list, not a
        single value.
'''
if set(kwarg) in ({'index', }, {'ifname', }, {'index', 'ifname'}):
# shortcut for index and ifname
try:
for link in self.link('get', **kwarg):
return [link['index']]
except NetlinkError:
return []
else:
# otherwise fallback to the userspace filter
return [link['index'] for link in self.get_links(match=kwarg)]
# 8<---------------------------------------------------------------
# 8<---------------------------------------------------------------
#
# Shortcuts to flush RTNL objects
#
def flush_routes(self, *argv, **kwarg):
'''
Flush routes -- purge route records from a table.
Arguments are the same as for `get_routes()`
routine. Actually, this routine implements a pipe from
`get_routes()` to `nlm_request()`.
'''
ret = []
for route in self.get_routes(*argv, **kwarg):
self.put(route, msg_type=RTM_DELROUTE, msg_flags=NLM_F_REQUEST)
ret.append(route)
return ret
def flush_addr(self, *argv, **kwarg):
'''
Flush IP addresses.
Examples::
# flush all addresses on the interface with index 2:
ipr.flush_addr(index=2)
# flush all addresses with IFA_LABEL='eth0':
ipr.flush_addr(label='eth0')
'''
flags = NLM_F_CREATE | NLM_F_EXCL | NLM_F_REQUEST
ret = []
for addr in self.get_addr(*argv, **kwarg):
self.put(addr, msg_type=RTM_DELADDR, msg_flags=flags)
ret.append(addr)
return ret
def flush_rules(self, *argv, **kwarg):
'''
        Flush rules. Please keep in mind that by default the function
operates on **all** rules of **all** families. To work only on
IPv4 rules, one should explicitly specify `family=AF_INET`.
Examples::
# flush all IPv4 rule with priorities above 5 and below 32000
ipr.flush_rules(family=AF_INET, priority=lambda x: 5 < x < 32000)
# flush all IPv6 rules that point to table 250:
ipr.flush_rules(family=socket.AF_INET6, table=250)
'''
flags = NLM_F_CREATE | NLM_F_EXCL | NLM_F_REQUEST
ret = []
for rule in self.get_rules(*argv, **kwarg):
self.put(rule, msg_type=RTM_DELRULE, msg_flags=flags)
ret.append(rule)
return ret
# 8<---------------------------------------------------------------
# 8<---------------------------------------------------------------
#
# Extensions to low-level functions
#
def brport(self, command, **kwarg):
'''
Set bridge port parameters. Example::
idx = ip.link_lookup(ifname='eth0')
ip.brport("set", index=idx, unicast_flood=0, cost=200)
ip.brport("show", index=idx)
        Possible keywords are NLA names for the `protinfo_bridge` class,
        without the prefix and in lower case.
'''
if (command in ('dump', 'show')) and ('match' not in kwarg):
match = kwarg
else:
match = kwarg.pop('match', None)
flags_dump = NLM_F_REQUEST | NLM_F_DUMP
flags_req = NLM_F_REQUEST | NLM_F_ACK
commands = {'set': (RTM_SETLINK, flags_req),
'dump': (RTM_GETLINK, flags_dump),
'show': (RTM_GETLINK, flags_dump)}
(command, msg_flags) = commands.get(command, command)
msg = ifinfmsg()
if command == RTM_GETLINK:
msg['index'] = kwarg.get('index', 0)
else:
msg['index'] = kwarg.pop('index', 0)
msg['family'] = AF_BRIDGE
protinfo = IPBrPortRequest(kwarg)
msg['attrs'].append(('IFLA_PROTINFO', protinfo, 0x8000))
ret = self.nlm_request(msg,
msg_type=command,
msg_flags=msg_flags)
if match is not None:
ret = self._match(match, ret)
if not (command == RTM_GETLINK and self.nlm_generator):
ret = tuple(ret)
return ret
def vlan_filter(self, command, **kwarg):
'''
        Vlan filters are another approach to support vlans in Linux.
Before vlan filters were introduced, there was only one way
to bridge vlans: one had to create vlan interfaces and
then add them as ports::
+------+ +----------+
net --> | eth0 | <--> | eth0.500 | <---+
+------+ +----------+ |
v
+------+ +-----+
net --> | eth1 | | br0 |
+------+ +-----+
^
+------+ +----------+ |
net --> | eth2 | <--> | eth2.500 | <---+
+------+ +----------+
        It means that one has to create as many bridges as there are
        vlans. Vlan filters allow one to bridge the underlying interfaces
        together and to create vlans directly on the bridge::
# v500 label shows which interfaces have vlan filter
+------+ v500
net --> | eth0 | <-------+
+------+ |
v
+------+ +-----+ +---------+
net --> | eth1 | <--> | br0 |<-->| br0v500 |
+------+ +-----+ +---------+
^
+------+ v500 |
net --> | eth2 | <-------+
+------+
In this example vlan 500 will be allowed only on ports `eth0` and
`eth2`, though all three eth nics are bridged.
Some example code::
# create bridge
ip.link("add",
ifname="br0",
kind="bridge")
# attach a port
ip.link("set",
index=ip.link_lookup(ifname="eth0")[0],
master=ip.link_lookup(ifname="br0")[0])
# set vlan filter
ip.vlan_filter("add",
index=ip.link_lookup(ifname="eth0")[0],
vlan_info={"vid": 500})
# create vlan interface on the bridge
ip.link("add",
ifname="br0v500",
kind="vlan",
link=ip.link_lookup(ifname="br0")[0],
vlan_id=500)
# set all UP
ip.link("set",
index=ip.link_lookup(ifname="br0")[0],
state="up")
ip.link("set",
index=ip.link_lookup(ifname="br0v500")[0],
state="up")
ip.link("set",
index=ip.link_lookup(ifname="eth0")[0],
state="up")
# set IP address
ip.addr("add",
index=ip.link_lookup(ifname="br0v500")[0],
address="172.16.5.2",
mask=24)
Now all the traffic to the network 172.16.5.2/24 will go
to vlan 500 only via ports that have such vlan filter.
Required arguments for `vlan_filter()` -- `index` and `vlan_info`.
Vlan info struct::
{"vid": uint16,
"flags": uint16}
More details:
* kernel:Documentation/networking/switchdev.txt
* pr2modules.netlink.rtnl.ifinfmsg:... vlan_info
One can specify `flags` as int or as a list of flag names:
* `master` == 0x1
* `pvid` == 0x2
* `untagged` == 0x4
* `range_begin` == 0x8
* `range_end` == 0x10
* `brentry` == 0x20
E.g.::
{"vid": 20,
"flags": ["pvid", "untagged"]}
# is equal to
{"vid": 20,
"flags": 6}
Commands:
**add**
Add vlan filter to a bridge port. Example::
ip.vlan_filter("add", index=2, vlan_info={"vid": 200})
**del**
Remove vlan filter from a bridge port. Example::
ip.vlan_filter("del", index=2, vlan_info={"vid": 200})
'''
flags_req = NLM_F_REQUEST | NLM_F_ACK
commands = {'add': (RTM_SETLINK, flags_req),
'del': (RTM_DELLINK, flags_req)}
kwarg['family'] = AF_BRIDGE
kwarg['kwarg_filter'] = IPBridgeRequest
(command, flags) = commands.get(command, command)
return tuple(self.link((command, flags), **kwarg))
def fdb(self, command, **kwarg):
'''
Bridge forwarding database management.
More details:
* kernel:Documentation/networking/switchdev.txt
* pr2modules.netlink.rtnl.ndmsg
**add**
Add a new FDB record. Works in the same way as ARP cache
management, but some additional NLAs can be used::
# simple FDB record
#
ip.fdb('add',
ifindex=ip.link_lookup(ifname='br0')[0],
lladdr='00:11:22:33:44:55',
dst='10.0.0.1')
# specify vlan
# NB: vlan should exist on the device, use
# `vlan_filter()`
#
ip.fdb('add',
ifindex=ip.link_lookup(ifname='br0')[0],
lladdr='00:11:22:33:44:55',
dst='10.0.0.1',
vlan=200)
# specify vxlan id and port
# NB: works only for vxlan devices, use
# `link("add", kind="vxlan", ...)`
#
# if port is not specified, the default one is used
# by the kernel.
#
# if vni (vxlan id) is equal to the device vni,
# the kernel doesn't report it back
#
ip.fdb('add',
ifindex=ip.link_lookup(ifname='vx500')[0]
lladdr='00:11:22:33:44:55',
dst='10.0.0.1',
port=5678,
vni=600)
**append**
Append a new FDB record. The same syntax as for **add**.
**del**
Remove an existing FDB record. The same syntax as for **add**.
**dump**
Dump all the FDB records. If any `**kwarg` is provided,
results will be filtered::
# dump all the records
ip.fdb('dump')
# show only specific lladdr, dst, vlan etc.
ip.fdb('dump', lladdr='00:11:22:33:44:55')
ip.fdb('dump', dst='10.0.0.1')
ip.fdb('dump', vlan=200)
'''
kwarg['family'] = AF_BRIDGE
# nud -> state
if 'nud' in kwarg:
kwarg['state'] = kwarg.pop('nud')
if (command in ('add', 'del', 'append')) and \
not (kwarg.get('state', 0) & ndmsg.states['noarp']):
# state must contain noarp in add / del / append
kwarg['state'] = kwarg.pop('state', 0) | ndmsg.states['noarp']
# other assumptions
if not kwarg.get('state', 0) & (ndmsg.states['permanent'] |
ndmsg.states['reachable']):
# permanent (default) or reachable
kwarg['state'] |= ndmsg.states['permanent']
if not kwarg.get('flags', 0) & (ndmsg.flags['self'] |
ndmsg.flags['master']):
# self (default) or master
kwarg['flags'] = kwarg.get('flags', 0) | ndmsg.flags['self']
#
return self.neigh(command, **kwarg)
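    # For reference, an 'add' with no explicit state/flags resolves, per the
    # defaults above, to state = noarp|permanent and flags = self, i.e.:
    #
    #     ip.fdb('add', ifindex=idx, lladdr='00:11:22:33:44:55')
    #     # is equivalent to
    #     # ip.neigh('add', family=AF_BRIDGE, ifindex=idx,
    #     #          lladdr='00:11:22:33:44:55',
    #     #          state=ndmsg.states['noarp'] | ndmsg.states['permanent'],
    #     #          flags=ndmsg.flags['self'])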
# 8<---------------------------------------------------------------
#
# General low-level configuration methods
#
def neigh(self, command, **kwarg):
'''
Neighbours operations, same as `ip neigh` or `bridge fdb`
**add**
Add a neighbour record, e.g.::
from pyroute2 import IPRoute
from pr2modules.netlink.rtnl import ndmsg
# add a permanent record on veth0
idx = ip.link_lookup(ifname='veth0')[0]
ip.neigh('add',
dst='172.16.45.1',
lladdr='00:11:22:33:44:55',
ifindex=idx,
state=ndmsg.states['permanent'])
**set**
Set an existing record or create a new one, if it doesn't exist.
The same as above, but the command is "set"::
ip.neigh('set',
dst='172.16.45.1',
lladdr='00:11:22:33:44:55',
ifindex=idx,
state=ndmsg.states['permanent'])
**change**
Change an existing record. If the record doesn't exist, fail.
**del**
Delete an existing record.
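A minimal sketch, reusing the keywords from the **add** example
above (`idx` is assumed to be looked up already)::
ip.neigh('del',
dst='172.16.45.1',
lladdr='00:11:22:33:44:55',
ifindex=idx)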
**dump**
Dump all the records in the NDB::
ip.neigh('dump')
**get**
Get specific record (dst and ifindex are mandatory). Available
only on recent kernel::
ip.neigh('get',
dst='172.16.45.1',
ifindex=idx)
'''
if (command == 'dump') and ('match' not in kwarg):
match = kwarg
else:
match = kwarg.pop('match', None)
flags_dump = NLM_F_REQUEST | NLM_F_DUMP
flags_base = NLM_F_REQUEST | NLM_F_ACK
flags_make = flags_base | NLM_F_CREATE | NLM_F_EXCL
flags_append = flags_base | NLM_F_CREATE | NLM_F_APPEND
flags_change = flags_base | NLM_F_REPLACE
flags_replace = flags_change | NLM_F_CREATE
commands = {'add': (RTM_NEWNEIGH, flags_make),
'set': (RTM_NEWNEIGH, flags_replace),
'replace': (RTM_NEWNEIGH, flags_replace),
'change': (RTM_NEWNEIGH, flags_change),
'del': (RTM_DELNEIGH, flags_make),
'remove': (RTM_DELNEIGH, flags_make),
'delete': (RTM_DELNEIGH, flags_make),
'dump': (RTM_GETNEIGH, flags_dump),
'get': (RTM_GETNEIGH, flags_base),
'append': (RTM_NEWNEIGH, flags_append)}
(command, flags) = commands.get(command, command)
if 'nud' in kwarg:
kwarg['state'] = kwarg.pop('nud')
msg = ndmsg.ndmsg()
for field in msg.fields:
msg[field[0]] = kwarg.pop(field[0], 0)
msg['family'] = msg['family'] or AF_INET
msg['attrs'] = []
# fix nud kwarg
if isinstance(msg['state'], basestring):
msg['state'] = ndmsg.states_a2n(msg['state'])
for key in kwarg:
nla = ndmsg.ndmsg.name2nla(key)
if kwarg[key] is not None:
msg['attrs'].append([nla, kwarg[key]])
ret = self.nlm_request(msg,
msg_type=command,
msg_flags=flags)
if match:
ret = self._match(match, ret)
if not (command == RTM_GETNEIGH and self.nlm_generator):
ret = tuple(ret)
return ret
def link(self, command, **kwarg):
'''
Link operations.
Keywords to set up ifinfmsg fields:
* index -- interface index
* family -- AF_BRIDGE for bridge operations, otherwise 0
* flags -- device flags
* change -- change mask
All other keywords will be translated to NLA names, e.g.
`mtu -> IFLA_MTU`, `af_spec -> IFLA_AF_SPEC` etc. You can
provide a complete NLA structure or let filters do it for
you. E.g., these pairs show equal statements::
# set device MTU
ip.link("set", index=x, mtu=1000)
ip.link("set", index=x, IFLA_MTU=1000)
# add vlan device
ip.link("add", ifname="test", kind="dummy")
ip.link("add", ifname="test",
IFLA_LINKINFO={'attrs': [['IFLA_INFO_KIND', 'dummy']]})
Filters are implemented in the `pr2modules.netlink.rtnl.req` module.
You can contribute your own if you miss shortcuts.
Commands:
**add**
To create an interface, one should specify the interface kind::
ip.link("add",
ifname="test",
kind="dummy")
The kind can be any of those supported by kernel. It can be
`dummy`, `bridge`, `bond` etc. On modern kernels one can specify
even interface index::
ip.link("add",
ifname="br-test",
kind="bridge",
index=2345)
Specific type notes:
► geneve
Create GENEVE tunnel::
ip.link("add",
ifname="genx",
kind="geneve",
geneve_id=42,
geneve_remote="172.16.0.101")
Support for GENEVE over IPv6 is also included; use `geneve_remote6`
to configure a remote IPv6 address.
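A sketch by analogy with the IPv4 example above (the IPv6 address
is an illustrative placeholder)::
ip.link("add",
ifname="genx6",
kind="geneve",
geneve_id=43,
geneve_remote6="2001:db8::1")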
► gre
Create GRE tunnel::
ip.link("add",
ifname="grex",
kind="gre",
gre_local="172.16.0.1",
gre_remote="172.16.0.101",
gre_ttl=16)
The keyed GRE requires explicit iflags/oflags specification::
ip.link("add",
ifname="grex",
kind="gre",
gre_local="172.16.0.1",
gre_remote="172.16.0.101",
gre_ttl=16,
gre_ikey=10,
gre_okey=10,
gre_iflags=32,
gre_oflags=32)
Support for GRE over IPv6 is also included; use `kind=ip6gre` and
`ip6gre_` as the prefix for its values.
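A sketch by analogy with the `gre` example above, following the
`ip6gre_` prefix convention (addresses are placeholders)::
ip.link("add",
ifname="grex6",
kind="ip6gre",
ip6gre_local="2001:db8::1",
ip6gre_remote="2001:db8::2")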
► ipip
Create ipip tunnel::
ip.link("add",
ifname="tun1",
kind="ipip",
ipip_local="172.16.0.1",
ipip_remote="172.16.0.101",
ipip_ttl=16)
Support for sit and ip6tnl is also included; use `kind=sit` and `sit_`
as prefix for sit tunnels, and `kind=ip6tnl` and `ip6tnl_` prefix for
ip6tnl tunnels.
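A sketch for a `sit` tunnel by analogy with the `ipip` example
above, following the `sit_` prefix convention::
ip.link("add",
ifname="sit1",
kind="sit",
sit_local="172.16.0.1",
sit_remote="172.16.0.101")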
► macvlan
Macvlan interfaces act like VLANs within the OS. The macvlan driver
provides the ability to add several MAC addresses on one interface,
where every MAC address is reflected with a virtual interface in
the system.
In some setups macvlan interfaces can replace bridge interfaces,
providing a simpler and at the same time high-performance
solution::
ip.link("add",
ifname="mvlan0",
kind="macvlan",
link=ip.link_lookup(ifname="em1")[0],
macvlan_mode="private").commit()
Several macvlan modes are available: "private", "vepa", "bridge",
"passthru". Ususally the default is "vepa".
► macvtap
Almost the same as macvlan, but also creates a character tap device::
ip.link("add",
ifname="mvtap0",
kind="macvtap",
link=ip.link_lookup(ifname="em1")[0],
macvtap_mode="vepa")
This will create a device file `"/dev/tap%s" % index`
► tuntap
Possible `tuntap` keywords:
- `mode` — "tun" or "tap"
- `uid` — integer
- `gid` — integer
- `ifr` — dict of tuntap flags (see ifinfmsg:... tuntap_data)
Create a tap interface::
ip.link("add",
ifname="tap0",
kind="tuntap",
mode="tap")
Tun/tap interfaces are created using `ioctl()`, but the library
provides a transparent way to manage them using netlink API.
► veth
To properly create a `veth` interface, one should also specify
`peer`, since `veth` interfaces are created in pairs::
# simple call
ip.link("add", ifname="v1p0", kind="veth", peer="v1p1")
# set up specific veth peer attributes
ip.link("add",
ifname="v1p0",
kind="veth",
peer={"ifname": "v1p1",
"net_ns_fd": "test_netns"})
► vlan
VLAN interfaces require additional parameters, `vlan_id` and
`link`, where `link` is a master interface to create VLAN on::
ip.link("add",
ifname="v100",
kind="vlan",
link=ip.link_lookup(ifname="eth0")[0],
vlan_id=100)
It is also possible to create 802.1ad interfaces::
# create external vlan 802.1ad, s-tag
ip.link("add",
ifname="v100s",
kind="vlan",
link=ip.link_lookup(ifname="eth0")[0],
vlan_id=100,
vlan_protocol=0x88a8)
# create internal vlan 802.1q, c-tag
ip.link("add",
ifname="v200c",
kind="vlan",
link=ip.link_lookup(ifname="v100s")[0],
vlan_id=200,
vlan_protocol=0x8100)
► vrf
VRF interfaces (see linux/Documentation/networking/vrf.txt)::
ip.link("add",
ifname="vrf-foo",
kind="vrf",
vrf_table=42)
► vxlan
VXLAN interfaces are like VLAN ones, but require a few more
parameters::
ip.link("add",
ifname="vx101",
kind="vxlan",
vxlan_link=ip.link_lookup(ifname="eth0")[0],
vxlan_id=101,
vxlan_group='239.1.1.1',
vxlan_ttl=16)
All possible vxlan parameters are listed in the module
`pr2modules.netlink.rtnl.ifinfmsg:... vxlan_data`.
► ipoib
The IPoIB driver provides the ability to create several IP interfaces
on one interface.
IPoIB interfaces require the following parameter:
`link` : The master interface to create IPoIB on.
The following parameters can also be provided:
`pkey` : Infiniband partition key the IP interface is associated with
`mode` : Underlying infiniband transport mode.
One of: ['datagram', 'connected']
`umcast` : If set (1), multicast group membership for this interface is
handled by user space.
Example::
ip.link("add",
ifname="ipoib1",
kind="ipoib",
link=ip.link_lookup(ifname="ib0")[0],
pkey=10)
**set**
Set interface attributes::
# get interface index
x = ip.link_lookup(ifname="eth0")[0]
# put link down
ip.link("set", index=x, state="down")
# rename and set MAC addr
ip.link("set", index=x, address="00:11:22:33:44:55", name="bala")
# set MTU and TX queue length
ip.link("set", index=x, mtu=1000, txqlen=2000)
# bring link up
ip.link("set", index=x, state="up")
Keyword "state" is reserved. State can be "up" or "down",
it is a shortcut::
state="up": flags=1, mask=1
state="down": flags=0, mask=0
SR-IOV virtual function setup::
# get PF index
x = ip.link_lookup(ifname="eth0")[0]
# setup macaddr
ip.link("set",
index=x, # PF index
vf={"vf": 0, # VF index
"mac": "00:11:22:33:44:55"}) # address
# setup vlan
ip.link("set",
index=x, # PF index
vf={"vf": 0, # VF index
"vlan": 100}) # the simplest case
# setup QinQ
ip.link("set",
index=x, # PF index
vf={"vf": 0, # VF index
"vlan": [{"vlan": 100, # vlan id
"proto": 0x88a8}, # 802.1ad
{"vlan": 200, # vlan id
"proto": 0x8100}]}) # 802.1q
**update**
Almost the same as `set`, but uses a different message type and
flags. In most cases the result is the same, but it may differ;
if you're not sure which to use, use `set`.
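A minimal sketch, assuming `x` is an interface index obtained via
`link_lookup()` as in the `set` examples above::
ip.link("update", index=x, mtu=1000)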
**del**
Destroy the interface::
ip.link("del", index=ip.link_lookup(ifname="dummy0")[0])
**dump**
Dump info for all interfaces
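Keyword arguments, if given, are used as a match filter on the
dump results, e.g.::
# dump all the interfaces
ip.link("dump")
# dump only the loopback interface
ip.link("dump", ifname="lo")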
**get**
Get specific interface info::
ip.link("get", index=ip.link_lookup(ifname="br0")[0])
Get extended attributes like SR-IOV setup::
ip.link("get", index=3, ext_mask=1)
'''
if (command == 'dump') and ('match' not in kwarg):
match = kwarg
else:
match = kwarg.pop('match', None)
if command[:4] == 'vlan':
log.warning('vlan filters are managed via `vlan_filter()`')
log.warning('this compatibility hack will be removed soon')
return self.vlan_filter(command[5:], **kwarg)
flags_dump = NLM_F_REQUEST | NLM_F_DUMP
flags_req = NLM_F_REQUEST | NLM_F_ACK
flags_create = flags_req | NLM_F_CREATE | NLM_F_EXCL
flag_append = flags_create | NLM_F_APPEND
commands = {'set': (RTM_NEWLINK, flags_req),
'update': (RTM_SETLINK, flags_create),
'add': (RTM_NEWLINK, flags_create),
'del': (RTM_DELLINK, flags_create),
'property_add': (RTM_NEWLINKPROP, flag_append),
'property_del': (RTM_DELLINKPROP, flags_req),
'remove': (RTM_DELLINK, flags_create),
'delete': (RTM_DELLINK, flags_create),
'dump': (RTM_GETLINK, flags_dump),
'get': (RTM_GETLINK, NLM_F_REQUEST)}
msg = ifinfmsg()
# ifinfmsg fields
#
# ifi_family
# ifi_type
# ifi_index
# ifi_flags
# ifi_change
#
msg['family'] = kwarg.pop('family', 0)
lrq = kwarg.pop('kwarg_filter', IPLinkRequest)
(command, msg_flags) = commands.get(command, command)
# index
msg['index'] = kwarg.pop('index', 0)
# flags
flags = kwarg.pop('flags', 0) or 0
# change
mask = kwarg.pop('mask', 0) or kwarg.pop('change', 0) or 0
# UP/DOWN shortcut
if 'state' in kwarg:
mask = 1 # IFF_UP mask
if kwarg['state'].lower() == 'up':
flags = 1 # 0 (down) or 1 (up)
del kwarg['state']
# arp on/off shortcut
if 'arp' in kwarg:
mask |= IFF_NOARP
if not kwarg.pop('arp'):
flags |= IFF_NOARP
msg['flags'] = flags
msg['change'] = mask
if 'altname' in kwarg:
altname = kwarg.pop("altname")
if command in (RTM_NEWLINKPROP, RTM_DELLINKPROP):
if not isinstance(altname, (list, tuple, set)):
altname = [altname]
kwarg["IFLA_PROP_LIST"] = {"attrs": [
("IFLA_ALT_IFNAME", alt_ifname)
for alt_ifname in altname
]}
else:
kwarg["IFLA_ALT_IFNAME"] = altname
# apply filter
kwarg = lrq(kwarg)
# attach NLA
for key in kwarg:
nla = type(msg).name2nla(key)
if kwarg[key] is not None:
msg['attrs'].append([nla, kwarg[key]])
ret = self.nlm_request(msg,
msg_type=command,
msg_flags=msg_flags)
if match is not None:
ret = self._match(match, ret)
if not (command == RTM_GETLINK and self.nlm_generator):
ret = tuple(ret)
return ret
def addr(self, command, index=None, address=None, mask=None,
family=None, scope=None, match=None, **kwarg):
'''
Address operations
* command -- add, delete, replace, dump
* index -- device index
* address -- IPv4 or IPv6 address
* mask -- address mask
* family -- socket.AF_INET for IPv4 or socket.AF_INET6 for IPv6
* scope -- the address scope, see /etc/iproute2/rt_scopes
* kwarg -- dictionary, any ifaddrmsg field or NLA
Later the method signature will be changed to::
def addr(self, command, match=None, **kwarg):
# the method body
So only keyword arguments (except for the command) will be accepted.
The reason for this change is a unification of the API.
Example::
idx = 62
ip.addr('add', index=idx, address='10.0.0.1', mask=24)
ip.addr('add', index=idx, address='10.0.0.2', mask=24)
With more NLAs::
# explicitly set broadcast address
ip.addr('add', index=idx,
address='10.0.0.3',
broadcast='10.0.0.255',
prefixlen=24)
# make the secondary address visible to ifconfig: add label
ip.addr('add', index=idx,
address='10.0.0.4',
broadcast='10.0.0.255',
prefixlen=24,
label='eth0:1')
Configure p2p address on an interface::
ip.addr('add', index=idx,
address='10.1.1.2',
mask=24,
local='10.1.1.1')
'''
if command in ('get', 'set'):
return
lrq = kwarg.pop('kwarg_filter', IPAddrRequest)
flags_dump = NLM_F_REQUEST | NLM_F_DUMP
flags_base = NLM_F_REQUEST | NLM_F_ACK
flags_create = flags_base | NLM_F_CREATE | NLM_F_EXCL
flags_replace = flags_base | NLM_F_REPLACE | NLM_F_CREATE
commands = {'add': (RTM_NEWADDR, flags_create),
'del': (RTM_DELADDR, flags_create),
'remove': (RTM_DELADDR, flags_create),
'delete': (RTM_DELADDR, flags_create),
'replace': (RTM_NEWADDR, flags_replace),
'dump': (RTM_GETADDR, flags_dump)}
(command, flags) = commands.get(command, command)
# fetch args
index = index or kwarg.pop('index', 0)
family = family or kwarg.pop('family', None)
prefixlen = mask or kwarg.pop('mask', 0) or kwarg.pop('prefixlen', 0)
scope = scope or kwarg.pop('scope', 0)
# move address to kwarg
# FIXME: add deprecation notice
if address:
kwarg['address'] = address
# try to guess family, if it is not forced
if kwarg.get('address') and family is None:
if address.find(":") > -1:
family = AF_INET6
mask = mask or 128
else:
family = AF_INET
mask = mask or 32
# setup the message
msg = ifaddrmsg()
msg['index'] = index
msg['family'] = family or 0
msg['prefixlen'] = prefixlen
msg['scope'] = scope
kwarg = lrq(kwarg)
try:
kwarg.sync_cacheinfo()
except AttributeError:
pass
# inject IFA_LOCAL, if family is AF_INET and IFA_LOCAL is not set
if family == AF_INET and \
kwarg.get('address') and \
kwarg.get('local') is None:
kwarg['local'] = kwarg['address']
# patch broadcast, if needed
if kwarg.get('broadcast') is True:
kwarg['broadcast'] = getbroadcast(address, mask, family)
# work on NLA
for key in kwarg:
nla = ifaddrmsg.name2nla(key)
if kwarg[key] not in (None, ''):
msg['attrs'].append([nla, kwarg[key]])
ret = self.nlm_request(msg,
msg_type=command,
msg_flags=flags,
terminate=lambda x: x['header']['type'] ==
NLMSG_ERROR)
if match:
ret = self._match(match, ret)
if not (command == RTM_GETADDR and self.nlm_generator):
ret = tuple(ret)
return ret
def tc(self, command, kind=None, index=0, handle=0, **kwarg):
'''
"Swiss knife" for traffic control. With the method you can
add, delete or modify qdiscs, classes and filters.
* command -- add or delete qdisc, class, filter.
* kind -- a string identifier -- "sfq", "htb", "u32" and so on.
* handle -- integer or string
Command can be one of ("add", "del", "add-class", "del-class",
"add-filter", "del-filter") (see `commands` dict in the code).
Handle notice: traditional iproute2 notation, like "1:0", actually
represents two parts in one four-bytes integer::
1:0 -> 0x10000
1:1 -> 0x10001
ff:0 -> 0xff0000
ffff:1 -> 0xffff0001
Target notice: if your target is a class/qdisc that applies an
algorithm that can only apply to an upstream traffic profile, but
your keys variable explicitly references a match that is only
relevant for upstream traffic, the kernel will reject the filter,
unless you're dealing with devices like IMQs.
For pyroute2 tc() you can use both forms: integer like 0xffff0000
or string like 'ffff:0000'. By default, handle is 0, so you can add
simple classless queues w/o need to specify handle. Ingress queue
causes handle to be 0xffff0000.
So, to set up an sfq queue on interface 1, the function call
will look like this::
ip = IPRoute()
ip.tc("add", "sfq", 1)
Instead of string commands ("add", "del"...), you can use also
module constants, `RTM_NEWQDISC`, `RTM_DELQDISC` and so on::
ip = IPRoute()
flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE | NLM_F_EXCL
ip.tc((RTM_NEWQDISC, flags), "sfq", 1)
It should be noted that "change", "change-class" and
"change-filter" work like "replace", "replace-class" and
"replace-filter", except they will fail if the node doesn't
exist (while it would have been created by "replace"). This is
not the same behaviour as with "tc" where "change" can be used
to modify the value of some options while leaving the others
unchanged. However, as not all entities support this
operation, we believe the "change" commands as implemented
here are more useful.
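A minimal sketch of the difference, assuming an htb qdisc on the
interface with index `idx` (values are illustrative)::
# "replace" creates the qdisc if it doesn't exist yet
ip.tc("replace", "htb", idx, 0x10000, default=0x200000)
# "change" fails if the qdisc doesn't exist
ip.tc("change", "htb", idx, 0x10000, default=0x200000)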
Also available "modules" (returns tc plugins dict) and "help"
commands::
help(ip.tc("modules")["htb"])
print(ip.tc("help", "htb"))
'''
if command == 'set':
return
if command == 'modules':
return tc_plugins
if command == 'help':
p = tc_plugins.get(kind)
if p is not None and hasattr(p, '__doc__'):
return p.__doc__
else:
return 'No help available'
flags_base = NLM_F_REQUEST | NLM_F_ACK
flags_make = flags_base | NLM_F_CREATE | NLM_F_EXCL
flags_change = flags_base | NLM_F_REPLACE
flags_replace = flags_change | NLM_F_CREATE
commands = {'add': (RTM_NEWQDISC, flags_make),
'del': (RTM_DELQDISC, flags_make),
'remove': (RTM_DELQDISC, flags_make),
'delete': (RTM_DELQDISC, flags_make),
'change': (RTM_NEWQDISC, flags_change),
'replace': (RTM_NEWQDISC, flags_replace),
'add-class': (RTM_NEWTCLASS, flags_make),
'del-class': (RTM_DELTCLASS, flags_make),
'change-class': (RTM_NEWTCLASS, flags_change),
'replace-class': (RTM_NEWTCLASS, flags_replace),
'add-filter': (RTM_NEWTFILTER, flags_make),
'del-filter': (RTM_DELTFILTER, flags_make),
'change-filter': (RTM_NEWTFILTER, flags_change),
'replace-filter': (RTM_NEWTFILTER, flags_replace)}
if isinstance(command, int):
command = (command, flags_make)
if command == 'del':
if index == 0:
index = [x['index'] for x in self.get_links()
if x['index'] != 1]
if isinstance(index, (list, tuple, set)):
return list(chain(*(self.tc('del', index=x) for x in index)))
command, flags = commands.get(command, command)
msg = tcmsg()
# transform handle, parent and target, if needed:
handle = transform_handle(handle)
for item in ('parent', 'target', 'default'):
if item in kwarg and kwarg[item] is not None:
kwarg[item] = transform_handle(kwarg[item])
msg['index'] = index
msg['handle'] = handle
opts = kwarg.get('opts', None)
##
#
#
if kind in tc_plugins:
p = tc_plugins[kind]
msg['parent'] = kwarg.pop('parent', getattr(p, 'parent', 0))
if hasattr(p, 'fix_msg'):
p.fix_msg(msg, kwarg)
if kwarg:
if command in (RTM_NEWTCLASS, RTM_DELTCLASS):
opts = p.get_class_parameters(kwarg)
else:
opts = p.get_parameters(kwarg)
else:
msg['parent'] = kwarg.get('parent', TC_H_ROOT)
if kind is not None:
msg['attrs'].append(['TCA_KIND', kind])
if opts is not None:
msg['attrs'].append(['TCA_OPTIONS', opts])
return tuple(self.nlm_request(msg, msg_type=command, msg_flags=flags))
def route(self, command, **kwarg):
'''
Route operations.
Keywords to set up rtmsg fields:
* dst_len, src_len -- destination and source mask(see `dst` below)
* tos -- type of service
* table -- routing table
* proto -- `redirect`, `boot`, `static` (see `rt_proto`)
* scope -- routing realm
* type -- `unicast`, `local`, etc. (see `rt_type`)
`pr2modules/netlink/rtnl/rtmsg.py` rtmsg.nla_map:
* table -- routing table to use (default: 254)
* gateway -- via address
* prefsrc -- preferred source IP address
* dst -- the same as `prefix`
* iif -- incoming traffic interface
* oif -- outgoing traffic interface
etc.
One can specify mask not as `dst_len`, but as a part of `dst`,
e.g.: `dst="10.0.0.0/24"`.
Commands:
**add**
Example::
ip.route("add", dst="10.0.0.0/24", gateway="192.168.0.1")
It is also possible to set route metrics. There are two ways
to do so. The first is to use 'raw' NLA notation::
ip.route("add",
dst="10.0.0.0",
mask=24,
gateway="192.168.0.1",
metrics={"attrs": [["RTAX_MTU", 1400],
["RTAX_HOPLIMIT", 16]]})
The second way is to use shortcuts, provided by `IPRouteRequest`
class, which is applied to `**kwarg` automatically::
ip.route("add",
dst="10.0.0.0/24",
gateway="192.168.0.1",
metrics={"mtu": 1400,
"hoplimit": 16})
...
More `route()` examples. Blackhole route::
ip.route("add",
dst="10.0.0.0/24",
type="blackhole")
Create a route with metrics::
ip.route('add',
dst='172.16.0.0/24',
gateway='10.0.0.10',
metrics={'mtu': 1400,
'hoplimit': 16})
Multipath route::
ip.route("add",
dst="10.0.0.0/24",
multipath=[{"gateway": "192.168.0.1", "hops": 2},
{"gateway": "192.168.0.2", "hops": 1},
{"gateway": "192.168.0.3"}])
MPLS lwtunnel on eth0::
idx = ip.link_lookup(ifname='eth0')[0]
ip.route("add",
dst="10.0.0.0/24",
oif=idx,
encap={"type": "mpls",
"labels": "200/300"})
Create MPLS route: push label::
# $ sudo modprobe mpls_router
# $ sudo sysctl net.mpls.platform_labels=1024
ip.route('add',
family=AF_MPLS,
oif=idx,
dst=0x200,
newdst=[0x200, 0x300])
MPLS multipath::
idx = ip.link_lookup(ifname='eth0')[0]
ip.route("add",
dst="10.0.0.0/24",
table=20,
multipath=[{"gateway": "192.168.0.1",
"encap": {"type": "mpls",
"labels": 200}},
{"ifindex": idx,
"encap": {"type": "mpls",
"labels": 300}}])
MPLS target can be int, string, dict or list::
"labels": 300 # simple label
"labels": "300" # the same
"labels": (200, 300) # stacked
"labels": "200/300" # the same
# explicit label definition
"labels": {"bos": 1,
"label": 300,
"tc": 0,
"ttl": 16}
Create SEG6 tunnel encap mode (kernel >= 4.10)::
ip.route('add',
dst='2001:0:0:10::2/128',
oif=idx,
encap={'type': 'seg6',
'mode': 'encap',
'segs': '2000::5,2000::6'})
Create SEG6 tunnel inline mode (kernel >= 4.10)::
ip.route('add',
dst='2001:0:0:10::2/128',
oif=idx,
encap={'type': 'seg6',
'mode': 'inline',
'segs': ['2000::5', '2000::6']})
Create SEG6 tunnel inline mode with hmac (kernel >= 4.10)::
ip.route('add',
dst='2001:0:0:22::2/128',
oif=idx,
encap={'type': 'seg6',
'mode': 'inline',
'segs':'2000::5,2000::6,2000::7,2000::8',
'hmac':0xf})
Create SEG6 tunnel with ip4ip6 encapsulation (kernel >= 4.14)::
ip.route('add',
dst='172.16.0.0/24',
oif=idx,
encap={'type': 'seg6',
'mode': 'encap',
'segs': '2000::5,2000::6'})
Create SEG6LOCAL tunnel End.DX4 action (kernel >= 4.14)::
ip.route('add',
dst='2001:0:0:10::2/128',
oif=idx,
encap={'type': 'seg6local',
'action': 'End.DX4',
'nh4': '172.16.0.10'})
Create SEG6LOCAL tunnel End.DT6 action (kernel >= 4.14)::
ip.route('add',
dst='2001:0:0:10::2/128',
oif=idx,
encap={'type': 'seg6local',
'action': 'End.DT6',
'table':'10'})
Create SEG6LOCAL tunnel End.B6 action (kernel >= 4.14)::
ip.route('add',
dst='2001:0:0:10::2/128',
oif=idx,
encap={'type': 'seg6local',
'action': 'End.B6',
'srh':{'segs': '2000::5,2000::6'}})
Create SEG6LOCAL tunnel End.B6 action with hmac (kernel >= 4.14)::
ip.route('add',
dst='2001:0:0:10::2/128',
oif=idx,
encap={'type': 'seg6local',
'action': 'End.B6',
'srh': {'segs': '2000::5,2000::6',
'hmac':0xf}})
**change**, **replace**, **append**
Commands `change`, `replace` and `append` have the same meanings
as in ip-route(8): `change` modifies only an existing route, while
`replace` creates a new one if there is no such route yet.
`append` allows creating an IPv6 multipath route.
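A minimal sketch, reusing the destination from the **add**
examples above::
# "replace" will create the route if it is missing
ip.route("replace", dst="10.0.0.0/24", gateway="192.168.0.1")
# "change" will fail if the route does not exist yet
ip.route("change", dst="10.0.0.0/24", gateway="192.168.0.2")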
**del**
Remove the route. The same syntax as for **add**.
**get**
Get route by spec.
**dump**
Dump all routes.
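A sketch; for "dump", keyword arguments are used as a match
filter on the results::
# get the route towards a specific host
ip.route("get", dst="10.0.0.1")
# dump all the routes
ip.route("dump")
# dump only the routes from the given table
ip.route("dump", table=254)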
'''
# 8<----------------------------------------------------
# FIXME
# flags should be moved to some more general place
flags_dump = NLM_F_DUMP | NLM_F_REQUEST
flags_base = NLM_F_REQUEST | NLM_F_ACK
flags_make = flags_base | NLM_F_CREATE | NLM_F_EXCL
flags_change = flags_base | NLM_F_REPLACE
flags_replace = flags_change | NLM_F_CREATE
flags_append = flags_base | NLM_F_CREATE | NLM_F_APPEND
# 8<----------------------------------------------------
# transform kwarg
if command in ('add', 'set', 'replace', 'change', 'append'):
kwarg['proto'] = kwarg.get('proto', 'static') or 'static'
kwarg['type'] = kwarg.get('type', 'unicast') or 'unicast'
kwarg = IPRouteRequest(kwarg)
if 'match' not in kwarg and command in ('dump', 'show'):
match = kwarg
else:
match = kwarg.pop('match', None)
callback = kwarg.pop('callback', None)
commands = {'add': (RTM_NEWROUTE, flags_make),
'set': (RTM_NEWROUTE, flags_replace),
'replace': (RTM_NEWROUTE, flags_replace),
'change': (RTM_NEWROUTE, flags_change),
'append': (RTM_NEWROUTE, flags_append),
'del': (RTM_DELROUTE, flags_make),
'remove': (RTM_DELROUTE, flags_make),
'delete': (RTM_DELROUTE, flags_make),
'get': (RTM_GETROUTE, NLM_F_REQUEST),
'show': (RTM_GETROUTE, flags_dump),
'dump': (RTM_GETROUTE, flags_dump)}
(command, flags) = commands.get(command, command)
msg = rtmsg()
# table is mandatory; by default == 254
# if table is not defined in kwarg, save it there
# also for nla_attr:
table = kwarg.get('table', 254)
msg['table'] = table if table <= 255 else 252
msg['family'] = kwarg.pop('family', AF_INET)
msg['scope'] = kwarg.pop('scope', rt_scope['universe'])
msg['dst_len'] = kwarg.pop('dst_len', None) or kwarg.pop('mask', 0)
msg['src_len'] = kwarg.pop('src_len', 0)
msg['tos'] = kwarg.pop('tos', 0)
msg['flags'] = kwarg.pop('flags', 0)
msg['type'] = kwarg.pop('type', rt_type['unspec'])
msg['proto'] = kwarg.pop('proto', rt_proto['unspec'])
msg['attrs'] = []
if msg['family'] == AF_MPLS:
for key in tuple(kwarg):
if key not in ('dst', 'newdst', 'via', 'multipath', 'oif'):
kwarg.pop(key)
for key in kwarg:
nla = rtmsg.name2nla(key)
if nla == 'RTA_DST' and not kwarg[key]:
continue
if kwarg[key] is not None:
msg['attrs'].append([nla, kwarg[key]])
# fix IP family, if needed
if msg['family'] in (AF_UNSPEC, 255):
if key in ('dst', 'src', 'gateway', 'prefsrc', 'newdst') \
and isinstance(kwarg[key], basestring):
msg['family'] = AF_INET6 if kwarg[key].find(':') >= 0 \
else AF_INET
elif key == 'multipath' and len(kwarg[key]) > 0:
hop = kwarg[key][0]
attrs = hop.get('attrs', [])
for attr in attrs:
if attr[0] == 'RTA_GATEWAY':
msg['family'] = AF_INET6 if \
attr[1].find(':') >= 0 else AF_INET
break
ret = self.nlm_request(msg,
msg_type=command,
msg_flags=flags,
callback=callback)
if match:
ret = self._match(match, ret)
if not (command == RTM_GETROUTE and self.nlm_generator):
ret = tuple(ret)
return ret
def rule(self, command, *argv, **kwarg):
'''
Rule operations
- command — add, delete
- table — 0 < table id < 253
- priority — 0 < rule's priority < 32766
- action — type of rule, default 'FR_ACT_NOP' (see fibmsg.py)
- rtscope — routing scope, default RT_SCOPE_UNIVERSE
`(RT_SCOPE_UNIVERSE|RT_SCOPE_SITE|\
RT_SCOPE_LINK|RT_SCOPE_HOST|RT_SCOPE_NOWHERE)`
- family — rule's family (socket.AF_INET (default) or
socket.AF_INET6)
- src — IP source for Source Based (Policy Based) routing's rule
- dst — IP for Destination Based (Policy Based) routing's rule
- src_len — Mask for Source Based (Policy Based) routing's rule
- dst_len — Mask for Destination Based (Policy Based) routing's
rule
- iifname — Input interface for Interface Based (Policy Based)
routing's rule
- oifname — Output interface for Interface Based (Policy Based)
routing's rule
- uid_range — Range of user identifiers, as a string like "1000:1234"
- dport_range — Range of destination ports, as a string like "80-120"
- sport_range — Range of source ports, as a string like "80-120"
All packets route via table 10::
# 32000: from all lookup 10
# ...
ip.rule('add', table=10, priority=32000)
Default action::
# 32001: from all lookup 11 unreachable
# ...
iproute.rule('add',
table=11,
priority=32001,
action='FR_ACT_UNREACHABLE')
Use source address to choose a routing table::
# 32004: from 10.64.75.141 lookup 14
# ...
iproute.rule('add',
table=14,
priority=32004,
src='10.64.75.141')
Use dst address to choose a routing table::
# 32005: from 10.64.75.141/24 lookup 15
# ...
iproute.rule('add',
table=15,
priority=32005,
dst='10.64.75.141',
dst_len=24)
Match fwmark::
# 32006: from 10.64.75.141 fwmark 0xa lookup 15
# ...
iproute.rule('add',
table=15,
priority=32006,
dst='10.64.75.141',
fwmark=10)
'''
if command == 'set':
return
flags_base = NLM_F_REQUEST | NLM_F_ACK
flags_make = flags_base | NLM_F_CREATE | NLM_F_EXCL
flags_dump = NLM_F_REQUEST | NLM_F_ROOT | NLM_F_ATOMIC
commands = {'add': (RTM_NEWRULE, flags_make),
'del': (RTM_DELRULE, flags_make),
'remove': (RTM_DELRULE, flags_make),
'delete': (RTM_DELRULE, flags_make),
'dump': (RTM_GETRULE, flags_dump)}
if isinstance(command, int):
command = (command, flags_make)
command, flags = commands.get(command, command)
if argv:
# this code block will be removed in some release
log.error('rule(): positional parameters are deprecated')
names = ['table', 'priority', 'action', 'family',
'src', 'src_len', 'dst', 'dst_len', 'fwmark',
'iifname', 'oifname']
kwarg.update(dict(zip(names, argv)))
kwarg = IPRuleRequest(kwarg)
msg = fibmsg()
table = kwarg.get('table', 0)
msg['table'] = table if table <= 255 else 252
for key in ('family',
'src_len',
'dst_len',
'action',
'tos',
'flags'):
msg[key] = kwarg.pop(key, 0)
msg['attrs'] = []
for key in kwarg:
nla = fibmsg.name2nla(key)
if kwarg[key] is not None:
msg['attrs'].append([nla, kwarg[key]])
ret = self.nlm_request(msg,
msg_type=command,
msg_flags=flags)
if 'match' in kwarg:
ret = self._match(kwarg['match'], ret)
if not (command == RTM_GETRULE and self.nlm_generator):
ret = tuple(ret)
return ret
def stats(self, command, **kwarg):
'''
Stats prototype.
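A minimal usage sketch (the API is a prototype and may change)::
# dump stats for all the interfaces
ip.stats('dump')
# get stats for a single interface by index
ip.stats('get', ifindex=2)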
'''
if (command == 'dump') and ('match' not in kwarg):
match = kwarg
else:
match = kwarg.pop('match', None)
commands = {'dump': (RTM_GETSTATS, NLM_F_REQUEST | NLM_F_DUMP),
'get': (RTM_GETSTATS, NLM_F_REQUEST | NLM_F_ACK)}
command, flags = commands.get(command, command)
msg = ifstatsmsg()
msg['filter_mask'] = kwarg.get('filter_mask', 31)
msg['ifindex'] = kwarg.get('ifindex', 0)
ret = self.nlm_request(msg,
msg_type=command,
msg_flags=flags)
if match is not None:
ret = self._match(match, ret)
if not (command == RTM_GETSTATS and self.nlm_generator):
ret = tuple(ret)
return ret
# 8<---------------------------------------------------------------
class IPBatch(RTNL_API, IPBatchSocket):
'''
Netlink requests compiler. Does not send any requests, but
instead stores them in the internal binary buffer. The
contents of the buffer can be used to send batch requests,
to test custom netlink parsers and so on.
Uses `RTNL_API` and provides all the same API as normal
`IPRoute` objects::
# create the batch compiler
ipb = IPBatch()
# compile requests into the internal buffer
ipb.link("add", index=550, ifname="test", kind="dummy")
ipb.link("set", index=550, state="up")
ipb.addr("add", index=550, address="10.0.0.2", mask=24)
# save the buffer
data = ipb.batch
# reset the buffer
ipb.reset()
...
# send the buffer
IPRoute().sendto(data, (0, 0))
'''
pass
class IPRoute(RTNL_API, IPRSocket):
'''
Regular ordinary utility class, see RTNL API for the list of methods.
'''
pass
class RawIPRoute(RTNL_API, RawIPRSocket):
'''
The same as `IPRoute`, but does not use the netlink proxy.
Thus it cannot manage e.g. tun/tap interfaces.
'''
pass
|
py
|
1a569ef6e760f23c90f7dab4949d27176a6d7750
|
"""``resources`` module of ``dataql.solvers``.
This module holds the base solvers for resources:
- ``AttributeSolver`` for ``Field``
- ``ObjectsSolver`` for ``Object``
- ``ListSolver`` for ``List``
Notes
-----
When we talk about "resources", we talk about subclasses of ``dataql.resources.Resource``.
"""
from abc import abstractmethod, ABCMeta
from collections.abc import Iterable
from dataql.resources import Field, List, Object
from dataql.solvers.exceptions import NotIterable
class Solver(metaclass=ABCMeta):
"""Base class for all resource solvers.
The main entry point of a solver is the ``solve`` method
Attributes
----------
solvable_resources : tuple (class attribute)
Holds the resource classes (subclasses of ``dataql.resources.Resource``) that can be
solved by this solver.
Must be defined in each sub-classes.
registry : dataql.solvers.registry.Registry
The registry that instantiated this solver.
Notes
-----
The ``solve`` method simply calls ``solve_value``, then ``coerce`` with the result.
To change the behavior, simply override at least one of these two methods.
"""
solvable_resources = ()
def __init__(self, registry):
"""Init the solver.
Arguments
---------
registry : dataql.solvers.registry.Registry
The registry to use to get the source and solve sub-resources if any.
"""
self.registry = registry
def __repr__(self):
"""String representation of a ``Solver`` instance.
Returns
-------
str
The string representation of the current ``Solver`` instance.
Example
-------
>>> from dataql.solvers.registry import Registry
>>> registry = Registry()
>>> from datetime import date
>>> registry.register(date, allow_class=True)
>>> class MySolver(Solver):
... def coerce(self, value, resource): return value
>>> MySolver(registry)
<MySolver>
"""
return '<%s>' % self.__class__.__name__
def solve(self, value, resource):
"""Solve a resource with a value.
Arguments
---------
value : ?
A value to solve in combination with the given resource. The first filter of the
resource will be applied on this value (next filters on the result of the previous
filter).
resource : dataql.resources.Resource
An instance of a subclass of ``Resource`` to solve with the given value.
Returns
-------
(depends on the implementation of the ``coerce`` method)
Raises
------
CannotSolve
If a solver accepts to solve a resource but cannot finally solve it.
Allows ``Registry.solve_resource`` to use the next available solver.
Notes
-----
This method simply calls ``solve_value``, then ``coerce`` with the result.
To change the behavior, simply override at least one of these two methods.
"""
result = self.solve_value(value, resource)
return self.coerce(result, resource)
def solve_value(self, value, resource):
"""Solve a resource with a value, without coercing.
Arguments
---------
value : ?
A value to solve in combination with the given resource. The first filter of the
resource will be applied on this value (next filters on the result of the previous
filter).
resource : dataql.resources.Resource
An instance of a subclass of ``Resource`` to solve with the given value.
Returns
-------
The result of all filters applied on the value for the first filter, and result of the
previous filter for next filters.
Example
-------
>>> from dataql.solvers.registry import Registry
>>> registry = Registry()
>>> from datetime import date
>>> registry.register(date, allow_class=True)
>>> registry.register(str)
>>> class MySolver(Solver):
... def coerce(self, value, resource): return value
>>> solver = MySolver(registry)
>>> from dataql.resources import Filter, NamedArg, PosArg, SliceFilter
>>> field = Field(None,
... filters=[
... Filter(name='fromtimestamp', args=[PosArg(1433109600)]),
... Filter(name='replace', args=[NamedArg('year', '=', 2014)]),
... Filter(name='strftime', args=[PosArg('%F')]),
... Filter(name='replace', args=[PosArg('2014'), PosArg('2015')]),
... ]
... )
>>> solver.solve_value(date, field)
'2015-06-01'
>>> solver.solve_value(None, field)
>>> d = {'foo': {'date': date(2015, 6, 1)}, 'bar': {'date': None}, 'baz': [{'date': None}]}
>>> registry.register(dict)
>>> solver.solve_value(d, Field(None, filters=[
... Filter(name='foo'),
... Filter(name='date'),
... Filter(name='strftime', args=[PosArg('%F')]),
... ]))
'2015-06-01'
>>> solver.solve_value(d, Field(None, filters=[
... Filter(name='bar'),
... Filter(name='date'),
... Filter(name='strftime', args=[PosArg('%F')]),
... ]))
>>> solver.solve_value(d, Field(None, filters=[
... Filter(name='baz'),
... SliceFilter(0),
... Filter(name='date'),
... Filter(name='strftime', args=[PosArg('%F')]),
... ]))
# Example of how to raise a ``CannotSolve`` exception.
>>> from dataql.solvers.exceptions import CannotSolve
>>> raise CannotSolve(solver, Field('fromtimestamp'), date) # doctest: +ELLIPSIS
Traceback (most recent call last):
dataql...CannotSolve: Solver `<MySolver>` was not able to solve...`<Field[fromtimestamp]>`.
"""
# The given value is the starting point on which we apply the first filter.
result = value
# Apply filters one by one on the previous result.
if result is not None:
for filter_ in resource.filters:
result = self.registry.solve_filter(result, filter_)
if result is None:
break
return result
@abstractmethod
def coerce(self, value, resource):
"""Convert the value got after ``solve_value``.
Must be implemented in subclasses.
"""
raise NotImplementedError()
@classmethod
def can_solve(cls, resource):
"""Tells if the solver is able to resolve the given resource.
Arguments
---------
resource : subclass of ``dataql.resources.Resource``
The resource to check if it is solvable by the current solver class
Returns
-------
boolean
``True`` if the current solver class can solve the given resource, ``False`` otherwise.
Example
-------
>>> AttributeSolver.solvable_resources
(<class 'dataql.resources.Field'>,)
>>> AttributeSolver.can_solve(Field('foo'))
True
>>> AttributeSolver.can_solve(Object('bar'))
False
"""
for solvable_resource in cls.solvable_resources:
if isinstance(resource, solvable_resource):
return True
return False
class AttributeSolver(Solver):
"""Solver aimed to retrieve fields from values.
This solver can only handle ``dataql.resources.Field`` resources.
Attributes
----------
solvable_resources : tuple (class attribute)
Holds the resource classes (from ``dataql.resources``) that can be solved by this solver.
Example
-------
>>> from dataql.solvers.registry import Registry
>>> registry = Registry()
>>> from datetime import date
>>> registry.register(date)
>>> solver = AttributeSolver(registry)
>>> solver.solve(date(2015, 6, 1), Field('year'))
2015
"""
solvable_resources = (Field,)
def coerce(self, value, resource):
"""Coerce the value to an acceptable one.
Only these kinds of values are returned as is:
- str
- int
- float
- True
- False
- None
For all other values, it will be coerced using ``self.coerce_default`` (which converts the
value to a string in the default implementation).
Arguments
---------
value : ?
The value to be coerced.
resource : dataql.resources.Resource
The ``Resource`` object used to obtain this value from the original one.
Returns
-------
str | int | float | True | False | None
The coerced value.
Example
-------
>>> from dataql.solvers.registry import Registry
>>> registry = Registry()
>>> from datetime import date
>>> registry.register(date)
>>> solver = AttributeSolver(registry)
>>> solver.coerce('foo', None)
'foo'
>>> solver.coerce(11, None)
11
>>> solver.coerce(1.1, None)
1.1
>>> solver.coerce(True, None)
True
>>> solver.coerce(False, None)
False
>>> solver.coerce(date(2015, 6, 1), None)
'2015-06-01'
>>> solver.coerce(None, None)
"""
if value in (True, False, None):
return value
if isinstance(value, (int, float)):
return value
if isinstance(value, str):
return value
return self.coerce_default(value, resource)
def coerce_default(self, value, resource):
"""Coerce a value using a default converter, ``str()``.
Arguments
---------
value : ?
The value to be coerced.
resource : dataql.resources.Resource
The ``Resource`` object used to obtain this value from the original one.
Returns
-------
str
The coerced value.
Example
-------
>>> from dataql.solvers.registry import Registry
>>> registry = Registry()
>>> from datetime import date
>>> registry.register(date)
>>> solver = AttributeSolver(registry)
>>> solver.coerce_default(date(2015, 6, 1), None)
'2015-06-01'
>>> solver.coerce_default(date, None)
"<class 'datetime.date'>"
"""
return str(value)
class ObjectSolver(Solver):
"""Solver aimed to retrieve many fields from values.
This solver can only handle ``dataql.resources.Object`` resources.
Example
-------
>>> from dataql.solvers.registry import Registry
>>> registry = Registry()
>>> from datetime import date
>>> registry.register(date)
# Create an object from which we'll want an object (``date``)
>>> from dataql.solvers.registry import EntryPoints
>>> obj = EntryPoints(registry,
... date = date(2015, 6, 1),
... )
>>> solver = ObjectSolver(registry)
>>> d = solver.solve(
... obj,
... Object('date', resources=[Field('day'), Field('month'), Field('year')])
... )
>>> [(k, d[k]) for k in sorted(d)]
[('day', 1), ('month', 6), ('year', 2015)]
"""
solvable_resources = (Object,)
def coerce(self, value, resource):
"""Get a dict with attributes from ``value``.
Arguments
---------
value : ?
The value to get some resources from.
resource : dataql.resources.Object
The ``Object`` object used to obtain this value from the original one.
Returns
-------
dict
A dictionary containing the wanted resources for the given value.
Keys are the ``name`` attributes of the resources, and the values are the solved values.
"""
return {r.name: self.registry.solve_resource(value, r) for r in resource.resources}
class ListSolver(Solver):
"""Solver aimed to retrieve many fields from many values of the same type.
This solver can only handle ``dataql.resources.List`` resources.
Example
-------
>>> from dataql.solvers.registry import Registry
>>> registry = Registry()
>>> from datetime import date
>>> registry.register(date)
# Create an object from which we'll want a list (``dates``)
>>> from dataql.solvers.registry import EntryPoints
>>> obj = EntryPoints(registry,
... dates = [date(2015, 6, 1), date(2015, 6, 2)],
... date = date(2015, 6, 1),
... )
>>> solver = ListSolver(registry)
>>> from dataql.resources import Filter, PosArg
>>> solver.solve(
... obj,
... List('dates', resources=[Field(None, [Filter('strftime', args=[PosArg('%F')])])])
... )
['2015-06-01', '2015-06-02']
>>> solver.solve(
... obj,
... List('dates', resources=[Field('day'), Field('month'), Field('year')])
... )
[[1, 6, 2015], [2, 6, 2015]]
>>> from pprint import pprint # will sort the dicts by keys
>>> pprint(solver.solve(
... obj,
... List('dates', resources=[
... Field(None, [Filter('strftime', args=[PosArg('%F')])]),
... Object(None, resources=[Field('day'), Field('month'), Field('year')]),
... ])
... ))
[['2015-06-01', {'day': 1, 'month': 6, 'year': 2015}],
['2015-06-02', {'day': 2, 'month': 6, 'year': 2015}]]
>>> pprint(solver.solve(
... obj,
... List('dates', resources=[
... Object(None, resources=[Field('day'),Field('month'), Field('year')])
... ])
... ))
[{'day': 1, 'month': 6, 'year': 2015}, {'day': 2, 'month': 6, 'year': 2015}]
>>> solver.solve(
... obj,
... List('date', resources=[Field('day'), Field('month'), Field('year')])
... ) # doctest: +ELLIPSIS
Traceback (most recent call last):
dataql.solvers.exceptions.NotIterable: ...
"""
solvable_resources = (List,)
def coerce(self, value, resource):
"""Convert a list of objects in a list of dicts.
Arguments
---------
value : iterable
The list (or other iterable) to get values to get some resources from.
resource : dataql.resources.List
The ``List`` object used to obtain this value from the original one.
Returns
-------
list
A list with one entry for each item obtained from ``value``.
If the ``resource`` has only one sub-resource, each entry in the result list will
be the result of that sub-resource for the corresponding item.
If the ``resource`` has more than one sub-resource, each entry in the result list will
be another list with an entry for each sub-resource of the current item.
Raises
------
dataql.solvers.exceptions.NotIterable
When the value is not iterable.
"""
if not isinstance(value, Iterable):
raise NotIterable(resource, self.registry[value])
# Case #1: we only have one sub-resource, so we return a list with this item for
# each iteration
if len(resource.resources) == 1:
res = resource.resources[0]
return [self.registry.solve_resource(v, res) for v in value]
# Case #2: we have many sub-resources, we return a list with, for each iteration, a
# list with all entries
return [
[self.registry.solve_resource(v, res) for res in resource.resources]
for v in value
]
|
py
|
1a569f768ccd6f466f3832329c72a5795e430796
|
import numpy as np
import time
from rllab.misc import logger
def rollout(env, policy, path_length, render=False, speedup=None):
Da = env.action_space.flat_dim
Do = env.observation_space.flat_dim
observation = env.reset()
policy.reset()
observations = np.zeros((path_length + 1, Do))
actions = np.zeros((path_length, Da))
terminals = np.zeros((path_length, ))
rewards = np.zeros((path_length, ))
agent_infos = []
env_infos = []
t = 0
for t in range(path_length):
action, agent_info = policy.get_action(observation)
next_obs, reward, terminal, env_info = env.step(action)
agent_infos.append(agent_info)
env_infos.append(env_info)
actions[t] = action
terminals[t] = terminal
rewards[t] = reward
observations[t] = observation
observation = next_obs
if render:
env.render()
time_step = 0.05
# guard against the default speedup=None
time.sleep(time_step / (speedup or 1))
if terminal:
break
observations[t + 1] = observation
path = {
'observations': observations[:t + 1],
'actions': actions[:t + 1],
'rewards': rewards[:t + 1],
'terminals': terminals[:t + 1],
'next_observations': observations[1:t + 2],
'agent_infos': agent_infos,
'env_infos': env_infos
}
return path
def rollouts(env, policy, path_length, n_paths):
paths = list()
for i in range(n_paths):
paths.append(rollout(env, policy, path_length))
return paths
class Sampler(object):
def __init__(self, max_path_length, min_pool_size, batch_size):
self._max_path_length = max_path_length
self._min_pool_size = min_pool_size
self._batch_size = batch_size
self.env = None
self.policy = None
self.pool = None
def initialize(self, env, policy, pool):
self.env = env
self.policy = policy
self.pool = pool
def sample(self):
raise NotImplementedError
def batch_ready(self):
enough_samples = self.pool.size >= self._min_pool_size
return enough_samples
def random_batch(self):
return self.pool.random_batch(self._batch_size)
def terminate(self):
self.env.terminate()
def log_diagnostics(self):
logger.record_tabular('pool-size', self.pool.size)
class SimpleSampler(Sampler):
def __init__(self, **kwargs):
super(SimpleSampler, self).__init__(**kwargs)
self._path_length = 0
self._path_return = 0
self._last_path_return = 0
self._max_path_return = -np.inf
self._n_episodes = 0
self._current_observation = None
self._total_samples = 0
def sample(self):
if self._current_observation is None:
self._current_observation = self.env.reset()
action, _ = self.policy.get_action(self._current_observation)
next_observation, reward, terminal, info = self.env.step(action)
self._path_length += 1
self._path_return += reward
self._total_samples += 1
self.pool.add_sample(
observation=self._current_observation,
action=action,
reward=reward,
terminal=terminal,
next_observation=next_observation)
if terminal or self._path_length >= self._max_path_length:
self.policy.reset()
self._current_observation = self.env.reset()
self._path_length = 0
self._max_path_return = max(self._max_path_return,
self._path_return)
self._last_path_return = self._path_return
self._path_return = 0
self._n_episodes += 1
else:
self._current_observation = next_observation
def log_diagnostics(self):
super(SimpleSampler, self).log_diagnostics()
logger.record_tabular('max-path-return', self._max_path_return)
logger.record_tabular('last-path-return', self._last_path_return)
logger.record_tabular('episodes', self._n_episodes)
logger.record_tabular('total-samples', self._total_samples)
class DummySampler(Sampler):
def __init__(self, batch_size, max_path_length):
super(DummySampler, self).__init__(
max_path_length=max_path_length,
min_pool_size=0,
batch_size=batch_size)
def sample(self):
pass
|
py
|
1a56a16b43adda124565739c274bc9d97a09777f
|
import os
from flask import Flask, request, abort, jsonify, json
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import exc
from flask_cors import CORS
import random
from flask_migrate import Migrate
from models import setup_db, Movies, Actors, db
from auth.auth import AuthError, requires_auth
def create_app(test_config=None):
# create and configure the app
app = Flask(__name__)
setup_db(app)
CORS(app, resources={r'/*': {'origins': '*'}})
migrate = Migrate(app, db)
# CORS Headers
@app.after_request
def after_request(response):
response.headers.add('Access-Control-Allow-Headers',
'Content-Type, Authorization, true')
response.headers.add('Access-Control-Allow-Methods',
'GET, PUT, POST, DELETE, OPTIONS')
return response
def get_all_movies():
movies = []
all_movies = Movies.query.all()
for movie in all_movies:
movies.append(movie.format())
return movies
def get_all_actors():
actors = []
all_actors = Actors.query.all()
for actor in all_actors:
actors.append(actor.format())
return actors
# Error Handler
@app.errorhandler(401)
def unauthorized(error):
"""
:error handler for error 401
:param error: Unauthorized
:return: error: HTTP status code, message: Error description
"""
return jsonify({
'success': False,
'error': 401,
'message': ' Unauthorized ' + str(error)
}), 401
@app.errorhandler(404)
def not_found(error):
return jsonify({
'success': False,
'error': 404,
'message': 'Not Found'
}), 404
@app.errorhandler(500)
def internal_server_error(error):
return jsonify({
'success': False,
'error': 500,
'message': 'Token Expired or Internal Server error'
}), 500
@app.errorhandler(422)
def unprocessable_entity(error):
return jsonify({
'success': False,
'error': 422,
'message': 'Unprocessable Entity'
}), 422
@app.route('/')
def get_greeting():
excited = os.environ['EXCITED']
greeting = "Hello,Your are in public Land"
if excited == 'true':
greeting = greeting + "!!!!!"
return greeting
@app.route('/actors', methods=['GET'])
@requires_auth('get:actors')
def getactors(payload):
formated_actors = []
all_actors = Actors.query.all()
for actor in all_actors:
formated_actors.append(actor.format())
return jsonify({
'actors': formated_actors,
'total_actors': len(formated_actors),
'success': True
})
@app.route('/actors', methods=['POST'])
@requires_auth('add:actors')
def add_actors(payload):
if request.get_json().get('actor_age'):
body = request.get_json()
actor_age = body.get('actor_age')
actor_gender = body.get('actor_gender')
actor_name = body.get('actor_name')
actor = Actors(name=actor_name, age=actor_age, gender=actor_gender)
actor.insert()
actor_id = actor.id
actor_added = Actors.query.filter_by(id=actor_id).first()
return jsonify({
'success': True,
'added_actors': actor_added.format()
})
@app.route('/movies', methods=['GET'])
@requires_auth('get:movies')
def movies_get(payload):
formated_movies = []
dictionarize = {}
all_movies_with_actor_name = Movies.query.with_entities(
Movies.id,
Movies.title,
Movies.release_date,
Actors.name).join(Actors, (Movies.actor_id == Actors.id)).all()
for movies in all_movies_with_actor_name:
dictionarize['id'] = movies[0]
dictionarize['movie_name'] = movies[1]
dictionarize['release_date'] = movies[2]
dictionarize['actor_name'] = movies[3]
formated_movies.append(dict(dictionarize))
return jsonify({
'movies': formated_movies,
'total_movies': len(formated_movies),
'success': True
})
@app.route('/movies', methods=['POST'])
@requires_auth('add:movies')
def movies(payload):
if request.get_json().get('movie_name'):
body = request.get_json()
movie_name = body.get('movie_name')
release_date = body.get('release_date')
id_actor = body.get('actor_id')
movie = Movies(title=movie_name, release_date=release_date,
actor_id=id_actor)
movie.insert()
movie_id = movie.id
movie_added = Movies.query.filter_by(id=movie_id).first()
return jsonify({
'success': True,
'added_movie': movie_added.format()
})
@app.route('/movies/<int:movie_id>', methods=['PATCH'])
@requires_auth('patch:movies')
def update_movie_by_id(payload, movie_id):
movie_by_id = Movies.query.filter_by(id=movie_id).first()
if movie_by_id is None:
abort(404)
try:
if request.get_json().get('new_movie_name') and request.get_json()\
.get('new_release_date'):
body = request.get_json()
new_title = body.get('new_movie_name')
new_release_date = body.get('new_release_date')
movie_by_id.title = new_title
movie_by_id.release_date = new_release_date
except ValueError:
try:
if request.get_json().get('new_movie_name'):
body = request.get_json()
new_title = body.get('new_movie_name')
movie_by_id.title = new_title
except ValueError:
try:
if request.get_json().get('new_release_date'):
body = request.get_json()
new_release_date = body.get('new_release_date')
movie_by_id.release_date = new_release_date
except ValueError:
abort(404)
movie_by_id.update()
all_movies = get_all_movies()
return jsonify({
'success': True,
'all_movies': all_movies
})
@app.route('/actors/<int:actor_id>', methods=['PATCH'])
@requires_auth('patch:actors')
def update_actor_by_id(payload, actor_id):
actor_by_id = Actors.query.filter_by(id=actor_id).first()
if actor_by_id is None:
abort(404)
try:
if request.get_json().get('new_actor_name') and request.get_json()\
.get('new_actor_age'):
body = request.get_json()
new_actor_name = body.get('new_actor_name')
new_actor_age = body.get('new_actor_age')
actor_by_id.name = new_actor_name
actor_by_id.age = new_actor_age
except ValueError:
try:
if request.get_json().get('new_actor_name'):
body = request.get_json()
new_actor_name = body.get('new_actor_name')
actor_by_id.name = new_actor_name
except ValueError:
try:
if request.get_json().get('new_actor_age'):
body = request.get_json()
new_actor_age = body.get('new_actor_age')
actor_by_id.age = new_actor_age
except ValueError:
abort(404)
actor_by_id.update()
all_actors = get_all_actors()
return jsonify({
'success': True,
'all_actors': all_actors
})
@app.route('/movies/<int:movie_id>', methods=['DELETE'])
@requires_auth('delete:movies')
def delete_movie_by_id(payload, movie_id):
movie_by_id = Movies.query.filter_by(id=movie_id).first()
if movie_by_id is None:
abort(404)
movie_by_id.delete()
return jsonify({
'success': True,
'deleted': movie_id
})
@app.route('/actors/<int:actor_id>', methods=['DELETE'])
@requires_auth('delete:actors')
def delete_actor_by_id(payload, actor_id):
actor_by_id = Actors.query.filter_by(id=actor_id).first()
if actor_by_id is None:
abort(404)
try:
actor_by_id.delete()
except exc.IntegrityError:
abort(404)
return jsonify({
'success': True,
'deleted': actor_id
})
return app
app = create_app()
if __name__ == '__main__':
app.run()
|
py
|
1a56a2acba4a41b57adbec3f729dec4960aaac55
|
import aiohttp
import asyncio
async def aio_1():
async with aiohttp.ClientSession() as session:
async with session.get('https://www.baidu.com/') as resp:
print(resp.status)
# print(await resp.text())
loop = asyncio.get_event_loop()
loop.run_until_complete(aio_1())
async def aio_2():
async with aiohttp.ClientSession() as session:
async with session.get('https://www.geekdigging.com/') as resp:
print(resp.status)
print(await resp.text())
loop = asyncio.get_event_loop()
loop.run_until_complete(aio_2())
async def aio_3():
timeout = aiohttp.ClientTimeout(total=60)
async with aiohttp.ClientSession(timeout = timeout) as session:
async with session.get('https://www.geekdigging.com/', timeout = timeout) as resp:
print(resp.status)
loop = asyncio.get_event_loop()
loop.run_until_complete(aio_3())
|
py
|
1a56a39a1edc972654e287b4cab07c5a4c0e7ff5
|
import os
import pytest
from pyinsights.cli import run
CONFIG_FILEPATH_FOR_TEST = os.getenv('CONFIG_FILEPATH_FOR_TEST')
PROFILE_FOR_TEST = os.getenv('PROFILE_FOR_TEST')
REGION_FOR_TEST = os.getenv('REGION_FOR_TEST')
@pytest.mark.skipif(
CONFIG_FILEPATH_FOR_TEST is None,
reason='Use AWS Resource'
)
class TestPyInsights:
@pytest.fixture()
def kwargs(self):
return {
'profile': PROFILE_FOR_TEST,
'region': REGION_FOR_TEST,
'config': CONFIG_FILEPATH_FOR_TEST
}
def test_valid_kwargs_with_json_format(self, kwargs):
kwargs['format'] = 'json'
result = run(kwargs)
assert result is True
def test_valid_kwargs_with_table_format(self, kwargs):
kwargs['format'] = 'table'
result = run(kwargs)
assert result is True
|
py
|
1a56a40df8faca452f7d5d093ad70324d47ce499
|
"""EESG.py
Created by Latha Sethuraman, Katherine Dykes.
Copyright (c) NREL. All rights reserved.
Electromagnetic design based on conventional magnetic circuit laws
Structural design based on McDonald's thesis """
from openmdao.api import Group, Problem, ExplicitComponent,ExecComp,IndepVarComp,ScipyOptimizeDriver
import numpy as np
from math import pi, cos, sqrt, radians, sin, exp, log10, log, tan, atan
import sys, os
class EESG(ExplicitComponent):
""" Estimates overall mass dimensions and Efficiency of Electrically Excited Synchronous generator. """
def setup(self):
# EESG generator design inputs
#self.add_input('r_s', val=0.0, units ='m', desc='airgap radius r_s')
self.add_input('rad_ag', val=0.0, units ='m', desc='airgap radius')
self.add_input('l_s', val=0.0, units ='m', desc='Stator core length l_s')
self.add_input('h_s', val=0.0, units ='m', desc='Yoke height h_s')
self.add_input('tau_p',val=0.0, units ='m', desc='Pole pitch self.tau_p')
self.add_input('machine_rating',val=0.0, units ='W', desc='Machine rating')
self.add_input('n_nom',val=0.0, units ='rpm', desc='rated speed')
self.add_input('Torque',val=0.0, units ='N*m', desc='Rated torque ')
self.add_input('I_f',val=0.0000,units='A',desc='Excitation current')
self.add_input('N_f',val=0.0,units='A',desc='field turns')
self.add_input('h_ys',val=0.0, units ='m', desc='Yoke height')
self.add_input('h_yr',val=0.0, units ='m', desc='rotor yoke height')
# structural design variables
self.add_input('n_s' ,val=0.0, desc='number of stator arms n_s')
self.add_input('b_st' , val=0.0, units ='m', desc='arm width b_st')
self.add_input('d_s',val=0.0,units ='m', desc='arm depth d_s')
self.add_input('t_ws' ,val=0.0,units ='m', desc='arm depth thickness self.t_ws')
self.add_input('n_r' ,val=0.0, desc='number of arms n')
self.add_input('b_r' ,val=0.0,units ='m', desc='arm width b_r')
self.add_input('d_r' ,val=0.0, units ='m', desc='arm depth d_r')
self.add_input('t_wr' ,val=0.0, units ='m', desc='arm depth thickness self.t_wr')
self.add_input('R_o',val=0.0, units ='m',desc='Shaft radius')
# EESG generator design outputs
# Magnetic loading
self.add_output('B_symax' ,val=0.0, desc='Peak Stator Yoke flux density B_ymax')
self.add_output('B_tmax',val=0.0, desc='Peak Teeth flux density')
self.add_output('B_rymax',val=0.0, desc='Peak Rotor yoke flux density')
self.add_output('B_gfm',val=0.0, desc='Average air gap flux density B_g')
self.add_output('B_g' ,val=0.0, desc='Peak air gap flux density B_g')
self.add_output('B_pc',val=0.0, desc='Pole core flux density')
# Stator design
self.add_output('N_s' ,val=0.0, desc='Number of turns in the stator winding')
self.add_output('b_s',val=0.0, desc='slot width')
self.add_output('b_t',val=0.0, desc='tooth width')
self.add_output('A_Cuscalc',val=0.0, desc='Conductor cross-section mm^2')
self.add_output('S',val=0.0, desc='Stator slots')
# # Output parameters : Rotor design
self.add_output('h_p',val=0.0, desc='Pole height')
self.add_output('b_p',val=0.0, desc='Pole width')
self.add_output('p',val=0.0, desc='No of pole pairs')
self.add_output('n_brushes',val=0.0, desc='number of brushes')
self.add_output('A_Curcalc',val=0.0, desc='Rotor Conductor cross-section')
# Output parameters : Electrical performance
self.add_output('E_s',val=0.0, desc='Stator phase voltage')
self.add_output('f',val=0.0, desc='Generator output frequency')
self.add_output('I_s',val=0.0, desc='Generator output phase current')
self.add_output('R_s',val=0.0, desc='Stator resistance')
self.add_output('R_r',val=0.0, desc='Rotor resistance')
self.add_output('L_m',val=0.0, desc='Stator synchronising inductance')
self.add_output('J_s',val=0.0, desc='Stator Current density')
self.add_output('J_f',val=0.0, desc='rotor Current density')
self.add_output('A_1',val=0.0, desc='Specific current loading')
self.add_output('Load_mmf_ratio',val=0.0, desc='mmf_ratio')
# Objective functions and output
self.add_output('Mass',val=0.0, desc='Actual mass')
self.add_output('K_rad',val=0.0, desc='K_rad')
self.add_output('Losses',val=0.0, desc='Total loss')
self.add_output('gen_eff',val=0.0, desc='Generator efficiency')
# Structural performance
self.add_output('u_Ar',val=0.0, desc='Rotor radial deflection')
self.add_output('y_Ar',val=0.0, desc='Rotor axial deflection')
self.add_output('z_A_r',val=0.0, desc='Rotor circumferential deflection')
self.add_output('u_As',val=0.0, desc='Stator radial deflection')
self.add_output('y_As',val=0.0, desc='Stator axial deflection')
self.add_output('z_A_s',val=0.0, desc='Stator circumferential deflection')
self.add_output('u_all_r',val=0.0, desc='Allowable radial rotor')
self.add_output('u_all_s',val=0.0, desc='Allowable radial stator')
self.add_output('y_all',val=0.0, desc='Allowable axial')
self.add_output('z_all_s',val=0.0, desc='Allowable circum stator')
self.add_output('z_all_r',val=0.0, desc='Allowable circum rotor')
self.add_output('b_all_s',val=0.0, desc='Allowable arm')
self.add_output('b_all_r',val=0.0, desc='Allowable arm dimensions')
self.add_output('TC1',val=0.0, desc='Torque constraint')
self.add_output('TC2',val=0.0, desc='Torque constraint-rotor')
self.add_output('TC3',val=0.0, desc='Torque constraint-stator')
# Material properties
self.add_input('rho_Fes',val=0.0,units='kg*m**-3', desc='Structural Steel density ')
self.add_input('rho_Fe',val=0.0,units='kg*m**-3', desc='Magnetic Steel density ')
self.add_input('rho_Copper',val=0.0,units='kg*m**-3', desc='Copper density ')
# Mass Outputs
self.add_output('Copper', val=0.0, units='kg', desc='Copper Mass')
self.add_output('Iron', val=0.0, units='kg', desc='Electrical Steel Mass')
self.add_output('Structural_mass', val=0.0, units='kg', desc='Structural Mass')
# Other parameters
self.add_output('Power_ratio',val=0.0, desc='Power_ratio')
self.add_output('Slot_aspect_ratio',val=0.0,desc='Stator slot aspect ratio')
self.add_output('R_out',val=0.0, desc='Outer radius')
# inputs/outputs for interface with drivese
self.add_input('shaft_cm',val= np.zeros(3), units='m', desc='Main Shaft CM')
self.add_input('shaft_length',val=0.0, units='m', desc='main shaft length')
self.add_output('I',val=np.zeros(3),desc='Moments of Inertia for the component [Ixx, Iyy, Izz] around its center of mass')
self.add_output('cm', val=np.zeros(3),desc='COM [x,y,z]')
#self.declare_partials('*', '*', method='fd', form='central', step=1e-6)
def compute(self, inputs, outputs):
        # Unpack inputs
rad_ag = inputs['rad_ag']
l_s = inputs['l_s']
h_s = inputs['h_s']
tau_p = inputs['tau_p']
N_f = inputs['N_f']
I_f = inputs['I_f']
h_ys = inputs['h_ys']
h_yr = inputs['h_yr']
machine_rating = inputs['machine_rating']
n_nom = inputs['n_nom']
Torque = inputs['Torque']
b_st = inputs['b_st']
d_s = inputs['d_s']
t_ws = inputs['t_ws']
n_r = inputs['n_r']
n_s = inputs['n_s']
b_r = inputs['b_r']
d_r = inputs['d_r']
t_wr = inputs['t_wr']
R_o = inputs['R_o']
rho_Fe = inputs['rho_Fe']
rho_Copper = inputs['rho_Copper']
rho_Fes = inputs['rho_Fes']
shaft_cm = inputs['shaft_cm']
shaft_length = inputs['shaft_length']
# Assign values to universal constants
g1 = 9.81 # m / s^2 acceleration due to gravity
E = 2e11 # N / m^2 young's modulus
sigma = 48.373e3 # shear stress
mu_0 = pi * 4e-7 # permeability of free space
phi = radians(90)
# Assign values to design constants
h_w = 0.005
b_so = 0.004 # Stator slot opening
m = 3 # number of phases
q1 = 2 # no of stator slots per pole per phase
b_s_tau_s = 0.45 # ratio of slot width to slot pitch
k_sfil = 0.65 # Slot fill factor (not used)
P_Fe0h = 4 # specific hysteresis losses W / kg @ 1.5 T @50 Hz
P_Fe0e = 1 # specific eddy losses W / kg @ 1.5 T @50 Hz
        rho_Cu = 1.8e-8 * 1.4 # resistivity of copper
k_fes = 0.9 # iron fill factor (not used)
y_tau_p = 1 # coil span / pole pitch fullpitch
k_fillr = 0.7 # rotor slot fill factor
k_s = 0.2 # magnetic saturation factor for iron
T = Torque
cos_phi = 0.85 # power factor
# back iron thickness for rotor and stator
t_s = h_ys
t = h_yr
# Aspect ratio
K_rad = l_s / (2 * rad_ag)
###################################################### Electromagnetic design#############################################
alpha_p = pi / 2 * .7 # (not used)
dia = 2 * rad_ag # air gap diameter
# air gap length and minimum values
g = 0.001 * dia
if(g < 0.005):
g = 0.005
r_r = rad_ag - g # rotor radius
d_se = dia + 2 * h_s + 2 * h_ys # stator outer diameter (not used)
p = np.round(pi * dia / (2 * tau_p)) # number of pole pairs
S = 2 * p*q1 * m # number of slots of stator phase winding
N_conductors = S * 2
N_s = N_conductors / 2/3 # Stator turns per phase
alpha = 180 / S/p # electrical angle (not used)
tau_s = pi * dia / S # slot pitch
h_ps = 0.1 * tau_p # height of pole shoe
b_pc = 0.4 * tau_p # width of pole core
h_pc = 0.6 * tau_p # height of pole core
h_p = 0.7 * tau_p # pole height
b_p = h_p
b_s = tau_s * b_s_tau_s # slot width
Slot_aspect_ratio = h_s / b_s
b_t = tau_s - b_s # tooth width
# Calculating carter factor and effective air gap
g_a = g
K_C1 = (tau_s + 10 * g_a) / (tau_s - b_s + 10 * g_a) # salient pole rotor
g_1 = K_C1 * g
# calculating angular frequency
om_m = 2 * pi * n_nom / 60
om_e = 60
f = n_nom * p / 60
# Slot fill factor according to air gap radius
if (2 * rad_ag>2):
K_fills = 0.65
else:
K_fills = 0.4
# Calculating Stator winding factor
k_y1 = sin(y_tau_p * pi / 2) # chording factor
k_q1 = sin(pi / 6) / q1 / sin(pi / 6 / q1) # winding zone factor
k_wd = k_y1 * k_q1
# Calculating stator winding conductor length, cross-section and resistance
shortpitch = 0
l_Cus = 2 * N_s * (2 * (tau_p - shortpitch / m/q1) + l_s) # length of winding
A_s = b_s * (h_s - h_w)
A_scalc = b_s * 1000 * (h_s - h_w) * 1000 # cross section in mm^2
A_Cus = A_s * q1 * p * K_fills / N_s
A_Cuscalc = A_scalc * q1 * p * K_fills / N_s
R_s = l_Cus * rho_Cu / A_Cus
        # field winding design: conductor length, cross-section and resistance
N_f = np.round(N_f) # rounding the field winding turns to the nearest integer
I_srated = machine_rating / (sqrt(3) * 5000 * cos_phi)
l_pole = l_s - 0.05 + 0.120 # 50mm smaller than stator and 120mm longer to accommodate end stack
K_fe = 0.95
l_pfe = l_pole * K_fe
l_Cur = 4 * p*N_f * (l_pfe + b_pc + pi / 4 * (pi * (r_r - h_pc - h_ps) / p - b_pc))
A_Cur = k_fillr * h_pc * 0.5 / N_f * (pi * (r_r - h_pc - h_ps) / p - b_pc)
A_Curcalc = k_fillr * h_pc * 1000 * 0.5 / N_f * (pi * (r_r - h_pc - h_ps) * 1000 / p - b_pc * 1000)
Slot_Area = A_Cur * 2 * N_f / k_fillr # (not used)
R_r = rho_Cu * l_Cur / A_Cur
# field winding current density
J_f = I_f / A_Curcalc
# calculating air flux density
B_gfm = mu_0 * N_f * I_f / (g_1 * (1 + k_s)) # No load air gap flux density
B_g = B_gfm * 4*sin(0.5 * b_p * pi / tau_p) / pi # fundamental component
B_symax = tau_p * B_g / pi / h_ys # stator yoke flux density
L_fg = 2 * mu_0 * p*l_s * 4*N_f**2 * ((h_ps / (tau_p - b_p)) + (h_pc / (3 * pi * (r_r - h_pc - h_ps) / p - b_pc))) # (not used)
# calculating no-load voltage and stator current
E_s = 2 * N_s * l_s * rad_ag * k_wd * om_m * B_g / sqrt(2) # no-load voltage
I_s = (E_s - (E_s**2 - 4 * R_s * machine_rating / m)**0.5) / (2 * R_s)
# Calculating stator winding current density and specific current loading
A_1 = 6 * N_s * I_s / (pi * dia)
J_s = I_s / A_Cuscalc
# Calculating magnetic loading in other parts of the machine
delta_m = 0 # Initialising load angle
# peak flux density in pole core, rotor yoke and stator teeth
B_pc = (1 / b_pc) * ((2 * tau_p / pi) * B_g * cos(delta_m) + (2 * mu_0 * I_f * N_f * ((2 * h_ps / (tau_p - b_p)) + (h_pc / (tau_p - b_pc)))))
B_rymax = 0.5 * b_pc * B_pc / h_yr
B_tmax = (B_gfm + B_g) * tau_s * 0.5 / b_t
# Calculating leakage inductances in the stator
L_ssigmas = 2 * mu_0 * l_s * N_s**2 / p / q1 * ((h_s - h_w) / (3 * b_s) + h_w / b_so) # slot leakage inductance
L_ssigmaew = mu_0 * 1.2 * N_s**2 / p * 1.2 * (2 / 3 * tau_p + 0.01) # end winding leakage inductance
L_ssigmag = 2 * mu_0 * l_s * N_s**2 / p / q1 * (5 * (g / b_so) / (5 + 4 * (g / b_so))) # tooth tip leakage inductance
L_ssigma = (L_ssigmas + L_ssigmaew + L_ssigmag) # stator leakage inductance
# Calculating effective air gap
'''
What is the source of this function that combines 1st and 13th powers? Very suspicious...
Inputs appear to be in the range of 0.45 to 2.2, so outputs are 180 to 178000
def airGapFn(B, fact):
val = 400 * B + 7 * B**13
ans = val * fact
sys.stderr.write('aGF: B {} val {} ans {}\n'.format(B, val, ans))
return val
At_t = h_s * airGapFn(B_tmax, h_s)
At_sy = tau_p * 0.5 * airGapFn(B_symax, tau_p/2)
At_pc = (h_pc + h_ps) * airGapFn(B_pc, h_pc + h_ps)
At_ry = tau_p * 0.5 * airGapFn(B_rymax, tau_p/2)
'''
At_g = g_1 * B_gfm / mu_0
At_t = h_s * (400 * B_tmax + 7 * B_tmax**13)
At_sy = tau_p * 0.5 * (400 * B_symax + 7 * B_symax**13)
At_pc = (h_pc + h_ps) * (400 * B_pc + 7 * B_pc**13)
At_ry = tau_p * 0.5 * (400 * B_rymax + 7 * B_rymax**13)
g_eff = (At_g + At_t + At_sy + At_pc + At_ry) * g_1 / At_g
L_m = 6 * k_wd**2 * N_s**2 * mu_0 * rad_ag * l_s / pi / g_eff / p**2
B_r1 = (mu_0 * I_f * N_f * 4 * sin(0.5 * (b_p / tau_p) * pi)) / g_eff / pi # (not used)
# Calculating direct axis and quadrature axes inductances
L_dm = (b_p / tau_p +(1 / pi) * sin(pi * b_p / tau_p)) * L_m
L_qm = (b_p / tau_p -(1 / pi) * sin(pi * b_p / tau_p) + 2 / (3 * pi) * cos(b_p * pi / 2 * tau_p)) * L_m
# Calculating actual load angle
delta_m = (atan(om_e * L_qm * I_s / E_s))
L_d = L_dm + L_ssigma # (not used)
L_q = L_qm + L_ssigma # (not used)
I_sd = I_s * sin(delta_m)
I_sq = I_s * cos(delta_m)
# induced voltage
E_p = om_e * L_dm * I_sd + sqrt(E_s**2 - (om_e * L_qm * I_sq)**2) # (not used)
# M_sf = mu_0 * 8*rad_ag * l_s * k_wd * N_s * N_f * sin(0.5 * b_p / tau_p * pi) / (p * g_eff * pi)
# I_f1 = sqrt(2) * (E_p) / (om_e * M_sf)
# I_f2 = (E_p / E_s) * B_g * g_eff * pi / (4 * N_f * mu_0 * sin(pi * b_p / 2/tau_p))
# phi_max_stator = k_wd * N_s * pi * rad_ag * l_s * 2*mu_0 * N_f * I_f * 4*sin(0.5 * b_p / tau_p / pi) / (p * pi * g_eff * pi)
# M_sf = mu_0 * 8*rad_ag * l_s * k_wd * N_s * N_f * sin(0.5 * b_p / tau_p / pi) / (p * g_eff * pi)
L_tot = l_s + 2 * tau_p
# Excitation power
V_fn = 500
Power_excitation = V_fn * 2*I_f # total rated power in excitation winding
Power_ratio = Power_excitation * 100 / machine_rating
# Calculating Electromagnetically Active mass
L_tot = l_s + 2 * tau_p # (not used)
V_Cuss = m * l_Cus * A_Cus # volume of copper in stator
V_Cusr = l_Cur * A_Cur # volume of copper in rotor
V_Fest = (l_s * pi * ((rad_ag + h_s)**2 - rad_ag**2) - 2 * m*q1 * p*b_s * h_s * l_s) # volume of iron in stator tooth
V_Fesy = l_s * pi * ((rad_ag + h_s + h_ys)**2 - (rad_ag + h_s)**2) # volume of iron in stator yoke
V_Fert = 2 * p*l_pfe * (h_pc * b_pc + b_p * h_ps) # volume of iron in rotor pole
V_Fery = l_pfe * pi * ((r_r - h_ps - h_pc)**2 - (r_r - h_ps - h_pc - h_yr)**2) # volume of iron in rotor yoke
Copper = (V_Cuss + V_Cusr) * rho_Copper
M_Fest = V_Fest * rho_Fe
M_Fesy = V_Fesy * rho_Fe
M_Fert = V_Fert * rho_Fe
M_Fery = V_Fery * rho_Fe
Iron = M_Fest + M_Fesy + M_Fert + M_Fery
I_snom = machine_rating / (3 * E_s * cos_phi)
## Optional## Calculating mmf ratio
F_1no_load = 3 * 2**0.5 * N_s * k_wd * I_s / (pi * p) # (not used)
Nf_If_no_load = N_f * I_f
F_1_rated = (3 * 2**0.5 * N_s * k_wd * I_srated) / (pi * p)
Nf_If_rated = 2 * Nf_If_no_load
Load_mmf_ratio = Nf_If_rated / F_1_rated
## Calculating losses
#1. Copper losses
K_R = 1.2
P_Cuss = m * I_snom**2 * R_s * K_R
P_Cusr = I_f**2 * R_r
P_Cusnom_total = P_Cuss + P_Cusr
#2. Iron losses ( Hysteresis and Eddy currents)
P_Hyys = M_Fesy * (B_symax / 1.5)**2 * (P_Fe0h * om_e / (2 * pi * 60)) # Hysteresis losses in stator yoke
P_Ftys = M_Fesy * (B_symax / 1.5)**2 * (P_Fe0e * (om_e / (2 * pi * 60))**2) # Eddy losses in stator yoke
P_Fesynom = P_Hyys + P_Ftys
P_Hyd = M_Fest * (B_tmax / 1.5)**2 * (P_Fe0h * om_e / (2 * pi * 60)) # Hysteresis losses in stator teeth
P_Ftd = M_Fest * (B_tmax / 1.5)**2 * (P_Fe0e * (om_e / (2 * pi * 60))**2) # Eddy losses in stator teeth
P_Festnom = P_Hyd + P_Ftd
# brushes
delta_v = 1
n_brushes = (I_f * 2 / 120)
if (n_brushes<0.5):
n_brushes = 1
else:
n_brushes = np.round(n_brushes)
#3. brush losses
p_b = 2 * delta_v * (I_f)
Losses = P_Cusnom_total + P_Festnom + P_Fesynom + p_b
gen_eff = machine_rating * 100 / (Losses + machine_rating)
################################################## Structural Design ########################################################
## Structural deflection calculations
# rotor structure
q3 = B_g**2 / 2/mu_0 # normal component of Maxwell's stress
#l = l_s # l - stator core length - now using l_s everywhere
l_b = 2 * tau_p # end winding length # (not used)
l_e = l_s + 2 * 0.001 * rad_ag # equivalent core length # (not used)
        a_r = (b_r * d_r) - ((b_r - 2 * t_wr) * (d_r - 2 * t_wr)) # cross-sectional area of rotor arms
A_r = l_s * t # cross-sectional area of rotor cylinder
N_r = np.round(n_r)
theta_r = pi / N_r # half angle between spokes
I_r = l_s * t**3 / 12 # second moment of area of rotor cylinder
I_arm_axi_r = ((b_r * d_r**3) - ((b_r - 2 * t_wr) * (d_r - 2 * t_wr)**3)) / 12 # second moment of area of rotor arm
        I_arm_tor_r = ((d_r * b_r**3) - ((d_r - 2 * t_wr) * (b_r - 2 * t_wr)**3)) / 12 # second moment of area of rotor arm w.r.t. torsion
R = r_r - h_ps - h_pc - 0.5 * h_yr
R_1 = R - h_yr * 0.5 # inner radius of rotor cylinder
k_1 = sqrt(I_r / A_r) # radius of gyration
m1 = (k_1 / R)**2
c = R / 500 # (not used)
u_all_r = R / 10000 # allowable radial deflection
b_all_r = 2 * pi * R_o / N_r # allowable circumferential arm dimension
        # Calculating radial deflection of rotor structure according to McDonald's thesis
Numer = R**3 * ((0.25 * (sin(theta_r) - (theta_r * cos(theta_r))) / (sin(theta_r))**2) - (0.5 / sin(theta_r)) + (0.5 / theta_r))
Pov = ((theta_r / (sin(theta_r))**2) + 1 / tan(theta_r)) * ((0.25 * R / A_r) + (0.25 * R**3 / I_r))
Qov = R**3 / (2 * I_r * theta_r * (m1 + 1))
Lov = (R_1 - R_o) / a_r
Denom = I_r * (Pov - Qov + Lov) # radial deflection % rotor
u_Ar = (q3 * R**2 / E / h_yr) * (1 + Numer / Denom)
# Calculating axial deflection of rotor structure
w_r = rho_Fes * g1 * sin(phi) * a_r * N_r
mass_st_lam = rho_Fe * 2*pi * (R + 0.5 * h_yr) * l_s * h_yr # mass of rotor yoke steel
W = g1 * sin(phi) * (mass_st_lam + (V_Cusr * rho_Copper) + M_Fert) / N_r # weight of rotor cylinder
l_ir = R # length of rotor arm beam at which rotor cylinder acts
l_iir = R_1
y_Ar = (W * l_ir**3 / 12 / E / I_arm_axi_r) + (w_r * l_iir**4 / 24 / E / I_arm_axi_r) # axial deflection
# Calculating torsional deflection of rotor structure
z_all_r = radians(0.05 * R) # allowable torsional deflection
z_A_r = (2 * pi * (R - 0.5 * h_yr) * l_s / N_r) * sigma * (l_ir - 0.5 * h_yr)**3 / 3 / E / I_arm_tor_r # circumferential deflection
# STATOR structure
A_st = l_s * t_s
a_s = (b_st * d_s) - ((b_st - 2 * t_ws) * (d_s - 2 * t_ws))
N_st = np.round(n_s)
theta_s = pi / N_st
I_st = l_s * t_s**3 / 12
I_arm_axi_s = ((b_st * d_s**3) - ((b_st - 2 * t_ws) * (d_s - 2 * t_ws)**3)) / 12 # second moment of area of stator arm
        I_arm_tor_s = ((d_s * b_st**3) - ((d_s - 2 * t_ws) * (b_st - 2 * t_ws)**3)) / 12 # second moment of area of stator arm w.r.t. torsion
R_st = rad_ag + h_s + h_ys * 0.5
R_1s = R_st - h_ys * 0.5
k_2 = sqrt(I_st / A_st)
m2 = (k_2 / R_st)**2
# allowable deflections
b_all_s = 2 * pi * R_o / N_st
u_all_s = R_st / 10000
y_all = 2 * l_s / 100 # allowable axial deflection
z_all_s = radians(0.05 * R_st) # allowable torsional deflection
# Calculating radial deflection according to McDonald's
Numers = R_st**3 * ((0.25 * (sin(theta_s) - (theta_s * cos(theta_s))) / (sin(theta_s))**2) - (0.5 / sin(theta_s)) + (0.5 / theta_s))
Povs = ((theta_s / (sin(theta_s))**2) + 1 / tan(theta_s)) * ((0.25 * R_st / A_st) + (0.25 * R_st**3 / I_st))
Qovs = R_st**3 / (2 * I_st * theta_s * (m2 + 1))
Lovs = (R_1s - R_o) * 0.5 / a_s
Denoms = I_st * (Povs - Qovs + Lovs)
R_out = (R / 0.995 + h_s + h_ys)
u_As = (q3 * R_st**2 / E / t_s) * (1 + Numers / Denoms)
# Calculating axial deflection according to McDonald
l_is = R_st - R_o
l_iis = l_is
l_iiis = l_is # length of rotor arm beam at which self-weight acts
mass_st_lam_s = M_Fest + pi * l_s * rho_Fe * ((R_st + 0.5 * h_ys)**2 - (R_st - 0.5 * h_ys)**2)
W_is = g1 * sin(phi) * (rho_Fes * l_s * d_s**2 * 0.5) # weight of rotor cylinder
W_iis = g1 * sin(phi) * (V_Cuss * rho_Copper + mass_st_lam_s) / 2/N_st
w_s = rho_Fes * g1 * sin(phi) * a_s * N_st
X_comp1 = W_is * l_is**3 / 12 / E / I_arm_axi_s
X_comp2 = W_iis * l_iis**4 / 24 / E / I_arm_axi_s
X_comp3 = w_s * l_iiis**4 / 24 / E / I_arm_axi_s
y_As = X_comp1 + X_comp2 + X_comp3 # axial deflection
# Calculating torsional deflection
z_A_s = 2 * pi * (R_st + 0.5 * t_s) * l_s / (2 * N_st) * sigma * (l_is + 0.5 * t_s)**3 / 3 / E / I_arm_tor_s
# tangential stress constraints
TC1 = T / (2 * pi * sigma)
TC2 = R**2 * l_s
TC3 = R_st**2 * l_s
mass_stru_steel = 2 * (N_st * (R_1s - R_o) * a_s * rho_Fes)
# Calculating inactive mass and total mass
Structural_mass = mass_stru_steel + (N_r * (R_1 - R_o) * a_r * rho_Fes)
Mass = Copper + Iron + Structural_mass
I = np.zeros(3)
# Calculating mass moments of inertia and center of mass
I[0] = (0.50 * Mass*R_out**2)
I[1] = (0.25 * Mass*R_out**2 + Mass * l_s**2 / 12)
I[2] = I[1]
cm = np.zeros(3)
cm[0] = shaft_cm[0] + shaft_length / 2. + l_s / 2
cm[1] = shaft_cm[1]
cm[2] = shaft_cm[2]
outputs['B_symax'] = B_symax
outputs['B_tmax'] = B_tmax
outputs['B_rymax'] = B_rymax
outputs['B_gfm'] = B_gfm
outputs['B_g'] = B_g
outputs['B_pc'] = B_pc
outputs['N_s'] = N_s
outputs['b_s'] = b_s
outputs['b_t'] = b_t
outputs['A_Cuscalc'] = A_Cuscalc
outputs['A_Curcalc'] = A_Curcalc
outputs['b_p'] = b_p
outputs['h_p'] = h_p
outputs['p'] = p
outputs['E_s'] = E_s
outputs['f'] = f
outputs['I_s'] = I_s
outputs['R_s'] = R_s
outputs['L_m'] = L_m
outputs['A_1'] = A_1
outputs['J_s'] = J_s
outputs['R_r'] = R_r
outputs['Losses'] = Losses
outputs['Load_mmf_ratio'] = Load_mmf_ratio
outputs['Power_ratio'] = Power_ratio
outputs['n_brushes'] = n_brushes
outputs['J_f'] = J_f
outputs['K_rad'] = K_rad
outputs['gen_eff'] = gen_eff
outputs['S'] = S
outputs['Slot_aspect_ratio'] = Slot_aspect_ratio
outputs['Copper'] = Copper
outputs['Iron'] = Iron
outputs['u_Ar'] = u_Ar
outputs['y_Ar'] = y_Ar
outputs['z_A_r'] = z_A_r
outputs['u_As'] = u_As
outputs['y_As'] = y_As
outputs['z_A_s'] = z_A_s
outputs['u_all_r'] = u_all_r
outputs['u_all_s'] = u_all_s
outputs['y_all'] = y_all
outputs['z_all_s'] = z_all_s
outputs['z_all_r'] = z_all_r
outputs['b_all_s'] = b_all_s
outputs['b_all_r'] = b_all_r
outputs['TC1'] = TC1
outputs['TC2'] = TC2
outputs['TC3'] = TC3
outputs['R_out'] = R_out
outputs['Structural_mass'] = Structural_mass
outputs['Mass'] = Mass
outputs['cm'] = cm
outputs['I'] = I
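# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): wiring the EESG
# component into an OpenMDAO Problem. The input values are illustrative
# placeholders only, not a validated generator design.
#
#   prob = Problem()
#   prob.model.add_subsystem('eesg', EESG(), promotes=['*'])
#   prob.setup()
#   prob['rad_ag'] = 3.2          # m
#   prob['l_s'] = 1.4             # m
#   prob['machine_rating'] = 5e6  # W
#   ... (set the remaining inputs declared in setup())
#   prob.run_model()
#   print(prob['Mass'], prob['gen_eff'])
# --------------------------------------------------------------------------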
|
py
|
1a56a4d7b99ebbb63e621dc55aafffca9608e2f5
|
from jinja2 import Environment, PackageLoader
templates = {
'drawing': 'drawing.xml',
'hyperlink': 'hyperlink.xml',
'insert': 'insert.xml',
'main': 'base.xml',
'p': 'p.xml',
'pict': 'pict.xml',
'r': 'r.xml',
'sectPr': 'sectPr.xml',
'smartTag': 'smart_tag.xml',
'style': 'style.xml',
'styles': 'styles.xml',
'table': 'table.xml',
'tc': 'tc.xml',
'tr': 'tr.xml',
}
env = Environment(
loader=PackageLoader(
'docx2html.tests',
'templates',
),
)
class DocxBuilder(object):
@classmethod
def xml(self, body):
template = env.get_template(templates['main'])
return template.render(body=body)
@classmethod
def p_tag(self, text, bold=False):
if isinstance(text, str):
            # Create a single r tag from the text and the bold flag
run_tag = DocxBuilder.r_tag(text, bold)
run_tags = [run_tag]
elif isinstance(text, list):
run_tags = text
else:
run_tags = [self.r_tag(None)]
template = env.get_template(templates['p'])
kwargs = {
'run_tags': run_tags,
}
return template.render(**kwargs)
@classmethod
def r_tag(self, text, is_bold=False, include_linebreak=False):
template = env.get_template(templates['r'])
kwargs = {
'include_linebreak': include_linebreak,
'text': text,
'is_bold': is_bold,
}
return template.render(**kwargs)
@classmethod
def hyperlink_tag(self, r_id, run_tags):
template = env.get_template(templates['hyperlink'])
kwargs = {
'r_id': r_id,
'run_tags': run_tags,
}
return template.render(**kwargs)
@classmethod
def insert_tag(self, run_tags):
template = env.get_template(templates['insert'])
kwargs = {
'run_tags': run_tags,
}
return template.render(**kwargs)
@classmethod
def smart_tag(self, run_tags):
template = env.get_template(templates['smartTag'])
kwargs = {
'run_tags': run_tags,
}
return template.render(**kwargs)
@classmethod
def li(self, text, ilvl, numId, bold=False):
if isinstance(text, str):
            # Create a single r tag from the text and the bold flag
run_tag = DocxBuilder.r_tag(text, bold)
run_tags = [run_tag]
elif isinstance(text, list):
run_tags = []
for run_text, run_bold in text:
                run_tags.append(DocxBuilder.r_tag(run_text, run_bold))
else:
raise AssertionError('text must be a string or a list')
template = env.get_template(templates['p'])
kwargs = {
'run_tags': run_tags,
'is_list': True,
'ilvl': ilvl,
'numId': numId,
}
return template.render(**kwargs)
@classmethod
def table(self, num_rows, num_columns, text):
def _tc(cell_value):
template = env.get_template(templates['tc'])
return template.render(p_tag=cell_value)
        def _tr(columns, text):
            tcs = [_tc(next(text)) for _ in range(columns)]
            template = env.get_template(templates['tr'])
            return template.render(table_cells=tcs)
        trs = [_tr(num_columns, text) for _ in range(num_rows)]
template = env.get_template(templates['table'])
return template.render(table_rows=trs)
@classmethod
def drawing(self, r_id):
template = env.get_template(templates['drawing'])
return template.render(r_id=r_id)
@classmethod
def pict(self, r_id=None):
template = env.get_template(templates['pict'])
return template.render(r_id=r_id)
@classmethod
def sectPr_tag(self, p_tag):
template = env.get_template(templates['sectPr'])
kwargs = {
'p_tag': p_tag,
}
return template.render(**kwargs)
@classmethod
def styles_xml(self, style_tags):
template = env.get_template(templates['styles'])
kwargs = {
'style_tags': style_tags,
}
return template.render(**kwargs)
@classmethod
def style(self, style_id, value):
template = env.get_template(templates['style'])
kwargs = {
'style_id': style_id,
'value': value,
}
return template.render(**kwargs)
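# Hedged usage sketch (not part of the original helpers; assumes the
# docx2html.tests template package referenced above is importable):
#   p = DocxBuilder.p_tag('Hello world', bold=True)
#   document_xml = DocxBuilder.xml(p)
#   styles_xml = DocxBuilder.styles_xml([DocxBuilder.style('heading1', 'Heading 1')])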
|
py
|
1a56a61c69e8b5db8b4b547a810faef14ce09866
|
name = "ktis_parser"
|
py
|
1a56a85c4c46eff0246c8226a5dc87ad3e987d0b
|
from discord.ext import commands
from cassiopeia import riotapi
config: dict = {}
def init(bot: commands.Bot, cfg: dict):
global config
config = cfg[__name__]
riotapi.set_region(config["api_region"])
riotapi.set_api_key(config["api_key"])
from .trivia import LoLTrivia
bot.add_cog(LoLTrivia(bot))
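# Hedged sketch of the expected cfg shape (keys inferred from the lookups above;
# the outer key is this module's dotted name and the values are placeholders):
#   cfg = {
#       "<this.module.name>": {
#           "api_region": "NA",
#           "api_key": "RGAPI-xxxxxxxx",
#       },
#   }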
|
py
|
1a56a8b4c9d5e98a36c7a50205c9caccdb61f26e
|
from JumpScale9Portal.portal import exceptions
import re
INT = r"""(?:[+-]?(?:[0-9]+))"""
BASE10NUM = r"""(?<![0-9.+-])(?>[+-]?(?:(?:[0-9]+(?:\.[0-9]+)?)|(?:\.[0-9]+)))"""
NUMBER = r"""(?<![0-9.+-])(?>[+-]?(?:(?:[0-9]+(?:\.[0-9]+)?)|(?:\.[0-9]+)))"""
BASE16NUM = r"""(?<![0-9A-Fa-f])(?:[+-]?(?:0x)?(?:[0-9A-Fa-f]+))"""
BASE16FLOAT = r"""\b(?<![0-9A-Fa-f.])(?:[+-]?(?:0x)?(?:(?:[0-9A-Fa-f]+(?:\.[0-9A-Fa-f]*)?)|(?:\.[0-9A-Fa-f]+)))\b"""
POSINT = r"""\b(?:[1-9][0-9]*)\b"""
NONNEGINT = r"""\b(?:[0-9]+)\b"""
WORD = r"""\b\w+\b"""
NOTSPACE = r"""\S+"""
SPACE = r"""\s*"""
DATA = r""".*?"""
GREEDYDATA = r""".*"""
QUOTEDSTRING = r"""(?>(?<!\\)(?>"(?>\\.|[^\\"]+)+"|""|(?>'(?>\\.|[^\\']+)+')|''|(?>`(?>\\.|[^\\`]+)+`)|``))"""
UUID = r"""[A-Fa-f0-9]{8}-(?:[A-Fa-f0-9]{4}-){3}[A-Fa-f0-9]{12}"""
def NAME(val):
for i in r"""<>"'""":
if i in val:
raise exceptions.BadRequest('The name you entered contains invalid characters')
if len(val) < 2:
raise exceptions.BadRequest('The name cannot be shorter than two characters')
return True
def IP(val):
return sum([x.isdigit() and 0 <= int(x) <= 255 for x in val.split('.')]) == 4
def PASSWORD(val):
return 8 <= len(val) <= 60
def USERNAME(val):
m = re.match("[a-zA-Z0-9._-]+(?:@[a-zA-Z0-9._-]+)?", val)
if 2 < len(val.split('@')[0]) < 40 and m and m.end() == len(val):
return True
else:
raise exceptions.BadRequest('Usernames can only contain alphanumeric characters, dots, dashes, underscores and should be between 2 and 40 characters')
def GROUPNAME(val):
m = re.match("[a-zA-Z0-9._-]+", val)
if 2 < len(val) < 40 and m and m.end() == len(val):
return True
else:
raise exceptions.BadRequest('Groupnames can only contain alphanumeric characters, dots, dashes, underscores and should be between 2 and 40 characters')
def EMAIL(val):
atpos = val.find('@')
    dotpos = val.rfind('.')  # the last dot must come after the '@'
if atpos == -1 or dotpos == -1:
raise exceptions.BadRequest('Invalid Email Address given')
    elif dotpos < atpos:
        raise exceptions.BadRequest('Invalid Email Address given')
    return True
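# Illustrative behaviour of the helpers above (examples, not from the module):
#   IP('192.168.0.1')     -> True
#   PASSWORD('short')     -> False
#   USERNAME('john.doe')  -> True   (invalid names raise exceptions.BadRequest)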
|
py
|
1a56aa14549dae8f1828d11b295b170f7e286360
|
class Camera:
def __init__(self, game):
self.game = game
self.dx = 0
self.dy = 0
def apply(self, obj):
obj.rect.x = self.dx + int(obj.x)
obj.rect.y = self.dy + int(obj.y)
def update(self, target):
self.dx = -(int(target.x) + target.rect.w // 2 - self.game.width // 2)
self.dy = -(int(target.y) + target.rect.h // 2 - self.game.height // 2)
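# Hedged usage sketch (not part of the original class; 'game' is assumed to
# expose width/height and sprites to expose x, y and a pygame-style rect):
#   camera = Camera(game)
#   camera.update(player)       # centre the view on the player each frame
#   for sprite in all_sprites:
#       camera.apply(sprite)    # shift sprite rects into screen coordinates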
|
py
|
1a56aa8eab660a39e6ca62f43d033af48ce738b5
|
#!/usr/bin/env python
# coding: utf-8
import logging
import os
from timeit import default_timer as timer
import emmental
import torch
from emmental.data import EmmentalDataLoader
from emmental.learner import EmmentalLearner
from emmental.model import EmmentalModel
from fonduer import Meta, init_logging
from fonduer.candidates import CandidateExtractor, MentionExtractor, MentionFigures
from fonduer.candidates.matchers import _Matcher
from fonduer.candidates.models import Mention, candidate_subclass, mention_subclass
from fonduer.parser.models import Document, Figure, Paragraph, Section, Sentence
from PIL import Image
from hack.circular_connectors.augment_policy import Augmentation
from hack.circular_connectors.config import emmental_config
from hack.circular_connectors.scheduler import DauphinScheduler
from hack.circular_connectors.task import create_task
from hack.circular_connectors.thumbnail_dataset import ThumbnailDataset
from hack.utils import parse_dataset
# Configure logging for Fonduer
logger = logging.getLogger(__name__)
TRUE = 1
FALSE = 0
torch.backends.cudnn.deterministic = True # type: ignore
torch.backends.cudnn.benchmark = False # type: ignore
def main(
conn_string,
max_docs=float("inf"),
parse=False,
first_time=False,
gpu=None,
parallel=4,
log_dir=None,
verbose=False,
):
if not log_dir:
log_dir = "logs"
if verbose:
level = logging.INFO
else:
level = logging.WARNING
dirname = os.path.dirname(os.path.abspath(__file__))
init_logging(log_dir=os.path.join(dirname, log_dir), level=level)
session = Meta.init(conn_string).Session()
# Parsing
logger.info(f"Starting parsing...")
start = timer()
docs, train_docs, dev_docs, test_docs = parse_dataset(
session, dirname, first_time=first_time, parallel=parallel, max_docs=max_docs
)
end = timer()
logger.warning(f"Parse Time (min): {((end - start) / 60.0):.1f}")
logger.info(f"# of train Documents: {len(train_docs)}")
logger.info(f"# of dev Documents: {len(dev_docs)}")
logger.info(f"# of test Documents: {len(test_docs)}")
logger.info(f"Documents: {session.query(Document).count()}")
logger.info(f"Sections: {session.query(Section).count()}")
logger.info(f"Paragraphs: {session.query(Paragraph).count()}")
logger.info(f"Sentences: {session.query(Sentence).count()}")
logger.info(f"Figures: {session.query(Figure).count()}")
start = timer()
Thumbnails = mention_subclass("Thumbnails")
thumbnails_img = MentionFigures()
class HasFigures(_Matcher):
def _f(self, m):
file_path = ""
for prefix in [
f"{dirname}/data/train/html/",
f"{dirname}/data/dev/html/",
f"{dirname}/data/test/html/",
]:
if os.path.exists(prefix + m.figure.url):
file_path = prefix + m.figure.url
if file_path == "":
return False
img = Image.open(file_path)
width, height = img.size
min_value = min(width, height)
return min_value > 50
mention_extractor = MentionExtractor(
session, [Thumbnails], [thumbnails_img], [HasFigures()], parallelism=parallel
)
if first_time:
mention_extractor.apply(docs)
logger.info("Total Mentions: {}".format(session.query(Mention).count()))
ThumbnailLabel = candidate_subclass("ThumbnailLabel", [Thumbnails])
candidate_extractor = CandidateExtractor(
session, [ThumbnailLabel], throttlers=[None], parallelism=parallel
)
if first_time:
candidate_extractor.apply(train_docs, split=0)
candidate_extractor.apply(dev_docs, split=1)
candidate_extractor.apply(test_docs, split=2)
train_cands = candidate_extractor.get_candidates(split=0)
# Sort the dev_cands, which are used for training, for deterministic behavior
dev_cands = candidate_extractor.get_candidates(split=1, sort=True)
test_cands = candidate_extractor.get_candidates(split=2)
end = timer()
logger.warning(f"Candidate Extraction Time (min): {((end - start) / 60.0):.1f}")
logger.info("Total train candidate:\t{}".format(len(train_cands[0])))
logger.info("Total dev candidate:\t{}".format(len(dev_cands[0])))
logger.info("Total test candidate:\t{}".format(len(test_cands[0])))
fin = open(f"{dirname}/data/ground_truth.txt", "r")
gt = set()
for line in fin:
gt.add("::".join(line.lower().split()))
fin.close()
# Labeling
start = timer()
def LF_gt_label(c):
doc_file_id = (
f"{c[0].context.figure.document.name.lower()}.pdf::"
f"{os.path.basename(c[0].context.figure.url.lower())}"
)
return TRUE if doc_file_id in gt else FALSE
gt_dev = [LF_gt_label(cand) for cand in dev_cands[0]]
gt_test = [LF_gt_label(cand) for cand in test_cands[0]]
end = timer()
logger.warning(f"Supervision Time (min): {((end - start) / 60.0):.1f}")
batch_size = 64
input_size = 224
K = 2
emmental.init(log_dir=Meta.log_path, config=emmental_config)
emmental.Meta.config["learner_config"]["task_scheduler_config"][
"task_scheduler"
] = DauphinScheduler(augment_k=K, enlarge=1)
train_dataset = ThumbnailDataset(
"Thumbnail",
dev_cands[0],
gt_dev,
"train",
prob_label=True,
prefix=f"{dirname}/data/dev/html/",
input_size=input_size,
transform_cls=Augmentation(2),
k=K,
)
val_dataset = ThumbnailDataset(
"Thumbnail",
dev_cands[0],
gt_dev,
"valid",
prob_label=False,
prefix=f"{dirname}/data/dev/html/",
input_size=input_size,
k=1,
)
test_dataset = ThumbnailDataset(
"Thumbnail",
test_cands[0],
gt_test,
"test",
prob_label=False,
prefix=f"{dirname}/data/test/html/",
input_size=input_size,
k=1,
)
dataloaders = []
dataloaders.append(
EmmentalDataLoader(
task_to_label_dict={"Thumbnail": "labels"},
dataset=train_dataset,
split="train",
shuffle=True,
batch_size=batch_size,
num_workers=1,
)
)
dataloaders.append(
EmmentalDataLoader(
task_to_label_dict={"Thumbnail": "labels"},
dataset=val_dataset,
split="valid",
shuffle=False,
batch_size=batch_size,
num_workers=1,
)
)
dataloaders.append(
EmmentalDataLoader(
task_to_label_dict={"Thumbnail": "labels"},
dataset=test_dataset,
split="test",
shuffle=False,
batch_size=batch_size,
num_workers=1,
)
)
model = EmmentalModel(name=f"Thumbnail")
model.add_task(
create_task("Thumbnail", n_class=2, model="resnet18", pretrained=True)
)
emmental_learner = EmmentalLearner()
emmental_learner.learn(model, dataloaders)
scores = model.score(dataloaders)
logger.warning("Model Score:")
logger.warning(f"precision: {scores['Thumbnail/Thumbnail/test/precision']:.3f}")
logger.warning(f"recall: {scores['Thumbnail/Thumbnail/test/recall']:.3f}")
logger.warning(f"f1: {scores['Thumbnail/Thumbnail/test/f1']:.3f}")
|
py
|
1a56aadbeef2a04f51a487ee9c2d54abe585e865
|
import os
import unittest
from livestreamer import Livestreamer, PluginError, NoPluginError
from livestreamer.plugins import Plugin
from livestreamer.stream import *
class TestPluginStream(unittest.TestCase):
def setUp(self):
self.session = Livestreamer()
def assertDictHas(self, a, b):
for key, value in a.items():
self.assertEqual(b[key], value)
def _test_akamaihd(self, surl, url):
channel = self.session.resolve_url(surl)
streams = channel.get_streams()
self.assertTrue("live" in streams)
stream = streams["live"]
self.assertTrue(isinstance(stream, AkamaiHDStream))
self.assertEqual(stream.url, url)
def _test_hls(self, surl, url):
channel = self.session.resolve_url(surl)
streams = channel.get_streams()
self.assertTrue("live" in streams)
stream = streams["live"]
self.assertTrue(isinstance(stream, HLSStream))
self.assertEqual(stream.url, url)
def _test_rtmp(self, surl, url, params):
channel = self.session.resolve_url(surl)
streams = channel.get_streams()
self.assertTrue("live" in streams)
stream = streams["live"]
self.assertTrue(isinstance(stream, RTMPStream))
self.assertEqual(stream.params["rtmp"], url)
self.assertDictHas(params, stream.params)
def test_plugin(self):
self._test_rtmp("rtmp://hostname.se/stream",
"rtmp://hostname.se/stream", dict())
self._test_rtmp("rtmp://hostname.se/stream live=1 num=47",
"rtmp://hostname.se/stream", dict(live=True, num=47))
self._test_rtmp("rtmp://hostname.se/stream live=1 qarg='a \'string' noq=test",
"rtmp://hostname.se/stream", dict(live=True, qarg='a \'string', noq="test"))
self._test_hls("hls://http://hostname.se/playlist.m3u8",
"http://hostname.se/playlist.m3u8")
self._test_akamaihd("akamaihd://http://hostname.se/stream",
"http://hostname.se/stream")
if __name__ == "__main__":
unittest.main()
|
py
|
1a56ac966aee308543428f0b4e0bac914bf32422
|
import copy_reg
import unittest
from test import test_support
from test.pickletester import ExtensionSaver
class C:
pass
class WithoutSlots(object):
pass
class WithWeakref(object):
__slots__ = ('__weakref__',)
class WithPrivate(object):
__slots__ = ('__spam',)
class WithSingleString(object):
__slots__ = 'spam'
class WithInherited(WithSingleString):
__slots__ = ('eggs',)
class CopyRegTestCase(unittest.TestCase):
def test_class(self):
self.assertRaises(TypeError, copy_reg.pickle,
C, None, None)
def test_noncallable_reduce(self):
self.assertRaises(TypeError, copy_reg.pickle,
type(1), "not a callable")
def test_noncallable_constructor(self):
self.assertRaises(TypeError, copy_reg.pickle,
type(1), int, "not a callable")
def test_bool(self):
import copy
self.assertEqual(True, copy.copy(True))
def test_extension_registry(self):
mod, func, code = 'junk1 ', ' junk2', 0xabcd
e = ExtensionSaver(code)
try:
# Shouldn't be in registry now.
self.assertRaises(ValueError, copy_reg.remove_extension,
mod, func, code)
copy_reg.add_extension(mod, func, code)
# Should be in the registry.
self.assertTrue(copy_reg._extension_registry[mod, func] == code)
self.assertTrue(copy_reg._inverted_registry[code] == (mod, func))
# Shouldn't be in the cache.
self.assertNotIn(code, copy_reg._extension_cache)
# Redundant registration should be OK.
copy_reg.add_extension(mod, func, code) # shouldn't blow up
# Conflicting code.
self.assertRaises(ValueError, copy_reg.add_extension,
mod, func, code + 1)
self.assertRaises(ValueError, copy_reg.remove_extension,
mod, func, code + 1)
# Conflicting module name.
self.assertRaises(ValueError, copy_reg.add_extension,
mod[1:], func, code )
self.assertRaises(ValueError, copy_reg.remove_extension,
mod[1:], func, code )
# Conflicting function name.
self.assertRaises(ValueError, copy_reg.add_extension,
mod, func[1:], code)
self.assertRaises(ValueError, copy_reg.remove_extension,
mod, func[1:], code)
# Can't remove one that isn't registered at all.
if code + 1 not in copy_reg._inverted_registry:
self.assertRaises(ValueError, copy_reg.remove_extension,
mod[1:], func[1:], code + 1)
finally:
e.restore()
# Shouldn't be there anymore.
self.assertNotIn((mod, func), copy_reg._extension_registry)
# The code *may* be in copy_reg._extension_registry, though, if
# we happened to pick on a registered code. So don't check for
# that.
# Check valid codes at the limits.
for code in 1, 0x7fffffff:
e = ExtensionSaver(code)
try:
copy_reg.add_extension(mod, func, code)
copy_reg.remove_extension(mod, func, code)
finally:
e.restore()
# Ensure invalid codes blow up.
for code in -1, 0, 0x80000000L:
self.assertRaises(ValueError, copy_reg.add_extension,
mod, func, code)
def test_slotnames(self):
self.assertEqual(copy_reg._slotnames(WithoutSlots), [])
self.assertEqual(copy_reg._slotnames(WithWeakref), [])
expected = ['_WithPrivate__spam']
self.assertEqual(copy_reg._slotnames(WithPrivate), expected)
self.assertEqual(copy_reg._slotnames(WithSingleString), ['spam'])
expected = ['eggs', 'spam']
expected.sort()
result = copy_reg._slotnames(WithInherited)
result.sort()
self.assertEqual(result, expected)
def test_main():
test_support.run_unittest(CopyRegTestCase)
if __name__ == "__main__":
test_main()
|
py
|
1a56ada54c35d3c4b386ae0c754a9ec857934011
|
# coding=utf-8
# *** WARNING: this file was generated by test. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from ._inputs import *
import pulumi_aws
__all__ = ['StaticPageArgs', 'StaticPage']
@pulumi.input_type
class StaticPageArgs:
def __init__(__self__, *,
index_content: pulumi.Input[str],
foo: Optional['FooArgs'] = None):
"""
The set of arguments for constructing a StaticPage resource.
:param pulumi.Input[str] index_content: The HTML content for index.html.
"""
pulumi.set(__self__, "index_content", index_content)
if foo is not None:
pulumi.set(__self__, "foo", foo)
@property
@pulumi.getter(name="indexContent")
def index_content(self) -> pulumi.Input[str]:
"""
The HTML content for index.html.
"""
return pulumi.get(self, "index_content")
@index_content.setter
def index_content(self, value: pulumi.Input[str]):
pulumi.set(self, "index_content", value)
@property
@pulumi.getter
def foo(self) -> Optional['FooArgs']:
return pulumi.get(self, "foo")
@foo.setter
def foo(self, value: Optional['FooArgs']):
pulumi.set(self, "foo", value)
class StaticPage(pulumi.ComponentResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
foo: Optional[pulumi.InputType['FooArgs']] = None,
index_content: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Create a StaticPage resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] index_content: The HTML content for index.html.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: StaticPageArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a StaticPage resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param StaticPageArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(StaticPageArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
foo: Optional[pulumi.InputType['FooArgs']] = None,
index_content: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is not None:
raise ValueError('ComponentResource classes do not support opts.id')
else:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = StaticPageArgs.__new__(StaticPageArgs)
__props__.__dict__["foo"] = foo
if index_content is None and not opts.urn:
raise TypeError("Missing required property 'index_content'")
__props__.__dict__["index_content"] = index_content
__props__.__dict__["bucket"] = None
__props__.__dict__["website_url"] = None
super(StaticPage, __self__).__init__(
'xyz:index:StaticPage',
resource_name,
__props__,
opts,
remote=True)
@property
@pulumi.getter
def bucket(self) -> pulumi.Output['pulumi_aws.s3.Bucket']:
"""
The bucket resource.
"""
return pulumi.get(self, "bucket")
@property
@pulumi.getter(name="websiteUrl")
def website_url(self) -> pulumi.Output[str]:
"""
The website URL.
"""
return pulumi.get(self, "website_url")
|
py
|
1a56ae91aff7f76d8ff8608421972f3679eb5a20
|
# -*- coding: iso-8859-15 -*-
# =================================================================
#
# Authors: Tom Kralidis <[email protected]>
# Angelos Tzotsos <[email protected]>
#
# Copyright (c) 2015 Tom Kralidis
# Copyright (c) 2015 Angelos Tzotsos
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import logging
import os
from sqlalchemy import create_engine, asc, desc, func, __version__, select
from sqlalchemy.sql import text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import create_session
from pycsw.core import util
LOGGER = logging.getLogger(__name__)
class Repository(object):
_engines = {}
@classmethod
def create_engine(clazz, url):
'''
SQL Alchemy engines are thread-safe and simple wrappers for connection pools
https://groups.google.com/forum/#!topic/sqlalchemy/t8i3RSKZGb0
To reduce startup time we can cache the engine as a class variable in the
repository object and do database initialization once
Engines are memoized by url
'''
if url not in clazz._engines:
LOGGER.debug('creating new engine: %s', url)
engine = create_engine('%s' % url, echo=False)
# load SQLite query bindings
# This can be directly bound via events
            # for SQLAlchemy < 0.7, we need to do this on a per-connection basis
if engine.name in ['sqlite', 'sqlite3'] and __version__ >= '0.7':
from sqlalchemy import event
@event.listens_for(engine, "connect")
def connect(dbapi_connection, connection_rec):
dbapi_connection.create_function(
'query_spatial', 4, util.query_spatial)
dbapi_connection.create_function(
'update_xpath', 3, util.update_xpath)
dbapi_connection.create_function('get_anytext', 1,
util.get_anytext)
dbapi_connection.create_function('get_geometry_area', 1,
util.get_geometry_area)
dbapi_connection.create_function('get_spatial_overlay_rank', 2,
util.get_spatial_overlay_rank)
clazz._engines[url] = engine
return clazz._engines[url]
''' Class to interact with underlying repository '''
def __init__(self, database, context, app_root=None, table='records', repo_filter=None):
''' Initialize repository '''
self.context = context
self.filter = repo_filter
self.fts = False
        # Don't use relative paths; this is a hack to get around
        # most WSGI restrictions...
if (app_root and database.startswith('sqlite:///') and
not database.startswith('sqlite:////')):
database = database.replace('sqlite:///',
'sqlite:///%s%s' % (app_root, os.sep))
self.engine = Repository.create_engine('%s' % database)
base = declarative_base(bind=self.engine)
LOGGER.debug('binding ORM to existing database')
self.postgis_geometry_column = None
schema, table = util.sniff_table(table)
self.dataset = type('dataset', (base,),
dict(__tablename__=table,__table_args__={'autoload': True,
'schema': schema}))
self.dbtype = self.engine.name
self.session = create_session(self.engine)
temp_dbtype = None
if self.dbtype == 'postgresql':
# check if PostgreSQL is enabled with PostGIS 1.x
try:
self.session.execute(select([func.postgis_version()]))
temp_dbtype = 'postgresql+postgis+wkt'
LOGGER.debug('PostgreSQL+PostGIS1+WKT detected')
except Exception as err:
LOGGER.debug('PostgreSQL+PostGIS1+WKT detection failed')
# check if PostgreSQL is enabled with PostGIS 2.x
try:
self.session.execute('select(postgis_version())')
temp_dbtype = 'postgresql+postgis+wkt'
LOGGER.debug('PostgreSQL+PostGIS2+WKT detected')
except Exception as err:
LOGGER.debug('PostgreSQL+PostGIS2+WKT detection failed')
# check if a native PostGIS geometry column exists
try:
result = self.session.execute("select f_geometry_column from geometry_columns where f_table_name = '%s' and f_geometry_column != 'wkt_geometry' limit 1;" % table)
row = result.fetchone()
self.postgis_geometry_column = str(row['f_geometry_column'])
temp_dbtype = 'postgresql+postgis+native'
LOGGER.debug('PostgreSQL+PostGIS+Native detected')
except Exception as err:
LOGGER.debug('PostgreSQL+PostGIS+Native not picked up: %s', str(err))
# check if a native PostgreSQL FTS GIN index exists
result = self.session.execute("select relname from pg_class where relname='fts_gin_idx'").scalar()
self.fts = bool(result)
LOGGER.debug('PostgreSQL FTS enabled: %r', self.fts)
if temp_dbtype is not None:
LOGGER.debug('%s support detected' % temp_dbtype)
self.dbtype = temp_dbtype
if self.dbtype in ['sqlite', 'sqlite3']: # load SQLite query bindings
# <= 0.6 behaviour
if not __version__ >= '0.7':
self.connection = self.engine.raw_connection()
self.connection.create_function(
'query_spatial', 4, util.query_spatial)
self.connection.create_function(
'update_xpath', 3, util.update_xpath)
self.connection.create_function('get_anytext', 1,
util.get_anytext)
self.connection.create_function('get_geometry_area', 1,
util.get_geometry_area)
self.connection.create_function('get_spatial_overlay_rank', 2,
util.get_spatial_overlay_rank)
LOGGER.debug('setting repository queryables')
# generate core queryables db and obj bindings
self.queryables = {}
for tname in self.context.model['typenames']:
for qname in self.context.model['typenames'][tname]['queryables']:
self.queryables[qname] = {}
for qkey, qvalue in \
self.context.model['typenames'][tname]['queryables'][qname].items():
self.queryables[qname][qkey] = qvalue
# flatten all queryables
# TODO smarter way of doing this
self.queryables['_all'] = {}
for qbl in self.queryables:
self.queryables['_all'].update(self.queryables[qbl])
self.queryables['_all'].update(self.context.md_core_model['mappings'])
def _create_values(self, values):
value_dict = {}
for num, value in enumerate(values):
value_dict['pvalue%d' % num] = value
return value_dict
def query_ids(self, ids):
''' Query by list of identifiers '''
column = getattr(self.dataset, \
self.context.md_core_model['mappings']['pycsw:Identifier'])
query = self.session.query(self.dataset).filter(column.in_(ids))
return self._get_repo_filter(query).all()
def query_domain(self, domain, typenames, domainquerytype='list',
count=False):
''' Query by property domain values '''
domain_value = getattr(self.dataset, domain)
if domainquerytype == 'range':
LOGGER.debug('Generating property name range values')
query = self.session.query(func.min(domain_value),
func.max(domain_value))
else:
if count:
LOGGER.debug('Generating property name frequency counts')
query = self.session.query(getattr(self.dataset, domain),
func.count(domain_value)).group_by(domain_value)
else:
query = self.session.query(domain_value).distinct()
return self._get_repo_filter(query).all()
def query_insert(self, direction='max'):
''' Query to get latest (default) or earliest update to repository '''
column = getattr(self.dataset, \
self.context.md_core_model['mappings']['pycsw:InsertDate'])
if direction == 'min':
return self._get_repo_filter(self.session.query(func.min(column))).first()[0]
# else default max
return self._get_repo_filter(self.session.query(func.max(column))).first()[0]
def query_source(self, source):
''' Query by source '''
column = getattr(self.dataset, \
self.context.md_core_model['mappings']['pycsw:Source'])
query = self.session.query(self.dataset).filter(column == source)
return self._get_repo_filter(query).all()
def query(self, constraint, sortby=None, typenames=None,
maxrecords=10, startposition=0):
''' Query records from underlying repository '''
# run the raw query and get total
if 'where' in constraint: # GetRecords with constraint
LOGGER.debug('constraint detected')
query = self.session.query(self.dataset).filter(
text(constraint['where'])).params(self._create_values(constraint['values']))
else: # GetRecords sans constraint
LOGGER.debug('No constraint detected')
query = self.session.query(self.dataset)
total = self._get_repo_filter(query).count()
if util.ranking_pass: #apply spatial ranking
#TODO: Check here for dbtype so to extract wkt from postgis native to wkt
LOGGER.debug('spatial ranking detected')
LOGGER.debug('Target WKT: %s', getattr(self.dataset, self.context.md_core_model['mappings']['pycsw:BoundingBox']))
LOGGER.debug('Query WKT: %s', util.ranking_query_geometry)
query = query.order_by(func.get_spatial_overlay_rank(getattr(self.dataset, self.context.md_core_model['mappings']['pycsw:BoundingBox']), util.ranking_query_geometry).desc())
#trying to make this wsgi safe
util.ranking_pass = False
util.ranking_query_geometry = ''
if sortby is not None: # apply sorting
LOGGER.debug('sorting detected')
#TODO: Check here for dbtype so to extract wkt from postgis native to wkt
sortby_column = getattr(self.dataset, sortby['propertyname'])
if sortby['order'] == 'DESC': # descending sort
if 'spatial' in sortby and sortby['spatial']: # spatial sort
query = query.order_by(func.get_geometry_area(sortby_column).desc())
else: # aspatial sort
query = query.order_by(sortby_column.desc())
else: # ascending sort
if 'spatial' in sortby and sortby['spatial']: # spatial sort
query = query.order_by(func.get_geometry_area(sortby_column))
else: # aspatial sort
query = query.order_by(sortby_column)
# always apply limit and offset
return [str(total), self._get_repo_filter(query).limit(
maxrecords).offset(startposition).all()]
def insert(self, record, source, insert_date):
''' Insert a record into the repository '''
try:
self.session.begin()
self.session.add(record)
self.session.commit()
except Exception as err:
self.session.rollback()
raise RuntimeError('ERROR: %s' % str(err.orig))
def update(self, record=None, recprops=None, constraint=None):
''' Update a record in the repository based on identifier '''
if record is not None:
identifier = getattr(record,
self.context.md_core_model['mappings']['pycsw:Identifier'])
xml = getattr(self.dataset,
self.context.md_core_model['mappings']['pycsw:XML'])
anytext = getattr(self.dataset,
self.context.md_core_model['mappings']['pycsw:AnyText'])
if recprops is None and constraint is None: # full update
LOGGER.debug('full update')
update_dict = dict([(getattr(self.dataset, key),
getattr(record, key)) \
for key in record.__dict__.keys() if key != '_sa_instance_state'])
try:
self.session.begin()
self._get_repo_filter(self.session.query(self.dataset)).filter_by(
identifier=identifier).update(update_dict, synchronize_session='fetch')
self.session.commit()
except Exception as err:
self.session.rollback()
raise RuntimeError('ERROR: %s' % str(err.orig))
else: # update based on record properties
LOGGER.debug('property based update')
try:
rows = rows2 = 0
self.session.begin()
for rpu in recprops:
# update queryable column and XML document via XPath
if 'xpath' not in rpu['rp']:
self.session.rollback()
raise RuntimeError('XPath not found for property %s' % rpu['rp']['name'])
if 'dbcol' not in rpu['rp']:
self.session.rollback()
raise RuntimeError('property not found for XPath %s' % rpu['rp']['name'])
rows += self._get_repo_filter(self.session.query(self.dataset)).filter(
text(constraint['where'])).params(self._create_values(constraint['values'])).update({
getattr(self.dataset,
rpu['rp']['dbcol']): rpu['value'],
'xml': func.update_xpath(str(self.context.namespaces),
getattr(self.dataset,
self.context.md_core_model['mappings']['pycsw:XML']),
str(rpu)),
}, synchronize_session='fetch')
# then update anytext tokens
rows2 += self._get_repo_filter(self.session.query(self.dataset)).filter(
text(constraint['where'])).params(self._create_values(constraint['values'])).update({
'anytext': func.get_anytext(getattr(
self.dataset, self.context.md_core_model['mappings']['pycsw:XML']))
}, synchronize_session='fetch')
self.session.commit()
return rows
except Exception as err:
self.session.rollback()
raise RuntimeError('ERROR: %s' % str(err.orig))
def delete(self, constraint):
''' Delete a record from the repository '''
try:
self.session.begin()
rows = self._get_repo_filter(self.session.query(self.dataset)).filter(
text(constraint['where'])).params(self._create_values(constraint['values']))
parentids = []
for row in rows: # get ids
parentids.append(getattr(row,
self.context.md_core_model['mappings']['pycsw:Identifier']))
rows=rows.delete(synchronize_session='fetch')
if rows > 0:
LOGGER.debug('Deleting all child records')
# delete any child records which had this record as a parent
rows += self._get_repo_filter(self.session.query(self.dataset)).filter(
getattr(self.dataset,
self.context.md_core_model['mappings']['pycsw:ParentIdentifier']).in_(parentids)).delete(
synchronize_session='fetch')
self.session.commit()
except Exception as err:
self.session.rollback()
raise RuntimeError('ERROR: %s' % str(err.orig))
return rows
def _get_repo_filter(self, query):
''' Apply repository wide side filter / mask query '''
if self.filter is not None:
return query.filter(self.filter)
return query
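# Hedged usage sketch (not part of the original module; assumes a pycsw
# context object and an existing records table):
#   repo = Repository('sqlite:////var/lib/pycsw/records.db', context, table='records')
#   total, results = repo.query(constraint={}, maxrecords=5)
#   matches = repo.query_ids(['some-record-identifier'])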
|
py
|
1a56af3a8742207042b37bd8fba1a8479c6c44b3
|
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
type='CascadeEncoderDecoder',
num_stages=2,
pretrained='open-mmlab://resnet50_v1c',
backbone=dict(
type='ResNetV1c',
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 1, 1),
strides=(1, 2, 2, 2),
norm_cfg=norm_cfg,
norm_eval=False,
style='pytorch',
contract_dilation=True),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
num_outs=4),
decode_head=[
dict(
type='FPNHead',
in_channels=[256, 256, 256, 256],
in_index=[0, 1, 2, 3],
feature_strides=[4, 8, 16, 32],
channels=128,
dropout_ratio=-1,
num_classes=19,
norm_cfg=norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
dict(
type='PointHead',
in_channels=[256],
in_index=[0],
channels=256,
num_fcs=3,
coarse_pred_each_layer=True,
dropout_ratio=-1,
num_classes=19,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
])
# model training and testing settings
train_cfg = dict(
num_points=2048, oversample_ratio=3, importance_sample_ratio=0.75)
test_cfg = dict(
mode='whole',
subdivision_steps=2,
subdivision_num_points=8196,
scale_factor=2)
|
py
|
1a56b37daab21c615ecdf3c9fb21affd00cf56d1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
THEANO_LINKER = "cvm"
from controllers import TrainingController, TrainingValidator
from base import NeuralTrainer
from trainers import *
from optimize import *
from annealers import *
from customize_trainer import CustomizeTrainer
from util import wrap_core, multiple_l2_norm
from delayed_trainers import DelayedBatchSGDTrainer
from scipy_trainer import ScipyTrainer
from train_logger import TrainLogger
|
py
|
1a56b3c20553435d3d882a7e8e4530d1e6b1291d
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云(BlueKing) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
Global configuration for the production environment
"""
from settings import APP_ID
# ===============================================================================
# Database settings for the production environment
# ===============================================================================
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',  # MySQL by default
'NAME': 'helloworldapp',  # database name (defaults to APP_ID)
'USER': 'helloworldapp',  # your database user
'PASSWORD': 'helloworldapp@2018',  # your database password
'HOST': '172.50.19.22',  # database host
'PORT': '3306',  # defaults to 3306
},
}
|
py
|
1a56b3e3bbabf1ae2dbb022b4a62eb9bf5c8a321
|
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from ax.exceptions.model import ModelError
from ax.models.discrete.thompson import ThompsonSampler
from ax.utils.common.testutils import TestCase
class ThompsonSamplerTest(TestCase):
def setUp(self):
self.Xs = [[[1, 1], [2, 2], [3, 3], [4, 4]]] # 4 arms, each of dimensionality 2
self.Ys = [[1, 2, 3, 4]]
self.Yvars = [[1, 1, 1, 1]]
self.parameter_values = [[1, 2, 3, 4], [1, 2, 3, 4]]
self.outcome_names = ["x", "y"] # not used for regular TS
self.multiple_metrics_Xs = [
[[1, 1], [2, 2], [3, 3], [4, 4]],
[[1, 1], [2, 2], [3, 3], [4, 4]],
] # 2 metrics, 4 arms, each of dimensionality 2
self.multiple_metrics_Ys = [[1, 2, 3, 4], [0, 0, 0, 1]]
self.multiple_metrics_Yvars = [[1, 1, 1, 1], [1, 1, 1, 1]]
def testThompsonSampler(self):
generator = ThompsonSampler(min_weight=0.0)
generator.fit(
Xs=self.Xs,
Ys=self.Ys,
Yvars=self.Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
arms, weights, gen_metadata = generator.gen(
n=3, parameter_values=self.parameter_values, objective_weights=np.ones(1)
)
self.assertEqual(arms, [[4, 4], [3, 3], [2, 2]])
for weight, expected_weight in zip(
weights, [3 * i for i in [0.725, 0.225, 0.05]]
):
self.assertAlmostEqual(weight, expected_weight, 1)
self.assertEqual(len(gen_metadata["arms_to_weights"]), 4)
def testThompsonSamplerValidation(self):
generator = ThompsonSampler(min_weight=0.01)
# all Xs are not the same
with self.assertRaises(ValueError):
generator.fit(
Xs=[[[1, 1], [2, 2], [3, 3], [4, 4]], [[1, 1], [2, 2], [4, 4]]],
Ys=self.Ys,
Yvars=self.Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
# multiple observations per parameterization
with self.assertRaises(ValueError):
generator.fit(
Xs=[[[1, 1], [2, 2], [2, 2]]],
Ys=self.Ys,
Yvars=self.Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
# these are not the same observations, so should not error
generator.fit(
Xs=[[[1, 1], [2.0, 2], [2, 2]]],
Ys=self.Ys,
Yvars=self.Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
# requires objective weights
with self.assertRaises(ValueError):
generator.gen(5, self.parameter_values, objective_weights=None)
def testThompsonSamplerMinWeight(self):
generator = ThompsonSampler(min_weight=0.01)
generator.fit(
Xs=self.Xs,
Ys=self.Ys,
Yvars=self.Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
arms, weights, _ = generator.gen(
n=5, parameter_values=self.parameter_values, objective_weights=np.ones(1)
)
self.assertEqual(arms, [[4, 4], [3, 3], [2, 2]])
for weight, expected_weight in zip(
weights, [3 * i for i in [0.725, 0.225, 0.05]]
):
self.assertAlmostEqual(weight, expected_weight, 1)
def testThompsonSamplerUniformWeights(self):
generator = ThompsonSampler(min_weight=0.0, uniform_weights=True)
generator.fit(
Xs=self.Xs,
Ys=self.Ys,
Yvars=self.Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
arms, weights, _ = generator.gen(
n=3, parameter_values=self.parameter_values, objective_weights=np.ones(1)
)
self.assertEqual(arms, [[4, 4], [3, 3], [2, 2]])
for weight, expected_weight in zip(weights, [1.0, 1.0, 1.0]):
self.assertAlmostEqual(weight, expected_weight, 1)
def testThompsonSamplerInfeasible(self):
generator = ThompsonSampler(min_weight=0.9)
generator.fit(
Xs=self.Xs,
Ys=self.Ys,
Yvars=self.Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
with self.assertRaises(ModelError):
generator.gen(
n=3,
parameter_values=self.parameter_values,
objective_weights=np.ones(1),
)
def testThompsonSamplerOutcomeConstraints(self):
generator = ThompsonSampler(min_weight=0.0)
generator.fit(
Xs=self.multiple_metrics_Xs,
Ys=self.multiple_metrics_Ys,
Yvars=self.multiple_metrics_Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
arms, weights, _ = generator.gen(
n=4,
parameter_values=self.parameter_values,
objective_weights=np.array([1, 0]),
outcome_constraints=(
# pass in multiples of the same constraint
# to ensure that shapes are correct for multiple constraints
np.array([[0, 1], [0, 1], [0, 1]]),
np.array([[1], [1], [1]]),
),
)
self.assertEqual(arms, [[3, 3], [4, 4], [2, 2], [1, 1]])
for weight, expected_weight in zip(
weights, [4 * i for i in [0.4, 0.4, 0.15, 0.05]]
):
self.assertAlmostEqual(weight, expected_weight, delta=0.15)
def testThompsonSamplerOutcomeConstraintsInfeasible(self):
generator = ThompsonSampler(min_weight=0.0)
generator.fit(
Xs=self.multiple_metrics_Xs,
Ys=self.multiple_metrics_Ys,
Yvars=self.multiple_metrics_Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
with self.assertRaises(ModelError):
generator.gen(
n=3,
parameter_values=self.parameter_values,
objective_weights=np.ones(2),
outcome_constraints=(np.array([[0, 1]]), np.array([[-10]])),
)
def testThompsonSamplerPredict(self):
generator = ThompsonSampler(min_weight=0.0)
generator.fit(
Xs=self.Xs,
Ys=self.Ys,
Yvars=self.Yvars,
parameter_values=self.parameter_values,
outcome_names=self.outcome_names,
)
f, cov = generator.predict([[1, 1], [3, 3]])
self.assertTrue(np.array_equal(f, np.array([[1], [3]])))
self.assertTrue(np.array_equal(cov, np.ones((2, 1, 1))))
with self.assertRaises(ValueError):
generator.predict([[1, 2]])
|
py
|
1a56b4b4d748e2ac650093163cefb9d8bc724075
|
"""Basic tests of the Misty VM
SCL <[email protected]>
Copyright (c) 2020 rerobots, Inc.
"""
import pytest
import mistygrind
@pytest.fixture
def client(loop, aiohttp_client):
yield loop.run_until_complete(
aiohttp_client(mistygrind.vm.create_vm())
)
async def test_api_battery(client):
resp = await client.get('/api/battery')
assert resp.status == 200
payload = await resp.json()
assert payload['status'] == 'Success'
assert 'chargePercent' in payload['result'] and 'isCharging' in payload['result']
|
py
|
1a56b4f0e00619c8cc359f549587e1b9d2858696
|
#! /usr/bin/env python
import sys
from aubio import source, sink, pvoc, tss
if __name__ == '__main__':
if len(sys.argv) < 2:
print('usage: %s <inputfile> <outputfile_transient> <outputfile_steady>' % sys.argv[0])
sys.exit(1)
samplerate = 44100
win_s = 1024 # fft size
hop_s = win_s // 8 # block size
f = source(sys.argv[1], samplerate, hop_s)
g = sink(sys.argv[2], samplerate)
h = sink(sys.argv[3], samplerate)
pva = pvoc(win_s, hop_s) # a phase vocoder
pvb = pvoc(win_s, hop_s) # another phase vocoder
t = tss(win_s, hop_s) # transient steady state separation
t.set_threshold(0.01)
t.set_alpha(3.)
t.set_beta(4.)
read = hop_s
while read:
samples, read = f() # read file
spec = pva(samples) # compute spectrum
trans_spec, stead_spec = t(spec) # transient steady-state separation
transients = pva.rdo(trans_spec) # overlap-add synthesis of transients
steadstate = pvb.rdo(stead_spec) # overlap-add synthesis of steady states
g(transients, read) # write transients to output
h(steadstate, read) # write steady states to output
del f, g, h # finish writing the files now
sys.exit(0)
from demo_spectrogram import get_spectrogram
from pylab import subplot, show
subplot(311)
get_spectrogram(sys.argv[1])
subplot(312)
get_spectrogram(sys.argv[2])
subplot(313)
get_spectrogram(sys.argv[3])
show()
|
py
|
1a56b515d064e29d84e793965634b12835f6bbb0
|
import re
from urllib.parse import urlencode
import collections
from directory_api_client.client import api_client
from directory_constants import choices
import directory_components.helpers
from ipware import get_client_ip
from django.http import Http404
from django.utils import translation
from django.urls import reverse
from django.utils.html import escape, mark_safe
from core import constants
from core.constants import HeaderConfig
INDUSTRY_CHOICES = dict(choices.INDUSTRIES)
def unslugify(slug):
return slug.replace('-', ' ').capitalize()
def get_language_from_querystring(request):
language_codes = translation.trans_real.get_languages()
language_code = request.GET.get('language') or request.GET.get('lang')
if language_code and language_code in language_codes:
return language_code
NotifySettings = collections.namedtuple(
'NotifySettings',
[
'company_template',
'support_template',
'investor_template',
'support_email_address',
]
)
def get_ga_data_for_page(page_type):
return constants.GA_DATA_MAPPING[page_type]
def get_paginator_url(filters, url_name):
url = reverse(url_name)
querystring = urlencode({
key: value
for key, value in filters.lists()
if value and key != 'page'
}, doseq=True)
return f'{url}?{querystring}'
class SectorFilter:
def __init__(self, sectors):
self.sectors = sectors
def matches(self, opportunity):
return any(
sector['related_sector'].get('heading') in self.sectors
for sector in opportunity.get('related_sectors', [])
if sector['related_sector'] and sector['related_sector']['heading']
)
Scale = collections.namedtuple("Scale", "title min max")
class ScaleFilter:
scales_with_values = [
Scale(title='< £100m', min=1, max=99),
Scale(title='£100m - £499m', min=100, max=499),
Scale(title='£500m - £999m', min=500, max=999),
Scale(title='> £1bn', min=1000, max='None'),
Scale(title='Value unknown', min=0, max=0)
]
def __init__(self, scale_strings):
self.selected_scales = [
scaleFilter for scaleFilter in self.scales_with_values
if scaleFilter.title in scale_strings
]
def matches(self, opportunity):
for scale_chosen in self.selected_scales:
if scale_chosen.min == 0 and scale_chosen.max == 0:
if not opportunity['scale_value']:
return True
elif float(opportunity['scale_value']) == 0.00:
return True
elif scale_chosen.max == 'None':
if scale_chosen.min <= float(opportunity['scale_value']):
return True
elif scale_chosen.max:
if scale_chosen.min <= float(opportunity['scale_value']) <= scale_chosen.max: # NOQA
return True
class MultipleRegionsFilter:
def __init__(self, regions):
self.regions = regions
def matches(self, opportunity):
for related_region in opportunity.get('related_regions', []):
if related_region['title'] and related_region['title'] in self.regions:
return True
class SubSectorFilter:
def __init__(self, sub_sectors):
self.sub_sectors = sub_sectors
def matches(self, opportunity):
if 'sub_sectors' in opportunity and opportunity['sub_sectors']:
for sub_sector in opportunity['sub_sectors']:
if sub_sector in self.sub_sectors:
return True
return False
class InvestmentTypeFilter:
def __init__(self, investment_types):
self.investment_types = investment_types
def matches(self, opportunity):
if 'investment_type' in opportunity and opportunity['investment_type']:
if opportunity['investment_type'] in self.investment_types:
return True
return False
class PlanningStatusFilter:
def __init__(self, planning_statuses):
self.planning_statuses = planning_statuses
def matches(self, opportunity):
if 'planning_status' in opportunity and opportunity['planning_status']:
if opportunity['planning_status'] in self.planning_statuses:
return True
return False
def filter_opportunities(opportunities, filter_chosen):
return [opp for opp in opportunities if filter_chosen.matches(opp)]
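# Illustrative usage sketch (not part of the original module): the filter
# classes above all expose matches(opportunity), so they compose directly
# with filter_opportunities. The opportunity dict below is a hypothetical
# stand-in for CMS data.
#
#     opportunities = [{
#         'title': 'Port upgrade',
#         'related_sectors': [{'related_sector': {'heading': 'Energy'}}],
#         'scale_value': '250',
#     }]
#     chosen = SectorFilter(sectors=['Energy'])
#     matching = filter_opportunities(opportunities, chosen)  # one match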
Sort_by = collections.namedtuple("Sort_by", "title value reverse")
class SortFilter:
sort_by_with_values = [
Sort_by(title='Opportunity name: A to Z', value='title', reverse=False),
Sort_by(title='Opportunity name: Z to A', value='title', reverse=True),
Sort_by(
title='Scale: Low to High', value='scale_value', reverse=False
),
Sort_by(title='Scale: High to Low', value='scale_value', reverse=True)
]
def __init__(self, sort_by_filter_chosen):
self.sort_by_filter_chosen = next(
(sort_by for sort_by
in self.sort_by_with_values
if sort_by.title == sort_by_filter_chosen),
self.sort_by_with_values[0])
def sort_opportunities(opportunities, sort_by_chosen):
sort_filter = sort_by_chosen.sort_by_filter_chosen
if sort_filter.value == 'title':
opportunities.sort(
key=lambda x: x['title'],
reverse=sort_filter.reverse
)
if sort_filter.value == 'scale_value':
opportunities.sort(
key=lambda x: float(x['scale_value']),
reverse=sort_filter.reverse
)
return opportunities
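# Illustrative sketch: sort_opportunities expects a SortFilter instance, e.g.
#     chosen = SortFilter('Scale: High to Low')
#     ranked = sort_opportunities(opportunities, chosen)
# An unrecognised title falls back to the first option,
# 'Opportunity name: A to Z'.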
class CompanyParser(directory_components.helpers.CompanyParser):
def serialize_for_template(self):
if not self.data:
return {}
return {
**self.data,
'date_of_creation': self.date_of_creation,
'address': self.address,
'sectors': self.sectors_label,
'keywords': self.keywords,
'employees': self.employees_label,
'expertise_industries': self.expertise_industries_label,
'expertise_regions': self.expertise_regions_label,
'expertise_countries': self.expertise_countries_label,
'expertise_languages': self.expertise_languages_label,
'has_expertise': self.has_expertise,
'expertise_products_services': (
self.expertise_products_services_label
),
'is_in_companies_house': self.is_in_companies_house,
}
def get_results_from_search_response(response):
parsed = response.json()
formatted_results = []
for result in parsed['hits']['hits']:
parser = CompanyParser(result['_source'])
formatted = parser.serialize_for_template()
if 'highlight' in result:
highlighted = '...'.join(
result['highlight'].get('description', '') or
result['highlight'].get('summary', '')
)
# escape all html tags other than <em> and </em>
highlighted_escaped = (
escape(highlighted).replace('&lt;em&gt;', '<em>').replace('&lt;/em&gt;', '</em>')
)
formatted['highlight'] = mark_safe(highlighted_escaped)
formatted_results.append(formatted)
parsed['results'] = formatted_results
return parsed
def get_filters_labels(filters):
sectors = dict(choices.INDUSTRIES)
languages = dict(choices.EXPERTISE_LANGUAGES)
labels = []
skip_fields = [
'q',
'page',
# Prevents duplicates labels not to be displayed in filter list
'expertise_products_services_labels'
]
for name, values in filters.items():
if name in skip_fields:
pass
elif name == 'industries':
labels += [sectors[item] for item in values if item in sectors]
elif name == 'expertise_languages':
labels += [languages[item] for item in values if item in languages]
elif name.startswith('expertise_products_services_'):
labels += values
else:
for value in values:
labels.append(value.replace('_', ' ').title())
return labels
def get_company_profile(number):
response = api_client.company.published_profile_retrieve(number=number)
if response.status_code == 404:
raise Http404(f'API returned 404 for company number {number}')
response.raise_for_status()
return response.json()
def count_data_with_field(list_of_data, field):
filtered_list = [item for item in list_of_data if item[field]]
return len(filtered_list)
def pair_sector_values_with_label(sectors_values):
if not sectors_values:
return []
return [
pair_sector_value_with_label(value) for value in sectors_values
if value in INDUSTRY_CHOICES
]
def pair_sector_value_with_label(sectors_value):
return {'value': sectors_value, 'label': get_sectors_label(sectors_value)}
def get_sectors_label(sectors_value):
if not sectors_value:
return sectors_value
return INDUSTRY_CHOICES.get(sectors_value)
def get_case_study_details_from_response(response):
parsed = response.json()
# `format_company_details` expects `supplier_case_studies` key.
parsed['company']['supplier_case_studies'] = []
parsed['sector'] = pair_sector_value_with_label(parsed['sector'])
parsed['company'] = CompanyParser(
parsed['company']
).serialize_for_template()
return parsed
def format_case_study(case_study):
case_study_url = reverse(
'find-a-supplier:case-study-details',
kwargs={'id': case_study['pk'], 'slug': case_study['slug']},
)
return {
**case_study,
'sector': pair_sector_value_with_label(case_study['sector']),
'case_study_url': case_study_url,
}
def get_case_study(case_study_id):
response = api_client.company.published_case_study_retrieve(case_study_id)
if response.status_code == 404:
raise Http404(
f'API returned 404 for case study with id {case_study_id}'
)
response.raise_for_status()
return get_case_study_details_from_response(response)
def get_map_labels_with_vertical_positions(list_of_title_words, middle_x, middle_y):
lowest_y = middle_y - ((len(list_of_title_words) - 1) / 2) * 25
labels_with_coordinates = [
{'title': list_of_title_words[i], 'x': str(middle_x), 'y': str((lowest_y + (i * 25)))}
for i in range(len(list_of_title_words))
]
return labels_with_coordinates
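# Worked example (illustrative): for a three-word title centred at
# (400, 300), lowest_y = 300 - ((3 - 1) / 2) * 25 = 275.0, so the words are
# stacked 25px apart at y '275.0', '300.0' and '325.0', all with x '400'.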
def get_header_config(path):
for (pattern, config) in constants.HEADER_SECTION_MAPPING.items():
compiled_pattern = re.compile(pattern)
if compiled_pattern.match(path):
return config
# If no matching URL is found, just return a default config.
return HeaderConfig(section=None, sub_section=None)
def get_header_section(path):
return get_header_config(path).section
def get_header_sub_section(path):
return get_header_config(path).sub_section
def get_sender_ip_address(request):
ip, is_routable = get_client_ip(request)
return ip or None
|
py
|
1a56b6318edfe554b95fbf55b4bb60e308d7f1c7
|
import functools
import logging
def catch_exception(func):
"""
A decorator that wraps the passed in function and logs
exceptions should one occur
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except:
# log the exception
err = "There was an exception in "
err += func.__name__
logging.exception(err)
# re-raise the exception
raise
return wrapper
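# Illustrative usage (not part of the original module): any exception raised
# in the decorated function is logged with its traceback and then re-raised.
#
#     @catch_exception
#     def divide(a, b):
#         return a / b
#
#     divide(1, 0)  # logs "There was an exception in divide" plus the
#                   # traceback, then the ZeroDivisionError propagates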
|
py
|
1a56b64df8ee188820b1c896585f2dabf2c9bbb5
|
# OpenMV Unit Tests.
#
import os, sensor, gc
TEST_DIR = "unittest"
TEMP_DIR = "/sd/unittest/temp"
DATA_DIR = "/sd/unittest/data"
SCRIPT_DIR = "/sd/unittest/script"
if not (TEST_DIR in os.listdir("/sd")):
raise Exception('Unittest dir not found!')
print("")
test_failed = False
def print_result(test, result):
s = "Unittest (%s)"%(test)
padding = "."*(60-len(s))
print(s + padding + result)
for test in sorted(os.listdir(SCRIPT_DIR)):
if test.endswith(".py"):
test_result = "PASSED"
test_path = "/".join((SCRIPT_DIR, test))
try:
f = open(test_path)
exec(f.read())
if unittest(DATA_DIR, TEMP_DIR) == False:
f.close()
raise Exception()
f.close()
except Exception as e:
print(str(e))
if "unavailable" in str(e):
test_result = "DISABLED"
else:
test_failed = True
test_result = "FAILED"
print_result(test, test_result)
gc.collect()
if test_failed:
print("\nSome tests have FAILED!!!\n\n")
else:
print("\nAll tests PASSED.\n\n")
|
py
|
1a56b6adf2e7adb3d5c505e01e0d6b16bb5ae5b2
|
import pygame
import os
import random
CAR = pygame.image.load("Car.png")
BACKGROUND = pygame.image.load("Road.png")
BG_CARS = [
pygame.transform.scale(pygame.image.load("cars/" + vehicle), (100, 100))
for vehicle in os.listdir("cars")
]
MAX_CARS = 5
class Game:
RANDOM_CARS_COUNT = 0
def __init__(self):
pygame.init()
self.score = 0
self.window = pygame.display.set_mode((500, 800))
pygame.display.set_caption("Racing AI")
self.clock = pygame.time.Clock()
self.execute = True
def cleanUpCars(self, bg_cars):
for c in bg_cars[:]:  # iterate over a copy so removing items is safe
if c.y >= 800:
bg_cars.remove(c)
self.RANDOM_CARS_COUNT -= 1
return bg_cars
def createNewCars(self, bg_cars):
extra = len([car for car in bg_cars if not car.onScreen()])
while self.RANDOM_CARS_COUNT != MAX_CARS + extra:
new_car = BackgroundCars(BG_CARS[random.randint(0, 5)], self.window)
will_append = True
for cars in bg_cars:
if cars.collide(new_car) or self.RANDOM_CARS_COUNT == MAX_CARS + extra:
will_append = False
break
if will_append:
bg_cars.append(new_car)
self.RANDOM_CARS_COUNT += 1
return bg_cars
def run(self):
car = Car(250, 650, self.window)
track = Track(50, self.window)
bg_cars = []
self.createNewCars(bg_cars)
while self.execute:
keys = pygame.key.get_pressed()
self.window.fill((0, 255, 0))
for event in pygame.event.get():
if event.type == pygame.QUIT or keys[pygame.K_0]:
self.execute = False
bg_cars = self.cleanUpCars(bg_cars)
bg_cars = self.createNewCars(bg_cars)
track.draw()
self.score = track.move(self.score)
car.draw()
for i in random.sample(
list(range(self.RANDOM_CARS_COUNT)), self.RANDOM_CARS_COUNT
):
bg_cars[i].draw()
bg_cars[i].move()
if keys[pygame.K_LEFT]:
car.x -= car.vel
if keys[pygame.K_RIGHT]:
car.x += car.vel
if keys[pygame.K_UP] and car.y + car.vel >= 250:
car.y -= car.vel
if keys[pygame.K_DOWN] and car.y + car.vel + car.height <= 750:
car.y += car.vel
for cars in bg_cars:
if cars.collide(car):
self.execute = False
if car.x < 50 or car.x + car.width > 450:
self.execute = False
self.clock.tick(60)
font = pygame.font.Font('freesansbold.ttf', 32)
text = font.render(" Score: " + str(self.score) + " ", True, (255, 0, 0), (0, 0, 0))
textRect = text.get_rect()
textRect.center = (400, 50)
self.window.blit(text, textRect)
pygame.display.update()
print("Score:", self.score)
pygame.time.wait(100)
pygame.quit()
class BackgroundCars:
def __init__(self, car, window):
self.x = random.randint(50, 350)
self.y = random.randint(-400, -100)
self.vel = 5
self.width = 100
self.height = 100
self.window = window
self.car = car
def move(self):
self.y += self.vel
def draw(self):
self.window.blit(self.car, (self.x, self.y))
def collide(self, gaddi):
playerMask = gaddi.mask()
carMask = self.mask()
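# playerMask.overlap expects the offset of the car mask relative to the
# player mask, hence (self.x - gaddi.x, self.y - gaddi.y) below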
collision = playerMask.overlap(carMask, (self.x - gaddi.x, self.y - gaddi.y))
return bool(collision)
def mask(self):
return pygame.mask.from_surface(self.car)
def onScreen(self):
if self.y <= 650:
return True
return False
def __str__(self):
return f"y: {self.y} , onScreen: {self.onScreen()}"
class Track:
def __init__(self, x, window):
self.x = x
self.y1 = 0
self.y2 = 800
self.vel = 10
self.window = window
def move(self, score):
self.y1 += self.vel
self.y2 += self.vel
if self.y1 - 800 > 0:
self.y1 = self.y2 - 800
if self.y2 - 800 > 0:
self.y2 = self.y1 - 800
return score + 1
def draw(self):
self.window.blit(BACKGROUND, (self.x, self.y1))
self.window.blit(BACKGROUND, (self.x, self.y2))
class Car:
def __init__(self, x, y, window):
self.x = x
self.y = y
self.vel = 6
self.width = 44
self.height = 100
self.window = window
self.car = CAR
def move(self):
self.y += self.vel
def draw(self):
self.window.blit(self.car, (self.x, self.y))
def mask(self):
return pygame.mask.from_surface(self.car)
if __name__ == "__main__":
game = Game()
game.run()
|
py
|
1a56b7411df408818c16c266321ecba1c5946558
|
from hmlvaraus import admin
from django.conf.urls import include, url
from rest_framework.routers import DefaultRouter
from hmlvaraus.api.hml_reservation import PurchaseView, RenewalView, HMLReservationViewSet, SmsView
from hmlvaraus.api.berth import BerthViewSet, GroundBerthPriceView
from hmlvaraus.api.unit import UnitViewSet
from hmlvaraus.api.user import UserViewSet
from hmlvaraus.views.spa import IndexView
from hmlvaraus.api.importer import ImporterView
router = DefaultRouter()
router.register(r'hml_reservation', HMLReservationViewSet)
router.register(r'berth', BerthViewSet)
router.register(r'unit', UnitViewSet)
router.register(r'user', UserViewSet)
urlpatterns = [
url(r'^accounts/', include('allauth.urls')),
url(r'^grappelli/', include('grappelli.urls')),
url(r'^sysadmin/', include(admin.site.urls)),
url(r'^$', IndexView.as_view()),
url(r'^api/purchase/', PurchaseView.as_view()),
url(r'^api/sms/', SmsView.as_view()),
url(r'^api/renewal/', RenewalView.as_view()),
url(r'^api/ground_berth_price/', GroundBerthPriceView.as_view()),
url(r'^api/importer/', ImporterView.as_view()),
url(r'^api/', include(router.urls))
]
|
py
|
1a56b916ba178b3a0f0dfe6c78a8091ffbe8a2ed
|
#!/usr/bin/env python
from lib.cloudstack import CloudStackAPI
__all__ = [
'VMReboot'
]
class VMReboot(CloudStackAPI):
def run(self, url, apikey, secretkey, vm_id):
cs = self.get_client(url, apikey, secretkey)
return cs.rebootVirtualMachine(id=vm_id)
|
py
|
1a56b921b36126da65f5ab3663ea31df86737e3e
|
# Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
"""These version tests were taken from the RPM source code.
We try to maintain compatibility with RPM's version semantics
where it makes sense.
"""
import pytest
from spack.version import Version, ver
def assert_ver_lt(a, b):
"""Asserts the results of comparisons when 'a' is less than 'b'."""
a, b = ver(a), ver(b)
assert a < b
assert a <= b
assert a != b
assert not a == b
assert not a > b
assert not a >= b
def assert_ver_gt(a, b):
"""Asserts the results of comparisons when 'a' is greater than 'b'."""
a, b = ver(a), ver(b)
assert a > b
assert a >= b
assert a != b
assert not a == b
assert not a < b
assert not a <= b
def assert_ver_eq(a, b):
"""Asserts the results of comparisons when 'a' is equal to 'b'."""
a, b = ver(a), ver(b)
assert not a > b
assert a >= b
assert not a != b
assert a == b
assert not a < b
assert a <= b
def assert_in(needle, haystack):
"""Asserts that 'needle' is in 'haystack'."""
assert ver(needle) in ver(haystack)
def assert_not_in(needle, haystack):
"""Asserts that 'needle' is not in 'haystack'."""
assert ver(needle) not in ver(haystack)
def assert_canonical(canonical_list, version_list):
"""Asserts that a redundant list is reduced to canonical form."""
assert ver(canonical_list) == ver(version_list)
def assert_overlaps(v1, v2):
"""Asserts that two version ranges overlaps."""
assert ver(v1).overlaps(ver(v2))
def assert_no_overlap(v1, v2):
"""Asserts that two version ranges do not overlap."""
assert not ver(v1).overlaps(ver(v2))
def assert_satisfies(v1, v2):
"""Asserts that 'v1' satisfies 'v2'."""
assert ver(v1).satisfies(ver(v2))
def assert_does_not_satisfy(v1, v2):
"""Asserts that 'v1' does not satisfy 'v2'."""
assert not ver(v1).satisfies(ver(v2))
def check_intersection(expected, a, b):
"""Asserts that 'a' intersect 'b' == 'expected'."""
assert ver(expected) == ver(a).intersection(ver(b))
def check_union(expected, a, b):
"""Asserts that 'a' union 'b' == 'expected'."""
assert ver(expected) == ver(a).union(ver(b))
def test_two_segments():
assert_ver_eq('1.0', '1.0')
assert_ver_lt('1.0', '2.0')
assert_ver_gt('2.0', '1.0')
assert_ver_eq('develop', 'develop')
assert_ver_lt('1.0', 'develop')
assert_ver_gt('develop', '1.0')
def test_three_segments():
assert_ver_eq('2.0.1', '2.0.1')
assert_ver_lt('2.0', '2.0.1')
assert_ver_gt('2.0.1', '2.0')
def test_alpha():
# TODO: not sure whether I like this. 2.0.1a is *usually*
# TODO: less than 2.0.1, but special-casing it makes version
# TODO: comparison complicated. See version.py
assert_ver_eq('2.0.1a', '2.0.1a')
assert_ver_gt('2.0.1a', '2.0.1')
assert_ver_lt('2.0.1', '2.0.1a')
def test_patch():
assert_ver_eq('5.5p1', '5.5p1')
assert_ver_lt('5.5p1', '5.5p2')
assert_ver_gt('5.5p2', '5.5p1')
assert_ver_eq('5.5p10', '5.5p10')
assert_ver_lt('5.5p1', '5.5p10')
assert_ver_gt('5.5p10', '5.5p1')
def test_num_alpha_with_no_separator():
assert_ver_lt('10xyz', '10.1xyz')
assert_ver_gt('10.1xyz', '10xyz')
assert_ver_eq('xyz10', 'xyz10')
assert_ver_lt('xyz10', 'xyz10.1')
assert_ver_gt('xyz10.1', 'xyz10')
def test_alpha_with_dots():
assert_ver_eq('xyz.4', 'xyz.4')
assert_ver_lt('xyz.4', '8')
assert_ver_gt('8', 'xyz.4')
assert_ver_lt('xyz.4', '2')
assert_ver_gt('2', 'xyz.4')
def test_nums_and_patch():
assert_ver_lt('5.5p2', '5.6p1')
assert_ver_gt('5.6p1', '5.5p2')
assert_ver_lt('5.6p1', '6.5p1')
assert_ver_gt('6.5p1', '5.6p1')
def test_rc_versions():
assert_ver_gt('6.0.rc1', '6.0')
assert_ver_lt('6.0', '6.0.rc1')
def test_alpha_beta():
assert_ver_gt('10b2', '10a1')
assert_ver_lt('10a2', '10b2')
def test_double_alpha():
assert_ver_eq('1.0aa', '1.0aa')
assert_ver_lt('1.0a', '1.0aa')
assert_ver_gt('1.0aa', '1.0a')
def test_padded_numbers():
assert_ver_eq('10.0001', '10.0001')
assert_ver_eq('10.0001', '10.1')
assert_ver_eq('10.1', '10.0001')
assert_ver_lt('10.0001', '10.0039')
assert_ver_gt('10.0039', '10.0001')
def test_close_numbers():
assert_ver_lt('4.999.9', '5.0')
assert_ver_gt('5.0', '4.999.9')
def test_date_stamps():
assert_ver_eq('20101121', '20101121')
assert_ver_lt('20101121', '20101122')
assert_ver_gt('20101122', '20101121')
def test_underscores():
assert_ver_eq('2_0', '2_0')
assert_ver_eq('2.0', '2_0')
assert_ver_eq('2_0', '2.0')
assert_ver_eq('2-0', '2_0')
assert_ver_eq('2_0', '2-0')
def test_rpm_oddities():
assert_ver_eq('1b.fc17', '1b.fc17')
assert_ver_lt('1b.fc17', '1.fc17')
assert_ver_gt('1.fc17', '1b.fc17')
assert_ver_eq('1g.fc17', '1g.fc17')
assert_ver_gt('1g.fc17', '1.fc17')
assert_ver_lt('1.fc17', '1g.fc17')
# Stuff below here is not taken from RPM's tests and is
# unique to spack
def test_version_ranges():
assert_ver_lt('1.2:1.4', '1.6')
assert_ver_gt('1.6', '1.2:1.4')
assert_ver_eq('1.2:1.4', '1.2:1.4')
assert ver('1.2:1.4') != ver('1.2:1.6')
assert_ver_lt('1.2:1.4', '1.5:1.6')
assert_ver_gt('1.5:1.6', '1.2:1.4')
def test_contains():
assert_in('1.3', '1.2:1.4')
assert_in('1.2.5', '1.2:1.4')
assert_in('1.3.5', '1.2:1.4')
assert_in('1.3.5-7', '1.2:1.4')
assert_not_in('1.1', '1.2:1.4')
assert_not_in('1.5', '1.2:1.4')
assert_in('1.4.2', '1.2:1.4')
assert_not_in('1.4.2', '1.2:1.4.0')
assert_in('1.2.8', '1.2.7:1.4')
assert_in('1.2.7:1.4', ':')
assert_not_in('1.2.5', '1.2.7:1.4')
assert_in('1.4.1', '1.2.7:1.4')
assert_not_in('1.4.1', '1.2.7:1.4.0')
def test_in_list():
assert_in('1.2', ['1.5', '1.2', '1.3'])
assert_in('1.2.5', ['1.5', '1.2:1.3'])
assert_in('1.5', ['1.5', '1.2:1.3'])
assert_not_in('1.4', ['1.5', '1.2:1.3'])
assert_in('1.2.5:1.2.7', [':'])
assert_in('1.2.5:1.2.7', ['1.5', '1.2:1.3'])
assert_not_in('1.2.5:1.5', ['1.5', '1.2:1.3'])
assert_not_in('1.1:1.2.5', ['1.5', '1.2:1.3'])
def test_ranges_overlap():
assert_overlaps('1.2', '1.2')
assert_overlaps('1.2.1', '1.2.1')
assert_overlaps('1.2.1b', '1.2.1b')
assert_overlaps('1.2:1.7', '1.6:1.9')
assert_overlaps(':1.7', '1.6:1.9')
assert_overlaps(':1.7', ':1.9')
assert_overlaps(':1.7', '1.6:')
assert_overlaps('1.2:', '1.6:1.9')
assert_overlaps('1.2:', ':1.9')
assert_overlaps('1.2:', '1.6:')
assert_overlaps(':', ':')
assert_overlaps(':', '1.6:1.9')
assert_overlaps('1.6:1.9', ':')
def test_overlap_with_containment():
assert_in('1.6.5', '1.6')
assert_in('1.6.5', ':1.6')
assert_overlaps('1.6.5', ':1.6')
assert_overlaps(':1.6', '1.6.5')
assert_not_in(':1.6', '1.6.5')
assert_in('1.6.5', ':1.6')
def test_lists_overlap():
assert_overlaps('1.2b:1.7,5', '1.6:1.9,1')
assert_overlaps('1,2,3,4,5', '3,4,5,6,7')
assert_overlaps('1,2,3,4,5', '5,6,7')
assert_overlaps('1,2,3,4,5', '5:7')
assert_overlaps('1,2,3,4,5', '3, 6:7')
assert_overlaps('1, 2, 4, 6.5', '3, 6:7')
assert_overlaps('1, 2, 4, 6.5', ':, 5, 8')
assert_overlaps('1, 2, 4, 6.5', ':')
assert_no_overlap('1, 2, 4', '3, 6:7')
assert_no_overlap('1,2,3,4,5', '6,7')
assert_no_overlap('1,2,3,4,5', '6:7')
def test_canonicalize_list():
assert_canonical(['1.2', '1.3', '1.4'], ['1.2', '1.3', '1.3', '1.4'])
assert_canonical(['1.2', '1.3:1.4'], ['1.2', '1.3', '1.3:1.4'])
assert_canonical(['1.2', '1.3:1.4'], ['1.2', '1.3:1.4', '1.4'])
assert_canonical(['1.3:1.4'], ['1.3:1.4', '1.3', '1.3.1', '1.3.9', '1.4'])
assert_canonical(['1.3:1.4'], ['1.3', '1.3.1', '1.3.9', '1.4', '1.3:1.4'])
assert_canonical(
['1.3:1.5'], ['1.3', '1.3.1', '1.3.9', '1.4:1.5', '1.3:1.4']
)
assert_canonical(['1.3:1.5'], ['1.3, 1.3.1,1.3.9,1.4:1.5,1.3:1.4'])
assert_canonical(['1.3:1.5'], ['1.3, 1.3.1,1.3.9,1.4 : 1.5 , 1.3 : 1.4'])
assert_canonical([':'], [':,1.3, 1.3.1,1.3.9,1.4 : 1.5 , 1.3 : 1.4'])
def test_intersection():
check_intersection('2.5', '1.0:2.5', '2.5:3.0')
check_intersection('2.5:2.7', '1.0:2.7', '2.5:3.0')
check_intersection('0:1', ':', '0:1')
check_intersection(['1.0', '2.5:2.7'], ['1.0:2.7'], ['2.5:3.0', '1.0'])
check_intersection(['2.5:2.7'], ['1.1:2.7'], ['2.5:3.0', '1.0'])
check_intersection(['0:1'], [':'], ['0:1'])
def test_intersect_with_containment():
check_intersection('1.6.5', '1.6.5', ':1.6')
check_intersection('1.6.5', ':1.6', '1.6.5')
check_intersection('1.6:1.6.5', ':1.6.5', '1.6')
check_intersection('1.6:1.6.5', '1.6', ':1.6.5')
def test_union_with_containment():
check_union(':1.6', '1.6.5', ':1.6')
check_union(':1.6', ':1.6', '1.6.5')
check_union(':1.6', ':1.6.5', '1.6')
check_union(':1.6', '1.6', ':1.6.5')
check_union(':', '1.0:', ':2.0')
check_union('1:4', '1:3', '2:4')
check_union('1:4', '2:4', '1:3')
# Tests successor/predecessor case.
check_union('1:4', '1:2', '3:4')
def test_basic_version_satisfaction():
assert_satisfies('4.7.3', '4.7.3')
assert_satisfies('4.7.3', '4.7')
assert_satisfies('4.7.3b2', '4.7')
assert_satisfies('4.7b6', '4.7')
assert_satisfies('4.7.3', '4')
assert_satisfies('4.7.3b2', '4')
assert_satisfies('4.7b6', '4')
assert_does_not_satisfy('4.8.0', '4.9')
assert_does_not_satisfy('4.8', '4.9')
assert_does_not_satisfy('4', '4.9')
def test_basic_version_satisfaction_in_lists():
assert_satisfies(['4.7.3'], ['4.7.3'])
assert_satisfies(['4.7.3'], ['4.7'])
assert_satisfies(['4.7.3b2'], ['4.7'])
assert_satisfies(['4.7b6'], ['4.7'])
assert_satisfies(['4.7.3'], ['4'])
assert_satisfies(['4.7.3b2'], ['4'])
assert_satisfies(['4.7b6'], ['4'])
assert_does_not_satisfy(['4.8.0'], ['4.9'])
assert_does_not_satisfy(['4.8'], ['4.9'])
assert_does_not_satisfy(['4'], ['4.9'])
def test_version_range_satisfaction():
assert_satisfies('4.7b6', '4.3:4.7')
assert_satisfies('4.3.0', '4.3:4.7')
assert_satisfies('4.3.2', '4.3:4.7')
assert_does_not_satisfy('4.8.0', '4.3:4.7')
assert_does_not_satisfy('4.3', '4.4:4.7')
assert_satisfies('4.7b6', '4.3:4.7')
assert_does_not_satisfy('4.8.0', '4.3:4.7')
def test_version_range_satisfaction_in_lists():
assert_satisfies(['4.7b6'], ['4.3:4.7'])
assert_satisfies(['4.3.0'], ['4.3:4.7'])
assert_satisfies(['4.3.2'], ['4.3:4.7'])
assert_does_not_satisfy(['4.8.0'], ['4.3:4.7'])
assert_does_not_satisfy(['4.3'], ['4.4:4.7'])
assert_satisfies(['4.7b6'], ['4.3:4.7'])
assert_does_not_satisfy(['4.8.0'], ['4.3:4.7'])
def test_satisfaction_with_lists():
assert_satisfies('4.7', '4.3, 4.6, 4.7')
assert_satisfies('4.7.3', '4.3, 4.6, 4.7')
assert_satisfies('4.6.5', '4.3, 4.6, 4.7')
assert_satisfies('4.6.5.2', '4.3, 4.6, 4.7')
assert_does_not_satisfy('4', '4.3, 4.6, 4.7')
assert_does_not_satisfy('4.8.0', '4.2, 4.3:4.7')
assert_satisfies('4.8.0', '4.2, 4.3:4.8')
assert_satisfies('4.8.2', '4.2, 4.3:4.8')
def test_formatted_strings():
versions = (
'1.2.3b', '1_2_3b', '1-2-3b',
'1.2-3b', '1.2_3b', '1-2.3b',
'1-2_3b', '1_2.3b', '1_2-3b'
)
for item in versions:
v = Version(item)
assert v.dotted.string == '1.2.3b'
assert v.dashed.string == '1-2-3b'
assert v.underscored.string == '1_2_3b'
assert v.joined.string == '123b'
assert v.dotted.dashed.string == '1-2-3b'
assert v.dotted.underscored.string == '1_2_3b'
assert v.dotted.dotted.string == '1.2.3b'
assert v.dotted.joined.string == '123b'
def test_up_to():
v = Version('1.23-4_5b')
assert v.up_to(1).string == '1'
assert v.up_to(2).string == '1.23'
assert v.up_to(3).string == '1.23-4'
assert v.up_to(4).string == '1.23-4_5'
assert v.up_to(5).string == '1.23-4_5b'
assert v.up_to(-1).string == '1.23-4_5'
assert v.up_to(-2).string == '1.23-4'
assert v.up_to(-3).string == '1.23'
assert v.up_to(-4).string == '1'
assert v.up_to(2).dotted.string == '1.23'
assert v.up_to(2).dashed.string == '1-23'
assert v.up_to(2).underscored.string == '1_23'
assert v.up_to(2).joined.string == '123'
assert v.dotted.up_to(2).string == '1.23' == v.up_to(2).dotted.string
assert v.dashed.up_to(2).string == '1-23' == v.up_to(2).dashed.string
assert v.underscored.up_to(2).string == '1_23'
assert v.up_to(2).underscored.string == '1_23'
assert v.up_to(2).up_to(1).string == '1'
def test_repr_and_str():
def check_repr_and_str(vrs):
a = Version(vrs)
assert repr(a) == "Version('" + vrs + "')"
b = eval(repr(a))
assert a == b
assert str(a) == vrs
assert str(a) == str(b)
check_repr_and_str('1.2.3')
check_repr_and_str('R2016a')
check_repr_and_str('R2016a.2-3_4')
def test_len():
a = Version('1.2.3.4')
assert len(a) == len(a.version)
assert(len(a) == 4)
b = Version('2018.0')
assert(len(b) == 2)
def test_get_item():
a = Version('0.1_2-3')
assert isinstance(a[1], int)
# Test slicing
b = a[0:2]
assert isinstance(b, Version)
assert b == Version('0.1')
assert repr(b) == "Version('0.1')"
assert str(b) == '0.1'
b = a[0:3]
assert isinstance(b, Version)
assert b == Version('0.1_2')
assert repr(b) == "Version('0.1_2')"
assert str(b) == '0.1_2'
b = a[1:]
assert isinstance(b, Version)
assert b == Version('1_2-3')
assert repr(b) == "Version('1_2-3')"
assert str(b) == '1_2-3'
# Raise TypeError on tuples
with pytest.raises(TypeError):
b.__getitem__(1, 2)
|
py
|
1a56bad7fb0da39df785e33f02f36337d4f4992e
|
#!/usr/bin/env python3
'''
Author: Tom McLaughlin
Email: [email protected]
Description: DynamoDB tables
'''
import os
import boto3
from boto3.dynamodb.conditions import Key
from .errors import ApiAuthSvcBaseError
from . import logging
_logger = logging.get_logger(__name__)
DDB_TABLE_NAME = os.environ.get('DDB_TABLE_NAME', '')
DDB_HASH_KEY = os.environ.get('DDB_HASH_KEY', '')
DDB_RANGE_KEY = os.environ.get('DDB_RANGE_KEY', '')
class DynamoDBTableBaseError(Exception):
'''Base exception class'''
class DynamoDBTableCheckItemError(DynamoDBTableBaseError):
'''Error checking item existence in dynamoDB'''
msg = "Error checking item existence in DynamoDB"
def __init__(self, message=msg) -> None:
super().__init__(message)
class DynamoDBTableGetItemError(DynamoDBTableBaseError):
'''Error Getting item in dynamoDB'''
msg = "Unable to get item from DynamoDB"
def __init__(self, message=msg) -> None:
super().__init__(message)
class DynamoDBTablePutItemError(DynamoDBTableBaseError):
'''Error Putting item in dynamoDB'''
msg = "Unable to write item to DynamoDB"
def __init__(self, message=msg) -> None:
super().__init__(message)
class DynamoDBTableQueryItemError(DynamoDBTableBaseError):
'''Error querying item in dynamoDB'''
msg = "Unable to query item in DynamoDB"
def __init__(self, message=msg) -> None:
super().__init__(message)
class DynamoDBTable:
'''DynamoDB Table'''
def __init__(self, table_name: str = DDB_TABLE_NAME, hash_key: str = DDB_HASH_KEY, range_key: str = DDB_RANGE_KEY) -> None:
self._table_name = table_name
self._hash_key = hash_key
self._range_key = range_key
self._ddb_resource = boto3.resource('dynamodb')
self._ddb_table = self._ddb_resource.Table(self._table_name)
@property
def table_name(self) -> str:
'''DDB table name.'''
return self._table_name
@property
def hash_key(self) -> str:
'''DDB table hash key'''
return self._hash_key
@property
def range_key(self) -> str:
'''DDB table range key'''
return self._range_key
def check_item_exists(self, item_id) -> bool:
'''Check if item already exists'''
try:
resp = self._ddb_table.query(
Select='COUNT',
KeyConditionExpression=Key(self._hash_key).eq(item_id)
)
except Exception as e:
_logger.exception(e)
raise DynamoDBTableCheckItemError
return resp.get('Count') > 0
def get_item(self, item_id, range_value, consistent_read=False) -> dict:
'''Return an item'''
_logger.info(item_id)
try:
items = self._ddb_table.get_item(
Key={
self._hash_key: item_id,
self._range_key: range_value
},
ConsistentRead=consistent_read
)
except Exception as e:
_logger.exception(e)
raise DynamoDBTableGetItemError
return items.get('Item')
def put_item(self, item: dict) -> None:
'''Put item in DDB'''
try:
self._ddb_table.put_item(
Item=item
)
except Exception as e:
_logger.exception(e)
raise DynamoDBTablePutItemError
def query_by_item_id(self, item_id, start_key: dict = {}) -> list:
'''query for item'''
item_list = []
query_kwargs = {
'KeyConditionExpression': Key(self._hash_key).eq(item_id)
}
if bool(start_key):
query_kwargs['ExclusiveStartKey'] = start_key
try:
resp = self._ddb_table.query(**query_kwargs)
except Exception as e:
_logger.exception(e)
raise DynamoDBTableQueryItemError
item_list += resp.get('Items')
if bool(resp.get('LastEvaluatedKey')):
item_list += self.query_by_item_id(item_id, resp.get('LastEvaluatedKey'))
return item_list
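# Minimal usage sketch (illustrative; assumes the table and key names from the
# DDB_* environment variables exist and AWS credentials are configured):
#
#     table = DynamoDBTable()
#     table.put_item({table.hash_key: 'user-123', table.range_key: 'token-1'})
#     if table.check_item_exists('user-123'):
#         items = table.query_by_item_id('user-123')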
|
py
|
1a56bbc0a60358a5f808fddba58c159ecf28a1c2
|
#!/usr/bin/env pnpython4
# -*- coding: iso-8859-15 -*-
#
# Read Fairfield SEG-D (Version 1.6) from the Sweetwater experiment.
# Write PH5
#
# Steve Azevedo, May 2014
# Modified to read SEG-D from 3C's, July 2016
#
import os
import sys
import logging
import time
import json
import re
from math import modf
import warnings
from pyproj import Proj, transform
import construct
import bcd_py
from tables import NaturalNameWarning
from ph5.core import experiment, columns, segdreader, segdreader_smartsolo
from ph5 import LOGGING_FORMAT
warnings.filterwarnings('ignore', category=NaturalNameWarning)
PROG_VERSION = "2021.159"
LOGGER = logging.getLogger(__name__)
MAX_PH5_BYTES = 1073741824 * 100.  # 100 GB (1024**3 bytes * 100)
os.environ['TZ'] = 'GMT'
time.tzset()
APPEND = 1 # Number of SEG-D events to append to make 1 ph5 event.
DAS_INFO = {}
MAP_INFO = {}
# Current raw file processing
F = None
# RE for mini files
miniPH5RE = re.compile(r".*miniPH5_(\d\d\d\d\d)\.ph5")
# -2.5V to 2.5V
mV_full_scale = 5000
# 24-bit
counts_full_scale = 2**24
def bitweight(db):
# where db = 20 * log10(V1 / V2)
return (mV_full_scale / (10.**(db/20.))) / counts_full_scale
dbs = (0, 6, 12, 18, 24, 30, 36)
LSB_MAP = {db: bitweight(db) for db in dbs}
LSB = LSB_MAP[36]
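# Worked example: at 36 dB of preamp gain,
# bitweight(36) = (5000 / 10**(36 / 20.)) / 2**24 ~ 4.72e-06 mV/count,
# which is the module-level default LSB above.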
# Manufacturers codes
FAIRFIELD = 20
OTHER = 0
def read_manufacture_code(filename):
""" read byte 17 for manufacture code"""
f = open(filename, 'rb')
f.seek(16)
byte = f.read(1)
swap = True
if sys.byteorder == 'big':
swap = False
bin = construct.BitStruct("BIN",
construct.BitField(
"field", 8, swapped=swap))
bcd = bin.parse(byte)['field']
if sys.byteorder == 'little':
bcd = construct.ULInt64("xxx").build(bcd)
else:
bcd = construct.UBInt64("xxx").build(bcd)
code = bcd_py.bcd2int(bcd, 0, 2)
f.close()
return code
def get_segdreader(filename, manucode):
"""
get the segdreader from manufacture code infile
or from --manufacturers_code argument
"""
KNOWN_CODE = {20: (segdreader, 'FairField'),
61: (segdreader_smartsolo, 'SmartSolo')}
req_code_list = ["%s for %s format" % (k, KNOWN_CODE[k][1])
for k in KNOWN_CODE.keys()]
req_code_str = ("Please give flag --manufacturers_code either "
' or '.join(req_code_list))
manu = read_manufacture_code(filename)
if manu in KNOWN_CODE.keys():
reader = KNOWN_CODE[manu][0]
else:
try:
if manucode in KNOWN_CODE.keys():
reader = KNOWN_CODE[manucode][0]
else:
LOGGER.error("manufacturers_code flag {0} is not one of "
"the known codes: {1}.\n{2}".
format(manucode, KNOWN_CODE.keys(), req_code_str))
raise Exception
except IndexError:
LOGGER.error("The manufacture code recorded in file {0} is not "
"one of the known codes: {1}.\n{2}".
format(manucode, KNOWN_CODE.keys(), req_code_str))
raise Exception
return reader
#
# To hold table rows and keys
#
class Rows_Keys(object):
__slots__ = ('rows', 'keys')
def __init__(self, rows=None, keys=None):
self.rows = rows
self.keys = keys
def set(self, rows=None, keys=None):
if rows is not None:
self.rows = rows
if keys is not None:
self.keys = keys
class Index_t_Info(object):
__slots__ = ('das', 'ph5file', 'ph5path', 'startepoch', 'stopepoch')
def __init__(self, das, ph5file, ph5path, startepoch, stopepoch):
self.das = das
self.ph5file = ph5file
self.ph5path = ph5path
self.startepoch = startepoch
self.stopepoch = stopepoch
class Resp(object):
__slots__ = ('lines', 'keys', 't')
def __init__(self, t):
self.t = t
self.update()
def update(self):
self.lines, self.keys = self.t.read_responses()
def match(self, bw, gain):
for ln in self.lines:
if ln['bit_weight/value_d'] == bw and ln['gain/value_i'] == gain:
return ln['n_i']
return -1
def next_i(self):
return len(self.lines)
class Trace(object):
__slots__ = ("trace", "headers")
def __init__(self, trace, headers):
self.trace = trace
self.headers = headers
def read_infile(infile):
''' Read list of input SEG-D files from a file '''
global FILES
def fn_sort(a, b):
return cmp(os.path.basename(a), os.path.basename(b))
try:
fh = file(infile)
except Exception:
LOGGER.warning("Failed to open %s\n" % infile)
return
while True:
line = fh.readline()
if not line:
break
line = line.strip()
if not line:
continue
if line[0] == '#':
continue
FILES.append(line)
FILES.sort(fn_sort)
def get_args():
global PH5, FILES, EVERY, NUM_MINI, TSPF, UTM, FIRST_MINI, APPEND,\
MANUFACTURERS_CODE
TSPF = False
from optparse import OptionParser
class MyParser(OptionParser):
"""
Override format_epilog to allow newlines
"""
def format_epilog(self, formatter):
return self.epilog
oparser = MyParser()
oparser.usage = "Version: {0} Usage: segdtoph5 [options]".format(
PROG_VERSION)
oparser.epilog = (
"Notice:\n"
"\tData of a Das can't be stored in more than one mini file.\n\n"
"\tUpdate astropy package for the lastes leap second table used in "
"converting time from GPS to UTC in SmartSolo's:\n"
"\t\tconda update astropy\n")
oparser.add_option("-r", "--raw", dest="rawfile",
help="Fairfield SEG-D v1.6 file.", metavar="raw_file")
oparser.add_option("-f", "--file",
action="store", dest="infile", type="string",
help="File containing list of Fairfield SEG-D\
v1.6 file names.",
metavar="file_list_file")
oparser.add_option("-n", "--nickname", dest="outfile",
help="The ph5 file prefix (experiment nick name).",
metavar="output_file_prefix")
oparser.add_option("-U", "--UTM", dest="utm_zone",
help="Locations in SEG-D file are UTM, --UTM=utmzone."
" Zone number and N or S designation"
" eg 13N",
type='str', default=0,
metavar="utm_zone")
oparser.add_option("-T", "--TSPF", dest="texas_spc",
help="Locations are in texas state plane coordinates.",
action='store_true', default=False)
oparser.add_option("-M", "--num_mini",
help=("Create a given number of miniPH5 files."
" Ex: -M 38"),
metavar="num_mini", type='int', default=None)
oparser.add_option("-S", "--first_mini",
help=("The index of the first miniPH5_xxxxx.ph5 "
"file of all. Ex: -S 5"),
metavar="first_mini", type='int', default=1)
oparser.add_option("-c", "--combine", dest="combine",
help="Combine this number if SEG-D traces to one\
PH5 trace.",
metavar="combine", type='int', default=APPEND)
oparser.add_option("-E", "--allevents", action="store_true",
dest="all_events",
default=False, metavar="all_events")
oparser.add_option("--manufacturers_code", dest="manufacturers_code",
help="Manufacturers code. Defaults to 20 for Fairfield.\
Most likely will not work for SEG-D written by other\
data loggers,",
type='int', default=None)
options, args = oparser.parse_args()
if options.rawfile and options.infile:
oparser.error("argument -f/--file: not allowed with argument -r/--raw")
FILES = []
PH5 = None
EVERY = options.all_events
NUM_MINI = options.num_mini
FIRST_MINI = options.first_mini
UTM = options.utm_zone
TSPF = options.texas_spc
APPEND = options.combine
MANUFACTURERS_CODE = options.manufacturers_code
if options.infile is not None:
read_infile(options.infile)
elif options.rawfile is not None:
FILES.append(options.rawfile)
if len(FILES) == 0:
raise Exception("No input file given.\n")
# Set output file
if options.outfile is not None:
PH5 = options.outfile
else:
raise Exception("No outfile (PH5) given.\n")
setLogger()
def setLogger():
if LOGGER.handlers != []:
LOGGER.removeHandler(LOGGER.handlers[0])
# Write log to file
ch = logging.FileHandler("segd2ph5.log")
ch.setLevel(logging.INFO)
# Add formatter
formatter = logging.Formatter(LOGGING_FORMAT)
ch.setFormatter(formatter)
LOGGER.addHandler(ch)
def initializeExperiment():
global EX
EX = experiment.ExperimentGroup(nickname=PH5)
EDIT = True
EX.ph5open(EDIT)
EX.initgroup()
def openPH5(filename):
''' Open PH5 file, miniPH5_xxxxx.ph5 '''
try:
if EXREC.ph5.isopen:
if EXREC.filename != filename:
EXREC.ph5close()
else:
return EXREC
except BaseException:
pass
exrec = experiment.ExperimentGroup(nickname=filename)
exrec.ph5open(True)
exrec.initgroup()
return exrec
def update_index_t_info(starttime, samples, sps):
''' Update info that gets saved in Index_t '''
global DAS_INFO, MAP_INFO
ph5file = EXREC.filename
ph5path = '/Experiment_g/Receivers_g/' + \
EXREC.ph5_g_receivers.current_g_das._v_name
ph5map = '/Experiment_g/Maps_g/' + EXREC.ph5_g_maps.current_g_das._v_name
das = ph5path[32:]
stoptime = starttime + (float(samples) / float(sps))
di = Index_t_Info(das, ph5file, ph5path, starttime, stoptime)
dm = Index_t_Info(das, ph5file, ph5map, starttime, stoptime)
if das not in DAS_INFO:
DAS_INFO[das] = []
MAP_INFO[das] = []
DAS_INFO[das].append(di)
MAP_INFO[das].append(dm)
LOGGER.info(
"DAS: {0} File: {1} First Sample: {2} Last Sample: {3}".format(
das, ph5file, time.ctime(starttime), time.ctime(stoptime)))
def update_external_references():
''' Update external references in master.ph5 to
miniPH5 files in Receivers_t '''
global F
LOGGER.info("Updating external references...")
n = 0
for i in INDEX_T_DAS.rows:
external_file = i['external_file_name_s'][2:]
external_path = i['hdf5_path_s']
target = external_file + ':' + external_path
external_group = external_path.split('/')[3]
# Nuke old node
try:
group_node = EX.ph5.get_node(external_path)
group_node.remove()
except Exception as e:
pass
# Re-create node
try:
EX.ph5.create_external_link(
'/Experiment_g/Receivers_g', external_group, target)
n += 1
except Exception as e:
# pass
LOGGER.error("{0}\n".format(e.message))
LOGGER.info("done, {0} das nodes recreated.\n".format(n))
n = 0
for i in INDEX_T_MAP.rows:
external_file = i['external_file_name_s'][2:]
external_path = i['hdf5_path_s']
target = external_file + ':' + external_path
external_group = external_path.split('/')[3]
# Nuke old node
try:
group_node = EX.ph5.get_node(external_path)
group_node.remove()
except Exception as e:
pass
# Re-create node
try:
EX.ph5.create_external_link(
'/Experiment_g/Maps_g', external_group, target)
n += 1
except Exception as e:
# pass
LOGGER.error("{0}\n".format(e.message))
LOGGER.info("done, {0} map nodes recreated.\n".format(n))
def get_current_data_only(size_of_data, das=None):
''' Return opened file handle for data only PH5 file that will be
less than MAX_PH5_BYTES after raw data is added to it.
'''
def sstripp(s):
s = s.replace('.ph5', '')
s = s.replace('./', '')
return s
def smallest():
''' Return the name of the smallest miniPH5_xxxxx.ph5 '''
minifiles = filter(miniPH5RE.match, os.listdir('.'))
tiny = minifiles[0]
for f in minifiles:
if os.path.getsize(f) < os.path.getsize(tiny):
tiny = f
return tiny
das = str(das)
newestfile = ''
# Get the most recent data only PH5 file or match DAS serialnumber
n = 0
for index_t in INDEX_T_DAS.rows:
# This DAS already exists in a ph5 file
if index_t['serial_number_s'] == das:
newestfile = sstripp(index_t['external_file_name_s'])
return openPH5(newestfile)
# miniPH5_xxxxx.ph5 with largest xxxxx
mh = miniPH5RE.match(index_t['external_file_name_s'])
if n < int(mh.groups()[0]):
newestfile = sstripp(index_t['external_file_name_s'])
n = int(mh.groups()[0])
if not newestfile:
# This is the first file added
return openPH5('miniPH5_{0:05d}'.format(FIRST_MINI))
size_of_exrec = os.path.getsize(newestfile + '.ph5')
if NUM_MINI is not None:
fm = FIRST_MINI - 1
if (int(newestfile[8:13]) - fm) < NUM_MINI:
newestfile = "miniPH5_{0:05d}".format(int(newestfile[8:13]) + 1)
else:
small = sstripp(smallest())
return openPH5(small)
elif (size_of_data + size_of_exrec) > MAX_PH5_BYTES:
newestfile = "miniPH5_{0:05d}".format(int(newestfile[8:13]) + 1)
return openPH5(newestfile)
def getLOG():
''' Create a open a new and unique header file under Maps_g/Das_g_
/Sta_g_
/Evt_g_
/Hdr_a_
'''
current_das = EXREC.ph5_g_receivers.get_das_name()
g = EXREC.ph5_g_maps.newdas('Das_g_', current_das)
EXREC.ph5_g_maps.setcurrent(g)
try:
name = EXREC.ph5_g_maps.nextarray('Hdr_a_')
except TypeError:
return None
log_array = EXREC.ph5_g_maps.newearray(
name, description="SEG-D header entries: {0}".format(Das))
return log_array, name
def process_traces(rh, th, tr):
'''
Inputs:
rh -> reel headers
th -> first trace header
tr -> trace data
'''
def get_true_channel(SD):
if SD.manufacturer == 'FairfieldNodal':
'''
Orientation Code:
chan 1 -> N Changed to '1'
chan 2 -> E Changed to '2'
chan 3 -> Z
or
chan 1 -> Z
'''
# Find channel by mapping to streamer_cable_number
if rh.channel_set_to_streamer_cable_map[
th.trace_header.channel_set] \
== 0:
true_channel = th.trace_header.channel_set
else:
true_channel = rh.channel_set_to_streamer_cable_map[
th.trace_header.channel_set]
if SD.chan_sets_per_scan >= 3:
OM = {1: '1', 2: '2', 3: 'Z'}
elif SD.chan_sets_per_scan == 1:
OM = {1: 'Z'}
else:
OM = None
if OM is None:
orientation_code = true_channel
else:
orientation_code = OM[true_channel]
elif SD.manufacturer == 'SmartSolo':
channel_list = ['N', 'E', 'Z']
filename_parts = SD.name().split('.')
found_channel = False
true_channel = 0
orientation_code = None
for p in filename_parts:
if p in channel_list:
orientation_code = p
true_channel = channel_list.index(p) + 1
found_channel = True
break
if not found_channel:
LOGGER.warning(
"Neither E, N, nor Z could be found in the filename")
return true_channel, orientation_code
def get_raw_file_name(SD):
filename = SD.name()
if SD.manufacturer == 'SmartSolo':
channel_list = ['E', 'N', 'Z']
filename_parts = filename.split('.')
chanidx = -1
for c in channel_list:
try:
chanidx = filename_parts.index(c)
break
except ValueError:
pass
"""
Shorten filename to fit the field:
remove 'segd' at the end
remove second and decimal of second
add . in front of chan to show somethings have been removed
Ex: filename: 453005483.1.2021.03.15.16.00.00.000.E.segd
=> shorten: 453005483.1.2021.03.15.16.00..E
"""
filename_parts.remove('segd')
filename_parts[chanidx] = '.' + filename_parts[chanidx]
filename_parts.pop(chanidx - 1) # remove decimal part
filename_parts.pop(chanidx - 2) # remove second part
filename = '.'.join(filename_parts)
return os.path.basename(filename)
def process_das():
global LSB
'''
'''
p_das_t = {}
''' Das_t
receiver_table_n_i
response_table_n_i
time_table_n_i
time/
type_s
epoch_l
ascii_s
micro_seconds_i
event_number_i
channel_number_i
sample_rate_i
sample_rate_multiplier_i
sample_count_i
stream_number_i
raw_file_name_s
array_name_data_a
array_name_SOH_a
array_name_event_a
array_name_log_a
'''
# Check to see if group exists for this das, if not build it
das_g, das_t, receiver_t, time_t = EXREC.ph5_g_receivers.newdas(
str(Das))
# Build maps group (XXX)
EXREC.ph5_g_maps.newdas('Das_g_', str(Das))
if rh.general_header_blocks[0].chan_sets_per_scan == 1:
# Single channel
p_das_t['receiver_table_n_i'] = 0 # 0 -> Z
elif rh.general_header_blocks[0].chan_sets_per_scan >= 3:
# 1 (N node) -> 1 (N PH5), 2 (E Node)-> 2 (E PH5), 3 (Z Node) -> 0
# (Z PH5)
M = {1: 1, 2: 2, 3: 0}
p_das_t['receiver_table_n_i'] = M[get_true_channel(SD)[0]]
else:
p_das_t['receiver_table_n_i'] = 0 # 0 -> Z
LOGGER.warning(
"Header channel set: {0}. Check Receiver_t entries!".format(
th.trace_header.channel_set))
p_das_t['response_table_n_i'] = None
p_das_t['time_table_n_i'] = 0
p_das_t['time/type_s'] = 'BOTH'
try:
trace_epoch = th.trace_epoch
except Exception as e:
LOGGER.warning("Failed to read shot epoch: {0}.".format(e.message))
trace_epoch = 0.
f, i = modf(trace_epoch / 1000000.)
p_das_t['time/epoch_l'] = int(i)
p_das_t['time/ascii_s'] = time.ctime(p_das_t['time/epoch_l'])
p_das_t['time/micro_seconds_i'] = int(f * 1000000.)
p_das_t['event_number_i'] = th.event_number
p_das_t['channel_number_i'] = get_true_channel(SD)[0]
p_das_t['sample_rate_i'] = SD.sample_rate
p_das_t['sample_rate_multiplier_i'] = 1
p_das_t['sample_count_i'] = len(tr)
p_das_t['stream_number_i'] = 1
p_das_t['raw_file_name_s'] = get_raw_file_name(SD)
p_das_t['array_name_data_a'] = EXREC.ph5_g_receivers.nextarray(
'Data_a_')
p_response_t = {}
'''
n_i
gain/
units_s
value_i
bit_weight/
units_s
value_d
response_file_a
'''
try:
LSB = LSB_MAP[th.preamp_gain_db]
n_i = RESP.match(LSB, th.preamp_gain_db)
except Exception as e:
n_i = 0
p_response_t['gain/units_s'] = 'dB'
try:
p_response_t['gain/value_i'] = th.preamp_gain_db
except Exception as e:
LOGGER.warning(
"Failed to read trace pre amp gain: {0}.".format(e.message))
p_response_t['gain/value_i'] = 0.
p_response_t['gain/units_s'] = 'Unknown'
p_response_t['bit_weight/units_s'] = 'mV/count'
p_response_t['bit_weight/value_d'] = LSB
if n_i < 0:
n_i = RESP.next_i()
p_response_t['n_i'] = n_i
EX.ph5_g_responses.populateResponse_t(p_response_t)
RESP.update()
p_das_t['response_table_n_i'] = n_i
EXREC.ph5_g_receivers.populateDas_t(p_das_t)
des = "Epoch: " + str(p_das_t['time/epoch_l']) + \
" Channel: " + str(p_das_t['channel_number_i'])
# Write trace data here
try:
if SD.manufacturer == 'FairfieldNodal':
# Convert to counts
tr_counts = tr / LSB
EXREC.ph5_g_receivers.newarray(
p_das_t['array_name_data_a'], tr_counts, dtype='int32',
description=des)
elif SD.manufacturer == 'SmartSolo':
# SmartSolo is recorded by mV
EXREC.ph5_g_receivers.newarray(
p_das_t['array_name_data_a'], tr, dtype='float32',
description=des)
except Exception as e:
# Failed, leave as float
LOGGER.warning(
"Could not convert trace to counts. max: {1},\
min {2}\n{0}".format(
e.message, tr.max(), tr.min()))
p_response_t['bit_weight/value_d'] = 1.
EXREC.ph5_g_receivers.newarray(
p_das_t['array_name_data_a'], tr, dtype='float32',
description=des)
update_index_t_info(p_das_t['time/epoch_l'] + (
float(p_das_t['time/micro_seconds_i']) / 1000000.),
p_das_t['sample_count_i'],
p_das_t['sample_rate_i'] / p_das_t[
'sample_rate_multiplier_i'])
def process_array():
p_array_t = {}
def seen_sta():
if line not in ARRAY_T:
return False
elif Das not in ARRAY_T[line]:
return False
elif dtime not in ARRAY_T[line][Das]:
return False
elif chan_set in ARRAY_T[line][Das][dtime]:
if not ARRAY_T[line][Das][dtime][chan_set]:
return False
else:
return True
'''
deploy_time/
type_s
epoch_l
ascii_s
micro_seconds_i
pickup_time/
type_s
epoch_l
ascii_s
micro_seconds_i
id_s
das/
manufacturer_s
model_s
serial_number_s
notes_s
sensor/
manufacturer_s
model_s
serial_number_s
notes_s
location/
coordinate_system_s
projection_s
ellipsoid_s
X/
units_s
value_d
Y/
units_s
value_d
Z/
units_s
value_d
description_s
channel_number_i
description_s
sample_rate_i
sample_rate_multiplier_i
'''
'''
Band Code:
1000 <= G < 5000
250 <= D < 1000
80 <= E < 250
10 <= S < 80
'''
if SD.sample_rate >= 1000:
band_code = 'G'
elif SD.sample_rate >= 250 and SD.sample_rate < 1000:
band_code = 'D'
elif SD.sample_rate >= 80 and SD.sample_rate < 250:
band_code = 'E'
elif SD.sample_rate >= 10 and SD.sample_rate < 80:
band_code = 'S'
else:
band_code = 'X'
'''
Instrument Code:
Changed from H to P at request from Akram
'''
instrument_code = 'P'
chan_set, orientation_code = get_true_channel(SD)
p_array_t['seed_band_code_s'] = band_code
p_array_t['seed_instrument_code_s'] = instrument_code
p_array_t['seed_orientation_code_s'] = orientation_code
p_array_t['seed_station_name_s'] = Das.split('X')[1]
p_array_t['sample_rate_i'] = SD.sample_rate
p_array_t['sample_rate_multiplier_i'] = 1
p_array_t['deploy_time/type_s'] = 'BOTH'
try:
f, i = modf(SD.deploy_epoch)
except Exception as e:
LOGGER.warning(
"Failed to read deploy epoch: {0}.".format(
e.message))
f = i = 0.
p_array_t['deploy_time/epoch_l'] = int(i)
p_array_t['deploy_time/ascii_s'] = time.ctime(int(i))
p_array_t['deploy_time/micro_seconds_i'] = int(f * 1000000.)
p_array_t['pickup_time/type_s'] = 'BOTH'
try:
f, i = modf(SD.pickup_epoch)
except Exception as e:
LOGGER.warning(
"Failed to read pickup epoch: {0}.".format(
e.message))
f = i = 0.
p_array_t['pickup_time/epoch_l'] = int(i)
p_array_t['pickup_time/ascii_s'] = time.ctime(int(i))
p_array_t['pickup_time/micro_seconds_i'] = int(f * 1000000.)
p_array_t['id_s'] = Das.split('X')[1]
# use manu_code to decide SMARTSOLO dasmodel
p_array_t['das/manufacturer_s'] = SD.manufacturer
try:
if SD.manufacturer == "SmartSolo":
p_array_t['das/model_s'] = 'SmartSolo IGU16'
elif SD.manufacturer == "FairfieldNodal":
if SD.chan_sets_per_scan >= 3:
p_array_t['das/model_s'] = "ZLAND 3C"
else:
p_array_t['das/model_s'] = 'ZLAND 1C'
except Exception as e:
LOGGER.warning(
"Failed to read channel sets per scan: {0}.".format(e.message))
p_array_t['das/model_s'] = 'zland-[13]C'
p_array_t['das/serial_number_s'] = Das
p_array_t[
'das/notes_s'] = "manufacturer and model not read from data file."
p_array_t['sensor/manufacturer_s'] = 'Geo Space'
p_array_t['sensor/model_s'] = 'GS-30CT'
p_array_t[
'sensor/notes_s'] = "manufacturer and model not read from file."
if SD.manufacturer == 'FairfieldNodal':
if TSPF:
p_array_t['location/description_s'] = (
"Converted from Texas State Plane FIPS zone 4202")
elif UTM:
p_array_t['location/description_s'] = (
"Converted from UTM Zone {0}".format(UTM))
else:
p_array_t['location/description_s'] = "Read from SEG-D as is."
else:
p_array_t['location/description_s'] = "Read from SEG-D as is."
p_array_t['location/coordinate_system_s'] = 'geographic'
p_array_t['location/projection_s'] = 'WGS84'
p_array_t['location/X/units_s'] = 'degrees'
p_array_t['location/X/value_d'] = LON
p_array_t['location/Y/units_s'] = 'degrees'
p_array_t['location/Y/value_d'] = LAT
p_array_t['location/Z/units_s'] = 'unknown'
try:
p_array_t['location/Z/value_d'] = th.ele / 10.
except Exception as e:
LOGGER.warning(
"Failed to read elevation: {0}.".format(e.message))
p_array_t['location/Z/value_d'] = 0.
p_array_t['channel_number_i'] = chan_set
try:
p_array_t['description_s'] = "DAS: {0}, Node ID: {1}".format(
Das, SD.id_number)
except Exception as e:
LOGGER.warning(
"Failed to read ID number: {0}.".format(
e.message))
try:
line = th.line_number
if line == -1:
line = 1
except Exception as e:
LOGGER.warning("Failed to read line number: {0}.".format(
e.message))
line = 0
dtime = p_array_t['deploy_time/epoch_l']
if line not in ARRAY_T:
ARRAY_T[line] = {}
if Das not in ARRAY_T[line]:
ARRAY_T[line][Das] = {}
if dtime not in ARRAY_T[line][Das]:
ARRAY_T[line][Das][dtime] = {}
if chan_set not in ARRAY_T[line][Das][dtime]:
ARRAY_T[line][Das][dtime][chan_set] = []
if not seen_sta():
ARRAY_T[line][Das][dtime][chan_set].append(p_array_t)
elif SD.manufacturer == "SmartSolo":
            # need to update the row after each trace is read
            # because the pickup time is updated based on trace_epoch
ARRAY_T[line][Das][dtime][chan_set][-1] = p_array_t
def process_reel_headers():
global RH
''' Save receiver record header information in\
Maps_g/Das_g_xxxxxxx/Hdr_a_xxxx file '''
def process(hdr, header_type):
ll = [{'FileType': 'SEG-D', 'HeaderType': header_type}, hdr]
log_array.append(json.dumps(
ll, sort_keys=True, indent=4).split('\n'))
log_array, log_name = getLOG()
for i in range(len(rh.general_header_blocks)):
ht = "General {0}".format(i+1)
process(rh.general_header_blocks[i], ht)
# Channel set descriptors
for i in range(len(rh.channel_set_descriptor)):
ht = "Channel Set {0}".format(i + 1)
            process(rh.channel_set_descriptor[i], ht)
for i in range(len(rh.extended_headers)):
ht = "Extended {0}".format(i)
process(rh.extended_headers[i], ht)
# External header
process(rh.external_header, "External Header")
# External header shot
for i in range(len(rh.external_header_shot)):
ht = "External Shot {0}".format(i + 1)
process(rh.external_header_shot[i], ht)
RH = True
def process_trace_header():
''' Save trace header information in\
Maps_g/Das_g_xxxxxxx/Hdr_a_xxxx file '''
def process(hdr, header_type):
global TRACE_JSON
ll = [{'FileType': 'SEG-D', 'HeaderType': 'trace',
'HeaderSubType': header_type}, hdr]
TRACE_JSON.append(json.dumps(
ll, sort_keys=True, indent=4).split('\n'))
process(th.trace_header, "Trace Header")
for i in range(len(th.trace_header_N)):
ht = "Header N-{0}".format(i + 1)
process(th.trace_header_N[i], ht)
process_das()
process_array()
if not RH:
process_reel_headers()
process_trace_header()
def write_arrays(SD, Array_t):
''' Write /Experiment_g/Sorts_g/Array_t_xxx '''
lines = sorted(Array_t.keys())
# Loop through arrays/lines
for line in lines:
name = "Array_t_{0:03d}".format(int(line))
a = EX.ph5_g_sorts.newArraySort(name)
das_list = sorted(Array_t[line].keys())
# Loop through das_list
for das in das_list:
if SD.manufacturer == 'SmartSolo':
Array_t[line][das] = combine_array_entries(
name, Array_t[line][das])
dtimes = sorted(Array_t[line][das].keys())
# Loop through deploying times
for dtime in dtimes:
chan_sets = sorted(Array_t[line][das][dtime].keys())
# Loop through channel sets
for chan_set in chan_sets:
try:
for array_t in Array_t[line][das][dtime][chan_set]:
columns.populate(a, array_t)
except Exception as e:
print(e.message)
def combine_array_entries(aName, aOfDas):
"""
:para aName: "Array_t_xxx" to add to warning message
:param aOfDas: {dtime: {c:[entry]}} in which each dtime is an entry
:return aOnDeployTimes which has the same structure of aOfDas but the
times are combined if gap less than 2m
"""
aOnChannels = {} # {c_i: list of entries according to dtimes' order}
dtimes = sorted(aOfDas.keys())
for dtime in dtimes:
chan_sets = sorted(aOfDas[dtime].keys())
for c in chan_sets:
if c not in aOnChannels:
aOnChannels[c] = []
for entry in aOfDas[dtime][c]:
aOnChannels[c].append(entry)
    # same structure as aOfDas, but entries are combined when the deploy time of
    # the current entry does not come after the pickup time of the previous one
    # (contiguous or overlapping): {dtime: {c: [combined entry]}}
aOnDeployTimes = {}
for c in aOnChannels:
prevPickupTime = 0
currDeployTime = 0
dEntries = aOnChannels[c]
for d in dEntries:
deployTime = d['deploy_time/epoch_l']
if deployTime > prevPickupTime:
currDeployTime = deployTime
if deployTime not in aOnDeployTimes:
aOnDeployTimes[deployTime] = {}
if c not in aOnDeployTimes[deployTime]:
aOnDeployTimes[deployTime][c] = [d]
else:
uEntry = aOnDeployTimes[currDeployTime][c][0]
msg = "Das %s - %s - station %s - chan %s: " % (
d['das/serial_number_s'], aName,
d['id_s'], d['channel_number_i'])
msg += "Combine %s"
msg += ("entry [%s - %s] into previous entry [%s - %s]" %
(d['deploy_time/ascii_s'],
d['pickup_time/ascii_s'],
uEntry['deploy_time/ascii_s'],
uEntry['pickup_time/ascii_s']))
descr = ""
if deployTime < prevPickupTime:
descr = "overlapping "
msg %= descr
LOGGER.warning(msg)
uEntry['pickup_time/epoch_l'] = d['pickup_time/epoch_l']
uEntry['pickup_time/ascii_s'] = d['pickup_time/ascii_s']
uEntry['pickup_time/micro_seconds_i'] = d['pickup_time/'
'micro_seconds_i']
prevPickupTime = d['pickup_time/epoch_l']
return aOnDeployTimes
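# Illustrative sketch only (not part of the original conversion flow): shows how
# combine_array_entries() merges two contiguous SmartSolo entries on one channel.
# The DAS/station values below are hypothetical.
def _combine_array_entries_example():
    entry1 = {'das/serial_number_s': '1X101', 'id_s': '101', 'channel_number_i': 1,
              'deploy_time/epoch_l': 100, 'deploy_time/ascii_s': time.ctime(100),
              'pickup_time/epoch_l': 200, 'pickup_time/ascii_s': time.ctime(200),
              'pickup_time/micro_seconds_i': 0}
    entry2 = dict(entry1)
    entry2.update({'deploy_time/epoch_l': 200, 'deploy_time/ascii_s': time.ctime(200),
                   'pickup_time/epoch_l': 300, 'pickup_time/ascii_s': time.ctime(300)})
    # entry2 starts exactly when entry1 is picked up, so the two collapse into a
    # single entry deployed at 100 and picked up at 300.
    return combine_array_entries('Array_t_001',
                                 {100: {1: [entry1]}, 200: {1: [entry2]}})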
def writeINDEX():
''' Write /Experiment_g/Receivers_g/Index_t '''
global DAS_INFO, MAP_INFO, INDEX_T_DAS, INDEX_T_MAP
dass = sorted(DAS_INFO.keys())
for das in dass:
di = {}
mi = {}
start = sys.maxsize
stop = 0.
dm = [(d, m) for d in DAS_INFO[das] for m in MAP_INFO[das]]
for d, m in dm:
di['external_file_name_s'] = d.ph5file
mi['external_file_name_s'] = m.ph5file
di['hdf5_path_s'] = d.ph5path
mi['hdf5_path_s'] = m.ph5path
di['serial_number_s'] = das
mi['serial_number_s'] = das
if d.startepoch < start:
start = d.startepoch
if d.stopepoch > stop:
stop = d.stopepoch
di['time_stamp/epoch_l'] = int(time.time())
mi['time_stamp/epoch_l'] = int(time.time())
di['time_stamp/micro_seconds_i'] = 0
mi['time_stamp/micro_seconds_i'] = 0
di['time_stamp/type_s'] = 'BOTH'
mi['time_stamp/type_s'] = 'BOTH'
di['time_stamp/ascii_s'] = time.ctime(di['time_stamp/epoch_l'])
mi['time_stamp/ascii_s'] = time.ctime(mi['time_stamp/epoch_l'])
di['start_time/epoch_l'] = int(modf(start)[1])
mi['start_time/epoch_l'] = int(modf(start)[1])
di['start_time/micro_seconds_i'] = int(modf(start)[0] * 1000000)
mi['start_time/micro_seconds_i'] = int(modf(start)[0] * 1000000)
di['start_time/type_s'] = 'BOTH'
mi['start_time/type_s'] = 'BOTH'
di['start_time/ascii_s'] = time.ctime(start)
mi['start_time/ascii_s'] = time.ctime(start)
di['end_time/epoch_l'] = modf(stop)[1]
mi['end_time/epoch_l'] = modf(stop)[1]
di['end_time/micro_seconds_i'] = int(modf(stop)[0] * 1000000)
mi['end_time/micro_seconds_i'] = int(modf(stop)[0] * 1000000)
di['end_time/type_s'] = 'BOTH'
mi['end_time/type_s'] = 'BOTH'
di['end_time/ascii_s'] = time.ctime(stop)
mi['end_time/ascii_s'] = time.ctime(stop)
EX.ph5_g_receivers.populateIndex_t(di)
EX.ph5_g_maps.populateIndex_t(mi)
rows, keys = EX.ph5_g_receivers.read_index()
INDEX_T_DAS = Rows_Keys(rows, keys)
rows, keys = EX.ph5_g_maps.read_index()
INDEX_T_MAP = Rows_Keys(rows, keys)
DAS_INFO = {}
MAP_INFO = {}
def txncsptolatlon(northing, easting):
'''
Sweetwater
Convert texas state plane coordinates in feet to
geographic coordinates, WGS84.
'''
# Texas NC state plane feet Zone 4202
sp = Proj(init='epsg:32038')
# WGS84, geographic
wgs = Proj(init='epsg:4326', proj='latlong')
# Texas SP coordinates: survey foot is 1200/3937 meters
lon, lat = transform(sp, wgs, easting * 0.30480060960121924,
northing * 0.30480060960121924)
return lat, lon
def utmcsptolatlon(northing, easting):
'''
Mount Saint Helens
Convert UTM to
geographic coordinates, WGS84.
'''
# UTM
new_UTM = re.split(r'(\d+)', UTM)
utmzone = str(new_UTM[1])
if str(new_UTM[2]).upper() == 'N':
NS = 'north'
elif str(new_UTM[2]).upper() == 'S':
NS = 'south'
else:
NS = 'north'
utmc = Proj("+proj=utm +zone="+utmzone+" +"+NS+" +ellps=WGS84")
# WGS84, geographic
wgs = Proj(init='epsg:4326', proj='latlong')
#
lon, lat = transform(utmc, wgs, easting, northing)
return lat, lon
def get_latlon(manu, th):
try:
if manu == 'FairfieldNodal':
if UTM:
# UTM
LAT, LON = utmcsptolatlon(th.lat/10., th.lon/10.)
elif TSPF:
# Texas State Plane coordinates
LAT, LON = txncsptolatlon(th.lat/10., th.lon/10.)
else:
LAT = th.lat / 10.
LON = th.lon / 10.
elif manu == 'SmartSolo':
LAT = th.lat
LON = th.lon
except Exception as e:
LOGGER.warning(
"Failed to convert location: {0}.\n".format(
e.message))
return LAT, LON
def main():
import time
then = time.time()
from numpy import append as npappend
def prof():
global RESP, INDEX_T_DAS, INDEX_T_MAP, SD, EXREC, MINIPH5, Das, SIZE,\
ARRAY_T, RH, LAT, LON, F, TRACE_JSON, APPEND
MINIPH5 = None
ARRAY_T = {}
def get_das(sd, warn=False):
if sd.manufacturer == 'FairfieldNodal':
# Return line_station or das#[-9:]
try:
das = "{0}X{1}".format(
sd.reel_headers.extended_headers[2].line_number,
sd.reel_headers.extended_headers[2].receiver_point)
except Exception:
try:
das = "{0}X{1}".format(
sd.reel_headers.external_header.receiver_line,
sd.reel_headers.external_header.receiver_point)
except Exception:
das = "sn" + \
str(sd.reel_headers.general_header_blocks[0].
manufactures_sn)
if das == 0:
das = "id" + \
str(sd.reel_headers
.extended_headers[0].id_number)[-9:]
elif sd.manufacturer == 'SmartSolo':
line_number = sd.trace_headers.line_number
receiver_point = sd.trace_headers.receiver_point
if line_number == -1:
if warn:
LOGGER.warning(
"Line number is using invalid default value -1. "
"Using 1 instead.")
line_number = 1
if receiver_point == -1:
if warn:
LOGGER.warning(
"Receiver point (stationID) is using invalid "
"default value -1. Using 1 instead.")
receiver_point = 1
das = "{0}X{1}".format(line_number, receiver_point)
# das = sd.id_number
return das
def get_node(sd):
# Return node part number, node id, and number of channels
pn = None # Part Number
id = None # Node ID
nc = None # Number of channel sets
try:
nc = sd.reel_headers.general_header_blocks[0][
'chan_sets_per_scan']
id = sd.id_number
if sd.manufacturer == 'FairfieldNodal':
pn = sd.reel_headers.extended_headers[0]['part_number']
except Exception:
pass
return pn, id, nc
try:
get_args()
except Exception as err_msg:
LOGGER.error(err_msg)
return 1
initializeExperiment()
LOGGER.info("segd2ph5 {0}".format(PROG_VERSION))
LOGGER.info("{0}".format(sys.argv))
if len(FILES) > 0:
RESP = Resp(EX.ph5_g_responses)
rows, keys = EX.ph5_g_receivers.read_index()
INDEX_T_DAS = Rows_Keys(rows, keys)
rows, keys = EX.ph5_g_maps.read_index()
INDEX_T_MAP = Rows_Keys(rows, keys)
for f in FILES:
F = f
traces = []
TRACE_JSON = []
try:
SIZE = os.path.getsize(f)
except Exception as e:
LOGGER.error("Failed to read {0}, {1}.\
Skipping...\n".format(f, str(e.message)))
continue
try:
segd_reader = get_segdreader(f, MANUFACTURERS_CODE)
except Exception:
continue
SD = segd_reader.Reader(infile=f)
LAT = None
LON = None
RH = False
try:
SD.process_general_headers()
SD.process_channel_set_descriptors()
SD.process_extended_headers()
SD.process_external_headers()
if SD.manufacturer == 'SmartSolo':
SD.process_trace_headers()
except segdreader.InputsError as e:
LOGGER.error(
"Possible bad SEG-D file -- {0}".format(
"".join(e.message)))
continue
nleft = APPEND
Das = get_das(SD, warn=True)
if not Das.isalnum():
LOGGER.error(
"DAS %s is not alphanumeric. Can't process." % Das)
return 1
part_number, node_id, number_of_channels = get_node(SD)
EXREC = get_current_data_only(SIZE, Das)
LOGGER.info(":<Processing>: {0}\n".format(SD.name()))
LOGGER.info(
"Processing: {0}... Size: {1}\n".format(SD.name(), SIZE))
if EXREC.filename != MINIPH5:
LOGGER.info("Opened: {0}...\n".format(EXREC.filename))
if node_id is None:
node_id_str = ''
else:
node_id_str = ', Node ID: %s' % node_id
LOGGER.info(
"DAS: {0}{1}, PN: {2}, Channels: {3}".format(
Das, node_id_str, part_number, number_of_channels))
MINIPH5 = EXREC.filename
n = 0
trace_index = 0
trace_headers_list = []
while True:
if SD.isEOF():
if n != 0:
thl = []
chan_set = None
t = None
new_traces = []
for T in traces:
thl.append(T.headers)
if chan_set is None:
chan_set = T.headers.trace_header.channel_set
if chan_set == T.headers.trace_header.channel_set:
if isinstance(t, type(None)):
t = T.trace
else:
t = npappend(t, T.trace)
else:
new_traces.append(T)
traces = new_traces
process_traces(SD.reel_headers, thl[0], t)
if DAS_INFO:
writeINDEX()
break
try:
trace, cs = SD.process_trace(trace_index)
trace_index += 1
except segdreader.InputsError as e:
LOGGER.error("{0}\n".format(F))
LOGGER.error(
"Possible bad SEG-D file -- {0}".format(
"".join(e.message)))
break
if not LAT and not LON:
LAT, LON = get_latlon(SD.manufacturer, SD.trace_headers)
trace_headers_list.append(SD.trace_headers)
if n == 0:
traces.append(Trace(trace, SD.trace_headers))
n = 1
Das = get_das(SD)
else:
traces.append(Trace(trace, SD.trace_headers))
if n >= nleft or EVERY is True:
thl = []
chan_set = None
chan_set_next = None
t = None
new_traces = []
# Need to check for gaps here!
for T in traces:
thl.append(T.headers)
if chan_set is None:
chan_set = T.headers.trace_header.channel_set
if chan_set == T.headers.trace_header.channel_set:
if isinstance(t, type(None)):
t = T.trace
else:
t = npappend(t, T.trace)
else:
new_traces.append(T)
if chan_set_next is None:
chan_set_next =\
T.headers.trace_header.channel_set
traces = new_traces
process_traces(SD.reel_headers, thl[0], t)
if new_traces:
nleft = APPEND - len(new_traces)
else:
nleft = APPEND
chan_set = chan_set_next
chan_set_next = None
if DAS_INFO:
writeINDEX()
n = 0
trace_headers_list = []
continue
n += 1
update_external_references()
if TRACE_JSON:
log_array, name = getLOG()
for line in TRACE_JSON:
log_array.append(line)
LOGGER.info(":<Finished>: {0}\n".format(F))
write_arrays(SD, ARRAY_T)
seconds = time.time() - then
try:
EX.ph5close()
EXREC.ph5close()
except Exception as e:
LOGGER.warning("{0}\n".format("".join(e.message)))
LOGGER.info("Done...{0:b}".format(int(seconds / 6.)))
logging.shutdown()
prof()
if __name__ == '__main__':
main()
|
py
|
1a56bc35c49d210fb158907c4f8d8b91b127fa78
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 2
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class SnapshotAliasExtended(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'created': 'int',
'id': 'int',
'name': 'str',
'target_id': 'int',
'target_name': 'str'
}
attribute_map = {
'created': 'created',
'id': 'id',
'name': 'name',
'target_id': 'target_id',
'target_name': 'target_name'
}
def __init__(self, created=None, id=None, name=None, target_id=None, target_name=None): # noqa: E501
"""SnapshotAliasExtended - a model defined in Swagger""" # noqa: E501
self._created = None
self._id = None
self._name = None
self._target_id = None
self._target_name = None
self.discriminator = None
self.created = created
self.id = id
self.name = name
self.target_id = target_id
self.target_name = target_name
@property
def created(self):
"""Gets the created of this SnapshotAliasExtended. # noqa: E501
The Unix Epoch time the snapshot alias was created. # noqa: E501
:return: The created of this SnapshotAliasExtended. # noqa: E501
:rtype: int
"""
return self._created
@created.setter
def created(self, created):
"""Sets the created of this SnapshotAliasExtended.
The Unix Epoch time the snapshot alias was created. # noqa: E501
:param created: The created of this SnapshotAliasExtended. # noqa: E501
:type: int
"""
if created is None:
raise ValueError("Invalid value for `created`, must not be `None`") # noqa: E501
self._created = created
@property
def id(self):
"""Gets the id of this SnapshotAliasExtended. # noqa: E501
The system ID given to the snapshot alias. # noqa: E501
:return: The id of this SnapshotAliasExtended. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this SnapshotAliasExtended.
The system ID given to the snapshot alias. # noqa: E501
:param id: The id of this SnapshotAliasExtended. # noqa: E501
:type: int
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def name(self):
"""Gets the name of this SnapshotAliasExtended. # noqa: E501
The user or system supplied snapshot alias name. # noqa: E501
:return: The name of this SnapshotAliasExtended. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this SnapshotAliasExtended.
The user or system supplied snapshot alias name. # noqa: E501
:param name: The name of this SnapshotAliasExtended. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def target_id(self):
"""Gets the target_id of this SnapshotAliasExtended. # noqa: E501
The ID of the snapshot pointed to. # noqa: E501
:return: The target_id of this SnapshotAliasExtended. # noqa: E501
:rtype: int
"""
return self._target_id
@target_id.setter
def target_id(self, target_id):
"""Sets the target_id of this SnapshotAliasExtended.
The ID of the snapshot pointed to. # noqa: E501
:param target_id: The target_id of this SnapshotAliasExtended. # noqa: E501
:type: int
"""
if target_id is None:
raise ValueError("Invalid value for `target_id`, must not be `None`") # noqa: E501
self._target_id = target_id
@property
def target_name(self):
"""Gets the target_name of this SnapshotAliasExtended. # noqa: E501
The name of the snapshot pointed to. # noqa: E501
:return: The target_name of this SnapshotAliasExtended. # noqa: E501
:rtype: str
"""
return self._target_name
@target_name.setter
def target_name(self, target_name):
"""Sets the target_name of this SnapshotAliasExtended.
The name of the snapshot pointed to. # noqa: E501
:param target_name: The target_name of this SnapshotAliasExtended. # noqa: E501
:type: str
"""
if target_name is None:
raise ValueError("Invalid value for `target_name`, must not be `None`") # noqa: E501
self._target_name = target_name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SnapshotAliasExtended):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
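if __name__ == '__main__':
    # Minimal usage sketch with made-up values; in practice this model is populated
    # by the generated Isilon SDK client rather than constructed by hand.
    alias = SnapshotAliasExtended(created=1573776000, id=1, name='latest',
                                  target_id=42, target_name='weekly_snapshot')
    print(alias.to_dict())
    print(alias)  # __repr__ falls back to the pretty-printed dict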
|
py
|
1a56bc5c215f2ed84a6f56a40458873701e6aa51
|
from django.apps import AppConfig
class AdminxznConfig(AppConfig):
name = 'AdminXZN'
|
py
|
1a56bcd66afd26b14b61a41b2fe2e5eb72ebac71
|
import os
import sys
def ProcessFile(fileObj):
result = ""
line = fileObj.readline()
#skip to related materials section
while not("=Related Material" in line or "= Related Material" in line or "=Manual" in line or "==Manual" in line or "= Manual" in line or "== Manual" in line or line == ""):
result += line
line = fileObj.readline()
while not(line == ""):
if not(line[0] == "=" or line[0] == " " or line[0] == "\n" or line[0] == "-"):
line = "- " + line
result += line
line = fileObj.readline()
return result
# Set the directory you want to start from
rootDir = '.'
translatedDir = "I:\\github\\translated_files"
for dirName, subdirList, fileList in os.walk(rootDir):
#print('Found directory: %s' % dirName)
for fname in fileList:
filePath = dirName + "\\" + fname
fileCopyPath = dirName + "\\" + fname
if(".remarkup" in filePath):
print("processing: " + filePath)
fileObj = open(filePath, "r")
result = ProcessFile(fileObj)
result += " \n "
fileObj.close()
fileObj = open(fileCopyPath, "w")
fileObj.write(result)
fileObj.close()
|
py
|
1a56bce39a963a807d01103420259526cce3a3d7
|
from django.http import Http404
from django.conf import settings
from django.shortcuts import get_list_or_404
from rest_framework.generics import RetrieveAPIView, DestroyAPIView, GenericAPIView
from rest_framework.response import Response
from client.authentication import ClientSenderIdAuthentication
from submission.models import Submission, Sender
from submission.constants import (
ACTION_NAME_EMAIL,
ACTION_NAME_GOV_NOTIFY_EMAIL,
ACTION_NAME_PARDOT,
ACTION_NAME_ZENDESK,
)
class TestAPIView(GenericAPIView):
authentication_classes = [ClientSenderIdAuthentication]
def dispatch(self, *args, **kwargs):
if not settings.FEATURE_TEST_API_ENABLED:
raise Http404
return super().dispatch(*args, **kwargs)
class SubmissionsTestAPIView(TestAPIView, DestroyAPIView, RetrieveAPIView):
queryset = Submission.objects.all()
http_method_names = ('delete', 'get',)
@staticmethod
def data_and_meta(submission: Submission):
return {
'data': dict(submission.data),
'meta': dict(submission.meta),
'is_sent': submission.is_sent,
'form_url': submission.form_url,
}
def get_submissions(self, email_address):
results = []
for submission in self.queryset.all():
if submission.meta['action_name'] == ACTION_NAME_PARDOT:
if submission.data['email'] == email_address:
results.append(self.data_and_meta(submission))
if submission.meta['action_name'] in [
ACTION_NAME_GOV_NOTIFY_EMAIL, ACTION_NAME_ZENDESK
]:
if submission.meta['email_address'] == email_address:
results.append(self.data_and_meta(submission))
if submission.meta['action_name'] == ACTION_NAME_EMAIL:
if email_address in submission.meta['recipients']:
results.append(self.data_and_meta(submission))
return results
def get(self, request, *args, **kwargs):
email_address = kwargs['email_address']
meta = self.get_submissions(email_address)
return Response(meta) if meta else Response(status=404)
def delete(self, request, **kwargs):
test_email_pattern = r'^test\+(.*)@directory\.uktrade\.io'
try:
test_submissions = get_list_or_404(
Submission,
sender__email_address__regex=test_email_pattern,
)
for submission in test_submissions:
submission.delete()
except Http404:
try:
test_email_actions = get_list_or_404(
Submission,
meta__recipients__0__regex=test_email_pattern,
)
for email_notification in test_email_actions:
email_notification.delete()
except Http404:
try:
test_zendesk_actions = get_list_or_404(
Submission,
meta__email_address__regex=test_email_pattern,
)
for zendesk_action in test_zendesk_actions:
zendesk_action.delete()
except Http404:
test_gov_notify_actions = get_list_or_404(
Submission,
meta__sender__email_address__regex=test_email_pattern,
)
for gov_notify_action in test_gov_notify_actions:
gov_notify_action.delete()
return Response(status=204)
class SendersTestAPIView(TestAPIView, DestroyAPIView):
queryset = Sender.objects.all()
    http_method_names = ('delete',)
def delete(self, request, **kwargs):
test_email_pattern = r'^test\+(.*)@directory\.uktrade\.io'
test_senders = get_list_or_404(
Sender,
email_address__regex=test_email_pattern,
)
for sender in test_senders:
sender.delete()
return Response(status=204)
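# Example wiring for these test-only endpoints (a sketch; the URL prefix, module
# path and route names below are assumptions, not taken from this project):
#
#     from django.urls import path
#     from testapi import views
#
#     urlpatterns = [
#         path('testapi/submissions-by-email/<str:email_address>/',
#              views.SubmissionsTestAPIView.as_view(), name='submissions_by_email'),
#         path('testapi/test-senders/',
#              views.SendersTestAPIView.as_view(), name='delete_test_senders'),
#     ]
#
# Both views return 404 unless settings.FEATURE_TEST_API_ENABLED is True, and
# callers must authenticate via ClientSenderIdAuthentication.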
|
py
|
1a56bcfa0cb86c0ffe110f7051736a58106b08ad
|
# -*- coding: utf-8 -*-
"""Utilities to get elements of generated spec"""
def get_definitions(spec):
if spec.openapi_version.major < 3:
return spec.to_dict()['definitions']
return spec.to_dict()['components']['schemas']
def get_parameters(spec):
if spec.openapi_version.major < 3:
return spec.to_dict()['parameters']
return spec.to_dict()['components']['parameters']
def get_responses(spec):
if spec.openapi_version.major < 3:
return spec.to_dict()['responses']
return spec.to_dict()['components']['responses']
def get_paths(spec):
return spec.to_dict()['paths']
def ref_path(spec):
    if spec.openapi_version.major < 3:
return '#/definitions/'
return '#/components/schemas/'
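if __name__ == '__main__':
    # Self-contained sketch using a stub spec object (not a real apispec.APISpec),
    # just to show how these helpers dispatch on the OpenAPI major version.
    class _StubVersion(object):
        major = 3
        version = (3, 0, 2)
    class _StubSpec(object):
        openapi_version = _StubVersion()
        def to_dict(self):
            return {
                'paths': {'/pets': {'get': {}}},
                'components': {'schemas': {'Pet': {'type': 'object'}},
                               'parameters': {},
                               'responses': {}},
            }
    spec = _StubSpec()
    assert get_definitions(spec) == {'Pet': {'type': 'object'}}
    assert get_paths(spec) == {'/pets': {'get': {}}}
    assert ref_path(spec) == '#/components/schemas/'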
|
py
|
1a56bd1f14b2dfa712c1407f215f541ea1d53a7c
|
#!/usr/bin/env python
# Copyright 2018-2019 the Deno authors. All rights reserved. MIT license.
import os
import sys
import time
import subprocess
import util
import third_party
# Some of the benchmarks in this file have been renamed. In case the history
# somehow gets messed up:
# "node_http" was once called "node"
# "deno_tcp" was once called "deno"
# "deno_http" was once called "deno_net_http"
DURATION = "10s"
LAST_PORT = 4544
def server_addr(port):
return "0.0.0.0:%s" % port
def get_port(port=None):
global LAST_PORT
if port is None:
port = LAST_PORT
LAST_PORT = LAST_PORT + 1
# Return port as str because all usages below are as a str and having it an
# integer just adds complexity.
return str(port)
def deno_tcp(deno_exe):
port = get_port()
deno_cmd = [
deno_exe, "run", "--allow-net", "tools/deno_tcp.ts",
server_addr(port)
]
print "http_benchmark testing DENO tcp."
return run(deno_cmd, port)
def deno_tcp_current_thread(deno_exe):
port = get_port()
deno_cmd = [
deno_exe, "run", "--current-thread", "--allow-net",
"tools/deno_tcp.ts",
server_addr(port)
]
print "http_benchmark testing DENO tcp (single-thread)."
return run(deno_cmd, port)
def deno_http(deno_exe):
port = get_port()
deno_cmd = [
deno_exe, "run", "--allow-net", "std/http/http_bench.ts",
server_addr(port)
]
print "http_benchmark testing DENO using net/http."
return run(deno_cmd, port)
def deno_tcp_proxy(deno_exe, hyper_hello_exe):
port = get_port()
origin_port = get_port()
deno_cmd = [
deno_exe, "run", "--allow-net", "tools/deno_tcp_proxy.ts",
server_addr(port),
server_addr(origin_port)
]
print "http_proxy_benchmark testing DENO using net/tcp."
return run(
deno_cmd,
port,
origin_cmd=http_proxy_origin(hyper_hello_exe, origin_port))
def deno_http_proxy(deno_exe, hyper_hello_exe):
port = get_port()
origin_port = get_port()
deno_cmd = [
deno_exe, "run", "--allow-net", "tools/deno_http_proxy.ts",
server_addr(port),
server_addr(origin_port)
]
print "http_proxy_benchmark testing DENO using net/http."
return run(
deno_cmd,
port,
origin_cmd=http_proxy_origin(hyper_hello_exe, origin_port))
def deno_core_single(exe):
print "http_benchmark testing deno_core_single"
return run([exe, "--single-thread"], 4544)
def deno_core_multi(exe):
print "http_benchmark testing deno_core_multi"
return run([exe, "--multi-thread"], 4544)
def node_http():
port = get_port()
node_cmd = ["node", "tools/node_http.js", port]
print "http_benchmark testing NODE."
return run(node_cmd, port)
def node_http_proxy(hyper_hello_exe):
port = get_port()
origin_port = get_port()
node_cmd = ["node", "tools/node_http_proxy.js", port, origin_port]
print "http_proxy_benchmark testing NODE."
return run(node_cmd, port, None,
http_proxy_origin(hyper_hello_exe, origin_port))
def node_tcp_proxy(hyper_hello_exe):
port = get_port()
origin_port = get_port()
node_cmd = ["node", "tools/node_tcp_proxy.js", port, origin_port]
print "http_proxy_benchmark testing NODE tcp."
return run(node_cmd, port, None,
http_proxy_origin(hyper_hello_exe, origin_port))
def node_tcp():
port = get_port()
node_cmd = ["node", "tools/node_tcp.js", port]
print "http_benchmark testing node_tcp.js"
return run(node_cmd, port)
def http_proxy_origin(hyper_hello_exe, port):
return [hyper_hello_exe, port]
def hyper_http(hyper_hello_exe):
port = get_port()
hyper_cmd = [hyper_hello_exe, port]
print "http_benchmark testing RUST hyper."
return run(hyper_cmd, port)
def http_benchmark(build_dir):
hyper_hello_exe = os.path.join(build_dir, "hyper_hello")
core_http_bench_exe = os.path.join(build_dir,
"examples/deno_core_http_bench")
deno_exe = os.path.join(build_dir, "deno")
return {
# "deno_tcp" was once called "deno"
"deno_tcp": deno_tcp(deno_exe),
"deno_tcp_current_thread": deno_tcp_current_thread(deno_exe),
# "deno_http" was once called "deno_net_http"
"deno_http": deno_http(deno_exe),
"deno_proxy": deno_http_proxy(deno_exe, hyper_hello_exe),
"deno_proxy_tcp": deno_tcp_proxy(deno_exe, hyper_hello_exe),
"deno_core_single": deno_core_single(core_http_bench_exe),
"deno_core_multi": deno_core_multi(core_http_bench_exe),
# "node_http" was once called "node"
"node_http": node_http(),
"node_proxy": node_http_proxy(hyper_hello_exe),
"node_proxy_tcp": node_tcp_proxy(hyper_hello_exe),
"node_tcp": node_tcp(),
"hyper": hyper_http(hyper_hello_exe)
}
def run(server_cmd, port, merge_env=None, origin_cmd=None):
# Run deno echo server in the background.
if merge_env is None:
env = None
else:
env = os.environ.copy()
for key, value in merge_env.iteritems():
env[key] = value
# Wait for port 4544 to become available.
# TODO Need to use SO_REUSEPORT with tokio::net::TcpListener.
time.sleep(5)
origin = None
if origin_cmd is not None:
origin = subprocess.Popen(origin_cmd, env=env)
print server_cmd
server = subprocess.Popen(server_cmd, env=env)
time.sleep(5) # wait for server to wake up. TODO racy.
try:
wrk = third_party.get_prebuilt_tool_path("wrk")
assert os.path.exists(wrk)
cmd = "%s -d %s --latency http://127.0.0.1:%s/" % (wrk, DURATION, port)
print cmd
output = subprocess.check_output(cmd, shell=True)
stats = util.parse_wrk_output(output)
print output
return stats
finally:
server.kill()
if origin is not None:
origin.kill()
if __name__ == '__main__':
if len(sys.argv) < 2:
print "Usage ./tools/http_benchmark.py target/debug/deno"
sys.exit(1)
deno_http(sys.argv[1])
|
py
|
1a56bd384006d6d33e23183672a4e7edc96c0bf6
|
# code adpated from https://github.com/m-lundberg/simple-pid
import time
import warnings
def _clamp(value, limits):
lower, upper = limits
if value is None:
return None
elif (upper is not None) and (value > upper):
return upper
elif (lower is not None) and (value < lower):
return lower
return value
try:
# Get monotonic time to ensure that time deltas are always positive
_current_time = time.monotonic
except AttributeError:
# time.monotonic() not available (using python < 3.3), fallback to time.time()
_current_time = time.time
warnings.warn('time.monotonic() not available in python < 3.3, using time.time() as fallback')
class PID(object):
"""A simple PID controller."""
def __init__(
self,
kp=1.0,
ki=0.0,
kd=0.0,
setpoint=0,
sample_time=0.01,
output_limits=(None, None),
auto_mode=True,
proportional_on_measurement=False,
error_map=None,
):
"""
Initialize a new PID controller.
:param kp: The value for the proportional gain kp
:param ki: The value for the integral gain ki
:param kd: The value for the derivative gain kd
:param setpoint: The initial setpoint that the PID will try to achieve
:param sample_time: The time in seconds which the controller should wait before generating
a new output value. The PID works best when it is constantly called (eg. during a
loop), but with a sample time set so that the time difference between each update is
(close to) constant. If set to None, the PID will compute a new output value every time
it is called.
:param output_limits: The initial output limits to use, given as an iterable with 2
elements, for example: (lower, upper). The output will never go below the lower limit
or above the upper limit. Either of the limits can also be set to None to have no limit
in that direction. Setting output limits also avoids integral windup, since the
integral term will never be allowed to grow outside of the limits.
:param auto_mode: Whether the controller should be enabled (auto mode) or not (manual mode)
:param proportional_on_measurement: Whether the proportional term should be calculated on
the input directly rather than on the error (which is the traditional way). Using
proportional-on-measurement avoids overshoot for some types of systems.
:param error_map: Function to transform the error value in another constrained value.
"""
self.kp, self.ki, self.kd = kp, ki, kd
self.setpoint = setpoint
self.sample_time = sample_time
self._min_output, self._max_output = None, None
self._auto_mode = auto_mode
self.proportional_on_measurement = proportional_on_measurement
self.error_map = error_map
self._proportional = 0
self._integral = 0
self._derivative = 0
self._last_time = None
self._last_output = None
self._last_input = None
self.output_limits = output_limits
self.reset()
def __call__(self, input_, dt=None):
"""
Update the PID controller.
Call the PID controller with *input_* and calculate and return a control output if
sample_time seconds has passed since the last update. If no new output is calculated,
return the previous output instead (or None if no value has been calculated yet).
:param dt: If set, uses this value for timestep instead of real time. This can be used in
simulations when simulation time is different from real time.
"""
if not self.auto_mode:
return self._last_output
now = _current_time()
if dt is None:
dt = now - self._last_time if (now - self._last_time) else 1e-16
elif dt <= 0:
            raise ValueError('dt has nonpositive value {}, must be positive'.format(dt))
if self.sample_time is not None and dt < self.sample_time and self._last_output is not None:
# Only update every sample_time seconds
return self._last_output
# Compute error terms
error = self.setpoint - input_
d_input = input_ - (self._last_input if (self._last_input is not None) else input_)
# Check if must map the error
if self.error_map is not None:
error = self.error_map(error)
# Compute the proportional term
if not self.proportional_on_measurement:
# Regular proportional-on-error, simply set the proportional term
self._proportional = self.kp * error
else:
# Add the proportional error on measurement to error_sum
self._proportional -= self.kp * d_input
# Compute integral and derivative terms
self._integral += self.ki * error * dt
self._integral = _clamp(self._integral, self.output_limits) # Avoid integral windup
self._derivative = -self.kd * d_input / dt
# Compute final output
output = self._proportional + self._integral + self._derivative
output = _clamp(output, self.output_limits)
# Keep track of state
self._last_output = output
self._last_input = input_
self._last_time = now
return output
def __repr__(self):
return (
'{self.__class__.__name__}('
'kp={self.kp!r}, ki={self.ki!r}, kd={self.kd!r}, '
'setpoint={self.setpoint!r}, sample_time={self.sample_time!r}, '
'output_limits={self.output_limits!r}, auto_mode={self.auto_mode!r}, '
'proportional_on_measurement={self.proportional_on_measurement!r},'
'error_map={self.error_map!r}'
')'
).format(self=self)
@property
def components(self):
"""
The P-, I- and D-terms from the last computation as separate components as a tuple. Useful
for visualizing what the controller is doing or when tuning hard-to-tune systems.
"""
return self._proportional, self._integral, self._derivative
@property
def tunings(self):
"""The tunings used by the controller as a tuple: (kp, ki, kd)."""
return self.kp, self.ki, self.kd
@tunings.setter
def tunings(self, tunings):
"""Set the PID tunings."""
self.kp, self.ki, self.kd = tunings
@property
def auto_mode(self):
"""Whether the controller is currently enabled (in auto mode) or not."""
return self._auto_mode
@auto_mode.setter
def auto_mode(self, enabled):
"""Enable or disable the PID controller."""
self.set_auto_mode(enabled)
def set_auto_mode(self, enabled, last_output=None):
"""
Enable or disable the PID controller, optionally setting the last output value.
This is useful if some system has been manually controlled and if the PID should take over.
In that case, disable the PID by setting auto mode to False and later when the PID should
be turned back on, pass the last output variable (the control variable) and it will be set
as the starting I-term when the PID is set to auto mode.
:param enabled: Whether auto mode should be enabled, True or False
:param last_output: The last output, or the control variable, that the PID should start
from when going from manual mode to auto mode. Has no effect if the PID is already in
auto mode.
"""
if enabled and not self._auto_mode:
# Switching from manual mode to auto, reset
self.reset()
self._integral = last_output if (last_output is not None) else 0
self._integral = _clamp(self._integral, self.output_limits)
self._auto_mode = enabled
@property
def output_limits(self):
"""
The current output limits as a 2-tuple: (lower, upper).
See also the *output_limits* parameter in :meth:`PID.__init__`.
"""
return self._min_output, self._max_output
@output_limits.setter
def output_limits(self, limits):
"""Set the output limits."""
if limits is None:
self._min_output, self._max_output = None, None
return
min_output, max_output = limits
if (None not in limits) and (max_output < min_output):
raise ValueError('lower limit must be less than upper limit')
self._min_output = min_output
self._max_output = max_output
self._integral = _clamp(self._integral, self.output_limits)
self._last_output = _clamp(self._last_output, self.output_limits)
def reset(self):
"""
Reset the PID controller internals.
This sets each term to 0 as well as clearing the integral, the last output and the last
input (derivative calculation).
"""
self._proportional = 0
self._integral = 0
self._derivative = 0
self._integral = _clamp(self._integral, self.output_limits)
self._last_time = _current_time()
self._last_output = None
self._last_input = None
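if __name__ == '__main__':
    # Minimal usage sketch driving a toy first-order plant; the plant model and
    # gains below are made up for illustration, not tuned for any real system.
    pid = PID(kp=2.0, ki=0.5, kd=0.05, setpoint=10.0,
              sample_time=None, output_limits=(0, 100))
    value = 0.0
    for _ in range(50):
        control = pid(value, dt=0.1)            # fixed timestep instead of wall clock
        value += 0.1 * (control - 0.5 * value)  # toy plant: control pushes value up
    print('final value: {:.2f}, PID components: {}'.format(value, pid.components))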
|
py
|
1a56bd404ea13116900d3cfd477688efa63764b1
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateEndpointConnectionsOperations:
"""PrivateEndpointConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2021_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def list(
self,
resource_group_name: str,
resource_name: str,
**kwargs
) -> "_models.PrivateEndpointConnectionListResult":
"""Gets a list of private endpoint connections in the specified managed cluster.
Gets a list of private endpoint connections in the specified managed cluster. The operation
returns properties of each private endpoint connection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnectionListResult, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_02_01.models.PrivateEndpointConnectionListResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnectionListResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections'} # type: ignore
async def get(
self,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
**kwargs
) -> "_models.PrivateEndpointConnection":
"""Gets the private endpoint connection.
Gets the details of the private endpoint connection by managed cluster and resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_02_01.models.PrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
async def update(
self,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
parameters: "_models.PrivateEndpointConnection",
**kwargs
) -> "_models.PrivateEndpointConnection":
"""Updates a private endpoint connection.
Updates a private endpoint connection in the specified managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:param parameters: Parameters supplied to the Update a private endpoint connection operation.
:type parameters: ~azure.mgmt.containerservice.v2021_02_01.models.PrivateEndpointConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.containerservice.v2021_02_01.models.PrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PrivateEndpointConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
resource_name: str,
private_endpoint_connection_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes a private endpoint connection.
Deletes the private endpoint connection in the specified managed cluster.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param resource_name: The name of the managed cluster resource.
:type resource_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
False for no polling, or your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
private_endpoint_connection_name=private_endpoint_connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
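# Illustrative usage sketch (an assumption, not part of the generated SDK code):
# driving the long-running delete above through the async management client.
# The credential/client classes and all resource names below are placeholders.
async def _example_begin_delete():
    from azure.identity.aio import DefaultAzureCredential
    from azure.mgmt.containerservice.aio import ContainerServiceClient
    credential = DefaultAzureCredential()
    async with ContainerServiceClient(credential, "<subscription-id>") as client:
        poller = await client.private_endpoint_connections.begin_delete(
            resource_group_name="my-resource-group",
            resource_name="my-managed-cluster",
            private_endpoint_connection_name="my-connection",
        )
        await poller.result()  # wait for the long-running operation to complete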
|
py
|
1a56be0f90949d671f98fafb9eee93f2ebf53d07
|
"""
sphinx.builders.texinfo
~~~~~~~~~~~~~~~~~~~~~~~
Texinfo builder.
:copyright: Copyright 2007-2021 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
from os import path
from typing import Any, Dict, Iterable, List, Tuple, Union
from docutils import nodes
from docutils.frontend import OptionParser
from docutils.io import FileOutput
from sphinx import addnodes, package_dir
from sphinx.application import Sphinx
from sphinx.builders import Builder
from sphinx.config import Config
from sphinx.environment.adapters.asset import ImageAdapter
from sphinx.errors import NoUri
from sphinx.locale import _, __
from sphinx.util import logging, progress_message, status_iterator
from sphinx.util.console import darkgreen # type: ignore
from sphinx.util.docutils import new_document
from sphinx.util.fileutil import copy_asset_file
from sphinx.util.nodes import inline_all_toctrees
from sphinx.util.osutil import SEP, ensuredir, make_filename_from_project
from sphinx.writers.texinfo import TexinfoTranslator, TexinfoWriter
logger = logging.getLogger(__name__)
template_dir = os.path.join(package_dir, 'templates', 'texinfo')
class TexinfoBuilder(Builder):
"""
Builds Texinfo output to create Info documentation.
"""
name = 'texinfo'
format = 'texinfo'
epilog = __('The Texinfo files are in %(outdir)s.')
if os.name == 'posix':
epilog += __("\nRun 'make' in that directory to run these through "
"makeinfo\n"
"(use 'make info' here to do that automatically).")
supported_image_types = ['image/png', 'image/jpeg',
'image/gif']
default_translator_class = TexinfoTranslator
def init(self) -> None:
self.docnames = [] # type: Iterable[str]
self.document_data = [] # type: List[Tuple[str, str, str, str, str, str, str, bool]]
def get_outdated_docs(self) -> Union[str, List[str]]:
return 'all documents' # for now
def get_target_uri(self, docname: str, typ: str = None) -> str:
if docname not in self.docnames:
raise NoUri(docname, typ)
else:
return '%' + docname
def get_relative_uri(self, from_: str, to: str, typ: str = None) -> str:
# ignore source path
return self.get_target_uri(to, typ)
def init_document_data(self) -> None:
preliminary_document_data = [list(x) for x in self.config.texinfo_documents]
if not preliminary_document_data:
logger.warning(__('no "texinfo_documents" config value found; no documents '
'will be written'))
return
# assign subdirs to titles
self.titles = [] # type: List[Tuple[str, str]]
for entry in preliminary_document_data:
docname = entry[0]
if docname not in self.env.all_docs:
logger.warning(__('"texinfo_documents" config value references unknown '
'document %s'), docname)
continue
self.document_data.append(entry) # type: ignore
if docname.endswith(SEP + 'index'):
docname = docname[:-5]
self.titles.append((docname, entry[2]))
def write(self, *ignored: Any) -> None:
self.init_document_data()
for entry in self.document_data:
docname, targetname, title, author = entry[:4]
targetname += '.texi'
direntry = description = category = ''
if len(entry) > 6:
direntry, description, category = entry[4:7]
toctree_only = False
if len(entry) > 7:
toctree_only = entry[7]
destination = FileOutput(
destination_path=path.join(self.outdir, targetname),
encoding='utf-8')
with progress_message(__("processing %s") % targetname):
appendices = self.config.texinfo_appendices or []
doctree = self.assemble_doctree(docname, toctree_only, appendices=appendices)
with progress_message(__("writing")):
self.post_process_images(doctree)
docwriter = TexinfoWriter(self)
settings = OptionParser(
defaults=self.env.settings,
components=(docwriter,),
read_config_files=True).get_default_values() # type: Any
settings.author = author
settings.title = title
settings.texinfo_filename = targetname[:-5] + '.info'
settings.texinfo_elements = self.config.texinfo_elements
settings.texinfo_dir_entry = direntry or ''
settings.texinfo_dir_category = category or ''
settings.texinfo_dir_description = description or ''
settings.docname = docname
doctree.settings = settings
docwriter.write(doctree, destination)
self.copy_image_files(targetname[:-5])
def assemble_doctree(self, indexfile: str, toctree_only: bool, appendices: List[str]) -> nodes.document: # NOQA
self.docnames = set([indexfile] + appendices)
logger.info(darkgreen(indexfile) + " ", nonl=True)
tree = self.env.get_doctree(indexfile)
tree['docname'] = indexfile
if toctree_only:
# extract toctree nodes from the tree and put them in a
# fresh document
new_tree = new_document('<texinfo output>')
new_sect = nodes.section()
new_sect += nodes.title('<Set title in conf.py>',
'<Set title in conf.py>')
new_tree += new_sect
for node in tree.traverse(addnodes.toctree):
new_sect += node
tree = new_tree
largetree = inline_all_toctrees(self, self.docnames, indexfile, tree,
darkgreen, [indexfile])
largetree['docname'] = indexfile
for docname in appendices:
appendix = self.env.get_doctree(docname)
appendix['docname'] = docname
largetree.append(appendix)
logger.info('')
logger.info(__("resolving references..."))
self.env.resolve_references(largetree, indexfile, self)
# TODO: add support for external :ref:s
for pendingnode in largetree.traverse(addnodes.pending_xref):
docname = pendingnode['refdocname']
sectname = pendingnode['refsectname']
newnodes = [nodes.emphasis(sectname, sectname)] # type: List[nodes.Node]
for subdir, title in self.titles:
if docname.startswith(subdir):
newnodes.append(nodes.Text(_(' (in '), _(' (in ')))
newnodes.append(nodes.emphasis(title, title))
newnodes.append(nodes.Text(')', ')'))
break
else:
pass
pendingnode.replace_self(newnodes)
return largetree
def finish(self) -> None:
self.copy_support_files()
def copy_image_files(self, targetname: str) -> None:
if self.images:
stringify_func = ImageAdapter(self.app.env).get_original_image_uri
for src in status_iterator(self.images, __('copying images... '), "brown",
len(self.images), self.app.verbosity,
stringify_func=stringify_func):
dest = self.images[src]
try:
imagedir = path.join(self.outdir, targetname + '-figures')
ensuredir(imagedir)
copy_asset_file(path.join(self.srcdir, src),
path.join(imagedir, dest))
except Exception as err:
logger.warning(__('cannot copy image file %r: %s'),
path.join(self.srcdir, src), err)
def copy_support_files(self) -> None:
try:
with progress_message(__('copying Texinfo support files')):
logger.info('Makefile ', nonl=True)
copy_asset_file(os.path.join(template_dir, 'Makefile'), self.outdir)
except OSError as err:
logger.warning(__("error writing file Makefile: %s"), err)
def default_texinfo_documents(config: Config) -> List[Tuple[str, str, str, str, str, str, str]]: # NOQA
""" Better default texinfo_documents settings. """
filename = make_filename_from_project(config.project)
return [(config.master_doc, filename, config.project, config.author, filename,
'One line description of project', 'Miscellaneous')]
def setup(app: Sphinx) -> Dict[str, Any]:
app.add_builder(TexinfoBuilder)
app.add_config_value('texinfo_documents', default_texinfo_documents, None)
app.add_config_value('texinfo_appendices', [], None)
app.add_config_value('texinfo_elements', {}, None)
app.add_config_value('texinfo_domain_indices', True, None, [list])
app.add_config_value('texinfo_show_urls', 'footnote', None)
app.add_config_value('texinfo_no_detailmenu', False, None)
return {
'version': 'builtin',
'parallel_read_safe': True,
'parallel_write_safe': True,
}
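# Illustrative sketch (an assumption, not part of Sphinx itself): the shape of a
# conf.py ``texinfo_documents`` entry consumed by init_document_data()/write()
# above. The project, file and author names are placeholders.
example_texinfo_documents = [
    ('index',                # master document (docname)
     'myproject',            # target name, written as myproject.texi
     'My Project',           # title
     'Jane Doe',             # author
     'myproject',            # Texinfo dir entry
     'One line description of project',  # description
     'Miscellaneous',        # category
     False),                 # toctree_only
]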
|
py
|
1a56be1e9149691dbfb2508e87524ad30f4d97d1
|
import re, textwrap
import z3
from .utils import logger
from .z3api import z3utils
###############################################################################
# Serialize Declarations
###############################################################################
def smt_sort_str(sort):
assert isinstance(sort, z3.SortRef), \
"Received {} of type {} != SortRef".format(c, type(c).__name__)
if z3utils.is_array_sort(sort):
return '(Array {} {})'.format(smt_sort_str(sort.domain()), smt_sort_str(sort.range()))
else:
return sort.name()
def smt_const_decl(c):
assert isinstance(c, z3.ExprRef), \
"Received {} of type {} != ExprRef".format(c, type(c).__name__)
assert c.decl().arity() == 0, \
"Received {} of arity {} != 0 as const decl".format(c, c.decl().arity())
return '(declare-fun {} () {})'.format(c, smt_sort_str(c.decl().range()))
def smt_list(ls):
return '({})'.format(' '.join(ls))
def smt_fun_decl(f):
assert isinstance(f, z3.FuncDeclRef), \
"Received {} of type {} != FuncDeclRef".format(f, type(f).__name__)
dom = smt_list([smt_sort_str(f.domain(i)) for i in range(0,f.arity())])
rng = smt_sort_str(f.range())
return '(declare-fun {} {} {})'.format(f, dom, rng)
def smt_sort_decl(sort):
return '(declare-sort {} 0)'.format(sort)
###############################################################################
# Serialize Expression
###############################################################################
def translate_head_func_decl(expr):
decl = expr.decl()
assert isinstance(decl,z3.FuncDeclRef)
s = str(decl)
if s == '==': return '='
elif z3.is_K(expr): #s == 'K':
# Const array => Must include type
return '(as const {})'.format(smt_sort_str(decl.range()))
elif z3.is_map(expr):
# FIXME: Not general enough for data maps?
return '(_ map {})'.format(str(z3.get_map_func(expr)).lower())
else: return s.lower()
def expr_to_smt2_string(encoding, multi_line = True, indent = ' '):
assert isinstance(encoding, z3.ExprRef), \
'Received {} of type {} for ExprRef serialization'.format(encoding, type(encoding).__name__)
if not multi_line:
indent = ''
pat = re.compile(r'\s+')
def smtify(expr, children):
if z3.is_var(expr):
# TODO: Allow more than one quantified var?
assert str(expr)=='Var(0)', \
'Currently only support for expressions with a single quantified variable'
return '_x_'
elif z3.is_quantifier(expr):
assert expr.num_vars() == 1, \
'Currently only support for expressions with a single quantified variable'
            return '({} ((_x_ {}))\n{})'.format(
'forall' if expr.is_forall() else 'exists',
expr.var_sort(0),
children[0]
)
else:
#print('{!r} with children {!r}'.format(expr, children))
assert z3.is_app(expr)
assert isinstance(encoding, z3.ExprRef)
# TODO: Improve/simplify the whitespace handling
sjoin = '\n' if multi_line else ' '
child_string = sjoin.join(children)
if indent:
child_string = textwrap.indent(child_string, indent)
stripped = pat.sub(' ', child_string)
while stripped[0] == ' ':
stripped = stripped[1:]
if len(stripped) < 20 or not multi_line:
rep = stripped
else:
rep = '\n' + child_string
res = '({} {})'.format(
translate_head_func_decl(expr),
rep)
#print('Will return {}'.format(res))
return res
def leaf_to_smt(leaf):
#print('Leaf: {!r}'.format(leaf))
s = str(leaf)
if (s == 'True' or s == 'False'):
return s.lower()
else:
return s
return z3utils.expr_fold(encoding, leaf_to_smt, smtify)
###############################################################################
# Serialize Complete Encoding
###############################################################################
def write_encoding_to_file(file, encoding, structs):
with open(file, 'w') as f:
f.write(serialize_encoding(encoding, structs))
def serialize_encoding(encoding, structs):
assert(isinstance(encoding, z3.ExprRef))
# Const decls
consts = z3utils.collect_consts(encoding)
ordered = sorted(consts, key=z3utils.by_complexity)
const_decls = [smt_const_decl(c) for c in ordered]
# Generate sort-based decls based on the sorts for which we have constants
sort_decls = []
fun_refs = []
# FIXME: With the lambda backend we declare functions for data structures that aren't used (because they all use the same sort, Int) => Determine based on parsed input instead?
for struct in structs:
sort = struct.sort
if z3utils.contains_sort(consts, sort):
if sort.to_declare():
logger.debug('Declaring uninterpreted sort {}'.format(sort))
sort_decls.append(sort)
fun_refs += struct.heap_fns()
main_decls = ([smt_sort_decl(s) for s in sort_decls]
+ [smt_fun_decl(f) for f in fun_refs])
# Full list of declarations
decls = main_decls + const_decls
# use our encoding of the assertion directly for readability
smt2_encoding = expr_to_smt2_string(encoding)
assertion = '(assert \n {}\n)'.format(smt2_encoding)
checks = '(check-sat)\n(get-model)'
# TODO: Re-enable set-logic for the quantified backend?
logic = '' # '(set-logic AUFLIA)' + '\n'
full_encoding = logic + '\n'.join(decls) + '\n' + assertion + '\n' + checks + '\n'
return full_encoding
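# Illustrative sketch (an assumption, not part of the original module): a tiny
# end-to-end use of the helpers above on a hand-built z3 expression.
def _example_serialization():
    x, y = z3.Ints('x y')
    encoding = z3.And(x > 0, x + y == 3)
    # Constant declarations first, then the assertion body itself.
    decls = '\n'.join(smt_const_decl(c) for c in (x, y))
    body = expr_to_smt2_string(encoding, multi_line=False)
    return '{}\n(assert {})\n(check-sat)\n'.format(decls, body)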
|
py
|
1a56bedadce762ac0c4aebfdc7fb0e8071b0b1c3
|
from formaloo import constants, helper
class RowVote(helper.RequestHandler):
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.actions = {
"get_list": {
"url": constants.V_1_0_ROW_VOTE_LIST_CREATE_ENDPOINT,
"has_url_params": True,
"body": None,
"method": self.client.get
},
"create": {
"url": constants.V_1_0_ROW_VOTE_LIST_CREATE_ENDPOINT,
"has_url_params": True,
"body": self.get_body(),
"method": self.client.post
},
"patch": {
"url": constants.V_1_0_ROW_VOTE_ITEM_ENDPOINT,
"has_url_params": True,
"body": self.get_body(),
"method": self.client.patch
},
"delete": {
"url": constants.V_1_0_ROW_VOTE_ITEM_ENDPOINT,
"has_url_params": True,
"body": None,
"method": self.client.delete
}
}
|
py
|
1a56bedec162c89c6d11ac2bba5d6f882bcd0172
|
# coding: utf-8
"""
Mimir DataHub API
    No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 0.0.9
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.data_set_release import DataSetRelease
class TestDataSetRelease(unittest.TestCase):
""" DataSetRelease unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testDataSetRelease(self):
"""
Test DataSetRelease
"""
model = swagger_client.models.data_set_release.DataSetRelease()
if __name__ == '__main__':
unittest.main()
|
py
|
1a56bfe469fcdcfab1a0354a030e271c0296224f
|
"""
Defines different methods to configure a connection to a Kubernetes cluster.
"""
import asyncio
import base64
import contextlib
import copy
import datetime
import json
import logging
import os
import kubernetes
import kubernetes_asyncio
from kubernetes_asyncio.client import Configuration
from kubernetes_asyncio.config.kube_config import KubeConfigLoader, KubeConfigMerger
from kubernetes_asyncio.config.google_auth import google_auth_credentials
from kubernetes_asyncio.config.dateutil import parse_rfc3339
logger = logging.getLogger(__name__)
tzUTC = datetime.timezone.utc
class AutoRefreshKubeConfigLoader(KubeConfigLoader):
"""
Extends KubeConfigLoader, automatically attempts to refresh authentication
credentials before they expire.
"""
def __init__(self, *args, **kwargs):
super(AutoRefreshKubeConfigLoader, self).__init__(*args, **kwargs)
self._retry_count = 0
self._max_retries = float("Inf")
self.auto_refresh = True
self.refresh_task = None
self.last_refreshed = None
self.token_expire_ts = None
def __del__(self):
self.auto_refresh = False
def extract_oid_expiration_from_provider(self, provider):
"""
        Extracts the expiration timestamp from the provider's ID token
Parameters
----------
provider : authentication provider dictionary.
Returns
-------
expires : expiration timestamp
"""
parts = provider["config"]["id-token"].split(".")
if len(parts) != 3:
raise ValueError("oidc: JWT tokens should contain 3 period-delimited parts")
id_token = parts[1]
# Re-pad the unpadded JWT token
id_token += (4 - len(id_token) % 4) * "="
jwt_attributes = json.loads(base64.b64decode(id_token).decode("utf8"))
expires = jwt_attributes.get("exp")
return expires
async def create_refresh_task_from_expiration_timestamp(self, expiration_timestamp):
"""
Takes an expiration timestamp, and creates a refresh task to ensure that the token
does not expire.
Parameters
----------
expiration_timestamp : time at which the current authentication token will expire
Returns
-------
N/A
"""
# Set our token expiry to be actual expiry - 20%
expiry = parse_rfc3339(expiration_timestamp)
expiry_delta = datetime.timedelta(
seconds=(expiry - datetime.datetime.now(tz=tzUTC)).total_seconds()
)
scaled_expiry_delta = datetime.timedelta(
seconds=0.8 * expiry_delta.total_seconds()
)
self.refresh_task = asyncio.create_task(
self.refresh_after(
when=scaled_expiry_delta.total_seconds(), reschedule_on_failure=True
),
name="dask_auth_auto_refresh",
)
self.last_refreshed = datetime.datetime.now(tz=tzUTC)
self.token_expire_ts = self.last_refreshed + scaled_expiry_delta
async def refresh_after(self, when, reschedule_on_failure=False):
"""
        Refresh Kubernetes authentication
Parameters
----------
when : Seconds before we should refresh. This should be set to some delta before
the actual token expiration time, or you will likely see authentication race
/ failure conditions.
reschedule_on_failure : If the refresh task fails, re-try in 30 seconds, until
_max_retries is exceeded, then raise an exception.
"""
if not self.auto_refresh:
return
logger.debug(
msg=f"Refresh_at coroutine sleeping for "
f"{int(when // 60)} minutes {(when % 60):0.2f} seconds."
)
try:
await asyncio.sleep(when)
if self.provider == "gcp":
await self.refresh_gcp_token()
elif self.provider == "oidc":
await self.refresh_oid_token()
return
elif "exec" in self._user:
logger.warning(msg="Auto-refresh doesn't support generic ExecProvider")
return
except Exception as e:
logger.warning(
msg=f"Authentication refresh failed for provider '{self.provider}.'",
exc_info=e,
)
if not reschedule_on_failure or self._retry_count > self._max_retries:
raise
logger.warning(msg=f"Retrying '{self.provider}' in 30 seconds.")
self._retry_count += 1
self.refresh_task = asyncio.create_task(self.refresh_after(30))
async def refresh_oid_token(self):
"""
Adapted from kubernetes_asyncio/config/kube_config:_load_oid_token
Refreshes the existing oid token, if necessary, and creates a refresh task
that will keep the token from expiring.
Returns
-------
"""
provider = self._user["auth-provider"]
logger.debug("Refreshing OID token.")
if "config" not in provider:
raise ValueError("oidc: missing configuration")
if (not self.token_expire_ts) or (
self.token_expire_ts <= datetime.datetime.now(tz=tzUTC)
):
await self._refresh_oidc(provider)
expires = self.extract_oid_expiration_from_provider(provider=provider)
await self.create_refresh_task_from_expiration_timestamp(
expiration_timestamp=expires
)
self.token = "Bearer {}".format(provider["config"]["id-token"])
async def refresh_gcp_token(self):
"""
Adapted from kubernetes_asyncio/config/kube_config:load_gcp_token
Refreshes the existing gcp token, if necessary, and creates a refresh task
that will keep the token from expiring.
Returns
-------
"""
if "config" not in self._user["auth-provider"]:
self._user["auth-provider"].value["config"] = {}
config = self._user["auth-provider"]["config"]
if (not self.token_expire_ts) or (
self.token_expire_ts <= datetime.datetime.now(tz=tzUTC)
):
logger.debug("Refreshing GCP token.")
if self._get_google_credentials is not None:
if asyncio.iscoroutinefunction(self._get_google_credentials):
credentials = await self._get_google_credentials()
else:
credentials = self._get_google_credentials()
else:
# config is read-only.
extra_args = " --force-auth-refresh"
_config = {
"cmd-args": config["cmd-args"] + extra_args,
"cmd-path": config["cmd-path"],
}
credentials = await google_auth_credentials(_config)
config.value["access-token"] = credentials.token
config.value["expiry"] = credentials.expiry
# Set our token expiry to be actual expiry - 20%
await self.create_refresh_task_from_expiration_timestamp(
expiration_timestamp=config.value["expiry"]
)
if self._config_persister:
self._config_persister(self._config.value)
self.token = "Bearer %s" % config["access-token"]
async def _load_oid_token(self):
"""
Overrides KubeConfigLoader implementation.
Returns
-------
Auth token
"""
await self.refresh_oid_token()
return self.token
async def load_gcp_token(self):
"""
Override KubeConfigLoader implementation so that we can keep track of the expiration timestamp
and automatically refresh auth tokens.
Returns
-------
GCP access token
"""
await self.refresh_gcp_token()
return self.token
class AutoRefreshConfiguration(Configuration):
"""
Extends kubernetes_async Configuration to support automatic token refresh.
Lets us keep track of the original loader object, which can be used
to regenerate the authentication token.
"""
def __init__(self, loader, refresh_frequency=None, *args, **kwargs):
super(AutoRefreshConfiguration, self).__init__(*args, **kwargs)
# Set refresh api callback
self.refresh_api_key_hook = self.refresh_api_key
self.last_refreshed = datetime.datetime.now(tz=tzUTC)
self.loader = loader
# Adapted from kubernetes_asyncio/client/configuration.py:__deepcopy__
def __deepcopy__(self, memo):
"""
Modified so that we don't try to deep copy the loader off the config
"""
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
if k not in ("logger", "logger_file_handler", "loader"):
setattr(result, k, copy.deepcopy(v, memo))
# shallow copy loader object
result.loader = self.loader
# shallow copy of loggers
result.logger = copy.copy(self.logger)
# use setters to configure loggers
result.logger_file = self.logger_file
result.debug = self.debug
return result
def refresh_api_key(self, client_configuration):
"""
Checks to see if the loader has updated the authentication token. If it
has, the token is copied from the loader into the current configuration.
This function is assigned to Configuration.refresh_api_key_hook, and will
fire when entering get_api_key_with_prefix, before the api_key is retrieved.
"""
if self.loader.last_refreshed is not None:
if (
self.last_refreshed is None
or self.last_refreshed < self.loader.last_refreshed
):
logger.debug("Entering refresh_api_key_hook")
client_configuration.api_key[
"authorization"
] = client_configuration.loader.token
self.last_refreshed = datetime.datetime.now(tz=tzUTC)
class ClusterAuth(object):
"""
An abstract base class for methods for configuring a connection to a
Kubernetes API server.
Examples
--------
>>> from dask_kubernetes import KubeConfig
>>> auth = KubeConfig(context='minikube')
>>> from dask_kubernetes import KubeAuth
>>> auth = KubeAuth(host='https://localhost', username='superuser', password='pass')
"""
async def load(self):
"""
Load Kubernetes configuration and set as default
Raises
------
kubernetes.client.KubeConfigException
"""
raise NotImplementedError()
@staticmethod
async def load_first(auth=None):
"""
Load the first valid configuration in the list *auth*. A single
configuration method can be passed.
Parameters
----------
auth: List[ClusterAuth] (optional)
Configuration methods to attempt in order. Defaults to
``[InCluster(), KubeConfig()]``.
"""
if auth is None:
auth = ClusterAuth.DEFAULT
elif isinstance(auth, ClusterAuth):
auth = [auth]
elif isinstance(auth, list):
if not auth:
raise kubernetes_asyncio.config.ConfigException(
"No authorization methods were provided"
)
else:
msg = (
"Invalid authorization method provided. See ClusterAuth "
"docstring for ways to specify authentication methods"
)
raise ValueError(msg)
auth_exc = None
for auth_instance in auth:
try:
await auth_instance.load()
except (
kubernetes_asyncio.config.ConfigException,
kubernetes.config.ConfigException,
) as exc:
logger.debug(
"Failed to load configuration with %s method: %s",
auth_instance.__class__,
exc,
)
auth_exc = exc
else:
break
else:
raise auth_exc
class InCluster(ClusterAuth):
"""Configure the Kubernetes connection from a container's environment.
This authentication method is intended for use when the client is running
in a container started by Kubernetes with an authorized service account.
This loads the mounted service account token and discovers the Kubernetes
API via Kubernetes service discovery.
"""
async def load(self):
kubernetes.config.load_incluster_config()
kubernetes_asyncio.config.load_incluster_config()
class KubeConfig(ClusterAuth):
"""Configure the Kubernetes connection from a kubeconfig file.
Parameters
----------
config_file: str (optional)
The path of the kubeconfig file to load. Defaults to the value of the
``KUBECONFIG`` environment variable, or the string ``~/.kube/config``.
context: str (optional)
The kubeconfig context to use. Defaults to the value of ``current-context``
in the configuration file.
persist_config: bool (optional)
Whether changes to the configuration will be saved back to disk (e.g.
GCP token refresh). Defaults to ``True``.
"""
def __init__(self, config_file=None, context=None, persist_config=True):
self.config_file = config_file
self.context = context
self.persist_config = persist_config
async def load(self):
with contextlib.suppress(KeyError):
if self.config_file is None:
self.config_file = os.path.abspath(
os.path.expanduser(os.environ.get("KUBECONFIG", "~/.kube/config"))
)
await self.load_kube_config()
# Adapted from from kubernetes_asyncio/config/kube_config.py:get_kube_config_loader_for_yaml_file
def get_kube_config_loader_for_yaml_file(self):
kcfg = KubeConfigMerger(self.config_file)
config_persister = None
if self.persist_config:
config_persister = kcfg.save_changes()
return AutoRefreshKubeConfigLoader(
config_dict=kcfg.config,
config_base_path=None,
config_persister=config_persister,
)
# Adapted from kubernetes_asyncio/config/kube_config.py:load_kube_config
async def load_kube_config(self):
# Create a config loader, this will automatically refresh our credentials before they expire
loader = self.get_kube_config_loader_for_yaml_file()
# Grab our async + callback aware configuration
config = AutoRefreshConfiguration(loader)
await loader.load_and_set(config)
Configuration.set_default(config)
class KubeAuth(ClusterAuth):
"""Configure the Kubernetes connection explicitly.
Parameters
----------
host: str
The base URL of the Kubernetes host to connect
username: str (optional)
Username for HTTP basic authentication
password: str (optional)
Password for HTTP basic authentication
debug: bool (optional)
Debug switch
verify_ssl: bool (optional)
Set this to false to skip verifying SSL certificate when calling API
from https server. Defaults to ``True``.
ssl_ca_cert: str (optional)
Set this to customize the certificate file to verify the peer.
cert_file: str (optional)
Client certificate file
key_file: str (optional)
Client key file
assert_hostname: bool (optional)
Set this to True/False to enable/disable SSL hostname verification.
Defaults to True.
proxy: str (optional)
URL for a proxy to connect through
"""
def __init__(self, host, **kwargs):
# We need to create a new configuration in this way, because if we just
# instantiate a new Configuration object we will get the default
# values.
config = type.__call__(kubernetes.client.Configuration)
config.host = host
for key, value in kwargs.items():
setattr(config, key, value)
self.config = config
async def load(self):
kubernetes.client.Configuration.set_default(self.config)
await kubernetes_asyncio.client.Configuration.set_default(self.config)
ClusterAuth.DEFAULT = [InCluster(), KubeConfig()]
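# Illustrative sketch (an assumption, not shipped with this module): selecting the
# first authentication method that loads, which is what ClusterAuth.load_first()
# does with ClusterAuth.DEFAULT when no explicit method is passed.
async def _example_load_auth():
    # Prefer in-cluster service-account credentials, then a named kubeconfig context.
    await ClusterAuth.load_first([InCluster(), KubeConfig(context="minikube")])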
|
py
|
1a56c05c4eaf78e6aa000cb21e7f8fa39446acdb
|
#!/usr/bin/env python3
"""
Generate f32x4 floating-point arithmetic operation cases.
"""
from simd_arithmetic import SimdArithmeticCase
from simd_float_op import FloatingPointArithOp
from test_assert import AssertReturn
from simd import SIMD
class F32ArithOp(FloatingPointArithOp):
maximum = '0x1.fffffep+127'
class Simdf32x4ArithmeticCase(SimdArithmeticCase):
LANE_LEN = 4
LANE_TYPE = 'f32x4'
floatOp = F32ArithOp()
UNARY_OPS = ('neg', 'sqrt')
BINARY_OPS = ('add', 'sub', 'mul', 'div')
FLOAT_NUMBERS = (
'0x0p+0', '-0x0p+0', '0x1p-149', '-0x1p-149', '0x1p-126', '-0x1p-126', '0x1p-1', '-0x1p-1', '0x1p+0', '-0x1p+0',
'0x1.921fb6p+2', '-0x1.921fb6p+2', '0x1.fffffep+127', '-0x1.fffffep+127', 'inf', '-inf'
)
LITERAL_NUMBERS = ('0123456789', '0123456789e019', '0123456789e+019', '0123456789e-019',
'0123456789.', '0123456789.e019', '0123456789.e+019', '0123456789.e-019',
'0123456789.0123456789', '0123456789.0123456789e019',
'0123456789.0123456789e+019', '0123456789.0123456789e-019',
'0x0123456789ABCDEF', '0x0123456789ABCDEFp019',
'0x0123456789ABCDEFp+019', '0x0123456789ABCDEFp-019',
'0x0123456789ABCDEF.', '0x0123456789ABCDEF.p019',
'0x0123456789ABCDEF.p+019', '0x0123456789ABCDEF.p-019',
'0x0123456789ABCDEF.019aF', '0x0123456789ABCDEF.019aFp019',
'0x0123456789ABCDEF.019aFp+019', '0x0123456789ABCDEF.019aFp-019'
)
NAN_NUMBERS = ('nan', '-nan', 'nan:0x200000', '-nan:0x200000')
def full_op_name(self, op_name):
return self.LANE_TYPE + '.' + op_name
@staticmethod
def v128_const(lane, value):
return '(v128.const {lane_type} {value})'.format(lane_type=lane, value=' '.join([str(value)] * 4))
@property
def combine_ternary_arith_test_data(self):
return {
'add-sub': [
['1.125'] * 4, ['0.25'] * 4, ['0.125'] * 4, ['1.0'] * 4
],
'sub-add': [
['1.125'] * 4, ['0.25'] * 4, ['0.125'] * 4, ['1.25'] * 4
],
'mul-add': [
['1.25'] * 4, ['0.25'] * 4, ['0.25'] * 4, ['0.375'] * 4
],
'mul-sub': [
['1.125'] * 4, ['0.125'] * 4, ['0.25'] * 4, ['0.25'] * 4
],
'div-add': [
['1.125'] * 4, ['0.125'] * 4, ['0.25'] * 4, ['5.0'] * 4
],
'div-sub': [
['1.125'] * 4, ['0.125'] * 4, ['0.25'] * 4, ['4.0'] * 4
],
'mul-div': [
['1.125'] * 4, ['0.125'] * 4, ['0.25'] * 4, ['2.25'] * 4
],
'div-mul': [
['1.125'] * 4, ['4'] * 4, ['0.25'] * 4, ['18.0'] * 4
]
}
@property
def combine_binary_arith_test_data(self):
return {
'add-neg': [
['1.125'] * 4, ['0.125'] * 4, ['-1.0'] * 4
],
'sub-neg': [
['1.125'] * 4, ['0.125'] * 4, ['-1.25'] * 4
],
'mul-neg': [
['1.5'] * 4, ['0.25'] * 4, ['-0.375'] * 4
],
'div-neg': [
['1.5'] * 4, ['0.25'] * 4, ['-6'] * 4
],
'add-sqrt': [
['2.25'] * 4, ['0.25'] * 4, ['1.75'] * 4
],
'sub-sqrt': [
['2.25'] * 4, ['0.25'] * 4, ['1.25'] * 4
],
'mul-sqrt': [
['2.25'] * 4, ['0.25'] * 4, ['0.375'] * 4
],
'div-sqrt': [
['2.25'] * 4, ['0.25'] * 4, ['6'] * 4
]
}
def get_normal_case(self):
"""Normal test cases from WebAssembly core tests
"""
cases = []
binary_test_data = []
unary_test_data = []
for op in self.BINARY_OPS:
op_name = self.full_op_name(op)
for operand1 in self.FLOAT_NUMBERS:
for operand2 in self.FLOAT_NUMBERS:
result = self.floatOp.binary_op(op, operand1, operand2)
if 'nan' not in result:
# Normal floating point numbers as the results
binary_test_data.append([op_name, operand1, operand2, result])
else:
# Since the results contain the 'nan' string, the result literals would be
# nan:canonical
binary_test_data.append([op_name, operand1, operand2, 'nan:canonical'])
for operand1 in self.NAN_NUMBERS:
for operand2 in self.FLOAT_NUMBERS:
if 'nan:' in operand1 or 'nan:' in operand2:
# When the arguments contain 'nan:', the result literal is nan:arithmetic
# Consider the different order of arguments as different cases.
binary_test_data.append([op_name, operand1, operand2, 'nan:arithmetic'])
binary_test_data.append([op_name, operand2, operand1, 'nan:arithmetic'])
else:
# No 'nan' string found, then the result literal is nan:canonical.
binary_test_data.append([op_name, operand1, operand2, 'nan:canonical'])
binary_test_data.append([op_name, operand2, operand1, 'nan:canonical'])
for operand2 in self.NAN_NUMBERS:
if 'nan:' in operand1 or 'nan:' in operand2:
binary_test_data.append([op_name, operand1, operand2, 'nan:arithmetic'])
else:
binary_test_data.append([op_name, operand1, operand2, 'nan:canonical'])
for operand in self.LITERAL_NUMBERS:
if self.LANE_TYPE == 'f32x4':
single_precision = True
else:
single_precision = False
result = self.floatOp.binary_op(op, operand, operand, single_prec=single_precision)
binary_test_data.append([op_name, operand, operand, result])
for case in binary_test_data:
cases.append(str(AssertReturn(case[0],
[SIMD.v128_const(elem, self.LANE_TYPE) for elem in case[1:-1]],
SIMD.v128_const(case[-1], self.LANE_TYPE))))
        for operand in self.FLOAT_NUMBERS + self.NAN_NUMBERS + self.LITERAL_NUMBERS:
            op_name = self.full_op_name('sqrt')
            if 'nan:' in operand:
                unary_test_data.append([op_name, operand, 'nan:arithmetic'])
            elif 'nan' in operand:
                unary_test_data.append([op_name, operand, 'nan:canonical'])
            else:
                # Normal floating point numbers for sqrt operation
result = self.floatOp.float_sqrt(operand)
if 'nan' not in result:
# Get the sqrt value correctly
unary_test_data.append([op_name, operand, result])
else:
                    # The sqrt result is a NaN, so the expected literal is nan:canonical
unary_test_data.append([op_name, operand, 'nan:canonical'])
for operand in self.FLOAT_NUMBERS + self.NAN_NUMBERS + self.LITERAL_NUMBERS:
op_name = self.full_op_name('neg')
result = self.floatOp.float_neg(operand)
# Neg operation is valid for all the floating point numbers
unary_test_data.append([op_name, operand, result])
for case in unary_test_data:
cases.append(str(AssertReturn(case[0],
[SIMD.v128_const(elem, self.LANE_TYPE) for elem in case[1:-1]],
SIMD.v128_const(case[-1], self.LANE_TYPE))))
self.mixed_nan_test(cases)
return '\n'.join(cases)
@property
def mixed_sqrt_nan_test_data(self):
return {
"sqrt_canon": [
('-1.0', 'nan', '4.0', '9.0'),
('nan:canonical', 'nan:canonical', '2.0', '3.0')
],
'sqrt_arith': [
('nan:0x200000', '-nan:0x200000', '16.0', '25.0'),
('nan:arithmetic', 'nan:arithmetic', '4.0', '5.0')
],
'sqrt_mixed': [
('-inf', 'nan:0x200000', '36.0', '49.0'),
('nan:canonical', 'nan:arithmetic', '6.0', '7.0')
]
}
def mixed_nan_test(self, cases):
"""Mixed f32x4 tests when only expects NaNs in a subset of lanes.
"""
mixed_cases = ['\n\n;; Mixed f32x4 tests when some lanes are NaNs', '(module\n']
cases.extend(mixed_cases)
for test_type, test_data in sorted(self.mixed_sqrt_nan_test_data.items()):
func = [' (func (export "{lane}_{t}") (result v128)'.format(
lane=self.LANE_TYPE, t=test_type),
' ({lane}.{op} (v128.const {lane} {value})))'.format(
lane=self.LANE_TYPE, op=test_type.split('_')[0], value=' '.join(test_data[0]))]
cases.extend(func)
cases.append(')\n')
for test_type, test_data in sorted(self.mixed_sqrt_nan_test_data.items()):
cases.append('(assert_return (invoke "{lane}_{t}") (v128.const {lane} {result}))'.format(
lane=self.LANE_TYPE, t=test_type, result=' '.join(test_data[1])))
def gen_test_cases():
simd_f32x4_arith = Simdf32x4ArithmeticCase()
simd_f32x4_arith.gen_test_cases()
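# Illustrative sketch (an assumption, not part of the generator): the v128_const
# helper defined above splats a single scalar across all four f32x4 lanes.
def _example_v128_const():
    return Simdf32x4ArithmeticCase.v128_const('f32x4', '1.5')
    # -> '(v128.const f32x4 1.5 1.5 1.5 1.5)'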
if __name__ == '__main__':
gen_test_cases()
|
py
|
1a56c1f6ef7975ce7734e28b68d5aeca61e29cd1
|
#
# PySNMP MIB module INTEL-FREXT-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/INTEL-FREXT-MIB
# Produced by pysmi-0.3.4 at Wed May 1 13:54:17 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion, ConstraintsIntersection, ValueRangeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion", "ConstraintsIntersection", "ValueRangeConstraint")
DLCI, = mibBuilder.importSymbols("FRAME-RELAY-DTE-MIB", "DLCI")
mib2ext, = mibBuilder.importSymbols("INTEL-GEN-MIB", "mib2ext")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
ObjectIdentity, Gauge32, Integer32, Unsigned32, Counter32, IpAddress, MibIdentifier, MibScalar, MibTable, MibTableRow, MibTableColumn, iso, NotificationType, TimeTicks, ModuleIdentity, Counter64, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "ObjectIdentity", "Gauge32", "Integer32", "Unsigned32", "Counter32", "IpAddress", "MibIdentifier", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso", "NotificationType", "TimeTicks", "ModuleIdentity", "Counter64", "Bits")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
frEx = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 6, 28))
frCircuitExt = MibIdentifier((1, 3, 6, 1, 4, 1, 343, 6, 28, 1))
class InterfaceIndex(Integer32):
pass
frCirExtEncTable = MibTable((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 1), )
if mibBuilder.loadTexts: frCirExtEncTable.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtEncTable.setDescription('Encryption information table.')
frCirExtEncEntry = MibTableRow((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 1, 1), ).setIndexNames((0, "INTEL-FREXT-MIB", "frCirExtEncIfIndex"), (0, "INTEL-FREXT-MIB", "frCirExtEncDlci"))
if mibBuilder.loadTexts: frCirExtEncEntry.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtEncEntry.setDescription('')
frCirExtEncIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 1, 1, 1), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtEncIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtEncIfIndex.setDescription('The ifIndex Value of the ifEntry this virtual circuit is layered onto.')
frCirExtEncDlci = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 1, 1, 2), DLCI()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtEncDlci.setReference('Draft American National Standard T1.618-1991, Section 3.3.6')
if mibBuilder.loadTexts: frCirExtEncDlci.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtEncDlci.setDescription('The Data Link Connection Identifier for this virtual circuit.')
frCirExtEncLogicalIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 1, 1, 3), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtEncLogicalIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtEncLogicalIfIndex.setDescription('Normally the same value as frDlcmiIfIndex, but different when an implementation associates a virtual ifEntry with a DLC or set of DLCs in order to associate higher layer objects such as the ipAddrEntry with a subset of the virtual circuits on a Frame Relay interface. The type of such ifEntries is defined by the higher layer object; for example, if PPP/Frame Relay is implemented, the ifType of this ifEntry would be PPP. If it is not so defined, as would be the case with an ipAddrEntry, it should be of type Other.')
frCirExtEncEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 1, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtEncEnabled.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtEncEnabled.setDescription('Encryption enabled on link.')
frCirExtEncNegotiated = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 1, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtEncNegotiated.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtEncNegotiated.setDescription('Encryption negotiated on link.')
frCirExtEncResetRequestsRx = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 1, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtEncResetRequestsRx.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtEncResetRequestsRx.setDescription('Number of encryption history reset requests received.')
frCirExtEncResetRequestsTx = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 1, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtEncResetRequestsTx.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtEncResetRequestsTx.setDescription('Number of encryption history reset requests sent.')
frCirExtEncResetAcksRx = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 1, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtEncResetAcksRx.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtEncResetAcksRx.setDescription('Number of encryption history reset acknowledgments received.')
frCirExtEncResetAcksTx = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 1, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtEncResetAcksTx.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtEncResetAcksTx.setDescription('Number of encryption history reset acknowledgments sent.')
frCirExtEncRxDiscarded = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 1, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtEncRxDiscarded.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtEncRxDiscarded.setDescription('Number of packets discarded due to lack of synchronisation, buffer lacks or packet errors.')
frCirExtEncTxDiscarded = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 1, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtEncTxDiscarded.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtEncTxDiscarded.setDescription('Number of packets discarded due to buffer lacks or transmit queue overflows')
frCirExtEncReceiverState = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 1, 1, 12), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ok", 1), ("error", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtEncReceiverState.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtEncReceiverState.setDescription('State of receiver. Error state means waiting for reset acknowledgment')
frCirExtCompTable = MibTable((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2), )
if mibBuilder.loadTexts: frCirExtCompTable.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompTable.setDescription('Compression information table.')
frCirExtCompEntry = MibTableRow((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2, 1), ).setIndexNames((0, "INTEL-FREXT-MIB", "frCirExtCompIfIndex"), (0, "INTEL-FREXT-MIB", "frCirExtCompDlci"))
if mibBuilder.loadTexts: frCirExtCompEntry.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompEntry.setDescription('')
frCirExtCompIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2, 1, 1), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtCompIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompIfIndex.setDescription('The ifIndex Value of the ifEntry this virtual circuit is layered onto.')
frCirExtCompDlci = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2, 1, 2), DLCI()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtCompDlci.setReference('Draft American National Standard T1.618-1991, Section 3.3.6')
if mibBuilder.loadTexts: frCirExtCompDlci.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompDlci.setDescription('The Data Link Connection Identifier for this virtual circuit.')
frCirExtCompLogicalIfIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2, 1, 3), InterfaceIndex()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtCompLogicalIfIndex.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompLogicalIfIndex.setDescription('Normally the same value as frDlcmiIfIndex, but different when an implementation associates a virtual ifEntry with a DLC or set of DLCs in order to associate higher layer objects such as the ipAddrEntry with a subset of the virtual circuits on a Frame Relay interface. The type of such ifEntries is defined by the higher layer object; for example, if PPP/Frame Relay is implemented, the ifType of this ifEntry would be PPP. If it is not so defined, as would be the case with an ipAddrEntry, it should be of type Other.')
frCirExtCompEnabled = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2, 1, 4), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtCompEnabled.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompEnabled.setDescription('Compression enabled on link.')
frCirExtCompNegotiated = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("no", 1), ("yes", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtCompNegotiated.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompNegotiated.setDescription('Compression negotiated on link.')
frCirExtCompDecoderBytesIn = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2, 1, 6), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtCompDecoderBytesIn.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompDecoderBytesIn.setDescription('Total number of bytes received by the decoder.')
frCirExtCompDecoderDecompBytesOut = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2, 1, 7), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtCompDecoderDecompBytesOut.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompDecoderDecompBytesOut.setDescription('Decompressed bytes from the decoder.')
frCirExtCompDecoderUncompBytesOut = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2, 1, 8), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtCompDecoderUncompBytesOut.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompDecoderUncompBytesOut.setDescription('Uncompressed bytes received by the decoder.')
frCirExtCompDecoderCompPacketsIn = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2, 1, 9), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtCompDecoderCompPacketsIn.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompDecoderCompPacketsIn.setDescription('Compressed packets received by the decoder.')
frCirExtCompDecoderUncompPacketsIn = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2, 1, 10), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtCompDecoderUncompPacketsIn.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompDecoderUncompPacketsIn.setDescription('Uncompressed packets received by the decoder.')
frCirExtCompDecoderDecompQueueLength = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2, 1, 11), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtCompDecoderDecompQueueLength.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompDecoderDecompQueueLength.setDescription('Number of packets waiting to be decompressed.')
frCirExtCompDecoderCompressionRatio = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2, 1, 12), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtCompDecoderCompressionRatio.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompDecoderCompressionRatio.setDescription('Receive compression ratio (multiplied by 100).')
frCirExtCompDecoderResetRequestTx = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2, 1, 13), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtCompDecoderResetRequestTx.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompDecoderResetRequestTx.setDescription('The number of times the decoder requested re-initialization of the compression history.')
frCirExtCompDecoderResetAcksRx = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2, 1, 14), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtCompDecoderResetAcksRx.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompDecoderResetAcksRx.setDescription('The number of acknowledgments to reset requests received by the router.')
frCirExtCompDecoderRxDiscarded = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2, 1, 15), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtCompDecoderRxDiscarded.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompDecoderRxDiscarded.setDescription('Number of packets discarded by the decoder due to buffer lacks or synchronisation problems.')
frCirExtCompDecoderState = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2, 1, 16), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2))).clone(namedValues=NamedValues(("ok", 1), ("error", 2)))).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtCompDecoderState.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompDecoderState.setDescription('State of decoder. Error state means waiting for reset acknowledgment')
frCirExtCompEncoderBytesIn = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2, 1, 17), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtCompEncoderBytesIn.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompEncoderBytesIn.setDescription('Total number of bytes received by the encoder.')
frCirExtCompEncoderCompBytesOut = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2, 1, 18), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtCompEncoderCompBytesOut.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompEncoderCompBytesOut.setDescription('Number of compressed bytes leaving the encoder.')
frCirExtCompEncoderUncompBytesOut = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2, 1, 19), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtCompEncoderUncompBytesOut.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompEncoderUncompBytesOut.setDescription('Number of uncompressed bytes leaving the encoder.')
frCirExtCompEncoderCompPacketsOut = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2, 1, 20), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtCompEncoderCompPacketsOut.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompEncoderCompPacketsOut.setDescription('Number of compressed packets leaving the encoder.')
frCirExtCompEncoderUncompPacketsOut = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2, 1, 21), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtCompEncoderUncompPacketsOut.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompEncoderUncompPacketsOut.setDescription('Number of uncompressed packets leaving the encoder.')
frCirExtCompEncoderCompQueueLength = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2, 1, 22), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtCompEncoderCompQueueLength.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompEncoderCompQueueLength.setDescription('Number of packets waiting to be compressed.')
frCirExtCompEncoderCompressionRation = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2, 1, 23), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtCompEncoderCompressionRation.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompEncoderCompressionRation.setDescription('Transmit compression ratio (multiplied by 100).')
frCirExtCompEncoderResetRequestRx = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2, 1, 24), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtCompEncoderResetRequestRx.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompEncoderResetRequestRx.setDescription('The number of times the remote end requested re-initialization of the compression history.')
frCirExtCompEncoderResetAckTx = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2, 1, 25), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtCompEncoderResetAckTx.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompEncoderResetAckTx.setDescription('The number of acknowledgments to reset requests transmitted by the router.')
frCirExtCompEncoderTxDiscarded = MibTableColumn((1, 3, 6, 1, 4, 1, 343, 6, 28, 1, 2, 1, 26), Counter32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: frCirExtCompEncoderTxDiscarded.setStatus('mandatory')
if mibBuilder.loadTexts: frCirExtCompEncoderTxDiscarded.setDescription('Number of packets discarded by the encoder due to buffer lacks or transmit queue overflows.')
mibBuilder.exportSymbols("INTEL-FREXT-MIB", frCirExtEncTxDiscarded=frCirExtEncTxDiscarded, frCirExtCompDecoderUncompBytesOut=frCirExtCompDecoderUncompBytesOut, frCircuitExt=frCircuitExt, frCirExtCompIfIndex=frCirExtCompIfIndex, frCirExtEncResetRequestsRx=frCirExtEncResetRequestsRx, frCirExtCompDecoderState=frCirExtCompDecoderState, frCirExtCompEncoderUncompPacketsOut=frCirExtCompEncoderUncompPacketsOut, frCirExtCompLogicalIfIndex=frCirExtCompLogicalIfIndex, frCirExtEncResetAcksTx=frCirExtEncResetAcksTx, frCirExtCompEncoderUncompBytesOut=frCirExtCompEncoderUncompBytesOut, frCirExtCompEncoderBytesIn=frCirExtCompEncoderBytesIn, frCirExtEncNegotiated=frCirExtEncNegotiated, frCirExtCompEncoderCompBytesOut=frCirExtCompEncoderCompBytesOut, frCirExtEncRxDiscarded=frCirExtEncRxDiscarded, frCirExtEncEnabled=frCirExtEncEnabled, frCirExtCompDecoderDecompQueueLength=frCirExtCompDecoderDecompQueueLength, frCirExtEncTable=frCirExtEncTable, frCirExtCompDecoderRxDiscarded=frCirExtCompDecoderRxDiscarded, frCirExtCompDecoderDecompBytesOut=frCirExtCompDecoderDecompBytesOut, frCirExtEncReceiverState=frCirExtEncReceiverState, frCirExtEncEntry=frCirExtEncEntry, frCirExtCompEncoderCompPacketsOut=frCirExtCompEncoderCompPacketsOut, frCirExtCompEntry=frCirExtCompEntry, frCirExtCompDlci=frCirExtCompDlci, frCirExtEncDlci=frCirExtEncDlci, frCirExtCompEncoderCompressionRation=frCirExtCompEncoderCompressionRation, frCirExtCompEncoderResetAckTx=frCirExtCompEncoderResetAckTx, frCirExtCompEncoderResetRequestRx=frCirExtCompEncoderResetRequestRx, frCirExtCompTable=frCirExtCompTable, frCirExtCompEnabled=frCirExtCompEnabled, frCirExtCompEncoderCompQueueLength=frCirExtCompEncoderCompQueueLength, frCirExtCompDecoderCompressionRatio=frCirExtCompDecoderCompressionRatio, frCirExtEncLogicalIfIndex=frCirExtEncLogicalIfIndex, frCirExtCompDecoderBytesIn=frCirExtCompDecoderBytesIn, frCirExtCompDecoderResetAcksRx=frCirExtCompDecoderResetAcksRx, InterfaceIndex=InterfaceIndex, frCirExtCompEncoderTxDiscarded=frCirExtCompEncoderTxDiscarded, frCirExtEncIfIndex=frCirExtEncIfIndex, frCirExtCompDecoderUncompPacketsIn=frCirExtCompDecoderUncompPacketsIn, frCirExtEncResetRequestsTx=frCirExtEncResetRequestsTx, frCirExtCompDecoderResetRequestTx=frCirExtCompDecoderResetRequestTx, frCirExtEncResetAcksRx=frCirExtEncResetAcksRx, frEx=frEx, frCirExtCompDecoderCompPacketsIn=frCirExtCompDecoderCompPacketsIn, frCirExtCompNegotiated=frCirExtCompNegotiated)
|
py
|
1a56c20d58b14a6b032104a689d88974797bbd43
|
import math
from math import pi, radians, degrees, sqrt
def sin(x):
return math.sin(radians(x))
def cos(x):
return math.cos(radians(x))
def tan(x):
return math.tan(radians(x))
def atan(x):
return degrees(math.atan(x))
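# Added usage sketch (not part of the original module): these wrappers take and
# return angles in degrees rather than radians.
if __name__ == "__main__":
    assert abs(sin(30) - 0.5) < 1e-9
    assert abs(cos(60) - 0.5) < 1e-9
    assert abs(tan(45) - 1.0) < 1e-9
    assert abs(atan(1) - 45.0) < 1e-9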
|
py
|
1a56c2558de881bdb89abd0d1179676f6c177245
|
'''
The oil properties for the sample oil named oil_ans_mp
(Note: we put these things in their own separate file because
some oil properties records can get quite large)
'''
json_data = {'name': u'oil_ans_mp',
'adios_oil_id': u'AD01759',
'api': 29.9,
'saturates_fraction': None,
'aromatics_fraction': None,
'resins_fraction': None,
'asphaltenes_fraction': None,
'flash_point_min_k': 250.15,
'flash_point_max_k': 250.15,
'pour_point_min_k': 219.15,
'pour_point_max_k': 219.15,
'oil_water_interfacial_tension_n_m': 0.0238,
'oil_water_interfacial_tension_ref_temp_k': 273.15,
'oil_seawater_interfacial_tension_n_m': 0.0,
'oil_seawater_interfacial_tension_ref_temp_k': 273.15,
'bullwinkle_time': None,
'bullwinkle_fraction': 0.1937235,
'adhesion_kg_m_2': 0.28,
'emulsion_water_fraction_max': 0.9,
'solubility': 0.0,
'benzene_fraction': None,
'naphthenes_fraction': None,
'paraffins_fraction': None,
'polars_fraction': None,
'sulphur_fraction': 0.0116,
'wax_content_fraction': None,
'vanadium_ppm': None,
'nickel_ppm': None,
'k0y': 2.024e-06,
'densities': [{'kg_m_3': 886.9, 'ref_temp_k': 273.15,
'weathering': 0.0},
{'kg_m_3': 876.1, 'ref_temp_k': 288.15,
'weathering': 0.0}],
'kvis': [{'m_2_s': 3.8335776e-05, 'ref_temp_k': 273.15,
'weathering': 0.0},
{'m_2_s': 1.8262755e-05, 'ref_temp_k': 288.15,
'weathering': 0.0}],
'cuts': [{'liquid_temp_k': None, 'vapor_temp_k': 313.15,
'fraction': 0.03},
{'liquid_temp_k': None, 'vapor_temp_k': 353.15,
'fraction': 0.07},
{'liquid_temp_k': None, 'vapor_temp_k': 393.15,
'fraction': 0.13},
{'liquid_temp_k': None, 'vapor_temp_k': 433.15,
'fraction': 0.19},
{'liquid_temp_k': None, 'vapor_temp_k': 473.15,
'fraction': 0.25},
{'liquid_temp_k': None, 'vapor_temp_k': 573.15,
'fraction': 0.42},
{'liquid_temp_k': None, 'vapor_temp_k': 673.15,
'fraction': 0.6},
{'liquid_temp_k': None, 'vapor_temp_k': 773.15,
'fraction': 0.76},
{'liquid_temp_k': None, 'vapor_temp_k': 873.15,
'fraction': 0.88},
{'liquid_temp_k': None, 'vapor_temp_k': 973.15,
'fraction': 0.95}],
'molecular_weights': [{'g_mol': 73.745515,
'ref_temp_k': 313.15,
'sara_type': u'Saturates'},
{'g_mol': 63.128827,
'ref_temp_k': 313.15,
'sara_type': u'Aromatics'},
{'g_mol': 91.356145,
'ref_temp_k': 353.15,
'sara_type': u'Saturates'},
{'g_mol': 79.350748,
'ref_temp_k': 353.15,
'sara_type': u'Aromatics'},
{'g_mol': 111.29727,
'ref_temp_k': 393.15,
'sara_type': u'Saturates'},
{'g_mol': 97.887821,
'ref_temp_k': 393.15,
'sara_type': u'Aromatics'},
{'g_mol': 133.84122,
'ref_temp_k': 433.15,
'sara_type': u'Saturates'},
{'g_mol': 119.03745,
'ref_temp_k': 433.15,
'sara_type': u'Aromatics'},
{'g_mol': 159.32505,
'ref_temp_k': 473.15,
'sara_type': u'Saturates'},
{'g_mol': 143.17399,
'ref_temp_k': 473.15,
'sara_type': u'Aromatics'},
{'g_mol': 238.9158,
'ref_temp_k': 573.15,
'sara_type': u'Saturates'},
{'g_mol': 220.03378,
'ref_temp_k': 573.15,
'sara_type': u'Aromatics'},
{'g_mol': 350.08277,
'ref_temp_k': 673.15,
'sara_type': u'Saturates'},
{'g_mol': 331.15639,
'ref_temp_k': 673.15,
'sara_type': u'Aromatics'},
{'g_mol': 513.22113,
'ref_temp_k': 773.15,
'sara_type': u'Saturates'},
{'g_mol': 503.20373,
'ref_temp_k': 773.15,
'sara_type': u'Aromatics'},
{'g_mol': 777.0013,
'ref_temp_k': 873.15,
'sara_type': u'Saturates'},
{'g_mol': 811.5901,
'ref_temp_k': 873.15,
'sara_type': u'Aromatics'},
{'g_mol': 1310.7383,
'ref_temp_k': 973.15,
'sara_type': u'Saturates'},
{'g_mol': 1680.1263,
'ref_temp_k': 973.15,
'sara_type': u'Aromatics'}],
'sara_densities': [{'density': 1100.0,
'ref_temp_k': 1015.0,
'sara_type': u'Asphaltenes'},
{'density': 1100.0,
'ref_temp_k': 1015.0,
'sara_type': u'Resins'},
{'density': 587.197919,
'ref_temp_k': 313.15,
'sara_type': u'Saturates'},
{'density': 611.204823,
'ref_temp_k': 353.15,
'sara_type': u'Saturates'},
{'density': 633.460874,
'ref_temp_k': 393.15,
'sara_type': u'Saturates'},
{'density': 654.254134,
'ref_temp_k': 433.15,
'sara_type': u'Saturates'},
{'density': 673.803585,
'ref_temp_k': 473.15,
'sara_type': u'Saturates'},
{'density': 718.27343,
'ref_temp_k': 573.15,
'sara_type': u'Saturates'},
{'density': 757.82859,
'ref_temp_k': 673.15,
'sara_type': u'Saturates'},
{'density': 793.63648,
'ref_temp_k': 773.15,
'sara_type': u'Saturates'},
{'density': 826.475509,
'ref_temp_k': 873.15,
'sara_type': u'Saturates'},
{'density': 856.89373,
'ref_temp_k': 973.15,
'sara_type': u'Saturates'},
{'density': 704.637503,
'ref_temp_k': 313.15,
'sara_type': u'Aromatics'},
{'density': 733.445788,
'ref_temp_k': 353.15,
'sara_type': u'Aromatics'},
{'density': 760.153049,
'ref_temp_k': 393.15,
'sara_type': u'Aromatics'},
{'density': 785.104961,
'ref_temp_k': 433.15,
'sara_type': u'Aromatics'},
{'density': 808.564302,
'ref_temp_k': 473.15,
'sara_type': u'Aromatics'},
{'density': 861.928116,
'ref_temp_k': 573.15,
'sara_type': u'Aromatics'},
{'density': 909.394309,
'ref_temp_k': 673.15,
'sara_type': u'Aromatics'},
{'density': 952.363777,
'ref_temp_k': 773.15,
'sara_type': u'Aromatics'},
{'density': 991.770611,
'ref_temp_k': 873.15,
'sara_type': u'Aromatics'},
{'density': 1028.27248,
'ref_temp_k': 973.15,
'sara_type': u'Aromatics'}],
'sara_fractions': [{'fraction': 0.0766216,
'ref_temp_k': 1015.0,
'sara_type': u'Resins'},
{'fraction': 0.0169778,
'ref_temp_k': 1015.0,
'sara_type': u'Asphaltenes'},
{'fraction': 0.0147412,
'ref_temp_k': 313.15,
'sara_type': u'Saturates'},
{'fraction': 0.0152588,
'ref_temp_k': 313.15,
'sara_type': u'Aromatics'},
{'fraction': 0.0114053,
'ref_temp_k': 353.15,
'sara_type': u'Saturates'},
{'fraction': 0.0285947,
'ref_temp_k': 353.15,
'sara_type': u'Aromatics'},
{'fraction': 0.0036607,
'ref_temp_k': 393.15,
'sara_type': u'Saturates'},
{'fraction': 0.0563393,
'ref_temp_k': 393.15,
'sara_type': u'Aromatics'},
{'fraction': 0.0,
'ref_temp_k': 433.15,
'sara_type': u'Saturates'},
{'fraction': 0.06,
'ref_temp_k': 433.15,
'sara_type': u'Aromatics'},
{'fraction': 0.0,
'ref_temp_k': 473.15,
'sara_type': u'Saturates'},
{'fraction': 0.06,
'ref_temp_k': 473.15,
'sara_type': u'Aromatics'},
{'fraction': 0.085,
'ref_temp_k': 573.15,
'sara_type': u'Saturates'},
{'fraction': 0.085,
'ref_temp_k': 573.15,
'sara_type': u'Aromatics'},
{'fraction': 0.09,
'ref_temp_k': 673.15,
'sara_type': u'Saturates'},
{'fraction': 0.09,
'ref_temp_k': 673.15,
'sara_type': u'Aromatics'},
{'fraction': 0.08,
'ref_temp_k': 773.15,
'sara_type': u'Saturates'},
{'fraction': 0.08,
'ref_temp_k': 773.15,
'sara_type': u'Aromatics'},
{'fraction': 0.06,
'ref_temp_k': 873.15,
'sara_type': u'Saturates'},
{'fraction': 0.06,
'ref_temp_k': 873.15,
'sara_type': u'Aromatics'},
{'fraction': 0.0132,
'ref_temp_k': 973.15,
'sara_type': u'Saturates'},
{'fraction': 0.0132,
'ref_temp_k': 973.15,
'sara_type': u'Aromatics'}],
'categories': [{'children': [],
'name': u'Medium',
'parent': {'name': u'Crude'},
}],
}
|
py
|
1a56c2b6fd915b593a1779a66aa340cd6f26287f
|
import csv
import numpy as np
from sklearn.svm import SVR
import matplotlib.pyplot as plt
import time
print('Please enter your username.')
name=input()
username='Ayush'
if name == (username):
print('Please wait')
time.sleep(1.5)
print('')
print('Correct, please enter your password.')
password=input()
password1='15bcs1402'
if password == (password1):
print('Please wait')
time.sleep(1.5)
print('')
print('Correct, logging in.')
time.sleep(1.5)
print('Welcome '+username)
time.sleep(3)
dates = []
prices = []
def get_data(filename):
with open(filename, 'r') as csvfile:
csvFileReader = csv.reader(csvfile)
next(csvFileReader) # skipping column names
for row in csvFileReader:
dates.append(int(row[0].split('-')[0]))
prices.append(float(row[1]))
return
def predict_price(dates, prices, x):
dates = np.reshape(dates,(len(dates), 1)) # converting to matrix of n X 1
svr_rbf = SVR(kernel= 'rbf', C= 1e3, gamma= 0.1) # defining the support vector regression models
svr_lin = SVR(kernel= 'linear', C= 1e3)
svr_poly = SVR(kernel= 'poly', C= 1e3, degree= 2)
svr_rbf.fit(dates, prices) # fitting the data points in the models
svr_lin.fit(dates, prices)
svr_poly.fit(dates, prices)
plt.scatter(dates, prices, color= 'black', label= 'Data') # plotting the initial datapoints
plt.plot(dates, svr_rbf.predict(dates), color= 'red', label= 'RBF model') # plotting the line made by the RBF kernel
plt.plot(dates,svr_lin.predict(dates), color= 'green', label= 'Linear model') # plotting the line made by linear kernel
plt.plot(dates,svr_poly.predict(dates), color= 'blue', label= 'Polynomial model') # plotting the line made by polynomial kernel
plt.xlabel('Date')
plt.ylabel('Price')
plt.title('Support Vector Regression')
plt.legend()
plt.show()
    x = np.array(x).reshape(1, -1)  # predict() expects a 2D array of shape (n_samples, n_features)
    return svr_rbf.predict(x)[0], svr_lin.predict(x)[0], svr_poly.predict(x)[0]
get_data('cisco.csv') # calling get_data method by passing the csv file to it
print ("Dates- ", dates)
print ("Prices- ", prices)
predicted_price = predict_price(dates, prices, 29)
print ("\nThe stock open price for 5th March is:")
print ("RBF kernel: $", str(predicted_price[0]))
print ("Linear kernel: $", str(predicted_price[1]))
print ("Polynomial kernel: $", str(predicted_price[2]))
if name != (username):
print('Please wait')
time.sleep(1.5)
print('')
    print('Incorrect, closing program.')
time.sleep(1.5)
|
py
|
1a56c2d51a0a6ce0a873187890b6b173a89b45bb
|
#!/usr/bin/env python
# coding: utf-8
# # Complete linear regression tutorial
# ## Using continuous integration
# ## Collect data using pandas
# In[59]:
# modules required for the notebook
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import LinearRegression
from sklearn import model_selection
from sklearn import metrics
# In[60]:
# read the data file
# use the index_col param: Column to use as the row labels of the DataFrame
df = pd.read_csv('Advertising.csv',
index_col=0)
df.head()
# In[61]:
df.describe()
# # Identifying the features, target and observations
# What are the features? This dataset has 3 features:
# * TV
# * Radio
# * Newspaper
# What is the target?
# * Sales: sales of a product
# What is the shape of the dataframe?
# In[62]:
df.shape
# We can see that there are 200 observations and 4 columns, 3 of which are features
# # Plotting the relationships between the features and the target
# In[63]:
# use a single figure with 3 plots aligned on one row
fig, axes = plt.subplots(1,3,sharey=False)
df.plot(kind='scatter', x='TV', y='sales',
ax=axes[0], figsize=(16,8))
df.plot(kind='scatter', x='radio', y='sales',
ax=axes[1], figsize=(16,8))
df.plot(kind='scatter', x='newspaper', y='sales',
ax=axes[2], figsize=(16,8))
# The plots show a roughly linear relationship between TV and Sales, and between radio and Sales
# In[64]:
# same thing but with seaborn
sns.pairplot(data=df, x_vars=['TV','radio','newspaper'],
y_vars='sales', height=7, aspect=0.7)
# # Plotting the correlations between the different features and the target
# * This part has not been done yet.
# # Building the linear regression model
# In[65]:
cols_predicteurs = ['TV','radio','newspaper']
# predictors
X = df[cols_predicteurs]
y = df.sales
# In[66]:
# Perform the training/test split
X_train, X_test, y_train, y_test = model_selection.train_test_split(X,
y , test_size = 0.2, random_state=42)
# shape of each sub-dataset
print (X_train.shape, y_train.shape)
print (X_test.shape, y_test.shape)
# In[67]:
# estimate the coefficients of the linear model
lm = LinearRegression()
lm.fit(X_train,y_train)
# Display the coefficients
print(lm.intercept_)
print(lm.coef_)
# In[68]:
# Display the equation
list(zip(cols_predicteurs, lm.coef_))
# In[69]:
# run predictions on the test set
y_pred = lm.predict(X_test)
# In[70]:
import numpy as np
# compare test values with predicted values
test_pred_df = pd.DataFrame( { 'Valeurs test': y_test,
'Valeurs prédites': np.round( y_pred, 2),
'residuels': y_test - y_pred } )
test_pred_df[0:10]
# In[71]:
# RMSE
rmse = np.sqrt(metrics.mean_squared_error(y_test,
                                          y_pred))
print(rmse)
# Compute the R-squared
r2 = metrics.r2_score(y_test, y_pred)
print(r2)
# In[72]:
# Write scores to a file
with open("metrics.txt", 'w') as outfile:
outfile.write("MSE: {0:2.1f} \n".format(mse))
outfile.write("R2: {0:2.1f}\n".format(r2))
# In[73]:
# Reference: The Elements of Statistical Learning - Hastie, Tibshirani and Friedman, see https://web.stanford.edu/~hastie/ElemStatLearn/
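# In[74]:
# Added illustration (not part of the original notebook): a quick 5-fold
# cross-validation of the same linear model, reusing the X and y defined above.
scores = model_selection.cross_val_score(LinearRegression(), X, y, cv=5, scoring='r2')
print("R2 per fold:", np.round(scores, 3))
print("mean R2:", scores.mean())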
|
py
|
1a56c3cc893a02b94d943a6ccbadaf389e381a09
|
# Copyright 2021 ST John
# Copyright 2016 James Hensman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from gpflow import default_float
from functools import reduce
import numpy as np
class BlockDiagMat_many:
def __init__(self, mats):
self.mats = mats
@property
def shape(self):
        return (sum([m.shape[0] for m in self.mats]), sum([m.shape[1] for m in self.mats]))
@property
def sqrt_dims(self):
        return sum([m.sqrt_dims for m in self.mats])
def _get_rhs_slices(self, X):
ret = []
start = 0
for m in self.mats:
ret.append(tf.slice(X, begin=tf.stack([start, 0]), size=tf.stack([m.shape[1], -1])))
start = start + m.shape[1]
return ret
def _get_rhs_blocks(self, X):
"""
X is a solid matrix, same size as this one. Get the blocks of X that
correspond to the structure of this matrix
"""
ret = []
start1 = 0
start2 = 0
for m in self.mats:
ret.append(tf.slice(X, begin=tf.stack([start1, start2]), size=m.shape))
start1 = start1 + m.shape[0]
start2 = start2 + m.shape[1]
return ret
def get(self):
ret = self.mats[0].get()
for m in self.mats[1:]:
tr_shape = tf.stack([tf.shape(ret)[0], m.shape[1]])
bl_shape = tf.stack([m.shape[0], tf.shape(ret)[1]])
top = tf.concat([ret, tf.zeros(tr_shape, default_float())], axis=1)
bottom = tf.concat([tf.zeros(bl_shape, default_float()), m.get()], axis=1)
ret = tf.concat([top, bottom], axis=0)
return ret
def logdet(self):
return reduce(tf.add, [m.logdet() for m in self.mats])
def matmul(self, X):
return tf.concat(
[m.matmul(Xi) for m, Xi in zip(self.mats, self._get_rhs_slices(X))], axis=0
)
def solve(self, X):
return tf.concat([m.solve(Xi) for m, Xi in zip(self.mats, self._get_rhs_slices(X))], axis=0)
def inv(self):
return BlockDiagMat_many([mat.inv() for mat in self.mats])
def trace_KiX(self, X):
"""
X is a square matrix of the same size as this one.
if self is K, compute tr(K^{-1} X)
"""
return reduce(
tf.add, [m.trace_KiX(Xi) for m, Xi in zip(self.mats, self._get_rhs_blocks(X))]
)
def get_diag(self):
return tf.concat([m.get_diag() for m in self.mats], axis=0)
def inv_diag(self):
return tf.concat([m.inv_diag() for m in self.mats], axis=0)
def matmul_sqrt(self, X):
return tf.concat(
[m.matmul_sqrt(Xi) for m, Xi in zip(self.mats, self._get_rhs_slices(X))], axis=0
)
def matmul_sqrt_transpose(self, X):
ret = []
        start = np.zeros((2,), np.int32)
for m in self.mats:
ret.append(
m.matmul_sqrt_transpose(tf.slice(X, begin=start, size=tf.stack([m.sqrt_dims, -1])))
)
start[0] += m.sqrt_dims
return tf.concat(ret, axis=0)
class BlockDiagMat:
def __init__(self, A, B):
self.A, self.B = A, B
@property
def shape(self):
mats = [self.A, self.B]
return (sum([m.shape[0] for m in mats]), sum([m.shape[1] for m in mats]))
@property
def sqrt_dims(self):
mats = [self.A, self.B]
return sum([m.sqrt_dims for m in mats])
def _get_rhs_slices(self, X):
# X1 = X[:self.A.shape[1], :]
X1 = tf.slice(X, begin=tf.zeros((2,), tf.int32), size=tf.stack([self.A.shape[1], -1]))
# X2 = X[self.A.shape[1]:, :]
X2 = tf.slice(X, begin=tf.stack([self.A.shape[1], 0]), size=-tf.ones((2,), tf.int32))
return X1, X2
def get(self):
tl_shape = tf.stack([self.A.shape[0], self.B.shape[1]])
br_shape = tf.stack([self.B.shape[0], self.A.shape[1]])
top = tf.concat([self.A.get(), tf.zeros(tl_shape, default_float())], axis=1)
bottom = tf.concat([tf.zeros(br_shape, default_float()), self.B.get()], axis=1)
return tf.concat([top, bottom], axis=0)
def logdet(self):
return self.A.logdet() + self.B.logdet()
def matmul(self, X):
X1, X2 = self._get_rhs_slices(X)
top = self.A.matmul(X1)
bottom = self.B.matmul(X2)
return tf.concat([top, bottom], axis=0)
def solve(self, X):
X1, X2 = self._get_rhs_slices(X)
top = self.A.solve(X1)
bottom = self.B.solve(X2)
return tf.concat([top, bottom], axis=0)
def inv(self):
return BlockDiagMat(self.A.inv(), self.B.inv())
def trace_KiX(self, X):
"""
X is a square matrix of the same size as this one.
if self is K, compute tr(K^{-1} X)
"""
X1, X2 = tf.slice(X, [0, 0], self.A.shape), tf.slice(X, self.A.shape, [-1, -1])
top = self.A.trace_KiX(X1)
bottom = self.B.trace_KiX(X2)
return top + bottom
def get_diag(self):
return tf.concat([self.A.get_diag(), self.B.get_diag()], axis=0)
def inv_diag(self):
return tf.concat([self.A.inv_diag(), self.B.inv_diag()], axis=0)
def matmul_sqrt(self, X):
X1, X2 = self._get_rhs_slices(X)
top = self.A.matmul_sqrt(X1)
bottom = self.B.matmul_sqrt(X2)
return tf.concat([top, bottom], axis=0)
def matmul_sqrt_transpose(self, X):
X1 = tf.slice(X, begin=tf.zeros((2,), tf.int32), size=tf.stack([self.A.sqrt_dims, -1]))
X2 = tf.slice(X, begin=tf.stack([self.A.sqrt_dims, 0]), size=-tf.ones((2,), tf.int32))
top = self.A.matmul_sqrt_transpose(X1)
bottom = self.B.matmul_sqrt_transpose(X2)
return tf.concat([top, bottom], axis=0)
class LowRankMat:
def __init__(self, d, W):
"""
A matrix of the form
diag(d) + W W^T
"""
self.d = d
self.W = W
@property
def shape(self):
return (tf.size(self.d), tf.size(self.d))
@property
def sqrt_dims(self):
        return tf.size(self.d) + tf.shape(self.W)[1]
def get(self):
return tf.linalg.diag(self.d) + tf.matmul(self.W, self.W, transpose_b=True)
def logdet(self):
part1 = tf.reduce_sum(tf.math.log(self.d))
I = tf.eye(tf.shape(self.W)[1], dtype=default_float())
M = I + tf.matmul(tf.transpose(self.W) / self.d, self.W) # XXX
part2 = 2 * tf.reduce_sum(tf.math.log(tf.linalg.diag_part(tf.linalg.cholesky(M))))
return part1 + part2
def matmul(self, B):
WTB = tf.matmul(self.W, B, transpose_a=True)
WWTB = tf.matmul(self.W, WTB)
DB = tf.reshape(self.d, [-1, 1]) * B
return DB + WWTB
def get_diag(self):
return self.d + tf.reduce_sum(tf.square(self.W), 1)
def solve(self, B):
d_col = tf.expand_dims(self.d, 1)
DiB = B / d_col
DiW = self.W / d_col
WTDiB = tf.matmul(DiW, B, transpose_a=True)
I = tf.eye(tf.shape(self.W)[1], dtype=default_float())
M = I + tf.matmul(DiW, self.W, transpose_a=True)
L = tf.linalg.cholesky(M)
Minv_WTDiB = tf.linalg.cholesky_solve(L, WTDiB)
return DiB - tf.matmul(DiW, Minv_WTDiB)
def inv(self):
di = tf.math.reciprocal(self.d)
d_col = tf.expand_dims(self.d, 1)
DiW = self.W / d_col
I = tf.eye(tf.shape(self.W)[1], dtype=default_float())
M = I + tf.matmul(DiW, self.W, transpose_a=True)
L = tf.linalg.cholesky(M)
v = tf.transpose(tf.linalg.triangular_solve(L, tf.transpose(DiW), lower=True)) # XXX
        return LowRankMatNeg(di, v)
def trace_KiX(self, X):
"""
X is a square matrix of the same size as this one.
if self is K, compute tr(K^{-1} X)
"""
d_col = tf.expand_dims(self.d, 1)
R = self.W / d_col
RTX = tf.matmul(R, X, transpose_a=True)
RTXR = tf.matmul(RTX, R)
I = tf.eye(tf.shape(self.W)[1], dtype=default_float())
M = I + tf.matmul(R, self.W, transpose_a=True)
Mi = tf.linalg.inv(M)
return tf.reduce_sum(tf.linalg.diag_part(X) * 1.0 / self.d) - tf.reduce_sum(RTXR * Mi)
def inv_diag(self):
d_col = tf.expand_dims(self.d, 1)
WTDi = tf.transpose(self.W / d_col) # XXX
I = tf.eye(tf.shape(self.W)[1], dtype=default_float())
M = I + tf.matmul(WTDi, self.W)
L = tf.linalg.cholesky(M)
tmp1 = tf.linalg.triangular_solve(L, WTDi, lower=True)
return 1.0 / self.d - tf.reduce_sum(tf.square(tmp1), 0)
def matmul_sqrt(self, B):
"""
There's a non-square sqrt of this matrix given by
[ D^{1/2}]
[ W^T ]
This method right-multiplies the sqrt by the matrix B
"""
DB = tf.expand_dims(tf.sqrt(self.d), 1) * B
VTB = tf.matmul(self.W, B, transpose_a=True)
return tf.concat([DB, VTB], axis=0)
def matmul_sqrt_transpose(self, B):
"""
There's a non-square sqrt of this matrix given by
[ D^{1/2}]
[ W^T ]
This method right-multiplies the transposed-sqrt by the matrix B
"""
B1 = tf.slice(B, tf.zeros((2,), tf.int32), tf.stack([tf.size(self.d), -1]))
B2 = tf.slice(B, tf.stack([tf.size(self.d), 0]), -tf.ones((2,), tf.int32))
return tf.expand_dims(tf.sqrt(self.d), 1) * B1 + tf.matmul(self.W, B2)
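# Added check sketch (not part of the original file): LowRankMat.solve applies the
# Woodbury identity to diag(d) + W W^T, so it should agree with a dense solve.
def _check_lowrank_solve():
    rng = np.random.RandomState(0)
    d = tf.constant(rng.rand(5) + 1.0, dtype=default_float())
    W = tf.constant(rng.randn(5, 2), dtype=default_float())
    B = tf.constant(rng.randn(5, 3), dtype=default_float())
    K = LowRankMat(d, W)
    dense = tf.linalg.diag(d) + tf.matmul(W, W, transpose_b=True)
    np.testing.assert_allclose(K.solve(B).numpy(), tf.linalg.solve(dense, B).numpy(), atol=1e-8)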
class LowRankMatNeg:
def __init__(self, d, W):
"""
A matrix of the form
diag(d) - W W^T
(note the minus sign)
"""
self.d = d
self.W = W
@property
def shape(self):
return (tf.size(self.d), tf.size(self.d))
def get(self):
return tf.linalg.diag(self.d) - tf.matmul(self.W, self.W, transpose_b=True)
class Rank1Mat:
def __init__(self, d, v):
"""
A matrix of the form
diag(d) + v v^T
"""
self.d = d
self.v = v
@property
def shape(self):
return (tf.size(self.d), tf.size(self.d))
@property
def sqrt_dims(self):
return tf.size(self.d) + 1
def get(self):
V = tf.expand_dims(self.v, 1)
return tf.linalg.diag(self.d) + tf.matmul(V, V, transpose_b=True)
def logdet(self):
return tf.reduce_sum(tf.math.log(self.d)) + tf.math.log(
1.0 + tf.reduce_sum(tf.square(self.v) / self.d)
)
def matmul(self, B):
V = tf.expand_dims(self.v, 1)
return tf.expand_dims(self.d, 1) * B + tf.matmul(V, tf.matmul(V, B, transpose_a=True))
def solve(self, B):
div = self.v / self.d
c = 1.0 + tf.reduce_sum(div * self.v)
div = tf.expand_dims(div, 1)
return B / tf.expand_dims(self.d, 1) - tf.matmul(
div / c, tf.matmul(div, B, transpose_a=True)
)
def inv(self):
di = tf.math.reciprocal(self.d)
Div = self.v * di
M = 1.0 + tf.reduce_sum(Div * self.v)
v_new = Div / tf.sqrt(M)
return Rank1MatNeg(di, v_new)
def trace_KiX(self, X):
"""
X is a square matrix of the same size as this one.
if self is K, compute tr(K^{-1} X)
"""
R = tf.expand_dims(self.v / self.d, 1)
RTX = tf.matmul(R, X, transpose_a=True)
RTXR = tf.matmul(RTX, R)
M = 1 + tf.reduce_sum(tf.square(self.v) / self.d)
return tf.reduce_sum(tf.linalg.diag_part(X) / self.d) - RTXR / M
def get_diag(self):
return self.d + tf.square(self.v)
def inv_diag(self):
div = self.v / self.d
c = 1.0 + tf.reduce_sum(div * self.v)
return 1.0 / self.d - tf.square(div) / c
def matmul_sqrt(self, B):
"""
There's a non-square sqrt of this matrix given by
[ D^{1/2}]
[ V^T ]
This method right-multiplies the sqrt by the matrix B
"""
DB = tf.expand_dims(tf.sqrt(self.d), 1) * B
VTB = tf.matmul(tf.expand_dims(self.v, 0), B)
return tf.concat([DB, VTB], axis=0)
def matmul_sqrt_transpose(self, B):
"""
There's a non-square sqrt of this matrix given by
[ D^{1/2}]
[ W^T ]
This method right-multiplies the transposed-sqrt by the matrix B
"""
B1 = tf.slice(B, tf.zeros((2,), tf.int32), tf.stack([tf.size(self.d), -1]))
B2 = tf.slice(B, tf.stack([tf.size(self.d), 0]), -tf.ones((2,), tf.int32))
return tf.expand_dims(tf.sqrt(self.d), 1) * B1 + tf.matmul(tf.expand_dims(self.v, 1), B2)
class Rank1MatNeg:
def __init__(self, d, v):
"""
A matrix of the form
diag(d) - v v^T
(note the minus sign)
"""
self.d = d
self.v = v
@property
def shape(self):
return (tf.size(self.d), tf.size(self.d))
def get(self):
W = tf.expand_dims(self.v, 1)
return tf.linalg.diag(self.d) - tf.matmul(W, W, transpose_b=True)
class DiagMat:
def __init__(self, d):
self.d = d
@property
def shape(self):
return (tf.size(self.d), tf.size(self.d))
@property
def sqrt_dims(self):
return tf.size(self.d)
def get(self):
return tf.linalg.diag(self.d)
def logdet(self):
return tf.reduce_sum(tf.math.log(self.d))
def matmul(self, B):
return tf.expand_dims(self.d, 1) * B
def solve(self, B):
return B / tf.expand_dims(self.d, 1)
def inv(self):
return DiagMat(tf.math.reciprocal(self.d))
def trace_KiX(self, X):
"""
X is a square matrix of the same size as this one.
if self is K, compute tr(K^{-1} X)
"""
return tf.reduce_sum(tf.linalg.diag_part(X) / self.d)
def get_diag(self):
return self.d
def inv_diag(self):
return 1.0 / self.d
def matmul_sqrt(self, B):
return tf.expand_dims(tf.sqrt(self.d), 1) * B
def matmul_sqrt_transpose(self, B):
return tf.expand_dims(tf.sqrt(self.d), 1) * B
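# Added check sketch (not part of the original file): BlockDiagMat_many assembles a
# block-diagonal matrix from its components, so get() on two DiagMats should match
# tf.linalg.diag of the concatenated diagonals.
def _check_blockdiag_many():
    a = DiagMat(tf.constant([1.0, 2.0], dtype=default_float()))
    b = DiagMat(tf.constant([3.0], dtype=default_float()))
    bd = BlockDiagMat_many([a, b])
    expected = tf.linalg.diag(tf.constant([1.0, 2.0, 3.0], dtype=default_float()))
    np.testing.assert_allclose(bd.get().numpy(), expected.numpy())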
|
py
|
1a56c4e1d9a56dfd76281a870399088e156dca84
|
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
#
# PanDA authors:
# - Aleksandr Alekseev, [email protected], 2022
# - Paul Nilsson, [email protected], 2022
from abc import ABC, abstractmethod
from typing import Iterator, Union
import json
import logging
import socket
import ssl
from requests.auth import HTTPBasicAuth
import requests
try:
import pylogbeat
from logstash_async.utils import ichunked
except ImportError:
pass
logger = logging.getLogger(__name__)
class TimeoutNotSet:
pass
class Transport(ABC):
"""The :class:`Transport <Transport>` is the abstract base class of
all transport protocols.
:param host: The name of the host.
:type host: str
:param port: The TCP/UDP port.
:type port: int
:param timeout: The connection timeout.
:type timeout: None or float
:param ssl_enable: Activates TLS.
:type ssl_enable: bool
:param ssl_verify: Activates the TLS certificate verification.
:type ssl_verify: bool or str
:param use_logging: Use logging for debugging.
:type use_logging: bool
"""
def __init__(
self,
host: str,
port: int,
timeout: Union[None, float],
ssl_enable: bool,
ssl_verify: Union[bool, str],
use_logging: bool,
):
self._host = host
self._port = port
self._timeout = None if timeout is TimeoutNotSet else timeout
self._ssl_enable = ssl_enable
self._ssl_verify = ssl_verify
self._use_logging = use_logging
super().__init__()
@abstractmethod
def send(self, events: list, **kwargs):
pass
@abstractmethod
def close(self):
pass
class UdpTransport:
_keep_connection = False
# ----------------------------------------------------------------------
# pylint: disable=unused-argument
def __init__(self, host, port, timeout=TimeoutNotSet, **kwargs):
self._host = host
self._port = port
self._timeout = timeout
self._sock = None
# ----------------------------------------------------------------------
def send(self, events, use_logging=False): # pylint: disable=unused-argument
# Ideally we would keep the socket open but this is risky because we might not notice
# a broken TCP connection and send events into the dark.
# On UDP we push into the dark by design :)
self._create_socket()
try:
self._send(events)
finally:
self._close()
# ----------------------------------------------------------------------
def _create_socket(self):
if self._sock is not None:
return
# from logging.handlers.DatagramHandler
self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
if self._timeout is not TimeoutNotSet:
self._sock.settimeout(self._timeout)
# ----------------------------------------------------------------------
def _send(self, events):
for event in events:
self._send_via_socket(event)
# ----------------------------------------------------------------------
def _send_via_socket(self, data):
data_to_send = self._convert_data_to_send(data)
self._sock.sendto(data_to_send, (self._host, self._port))
# ----------------------------------------------------------------------
def _convert_data_to_send(self, data):
if not isinstance(data, bytes):
return bytes(data, 'utf-8')
return data
# ----------------------------------------------------------------------
def _close(self, force=False):
if not self._keep_connection or force:
if self._sock:
self._sock.close()
self._sock = None
# ----------------------------------------------------------------------
def close(self):
self._close(force=True)
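# Hypothetical usage sketch (added for illustration; the host and port below are
# placeholders, not values used anywhere in this module):
#
#   transport = UdpTransport('localhost', 5959)
#   transport.send(['{"message": "ping"}'])
#   transport.close()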
class TcpTransport(UdpTransport):
# ----------------------------------------------------------------------
def __init__( # pylint: disable=too-many-arguments
self,
host,
port,
ssl_enable,
ssl_verify,
keyfile,
certfile,
ca_certs,
timeout=TimeoutNotSet,
**kwargs):
super().__init__(host, port)
self._ssl_enable = ssl_enable
self._ssl_verify = ssl_verify
self._keyfile = keyfile
self._certfile = certfile
self._ca_certs = ca_certs
self._timeout = timeout
# ----------------------------------------------------------------------
def _create_socket(self):
if self._sock is not None:
return
# from logging.handlers.SocketHandler
self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if self._timeout is not TimeoutNotSet:
self._sock.settimeout(self._timeout)
try:
self._sock.connect((self._host, self._port))
# non-SSL
if not self._ssl_enable:
return
# SSL
cert_reqs = ssl.CERT_REQUIRED
ssl_context = ssl.create_default_context(cafile=self._ca_certs)
if not self._ssl_verify:
if self._ca_certs:
cert_reqs = ssl.CERT_OPTIONAL
else:
cert_reqs = ssl.CERT_NONE
ssl_context.verify_mode = cert_reqs
ssl_context.check_hostname = False
ssl_context.load_cert_chain(self._certfile, self._keyfile)
self._sock = ssl_context.wrap_socket(self._sock, server_side=False)
except socket.error:
self._close()
raise
# ----------------------------------------------------------------------
def _send_via_socket(self, data):
data_to_send = self._convert_data_to_send(data)
self._sock.sendall(data_to_send)
class BeatsTransport:
_batch_size = 10
# ----------------------------------------------------------------------
def __init__( # pylint: disable=too-many-arguments
self,
host,
port,
ssl_enable,
ssl_verify,
keyfile,
certfile,
ca_certs,
timeout=TimeoutNotSet,
**kwargs):
timeout_ = None if timeout is TimeoutNotSet else timeout
self._client_arguments = dict(
host=host,
port=port,
timeout=timeout_,
ssl_enable=ssl_enable,
ssl_verify=ssl_verify,
keyfile=keyfile,
certfile=certfile,
ca_certs=ca_certs,
**kwargs)
# ----------------------------------------------------------------------
def close(self):
pass # nothing to do
# ----------------------------------------------------------------------
def send(self, events, use_logging=False):
client = pylogbeat.PyLogBeatClient(use_logging=use_logging, **self._client_arguments)
with client:
for events_subset in ichunked(events, self._batch_size):
try:
client.send(events_subset)
except Exception:
pass
class HttpTransport(Transport):
"""The :class:`HttpTransport <HttpTransport>` implements a client for the
logstash plugin `inputs_http`.
For more details visit:
https://www.elastic.co/guide/en/logstash/current/plugins-inputs-http.html
:param host: The hostname of the logstash HTTP server.
:type host: str
:param port: The TCP port of the logstash HTTP server.
:type port: int
:param timeout: The connection timeout. (Default: None)
:type timeout: float
:param ssl_enable: Activates TLS. (Default: True)
:type ssl_enable: bool
:param ssl_verify: Activates the TLS certificate verification. If the flag
is True the class tries to verify the TLS certificate with certifi. If you
pass a string with a file location to CA certificate the class tries to
validate it against it. (Default: True)
:type ssl_verify: bool or str
:param use_logging: Use logging for debugging.
:type use_logging: bool
:param username: Username for basic authorization. (Default: "")
:type username: str
:param password: Password for basic authorization. (Default: "")
:type password: str
:param max_content_length: The max content of an HTTP request in bytes.
(Default: 100MB)
:type max_content_length: int
"""
def __init__(
self,
host: str,
port: int,
timeout: Union[None, float] = TimeoutNotSet,
ssl_enable: bool = True,
ssl_verify: Union[bool, str] = True,
use_logging: bool = False,
#keyfile: Union[bool, str] = True,
#certfile: Union[bool, str] = True,
**kwargs
):
super().__init__(host, port, timeout, ssl_enable, ssl_verify, use_logging)
self._username = kwargs.get('username', None)
self._password = kwargs.get('password', None)
self._max_content_length = kwargs.get('max_content_length', 100 * 1024 * 1024)
self.__session = None
self._cert = kwargs.get('cert', None)
@property
def url(self) -> str:
"""The URL of the logstash pipeline based on the hostname, the port and
the TLS usage.
:return: The URL of the logstash HTTP pipeline.
:rtype: str
"""
protocol = 'http'
if self._ssl_enable:
protocol = 'https'
return f'{protocol}://{self._host}:{self._port}'
def __batches(self, events: list) -> Iterator[list]:
"""Generate dynamic sized batches based on the max content length.
:param events: A list of events.
:type events: list
:return: A iterator which generates batches of events.
:rtype: Iterator[list]
"""
current_batch = []
event_iter = iter(events)
while True:
try:
current_event = next(event_iter)
except StopIteration:
current_event = None
if not current_batch:
return
yield current_batch
if current_event is None:
return
if len(current_event) > self._max_content_length:
                msg = 'The event size <%s> is greater than the max content length <%s>. '
                msg += 'Skipping event.'
if self._use_logging:
logger.warning(msg, len(current_event), self._max_content_length)
continue
obj = json.loads(current_event)
content_length = len(json.dumps(current_batch + [obj]).encode('utf8'))
if content_length > self._max_content_length:
batch = current_batch
current_batch = [obj]
yield batch
else:
current_batch += [obj]
def __auth(self) -> HTTPBasicAuth:
"""The authentication method for the logstash pipeline. If the username
or the password is not set correctly it will return None.
:return: A HTTP basic auth object or None.
:rtype: HTTPBasicAuth
"""
if self._username is None or self._password is None:
return None
return HTTPBasicAuth(self._username, self._password)
def close(self) -> None:
"""Close the HTTP session.
"""
if self.__session is not None:
self.__session.close()
def send(self, events: list, **kwargs):
"""Send events to the logstash pipeline.
Max Events: `logstash_async.Constants.QUEUED_EVENTS_BATCH_SIZE`
Max Content Length: `HttpTransport._max_content_length`
The method receives a list of events from the worker. It tries to send
as much of the events as possible in one request. If the total size of
the received events is greater than the maximal content length the
events will be divide into batches.
:param events: A list of events
:type events: list
"""
self.__session = requests.Session()
#print(self._cert)
for batch in self.__batches(events):
if self._use_logging:
logger.debug('Batch length: %s, Batch size: %s',
len(batch), len(json.dumps(batch).encode('utf8')))
response = self.__session.post(
self.url,
headers={'Content-Type': 'application/json'},
json=batch,
verify=self._ssl_verify,
timeout=self._timeout,
auth=self.__auth(),
cert=self._cert)
#print(response)
if response.status_code != 200:
self.close()
response.raise_for_status()
self.close()
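# Hypothetical usage sketch (added for illustration; the host, port and credentials
# below are placeholders, not values from this module):
#
#   transport = HttpTransport('logstash.example.com', 8080,
#                             timeout=5.0, ssl_enable=True, ssl_verify=True,
#                             username='user', password='secret')
#   transport.send(['{"message": "hello"}'])
#   transport.close()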
|
py
|
1a56c7255a5c5fa90f9769d397e3410692c70c6e
|
def get_visited(width, height):
visited = list()
for i in range(width):
temp = list()
for j in range(height):
temp.append(False)
visited.append(temp)
return visited
def get_front(direction):
if direction == 0: # bot pointing upwards
return 0
elif direction == 1: # bot pointing rightwards
return 1
elif direction == 2: # bot pointing downwards
return 2
return 3 # bot pointing leftwards
def get_right(direction):
if direction == 0:
return 1
elif direction == 1:
return 2
elif direction == 2:
return 3
return 0
def get_left(direction):
if direction == 0:
return 3
elif direction == 1:
return 0
elif direction == 2:
return 1
return 2
def get_direction(curr_direction, new_direction):
if curr_direction == new_direction:
return None
if curr_direction == 0:
if new_direction == 1:
return 'right'
elif new_direction == 2:
return 'down'
else:
return 'left'
elif curr_direction == 1:
if new_direction == 2:
return 'right'
elif new_direction == 3:
return 'down'
else:
return 'left'
elif curr_direction == 2:
if new_direction == 3:
return 'right'
elif new_direction == 0:
return 'down'
else:
return 'left'
else:
if new_direction == 0:
return 'right'
elif new_direction == 1:
return 'down'
else:
return 'left'
def get_opposite_direction(direction):
if direction == 0:
return 2
elif direction == 1:
return 3
elif direction == 2:
return 0
return 1
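# Added sanity-check sketch (not part of the original module). The helpers above
# encode directions as 0=up, 1=right, 2=down, 3=left, and get_direction() maps a
# (current, new) pair to the turn needed ('right', 'down' for a U-turn, or 'left').
if __name__ == "__main__":
    assert get_right(0) == 1 and get_left(0) == 3
    assert get_opposite_direction(1) == 3
    assert get_direction(0, 1) == 'right'
    assert get_direction(0, 2) == 'down'
    assert get_direction(0, 3) == 'left'
    assert get_direction(2, 2) is None
    assert get_visited(2, 3) == [[False, False, False], [False, False, False]]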
|
py
|
1a56c7ef3f13600dd1c9182306c665b3972a6753
|
import unittest
from werkzeug import Client
from werkzeug.wrappers.base_response import BaseResponse
from blazeweb.users import User, UserProxy
from blazewebtestapp.applications import make_wsgi
class TestUserFunctional(unittest.TestCase):
def setUp(self):
self.app = make_wsgi('Testruns')
self.client = Client(self.app, BaseResponse)
def tearDown(self):
self.client = None
self.app = None
def test_attr(self):
r = self.client.get('/usertests/setfoo')
self.assertEqual(r.status, '200 OK')
self.assertEqual(r.data, b'foo set')
r = self.client.get('/usertests/getfoo')
self.assertEqual(r.status, '200 OK')
self.assertEqual(r.data, b'barbaz')
def test_auth(self):
r = self.client.get('/usertests/setauth')
self.assertEqual(r.status, '200 OK')
r = self.client.get('/usertests/getauth')
self.assertEqual(r.status, '200 OK')
self.assertEqual(r.data, b'True')
def test_perm(self):
r = self.client.get('/usertests/addperm')
self.assertEqual(r.status, '200 OK')
r = self.client.get('/usertests/getperms')
self.assertEqual(r.status, '200 OK')
self.assertEqual(r.data, b'TrueFalseTrue')
def test_clear(self):
r = self.client.get('/usertests/clear')
self.assertEqual(r.status, '200 OK')
self.assertEqual(r.data, b'FalseFalseNone')
def test_message(self):
r = self.client.get('/usertests/setmsg')
self.assertEqual(r.status, '200 OK')
r = self.client.get('/usertests/getmsg')
self.assertEqual(r.status, '200 OK')
self.assertEqual(r.data, b'test: my message')
r = self.client.get('/usertests/nomsg')
self.assertEqual(r.status, '200 OK')
self.assertEqual(r.data, b'0')
class TestUserUnit(object):
def _check_empty(self, u):
assert u.is_authenticated is False
assert u.is_super_user is False
assert not u.perms
def test_defaults(self):
u = User()
self._check_empty(u)
def test_clear(self):
u = User()
u.is_authenticated = True
u.is_super_user = True
u.add_perm('foobar')
u.clear()
self._check_empty(u)
def test_lazy_dict_attrs(self):
u = User()
u.foobar = 1
assert u['foobar'] == 1
def test_api_attrs_not_in_dict(self):
u = User()
u.foobar = 1
assert u._is_authenticated is False
assert '_is_authenticated' not in u
assert u._is_super_user is False
assert '_is_super_user' not in u
assert not u.perms
assert 'perms' not in u
assert not u._messages
assert '_messages' not in u
def test_perms(self):
u = User()
assert not u.has_perm('foobar')
u.add_perm('foobar')
assert u.has_perm('foobar')
assert not u.has_any_perm('baz', 'zip')
assert not u.has_any_perm(('baz', 'zip'))
assert u.has_any_perm('baz', 'foobar')
assert u.has_any_perm('foobar', 'baz')
assert u.has_any_perm(('baz', 'foobar'))
assert u.has_any_perm(['foobar', 'baz'])
def test_super_user_perms(self):
u = User()
u.is_super_user = True
assert u.is_super_user
assert u.has_perm('foobar')
u.add_perm('foobar')
assert u.has_perm('foobar')
assert u.has_any_perm('baz', 'zip')
assert u.has_any_perm('foobar', 'baz')
def test_get_set_properties(self):
u = User()
assert not u.is_authenticated
u.is_authenticated = True
assert u.is_authenticated
u = User()
assert not u.is_super_user
u.is_super_user = True
assert u.is_super_user
def test_repr(self):
u = User()
assert repr(u)
class TestUserProxy(object):
def test_bool_value(self):
# make sure UserProxy._current_obj() returns a SOP with the real
# User behind it instead of returning the real User instance.
u = UserProxy()
if not u:
assert False, 'expected user'
|
py
|
1a56c82fbd9e2cdb60d774333e71d02341629f92
|
# Copyright 2016-2020 The Matrix.org Foundation C.I.C.
# Copyright 2020 Sorunome
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import logging
import random
from http import HTTPStatus
from typing import TYPE_CHECKING, Iterable, List, Optional, Set, Tuple
from synapse import types
from synapse.api.constants import (
AccountDataTypes,
EventContentFields,
EventTypes,
GuestAccess,
Membership,
)
from synapse.api.errors import (
AuthError,
Codes,
LimitExceededError,
ShadowBanError,
SynapseError,
)
from synapse.api.ratelimiting import Ratelimiter
from synapse.event_auth import get_named_level, get_power_level_event
from synapse.events import EventBase
from synapse.events.snapshot import EventContext
from synapse.handlers.profile import MAX_AVATAR_URL_LEN, MAX_DISPLAYNAME_LEN
from synapse.types import (
JsonDict,
Requester,
RoomAlias,
RoomID,
StateMap,
UserID,
create_requester,
get_domain_from_id,
)
from synapse.util.async_helpers import Linearizer
from synapse.util.distributor import user_left_room
from ._base import BaseHandler
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
class RoomMemberHandler(metaclass=abc.ABCMeta):
# TODO(paul): This handler currently contains a messy conflation of
# low-level API that works on UserID objects and so on, and REST-level
# API that takes ID strings and returns pagination chunks. These concerns
# ought to be separated out a lot better.
def __init__(self, hs: "HomeServer"):
self.hs = hs
self.store = hs.get_datastore()
self.auth = hs.get_auth()
self.state_handler = hs.get_state_handler()
self.config = hs.config
self._server_name = hs.hostname
self.federation_handler = hs.get_federation_handler()
self.directory_handler = hs.get_directory_handler()
self.identity_handler = hs.get_identity_handler()
self.registration_handler = hs.get_registration_handler()
self.profile_handler = hs.get_profile_handler()
self.event_creation_handler = hs.get_event_creation_handler()
self.account_data_handler = hs.get_account_data_handler()
self.event_auth_handler = hs.get_event_auth_handler()
self.member_linearizer: Linearizer = Linearizer(name="member")
self.clock = hs.get_clock()
self.spam_checker = hs.get_spam_checker()
self.third_party_event_rules = hs.get_third_party_event_rules()
self._server_notices_mxid = self.config.servernotices.server_notices_mxid
self._enable_lookup = hs.config.enable_3pid_lookup
self.allow_per_room_profiles = self.config.allow_per_room_profiles
self._join_rate_limiter_local = Ratelimiter(
store=self.store,
clock=self.clock,
rate_hz=hs.config.ratelimiting.rc_joins_local.per_second,
burst_count=hs.config.ratelimiting.rc_joins_local.burst_count,
)
self._join_rate_limiter_remote = Ratelimiter(
store=self.store,
clock=self.clock,
rate_hz=hs.config.ratelimiting.rc_joins_remote.per_second,
burst_count=hs.config.ratelimiting.rc_joins_remote.burst_count,
)
self._invites_per_room_limiter = Ratelimiter(
store=self.store,
clock=self.clock,
rate_hz=hs.config.ratelimiting.rc_invites_per_room.per_second,
burst_count=hs.config.ratelimiting.rc_invites_per_room.burst_count,
)
self._invites_per_user_limiter = Ratelimiter(
store=self.store,
clock=self.clock,
rate_hz=hs.config.ratelimiting.rc_invites_per_user.per_second,
burst_count=hs.config.ratelimiting.rc_invites_per_user.burst_count,
)
# This is only used to get at the ratelimit function. It's fine there are
# multiple of these as it doesn't store state.
self.base_handler = BaseHandler(hs)
@abc.abstractmethod
async def _remote_join(
self,
requester: Requester,
remote_room_hosts: List[str],
room_id: str,
user: UserID,
content: dict,
) -> Tuple[str, int]:
"""Try and join a room that this server is not in
Args:
requester
remote_room_hosts: List of servers that can be used to join via.
room_id: Room that we are trying to join
user: User who is trying to join
content: A dict that should be used as the content of the join event.
"""
raise NotImplementedError()
@abc.abstractmethod
async def remote_knock(
self,
remote_room_hosts: List[str],
room_id: str,
user: UserID,
content: dict,
) -> Tuple[str, int]:
"""Try and knock on a room that this server is not in
Args:
remote_room_hosts: List of servers that can be used to knock via.
room_id: Room that we are trying to knock on.
user: User who is trying to knock.
content: A dict that should be used as the content of the knock event.
"""
raise NotImplementedError()
@abc.abstractmethod
async def remote_reject_invite(
self,
invite_event_id: str,
txn_id: Optional[str],
requester: Requester,
content: JsonDict,
) -> Tuple[str, int]:
"""
Rejects an out-of-band invite we have received from a remote server
Args:
invite_event_id: ID of the invite to be rejected
txn_id: optional transaction ID supplied by the client
requester: user making the rejection request, according to the access token
content: additional content to include in the rejection event.
Normally an empty dict.
Returns:
event id, stream_id of the leave event
"""
raise NotImplementedError()
@abc.abstractmethod
async def remote_rescind_knock(
self,
knock_event_id: str,
txn_id: Optional[str],
requester: Requester,
content: JsonDict,
) -> Tuple[str, int]:
"""Rescind a local knock made on a remote room.
Args:
knock_event_id: The ID of the knock event to rescind.
txn_id: An optional transaction ID supplied by the client.
requester: The user making the request, according to the access token.
content: The content of the generated leave event.
Returns:
A tuple containing (event_id, stream_id of the leave event).
"""
raise NotImplementedError()
@abc.abstractmethod
async def _user_left_room(self, target: UserID, room_id: str) -> None:
"""Notifies distributor on master process that the user has left the
room.
Args:
target
room_id
"""
raise NotImplementedError()
@abc.abstractmethod
async def forget(self, user: UserID, room_id: str) -> None:
raise NotImplementedError()
async def ratelimit_multiple_invites(
self,
requester: Optional[Requester],
room_id: Optional[str],
n_invites: int,
update: bool = True,
) -> None:
"""Ratelimit more than one invite sent by the given requester in the given room.
Args:
requester: The requester sending the invites.
room_id: The room the invites are being sent in.
n_invites: The amount of invites to ratelimit for.
update: Whether to update the ratelimiter's cache.
Raises:
LimitExceededError: The requester can't send that many invites in the room.
"""
await self._invites_per_room_limiter.ratelimit(
requester,
room_id,
update=update,
n_actions=n_invites,
)
async def ratelimit_invite(
self,
requester: Optional[Requester],
room_id: Optional[str],
invitee_user_id: str,
) -> None:
"""Ratelimit invites by room and by target user.
If room ID is missing then we just rate limit by target user.
"""
if room_id:
await self._invites_per_room_limiter.ratelimit(requester, room_id)
await self._invites_per_user_limiter.ratelimit(requester, invitee_user_id)
async def _local_membership_update(
self,
requester: Requester,
target: UserID,
room_id: str,
membership: str,
prev_event_ids: List[str],
auth_event_ids: Optional[List[str]] = None,
txn_id: Optional[str] = None,
ratelimit: bool = True,
content: Optional[dict] = None,
require_consent: bool = True,
outlier: bool = False,
) -> Tuple[str, int]:
"""
Internal membership update function to get an existing event or create
and persist a new event for the new membership change.
Args:
requester:
target:
room_id:
membership:
prev_event_ids: The event IDs to use as the prev events
auth_event_ids:
The event ids to use as the auth_events for the new event.
Should normally be left as None, which will cause them to be calculated
based on the room state at the prev_events.
txn_id:
ratelimit:
content:
require_consent:
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
Returns:
Tuple of event ID and stream ordering position
"""
user_id = target.to_string()
if content is None:
content = {}
content["membership"] = membership
if requester.is_guest:
content["kind"] = "guest"
# Check if we already have an event with a matching transaction ID. (We
# do this check just before we persist an event as well, but may as well
# do it up front for efficiency.)
if txn_id and requester.access_token_id:
existing_event_id = await self.store.get_event_id_from_transaction_id(
room_id,
requester.user.to_string(),
requester.access_token_id,
txn_id,
)
if existing_event_id:
event_pos = await self.store.get_position_for_event(existing_event_id)
return existing_event_id, event_pos.stream
event, context = await self.event_creation_handler.create_event(
requester,
{
"type": EventTypes.Member,
"content": content,
"room_id": room_id,
"sender": requester.user.to_string(),
"state_key": user_id,
# For backwards compatibility:
"membership": membership,
},
txn_id=txn_id,
prev_event_ids=prev_event_ids,
auth_event_ids=auth_event_ids,
require_consent=require_consent,
outlier=outlier,
)
prev_state_ids = await context.get_prev_state_ids()
prev_member_event_id = prev_state_ids.get((EventTypes.Member, user_id), None)
if event.membership == Membership.JOIN:
newly_joined = True
if prev_member_event_id:
prev_member_event = await self.store.get_event(prev_member_event_id)
newly_joined = prev_member_event.membership != Membership.JOIN
# Only rate-limit if the user actually joined the room, otherwise we'll end
# up blocking profile updates.
if newly_joined and ratelimit:
time_now_s = self.clock.time()
(
allowed,
time_allowed,
) = await self._join_rate_limiter_local.can_do_action(requester)
if not allowed:
raise LimitExceededError(
retry_after_ms=int(1000 * (time_allowed - time_now_s))
)
result_event = await self.event_creation_handler.handle_new_client_event(
requester,
event,
context,
extra_users=[target],
ratelimit=ratelimit,
)
if event.membership == Membership.LEAVE:
if prev_member_event_id:
prev_member_event = await self.store.get_event(prev_member_event_id)
if prev_member_event.membership == Membership.JOIN:
await self._user_left_room(target, room_id)
# we know it was persisted, so should have a stream ordering
assert result_event.internal_metadata.stream_ordering
return result_event.event_id, result_event.internal_metadata.stream_ordering
async def copy_room_tags_and_direct_to_room(
self, old_room_id: str, new_room_id: str, user_id: str
) -> None:
"""Copies the tags and direct room state from one room to another.
Args:
old_room_id: The room ID of the old room.
new_room_id: The room ID of the new room.
user_id: The user's ID.
"""
# Retrieve user account data for predecessor room
user_account_data, _ = await self.store.get_account_data_for_user(user_id)
# Copy direct message state if applicable
direct_rooms = user_account_data.get(AccountDataTypes.DIRECT, {})
# Check which key this room is under
if isinstance(direct_rooms, dict):
for key, room_id_list in direct_rooms.items():
if old_room_id in room_id_list and new_room_id not in room_id_list:
# Add new room_id to this key
direct_rooms[key].append(new_room_id)
# Save back to user's m.direct account data
await self.account_data_handler.add_account_data_for_user(
user_id, AccountDataTypes.DIRECT, direct_rooms
)
break
# Copy room tags if applicable
room_tags = await self.store.get_tags_for_room(user_id, old_room_id)
# Copy each room tag to the new room
for tag, tag_content in room_tags.items():
await self.account_data_handler.add_tag_to_room(
user_id, new_room_id, tag, tag_content
)
async def update_membership(
self,
requester: Requester,
target: UserID,
room_id: str,
action: str,
txn_id: Optional[str] = None,
remote_room_hosts: Optional[List[str]] = None,
third_party_signed: Optional[dict] = None,
ratelimit: bool = True,
content: Optional[dict] = None,
require_consent: bool = True,
outlier: bool = False,
prev_event_ids: Optional[List[str]] = None,
auth_event_ids: Optional[List[str]] = None,
) -> Tuple[str, int]:
"""Update a user's membership in a room.
Params:
requester: The user who is performing the update.
target: The user whose membership is being updated.
room_id: The room ID whose membership is being updated.
action: The membership change, see synapse.api.constants.Membership.
txn_id: The transaction ID, if given.
remote_room_hosts: Remote servers to send the update to.
third_party_signed: Information from a 3PID invite.
ratelimit: Whether to rate limit the request.
content: The content of the created event.
require_consent: Whether consent is required.
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
prev_event_ids: The event IDs to use as the prev events
auth_event_ids:
The event ids to use as the auth_events for the new event.
Should normally be left as None, which will cause them to be calculated
based on the room state at the prev_events.
Returns:
A tuple of the new event ID and stream ID.
Raises:
ShadowBanError if a shadow-banned requester attempts to send an invite.
"""
if action == Membership.INVITE and requester.shadow_banned:
# We randomly sleep a bit just to annoy the requester.
await self.clock.sleep(random.randint(1, 10))
raise ShadowBanError()
key = (room_id,)
with (await self.member_linearizer.queue(key)):
result = await self.update_membership_locked(
requester,
target,
room_id,
action,
txn_id=txn_id,
remote_room_hosts=remote_room_hosts,
third_party_signed=third_party_signed,
ratelimit=ratelimit,
content=content,
require_consent=require_consent,
outlier=outlier,
prev_event_ids=prev_event_ids,
auth_event_ids=auth_event_ids,
)
return result
async def update_membership_locked(
self,
requester: Requester,
target: UserID,
room_id: str,
action: str,
txn_id: Optional[str] = None,
remote_room_hosts: Optional[List[str]] = None,
third_party_signed: Optional[dict] = None,
ratelimit: bool = True,
content: Optional[dict] = None,
require_consent: bool = True,
outlier: bool = False,
prev_event_ids: Optional[List[str]] = None,
auth_event_ids: Optional[List[str]] = None,
) -> Tuple[str, int]:
"""Helper for update_membership.
Assumes that the membership linearizer is already held for the room.
Args:
requester:
target:
room_id:
action:
txn_id:
remote_room_hosts:
third_party_signed:
ratelimit:
content:
require_consent:
outlier: Indicates whether the event is an `outlier`, i.e. if
it's from an arbitrary point and floating in the DAG as
opposed to being inline with the current DAG.
prev_event_ids: The event IDs to use as the prev events
auth_event_ids:
The event ids to use as the auth_events for the new event.
Should normally be left as None, which will cause them to be calculated
based on the room state at the prev_events.
Returns:
A tuple of the new event ID and stream ID.
"""
content_specified = bool(content)
if content is None:
content = {}
else:
# We do a copy here as we potentially change some keys
# later on.
content = dict(content)
# allow the server notices mxid to set room-level profile
is_requester_server_notices_user = (
self._server_notices_mxid is not None
and requester.user.to_string() == self._server_notices_mxid
)
if (
not self.allow_per_room_profiles and not is_requester_server_notices_user
) or requester.shadow_banned:
# Strip profile data, knowing that new profile data will be added to the
# event's content in event_creation_handler.create_event() using the target's
# global profile.
content.pop("displayname", None)
content.pop("avatar_url", None)
if len(content.get("displayname") or "") > MAX_DISPLAYNAME_LEN:
raise SynapseError(
400,
f"Displayname is too long (max {MAX_DISPLAYNAME_LEN})",
errcode=Codes.BAD_JSON,
)
if len(content.get("avatar_url") or "") > MAX_AVATAR_URL_LEN:
raise SynapseError(
400,
f"Avatar URL is too long (max {MAX_AVATAR_URL_LEN})",
errcode=Codes.BAD_JSON,
)
effective_membership_state = action
if action in ["kick", "unban"]:
effective_membership_state = "leave"
# if this is a join with a 3pid signature, we may need to turn a 3pid
# invite into a normal invite before we can handle the join.
if third_party_signed is not None:
await self.federation_handler.exchange_third_party_invite(
third_party_signed["sender"],
target.to_string(),
room_id,
third_party_signed,
)
if not remote_room_hosts:
remote_room_hosts = []
if effective_membership_state not in ("leave", "ban"):
is_blocked = await self.store.is_room_blocked(room_id)
if is_blocked:
raise SynapseError(403, "This room has been blocked on this server")
if effective_membership_state == Membership.INVITE:
target_id = target.to_string()
if ratelimit:
await self.ratelimit_invite(requester, room_id, target_id)
# block any attempts to invite the server notices mxid
if target_id == self._server_notices_mxid:
raise SynapseError(HTTPStatus.FORBIDDEN, "Cannot invite this user")
block_invite = False
if (
self._server_notices_mxid is not None
and requester.user.to_string() == self._server_notices_mxid
):
# allow the server notices mxid to send invites
is_requester_admin = True
else:
is_requester_admin = await self.auth.is_server_admin(requester.user)
if not is_requester_admin:
if self.config.block_non_admin_invites:
logger.info(
"Blocking invite: user is not admin and non-admin "
"invites disabled"
)
block_invite = True
if not await self.spam_checker.user_may_invite(
requester.user.to_string(), target_id, room_id
):
logger.info("Blocking invite due to spam checker")
block_invite = True
if block_invite:
raise SynapseError(403, "Invites have been disabled on this server")
if prev_event_ids:
return await self._local_membership_update(
requester=requester,
target=target,
room_id=room_id,
membership=effective_membership_state,
txn_id=txn_id,
ratelimit=ratelimit,
prev_event_ids=prev_event_ids,
auth_event_ids=auth_event_ids,
content=content,
require_consent=require_consent,
outlier=outlier,
)
latest_event_ids = await self.store.get_prev_events_for_room(room_id)
current_state_ids = await self.state_handler.get_current_state_ids(
room_id, latest_event_ids=latest_event_ids
)
# TODO: Refactor into dictionary of explicitly allowed transitions
# between old and new state, with specific error messages for some
# transitions and generic otherwise
old_state_id = current_state_ids.get((EventTypes.Member, target.to_string()))
if old_state_id:
old_state = await self.store.get_event(old_state_id, allow_none=True)
old_membership = old_state.content.get("membership") if old_state else None
if action == "unban" and old_membership != "ban":
raise SynapseError(
403,
"Cannot unban user who was not banned"
" (membership=%s)" % old_membership,
errcode=Codes.BAD_STATE,
)
if old_membership == "ban" and action not in ["ban", "unban", "leave"]:
raise SynapseError(
403,
"Cannot %s user who was banned" % (action,),
errcode=Codes.BAD_STATE,
)
if old_state:
same_content = content == old_state.content
same_membership = old_membership == effective_membership_state
same_sender = requester.user.to_string() == old_state.sender
if same_sender and same_membership and same_content:
# duplicate event.
# we know it was persisted, so must have a stream ordering.
assert old_state.internal_metadata.stream_ordering
return (
old_state.event_id,
old_state.internal_metadata.stream_ordering,
)
if old_membership in ["ban", "leave"] and action == "kick":
raise AuthError(403, "The target user is not in the room")
# we don't allow people to reject invites to the server notice
# room, but they can leave it once they are joined.
if (
old_membership == Membership.INVITE
and effective_membership_state == Membership.LEAVE
):
is_blocked = await self._is_server_notice_room(room_id)
if is_blocked:
raise SynapseError(
HTTPStatus.FORBIDDEN,
"You cannot reject this invite",
errcode=Codes.CANNOT_LEAVE_SERVER_NOTICE_ROOM,
)
else:
if action == "kick":
raise AuthError(403, "The target user is not in the room")
is_host_in_room = await self._is_host_in_room(current_state_ids)
if effective_membership_state == Membership.JOIN:
if requester.is_guest:
guest_can_join = await self._can_guest_join(current_state_ids)
if not guest_can_join:
# This should be an auth check, but guests are a local concept,
# so don't really fit into the general auth process.
raise AuthError(403, "Guest access not allowed")
# Check if a remote join should be performed.
remote_join, remote_room_hosts = await self._should_perform_remote_join(
target.to_string(), room_id, remote_room_hosts, content, is_host_in_room
)
if remote_join:
if ratelimit:
time_now_s = self.clock.time()
(
allowed,
time_allowed,
) = await self._join_rate_limiter_remote.can_do_action(
requester,
)
if not allowed:
raise LimitExceededError(
retry_after_ms=int(1000 * (time_allowed - time_now_s))
)
inviter = await self._get_inviter(target.to_string(), room_id)
if inviter and not self.hs.is_mine(inviter):
remote_room_hosts.append(inviter.domain)
content["membership"] = Membership.JOIN
profile = self.profile_handler
if not content_specified:
content["displayname"] = await profile.get_displayname(target)
content["avatar_url"] = await profile.get_avatar_url(target)
if requester.is_guest:
content["kind"] = "guest"
remote_join_response = await self._remote_join(
requester, remote_room_hosts, room_id, target, content
)
return remote_join_response
elif effective_membership_state == Membership.LEAVE:
if not is_host_in_room:
# Figure out the user's current membership state for the room
(
current_membership_type,
current_membership_event_id,
) = await self.store.get_local_current_membership_for_user_in_room(
target.to_string(), room_id
)
if not current_membership_type or not current_membership_event_id:
logger.info(
"%s sent a leave request to %s, but that is not an active room "
"on this server, or there is no pending invite or knock",
target,
room_id,
)
raise SynapseError(404, "Not a known room")
# perhaps we've been invited
if current_membership_type == Membership.INVITE:
invite = await self.store.get_event(current_membership_event_id)
logger.info(
"%s rejects invite to %s from %s",
target,
room_id,
invite.sender,
)
if not self.hs.is_mine_id(invite.sender):
# send the rejection to the inviter's HS (with fallback to
# local event)
return await self.remote_reject_invite(
invite.event_id,
txn_id,
requester,
content,
)
# the inviter was on our server, but has now left. Carry on
# with the normal rejection codepath, which will also send the
# rejection out to any other servers we believe are still in the room.
# thanks to overzealous cleaning up of event_forward_extremities in
# `delete_old_current_state_events`, it's possible to end up with no
# forward extremities here. If that happens, let's just hang the
# rejection off the invite event.
#
# see: https://github.com/matrix-org/synapse/issues/7139
if len(latest_event_ids) == 0:
latest_event_ids = [invite.event_id]
# or perhaps this is a remote room that a local user has knocked on
elif current_membership_type == Membership.KNOCK:
knock = await self.store.get_event(current_membership_event_id)
return await self.remote_rescind_knock(
knock.event_id, txn_id, requester, content
)
elif effective_membership_state == Membership.KNOCK:
if not is_host_in_room:
# The knock needs to be sent over federation instead
remote_room_hosts.append(get_domain_from_id(room_id))
content["membership"] = Membership.KNOCK
profile = self.profile_handler
if "displayname" not in content:
content["displayname"] = await profile.get_displayname(target)
if "avatar_url" not in content:
content["avatar_url"] = await profile.get_avatar_url(target)
return await self.remote_knock(
remote_room_hosts, room_id, target, content
)
return await self._local_membership_update(
requester=requester,
target=target,
room_id=room_id,
membership=effective_membership_state,
txn_id=txn_id,
ratelimit=ratelimit,
prev_event_ids=latest_event_ids,
auth_event_ids=auth_event_ids,
content=content,
require_consent=require_consent,
outlier=outlier,
)
async def _should_perform_remote_join(
self,
user_id: str,
room_id: str,
remote_room_hosts: List[str],
content: JsonDict,
is_host_in_room: bool,
) -> Tuple[bool, List[str]]:
"""
Check whether the server should do a remote join (as opposed to a local
join) for a user.
Generally a remote join is used if:
* The server is not yet in the room.
* The server is in the room, the room has restricted join rules, the user
is not joined or invited to the room, and the server does not have
another user who is capable of issuing invites.
Args:
user_id: The user joining the room.
room_id: The room being joined.
remote_room_hosts: A list of remote room hosts.
content: The content to use as the event body of the join. This may
be modified.
is_host_in_room: True if the host is in the room.
Returns:
A tuple of:
True if a remote join should be performed. False if the join can be
done locally.
A list of remote room hosts to use. This is an empty list if a
local join is to be done.
"""
# If the host isn't in the room, pass through the prospective hosts.
if not is_host_in_room:
return True, remote_room_hosts
# If the host is in the room, but not one of the authorised hosts
# for restricted join rules, a remote join must be used.
room_version = await self.store.get_room_version(room_id)
current_state_ids = await self.store.get_current_state_ids(room_id)
# If restricted join rules are not being used, a local join can always
# be used.
if not await self.event_auth_handler.has_restricted_join_rules(
current_state_ids, room_version
):
return False, []
# If the user is invited to the room or already joined, the join
# event can always be issued locally.
prev_member_event_id = current_state_ids.get((EventTypes.Member, user_id), None)
prev_member_event = None
if prev_member_event_id:
prev_member_event = await self.store.get_event(prev_member_event_id)
if prev_member_event.membership in (
Membership.JOIN,
Membership.INVITE,
):
return False, []
# If the local host has a user who can issue invites, then a local
# join can be done.
#
# If not, generate a new list of remote hosts based on which
# can issue invites.
event_map = await self.store.get_events(current_state_ids.values())
current_state = {
state_key: event_map[event_id]
for state_key, event_id in current_state_ids.items()
}
allowed_servers = get_servers_from_users(
get_users_which_can_issue_invite(current_state)
)
# If the local server is not one of allowed servers, then a remote
# join must be done. Return the list of prospective servers based on
# which can issue invites.
if self.hs.hostname not in allowed_servers:
return True, list(allowed_servers)
# Ensure the member should be allowed access via membership in a room.
await self.event_auth_handler.check_restricted_join_rules(
current_state_ids, room_version, user_id, prev_member_event
)
# If this is going to be a local join, additional information must
# be included in the event content in order to efficiently validate
# the event.
content[
"join_authorised_via_users_server"
] = await self.event_auth_handler.get_user_which_could_invite(
room_id,
current_state_ids,
)
return False, []
async def transfer_room_state_on_room_upgrade(
self, old_room_id: str, room_id: str
) -> None:
"""Upon our server becoming aware of an upgraded room, either by upgrading a room
ourselves or joining one, we can transfer over information from the previous room.
Copies user state (tags/push rules) for every local user that was in the old room, as
well as migrating the room directory state.
Args:
old_room_id: The ID of the old room
room_id: The ID of the new room
"""
logger.info("Transferring room state from %s to %s", old_room_id, room_id)
# Find all local users that were in the old room and copy over each user's state
users = await self.store.get_users_in_room(old_room_id)
await self.copy_user_state_on_room_upgrade(old_room_id, room_id, users)
# Add new room to the room directory if the old room was there
# Remove old room from the room directory
old_room = await self.store.get_room(old_room_id)
if old_room and old_room["is_public"]:
await self.store.set_room_is_public(old_room_id, False)
await self.store.set_room_is_public(room_id, True)
# Transfer alias mappings in the room directory
await self.store.update_aliases_for_room(old_room_id, room_id)
# Check if any groups we own contain the predecessor room
local_group_ids = await self.store.get_local_groups_for_room(old_room_id)
for group_id in local_group_ids:
            # Add the new room to those groups
await self.store.add_room_to_group(group_id, room_id, old_room["is_public"])
# Remove the old room from those groups
await self.store.remove_room_from_group(group_id, old_room_id)
async def copy_user_state_on_room_upgrade(
self, old_room_id: str, new_room_id: str, user_ids: Iterable[str]
) -> None:
"""Copy user-specific information when they join a new room when that new room is the
result of a room upgrade
Args:
old_room_id: The ID of upgraded room
new_room_id: The ID of the new room
user_ids: User IDs to copy state for
"""
logger.debug(
"Copying over room tags and push rules from %s to %s for users %s",
old_room_id,
new_room_id,
user_ids,
)
for user_id in user_ids:
try:
# It is an upgraded room. Copy over old tags
await self.copy_room_tags_and_direct_to_room(
old_room_id, new_room_id, user_id
)
# Copy over push rules
await self.store.copy_push_rules_from_room_to_room_for_user(
old_room_id, new_room_id, user_id
)
except Exception:
logger.exception(
"Error copying tags and/or push rules from rooms %s to %s for user %s. "
"Skipping...",
old_room_id,
new_room_id,
user_id,
)
continue
async def send_membership_event(
self,
requester: Optional[Requester],
event: EventBase,
context: EventContext,
ratelimit: bool = True,
) -> None:
"""
Change the membership status of a user in a room.
Args:
requester: The local user who requested the membership
event. If None, certain checks, like whether this homeserver can
act as the sender, will be skipped.
event: The membership event.
context: The context of the event.
ratelimit: Whether to rate limit this request.
Raises:
SynapseError if there was a problem changing the membership.
"""
target_user = UserID.from_string(event.state_key)
room_id = event.room_id
if requester is not None:
sender = UserID.from_string(event.sender)
assert (
sender == requester.user
), "Sender (%s) must be same as requester (%s)" % (sender, requester.user)
assert self.hs.is_mine(sender), "Sender must be our own: %s" % (sender,)
else:
requester = types.create_requester(target_user)
prev_state_ids = await context.get_prev_state_ids()
if event.membership == Membership.JOIN:
if requester.is_guest:
guest_can_join = await self._can_guest_join(prev_state_ids)
if not guest_can_join:
# This should be an auth check, but guests are a local concept,
# so don't really fit into the general auth process.
raise AuthError(403, "Guest access not allowed")
if event.membership not in (Membership.LEAVE, Membership.BAN):
is_blocked = await self.store.is_room_blocked(room_id)
if is_blocked:
raise SynapseError(403, "This room has been blocked on this server")
event = await self.event_creation_handler.handle_new_client_event(
requester, event, context, extra_users=[target_user], ratelimit=ratelimit
)
prev_member_event_id = prev_state_ids.get(
(EventTypes.Member, event.state_key), None
)
if event.membership == Membership.LEAVE:
if prev_member_event_id:
prev_member_event = await self.store.get_event(prev_member_event_id)
if prev_member_event.membership == Membership.JOIN:
await self._user_left_room(target_user, room_id)
async def _can_guest_join(self, current_state_ids: StateMap[str]) -> bool:
"""
Returns whether a guest can join a room based on its current state.
"""
guest_access_id = current_state_ids.get((EventTypes.GuestAccess, ""), None)
if not guest_access_id:
return False
guest_access = await self.store.get_event(guest_access_id)
return bool(
guest_access
and guest_access.content
and guest_access.content.get(EventContentFields.GUEST_ACCESS)
== GuestAccess.CAN_JOIN
)
async def kick_guest_users(self, current_state: Iterable[EventBase]) -> None:
"""Kick any local guest users from the room.
This is called when the room state changes from guests allowed to not-allowed.
Params:
current_state: the current state of the room. We will iterate this to look
for guest users to kick.
"""
for member_event in current_state:
try:
if member_event.type != EventTypes.Member:
continue
if not self.hs.is_mine_id(member_event.state_key):
continue
if member_event.content["membership"] not in {
Membership.JOIN,
Membership.INVITE,
}:
continue
if (
"kind" not in member_event.content
or member_event.content["kind"] != "guest"
):
continue
# We make the user choose to leave, rather than have the
# event-sender kick them. This is partially because we don't
# need to worry about power levels, and partially because guest
# users are a concept which doesn't hugely work over federation,
# and having homeservers have their own users leave keeps more
# of that decision-making and control local to the guest-having
# homeserver.
target_user = UserID.from_string(member_event.state_key)
requester = create_requester(
target_user, is_guest=True, authenticated_entity=self._server_name
)
handler = self.hs.get_room_member_handler()
await handler.update_membership(
requester,
target_user,
member_event.room_id,
"leave",
ratelimit=False,
require_consent=False,
)
except Exception as e:
logger.exception("Error kicking guest user: %s" % (e,))
async def lookup_room_alias(
self, room_alias: RoomAlias
) -> Tuple[RoomID, List[str]]:
"""
Get the room ID associated with a room alias.
Args:
room_alias: The alias to look up.
Returns:
A tuple of:
The room ID as a RoomID object.
Hosts likely to be participating in the room ([str]).
Raises:
SynapseError if room alias could not be found.
"""
directory_handler = self.directory_handler
mapping = await directory_handler.get_association(room_alias)
if not mapping:
raise SynapseError(404, "No such room alias")
room_id = mapping["room_id"]
servers = mapping["servers"]
# put the server which owns the alias at the front of the server list.
if room_alias.domain in servers:
servers.remove(room_alias.domain)
servers.insert(0, room_alias.domain)
return RoomID.from_string(room_id), servers
async def _get_inviter(self, user_id: str, room_id: str) -> Optional[UserID]:
invite = await self.store.get_invite_for_local_user_in_room(
user_id=user_id, room_id=room_id
)
if invite:
return UserID.from_string(invite.sender)
return None
async def do_3pid_invite(
self,
room_id: str,
inviter: UserID,
medium: str,
address: str,
id_server: str,
requester: Requester,
txn_id: Optional[str],
id_access_token: Optional[str] = None,
) -> int:
"""Invite a 3PID to a room.
Args:
room_id: The room to invite the 3PID to.
inviter: The user sending the invite.
medium: The 3PID's medium.
address: The 3PID's address.
id_server: The identity server to use.
requester: The user making the request.
txn_id: The transaction ID this is part of, or None if this is not
part of a transaction.
id_access_token: The optional identity server access token.
Returns:
The new stream ID.
Raises:
ShadowBanError if the requester has been shadow-banned.
"""
if self.config.block_non_admin_invites:
is_requester_admin = await self.auth.is_server_admin(requester.user)
if not is_requester_admin:
raise SynapseError(
403, "Invites have been disabled on this server", Codes.FORBIDDEN
)
if requester.shadow_banned:
# We randomly sleep a bit just to annoy the requester.
await self.clock.sleep(random.randint(1, 10))
raise ShadowBanError()
# We need to rate limit *before* we send out any 3PID invites, so we
# can't just rely on the standard ratelimiting of events.
await self.base_handler.ratelimit(requester)
can_invite = await self.third_party_event_rules.check_threepid_can_be_invited(
medium, address, room_id
)
if not can_invite:
raise SynapseError(
403,
"This third-party identifier can not be invited in this room",
Codes.FORBIDDEN,
)
if not self._enable_lookup:
raise SynapseError(
403, "Looking up third-party identifiers is denied from this server"
)
invitee = await self.identity_handler.lookup_3pid(
id_server, medium, address, id_access_token
)
if invitee:
# Note that update_membership with an action of "invite" can raise
# a ShadowBanError, but this was done above already.
_, stream_id = await self.update_membership(
requester, UserID.from_string(invitee), room_id, "invite", txn_id=txn_id
)
else:
stream_id = await self._make_and_store_3pid_invite(
requester,
id_server,
medium,
address,
room_id,
inviter,
txn_id=txn_id,
id_access_token=id_access_token,
)
return stream_id
async def _make_and_store_3pid_invite(
self,
requester: Requester,
id_server: str,
medium: str,
address: str,
room_id: str,
user: UserID,
txn_id: Optional[str],
id_access_token: Optional[str] = None,
) -> int:
room_state = await self.state_handler.get_current_state(room_id)
inviter_display_name = ""
inviter_avatar_url = ""
member_event = room_state.get((EventTypes.Member, user.to_string()))
if member_event:
inviter_display_name = member_event.content.get("displayname", "")
inviter_avatar_url = member_event.content.get("avatar_url", "")
# if user has no display name, default to their MXID
if not inviter_display_name:
inviter_display_name = user.to_string()
canonical_room_alias = ""
canonical_alias_event = room_state.get((EventTypes.CanonicalAlias, ""))
if canonical_alias_event:
canonical_room_alias = canonical_alias_event.content.get("alias", "")
room_name = ""
room_name_event = room_state.get((EventTypes.Name, ""))
if room_name_event:
room_name = room_name_event.content.get("name", "")
room_type = None
room_create_event = room_state.get((EventTypes.Create, ""))
if room_create_event:
room_type = room_create_event.content.get(EventContentFields.ROOM_TYPE)
room_join_rules = ""
join_rules_event = room_state.get((EventTypes.JoinRules, ""))
if join_rules_event:
room_join_rules = join_rules_event.content.get("join_rule", "")
room_avatar_url = ""
room_avatar_event = room_state.get((EventTypes.RoomAvatar, ""))
if room_avatar_event:
room_avatar_url = room_avatar_event.content.get("url", "")
(
token,
public_keys,
fallback_public_key,
display_name,
) = await self.identity_handler.ask_id_server_for_third_party_invite(
requester=requester,
id_server=id_server,
medium=medium,
address=address,
room_id=room_id,
inviter_user_id=user.to_string(),
room_alias=canonical_room_alias,
room_avatar_url=room_avatar_url,
room_join_rules=room_join_rules,
room_name=room_name,
room_type=room_type,
inviter_display_name=inviter_display_name,
inviter_avatar_url=inviter_avatar_url,
id_access_token=id_access_token,
)
(
event,
stream_id,
) = await self.event_creation_handler.create_and_send_nonmember_event(
requester,
{
"type": EventTypes.ThirdPartyInvite,
"content": {
"display_name": display_name,
"public_keys": public_keys,
# For backwards compatibility:
"key_validity_url": fallback_public_key["key_validity_url"],
"public_key": fallback_public_key["public_key"],
},
"room_id": room_id,
"sender": user.to_string(),
"state_key": token,
},
ratelimit=False,
txn_id=txn_id,
)
return stream_id
async def _is_host_in_room(self, current_state_ids: StateMap[str]) -> bool:
# Have we just created the room, and is this about to be the very
# first member event?
create_event_id = current_state_ids.get(("m.room.create", ""))
if len(current_state_ids) == 1 and create_event_id:
# We can only get here if we're in the process of creating the room
return True
for etype, state_key in current_state_ids:
if etype != EventTypes.Member or not self.hs.is_mine_id(state_key):
continue
event_id = current_state_ids[(etype, state_key)]
event = await self.store.get_event(event_id, allow_none=True)
if not event:
continue
if event.membership == Membership.JOIN:
return True
return False
async def _is_server_notice_room(self, room_id: str) -> bool:
if self._server_notices_mxid is None:
return False
user_ids = await self.store.get_users_in_room(room_id)
return self._server_notices_mxid in user_ids
class RoomMemberMasterHandler(RoomMemberHandler):
def __init__(self, hs: "HomeServer"):
super().__init__(hs)
self.distributor = hs.get_distributor()
self.distributor.declare("user_left_room")
async def _is_remote_room_too_complex(
self, room_id: str, remote_room_hosts: List[str]
) -> Optional[bool]:
"""
Check if complexity of a remote room is too great.
Args:
room_id
remote_room_hosts
Returns: bool of whether the complexity is too great, or None
if unable to be fetched
"""
max_complexity = self.hs.config.limit_remote_rooms.complexity
complexity = await self.federation_handler.get_room_complexity(
remote_room_hosts, room_id
)
if complexity:
return complexity["v1"] > max_complexity
return None
async def _is_local_room_too_complex(self, room_id: str) -> bool:
"""
Check if the complexity of a local room is too great.
Args:
room_id: The room ID to check for complexity.
"""
max_complexity = self.hs.config.limit_remote_rooms.complexity
complexity = await self.store.get_room_complexity(room_id)
return complexity["v1"] > max_complexity
async def _remote_join(
self,
requester: Requester,
remote_room_hosts: List[str],
room_id: str,
user: UserID,
content: dict,
) -> Tuple[str, int]:
"""Implements RoomMemberHandler._remote_join"""
# filter ourselves out of remote_room_hosts: do_invite_join ignores it
# and if it is the only entry we'd like to return a 404 rather than a
# 500.
remote_room_hosts = [
host for host in remote_room_hosts if host != self.hs.hostname
]
if len(remote_room_hosts) == 0:
raise SynapseError(404, "No known servers")
check_complexity = self.hs.config.limit_remote_rooms.enabled
if check_complexity and self.hs.config.limit_remote_rooms.admins_can_join:
check_complexity = not await self.auth.is_server_admin(user)
if check_complexity:
# Fetch the room complexity
too_complex = await self._is_remote_room_too_complex(
room_id, remote_room_hosts
)
if too_complex is True:
raise SynapseError(
code=400,
msg=self.hs.config.limit_remote_rooms.complexity_error,
errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
)
# We don't do an auth check if we are doing an invite
# join dance for now, since we're kinda implicitly checking
# that we are allowed to join when we decide whether or not we
# need to do the invite/join dance.
event_id, stream_id = await self.federation_handler.do_invite_join(
remote_room_hosts, room_id, user.to_string(), content
)
# Check the room we just joined wasn't too large, if we didn't fetch the
# complexity of it before.
if check_complexity:
if too_complex is False:
# We checked, and we're under the limit.
return event_id, stream_id
# Check again, but with the local state events
too_complex = await self._is_local_room_too_complex(room_id)
if too_complex is False:
# We're under the limit.
return event_id, stream_id
# The room is too large. Leave.
requester = types.create_requester(
user, authenticated_entity=self._server_name
)
await self.update_membership(
requester=requester, target=user, room_id=room_id, action="leave"
)
raise SynapseError(
code=400,
msg=self.hs.config.limit_remote_rooms.complexity_error,
errcode=Codes.RESOURCE_LIMIT_EXCEEDED,
)
return event_id, stream_id
async def remote_reject_invite(
self,
invite_event_id: str,
txn_id: Optional[str],
requester: Requester,
content: JsonDict,
) -> Tuple[str, int]:
"""
Rejects an out-of-band invite received from a remote user
Implements RoomMemberHandler.remote_reject_invite
"""
invite_event = await self.store.get_event(invite_event_id)
room_id = invite_event.room_id
target_user = invite_event.state_key
# first of all, try doing a rejection via the inviting server
fed_handler = self.federation_handler
try:
inviter_id = UserID.from_string(invite_event.sender)
event, stream_id = await fed_handler.do_remotely_reject_invite(
[inviter_id.domain], room_id, target_user, content=content
)
return event.event_id, stream_id
except Exception as e:
# if we were unable to reject the invite, we will generate our own
# leave event.
#
# The 'except' clause is very broad, but we need to
# capture everything from DNS failures upwards
#
logger.warning("Failed to reject invite: %s", e)
return await self._generate_local_out_of_band_leave(
invite_event, txn_id, requester, content
)
async def remote_rescind_knock(
self,
knock_event_id: str,
txn_id: Optional[str],
requester: Requester,
content: JsonDict,
) -> Tuple[str, int]:
"""
Rescinds a local knock made on a remote room
Args:
knock_event_id: The ID of the knock event to rescind.
txn_id: The transaction ID to use.
requester: The originator of the request.
content: The content of the leave event.
Implements RoomMemberHandler.remote_rescind_knock
"""
# TODO: We don't yet support rescinding knocks over federation
# as we don't know which homeserver to send it to. An obvious
# candidate is the remote homeserver we originally knocked through,
# however we don't currently store that information.
# Just rescind the knock locally
knock_event = await self.store.get_event(knock_event_id)
return await self._generate_local_out_of_band_leave(
knock_event, txn_id, requester, content
)
async def _generate_local_out_of_band_leave(
self,
previous_membership_event: EventBase,
txn_id: Optional[str],
requester: Requester,
content: JsonDict,
) -> Tuple[str, int]:
"""Generate a local leave event for a room
This can be called after we e.g fail to reject an invite via a remote server.
It generates an out-of-band membership event locally.
Args:
previous_membership_event: the previous membership event for this user
txn_id: optional transaction ID supplied by the client
requester: user making the request, according to the access token
content: additional content to include in the leave event.
Normally an empty dict.
Returns:
A tuple containing (event_id, stream_id of the leave event)
"""
room_id = previous_membership_event.room_id
target_user = previous_membership_event.state_key
content["membership"] = Membership.LEAVE
event_dict = {
"type": EventTypes.Member,
"room_id": room_id,
"sender": target_user,
"content": content,
"state_key": target_user,
}
# the auth events for the new event are the same as that of the previous event, plus
# the event itself.
#
# the prev_events consist solely of the previous membership event.
prev_event_ids = [previous_membership_event.event_id]
auth_event_ids = previous_membership_event.auth_event_ids() + prev_event_ids
event, context = await self.event_creation_handler.create_event(
requester,
event_dict,
txn_id=txn_id,
prev_event_ids=prev_event_ids,
auth_event_ids=auth_event_ids,
)
event.internal_metadata.outlier = True
event.internal_metadata.out_of_band_membership = True
result_event = await self.event_creation_handler.handle_new_client_event(
requester,
event,
context,
extra_users=[UserID.from_string(target_user)],
)
# we know it was persisted, so must have a stream ordering
assert result_event.internal_metadata.stream_ordering
return result_event.event_id, result_event.internal_metadata.stream_ordering
async def remote_knock(
self,
remote_room_hosts: List[str],
room_id: str,
user: UserID,
content: dict,
) -> Tuple[str, int]:
"""Sends a knock to a room. Attempts to do so via one remote out of a given list.
Args:
remote_room_hosts: A list of homeservers to try knocking through.
room_id: The ID of the room to knock on.
user: The user to knock on behalf of.
content: The content of the knock event.
Returns:
A tuple of (event ID, stream ID).
"""
# filter ourselves out of remote_room_hosts
remote_room_hosts = [
host for host in remote_room_hosts if host != self.hs.hostname
]
if len(remote_room_hosts) == 0:
raise SynapseError(404, "No known servers")
return await self.federation_handler.do_knock(
remote_room_hosts, room_id, user.to_string(), content=content
)
async def _user_left_room(self, target: UserID, room_id: str) -> None:
"""Implements RoomMemberHandler._user_left_room"""
user_left_room(self.distributor, target, room_id)
async def forget(self, user: UserID, room_id: str) -> None:
user_id = user.to_string()
member = await self.state_handler.get_current_state(
room_id=room_id, event_type=EventTypes.Member, state_key=user_id
)
membership = member.membership if member else None
if membership is not None and membership not in [
Membership.LEAVE,
Membership.BAN,
]:
raise SynapseError(400, "User %s in room %s" % (user_id, room_id))
if membership:
await self.store.forget(user_id, room_id)
def get_users_which_can_issue_invite(auth_events: StateMap[EventBase]) -> List[str]:
"""
Return the list of users which can issue invites.
This is done by exploring the joined users and comparing their power levels
    to the necessary power level to issue an invite.
Args:
auth_events: state in force at this point in the room
Returns:
The users which can issue invites.
"""
invite_level = get_named_level(auth_events, "invite", 0)
users_default_level = get_named_level(auth_events, "users_default", 0)
power_level_event = get_power_level_event(auth_events)
# Custom power-levels for users.
if power_level_event:
users = power_level_event.content.get("users", {})
else:
users = {}
result = []
# Check which members are able to invite by ensuring they're joined and have
# the necessary power level.
for (event_type, state_key), event in auth_events.items():
if event_type != EventTypes.Member:
continue
if event.membership != Membership.JOIN:
continue
# Check if the user has a custom power level.
if users.get(state_key, users_default_level) >= invite_level:
result.append(state_key)
return result
def get_servers_from_users(users: List[str]) -> Set[str]:
"""
Resolve a list of users into their servers.
Args:
users: A list of users.
Returns:
A set of servers.
"""
servers = set()
for user in users:
try:
servers.add(get_domain_from_id(user))
except SynapseError:
pass
return servers
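# Illustrative note (not part of the upstream module): for joined members such as
# ["@alice:example.com", "@bob:other.org"], get_servers_from_users returns
# {"example.com", "other.org"}; user IDs whose domain cannot be parsed are skipped.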
|
py
|
1a56c850afb6dcc1e066f2784e7a8ae10208330e
|
import argparse
import asyncio
from pybecker.becker import Becker
async def main():
"""Main function"""
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--channel', required=True, help='channel')
parser.add_argument('-a', '--action', required=True, help='Command to execute (UP, DOWN, HALT, PAIR)')
parser.add_argument('-d', '--device', required=False, help='Device to use for connectivity')
args = parser.parse_args()
client = Becker()
if args.action == "UP":
await client.move_up(args.channel)
elif args.action == "HALT":
await client.stop(args.channel)
elif args.action == "DOWN":
await client.move_down(args.channel)
elif args.action == "PAIR":
await client.pair(args.channel)
if __name__ == '__main__':
asyncio.run(main())
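# Example invocation (a sketch only; the script filename and device path are
# hypothetical):
#   python becker_cli.py --channel 1 --action UP --device /dev/ttyUSB0
# Note that --device is parsed above but not currently passed to Becker().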
|
py
|
1a56c9739632660400e84c7b97388be457795f06
|
"""Adapted from:
@longcw faster_rcnn_pytorch: https://github.com/longcw/faster_rcnn_pytorch
@rbgirshick py-faster-rcnn https://github.com/rbgirshick/py-faster-rcnn
Licensed under The MIT License [see LICENSE for details]
"""
from __future__ import print_function
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
# from data import VOC_ROOT, VOCAnnotationTransform, VOCDetection, BaseTransform
# from data import VOC_CLASSES as labelmap
import torch.utils.data as data
from data import BaseTransform
from data.custom import CUSTOM_CLASSES as labelmap
from data.custom import customDetection, customAnnotationTransform, CUSTOM_CLASSES, CUSTOM_ROOT
# from ssd import build_ssd
from ssd_resnet_101_new import build_ssd
import sys
import os
import time
import argparse
import numpy as np
import pickle
import cv2
if sys.version_info[0] == 2:
import xml.etree.cElementTree as ET
else:
import xml.etree.ElementTree as ET
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(
description='Single Shot MultiBox Detector Evaluation')
parser.add_argument('--trained_model',
default='weights/CUSTOM.pth', type=str,
help='Trained state_dict file path to open')
parser.add_argument('--save_folder', default='eval/', type=str,
help='File path to save results')
parser.add_argument('--confidence_threshold', default=0.01, type=float,
help='Detection confidence threshold')
parser.add_argument('--top_k', default=5, type=int,
help='Further restrict the number of predictions to parse')
parser.add_argument('--cuda', default=True, type=str2bool,
help='Use cuda to train model')
parser.add_argument('--custom_root', default=CUSTOM_ROOT,
help='Location of VOC root directory')
parser.add_argument('--cleanup', default=True, type=str2bool,
help='Cleanup and remove results files following eval')
args = parser.parse_args()
if not os.path.exists(args.save_folder):
os.mkdir(args.save_folder)
if torch.cuda.is_available():
if args.cuda:
torch.set_default_tensor_type('torch.cuda.FloatTensor')
if not args.cuda:
print("WARNING: It looks like you have a CUDA device, but aren't using \
CUDA. Run with --cuda for optimal eval speed.")
torch.set_default_tensor_type('torch.FloatTensor')
else:
torch.set_default_tensor_type('torch.FloatTensor')
annopath = os.path.join(args.custom_root, 'shenhe', 'Annotations', '%s.xml')
imgpath = os.path.join(args.custom_root, 'shenhe', 'JPEGImages', '%s.jpg')
imgsetpath = os.path.join(args.custom_root, 'shenhe', 'ImageSets', 'Main', '%s.txt')
devkit_path = args.custom_root + 'shenhe'
dataset_mean = (104, 117, 123)
set_type = 'test'
class Timer(object):
"""A simple timer."""
def __init__(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.average_time = 0.
def tic(self):
        # using time.time instead of time.clock because time.clock
        # does not normalize for multithreading
self.start_time = time.time()
def toc(self, average=True):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
self.average_time = self.total_time / self.calls
if average:
return self.average_time
else:
return self.diff
def parse_rec(filename):
""" Parse a PASCAL VOC xml file """
tree = ET.parse(filename)
objects = []
for obj in tree.findall('object'):
obj_struct = {}
obj_struct['name'] = obj.find('name').text
obj_struct['pose'] = obj.find('pose').text
obj_struct['truncated'] = int(obj.find('truncated').text)
obj_struct['difficult'] = int(obj.find('difficult').text)
bbox = obj.find('bndbox')
obj_struct['bbox'] = [int(bbox.find('xmin').text) - 1,
int(bbox.find('ymin').text) - 1,
int(bbox.find('xmax').text) - 1,
int(bbox.find('ymax').text) - 1]
objects.append(obj_struct)
return objects
def get_output_dir(name, phase):
"""Return the directory where experimental artifacts are placed.
If the directory does not exist, it is created.
A canonical path is built using the name from an imdb and a network
(if not None).
"""
filedir = os.path.join(name, phase)
if not os.path.exists(filedir):
os.makedirs(filedir)
return filedir
def get_voc_results_file_template(image_set, cls):
# VOCdevkit/VOC2007/results/det_test_aeroplane.txt
filename = 'det_' + image_set + '_%s.txt' % (cls)
filedir = os.path.join(devkit_path, 'results')
if not os.path.exists(filedir):
os.makedirs(filedir)
path = os.path.join(filedir, filename)
return path
def write_voc_results_file(all_boxes, dataset):
for cls_ind, cls in enumerate(labelmap):
print('Writing {:s} VOC results file'.format(cls))
filename = get_voc_results_file_template(set_type, cls)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(dataset.ids):
dets = all_boxes[cls_ind+1][im_ind]
                if len(dets) == 0:
continue
# the VOCdevkit expects 1-based indices
for k in range(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index[1], dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1,
dets[k, 2] + 1, dets[k, 3] + 1))
def do_python_eval(output_dir='output', use_07=True):
cachedir = os.path.join(devkit_path, 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
use_07_metric = use_07
print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(labelmap):
filename = get_voc_results_file_template(set_type, cls)
rec, prec, ap = voc_eval(
filename, annopath, imgsetpath % (set_type), cls, cachedir,
ovthresh=0.1, use_07_metric=use_07_metric)
aps += [ap]
print('AP for {} = {:.4f}'.format(cls, ap))
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
print('Mean AP = {:.4f}'.format(np.mean(aps)))
print('~~~~~~~~')
print('Results:')
for ap in aps:
print('{:.3f}'.format(ap))
print('{:.3f}'.format(np.mean(aps)))
print('~~~~~~~~')
print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** Python eval code.')
print('Results should be very close to the official MATLAB eval code.')
print('--------------------------------------------------------------')
def voc_ap(rec, prec, use_07_metric=True):
""" ap = voc_ap(rec, prec, [use_07_metric])
Compute VOC AP given precision and recall.
If use_07_metric is true, uses the
VOC 07 11 point method (default:True).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
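# Illustrative sketch (not part of the original script): with the 11-point metric,
# voc_ap(np.array([0.0, 0.5, 1.0]), np.array([1.0, 1.0, 0.5])) takes the maximum
# precision at recall thresholds 0.0, 0.1, ..., 1.0, giving (6 * 1.0 + 5 * 0.5) / 11 ≈ 0.77.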
def voc_eval(detpath,
annopath,
imagesetfile,
classname,
cachedir,
ovthresh=0.5,
use_07_metric=True):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
cachedir: Directory for caching the annotations
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default True)
"""
# assumes detections are in detpath.format(classname)
# assumes annotations are in annopath.format(imagename)
# assumes imagesetfile is a text file with each line an image name
# cachedir caches the annotations in a pickle file
# first load gt
if not os.path.isdir(cachedir):
os.mkdir(cachedir)
cachefile = os.path.join(cachedir, 'annots.pkl')
# read list of images
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
if not os.path.isfile(cachefile):
# load annots
recs = {}
for i, imagename in enumerate(imagenames):
recs[imagename] = parse_rec(annopath % (imagename))
if i % 100 == 0:
print('Reading annotation for {:d}/{:d}'.format(
i + 1, len(imagenames)))
# save
print('Saving cached annotations to {:s}'.format(cachefile))
with open(cachefile, 'wb') as f:
pickle.dump(recs, f)
else:
# load
with open(cachefile, 'rb') as f:
recs = pickle.load(f)
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in R])
        difficult = np.array([x['difficult'] for x in R]).astype(bool)
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {'bbox': bbox,
'difficult': difficult,
'det': det}
# read dets
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
    if any(lines):
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin, 0.)
ih = np.maximum(iymax - iymin, 0.)
inters = iw * ih
uni = ((bb[2] - bb[0]) * (bb[3] - bb[1]) +
(BBGT[:, 2] - BBGT[:, 0]) *
(BBGT[:, 3] - BBGT[:, 1]) - inters)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
# avoid divide by zero in case the first detection matches a difficult
# ground truth
prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, use_07_metric)
else:
rec = -1.
prec = -1.
ap = -1.
return rec, prec, ap
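# Within this script, voc_eval is driven by do_python_eval above: detpath comes from
# get_voc_results_file_template, annopath/imgsetpath are the module-level templates,
# and the IoU threshold is set to 0.1.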
def test_net(save_folder, net, cuda, dataset, transform, top_k,
im_size=300, thresh=0.05):
num_images = len(dataset)
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in range(num_images)]
for _ in range(len(labelmap)+1)]
# timers
_t = {'im_detect': Timer(), 'misc': Timer()}
output_dir = get_output_dir('ssd300_120000', set_type)
det_file = os.path.join(output_dir, 'detections.pkl')
for i in range(num_images):
im, gt, h, w = dataset.pull_item(i)
x = Variable(im.unsqueeze(0))
if args.cuda:
x = x.cuda()
_t['im_detect'].tic()
detections = net(x).data
detect_time = _t['im_detect'].toc(average=False)
# skip j = 0, because it's the background class
for j in range(1, detections.size(1)):
dets = detections[0, j, :]
mask = dets[:, 0].gt(0.).expand(5, dets.size(0)).t()
dets = torch.masked_select(dets, mask).view(-1, 5)
if dets.size(0) == 0:
continue
boxes = dets[:, 1:]
boxes[:, 0] *= w
boxes[:, 2] *= w
boxes[:, 1] *= h
boxes[:, 3] *= h
scores = dets[:, 0].cpu().numpy()
cls_dets = np.hstack((boxes.cpu().numpy(),
scores[:, np.newaxis])).astype(np.float32,
copy=False)
all_boxes[j][i] = cls_dets
print('im_detect: {:d}/{:d} {:.3f}s'.format(i + 1,
num_images, detect_time))
with open(det_file, 'wb') as f:
pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)
print('Evaluating detections')
evaluate_detections(all_boxes, output_dir, dataset)
def evaluate_detections(box_list, output_dir, dataset):
write_voc_results_file(box_list, dataset)
do_python_eval(output_dir)
if __name__ == '__main__':
# load net
num_classes = len(labelmap) + 1 # +1 for background
net = build_ssd('test', 300, num_classes) # initialize SSD
net.load_state_dict(torch.load(args.trained_model))
net.eval()
print('Finished loading model!')
# load data
dataset = customDetection(args.custom_root, [('shenhe', set_type)],
BaseTransform(300, dataset_mean),
customAnnotationTransform())
if args.cuda:
net = net.cuda()
cudnn.benchmark = True
# evaluation
test_net(args.save_folder, net, args.cuda, dataset,
BaseTransform(net.size, dataset_mean), args.top_k, 300,
thresh=args.confidence_threshold)
|
py
|
1a56c9a737e7db6132e59013947fcb7fc49963c4
|
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.cnn import constant_init, kaiming_init
from ..builder import BACKBONES
import os
from mmdet.ops.CSPOSAModule import CSPOSAModule
class ConvStride2(nn.Module):
def __init__(self, in_ch, out_ch, kernel_size=3, exp=1, norm_cfg=dict(type='BN', requires_grad=True)):
super(ConvStride2, self).__init__()
# self.conv1x1 = ConvModule(in_ch, out_ch, kernel_size=1, stride=2, padding=0,
# norm_cfg=dict(type='BN', requires_grad=True))
self.conv3x3 = ConvModule(in_ch, out_ch, kernel_size=3, stride=2, padding=1,
norm_cfg=norm_cfg)
def forward(self, x):
# return self.conv1x1(x)+self.conv3x3(x)
return self.conv3x3(x)
class CSPOSAStage(nn.Module):
def __init__(self, in_ch, stage_ch, num_block, kernel_size,
conv_type=dict(type="NormalConv",
info=dict(norm_cfg=dict(type='BN', requires_grad=True))),
conv1x1=True):
        assert isinstance(conv_type, dict), "conv_type must be a dict"
super(CSPOSAStage, self).__init__()
self.Block = nn.Sequential(ConvStride2(in_ch, stage_ch, kernel_size=kernel_size),
CSPOSAModule(stage_ch, num_block, conv_type, kernel_size=kernel_size, conv1x1=conv1x1))
def forward(self, x):
return self.Block(x)
@BACKBONES.register_module()
class CSPOSANet(nn.Module):
def __init__(self,
stem_channels,
stage_channels,
block_per_stage,
conv_type=dict(type="NormalConv",
info=dict(norm_cfg=dict(type='BN', requires_grad=True))),
num_out=5,
kernel_size=3,
conv1x1=True
):
super(CSPOSANet, self).__init__()
        if isinstance(kernel_size, int):
            kernel_sizes = [kernel_size for _ in range(len(stage_channels))]
        elif isinstance(kernel_size, list):
            assert len(kernel_size) == len(stage_channels), \
                "if kernel_size is list, len(kernel_size) should == len(stage_channels)"
            kernel_sizes = kernel_size
        else:
            raise TypeError("type of kernel size should be int or list")
        assert num_out <= len(stage_channels), 'num_out should not exceed the number of stages!'
conv_info = conv_type["info"]
norm_cfg = conv_info["norm_cfg"]
self.stage_nums = len(stage_channels)
self.stem = ConvModule(3, stem_channels, kernel_size=3, stride=2, padding=1,
norm_cfg=norm_cfg)
        '''default end_stage is the last stage'''
self.start_stage = len(stage_channels)-num_out+1
self.stages = nn.ModuleList()
self.last_stage = len(stage_channels)
in_channel = stem_channels
for num_stages in range(self.stage_nums):
stage = CSPOSAStage(in_channel, stage_channels[num_stages], block_per_stage[num_stages],
kernel_size=kernel_sizes[num_stages], conv_type=conv_type, conv1x1=conv1x1)
in_channel = stage_channels[num_stages]
# stage = OrderedDict()
# for num_layers in range(block_per_stage[num_stages]):
# stage.update({'stage_{}_layer{}'.format(num_stages, num_layers):_OSA_stage(in_channel, stage_channels[num_stages],
# concat_channels[num_stages], layer_per_block[num_stages])})
# in_channel = concat_channels[num_stages]
self.stages.append(stage)
def init_weights(self, pretrained=None):
if isinstance(pretrained, str):
import torch
assert os.path.isfile(pretrained), "file {} not found.".format(pretrained)
self.load_state_dict(torch.load(pretrained), strict=False)
elif pretrained is None:
for m in self.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
constant_init(m, 1)
else:
raise TypeError('pretrained must be a str or None')
def forward(self, x):
tmp = x
x = self.stem(x)
for i in range(self.start_stage):
x = self.stages[i](x)
out = []
for i in range(self.start_stage, len(self.stages)):
out.append(x)
x = self.stages[i](x)
out.append(x)
return out
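# Illustrative mmdet config snippet (a sketch only; the channel and block counts are
# assumed values, not ones shipped with this backbone):
#   backbone=dict(
#       type='CSPOSANet',
#       stem_channels=32,
#       stage_channels=[64, 128, 256, 512],
#       block_per_stage=[1, 2, 4, 2],
#       kernel_size=3,
#       num_out=4)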
|
py
|
1a56ca59f003b93e006521a5768ab470868bd12b
|
import Eva
from collections import defaultdict
from cdlib import AttrNodeClustering
import networkx as nx
from cdlib.utils import convert_graph_formats
from cdlib.algorithms.internal.ILouvain import ML2
__all__ = ["eva", "ilouvain"]
def eva(
g_original: object,
labels: dict,
weight: str = "weight",
resolution: float = 1.0,
alpha: float = 0.5,
) -> AttrNodeClustering:
"""
The Eva algorithm extends the Louvain approach in order to deal with the attributes of the nodes (aka Louvain Extended to Vertex Attributes).
It optimizes - combining them linearly - two quality functions, a structural and a clustering one, namely Newman's modularity and purity, estimated as the product of the frequencies of the most frequent labels carried by the nodes within the communities.
    A parameter alpha tunes the importance of the two functions: a high value of alpha favors the clustering criterion over the structural one.
**Supported Graph Types**
========== ======== ======== ======== ==============
Undirected Directed Weighted Temporal Node Attribute
========== ======== ======== ======== ==============
Yes No No No Yes
========== ======== ======== ======== ==============
:param g_original: a networkx/igraph object
:param labels: dictionary specifying for each node (key) a dict (value) specifying the name attribute (key) and its value (value)
:param weight: str, optional the key in graph to use as weight. Default to 'weight'
:param resolution: double, optional Will change the size of the communities, default to 1.
:param alpha: float, assumed in [0,1], optional Will tune the importance of modularity and purity criteria, default to 0.5
:return: AttrNodeClustering object
:Example:
>>> from cdlib.algorithms import eva
>>> import networkx as nx
>>> import random
>>> l1 = ['A', 'B', 'C', 'D']
>>> l2 = ["E", "F", "G"]
>>> g_attr = nx.barabasi_albert_graph(100, 5)
>>> labels=dict()
>>> for node in g_attr.nodes():
>>> labels[node]={"l1":random.choice(l1), "l2":random.choice(l2)}
>>> communities = eva(g_attr, labels, alpha=0.8)
:References:
Citraro, S., & Rossetti, G. (2019, December). Eva: Attribute-Aware Network Segmentation. In International Conference on Complex Networks and Their Applications (pp. 141-151). Springer, Cham.
.. note:: Reference implementation: https://github.com/GiulioRossetti/Eva/tree/master/Eva
"""
g = convert_graph_formats(g_original, nx.Graph)
nx.set_node_attributes(g, labels)
coms, coms_labels = Eva.eva_best_partition(
g, weight=weight, resolution=resolution, alpha=alpha
)
# Reshaping the results
coms_to_node = defaultdict(list)
for n, c in coms.items():
coms_to_node[c].append(n)
coms_eva = [list(c) for c in coms_to_node.values()]
return AttrNodeClustering(
coms_eva,
g_original,
"Eva",
coms_labels,
method_parameters={
"weight": weight,
"resolution": resolution,
"alpha": alpha,
},
)
def ilouvain(g_original: object, labels: dict) -> AttrNodeClustering:
"""
The I-Louvain algorithm extends the Louvain approach in order to deal only with the scalar attributes of the nodes.
It optimizes Newman's modularity combined with an entropy measure.
**Supported Graph Types**
========== ======== ======== ======== ==============
Undirected Directed Weighted Temporal Node Attribute
========== ======== ======== ======== ==============
Yes No No No Yes
========== ======== ======== ======== ==============
:param g_original: a networkx/igraph object
:param labels: dictionary specifying for each node (key) a dict (value) specifying the name attribute (key) and its value (value)
:return: AttrNodeClustering object
:Example:
>>> from cdlib.algorithms import ilouvain
>>> import networkx as nx
>>> import random
>>> l1 = [0.1, 0.4, 0.5]
>>> l2 = [34, 3, 112]
>>> g_attr = nx.barabasi_albert_graph(100, 5)
>>> labels=dict()
>>> for node in g_attr.nodes():
>>> labels[node]={"l1":random.choice(l1), "l2":random.choice(l2)}
    >>> communities = eva(g_attr, labels, alpha=0.8) if False else ilouvain(g_attr, labels)
:References:
Combe D., Largeron C., Géry M., Egyed-Zsigmond E. "I-Louvain: An Attributed Graph Clustering Method". <https://link.springer.com/chapter/10.1007/978-3-319-24465-5_16> In: Fromont E., De Bie T., van Leeuwen M. (eds) Advances in Intelligent Data Analysis XIV. IDA (2015). Lecture Notes in Computer Science, vol 9385. Springer, Cham
"""
g = convert_graph_formats(g_original, nx.Graph)
nx.set_node_attributes(g, labels)
nid = dict()
for n in g.nodes():
nid[n] = n
algo = ML2(g, labels, nid)
coms = algo.findPartition()
# Reshaping the results
coms_to_node = defaultdict(list)
for n, c in coms.items():
coms_to_node[c].append(n)
coms_ilouv = [list(c) for c in coms_to_node.values()]
return AttrNodeClustering(coms_ilouv, g_original, "ILouvain")
|
py
|
1a56cb213e127325ebad767dd479f02e057b455b
|
######################################################################
#
# File: test/unit/v3/apiver/apiver_deps_exception.py
#
# Copyright 2021 Backblaze Inc. All Rights Reserved.
#
# License https://www.backblaze.com/using_b2_code.html
#
######################################################################
from b2sdk._v3.exception import * # noqa
|
py
|
1a56cbcb996fca96ad0bf4ad52d1893bf6d3c4e9
|
from rest_framework import generics, permissions
from backend.reviews.models import Review
from backend.api.v2.reviews.serializers import ReviewSerializer
class ReviewView(generics.ListCreateAPIView):
"""Вывод и добавление отзывов"""
permission_classes = [permissions.IsAuthenticatedOrReadOnly]
queryset = Review.objects.filter(moderated=True)
serializer_class = ReviewSerializer
def perform_create(self, serializer):
serializer.save(user=self.request.user)
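# Hypothetical hook-up (not part of this module): the view would typically be wired
# into the v2 urlconf, e.g.
#   path('reviews/', ReviewView.as_view(), name='reviews')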
|
py
|
1a56ce2cef21e16d096be18a5f9308784b69156d
|
import os
import datetime
import requests
from django.shortcuts import render
def index(request):
api_key=os.environ['WEATHER_API_KEY']
today = datetime.datetime.today()
response = requests.get(f'http://api.openweathermap.org/data/2.5/weather?q=London,uk&units=metric&appid={api_key}')
# TODO: handle extra errors
weather_data = response.json()
response2 = requests.get(f'http://api.openweathermap.org/data/2.5/forecast?q=London,uk&units=metric&appid={api_key}')
# TODO: handle extra errors
weather_forecast_data = response2.json()
weather_summary = get_weather_summary(weather_data)
weather_forecast_summary = get_temps_for_tomorrow(get_weather_forecast_temp_and_dt(weather_forecast_data['list']), today)
weather_forecast_tomorrow = get_temps_for_tomorrow_without_date(weather_forecast_summary)
return render(request, 'index.html', { 'weather_forecast_tomorrow': weather_forecast_tomorrow,
'weather_summary': weather_summary
})
def get_weather_summary(weather_data):
return {
'temp': weather_data['main']['temp'],
'min': weather_data['main']['temp_min'],
'max': weather_data['main']['temp_max'],
'humidity':weather_data['main']['humidity']
}
def get_weather_forecast_temp_and_dt(weather_forecast_data):
return list(map(lambda x: {
'y': x['main']['temp'],
'x': x['dt_txt']
}, weather_forecast_data))
def get_temps_for_tomorrow(filtered_forecast_data, today):
tomorrow = str(today + datetime.timedelta(days = 1)).split(' ')[0]
return list(filter(lambda x: x['x'].split(' ')[0] == tomorrow, filtered_forecast_data ))
def get_temps_for_tomorrow_without_date(tomorrow_temps_data):
return list(map(lambda x: {
'x': dt_txt_formatter(x['x']), 'y': x['y']}, tomorrow_temps_data))
def dt_txt_formatter(dateTime):
return dateTime.split(' ')[1][:-3]
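# Illustrative example: dt_txt_formatter('2021-06-01 12:00:00') returns '12:00',
# i.e. the time part of the forecast timestamp with the seconds stripped.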
|
py
|
1a56cea58fb740fcf1e8fbb537fc140ffc58ad82
|
total = 0
line = input()
while line != "NoMoreMoney":
current = float(line)
if current < 0:
print("Invalid operation!")
break
total += current
print(f"Increase: {current:.2f}")
line = input()
print(f"Total: {total:.2f}")
|
py
|
1a56d0481932f63fa6d795d466ba7e0d99b1634c
|
import logging
from concurrent.futures import ProcessPoolExecutor
from functools import partial
from typing import Iterable, List
import pandas as pd
from pyarrow import parquet as pq
from feast.constants import DATETIME_COLUMN
from feast.feature_set import FeatureSet
from feast.type_map import (
pa_column_to_proto_column,
pa_column_to_timestamp_proto_column,
)
from feast.types import Field_pb2 as FieldProto
from feast.types.FeatureRow_pb2 import FeatureRow
_logger = logging.getLogger(__name__)
GRPC_CONNECTION_TIMEOUT_DEFAULT = 3 # type: int
GRPC_CONNECTION_TIMEOUT_APPLY = 300 # type: int
FEAST_SERVING_URL_ENV_KEY = "FEAST_SERVING_URL" # type: str
FEAST_CORE_URL_ENV_KEY = "FEAST_CORE_URL" # type: str
BATCH_FEATURE_REQUEST_WAIT_TIME_SECONDS = 300
KAFKA_CHUNK_PRODUCTION_TIMEOUT = 120 # type: int
def _encode_pa_tables(
file: str, feature_set: str, fields: dict, ingestion_id: str, row_group_idx: int
) -> List[bytes]:
"""
Helper function to encode a PyArrow table(s) read from parquet file(s) into
FeatureRows.
This function accepts a list of file directory pointing to many parquet
files. All parquet files must have the same schema.
Each parquet file will be read into as a table and encoded into FeatureRows
using a pool of max_workers workers.
Args:
file (str):
File directory of all the parquet file to encode.
Parquet file must have more than one row group.
feature_set (str):
Feature set reference in the format f"{project}/{name}".
fields (dict[str, enum.Enum.ValueType]):
A mapping of field names to their value types.
ingestion_id (str):
UUID unique to this ingestion job.
row_group_idx(int):
Row group index to read and encode into byte like FeatureRow
protobuf objects.
Returns:
List[bytes]:
List of byte encoded FeatureRows from the parquet file.
"""
pq_file = pq.ParquetFile(file)
# Read parquet file as a PyArrow table
table = pq_file.read_row_group(row_group_idx)
# Add datetime column
datetime_col = pa_column_to_timestamp_proto_column(table.column(DATETIME_COLUMN))
# Preprocess the columns by converting all its values to Proto values
proto_columns = {
field_name: pa_column_to_proto_column(dtype, table.column(field_name))
for field_name, dtype in fields.items()
}
# List to store result
feature_rows: List[bytes] = []
# Loop optimization declaration(s)
field = FieldProto.Field
proto_items = proto_columns.items()
append = feature_rows.append
# Iterate through the rows
for row_idx in range(table.num_rows):
feature_row = FeatureRow(
event_timestamp=datetime_col[row_idx],
feature_set=feature_set,
ingestion_id=ingestion_id,
)
# Loop optimization declaration
ext = feature_row.fields.extend
# Insert field from each column
for k, v in proto_items:
ext([field(name=k, value=v[row_idx])])
# Append FeatureRow in byte string form
append(feature_row.SerializeToString())
return feature_rows
def get_feature_row_chunks(
file: str,
row_groups: List[int],
fs: FeatureSet,
ingestion_id: str,
max_workers: int,
) -> Iterable[List[bytes]]:
"""
Iterator function to encode a PyArrow table read from a parquet file to
FeatureRow(s).
Args:
file (str):
File directory of the parquet file. The parquet file must have more
than one row group.
row_groups (List[int]):
Specific row group indexes to be read and transformed in the parquet
file.
fs (feast.feature_set.FeatureSet):
FeatureSet describing parquet files.
ingestion_id (str):
UUID unique to this ingestion job.
max_workers (int):
Maximum number of workers to spawn.
Returns:
Iterable[List[bytes]]:
Iterable list of byte encoded FeatureRow(s).
"""
feature_set = f"{fs.project}/{fs.name}"
field_map = {field.name: field.dtype for field in fs.fields.values()}
func = partial(_encode_pa_tables, file, feature_set, field_map, ingestion_id)
with ProcessPoolExecutor(max_workers) as pool:
for chunk in pool.map(func, row_groups):
yield chunk
return
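# A minimal usage sketch (assumption only; the parquet file name, FeatureSet,
# ingestion id and `producer` below are hypothetical placeholders, not part of
# this module):
# fs = FeatureSet("driver_features", project="default")
# for chunk in get_feature_row_chunks(
#     "driver_features.parquet", row_groups=[0, 1], fs=fs,
#     ingestion_id="job-1234", max_workers=4,
# ):
#     for serialized_row in chunk:  # each item is a FeatureRow serialized to bytes
#         producer.send("feast-topic", serialized_row)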
def validate_dataframe(dataframe: pd.DataFrame, feature_set: FeatureSet):
if "datetime" not in dataframe.columns:
raise ValueError(
f'Dataframe does not contain entity "datetime" in columns {dataframe.columns}'
)
for entity in feature_set.entities:
if entity.name not in dataframe.columns:
raise ValueError(
f"Dataframe does not contain entity {entity.name} in columns {dataframe.columns}"
)
for feature in feature_set.features:
if feature.name not in dataframe.columns:
raise ValueError(
f"Dataframe does not contain feature {feature.name} in columns {dataframe.columns}"
)
|
py
|
1a56d10ffdb05a2ff6344493f3b8fc5a93ac2717
|
def remove_adjacent(li):
result = []
last = None
for x in li:
if x != last:
result.append(x)
last = x
return result
print(remove_adjacent([1, 2, 2, 3]))
print(remove_adjacent([2, 2, 3, 3, 3]))
print(remove_adjacent([]))
|
py
|
1a56d1a328e35ca586805a121266ea32aeb933b2
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: AMPAS
# Copyright Academy of Motion Picture Arts and Sciences
"""
Defines unit tests for *ACES* configuration.
"""
from __future__ import division
import hashlib
import os
import re
import shutil
import sys
import tempfile
import unittest
sys.path.append(os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..')))
from aces_ocio.utilities import files_walker
from aces_ocio.generate_config import (
ACES_OCIO_CTL_DIRECTORY_ENVIRON,
generate_config)
__author__ = (
'Haarm-Pieter Duiker, Thomas Mansencal, Stephen Hill, Kevin Wheatley')
__copyright__ = (
'Copyright (C) 2014-2021 Academy of Motion Picture Arts and Sciences')
__license__ = 'Academy of Motion Picture Arts and Sciences License Terms'
__maintainer__ = 'Academy of Motion Picture Arts and Sciences'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = ['REFERENCE_CONFIG_ROOT_DIRECTORY',
'HASH_TEST_PATTERNS',
'UNHASHABLE_TEST_PATTERNS',
'TestACESConfig']
# TODO: Investigate how the current config has been generated to use it for
# tests.
REFERENCE_CONFIG_ROOT_DIRECTORY = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..', '..', '..'))
HASH_TEST_PATTERNS = ('\.3dl', '\.lut', '\.csp')
UNHASHABLE_TEST_PATTERNS = ('\.icc', '\.ocio')
class TestACESConfig(unittest.TestCase):
"""
Performs tests on the *ACES* configuration.
"""
def setUp(self):
"""
Initialises common tests attributes.
"""
self.__aces_ocio_ctl_directory = os.environ.get(
ACES_OCIO_CTL_DIRECTORY_ENVIRON, None)
assert self.__aces_ocio_ctl_directory is not None, (
'Undefined "{0}" environment variable!'.format(
ACES_OCIO_CTL_DIRECTORY_ENVIRON))
assert os.path.exists(self.__aces_ocio_ctl_directory) is True, (
'"{0}" directory does not exists!'.format(
self.__aces_ocio_ctl_directory))
self.maxDiff = None
self.__temporary_directory = tempfile.mkdtemp()
def tearDown(self):
"""
Post tests actions.
"""
shutil.rmtree(self.__temporary_directory)
@staticmethod
def directory_hashes(directory,
filters_in=None,
filters_out=None,
flags=0):
"""
Recursively computes the hashes from the file within given directory.
Parameters
----------
directory : str or unicode
Directory to compute the file hashes.
filters_in : array_like
Included patterns.
filters_out : array_like
Excluded patterns.
flags : int
Regex flags.
Returns
-------
dict
Directory file hashes.
"""
hashes = {}
for path in files_walker(directory,
filters_in=filters_in,
filters_out=filters_out,
flags=flags):
with open(path) as file:
digest = hashlib.md5(
re.sub('\s', '', file.read())).hexdigest()
hashes[path.replace(directory, '')] = digest
return hashes
def test_ACES_config(self):
"""
Performs tests on the *ACES* configuration by computing hashes on the
generated configuration and comparing them to the existing one.
"""
self.assertTrue(generate_config(self.__aces_ocio_ctl_directory,
self.__temporary_directory))
reference_hashes = self.directory_hashes(
REFERENCE_CONFIG_ROOT_DIRECTORY,
HASH_TEST_PATTERNS)
test_hashes = self.directory_hashes(
self.__temporary_directory,
HASH_TEST_PATTERNS)
self.assertDictEqual(reference_hashes, test_hashes)
# Checking that unhashable files ('.icc', '.ocio') are generated.
unashable = lambda x: (
sorted([file.replace(x, '') for file in
files_walker(x, UNHASHABLE_TEST_PATTERNS)]))
self.assertListEqual(unashable(REFERENCE_CONFIG_ROOT_DIRECTORY),
unashable(self.__temporary_directory))
if __name__ == '__main__':
unittest.main()
|
py
|
1a56d23b6fd4e3b964c0814111537bb27cd161e9
|
from plotly.basedatatypes import BaseTraceHierarchyType
import copy
class Titlefont(BaseTraceHierarchyType):
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, saddlebrown, salmon, sandybrown,
seagreen, seashell, sienna, silver, skyblue,
slateblue, slategray, slategrey, snow, springgreen,
steelblue, tan, teal, thistle, tomato, turquoise,
violet, wheat, white, whitesmoke, yellow,
yellowgreen
Returns
-------
str
"""
return self['color']
@color.setter
def color(self, val):
self['color'] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The plotly service (at https://plot.ly or on-
premise) generates images on a server, where only a select
number of fonts are installed and supported. These include
*Arial*, *Balto*, *Courier New*, *Droid Sans*, *Droid Serif*,
*Droid Sans Mono*, *Gravitas One*, *Old Standard TT*, *Open
Sans*, *Overpass*, *PT Sans Narrow*, *Raleway*, *Times New
Roman*.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self['family']
@family.setter
def family(self, val):
self['family'] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self['size']
@size.setter
def size(self, val):
self['size'] = val
# property parent name
# --------------------
@property
def _parent_path_str(self):
return 'splom.marker.colorbar'
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include *Arial*, *Balto*, *Courier New*, *Droid Sans*,
*Droid Serif*, *Droid Sans Mono*, *Gravitas One*, *Old
Standard TT*, *Open Sans*, *Overpass*, *PT Sans
Narrow*, *Raleway*, *Times New Roman*.
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Titlefont object
Sets this color bar's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
plotly.graph_objs.splom.marker.colorbar.Titlefont
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The
plotly service (at https://plot.ly or on-premise)
generates images on a server, where only a select
number of fonts are installed and supported. These
include *Arial*, *Balto*, *Courier New*, *Droid Sans*,
*Droid Serif*, *Droid Sans Mono*, *Gravitas One*, *Old
Standard TT*, *Open Sans*, *Overpass*, *PT Sans
Narrow*, *Raleway*, *Times New Roman*.
size
Returns
-------
Titlefont
"""
super(Titlefont, self).__init__('titlefont')
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.splom.marker.colorbar.Titlefont
constructor must be a dict or
an instance of plotly.graph_objs.splom.marker.colorbar.Titlefont"""
)
# Import validators
# -----------------
from plotly.validators.splom.marker.colorbar import (
titlefont as v_titlefont
)
# Initialize validators
# ---------------------
self._validators['color'] = v_titlefont.ColorValidator()
self._validators['family'] = v_titlefont.FamilyValidator()
self._validators['size'] = v_titlefont.SizeValidator()
# Populate data dict with properties
# ----------------------------------
v = arg.pop('color', None)
self.color = color if color is not None else v
v = arg.pop('family', None)
self.family = family if family is not None else v
v = arg.pop('size', None)
self.size = size if size is not None else v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
|
py
|
1a56d27d7c5cc13ea0bad85fe7103fe1dd72e2f2
|
import config
import telebot
bot = telebot.TeleBot(config.token)
@bot.message_handler(commands=['start'])
def start_message(message):
keyboard = telebot.types.ReplyKeyboardMarkup(True)
keyboard.row('Add item', 'Find item')
bot.send_message(message.chat.id, '''Hi!
I will help you swap something you no longer need for something you really want.
To list an item for exchange, press "Add item". After that, items from other users will become available to you.
Press "Find item" and I will send you photos of items offered for exchange. If you like an item, reply "Exchange"; if not, press "Find item" again.
Pressed "Exchange"? If the item's owner likes something of yours, I will send the contact details to both of you.''', reply_markup=keyboard)
@bot.message_handler(content_types=['text'])
def reply_all_message(message):
if message.text == 'Add item':
bot.send_message(message.chat.id, 'Enter the item name')
elif message.text == 'Find item':
keyboard = telebot.types.InlineKeyboardMarkup()
exchange_button = telebot.types.InlineKeyboardButton(text='Exchange', url='https://www.google.com/')
keyboard.add(exchange_button)
img = open('sn.jpg', 'rb')
bot.send_photo(message.chat.id, img, 'Sneakers', reply_markup=keyboard)
else:
print(f'Packing item {message.text} into the list of user {message.chat.username}')
if __name__ == '__main__':
bot.infinity_polling()
|
py
|
1a56d3612c01cc83b36e54eec20c7eb6c9f4cf80
|
import Backends
import random
import numpy as np
from .theano_helpers import floatX
def create_dropout_masks(route, fname, dimensionality, ks=1000):
"""
route = path where to create a file
fname = filename
ks = thousands of masks to create (1e6 masks by default)
"""
hdf5_backend = Backends.HDF5(route, fname)
for i in range(ks):
mask = random.random_binary_mask(
(dimensionality, 1000), np.random.randint(dimensionality, size=1000))
mask = mask.astype(floatX)
hdf5_backend.write([], "masks/%d/masks" % i, mask.T)
del hdf5_backend
def test_dropout_mask_creation():
create_dropout_masks("/tmp", "domask", 5, 2)
if __name__ == "__main__":
test_dropout_mask_creation()
|
py
|
1a56d593b71ddd12531d5daa5c0d0fbd840a2785
|
# -*- coding: utf-8 -*-
"""Document __init__.py here.
Copyright (C) 2020, Auto Trader UK
Created 15. Dec 2020 16:19
"""
|
py
|
1a56d5ac808fed016cc15828c8de470c29e0f681
|
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from pandas.api.types import is_string_dtype
from pandas.api.types import is_numeric_dtype
import re
import warnings
import multiprocessing as mp
import matplotlib.pyplot as plt
import time
import os
import platform
from .condition_fun import *
from .info_value import *
# converting vector (breaks & special_values) to dataframe
def split_vec_todf(vec):
'''
Create a dataframe based on provided vector.
Split the rows that including '%,%' into multiple rows.
Replace 'missing' by np.nan.
Params
------
vec: list
Returns
------
pandas.DataFrame
returns a dataframe with three columns
{'bin_chr':original vec, 'rowid':index of vec, 'value':split vec}
'''
if vec is not None:
vec = [str(i) for i in vec]
a = pd.DataFrame({'bin_chr':vec}).assign(rowid=lambda x:x.index)
b = pd.DataFrame([i.split('%,%') for i in vec], index=vec)\
.stack().replace('missing', np.nan) \
.reset_index(name='value')\
.rename(columns={'level_0':'bin_chr'})[['bin_chr','value']]
# return
return pd.merge(a,b,on='bin_chr')
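# Illustrative example (assumption about the exact frame layout):
# split_vec_todf(['missing', '1%,%2']) yields roughly
#    bin_chr  rowid value
#    missing      0   NaN
#    1%,%2        1     1
#    1%,%2        1     2
# i.e. '%,%'-joined entries are split into one row per value and 'missing' becomes NaN.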
def add_missing_spl_val(dtm, breaks, spl_val):
'''
add missing to spl_val if there is nan in dtm.value and
missing is not specified in breaks and spl_val
Params
------
dtm: melt dataframe
breaks: breaks list
spl_val: special values list
Returns
------
list
returns spl_val list
'''
if dtm.value.isnull().any():
if breaks is None:
if spl_val is None:
spl_val=['missing']
elif any([('missing' in str(i)) for i in spl_val]):
spl_val=spl_val
else:
spl_val=['missing']+spl_val
elif any([('missing' in str(i)) for i in breaks]):
spl_val=spl_val
else:
if spl_val is None:
spl_val=['missing']
elif any([('missing' in str(i)) for i in spl_val]):
spl_val=spl_val
else:
spl_val=['missing']+spl_val
# return
return spl_val
# count number of good or bad in y
def n0(x): return sum(x==0)
def n1(x): return sum(x==1)
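# e.g. for y = pd.Series([0, 1, 1, 0]): n0(y) == 2 (goods) and n1(y) == 2 (bads)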
# split dtm into bin_sv and dtm (without special_values)
def dtm_binning_sv(dtm, breaks, spl_val):
'''
Split the original dtm (melt dataframe) into
binning_sv (binning of special_values) and
a new dtm (without special_values).
Params
------
dtm: melt dataframe
spl_val: special values list
Returns
------
list
returns a list with binning_sv and dtm
'''
spl_val = add_missing_spl_val(dtm, breaks, spl_val)
if spl_val is not None:
# special_values from vector to dataframe
sv_df = split_vec_todf(spl_val)
# value
if is_numeric_dtype(dtm['value']):
sv_df['value'] = sv_df['value'].astype(dtm['value'].dtypes)
# sv_df['bin_chr'] = sv_df['bin_chr'].astype(dtm['value'].dtypes).astype(str)
sv_df['bin_chr'] = np.where(
np.isnan(sv_df['value']), sv_df['bin_chr'],
sv_df['value'].astype(dtm['value'].dtypes).astype(str))
# sv_df = sv_df.assign(value = lambda x: x.value.astype(dtm['value'].dtypes))
# dtm_sv & dtm
dtm_sv = pd.merge(dtm.fillna("missing"), sv_df[['value']].fillna("missing"), how='inner', on='value', right_index=True)
dtm = dtm[~dtm.index.isin(dtm_sv.index)].reset_index() if len(dtm_sv.index) < len(dtm.index) else None
# dtm_sv = dtm.query('value in {}'.format(sv_df['value'].tolist()))
# dtm = dtm.query('value not in {}'.format(sv_df['value'].tolist()))
if dtm_sv.shape[0] == 0:
return {'binning_sv':None, 'dtm':dtm}
# binning_sv
binning_sv = pd.merge(
dtm_sv.fillna('missing').groupby(['variable','value'])['y'].agg([n0, n1])\
.reset_index().rename(columns={'n0':'good','n1':'bad'}),
sv_df.fillna('missing'),
on='value'
).groupby(['variable', 'rowid', 'bin_chr']).agg({'bad':sum,'good':sum})\
.reset_index().rename(columns={'bin_chr':'bin'})\
.drop('rowid', axis=1)
else:
binning_sv = None
# return
return {'binning_sv':binning_sv, 'dtm':dtm}
# check empty bins for numeric variable
def check_empty_bins(dtm, binning):
# check empty bins
bin_list = np.unique(dtm.bin.astype(str)).tolist()
if 'nan' in bin_list:
bin_list.remove('nan')
binleft = set([re.match(r'\[(.+),(.+)\)', i).group(1) for i in bin_list]).difference(set(['-inf', 'inf']))
binright = set([re.match(r'\[(.+),(.+)\)', i).group(2) for i in bin_list]).difference(set(['-inf', 'inf']))
if binleft != binright:
bstbrks = sorted(list(map(float, ['-inf'] + list(binright) + ['inf'])))
labels = ['[{},{})'.format(bstbrks[i], bstbrks[i+1]) for i in range(len(bstbrks)-1)]
dtm.loc[:,'bin'] = pd.cut(dtm['value'], bstbrks, right=False, labels=labels)
binning = dtm.groupby(['variable','bin'])['y'].agg([n0, n1])\
.reset_index().rename(columns={'n0':'good','n1':'bad'})
# warnings.warn("The break points are modified into '[{}]'. There are empty bins based on the provided break points.".format(','.join(binright)))
# binning
# dtm['bin'] = dtm['bin'].astype(str)
# return
return binning
# required in woebin2 # return binning if breaks provided
#' @import data.table
def woebin2_breaks(dtm, breaks, spl_val):
'''
get binning if breaks is provided
Params
------
dtm: melt dataframe
breaks: breaks list
spl_val: special values list
Returns
------
DataFrame
returns a binning dataframe
'''
# breaks from vector to dataframe
bk_df = split_vec_todf(breaks)
# dtm $ binning_sv
dtm_binsv_list = dtm_binning_sv(dtm, breaks, spl_val)
dtm = dtm_binsv_list['dtm']
binning_sv = dtm_binsv_list['binning_sv']
if dtm is None: return {'binning_sv':binning_sv, 'binning':None}
# binning
if is_numeric_dtype(dtm['value']):
# best breaks
bstbrks = ['-inf'] + list(set(bk_df.value.tolist()).difference(set([np.nan, '-inf', 'inf', 'Inf', '-Inf']))) + ['inf']
bstbrks = sorted(list(map(float, bstbrks)))
# cut
labels = ['[{},{})'.format(bstbrks[i], bstbrks[i+1]) for i in range(len(bstbrks)-1)]
dtm.loc[:,'bin'] = pd.cut(dtm['value'], bstbrks, right=False, labels=labels)
dtm['bin'] = dtm['bin'].astype(str)
binning = dtm.groupby(['variable','bin'])['y'].agg([n0, n1])\
.reset_index().rename(columns={'n0':'good','n1':'bad'})
# check empty bins for numeric variable
binning = check_empty_bins(dtm, binning)
# sort bin
binning = pd.merge(
binning.assign(value=lambda x: [float(re.search(r"^\[(.*),(.*)\)", i).group(2)) if i != 'nan' else np.nan for i in binning['bin']] ),
bk_df.assign(value=lambda x: x.value.astype(float)),
how='left',on='value'
).sort_values(by="rowid").reset_index(drop=True)
# merge binning and bk_df if nan isin value
if bk_df['value'].isnull().any():
binning = binning.assign(bin=lambda x: [i if i != 'nan' else 'missing' for i in x['bin']])\
.fillna('missing').groupby(['variable','rowid'])\
.agg({'bin':lambda x: '%,%'.join(x), 'good':sum, 'bad':sum})\
.reset_index()
else:
# merge binning with bk_df
binning = pd.merge(
dtm,
bk_df.assign(bin=lambda x: x.bin_chr),
how='left', on='value'
).fillna('missing').groupby(['variable', 'rowid', 'bin'])['y'].agg([n0,n1])\
.rename(columns={'n0':'good','n1':'bad'})\
.reset_index().drop('rowid', axis=1)
# return
return {'binning_sv':binning_sv, 'binning':binning}
# required in woebin2_init_bin # return pretty breakpoints
def pretty(low, high, n):
'''
pretty breakpoints, the same as pretty function in R
Params
------
low: minimal value
high: maximal value
n: number of intervals
Returns
------
numpy.ndarray
returns a breakpoints array
'''
# nicenumber
def nicenumber(x):
exp = np.trunc(np.log10(abs(x)))
f = abs(x) / 10**exp
if f < 1.5:
nf = 1.
elif f < 3.:
nf = 2.
elif f < 7.:
nf = 5.
else:
nf = 10.
return np.sign(x) * nf * 10.**exp
# pretty breakpoints
d = abs(nicenumber((high-low)/(n-1)))
miny = np.floor(low / d) * d
maxy = np.ceil (high / d) * d
return np.arange(miny, maxy+0.5*d, d)
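# Illustrative example: pretty(0, 23, 5) returns array([ 0.,  5., 10., 15., 20., 25.]),
# i.e. breakpoints snapped to a 'nice' step of 5 covering the [0, 23] range.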
# required in woebin2 # return initial binning
def woebin2_init_bin(dtm, init_count_distr, breaks, spl_val):
'''
initial binning
Params
------
dtm: melt dataframe
init_count_distr: the minimal percentage in the fine binning process
breaks: breaks list
spl_val: special values list
Returns
------
dict
returns a dict with initial binning and special_value binning
'''
# dtm $ binning_sv
dtm_binsv_list = dtm_binning_sv(dtm, breaks, spl_val)
dtm = dtm_binsv_list['dtm']
binning_sv = dtm_binsv_list['binning_sv']
if dtm is None: return {'binning_sv':binning_sv, 'initial_binning':None}
# binning
if is_numeric_dtype(dtm['value']): # numeric variable
xvalue = dtm['value'].astype(float)
# breaks vector & outlier
iq = xvalue.quantile([0.01, 0.25, 0.75, 0.99])
iqr = iq[0.75] - iq[0.25]
if iqr == 0:
prob_down = 0.01
prob_up = 0.99
else:
prob_down = 0.25
prob_up = 0.75
xvalue_rm_outlier = xvalue[(xvalue >= iq[prob_down]-3*iqr) & (xvalue <= iq[prob_up]+3*iqr)]
# number of initial binning
n = np.trunc(1/init_count_distr)
len_uniq_x = len(np.unique(xvalue_rm_outlier))
if len_uniq_x < n: n = len_uniq_x
# initial breaks
brk = np.unique(xvalue_rm_outlier) if len_uniq_x < 10 else pretty(min(xvalue_rm_outlier), max(xvalue_rm_outlier), n)
brk = list(filter(lambda x: x>np.nanmin(xvalue) and x<=np.nanmax(xvalue), brk))
brk = [float('-inf')] + sorted(brk) + [float('inf')]
# initial binning datatable
# cut
labels = ['[{},{})'.format(brk[i], brk[i+1]) for i in range(len(brk)-1)]
dtm.loc[:,'bin'] = pd.cut(dtm['value'], brk, right=False, labels=labels)#.astype(str)
# init_bin
init_bin = dtm.groupby('bin')['y'].agg([n0, n1])\
.reset_index().rename(columns={'n0':'good','n1':'bad'})
# check empty bins for numeric variable
init_bin = check_empty_bins(dtm, init_bin)
init_bin = init_bin.assign(
variable = dtm['variable'].values[0],
brkp = lambda x: [float(re.match('^\[(.*),.+', i).group(1)) for i in x['bin']],
badprob = lambda x: x['bad']/(x['bad']+x['good'])
)[['variable', 'bin', 'brkp', 'good', 'bad', 'badprob']]
else: # other type variable
# initial binning datatable
init_bin = dtm.groupby('value')['y'].agg([n0,n1])\
.rename(columns={'n0':'good','n1':'bad'})\
.assign(
variable = dtm['variable'].values[0],
badprob = lambda x: x['bad']/(x['bad']+x['good'])
).reset_index()
# order by badprob if is.character
if dtm.value.dtype.name not in ['category', 'bool']:
init_bin = init_bin.sort_values(by='badprob').reset_index()
# add index as brkp column
init_bin = init_bin.assign(brkp = lambda x: x.index)\
[['variable', 'value', 'brkp', 'good', 'bad', 'badprob']]\
.rename(columns={'value':'bin'})
# remove brkp that good == 0 or bad == 0 ------
while len(init_bin.query('(good==0) or (bad==0)')) > 0:
# brkp needs to be removed if good==0 or bad==0
rm_brkp = init_bin.assign(count = lambda x: x['good']+x['bad'])\
.assign(
count_lag = lambda x: x['count'].shift(1).fillna(len(dtm)+1),
count_lead = lambda x: x['count'].shift(-1).fillna(len(dtm)+1)
).assign(merge_tolead = lambda x: x['count_lag'] > x['count_lead'])\
.query('(good==0) or (bad==0)')\
.query('count == count.min()').iloc[0,]
# set brkp to lead's or lag's
shift_period = -1 if rm_brkp['merge_tolead'] else 1
init_bin = init_bin.assign(brkp2 = lambda x: x['brkp'].shift(shift_period))\
.assign(brkp = lambda x:np.where(x['brkp'] == rm_brkp['brkp'], x['brkp2'], x['brkp']))
# groupby brkp
init_bin = init_bin.groupby('brkp').agg({
'variable':lambda x: np.unique(x),
'bin': lambda x: '%,%'.join(x),
'good': sum,
'bad': sum
}).assign(badprob = lambda x: x['bad']/(x['good']+x['bad']))\
.reset_index()
# format init_bin
if is_numeric_dtype(dtm['value']):
init_bin = init_bin\
.assign(bin = lambda x: [re.sub(r'(?<=,).+%,%.+,', '', i) if ('%,%' in i) else i for i in x['bin']])\
.assign(brkp = lambda x: [float(re.match('^\[(.*),.+', i).group(1)) for i in x['bin']])
# return
return {'binning_sv':binning_sv, 'initial_binning':init_bin}
# required in woebin2_tree # add 1 best break for tree-like binning
def woebin2_tree_add_1brkp(dtm, initial_binning, count_distr_limit, bestbreaks=None):
'''
add a breakpoint into provided bestbreaks
Params
------
dtm
initial_binning
count_distr_limit
bestbreaks
Returns
------
DataFrame
a binning dataframe with updated breaks
'''
# dtm removed values in spl_val
# total_iv for all best breaks
def total_iv_all_breaks(initial_binning, bestbreaks, dtm_rows):
# best breaks set
breaks_set = set(initial_binning.brkp).difference(set(list(map(float, ['-inf', 'inf']))))
if bestbreaks is not None: breaks_set = breaks_set.difference(set(bestbreaks))
breaks_set = sorted(breaks_set)
# loop on breaks_set
init_bin_all_breaks = initial_binning.copy(deep=True)
for i in breaks_set:
# best break + i
bestbreaks_i = [float('-inf')]+sorted(bestbreaks+[i] if bestbreaks is not None else [i])+[float('inf')]
# best break datatable
labels = ['[{},{})'.format(bestbreaks_i[i], bestbreaks_i[i+1]) for i in range(len(bestbreaks_i)-1)]
init_bin_all_breaks.loc[:,'bstbin'+str(i)] = pd.cut(init_bin_all_breaks['brkp'], bestbreaks_i, right=False, labels=labels)#.astype(str)
# best break dt
total_iv_all_brks = pd.melt(
init_bin_all_breaks, id_vars=["variable", "good", "bad"], var_name='bstbin',
value_vars=['bstbin'+str(i) for i in breaks_set])\
.groupby(['variable', 'bstbin', 'value'])\
.agg({'good':sum, 'bad':sum}).reset_index()\
.assign(count=lambda x: x['good']+x['bad'])
total_iv_all_brks['count_distr'] = total_iv_all_brks.groupby(['variable', 'bstbin'])\
['count'].apply(lambda x: x/dtm_rows).reset_index(drop=True)
total_iv_all_brks['min_count_distr'] = total_iv_all_brks.groupby(['variable', 'bstbin'])\
['count_distr'].transform(lambda x: min(x))
total_iv_all_brks = total_iv_all_brks\
.assign(bstbin = lambda x: [float(re.sub('^bstbin', '', i)) for i in x['bstbin']] )\
.groupby(['variable','bstbin','min_count_distr'])\
.apply(lambda x: iv_01(x['good'], x['bad'])).reset_index(name='total_iv')
# return
return total_iv_all_brks
# binning add 1best break
def binning_add_1bst(initial_binning, bestbreaks):
if bestbreaks is None:
bestbreaks_inf = [float('-inf'),float('inf')]
else:
if not is_numeric_dtype(dtm['value']):
bestbreaks = [i for i in bestbreaks if int(i) != min(initial_binning.brkp)]
bestbreaks_inf = [float('-inf')]+sorted(bestbreaks)+[float('inf')]
labels = ['[{},{})'.format(bestbreaks_inf[i], bestbreaks_inf[i+1]) for i in range(len(bestbreaks_inf)-1)]
binning_1bst_brk = initial_binning.assign(
bstbin = lambda x: pd.cut(x['brkp'], bestbreaks_inf, right=False, labels=labels)
)
if is_numeric_dtype(dtm['value']):
binning_1bst_brk = binning_1bst_brk.groupby(['variable', 'bstbin'])\
.agg({'good':sum, 'bad':sum}).reset_index().assign(bin=lambda x: x['bstbin'])\
[['bstbin', 'variable', 'bin', 'good', 'bad']]
else:
binning_1bst_brk = binning_1bst_brk.groupby(['variable', 'bstbin'])\
.agg({'good':sum, 'bad':sum, 'bin':lambda x:'%,%'.join(x)}).reset_index()\
[['bstbin', 'variable', 'bin', 'good', 'bad']]
# format
binning_1bst_brk['total_iv'] = iv_01(binning_1bst_brk.good, binning_1bst_brk.bad)
binning_1bst_brk['bstbrkp'] = [float(re.match("^\[(.*),.+", i).group(1)) for i in binning_1bst_brk['bstbin']]
# return
return binning_1bst_brk
# dtm_rows
dtm_rows = len(dtm.index)
# total_iv for all best breaks
total_iv_all_brks = total_iv_all_breaks(initial_binning, bestbreaks, dtm_rows)
# bestbreaks: total_iv == max(total_iv) & min(count_distr) >= count_distr_limit
bstbrk_maxiv = total_iv_all_brks.loc[lambda x: x['min_count_distr'] >= count_distr_limit]
if len(bstbrk_maxiv.index) > 0:
bstbrk_maxiv = bstbrk_maxiv.loc[lambda x: x['total_iv']==max(x['total_iv'])]
bstbrk_maxiv = bstbrk_maxiv['bstbin'].tolist()[0]
else:
bstbrk_maxiv = None
# bestbreaks
if bstbrk_maxiv is not None:
# add 1best break to bestbreaks
bestbreaks = bestbreaks+[bstbrk_maxiv] if bestbreaks is not None else [bstbrk_maxiv]
# binning add 1best break
bin_add_1bst = binning_add_1bst(initial_binning, bestbreaks)
# return
return bin_add_1bst
# required in woebin2 # return tree-like binning
def woebin2_tree(dtm, init_count_distr=0.02, count_distr_limit=0.05,
stop_limit=0.1, bin_num_limit=8, breaks=None, spl_val=None):
'''
binning using tree-like method
Params
------
dtm:
init_count_distr:
count_distr_limit:
stop_limit:
bin_num_limit:
breaks:
spl_val:
Returns
------
dict
returns a dict with initial binning and special_value binning
'''
# initial binning
bin_list = woebin2_init_bin(dtm, init_count_distr=init_count_distr, breaks=breaks, spl_val=spl_val)
initial_binning = bin_list['initial_binning']
binning_sv = bin_list['binning_sv']
if len(initial_binning.index)==1:
return {'binning_sv':binning_sv, 'binning':initial_binning}
# initialize parameters
len_brks = len(initial_binning.index)
bestbreaks = None
IVt1 = IVt2 = 1e-10
IVchg = 1 ## IV gain ratio
step_num = 1
# best breaks from three to n+1 bins
binning_tree = None
while (IVchg >= stop_limit) and (step_num+1 <= min([bin_num_limit, len_brks])):
binning_tree = woebin2_tree_add_1brkp(dtm, initial_binning, count_distr_limit, bestbreaks)
# best breaks
bestbreaks = binning_tree.loc[lambda x: x['bstbrkp'] != float('-inf'), 'bstbrkp'].tolist()
# information value
IVt2 = binning_tree['total_iv'].tolist()[0]
IVchg = IVt2/IVt1-1 ## ratio gain
IVt1 = IVt2
# step_num
step_num = step_num + 1
if binning_tree is None: binning_tree = initial_binning
# return
return {'binning_sv':binning_sv, 'binning':binning_tree}
# examples
# import time
# start = time.time()
# # binning_dict = woebin2_init_bin(dtm, init_count_distr=0.02, breaks=None, spl_val=None)
# # woebin2_tree_add_1brkp(dtm, binning_dict['initial_binning'], count_distr_limit=0.05)
# # woebin2_tree(dtm, binning_dict['initial_binning'], count_distr_limit=0.05)
# end = time.time()
# print(end - start)
# required in woebin2 # return chimerge binning
#' @importFrom stats qchisq
def woebin2_chimerge(dtm, init_count_distr=0.02, count_distr_limit=0.05,
stop_limit=0.1, bin_num_limit=8, breaks=None, spl_val=None):
'''
binning using chimerge method
Params
------
dtm:
init_count_distr:
count_distr_limit:
stop_limit:
bin_num_limit:
breaks:
spl_val:
Returns
------
dict
returns a dict with initial binning and special_value binning
'''
# [chimerge](http://blog.csdn.net/qunxingvip/article/details/50449376)
# [ChiMerge:Discretization of numeric attributs](http://www.aaai.org/Papers/AAAI/1992/AAAI92-019.pdf)
# chisq = function(a11, a12, a21, a22) {
# A = list(a1 = c(a11, a12), a2 = c(a21, a22))
# Adf = do.call(rbind, A)
#
# Edf =
# matrix(rowSums(Adf), ncol = 1) %*%
# matrix(colSums(Adf), nrow = 1) /
# sum(Adf)
#
# sum((Adf-Edf)^2/Edf)
# }
# function to create a chisq column in initial_binning
def add_chisq(initial_binning):
chisq_df = pd.melt(initial_binning,
id_vars=["brkp", "variable", "bin"], value_vars=["good", "bad"],
var_name='goodbad', value_name='a')\
.sort_values(by=['goodbad', 'brkp']).reset_index(drop=True)
###
chisq_df['a_lag'] = chisq_df.groupby('goodbad')['a'].apply(lambda x: x.shift(1))#.reset_index(drop=True)
chisq_df['a_rowsum'] = chisq_df.groupby('brkp')['a'].transform(lambda x: sum(x))#.reset_index(drop=True)
chisq_df['a_lag_rowsum'] = chisq_df.groupby('brkp')['a_lag'].transform(lambda x: sum(x))#.reset_index(drop=True)
###
chisq_df = pd.merge(
chisq_df.assign(a_colsum = lambda df: df.a+df.a_lag),
chisq_df.groupby('brkp').apply(lambda df: sum(df.a+df.a_lag)).reset_index(name='a_sum'))\
.assign(
e = lambda df: df.a_rowsum*df.a_colsum/df.a_sum,
e_lag = lambda df: df.a_lag_rowsum*df.a_colsum/df.a_sum
).assign(
ae = lambda df: (df.a-df.e)**2/df.e + (df.a_lag-df.e_lag)**2/df.e_lag
).groupby('brkp').apply(lambda x: sum(x.ae)).reset_index(name='chisq')
# return
return pd.merge(initial_binning.assign(count = lambda x: x['good']+x['bad']), chisq_df, how='left')
# initial binning
bin_list = woebin2_init_bin(dtm, init_count_distr=init_count_distr, breaks=breaks, spl_val=spl_val)
initial_binning = bin_list['initial_binning']
binning_sv = bin_list['binning_sv']
# return initial binning if its row number equals 1
if len(initial_binning.index)==1:
return {'binning_sv':binning_sv, 'binning':initial_binning}
# dtm_rows
dtm_rows = len(dtm.index)
# chisq limit
from scipy.special import chdtri
chisq_limit = chdtri(1, stop_limit)
# binning with chisq column
binning_chisq = add_chisq(initial_binning)
# param
bin_chisq_min = binning_chisq.chisq.min()
bin_count_distr_min = min(binning_chisq['count']/dtm_rows)
bin_nrow = len(binning_chisq.index)
# remove brkp if chisq < chisq_limit
while bin_chisq_min < chisq_limit or bin_count_distr_min < count_distr_limit or bin_nrow > bin_num_limit:
# brkp needs to be removed
if bin_chisq_min < chisq_limit:
rm_brkp = binning_chisq.assign(merge_tolead = False).sort_values(by=['chisq', 'count']).iloc[0,]
elif bin_count_distr_min < count_distr_limit:
rm_brkp = binning_chisq.assign(
count_distr = lambda x: x['count']/sum(x['count']),
chisq_lead = lambda x: x['chisq'].shift(-1).fillna(float('inf'))
).assign(merge_tolead = lambda x: x['chisq'] > x['chisq_lead'])
# replace merge_tolead as True
rm_brkp.loc[np.isnan(rm_brkp['chisq']), 'merge_tolead']=True
# order select 1st
rm_brkp = rm_brkp.sort_values(by=['count_distr']).iloc[0,]
elif bin_nrow > bin_num_limit:
rm_brkp = binning_chisq.assign(merge_tolead = False).sort_values(by=['chisq', 'count']).iloc[0,]
else:
break
# set brkp to lead's or lag's
shift_period = -1 if rm_brkp['merge_tolead'] else 1
binning_chisq = binning_chisq.assign(brkp2 = lambda x: x['brkp'].shift(shift_period))\
.assign(brkp = lambda x:np.where(x['brkp'] == rm_brkp['brkp'], x['brkp2'], x['brkp']))
# groupby brkp
binning_chisq = binning_chisq.groupby('brkp').agg({
'variable':lambda x:np.unique(x),
'bin': lambda x: '%,%'.join(x),
'good': sum,
'bad': sum
}).assign(badprob = lambda x: x['bad']/(x['good']+x['bad']))\
.reset_index()
# update
## add chisq to new binning dataframe
binning_chisq = add_chisq(binning_chisq)
## param
bin_nrow = len(binning_chisq.index)
if bin_nrow == 1:
break
bin_chisq_min = binning_chisq.chisq.min()
bin_count_distr_min = min(binning_chisq['count']/dtm_rows)
# format init_bin # remove (.+\\)%,%\\[.+,)
if is_numeric_dtype(dtm['value']):
binning_chisq = binning_chisq\
.assign(bin = lambda x: [re.sub(r'(?<=,).+%,%.+,', '', i) if ('%,%' in i) else i for i in x['bin']])\
.assign(brkp = lambda x: [float(re.match('^\[(.*),.+', i).group(1)) for i in x['bin']])
# return
return {'binning_sv':binning_sv, 'binning':binning_chisq}
# required in woebin2 # # format binning output
def binning_format(binning):
'''
format binning dataframe
Params
------
binning: with columns of variable, bin, good, bad
Returns
------
DataFrame
binning dataframe with columns of 'variable', 'bin',
'count', 'count_distr', 'good', 'bad', 'badprob', 'woe',
'bin_iv', 'total_iv', 'breaks', 'is_special_values'
'''
binning['count'] = binning['good'] + binning['bad']
binning['count_distr'] = binning['count']/sum(binning['count'])
binning['badprob'] = binning['bad']/binning['count']
# binning = binning.assign(
# count = lambda x: (x['good']+x['bad']),
# count_distr = lambda x: (x['good']+x['bad'])/sum(x['good']+x['bad']),
# badprob = lambda x: x['bad']/(x['good']+x['bad']))
# new columns: woe, iv, breaks, is_sv
binning['woe'] = woe_01(binning['good'],binning['bad'])
binning['bin_iv'] = miv_01(binning['good'],binning['bad'])
binning['total_iv'] = binning['bin_iv'].sum()
# breaks
binning['breaks'] = binning['bin']
if any([r'[' in str(i) for i in binning['bin']]):
def re_extract_all(x):
gp23 = re.match(r"^\[(.*), *(.*)\)((%,%missing)*)", x)
breaks_string = x if gp23 is None else gp23.group(2)+gp23.group(3)
return breaks_string
binning['breaks'] = [re_extract_all(i) for i in binning['bin']]
# is_sv
binning['is_special_values'] = binning['is_sv']
# return
return binning[['variable', 'bin', 'count', 'count_distr', 'good', 'bad', 'badprob', 'woe', 'bin_iv', 'total_iv', 'breaks', 'is_special_values']]
# woebin2
# This function provides woe binning for only two columns (one x and one y) dataframe.
def woebin2(dtm, breaks=None, spl_val=None,
init_count_distr=0.02, count_distr_limit=0.05,
stop_limit=0.1, bin_num_limit=8, method="tree"):
'''
provides woe binning for only two series
Params
------
Returns
------
DataFrame
'''
# binning
if breaks is not None:
# 1.return binning if breaks provided
bin_list = woebin2_breaks(dtm=dtm, breaks=breaks, spl_val=spl_val)
else:
if stop_limit == 'N':
# binning of initial & specialvalues
bin_list = woebin2_init_bin(dtm, init_count_distr=init_count_distr, breaks=breaks, spl_val=spl_val)
else:
if method == 'tree':
# 2.tree-like optimal binning
bin_list = woebin2_tree(
dtm, init_count_distr=init_count_distr, count_distr_limit=count_distr_limit,
stop_limit=stop_limit, bin_num_limit=bin_num_limit, breaks=breaks, spl_val=spl_val)
elif method == "chimerge":
# 2.chimerge optimal binning
bin_list = woebin2_chimerge(
dtm, init_count_distr=init_count_distr, count_distr_limit=count_distr_limit,
stop_limit=stop_limit, bin_num_limit=bin_num_limit, breaks=breaks, spl_val=spl_val)
# rbind binning_sv and binning
binning = pd.concat(bin_list, keys=bin_list.keys()).reset_index()\
.assign(is_sv = lambda x: x.level_0 =='binning_sv')
# return
return binning_format(binning)
def bins_to_breaks(bins, dt, to_string=False, save_string=None):
if isinstance(bins, dict):
bins = pd.concat(bins, ignore_index=True)
# x variables
xs_all = bins['variable'].unique()
# dtypes of variables
vars_class = pd.DataFrame({
'variable': xs_all,
'not_numeric': [not is_numeric_dtype(dt[i]) for i in xs_all]
})
# breakslist of bins
bins_breakslist = bins[~bins['breaks'].isin(["-inf","inf","missing"]) & ~bins['is_special_values']]
bins_breakslist = pd.merge(bins_breakslist[['variable', 'breaks']], vars_class, how='left', on='variable')
bins_breakslist.loc[bins_breakslist['not_numeric'], 'breaks'] = '\''+bins_breakslist.loc[bins_breakslist['not_numeric'], 'breaks']+'\''
bins_breakslist = bins_breakslist.groupby('variable')['breaks'].agg(lambda x: ','.join(x))
if to_string:
bins_breakslist = "breaks_list={\n"+', \n'.join('\''+bins_breakslist.index[i]+'\': ['+bins_breakslist[i]+']' for i in np.arange(len(bins_breakslist)))+"}"
if save_string is not None:
brk_lst_name = '{}_{}.py'.format(save_string, time.strftime('%Y%m%d_%H%M%S', time.localtime(time.time())))
with open(brk_lst_name, 'w') as f:
f.write(bins_breakslist)
print('[INFO] The breaks_list is saved as {}'.format(brk_lst_name))
return
return bins_breakslist
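# Illustrative sketch (hypothetical variable names; exact spacing may differ):
# with to_string=True the produced breaks_list text looks like
#   breaks_list={
#   'age.in.years': [26.0,35.0],
#   'housing': ['own','for free%,%rent']}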
def woebin(dt, y, x=None,
var_skip=None, breaks_list=None, special_values=None,
stop_limit=0.1, count_distr_limit=0.05, bin_num_limit=8,
# min_perc_fine_bin=0.02, min_perc_coarse_bin=0.05, max_num_bin=8,
positive="bad|1", no_cores=None, print_step=0, method="tree",
ignore_const_cols=True, ignore_datetime_cols=True,
check_cate_num=True, replace_blank=True,
save_breaks_list=None, **kwargs):
'''
WOE Binning
------
`woebin` generates optimal binning for numerical, factor and categorical
variables using methods including tree-like segmentation or chi-square
merge. woebin can also customize breakpoints if the breaks_list or
special_values argument is provided.
The default woe is defined as ln(Distr_Bad_i/Distr_Good_i). If you
prefer ln(Distr_Good_i/Distr_Bad_i), please set the argument `positive`
as negative value, such as '0' or 'good'. If there is a zero frequency
class when calculating woe, the zero will be replaced by 0.99 to make the
woe calculable.
Params
------
dt: A data frame with both x (predictor/feature) and y (response/label) variables.
y: Name of y variable.
x: Name of x variables. Default is None. If x is None,
then all variables except y are counted as x variables.
var_skip: Name of variables that will skip for binning. Defaults to None.
breaks_list: List of break points, default is None.
If it is not None, variable binning will based on the
provided breaks.
special_values: the values specified in special_values
will be in separate bins. Default is None.
count_distr_limit: The minimum percentage of final binning
class number over total. Accepted range: 0.01-0.2; default
is 0.05.
stop_limit: Stop binning segmentation when information value
gain ratio less than the stop_limit, or stop binning merge
when the minimum of chi-square less than 'qchisq(1-stoplimit, 1)'.
Accepted range: 0-0.5; default is 0.1.
bin_num_limit: Integer. The maximum number of binning.
positive: Value of positive class, default "bad|1".
no_cores: Number of CPU cores for parallel computation.
Defaults None. If no_cores is None, the no_cores will
set as 1 if length of x variables less than 10, and will
set as the number of all CPU cores if the length of x variables
greater than or equal to 10.
print_step: A non-negative integer. Default is 0. If print_step>0,
print variable names by each print_step-th iteration.
If print_step=0 or no_cores>1, no message is printed.
method: Optimal binning method, it should be "tree" or "chimerge".
Default is "tree".
ignore_const_cols: Logical. Ignore constant columns. Defaults to True.
ignore_datetime_cols: Logical. Ignore datetime columns. Defaults to True.
check_cate_num: Logical. Check whether the number of unique values in
categorical columns larger than 50. It might make the binning process slow
if there are too many unique categories. Defaults to True.
replace_blank: Logical. Replace blank values with None. Defaults to True.
save_breaks_list: The file name to save breaks_list. Default is None.
Returns
------
dictionary
Optimal or customized binning dataframe.
Examples
------
import scorecardpy as sc
import pandas as pd
# load data
dat = sc.germancredit()
# Example I
# binning of two variables in germancredit dataset
bins_2var = sc.woebin(dat, y = "creditability",
x = ["credit.amount", "purpose"])
# Example II
# binning of the germancredit dataset
bins_germ = sc.woebin(dat, y = "creditability")
# Example III
# customizing the breakpoints of binning
dat2 = pd.DataFrame({'creditability':['good','bad']}).sample(50, replace=True)
dat_nan = pd.concat([dat, dat2], ignore_index=True)
breaks_list = {
'age.in.years': [26, 35, 37, "Inf%,%missing"],
'housing': ["own", "for free%,%rent"]
}
special_values = {
'credit.amount': [2600, 9960, "6850%,%missing"],
'purpose': ["education", "others%,%missing"]
}
bins_cus_brk = sc.woebin(dat_nan, y="creditability",
x=["age.in.years","credit.amount","housing","purpose"],
breaks_list=breaks_list, special_values=special_values)
'''
# start time
start_time = time.time()
# arguments
## print_info
print_info = kwargs.get('print_info', True)
## init_count_distr
min_perc_fine_bin = kwargs.get('min_perc_fine_bin', None)
init_count_distr = kwargs.get('init_count_distr', min_perc_fine_bin)
if init_count_distr is None: init_count_distr = 0.02
## count_distr_limit
min_perc_coarse_bin = kwargs.get('min_perc_coarse_bin', None)
if min_perc_coarse_bin is not None: count_distr_limit = min_perc_coarse_bin
## bin_num_limit
max_num_bin = kwargs.get('max_num_bin', None)
if max_num_bin is not None: bin_num_limit = max_num_bin
# print information
if print_info: print('[INFO] creating woe binning ...')
dt = dt.copy(deep=True)
if isinstance(y, str):
y = [y]
if isinstance(x, str) and x is not None:
x = [x]
if x is not None:
dt = dt[y+x]
# check y
dt = check_y(dt, y, positive)
# remove constant columns
if ignore_const_cols: dt = check_const_cols(dt)
# remove date/time col
if ignore_datetime_cols: dt = check_datetime_cols(dt)
# check categorical columns' unique values
if check_cate_num: check_cateCols_uniqueValues(dt, var_skip)
# replace blank with na
if replace_blank: dt = rep_blank_na(dt)
# x variable names
xs = x_variable(dt, y, x, var_skip)
xs_len = len(xs)
# print_step
print_step = check_print_step(print_step)
# breaks_list
breaks_list = check_breaks_list(breaks_list, xs)
# special_values
special_values = check_special_values(special_values, xs)
### ###
# stop_limit range
if stop_limit<0 or stop_limit>0.5 or not isinstance(stop_limit, (float, int)):
warnings.warn("Incorrect parameter specification; accepted stop_limit parameter range is 0-0.5. Parameter was set to default (0.1).")
stop_limit = 0.1
# init_count_distr range
if init_count_distr<0.01 or init_count_distr>0.2 or not isinstance(init_count_distr, (float, int)):
warnings.warn("Incorrect parameter specification; accepted init_count_distr parameter range is 0.01-0.2. Parameter was set to default (0.02).")
init_count_distr = 0.02
# count_distr_limit
if count_distr_limit<0.01 or count_distr_limit>0.2 or not isinstance(count_distr_limit, (float, int)):
warnings.warn("Incorrect parameter specification; accepted count_distr_limit parameter range is 0.01-0.2. Parameter was set to default (0.05).")
count_distr_limit = 0.05
# bin_num_limit
if not isinstance(bin_num_limit, (float, int)):
warnings.warn("Incorrect inputs; bin_num_limit should be numeric variable. Parameter was set to default (8).")
bin_num_limit = 8
# method
if method not in ["tree", "chimerge"]:
warnings.warn("Incorrect inputs; method should be tree or chimerge. Parameter was set to default (tree).")
method = "tree"
### ###
# binning for each x variable
# loop on xs
if (no_cores is None) or (no_cores < 1):
all_cores = mp.cpu_count() - 1
no_cores = int(np.ceil(xs_len/5 if xs_len/5 < all_cores else all_cores*0.9))
if platform.system() == 'Windows':
no_cores = 1
# ylist to str
y = y[0]
# binning for variables
if no_cores == 1:
# create empty bins dict
bins = {}
for i in np.arange(xs_len):
x_i = xs[i]
# print(x_i)
# print xs
if print_step>0 and bool((i+1)%print_step):
print(('{:'+str(len(str(xs_len)))+'.0f}/{} {}').format(i, xs_len, x_i), flush=True)
# woe binning on one variable
bins[x_i] = woebin2(
dtm = pd.DataFrame({'y':dt[y], 'variable':x_i, 'value':dt[x_i]}),
breaks=breaks_list[x_i] if (breaks_list is not None) and (x_i in breaks_list.keys()) else None,
spl_val=special_values[x_i] if (special_values is not None) and (x_i in special_values.keys()) else None,
init_count_distr=init_count_distr,
count_distr_limit=count_distr_limit,
stop_limit=stop_limit,
bin_num_limit=bin_num_limit,
method=method
)
# try catch:
# "The variable '{}' caused the error: '{}'".format(x_i, error-info)
else:
pool = mp.Pool(processes=no_cores)
# arguments
args = zip(
[pd.DataFrame({'y':dt[y], 'variable':x_i, 'value':dt[x_i]}) for x_i in xs],
[breaks_list[i] if (breaks_list is not None) and (i in list(breaks_list.keys())) else None for i in xs],
[special_values[i] if (special_values is not None) and (i in list(special_values.keys())) else None for i in xs],
[init_count_distr]*xs_len, [count_distr_limit]*xs_len,
[stop_limit]*xs_len, [bin_num_limit]*xs_len, [method]*xs_len
)
# bins in dictionary
bins = dict(zip(xs, pool.starmap(woebin2, args)))
pool.close()
# running time
runingtime = time.time() - start_time
if runingtime >= 10 and print_info:
# print(time.strftime("%H:%M:%S", time.gmtime(runingtime)))
print('Binning on {} rows and {} columns in {}'.format(dt.shape[0], dt.shape[1], time.strftime("%H:%M:%S", time.gmtime(runingtime))))
if save_breaks_list is not None:
bins_to_breaks(bins, dt, to_string=True, save_string=save_breaks_list)
# return
return bins,breaks_list
#' @import data.table
def woepoints_ply1(dtx, binx, x_i, woe_points):
'''
Transform original values into woe or points for one variable.
Params
------
Returns
------
'''
# woe_points: "woe" "points"
# binx = bins.loc[lambda x: x.variable == x_i]
# https://stackoverflow.com/questions/12680754/split-explode-pandas-dataframe-string-entry-to-separate-rows
binx = pd.merge(
binx[['bin']].assign(v1=binx['bin'].str.split('%,%')).explode('v1'),
binx[['bin', woe_points]],
how='left', on='bin'
).rename(columns={'v1':'V1',woe_points:'V2'})
# dtx
## cut numeric variable
if is_numeric_dtype(dtx[x_i]):
is_sv = pd.Series(not bool(re.search(r'\[', str(i))) for i in binx.V1)
binx_sv = binx.loc[is_sv]
binx_other = binx.loc[~is_sv]
# create bin column
breaks_binx_other = np.unique(list(map(float, ['-inf']+[re.match(r'.*\[(.*),.+\).*', str(i)).group(1) for i in binx_other['bin']]+['inf'])))
labels = ['[{},{})'.format(breaks_binx_other[i], breaks_binx_other[i+1]) for i in range(len(breaks_binx_other)-1)]
dtx = dtx.assign(xi_bin = lambda x: pd.cut(x[x_i], breaks_binx_other, right=False, labels=labels))\
.assign(xi_bin = lambda x: [i if (i != i) else str(i) for i in x['xi_bin']])
# dtx.loc[:,'xi_bin'] = pd.cut(dtx[x_i], breaks_binx_other, right=False, labels=labels)
# dtx.loc[:,'xi_bin'] = np.where(pd.isnull(dtx['xi_bin']), dtx['xi_bin'], dtx['xi_bin'].astype(str))
#
mask = dtx[x_i].isin(binx_sv['V1'])
dtx.loc[mask,'xi_bin'] = dtx.loc[mask, x_i].astype(str)
dtx = dtx[['xi_bin']].rename(columns={'xi_bin':x_i})
## to character, na to missing
if not is_string_dtype(dtx[x_i]):
dtx.loc[:,x_i] = dtx.loc[:,x_i].astype(str).replace('nan', 'missing')
# dtx.loc[:,x_i] = np.where(pd.isnull(dtx[x_i]), dtx[x_i], dtx[x_i].astype(str))
dtx = dtx.replace(np.nan, 'missing').assign(rowid = dtx.index).sort_values('rowid')
# rename binx
binx.columns = ['bin', x_i, '_'.join([x_i,woe_points])]
# merge
dtx_suffix = pd.merge(dtx, binx, how='left', on=x_i).sort_values('rowid')\
.set_index(dtx.index)[['_'.join([x_i,woe_points])]]
return dtx_suffix
def woebin_ply(dt, bins, no_cores=None, print_step=0, replace_blank=True, **kwargs):
'''
WOE Transformation
------
`woebin_ply` converts original input data into woe values
based on the binning information generated from `woebin`.
Params
------
dt: A data frame.
bins: Binning information generated from `woebin`.
no_cores: Number of CPU cores for parallel computation.
Defaults None. If no_cores is None, the no_cores will
set as 1 if length of x variables less than 10, and will
set as the number of all CPU cores if the length of x
variables greater than or equal to 10.
print_step: A non-negative integer. Default is 0. If
print_step>0, print variable names by each print_step-th
iteration. If print_step=0 or no_cores>1, no message is printed.
replace_blank: Logical. Replace blank values with None. Defaults to True.
Returns
-------
DataFrame
a dataframe of woe values for each variables
Examples
-------
import scorecardpy as sc
import pandas as pd
# load data
dat = sc.germancredit()
# Example I
dt = dat[["creditability", "credit.amount", "purpose"]]
# binning for dt
bins = sc.woebin(dt, y = "creditability")
# converting original value to woe
dt_woe = sc.woebin_ply(dt, bins=bins)
# Example II
# binning for germancredit dataset
bins_germancredit = sc.woebin(dat, y="creditability")
# converting the values in germancredit to woe
## bins is a dict
germancredit_woe = sc.woebin_ply(dat, bins=bins_germancredit)
## bins is a dataframe
germancredit_woe = sc.woebin_ply(dat, bins=pd.concat(bins_germancredit))
'''
# start time
start_time = time.time()
## print_info
print_info = kwargs.get('print_info', True)
if print_info: print('[INFO] converting into woe values ...')
# remove date/time col
# dt = rmcol_datetime_unique1(dt)
# replace "" by NA
if replace_blank: dt = rep_blank_na(dt)
# ncol of dt
# if len(dt.index) <= 1: raise Exception("Incorrect inputs; dt should have at least two columns.")
# print_step
print_step = check_print_step(print_step)
# bins # if (is.list(bins)) rbindlist(bins)
if isinstance(bins, dict):
bins = pd.concat(bins, ignore_index=True)
# x variables
xs_bin = bins['variable'].unique()
xs_dt = list(dt.columns)
xs = list(set(xs_bin).intersection(xs_dt))
# length of x variables
xs_len = len(xs)
# initial data set
dat = dt.loc[:,list(set(xs_dt) - set(xs))]
# loop on xs
if (no_cores is None) or (no_cores < 1):
all_cores = mp.cpu_count() - 1
no_cores = int(np.ceil(xs_len/5 if xs_len/5 < all_cores else all_cores*0.9))
if platform.system() == 'Windows':
no_cores = 1
#
if no_cores == 1:
for i in np.arange(xs_len):
x_i = xs[i]
# print xs
# print(x_i)
if print_step>0 and bool((i+1) % print_step):
print(('{:'+str(len(str(xs_len)))+'.0f}/{} {}').format(i, xs_len, x_i), flush=True)
#
binx = bins[bins['variable'] == x_i].reset_index()
# bins.loc[lambda x: x.variable == x_i]
# bins.loc[bins['variable'] == x_i] #
# bins.query('variable == \'{}\''.format(x_i))
dtx = dt[[x_i]]
dat = pd.concat([dat, woepoints_ply1(dtx, binx, x_i, woe_points="woe")], axis=1)
else:
pool = mp.Pool(processes=no_cores)
# arguments
args = zip(
[dt[[i]] for i in xs],
[bins[bins['variable'] == i] for i in xs],
[i for i in xs],
["woe"]*xs_len
)
# bins in dictionary
dat_suffix = pool.starmap(woepoints_ply1, args)
dat = pd.concat([dat]+dat_suffix, axis=1)
pool.close()
# runingtime
runingtime = time.time() - start_time
if runingtime >= 10 and print_info:
# print(time.strftime("%H:%M:%S", time.gmtime(runingtime)))
print('Woe transformating on {} rows and {} columns in {}'.format(dt.shape[0], xs_len, time.strftime("%H:%M:%S", time.gmtime(runingtime))))
return dat
# required in woebin_plot
#' @import data.table ggplot2
def plot_bin(binx, title, show_iv, rot = 0):
'''
plot binning of one variable
Params
------
binx:
title:
show_iv:
Returns
------
matplotlib fig object
'''
# y_right_max
y_right_max = np.ceil(binx['badprob'].max()*10)
if y_right_max % 2 == 1: y_right_max=y_right_max+1
if y_right_max - binx['badprob'].max()*10 <= 0.3: y_right_max = y_right_max+2
y_right_max = y_right_max/10
    if y_right_max > 1 or y_right_max <= 0 or np.isnan(y_right_max): y_right_max = 1
## y_left_max
y_left_max = np.ceil(binx['count_distr'].max()*10)/10
    if y_left_max > 1 or y_left_max <= 0 or np.isnan(y_left_max): y_left_max = 1
# title
title_string = binx.loc[0,'variable']+" (iv:"+str(round(binx.loc[0,'total_iv'],4))+")" if show_iv else binx.loc[0,'variable']
title_string = title+'-'+title_string if title is not None else title_string
# param
ind = np.arange(len(binx.index)) # the x locations for the groups
width = 0.35 # the width of the bars: can also be len(x) sequence
###### plot ######
fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
# ax1
p1 = ax1.bar(ind, binx['good_distr'], width, color=(24/254, 192/254, 196/254))
p2 = ax1.bar(ind, binx['bad_distr'], width, bottom=binx['good_distr'], color=(246/254, 115/254, 109/254))
for i in ind:
ax1.text(i, binx.loc[i,'count_distr']*1.02, str(round(binx.loc[i,'count_distr']*100,1))+'%, '+str(binx.loc[i,'count']), ha='center')
# ax2
ax2.plot(ind, binx['badprob'], marker='o', color='blue')
for i in ind:
ax2.text(i, binx.loc[i,'badprob']*1.02, str(round(binx.loc[i,'badprob']*100,1))+'%', color='blue', ha='center')
# settings
# Plot the reference bad rate
bad_rate = [np.sum(binx['bad_distr'].values)/(np.sum(binx['good_distr'])+np.sum(binx['bad_distr'].values))]*len(binx)
ax2.plot(bad_rate, marker = '_', color = 'black', linestyle = "--", linewidth=2.0)
ax2.text(ax2.get_xlim()[1]/2, bad_rate[0] + 0.001 , 'Ref: '+ str(np.round(100*bad_rate[0],2))+ ' %')
ax1.set_ylabel('Bin count distribution')
ax2.set_ylabel('Bad probability', color='blue')
ax1.set_yticks(np.arange(0, y_left_max+0.2, 0.2))
ax1.set_xticklabels(ax1.get_xticklabels(), rotation = rot)
ax2.set_yticks(np.arange(0, y_right_max+0.2, 0.2))
ax2.tick_params(axis='y', colors='blue')
ax2.grid(False)
plt.xticks(ind, binx['bin'])
plt.title(title_string, loc='left')
plt.legend((p2[0], p1[0]), ('bad', 'good'), loc='upper right')
# show plot
plt.show()
return fig
def woebin_plot(bins, x=None, title=None, show_iv=True,orient = 0):
'''
WOE Binning Visualization
------
    `woebin_plot` creates plots of count distribution and bad probability
    for each bin. The binning information is generated by `woebin`.
Params
------
bins: A list or data frame. Binning information generated by `woebin`.
x: Name of x variables. Default is None. If x is None, then all
variables except y are counted as x variables.
title: String added to the plot title. Default is None.
show_iv: Logical. Default is True, which means show information value
in the plot title.
Returns
------
dict
        a dict of matplotlib figure objects
Examples
------
import scorecardpy as sc
import matplotlib.pyplot as plt
# load data
dat = sc.germancredit()
# Example I
dt1 = dat[["creditability", "credit.amount"]]
bins1 = sc.woebin(dt1, y="creditability")
p1 = sc.woebin_plot(bins1)
plt.show(p1)
# Example II
bins = sc.woebin(dat, y="creditability")
plotlist = sc.woebin_plot(bins)
# # save binning plot
# for key,i in plotlist.items():
# plt.show(i)
# plt.savefig(str(key)+'.png')
'''
xs = x
# bins concat
if isinstance(bins, dict):
bins = pd.concat(bins, ignore_index=True)
# good bad distr
def gb_distr(binx):
binx['good_distr'] = binx['good']/sum(binx['count'])
binx['bad_distr'] = binx['bad']/sum(binx['count'])
return binx
bins = bins.groupby('variable').apply(gb_distr)
# x variable names
if xs is None: xs = bins['variable'].unique()
# plot export
plotlist = {}
for i in xs:
binx = bins[bins['variable'] == i].reset_index()
plotlist[i] = plot_bin(binx, title, show_iv ,orient)
return plotlist
# print basic information in woebin_adj
def woebin_adj_print_basic_info(i, xs, bins, dt, bins_breakslist):
'''
    print basic information of woebinning during the adjusting process
Params
------
Returns
------
'''
x_i = xs[i-1]
xs_len = len(xs)
binx = bins.loc[bins['variable']==x_i]
print("--------", str(i)+"/"+str(xs_len), x_i, "--------")
# print(">>> dt["+x_i+"].dtypes: ")
# print(str(dt[x_i].dtypes), '\n')
#
print(">>> dt["+x_i+"].describe(): ")
print(dt[x_i].describe(), '\n')
if len(dt[x_i].unique()) < 10 or not is_numeric_dtype(dt[x_i]):
print(">>> dt["+x_i+"].value_counts(): ")
print(dt[x_i].value_counts(), '\n')
else:
dt[x_i].hist()
plt.title(x_i)
plt.show()
## current breaks
print(">>> Current breaks:")
print(bins_breakslist[x_i], '\n')
## woebin plotting
plt.show(woebin_plot(binx)[x_i])
# plot adjusted binning in woebin_adj
def woebin_adj_break_plot(dt, y, x_i, breaks, stop_limit, sv_i, method):
'''
    update breaks and provide a binning plot
Params
------
Returns
------
'''
if breaks == '':
breaks = None
breaks_list = None if breaks is None else {x_i: eval('['+breaks+']')}
special_values = None if sv_i is None else {x_i: sv_i}
# binx update
bins_adj = woebin(dt[[x_i,y]], y, breaks_list=breaks_list, special_values=special_values, stop_limit = stop_limit, method=method)
## print adjust breaks
breaks_bin = set(bins_adj[x_i]['breaks']) - set(["-inf","inf","missing"])
breaks_bin = ', '.join(breaks_bin) if is_numeric_dtype(dt[x_i]) else ', '.join(['\''+ i+'\'' for i in breaks_bin])
print(">>> Current breaks:")
print(breaks_bin, '\n')
# print bin_adj
plt.show(woebin_plot(bins_adj))
# return breaks
if breaks == '' or breaks is None: breaks = breaks_bin
return breaks
def woebin_adj(dt, y, bins, adj_all_var=False, special_values=None, method="tree", save_breaks_list=None, count_distr_limit=0.05):
'''
WOE Binning Adjustment
------
    `woebin_adj` interactively adjusts the binning breaks.
Params
------
dt: A data frame.
y: Name of y variable.
bins: A list or data frame. Binning information generated from woebin.
    adj_all_var: Logical. If False (default), only variables whose woe
        trend is non-monotonic are presented for adjustment; if True,
        all variables are presented.
special_values: the values specified in special_values will in separate
bins. Default is None.
method: optimal binning method, it should be "tree" or "chimerge".
Default is "tree".
save_breaks_list: The file name to save breaks_list. Default is None.
count_distr_limit: The minimum percentage of final binning
class number over total. Accepted range: 0.01-0.2; default
is 0.05.
Returns
------
dict
dictionary of breaks
Examples
------
import scorecardpy as sc
# load data
dat = sc.germancredit()
# Example I
dt = dat[["creditability", "age.in.years", "credit.amount"]]
bins = sc.woebin(dt, y="creditability")
breaks_adj = sc.woebin_adj(dt, y="creditability", bins=bins)
bins_final = sc.woebin(dt, y="creditability", breaks_list=breaks_adj)
# Example II
binsII = sc.woebin(dat, y="creditability")
breaks_adjII = sc.woebin_adj(dat, "creditability", binsII)
bins_finalII = sc.woebin(dat, y="creditability", breaks_list=breaks_adjII)
'''
# bins concat
if isinstance(bins, dict):
bins = pd.concat(bins, ignore_index=True)
# x variables
xs_all = bins['variable'].unique()
# adjust all variables
if not adj_all_var:
bins2 = bins.loc[~((bins['bin'] == 'missing') & (bins['count_distr'] >= count_distr_limit))].reset_index(drop=True)
bins2['badprob2'] = bins2.groupby('variable').apply(lambda x: x['badprob'].shift(1)).reset_index(drop=True)
bins2 = bins2.dropna(subset=['badprob2']).reset_index(drop=True)
bins2 = bins2.assign(badprob_trend = lambda x: x.badprob >= x.badprob2)
xs_adj = bins2.groupby('variable')['badprob_trend'].nunique()
xs_adj = xs_adj[xs_adj>1].index
else:
xs_adj = xs_all
# length of adjusting variables
xs_len = len(xs_adj)
# special_values
special_values = check_special_values(special_values, xs_adj)
# breakslist of bins
bins_breakslist = bins_to_breaks(bins,dt)
# loop on adjusting variables
if xs_len == 0:
warnings.warn('The binning breaks of all variables are perfect according to default settings.')
breaks_list = "{"+', '.join('\''+bins_breakslist.index[i]+'\': ['+bins_breakslist[i]+']' for i in np.arange(len(bins_breakslist)))+"}"
return breaks_list
# else
def menu(i, xs_len, x_i):
print('>>> Adjust breaks for ({}/{}) {}?'.format(i, xs_len, x_i))
        print('1: next \n2: yes \n3: back \n0: exit')
adj_brk = input("Selection: ")
adj_brk = int(adj_brk)
if adj_brk not in [0,1,2,3]:
warnings.warn('Enter an item from the menu, or 0 to exit.')
adj_brk = input("Selection: ")
adj_brk = int(adj_brk)
return adj_brk
# init param
i = 1
breaks_list = None
while i <= xs_len:
breaks = stop_limit = None
# x_i
x_i = xs_adj[i-1]
sv_i = special_values[x_i] if (special_values is not None) and (x_i in special_values.keys()) else None
# if sv_i is not None:
# sv_i = ','.join('\'')
# basic information of x_i variable ------
woebin_adj_print_basic_info(i, xs_adj, bins, dt, bins_breakslist)
# adjusting breaks ------
adj_brk = menu(i, xs_len, x_i)
if adj_brk == 0:
return
while adj_brk == 2:
# modify breaks adj_brk == 2
breaks = input(">>> Enter modified breaks: ")
breaks = re.sub("^[,\.]+|[,\.]+$", "", breaks)
if breaks == 'N':
stop_limit = 'N'
breaks = None
else:
stop_limit = 0.1
try:
breaks = woebin_adj_break_plot(dt, y, x_i, breaks, stop_limit, sv_i, method=method)
except:
pass
# adj breaks again
adj_brk = menu(i, xs_len, x_i)
if adj_brk == 3:
# go back adj_brk == 3
i = i-1 if i>1 else i
else:
# go next adj_brk == 1
if breaks is not None and breaks != '':
bins_breakslist[x_i] = breaks
i += 1
# return
breaks_list = "{"+', '.join('\''+bins_breakslist.index[i]+'\': ['+bins_breakslist[i]+']' for i in np.arange(len(bins_breakslist)))+"}"
if save_breaks_list is not None:
bins_adj = woebin(dt, y, x=bins_breakslist.index, breaks_list=breaks_list)
bins_to_breaks(bins_adj, dt, to_string=True, save_string=save_breaks_list)
return breaks_list
|
py
|
1a56d6ab216a4a2a5a6b753480a91c55749ecaaf
|
# Copyright (c) 2018-2019 Beta Five Ltd
#
# SPDX-License-Identifier: Apache-2.0
#
"""Test runner with Automake "Simple tests" format output."""
import sys
import unittest
class AMTestResult(unittest.TestResult):
"""
Test result which prints output in automake "Simple tests" format. See
http://www.gnu.org/software/automake/manual/automake.html#Simple-Tests
for further details on this format.
"""
def __init__(self, runner):
super(AMTestResult, self).__init__()
self.runner = runner
def addError(self, test, err):
super(AMTestResult, self).addError(test, err)
self.runner.write("ERROR: %s: %s\n" % (str(test), str(err[1])))
if self.runner.show_tracebacks:
import traceback
traceback.print_tb(err[2])
def addSuccess(self, test):
super(AMTestResult, self).addSuccess(test)
self.runner.write("PASS: %s\n" % str(test))
def addFailure(self, test, err):
super(AMTestResult, self).addFailure(test, err)
self.runner.write("FAIL: %s: %s\n" % (str(test), str(err[1])))
def addSkip(self, test, reason):
super(AMTestResult, self).addSkip(test, reason)
self.runner.write("SKIP: %s: %s\n" % (str(test), str(reason)))
def addExpectedFailure(self, test, err):
super(AMTestResult, self).addExpectedFailure(test, err)
self.runner.write("XFAIL: %s: %s\n" % (str(test), str(err[1])))
def addUnexpectedSuccess(self, test):
super(AMTestResult, self).addUnexpectedSuccess(test)
self.runner.write("XPASS: %s\n" % str(test))
def addSubTest(self, test, subtest, err):
super(AMTestResult, self).addSubTest(test, subtest, err)
if err:
self.runner.write("FAIL: %s: %s\n" % (str(subtest), str(err[1])))
elif self.runner.verbose_subtests:
self.runner.write("PASS: %s\n" % (str(subtest)))
class AMTestRunner:
"""
Test runner which prints output in automake "Simple tests" format. See
http://www.gnu.org/software/automake/manual/automake.html#Simple-Tests
for further details on this format.
"""
def __init__(self, stream=sys.stderr, show_tracebacks=False,
verbose_subtests=False):
self.stream = stream
self.show_tracebacks = show_tracebacks
self.verbose_subtests = verbose_subtests
def write(self, message):
"Print a message to the test runner's output stream."
self.stream.write(message)
def run(self, test):
"Run the given test case or test suite."
result = AMTestResult(self)
test(result)
return result
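# A minimal usage sketch (not part of the original module): plug AMTestRunner
# into unittest's entry point. The options below are illustrative assumptions.
if __name__ == '__main__':
    # unittest.main() accepts a runner instance exposing run(suite); exit=False
    # returns control so the AMTestResult can be inspected instead of exiting.
    unittest.main(testRunner=AMTestRunner(show_tracebacks=True), exit=False)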
|
py
|
1a56d792e840ebd035997ca7a6b9003a41915934
|
def get_level():
    # Prompt (Korean): "Choose one of the difficulty levels (상=hard, 중=medium, 하=easy): "
    level = input("난이도 (상중하) 중에서 하나 선택하여 입력: ")
    while level not in {"상", "중", "하"}:
        level = input("난이도 (상중하) 중에서 하나 선택하여 입력: ")
    if level == "하": return 6   # easy
    if level == "중": return 8   # medium
    if level == "상": return 10  # hard
print(get_level())
|
py
|
1a56d79f9fdb5da9cae2c56079aa7cbe453d054d
|
# -*- coding: utf-8 -*-
from jqdatasdk import auth, query, indicator, get_fundamentals, logout
from zvdata.api import get_data
from zvdata.utils.pd_utils import df_is_not_null
from zvt.api.api import get_finance_factors
from zvt.api.common import to_jq_entity_id, to_jq_report_period
from zvt.domain import FinanceFactor
from zvt.recorders.eastmoney.common import company_type_flag, get_fc, EastmoneyTimestampsDataRecorder, \
call_eastmoney_api, get_from_path_fields
from zvt.settings import JQ_ACCOUNT, JQ_PASSWD
from zvt.utils.pd_utils import index_df
from zvt.utils.time_utils import to_time_str, to_pd_timestamp
class BaseChinaStockFinanceRecorder(EastmoneyTimestampsDataRecorder):
finance_report_type = None
data_type = 1
timestamps_fetching_url = 'https://emh5.eastmoney.com/api/CaiWuFenXi/GetCompanyReportDateList'
timestamp_list_path_fields = ['CompanyReportDateList']
timestamp_path_fields = ['ReportDate']
def __init__(self, entity_type='stock', exchanges=['sh', 'sz'], entity_ids=None, codes=None, batch_size=10,
force_update=False, sleeping_time=5, default_size=2000, one_shot=False,
fix_duplicate_way='add') -> None:
super().__init__(entity_type, exchanges, entity_ids, codes, batch_size, force_update, sleeping_time,
default_size, one_shot, fix_duplicate_way)
auth(JQ_ACCOUNT, JQ_PASSWD)
def init_timestamps(self, entity):
param = {
"color": "w",
"fc": get_fc(entity),
"DataType": self.data_type
}
if self.finance_report_type == 'LiRunBiaoList' or self.finance_report_type == 'XianJinLiuLiangBiaoList':
param['ReportType'] = 1
timestamp_json_list = call_eastmoney_api(url=self.timestamps_fetching_url,
path_fields=self.timestamp_list_path_fields,
param=param)
if self.timestamp_path_fields:
timestamps = [get_from_path_fields(data, self.timestamp_path_fields) for data in timestamp_json_list]
return [to_pd_timestamp(t) for t in timestamps]
def generate_request_param(self, security_item, start, end, size, timestamps):
if len(timestamps) <= 10:
param = {
"color": "w",
"fc": get_fc(security_item),
"corpType": company_type_flag(security_item),
# 0 means get all types
"reportDateType": 0,
"endDate": '',
"latestCount": size
}
else:
param = {
"color": "w",
"fc": get_fc(security_item),
"corpType": company_type_flag(security_item),
# 0 means get all types
"reportDateType": 0,
"endDate": to_time_str(timestamps[10]),
"latestCount": 10
}
if self.finance_report_type == 'LiRunBiaoList' or self.finance_report_type == 'XianJinLiuLiangBiaoList':
param['reportType'] = 1
return param
def generate_path_fields(self, security_item):
comp_type = company_type_flag(security_item)
if comp_type == "3":
return ['{}_YinHang'.format(self.finance_report_type)]
elif comp_type == "2":
return ['{}_BaoXian'.format(self.finance_report_type)]
elif comp_type == "1":
return ['{}_QuanShang'.format(self.finance_report_type)]
elif comp_type == "4":
return ['{}_QiYe'.format(self.finance_report_type)]
def record(self, entity, start, end, size, timestamps):
# different with the default timestamps handling
param = self.generate_request_param(entity, start, end, size, timestamps)
self.logger.info('request param:{}'.format(param))
return self.api_wrapper.request(url=self.url, param=param, method=self.request_method,
path_fields=self.generate_path_fields(entity))
def get_original_time_field(self):
return 'ReportDate'
def fill_timestamp_with_jq(self, security_item, the_data):
# get report published date from jq
q = query(
indicator.pubDate
).filter(
indicator.code == to_jq_entity_id(security_item),
)
df = get_fundamentals(q, statDate=to_jq_report_period(the_data.report_date))
if not df.empty:
the_data.timestamp = to_pd_timestamp(df['pubDate'][0])
self.logger.info(
'jq fill {} {} timestamp:{} for report_date:{}'.format(self.data_schema, security_item.id,
the_data.timestamp,
the_data.report_date))
self.session.commit()
def on_finish_entity(self, entity):
# fill the timestamp for report published date
the_data_list = get_data(data_schema=self.data_schema,
provider=self.provider,
entity_id=entity.id,
order=self.data_schema.timestamp.asc(),
return_type='domain',
session=self.session,
filters=[self.data_schema.timestamp == self.data_schema.report_date,
self.data_schema.timestamp >= to_pd_timestamp('2005-01-01')])
if the_data_list:
if self.data_schema == FinanceFactor:
for the_data in the_data_list:
self.fill_timestamp_with_jq(entity, the_data)
else:
df = get_finance_factors(entity_id=entity.id,
columns=[FinanceFactor.timestamp, FinanceFactor.report_date, FinanceFactor.id],
filters=[FinanceFactor.timestamp != FinanceFactor.report_date,
FinanceFactor.timestamp >= to_pd_timestamp('2005-01-01'),
FinanceFactor.report_date >= the_data_list[0].report_date,
FinanceFactor.report_date <= the_data_list[-1].report_date, ])
if df_is_not_null(df):
index_df(df, index='report_date')
for the_data in the_data_list:
if (df is not None) and (not df.empty) and the_data.report_date in df.index:
the_data.timestamp = df.at[the_data.report_date, 'timestamp']
self.logger.info(
'db fill {} {} timestamp:{} for report_date:{}'.format(self.data_schema, entity.id,
the_data.timestamp,
the_data.report_date))
self.session.commit()
else:
# self.logger.info(
# 'waiting jq fill {} {} timestamp:{} for report_date:{}'.format(self.data_schema,
# security_item.id,
# the_data.timestamp,
# the_data.report_date))
self.fill_timestamp_with_jq(entity, the_data)
def on_finish(self):
super().on_finish()
logout()
|
py
|
1a56d8223888dbf768271c23761541392ac49ec0
|
import numpy as np
import pytest
from devito.logger import info
from devito import norm, configuration
from examples.seismic.viscoacoustic import ViscoacousticWaveSolver
from examples.seismic import demo_model, setup_geometry, seismic_args
def viscoacoustic_setup(shape=(50, 50), spacing=(15.0, 15.0), tn=500., space_order=4,
nbl=40, preset='layers-viscoacoustic', kernel='sls',
time_order=2, **kwargs):
model = demo_model(preset, space_order=space_order, shape=shape, nbl=nbl,
dtype=kwargs.pop('dtype', np.float32), spacing=spacing)
# Source and receiver geometries
geometry = setup_geometry(model, tn)
# Create solver object to provide relevant operators
solver = ViscoacousticWaveSolver(model, geometry, space_order=space_order,
kernel=kernel, time_order=time_order, **kwargs)
return solver
def run(shape=(50, 50), spacing=(20.0, 20.0), tn=1000.0,
space_order=4, nbl=40, autotune=False, preset='layers-viscoacoustic',
kernel='sls', time_order=2, **kwargs):
solver = viscoacoustic_setup(shape=shape, spacing=spacing, nbl=nbl, tn=tn,
space_order=space_order, preset=preset,
kernel=kernel, time_order=time_order, **kwargs)
info("Applying Forward")
rec, p, v, summary = solver.forward(autotune=autotune)
return (summary.gflopss, summary.oi, summary.timings, [rec, p, v])
@pytest.mark.skipif(configuration['language'] == 'openacc', reason="see issue #1560")
@pytest.mark.parametrize('kernel, time_order, normrec, atol', [
('sls', 2, 684.385, 1e-2),
('sls', 1, 18.774, 1e-2),
('ren', 2, 677.673, 1e-2),
('ren', 1, 17.995, 1e-2),
('deng_mcmechan', 2, 673.041, 1e-2),
('deng_mcmechan', 1, 18.488, 1e-2),
])
def test_viscoacoustic(kernel, time_order, normrec, atol):
_, _, _, [rec, _, _] = run(kernel=kernel, time_order=time_order)
assert np.isclose(norm(rec), normrec, atol=atol, rtol=0)
@pytest.mark.skipif(configuration['language'] == 'openacc', reason="see issue #1560")
@pytest.mark.parametrize('ndim', [2, 3])
@pytest.mark.parametrize('kernel', ['sls', 'ren', 'deng_mcmechan'])
@pytest.mark.parametrize('time_order', [1, 2])
def test_viscoacoustic_stability(ndim, kernel, time_order):
shape = tuple([11]*ndim)
spacing = tuple([20]*ndim)
_, _, _, [rec, _, _] = run(shape=shape, spacing=spacing, tn=20000.0, nbl=0,
kernel=kernel, time_order=time_order)
assert np.isfinite(norm(rec))
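# A small, hedged sketch (not part of the original test suite): exercise the
# forward operator directly on a tiny grid; the grid size and simulation time
# below are illustrative assumptions, not reference values.
def smoke_forward(kernel='sls', time_order=2):
    solver = viscoacoustic_setup(shape=(21, 21), tn=50., kernel=kernel,
                                 time_order=time_order)
    rec, p, v, _ = solver.forward()
    return norm(rec)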
if __name__ == "__main__":
description = ("Example script for a set of viscoacoustic operators.")
parser = seismic_args(description)
parser.add_argument("-k", dest="kernel", default='sls',
choices=['sls', 'ren', 'deng_mcmechan'],
help="Choice of finite-difference kernel")
parser.add_argument("-to", "--time_order", default=2,
type=int, help="Time order of the equation")
args = parser.parse_args()
# Preset parameters
ndim = args.ndim
shape = args.shape[:args.ndim]
spacing = tuple(ndim * [10.0])
tn = args.tn if args.tn > 0 else (750. if ndim < 3 else 1250.)
preset = 'constant-viscoacoustic' if args.constant else 'layers-viscoacoustic'
run(shape=shape, spacing=spacing, nbl=args.nbl, tn=tn, opt=args.opt,
space_order=args.space_order, autotune=args.autotune, preset=preset,
kernel=args.kernel, time_order=args.time_order)
|
py
|
1a56d82443ed433d6081e59d4bee1dfa4cfb5e4a
|
"""Reducing Function Arguments
"""
def my_func(a, b, c):
print(a, b, c)
def fn(b, c):
return my_func(10, b, c) # fn(20, 30) -> 10, 20, 30
f = lambda b, c: my_func(10, b, c)
from functools import partial
f = partial(my_func, 10)
f(20, 30)
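# A further illustration (an addition, not part of the original note): partial
# can also pre-bind keyword arguments, and the bound values stay inspectable.
g = partial(my_func, a=10, c=30)
g(b=20)                             # -> 10 20 30
print(g.func, g.args, g.keywords)   # my_func, (), {'a': 10, 'c': 30}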
|
py
|
1a56d9a976b5149996d2d276116f7b1510973bcf
|
import traceback
from urllib.parse import urlparse
import click
import timeago
from metaflowbot.cli import action
from metaflowbot.message_templates.templates import error_message
from metaflowbot.state import MFBState
MAX_ARTIFACT_SIZE = 1000
import json
import requests
def random_joke():
ENDPOINT = r"https://official-joke-api.appspot.com/jokes/programming/random"
data = requests.get(ENDPOINT)
tt = json.loads(data.text)
return tt
@action.command(help="Tell me a joke")
@click.option("--create-thread/--no-create-thread", help="Will create a new thread")
@click.pass_context
def joke(ctx, create_thread=False):
obj = ctx.obj
if create_thread:
obj.publish_state(MFBState.message_new_thread(obj.thread))
try:
joke = random_joke()[0]
setup = joke["setup"]
punchline = joke["punchline"]
obj.reply(
f"""
{setup} \n{punchline}
"""
)
except:
traceback.print_exc()
my_traceback = traceback.format_exc()
err_msg = "Sorry, I couldn't find a joke at the moment :meow_dead:"
obj.reply(err_msg, **error_message(my_traceback, message=err_msg))
|
py
|
1a56da20f172d3bd4ba1febc0d0da5f332bdb222
|
"""
Network tools to run from the Master
"""
import logging
import socket
import salt.utils.files
import salt.utils.network
import salt.utils.stringutils
log = logging.getLogger(__name__)
def wollist(maclist, bcast="255.255.255.255", destport=9):
"""
Send a "Magic Packet" to wake up a list of Minions.
This list must contain one MAC hardware address per line
CLI Example:
.. code-block:: bash
salt-run network.wollist '/path/to/maclist'
salt-run network.wollist '/path/to/maclist' 255.255.255.255 7
salt-run network.wollist '/path/to/maclist' 255.255.255.255 7
"""
ret = []
try:
with salt.utils.files.fopen(maclist, "r") as ifile:
for mac in ifile:
mac = salt.utils.stringutils.to_unicode(mac).strip()
wol(mac, bcast, destport)
print("Waking up {}".format(mac))
ret.append(mac)
except Exception as err: # pylint: disable=broad-except
__jid_event__.fire_event(
{"error": "Failed to open the MAC file. Error: {}".format(err)}, "progress"
)
return []
return ret
def wol(mac, bcast="255.255.255.255", destport=9):
"""
Send a "Magic Packet" to wake up a Minion
CLI Example:
.. code-block:: bash
salt-run network.wol 08-00-27-13-69-77
salt-run network.wol 080027136977 255.255.255.255 7
salt-run network.wol 08:00:27:13:69:77 255.255.255.255 7
"""
dest = salt.utils.network.mac_str_to_bytes(mac)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.sendto(b"\xff" * 6 + dest * 16, (bcast, int(destport)))
return True
def wolmatch(tgt, tgt_type="glob", bcast="255.255.255.255", destport=9):
"""
Send a "Magic Packet" to wake up Minions that are matched in the grains cache
CLI Example:
.. code-block:: bash
salt-run network.wolmatch minion_id
salt-run network.wolmatch 192.168.0.0/16 tgt_type='ipcidr' bcast=255.255.255.255 destport=7
"""
ret = []
minions = __salt__["cache.grains"](tgt, tgt_type)
for minion in minions:
for iface, mac in minions[minion]["hwaddr_interfaces"].items():
if iface == "lo":
continue
mac = mac.strip()
wol(mac, bcast, destport)
log.info("Waking up %s", mac)
ret.append(mac)
return ret
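def _wol_packet_demo(mac="08:00:27:13:69:77"):
    """
    Minimal sketch (not part of the salt runner API; the leading underscore
    keeps it out of the runner namespace) of the "Magic Packet" layout built
    by wol() above: 6 bytes of 0xFF followed by the target MAC repeated 16
    times. It parses the MAC directly instead of using
    salt.utils.network.mac_str_to_bytes.
    """
    dest = bytes.fromhex(mac.replace(":", "").replace("-", ""))
    return b"\xff" * 6 + dest * 16  # 102 bytes in total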
|
py
|
1a56dc6e2dce72e3404a114c7590c4135bb0e48a
|
#!/usr/bin/python
# Copyright 2014 Google.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Unit tests for the optimizer. """
import os
import re
import unittest
import encoder
import encoder_configuration
import optimizer
import test_tools
class DummyCodec(encoder.Codec):
def __init__(self):
super(DummyCodec, self).__init__('dummy')
self.extension = 'fake'
self.option_set = encoder.OptionSet(
encoder.IntegerOption('score', 0, 10),
encoder.Option('another_parameter', ['yes']),
)
def StartEncoder(self, context):
return encoder.Encoder(context,
encoder.OptionValueSet(self.option_set,
"--score=5"))
def Execute(self, parameters, rate, videofile, workdir):
# pylint: disable=W0613
match = re.search(r'--score=(\d+)', parameters.ToString())
if match:
return {'psnr': int(match.group(1)), 'bitrate': 100}
else:
return {'psnr': -100, 'bitrate': 100}
class DummyVideofile(encoder.Videofile):
def __init__(self, filename, clip_time):
super(DummyVideofile, self).__init__(filename)
self.clip_time = clip_time
def ClipTime(self):
return self.clip_time
def Returns1(target_bitrate, result):
"""Score function that returns a constant value."""
# pylint: disable=W0613
return 1.0
def ReturnsClipTime(target_bitrate, result):
# pylint: disable=W0613
return float(result['cliptime'])
class TestOptimizer(unittest.TestCase):
def setUp(self):
self.codec = DummyCodec()
self.file_set = None
self.cache_class = encoder.EncodingMemoryCache
self.score_function = None
self.videofile = DummyVideofile('foofile_640_480_30.yuv', clip_time=1)
self.optimizer = None
def StdOptimizer(self):
# This function is not in setup because some tests
# do not need it.
if not self.optimizer:
self.optimizer = optimizer.Optimizer(self.codec, self.file_set,
cache_class=self.cache_class)
return self.optimizer
def EncoderFromParameterString(self, parameter_string):
return encoder.Encoder(self.optimizer.context,
encoder.OptionValueSet(self.optimizer.context.codec.option_set,
parameter_string))
def testInit(self):
optimizer.Optimizer(self.codec, self.file_set,
cache_class=self.cache_class)
def test_AlternateScorer(self):
my_optimizer = optimizer.Optimizer(self.codec, self.file_set,
cache_class=self.cache_class,
score_function=Returns1)
my_optimizer.BestEncoding(100, self.videofile).Execute().Store()
self.assertAlmostEqual(1,
my_optimizer.Score(my_optimizer.BestEncoding(100, self.videofile)),
places=4)
def test_FirstBestEncodingNoScore(self):
my_optimizer = self.StdOptimizer()
encoding = my_optimizer.BestEncoding(100, self.videofile)
self.assertIsNone(encoding.Result())
def test_BestEncodingOneAlternative(self):
my_optimizer = self.StdOptimizer()
my_optimizer.BestEncoding(100, self.videofile).Store()
encoding = my_optimizer.BestEncoding(100, self.videofile)
self.assertEqual(encoding.videofile, self.videofile)
def test_BestEncodingExecuteGivesScore(self):
my_optimizer = self.StdOptimizer()
my_optimizer.BestEncoding(100, self.videofile).Execute().Store()
self.assertAlmostEqual(5, my_optimizer.Score(
my_optimizer.BestEncoding(100, self.videofile)),
places=4)
def test_BestEncodingOtherSpeedNoScore(self):
my_optimizer = self.StdOptimizer()
my_optimizer.BestEncoding(100, self.videofile).Execute().Store()
self.assertIsNone(my_optimizer.BestEncoding(200, self.videofile).Result())
def test_BestUntriedEncodingReturnsSomething(self):
my_optimizer = self.StdOptimizer()
first_encoding = my_optimizer.BestEncoding(100, self.videofile)
first_encoding.Execute().Store()
other_encoding = my_optimizer.BestUntriedEncoding(100, self.videofile)
self.assertTrue(other_encoding)
self.assertNotEqual(first_encoding.encoder.parameters.ToString(),
other_encoding.encoder.parameters.ToString())
def test_WorksBetterOnSomeOtherClip(self):
my_optimizer = self.StdOptimizer()
videofile2 = DummyVideofile('barfile_640_480_30.yuv', clip_time=1)
# Note - may have to do deterministic generation of these.
encoder1 = self.EncoderFromParameterString('--score=5') # Low score
encoder2 = self.EncoderFromParameterString('--score=10') # High score
# Store 2 scores for the second videofile.
encoding = encoder1.Encoding(100, videofile2)
encoding.Execute().Store()
encoding = encoder2.Encoding(100, videofile2)
encoding.Execute().Store()
# Store 1 score for the first videofile
first_encoding = encoder1.Encoding(100, self.videofile)
first_encoding.Execute().Store()
# pylint: disable=W0212
second_encoding = my_optimizer._WorksBetterOnSomeOtherClip(first_encoding,
100,
self.videofile)
self.assertTrue(second_encoding)
second_encoding.Execute()
self.assertEquals(first_encoding.videofile, second_encoding.videofile)
self.assertAlmostEqual(10, my_optimizer.Score(second_encoding),
places=4)
def test_ShorterParameterListsScoreHigher(self):
my_optimizer = self.StdOptimizer()
encoder1 = self.EncoderFromParameterString('--score=5')
encoder2 = self.EncoderFromParameterString(
'--score=5 --another_parameter=yes')
encoding1 = encoder1.Encoding(100, self.videofile)
encoding1.Execute()
encoding2 = encoder2.Encoding(100, self.videofile)
encoding2.Execute()
self.assertGreater(my_optimizer.Score(encoding1),
my_optimizer.Score(encoding2))
def test_EncodingWithOneLessParameter(self):
my_optimizer = self.StdOptimizer()
my_encoder = self.EncoderFromParameterString('--score=5')
first_encoding = my_encoder.Encoding(100, self.videofile)
# pylint: disable=W0212
next_encoding = my_optimizer._EncodingWithOneLessParameter(first_encoding,
100,
self.videofile,
None)
self.assertTrue(next_encoding)
self.assertEqual(next_encoding.encoder.parameters.ToString(), '')
def test_EncodingGoodOnOtherRate(self):
self.file_set = optimizer.FileAndRateSet(verify_files_present=False)
self.file_set.AddFilesAndRates([self.videofile.filename], [100, 200])
my_optimizer = self.StdOptimizer()
my_encoder = self.EncoderFromParameterString('--score=7')
my_encoder.Encoding(100, self.videofile).Execute().Store()
first_encoder = self.EncoderFromParameterString('--score=8')
first_encoding = first_encoder.Encoding(200, self.videofile)
first_encoding.Execute().Store()
# pylint: disable=W0212
next_encoding = my_optimizer._EncodingGoodOnOtherRate(first_encoding,
200,
self.videofile,
None)
self.assertTrue(next_encoding)
self.assertEqual('--score=7', next_encoding.encoder.parameters.ToString())
def test_BestOverallConfiguration(self):
self.file_set = optimizer.FileAndRateSet(verify_files_present=False)
self.file_set.AddFilesAndRates([self.videofile.filename], [100, 200])
my_optimizer = self.StdOptimizer()
# When there is nothing in the database, None should be returned.
best_encoder = my_optimizer.BestOverallEncoder()
self.assertIsNone(best_encoder)
# Fill in the database with all the files and rates.
my_encoder = self.EncoderFromParameterString('--score=7')
for rate, filename in self.file_set.AllFilesAndRates():
my_encoder.Encoding(rate, encoder.Videofile(filename)).Execute().Store()
best_encoder = my_optimizer.BestOverallEncoder()
self.assertTrue(best_encoder)
self.assertEquals(my_encoder.parameters.ToString(),
best_encoder.parameters.ToString())
# Add an incomplete encode. This should be ignored.
(self.EncoderFromParameterString('--score=9')
.Encoding(100, self.videofile).Execute().Store())
best_encoder = my_optimizer.BestOverallEncoder()
self.assertTrue(best_encoder)
self.assertEquals(my_encoder.parameters.ToString(),
best_encoder.parameters.ToString())
# Complete the set for 'score=9'. This should cause a change.
(self.EncoderFromParameterString('--score=9')
.Encoding(200, self.videofile).Execute().Store())
best_encoder = my_optimizer.BestOverallEncoder()
self.assertTrue(best_encoder)
self.assertEquals('--score=9',
best_encoder.parameters.ToString())
class TestOptimizerWithRealFiles(test_tools.FileUsingCodecTest):
def setUp(self):
self.codec = DummyCodec()
self.file_set = None
self.score_function = None
self.videofile = DummyVideofile('foofile_640_480_30.yuv', clip_time=1)
self.optimizer = None
def EncoderFromParameterString(self, parameter_string):
return encoder.Encoder(self.optimizer.context,
encoder.OptionValueSet(self.optimizer.context.codec.option_set,
parameter_string))
def test_BestOverallConfigurationNotInWorkDirectory(self):
other_dir = os.path.join(encoder_configuration.conf.sysdir(),
'multirepo_test')
os.mkdir(other_dir)
encoder_configuration.conf.override_scorepath_for_test([other_dir])
self.file_set = optimizer.FileAndRateSet(verify_files_present=False)
self.file_set.AddFilesAndRates([self.videofile.filename], [100, 200])
self.optimizer = optimizer.Optimizer(self.codec, self.file_set)
# When there is nothing in the database, None should be returned.
best_encoder = self.optimizer.BestOverallEncoder()
self.assertIsNone(best_encoder)
# Fill in the database with all the files and rates.
other_context = encoder.Context(self.codec, encoder.EncodingDiskCache,
scoredir='multirepo_test')
my_encoder = self.EncoderFromParameterString('--score=7')
other_context.cache.StoreEncoder(my_encoder)
my_encoder.context.cache.StoreEncoder(my_encoder)
for rate, filename in self.file_set.AllFilesAndRates():
my_encoding = my_encoder.Encoding(rate, encoder.Videofile(filename))
my_encoding.Execute()
other_context.cache.StoreEncoding(my_encoding)
# The best encoder should now be from the workdir, but the results are
# all fetched from the searchpath.
best_encoder = self.optimizer.BestOverallEncoder()
self.assertTrue(best_encoder)
self.assertEquals(my_encoder.parameters.ToString(),
best_encoder.parameters.ToString())
one_encoding = best_encoder.Encoding(100, self.videofile)
one_encoding.Recover()
self.assertTrue(one_encoding.Result())
def test_MultipleOptimizers(self):
# Make sure other score directories don't interfere with this test.
encoder_configuration.conf.override_scorepath_for_test([])
os.mkdir(os.path.join(encoder_configuration.conf.sysdir(), 'first_dir'))
os.mkdir(os.path.join(encoder_configuration.conf.sysdir(), 'second_dir'))
one_optimizer = optimizer.Optimizer(self.codec, scoredir='first_dir')
another_optimizer = optimizer.Optimizer(self.codec, scoredir='second_dir')
self.assertNotEqual(one_optimizer.context.cache.workdir,
another_optimizer.context.cache.workdir)
# Storing one encoding's score should not affect the other's.
one_encoding = one_optimizer.BestEncoding(100,
self.videofile)
one_encoding.Execute().Store()
another_encoding = another_optimizer.BestEncoding(100, self.videofile)
self.assertFalse(another_encoding.Result())
another_encoding.Recover()
self.assertFalse(another_encoding.Result())
class TestFileAndRateSet(unittest.TestCase):
def test_OneFileAddedAndReturned(self):
the_set = optimizer.FileAndRateSet(verify_files_present=False)
the_set.AddFilesAndRates(['filename'], [100], 'dirname')
self.assertEqual([(100, 'dirname/filename')], the_set.AllFilesAndRates())
def test_NoDirName(self):
the_set = optimizer.FileAndRateSet(verify_files_present=False)
the_set.AddFilesAndRates(['filename'], [100])
self.assertEqual([(100, 'filename')], the_set.AllFilesAndRates())
def test_OneFileMultipleRates(self):
the_set = optimizer.FileAndRateSet(verify_files_present=False)
the_set.AddFilesAndRates(['filename'], [100, 200], 'dirname')
self.assertEqual(set([(100, 'dirname/filename'),
(200, 'dirname/filename')]),
set(the_set.AllFilesAndRates()))
def test_TwoAddCalls(self):
the_set = optimizer.FileAndRateSet(verify_files_present=False)
the_set.AddFilesAndRates(['filename'], [100, 200], 'dirname')
the_set.AddFilesAndRates(['otherfilename'], [200, 300], 'dirname')
self.assertEqual(set([(100, 'dirname/filename'),
(200, 'dirname/filename'),
(200, 'dirname/otherfilename'),
(300, 'dirname/otherfilename')]),
set(the_set.AllFilesAndRates()))
def test_RatesForFile(self):
the_set = optimizer.FileAndRateSet(verify_files_present=False)
the_set.AddFilesAndRates(['filename'], [100, 200])
the_set.AddFilesAndRates(['otherfilename'], [200, 300])
self.assertEqual(set([100, 200]),
set(the_set.AllRatesForFile('filename')))
class TestFileAndRateSetWithRealFiles(test_tools.FileUsingCodecTest):
def test_AddMissingFile(self):
the_set = optimizer.FileAndRateSet()
the_set.AddFilesAndRates(['nosuchfile'], [100])
self.assertFalse(the_set.AllFilesAndRates())
self.assertFalse(the_set.set_is_complete)
def test_AddPresentFile(self):
the_set = optimizer.FileAndRateSet()
file_name = 'file_1024_768_30.yuv'
test_tools.MakeYuvFileWithOneBlankFrame(file_name)
the_set.AddFilesAndRates([file_name], [100],
basedir=encoder_configuration.conf.workdir())
self.assertTrue(the_set.AllFilesAndRates())
self.assertTrue(the_set.set_is_complete)
if __name__ == '__main__':
unittest.main()
|
py
|
1a56dc93500bb11f60fe06a6480edce01b1264db
|
"""StudentHomepage URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from app01 import views
urlpatterns = [
path('admin/', admin.site.urls),
path('', views.index),
path('login/', views.LoginView.as_view(), name='login'),
path('reg/', views.reg, name='reg'),
path('logout/', views.logout, name='logout'),
path('photo_album/', views.photo_album, name='photo_album'),
path('home/', views.home, name='home'),
path('school/', views.school, name='school'),
path('hobby/', views.Hobby.as_view()),
path('admirer/', views.Admirer.as_view()),
path('reading_matter/', views.ReadingMatter.as_view()),
]
|
py
|
1a56dcbba1fe757b61652519a471039a3ef161f5
|
# This script writes one JSON file of word counts per document. The goal is to
# later compute, for the documents in which a word appears, how many times the
# word appears on average.
import os
import json
count_file = 0
word_count = 0
word_dic = {}
result_dic = {}
# Path of the input data folder
path = '/media/neel/Extra/gigaword_eng_5/data/afp_eng/'
# (per-document output JSON paths are built inside the loop below)
for filename in os.listdir(path):
    # reset the per-document word counts so each JSON only reflects one document
    word_dic = {}
    file_add = "/media/neel/Extra/gigaword_eng_5/word_per_doc/{}.json".format(str(filename))
    outfile = open(file_add, "w")
    count_file += 1
    file = open(path + filename, "r")
text = file.read().lower()
# Fetching only <p> </p> tage data
for item in text.split("</p>"):
if "<p>" in item:
temp_lis = []
            data = str(item[item.find("<p>") + len("<p>"):])
data = data.replace(',','').replace('.','').replace('"','').replace("'","").replace("(","").replace(")","").replace('\n',' ').replace("-","")
temp_lis = data.split(" ")
# counting words
for word in temp_lis:
word_count += 1
try:
word_dic[str(word)] = int(word_dic[str(word)]) + 1
except:
word_dic[str(word)] = 1
file.close()
json.dump(word_dic, outfile)
outfile.close()
print('done')
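# Aggregation sketch (an assumed follow-up step, not in the original script):
# using the per-document JSON files written above, compute for each word its
# average count over only the documents in which it appears.
def average_count_per_containing_doc(json_dir='/media/neel/Extra/gigaword_eng_5/word_per_doc/'):
    totals = {}  # word -> summed count over documents containing it
    docs = {}    # word -> number of documents containing it
    for name in os.listdir(json_dir):
        with open(os.path.join(json_dir, name)) as f:
            counts = json.load(f)
        for word, cnt in counts.items():
            totals[word] = totals.get(word, 0) + cnt
            docs[word] = docs.get(word, 0) + 1
    return {word: totals[word] / docs[word] for word in totals}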
|
py
|
1a56dd0b9ebb1bd8f5a396dbafaed7c732805195
|
"""
WSGI config for email_verification_service project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'email_verification_service.settings')
application = get_wsgi_application()
|
py
|
1a56dd11f116e209fb20e5e855aa0e36109b00e8
|
import discord
from discord.ext import commands
from asyncdagpi import ImageFeatures
class Image(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.command()
async def colors(self, ctx, member:discord.Member=None):
if member is None:
member = ctx.author
url = str(member.avatar_url_as(format="png", size=1024))
img = await self.bot.dag.image_process(ImageFeatures.colors(), url)
await ctx.send(file = discord.File(fp=img.image,filename=f"pixel.{img.format}"))
@commands.command()
async def wanted(self, ctx, member:discord.Member=None):
if member is None:
member = ctx.author
warn_msg = 'the dagpi wanted endpoint has a flaw which makes it very slow to compute' if ctx.author.id in self.bot.owner_ids else 'This may take some time'
warn = await ctx.send(warn_msg)
url = str(member.avatar_url_as(format="png", size=1024))
img = await self.bot.dag.image_process(ImageFeatures.wanted(), url)
try:
await warn.delete()
except:
pass
finally:
await (commands.Context(prefix=ctx.prefix, message=ctx.message)).send(file = discord.File(fp=img.image,filename=f"pixel.{img.format}"))
def setup(bot):
bot.add_cog(Image(bot))
|
py
|
1a56de4835ac512bb80fcab47983ad0c98c3db1d
|
import json
from argparse import ArgumentParser
from ibm_watson import AssistantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
import dateutil.parser
import datetime
import time
DEFAULT_WCS_VERSION='2018-09-20'
DEFAULT_PAGE_SIZE=500
DEFAULT_NUMBER_OF_PAGES=20
def getAssistant(iam_apikey, url, version=DEFAULT_WCS_VERSION):
'''Retrieve Watson Assistant SDK object'''
authenticator = IAMAuthenticator(iam_apikey)
c = AssistantV1(
version=version,
authenticator=authenticator
)
c.set_service_url(url)
return c
def getLogs(iam_apikey, url, workspace_id, filter, page_size_limit=DEFAULT_PAGE_SIZE, page_num_limit=DEFAULT_NUMBER_OF_PAGES, version=DEFAULT_WCS_VERSION):
'''Public API for script, connects to Watson Assistant and downloads all logs'''
service = getAssistant(iam_apikey, url, version)
return getLogsInternal(service, workspace_id, filter, page_size_limit, page_num_limit)
def getLogsInternal(assistant, workspace_id, filter, page_size_limit=DEFAULT_PAGE_SIZE, page_num_limit=DEFAULT_NUMBER_OF_PAGES):
'''Fetches `page_size_limit` logs at a time through Watson Assistant log API, a maximum of `page_num_limit` times, and returns array of log events'''
cursor = None
pages_retrieved = 0
allLogs = []
noMore = False
while pages_retrieved < page_num_limit and noMore != True:
if workspace_id is None:
#all - requires a workspace_id, assistant id, or deployment id in the filter
output = assistant.list_all_logs(sort='-request_timestamp', filter=filter, page_limit=page_size_limit, cursor=cursor)
else:
output = assistant.list_logs(workspace_id=workspace_id, sort='-request_timestamp', filter=filter, page_limit=page_size_limit, cursor=cursor)
#Hack for API compatibility between v1 and v2 of the API - v2 adds a 'result' property on the response. v2 simplest form is list_logs().get_result()
output = json.loads(str(output))
if 'result' in output:
logs = output['result']
else:
logs = output
if 'pagination' in logs and len(logs['pagination']) != 0:
cursor = logs['pagination'].get('next_cursor', None)
#Do not DOS the list_logs function!
time.sleep(3.0)
else:
noMore = True
if 'logs' in logs:
allLogs.extend(logs['logs'])
pages_retrieved = pages_retrieved + 1
print("Fetched {} log pages".format(pages_retrieved))
else:
return None
#Analysis is easier when logs are in increasing timestamp order
allLogs.reverse()
return allLogs
def writeLogs(logs, output_file, output_columns="raw"):
'''
Writes log output to file system or screen. Includes three modes:
`raw`: logs are written in JSON format
`all`: all log columns useful for intent training are written in CSV format
`utterance`: only the `input.text` column is written (one per line)
'''
file = None
if output_file != None:
file = open(output_file,'w')
print("Writing logs to", output_file)
if 'raw' == output_columns:
writeOut(file, json.dumps(logs,indent=2))
if file is not None:
file.close()
return
if 'all' == output_columns:
writeOut(file, 'Utterance\tIntent\tConfidence\tDate\tLast Visited')
for log in logs:
utterance = log['request' ]['input']['text']
intent = 'unknown_intent'
confidence = 0.0
date = 'unknown_date'
last_visited = 'unknown_last_visited'
if 'response' in log and 'intents' in log['response'] and len(log['response']['intents'])>0:
intent = log['response']['intents'][0]['intent']
confidence = log['response']['intents'][0]['confidence']
dateStr = log['request_timestamp']
date = dateutil.parser.parse(dateStr).strftime("%Y-%m-%d")
if 'nodes_visited' in log['response']['output'] and len (log['response']['output']['nodes_visited']) > 0:
last_visited = log['response']['output']['nodes_visited'][-1]
if 'all' == output_columns:
output_line = '{}\t{}\t{}\t{}\t{}'.format(utterance, intent, confidence, date, last_visited)
else:
#assumed just 'utterance'
output_line = utterance
writeOut(file, output_line)
if output_file != None:
file.close()
def writeOut(file, message):
if file != None:
file.write(message + '\n')
else:
print(message)
def create_parser():
parser = ArgumentParser(description='Extracts Watson Assistant logs from a given workspace')
parser.add_argument('-c', '--output_columns', type=str, help='Which columns you want in output, either "utterance", "raw", or "all" (default is "raw")', default='raw')
parser.add_argument('-o', '--output_file', type=str, help='Filename to write results to')
parser.add_argument('-w', '--workspace_id', type=str, help='Workspace identifier')
parser.add_argument('-a', '--iam_apikey', type=str, required=True, help='Assistant service iam api key')
parser.add_argument('-f', '--filter', type=str, required=True, help='Watson Assistant log query filter')
parser.add_argument('-v', '--version', type=str, default=DEFAULT_WCS_VERSION, help="Watson Assistant version in YYYY-MM-DD form.")
parser.add_argument('-n', '--number_of_pages', type=int, default=DEFAULT_NUMBER_OF_PAGES, help='Number of result pages to download (default is {})'.format(DEFAULT_NUMBER_OF_PAGES))
parser.add_argument('-p', '--page_limit', type=int, default=DEFAULT_PAGE_SIZE, help='Number of results per page (default is {})'.format(DEFAULT_PAGE_SIZE))
parser.add_argument('-l', '--url', type=str, default='https://gateway.watsonplatform.net/assistant/api',
help='URL to Watson Assistant. Ex: https://gateway-wdc.watsonplatform.net/assistant/api')
return parser
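# A hedged programmatic-usage sketch (not part of the original script; the
# credential placeholders and the filter value are illustrative assumptions):
# fetch a couple of pages of logs and write only the utterances to a file.
def example_export(apikey, url, workspace_id):
    logs = getLogs(apikey, url, workspace_id,
                   filter='response_timestamp>=2019-01-01',
                   page_size_limit=100, page_num_limit=2)
    writeLogs(logs, output_file='utterances.txt', output_columns='utterance')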
if __name__ == '__main__':
ARGS = create_parser().parse_args()
service = getAssistant(ARGS.iam_apikey,ARGS.url,ARGS.version)
logs = getLogsInternal(service, ARGS.workspace_id, ARGS.filter, ARGS.page_limit, ARGS.number_of_pages)
writeLogs(logs, ARGS.output_file, ARGS.output_columns)
|
py
|
1a56df5043086e897670921736cb0ed748efd355
|
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import requests
import socket
from urlparse import urlparse
def CheckServiceAddress(address):
hostname = urlparse(address).hostname
service_address = socket.gethostbyname(hostname)
print service_address
def GetServerResponse(address):
print 'Send request to:', address
response = requests.get(address)
print response
print response.content
def Main():
parser = argparse.ArgumentParser()
parser.add_argument('address')
args = parser.parse_args()
CheckServiceAddress(args.address)
GetServerResponse(args.address)
if __name__ == "__main__":
Main()
|
py
|
1a56df926b468d068b83798da6bf8b4b8117caee
|
from __future__ import division
import numpy as np
import seaborn as sns
import sys
from sys import platform as sys_pf
if sys_pf == 'Darwin':
import matplotlib
matplotlib.use("TkAgg")
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
def normalize(v):
norm=np.linalg.norm(v)
if norm==0:
return v
return v/norm
def visualize_cluster(No_of_traj, l_no, nrows, ncols, x, y, c, col):
Number_of_traj = No_of_traj
label_no =l_no
counter = 0
alpha = c[label_no] - 1
fig, axes = plt.subplots(nrows, ncols, sharex=True, sharey=True)
for row in axes:
for cols in range(ncols):
if (counter > alpha):
break
X = x[counter]
Y = y[counter]
row[cols].plot(X, Y, color=col[int(label_no)])
counter = counter + 1
fig.suptitle('Trajectories of Cluster '+str(label_no), fontsize='large')
plt.show()
def main():
#loading files ...
labels = np.loadtxt('labels.txt', delimiter=',')
labelsbefore = np.loadtxt('labelsbefore.txt', delimiter=',')
XA = np.loadtxt('XA.txt', delimiter=',')
XB = np.loadtxt('XB.txt', delimiter=',')
YA = np.loadtxt('YA.txt', delimiter=',')
YB = np.loadtxt('YB.txt', delimiter=',')
Number_of_traj = np.shape(XA)[0]
Number_of_frames = np.shape(XA)[1]
col = ['red', 'black', 'blue', 'green', 'cyan']
c = np.zeros( shape=(5), dtype=int)
for i in range(139):
if (int(labels[i]) == 0) : c[0] += 1
elif (int(labels[i]) == 1) : c[1] += 1
elif (int(labels[i]) == 2) : c[2] += 1
elif (int(labels[i]) == 3) : c[3] += 1
elif (int(labels[i]) == 4) : c[4] += 1
C0x = np.zeros(shape=(c[0],Number_of_frames))
C1x = np.zeros(shape=(c[1],Number_of_frames))
C2x = np.zeros(shape=(c[2],Number_of_frames))
C3x = np.zeros(shape=(c[3],Number_of_frames))
C4x = np.zeros(shape=(c[4],Number_of_frames))
C0y = np.zeros(shape=(c[0],Number_of_frames))
C1y = np.zeros(shape=(c[1],Number_of_frames))
C2y = np.zeros(shape=(c[2],Number_of_frames))
C3y = np.zeros(shape=(c[3],Number_of_frames))
C4y = np.zeros(shape=(c[4],Number_of_frames))
C0xb = np.zeros(shape=(c[0],Number_of_frames))
C1xb = np.zeros(shape=(c[1],Number_of_frames))
C2xb = np.zeros(shape=(c[2],Number_of_frames))
C3xb = np.zeros(shape=(c[3],Number_of_frames))
C4xb = np.zeros(shape=(c[4],Number_of_frames))
C0yb = np.zeros(shape=(c[0],Number_of_frames))
C1yb = np.zeros(shape=(c[1],Number_of_frames))
C2yb = np.zeros(shape=(c[2],Number_of_frames))
C3yb = np.zeros(shape=(c[3],Number_of_frames))
C4yb = np.zeros(shape=(c[4],Number_of_frames))
index = np.zeros( shape=(5), dtype= int)
for trajectory in range(139):
if (col[int(labels[trajectory])]) == 'red' :
C0x[index[0]] = XA[trajectory]
C0y[index[0]] = YA[trajectory]
C0xb[index[0]] = XB[trajectory]
C0yb[index[0]] = YB[trajectory]
index[0] +=1
elif (col[int(labels[trajectory])]) == 'black' :
C1x[index[1]] = XA[trajectory]
C1y[index[1]] = YA[trajectory]
C1xb[index[1]] = XB[trajectory]
C1yb[index[1]] = YB[trajectory]
index[1] +=1
elif (col[int(labels[trajectory])]) == 'blue' :
C2x[index[2]] = XA[trajectory]
C2y[index[2]] = YA[trajectory]
C2xb[index[2]] = XB[trajectory]
C2yb[index[2]] = YB[trajectory]
index[2] +=1
elif (col[int(labels[trajectory])]) == 'green' :
C3x[index[3]] = XA[trajectory]
C3y[index[3]] = YA[trajectory]
C3xb[index[3]] = XB[trajectory]
C3yb[index[3]] = YB[trajectory]
index[3] +=1
else :
C4x[index[4]] = XA[trajectory]
C4y[index[4]] = YA[trajectory]
C4xb[index[4]] = XB[trajectory]
C4yb[index[4]] = YB[trajectory]
index[4] +=1
print (index)
visualize_cluster(Number_of_traj, 0, 5, 6, C0xb, C0yb, c, col)
visualize_cluster(Number_of_traj, 1, 6, 8, C1xb, C1yb, c, col)
visualize_cluster(Number_of_traj, 2, 3, 6, C2xb, C2yb, c, col)
visualize_cluster(Number_of_traj, 3, 3, 2, C3xb, C3yb, c, col)
visualize_cluster(Number_of_traj, 4, 5, 8, C4xb, C4yb, c, col)
for Trajectories in range (Number_of_traj):
print (Trajectories, labelsbefore[Trajectories], labels[Trajectories])
if __name__ == "__main__":
main()
|
py
|
1a56dfd2f43be6ab225730de4013bad25e94e53b
|
from tkinter import *  # for tkinter and widgets
# window for renaming files (GROUP version of the single-file rename window)
class Ventana:
    def __init__(self, root):
        self.existe = True
        # create the window for changing a single name
        self.root = root
        self.root.title("Renombrar [GRUPO]")
        self.root.geometry("450x280")  # size and position
        self.root.minsize(width=450, height=280)
        self.root.configure(background='#272829')
        self.root.resizable(height=FALSE, width=FALSE)  # not resizable
        self.root.attributes("-topmost", True)  # keep the window on top
        # CAPTURE the window-close event and route it to a handler
        self.root.protocol("WM_DELETE_WINDOW", self.cerrar_vent)
        # # field for the name to search
        self.frame_01 = Frame(self.root, bg="black")  # frame for the label and Entry
        self.frame_01.pack(anchor=NW, fill=BOTH)
        self.label_01 = Label(self.frame_01, text="Buscar palabra:",
                              fg="white", bg="black", font=("arial", 12))  # label identifying the input field
        self.label_01.pack(anchor=NW, side=LEFT, pady=5)
        self.archivo_buscar = StringVar()  # string input variable
        self.campo_nombre_buscar = Entry(self.frame_01, font=("arial", 12),
                                         textvariable=self.archivo_buscar)  # input field
        self.campo_nombre_buscar.pack(anchor=E, side=LEFT, pady=5, padx=5, fill=X, expand=1)
        # # field for the replacement name
        self.frame_03 = Frame(self.root, bg="black")  # frame for the label and Entry
        self.frame_03.pack(anchor=NW, fill=BOTH)
        self.label_03 = Label(self.frame_03, text="Cambiar por:",
                              fg="white", bg="black", font=("arial", 12))  # label identifying the input field
        self.label_03.pack(anchor=NW, side=LEFT, pady=5)
        self.archivo_cambiar = StringVar()  # string input variable
        self.campo_nombre_cambiar = Entry(self.frame_03, font=("arial", 12),
                                          textvariable=self.archivo_cambiar)  # input field
        self.campo_nombre_cambiar.pack(anchor=E, side=LEFT, pady=5, padx=5, fill=X, expand=1)
        # buttons
        self.frame_02 = Frame(self.root, bg="black")  # frame for the label and Entry
        self.frame_02.pack(anchor=NW, fill=BOTH)
        self.btn_si = Button(self.frame_02, text="Buscar Archivos", font=("arial", 12))
        self.btn_si.pack(side=LEFT, fill=BOTH, expand=1, pady=5, padx=5)
        # central title
        self.etiqueta = Label(self.root, text="Confirmar Acción", font=("arial", 14), bg="gray", fg="white")
        self.etiqueta.pack(padx=0, pady=0, fill=BOTH, expand=1)
        # description
        self.texto = 'None...'
        self.descripcion = Label(self.root, text=self.texto, justify=CENTER,
                                 font=("arial", 12), bg="black", fg="white")
        self.descripcion.pack(padx=0, pady=5, fill=BOTH, ipadx=5, ipady=5)
        # buttons
        self.btn_aplicar = Button(self.root, text="Aplicar", width=4, font=("arial", 12))
        self.btn_aplicar.pack(side=LEFT, fill=BOTH, expand=1)
        self.btn_no_salir = Button(self.root, text="No", width=4, font=("arial", 12))
        self.btn_no_salir.pack(side=LEFT, fill=BOTH, expand=1)
    # method to close the window
    def cerrar_vent(self):
        self.existe = False
        del self.existe
        self.root.destroy()
    # method to destroy the window when it is closed
    def _onDestroy(self):
        self.existe = False
        del self.existe
        self.root.destroy()
# -----------------------
# window test harness
# raiz = Tk()
# gui_ppal = Ventana(raiz)
# raiz.mainloop()
|
py
|
1a56dff3d2dcdfc414f1debae57a438857ee973b
|
import random
import numpy as np
import sys
from domain.make_env import make_env
from domain.task_gym import GymTask
from neat_src import *
class WannGymTask(GymTask):
"""Problem domain to be solved by neural network. Uses OpenAI Gym patterns.
"""
def __init__(self, game, paramOnly=False, nReps=1):
"""Initializes task environment
Args:
game - (string) - dict key of task to be solved (see domain/config.py)
Optional:
paramOnly - (bool) - only load parameters instead of launching task?
nReps - (nReps) - number of trials to get average fitness
"""
GymTask.__init__(self, game, paramOnly, nReps)
# -- 'Weight Agnostic Network' evaluation -------------------------------- -- #
def setWeights(self, wVec, wVal):
"""Set single shared weight of network
Args:
wVec - (np_array) - weight matrix as a flattened vector
[N**2 X 1]
wVal - (float) - value to assign to all weights
Returns:
wMat - (np_array) - weight matrix with single shared weight
[N X N]
"""
# Create connection matrix
wVec[np.isnan(wVec)] = 0
dim = int(np.sqrt(np.shape(wVec)[0]))
cMat = np.reshape(wVec,(dim,dim))
cMat[cMat!=0] = 1.0
# Assign value to all weights
wMat = np.copy(cMat) * wVal
return wMat
def getFitness(self, wVec, aVec, hyp, \
seed=-1,nRep=False,nVals=6,view=False,returnVals=False):
"""Get fitness of a single individual with distribution of weights
Args:
wVec - (np_array) - weight matrix as a flattened vector
[N**2 X 1]
aVec - (np_array) - activation function of each node
[N X 1] - stored as ints (see applyAct in ann.py)
hyp - (dict) - hyperparameters
['alg_wDist'] - weight distribution [standard;fixed;linspace]
['alg_absWCap'] - absolute value of highest weight for linspace
Optional:
seed - (int) - starting random seed for trials
nReps - (int) - number of trials to get average fitness
nVals - (int) - number of weight values to test
Returns:
fitness - (float) - mean reward over all trials
"""
if nRep is False:
nRep = hyp['alg_nReps']
# Set weight values to test WANN with
if (hyp['alg_wDist'] == "standard") and nVals==6: # Double, constant, and half signal
wVals = np.array((-2,-1.0,-0.5,0.5,1.0,2))
else:
wVals = np.linspace(-self.absWCap, self.absWCap ,nVals)
# Get reward from 'reps' rollouts -- test population on same seeds
reward = np.empty((nRep,nVals))
for iRep in range(nRep):
for iVal in range(nVals):
wMat = self.setWeights(wVec,wVals[iVal])
if seed == -1:
reward[iRep,iVal] = self.testInd(wMat, aVec, seed=seed,view=view)
else:
reward[iRep,iVal] = self.testInd(wMat, aVec, seed=seed+iRep,view=view)
if returnVals is True:
return np.mean(reward,axis=0), wVals
return np.mean(reward,axis=0)
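# Minimal numpy-only sketch (not part of the original module) of the shared
# weight expansion that setWeights performs: NaNs mark absent connections and
# every remaining connection receives the same scalar weight.
def _shared_weight_demo(wVal=-1.5):
    wVec = np.array([0., 1., np.nan,
                     0., 0., 1.,
                     0., 0., 0.])        # flattened 3x3 connection vector
    wVec[np.isnan(wVec)] = 0
    dim = int(np.sqrt(wVec.shape[0]))
    cMat = np.reshape(wVec, (dim, dim))
    cMat[cMat != 0] = 1.0                # keep only the connectivity pattern
    return cMat * wVal                   # all connections share the value wVal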
|
py
|
1a56e09763aa382c812dbcbc6d7ef3cd50b1bf69
|
from keras import backend as K
import numpy as np
from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
def random_binomial(shape, n=0, p=0.5, dtype=K.floatx(), seed=None):
if seed is None:
seed = np.random.randint(10e6)
rng = RandomStreams(seed=seed)
return rng.binomial(shape, n=n, p=p, dtype=dtype)
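# Usage sketch (assumes Keras is configured with the Theano backend, which the
# MRG_RandomStreams import above already requires): draw a 2x3 Bernoulli(0.5)
# mask as a symbolic tensor and evaluate it eagerly.
if __name__ == "__main__":
    mask = random_binomial((2, 3), n=1, p=0.5, seed=42)
    print(K.eval(mask))  # a 2x3 array of 0.0/1.0 draws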
|