# ---- convert.py (tonyngjichun/pspnet-pytorch, MIT) ----
#!/usr/bin/env python
# coding: utf-8
#
# Author: Kazuto Nakashima
# URL: http://kazuto1011.github.io
# Created: 2017-11-15
from __future__ import print_function
import re
from collections import OrderedDict
import click
import numpy as np
import torch
import yaml
from addict import Dict
from libs import caffe_pb2
from libs.models import PSPNet
def parse_caffemodel(model_path):
caffemodel = caffe_pb2.NetParameter()
with open(model_path, "rb") as f:
caffemodel.MergeFromString(f.read())
# Check trainable layers
print(set([(layer.type, len(layer.blobs)) for layer in caffemodel.layer]))
params = OrderedDict()
for layer in caffemodel.layer:
print("{} ({}): {}".format(layer.name, layer.type, len(layer.blobs)))
# Convolution or Dilated Convolution
if "Convolution" in layer.type:
params[layer.name] = {}
params[layer.name]["kernel_size"] = layer.convolution_param.kernel_size[0]
params[layer.name]["stride"] = layer.convolution_param.stride[0]
params[layer.name]["weight"] = list(layer.blobs[0].data)
if len(layer.blobs) == 2:
params[layer.name]["bias"] = list(layer.blobs[1].data)
if len(layer.convolution_param.pad) == 1: # or []
params[layer.name]["padding"] = layer.convolution_param.pad[0]
else:
params[layer.name]["padding"] = 0
if isinstance(layer.convolution_param.dilation, int): # or []
params[layer.name]["dilation"] = layer.convolution_param.dilation
else:
params[layer.name]["dilation"] = 1
# Batch Normalization
elif "BN" in layer.type:
params[layer.name] = {}
params[layer.name]["weight"] = list(layer.blobs[0].data)
params[layer.name]["bias"] = list(layer.blobs[1].data)
params[layer.name]["running_mean"] = list(layer.blobs[2].data)
params[layer.name]["running_var"] = list(layer.blobs[3].data)
params[layer.name]["eps"] = layer.bn_param.eps
params[layer.name]["momentum"] = layer.bn_param.momentum
return params
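
# Sketch of the returned structure for one hypothetical Convolution layer
# (actual keys and layer names depend on the .caffemodel being converted):
#   params["conv1_1_3x3_s2"] -> {"kernel_size": 3, "stride": 2, "padding": 1,
#                                "dilation": 1, "weight": [...]}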
# Hard-coded layer-name translator
def translate_layer_name(source):
def conv_or_bn(source):
if "bn" in source:
return ".bn"
else:
return ".conv"
source = re.split("[_/]", source)
layer = int(source[0][4]) # Remove "conv"
target = ""
if layer == 1:
target += "fcn.layer{}.conv{}".format(layer, source[1])
target += conv_or_bn(source)
elif layer in range(2, 6):
block = int(source[1])
        # Auxiliary layer
if layer == 4 and len(source) == 3 and source[2] == "bn":
target += "aux.conv4_aux.bn"
elif layer == 4 and len(source) == 2:
target += "aux.conv4_aux.conv"
# Pyramid pooling modules
elif layer == 5 and block == 3 and "pool" in source[2]:
pyramid = {1: 3, 2: 2, 3: 1, 6: 0}[int(source[2][4])]
target += "ppm.stages.s{}.conv".format(pyramid)
target += conv_or_bn(source)
# Last convolutions
elif layer == 5 and block == 4:
target += "final.conv5_4"
target += conv_or_bn(source)
else:
target += "fcn.layer{}".format(layer)
target += ".block{}".format(block)
if source[2] == "3x3":
target += ".conv3x3"
else:
target += ".{}".format(source[3])
target += conv_or_bn(source)
elif layer == 6:
if len(source) == 1:
target += "final.conv6"
else:
target += "aux.conv6_1"
return target
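
# Sanity sketch of the translation above (layer names are hypothetical examples
# in the caffemodel naming scheme; targets assume libs.models.PSPNet's layout):
#   translate_layer_name("conv1_1_3x3_s2")     -> "fcn.layer1.conv1.conv"
#   translate_layer_name("conv5_3_pool6_conv") -> "ppm.stages.s0.conv.conv"
#   translate_layer_name("conv6")              -> "final.conv6"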
@click.command()
@click.option("--config", "-c", required=True)
def main(config):
WHITELIST = ["kernel_size", "stride", "padding", "dilation", "eps", "momentum"]
    CONFIG = Dict(yaml.safe_load(open(config)))
params = parse_caffemodel(CONFIG.CAFFE_MODEL)
model = PSPNet(
n_classes=CONFIG.N_CLASSES, n_blocks=CONFIG.N_BLOCKS, pyramids=CONFIG.PYRAMIDS
)
model.eval()
own_state = model.state_dict()
report = []
state_dict = OrderedDict()
for layer_name, layer_dict in params.items():
for param_name, values in layer_dict.items():
if param_name in WHITELIST:
attribute = translate_layer_name(layer_name)
attribute = eval("model." + attribute + "." + param_name)
message = " ".join(
[
layer_name.ljust(25),
"->",
param_name,
"pytorch: " + str(attribute),
"caffe: " + str(values),
]
)
print(message, end="")
if isinstance(attribute, tuple):
if attribute[0] != values:
report.append(message)
else:
if abs(attribute - values) > 1e-4:
report.append(message)
print(": Checked!")
continue
param_name = translate_layer_name(layer_name) + "." + param_name
if param_name in own_state:
print(layer_name.ljust(25), "->", param_name, end="")
values = torch.FloatTensor(values)
values = values.view_as(own_state[param_name])
state_dict[param_name] = values
print(": Copied!")
print("Inconsistent parameters (*_3x3 dilation and momentum can be ignored):")
print(*report, sep="\n")
# Check
model.load_state_dict(state_dict)
torch.save(state_dict, CONFIG.PYTORCH_MODEL)
if __name__ == "__main__":
main()

# ---- models/train_classifier.py (jeena72/disaster-response-pipeline, MIT) ----
import sys
import pandas as pd
import numpy as np
import nltk
from joblib import dump
from sqlalchemy import create_engine
from nltk.tokenize import word_tokenize
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.multioutput import MultiOutputClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import classification_report
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('averaged_perceptron_tagger')
def load_data(database_filepath):
"""
Load and generate datasets for fitting along with message categories list
Parameters
-----------
database_filepath : str
SQLite database file path
Returns
----------
X : DataFrame
Contains messages for generating features
Y : DataFrame
Contains binary labels for various message categories
category_names : list
List of different message categories
"""
engine = create_engine('sqlite:///' + database_filepath)
df = pd.read_sql_table("DisasterResponseData", con=engine)
X = df["message"]
Y = df[[col for col in df.columns.tolist() if col not in ["id", "message", "original", "genre"]]]
category_names = Y.columns.tolist()
return X, Y, category_names
def tokenize(text):
"""
Passed string is normalized, lemmatized, and tokenized
Parameters
-----------
text : str
text to be tokenized
Returns
----------
clean_tokens : list
Contains generated tokens
"""
tokens = word_tokenize(text)
lemmatizer = WordNetLemmatizer()
clean_tokens = []
for tok in tokens:
clean_tok = lemmatizer.lemmatize(tok).lower().strip()
clean_tokens.append(clean_tok)
return clean_tokens
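
# Illustrative call (exact tokens depend on the downloaded NLTK punkt model):
#   tokenize("Water is rising fast!") -> ['water', 'is', 'rising', 'fast', '!']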
class StartingVerbExtractor(BaseEstimator, TransformerMixin):
"""
    This transformer class extracts the starting verb of a sentence
"""
def starting_verb(self, text):
sentence_list = nltk.sent_tokenize(text)
for sentence in sentence_list:
pos_tags = nltk.pos_tag(tokenize(sentence))
first_word, first_tag = pos_tags[0]
if first_tag in ['VB', 'VBP'] or first_word == 'RT':
return True
return False
def fit(self, X, y=None):
return self
def transform(self, X):
X_tagged = pd.Series(X).apply(self.starting_verb)
return pd.DataFrame(X_tagged)
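
# Illustrative behavior (tags depend on the downloaded perceptron tagger):
#   StartingVerbExtractor().transform(["Send water now", "The road is blocked"])
#   returns a one-column DataFrame of booleans, True where some sentence in the
#   text starts with a VB/VBP-tagged token or the literal "RT".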
def build_model(useGridSearch=False):
"""
Creates scikit Pipeline object for processing text messages and fitting a classifier.
Parameters
-----------
useGridSearch: bool
If grid search be used for model training
Returns
----------
pipeline : Pipeline
Pipeline object
"""
pipeline = Pipeline([
("features", FeatureUnion([
('text_pipeline', Pipeline([
('count_vectorizer', CountVectorizer(tokenizer=tokenize)),
('scaler', StandardScaler(with_mean=False))
])),
('tfidf_transformer', TfidfVectorizer()),
('starting_verb_extr', StartingVerbExtractor())
])),
("clf", MultiOutputClassifier(AdaBoostClassifier()))
])
if useGridSearch:
parameters = {
'features__text_pipeline__count_vectorizer__max_df': (0.5, 1.0),
'features__tfidf_transformer__use_idf': (True, False),
'features__transformer_weights': (
                {'text_pipeline': 1, 'tfidf_transformer': 1, 'starting_verb_extr': 1},
                {'text_pipeline': 0.5, 'tfidf_transformer': 1, 'starting_verb_extr': 0.5},
)
}
        cv = GridSearchCV(pipeline, param_grid=parameters, cv=3, verbose=2)
return cv
return pipeline
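
# Usage sketch (grid search over the parameters above, with cv=3 as configured):
#   model = build_model(useGridSearch=True)
#   model.fit(X_train, Y_train)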
def evaluate_model(model, X_test, Y_test, category_names):
"""
Method applies scikit pipeline to test set and prints the model performance (accuracy and f1score)
Parameters
-----------
model : Pipeline
fit pipeline
X_test : ndarray
test features
Y_test : ndarray
test labels
category_names : list
List of different message categories
Returns
----------
None
"""
Y_pred = model.predict(X_test)
print(classification_report(Y_test, Y_pred, target_names=category_names))
def save_model(model, model_filepath):
"""
Save trained model
Parameters
-----------
model : Pipeline
fit pipeline
model_filepath : str
path with dump format
Returns
----------
None
"""
dump(model, "{}".format(model_filepath))
def main():
"""
Runner function
This function:
1) Extract data from SQLite db
2) Train ML model on training set
3) Estimate model performance on test set
4) Save trained model
"""
if len(sys.argv) == 3:
database_filepath, model_filepath = sys.argv[1:]
print('Loading data...\n DATABASE: {}'.format(database_filepath))
X, Y, category_names = load_data(database_filepath)
X_train, X_test, Y_train, Y_test = train_test_split(X.values, Y.values, test_size=0.2, random_state=42)
print('Building model...')
model = build_model()
print('Training model...')
model.fit(X_train, Y_train)
print('Evaluating model...')
evaluate_model(model, X_test, Y_test, category_names)
print('Saving model...\n MODEL: {}'.format(model_filepath))
save_model(model, model_filepath)
print('Trained model saved!')
else:
print('Please provide the filepath of the disaster messages database '\
'as the first argument and the filepath of the pickle file to '\
'save the model to as the second argument. \n\nExample: python '\
'train_classifier.py ../data/DisasterResponse.db classifier.pkl')
if __name__ == '__main__':
    main()

# ---- rdr_service/lib_fhir/fhirclient_1_0_6/__init__.py (all-of-us/raw-data-repository, BSD-3-Clause) ----
import os.path
import sys
abspath = os.path.abspath(os.path.dirname(__file__))
if abspath not in sys.path:
sys.path.insert(0, abspath)

# ---- tests/test_images_color.py (JParzival/mediafier, MIT) ----
import cv2
import os
from mediafier.image.color import modifyContrast, modifyBrightness, changeBGRColorspace, findMostCommonColor
SRC_IMG_DIR = os.path.join('test_media', 'imgs_src_test')
SAVE_IMG_DIR = os.path.join('test_media', 'imgs_result_test', 'color')
if not os.path.exists(SAVE_IMG_DIR):
os.makedirs(SAVE_IMG_DIR)
def test_image_color_contrast():
img = cv2.imread(os.path.join(SRC_IMG_DIR, 'test.png'))
"""Transform contrast default"""
params_default=[
{
'image': img,
'value': 1
},
{
'image': img,
'value': 3
},
{
'image': img,
'value': 0.5
},
{
'image': img,
'value': 0
},
]
for param in params_default:
cv2.imwrite(os.path.join(SAVE_IMG_DIR, f"img_contrast_{param['value']}_default.png"), modifyContrast(param['image'], param['value']))
"""Transform contrast CLAHE"""
params_clahe=[
{
'image': img,
'value': 1,
'method': 'CLAHE',
'name': 'img_contrast_1_CLAHE.png'
},
{
'image': img,
'value': 2,
'method': 'CLAHE',
'name': 'img_contrast_2_CLAHE.png'
},
{
'image': img,
'value': 0.5,
'method': 'CLAHE',
'name': 'img_contrast_05_CLAHE.png'
},
{
'image': img,
'value': 0,
'method': 'CLAHE',
'name': 'img_contrast_0_CLAHE.png'
},
]
for param in params_clahe:
cv2.imwrite(os.path.join(SAVE_IMG_DIR, f"img_contrast_{param['value']}_CLAHE.png"), modifyContrast(param['image'], param['value'], param['method']))
"""Failure example"""
#modifyContrast(img, "a")
#modifyContrast(img, 1, ':S')
#modifyContrast(img, -1)
def test_image_color_brightness():
img = cv2.imread(os.path.join(SRC_IMG_DIR, 'test.png'))
"""Transform brightness"""
params_default=[
{
'image': img,
'value': 1
},
{
'image': img,
'value': 1.5
},
{
'image': img,
'value': 2
},
{
'image': img,
'value': 0.5
},
{
'image': img,
'value': 0
},
]
for param in params_default:
cv2.imwrite(os.path.join(SAVE_IMG_DIR, f"img_brightness_{param['value']}.png"), modifyBrightness(param['image'], param['value']))
"""Failure example"""
#modifyBrightness(img, "a")
#modifyBrightness(img, 1, ':S')
#modifyBrightness(img, -1)
def test_image_color_colorspaceBGR():
img = cv2.imread(os.path.join(SRC_IMG_DIR, 'test.png'))
params = [
{
'image': img,
'to': 'gray'
},
{
'image': img,
'to': 'hsv'
},
{
'image': img,
'to': 'hls'
},
{
'image': img,
'to': 'lab'
},
{
'image': img,
'to': 'luv'
},
{
'image': img,
'to': 'yuv'
},
{
'image': img,
'to': 'rgb'
}
]
for param in params:
cv2.imwrite(os.path.join(SAVE_IMG_DIR, f"img_colorspaceBGR_{param['to']}.png"), changeBGRColorspace(param['image'], param['to']))
"""Failure example"""
#changeBGRColorspace(img, "a")
#changeBGRColorspace(img, 1)
def test_image_color_mostCommonColor():
img = cv2.imread(os.path.join(SRC_IMG_DIR, 'test.png'))
params = [
{
'image': img,
'method': 'average'
},
{
'image': img,
'method': 'frequency'
},
{
'image': img,
'method': 'kmeans',
'clusters': 3
},
{
'image': img,
'method': 'kmeans',
'clusters': 6
}
]
    for param in params:
        # use the kmeans cluster count only when the test case provides one
        if 'clusters' in param:
            a, b = findMostCommonColor(param['image'], param['method'], param['clusters'])
            name = f"img_mostCommonColor_{param['method']}_{param['clusters']}"
        else:
            a, b = findMostCommonColor(param['image'], param['method'])
            name = f"img_mostCommonColor_{param['method']}"
        cv2.imwrite(os.path.join(SAVE_IMG_DIR, name + ".png"), b)
        with open(os.path.join(SAVE_IMG_DIR, name + ".txt"), 'w') as txtfile:
            txtfile.write(str(a))
"""Failure example"""
#findMostCommonColor(img, "a")
#findMostCommonColor(img, "kmeans", -1) | 25.109453 | 156 | 0.490192 |
794298f90cedfe1848447994a942ea23ee2dc33d | 116 | py | Python | signalpings/urls.py | soratidus999/allianceauth-signal-pings | 127d301f9511171eeef7822bb7f3cc38c0d13d56 | [
"MIT"
] | 1 | 2020-09-19T11:59:33.000Z | 2020-09-19T11:59:33.000Z | signalpings/urls.py | soratidus999/allianceauth-signal-pings | 127d301f9511171eeef7822bb7f3cc38c0d13d56 | [
"MIT"
] | 12 | 2020-07-14T11:16:07.000Z | 2021-06-15T08:04:43.000Z | signalpings/urls.py | soratidus999/allianceauth-signal-pings | 127d301f9511171eeef7822bb7f3cc38c0d13d56 | [
"MIT"
] | 3 | 2020-09-19T11:59:42.000Z | 2021-08-12T04:27:59.000Z | from django.urls import path
from . import views
app_name = 'signalpings'
urlpatterns = [
]
# no urls admin app

# ---- ncclient/operations/retrieve.py (MrRagga-/ncclient, Apache-2.0) ----
# Copyright 2009 Shikhar Bhushan
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ncclient.operations.rpc import RPC, RPCReply
from ncclient.xml_ import *
from ncclient.operations import util
class GetReply(RPCReply):
"""Adds attributes for the *data* element to `RPCReply`."""
def _parsing_hook(self, root):
self._data = None
if not self._errors:
self._data = root.find(qualify("data"))
@property
def data_ele(self):
"*data* element as an :class:`~xml.etree.ElementTree.Element`"
if not self._parsed:
self.parse()
return self._data
@property
def data_xml(self):
"*data* element as an XML string"
if not self._parsed:
self.parse()
return to_xml(self._data)
data = data_ele
"Same as :attr:`data_ele`"
class Get(RPC):
"The *get* RPC."
REPLY_CLS = GetReply
"See :class:`GetReply`."
def request(self, filter=None):
"""Retrieve running configuration and device state information.
*filter* specifies the portion of the configuration to retrieve (by default entire configuration is retrieved)
:seealso: :ref:`filter_params`
"""
node = new_ele("get")
if filter is not None:
node.append(util.build_filter(filter))
return self._request(node)
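
# Usage sketch (assumes a session created with ncclient.manager.connect;
# the tuple filter form is handled by util.build_filter above):
#   reply = m.get(filter=("subtree", "<interfaces/>"))
#   print(reply.data_xml)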
class GetConfig(RPC):
"The *get-config* RPC."
REPLY_CLS = GetReply
"See :class:`GetReply`."
def request(self, source, filter=None):
"""Retrieve all or part of a specified configuration.
*source* name of the configuration datastore being queried
*filter* specifies the portion of the configuration to retrieve (by default entire configuration is retrieved)
:seealso: :ref:`filter_params`"""
node = new_ele("get-config")
node.append(util.datastore_or_url("source", source, self._assert))
if filter is not None:
node.append(util.build_filter(filter))
return self._request(node)
class Dispatch(RPC):
"Generic retrieving wrapper"
REPLY_CLS = GetReply
"See :class:`GetReply`."
def request(self, rpc_command, source=None, filter=None):
"""
*rpc_command* specifies rpc command to be dispatched either in plain text or in xml element format (depending on command)
*source* name of the configuration datastore being queried
*filter* specifies the portion of the configuration to retrieve (by default entire configuration is retrieved)
:seealso: :ref:`filter_params`
Examples of usage::
dispatch('clear-arp-table')
or dispatch element like ::
xsd_fetch = new_ele('get-xnm-information')
sub_ele(xsd_fetch, 'type').text="xml-schema"
sub_ele(xsd_fetch, 'namespace').text="junos-configuration"
dispatch(xsd_fetch)
"""
if etree.iselement(rpc_command):
node = rpc_command
else:
node = new_ele(rpc_command)
if source is not None:
node.append(util.datastore_or_url("source", source, self._assert))
if filter is not None:
node.append(util.build_filter(filter))
return self._request(node)

# ---- syncio/__init__.py (davidbrochart/syncio, MIT) ----
from .syncio import sync, sleep
__version__ = "0.0.1"

# ---- google/cloud/aiplatform_v1beta1/types/lineage_subgraph.py (TheMichaelHu/python-aiplatform, Apache-2.0) ----
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform_v1beta1.types import artifact
from google.cloud.aiplatform_v1beta1.types import event
from google.cloud.aiplatform_v1beta1.types import execution
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1beta1",
manifest={
"LineageSubgraph",
},
)
class LineageSubgraph(proto.Message):
r"""A subgraph of the overall lineage graph. Event edges connect
Artifact and Execution nodes.
Attributes:
artifacts (Sequence[google.cloud.aiplatform_v1beta1.types.Artifact]):
The Artifact nodes in the subgraph.
executions (Sequence[google.cloud.aiplatform_v1beta1.types.Execution]):
The Execution nodes in the subgraph.
events (Sequence[google.cloud.aiplatform_v1beta1.types.Event]):
The Event edges between Artifacts and
Executions in the subgraph.
"""
artifacts = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=artifact.Artifact,
)
executions = proto.RepeatedField(
proto.MESSAGE,
number=2,
message=execution.Execution,
)
events = proto.RepeatedField(
proto.MESSAGE,
number=3,
message=event.Event,
)
__all__ = tuple(sorted(__protobuf__.manifest))

# ---- pirates/leveleditor/worldData/SwampAreaA.py (itsyaboyrocket/pirates, BSD-3-Clause) ----
# uncompyle6 version 3.2.0
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.14 (v2.7.14:84471935ed, Sep 16 2017, 20:19:30) [MSC v.1500 32 bit (Intel)]
# Embedded file name: pirates.leveleditor.worldData.SwampAreaA
from pandac.PandaModules import Point3, VBase3
objectStruct = {'Objects': {'1152910301.05sdnaik': {'Type': 'Island Game Area', 'Name': 'SwampAreaA', 'File': '', 'Objects': {'1152839242.37jubutler': {'Type': 'Townsperson', 'Category': 'Gypsy', 'DNA': '1152839242.37jubutler', 'Hpr': VBase3(-162.596, 0.0, 0.0), 'Pos': Point3(-101.512, -18.535, 1.149), 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'Start State': 'Idle', 'Team': 'Villager'}, '1153424527.2sdnaik': {'Type': 'Locator Node', 'Name': 'portal_interior_1', 'Hpr': VBase3(-7.207, 0.0, 0.0), 'Pos': Point3(-228.544, -32.226, 9.648), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1153424527.2sdnaik0': {'Type': 'Locator Node', 'Name': 'portal_interior_2', 'Hpr': VBase3(172.793, 0.0, 0.0), 'Pos': Point3(350.771, 207.569, 9.648), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1153424659.83sdnaik': {'Type': 'Spawn Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Pos': Point3(-62.013, -40.396, -0.376), 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Skeleton', 'Team': '1'}, '1153424672.19sdnaik': {'Type': 'Spawn Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Pos': Point3(-41.646, -78.756, 0.006), 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Skeleton', 'Team': '1'}, '1153424709.64sdnaik': {'Type': 'Spawn Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Pos': Point3(207.369, 82.797, 13.753), 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Skeleton', 'Team': '1'}, '1153424723.64sdnaik': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(193.97, 103.772, 13.735), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1153424726.66sdnaik': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(215.256, 102.547, 13.754), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1153424744.5sdnaik': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(170.618, 73.537, 9.679), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1153424752.06sdnaik': {'Type': 'Movement Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(176.803, 63.891, 9.891), 'Scale': VBase3(1.0, 1.0, 1.0)}, '1153424892.06sdnaik': {'Type': 'Spawn Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Pos': Point3(264.33, 147.449, 0.414), 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Skeleton', 'Team': '1'}, '1154501246.82jubutler': {'Type': 'Spawn Node', 'Hpr': Point3(0.0, 0.0, 0.0), 'Min Population': '1', 'Pos': Point3(321.312, 179.83, -10.0), 'Scale': VBase3(1.0, 1.0, 1.0), 'Spawnables': 'Skeleton', 'Team': '1'}, '1154501366.24jubutler': {'Type': 'Tree', 'Hpr': VBase3(-4.84, 0.0, 0.0), 'Pos': Point3(291.544, 276.427, -11.094), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.4000000059604645, 0.4000000059604645, 0.4000000059604645, 1.0), 'Model': 'models/vegetation/gen_tree_e'}}, '1154501485.11jubutler': {'Type': 'Tree', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(329.141, 278.075, -10.116), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.4000000059604645, 0.4000000059604645, 0.4000000059604645, 1.0), 'Model': 'models/vegetation/gen_tree_b'}}, '1154501557.28jubutler': {'Type': 'Tree', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(184.73, 204.266, -13.826), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Color': (0.4000000059604645, 0.4000000059604645, 0.4000000059604645, 1.0), 'Model': 'models/vegetation/gen_tree_a'}}, '1154567223.28sdnaik': {'Type': 'Rock', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(895.16, 310.005, -498.915), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/props/zz_dont_use_rock_Dk_1F'}}, '1154567458.89sdnaik': {'Type': 'Tree', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(373.213, 
215.386, -3.01), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/fern_tree_a'}}, '1154567518.56sdnaik': {'Type': 'Tree', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(368.763, 206.674, -3.01), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/fern_tree_a'}}, '1154567528.52sdnaik': {'Type': 'Tree', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(368.98, 212.788, -3.01), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/fern_tree_d'}}, '1154567543.67sdnaik': {'Type': 'Tree', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(371.757, 202.476, -3.01), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/fern_tree_c'}}, '1154567555.73sdnaik': {'Type': 'Tree', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(372.529, 198.822, -3.01), 'Scale': VBase3(1.0, 1.0, 1.0), 'Visual': {'Model': 'models/vegetation/fern_tree_b'}}, '1154577668.63jubutler': {'Type': 'Creature', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(228.037, 226.146, -10.0), 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'Species': 'FlyTrap', 'Start State': 'Idle'}, '1154577691.57jubutler': {'Type': 'Creature', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(191.92, 171.456, -3.009), 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'Species': 'FlyTrap', 'Start State': 'Idle'}, '1154577700.0jubutler': {'Type': 'Creature', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(132.196, 90.191, -3.009), 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'Species': 'FlyTrap', 'Start State': 'Idle'}, '1154577724.85jubutler': {'Type': 'Creature', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(86.804, -5.279, -3.009), 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'Species': 'FlyTrap', 'Start State': 'Idle'}, '1154633129.0jubutler': {'Type': 'NavySailor', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(25.57, -27.24, -10.0), 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'Start State': 'Walk'}, '1154633139.96jubutler': {'Type': 'NavySailor', 'Hpr': Point3(0.0, 0.0, 0.0), 'Pos': Point3(42.362, -25.277, -3.01), 'Respawns': True, 'Scale': VBase3(1.0, 1.0, 1.0), 'Start State': 'Walk'}}, 'Visual': {'Model': 'models/swamps/pir_m_are_swm_a'}}}, 'Node Links': [['1153424709.64sdnaik', '1153424726.66sdnaik', 'Bi-directional'], ['1153424709.64sdnaik', '1153424723.64sdnaik', 'Bi-directional'], ['1153424709.64sdnaik', '1153424744.5sdnaik', 'Bi-directional'], ['1153424726.66sdnaik', '1153424723.64sdnaik', 'Bi-directional'], ['1153424723.64sdnaik', '1153424752.06sdnaik', 'Bi-directional'], ['1153424744.5sdnaik', '1153424752.06sdnaik', 'Bi-directional']], 'Layers': {}, 'ObjectIds': {'1152839242.37jubutler': '["Objects"]["1152910301.05sdnaik"]["Objects"]["1152839242.37jubutler"]', '1152910301.05sdnaik': '["Objects"]["1152910301.05sdnaik"]', '1153424527.2sdnaik': '["Objects"]["1152910301.05sdnaik"]["Objects"]["1153424527.2sdnaik"]', '1153424527.2sdnaik0': '["Objects"]["1152910301.05sdnaik"]["Objects"]["1153424527.2sdnaik0"]', '1153424659.83sdnaik': '["Objects"]["1152910301.05sdnaik"]["Objects"]["1153424659.83sdnaik"]', '1153424672.19sdnaik': '["Objects"]["1152910301.05sdnaik"]["Objects"]["1153424672.19sdnaik"]', '1153424709.64sdnaik': '["Objects"]["1152910301.05sdnaik"]["Objects"]["1153424709.64sdnaik"]', '1153424723.64sdnaik': '["Objects"]["1152910301.05sdnaik"]["Objects"]["1153424723.64sdnaik"]', '1153424726.66sdnaik': '["Objects"]["1152910301.05sdnaik"]["Objects"]["1153424726.66sdnaik"]', '1153424744.5sdnaik': '["Objects"]["1152910301.05sdnaik"]["Objects"]["1153424744.5sdnaik"]', '1153424752.06sdnaik': 
'["Objects"]["1152910301.05sdnaik"]["Objects"]["1153424752.06sdnaik"]', '1153424892.06sdnaik': '["Objects"]["1152910301.05sdnaik"]["Objects"]["1153424892.06sdnaik"]', '1154501246.82jubutler': '["Objects"]["1152910301.05sdnaik"]["Objects"]["1154501246.82jubutler"]', '1154501366.24jubutler': '["Objects"]["1152910301.05sdnaik"]["Objects"]["1154501366.24jubutler"]', '1154501485.11jubutler': '["Objects"]["1152910301.05sdnaik"]["Objects"]["1154501485.11jubutler"]', '1154501557.28jubutler': '["Objects"]["1152910301.05sdnaik"]["Objects"]["1154501557.28jubutler"]', '1154567223.28sdnaik': '["Objects"]["1152910301.05sdnaik"]["Objects"]["1154567223.28sdnaik"]', '1154567458.89sdnaik': '["Objects"]["1152910301.05sdnaik"]["Objects"]["1154567458.89sdnaik"]', '1154567518.56sdnaik': '["Objects"]["1152910301.05sdnaik"]["Objects"]["1154567518.56sdnaik"]', '1154567528.52sdnaik': '["Objects"]["1152910301.05sdnaik"]["Objects"]["1154567528.52sdnaik"]', '1154567543.67sdnaik': '["Objects"]["1152910301.05sdnaik"]["Objects"]["1154567543.67sdnaik"]', '1154567555.73sdnaik': '["Objects"]["1152910301.05sdnaik"]["Objects"]["1154567555.73sdnaik"]', '1154577668.63jubutler': '["Objects"]["1152910301.05sdnaik"]["Objects"]["1154577668.63jubutler"]', '1154577691.57jubutler': '["Objects"]["1152910301.05sdnaik"]["Objects"]["1154577691.57jubutler"]', '1154577700.0jubutler': '["Objects"]["1152910301.05sdnaik"]["Objects"]["1154577700.0jubutler"]', '1154577724.85jubutler': '["Objects"]["1152910301.05sdnaik"]["Objects"]["1154577724.85jubutler"]', '1154633129.0jubutler': '["Objects"]["1152910301.05sdnaik"]["Objects"]["1154633129.0jubutler"]', '1154633139.96jubutler': '["Objects"]["1152910301.05sdnaik"]["Objects"]["1154633139.96jubutler"]'}} | 1,516.5 | 8,827 | 0.652379 |
79429bcf1aeb56c9202ee557e67cec98109fd3cd | 4,135 | py | Python | paramak/parametric_components/poloidal_field_coil_case_fc.py | generein/paramak | cec738b278c285a17eaa69fc1f35ea4788204a8c | [
"MIT"
] | null | null | null | paramak/parametric_components/poloidal_field_coil_case_fc.py | generein/paramak | cec738b278c285a17eaa69fc1f35ea4788204a8c | [
"MIT"
] | null | null | null | paramak/parametric_components/poloidal_field_coil_case_fc.py | generein/paramak | cec738b278c285a17eaa69fc1f35ea4788204a8c | [
"MIT"
] | null | null | null | from typing import Optional, Tuple
from paramak import RotateStraightShape
class PoloidalFieldCoilCaseFC(RotateStraightShape):
"""Creates a casing for a rectangular poloidal field coil by building
around an existing coil (which is passed as an argument on construction).
Args:
pf_coil (paramak.PoloidalFieldCoil): a pf coil object with a set width,
height and center point.
casing_thickness (float): the thickness of the coil casing (cm).
"""
def __init__(
self,
pf_coil,
casing_thickness,
color: Tuple[float, float, float, Optional[float]] = (1.0, 1.0, 0.498),
**kwargs
):
super().__init__(color=color, **kwargs)
self.pf_coil = pf_coil
self.center_point = pf_coil.center_point
self.height = pf_coil.height
self.width = pf_coil.width
self.casing_thickness = casing_thickness
@property
def center_point(self):
return self._center_point
@center_point.setter
def center_point(self, value):
self._center_point = value
@property
def height(self):
return self._height
@height.setter
def height(self, height):
self._height = height
@property
def width(self):
return self._width
@width.setter
def width(self, width):
self._width = width
def find_points(self):
"""Finds the XZ points joined by straight connections that describe
the 2D profile of the poloidal field coil case shape."""
points = [
(
self.center_point[0] + self.width / 2.0,
self.center_point[1] + self.height / 2.0,
), # upper right
(
self.center_point[0] + self.width / 2.0,
self.center_point[1] - self.height / 2.0,
), # lower right
(
self.center_point[0] - self.width / 2.0,
self.center_point[1] - self.height / 2.0,
), # lower left
(
self.center_point[0] - self.width / 2.0,
self.center_point[1] + self.height / 2.0,
), # upper left
(
self.center_point[0] + self.width / 2.0,
self.center_point[1] + self.height / 2.0,
), # upper right
(
self.center_point[0] + (self.casing_thickness + self.width / 2.0),
self.center_point[1] + (self.casing_thickness + self.height / 2.0),
),
(
self.center_point[0] + (self.casing_thickness + self.width / 2.0),
self.center_point[1] - (self.casing_thickness + self.height / 2.0),
),
(
self.center_point[0] - (self.casing_thickness + self.width / 2.0),
self.center_point[1] - (self.casing_thickness + self.height / 2.0),
),
(
self.center_point[0] - (self.casing_thickness + self.width / 2.0),
self.center_point[1] + (self.casing_thickness + self.height / 2.0),
),
(
self.center_point[0] + (self.casing_thickness + self.width / 2.0),
self.center_point[1] + (self.casing_thickness + self.height / 2.0),
),
]
self.points = points
def create_solid(self):
# creates a small box that surrounds the geometry
inner_box = self.pf_coil
# creates a large box that surrounds the smaller box
outer_box = RotateStraightShape(
points=self.points[5:9],
rotation_axis=inner_box.rotation_axis,
rotation_angle=inner_box.rotation_angle,
azimuth_placement_angle=inner_box.azimuth_placement_angle,
workplane=inner_box.workplane,
cut=self.cut,
intersect=self.intersect,
union=self.union,
)
# subtracts the two boxes to leave a hollow box
new_shape = outer_box.solid.cut(inner_box.solid)
self.solid = new_shape
return new_shape
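
# Usage sketch (parameter values are illustrative, not taken from paramak docs):
#   coil = paramak.PoloidalFieldCoil(height=50, width=50, center_point=(800, 50))
#   case = PoloidalFieldCoilCaseFC(pf_coil=coil, casing_thickness=5)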

# ---- gitogether/__init__.py (nklapste/gitogether, MIT) ----
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""gitogether
Scripts:
+ :mod:`.__main__` - argparse entry point
Module:
"""
__version__ = (0, 0, 0)

# ---- lib/taniumpy/object_types/cache_info.py (c1rdan/pytan, MIT) ----
# Copyright (c) 2015 Tanium Inc
#
# Generated from console.wsdl version 0.0.1
#
#
from .base import BaseType
class CacheInfo(BaseType):
_soap_tag = 'cache_info'
def __init__(self):
BaseType.__init__(
self,
simple_properties={'cache_id': int,
'page_row_count': int,
'filtered_row_count': int,
'cache_row_count': int,
'expiration': str},
complex_properties={'errors': ErrorList},
list_properties={},
)
self.cache_id = None
self.page_row_count = None
self.filtered_row_count = None
self.cache_row_count = None
self.expiration = None
self.errors = None
from .error_list import ErrorList

# ---- loss_fn/detection_loss_fns/__init__.py (KelOdgSmile/ml-cvnets, AML) ----
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2020 Apple Inc. All Rights Reserved.
#
import importlib
import os
SUPPORTED_DETECTION_LOSS_FNS = []
def register_detection_loss_fn(name):
def register_fn(fn):
if name in SUPPORTED_DETECTION_LOSS_FNS:
raise ValueError("Cannot register duplicate detection loss function ({})".format(name))
SUPPORTED_DETECTION_LOSS_FNS.append(name)
return fn
return register_fn
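
# Usage sketch (hypothetical loss name and class; the decorator only records
# the name, and the directory scan below imports the decorated modules):
#   @register_detection_loss_fn(name="my_ssd_loss")
#   class MyDetectionLoss(...):
#       ...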
# automatically import different loss functions
loss_fn_dir = os.path.dirname(__file__)
for file in os.listdir(loss_fn_dir):
path = os.path.join(loss_fn_dir, file)
if (
not file.startswith("_")
and not file.startswith(".")
and (file.endswith(".py") or os.path.isdir(path))
):
model_name = file[: file.find(".py")] if file.endswith(".py") else file
module = importlib.import_module("loss_fn.detection_loss_fns." + model_name)
# import these after loading loss_fn names to avoid looping
from loss_fn.detection_loss_fns.ssd_multibox_loss import SSDLoss

# ---- util/DataBank.py (F-Stuckmann/PATARA, MIT) ----
import copy
import csv
import os, glob
from typing import List
import xmltodict
import Constants
from Constants import *
from util.Instruction import Instruction
from util.Processor import Processor
from util.TestInstruction import TestInstruction
class DataBank:
class __Databank:
def __init__(self):
self._dict = {}
self._readImmediateAssembly()
self.read_xml()
self._read_comparison_xml()
self._read_init_register()
self._readPostInit()
def _isInstruction(self, instr):
if instr == COMPARISON:
return False
if instr == INSTRUCTION_DEFAULT_MODES:
return False
if instr == INIT:
return False
if instr == POST_INIT:
return False
if instr == IMMEDIATE_ASSEMBLY:
return False
if instr == CONDITIONAL_READ:
return False
return True
def __parseInstructions(self, dictInstruction):
result = []
instructions = dictInstruction
if not isinstance(dictInstruction, list):
instructions = [dictInstruction]
# instructions = [dictInstruction] if isinstance(dictInstruction, str) else dictInstruction
for instr in instructions:
i = Instruction(instr)
mandatoryFeatures = {IMMEDIATE: IMMEDIATE_LONG}
if isinstance(instr, dict):
if instr[MANDATORY_FEATURE]:
mandatoryFeatures = {}
for key, value in instr[MANDATORY_FEATURE].items():
mandatoryFeatures[key] = Processor().getProcessorFeatureList(key)[value]
i.setGlobalMandatoryFeatures(mandatoryFeatures)
result.append(i)
return result
def _readImmediateAssembly(self):
self.immediateAssembly = None
file = open(Constants.PATH_INSTRUCTION)
xml_content = xmltodict.parse(file.read())
for group in xml_content[INST_LIST]:
if group == IMMEDIATE_ASSEMBLY:
immidateVariants = xml_content[INST_LIST][IMMEDIATE_ASSEMBLY]
self.immediateAssembly = {}
for imm in immidateVariants:
assemlby = Processor().getProcessorFeatureAssembly(IMMEDIATE, imm)
self.immediateAssembly[assemlby] = {}
if INSTR in xml_content[INST_LIST][IMMEDIATE_ASSEMBLY][imm]:
instr = xml_content[INST_LIST][IMMEDIATE_ASSEMBLY][imm][INSTR]
instructions = self.__parseInstructions(instr)
self.immediateAssembly[assemlby][vMutable] = instructions
else:
for simd in xml_content[INST_LIST][IMMEDIATE_ASSEMBLY][imm]:
instr = xml_content[INST_LIST][IMMEDIATE_ASSEMBLY][imm][simd][INSTR]
instructions = self.__parseInstructions(instr)
self.immediateAssembly[assemlby][simd] = instructions
# instruction = [instr] if isinstance(instr, str) else instr
# for instr in instruction:
# i = Instruction_v1(instr)
# i.setGlobalMandatoryFeatures({IMMEDIATE: IMMEDIATE_LONG})
# self.immediateAssembly[assemlby].append(i)
def read_xml(self):
file = open(Constants.PATH_INSTRUCTION)
self.xml_content = xmltodict.parse(file.read())
self.testinstruction_list = []
self.instructionList = []
# generate Characteristics Dictionary about Instructions
for instr in self.xml_content[INST_LIST]:
if not self._isInstruction(instr):
continue
self._addDictEntry(instr)
self.instructionList.append(instr)
# generate TestInstructions
for instr in self.xml_content[INST_LIST]:
if not self._isInstruction(instr):
continue
temp = self.xml_content[INST_LIST][instr]
original = temp[INSTR]
original = [original] if isinstance(original, str) else original
reverse = self._extractReverseSIMD(temp)
specialization = {}
if SPECIALIZATION in self.xml_content[INST_LIST][instr]:
specialization = self.xml_content[INST_LIST][instr][SPECIALIZATION]
testInstruction = TestInstruction(original, reverse, instr, specialization, self.instructionList, copy.deepcopy(self.immediateAssembly))
self.testinstruction_list.append(testInstruction)
file.close()
def _addDictEntry(self, instr):
self._dict[instr] = copy.deepcopy(self.xml_content[INST_LIST][instr])
del self._dict[instr][INSTR]
del self._dict[instr][REVERSE]
def _extractReverseSIMD(self, temp):
reverse = temp[REVERSE]
reverseDict = {}
if isinstance(reverse, dict):
simdVersions = Processor().getSIMD()
for (simdKey, simdValue) in simdVersions.items():
if simdKey in reverse:
reverseDict[simdValue] = [reverse[simdKey][REVERSE]] if isinstance(reverse[simdKey][REVERSE],
str) else reverse[simdKey][
REVERSE]
else:
reverseList = [reverse] if isinstance(reverse, str) else reverse
reverseDict[vMutable] = reverseList
return reverseDict
def _read_comparison_xml(self) -> list:
"""Reads xml to parse the comparison code."""
file = open(Constants.PATH_INSTRUCTION)
parser = xmltodict.parse(file.read())
return_list = parser[INST_LIST][COMPARISON][INSTR]
self._comparisonCode = return_list
def _read_init_register(self) -> list:
file = open(Constants.PATH_INSTRUCTION)
parser = xmltodict.parse(file.read())
return_list = parser[INST_LIST][INIT][REGISTER]
# guarantee a list
if not isinstance(return_list, list):
return_list = [return_list]
self._listInitRegister = return_list
def _readPostInit(self):
file = open(Constants.PATH_INSTRUCTION)
parser = xmltodict.parse(file.read())
return_list = parser[INST_LIST][POST_INIT][INSTR]
self._listPostInit = return_list
instance = None
def __init__(self):
if not DataBank.instance:
DataBank.instance = DataBank.__Databank()
def printdatabase(self):
for k, v in self.instance._dict.items():
print("-------------- %s" % (k))
print(v)
print(Processor().getAvailableInstructionFeatures(k))
# find TestInstruction
for testInstr in self.instance.testinstruction_list:
if k == testInstr.getInstruction():
# print Instructions
for instruction in testInstr.getAssemblyInstructions():
print(instruction.getRawString())
print()
for simdVariant in testInstr.getReversiAssemblyInstructions():
print("SIMD: " ,simdVariant)
for instruction in testInstr.getReversiAssemblyInstructions()[simdVariant]:
print(instruction.getRawString())
print("\n\n")
def getTestInstructions(self) -> List[TestInstruction]:
return self.instance.testinstruction_list
def getComparisonCode(self):
return self.instance._comparisonCode
def getInitRegister(self):
return self.instance._listInitRegister
def getSpecificTestInstruction(self, testInstructionName):
for testInstruction in self.instance.testinstruction_list:
if testInstruction.getInstruction() == testInstructionName:
return testInstruction
return None
def getPostInitCode(self):
return self.instance._listPostInit
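
# Singleton sketch: every DataBank() call shares the single inner __Databank,
# so the instruction XML is parsed only once per process. For example:
#   DataBank().getTestInstructions() is DataBank().getTestInstructions()  # True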

# ---- dosirak/middleware.py (drexly/tonginBlobStore, BSD-3-Clause) ----
class DisableCSRF(object):
def process_request(self, request):
        setattr(request, '_dont_enforce_csrf_checks', True)

# ---- test/knn_test.py (haomingdouranggouqil/cuml, MIT) ----
from mlcu.ml.KNN import *
from sklearn import datasets  # datasets module
from sklearn.model_selection import train_test_split  # split training and test data
from sklearn.neighbors import KNeighborsClassifier  # k-nearest-neighbor classifier module
from sklearn.neighbors import KNeighborsRegressor
import time
if __name__ == '__main__':
    # sample training data
    train_data = [[1, 1, 1], [2, 2, 2], [10, 10, 10], [13, 13, 13]]
    # numeric labels
    train_label = [1, 2, 30, 60]
    # non-numeric labels
    #train_label = ['aa', 'aa', 'bb', 'bb']
    # test data
    test_data = [[3, 2, 4], [9, 13, 11], [10, 20, 10]]
    # default: classification
    knn = KNN()
    # knn regression task
    #knn = KNN(task_type = 'regression')
    # train
    knn.fit(train_data, train_label)
    # predict
    preds = knn.predict(test_data, k=2)
print(preds)
'''
    # iris test
    # prepare data
    loaded_data = datasets.load_iris() # load the iris dataset
    X = loaded_data.data # X has 4 attributes
    y = loaded_data.target # y has 3 classes
'''
'''
    # generate large-scale test data
    X = np.random.rand(100000,4).tolist()
    y = np.random.randint(1,4,size=(100000)).tolist()
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3) # test set is 30%
    # at this data size, the cuml model runs in 12 s; sklearn with the same algorithm (brute-force search) runs in 31 s; the numpy model runs in 192 s
'''
'''
#cuml
knn = KNN(task_type = 'regression')
knn.fit(X_train, y_train)
start_time = time.time()
preds = knn.predict(X_test)
end_time = time.time()
consum_time = end_time-start_time
print(consum_time)
'''
'''
    knn = KNeighborsRegressor(algorithm = 'brute') # k-nearest-neighbor regressor
    knn.fit(X_train, y_train) # fit: learn from the training data
start_time = time.time()
knn.predict(X_test)
end_time = time.time()
consum_time = end_time-start_time
print(consum_time)
'''

# ---- records_mover/db/driver.py (cwegrzyn/records-mover, Apache-2.0) ----
from sqlalchemy.schema import CreateTable
from ..records.records_format import BaseRecordsFormat
from .loader import LoaderFromFileobj, LoaderFromRecordsDirectory
from .unloader import Unloader
import logging
import sqlalchemy
from sqlalchemy import MetaData
from sqlalchemy.schema import Table
from records_mover.db.quoting import quote_group_name, quote_user_name, quote_schema_and_table
from abc import ABCMeta, abstractmethod
from records_mover.records import RecordsSchema
from typing import Union, Dict, List, Tuple, Optional, TYPE_CHECKING
if TYPE_CHECKING:
from typing_extensions import Literal # noqa
logger = logging.getLogger(__name__)
class DBDriver(metaclass=ABCMeta):
def __init__(self,
db: Union[sqlalchemy.engine.Engine,
sqlalchemy.engine.Connection], **kwargs) -> None:
self.db = db
self.db_engine = db.engine
self.meta = MetaData()
def has_table(self, schema: str, table: str) -> bool:
return self.db.dialect.has_table(self.db, table_name=table, schema=schema)
def table(self,
schema: str,
table: str) -> Table:
return Table(table, self.meta, schema=schema, autoload=True, autoload_with=self.db_engine)
def schema_sql(self,
schema: str,
table: str) -> str:
"""Generate DDL which will recreate the specified table and return it
as a string. Returns None if database or permissions don't
support operation.
"""
# http://docs.sqlalchemy.org/en/latest/core/reflection.html
table_obj = self.table(schema, table)
return str(CreateTable(table_obj, bind=self.db))
def varchar_length_is_in_chars(self) -> bool:
"""True if the 'n' in VARCHAR(n) is represented in natural language
characters, rather than in post-encoding bytes. This varies by database -
override it to control"""
return False
def set_grant_permissions_for_groups(self, schema_name: str, table: str,
groups: Dict[str, List[str]],
db: Union[sqlalchemy.engine.Engine,
sqlalchemy.engine.Connection]) -> None:
schema_and_table: str = quote_schema_and_table(self.db.engine, schema_name, table)
for perm_type in groups:
groups_list = groups[perm_type]
for group in groups_list:
group_name: str = quote_group_name(self.db.engine, group)
if not perm_type.isalpha():
raise TypeError("Please make sure your permission types"
" are an acceptable value.")
perms_sql = f'GRANT {perm_type} ON TABLE {schema_and_table} TO {group_name}'
db.execute(perms_sql)
def set_grant_permissions_for_users(self, schema_name: str, table: str,
users: Dict[str, List[str]],
db: Union[sqlalchemy.engine.Engine,
sqlalchemy.engine.Connection]) -> None:
schema_and_table: str = quote_schema_and_table(self.db.engine, schema_name, table)
for perm_type in users:
user_list = users[perm_type]
for user in user_list:
user_name: str = quote_user_name(self.db.engine, user)
if not perm_type.isalpha():
raise TypeError("Please make sure your permission types"
" are an acceptable value.")
perms_sql = f'GRANT {perm_type} ON TABLE {schema_and_table} TO {user_name}'
db.execute(perms_sql)
def supports_time_type(self) -> bool:
return True
def type_for_date_plus_time(self, has_tz: bool=False) -> sqlalchemy.sql.sqltypes.DateTime:
"""Different DB vendors have different names for a date, a time, and
an optional timezone"""
return sqlalchemy.sql.sqltypes.DateTime(timezone=has_tz)
def type_for_integer(self,
min_value: Optional[int],
max_value: Optional[int]) -> sqlalchemy.types.TypeEngine:
"""Find correct integral column type to fit data matching the given
min and max integer values"""
logger.warning("Using default integer type")
return sqlalchemy.sql.sqltypes.Integer()
def type_for_fixed_point(self,
precision: int,
scale: int) -> sqlalchemy.sql.sqltypes.Numeric:
"""Find correct decimal column type to fit data from the given fixed point type"""
return sqlalchemy.sql.sqltypes.Numeric(precision=precision,
scale=scale)
@abstractmethod
def loader(self) -> Optional[LoaderFromRecordsDirectory]:
...
@abstractmethod
def loader_from_fileobj(self) -> Optional[LoaderFromFileobj]:
...
@abstractmethod
def unloader(self) -> Optional[Unloader]:
...
def type_for_floating_point(self,
fp_total_bits: int,
fp_significand_bits: int) -> sqlalchemy.sql.sqltypes.Numeric:
"""Find correct decimal column type to fit data from the given floating point type"""
        # SQL spec (at least in the publicly available pre-release
# 1992 version) declares the meaning of the precision in type
# "FLOAT(precision)" to be defined by the database vendor:
#
# 49)Subclause 6.1, "<data type>": The binary precision of a
# data type defined as FLOAT for each value specified by
# <precision> is implementation-defined.
#
# http://www.contrib.andrew.cmu.edu/~shadow/sql/sql1992.txt
#
        # That said, at least Postgres, MySQL and Oracle agree that
        # the 'precision' in their case means the significand bits of
        # either an IEEE or Intel-flavored float, so let's default to
        # that:
#
# https://dev.mysql.com/doc/refman/8.0/en/floating-point-types.html#targetText=MySQL%20permits%20a%20nonstandard%20syntax,look%20like%20%2D999.9999%20when%20displayed.
# https://docs.oracle.com/javadb/10.8.3.0/ref/rrefsqlj27281.html
# https://www.postgresql.org/docs/10/datatype-numeric.html
return sqlalchemy.sql.sqltypes.Float(precision=fp_significand_bits)
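
    # Illustrative mapping (an assumption about IEEE formats, not a
    # records-mover guarantee): an IEEE float64 has fp_total_bits=64 and
    # fp_significand_bits=53, so type_for_floating_point(64, 53) yields
    # sqlalchemy Float(precision=53).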
def make_column_name_valid(self, colname: str) -> str:
return colname
def integer_limits(self,
type_: sqlalchemy.types.Integer) ->\
Optional[Tuple[int, int]]:
"""returns the integer limits (min and max value as a tuple) of the
given column type for this database type. These are
represented as a Python int, which is of arbitrary length.
Since SQLAlchemy doesn't track this detail, if details of this
database type aren't known in the DBDriver hierarchy, None
will be returned.
"""
logger.warning(f"{type(self)} does not know how to gather limits of {type(type_)}")
return None
def fp_constraints(self,
type_: sqlalchemy.types.Float) ->\
Optional[Tuple[int, int]]:
"""returns the floating point representation (total floating point bits minus padding
and significand bits as a tuple) of the given column type for
this database type.
Since SQLAlchemy doesn't track this detail, if details of this
database type aren't known in the DBDriver hierarchy, None
will be returned.
"""
logger.warning(f"{type(self)} does not know how to gather limits of {type(type_)}")
return None
def fixed_point_constraints(self,
type_: sqlalchemy.types.Numeric) ->\
Optional[Tuple[int, int]]:
"""returns the fixed-point representation - total number of digits
(precision) and number of those digits to the right of the
decimal point (scale) - as a tuple for the given column type
for this database type.
"""
if type_.precision is None or type_.scale is None:
return None
return (type_.precision, type_.scale)
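    # Illustrative contract, assuming a concrete driver instance `driver`:
    #   driver.fixed_point_constraints(sqlalchemy.types.Numeric(precision=10, scale=2))
    #       -> (10, 2)
    #   driver.fixed_point_constraints(sqlalchemy.types.Numeric())
    #       -> None   (precision/scale unset on the type)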
def tweak_records_schema_for_load(self,
records_schema: RecordsSchema,
records_format: BaseRecordsFormat) -> RecordsSchema:
return records_schema
def tweak_records_schema_after_unload(self,
records_schema: RecordsSchema,
records_format: BaseRecordsFormat) -> RecordsSchema:
return records_schema
class GenericDBDriver(DBDriver):
def loader_from_fileobj(self) -> None:
return None
def loader(self) -> None:
return None
def unloader(self) -> Optional[Unloader]:
return None
| 44.11165 | 175 | 0.617586 |
7942a1cbefdf6550f59be223ea7864d115e4f3df | 2,989 | py | Python | find_all_sites.py | Pudit/FarewellSI126 | 91e92d869a6930797e8b05ab9a6920af901cde62 | ["MIT"] | null | null | null | find_all_sites.py | Pudit/FarewellSI126 | 91e92d869a6930797e8b05ab9a6920af901cde62 | ["MIT"] | null | null | null | find_all_sites.py | Pudit/FarewellSI126 | 91e92d869a6930797e8b05ab9a6920af901cde62 | ["MIT"] | null | null | null |
#import libraries
from bs4 import BeautifulSoup
from urllib.request import urlopen
import urllib.error
import pandas as pd
#define func to find subfolder
def find_folder(student_id: int):
    if student_id < 1:
        return None
    elif student_id <= 50:
        return "001-050"
    elif student_id <= 100:
        return "051-100"
    elif student_id <= 150:
        return "101-150"
    elif student_id <= 200:
        return "151-200"
    elif student_id <= 250:
        return "201-250"
    elif student_id <= 300:
        return "251-300"
    elif student_id <= 326:
        return "301-326"
    else:
        return None
# define func to get url
def url_si(student_id):
return f"https://sites.google.com/view/seniorfarewell2021/mirror/{find_folder(i)}/{i:03d}"
# create blank lists to collect URLs and HTTP response codes
urllist = list()
checkerlist = list()
for i in range(326 + 1):
urllist.append(url_si(i))
urllist[0] = ""
#check whether each person's page exists
for i in range(327):
try:
urlopen(url_si(i))
    except urllib.error.HTTPError:
checkerlist.append(404)
else:
checkerlist.append(200)
# find each page's title and the real Google Form link
namelist = list()
formlist = list()
for i in range(327):
if checkerlist[i] == 200:
        bsObj = BeautifulSoup(urlopen(urllist[i]), "html.parser")
title = bsObj.find("h1").getText()
gform = bsObj.find_all("a", href=True)[-2]['href']
namelist.append(title)
formlist.append(gform)
else:
namelist.append("NotFound 404")
formlist.append("404 Not Found")
#Check GSX, sent to my high-school classmates to verify
#Because of duplicated nicknames, please check manually
is_gsx = [False] * 327 #0 to 326 people in SI126 code
is_gsx[11] = True # Max
is_gsx[12] = True # Film
is_gsx[23] = True # Pea
is_gsx[26] = True # Poom
is_gsx[28] = True # Win Sukrit
is_gsx[33] = True # Krit Kitty
is_gsx[37] = True # Ball
is_gsx[59] = True # Ji
is_gsx[61] = True # Tong
is_gsx[104] = True # Now
is_gsx[130] = True # Pond
is_gsx[139] = True # Thames
is_gsx[142] = True # Win Nawin
is_gsx[147] = True # Jan
is_gsx[164] = True # Mhee
is_gsx[185] = True # Jane Glasses
is_gsx[200] = True # Ana
is_gsx[209] = True # Jane Juice
is_gsx[232] = True # Fangpao
is_gsx[277] = True # Guggug
is_gsx[285] = True # Ken Whale
is_gsx[290] = True # Bell Tao
#create pandas dataframe from lists
si126_df = pd.DataFrame({
'url': urllist,
'formlink':formlist,
'title' : namelist,
'status': checkerlist,
"GSX" : is_gsx
})
#save dataframe to csv
si126_df.to_csv("si126_namelist.csv")
#clean up some minor text issues manually: add missing names and strip stray text (done in a text editor)
#read the csv file back after the manual cleaning
si126_df = pd.read_csv("si126_namelist.csv")
#find his/her nickname
si126_df["nickname"] = si126_df.title.str.split(" ",expand = True,n=1)[0]
#export to csv again
si126_df.to_csv("si126_namelist.csv")
| 24.104839 | 94 | 0.653061 |
7942a1e66c4f7555505395cbeac8d199485809d6 | 1,831 | py | Python | setup.py | thehomebrewnerd/featuretools | c704cea13e7ed5332712b053e80de360d54f1429 | ["BSD-3-Clause"] | 1 | 2019-05-31T04:40:03.000Z | 2019-05-31T04:40:03.000Z | setup.py | BillyOtieno/featuretools | 5a7e09edf02b463ad903c6d8c40daa86f208c0c0 | ["BSD-3-Clause"] | null | null | null | setup.py | BillyOtieno/featuretools | 5a7e09edf02b463ad903c6d8c40daa86f208c0c0 | ["BSD-3-Clause"] | null | null | null |
from os import path
from setuptools import find_packages, setup
from setuptools.command.build_ext import build_ext as _build_ext
dirname = path.abspath(path.dirname(__file__))
with open(path.join(dirname, 'README.md')) as f:
long_description = f.read()
# Bootstrap numpy install
class build_ext(_build_ext):
def finalize_options(self):
_build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process:
__builtins__.__NUMPY_SETUP__ = False
import numpy
self.include_dirs.append(numpy.get_include())
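# Illustrative note, assuming a standard pip workflow: with the cmdclass hook
# above, numpy's include dirs are only resolved at build time, so
#   pip install .
# can succeed even though numpy is not importable when setup() is first parsed.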
setup(
name='featuretools',
version='0.7.1',
packages=find_packages(),
description='a framework for automated feature engineering',
url='http://featuretools.com',
license='BSD 3-clause',
author='Feature Labs, Inc.',
author_email='[email protected]',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7'
],
install_requires=open('requirements.txt').readlines(),
setup_requires=open('setup-requirements.txt').readlines(),
python_requires='>=2.7, <4',
cmdclass={'build_ext': build_ext},
test_suite='featuretools/tests',
tests_require=open('test-requirements.txt').readlines(),
keywords='feature engineering data science machine learning',
include_package_data=True,
entry_points={
'console_scripts': [
'featuretools = featuretools.__main__:cli'
]
},
long_description=long_description,
long_description_content_type='text/markdown'
)
| 32.696429 | 71 | 0.676679 |
7942a26f6981b6ae3ec7be71fb36e552b3d4122c | 9,546 | py | Python | Packs/Whois/Integrations/Whois/Whois_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | ["MIT"] | null | null | null | Packs/Whois/Integrations/Whois/Whois_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | ["MIT"] | null | null | null | Packs/Whois/Integrations/Whois/Whois_test.py | diCagri/content | c532c50b213e6dddb8ae6a378d6d09198e08fc9f | ["MIT"] | null | null | null |
import datetime
import Whois
import demistomock as demisto
import pytest
import subprocess
import time
import tempfile
import sys
from CommonServerPython import DBotScoreReliability
import json
INTEGRATION_NAME = 'Whois'
@pytest.fixture(autouse=True)
def handle_calling_context(mocker):
mocker.patch.object(demisto, 'callingContext', {'context': {'IntegrationBrand': INTEGRATION_NAME}})
def load_test_data(json_path):
with open(json_path) as f:
return json.load(f)
def assert_results_ok():
assert demisto.results.call_count == 1
# call_args is tuple (args list, kwargs). we only need the first one
results = demisto.results.call_args[0]
assert len(results) == 1
assert results[0] == 'ok'
def test_test_command(mocker):
mocker.patch.object(demisto, 'results')
mocker.patch.object(demisto, 'command', return_value='test-module')
Whois.main()
assert_results_ok()
@pytest.mark.parametrize(
'query,expected',
[("app.paloaltonetwork.com", "paloaltonetwork.com"),
("test.this.google.co.il", "google.co.il"),
("app.XSOAR.test", "app.XSOAR.test")]
)
def test_get_domain_from_query(query, expected):
from Whois import get_domain_from_query
assert get_domain_from_query(query) == expected
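# Rough sketch of the behaviour exercised above, assuming suffix-list-style
# parsing inside get_domain_from_query(): a query collapses to its registered
# domain ("test.this.google.co.il" -> "google.co.il"), while a name with an
# unknown suffix ("app.XSOAR.test") passes through unchanged.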
def test_socks_proxy_fail(mocker):
mocker.patch.object(demisto, 'params', return_value={'proxy_url': 'socks5://localhost:1180'})
mocker.patch.object(demisto, 'command', return_value='test-module')
mocker.patch.object(demisto, 'results')
with pytest.raises(SystemExit) as err:
Whois.main()
assert err.type == SystemExit
assert demisto.results.call_count == 1
# call_args is tuple (args list, kwargs). we only need the first one
results = demisto.results.call_args[0]
assert len(results) == 1
assert "Couldn't connect with the socket-server" in results[0]['Contents']
def test_socks_proxy(mocker, request):
mocker.patch.object(demisto, 'params', return_value={'proxy_url': 'socks5h://localhost:9980'})
mocker.patch.object(demisto, 'command', return_value='test-module')
mocker.patch.object(demisto, 'results')
tmp = tempfile.TemporaryFile('w+')
microsocks = './test_data/microsocks_darwin' if 'darwin' in sys.platform else './test_data/microsocks'
process = subprocess.Popen([microsocks, "-p", "9980"], stderr=subprocess.STDOUT, stdout=tmp)
def cleanup():
process.kill()
request.addfinalizer(cleanup)
time.sleep(1)
Whois.main()
assert_results_ok()
tmp.seek(0)
assert 'connected to' in tmp.read() # make sure we went through microsocks
TEST_QUERY_RESULT_INPUT = [
(
{'contacts': {'admin': None, 'billing': None, 'registrant': None, 'tech': None},
'raw': ['NOT FOUND\n>>> Last update of WHOIS database: 2020-05-07T13:55:34Z <<<']},
'rsqupuo.info',
DBotScoreReliability.B,
False
),
(
{'contacts': {'admin': None, 'billing': None, 'registrant': None, 'tech': None},
'raw': ['No match for "BLABLA43213422342AS.COM".>>> Last update of whois database: 2020-05-20T08:39:17Z <<<']},
"BLABLA43213422342AS.COM",
DBotScoreReliability.B, False
),
(
{'status': ['clientUpdateProhibited (https://www.icann.org/epp#clientUpdateProhibited)'],
'updated_date': [datetime.datetime(2019, 9, 9, 8, 39, 4)],
'contacts': {'admin': {'country': 'US', 'state': 'CA', 'name': 'Google LLC'},
'tech': {'organization': 'Google LLC', 'state': 'CA', 'country': 'US'},
'registrant': {'organization': 'Google LLC', 'state': 'CA', 'country': 'US'}, 'billing': None},
'nameservers': ['ns1.google.com', 'ns4.google.com', 'ns3.google.com', 'ns2.google.com'],
'expiration_date': [datetime.datetime(2028, 9, 13, 0, 0), datetime.datetime(2028, 9, 13, 0, 0)],
'emails': ['[email protected]', '[email protected]'],
'raw': ['Domain Name: google.com\nRegistry Domain ID: 2138514_DOMAIN_COM-VRSN'],
'creation_date': [datetime.datetime(1997, 9, 15, 0, 0)], 'id': ['2138514_DOMAIN_COM-VRSN']},
'google.com',
DBotScoreReliability.B,
True
),
(
{'contacts': {'admin': None, 'billing': None, 'registrant': None, 'tech': None}},
'rsqupuo.info',
DBotScoreReliability.B,
False
),
(
{'status': ['clientUpdateProhibited (https://www.icann.org/epp#clientUpdateProhibited)'],
'updated_date': [datetime.datetime(2019, 9, 9, 8, 39, 4)],
'contacts': {'admin': {'country': 'US', 'state': 'CA', 'name': 'Google LLC'},
'tech': {'organization': 'Google LLC', 'state': 'CA', 'country': 'US'},
'registrant': {'organization': 'Google LLC', 'state': 'CA', 'country': 'US'}, 'billing': None},
'nameservers': ['ns1.google.com', 'ns4.google.com', 'ns3.google.com', 'ns2.google.com'],
'expiration_date': [datetime.datetime(2028, 9, 13, 0, 0), datetime.datetime(2028, 9, 13, 0, 0)],
'emails': ['[email protected]', '[email protected]'],
'raw': 'Domain Name: google.com\nRegistry Domain ID: 2138514_DOMAIN_COM-VRSN',
'creation_date': [datetime.datetime(1997, 9, 15, 0, 0)], 'id': ['2138514_DOMAIN_COM-VRSN']},
'google.com',
DBotScoreReliability.B,
True
),
(
{'status': ['clientUpdateProhibited (https://www.icann.org/epp#clientUpdateProhibited)'],
'updated_date': [datetime.datetime(2019, 9, 9, 8, 39, 4)],
'contacts': {'admin': {'country': 'US', 'state': 'CA', 'name': 'Google LLC'},
'tech': {'organization': 'Google LLC', 'state': 'CA', 'country': 'US'},
'registrant': {'organization': 'Google LLC', 'state': 'CA', 'country': 'US'}, 'billing': None},
'nameservers': ['ns1.google.com', 'ns4.google.com', 'ns3.google.com', 'ns2.google.com'],
'expiration_date': [datetime.datetime(2028, 9, 13, 0, 0), datetime.datetime(2028, 9, 13, 0, 0)],
'emails': ['[email protected]', '[email protected]'],
'raw': {'data': 'Domain Name: google.com\nRegistry Domain ID: 2138514_DOMAIN_COM-VRSN'},
'creation_date': [datetime.datetime(1997, 9, 15, 0, 0)], 'id': ['2138514_DOMAIN_COM-VRSN']},
'google.com',
DBotScoreReliability.B,
True
),
(
{'contacts': {'admin': None, 'billing': None, 'registrant': None, 'tech': None},
'raw': {'data': 'Domain Name: google.com\nRegistry Domain ID: 2138514_DOMAIN_COM-VRSN'}},
'rsqupuo.info',
DBotScoreReliability.B,
True
),
]
@pytest.mark.parametrize('whois_result, domain, reliability, expected', TEST_QUERY_RESULT_INPUT)
def test_query_result(whois_result, domain, reliability, expected):
from Whois import create_outputs
md, standard_ec, dbot_score = create_outputs(whois_result, domain, reliability)
assert standard_ec['Whois']['QueryResult'] == expected
assert dbot_score.get('DBotScore(val.Indicator && val.Indicator == obj.Indicator && val.Vendor == obj.Vendor && '
'val.Type == obj.Type)').get('Reliability') == 'B - Usually reliable'
def test_ip_command(mocker):
"""
Given:
- IP addresses
When:
- running the IP command
Then:
- Verify the result is as expected
        - Verify support for a list of IPs
"""
from Whois import ip_command
response = load_test_data('./test_data/ip_output.json')
mocker.patch.object(Whois, 'get_whois_ip', return_value=response)
result = ip_command(['4.4.4.4', '4.4.4.4'], DBotScoreReliability.B)
assert len(result) == 2
assert result[0].outputs_prefix == 'Whois.IP'
assert result[0].outputs.get('query') == '4.4.4.4'
assert result[0].indicator.to_context() == {
'IP(val.Address && val.Address == obj.Address)': {
'Organization': {'Name': u'LEVEL3, US'},
'FeedRelatedIndicators': [{'type': 'CIDR', 'description': None, 'value': u'4.4.0.0/16'}],
'Geo': {'Country': u'US'},
'ASN': u'3356',
'Address': '4.4.4.4'},
'DBotScore('
'val.Indicator && val.Indicator == obj.Indicator && val.Vendor == obj.Vendor && val.Type == obj.Type)':
{'Reliability': 'B - Usually reliable',
'Vendor': 'Whois',
'Indicator': '4.4.4.4',
'Score': 0,
'Type': 'ip'}}
def test_get_whois_ip_proxy_param(mocker):
"""
Given:
- proxy address
When:
- running the get_whois_ip function
Then:
- Verify the function doesn't fail due to type errors
"""
from Whois import get_whois_ip
mocker.patch.object(demisto, 'params', return_value={"proxy": True})
result = get_whois_ip('1.1.1.1')
assert result
def test_indian_tld():
"""
Given:
        - Indian domain
When:
- running the get_root_server function
Then:
- Verify the function returns the correct Whois server
"""
from Whois import get_root_server
result = get_root_server("google.in")
assert result == "in.whois-servers.net"
def test_parse_raw_whois():
with open('test_data/EU domains.text', 'r') as f:
raw_data = f.read()
result = Whois.parse_raw_whois([raw_data], [], never_query_handles=False, handle_server='whois.eu')
assert result['registrar'] == ['IONOS SE']
| 39.122951 | 120 | 0.621726 |
7942a3d5b03bf5233ad4110585a526bf6162ad3f | 30,632 | py | Python | tests/plugins/test_resolve_remote_source.py | chmeliik/atomic-reactor | ca5a804dcb0aecf8b6bc0c8b16a84ac6847b4d15 | ["BSD-3-Clause"] | null | null | null | tests/plugins/test_resolve_remote_source.py | chmeliik/atomic-reactor | ca5a804dcb0aecf8b6bc0c8b16a84ac6847b4d15 | ["BSD-3-Clause"] | 16 | 2021-12-07T09:12:31.000Z | 2022-03-30T07:24:04.000Z | tests/plugins/test_resolve_remote_source.py | chmeliik/atomic-reactor | ca5a804dcb0aecf8b6bc0c8b16a84ac6847b4d15 | ["BSD-3-Clause"] | null | null | null |
"""
Copyright (c) 2019-2022 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
import base64
import io
import sys
import tarfile
from pathlib import Path
from textwrap import dedent
from typing import Callable, Dict
from flexmock import flexmock
import pytest
import koji
import yaml
from atomic_reactor.dirs import BuildDir
import atomic_reactor.utils.koji as koji_util
from atomic_reactor.utils.cachito import CachitoAPI, CFG_TYPE_B64
from atomic_reactor.constants import (
CACHITO_ENV_ARG_ALIAS,
CACHITO_ENV_FILENAME,
PLUGIN_BUILD_ORCHESTRATE_KEY,
REMOTE_SOURCE_DIR,
REMOTE_SOURCE_TARBALL_FILENAME,
REMOTE_SOURCE_JSON_FILENAME,
)
from atomic_reactor.plugin import PreBuildPluginsRunner, PluginFailedException
from atomic_reactor.plugins.pre_resolve_remote_source import (
RemoteSource,
ResolveRemoteSourcePlugin,
)
from atomic_reactor.source import SourceConfig
from tests.stubs import StubSource
KOJI_HUB = 'http://koji.com/hub'
KOJI_TASK_ID = 123
KOJI_TASK_OWNER = 'spam'
CACHITO_URL = 'https://cachito.example.com'
CACHITO_REQUEST_ID = 98765
SECOND_CACHITO_REQUEST_ID = 98766
CACHITO_REQUEST_DOWNLOAD_URL = '{}/api/v1/{}/download'.format(CACHITO_URL, CACHITO_REQUEST_ID)
SECOND_CACHITO_REQUEST_DOWNLOAD_URL = '{}/api/v1/{}/download'.format(CACHITO_URL,
SECOND_CACHITO_REQUEST_ID)
CACHITO_REQUEST_CONFIG_URL = '{}/api/v1/requests/{}/configuration-files'.format(
CACHITO_URL,
CACHITO_REQUEST_ID
)
SECOND_CACHITO_REQUEST_CONFIG_URL = '{}/api/v1/requests/{}/configuration-files'.format(
CACHITO_URL,
SECOND_CACHITO_REQUEST_ID
)
CACHITO_ICM_URL = '{}/api/v1/content-manifest?requests={}'.format(
CACHITO_URL,
CACHITO_REQUEST_ID
)
SECOND_CACHITO_ICM_URL = '{}/api/v1/content-manifest?requests={}'.format(
CACHITO_URL,
SECOND_CACHITO_REQUEST_ID
)
REMOTE_SOURCE_REPO = 'https://git.example.com/team/repo.git'
REMOTE_SOURCE_REF = 'b55c00f45ec3dfee0c766cea3d395d6e21cc2e5a'
REMOTE_SOURCE_PACKAGES = [
{
'name': 'test-package',
'type': 'npm',
'version': '0.0.1'
}
]
SECOND_REMOTE_SOURCE_REPO = 'https://git.example.com/other-team/other-repo.git'
SECOND_REMOTE_SOURCE_REF = 'd55c00f45ec3dfee0c766cea3d395d6e21cc2e5c'
CACHITO_SOURCE_REQUEST = {
'id': CACHITO_REQUEST_ID,
'repo': REMOTE_SOURCE_REPO,
'ref': REMOTE_SOURCE_REF,
'environment_variables': {
'GO111MODULE': 'on',
'GOPATH': 'deps/gomod',
'GOCACHE': 'deps/gomod',
},
'flags': ['enable-confeti', 'enable-party-popper'],
'pkg_managers': ['gomod'],
'dependencies': [
{
'name': 'github.com/op/go-logging',
'type': 'gomod',
'version': 'v0.1.1',
}
],
'packages': [
{
'name': 'github.com/spam/bacon/v2',
'type': 'gomod',
'version': 'v2.0.3'
}
],
'configuration_files': CACHITO_REQUEST_CONFIG_URL,
'content_manifest': CACHITO_ICM_URL,
'extra_cruft': 'ignored',
}
SECOND_CACHITO_SOURCE_REQUEST = {
'id': SECOND_CACHITO_REQUEST_ID,
'repo': SECOND_REMOTE_SOURCE_REPO,
'ref': SECOND_REMOTE_SOURCE_REF,
'environment_variables': {
'PIP_CERT': 'app/package-index-ca.pem',
'PIP_INDEX_URL': 'http://example-pip-index.url/stuff'
},
'flags': [],
'pkg_managers': ['pip'],
'dependencies': [
{
'name': 'click',
'type': 'pip',
'version': '5.0',
}
],
'packages': [
{
'name': 'osbs/cachito-pip-with-deps',
'type': 'pip',
'version': '1.0.0'
}
],
'configuration_files': SECOND_CACHITO_REQUEST_CONFIG_URL,
'content_manifest': SECOND_CACHITO_ICM_URL,
'extra_cruft': 'ignored',
}
REMOTE_SOURCE_JSON = {
'repo': REMOTE_SOURCE_REPO,
'ref': REMOTE_SOURCE_REF,
'environment_variables': {
'GO111MODULE': 'on',
'GOPATH': 'deps/gomod',
'GOCACHE': 'deps/gomod',
},
'flags': ['enable-confeti', 'enable-party-popper'],
'pkg_managers': ['gomod'],
'dependencies': [
{
'name': 'github.com/op/go-logging',
'type': 'gomod',
'version': 'v0.1.1',
}
],
'packages': [
{
'name': 'github.com/spam/bacon/v2',
'type': 'gomod',
'version': 'v2.0.3'
}
],
'configuration_files': CACHITO_REQUEST_CONFIG_URL,
'content_manifest': CACHITO_ICM_URL,
}
SECOND_REMOTE_SOURCE_JSON = {
'repo': SECOND_REMOTE_SOURCE_REPO,
'ref': SECOND_REMOTE_SOURCE_REF,
'environment_variables': {
'PIP_CERT': 'app/package-index-ca.pem',
'PIP_INDEX_URL': 'http://example-pip-index.url/stuff'
},
'flags': [],
'pkg_managers': ['pip'],
'dependencies': [
{
'name': 'click',
'type': 'pip',
'version': '5.0',
}
],
'packages': [
{
'name': 'osbs/cachito-pip-with-deps',
'type': 'pip',
'version': '1.0.0'
}
],
'configuration_files': SECOND_CACHITO_REQUEST_CONFIG_URL,
'content_manifest': SECOND_CACHITO_ICM_URL,
}
CACHITO_ENV_VARS_JSON = {
'GO111MODULE': {'kind': 'literal', 'value': 'on'},
'GOPATH': {'kind': 'path', 'value': 'deps/gomod'},
'GOCACHE': {'kind': 'path', 'value': 'deps/gomod'},
}
# Assert this with the CACHITO_ENV_VARS_JSON
CACHITO_BUILD_ARGS = {
'GO111MODULE': 'on',
'GOPATH': '/remote-source/deps/gomod',
'GOCACHE': '/remote-source/deps/gomod',
}
SECOND_CACHITO_ENV_VARS_JSON = {
'PIP_CERT': {'kind': 'path', 'value': 'app/package-index-ca.pem'},
'PIP_INDEX_URL': {'kind': 'literal', 'value': 'http://example-pip-index.url/stuff'},
}
# The response from CACHITO_REQUEST_CONFIG_URL
CACHITO_CONFIG_FILES = [
{
"path": "app/some-config.txt",
"type": CFG_TYPE_B64,
"content": base64.b64encode(b"gomod requests don't actually have configs").decode(),
},
]
# The response from SECOND_CACHITO_REQUEST_CONFIG_URL
SECOND_CACHITO_CONFIG_FILES = [
{
"path": "app/package-index-ca.pem",
"type": CFG_TYPE_B64,
"content": base64.b64encode(b"-----BEGIN CERTIFICATE-----").decode(),
},
]
def mock_reactor_config(workflow, data=None):
if data is None:
data = dedent("""\
version: 1
cachito:
api_url: {}
auth:
ssl_certs_dir: {}
koji:
hub_url: /
root_url: ''
auth: {{}}
""".format(CACHITO_URL, workflow._tmpdir))
workflow._tmpdir.joinpath('cert').touch()
config = yaml.safe_load(data)
workflow.conf.conf = config
def mock_user_params(workflow, user_params):
if not workflow.user_params:
workflow.user_params = user_params
else:
workflow.user_params.update(user_params)
def mock_repo_config(workflow, data=None):
if data is None:
data = dedent("""\
remote_source:
repo: {}
ref: {}
""".format(REMOTE_SOURCE_REPO, REMOTE_SOURCE_REF))
workflow._tmpdir.joinpath('container.yaml').write_text(data, "utf-8")
# The repo config is read when SourceConfig is initialized. Force
# reloading here to make usage easier.
workflow.source.config = SourceConfig(str(workflow._tmpdir))
@pytest.fixture
def workflow(workflow, source_dir):
# Stash the tmpdir in workflow so it can be used later
workflow._tmpdir = source_dir
class MockSource(StubSource):
def __init__(self, workdir):
super(MockSource, self).__init__()
self.workdir = workdir
self.path = workdir
workflow.source = MockSource(str(source_dir))
workflow.buildstep_plugins_conf = [{'name': PLUGIN_BUILD_ORCHESTRATE_KEY}]
workflow.user_params = {'koji_task_id': KOJI_TASK_ID}
mock_repo_config(workflow)
mock_reactor_config(workflow)
mock_koji()
workflow.build_dir.init_build_dirs(["x86_64", "ppc64le"], workflow.source)
return workflow
def expected_build_dir(workflow) -> str:
"""The primary build_dir that the plugin is expected to work with."""
return str(workflow.build_dir.any_platform.path)
def expected_dowload_path(workflow, remote_source_name=None) -> str:
if remote_source_name:
filename = f'remote-source-{remote_source_name}.tar.gz'
else:
filename = 'remote-source.tar.gz'
path = Path(expected_build_dir(workflow), filename)
return str(path)
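# e.g. expected_dowload_path(workflow, "gomod") -> "<build_dir>/remote-source-gomod.tar.gz",
# while the single-source default (remote_source_name=None) is
# "<build_dir>/remote-source.tar.gz".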
def mock_cachito_tarball(create_at_path) -> str:
"""Create a mocked tarball for a remote source at the specified path."""
create_at_path = Path(create_at_path)
file_content = f"Content of {create_at_path.name}".encode("utf-8")
readme = tarfile.TarInfo("app/README.txt")
readme.size = len(file_content)
with tarfile.open(create_at_path, 'w:gz') as tf:
tf.addfile(readme, io.BytesIO(file_content))
return str(create_at_path)
def mock_cachito_api_multiple_remote_sources(workflow, user=KOJI_TASK_OWNER):
(
flexmock(CachitoAPI)
.should_receive("request_sources")
.with_args(
repo=REMOTE_SOURCE_REPO,
ref=REMOTE_SOURCE_REF,
user=user,
dependency_replacements=None,
)
.and_return({"id": CACHITO_REQUEST_ID})
.ordered()
)
(
flexmock(CachitoAPI)
.should_receive("request_sources")
.with_args(
repo=SECOND_REMOTE_SOURCE_REPO,
ref=SECOND_REMOTE_SOURCE_REF,
user=user,
dependency_replacements=None,
)
.and_return({"id": SECOND_CACHITO_REQUEST_ID})
.ordered()
)
(
flexmock(CachitoAPI)
.should_receive("wait_for_request")
.with_args({"id": CACHITO_REQUEST_ID})
.and_return(CACHITO_SOURCE_REQUEST)
.ordered()
)
(
flexmock(CachitoAPI)
.should_receive("wait_for_request")
.with_args({"id": SECOND_CACHITO_REQUEST_ID})
.and_return(SECOND_CACHITO_SOURCE_REQUEST)
.ordered()
)
(
flexmock(CachitoAPI)
.should_receive("download_sources")
.with_args(
CACHITO_SOURCE_REQUEST,
dest_dir=expected_build_dir(workflow),
dest_filename="remote-source-gomod.tar.gz",
)
.and_return(mock_cachito_tarball(expected_dowload_path(workflow, "gomod")))
.ordered()
)
(
flexmock(CachitoAPI)
.should_receive("get_request_env_vars")
.with_args(CACHITO_SOURCE_REQUEST["id"])
.and_return(CACHITO_ENV_VARS_JSON)
.ordered()
)
(
flexmock(CachitoAPI)
.should_receive("download_sources")
.with_args(
SECOND_CACHITO_SOURCE_REQUEST,
dest_dir=expected_build_dir(workflow),
dest_filename="remote-source-pip.tar.gz",
)
.and_return(mock_cachito_tarball(expected_dowload_path(workflow, "pip")))
.ordered()
)
(
flexmock(CachitoAPI)
.should_receive("get_request_env_vars")
.with_args(SECOND_CACHITO_SOURCE_REQUEST["id"])
.and_return(SECOND_CACHITO_ENV_VARS_JSON)
.ordered()
)
(
flexmock(CachitoAPI)
.should_receive("get_request_config_files")
.with_args(CACHITO_SOURCE_REQUEST["id"])
.and_return(CACHITO_CONFIG_FILES)
.ordered()
)
(
flexmock(CachitoAPI)
.should_receive("get_request_config_files")
.with_args(SECOND_CACHITO_SOURCE_REQUEST["id"])
.and_return(SECOND_CACHITO_CONFIG_FILES)
.ordered()
)
(
flexmock(CachitoAPI)
.should_receive("assemble_download_url")
.with_args(CACHITO_REQUEST_ID)
.and_return(CACHITO_REQUEST_DOWNLOAD_URL)
.ordered()
)
(
flexmock(CachitoAPI)
.should_receive("assemble_download_url")
.with_args(SECOND_CACHITO_REQUEST_ID)
.and_return(SECOND_CACHITO_REQUEST_DOWNLOAD_URL)
.ordered()
)
def mock_cachito_api(workflow, user=KOJI_TASK_OWNER, source_request=None,
dependency_replacements=None,
env_vars_json=None):
if source_request is None:
source_request = CACHITO_SOURCE_REQUEST
(flexmock(CachitoAPI)
.should_receive('request_sources')
.with_args(
repo=REMOTE_SOURCE_REPO,
ref=REMOTE_SOURCE_REF,
user=user,
dependency_replacements=dependency_replacements,
)
.and_return({'id': CACHITO_REQUEST_ID}))
(flexmock(CachitoAPI)
.should_receive('wait_for_request')
.with_args({'id': CACHITO_REQUEST_ID})
.and_return(source_request))
(flexmock(CachitoAPI)
.should_receive('download_sources')
.with_args(source_request, dest_dir=expected_build_dir(workflow),
dest_filename=REMOTE_SOURCE_TARBALL_FILENAME)
.and_return(mock_cachito_tarball(expected_dowload_path(workflow))))
(flexmock(CachitoAPI)
.should_receive('assemble_download_url')
.with_args(CACHITO_REQUEST_ID)
.and_return(CACHITO_REQUEST_DOWNLOAD_URL))
(flexmock(CachitoAPI)
.should_receive('get_request_env_vars')
.with_args(source_request['id'])
.and_return(env_vars_json or CACHITO_ENV_VARS_JSON))
(flexmock(CachitoAPI)
.should_receive('get_request_config_files')
.with_args(source_request['id'])
.and_return(CACHITO_CONFIG_FILES))
def mock_koji(user=KOJI_TASK_OWNER):
koji_session = flexmock(krb_login=lambda: 'some')
flexmock(koji, ClientSession=lambda hub, opts: koji_session)
flexmock(koji_util).should_receive('get_koji_task_owner').and_return({'name': user})
def check_injected_files(expected_files: Dict[str, str]) -> Callable[[BuildDir], None]:
"""Make a callable that checks expected files in a BuildDir."""
def check_files(build_dir: BuildDir) -> None:
"""Check the presence and content of files in the unpacked_remote_sources directory."""
unpacked_remote_sources = build_dir.path / ResolveRemoteSourcePlugin.REMOTE_SOURCE
for path, expected_content in expected_files.items():
abspath = unpacked_remote_sources / path
assert abspath.read_text() == expected_content
return check_files
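# Illustrative usage sketch (mirroring the assertions further down): the
# returned callable is handed to BuildDir iteration, e.g.
#   workflow.build_dir.for_each_platform(
#       check_injected_files({"cachito.env": expected_env_text}))
# where `expected_env_text` is whatever content the test expects.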
def setup_function(*args):
# IMPORTANT: This needs to be done to ensure mocks at the module
# level are reset between test cases.
sys.modules.pop('pre_resolve_remote_source', None)
def teardown_function(*args):
# IMPORTANT: This needs to be done to ensure mocks at the module
# level are reset between test cases.
sys.modules.pop('pre_resolve_remote_source', None)
def test_source_request_to_json_missing_optional_keys(workflow):
p = ResolveRemoteSourcePlugin(workflow)
source_request = {
"repo": REMOTE_SOURCE_REPO,
"ref": REMOTE_SOURCE_REF,
"packages": [],
}
# test that missing optional keys are ignored as expected
assert p.source_request_to_json(source_request) == source_request
@pytest.mark.parametrize('scratch', (True, False))
@pytest.mark.parametrize('dr_strs, dependency_replacements',
((None, None),
(['gomod:foo.bar/project:2'],
[{
'name': 'foo.bar/project',
'type': 'gomod',
'version': '2'}]),
(['gomod:foo.bar/project:2:newproject'],
[{
'name': 'foo.bar/project',
'type': 'gomod',
'new_name': 'newproject',
'version': '2'}]),
(['gomod:foo.bar/project'], None)))
def test_resolve_remote_source(workflow, scratch, dr_strs, dependency_replacements):
mock_cachito_api(workflow, dependency_replacements=dependency_replacements)
workflow.user_params['scratch'] = scratch
err = None
if dr_strs and not scratch:
err = 'Cachito dependency replacements are only allowed for scratch builds'
if dr_strs and any(len(dr.split(':')) < 3 for dr in dr_strs):
err = 'Cachito dependency replacements must be'
expected_plugin_results = [
{
"name": None,
"url": CACHITO_REQUEST_DOWNLOAD_URL,
"remote_source_json": {
"json": REMOTE_SOURCE_JSON,
"filename": REMOTE_SOURCE_JSON_FILENAME,
},
"remote_source_tarball": {
"filename": REMOTE_SOURCE_TARBALL_FILENAME,
"path": expected_dowload_path(workflow),
},
},
]
run_plugin_with_args(
workflow,
dependency_replacements=dr_strs,
expect_error=err,
expected_plugin_results=expected_plugin_results,
)
if err:
return
cachito_env_content = dedent(
"""\
#!/bin/bash
export GO111MODULE=on
export GOPATH=/remote-source/deps/gomod
export GOCACHE=/remote-source/deps/gomod
"""
)
workflow.build_dir.for_each_platform(
check_injected_files(
{
"cachito.env": cachito_env_content,
"app/README.txt": "Content of remote-source.tar.gz",
"app/some-config.txt": "gomod requests don't actually have configs",
},
)
)
assert workflow.buildargs == {
**CACHITO_BUILD_ARGS,
"REMOTE_SOURCE": ResolveRemoteSourcePlugin.REMOTE_SOURCE,
"REMOTE_SOURCE_DIR": REMOTE_SOURCE_DIR,
CACHITO_ENV_ARG_ALIAS: str(Path(REMOTE_SOURCE_DIR, CACHITO_ENV_FILENAME)),
}
# https://github.com/openshift/imagebuilder/issues/139
assert not workflow.buildargs["REMOTE_SOURCE"].startswith("/")
@pytest.mark.parametrize(
'env_vars_json',
[
{
'GOPATH': {'kind': 'path', 'value': 'deps/gomod'},
'GOCACHE': {'kind': 'path', 'value': 'deps/gomod'},
'GO111MODULE': {'kind': 'literal', 'value': 'on'},
'GOX': {'kind': 'new', 'value': 'new-kind'},
},
]
)
def test_fail_build_if_unknown_kind(workflow, env_vars_json):
mock_cachito_api(workflow, env_vars_json=env_vars_json)
run_plugin_with_args(workflow, expect_error=r'.*Unknown kind new got from Cachito')
def test_no_koji_user(workflow, caplog):
reactor_config = dedent("""\
version: 1
cachito:
api_url: {}
auth:
ssl_certs_dir: {}
koji:
hub_url: /
root_url: ''
auth: {{}}
""".format(CACHITO_URL, workflow._tmpdir))
mock_reactor_config(workflow, reactor_config)
mock_cachito_api(workflow, user='unknown_user')
workflow.user_params['koji_task_id'] = 'x'
log_msg = 'Invalid Koji task ID'
expected_plugin_results = [
{
"name": None,
"url": CACHITO_REQUEST_DOWNLOAD_URL,
"remote_source_json": {
"json": REMOTE_SOURCE_JSON,
"filename": REMOTE_SOURCE_JSON_FILENAME,
},
"remote_source_tarball": {
"filename": REMOTE_SOURCE_TARBALL_FILENAME,
"path": expected_dowload_path(workflow),
},
},
]
run_plugin_with_args(workflow, expected_plugin_results=expected_plugin_results)
assert log_msg in caplog.text
@pytest.mark.parametrize('pop_key', ('repo', 'ref', 'packages'))
def test_invalid_remote_source_structure(workflow, pop_key):
source_request = {
'id': CACHITO_REQUEST_ID,
'repo': REMOTE_SOURCE_REPO,
'ref': REMOTE_SOURCE_REF,
'packages': REMOTE_SOURCE_PACKAGES,
}
source_request.pop(pop_key)
mock_cachito_api(workflow, source_request=source_request)
run_plugin_with_args(workflow, expect_error='Received invalid source request')
def test_fail_when_missing_cachito_config(workflow):
reactor_config = dedent("""\
version: 1
koji:
hub_url: /
root_url: ''
auth: {}
""")
mock_reactor_config(workflow, reactor_config)
with pytest.raises(PluginFailedException) as exc:
run_plugin_with_args(workflow, expect_result=False)
assert 'No Cachito configuration defined' in str(exc.value)
def test_invalid_cert_reference(workflow):
bad_certs_dir = str(workflow._tmpdir / 'invalid-dir')
reactor_config = dedent("""\
version: 1
cachito:
api_url: {}
auth:
ssl_certs_dir: {}
koji:
hub_url: /
root_url: ''
auth: {{}}
""".format(CACHITO_URL, bad_certs_dir))
mock_reactor_config(workflow, reactor_config)
run_plugin_with_args(workflow, expect_error="Cachito ssl_certs_dir doesn't exist")
def test_ignore_when_missing_remote_source_config(workflow):
remote_source_config = dedent("""---""")
mock_repo_config(workflow, remote_source_config)
result = run_plugin_with_args(workflow, expect_result=False)
assert result is None
@pytest.mark.parametrize(('task_id', 'log_entry'), (
(None, 'Invalid Koji task ID'),
('not-an-int', 'Invalid Koji task ID'),
))
def test_bad_build_metadata(workflow, task_id, log_entry, caplog):
workflow.user_params['koji_task_id'] = task_id
mock_cachito_api(workflow, user='unknown_user')
expected_plugin_results = [
{
"name": None,
"url": CACHITO_REQUEST_DOWNLOAD_URL,
"remote_source_json": {
"json": REMOTE_SOURCE_JSON,
"filename": REMOTE_SOURCE_JSON_FILENAME,
},
"remote_source_tarball": {
"filename": REMOTE_SOURCE_TARBALL_FILENAME,
"path": expected_dowload_path(workflow),
},
},
]
run_plugin_with_args(workflow, expected_plugin_results=expected_plugin_results)
assert log_entry in caplog.text
assert 'unknown_user' in caplog.text
@pytest.mark.parametrize('allow_multiple_remote_sources', [True, False])
def test_allow_multiple_remote_sources(workflow, allow_multiple_remote_sources):
first_remote_source_name = 'gomod'
first_remote_tarball_filename = 'remote-source-gomod.tar.gz'
first_remote_json_filename = 'remote-source-gomod.json'
second_remote_source_name = 'pip'
second_remote_tarball_filename = 'remote-source-pip.tar.gz'
second_remote_json_filename = 'remote-source-pip.json'
container_yaml_config = dedent(
"""\
remote_sources:
- name: {}
remote_source:
repo: {}
ref: {}
- name: {}
remote_source:
repo: {}
ref: {}
"""
).format(
first_remote_source_name,
REMOTE_SOURCE_REPO,
REMOTE_SOURCE_REF,
second_remote_source_name,
SECOND_REMOTE_SOURCE_REPO,
SECOND_REMOTE_SOURCE_REF,
)
reactor_config = dedent("""\
version: 1
cachito:
api_url: {}
auth:
ssl_certs_dir: {}
koji:
hub_url: /
root_url: ''
auth: {{}}
allow_multiple_remote_sources: {}
""".format(CACHITO_URL, workflow._tmpdir, allow_multiple_remote_sources))
mock_repo_config(workflow, data=container_yaml_config)
mock_reactor_config(workflow, reactor_config)
mock_cachito_api_multiple_remote_sources(workflow)
if not allow_multiple_remote_sources:
err_msg = (
"Multiple remote sources are not enabled, "
"use single remote source in container.yaml"
)
result = run_plugin_with_args(workflow, expect_result=False, expect_error=err_msg)
assert result is None
else:
expected_plugin_results = [
{
"name": first_remote_source_name,
"url": CACHITO_REQUEST_DOWNLOAD_URL,
"remote_source_json": {
"json": REMOTE_SOURCE_JSON,
"filename": first_remote_json_filename,
},
"remote_source_tarball": {
"filename": first_remote_tarball_filename,
"path": expected_dowload_path(workflow, "gomod"),
},
},
{
"name": second_remote_source_name,
"url": SECOND_CACHITO_REQUEST_DOWNLOAD_URL,
"remote_source_json": {
"json": SECOND_REMOTE_SOURCE_JSON,
"filename": second_remote_json_filename,
},
"remote_source_tarball": {
"filename": second_remote_tarball_filename,
"path": expected_dowload_path(workflow, "pip"),
},
},
]
run_plugin_with_args(workflow, expected_plugin_results=expected_plugin_results)
first_cachito_env = dedent(
"""\
#!/bin/bash
export GO111MODULE=on
export GOPATH=/remote-source/gomod/deps/gomod
export GOCACHE=/remote-source/gomod/deps/gomod
"""
)
second_cachito_env = dedent(
"""\
#!/bin/bash
export PIP_CERT=/remote-source/pip/app/package-index-ca.pem
export PIP_INDEX_URL=http://example-pip-index.url/stuff
"""
)
workflow.build_dir.for_each_platform(
check_injected_files(
{
"gomod/cachito.env": first_cachito_env,
"gomod/app/README.txt": "Content of remote-source-gomod.tar.gz",
"gomod/app/some-config.txt": "gomod requests don't actually have configs",
"pip/cachito.env": second_cachito_env,
"pip/app/README.txt": "Content of remote-source-pip.tar.gz",
"pip/app/package-index-ca.pem": "-----BEGIN CERTIFICATE-----",
},
)
)
assert workflow.buildargs == {
"REMOTE_SOURCES": ResolveRemoteSourcePlugin.REMOTE_SOURCE,
"REMOTE_SOURCES_DIR": REMOTE_SOURCE_DIR,
}
# https://github.com/openshift/imagebuilder/issues/139
assert not workflow.buildargs["REMOTE_SOURCES"].startswith("/")
def test_multiple_remote_sources_non_unique_names(workflow):
container_yaml_config = dedent("""\
remote_sources:
- name: same
remote_source:
repo: https://git.example.com/team/repo.git
ref: a55c00f45ec3dfee0c766cea3d395d6e21cc2e5a
- name: same
remote_source:
repo: https://git.example.com/team/repo.git
ref: a55c00f45ec3dfee0c766cea3d395d6e21cc2e5a
- name: bit-different
remote_source:
repo: https://git.example.com/team/repo.git
ref: a55c00f45ec3dfee0c766cea3d395d6e21cc2e5a
""")
reactor_config = dedent("""\
version: 1
cachito:
api_url: {}
auth:
ssl_certs_dir: {}
koji:
hub_url: /
root_url: ''
auth: {{}}
allow_multiple_remote_sources: True
""".format(CACHITO_URL, workflow._tmpdir))
mock_repo_config(workflow, data=container_yaml_config)
mock_reactor_config(workflow, reactor_config)
err_msg = (
r"Provided remote sources parameters contain non unique names: \['same'\]"
)
result = run_plugin_with_args(workflow, expect_result=False, expect_error=err_msg)
assert result is None
def run_plugin_with_args(workflow, dependency_replacements=None, expect_error=None,
expect_result=True, expected_plugin_results=None):
runner = PreBuildPluginsRunner(
workflow,
[
{
"name": ResolveRemoteSourcePlugin.key,
"args": {"dependency_replacements": dependency_replacements},
},
],
)
if expect_error:
with pytest.raises(PluginFailedException, match=expect_error):
runner.run()
return
results = runner.run()[ResolveRemoteSourcePlugin.key]
if expect_result:
assert results == expected_plugin_results
return results
def test_inject_remote_sources_dest_already_exists(workflow):
plugin = ResolveRemoteSourcePlugin(workflow)
processed_remote_sources = [
RemoteSource(
id=CACHITO_REQUEST_ID,
name=None,
json_data={},
build_args={},
tarball_path=Path("/does/not/matter"),
),
]
builddir_path = Path(expected_build_dir(workflow))
builddir_path.joinpath(ResolveRemoteSourcePlugin.REMOTE_SOURCE).mkdir()
err_msg = "Conflicting path unpacked_remote_sources already exists"
with pytest.raises(RuntimeError, match=err_msg):
plugin.inject_remote_sources(processed_remote_sources)
def test_generate_cachito_env_file_shell_quoting(workflow):
plugin = ResolveRemoteSourcePlugin(workflow)
dest_dir = Path(expected_build_dir(workflow))
plugin.generate_cachito_env_file(dest_dir, {"foo": "somefile; rm -rf ~"})
cachito_env = dest_dir / "cachito.env"
assert cachito_env.read_text() == dedent(
"""\
#!/bin/bash
export foo='somefile; rm -rf ~'
"""
)
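# The single-quoted expectation above matches what shlex.quote() produces,
# e.g. shlex.quote("somefile; rm -rf ~") -> "'somefile; rm -rf ~'", which
# suggests the plugin shell-escapes env values before writing cachito.env.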
def test_generate_cachito_config_files_unknown_type(workflow):
plugin = ResolveRemoteSourcePlugin(workflow)
dest_dir = Path(expected_build_dir(workflow))
cfg_files = [{"path": "foo", "type": "unknown", "content": "does not matter"}]
with pytest.raises(ValueError, match="Unknown cachito configuration file data type 'unknown'"):
plugin.generate_cachito_config_files(dest_dir, cfg_files)
| 31.775934 | 99 | 0.614488 |
7942a48d10c6b50997b0302eb21dfad6d431e0aa | 16,351 | py | Python | train_mask_rcnn.py | maktu6/garment_segmentation | 8a010fa1087ba6d0c9e77299ab13a9266750d0c1 | [
"MIT"
] | 2 | 2019-12-20T07:36:16.000Z | 2020-12-31T23:40:05.000Z | train_mask_rcnn.py | maktu6/garment_segmentation | 8a010fa1087ba6d0c9e77299ab13a9266750d0c1 | [
"MIT"
] | null | null | null | train_mask_rcnn.py | maktu6/garment_segmentation | 8a010fa1087ba6d0c9e77299ab13a9266750d0c1 | [
"MIT"
] | null | null | null | """Train Mask RCNN end to end."""
import os
# disable autotune
os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'
import logging
import time
import numpy as np
import cv2
import mxnet as mx
from mxnet import gluon
from mxnet import autograd
import gluoncv as gcv
from gluoncv import data as gdata
from gluoncv import utils as gutils
from gluoncv.model_zoo import get_model
from gluoncv.data import batchify
from gluoncv.data.transforms.presets.rcnn import MaskRCNNDefaultTrainTransform, \
MaskRCNNDefaultValTransform
from gluoncv.utils.metrics.coco_instance import COCOInstanceMetric
from utils.metric import RCNNAccMetric, RCNNL1LossMetric, RPNAccMetric, RPNL1LossMetric, \
MaskAccMetric, MaskFGAccMetric
from utils.argument import parse_args_for_rcnn as parse_args
from utils.logger import build_logger
def get_dataset(dataset, args):
if dataset.lower() == 'coco':
train_dataset = gdata.COCOInstance(splits='instances_train2017')
val_dataset = gdata.COCOInstance(splits='instances_val2017', skip_empty=False)
val_metric = COCOInstanceMetric(val_dataset, args.save_prefix + '_eval', cleanup=True)
elif dataset.lower() == 'imaterialist':
from utils.iMaterialistDataset import COCOiMaterialist
train_dataset = COCOiMaterialist(root='datasets/imaterialist/',
splits='rle_instances_train')
val_dataset = COCOiMaterialist(root='datasets/imaterialist/',
splits='resize_rle_instances_val', skip_empty=False)
val_metric = COCOInstanceMetric(val_dataset, args.save_prefix + '_eval', cleanup=True)
else:
raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
return train_dataset, val_dataset, val_metric
def get_dataloader(net, train_dataset, val_dataset, train_transform, val_transform, batch_size,
num_workers, multi_stage):
"""Get dataloader."""
# allow different shapes in same batch
train_bfn = batchify.Tuple(*[batchify.Append() for _ in range(6)])
train_loader = mx.gluon.data.DataLoader(
train_dataset.transform(train_transform(net.short, net.max_size, net, ashape=net.ashape,
multi_stage=multi_stage)),
batch_size, True, batchify_fn=train_bfn, last_batch='rollover', num_workers=num_workers)
val_bfn = batchify.Tuple(*[batchify.Append() for _ in range(2)])
val_loader = mx.gluon.data.DataLoader(
val_dataset.transform(val_transform(net.short, net.max_size)),
batch_size, False, batchify_fn=val_bfn, last_batch='keep', num_workers=num_workers)
return train_loader, val_loader
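# Sketch of the batching convention assumed above: batchify.Append() keeps each
# sample as its own NDArray instead of stacking them, so a "batch" arrives as a
# list of per-sample arrays that split_and_load() below assigns one-per-device.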
def save_params(net, logger, best_map, current_map, epoch, save_interval, prefix):
current_map = float(current_map)
if current_map > best_map[0]:
logger.info('[Epoch {}] mAP {} higher than current best {} saving to {}'.format(
epoch, current_map, best_map, '{:s}_best.params'.format(prefix)))
best_map[0] = current_map
net.save_parameters('{:s}_best.params'.format(prefix))
with open(prefix + '_best_map.log', 'a') as f:
f.write('\n{:04d}:\t{:.4f}'.format(epoch, current_map))
if save_interval and (epoch + 1) % save_interval == 0:
logger.info('[Epoch {}] Saving parameters to {}'.format(
epoch, '{:s}_{:04d}_{:.4f}.params'.format(prefix, epoch, current_map)))
net.save_parameters('{:s}_{:04d}_{:.4f}.params'.format(prefix, epoch, current_map))
def split_and_load(batch, ctx_list):
    """Split data into one mini-batch per device."""
    new_batch = []
    for data in batch:
        new_data = [x.as_in_context(ctx) for x, ctx in zip(data, ctx_list)]
        new_batch.append(new_data)
    return new_batch
def validate(net, val_data, ctx, eval_metric, args):
"""Test on validation dataset."""
clipper = gcv.nn.bbox.BBoxClipToImage()
eval_metric.reset()
if not args.disable_hybridization:
net.hybridize(static_alloc=args.static_alloc)
for ib, batch in enumerate(val_data):
batch = split_and_load(batch, ctx_list=ctx)
det_bboxes = []
det_ids = []
det_scores = []
det_masks = []
det_infos = []
for x, im_info in zip(*batch):
# get prediction results
ids, scores, bboxes, masks = net(x)
det_bboxes.append(clipper(bboxes, x))
det_ids.append(ids)
det_scores.append(scores)
det_masks.append(masks)
det_infos.append(im_info)
# update metric
for det_bbox, det_id, det_score, det_mask, det_info in zip(det_bboxes, det_ids, det_scores,
det_masks, det_infos):
for i in range(det_info.shape[0]):
# numpy everything
det_bbox = det_bbox[i].asnumpy()
det_id = det_id[i].asnumpy()
det_score = det_score[i].asnumpy()
det_mask = det_mask[i].asnumpy()
det_info = det_info[i].asnumpy()
# filter by conf threshold
im_height, im_width, im_scale = det_info
valid = np.where(((det_id >= 0) & (det_score >= 0.001)))[0]
det_id = det_id[valid]
det_score = det_score[valid]
det_bbox = det_bbox[valid] / im_scale
det_mask = det_mask[valid]
# fill full mask
im_height, im_width = int(round(im_height / im_scale)), int(
round(im_width / im_scale))
full_masks = []
for bbox, mask in zip(det_bbox, det_mask):
mask = gdata.transforms.mask.fill(mask, bbox, (im_width, im_height))
if args.dataset.lower() == 'imaterialist':
# compute metric at size (512, 512)
mask = cv2.resize(mask, (512, 512), cv2.INTER_NEAREST)
full_masks.append(mask)
full_masks = np.array(full_masks)
eval_metric.update(det_bbox, det_id, det_score, full_masks)
return eval_metric.get()
def get_lr_at_iter(alpha):
return 1. / 3. * (1 - alpha) + alpha
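# Linear warmup sketch: the scale rises from 1/3 of the base lr to the full lr,
# e.g. get_lr_at_iter(0.0) -> 0.333..., get_lr_at_iter(0.5) -> 0.666...,
# get_lr_at_iter(1.0) -> 1.0.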
def train(net, train_data, val_data, eval_metric, ctx, args):
"""Training pipeline"""
net.collect_params().setattr('grad_req', 'null')
net.collect_train_params().setattr('grad_req', 'write')
trainer = gluon.Trainer(
net.collect_train_params(), # fix batchnorm, fix first stage, etc...
'sgd',
{'learning_rate': args.lr,
'wd': args.wd,
'momentum': args.momentum,
'clip_gradient': 5})
# lr decay policy
lr_decay = float(args.lr_decay)
lr_steps = sorted([float(ls) for ls in args.lr_decay_epoch.split(',') if ls.strip()])
lr_warmup = float(args.lr_warmup) # avoid int division
rpn_cls_loss = mx.gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=False)
rpn_box_loss = mx.gluon.loss.HuberLoss(rho=1 / 9.) # == smoothl1
rcnn_cls_loss = mx.gluon.loss.SoftmaxCrossEntropyLoss()
rcnn_box_loss = mx.gluon.loss.HuberLoss() # == smoothl1
rcnn_mask_loss = mx.gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=False)
metrics = [mx.metric.Loss('RPN_Conf'),
mx.metric.Loss('RPN_SmoothL1'),
mx.metric.Loss('RCNN_CrossEntropy'),
mx.metric.Loss('RCNN_SmoothL1'),
mx.metric.Loss('RCNN_Mask')]
rpn_acc_metric = RPNAccMetric()
rpn_bbox_metric = RPNL1LossMetric()
rcnn_acc_metric = RCNNAccMetric()
rcnn_bbox_metric = RCNNL1LossMetric()
rcnn_mask_metric = MaskAccMetric()
rcnn_fgmask_metric = MaskFGAccMetric()
metrics2 = [rpn_acc_metric, rpn_bbox_metric,
rcnn_acc_metric, rcnn_bbox_metric,
rcnn_mask_metric, rcnn_fgmask_metric]
# set up logger
log_file_path = args.save_prefix + '_train.log'
logger = build_logger(log_file_path)
logger.info(args)
if args.verbose:
logger.info('Trainable parameters:')
logger.info(net.collect_train_params().keys())
logger.info('Start training from [Epoch {}]'.format(args.start_epoch))
best_map = [0]
for epoch in range(args.start_epoch, args.epochs):
while lr_steps and epoch >= lr_steps[0]:
new_lr = trainer.learning_rate * lr_decay
lr_steps.pop(0)
trainer.set_learning_rate(new_lr)
logger.info("[Epoch {}] Set learning rate to {}".format(epoch, new_lr))
for metric in metrics:
metric.reset()
tic = time.time()
btic = time.time()
if not args.disable_hybridization:
net.hybridize(static_alloc=args.static_alloc)
base_lr = trainer.learning_rate
for i, batch in enumerate(train_data):
if epoch == 0 and i <= lr_warmup:
# adjust based on real percentage
new_lr = base_lr * get_lr_at_iter(i / lr_warmup)
if new_lr != trainer.learning_rate:
if i % args.log_interval == 0:
logger.info(
'[Epoch 0 Iteration {}] Set learning rate to {}'.format(i, new_lr))
trainer.set_learning_rate(new_lr)
batch = split_and_load(batch, ctx_list=ctx)
batch_size = len(batch[0])
losses = []
metric_losses = [[] for _ in metrics]
add_losses = [[] for _ in metrics2]
with autograd.record():
for data, label, gt_mask, rpn_cls_targets, rpn_box_targets, rpn_box_masks in zip(
*batch):
gt_label = label[:, :, 4:5]
gt_box = label[:, :, :4]
cls_pred, box_pred, mask_pred, roi, samples, matches, rpn_score, rpn_box, anchors = net(
data, gt_box)
# losses of rpn
rpn_score = rpn_score.squeeze(axis=-1)
num_rpn_pos = (rpn_cls_targets >= 0).sum()
rpn_loss1 = rpn_cls_loss(rpn_score, rpn_cls_targets,
rpn_cls_targets >= 0) * rpn_cls_targets.size / num_rpn_pos
rpn_loss2 = rpn_box_loss(rpn_box, rpn_box_targets,
rpn_box_masks) * rpn_box.size / num_rpn_pos
# rpn overall loss, use sum rather than average
rpn_loss = rpn_loss1 + rpn_loss2
# generate targets for rcnn
cls_targets, box_targets, box_masks = net.target_generator(roi, samples,
matches, gt_label,
gt_box)
# losses of rcnn
num_rcnn_pos = (cls_targets >= 0).sum()
rcnn_loss1 = rcnn_cls_loss(cls_pred, cls_targets,
cls_targets >= 0) * cls_targets.size / \
cls_targets.shape[0] / num_rcnn_pos
rcnn_loss2 = rcnn_box_loss(box_pred, box_targets, box_masks) * box_pred.size / \
box_pred.shape[0] / num_rcnn_pos
rcnn_loss = rcnn_loss1 + rcnn_loss2
# generate targets for mask
mask_targets, mask_masks = net.mask_target(roi, gt_mask, matches, cls_targets)
# loss of mask
mask_loss = rcnn_mask_loss(mask_pred, mask_targets, mask_masks) * \
mask_targets.size / mask_targets.shape[0] / mask_masks.sum()
# overall losses
losses.append(rpn_loss.sum() + rcnn_loss.sum() + mask_loss.sum())
metric_losses[0].append(rpn_loss1.sum())
metric_losses[1].append(rpn_loss2.sum())
metric_losses[2].append(rcnn_loss1.sum())
metric_losses[3].append(rcnn_loss2.sum())
metric_losses[4].append(mask_loss.sum())
add_losses[0].append([[rpn_cls_targets, rpn_cls_targets >= 0], [rpn_score]])
add_losses[1].append([[rpn_box_targets, rpn_box_masks], [rpn_box]])
add_losses[2].append([[cls_targets], [cls_pred]])
add_losses[3].append([[box_targets, box_masks], [box_pred]])
add_losses[4].append([[mask_targets, mask_masks], [mask_pred]])
add_losses[5].append([[mask_targets, mask_masks], [mask_pred]])
autograd.backward(losses)
for metric, record in zip(metrics, metric_losses):
metric.update(0, record)
for metric, records in zip(metrics2, add_losses):
for pred in records:
metric.update(pred[0], pred[1])
trainer.step(batch_size)
# update metrics
if args.log_interval and not (i + 1) % args.log_interval:
msg = ','.join(['{}={:.3f}'.format(*metric.get()) for metric in metrics + metrics2])
logger.info('[Epoch {}][Batch {}], Speed: {:.3f} samples/sec, {}'.format(
epoch, i, args.log_interval * batch_size / (time.time() - btic), msg))
btic = time.time()
msg = ','.join(['{}={:.3f}'.format(*metric.get()) for metric in metrics])
logger.info('[Epoch {}] Training cost: {:.3f}, {}'.format(
epoch, (time.time() - tic), msg))
if not (epoch + 1) % args.val_interval:
            # consider reducing the frequency of validation to save time
map_name, mean_ap = validate(net, val_data, ctx, eval_metric, args)
val_msg = '\n'.join(['{}={}'.format(k, v) for k, v in zip(map_name, mean_ap)])
logger.info('[Epoch {}] Validation: \n{}'.format(epoch, val_msg))
current_map = float(mean_ap[-1])
else:
current_map = 0.
save_params(net, logger, best_map, current_map, epoch, args.save_interval, args.save_prefix)
if __name__ == '__main__':
args = parse_args()
# fix seed for mxnet, numpy and python builtin random generator.
gutils.random.seed(args.seed)
# training contexts
ctx = [mx.gpu(int(i)) for i in args.gpus.split(',') if i.strip()]
ctx = ctx if ctx else [mx.cpu()]
args.batch_size = len(ctx) # 1 batch per device
# training data
train_dataset, val_dataset, eval_metric = get_dataset(args.dataset, args)
# network
module_list = []
if args.use_fpn:
module_list.append('fpn')
if args.dataset.lower() == 'imaterialist':
# pretrained on coco dataset
net_name = '_'.join(('mask_rcnn', *module_list, args.network, 'coco'))
# 'mask_rcnn_%s_coco'%(args.network)
net = get_model(net_name, pretrained=True)
# reuse the previously trained weights for specified classes
# {'tie':'tie', 'umbrella':'umbrella', 'bag, wallet':'handbag', 'glove':'baseball glove'}
net.reset_class(train_dataset.CLASSES, reuse_weights={16: 27, 26: 25, 24: 26, 17: 35})
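        # reuse_weights maps new class index -> pretrained COCO class index
        # (sketch of the intent, assuming the ordering implied by the comment
        # above): e.g. new index 16 is warm-started from COCO index 27 ('tie'),
        # so those four heads start from trained weights instead of random init.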
else:
net_name = '_'.join(('mask_rcnn', *module_list, args.network, args.dataset))
net = get_model(net_name, pretrained_base=True)
args.save_prefix += net_name
if args.resume.strip():
net.load_parameters(args.resume.strip())
else:
for param in net.collect_params().values():
if param._data is not None:
continue
param.initialize()
net.collect_params().reset_ctx(ctx)
# modify train transform to support RLE segmentations
if args.dataset.lower() == 'imaterialist':
from utils.rle_transform import MaskRCNNTrainTransformRLE
MaskRCNNDefaultTrainTransform = MaskRCNNTrainTransformRLE
train_data, val_data = get_dataloader(
net, train_dataset, val_dataset, MaskRCNNDefaultTrainTransform, MaskRCNNDefaultValTransform,
args.batch_size, args.num_workers, args.use_fpn)
# training
train(net, train_data, val_data, eval_metric, ctx, args)
| 48.37574 | 108 | 0.596844 |
7942a52862dc5a42e9c877e4070213a3bcbafebe | 16 | py | Python | factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/cameras/tests/conftest.py | kaka-lin/azure-intelligent-edge-patterns | 766833c7c25d2458cec697937be288202d1763bc | ["MIT"] | 176 | 2019-07-03T00:20:15.000Z | 2022-03-14T07:51:22.000Z | factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/cameras/tests/conftest.py | kaka-lin/azure-intelligent-edge-patterns | 766833c7c25d2458cec697937be288202d1763bc | ["MIT"] | 121 | 2019-06-24T20:47:27.000Z | 2022-03-28T02:16:18.000Z | factory-ai-vision/EdgeSolution/modules/WebModule/backend/vision_on_edge/cameras/tests/conftest.py | kaka-lin/azure-intelligent-edge-patterns | 766833c7c25d2458cec697937be288202d1763bc | ["MIT"] | 144 | 2019-06-18T18:48:43.000Z | 2022-03-31T12:14:46.000Z |
"""Conftest
"""
| 5.333333 | 11 | 0.5 |
7942a5bec2d9dd4850f3e16fdab852582f538c65 | 8,271 | py | Python | 28_train_cars_new_rotation_labels_rotation_norm.py | Florian-Barthel/stylegan2 | 4ef87038bf9370596cf2b729e1d1a1bc3ebcddd8 | ["BSD-Source-Code"] | null | null | null | 28_train_cars_new_rotation_labels_rotation_norm.py | Florian-Barthel/stylegan2 | 4ef87038bf9370596cf2b729e1d1a1bc3ebcddd8 | ["BSD-Source-Code"] | null | null | null | 28_train_cars_new_rotation_labels_rotation_norm.py | Florian-Barthel/stylegan2 | 4ef87038bf9370596cf2b729e1d1a1bc3ebcddd8 | ["BSD-Source-Code"] | null | null | null |
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
import argparse
import copy
import sys
import os
import dnnlib
from dnnlib import EasyDict
from metrics.metric_defaults import metric_defaults
#----------------------------------------------------------------------------
_valid_configs = [
# Table 1
'config-a', # Baseline StyleGAN
'config-b', # + Weight demodulation
'config-c', # + Lazy regularization
'config-d', # + Path length regularization
'config-e', # + No growing, new G & D arch.
'config-f', # + Large networks (default)
# Table 2
'config-e-Gorig-Dorig', 'config-e-Gorig-Dresnet', 'config-e-Gorig-Dskip',
'config-e-Gresnet-Dorig', 'config-e-Gresnet-Dresnet', 'config-e-Gresnet-Dskip',
'config-e-Gskip-Dorig', 'config-e-Gskip-Dresnet', 'config-e-Gskip-Dskip',
]
#----------------------------------------------------------------------------
def run(dataset, data_dir, result_dir, config_id, num_gpus, total_kimg, gamma, mirror_augment, metrics):
train = EasyDict(run_func_name='training.training_loop.training_loop_mirror_v6.training_loop')
G = EasyDict(func_name='training.networks.networks_stylegan2.G_main')
D = EasyDict(func_name='training.networks.networks_stylegan2_discriminator_new_rotation.D_stylegan2_new_rotaion') # Options for discriminator network.
G_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)
D_opt = EasyDict(beta1=0.0, beta2=0.99, epsilon=1e-8)
G_loss = EasyDict(func_name='training.loss.loss_G_new_rotation_label_normalize.G_logistic_ns_pathreg')
D_loss = EasyDict(func_name='training.loss.loss_D_logistic_r1_new_rotation_label_normalize.D_logistic_r1_new_rotation')
sched = EasyDict()
grid = EasyDict(size='1080p', layout='random')
sc = dnnlib.SubmitConfig()
tf_config = {'rnd.np_random_seed': 1000}
train.data_dir = data_dir
train.total_kimg = total_kimg
train.mirror_augment = mirror_augment
train.image_snapshot_ticks = train.network_snapshot_ticks = 10
sched.G_lrate_base = sched.D_lrate_base = 0.002
sched.minibatch_size_base = 32
sched.minibatch_gpu_base = 4
D_loss.gamma = 10
metrics = [metric_defaults[x] for x in metrics]
desc = 'stylegan2'
G.style_mixing_prob = None
desc += '-' + dataset
label_file = 'datasets/car_labels/cars_v8-rxx.labels'
desc += '-v8'
dataset_args = EasyDict(tfrecord_dir=dataset, label_file=label_file)
assert num_gpus in [1, 2, 4, 8]
sc.num_gpus = num_gpus
desc += '-%dgpu' % num_gpus
assert config_id in _valid_configs
# desc += '-' + config_id
desc += '-new_rotation_label'
# Configs A-E: Shrink networks to match original StyleGAN.
if config_id != 'config-f':
G.fmap_base = D.fmap_base = 8 << 10
# Config E: Set gamma to 100 and override G & D architecture.
if config_id.startswith('config-e'):
D_loss.gamma = 100
if 'Gorig' in config_id: G.architecture = 'orig'
if 'Gskip' in config_id: G.architecture = 'skip' # (default)
if 'Gresnet' in config_id: G.architecture = 'resnet'
if 'Dorig' in config_id: D.architecture = 'orig'
if 'Dskip' in config_id: D.architecture = 'skip'
if 'Dresnet' in config_id: D.architecture = 'resnet' # (default)
# Configs A-D: Enable progressive growing and switch to networks that support it.
if config_id in ['config-a', 'config-b', 'config-c', 'config-d']:
sched.lod_initial_resolution = 8
sched.G_lrate_base = sched.D_lrate_base = 0.001
sched.G_lrate_dict = sched.D_lrate_dict = {128: 0.0015, 256: 0.002, 512: 0.003, 1024: 0.003}
sched.minibatch_size_base = 32 # (default)
sched.minibatch_size_dict = {8: 256, 16: 128, 32: 64, 64: 32}
sched.minibatch_gpu_base = 4 # (default)
sched.minibatch_gpu_dict = {8: 32, 16: 16, 32: 8, 64: 4}
G.synthesis_func = 'G_synthesis_stylegan_revised'
D.func_name = 'training.networks_stylegan2.D_stylegan'
# Configs A-C: Disable path length regularization.
if config_id in ['config-a', 'config-b', 'config-c']:
G_loss = EasyDict(func_name='training.loss.G_logistic_ns')
# Configs A-B: Disable lazy regularization.
if config_id in ['config-a', 'config-b']:
train.lazy_regularization = False
# Config A: Switch to original StyleGAN networks.
if config_id == 'config-a':
G = EasyDict(func_name='training.networks_stylegan.G_style')
D = EasyDict(func_name='training.networks_stylegan.D_basic')
if gamma is not None:
D_loss.gamma = gamma
sc.submit_target = dnnlib.SubmitTarget.LOCAL
sc.local.do_not_copy_source_files = True
kwargs = EasyDict(train)
kwargs.update(G_args=G, D_args=D, G_opt_args=G_opt, D_opt_args=D_opt, G_loss_args=G_loss, D_loss_args=D_loss)
kwargs.update(dataset_args=dataset_args, sched_args=sched, grid_args=grid, metric_arg_list=metrics, tf_config=tf_config)
kwargs.submit_config = copy.deepcopy(sc)
kwargs.submit_config.run_dir_root = result_dir
kwargs.submit_config.run_desc = desc
dnnlib.submit_run(**kwargs)
#----------------------------------------------------------------------------
def _str_to_bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def _parse_comma_sep(s):
if s is None or s.lower() == 'none' or s == '':
return []
return s.split(',')
#----------------------------------------------------------------------------
_examples = '''examples:
# Train StyleGAN2 using the FFHQ dataset
python %(prog)s --num-gpus=8 --data-dir=~/datasets --config=config-f --dataset=ffhq --mirror-augment=true
valid configs:
''' + ', '.join(_valid_configs) + '''
valid metrics:
''' + ', '.join(sorted([x for x in metric_defaults.keys()])) + '''
'''
def main():
parser = argparse.ArgumentParser(
description='Train StyleGAN2.',
epilog=_examples,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument('--result-dir', help='Root directory for run results (default: %(default)s)', default='results', metavar='DIR')
parser.add_argument('--data-dir', help='Dataset root directory', required=True)
parser.add_argument('--dataset', help='Training dataset', required=True)
parser.add_argument('--config', help='Training config (default: %(default)s)', default='config-f', required=True, dest='config_id', metavar='CONFIG')
parser.add_argument('--num-gpus', help='Number of GPUs (default: %(default)s)', default=1, type=int, metavar='N')
parser.add_argument('--total-kimg', help='Training length in thousands of images (default: %(default)s)', metavar='KIMG', default=25000, type=int)
parser.add_argument('--gamma', help='R1 regularization weight (default is config dependent)', default=None, type=float)
parser.add_argument('--mirror-augment', help='Mirror augment (default: %(default)s)', default=False, metavar='BOOL', type=_str_to_bool)
parser.add_argument('--metrics', help='Comma-separated list of metrics or "none" (default: %(default)s)', default='fid50k', type=_parse_comma_sep)
args = parser.parse_args()
    if not os.path.exists(args.data_dir):
        print('Error: dataset root directory does not exist.')
        sys.exit(1)
    if args.config_id not in _valid_configs:
        print('Error: --config value must be one of: ', ', '.join(_valid_configs))
        sys.exit(1)
    for metric in args.metrics:
        if metric not in metric_defaults:
            print('Error: unknown metric \'%s\'' % metric)
            sys.exit(1)
run(**vars(args))
#----------------------------------------------------------------------------
if __name__ == "__main__":
main()
#----------------------------------------------------------------------------
| 40.743842 | 163 | 0.637891 |
7942a6b51f2c12124eae3aac1c29bba63d4ae55d | 2,614 | py | Python | darts/tests/test_filters.py | muliliao/darts | 2b5f5c3aa81c6962f4d0d2ba5f280d42f5dc5eb0 | [
"Apache-2.0"
] | null | null | null | darts/tests/test_filters.py | muliliao/darts | 2b5f5c3aa81c6962f4d0d2ba5f280d42f5dc5eb0 | [
"Apache-2.0"
] | null | null | null | darts/tests/test_filters.py | muliliao/darts | 2b5f5c3aa81c6962f4d0d2ba5f280d42f5dc5eb0 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import pandas as pd
from .base_test_class import DartsBaseTestClass
from ..models.kalman_filter import KalmanFilter
from ..models.filtering_model import MovingAverage
from ..timeseries import TimeSeries
from ..utils import timeseries_generation as tg
class KalmanFilterTestCase(DartsBaseTestClass):
def test_kalman(self):
""" KalmanFilter test.
Creates an increasing sequence of numbers, adds noise and
assumes the kalman filter predicts values closer to real values
"""
testing_signal = np.arange(1, 5, 0.1)
noise = np.random.normal(0, 0.7, testing_signal.shape)
testing_signal_with_noise = testing_signal + noise
df = pd.DataFrame(data=testing_signal_with_noise, columns=['signal'])
testing_signal_with_noise_ts = TimeSeries.from_dataframe(df, value_cols=['signal'])
kf = KalmanFilter(dim_x=1)
filtered_ts = kf.filter(testing_signal_with_noise_ts, num_samples=1).univariate_values()
noise_distance = testing_signal_with_noise - testing_signal
prediction_distance = filtered_ts - testing_signal
self.assertGreater(noise_distance.std(), prediction_distance.std())
def test_kalman_multivariate(self):
kf = KalmanFilter(dim_x=3)
sine_ts = tg.sine_timeseries(length=30, value_frequency=0.1)
noise_ts = tg.gaussian_timeseries(length=30) * 0.1
ts = sine_ts.stack(noise_ts)
prediction = kf.filter(ts)
self.assertEqual(prediction.width, 3)
class MovingAverageTestCase(DartsBaseTestClass):
def test_moving_average_univariate(self):
ma = MovingAverage(window=3, centered=False)
sine_ts = tg.sine_timeseries(length=30, value_frequency=0.1)
sine_filtered = ma.filter(sine_ts)
self.assertGreater(np.mean(np.abs(sine_ts.values())), np.mean(np.abs(sine_filtered.values())))
def test_moving_average_multivariate(self):
ma = MovingAverage(window=3)
sine_ts = tg.sine_timeseries(length=30, value_frequency=0.1)
noise_ts = tg.gaussian_timeseries(length=30) * 0.1
ts = sine_ts.stack(noise_ts)
ts_filtered = ma.filter(ts)
self.assertGreater(np.mean(np.abs(ts.values()[:, 0])), np.mean(np.abs(ts_filtered.values()[:, 0])))
self.assertGreater(np.mean(np.abs(ts.values()[:, 1])), np.mean(np.abs(ts_filtered.values()[:, 1])))
if __name__ == '__main__':
KalmanFilterTestCase().test_kalman()
MovingAverageTestCase().test_moving_average_univariate()
MovingAverageTestCase().test_moving_average_multivariate()
| 37.884058 | 107 | 0.706197 |
7942a72b97f8b1a9efa46c1cffd28b3ce7d2852c | 481 | py | Python | functions/filterForTarget.py | JRJurman/persistent-number-generator | 4f531e88c94ae9ea1c6aa241e3ae9086c48fd80d | [
"MIT"
] | 1 | 2019-04-10T04:07:50.000Z | 2019-04-10T04:07:50.000Z | functions/filterForTarget.py | JRJurman/persistent-number-generator | 4f531e88c94ae9ea1c6aa241e3ae9086c48fd80d | [
"MIT"
] | 1 | 2019-04-10T04:09:43.000Z | 2019-04-12T01:46:18.000Z | functions/filterForTarget.py | JRJurman/persistent-number-generator | 4f531e88c94ae9ea1c6aa241e3ae9086c48fd80d | [
"MIT"
] | null | null | null | import numpy as np
from functions.multiplyDigits import multiplyDigits
def filterForTarget(listOfNumbers, target, base = 10):
"""
given a list of numbers, find which numbers multiply to get our number
Example:
([20, 21, 22, 23, 24, 25, 26, 27, 28, 29], 8) -> [24]
Parameters:
listOfNumbers: nDim integers
target: (n-1)Dim integers
Returns:
nDim results
"""
products = multiplyDigits(listOfNumbers, base)
return listOfNumbers[products == target]
| 24.05 | 72 | 0.698545 |
7942a75029a14023e1c3082afa6492759dbd4e3b | 11,523 | py | Python | third_party/buildbot_8_4p1/buildbot/manhole.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | [
"BSD-3-Clause"
] | null | null | null | third_party/buildbot_8_4p1/buildbot/manhole.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | [
"BSD-3-Clause"
] | null | null | null | third_party/buildbot_8_4p1/buildbot/manhole.py | bopopescu/build | 4e95fd33456e552bfaf7d94f7d04b19273d1c534 | [
"BSD-3-Clause"
] | 1 | 2020-07-23T11:05:06.000Z | 2020-07-23T11:05:06.000Z | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import os
import types
import binascii
import base64
from twisted.python import log
from twisted.application import service, strports
from twisted.cred import checkers, portal
from twisted.conch import manhole, telnet, manhole_ssh, checkers as conchc
from twisted.conch.insults import insults
from twisted.internet import protocol
from buildbot.util import ComparableMixin
from zope.interface import implements # requires Twisted-2.0 or later
# makeTelnetProtocol and _TelnetRealm are for the TelnetManhole
class makeTelnetProtocol:
# this curries the 'portal' argument into a later call to
# TelnetTransport()
def __init__(self, portal):
self.portal = portal
def __call__(self):
auth = telnet.AuthenticatingTelnetProtocol
return telnet.TelnetTransport(auth, self.portal)
class _TelnetRealm:
implements(portal.IRealm)
def __init__(self, namespace_maker):
self.namespace_maker = namespace_maker
def requestAvatar(self, avatarId, *interfaces):
if telnet.ITelnetProtocol in interfaces:
namespace = self.namespace_maker()
p = telnet.TelnetBootstrapProtocol(insults.ServerProtocol,
manhole.ColoredManhole,
namespace)
return (telnet.ITelnetProtocol, p, lambda: None)
raise NotImplementedError()
class chainedProtocolFactory:
# this curries the 'namespace' argument into a later call to
# chainedProtocolFactory()
def __init__(self, namespace):
self.namespace = namespace
def __call__(self):
return insults.ServerProtocol(manhole.ColoredManhole, self.namespace)
class AuthorizedKeysChecker(conchc.SSHPublicKeyDatabase):
"""Accept connections using SSH keys from a given file.
SSHPublicKeyDatabase takes the username that the prospective client has
requested and attempts to get a ~/.ssh/authorized_keys file for that
username. This requires root access, so it isn't as useful as you'd
like.
Instead, this subclass looks for keys in a single file, given as an
argument. This file is typically kept in the buildmaster's basedir. The
file should have 'ssh-dss ....' lines in it, just like authorized_keys.
"""
def __init__(self, authorized_keys_file):
self.authorized_keys_file = os.path.expanduser(authorized_keys_file)
def checkKey(self, credentials):
        with open(self.authorized_keys_file) as f:
            for l in f.readlines():
                l2 = l.split()
                if len(l2) < 2:
                    continue
                try:
                    if base64.decodestring(l2[1]) == credentials.blob:
                        return 1
                except binascii.Error:
                    continue
return 0
class _BaseManhole(service.MultiService):
"""This provides remote access to a python interpreter (a read/exec/print
loop) embedded in the buildmaster via an internal SSH server. This allows
detailed inspection of the buildmaster state. It is of most use to
buildbot developers. Connect to this by running an ssh client.
"""
def __init__(self, port, checker, using_ssh=True):
"""
@type port: string or int
@param port: what port should the Manhole listen on? This is a
strports specification string, like 'tcp:12345' or
'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
simple tcp port.
@type checker: an object providing the
L{twisted.cred.checkers.ICredentialsChecker} interface
@param checker: if provided, this checker is used to authenticate the
client instead of using the username/password scheme. You must either
provide a username/password or a Checker. Some useful values are::
import twisted.cred.checkers as credc
import twisted.conch.checkers as conchc
c = credc.AllowAnonymousAccess # completely open
c = credc.FilePasswordDB(passwd_filename) # file of name:passwd
c = conchc.UNIXPasswordDatabase # getpwnam() (probably /etc/passwd)
@type using_ssh: bool
@param using_ssh: If True, accept SSH connections. If False, accept
regular unencrypted telnet connections.
"""
# unfortunately, these don't work unless we're running as root
#c = credc.PluggableAuthenticationModulesChecker: PAM
#c = conchc.SSHPublicKeyDatabase() # ~/.ssh/authorized_keys
# and I can't get UNIXPasswordDatabase to work
service.MultiService.__init__(self)
if type(port) is int:
port = "tcp:%d" % port
self.port = port # for comparison later
self.checker = checker # to maybe compare later
def makeNamespace():
# close over 'self' so we can get access to .parent later
master = self.parent
namespace = {
'master': master,
'status': master.getStatus(),
'show': show,
}
return namespace
def makeProtocol():
namespace = makeNamespace()
p = insults.ServerProtocol(manhole.ColoredManhole, namespace)
return p
self.using_ssh = using_ssh
if using_ssh:
r = manhole_ssh.TerminalRealm()
r.chainedProtocolFactory = makeProtocol
p = portal.Portal(r, [self.checker])
f = manhole_ssh.ConchFactory(p)
else:
r = _TelnetRealm(makeNamespace)
p = portal.Portal(r, [self.checker])
f = protocol.ServerFactory()
f.protocol = makeTelnetProtocol(p)
s = strports.service(self.port, f)
s.setServiceParent(self)
def startService(self):
service.MultiService.startService(self)
if self.using_ssh:
via = "via SSH"
else:
via = "via telnet"
log.msg("Manhole listening %s on port %s" % (via, self.port))
class TelnetManhole(_BaseManhole, ComparableMixin):
"""This Manhole accepts unencrypted (telnet) connections, and requires a
username and password authorize access. You are encouraged to use the
encrypted ssh-based manhole classes instead."""
compare_attrs = ["port", "username", "password"]
def __init__(self, port, username, password):
"""
@type port: string or int
@param port: what port should the Manhole listen on? This is a
strports specification string, like 'tcp:12345' or
'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
simple tcp port.
@param username:
@param password: username= and password= form a pair of strings to
use when authenticating the remote user.
"""
self.username = username
self.password = password
c = checkers.InMemoryUsernamePasswordDatabaseDontUse()
c.addUser(username, password)
_BaseManhole.__init__(self, port, c, using_ssh=False)
class PasswordManhole(_BaseManhole, ComparableMixin):
"""This Manhole accepts encrypted (ssh) connections, and requires a
username and password to authorize access.
"""
compare_attrs = ["port", "username", "password"]
def __init__(self, port, username, password):
"""
@type port: string or int
@param port: what port should the Manhole listen on? This is a
strports specification string, like 'tcp:12345' or
'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
simple tcp port.
@param username:
@param password: username= and password= form a pair of strings to
use when authenticating the remote user.
"""
self.username = username
self.password = password
c = checkers.InMemoryUsernamePasswordDatabaseDontUse()
c.addUser(username, password)
_BaseManhole.__init__(self, port, c)
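# Illustrative master.cfg snippet (a sketch, not part of this module):
# enabling a password-protected SSH manhole on the loopback interface,
# where 'c' is the usual BuildmasterConfig dictionary.
#
#   from buildbot import manhole
#   c['manhole'] = manhole.PasswordManhole(
#       "tcp:12345:interface=127.0.0.1", "admin", "passwd")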
class AuthorizedKeysManhole(_BaseManhole, ComparableMixin):
"""This Manhole accepts ssh connections, and requires that the
prospective client have an ssh private key that matches one of the public
keys in our authorized_keys file. It is created with the name of a file
that contains the public keys that we will accept."""
compare_attrs = ["port", "keyfile"]
def __init__(self, port, keyfile):
"""
@type port: string or int
@param port: what port should the Manhole listen on? This is a
strports specification string, like 'tcp:12345' or
'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
simple tcp port.
@param keyfile: the name of a file (relative to the buildmaster's
basedir) that contains SSH public keys of authorized
users, one per line. This is the exact same format
as used by sshd in ~/.ssh/authorized_keys .
"""
# TODO: expanduser this, and make it relative to the buildmaster's
# basedir
self.keyfile = keyfile
c = AuthorizedKeysChecker(keyfile)
_BaseManhole.__init__(self, port, c)
class ArbitraryCheckerManhole(_BaseManhole, ComparableMixin):
"""This Manhole accepts ssh connections, but uses an arbitrary
user-supplied 'checker' object to perform authentication."""
compare_attrs = ["port", "checker"]
def __init__(self, port, checker):
"""
@type port: string or int
@param port: what port should the Manhole listen on? This is a
strports specification string, like 'tcp:12345' or
'tcp:12345:interface=127.0.0.1'. Bare integers are treated as a
simple tcp port.
@param checker: an instance of a twisted.cred 'checker' which will
perform authentication
"""
_BaseManhole.__init__(self, port, checker)
## utility functions for the manhole
def show(x):
"""Display the data attributes of an object in a readable format"""
print "data attributes of %r" % (x,)
names = dir(x)
maxlen = max([0] + [len(n) for n in names])
for k in names:
v = getattr(x,k)
t = type(v)
if t == types.MethodType: continue
if k[:2] == '__' and k[-2:] == '__': continue
if t is types.StringType or t is types.UnicodeType:
if len(v) > 80 - maxlen - 5:
v = `v[:80 - maxlen - 5]` + "..."
elif t in (types.IntType, types.NoneType):
v = str(v)
        elif t in (types.ListType, types.TupleType, types.DictType):
v = "%s (%d elements)" % (v, len(v))
else:
v = str(t)
print "%*s : %s" % (maxlen, k, v)
return x
| 37.656863 | 79 | 0.644884 |
7942a8193b318d313a7b184c1634b4f772f19de5 | 4,520 | py | Python | tests/components/modbus/conftest.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 5 | 2020-12-15T04:09:01.000Z | 2022-03-11T21:34:24.000Z | tests/components/modbus/conftest.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 69 | 2020-08-04T09:03:43.000Z | 2022-03-31T06:13:01.000Z | tests/components/modbus/conftest.py | basicpail/core | 5cc54618c5af3f75c08314bf2375cc7ac40d2b7e | [
"Apache-2.0"
] | 11 | 2020-12-16T13:48:14.000Z | 2022-02-01T00:28:05.000Z | """The tests for the Modbus sensor component."""
import copy
from dataclasses import dataclass
from datetime import timedelta
import logging
from unittest import mock
from pymodbus.exceptions import ModbusException
import pytest
from homeassistant.components.modbus.const import MODBUS_DOMAIN as DOMAIN, TCP
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PORT, CONF_TYPE
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import async_fire_time_changed, mock_restore_cache
TEST_MODBUS_NAME = "modbusTest"
TEST_ENTITY_NAME = "test_entity"
TEST_MODBUS_HOST = "modbusHost"
TEST_PORT_TCP = 5501
TEST_PORT_SERIAL = "usb01"
_LOGGER = logging.getLogger(__name__)
@dataclass
class ReadResult:
"""Storage class for register read results."""
def __init__(self, register_words):
"""Init."""
self.registers = register_words
self.bits = register_words
@pytest.fixture
def mock_pymodbus():
"""Mock pymodbus."""
mock_pb = mock.MagicMock()
with mock.patch(
"homeassistant.components.modbus.modbus.ModbusTcpClient", return_value=mock_pb
), mock.patch(
"homeassistant.components.modbus.modbus.ModbusSerialClient",
return_value=mock_pb,
), mock.patch(
"homeassistant.components.modbus.modbus.ModbusUdpClient", return_value=mock_pb
):
yield mock_pb
@pytest.fixture
def check_config_loaded():
"""Set default for check_config_loaded."""
return True
@pytest.fixture
def register_words():
"""Set default for register_words."""
return [0x00, 0x00]
@pytest.fixture
def config_addon():
"""Add entra configuration items."""
return None
@pytest.fixture
def do_exception():
"""Remove side_effect to pymodbus calls."""
return False
@pytest.fixture
async def mock_modbus(
hass, caplog, register_words, check_config_loaded, config_addon, do_config
):
"""Load integration modbus using mocked pymodbus."""
conf = copy.deepcopy(do_config)
if config_addon:
for key in conf.keys():
conf[key][0].update(config_addon)
caplog.set_level(logging.WARNING)
config = {
DOMAIN: [
{
CONF_TYPE: TCP,
CONF_HOST: TEST_MODBUS_HOST,
CONF_PORT: TEST_PORT_TCP,
CONF_NAME: TEST_MODBUS_NAME,
**conf,
}
]
}
mock_pb = mock.MagicMock()
with mock.patch(
"homeassistant.components.modbus.modbus.ModbusTcpClient", return_value=mock_pb
):
now = dt_util.utcnow()
with mock.patch("homeassistant.helpers.event.dt_util.utcnow", return_value=now):
result = await async_setup_component(hass, DOMAIN, config)
assert result or not check_config_loaded
await hass.async_block_till_done()
yield mock_pb
@pytest.fixture
async def mock_pymodbus_exception(hass, do_exception, mock_modbus):
"""Trigger update call with time_changed event."""
if do_exception:
exc = ModbusException("fail read_coils")
mock_modbus.read_coils.side_effect = exc
mock_modbus.read_discrete_inputs.side_effect = exc
mock_modbus.read_input_registers.side_effect = exc
mock_modbus.read_holding_registers.side_effect = exc
@pytest.fixture
async def mock_pymodbus_return(hass, register_words, mock_modbus):
"""Trigger update call with time_changed event."""
read_result = ReadResult(register_words)
mock_modbus.read_coils.return_value = read_result
mock_modbus.read_discrete_inputs.return_value = read_result
mock_modbus.read_input_registers.return_value = read_result
mock_modbus.read_holding_registers.return_value = read_result
@pytest.fixture
async def mock_do_cycle(hass, mock_pymodbus_exception, mock_pymodbus_return):
"""Trigger update call with time_changed event."""
now = dt_util.utcnow() + timedelta(seconds=90)
with mock.patch("homeassistant.helpers.event.dt_util.utcnow", return_value=now):
async_fire_time_changed(hass, now)
await hass.async_block_till_done()
@pytest.fixture
async def mock_test_state(hass, request):
"""Mock restore cache."""
mock_restore_cache(hass, request.param)
return request.param
@pytest.fixture
async def mock_ha(hass, mock_pymodbus_return):
"""Load homeassistant to allow service calls."""
assert await async_setup_component(hass, "homeassistant", {})
await hass.async_block_till_done()
| 29.933775 | 88 | 0.718805 |
7942a93999bbaf769278bc5892c43e06d41bd3ba | 1,775 | py | Python | Medium/275.H-IndexII.py | YuriSpiridonov/LeetCode | 2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781 | [
"MIT"
] | 39 | 2020-07-04T11:15:13.000Z | 2022-02-04T22:33:42.000Z | Medium/275.H-IndexII.py | YuriSpiridonov/LeetCode | 2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781 | [
"MIT"
] | 1 | 2020-07-15T11:53:37.000Z | 2020-07-15T11:53:37.000Z | Medium/275.H-IndexII.py | YuriSpiridonov/LeetCode | 2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781 | [
"MIT"
] | 20 | 2020-07-14T19:12:53.000Z | 2022-03-02T06:28:17.000Z | """
Given an array of citations sorted in ascending order (each citation is a
non-negative integer) of a researcher, write a function to compute the
researcher's h-index.
According to the definition of h-index on Wikipedia: "A scientist has index
h if h of his/her N papers have at least h citations each, and the other
N − h papers have no more than h citations each."
Example:
Input: citations = [0,1,3,5,6]
Output: 3
Explanation: [0,1,3,5,6] means the researcher has 5 papers in total and
each of them had
received 0, 1, 3, 5, 6 citations respectively.
Since the researcher has 3 papers with at least 3 citations
each and the remaining
two with no more than 3 citations each, her h-index is 3.
Note:
If there are several possible values for h, the maximum one is taken as
the h-index.
Follow up:
- This is a follow up problem to H-Index, where citations is now guaranteed
to be sorted in ascending order.
- Could you solve it in logarithmic time complexity?
"""
#Difficulty: Medium
#84 / 84 test cases passed.
#Runtime: 144 ms, faster than 89.79% of Python3 online submissions for H-Index II.
#Memory Usage: 20.4 MB, less than 50.00% of Python3 online submissions for H-Index II.
class Solution:
def hIndex(self, citations: List[int]) -> int:
if not citations:
return 0
if len(citations) == 1:
return min(len(citations), citations[0])
citations = sorted(citations, reverse = True)
for i, h in enumerate(citations):
if h <= i:
return i
return len(citations)
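# A logarithmic-time variant for the follow-up (a sketch, not the accepted
# submission above): since the input is already sorted ascending, binary
# search for the smallest index i with citations[i] >= n - i; the h-index
# is then n - i.
class SolutionLog:
    def hIndex(self, citations: List[int]) -> int:
        n = len(citations)
        lo, hi = 0, n - 1
        while lo <= hi:
            mid = (lo + hi) // 2
            if citations[mid] >= n - mid:
                hi = mid - 1
            else:
                lo = mid + 1
        return n - lo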
| 38.586957 | 86 | 0.635493 |
7942a9b2c7789bb97d204f50ad37a8cbd782cdc6 | 163 | py | Python | python/helpers/tests/generator3_tests/data/SkeletonGeneration/skeleton_regenerated_for_changed_module/before/cache/9ce58f4a1c/mod.py | tgodzik/intellij-community | f5ef4191fc30b69db945633951fb160c1cfb7b6f | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/helpers/tests/generator3_tests/data/SkeletonGeneration/skeleton_regenerated_for_changed_module/before/cache/9ce58f4a1c/mod.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 2 | 2022-02-19T09:45:05.000Z | 2022-02-27T20:32:55.000Z | python/helpers/tests/generator3_tests/data/SkeletonGeneration/skeleton_regenerated_for_changed_module/before/cache/9ce58f4a1c/mod.py | Cyril-lamirand/intellij-community | 60ab6c61b82fc761dd68363eca7d9d69663cfa39 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | # encoding: utf-8
# module mod
# from mod.py
# by generator 1000.0
# no doc
# no imports
# Variables with simple values
version = 1
# no functions
# no classes
| 11.642857 | 30 | 0.693252 |
7942aa477c947ebecd74d0dc87e0b3f4a8d3bd31 | 7,974 | py | Python | meta_pseudo_labels/flag_utils.py | shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-13T21:48:52.000Z | 2022-03-13T21:48:52.000Z | meta_pseudo_labels/flag_utils.py | shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | null | null | null | meta_pseudo_labels/flag_utils.py | shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | [
"Apache-2.0"
] | 1 | 2022-03-30T07:20:29.000Z | 2022-03-30T07:20:29.000Z | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint: disable=logging-format-interpolation,unused-import
r"""Define all the relevant flags for these experiments in this file."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import sys
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
if 'gfile' not in sys.modules:
gfile = tf.gfile
_flags = []
def define_string(name, default_value, helper):
flags.DEFINE_string(name, default_value, helper)
global _flags
_flags.append(name)
def define_integer(name, default_value, helper):
flags.DEFINE_integer(name, default_value, helper)
global _flags
_flags.append(name)
def define_float(name, default_value, helper):
flags.DEFINE_float(name, default_value, helper)
global _flags
_flags.append(name)
def define_boolean(name, default_value, helper):
flags.DEFINE_boolean(name, default_value, helper)
global _flags
_flags.append(name)
define_boolean('running_local_dev', False, '')
define_boolean('reset_output_dir', False, '')
define_string('load_checkpoint', None, '')
define_boolean('load_checkpoint_and_restart_global_step', False, '')
define_string('master', None, 'Should be `/bns/el-d/...`')
define_string('tpu_topology', '', 'Should be `2x2`, `4x4`, etc.')
define_boolean('use_tpu', False, '')
define_integer('num_infeed_threads', 4, '')
define_boolean('use_bfloat16', False, '')
define_integer('save_every', 1000, '')
define_integer('log_every', 10, '')
define_string('dataset_name', None, '')
define_integer('num_shards_per_worker', None, '')
define_string('task_mode', None, '')
define_string('output_dir', None, '')
define_float('batch_norm_decay', 0.99, '')
define_float('batch_norm_epsilon', 1e-3, '')
define_integer('batch_norm_batch_size', None, '')
define_integer('train_batch_size', 128, '')
define_integer('eval_batch_size', 128, '')
define_integer('image_size', 32, '')
define_integer('num_classes', 10, '')
define_integer('num_warmup_steps', 0, '')
define_integer('num_train_steps', 10000, '')
define_integer('num_decay_steps', 750, '')
define_float('weight_decay', 1e-4, '')
define_float('dense_dropout_rate', 0.1, '')
define_float('stochastic_depth_drop_rate', 0., '')
define_float('grad_bound', 1e9, '')
define_float('lr', 0.016, 'Per-256-examples start LR for RMSprop')
define_string('lr_decay_type', 'exponential', '')
define_string('optim_type', 'momentum', '')
define_string('model_type', 'wrn-28-2', '')
define_float('lr_decay_rate', 0.97, '')
define_float('rmsprop_rho', 0.9, '')
define_float('rmsprop_momentum', 0.9, '')
define_float('rmsprop_epsilon', 1e-3, '')
define_float('teacher_lr', 0.1, '')
define_float('ema_decay', 0., 'Set to 0 to not use moving_average')
define_integer('ema_start', 0, 'Step to start using ema at this step')
define_boolean('use_augment', False, None)
define_integer('augment_magnitude', 5, '')
define_string('inference_ckpt', None, '')
define_string('inference_ckpt_ensemble', None, 'Comma-separated list')
define_integer('inference_class_id', 0, '')
define_integer('inference_num_threads', None, '')
define_integer('inference_thread_index', 0, '')
define_integer('dataset_service_replicas', None, '')
define_string('dataset_service_bns_prefix', None, '')
define_float('label_smoothing', 0., '')
define_float('teacher_base_lr', 1e-3, '')
define_float('teacher_grad_bound', 20., '')
define_float('teacher_eps', 1e-7, '')
define_float('teacher_weight_decay', 1e-4, '')
define_float('teacher_temperature', 0.5, '')
define_float('teacher_grad_dot_moving_average_decay', 0.99, '')
define_float('teacher_init_range', 0.1, '')
define_float('teacher_one_hot_w', 0.9, '')
define_integer('teacher_num_samples', 10, '')
# UDA
define_integer('uda_warmup_steps', 2000, '')
define_integer('uda_data', 4, '')
define_integer('uda_steps', 10000, '')
define_float('uda_temp', 0.75, '')
define_float('uda_weight', 1., '')
define_float('uda_threshold', 0.6, '')
# MPL
define_float('mpl_temp', 0.75, '')
define_float('mpl_threshold', 0.6, '')
define_float('mpl_dot_product_bound', 0.1, '')
define_integer('mpl_student_wait_steps', 2500, '')
define_string('mpl_teacher_checkpoint', None, 'for MPL teacher')
define_string('mpl_student_checkpoint', None, 'for MPL student')
define_float('mpl_student_lr', 0.1, '')
define_integer('mpl_student_lr_wait_steps', 500, '')
define_integer('mpl_student_lr_warmup_steps', 5000, '')
define_float('mpl_teacher_lr', 0.1, '')
define_integer('mpl_teacher_lr_warmup_steps', 5000, '')
class HParams(object):
"""Implementing the interface of `tf.contrib.training.HParams`."""
def __init__(self, **kwargs):
self.params_dict = {}
for k, v in kwargs.items():
self.params_dict[k] = v
def add_hparam(self, k, v):
self.params_dict[k] = v
def set_hparam(self, k, v):
self.params_dict[k] = v
def to_json(self, indent=0):
return json.dumps(
{k: self.params_dict[k] for k in sorted(self.params_dict.keys())},
indent=indent)
def __getattr__(self, k):
return self.params_dict[k]
def __contains__(self, k):
return k in self.params_dict
def __getitem__(self, k):
return self.params_dict[k]
def __setitem__(self, k, v):
self.params_dict[k] = v
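# Minimal usage sketch (illustrative only):
#
#   params = HParams(lr=0.016, use_tpu=False)
#   params.add_hparam('num_classes', 10)
#   assert params.lr == 0.016 and 'num_classes' in params
#   print(params.to_json(indent=2))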
def _deduce_num_classes(params):
"""Set `num_classes` for `params`."""
if 'imagenet' in params.dataset_name.lower():
num_classes = 1000
elif 'cifar100' in params.dataset_name.lower():
num_classes = 100
else:
logging.info('Cannot infer `num_classes` from dataset {0}. Use 10'.format(
params.dataset_name))
num_classes = 10
if 'num_classes' in params and num_classes != params.num_classes:
logging.info('Replace `params.num_classes` from {0} to {1}'.format(
params.num_classes, num_classes))
params.set_hparam('num_classes', num_classes)
def build_params_from_flags():
"""Build and return a `tf.HParams` object."""
FLAGS = flags.FLAGS # pylint: disable=invalid-name
# Make sure not to delete trained checkpoints
if FLAGS.task_mode == 'evals':
assert not FLAGS.reset_output_dir, '`eval` tasks cannot `reset_output_dir`'
# Figure out `output_dir`
output_dir = FLAGS.output_dir
logging.info(f'Checkpoints are at: {output_dir}')
# Create or delete `output_dir`
if not gfile.IsDirectory(output_dir):
logging.info(f'Path `{output_dir}` does not exist. Creating')
gfile.MakeDirs(output_dir, mode=0o777)
elif FLAGS.reset_output_dir:
logging.info(f'Path `{output_dir}` exists. Removing')
gfile.DeleteRecursively(output_dir)
gfile.MakeDirs(output_dir, mode=0o777)
global _flags
params = HParams(
inf=float('inf'),
output_dir=output_dir,
)
for flag_name in _flags:
flag_value = getattr(FLAGS, flag_name)
if flag_name not in params:
params.add_hparam(flag_name, flag_value)
# Try to infer `num_classes` to avoid mistakes, eg. ImageNet with 10 classes.
_deduce_num_classes(params)
pretty_print_params = params.to_json(indent=2)
logging.info(pretty_print_params)
if params.task_mode not in ['inference', 'evals', 'eval_forever']:
params_filename = os.path.join(params.output_dir, 'hparams.json')
if not gfile.Exists(params_filename):
with gfile.GFile(params_filename, 'w') as fout:
fout.write(pretty_print_params)
fout.flush()
return params
| 31.027237 | 79 | 0.727489 |
7942aaad0418a90353515e6450eb87ada404f59b | 1,000 | py | Python | Week4/Tarefa 02/Exercicio_02_Ordenacao_selection_sort.py | WesGtoX/Intro-Computer-Science-with-Python-Part02 | 9dc53362c32575b63d8d1c06c201bc7a01bc07b6 | [
"MIT"
] | null | null | null | Week4/Tarefa 02/Exercicio_02_Ordenacao_selection_sort.py | WesGtoX/Intro-Computer-Science-with-Python-Part02 | 9dc53362c32575b63d8d1c06c201bc7a01bc07b6 | [
"MIT"
] | null | null | null | Week4/Tarefa 02/Exercicio_02_Ordenacao_selection_sort.py | WesGtoX/Intro-Computer-Science-with-Python-Part02 | 9dc53362c32575b63d8d1c06c201bc7a01bc07b6 | [
"MIT"
] | null | null | null | def ordena(lista):
tamanho_lista = len(lista)
for i in range(tamanho_lista - 1):
menor_visto = i
for j in range(i + 1, tamanho_lista):
if lista[j] < lista[menor_visto]:
menor_visto = j
lista[i], lista[menor_visto] = lista[menor_visto], lista[i]
return lista
# def lista_grande(n):
# import random
# lista_g = []
# for i in range(n):
# lista_g.append(random.randrange(9999))
# return lista_g
#
#
# def ordenada(lista):
# for i in range(len(lista)):
# if i < len(lista) - 1:
# if lista[i] > lista[i+1]:
# return False
# return True
#
#
# def test_ordena_01():
# assert ordenada(ordena(lista_grande(15))) == True
#
#
# def test_ordena_03():
# assert ordenada(ordena(lista_grande(10))) == True
#
#
# def test_ordena_04():
# assert ordenada(ordena(lista_grande(5))) == True
#
#
# def test_ordena_06():
# assert ordenada(ordena(lista_grande(20))) == True
| 23.255814 | 67 | 0.583 |
7942aac9d2f04b8caf7da238fb64f3f186fe9957 | 353 | py | Python | src/genie/libs/parser/ios/show_config.py | nujo/genieparser | 083b01efc46afc32abe1a1858729578beab50cd3 | [
"Apache-2.0"
] | 204 | 2018-06-27T00:55:27.000Z | 2022-03-06T21:12:18.000Z | src/genie/libs/parser/ios/show_config.py | nujo/genieparser | 083b01efc46afc32abe1a1858729578beab50cd3 | [
"Apache-2.0"
] | 468 | 2018-06-19T00:33:18.000Z | 2022-03-31T23:23:35.000Z | src/genie/libs/parser/ios/show_config.py | nujo/genieparser | 083b01efc46afc32abe1a1858729578beab50cd3 | [
"Apache-2.0"
] | 309 | 2019-01-16T20:21:07.000Z | 2022-03-30T12:56:41.000Z | ''' show_config.py
IOS parsers for the following show commands:
* show configuration lock
'''
# import iosxe parser
from genie.libs.parser.iosxe.show_config import ShowConfigurationLock as \
ShowConfigurationLock_iosxe
class ShowConfigurationLock(ShowConfigurationLock_iosxe):
""" Parser for show configuration lock """
pass
| 23.533333 | 74 | 0.753541 |
7942ab7e65542cfa4f00c046fb759fae434343f9 | 3,601 | py | Python | venv/lib/python3.6/site-packages/channels/routing.py | AzamatGla/channelChokan | 0dbcc8b701d57dcd4c8fe9c0573738c573797c21 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/channels/routing.py | AzamatGla/channelChokan | 0dbcc8b701d57dcd4c8fe9c0573738c573797c21 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/channels/routing.py | AzamatGla/channelChokan | 0dbcc8b701d57dcd4c8fe9c0573738c573797c21 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
import importlib
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from channels.http import AsgiHandler
"""
All Routing instances inside this file are also valid ASGI applications - with
new Channels routing, whatever you end up with as the top level object is just
served up as the "ASGI application".
"""
def get_default_application():
"""
Gets the default application, set in the ASGI_APPLICATION setting.
"""
try:
path, name = settings.ASGI_APPLICATION.rsplit(".", 1)
except (ValueError, AttributeError):
raise ImproperlyConfigured("Cannot find ASGI_APPLICATION setting.")
try:
module = importlib.import_module(path)
except ImportError:
raise ImproperlyConfigured("Cannot import ASGI_APPLICATION module %r" % path)
try:
value = getattr(module, name)
except AttributeError:
raise ImproperlyConfigured("Cannot find %r in ASGI_APPLICATION module %s" % (name, path))
return value
class ProtocolTypeRouter:
"""
Takes a mapping of protocol type names to other Application instances,
and dispatches to the right one based on protocol name (or raises an error)
"""
def __init__(self, application_mapping):
self.application_mapping = application_mapping
if "http" not in self.application_mapping:
self.application_mapping["http"] = AsgiHandler
def __call__(self, scope):
if scope["type"] in self.application_mapping:
return self.application_mapping[scope["type"]](scope)
else:
raise ValueError("No application configured for scope type %r" % scope["type"])
class URLRouter:
"""
Routes to different applications/consumers based on the URL path.
Works with anything that has a ``path`` key, but intended for WebSocket
and HTTP. Uses Django's django.conf.urls objects for resolution -
url() or path().
"""
def __init__(self, routes):
self.routes = routes
def __call__(self, scope):
# Get the path
path = scope.get("path", None)
if path is None:
raise ValueError("No 'path' key in connection scope, cannot route URLs")
# Remove leading / to match Django's handling
path = path.lstrip("/")
# Run through the routes we have until one matches
for route in self.routes:
match = route.resolve(path)
if match is not None:
# Add args or kwargs into the scope
scope["url_route"] = {
"args": match.args,
"kwargs": match.kwargs,
}
return match.func(scope)
else:
raise ValueError("No route found for path %r." % path)
class ChannelNameRouter:
"""
Maps to different applications based on a "channel" key in the scope
(intended for the Channels worker mode)
"""
def __init__(self, application_mapping):
self.application_mapping = application_mapping
def __call__(self, scope):
if "channel" not in scope:
raise ValueError(
"ChannelNameRouter got a scope without a 'channel' key. " +
"Did you make sure it's only being used for 'channel' type messages?"
)
if scope["channel"] in self.application_mapping:
return self.application_mapping[scope["channel"]](scope)
else:
raise ValueError("No application configured for channel name %r" % scope["channel"])
| 33.654206 | 97 | 0.647876 |
7942ab81d584e5e7d97f3b522e04a1322a76faf7 | 7,800 | py | Python | realtime-visualisation/main.py | deets/beehive-monitor | 6fc0626e94f2cfc576a406fce9798319336b6006 | [
"MIT"
] | null | null | null | realtime-visualisation/main.py | deets/beehive-monitor | 6fc0626e94f2cfc576a406fce9798319336b6006 | [
"MIT"
] | null | null | null | realtime-visualisation/main.py | deets/beehive-monitor | 6fc0626e94f2cfc576a406fce9798319336b6006 | [
"MIT"
] | null | null | null | # Copyright: 2021, Diez B. Roggisch, Berlin . All rights reserved.
import paho.mqtt.client as mqtt
import threading
import queue
import argparse
import datetime as dt
import pathlib
from bokeh.models import ColumnDataSource
from bokeh.plotting import curdoc, figure
from bokeh.layouts import column
def regroup_line(sdcard_data):
    # The V2 format contains a trailing ',' because it is easier to write
    # that way on the device.
sequence, timestamp, *rest = sdcard_data.rstrip(",").split(",")
entries = []
for index in range(0, len(rest), 4):
busno, address, humidity, temperature = rest[index:index + 4]
entry = f"{busno}{address},{temperature},{humidity}"
entries.append(entry)
return ";".join([f"{sequence},{timestamp}"] + entries)
def raw2humidity(humidity):
return humidity * 100 / 65535.0
def raw2temperature(temperature):
return temperature * 175.0 / 65535.0 - 45.0
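# These conversions follow the SHT3x datasheet formulas:
#   RH = 100 * S_RH / (2^16 - 1)
#   T[degC] = -45 + 175 * S_T / (2^16 - 1)
# where S_RH and S_T are the raw 16-bit sensor words.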
def process_sensor_payload(sensor_payload):
id_, temperature, humidity = sensor_payload.split(",")
assert temperature[0] == "T"
assert humidity[0] == "H"
return id_, int(temperature[1:], 16), int(humidity[1:], 16)
def process_payload(payload):
header, *sensors = payload.decode("ascii").split(";")
_sequence, timestamp = header.split(",")
timestamp = dt.datetime.fromisoformat(timestamp.split("+")[0])
sensors = [process_sensor_payload(sensor) for sensor in sensors]
return timestamp, sensors
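# Hypothetical wire format for reference (values invented for illustration):
#
#   b"17,2021-05-01T12:00:00+00:00;00A,T6A3F,H7FFF"
#
# parses to (datetime(2021, 5, 1, 12, 0), [("00A", 0x6A3F, 0x7FFF)]).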
class Visualisation:
def __init__(self):
opts = self._parse_args()
self._topic = opts.topic
self._size = opts.size
if opts.input is None:
self._acquisition_task = self._mqtt_task
else:
self._acquisition_task = lambda: self._acquire_from_file(opts.input)
self._temperature_converter = lambda x: x
self._humidity_converter = lambda x: x
if opts.convert:
self._temperature_converter = raw2temperature
self._humidity_converter = raw2humidity
self._data_q = queue.Queue()
doc = self._doc = curdoc()
data = dict(
time=[],
)
self._source = ColumnDataSource(
data=data,
)
self._temperature_figure = figure(
width=600,
height=200,
y_axis_label="Temperature",
x_axis_type="datetime",
)
self._humidity_figure = figure(
width=600,
height=200,
y_axis_label="Humidity",
x_axis_type="datetime",
)
children = [self._temperature_figure, self._humidity_figure]
self._layout = column(
children,
sizing_mode="scale_width"
)
doc.add_root(self._layout)
self._writer = lambda payload: None
if opts.output:
outf = open(opts.output, "wb")
def writer(payload):
outf.write(payload)
                if not payload.endswith(b"\n"):  # indexing bytes yields an int, so use endswith
outf.write(b"\n")
outf.flush()
self._writer = writer
def _parse_args(self):
parser = argparse.ArgumentParser()
parser.add_argument("-o", "--output", help="Record data into file")
parser.add_argument("-i", "--input", help="Load data from this file instead of MQTT")
parser.add_argument("-s", "--size", type=int, default=None, help="The limit of measurements shown.")
parser.add_argument("-c", "--convert", action="store_true", default=False, help="Convert acconding to SHT3XDIS datasheet.")
parser.add_argument("--topic", default="beehive/beehive")
return parser.parse_args()
def start_acquisition(self):
t = threading.Thread(target=self._acquisition_task)
t.daemon = True
t.start()
def _mqtt_task(self):
client = mqtt.Client()
client.on_connect = self._on_connect
client.on_message = self._on_message
client.connect("singlemalt.local", 1883, 60)
# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
client.loop_forever()
def _acquire_from_file(self, path):
path = pathlib.Path(path)
# Data from the SD-Card itself
if path.name.startswith("BEE"):
self._acquire_from_sdcard_data(path)
else:
with path.open("rb") as inf:
for line in inf:
line = line.strip()
if line:
self._data_q.put(line)
self._doc.add_next_tick_callback(self._process_data)
def _acquire_from_sdcard_data(self, path):
all_files = sorted(path.parent.glob(path.name))
for file_ in all_files:
for line in file_.read_text().split("\n"):
line = line.strip()
if line:
assert line.startswith("#V2,")
line = regroup_line(line[4:]).encode("ascii")
self._data_q.put(line)
self._doc.add_next_tick_callback(self._process_data)
def _on_connect(self, client, userdata, flags, rc):
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe(self._topic)
def _on_message(self, client, userdata, msg):
#print(msg.topic, str(msg.payload))
self._data_q.put(msg.payload)
self._writer(msg.payload)
self._doc.add_next_tick_callback(self._process_data)
def _add_graph(self, id_, temperature, humidity, data):
data[f"temp-{id_}"] = [temperature] * len(data["time"])
data[f"hum-{id_}"] = [humidity] * len(data["time"])
self._temperature_figure.line(
x="time",
y=f"temp-{id_}",
alpha=0.5,
source=self._source,
legend_label=f"{id_}C"
)
self._humidity_figure.line(
x="time",
y=f"hum-{id_}",
alpha=0.5,
source=self._source,
legend_label=f"{id_}%"
)
for p in [self._temperature_figure, self._humidity_figure]:
            p.legend.click_policy = "hide"
def _process_data(self):
# For some reason we get multiple callbacks
# in the mainloop. So instead of relying on
# one callback per line, we gather them
# and process as many of them as we find.
source = self._source
for _ in range(self._data_q.qsize()):
incoming_data = self._data_q.get()
timestamp, sensor_data = process_payload(incoming_data)
# this is needed to "clean up" the data
# as bokeh otherwise complains in the update
data = dict(source.data)
data["time"].append(timestamp)
if self._size is not None:
data["time"] = data["time"][-self._size:]
time_len = len(data["time"])
for id_, temperature, humidity in sensor_data:
temperature = self._temperature_converter(temperature)
humidity = self._humidity_converter(humidity)
if f"temp-{id_}" not in data:
self._add_graph(id_, temperature, humidity, data)
data[f"temp-{id_}"].append(temperature)
data[f"temp-{id_}"] = data[f"temp-{id_}"][-time_len:]
data[f"hum-{id_}"].append(humidity)
data[f"hum-{id_}"] = data[f"hum-{id_}"][-time_len:]
source.update(data=data)
def main():
visualisation = Visualisation()
visualisation.start_acquisition()
main()
| 32.365145 | 131 | 0.593974 |
7942abab6a268bb91ae4bfbf4e27310935458aa3 | 4,304 | py | Python | accountsynchr/tests/test_models.py | uw-it-aca/eventcal | 1d0800fa1f218577f470de127f22a47584db37be | [
"Apache-2.0"
] | null | null | null | accountsynchr/tests/test_models.py | uw-it-aca/eventcal | 1d0800fa1f218577f470de127f22a47584db37be | [
"Apache-2.0"
] | 119 | 2016-12-17T05:02:32.000Z | 2022-02-01T22:14:25.000Z | accountsynchr/tests/test_models.py | uw-it-aca/eventcal | 1d0800fa1f218577f470de127f22a47584db37be | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.test import TestCase
from uw_gws.models import GroupReference
from uw_trumba.models import TrumbaCalendar
from accountsynchr.models import (
UwcalGroup, new_editor_group, new_showon_group, get_cal_name)
class TestModels(TestCase):
def test_uwcalgroup(self):
trumba_cal = TrumbaCalendar(calendarid=2, campus="bot")
editor_gr = new_editor_group(trumba_cal)
editor_gr.set_calendar_name("Bothell >> Dean's Office")
self.assertEqual(trumba_cal.name, "Bothell >> Dean's Office")
showon_gr = new_showon_group(trumba_cal)
# UwcalGroup methods
self.assertEqual(editor_gr.get_calendarid(), 2)
self.assertEqual(editor_gr.get_campus_code(), "bot")
self.assertFalse(editor_gr.has_group_ref())
self.assertIsNone(editor_gr.get_regid())
self.assertEqual(editor_gr.get_group_id(), "u_eventcal_bot_2-editor")
self.assertEqual(editor_gr.get_group_name(),
"u_eventcal_bot_2-editor")
self.assertEqual(editor_gr.get_group_admin(), "u_eventcal_support")
self.assertIsNotNone(editor_gr.get_group_desc())
self.assertEqual(editor_gr.get_group_title(),
"Bothell >> Dean's Office calendar editor group")
self.assertEqual(editor_gr.get_member_manager(),
"u_eventcal_bot_2-editor")
self.assertTrue(editor_gr.is_editor_group())
self.assertFalse(showon_gr.is_editor_group())
self.assertFalse(editor_gr.is_showon_group())
self.assertTrue(showon_gr.is_showon_group())
# group_ref is None
self.assertFalse(editor_gr.same_name(trumba_cal))
self.assertFalse(showon_gr.same_name(trumba_cal))
self.assertEqual(editor_gr.to_json(),
{'calendar': {'calendarid': 2,
'campus': 'bot',
'name': "Bothell >> Dean's Office",
'permissions': {}},
'group_ref': None,
'gtype': 'editor',
'members': []})
editor_gr.group_ref = GroupReference(
name="u_eventcal_bot_2-editor",
display_name="Bothell >> Dean's Office")
self.assertTrue(editor_gr.same_name(trumba_cal))
showon_gr.group_ref = GroupReference(
name="u_eventcal_bot_2-showon",
display_name="Bothell >> Dean's Office calendar showon group")
self.assertTrue(showon_gr.same_name(trumba_cal))
self.assertTrue(editor_gr == editor_gr)
self.assertFalse(editor_gr == showon_gr)
self.assertEqual(
editor_gr.to_json(),
{'calendar': {'calendarid': 2,
'campus': 'bot',
'name': "Bothell >> Dean's Office",
'permissions': {}},
'group_ref': {
'displayName': "Bothell >> Dean's Office",
'id': 'u_eventcal_bot_2-editor',
'regid': ''},
'gtype': 'editor',
'members': []})
self.assertIsNotNone(str(editor_gr))
def test_get_cal_name(self):
self.assertEqual(get_cal_name("UW Tacoma Campus Events"),
"UW Tacoma Campus Events")
self.assertEqual(get_cal_name("Tacoma Campus calendar editor group"),
"Tacoma Campus")
self.assertEqual(get_cal_name("Tacoma Campus calendar showon group"),
"Tacoma Campus")
self.assertEqual(get_cal_name(
"Foster School of Business >> Mktg & Int'l Business " +
"calendar showon group"),
"Foster School of Business >> Mktg & Int'l Business")
self.assertEqual(get_cal_name("Integrated Service Center (ISC) >>" +
" Training >> Seminar >> Workday 101 " +
"- Bothell calendar editor group"),
"Integrated Service Center (ISC) >>" +
" Training >> Seminar >> Workday 101 - Bothell")
| 44.833333 | 78 | 0.575046 |
7942acc9060fb2b30c2105f2ab903b1d84deb4e4 | 846 | py | Python | biobb_ml/test/unitests/test_classification/test_k_neighbors.py | bioexcel/biobb_ml | f99346ef7885d3a62de47dab738a01db4b27467a | [
"Apache-2.0"
] | null | null | null | biobb_ml/test/unitests/test_classification/test_k_neighbors.py | bioexcel/biobb_ml | f99346ef7885d3a62de47dab738a01db4b27467a | [
"Apache-2.0"
] | 5 | 2021-06-30T11:24:14.000Z | 2021-08-04T12:53:00.000Z | biobb_ml/test/unitests/test_classification/test_k_neighbors.py | bioexcel/biobb_ml | f99346ef7885d3a62de47dab738a01db4b27467a | [
"Apache-2.0"
] | null | null | null | from biobb_common.tools import test_fixtures as fx
from biobb_ml.classification.k_neighbors import k_neighbors
class TestKNeighborsTrain():
def setUp(self):
fx.test_setup(self,'k_neighbors')
def tearDown(self):
fx.test_teardown(self)
pass
def test_k_neighbors(self):
k_neighbors(properties=self.properties, **self.paths)
assert fx.not_empty(self.paths['output_model_path'])
assert fx.equal(self.paths['output_model_path'], self.paths['ref_output_model_path'])
assert fx.not_empty(self.paths['output_test_table_path'])
assert fx.equal(self.paths['output_test_table_path'], self.paths['ref_output_test_table_path'])
assert fx.not_empty(self.paths['output_plot_path'])
assert fx.equal(self.paths['output_plot_path'], self.paths['ref_output_plot_path'])
| 40.285714 | 103 | 0.725768 |
7942acd5dba2d5eed64c8ce45ea754c775ad0b0a | 4,813 | py | Python | nova/virt/disk/vfs/api.py | vmthunder/nova | baf05caab705c5778348d9f275dc541747b7c2de | [
"Apache-2.0"
] | null | null | null | nova/virt/disk/vfs/api.py | vmthunder/nova | baf05caab705c5778348d9f275dc541747b7c2de | [
"Apache-2.0"
] | null | null | null | nova/virt/disk/vfs/api.py | vmthunder/nova | baf05caab705c5778348d9f275dc541747b7c2de | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import exception
from nova.i18n import _LI
from nova.openstack.common import log as logging
from oslo.utils import importutils
LOG = logging.getLogger(__name__)
class VFS(object):
"""Interface for manipulating disk image.
The VFS class defines an interface for manipulating files within
a virtual disk image filesystem. This allows file injection code
to avoid the assumption that the virtual disk image can be mounted
in the host filesystem.
All paths provided to the APIs in this class should be relative
to the root of the virtual disk image filesystem. Subclasses
will translate paths as required by their implementation.
"""
# Class level flag to indicate whether we can consider
# that guestfs is ready to be used.
guestfs_ready = False
@staticmethod
def instance_for_image(imgfile, imgfmt, partition):
LOG.debug("Instance for image imgfile=%(imgfile)s "
"imgfmt=%(imgfmt)s partition=%(partition)s",
{'imgfile': imgfile, 'imgfmt': imgfmt,
'partition': partition})
vfs = None
try:
LOG.debug("Using primary VFSGuestFS")
vfs = importutils.import_object(
"nova.virt.disk.vfs.guestfs.VFSGuestFS",
imgfile, imgfmt, partition)
if not VFS.guestfs_ready:
# Inspect for capabilities and keep
# track of the result only if succeeded.
vfs.inspect_capabilities()
VFS.guestfs_ready = True
return vfs
except exception.NovaException:
if vfs is not None:
# We are able to load libguestfs but
# something wrong happens when trying to
# check for capabilities.
raise
else:
LOG.info(_LI("Unable to import guestfs"
"falling back to VFSLocalFS"))
return importutils.import_object(
"nova.virt.disk.vfs.localfs.VFSLocalFS",
imgfile, imgfmt, partition)
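    # Illustrative call (a sketch; paths and formats depend on the image
    # being injected into):
    #
    #   vfs = VFS.instance_for_image('/path/to/disk.qcow2', 'qcow2', None)
    #   vfs.setup()
    #   try:
    #       vfs.replace_file('/etc/hostname', 'myhost\n')
    #   finally:
    #       vfs.teardown()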
def __init__(self, imgfile, imgfmt, partition):
self.imgfile = imgfile
self.imgfmt = imgfmt
self.partition = partition
def setup(self):
"""Performs any one-time setup.
Perform any one-time setup tasks to make the virtual filesystem
available to future API calls.
"""
pass
def teardown(self):
"""Releases all resources initialized in the setup method."""
pass
def make_path(self, path):
"""Creates a directory @path.
Create a directory @path, including all intermedia path components
if they do not already exist.
"""
pass
def append_file(self, path, content):
"""Appends @content to the end of the file.
Append @content to the end of the file identified by @path, creating
the file if it does not already exist.
"""
pass
def replace_file(self, path, content):
"""Replaces contents of the file.
Replace the entire contents of the file identified by @path, with
@content, creating the file if it does not already exist.
"""
pass
def read_file(self, path):
"""Returns the entire contents of the file identified by @path."""
pass
def has_file(self, path):
"""Returns a True if the file identified by @path exists."""
pass
def set_permissions(self, path, mode):
"""Sets the permissions on the file.
Set the permissions on the file identified by @path to @mode. The file
must exist prior to this call.
"""
pass
def set_ownership(self, path, user, group):
"""Sets the ownership on the file.
Set the ownership on the file identified by @path to the username
@user and groupname @group. Either of @user or @group may be None,
in which case the current ownership will be left unchanged.
The ownership must be passed in string form, allowing subclasses to
translate to uid/gid form as required. The file must exist prior to
this call.
"""
pass
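# Minimal usage sketch (illustrative; the image path, format and file
# contents below are hypothetical, not taken from this module):
#
#     vfs = VFS.instance_for_image('/tmp/disk.qcow2', 'qcow2', None)
#     vfs.setup()
#     try:
#         vfs.replace_file('/etc/hostname', 'myhost\n')
#     finally:
#         vfs.teardown()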
| 33.894366 | 78 | 0.635363 |
7942ad26d061b085f232f705d8d52d46f5b4ee14 | 232 | py | Python | P08/ex03_pickle_test_in.py | ChanganXLTZ/project_test | b6aa323de105beb6281045bf7b89ed3857ed3d9f | [
"CNRI-Python"
] | 1 | 2018-09-16T13:51:06.000Z | 2018-09-16T13:51:06.000Z | P08/ex03_pickle_test_in.py | ChanganXLTZ/project_test | b6aa323de105beb6281045bf7b89ed3857ed3d9f | [
"CNRI-Python"
] | null | null | null | P08/ex03_pickle_test_in.py | ChanganXLTZ/project_test | b6aa323de105beb6281045bf7b89ed3857ed3d9f | [
"CNRI-Python"
] | null | null | null | #! /usr/bin/python3
# -*- coding:UTF-8 -*-
import pickle
import pprint
with open('test.pkl','rb') as Input:
data_1 = pickle.load(Input)
data_2 = pickle.load(Input)
print(data_1)
pprint.pprint(data_2)
print(data_2) | 21.090909 | 37 | 0.659483 |
7942ae669276c2bcaad09fa4d19745a62ce043a0 | 2,301 | py | Python | WrightTools/data/_brunold.py | untzag/WrightTools | 05480d2f91ceeca422d9e5ac381fce1840207cb0 | [
"MIT"
] | 12 | 2017-07-11T15:58:12.000Z | 2021-05-10T20:33:26.000Z | WrightTools/data/_brunold.py | untzag/WrightTools | 05480d2f91ceeca422d9e5ac381fce1840207cb0 | [
"MIT"
] | 808 | 2015-04-12T00:36:08.000Z | 2022-03-27T21:06:06.000Z | WrightTools/data/_brunold.py | untzag/WrightTools | 05480d2f91ceeca422d9e5ac381fce1840207cb0 | [
"MIT"
] | 9 | 2017-07-22T18:54:23.000Z | 2022-02-17T20:31:05.000Z | """Brunold."""
# --- import --------------------------------------------------------------------------------------
import os
import pathlib
import numpy as np
from ._data import Data
from .. import exceptions as wt_exceptions
# --- define --------------------------------------------------------------------------------------
__all__ = ["from_BrunoldrRaman"]
# --- from function -------------------------------------------------------------------------------
def from_BrunoldrRaman(filepath, name=None, parent=None, verbose=True) -> Data:
"""Create a data object from the Brunold rRaman instrument.
Expects one energy (in wavenumbers) and one counts value.
Parameters
----------
filepath : path-like
Path to .txt file.
Can be either a local or remote file (http/ftp).
Can be compressed with gz/bz2, decompression based on file name.
name : string (optional)
Name to give to the created data object. If None, filename is used.
Default is None.
parent : WrightTools.Collection (optional)
Collection to place new data object within. Default is None.
verbose : boolean (optional)
Toggle talkback. Default is True.
Returns
-------
data
New data object(s).
"""
# parse filepath
filestr = os.fspath(filepath)
filepath = pathlib.Path(filepath)
if not ".txt" in filepath.suffixes:
wt_exceptions.WrongFileTypeWarning.warn(filepath, ".txt")
# parse name
if not name:
name = filepath.name.split(".")[0]
# create data
kwargs = {"name": name, "kind": "BrunoldrRaman", "source": filestr}
if parent is None:
data = Data(**kwargs)
else:
data = parent.create_data(**kwargs)
# array
ds = np.DataSource(None)
f = ds.open(filestr, "rt")
arr = np.genfromtxt(f, delimiter="\t").T
f.close()
# chew through all scans
data.create_variable(name="energy", values=arr[0], units="wn")
data.create_channel(name="signal", values=arr[1])
data.transform("energy")
# finish
if verbose:
print("data created at {0}".format(data.fullpath))
print(" range: {0} to {1} (wn)".format(data.energy[0], data.energy[-1]))
print(" size: {0}".format(data.size))
return data
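# Minimal usage sketch (illustrative; 'spectrum.txt' is a hypothetical
# tab-delimited energy/counts file, and the wt.data attribute access is
# assumed from the package layout):
#
#     import WrightTools as wt
#     data = wt.data.from_BrunoldrRaman('spectrum.txt')
#     print(data.energy[:], data.signal[:])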
| 29.126582 | 99 | 0.557584 |
7942ae915f7490410c146420b70dc71a86c5f37e | 637 | py | Python | natlas-server/app/models/__init__.py | m4rcu5/natlas | d1057c5349a5443cecffb3db9a6428f7271b07ad | [
"Apache-2.0"
] | null | null | null | natlas-server/app/models/__init__.py | m4rcu5/natlas | d1057c5349a5443cecffb3db9a6428f7271b07ad | [
"Apache-2.0"
] | null | null | null | natlas-server/app/models/__init__.py | m4rcu5/natlas | d1057c5349a5443cecffb3db9a6428f7271b07ad | [
"Apache-2.0"
] | null | null | null | from app.models.agent import Agent
from app.models.agent_config import AgentConfig
from app.models.agent_script import AgentScript
from app.models.config_item import ConfigItem
from app.models.email_token import EmailToken
from app.models.natlas_services import NatlasServices
from app.models.rescan_task import RescanTask
from app.models.scope_item import ScopeItem
from app.models.tag import Tag
from app.models.user import User
from app.models.scope_log import ScopeLog
__all__ = [
'Agent',
'AgentConfig',
'AgentScript',
'ConfigItem',
'EmailToken',
'NatlasServices',
'RescanTask',
'ScopeItem',
'ScopeLog',
'Tag',
'User'
]
| 24.5 | 53 | 0.799058 |
7942aee6457ef82054b24475b21c8d5b4a766398 | 11,269 | py | Python | python/dgl/nn/pytorch/conv/sageconv.py | dongbowen8/dgl | f1c6948171cc7581e582bf2028776c7e08d448a1 | [
"Apache-2.0"
] | null | null | null | python/dgl/nn/pytorch/conv/sageconv.py | dongbowen8/dgl | f1c6948171cc7581e582bf2028776c7e08d448a1 | [
"Apache-2.0"
] | null | null | null | python/dgl/nn/pytorch/conv/sageconv.py | dongbowen8/dgl | f1c6948171cc7581e582bf2028776c7e08d448a1 | [
"Apache-2.0"
] | null | null | null | """Torch Module for GraphSAGE layer"""
# pylint: disable= no-member, arguments-differ, invalid-name
import torch
from torch import nn
from torch.nn import functional as F
from .... import function as fn
from ....utils import expand_as_pair, check_eq_shape, dgl_warning
class SAGEConv(nn.Module):
r"""
Description
-----------
GraphSAGE layer from the paper `Inductive Representation Learning on
Large Graphs <https://arxiv.org/pdf/1706.02216.pdf>`__.
.. math::
h_{\mathcal{N}(i)}^{(l+1)} &= \mathrm{aggregate}
\left(\{h_{j}^{l}, \forall j \in \mathcal{N}(i) \}\right)
h_{i}^{(l+1)} &= \sigma \left(W \cdot \mathrm{concat}
(h_{i}^{l}, h_{\mathcal{N}(i)}^{l+1}) \right)
h_{i}^{(l+1)} &= \mathrm{norm}(h_{i}^{l})
If a weight tensor on each edge is provided, the aggregation becomes:
.. math::
h_{\mathcal{N}(i)}^{(l+1)} = \mathrm{aggregate}
\left(\{e_{ji} h_{j}^{l}, \forall j \in \mathcal{N}(i) \}\right)
where :math:`e_{ji}` is the scalar weight on the edge from node :math:`j` to node :math:`i`.
Please make sure that :math:`e_{ji}` is broadcastable with :math:`h_j^{l}`.
Parameters
----------
in_feats : int, or pair of ints
Input feature size; i.e, the number of dimensions of :math:`h_i^{(l)}`.
SAGEConv can be applied on homogeneous graph and unidirectional
`bipartite graph <https://docs.dgl.ai/generated/dgl.bipartite.html?highlight=bipartite>`__.
If the layer applies on a unidirectional bipartite graph, ``in_feats``
specifies the input feature size on both the source and destination nodes. If
a scalar is given, the source and destination node feature size would take the
same value.
If aggregator type is ``gcn``, the feature size of source and destination nodes
are required to be the same.
out_feats : int
Output feature size; i.e, the number of dimensions of :math:`h_i^{(l+1)}`.
aggregator_type : str
Aggregator type to use (``mean``, ``gcn``, ``pool``, ``lstm``).
feat_drop : float
Dropout rate on features, default: ``0``.
bias : bool
If True, adds a learnable bias to the output. Default: ``True``.
norm : callable activation function/layer or None, optional
If not None, applies normalization to the updated node features.
activation : callable activation function/layer or None, optional
If not None, applies an activation function to the updated node features.
Default: ``None``.
Examples
--------
>>> import dgl
>>> import numpy as np
>>> import torch as th
>>> from dgl.nn import SAGEConv
>>> # Case 1: Homogeneous graph
>>> g = dgl.graph(([0,1,2,3,2,5], [1,2,3,4,0,3]))
>>> g = dgl.add_self_loop(g)
>>> feat = th.ones(6, 10)
>>> conv = SAGEConv(10, 2, 'pool')
>>> res = conv(g, feat)
>>> res
tensor([[-1.0888, -2.1099],
[-1.0888, -2.1099],
[-1.0888, -2.1099],
[-1.0888, -2.1099],
[-1.0888, -2.1099],
[-1.0888, -2.1099]], grad_fn=<AddBackward0>)
>>> # Case 2: Unidirectional bipartite graph
>>> u = [0, 1, 0, 0, 1]
>>> v = [0, 1, 2, 3, 2]
>>> g = dgl.bipartite((u, v))
>>> u_fea = th.rand(2, 5)
>>> v_fea = th.rand(4, 10)
>>> conv = SAGEConv((5, 10), 2, 'mean')
>>> res = conv(g, (u_fea, v_fea))
>>> res
tensor([[ 0.3163, 3.1166],
[ 0.3866, 2.5398],
[ 0.5873, 1.6597],
[-0.2502, 2.8068]], grad_fn=<AddBackward0>)
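>>> # Case 3 (sketch): per-edge weights; assumes a 1-D tensor with one
>>> # entry per edge, applied as a weighted mean aggregation
>>> g = dgl.graph(([0,1,2,3,2,5], [1,2,3,4,0,3]))
>>> g = dgl.add_self_loop(g)
>>> feat = th.ones(6, 10)
>>> e_w = th.rand(g.number_of_edges())
>>> conv = SAGEConv(10, 2, 'mean')
>>> res = conv(g, feat, edge_weight=e_w)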
"""
def __init__(self,
in_feats,
out_feats,
aggregator_type,
feat_drop=0.,
bias=True,
norm=None,
activation=None):
super(SAGEConv, self).__init__()
self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)
self._out_feats = out_feats
self._aggre_type = aggregator_type
self.norm = norm
self.feat_drop = nn.Dropout(feat_drop)
self.activation = activation
# aggregator type: mean/pool/lstm/gcn
if aggregator_type == 'pool':
self.fc_pool = nn.Linear(self._in_src_feats, self._in_src_feats)
if aggregator_type == 'lstm':
self.lstm = nn.LSTM(self._in_src_feats, self._in_src_feats, batch_first=True)
if aggregator_type != 'gcn':
self.fc_self = nn.Linear(self._in_dst_feats, out_feats, bias=False)
self.fc_neigh = nn.Linear(self._in_src_feats, out_feats, bias=False)
if bias:
self.bias = nn.parameter.Parameter(torch.zeros(self._out_feats))
else:
self.register_buffer('bias', None)
self.reset_parameters()
def reset_parameters(self):
r"""
Description
-----------
Reinitialize learnable parameters.
Note
----
The linear weights :math:`W^{(l)}` are initialized using Glorot uniform initialization.
The LSTM module is using xavier initialization method for its weights.
"""
gain = nn.init.calculate_gain('relu')
if self._aggre_type == 'pool':
nn.init.xavier_uniform_(self.fc_pool.weight, gain=gain)
if self._aggre_type == 'lstm':
self.lstm.reset_parameters()
if self._aggre_type != 'gcn':
nn.init.xavier_uniform_(self.fc_self.weight, gain=gain)
nn.init.xavier_uniform_(self.fc_neigh.weight, gain=gain)
def _compatibility_check(self):
"""Address the backward compatibility issue brought by #2747"""
if not hasattr(self, 'bias'):
dgl_warning("You are loading a GraphSAGE model trained from a old version of DGL, "
"DGL automatically convert it to be compatible with latest version.")
bias = self.fc_neigh.bias
self.fc_neigh.bias = None
if hasattr(self, 'fc_self'):
if bias is not None:
bias = bias + self.fc_self.bias
self.fc_self.bias = None
self.bias = bias
def _lstm_reducer(self, nodes):
"""LSTM reducer
NOTE(zihao): lstm reducer with default schedule (degree bucketing)
is slow; we could accelerate it with degree padding in the future.
"""
m = nodes.mailbox['m'] # (B, L, D)
batch_size = m.shape[0]
h = (m.new_zeros((1, batch_size, self._in_src_feats)),
m.new_zeros((1, batch_size, self._in_src_feats)))
_, (rst, _) = self.lstm(m, h)
return {'neigh': rst.squeeze(0)}
def forward(self, graph, feat, edge_weight=None):
r"""
Description
-----------
Compute GraphSAGE layer.
Parameters
----------
graph : DGLGraph
The graph.
feat : torch.Tensor or pair of torch.Tensor
If a torch.Tensor is given, it represents the input feature of shape
:math:`(N, D_{in})`
where :math:`D_{in}` is size of input feature, :math:`N` is the number of nodes.
If a pair of torch.Tensor is given, the pair must contain two tensors of shape
:math:`(N_{in}, D_{in_{src}})` and :math:`(N_{out}, D_{in_{dst}})`.
edge_weight : torch.Tensor, optional
Optional tensor on the edge. If given, the convolution will weight
with regard to the message.
Returns
-------
torch.Tensor
The output feature of shape :math:`(N_{dst}, D_{out})`
where :math:`N_{dst}` is the number of destination nodes in the input graph,
:math:`D_{out}` is the size of the output feature.
"""
self._compatibility_check()
with graph.local_scope():
if isinstance(feat, tuple):
feat_src = self.feat_drop(feat[0])
feat_dst = self.feat_drop(feat[1])
else:
feat_src = feat_dst = self.feat_drop(feat)
if graph.is_block:
feat_dst = feat_src[:graph.number_of_dst_nodes()]
msg_fn = fn.copy_src('h', 'm')
if edge_weight is not None:
assert edge_weight.shape[0] == graph.number_of_edges()
graph.edata['_edge_weight'] = edge_weight
msg_fn = fn.u_mul_e('h', '_edge_weight', 'm')
h_self = feat_dst
# Handle the case of graphs without edges
if graph.number_of_edges() == 0:
graph.dstdata['neigh'] = torch.zeros(
feat_dst.shape[0], self._in_src_feats).to(feat_dst)
# Determine whether to apply linear transformation before message passing A(XW)
lin_before_mp = self._in_src_feats > self._out_feats
# Message Passing
if self._aggre_type == 'mean':
graph.srcdata['h'] = self.fc_neigh(feat_src) if lin_before_mp else feat_src
graph.update_all(msg_fn, fn.mean('m', 'neigh'))
h_neigh = graph.dstdata['neigh']
if not lin_before_mp:
h_neigh = self.fc_neigh(h_neigh)
elif self._aggre_type == 'gcn':
check_eq_shape(feat)
graph.srcdata['h'] = self.fc_neigh(feat_src) if lin_before_mp else feat_src
if isinstance(feat, tuple): # heterogeneous
graph.dstdata['h'] = self.fc_neigh(feat_dst) if lin_before_mp else feat_dst
else:
if graph.is_block:
graph.dstdata['h'] = graph.srcdata['h'][:graph.num_dst_nodes()]
else:
graph.dstdata['h'] = graph.srcdata['h']
graph.update_all(msg_fn, fn.sum('m', 'neigh'))
# divide in_degrees
degs = graph.in_degrees().to(feat_dst)
h_neigh = (graph.dstdata['neigh'] + graph.dstdata['h']) / (degs.unsqueeze(-1) + 1)
if not lin_before_mp:
h_neigh = self.fc_neigh(h_neigh)
elif self._aggre_type == 'pool':
graph.srcdata['h'] = F.relu(self.fc_pool(feat_src))
graph.update_all(msg_fn, fn.max('m', 'neigh'))
h_neigh = self.fc_neigh(graph.dstdata['neigh'])
elif self._aggre_type == 'lstm':
graph.srcdata['h'] = feat_src
graph.update_all(msg_fn, self._lstm_reducer)
h_neigh = self.fc_neigh(graph.dstdata['neigh'])
else:
raise KeyError('Aggregator type {} not recognized.'.format(self._aggre_type))
# GraphSAGE GCN does not require fc_self.
if self._aggre_type == 'gcn':
rst = h_neigh
else:
rst = self.fc_self(h_self) + h_neigh
# bias term
if self.bias is not None:
rst = rst + self.bias
# activation
if self.activation is not None:
rst = self.activation(rst)
# normalization
if self.norm is not None:
rst = self.norm(rst)
return rst
| 40.246429 | 99 | 0.560919 |
7942afca92c0b8d94bdf9023f3ded99f877df4dd | 8,090 | py | Python | sdk/cosmos/azure-cosmos/azure/cosmos/_execution_context/multi_execution_aggregator.py | xolve/azure-sdk-for-python | 9f5baa19c392f77f811d936ee43450e4ea524002 | [
"MIT"
] | null | null | null | sdk/cosmos/azure-cosmos/azure/cosmos/_execution_context/multi_execution_aggregator.py | xolve/azure-sdk-for-python | 9f5baa19c392f77f811d936ee43450e4ea524002 | [
"MIT"
] | null | null | null | sdk/cosmos/azure-cosmos/azure/cosmos/_execution_context/multi_execution_aggregator.py | xolve/azure-sdk-for-python | 9f5baa19c392f77f811d936ee43450e4ea524002 | [
"MIT"
] | null | null | null | # The MIT License (MIT)
# Copyright (c) 2014 Microsoft Corporation
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Internal class for multi execution context aggregator implementation in the Azure Cosmos database service.
"""
import heapq
from azure.cosmos._execution_context.base_execution_context import _QueryExecutionContextBase
from azure.cosmos._execution_context import document_producer
from azure.cosmos._routing import routing_range
from azure.cosmos import exceptions
# pylint: disable=protected-access
class _MultiExecutionContextAggregator(_QueryExecutionContextBase):
"""This class is capable of queries which requires rewriting based on
backend's returned query execution info.
This class maintains the execution context for each partition key range
and aggregates the corresponding results from each execution context.
When handling an orderby query, _MultiExecutionContextAggregator
instantiates one instance of DocumentProducer per target partition key range
and aggregates the result of each.
"""
# TODO improvement: this class needs to be parallelized
class PriorityQueue:
"""Provides a Priority Queue abstraction data structure"""
def __init__(self):
self._heap = []
def pop(self):
return heapq.heappop(self._heap)
def push(self, item):
heapq.heappush(self._heap, item)
def peek(self):
return self._heap[0]
def size(self):
return len(self._heap)
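# Illustrative usage (items must be mutually comparable, e.g. document
# producers implementing rich comparison; plain ints shown here):
#
#     pq = _MultiExecutionContextAggregator.PriorityQueue()
#     pq.push(3)
#     pq.push(1)
#     assert pq.peek() == 1 and pq.pop() == 1 and pq.size() == 1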
def __init__(self, client, resource_link, query, options, partitioned_query_ex_info):
super(_MultiExecutionContextAggregator, self).__init__(client, options)
# use the routing provider in the client
self._routing_provider = client._routing_map_provider
self._client = client
self._resource_link = resource_link
self._query = query
self._partitioned_query_ex_info = partitioned_query_ex_info
self._sort_orders = partitioned_query_ex_info.get_order_by()
if self._sort_orders:
self._document_producer_comparator = document_producer._OrderByDocumentProducerComparator(self._sort_orders)
else:
self._document_producer_comparator = document_producer._PartitionKeyRangeDocumentProduerComparator()
# will be a list of (partition_min, partition_max) tuples
targetPartitionRanges = self._get_target_partition_key_range()
targetPartitionQueryExecutionContextList = []
for partitionTargetRange in targetPartitionRanges:
# create and add the child execution context for the target range
targetPartitionQueryExecutionContextList.append(
self._createTargetPartitionQueryExecutionContext(partitionTargetRange)
)
self._orderByPQ = _MultiExecutionContextAggregator.PriorityQueue()
for targetQueryExContext in targetPartitionQueryExecutionContextList:
try:
# TODO: we can also use more_itertools.peekable to be more python friendly
targetQueryExContext.peek()
# if there are matching results in the target ex range add it to the priority queue
self._orderByPQ.push(targetQueryExContext)
except exceptions.CosmosHttpResponseError as e:
if exceptions.partition_range_is_gone(e):
# repairing document producer context on partition split
self._repair_document_producer()
else:
raise
except StopIteration:
continue
def __next__(self):
"""Returns the next result
:return: The next result.
:rtype: dict
:raises StopIteration: If no more result is left.
"""
if self._orderByPQ.size() > 0:
targetRangeExContext = self._orderByPQ.pop()
res = next(targetRangeExContext)
try:
# TODO: we can also use more_itertools.peekable to be more python friendly
targetRangeExContext.peek()
self._orderByPQ.push(targetRangeExContext)
except StopIteration:
pass
return res
raise StopIteration
def fetch_next_block(self):
raise NotImplementedError("You should use pipeline's fetch_next_block.")
def _repair_document_producer(self):
"""Repairs the document producer context by using the re-initialized routing map provider in the client,
which loads in a refreshed partition key range cache to re-create the partition key ranges.
After loading this new cache, the document producers get re-created with the new valid ranges.
"""
# refresh the routing provider to get the newly initialized one post-refresh
self._routing_provider = self._client._routing_map_provider
# will be a list of (partition_min, partition_max) tuples
targetPartitionRanges = self._get_target_partition_key_range()
targetPartitionQueryExecutionContextList = []
for partitionTargetRange in targetPartitionRanges:
# create and add the child execution context for the target range
targetPartitionQueryExecutionContextList.append(
self._createTargetPartitionQueryExecutionContext(partitionTargetRange)
)
self._orderByPQ = _MultiExecutionContextAggregator.PriorityQueue()
for targetQueryExContext in targetPartitionQueryExecutionContextList:
try:
# TODO: we can also use more_itertools.peekable to be more python friendly
targetQueryExContext.peek()
# if there are matching results in the target ex range add it to the priority queue
self._orderByPQ.push(targetQueryExContext)
except StopIteration:
continue
def _createTargetPartitionQueryExecutionContext(self, partition_key_target_range):
rewritten_query = self._partitioned_query_ex_info.get_rewritten_query()
if rewritten_query:
if isinstance(self._query, dict):
# this is a parameterized query, collect all the parameters
query = dict(self._query)
query["query"] = rewritten_query
else:
query = rewritten_query
else:
query = self._query
return document_producer._DocumentProducer(
partition_key_target_range,
self._client,
self._resource_link,
query,
self._document_producer_comparator,
self._options,
)
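# Illustrative shape of a parameterized query dict whose "query" text
# is replaced by the rewritten query above (names/values hypothetical):
#
#     {"query": "SELECT * FROM c WHERE c.pk = @pk",
#      "parameters": [{"name": "@pk", "value": 1}]}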
def _get_target_partition_key_range(self):
query_ranges = self._partitioned_query_ex_info.get_query_ranges()
return self._routing_provider.get_overlapping_ranges(
self._resource_link, [routing_range.Range.ParseFromDict(range_as_dict) for range_as_dict in query_ranges]
)
next = __next__ # Python 2 compatibility.
| 40.653266 | 120 | 0.696292 |
7942b1268f03d31051b5a30880cd0f1e8c953bce | 1,388 | py | Python | project.py | salazardetroya/2DHEVF | 02cc327bcfb8a9a01bc5991a3849acaa1ebe271e | [
"MIT"
] | 1 | 2022-01-28T03:11:10.000Z | 2022-01-28T03:11:10.000Z | project.py | salazardetroya/2DHEVF | 02cc327bcfb8a9a01bc5991a3849acaa1ebe271e | [
"MIT"
] | null | null | null | project.py | salazardetroya/2DHEVF | 02cc327bcfb8a9a01bc5991a3849acaa1ebe271e | [
"MIT"
] | 1 | 2021-11-15T23:26:51.000Z | 2021-11-15T23:26:51.000Z | # project.py
import flow
from flow import FlowProject
@FlowProject.label
def check_100_iterations(job):
return job.isfile("control_iterations_f_10.vtu") and \
job.isfile("final_output.txt")# Check the job at least has > 1500 iterations.
@FlowProject.operation
@flow.cmd
@FlowProject.post(check_100_iterations)
def launch_opti(job):
import os
output = job.ws + "/output.txt"
simulation = "source /g/g92/miguel/workspace/firedrake_setup.sh && \
srun --output={3} python3 he_volume_frac.py \
--mu {0:.5f} \
--enthalpy_scale {1:.5f} \
--alphabar {4:.8f} \
--filter {5:.8f} \
--output_dir {2}".format(job.sp.mu, job.sp.enthalpy_scale, job.ws, output, job.sp.alphabar, job.sp.filter)
return simulation
@FlowProject.label
def check_design(job):
return job.isfile(job.id + ".png")
@FlowProject.operation
@flow.cmd
@FlowProject.pre(check_100_iterations)
@FlowProject.post(check_design)
def post_process_design(job):
parameters = "".join([key + " " + f"{job.sp[key]}" + "\n" for key in job.sp.keys()])
import os
post_process = "srun pvpython screenshot_design.py \
--parameters '{0}' \
--filename {1} \
--results_dir {2}".format(parameters, job.id, job.ws)
return post_process
if __name__ == '__main__':
FlowProject().main()
| 28.916667 | 118 | 0.642651 |
7942b14d4182264241f1e643b25e1c9c7a4a010b | 58,529 | py | Python | flask/lib/python2.7/site-packages/sqlalchemy/orm/strategies.py | ccellis/WHACK2016 | 5ef4ddadaa60ef8ca07702a0a82df8a9776b9741 | [
"BSD-3-Clause"
] | 1 | 2018-04-09T07:37:54.000Z | 2018-04-09T07:37:54.000Z | flask/lib/python2.7/site-packages/sqlalchemy/orm/strategies.py | ccellis/WHACK2016 | 5ef4ddadaa60ef8ca07702a0a82df8a9776b9741 | [
"BSD-3-Clause"
] | 1 | 2016-05-25T15:38:50.000Z | 2016-05-25T15:38:50.000Z | flask/lib/python2.7/site-packages/sqlalchemy/orm/strategies.py | ccellis/WHACK2016 | 5ef4ddadaa60ef8ca07702a0a82df8a9776b9741 | [
"BSD-3-Clause"
] | null | null | null | # orm/strategies.py
# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""sqlalchemy.orm.interfaces.LoaderStrategy
implementations, and related MapperOptions."""
from .. import exc as sa_exc, inspect
from .. import util, log, event
from ..sql import util as sql_util, visitors
from .. import sql
from . import (
attributes, interfaces, exc as orm_exc, loading,
unitofwork, util as orm_util
)
from .state import InstanceState
from .util import _none_set
from . import properties
from .interfaces import (
LoaderStrategy, StrategizedProperty
)
from .base import _SET_DEFERRED_EXPIRED, _DEFER_FOR_STATE
from .session import _state_session
import itertools
def _register_attribute(
strategy, mapper, useobject,
compare_function=None,
typecallable=None,
uselist=False,
callable_=None,
proxy_property=None,
active_history=False,
impl_class=None,
**kw
):
prop = strategy.parent_property
attribute_ext = list(util.to_list(prop.extension, default=[]))
listen_hooks = []
if useobject and prop.single_parent:
listen_hooks.append(single_parent_validator)
if prop.key in prop.parent.validators:
fn, opts = prop.parent.validators[prop.key]
listen_hooks.append(
lambda desc, prop: orm_util._validator_events(
desc,
prop.key, fn, **opts)
)
if useobject:
listen_hooks.append(unitofwork.track_cascade_events)
# need to assemble backref listeners
# after the singleparentvalidator, mapper validator
backref = kw.pop('backref', None)
if backref:
listen_hooks.append(
lambda desc, prop: attributes.backref_listeners(
desc,
backref,
uselist
)
)
for m in mapper.self_and_descendants:
if prop is m._props.get(prop.key):
desc = attributes.register_attribute_impl(
m.class_,
prop.key,
parent_token=prop,
uselist=uselist,
compare_function=compare_function,
useobject=useobject,
extension=attribute_ext,
trackparent=useobject and (
prop.single_parent
or prop.direction is interfaces.ONETOMANY),
typecallable=typecallable,
callable_=callable_,
active_history=active_history,
impl_class=impl_class,
send_modified_events=not useobject or not prop.viewonly,
doc=prop.doc,
**kw
)
for hook in listen_hooks:
hook(desc, prop)
@properties.ColumnProperty.strategy_for(instrument=False, deferred=False)
class UninstrumentedColumnLoader(LoaderStrategy):
"""Represent the a non-instrumented MapperProperty.
The polymorphic_on argument of mapper() often results in this,
if the argument is against the with_polymorphic selectable.
"""
__slots__ = 'columns',
def __init__(self, parent):
super(UninstrumentedColumnLoader, self).__init__(parent)
self.columns = self.parent_property.columns
def setup_query(
self, context, entity, path, loadopt, adapter,
column_collection=None, **kwargs):
for c in self.columns:
if adapter:
c = adapter.columns[c]
column_collection.append(c)
def create_row_processor(
self, context, path, loadopt,
mapper, result, adapter, populators):
pass
@log.class_logger
@properties.ColumnProperty.strategy_for(instrument=True, deferred=False)
class ColumnLoader(LoaderStrategy):
"""Provide loading behavior for a :class:`.ColumnProperty`."""
__slots__ = 'columns', 'is_composite'
def __init__(self, parent):
super(ColumnLoader, self).__init__(parent)
self.columns = self.parent_property.columns
self.is_composite = hasattr(self.parent_property, 'composite_class')
def setup_query(
self, context, entity, path, loadopt,
adapter, column_collection, memoized_populators, **kwargs):
for c in self.columns:
if adapter:
c = adapter.columns[c]
column_collection.append(c)
fetch = self.columns[0]
if adapter:
fetch = adapter.columns[fetch]
memoized_populators[self.parent_property] = fetch
def init_class_attribute(self, mapper):
self.is_class_level = True
coltype = self.columns[0].type
# TODO: check all columns? check for foreign key as well?
active_history = self.parent_property.active_history or \
self.columns[0].primary_key or \
mapper.version_id_col in set(self.columns)
_register_attribute(
self, mapper, useobject=False,
compare_function=coltype.compare_values,
active_history=active_history
)
def create_row_processor(
self, context, path,
loadopt, mapper, result, adapter, populators):
# look through list of columns represented here
# to see which, if any, is present in the row.
for col in self.columns:
if adapter:
col = adapter.columns[col]
getter = result._getter(col)
if getter:
populators["quick"].append((self.key, getter))
break
else:
populators["expire"].append((self.key, True))
@log.class_logger
@properties.ColumnProperty.strategy_for(deferred=True, instrument=True)
class DeferredColumnLoader(LoaderStrategy):
"""Provide loading behavior for a deferred :class:`.ColumnProperty`."""
__slots__ = 'columns', 'group'
def __init__(self, parent):
super(DeferredColumnLoader, self).__init__(parent)
if hasattr(self.parent_property, 'composite_class'):
raise NotImplementedError("Deferred loading for composite "
"types not implemented yet")
self.columns = self.parent_property.columns
self.group = self.parent_property.group
def create_row_processor(
self, context, path, loadopt,
mapper, result, adapter, populators):
# this path currently does not check the result
# for the column; this is because in most cases we are
# working just with the setup_query() directive which does
# not support this, and the behavior here should be consistent.
if not self.is_class_level:
set_deferred_for_local_state = \
self.parent_property._deferred_column_loader
populators["new"].append((self.key, set_deferred_for_local_state))
else:
populators["expire"].append((self.key, False))
def init_class_attribute(self, mapper):
self.is_class_level = True
_register_attribute(
self, mapper, useobject=False,
compare_function=self.columns[0].type.compare_values,
callable_=self._load_for_state,
expire_missing=False
)
def setup_query(
self, context, entity, path, loadopt,
adapter, column_collection, memoized_populators,
only_load_props=None, **kw):
if (
(
loadopt and
'undefer_pks' in loadopt.local_opts and
set(self.columns).intersection(
self.parent._should_undefer_in_wildcard)
)
or
(
loadopt and
self.group and
loadopt.local_opts.get('undefer_group_%s' % self.group, False)
)
or
(
only_load_props and self.key in only_load_props
)
):
self.parent_property._get_strategy_by_cls(ColumnLoader).\
setup_query(context, entity,
path, loadopt, adapter,
column_collection, memoized_populators, **kw)
elif self.is_class_level:
memoized_populators[self.parent_property] = _SET_DEFERRED_EXPIRED
else:
memoized_populators[self.parent_property] = _DEFER_FOR_STATE
def _load_for_state(self, state, passive):
if not state.key:
return attributes.ATTR_EMPTY
if not passive & attributes.SQL_OK:
return attributes.PASSIVE_NO_RESULT
localparent = state.manager.mapper
if self.group:
toload = [
p.key for p in
localparent.iterate_properties
if isinstance(p, StrategizedProperty) and
isinstance(p.strategy, DeferredColumnLoader) and
p.group == self.group
]
else:
toload = [self.key]
# narrow the keys down to just those which have no history
group = [k for k in toload if k in state.unmodified]
session = _state_session(state)
if session is None:
raise orm_exc.DetachedInstanceError(
"Parent instance %s is not bound to a Session; "
"deferred load operation of attribute '%s' cannot proceed" %
(orm_util.state_str(state), self.key)
)
query = session.query(localparent)
if loading.load_on_ident(
query, state.key,
only_load_props=group, refresh_state=state) is None:
raise orm_exc.ObjectDeletedError(state)
return attributes.ATTR_WAS_SET
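# Usage sketch (illustrative, not part of this module): columns mapped
# with deferred() are loaded on first attribute access via this strategy.
# Base is assumed to be a declarative base class.
#
#     from sqlalchemy import Column, Integer, Text
#     from sqlalchemy.orm import deferred
#
#     class Book(Base):
#         __tablename__ = 'book'
#         id = Column(Integer, primary_key=True)
#         text = deferred(Column(Text))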
class LoadDeferredColumns(object):
"""serializable loader object used by DeferredColumnLoader"""
def __init__(self, key):
self.key = key
def __call__(self, state, passive=attributes.PASSIVE_OFF):
key = self.key
localparent = state.manager.mapper
prop = localparent._props[key]
strategy = prop._strategies[DeferredColumnLoader]
return strategy._load_for_state(state, passive)
class AbstractRelationshipLoader(LoaderStrategy):
"""LoaderStratgies which deal with related objects."""
__slots__ = 'mapper', 'target', 'uselist'
def __init__(self, parent):
super(AbstractRelationshipLoader, self).__init__(parent)
self.mapper = self.parent_property.mapper
self.target = self.parent_property.target
self.uselist = self.parent_property.uselist
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="noload")
@properties.RelationshipProperty.strategy_for(lazy=None)
class NoLoader(AbstractRelationshipLoader):
"""Provide loading behavior for a :class:`.RelationshipProperty`
with "lazy=None".
"""
__slots__ = ()
def init_class_attribute(self, mapper):
self.is_class_level = True
_register_attribute(
self, mapper,
useobject=True,
uselist=self.parent_property.uselist,
typecallable=self.parent_property.collection_class,
)
def create_row_processor(
self, context, path, loadopt, mapper,
result, adapter, populators):
def invoke_no_load(state, dict_, row):
if self.uselist:
state.manager.get_impl(self.key).initialize(state, dict_)
else:
dict_[self.key] = None
populators["new"].append((self.key, invoke_no_load))
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy=True)
@properties.RelationshipProperty.strategy_for(lazy="select")
class LazyLoader(AbstractRelationshipLoader, util.MemoizedSlots):
"""Provide loading behavior for a :class:`.RelationshipProperty`
with "lazy=True", that is loads when first accessed.
"""
__slots__ = (
'_lazywhere', '_rev_lazywhere', 'use_get', '_bind_to_col',
'_equated_columns', '_rev_bind_to_col', '_rev_equated_columns',
'_simple_lazy_clause')
def __init__(self, parent):
super(LazyLoader, self).__init__(parent)
join_condition = self.parent_property._join_condition
self._lazywhere, \
self._bind_to_col, \
self._equated_columns = join_condition.create_lazy_clause()
self._rev_lazywhere, \
self._rev_bind_to_col, \
self._rev_equated_columns = join_condition.create_lazy_clause(
reverse_direction=True)
self.logger.info("%s lazy loading clause %s", self, self._lazywhere)
# determine if our "lazywhere" clause is the same as the mapper's
# get() clause. then we can just use mapper.get()
self.use_get = not self.uselist and \
self.mapper._get_clause[0].compare(
self._lazywhere,
use_proxies=True,
equivalents=self.mapper._equivalent_columns
)
if self.use_get:
for col in list(self._equated_columns):
if col in self.mapper._equivalent_columns:
for c in self.mapper._equivalent_columns[col]:
self._equated_columns[c] = self._equated_columns[col]
self.logger.info("%s will use query.get() to "
"optimize instance loads", self)
def init_class_attribute(self, mapper):
self.is_class_level = True
active_history = (
self.parent_property.active_history or
self.parent_property.direction is not interfaces.MANYTOONE or
not self.use_get
)
# MANYTOONE currently only needs the
# "old" value for delete-orphan
# cascades. the required _SingleParentValidator
# will enable active_history
# in that case. otherwise we don't need the
# "old" value during backref operations.
_register_attribute(
self,
mapper,
useobject=True,
callable_=self._load_for_state,
uselist=self.parent_property.uselist,
backref=self.parent_property.back_populates,
typecallable=self.parent_property.collection_class,
active_history=active_history
)
def _memoized_attr__simple_lazy_clause(self):
criterion, bind_to_col = (
self._lazywhere,
self._bind_to_col
)
params = []
def visit_bindparam(bindparam):
bindparam.unique = False
if bindparam._identifying_key in bind_to_col:
params.append((
bindparam.key, bind_to_col[bindparam._identifying_key],
None))
else:
params.append((bindparam.key, None, bindparam.value))
criterion = visitors.cloned_traverse(
criterion, {}, {'bindparam': visit_bindparam}
)
return criterion, params
def _generate_lazy_clause(self, state, passive):
criterion, param_keys = self._simple_lazy_clause
if state is None:
return sql_util.adapt_criterion_to_null(
criterion, [key for key, ident, value in param_keys])
mapper = self.parent_property.parent
o = state.obj() # strong ref
dict_ = attributes.instance_dict(o)
if passive & attributes.INIT_OK:
passive ^= attributes.INIT_OK
params = {}
for key, ident, value in param_keys:
if ident is not None:
if passive and passive & attributes.LOAD_AGAINST_COMMITTED:
value = mapper._get_committed_state_attr_by_column(
state, dict_, ident, passive)
else:
value = mapper._get_state_attr_by_column(
state, dict_, ident, passive)
params[key] = value
return criterion, params
def _load_for_state(self, state, passive):
if not state.key and (
(
not self.parent_property.load_on_pending
and not state._load_pending
)
or not state.session_id
):
return attributes.ATTR_EMPTY
pending = not state.key
ident_key = None
if (
(not passive & attributes.SQL_OK and not self.use_get)
or
(not passive & attributes.NON_PERSISTENT_OK and pending)
):
return attributes.PASSIVE_NO_RESULT
session = _state_session(state)
if not session:
raise orm_exc.DetachedInstanceError(
"Parent instance %s is not bound to a Session; "
"lazy load operation of attribute '%s' cannot proceed" %
(orm_util.state_str(state), self.key)
)
# if we have a simple primary key load, check the
# identity map without generating a Query at all
if self.use_get:
ident = self._get_ident_for_use_get(
session,
state,
passive
)
if attributes.PASSIVE_NO_RESULT in ident:
return attributes.PASSIVE_NO_RESULT
elif attributes.NEVER_SET in ident:
return attributes.NEVER_SET
if _none_set.issuperset(ident):
return None
ident_key = self.mapper.identity_key_from_primary_key(ident)
instance = loading.get_from_identity(session, ident_key, passive)
if instance is not None:
return instance
elif not passive & attributes.SQL_OK or \
not passive & attributes.RELATED_OBJECT_OK:
return attributes.PASSIVE_NO_RESULT
return self._emit_lazyload(session, state, ident_key, passive)
def _get_ident_for_use_get(self, session, state, passive):
instance_mapper = state.manager.mapper
if passive & attributes.LOAD_AGAINST_COMMITTED:
get_attr = instance_mapper._get_committed_state_attr_by_column
else:
get_attr = instance_mapper._get_state_attr_by_column
dict_ = state.dict
return [
get_attr(
state,
dict_,
self._equated_columns[pk],
passive=passive)
for pk in self.mapper.primary_key
]
@util.dependencies("sqlalchemy.orm.strategy_options")
def _emit_lazyload(
self, strategy_options, session, state, ident_key, passive):
q = session.query(self.mapper)._adapt_all_clauses()
if self.parent_property.secondary is not None:
q = q.select_from(self.mapper, self.parent_property.secondary)
q = q._with_invoke_all_eagers(False)
pending = not state.key
# don't autoflush on pending
if pending or passive & attributes.NO_AUTOFLUSH:
q = q.autoflush(False)
if state.load_path:
q = q._with_current_path(state.load_path[self.parent_property])
if state.load_options:
q = q._conditional_options(*state.load_options)
if self.use_get:
return loading.load_on_ident(q, ident_key)
if self.parent_property.order_by:
q = q.order_by(*util.to_list(self.parent_property.order_by))
for rev in self.parent_property._reverse_property:
# reverse props that are MANYTOONE are loading *this*
# object from get(), so don't need to eager out to those.
if rev.direction is interfaces.MANYTOONE and \
rev._use_get and \
not isinstance(rev.strategy, LazyLoader):
q = q.options(
strategy_options.Load(rev.parent).lazyload(rev.key))
lazy_clause, params = self._generate_lazy_clause(
state, passive=passive)
if pending:
if util.has_intersection(
orm_util._none_set, params.values()):
return None
elif util.has_intersection(orm_util._never_set, params.values()):
return None
q = q.filter(lazy_clause).params(params)
result = q.all()
if self.uselist:
return result
else:
l = len(result)
if l:
if l > 1:
util.warn(
"Multiple rows returned with "
"uselist=False for lazily-loaded attribute '%s' "
% self.parent_property)
return result[0]
else:
return None
def create_row_processor(
self, context, path, loadopt,
mapper, result, adapter, populators):
key = self.key
if not self.is_class_level:
# we are not the primary manager for this attribute
# on this class - set up a
# per-instance lazyloader, which will override the
# class-level behavior.
# this currently only happens when using a
# "lazyload" option on a "no load"
# attribute - "eager" attributes always have a
# class-level lazyloader installed.
set_lazy_callable = InstanceState._instance_level_callable_processor(
mapper.class_manager,
LoadLazyAttribute(key, self._strategy_keys[0]), key)
populators["new"].append((self.key, set_lazy_callable))
elif context.populate_existing or mapper.always_refresh:
def reset_for_lazy_callable(state, dict_, row):
# we are the primary manager for this attribute on
# this class - reset its
# per-instance attribute state, so that the class-level
# lazy loader is
# executed when next referenced on this instance.
# this is needed in
# populate_existing() types of scenarios to reset
# any existing state.
state._reset(dict_, key)
populators["new"].append((self.key, reset_for_lazy_callable))
class LoadLazyAttribute(object):
"""serializable loader object used by LazyLoader"""
def __init__(self, key, strategy_key=(('lazy', 'select'),)):
self.key = key
self.strategy_key = strategy_key
def __call__(self, state, passive=attributes.PASSIVE_OFF):
key = self.key
instance_mapper = state.manager.mapper
prop = instance_mapper._props[key]
strategy = prop._strategies[self.strategy_key]
return strategy._load_for_state(state, passive)
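# Usage sketch (illustrative): lazy='select' is the default relationship
# loading style; related rows are fetched with a SELECT on first access,
# or via an identity-map get() when the simple primary-key case applies.
#
#     addresses = relationship(Address, lazy='select')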
@properties.RelationshipProperty.strategy_for(lazy="immediate")
class ImmediateLoader(AbstractRelationshipLoader):
__slots__ = ()
def init_class_attribute(self, mapper):
self.parent_property.\
_get_strategy_by_cls(LazyLoader).\
init_class_attribute(mapper)
def setup_query(
self, context, entity,
path, loadopt, adapter, column_collection=None,
parentmapper=None, **kwargs):
pass
def create_row_processor(
self, context, path, loadopt,
mapper, result, adapter, populators):
def load_immediate(state, dict_, row):
state.get_impl(self.key).get(state, dict_)
populators["delayed"].append((self.key, load_immediate))
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="subquery")
class SubqueryLoader(AbstractRelationshipLoader):
__slots__ = 'join_depth',
def __init__(self, parent):
super(SubqueryLoader, self).__init__(parent)
self.join_depth = self.parent_property.join_depth
def init_class_attribute(self, mapper):
self.parent_property.\
_get_strategy_by_cls(LazyLoader).\
init_class_attribute(mapper)
def setup_query(
self, context, entity,
path, loadopt, adapter,
column_collection=None,
parentmapper=None, **kwargs):
if not context.query._enable_eagerloads:
return
elif context.query._yield_per:
context.query._no_yield_per("subquery")
path = path[self.parent_property]
# build up the path from the leftmost
# entity to the thing we're subquery loading.
with_poly_info = path.get(
context.attributes,
"path_with_polymorphic", None)
if with_poly_info is not None:
effective_entity = with_poly_info.entity
else:
effective_entity = self.mapper
subq_path = context.attributes.get(
('subquery_path', None),
orm_util.PathRegistry.root)
subq_path = subq_path + path
# if not via query option, check for
# a cycle
if not path.contains(context.attributes, "loader"):
if self.join_depth:
if path.length / 2 > self.join_depth:
return
elif subq_path.contains_mapper(self.mapper):
return
leftmost_mapper, leftmost_attr, leftmost_relationship = \
self._get_leftmost(subq_path)
orig_query = context.attributes.get(
("orig_query", SubqueryLoader),
context.query)
# generate a new Query from the original, then
# produce a subquery from it.
left_alias = self._generate_from_original_query(
orig_query, leftmost_mapper,
leftmost_attr, leftmost_relationship,
entity.entity_zero
)
# generate another Query that will join the
# left alias to the target relationships.
# basically doing a longhand
# "from_self()". (from_self() itself not quite industrial
# strength enough for all contingencies...but very close)
q = orig_query.session.query(effective_entity)
q._attributes = {
("orig_query", SubqueryLoader): orig_query,
('subquery_path', None): subq_path
}
q = q._set_enable_single_crit(False)
to_join, local_attr, parent_alias = \
self._prep_for_joins(left_alias, subq_path)
q = q.order_by(*local_attr)
q = q.add_columns(*local_attr)
q = self._apply_joins(
q, to_join, left_alias,
parent_alias, effective_entity)
q = self._setup_options(q, subq_path, orig_query, effective_entity)
q = self._setup_outermost_orderby(q)
# add new query to attributes to be picked up
# by create_row_processor
path.set(context.attributes, "subquery", q)
def _get_leftmost(self, subq_path):
subq_path = subq_path.path
subq_mapper = orm_util._class_to_mapper(subq_path[0])
# determine attributes of the leftmost mapper
if self.parent.isa(subq_mapper) and \
self.parent_property is subq_path[1]:
leftmost_mapper, leftmost_prop = \
self.parent, self.parent_property
else:
leftmost_mapper, leftmost_prop = \
subq_mapper, \
subq_path[1]
leftmost_cols = leftmost_prop.local_columns
leftmost_attr = [
getattr(
subq_path[0].entity,
leftmost_mapper._columntoproperty[c].key)
for c in leftmost_cols
]
return leftmost_mapper, leftmost_attr, leftmost_prop
def _generate_from_original_query(
self,
orig_query, leftmost_mapper,
leftmost_attr, leftmost_relationship, orig_entity
):
# reformat the original query
# to look only for significant columns
q = orig_query._clone().correlate(None)
# set a real "from" if not present, as this is more
# accurate than just going off of the column expression
if not q._from_obj and orig_entity.mapper.isa(leftmost_mapper):
q._set_select_from([orig_entity], False)
target_cols = q._adapt_col_list(leftmost_attr)
# select from the identity columns of the outer
q._set_entities(target_cols)
distinct_target_key = leftmost_relationship.distinct_target_key
if distinct_target_key is True:
q._distinct = True
elif distinct_target_key is None:
# if target_cols refer to a non-primary key or only
# part of a composite primary key, set the q as distinct
for t in set(c.table for c in target_cols):
if not set(target_cols).issuperset(t.primary_key):
q._distinct = True
break
if q._order_by is False:
q._order_by = leftmost_mapper.order_by
# don't need ORDER BY if no limit/offset
if q._limit is None and q._offset is None:
q._order_by = None
# the original query now becomes a subquery
# which we'll join onto.
embed_q = q.with_labels().subquery()
left_alias = orm_util.AliasedClass(
leftmost_mapper, embed_q,
use_mapper_path=True)
return left_alias
def _prep_for_joins(self, left_alias, subq_path):
# figure out what's being joined. a.k.a. the fun part
to_join = []
pairs = list(subq_path.pairs())
for i, (mapper, prop) in enumerate(pairs):
if i > 0:
# look at the previous mapper in the chain -
# if it is as or more specific than this prop's
# mapper, use that instead.
# note we have an assumption here that
# the non-first element is always going to be a mapper,
# not an AliasedClass
prev_mapper = pairs[i - 1][1].mapper
to_append = prev_mapper if prev_mapper.isa(mapper) else mapper
else:
to_append = mapper
to_join.append((to_append, prop.key))
# determine the immediate parent class we are joining from,
# which needs to be aliased.
if len(to_join) > 1:
info = inspect(to_join[-1][0])
if len(to_join) < 2:
# in the case of a one level eager load, this is the
# leftmost "left_alias".
parent_alias = left_alias
elif info.mapper.isa(self.parent):
# In the case of multiple levels, retrieve
# it from subq_path[-2]. This is the same as self.parent
# in the vast majority of cases, and [ticket:2014]
# illustrates a case where sub_path[-2] is a subclass
# of self.parent
parent_alias = orm_util.AliasedClass(
to_join[-1][0],
use_mapper_path=True)
else:
# if of_type() were used leading to this relationship,
# self.parent is more specific than subq_path[-2]
parent_alias = orm_util.AliasedClass(
self.parent,
use_mapper_path=True)
local_cols = self.parent_property.local_columns
local_attr = [
getattr(parent_alias, self.parent._columntoproperty[c].key)
for c in local_cols
]
return to_join, local_attr, parent_alias
def _apply_joins(
self, q, to_join, left_alias, parent_alias,
effective_entity):
for i, (mapper, key) in enumerate(to_join):
# we need to use query.join() as opposed to
# orm.join() here because of the
# rich behavior it brings when dealing with
# "with_polymorphic" mappers. "aliased"
# and "from_joinpoint" take care of most of
# the chaining and aliasing for us.
first = i == 0
middle = i < len(to_join) - 1
second_to_last = i == len(to_join) - 2
last = i == len(to_join) - 1
if first:
attr = getattr(left_alias, key)
if last and effective_entity is not self.mapper:
attr = attr.of_type(effective_entity)
else:
if last and effective_entity is not self.mapper:
attr = getattr(parent_alias, key).\
of_type(effective_entity)
else:
attr = getattr(mapper.entity, key)
if second_to_last:
q = q.join(parent_alias, attr, from_joinpoint=True)
else:
q = q.join(attr, aliased=middle, from_joinpoint=True)
return q
def _setup_options(self, q, subq_path, orig_query, effective_entity):
# propagate loader options etc. to the new query.
# these will fire relative to subq_path.
q = q._with_current_path(subq_path)
q = q._conditional_options(*orig_query._with_options)
if orig_query._populate_existing:
q._populate_existing = orig_query._populate_existing
return q
def _setup_outermost_orderby(self, q):
if self.parent_property.order_by:
# if there's an ORDER BY, alias it the same
# way joinedloader does, but we have to pull out
# the "eagerjoin" from the query.
# this really only picks up the "secondary" table
# right now.
eagerjoin = q._from_obj[0]
eager_order_by = \
eagerjoin._target_adapter.\
copy_and_process(
util.to_list(
self.parent_property.order_by
)
)
q = q.order_by(*eager_order_by)
return q
class _SubqCollections(object):
"""Given a :class:`.Query` used to emit the "subquery load",
provide a load interface that executes the query at the
first moment a value is needed.
"""
_data = None
def __init__(self, subq):
self.subq = subq
def get(self, key, default):
if self._data is None:
self._load()
return self._data.get(key, default)
def _load(self):
self._data = dict(
(k, [vv[0] for vv in v])
for k, v in itertools.groupby(
self.subq,
lambda x: x[1:]
)
)
def loader(self, state, dict_, row):
if self._data is None:
self._load()
def create_row_processor(
self, context, path, loadopt,
mapper, result, adapter, populators):
if not self.parent.class_manager[self.key].impl.supports_population:
raise sa_exc.InvalidRequestError(
"'%s' does not support object "
"population - eager loading cannot be applied." %
self)
path = path[self.parent_property]
subq = path.get(context.attributes, 'subquery')
if subq is None:
return
assert subq.session is context.session, (
"Subquery session doesn't refer to that of "
"our context. Are there broken context caching "
"schemes being used?"
)
local_cols = self.parent_property.local_columns
# cache the loaded collections in the context
# so that inheriting mappers don't re-load when they
# call upon create_row_processor again
collections = path.get(context.attributes, "collections")
if collections is None:
collections = self._SubqCollections(subq)
path.set(context.attributes, 'collections', collections)
if adapter:
local_cols = [adapter.columns[c] for c in local_cols]
if self.uselist:
self._create_collection_loader(
context, collections, local_cols, populators)
else:
self._create_scalar_loader(
context, collections, local_cols, populators)
def _create_collection_loader(
self, context, collections, local_cols, populators):
def load_collection_from_subq(state, dict_, row):
collection = collections.get(
tuple([row[col] for col in local_cols]),
()
)
state.get_impl(self.key).\
set_committed_value(state, dict_, collection)
populators["new"].append((self.key, load_collection_from_subq))
if context.invoke_all_eagers:
populators["eager"].append((self.key, collections.loader))
def _create_scalar_loader(
self, context, collections, local_cols, populators):
def load_scalar_from_subq(state, dict_, row):
collection = collections.get(
tuple([row[col] for col in local_cols]),
(None,)
)
if len(collection) > 1:
util.warn(
"Multiple rows returned with "
"uselist=False for eagerly-loaded attribute '%s' "
% self)
scalar = collection[0]
state.get_impl(self.key).\
set_committed_value(state, dict_, scalar)
populators["new"].append((self.key, load_scalar_from_subq))
if context.invoke_all_eagers:
populators["eager"].append((self.key, collections.loader))
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="joined")
@properties.RelationshipProperty.strategy_for(lazy=False)
class JoinedLoader(AbstractRelationshipLoader):
"""Provide loading behavior for a :class:`.RelationshipProperty`
using joined eager loading.
"""
__slots__ = 'join_depth',
def __init__(self, parent):
super(JoinedLoader, self).__init__(parent)
self.join_depth = self.parent_property.join_depth
def init_class_attribute(self, mapper):
self.parent_property.\
_get_strategy_by_cls(LazyLoader).init_class_attribute(mapper)
def setup_query(
self, context, entity, path, loadopt, adapter,
column_collection=None, parentmapper=None,
chained_from_outerjoin=False,
**kwargs):
"""Add a left outer join to the statement that's being constructed."""
if not context.query._enable_eagerloads:
return
elif context.query._yield_per and self.uselist:
context.query._no_yield_per("joined collection")
path = path[self.parent_property]
with_polymorphic = None
user_defined_adapter = self._init_user_defined_eager_proc(
loadopt, context) if loadopt else False
if user_defined_adapter is not False:
clauses, adapter, add_to_collection = \
self._setup_query_on_user_defined_adapter(
context, entity, path, adapter,
user_defined_adapter
)
else:
# if not via query option, check for
# a cycle
if not path.contains(context.attributes, "loader"):
if self.join_depth:
if path.length / 2 > self.join_depth:
return
elif path.contains_mapper(self.mapper):
return
clauses, adapter, add_to_collection, chained_from_outerjoin = \
self._generate_row_adapter(
context, entity, path, loadopt, adapter,
column_collection, parentmapper, chained_from_outerjoin
)
with_poly_info = path.get(
context.attributes,
"path_with_polymorphic",
None
)
if with_poly_info is not None:
with_polymorphic = with_poly_info.with_polymorphic_mappers
else:
with_polymorphic = None
path = path[self.mapper]
loading._setup_entity_query(
context, self.mapper, entity,
path, clauses, add_to_collection,
with_polymorphic=with_polymorphic,
parentmapper=self.mapper,
chained_from_outerjoin=chained_from_outerjoin)
if with_poly_info is not None and \
None in set(context.secondary_columns):
raise sa_exc.InvalidRequestError(
"Detected unaliased columns when generating joined "
"load. Make sure to use aliased=True or flat=True "
"when using joined loading with with_polymorphic()."
)
def _init_user_defined_eager_proc(self, loadopt, context):
# check if the opt applies at all
if "eager_from_alias" not in loadopt.local_opts:
# nope
return False
path = loadopt.path.parent
# the option applies. check if the "user_defined_eager_row_processor"
# has been built up.
adapter = path.get(
context.attributes,
"user_defined_eager_row_processor", False)
if adapter is not False:
# just return it
return adapter
# otherwise figure it out.
alias = loadopt.local_opts["eager_from_alias"]
root_mapper, prop = path[-2:]
#from .mapper import Mapper
#from .interfaces import MapperProperty
#assert isinstance(root_mapper, Mapper)
#assert isinstance(prop, MapperProperty)
if alias is not None:
if isinstance(alias, str):
alias = prop.target.alias(alias)
adapter = sql_util.ColumnAdapter(
alias,
equivalents=prop.mapper._equivalent_columns)
else:
if path.contains(context.attributes, "path_with_polymorphic"):
with_poly_info = path.get(
context.attributes,
"path_with_polymorphic")
adapter = orm_util.ORMAdapter(
with_poly_info.entity,
equivalents=prop.mapper._equivalent_columns)
else:
adapter = context.query._polymorphic_adapters.get(
prop.mapper, None)
path.set(
context.attributes,
"user_defined_eager_row_processor",
adapter)
return adapter
def _setup_query_on_user_defined_adapter(
self, context, entity,
path, adapter, user_defined_adapter):
# apply some more wrapping to the "user defined adapter"
# if we are setting up the query for SQL render.
adapter = entity._get_entity_clauses(context.query, context)
if adapter and user_defined_adapter:
user_defined_adapter = user_defined_adapter.wrap(adapter)
path.set(
context.attributes, "user_defined_eager_row_processor",
user_defined_adapter)
elif adapter:
user_defined_adapter = adapter
path.set(
context.attributes, "user_defined_eager_row_processor",
user_defined_adapter)
add_to_collection = context.primary_columns
return user_defined_adapter, adapter, add_to_collection
def _generate_row_adapter(
self,
context, entity, path, loadopt, adapter,
column_collection, parentmapper, chained_from_outerjoin):
with_poly_info = path.get(
context.attributes,
"path_with_polymorphic",
None
)
if with_poly_info:
to_adapt = with_poly_info.entity
else:
to_adapt = orm_util.AliasedClass(
self.mapper,
flat=True,
use_mapper_path=True)
clauses = orm_util.ORMAdapter(
to_adapt,
equivalents=self.mapper._equivalent_columns,
adapt_required=True, allow_label_resolve=False,
anonymize_labels=True)
assert clauses.aliased_class is not None
if self.parent_property.uselist:
context.multi_row_eager_loaders = True
innerjoin = (
loadopt.local_opts.get(
'innerjoin', self.parent_property.innerjoin)
if loadopt is not None
else self.parent_property.innerjoin
)
if not innerjoin:
# if this is an outer join, all non-nested eager joins from
# this path must also be outer joins
chained_from_outerjoin = True
context.create_eager_joins.append(
(
self._create_eager_join, context,
entity, path, adapter,
parentmapper, clauses, innerjoin, chained_from_outerjoin
)
)
add_to_collection = context.secondary_columns
path.set(context.attributes, "eager_row_processor", clauses)
return clauses, adapter, add_to_collection, chained_from_outerjoin
def _create_eager_join(
self, context, entity,
path, adapter, parentmapper,
clauses, innerjoin, chained_from_outerjoin):
if parentmapper is None:
localparent = entity.mapper
else:
localparent = parentmapper
# whether or not the Query will wrap the selectable in a subquery,
# and then attach eager load joins to that (i.e., in the case of
# LIMIT/OFFSET etc.)
should_nest_selectable = context.multi_row_eager_loaders and \
context.query._should_nest_selectable
entity_key = None
if entity not in context.eager_joins and \
not should_nest_selectable and \
context.from_clause:
index, clause = sql_util.find_join_source(
context.from_clause, entity.selectable)
if clause is not None:
# join to an existing FROM clause on the query.
# key it to its list index in the eager_joins dict.
# Query._compile_context will adapt as needed and
# append to the FROM clause of the select().
entity_key, default_towrap = index, clause
if entity_key is None:
entity_key, default_towrap = entity, entity.selectable
towrap = context.eager_joins.setdefault(entity_key, default_towrap)
if adapter:
if getattr(adapter, 'aliased_class', None):
# joining from an adapted entity. The adapted entity
# might be a "with_polymorphic", so resolve that to our
# specific mapper's entity before looking for our attribute
# name on it.
efm = inspect(adapter.aliased_class).\
_entity_for_mapper(
parentmapper
if parentmapper.isa(self.parent) else self.parent)
# look for our attribute on the adapted entity, else fall back
# to our straight property
onclause = getattr(
efm.entity, self.key,
self.parent_property)
else:
onclause = getattr(
orm_util.AliasedClass(
self.parent,
adapter.selectable,
use_mapper_path=True
),
self.key, self.parent_property
)
else:
onclause = self.parent_property
assert clauses.aliased_class is not None
attach_on_outside = (
not chained_from_outerjoin or
not innerjoin or innerjoin == 'unnested')
if attach_on_outside:
# this is the "classic" eager join case.
eagerjoin = orm_util._ORMJoin(
towrap,
clauses.aliased_class,
onclause,
isouter=not innerjoin or (
chained_from_outerjoin and isinstance(towrap, sql.Join)
), _left_memo=self.parent, _right_memo=self.mapper
)
else:
# all other cases are innerjoin=='nested' approach
eagerjoin = self._splice_nested_inner_join(
path, towrap, clauses, onclause)
context.eager_joins[entity_key] = eagerjoin
# send a hint to the Query as to where it may "splice" this join
eagerjoin.stop_on = entity.selectable
if not parentmapper:
            # for parentclause that is the non-eager end of the join,
            # ensure all the parent cols in the primaryjoin are actually
            # in the columns clause (i.e. are not deferred), so that
            # aliasing applied by the Query propagates those columns
            # outward.  This has the effect of "undefering" those columns.
for col in sql_util._find_columns(
self.parent_property.primaryjoin):
if localparent.mapped_table.c.contains_column(col):
if adapter:
col = adapter.columns[col]
context.primary_columns.append(col)
if self.parent_property.order_by:
context.eager_order_by += eagerjoin._target_adapter.\
copy_and_process(
util.to_list(
self.parent_property.order_by
)
)
def _splice_nested_inner_join(
self, path, join_obj, clauses, onclause, splicing=False):
if splicing is False:
# first call is always handed a join object
# from the outside
assert isinstance(join_obj, orm_util._ORMJoin)
elif isinstance(join_obj, sql.selectable.FromGrouping):
return self._splice_nested_inner_join(
path, join_obj.element, clauses, onclause, splicing
)
elif not isinstance(join_obj, orm_util._ORMJoin):
if path[-2] is splicing:
return orm_util._ORMJoin(
join_obj, clauses.aliased_class,
onclause, isouter=False,
_left_memo=splicing,
_right_memo=path[-1].mapper
)
else:
# only here if splicing == True
return None
target_join = self._splice_nested_inner_join(
path, join_obj.right, clauses,
onclause, join_obj._right_memo)
if target_join is None:
right_splice = False
target_join = self._splice_nested_inner_join(
path, join_obj.left, clauses,
onclause, join_obj._left_memo)
if target_join is None:
# should only return None when recursively called,
# e.g. splicing==True
assert splicing is not False, \
"assertion failed attempting to produce joined eager loads"
return None
else:
right_splice = True
if right_splice:
# for a right splice, attempt to flatten out
# a JOIN b JOIN c JOIN .. to avoid needless
# parenthesis nesting
if not join_obj.isouter and not target_join.isouter:
eagerjoin = join_obj._splice_into_center(target_join)
else:
eagerjoin = orm_util._ORMJoin(
join_obj.left, target_join,
join_obj.onclause, isouter=join_obj.isouter,
_left_memo=join_obj._left_memo)
else:
eagerjoin = orm_util._ORMJoin(
target_join, join_obj.right,
join_obj.onclause, isouter=join_obj.isouter,
_right_memo=join_obj._right_memo)
eagerjoin._target_adapter = target_join._target_adapter
return eagerjoin
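    # Illustrative sketch (added commentary, not part of the original source;
    # A, B and their relationships are assumed mappings): the "nested" inner
    # join handling above is what lets an option chain like
    #   query(A).options(joinedload(A.bs, innerjoin=False)
    #                    .joinedload(B.cs, innerjoin="nested"))
    # render a LEFT OUTER JOIN (b JOIN c) instead of appending the inner join
    # to the end of the chain; the right-splice branch additionally flattens
    # a JOIN (b JOIN c) into a JOIN b JOIN c when both joins are inner.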
def _create_eager_adapter(self, context, result, adapter, path, loadopt):
user_defined_adapter = self._init_user_defined_eager_proc(
loadopt, context) if loadopt else False
if user_defined_adapter is not False:
decorator = user_defined_adapter
# user defined eagerloads are part of the "primary"
# portion of the load.
# the adapters applied to the Query should be honored.
if context.adapter and decorator:
decorator = decorator.wrap(context.adapter)
elif context.adapter:
decorator = context.adapter
else:
decorator = path.get(context.attributes, "eager_row_processor")
if decorator is None:
return False
if self.mapper._result_has_identity_key(result, decorator):
return decorator
else:
# no identity key - don't return a row
# processor, will cause a degrade to lazy
return False
def create_row_processor(
self, context, path, loadopt, mapper,
result, adapter, populators):
if not self.parent.class_manager[self.key].impl.supports_population:
raise sa_exc.InvalidRequestError(
"'%s' does not support object "
"population - eager loading cannot be applied." %
self
)
our_path = path[self.parent_property]
eager_adapter = self._create_eager_adapter(
context,
result,
adapter, our_path, loadopt)
if eager_adapter is not False:
key = self.key
_instance = loading._instance_processor(
self.mapper,
context,
result,
our_path[self.mapper],
eager_adapter)
if not self.uselist:
self._create_scalar_loader(context, key, _instance, populators)
else:
self._create_collection_loader(
context, key, _instance, populators)
else:
self.parent_property._get_strategy_by_cls(LazyLoader).\
create_row_processor(
context, path, loadopt,
mapper, result, adapter, populators)
def _create_collection_loader(self, context, key, _instance, populators):
def load_collection_from_joined_new_row(state, dict_, row):
collection = attributes.init_state_collection(
state, dict_, key)
result_list = util.UniqueAppender(collection,
'append_without_event')
context.attributes[(state, key)] = result_list
inst = _instance(row)
if inst is not None:
result_list.append(inst)
def load_collection_from_joined_existing_row(state, dict_, row):
if (state, key) in context.attributes:
result_list = context.attributes[(state, key)]
else:
# appender_key can be absent from context.attributes
# with isnew=False when self-referential eager loading
# is used; the same instance may be present in two
# distinct sets of result columns
collection = attributes.init_state_collection(
state, dict_, key)
result_list = util.UniqueAppender(
collection,
'append_without_event')
context.attributes[(state, key)] = result_list
inst = _instance(row)
if inst is not None:
result_list.append(inst)
def load_collection_from_joined_exec(state, dict_, row):
_instance(row)
populators["new"].append((self.key, load_collection_from_joined_new_row))
populators["existing"].append(
(self.key, load_collection_from_joined_existing_row))
if context.invoke_all_eagers:
populators["eager"].append(
(self.key, load_collection_from_joined_exec))
def _create_scalar_loader(self, context, key, _instance, populators):
def load_scalar_from_joined_new_row(state, dict_, row):
# set a scalar object instance directly on the parent
# object, bypassing InstrumentedAttribute event handlers.
dict_[key] = _instance(row)
def load_scalar_from_joined_existing_row(state, dict_, row):
# call _instance on the row, even though the object has
# been created, so that we further descend into properties
existing = _instance(row)
if existing is not None \
and key in dict_ \
and existing is not dict_[key]:
util.warn(
"Multiple rows returned with "
"uselist=False for eagerly-loaded attribute '%s' "
% self)
def load_scalar_from_joined_exec(state, dict_, row):
_instance(row)
populators["new"].append((self.key, load_scalar_from_joined_new_row))
populators["existing"].append(
(self.key, load_scalar_from_joined_existing_row))
if context.invoke_all_eagers:
populators["eager"].append((self.key, load_scalar_from_joined_exec))
def single_parent_validator(desc, prop):
def _do_check(state, value, oldvalue, initiator):
if value is not None and initiator.key == prop.key:
hasparent = initiator.hasparent(attributes.instance_state(value))
if hasparent and oldvalue is not value:
raise sa_exc.InvalidRequestError(
"Instance %s is already associated with an instance "
"of %s via its %s attribute, and is only allowed a "
"single parent." %
(orm_util.instance_str(value), state.class_, prop)
)
return value
def append(state, value, initiator):
return _do_check(state, value, None, initiator)
def set_(state, value, oldvalue, initiator):
return _do_check(state, value, oldvalue, initiator)
event.listen(
desc, 'append', append, raw=True, retval=True,
active_history=True)
event.listen(
desc, 'set', set_, raw=True, retval=True,
active_history=True)
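# Usage sketch (Parent/Child are assumed mappings, added for illustration):
# the validator above is installed when a relationship is configured with
# single_parent=True; re-parenting an instance that already has a parent
# raises InvalidRequestError:
#
#   child = Child()
#   parent_a.children.append(child)
#   parent_b.children.append(child)   # raises InvalidRequestError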
| 36.151328 | 81 | 0.594457 |
7942b2d8d69bef44ed495b0a5dc2aa7369f59957 | 1,288 | py | Python | src/nyko/grammar.py | stevencox/nyko | 0619b4691e82d874110bae3388df1ef40865dd5e | [
"MIT"
] | null | null | null | src/nyko/grammar.py | stevencox/nyko | 0619b4691e82d874110bae3388df1ef40865dd5e | [
"MIT"
] | null | null | null | src/nyko/grammar.py | stevencox/nyko | 0619b4691e82d874110bae3388df1ef40865dd5e | [
"MIT"
] | null | null | null | from pyparsing import (
Combine, Word, White, Literal, delimitedList, Optional, Empty, Suppress,
Group, alphas, alphanums, printables, Forward, oneOf, quotedString,
QuotedString, ZeroOrMore, restOfLine, CaselessKeyword, ParserElement,
LineEnd, removeQuotes, Regex, nestedExpr, pyparsing_common as ppc
)
""" A program is a list of statements. """
optWhite = ZeroOrMore(LineEnd() | White())
string = Word(alphas, alphanums + '.')
quotedName = QuotedString(quoteChar='"', unquoteResults=True)
SEMI,COLON,LPAR,RPAR,LBRACE,RBRACE,LBRACK,RBRACK,DOT,COMMA,EQ = map(
Literal,
";:(){}[].,=")
""" Tokens. """
INCLUDE, VLAN, VID, DESCRIPTION = map(
CaselessKeyword,
"include vlan vid description".split())
vlan_name = Word(alphas, alphanums)
vlan_vid = ppc.integer("vid")
""" Grammar productions. """
statement = Forward()
statement <<= (
Group(
Group(VLAN + vlan_name) + optWhite +
Group(VID + vlan_vid) + optWhite +
Group(Optional(DESCRIPTION + quotedName)) + optWhite
) |
Group (
INCLUDE + string
)
)("statement")
""" Make a program a series of statements. """
program_grammar = statement + ZeroOrMore(statement)
""" Make rest-of-line comments. """
comment = "--" + restOfLine
program_grammar.ignore(comment)
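# Minimal usage sketch (the sample configuration below is invented):
#
#   sample = '''
#   -- switch configuration
#   vlan management vid 10 description "mgmt network"
#   include common.cfg
#   '''
#   for stmt in program_grammar.parseString(sample):
#       print(stmt.asList())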
| 29.953488 | 76 | 0.677795 |
7942b3ae87102044944beb6285f42147df81b775 | 1,321 | py | Python | android/build.py | MNannig/Vulkan-Examples | 9d76cea05fadf3a6eefae3094c500f84020f706b | [
"MIT"
] | 2 | 2019-08-06T04:33:35.000Z | 2019-10-06T20:03:58.000Z | android/build.py | LiangYue1981816/GraphicsSamples-Vulkan | d15daabc582d4f8b198fdd6ec5651ec4298182a1 | [
"MIT"
] | null | null | null | android/build.py | LiangYue1981816/GraphicsSamples-Vulkan | d15daabc582d4f8b198fdd6ec5651ec4298182a1 | [
"MIT"
] | 1 | 2022-03-23T20:26:33.000Z | 2022-03-23T20:26:33.000Z | # Single example build and deploy script
import os
import subprocess
import sys
import shutil
import glob
# Android SDK version used
SDK_VERSION = "android-23"
PROJECT_FOLDER = ""
# Name/folder of the project to build
if len(sys.argv) > 1:
PROJECT_FOLDER = sys.argv[1]
if not os.path.exists(PROJECT_FOLDER):
print("Please specify a valid project folder to build!")
sys.exit(-1)
# Check if a build file is present, if not create one using the android SDK version specified
if not os.path.isfile(os.path.join(PROJECT_FOLDER, "build.xml")):
print("Build.xml not present, generating with %s " % SDK_VERSION)
if subprocess.call("android.bat update project -p ./%s -t %s" % (PROJECT_FOLDER, SDK_VERSION)) != 0:
print("Error: Project update failed!")
sys.exit(-1)
# Run actual build script from example folder
if not os.path.isfile(os.path.join(PROJECT_FOLDER, "build.py")):
print("Error: No build script present!")
sys.exit(-1)
BUILD_ARGUMENTS = " ".join(sys.argv[2:])
os.chdir(PROJECT_FOLDER)
if subprocess.call("python build.py %s" % BUILD_ARGUMENTS) != 0:
print("Error during build process!")
sys.exit(-1)
# Move apk to bin folder
os.makedirs("../bin", exist_ok=True)
for file in glob.glob("vulkan*.apk"):
print(file)
shutil.move(file, "../bin/%s" % file)
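# Example invocation (project name and extra flags are hypothetical):
#   python build.py vulkanExample -deploy
# Everything after the project folder is forwarded verbatim to the project's
# own build.py through BUILD_ARGUMENTS.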
| 30.022727 | 104 | 0.697199 |
7942b48cc05d832558c769b85fc0f8f6b8518744 | 5,109 | py | Python | seleniumbase/core/style_sheet.py | yyb27638/py_scripts | 691f0cff1de712b6d325acbe535461c27fa3659c | [
"MIT"
] | 5 | 2021-07-30T10:08:29.000Z | 2022-01-23T12:22:42.000Z | seleniumbase/core/style_sheet.py | yyb27638/py_scripts | 691f0cff1de712b6d325acbe535461c27fa3659c | [
"MIT"
] | null | null | null | seleniumbase/core/style_sheet.py | yyb27638/py_scripts | 691f0cff1de712b6d325acbe535461c27fa3659c | [
"MIT"
] | 11 | 2020-08-08T02:34:18.000Z | 2022-01-23T12:22:43.000Z | title = '''<meta id="OGTitle" property="og:title" content="SeleniumBase">
<title>Test Report</title>
<link rel="SHORTCUT ICON"
href="%s" /> ''' % (
"https://raw.githubusercontent.com/seleniumbase/SeleniumBase"
"/master/seleniumbase/resources/favicon.ico")
style = title + '''<style type="text/css">
html {
background-color: #9988ad;
}
html, body {
font-size: 100%;
box-sizing: border-box;
}
body {
background-image: none;
background-origin: padding-box;
background-color: #c6d6f0;
    padding: 30px;
    margin: 10px;
font-family: "Proxima Nova","proxima-nova",
"Helvetica Neue",Helvetica,Arial,sans-serif !important;
text-rendering: optimizelegibility;
-moz-osx-font-smoothing: grayscale;
box-shadow: 0px 2px 5px 1px rgba(0, 0, 0, 0.24),
1px 2px 12px 0px rgba(0, 0, 0, 0.18) !important;
}
table {
width: 100%;
border-collapse: collapse;
border-spacing: 0;
box-shadow: 0px 2px 5px 1px rgba(0, 0, 0, 0.27),
1px 2px 12px 0px rgba(0, 0, 0, 0.21) !important;
transition: all 0.15s ease-out 0s;
transition-property: all;
transition-duration: 0.1s;
transition-timing-function: ease-out;
transition-delay: 0s;
}
table:hover {
box-shadow: 0px 2px 5px 1px rgba(0, 0, 0, 0.35),
1px 2px 12px 0px rgba(0, 0, 0, 0.28) !important;
}
thead th, thead td {
padding: 0.5rem 0.625rem 0.625rem;
font-weight: bold;
text-align: left;
}
thead {
text-align: center;
border: 1px solid #e1e1e1;
width: 150%;
color: #0C8CDF;
background-color: #c0f0ff;
}
tbody tr:nth-child(even) {
background-color: #f1f1f1;
}
tbody tr:nth-child(odd) {
background-color: #ffffff;
}
tbody tr:nth-child(even):hover {
background-color: #f8f8d2;
}
tbody tr:nth-child(odd):hover {
background-color: #ffffe0;
}
tbody th, tbody td {
padding: 0.5rem 0.625rem 0.625rem;
}
tbody {
border: 1px solid #e1e1e1;
background-color: #fefefe;
}
td {
padding: 5px 5px 5px 0;
vertical-align: top;
}
h1 table {
font-size: 27px;
text-align: left;
padding: 0.5rem 0.625rem 0.625rem;
font-weight: bold;
padding-right: 10px;
padding-left: 20px;
padding: 15px 15px 15px 0;
}
h2 table {
color: #0C8CDF;
font-size: 16px;
text-align: left;
font-weight: bold;
padding: 5px 5px 5px 0;
padding-right: 10px;
padding-left: 20px;
}
</style>'''
# Bootstrap Tour Backdrop Style
bt_backdrop_style = (
'''
.tour-tour-element {
box-shadow: 0 0 0 99999px rgba(0, 0, 0, 0.20);
pointer-events: none !important;
z-index: 9999;
}
:not(.tour-tour-element) .orphan.tour-tour {
box-shadow: 0 0 0 99999px rgba(0, 0, 0, 0.20);
}
.tour-tour {
pointer-events: auto;
z-index: 9999;
}
''')
# DriverJS Tour Backdrop Style
dt_backdrop_style = (
'''
.driver-fix-stacking {
pointer-events: none !important;
}
#driver-popover-item, .popover-class {
pointer-events: auto !important;
}
''')
messenger_style = (
'''
.messenger-message-inner {
font-family: "Proxima Nova","proxima-nova",Arial,sans-serif !important;
font-size: 17px;
}
''')
sh_style_test = (
'''
var test_tour = new Shepherd.Tour({
defaults: {
classes: 'shepherd-theme-dark',
scrollTo: true
}
});
''')
# Hopscotch Backdrop Style
hops_backdrop_style = (
'''
.hopscotch-bubble-container {
font-size: 110%;
}
''')
# Shepherd Backdrop Style
sh_backdrop_style = (
'''
body.shepherd-active .shepherd-target.shepherd-enabled {
box-shadow: 0 0 0 99999px rgba(0, 0, 0, 0.20);
pointer-events: none !important;
z-index: 9999;
}
body.shepherd-active .shepherd-orphan {
box-shadow: 0 0 0 99999px rgba(0, 0, 0, 0.20);
pointer-events: auto;
z-index: 9999;
}
body.shepherd-active
.shepherd-enabled.shepherd-element-attached-top {
position: relative;
}
body.shepherd-active
.shepherd-enabled.shepherd-element-attached-bottom {
position: relative;
}
body.shepherd-active .shepherd-step {
pointer-events: auto;
z-index: 9999;
}
body.shepherd-active {
pointer-events: none !important;
}
''')
| 26.889474 | 79 | 0.517322 |
7942b49f4d4fe582683c0a7bf7093c28959771af | 1,903 | py | Python | stubs.min/System/ComponentModel/__init___parts/CharConverter.py | ricardyn/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | 1 | 2021-02-02T13:39:16.000Z | 2021-02-02T13:39:16.000Z | stubs.min/System/ComponentModel/__init___parts/CharConverter.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | stubs.min/System/ComponentModel/__init___parts/CharConverter.py | hdm-dt-fb/ironpython-stubs | 4d2b405eda3ceed186e8adca55dd97c332c6f49d | [
"MIT"
] | null | null | null | class CharConverter(TypeConverter):
"""
Provides a type converter to convert Unicode character objects to and from various other representations.
CharConverter()
"""
def CanConvertFrom(self,*__args):
"""
CanConvertFrom(self: CharConverter,context: ITypeDescriptorContext,sourceType: Type) -> bool
Gets a value indicating whether this converter can convert an object in the
given source type to a Unicode character object using the specified context.
context: An System.ComponentModel.ITypeDescriptorContext that provides a format context.
sourceType: A System.Type that represents the type you want to convert from.
        Returns: true if this converter can perform the conversion; otherwise, false.
"""
pass
def ConvertFrom(self,*__args):
"""
ConvertFrom(self: CharConverter,context: ITypeDescriptorContext,culture: CultureInfo,value: object) -> object
Converts the given object to a Unicode character object.
context: An System.ComponentModel.ITypeDescriptorContext that provides a format context.
culture: The culture into which value will be converted.
value: The System.Object to convert.
Returns: An System.Object that represents the converted value.
"""
pass
def ConvertTo(self,*__args):
"""
ConvertTo(self: CharConverter,context: ITypeDescriptorContext,culture: CultureInfo,value: object,destinationType: Type) -> object
Converts the given value object to a Unicode character object using the
arguments.
context: An System.ComponentModel.ITypeDescriptorContext that provides a format context.
culture: The culture into which value will be converted.
value: The System.Object to convert.
destinationType: The System.Type to convert the value to.
Returns: An System.Object that represents the converted value.
"""
pass
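# Usage sketch (IronPython, illustrative only; the real behaviour comes from
# the underlying .NET System.ComponentModel.CharConverter):
#
#   converter = CharConverter()
#   converter.ConvertFrom(None, None, "A")   # -> System.Char 'A'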
| 40.489362 | 133 | 0.73463 |
7942b4bd1f5da5f2585853afd177ead4344e6a16 | 3,311 | py | Python | cgi-bin/download.py | fanuware/pybrowser | 910cebaee45524248c18d86605ba9e7f1b862c47 | [
"MIT"
] | null | null | null | cgi-bin/download.py | fanuware/pybrowser | 910cebaee45524248c18d86605ba9e7f1b862c47 | [
"MIT"
] | null | null | null | cgi-bin/download.py | fanuware/pybrowser | 910cebaee45524248c18d86605ba9e7f1b862c47 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import cgi
import cgitb
import os
import sys
import zipfile
import mimetypes
import templates
from userlogger import UserLogger
cgitb.enable()
def getRecbin():
if not os.path.isdir('recbin') and not os.path.isdir('../recbin'):
os.mkdir('recbin')
return os.path.abspath('recbin' if os.path.isdir('recbin') else '../recbin')
def getUnusedName(file):
if not os.path.exists(file):
return file
basepath, basename = os.path.split(file)
p = basename.rfind('.')
extension = basename[p:] if p > 0 else ""
name = basename[:len(basename)-len(extension)]
counter = 0
outFile = file
while os.path.exists(outFile):
counter += 1
outFile = os.path.join(basepath, name + str(counter) + extension)
return outFile
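# e.g. getUnusedName("/tmp/report.zip") returns "/tmp/report.zip" when that
# path is free, otherwise "/tmp/report1.zip", "/tmp/report2.zip", ... until
# an unused name is found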
##################################################
# main
# create instance of field storage
form = cgi.FieldStorage()
userLogger = UserLogger()
# receive filepath; FieldStorage.getvalue returns None (rather than raising)
# when the key is absent, so normalise to an empty string explicitly
file = form.getvalue("path") or ""
# allows browser to display known content inline; inline display is currently
# forced off whenever the "inline" parameter is supplied
isInline = True
w = form.getvalue("inline")
if isinstance(w, str):
    #isInline = w.lower() != "0" and w.lower() != "false"
    isInline = False
##################################################
# permission guard
userPermission = userLogger.getPermission(os.path.dirname(file))
# make sure user is allowed to read
if (userPermission < UserLogger.PERMISSION_READ):
if "redirect" not in form:
args = '&'.join([key + '=' + str(form[key].value) for key in form.keys()])
if args:
url = os.path.basename(os.environ['SCRIPT_NAME']) + '?redirect=True&' + args
else:
url = os.path.basename(os.environ['SCRIPT_NAME']) + '?redirect=True'
templates.redirect(url)
else:
userLogger.showLogin('Identification required')
##################################################
# create download
def createDownload(fullname, **kwargs):
mime_type = mimetypes.guess_type(fullname)[0]
if not mime_type:
mime_type = 'application/octet-stream'
print ("Content-Type: " + mime_type)
name = os.path.basename(fullname) if 'name' not in kwargs else kwargs['name']
if 'inline' not in kwargs or kwargs['inline'] is False:
print ("Content-Disposition: attachment; filename=" + name)
print ("")
sys.stdout.flush()
sys.stdout.buffer.write(open(fullname, "rb").read())
if os.path.isfile(file):
createDownload(file, inline=(isInline))
elif os.path.isdir(file):
def zipdir(path, ziph, ignore):
for root, dirs, files in os.walk(path):
if ignore in root:
continue
for file in files:
try:
absPath = os.path.join(root, file)
ziph.write(
absPath,
os.path.relpath(absPath, os.path.dirname(path)))
except PermissionError as e:
pass
recbin = getRecbin()
tmpZipName = getUnusedName(os.path.join(recbin, 'tmpZip.zip'))
zipf = zipfile.ZipFile(tmpZipName, 'w', zipfile.ZIP_DEFLATED)
zipdir(file, zipf, recbin)
zipf.close()
createDownload(tmpZipName, name=(os.path.basename(file) + '.zip'))
os.remove(tmpZipName)
else:
templates.error(file)
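# Example request (illustrative URL; assumes the script is served via CGI):
#   GET /cgi-bin/download.py?path=/srv/files/report.pdf&inline=0
# A file path is streamed as a single download; a directory path is first
# zipped into a temporary archive inside the recycle-bin folder.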
| 29.5625 | 88 | 0.601027 |
7942b4e04bf63ca7bcee39b528705c213128119b | 25,832 | py | Python | wnf.py | saurabh6790/OFF-RISLIB | eb7866227c5ff085ea714f79576281d82365f4fe | [
"MIT"
] | null | null | null | wnf.py | saurabh6790/OFF-RISLIB | eb7866227c5ff085ea714f79576281d82365f4fe | [
"MIT"
] | null | null | null | wnf.py | saurabh6790/OFF-RISLIB | eb7866227c5ff085ea714f79576281d82365f4fe | [
"MIT"
] | null | null | null | #!/usr/bin/env python2.7
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import sys
if __name__=="__main__":
sys.path = [".", "lib", "app"] + sys.path
import webnotes
def main():
parsed_args = webnotes._dict(vars(setup_parser()))
fn = get_function(parsed_args)
if parsed_args.get("site")=="all":
for site in get_sites():
args = parsed_args.copy()
args["site"] = site
run(fn, args)
else:
run(fn, parsed_args)
def cmd(fn):
def new_fn(*args, **kwargs):
import inspect
fnargs, varargs, varkw, defaults = inspect.getargspec(fn)
new_kwargs = {}
for a in fnargs:
if a in kwargs:
new_kwargs[a] = kwargs.get(a)
return fn(*args, **new_kwargs)
return new_fn
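# e.g. a handler declared as `def backup(site=None, with_files=False)` can be
# called with the full argparse namespace: @cmd inspects the handler's
# signature and silently drops every keyword argument it does not accept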
def run(fn, args):
if isinstance(args.get(fn), (list, tuple)):
out = globals().get(fn)(*args.get(fn), **args)
else:
out = globals().get(fn)(**args)
return out
def get_function(args):
for fn, val in args.items():
if (val or isinstance(val, list)) and globals().get(fn):
return fn
def get_sites():
import os
import conf
return [site for site in os.listdir(conf.sites_dir)
if not os.path.islink(os.path.join(conf.sites_dir, site))
and os.path.isdir(os.path.join(conf.sites_dir, site))]
def setup_parser():
import argparse
parser = argparse.ArgumentParser(description="Run webnotes utility functions")
setup_install(parser)
setup_utilities(parser)
setup_translation(parser)
setup_git(parser)
# common
parser.add_argument("-f", "--force", default=False, action="store_true",
help="Force execution where applicable (look for [-f] in help)")
parser.add_argument("--quiet", default=True, action="store_false", dest="verbose",
help="Don't show verbose output where applicable")
parser.add_argument("--site", nargs="?", metavar="SITE-NAME or all",
help="Run for a particular site")
parser.add_argument("--plugin", nargs="?", metavar="PLUGIN-NAME",
help="Run for a particular plugin")
return parser.parse_args()
def setup_install(parser):
parser.add_argument("--install", metavar="DB-NAME", nargs=1,
help="Install a new app")
parser.add_argument("--root-password", nargs=1,
help="Root password for new app")
parser.add_argument("--reinstall", default=False, action="store_true",
help="Install a fresh app in db_name specified in conf.py")
parser.add_argument("--restore", metavar=("DB-NAME", "SQL-FILE"), nargs=2,
help="Restore from an sql file")
parser.add_argument("--install_fixtures", default=False, action="store_true",
help="(Re)Install install-fixtures from app/startup/install_fixtures")
parser.add_argument("--make_demo", default=False, action="store_true",
help="Install demo in demo_db_name specified in conf.py")
parser.add_argument("--make_demo_fresh", default=False, action="store_true",
help="(Re)Install demo in demo_db_name specified in conf.py")
parser.add_argument("--add_system_manager", nargs="+",
metavar=("EMAIL", "[FIRST-NAME] [LAST-NAME]"), help="Add a user with all roles")
def setup_utilities(parser):
# update
parser.add_argument("-u", "--update", nargs="*", metavar=("REMOTE", "BRANCH"),
help="Perform git pull, run patches, sync schema and rebuild files/translations")
parser.add_argument("--reload_gunicorn", default=False, action="store_true", help="reload gunicorn on update")
parser.add_argument("--patch", nargs=1, metavar="PATCH-MODULE",
help="Run a particular patch [-f]")
parser.add_argument("-l", "--latest", default=False, action="store_true",
help="Run patches, sync schema and rebuild files/translations")
parser.add_argument("--sync_all", default=False, action="store_true",
help="Reload all doctypes, pages, etc. using txt files [-f]")
parser.add_argument("--update_all_sites", nargs="*", metavar=("REMOTE", "BRANCH"),
help="Perform git pull, run patches, sync schema and rebuild files/translations")
parser.add_argument("--reload_doc", nargs=3,
metavar=('"MODULE"', '"DOCTYPE"', '"DOCNAME"'))
# build
parser.add_argument("-b", "--build", default=False, action="store_true",
help="Minify + concatenate JS and CSS files, build translations")
parser.add_argument("-w", "--watch", default=False, action="store_true",
help="Watch and concatenate JS and CSS files as and when they change")
# misc
parser.add_argument("--backup", default=False, action="store_true",
help="Take backup of database in backup folder [--with_files]")
parser.add_argument("--move", default=False, action="store_true",
help="Move site to different directory defined by --dest_dir")
parser.add_argument("--dest_dir", nargs=1, metavar="DEST-DIR",
help="Move site to different directory")
parser.add_argument("--with_files", default=False, action="store_true",
help="Also take backup of files")
parser.add_argument("--domain", nargs="*",
help="Get or set domain in Website Settings")
parser.add_argument("--make_conf", nargs="*", metavar=("DB-NAME", "DB-PASSWORD"),
help="Create new conf.py file")
parser.add_argument("--make_custom_server_script", nargs=1, metavar="DOCTYPE",
help="Create new conf.py file")
parser.add_argument("--set_admin_password", metavar='ADMIN-PASSWORD', nargs=1,
help="Set administrator password")
parser.add_argument("--mysql", action="store_true", help="get mysql shell for a site")
parser.add_argument("--serve", action="store_true", help="Run development server")
parser.add_argument("--profile", action="store_true", help="enable profiling in development server")
parser.add_argument("--smtp", action="store_true", help="Run smtp debug server",
dest="smtp_debug_server")
parser.add_argument("--python", action="store_true", help="get python shell for a site")
parser.add_argument("--ipython", action="store_true", help="get ipython shell for a site")
parser.add_argument("--get_site_status", action="store_true", help="Get site details")
parser.add_argument("--update_site_config", nargs=1,
metavar="SITE-CONFIG-JSON",
help="Update site_config.json for a given --site")
parser.add_argument("--port", default=8000, type=int, help="port for development server")
# clear
parser.add_argument("--clear_web", default=False, action="store_true",
help="Clear website cache")
parser.add_argument("--build_sitemap", default=False, action="store_true",
help="Build Website Sitemap")
parser.add_argument("--rebuild_sitemap", default=False, action="store_true",
help="Rebuild Website Sitemap")
parser.add_argument("--clear_cache", default=False, action="store_true",
help="Clear cache, doctype cache and defaults")
parser.add_argument("--reset_perms", default=False, action="store_true",
help="Reset permissions for all doctypes")
# scheduler
parser.add_argument("--run_scheduler", default=False, action="store_true",
help="Trigger scheduler")
parser.add_argument("--run_scheduler_event", nargs=1,
metavar="all | daily | weekly | monthly",
help="Run a scheduler event")
# replace
parser.add_argument("--replace", nargs=3,
metavar=("SEARCH-REGEX", "REPLACE-BY", "FILE-EXTN"),
help="Multi-file search-replace [-f]")
# import/export
parser.add_argument("--export_doc", nargs=2, metavar=('"DOCTYPE"', '"DOCNAME"'))
parser.add_argument("--export_doclist", nargs=3, metavar=("DOCTYPE", "NAME", "PATH"),
help="""Export doclist as json to the given path, use '-' as name for Singles.""")
parser.add_argument("--export_csv", nargs=2, metavar=("DOCTYPE", "PATH"),
help="""Dump DocType as csv""")
parser.add_argument("--import_doclist", nargs=1, metavar="PATH",
help="""Import (insert/update) doclist. If the argument is a directory, all files ending with .json are imported""")
def setup_git(parser):
parser.add_argument("--pull", nargs="*", metavar=("REMOTE", "BRANCH"),
help="Run git pull for both repositories")
parser.add_argument("-p", "--push", nargs="*", metavar=("REMOTE", "BRANCH"),
help="Run git push for both repositories")
parser.add_argument("--status", default=False, action="store_true",
help="Run git status for both repositories")
parser.add_argument("--commit", nargs=1, metavar="COMMIT-MSG",
help="Run git commit COMMIT-MSG for both repositories")
parser.add_argument("--checkout", nargs=1, metavar="BRANCH",
help="Run git checkout BRANCH for both repositories")
parser.add_argument("--git", nargs="*", metavar="OPTIONS",
help="Run git command for both repositories")
parser.add_argument("--bump", metavar=("REPO", "VERSION-TYPE"), nargs=2,
help="Bump project version")
def setup_translation(parser):
parser.add_argument("--build_message_files", default=False, action="store_true",
help="Build message files for translation")
parser.add_argument("--export_messages", nargs=2, metavar=("LANG-CODE", "FILENAME"),
help="""Export all messages for a language to translation in a csv file.
Example, lib/wnf.py --export_messages hi hindi.csv""")
parser.add_argument("--import_messages", nargs=2, metavar=("LANG-CODE", "FILENAME"),
help="""Import messages for a language and make language files.
Example, lib/wnf.py --import_messages hi hindi.csv""")
parser.add_argument("--google_translate", nargs=3,
metavar=("LANG-CODE", "INFILE", "OUTFILE"),
help="Auto translate using Google Translate API")
parser.add_argument("--translate", nargs=1, metavar="LANG-CODE",
help="""Rebuild translation for the given langauge and
use Google Translate to tranlate untranslated messages. use "all" """)
# methods
# install
@cmd
def install(db_name, source_sql=None, site=None, verbose=True, force=False, root_password=None, site_config=None, admin_password='admin'):
from webnotes.install_lib.install import Installer
inst = Installer('root', db_name=db_name, site=site, root_password=root_password, site_config=site_config)
inst.install(db_name, source_sql=source_sql, verbose=verbose, force=force, admin_password=admin_password)
webnotes.destroy()
@cmd
def reinstall(site=None, verbose=True):
webnotes.init(site=site)
install(webnotes.conf.db_name, site=site, verbose=verbose, force=True)
@cmd
def restore(db_name, source_sql, site=None, verbose=True, force=False):
install(db_name, source_sql, site=site, verbose=verbose, force=force)
@cmd
def install_fixtures(site=None):
webnotes.init(site=site)
from webnotes.install_lib.install import install_fixtures
install_fixtures()
webnotes.destroy()
@cmd
def add_system_manager(email, first_name=None, last_name=None, site=None):
webnotes.connect(site=site)
webnotes.profile.add_system_manager(email, first_name, last_name)
webnotes.conn.commit()
webnotes.destroy()
@cmd
def make_demo(site=None):
import utilities.demo.make_demo
webnotes.init(site=site)
utilities.demo.make_demo.make()
webnotes.destroy()
@cmd
def make_demo_fresh(site=None):
import utilities.demo.make_demo
webnotes.init(site=site)
utilities.demo.make_demo.make(reset=True)
webnotes.destroy()
# utilities
@cmd
def update(remote=None, branch=None, site=None, reload_gunicorn=False):
pull(remote=remote, branch=branch, site=site)
# maybe there are new framework changes, any consequences?
reload(webnotes)
if not site: build()
latest(site=site)
if reload_gunicorn:
import subprocess
subprocess.check_output("killall -HUP gunicorn".split())
@cmd
def latest(site=None, verbose=True):
import webnotes.modules.patch_handler
import webnotes.model.sync
import webnotes.plugins
from website.doctype.website_sitemap_config.website_sitemap_config import build_website_sitemap_config
webnotes.connect(site=site)
try:
# run patches
webnotes.local.patch_log_list = []
webnotes.modules.patch_handler.run_all()
if verbose:
print "\n".join(webnotes.local.patch_log_list)
# sync
webnotes.model.sync.sync_all()
# remove __init__.py from plugins
webnotes.plugins.remove_init_files()
# build website config if any changes in templates etc.
build_website_sitemap_config()
except webnotes.modules.patch_handler.PatchError, e:
print "\n".join(webnotes.local.patch_log_list)
raise
finally:
webnotes.destroy()
@cmd
def sync_all(site=None, force=False):
import webnotes.model.sync
webnotes.connect(site=site)
webnotes.model.sync.sync_all(force=force)
webnotes.destroy()
@cmd
def patch(patch_module, site=None, force=False):
import webnotes.modules.patch_handler
webnotes.connect(site=site)
webnotes.local.patch_log_list = []
webnotes.modules.patch_handler.run_single(patch_module, force=force)
print "\n".join(webnotes.local.patch_log_list)
webnotes.destroy()
@cmd
def update_all_sites(remote=None, branch=None, verbose=True):
pull(remote, branch)
# maybe there are new framework changes, any consequences?
reload(webnotes)
build()
for site in get_sites():
latest(site=site, verbose=verbose)
@cmd
def reload_doc(module, doctype, docname, plugin=None, site=None, force=False):
webnotes.connect(site=site)
webnotes.reload_doc(module, doctype, docname, plugin=plugin, force=force)
webnotes.conn.commit()
webnotes.destroy()
@cmd
def build():
import webnotes.build
webnotes.build.bundle(False)
@cmd
def watch():
import webnotes.build
webnotes.build.watch(True)
@cmd
def backup(site=None, with_files=False, verbose=True, backup_path_db=None, backup_path_files=None):
from webnotes.utils.backups import scheduled_backup
webnotes.connect(site=site)
odb = scheduled_backup(ignore_files=not with_files, backup_path_db=backup_path_db, backup_path_files=backup_path_files)
if verbose:
from webnotes.utils import now
print "database backup taken -", odb.backup_path_db, "- on", now()
if with_files:
print "files backup taken -", odb.backup_path_files, "- on", now()
webnotes.destroy()
return odb
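# Example invocation (site name is illustrative):
#   python lib/wnf.py --site mysite --backup --with_files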
@cmd
def move(site=None, dest_dir=None):
import os
if not dest_dir:
raise Exception, "--dest_dir is required for --move"
if not os.path.isdir(dest_dir):
raise Exception, "destination is not a directory or does not exist"
webnotes.init(site=site)
old_path = webnotes.utils.get_site_path()
new_path = os.path.join(dest_dir, site)
# check if site dump of same name already exists
site_dump_exists = True
count = 0
while site_dump_exists:
final_new_path = new_path + (count and str(count) or "")
site_dump_exists = os.path.exists(final_new_path)
count = int(count or 0) + 1
os.rename(old_path, final_new_path)
webnotes.destroy()
return os.path.basename(final_new_path)
@cmd
def domain(host_url=None, site=None):
webnotes.connect(site=site)
if host_url:
webnotes.conn.set_value("Website Settings", None, "subdomain", host_url)
webnotes.conn.commit()
else:
print webnotes.conn.get_value("Website Settings", None, "subdomain")
webnotes.destroy()
@cmd
def make_conf(db_name=None, db_password=None, site=None, site_config=None):
from webnotes.install_lib.install import make_conf
make_conf(db_name=db_name, db_password=db_password, site=site, site_config=site_config)
@cmd
def make_custom_server_script(doctype, site=None):
from core.doctype.custom_script.custom_script import make_custom_server_script_file
webnotes.connect(site=site)
make_custom_server_script_file(doctype)
webnotes.destroy()
# clear
@cmd
def clear_cache(site=None):
import webnotes.sessions
webnotes.connect(site=site)
webnotes.sessions.clear_cache()
webnotes.destroy()
@cmd
def clear_web(site=None):
import webnotes.webutils
webnotes.connect(site=site)
from website.doctype.website_sitemap_config.website_sitemap_config import build_website_sitemap_config
build_website_sitemap_config()
webnotes.webutils.clear_cache()
webnotes.destroy()
@cmd
def build_sitemap(site=None):
from website.doctype.website_sitemap_config.website_sitemap_config import build_website_sitemap_config
webnotes.connect(site=site)
build_website_sitemap_config()
webnotes.destroy()
@cmd
def rebuild_sitemap(site=None):
from website.doctype.website_sitemap_config.website_sitemap_config import rebuild_website_sitemap_config
webnotes.connect(site=site)
rebuild_website_sitemap_config()
webnotes.destroy()
@cmd
def reset_perms(site=None):
webnotes.connect(site=site)
for d in webnotes.conn.sql_list("""select name from `tabDocType`
where ifnull(istable, 0)=0 and ifnull(custom, 0)=0"""):
webnotes.clear_cache(doctype=d)
webnotes.reset_perms(d)
webnotes.destroy()
# scheduler
@cmd
def run_scheduler(site=None):
from webnotes.utils.file_lock import create_lock, delete_lock
import webnotes.utils.scheduler
webnotes.init(site=site)
if create_lock('scheduler'):
webnotes.connect(site=site)
print webnotes.utils.scheduler.execute()
delete_lock('scheduler')
webnotes.destroy()
@cmd
def run_scheduler_event(event, site=None):
import webnotes.utils.scheduler
webnotes.connect(site=site)
print webnotes.utils.scheduler.trigger("execute_" + event)
webnotes.destroy()
# replace
@cmd
def replace(search_regex, replacement, extn, force=False):
print search_regex, replacement, extn
replace_code('.', search_regex, replacement, extn, force=force)
# import/export
@cmd
def export_doc(doctype, docname, site=None):
import webnotes.modules
webnotes.connect(site=site)
webnotes.modules.export_doc(doctype, docname)
webnotes.destroy()
@cmd
def export_doclist(doctype, name, path, site=None):
from core.page.data_import_tool import data_import_tool
webnotes.connect(site=site)
data_import_tool.export_json(doctype, name, path)
webnotes.destroy()
@cmd
def export_csv(doctype, path, site=None):
from core.page.data_import_tool import data_import_tool
webnotes.connect(site=site)
data_import_tool.export_csv(doctype, path)
webnotes.destroy()
@cmd
def import_doclist(path, site=None, force=False):
from core.page.data_import_tool import data_import_tool
webnotes.connect(site=site)
data_import_tool.import_doclist(path, overwrite=force)
webnotes.destroy()
# translation
@cmd
def build_message_files(site=None):
import webnotes.translate
webnotes.connect(site=site)
webnotes.translate.build_message_files()
webnotes.destroy()
@cmd
def export_messages(lang, outfile, site=None):
import webnotes.translate
webnotes.connect(site=site)
webnotes.translate.export_messages(lang, outfile)
webnotes.destroy()
@cmd
def import_messages(lang, infile, site=None):
import webnotes.translate
webnotes.connect(site=site)
webnotes.translate.import_messages(lang, infile)
webnotes.destroy()
@cmd
def google_translate(lang, infile, outfile, site=None):
import webnotes.translate
webnotes.connect(site=site)
webnotes.translate.google_translate(lang, infile, outfile)
webnotes.destroy()
@cmd
def translate(lang, site=None):
import webnotes.translate
webnotes.connect(site=site)
webnotes.translate.translate(lang)
webnotes.destroy()
# git
@cmd
def git(param):
if isinstance(param, (list, tuple)):
param = " ".join(param)
import os
os.system("""cd lib && git %s""" % param)
os.system("""cd app && git %s""" % param)
def get_remote_and_branch(remote=None, branch=None):
if not (remote and branch):
webnotes.init()
if not webnotes.conf.branch:
raise Exception("Please specify remote and branch")
remote = remote or "origin"
branch = branch or webnotes.conf.branch
webnotes.destroy()
return remote, branch
@cmd
def pull(remote=None, branch=None):
remote, branch = get_remote_and_branch(remote, branch)
git(("pull", remote, branch))
@cmd
def push(remote=None, branch=None):
remote, branch = get_remote_and_branch(remote, branch)
git(("push", remote, branch))
@cmd
def status():
git("status")
@cmd
def commit(message):
git("""commit -a -m "%s" """ % message.replace('"', '\"'))
@cmd
def checkout(branch):
git(("checkout", branch))
@cmd
def set_admin_password(admin_password, site=None):
import webnotes
webnotes.connect(site=site)
webnotes.conn.sql("""update __Auth set `password`=password(%s)
where user='Administrator'""", (admin_password,))
webnotes.conn.commit()
webnotes.destroy()
@cmd
def mysql(site=None):
import webnotes
import commands, os
msq = commands.getoutput('which mysql')
webnotes.init(site=site)
os.execv(msq, [msq, '-u', webnotes.conf.db_name, '-p'+webnotes.conf.db_password, webnotes.conf.db_name, '-h', webnotes.conf.db_host or "localhost", "-A"])
webnotes.destroy()
@cmd
def python(site=None):
import webnotes
import commands, os
python = commands.getoutput('which python')
webnotes.init(site=site)
if site:
os.environ["site"] = site
os.environ["PYTHONSTARTUP"] = os.path.join(os.path.dirname(__file__), "pythonrc.py")
os.execv(python, [python])
webnotes.destroy()
@cmd
def ipython(site=None):
import webnotes
webnotes.connect(site=site)
import IPython
IPython.embed()
@cmd
def smtp_debug_server():
import commands, os
python = commands.getoutput('which python')
os.execv(python, [python, '-m', "smtpd", "-n", "-c", "DebuggingServer", "localhost:25"])
@cmd
def serve(port=8000, profile=False):
import webnotes.app
webnotes.app.serve(port=port, profile=profile)
def replace_code(start, txt1, txt2, extn, search=None, force=False):
"""replace all txt1 by txt2 in files with extension (extn)"""
import webnotes.utils
import os, re
esc = webnotes.utils.make_esc('[]')
if not search: search = esc(txt1)
for wt in os.walk(start, followlinks=1):
for fn in wt[2]:
if fn.split('.')[-1]==extn:
fpath = os.path.join(wt[0], fn)
with open(fpath, 'r') as f:
content = f.read()
if re.search(search, content):
res = search_replace_with_prompt(fpath, txt1, txt2, force)
if res == 'skip':
return 'skip'
def search_replace_with_prompt(fpath, txt1, txt2, force=False):
""" Search and replace all txt1 by txt2 in the file with confirmation"""
from termcolor import colored
with open(fpath, 'r') as f:
content = f.readlines()
tmp = []
for c in content:
if c.find(txt1) != -1:
print fpath
print colored(txt1, 'red').join(c[:-1].split(txt1))
a = ''
if force:
c = c.replace(txt1, txt2)
else:
while a.lower() not in ['y', 'n', 'skip']:
a = raw_input('Do you want to Change [y/n/skip]?')
if a.lower() == 'y':
c = c.replace(txt1, txt2)
elif a.lower() == 'skip':
return 'skip'
tmp.append(c)
with open(fpath, 'w') as f:
f.write(''.join(tmp))
print colored('Updated', 'green')
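# Example invocation of the --replace flow above (pattern and replacement
# are illustrative):
#   python lib/wnf.py --replace "get_obj\(" "webnotes.get_obj(" py -f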
@cmd
def get_site_status(site=None, verbose=False):
import webnotes
import webnotes.utils
from webnotes.profile import get_system_managers
from core.doctype.profile.profile import get_total_users, get_active_users, \
get_website_users, get_active_website_users
import json
webnotes.connect(site=site)
ret = {
'last_backup_on': webnotes.local.conf.last_backup_on,
'active_users': get_active_users(),
'total_users': get_total_users(),
'active_website_users': get_active_website_users(),
'website_users': get_website_users(),
'system_managers': "\n".join(get_system_managers()),
'default_company': webnotes.conn.get_default("company"),
'disk_usage': webnotes.utils.get_disk_usage(),
'working_directory': webnotes.utils.get_base_path()
}
# country, timezone, industry
control_panel_details = webnotes.conn.get_value("Control Panel", "Control Panel",
["country", "time_zone", "industry"], as_dict=True)
if control_panel_details:
ret.update(control_panel_details)
# basic usage/progress analytics
for doctype in ("Company", "Customer", "Item", "Quotation", "Sales Invoice",
"Journal Voucher", "Stock Ledger Entry"):
key = doctype.lower().replace(" ", "_") + "_exists"
ret[key] = 1 if webnotes.conn.count(doctype) else 0
webnotes.destroy()
if verbose:
print json.dumps(ret, indent=1, sort_keys=True)
return ret
@cmd
def update_site_config(site_config, site, verbose=False):
import json
if isinstance(site_config, basestring):
site_config = json.loads(site_config)
webnotes.init(site=site)
webnotes.conf.site_config.update(site_config)
site_config_path = webnotes.get_conf_path(webnotes.conf.sites_dir, site)
with open(site_config_path, "w") as f:
json.dump(webnotes.conf.site_config, f, indent=1, sort_keys=True)
webnotes.destroy()
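# Example invocation (key and value are illustrative):
#   python lib/wnf.py --site mysite --update_site_config '{"maintenance_mode": 1}'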
@cmd
def bump(repo, bump_type):
import json
assert repo in ['lib', 'app']
assert bump_type in ['minor', 'major', 'patch']
def validate(repo_path):
import git
repo = git.Repo(repo_path)
if repo.active_branch != 'master':
raise Exception, "Current branch not master in {}".format(repo_path)
def bump_version(version, version_type):
import semantic_version
v = semantic_version.Version(version)
if version_type == 'minor':
v.minor += 1
elif version_type == 'major':
v.major += 1
elif version_type == 'patch':
v.patch += 1
return unicode(v)
def add_tag(repo_path, version):
import git
repo = git.Repo(repo_path)
repo.index.add(['config.json'])
repo.index.commit('bumped to version {}'.format(version))
repo.create_tag('v' + version, repo.head)
def update_framework_requirement(version):
with open('app/config.json') as f:
config = json.load(f)
config['requires_framework_version'] = '==' + version
with open('app/config.json', 'w') as f:
json.dump(config, f, indent=1, sort_keys=True)
validate('lib/')
validate('app/')
if repo == 'app':
with open('app/config.json') as f:
config = json.load(f)
new_version = bump_version(config['app_version'], bump_type)
config['app_version'] = new_version
with open('app/config.json', 'w') as f:
json.dump(config, f, indent=1, sort_keys=True)
add_tag('app/', new_version)
elif repo == 'lib':
with open('lib/config.json') as f:
config = json.load(f)
new_version = bump_version(config['framework_version'], bump_type)
config['framework_version'] = new_version
with open('lib/config.json', 'w') as f:
json.dump(config, f, indent=1, sort_keys=True)
add_tag('lib/', new_version)
update_framework_requirement(new_version)
bump('app', bump_type)
if __name__=="__main__":
main()
| 32.129353 | 155 | 0.735638 |
7942b4f692246e4195064175c17251c84313ac8d | 68,424 | py | Python | perftests/resources/cross_validation.py | keynmol/fastparse | 2907295ba3463ccba732b1f58c2704ffe2f22939 | [
"MIT",
"Unlicense"
] | 929 | 2015-05-10T22:09:25.000Z | 2021-02-20T04:28:18.000Z | perftests/resources/cross_validation.py | keynmol/fastparse | 2907295ba3463ccba732b1f58c2704ffe2f22939 | [
"MIT",
"Unlicense"
] | 212 | 2015-05-10T22:25:39.000Z | 2021-01-19T20:13:24.000Z | perftests/resources/cross_validation.py | keynmol/fastparse | 2907295ba3463ccba732b1f58c2704ffe2f22939 | [
"MIT",
"Unlicense"
] | 160 | 2015-05-10T22:21:00.000Z | 2021-01-27T08:01:51.000Z |
"""
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <[email protected]>,
# Gael Varoquaux <[email protected]>,
# Olivier Grisel <[email protected]>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
column_or_1d)
from .utils.multiclass import type_of_target
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
from .gaussian_process.kernels import Kernel as GPKernel
from .exceptions import FitFailedWarning
warnings.warn("This module has been deprecated in favor of the "
"model_selection module into which all the refactored classes "
"and functions are moved. Also note that the interface of the "
"new CV iterators are different from that of this module. "
"This module will be removed in 0.20.", DeprecationWarning)
__all__ = ['KFold',
'LabelKFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'LabelShuffleSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
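# Subclassing sketch (illustrative, not part of the original module): a custom
# CV iterator only has to produce test indices; __iter__ above derives each
# training set as the complement.
#
#   class FirstKOut(_PartitionIterator):
#       def __init__(self, n, k):
#           super(FirstKOut, self).__init__(n)
#           self.k = k
#       def _iter_test_indices(self):
#           yield np.arange(self.k)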
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
Provides train/test indices to split data in train test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by setting n_folds=2 or more,"
" got n_folds={0}.".format(n_folds))
if n_folds > self.n:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of samples: {1}.").format(n_folds, n))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.shuffle = shuffle
self.random_state = random_state
class KFold(_BaseKFold):
"""K-Folds cross validation iterator.
Provides train/test indices to split data in train test sets. Split
dataset into k consecutive folds (without shuffling by default).
Each fold is then used as a validation set once while the k - 1 remaining
fold(s) form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(4, n_folds=2)
>>> len(kf)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.KFold(n=4, n_folds=2, shuffle=False,
random_state=None)
>>> for train_index, test_index in kf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first n % n_folds folds have size n // n_folds + 1, other folds have
size n // n_folds.
See also
--------
StratifiedKFold take label information into account to avoid building
folds with imbalanced class distributions (for binary or multiclass
classification tasks).
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, n, n_folds=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n, n_folds, shuffle, random_state)
self.idxs = np.arange(n)
if shuffle:
rng = check_random_state(self.random_state)
rng.shuffle(self.idxs)
def _iter_test_indices(self):
n = self.n
n_folds = self.n_folds
        fold_sizes = (n // n_folds) * np.ones(n_folds, dtype=np.int)
fold_sizes[:n % n_folds] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield self.idxs[start:stop]
current = stop
def __repr__(self):
return '%s.%s(n=%i, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
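# Editor's sketch (not part of the original module): the fold-sizing rule from
# the KFold Notes above. With n=10 and n_folds=4, the first n % n_folds = 2
# folds get n // n_folds + 1 = 3 samples each and the remaining folds get 2.
# Hypothetical helper, illustration only.
def _example_kfold_fold_sizes():
    sizes = [len(test) for _, test in KFold(10, n_folds=4)]
    assert sizes == [3, 3, 2, 2]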
class LabelKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping labels.
The same label will not appear in two different folds (the number of
distinct labels has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct labels is approximately the same in each fold.
.. versionadded:: 0.17
Parameters
----------
labels : array-like with shape (n_samples, )
Contains a label for each sample.
The folds are built so that the same label does not appear in two
different folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
Examples
--------
>>> from sklearn.cross_validation import LabelKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> labels = np.array([0, 0, 2, 2])
>>> label_kfold = LabelKFold(labels, n_folds=2)
>>> len(label_kfold)
2
>>> print(label_kfold)
sklearn.cross_validation.LabelKFold(n_labels=4, n_folds=2)
>>> for train_index, test_index in label_kfold:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def __init__(self, labels, n_folds=3):
super(LabelKFold, self).__init__(len(labels), n_folds,
shuffle=False, random_state=None)
unique_labels, labels = np.unique(labels, return_inverse=True)
n_labels = len(unique_labels)
if n_folds > n_labels:
raise ValueError(
("Cannot have number of folds n_folds={0} greater"
" than the number of labels: {1}.").format(n_folds,
n_labels))
# Weight labels by their number of occurrences
n_samples_per_label = np.bincount(labels)
# Distribute the most frequent labels first
indices = np.argsort(n_samples_per_label)[::-1]
n_samples_per_label = n_samples_per_label[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(n_folds)
# Mapping from label index to fold index
label_to_fold = np.zeros(len(unique_labels))
# Distribute samples by adding the largest weight to the lightest fold
for label_index, weight in enumerate(n_samples_per_label):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
label_to_fold[indices[label_index]] = lightest_fold
self.idxs = label_to_fold[labels]
def _iter_test_indices(self):
for f in range(self.n_folds):
yield np.where(self.idxs == f)[0]
def __repr__(self):
return '{0}.{1}(n_labels={2}, n_folds={3})'.format(
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.n_folds,
)
def __len__(self):
return self.n_folds
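# Editor's sketch (not part of the original module): the greedy balancing
# implemented in LabelKFold.__init__ above -- labels are visited from most to
# least frequent and each is assigned to the currently lightest fold, keeping
# fold sizes close without ever splitting a label. Hypothetical helper.
def _example_labelkfold_balance():
    labels = [0, 0, 0, 1, 1, 2, 3]                  # label 0 is the heaviest
    sizes = sorted(len(test) for _, test in LabelKFold(labels, n_folds=2))
    assert sizes == [3, 4]                          # close, and no label split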
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a variation of KFold that
returns stratified folds. The folds are made by preserving
the percentage of samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array-like, [n_samples]
Samples to split in K folds.
n_folds : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(y, n_folds=2)
>>> len(skf)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
sklearn.cross_validation.StratifiedKFold(labels=[0 0 1 1], n_folds=2,
shuffle=False, random_state=None)
>>> for train_index, test_index in skf:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size ``trunc(n_samples / n_folds)``; the last one has
    the complementary size.
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, y, n_folds=3, shuffle=False,
random_state=None):
super(StratifiedKFold, self).__init__(
len(y), n_folds, shuffle, random_state)
y = np.asarray(y)
n_samples = y.shape[0]
unique_labels, y_inversed = np.unique(y, return_inverse=True)
label_counts = bincount(y_inversed)
min_labels = np.min(label_counts)
if np.all(self.n_folds > label_counts):
raise ValueError("All the n_labels for individual classes"
" are less than %d folds."
% (self.n_folds))
if self.n_folds > min_labels:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of labels for any class cannot"
" be less than n_folds=%d."
% (min_labels, self.n_folds)), Warning)
# don't want to use the same seed in each label's shuffle
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each label so as to respect the
# balance of labels
per_label_cvs = [
KFold(max(c, self.n_folds), self.n_folds, shuffle=self.shuffle,
random_state=rng) for c in label_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_idx, per_label_splits in enumerate(zip(*per_label_cvs)):
for label, (_, test_split) in zip(unique_labels, per_label_splits):
label_test_folds = test_folds[y == label]
# the test split can be too big because we used
# KFold(max(c, self.n_folds), self.n_folds) instead of
# KFold(c, self.n_folds) to make it possible to not crash even
# if the data is not 100% stratifiable for all the labels
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(label_test_folds)]
label_test_folds[test_split] = test_fold_idx
test_folds[y == label] = label_test_folds
self.test_folds = test_folds
self.y = y
def _iter_test_masks(self):
for i in range(self.n_folds):
yield self.test_folds == i
def __repr__(self):
return '%s.%s(labels=%s, n_folds=%i, shuffle=%s, random_state=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.y,
self.n_folds,
self.shuffle,
self.random_state,
)
def __len__(self):
return self.n_folds
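# Editor's sketch (not part of the original module): the per-class assignment
# built in StratifiedKFold.__init__ above preserves class proportions -- here
# every test fold receives exactly one sample of each class. Hypothetical
# helper, illustration only.
def _example_stratified_class_balance():
    y = np.array([0, 0, 1, 1])
    for train, test in StratifiedKFold(y, n_folds=2):
        assert sorted(y[test]) == [0, 1]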
class LeaveOneLabelOut(_PartitionIterator):
"""Leave-One-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> labels = np.array([1, 1, 2, 2])
>>> lol = cross_validation.LeaveOneLabelOut(labels)
>>> len(lol)
2
>>> print(lol)
sklearn.cross_validation.LeaveOneLabelOut(labels=[1 1 2 2])
>>> for train_index, test_index in lol:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels):
super(LeaveOneLabelOut, self).__init__(len(labels))
# We make a copy of labels to avoid side-effects during iteration
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
def _iter_test_masks(self):
for i in self.unique_labels:
yield self.labels == i
def __repr__(self):
return '%s.%s(labels=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
)
def __len__(self):
return self.n_unique_labels
class LeavePLabelOut(_PartitionIterator):
"""Leave-P-Label_Out cross-validation iterator
Provides train/test indices to split data according to a third-party
provided label. This label information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LeaveOneLabelOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the labels while the latter uses samples
all assigned the same labels.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
labels : array-like of int with shape (n_samples,)
Arbitrary domain-specific stratification of the data to be used
to draw the splits.
p : int
Number of samples to leave out in the test split.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> labels = np.array([1, 2, 3])
>>> lpl = cross_validation.LeavePLabelOut(labels, p=2)
>>> len(lpl)
3
>>> print(lpl)
sklearn.cross_validation.LeavePLabelOut(labels=[1 2 3], p=2)
>>> for train_index, test_index in lpl:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
LabelKFold: K-fold iterator variant with non-overlapping labels.
"""
def __init__(self, labels, p):
# We make a copy of labels to avoid side-effects during iteration
super(LeavePLabelOut, self).__init__(len(labels))
self.labels = np.array(labels, copy=True)
self.unique_labels = np.unique(labels)
self.n_unique_labels = len(self.unique_labels)
self.p = p
def _iter_test_masks(self):
comb = combinations(range(self.n_unique_labels), self.p)
for idx in comb:
test_index = self._empty_mask()
idx = np.array(idx)
for l in self.unique_labels[idx]:
test_index[self.labels == l] = True
yield test_index
def __repr__(self):
return '%s.%s(labels=%s, p=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.labels,
self.p,
)
def __len__(self):
return int(factorial(self.n_unique_labels) /
factorial(self.n_unique_labels - self.p) /
factorial(self.p))
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
self.n = n
self.n_iter = n_iter
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
self.n_train, self.n_test = _validate_shuffle_split(n, test_size,
train_size)
def __iter__(self):
for train, test in self._iter_indices():
yield train, test
return
@abstractmethod
def _iter_indices(self):
"""Generate (train, test) indices"""
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validation iterator.
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in the dataset.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn import cross_validation
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... test_size=.25, random_state=0)
>>> len(rs)
3
>>> print(rs)
... # doctest: +ELLIPSIS
ShuffleSplit(4, n_iter=3, test_size=0.25, ...)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = cross_validation.ShuffleSplit(4, n_iter=3,
... train_size=0.5, test_size=.25, random_state=0)
>>> for train_index, test_index in rs:
... print("TRAIN:", train_index, "TEST:", test_index)
...
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self):
rng = check_random_state(self.random_state)
for i in range(self.n_iter):
# random partition
permutation = rng.permutation(self.n)
ind_test = permutation[:self.n_test]
ind_train = permutation[self.n_test:self.n_test + self.n_train]
yield ind_train, ind_test
def __repr__(self):
return ('%s(%d, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.n,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _validate_shuffle_split(n, test_size, train_size):
if test_size is None and train_size is None:
raise ValueError(
'test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind == 'i':
if test_size >= n:
raise ValueError(
'test_size=%d should be smaller '
'than the number of samples %d' % (test_size, n))
else:
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif np.asarray(test_size).dtype.kind == 'f' and \
train_size + test_size > 1.:
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind == 'i':
if train_size >= n:
raise ValueError("train_size=%d should be smaller "
"than the number of samples %d" %
(train_size, n))
else:
raise ValueError("Invalid value for train_size: %r" % train_size)
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n - n_test
else:
if np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n)
else:
n_train = float(train_size)
if test_size is None:
n_test = n - n_train
if n_train + n_test > n:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n))
return int(n_train), int(n_test)
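# Editor's sketch (not part of the original module): how the validator above
# resolves the size specifications -- a float test_size is ceil'ed, a float
# train_size floor'ed, and a missing size takes the complement. Hypothetical
# helper, illustration only.
def _example_validate_shuffle_split():
    assert _validate_shuffle_split(10, 0.25, None) == (7, 3)  # ceil(2.5) == 3
    assert _validate_shuffle_split(10, None, 6) == (6, 4)     # complement of 6
    assert _validate_shuffle_split(10, 0.3, 0.5) == (5, 3)    # floor(5.0) == 5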
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross validation iterator
Provides train/test indices to split data in train test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
y : array, [n_samples]
Labels of samples.
n_iter : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.cross_validation import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(y, 3, test_size=0.5, random_state=0)
>>> len(sss)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(labels=[0 0 1 1], n_iter=3, ...)
>>> for train_index, test_index in sss:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, y, n_iter=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
len(y), n_iter, test_size, train_size, random_state)
self.y = np.array(y)
self.classes, self.y_indices = np.unique(y, return_inverse=True)
n_cls = self.classes.shape[0]
if np.min(bincount(self.y_indices)) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of labels for any class cannot"
" be less than 2.")
if self.n_train < n_cls:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_train, n_cls))
if self.n_test < n_cls:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(self.n_test, n_cls))
def _iter_indices(self):
rng = check_random_state(self.random_state)
cls_count = bincount(self.y_indices)
p_i = cls_count / float(self.n)
n_i = np.round(self.n_train * p_i).astype(int)
t_i = np.minimum(cls_count - n_i,
np.round(self.n_test * p_i).astype(int))
for n in range(self.n_iter):
train = []
test = []
for i, cls in enumerate(self.classes):
permutation = rng.permutation(cls_count[i])
cls_i = np.where((self.y == cls))[0][permutation]
train.extend(cls_i[:n_i[i]])
test.extend(cls_i[n_i[i]:n_i[i] + t_i[i]])
# Because of rounding issues (as n_train and n_test are not
# dividers of the number of elements per class), we may end
# up here with less samples in train and test than asked for.
if len(train) + len(test) < self.n_train + self.n_test:
# We complete by affecting randomly the missing indexes
missing_idx = np.where(bincount(train + test,
minlength=len(self.y)) == 0,
)[0]
missing_idx = rng.permutation(missing_idx)
n_missing_train = self.n_train - len(train)
n_missing_test = self.n_test - len(test)
if n_missing_train > 0:
train.extend(missing_idx[:n_missing_train])
if n_missing_test > 0:
test.extend(missing_idx[-n_missing_test:])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.y,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
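# Editor's sketch (not part of the original module): the proportional rounding
# performed in StratifiedShuffleSplit._iter_indices above. With class
# proportions p_i = [0.8, 0.2] and n_train = 5, each class contributes
# round(n_train * p_i) samples to the training side. Illustration only.
def _example_stratified_shuffle_rounding():
    p_i = np.array([0.8, 0.2])
    n_i = np.round(5 * p_i).astype(int)
    assert list(n_i) == [4, 1]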
class PredefinedSplit(_PartitionIterator):
"""Predefined split cross validation iterator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
test_fold : "array-like, shape (n_samples,)
test_fold[i] gives the test set fold of sample i. A value of -1
indicates that the corresponding sample is not part of any test set
folds, but will instead always be put into the training fold.
Examples
--------
>>> from sklearn.cross_validation import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> ps = PredefinedSplit(test_fold=[0, 1, -1, 1])
>>> len(ps)
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
sklearn.cross_validation.PredefinedSplit(test_fold=[ 0 1 -1 1])
>>> for train_index, test_index in ps:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
super(PredefinedSplit, self).__init__(len(test_fold))
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def _iter_test_indices(self):
for f in self.unique_folds:
yield np.where(self.test_fold == f)[0]
def __repr__(self):
return '%s.%s(test_fold=%s)' % (
self.__class__.__module__,
self.__class__.__name__,
self.test_fold)
def __len__(self):
return len(self.unique_folds)
class LabelShuffleSplit(ShuffleSplit):
"""Shuffle-Labels-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided label. This label information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the labels could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePLabelOut and LabelShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique labels,
whereas LabelShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique labels.
For example, a less computationally intensive alternative to
``LeavePLabelOut(labels, p=10)`` would be
``LabelShuffleSplit(labels, test_size=10, n_iter=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to labels, and
not to samples, as in ShuffleSplit.
.. versionadded:: 0.17
Parameters
----------
labels : array, [n_samples]
Labels of samples
n_iter : int (default 5)
Number of re-shuffling and splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the test split. If
int, represents the absolute number of test labels. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the labels to include in the train split. If
int, represents the absolute number of train labels. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
"""
def __init__(self, labels, n_iter=5, test_size=0.2, train_size=None,
random_state=None):
classes, label_indices = np.unique(labels, return_inverse=True)
super(LabelShuffleSplit, self).__init__(
len(classes),
n_iter=n_iter,
test_size=test_size,
train_size=train_size,
random_state=random_state)
self.labels = labels
self.classes = classes
self.label_indices = label_indices
def __repr__(self):
return ('%s(labels=%s, n_iter=%d, test_size=%s, '
'random_state=%s)' % (
self.__class__.__name__,
self.labels,
self.n_iter,
str(self.test_size),
self.random_state,
))
def __len__(self):
return self.n_iter
def _iter_indices(self):
for label_train, label_test in super(LabelShuffleSplit,
self)._iter_indices():
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(self.label_indices, label_train))
test = np.flatnonzero(np.in1d(self.label_indices, label_test))
yield train, test
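# Editor's sketch (not part of the original module): LabelShuffleSplit ships
# without a doctest above, so here is a hedged usage example -- test_size
# counts *labels*, not samples, and no label ever straddles the train/test
# boundary. Hypothetical helper, illustration only.
def _example_label_shuffle_split():
    labels = np.array([1, 1, 2, 2, 3, 3])
    cv = LabelShuffleSplit(labels, n_iter=4, test_size=1, random_state=0)
    for train, test in cv:
        assert not set(labels[train]) & set(labels[test])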
##############################################################################
def _index_param_value(X, v, indices):
"""Private helper function for parameter value indexing."""
if not _is_arraylike(v) or _num_samples(v) != _num_samples(X):
# pass through: skip indexing
return v
if sp.issparse(v):
v = v.tocsr()
return safe_indexing(v, indices)
def cross_val_predict(estimator, X, y=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Generate cross-validated estimates for each input data point
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
preds : ndarray
This is the result of calling 'predict'
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.cross_validation import cross_val_predict
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> y_pred = cross_val_predict(lasso, X, y)
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
preds_blocks = parallel(delayed(_fit_and_predict)(clone(estimator), X, y,
train, test, verbose,
fit_params)
for train, test in cv)
preds = [p for p, _ in preds_blocks]
locs = np.concatenate([loc for _, loc in preds_blocks])
if not _check_is_partition(locs, _num_samples(X)):
        raise ValueError('cross_val_predict only works for partitions')
    # Invert the test-index locations so that the concatenated predictions
    # can be returned in the original sample order
    inv_locs = np.empty(len(locs), dtype=int)
    inv_locs[locs] = np.arange(len(locs))
    # Check for sparse predictions
if sp.issparse(preds[0]):
preds = sp.vstack(preds, format=preds[0].format)
else:
preds = np.concatenate(preds)
return preds[inv_locs]
def _fit_and_predict(estimator, X, y, train, test, verbose, fit_params):
"""Fit estimator and predict values for a given dataset split.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit' and 'predict'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
Returns
-------
preds : sequence
Result of calling 'estimator.predict'
test : array-like
This is the value of the test parameter
"""
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, _ = _safe_split(estimator, X, y, test, train)
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
preds = estimator.predict(X_test)
return preds, test
def _check_is_partition(locs, n):
"""Check whether locs is a reordering of the array np.arange(n)
Parameters
----------
locs : ndarray
integer array to test
n : int
number of expected elements
Returns
-------
is_partition : bool
True iff sorted(locs) is range(n)
"""
if len(locs) != n:
return False
hit = np.zeros(n, bool)
hit[locs] = True
if not np.all(hit):
return False
return True
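# Editor's sketch (not part of the original module): the index-inversion trick
# used by cross_val_predict above. Once the concatenated test indices form a
# partition, inverting them restores the original sample order. Illustration
# only.
def _example_partition_inversion():
    locs = np.array([2, 0, 1, 3])             # concatenated test indices
    assert _check_is_partition(locs, 4)
    inv_locs = np.empty(len(locs), dtype=int)
    inv_locs[locs] = np.arange(len(locs))
    assert list(inv_locs) == [1, 2, 0, 3]     # where each sample's pred sits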
def cross_val_score(estimator, X, y=None, scoring=None, cv=None, n_jobs=1,
verbose=0, fit_params=None, pre_dispatch='2*n_jobs'):
"""Evaluate a score by cross-validation
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like
The data to fit. Can be, for example a list, or an array at least 2d.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
verbose : integer, optional
The verbosity level.
fit_params : dict, optional
Parameters to pass to the fit method of the estimator.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
Returns
-------
scores : array of float, shape=(len(list(cv)),)
Array of scores of the estimator for each run of the cross validation.
Examples
--------
>>> from sklearn import datasets, linear_model
>>> from sklearn.cross_validation import cross_val_score
>>> diabetes = datasets.load_diabetes()
>>> X = diabetes.data[:150]
>>> y = diabetes.target[:150]
>>> lasso = linear_model.Lasso()
>>> print(cross_val_score(lasso, X, y)) # doctest: +ELLIPSIS
[ 0.33150734 0.08022311 0.03531764]
See Also
---------
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
parallel = Parallel(n_jobs=n_jobs, verbose=verbose,
pre_dispatch=pre_dispatch)
scores = parallel(delayed(_fit_and_score)(clone(estimator), X, y, scorer,
train, test, verbose, None,
fit_params)
for train, test in cv)
return np.array(scores)[:, 0]
def _fit_and_score(estimator, X, y, scorer, train, test, verbose,
parameters, fit_params, return_train_score=False,
return_parameters=False, error_score='raise'):
"""Fit estimator and compute scores for a given dataset split.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like, optional, default: None
The target variable to try to predict in the case of
supervised learning.
scorer : callable
A scorer callable object / function with signature
``scorer(estimator, X, y)``.
train : array-like, shape (n_train_samples,)
Indices of training samples.
test : array-like, shape (n_test_samples,)
Indices of test samples.
verbose : integer
The verbosity level.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
parameters : dict or None
Parameters to be set on the estimator.
fit_params : dict or None
Parameters that will be passed to ``estimator.fit``.
return_train_score : boolean, optional, default: False
Compute and return score on training set.
return_parameters : boolean, optional, default: False
        Return parameters that have been used for the estimator.
Returns
-------
train_score : float, optional
Score on training set, returned only if `return_train_score` is `True`.
test_score : float
Score on test set.
n_test_samples : int
Number of test samples.
scoring_time : float
Time spent for fitting and scoring in seconds.
parameters : dict or None, optional
The parameters that have been evaluated.
"""
if verbose > 1:
if parameters is None:
msg = ''
else:
msg = '%s' % (', '.join('%s=%s' % (k, v)
for k, v in parameters.items()))
print("[CV] %s %s" % (msg, (64 - len(msg)) * '.'))
# Adjust length of sample weights
fit_params = fit_params if fit_params is not None else {}
fit_params = dict([(k, _index_param_value(X, v, train))
for k, v in fit_params.items()])
if parameters is not None:
estimator.set_params(**parameters)
start_time = time.time()
X_train, y_train = _safe_split(estimator, X, y, train)
X_test, y_test = _safe_split(estimator, X, y, test, train)
try:
if y_train is None:
estimator.fit(X_train, **fit_params)
else:
estimator.fit(X_train, y_train, **fit_params)
except Exception as e:
if error_score == 'raise':
raise
elif isinstance(error_score, numbers.Number):
test_score = error_score
if return_train_score:
train_score = error_score
warnings.warn("Classifier fit failed. The score on this train-test"
" partition for these parameters will be set to %f. "
"Details: \n%r" % (error_score, e), FitFailedWarning)
else:
raise ValueError("error_score must be the string 'raise' or a"
" numeric value. (Hint: if using 'raise', please"
" make sure that it has been spelled correctly.)"
)
else:
test_score = _score(estimator, X_test, y_test, scorer)
if return_train_score:
train_score = _score(estimator, X_train, y_train, scorer)
scoring_time = time.time() - start_time
if verbose > 2:
msg += ", score=%f" % test_score
if verbose > 1:
end_msg = "%s -%s" % (msg, logger.short_format_time(scoring_time))
print("[CV] %s %s" % ((64 - len(end_msg)) * '.', end_msg))
ret = [train_score] if return_train_score else []
ret.extend([test_score, _num_samples(X_test), scoring_time])
if return_parameters:
ret.append(parameters)
return ret
def _safe_split(estimator, X, y, indices, train_indices=None):
"""Create subset of dataset and properly handle kernels."""
if hasattr(estimator, 'kernel') and callable(estimator.kernel) \
and not isinstance(estimator.kernel, GPKernel):
# cannot compute the kernel values with custom function
raise ValueError("Cannot use a custom kernel function. "
"Precompute the kernel matrix instead.")
if not hasattr(X, "shape"):
if getattr(estimator, "_pairwise", False):
raise ValueError("Precomputed kernels or affinity matrices have "
"to be passed as arrays or sparse matrices.")
X_subset = [X[idx] for idx in indices]
else:
if getattr(estimator, "_pairwise", False):
# X is a precomputed square kernel matrix
if X.shape[0] != X.shape[1]:
raise ValueError("X should be a square kernel matrix")
if train_indices is None:
X_subset = X[np.ix_(indices, indices)]
else:
X_subset = X[np.ix_(indices, train_indices)]
else:
X_subset = safe_indexing(X, indices)
if y is not None:
y_subset = safe_indexing(y, indices)
else:
y_subset = None
return X_subset, y_subset
def _score(estimator, X_test, y_test, scorer):
"""Compute the score of an estimator on a given test set."""
if y_test is None:
score = scorer(estimator, X_test)
else:
score = scorer(estimator, X_test, y_test)
if hasattr(score, 'item'):
try:
# e.g. unwrap memmapped scalars
score = score.item()
except ValueError:
# non-scalar?
pass
if not isinstance(score, numbers.Number):
raise ValueError("scoring must return a number, got %s (%s) instead."
% (str(score), type(score)))
return score
def _permutation_test_score(estimator, X, y, cv, scorer):
"""Auxiliary function for permutation_test_score"""
avg_score = []
for train, test in cv:
estimator.fit(X[train], y[train])
avg_score.append(scorer(estimator, X[test], y[test]))
return np.mean(avg_score)
def _shuffle(y, labels, random_state):
"""Return a shuffled copy of y eventually shuffle among same labels."""
if labels is None:
ind = random_state.permutation(len(y))
else:
ind = np.arange(len(labels))
for label in np.unique(labels):
this_mask = (labels == label)
ind[this_mask] = random_state.permutation(ind[this_mask])
return y[ind]
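# Editor's sketch (not part of the original module): with labels supplied,
# _shuffle above permutes y only *within* each label group, so grouped
# permutation tests never mix targets across groups. Hypothetical helper.
def _example_shuffle_within_labels():
    y = np.array([10, 11, 20, 21])
    labels = np.array([0, 0, 1, 1])
    shuffled = _shuffle(y, labels, check_random_state(0))
    assert set(shuffled[:2]) == {10, 11}
    assert set(shuffled[2:]) == {20, 21}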
def check_cv(cv, X=None, y=None, classifier=False):
"""Input checker utility for building a CV in a user friendly way.
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if classifier is True and ``y`` is binary or
    multiclass, :class:`StratifiedKFold` is used. In all other cases,
:class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
X : array-like
The data the cross-val object will be applied on.
y : array-like
The target variable for a supervised learning problem.
classifier : boolean optional
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv: a cross-validation generator instance.
The return value is guaranteed to be a cv generator instance, whatever
the input type.
"""
is_sparse = sp.issparse(X)
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if classifier:
if type_of_target(y) in ['binary', 'multiclass']:
cv = StratifiedKFold(y, cv)
else:
cv = KFold(_num_samples(y), cv)
else:
if not is_sparse:
n_samples = len(X)
else:
n_samples = X.shape[0]
cv = KFold(n_samples, cv)
return cv
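# Editor's sketch (not part of the original module): how check_cv above
# resolves an integer -- a classifier with a binary/multiclass y gets
# stratification, everything else falls back to plain KFold. Hypothetical
# helper, illustration only.
def _example_check_cv_resolution():
    X = np.zeros((6, 2))
    cv = check_cv(3, X, [0, 1, 0, 1, 0, 1], classifier=True)
    assert isinstance(cv, StratifiedKFold)
    assert isinstance(check_cv(3, X, classifier=False), KFold)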
def permutation_test_score(estimator, X, y, cv=None,
n_permutations=100, n_jobs=1, labels=None,
random_state=0, verbose=0, scoring=None):
"""Evaluate the significance of a cross-validated score with permutations
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
estimator : estimator object implementing 'fit'
The object to use to fit the data.
X : array-like of shape at least 2D
The data to fit.
y : array-like
The target variable to try to predict in the case of
supervised learning.
scoring : string, callable or None, optional, default: None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
        either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
n_permutations : integer, optional
Number of times to permute ``y``.
n_jobs : integer, optional
The number of CPUs to use to do the computation. -1 means
'all CPUs'.
labels : array-like of shape [n_samples] (optional)
Labels constrain the permutation among groups of samples with
        the same label.
random_state : RandomState or an int seed (0 by default)
A random number generator instance to define the state of the
random permutations generator.
verbose : integer, optional
The verbosity level.
Returns
-------
score : float
The true score without permuting targets.
permutation_scores : array, shape (n_permutations,)
        The scores obtained for each permutation.
pvalue : float
The returned value equals p-value if `scoring` returns bigger
numbers for better scores (e.g., accuracy_score). If `scoring` is
rather a loss function (i.e. when lower is better such as with
`mean_squared_error`) then this is actually the complement of the
p-value: 1 - p-value.
Notes
-----
This function implements Test 1 in:
Ojala and Garriga. Permutation Tests for Studying Classifier
Performance. The Journal of Machine Learning Research (2010)
vol. 11
"""
X, y = indexable(X, y)
cv = check_cv(cv, X, y, classifier=is_classifier(estimator))
scorer = check_scoring(estimator, scoring=scoring)
random_state = check_random_state(random_state)
# We clone the estimator to make sure that all the folds are
# independent, and that it is pickle-able.
score = _permutation_test_score(clone(estimator), X, y, cv, scorer)
permutation_scores = Parallel(n_jobs=n_jobs, verbose=verbose)(
delayed(_permutation_test_score)(
clone(estimator), X, _shuffle(y, labels, random_state), cv,
scorer)
for _ in range(n_permutations))
permutation_scores = np.array(permutation_scores)
pvalue = (np.sum(permutation_scores >= score) + 1.0) / (n_permutations + 1)
return score, permutation_scores, pvalue
permutation_test_score.__test__ = False  # to avoid a problem with nosetests
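# Editor's sketch (not part of the original module): the "+ 1" terms in the
# p-value above count the unpermuted score itself, so with 100 permutations
# the smallest reachable p-value is 1 / 101 -- never an over-optimistic zero.
def _example_permutation_pvalue():
    permutation_scores = np.zeros(100)         # every permutation scored worse
    pvalue = (np.sum(permutation_scores >= 0.9) + 1.0) / (100 + 1)
    assert abs(pvalue - 1.0 / 101) < 1e-12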
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(iter(ShuffleSplit(n_samples)))`` and application to input
data into a single call for splitting (and optionally subsampling)
    data in a one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
.. versionadded:: 0.16
preserves input type instead of always casting to numpy array.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
the labels array.
.. versionadded:: 0.17
*stratify* splitting
Returns
-------
splitting : list, length = 2 * len(arrays),
List containing train-test split of inputs.
.. versionadded:: 0.16
Output type is the same as the input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.cross_validation import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
stratify = options.pop('stratify', None)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
cv = StratifiedShuffleSplit(stratify, test_size=test_size,
train_size=train_size,
random_state=random_state)
else:
n_samples = _num_samples(arrays[0])
cv = ShuffleSplit(n_samples, test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(iter(cv))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False  # to avoid a problem with nosetests
| 35.0174 | 79 | 0.6085 |
7942b84baabccd7339a5ce7e7baeb83874dab736 | 1,647 | py | Python | tests/samples.py | colltoaction/datafiles | a331b1fd0ed98b7685a1dae8c0efb73e683063d6 | [
"MIT"
] | 1 | 2019-02-14T08:08:41.000Z | 2019-02-14T08:08:41.000Z | tests/samples.py | colltoaction/datafiles | a331b1fd0ed98b7685a1dae8c0efb73e683063d6 | [
"MIT"
] | null | null | null | tests/samples.py | colltoaction/datafiles | a331b1fd0ed98b7685a1dae8c0efb73e683063d6 | [
"MIT"
] | null | null | null | from dataclasses import dataclass, field
from typing import List, Optional
from datafiles import datafile
from datafiles.converters import String
@datafile('../tmp/sample.yml', manual=True)
class Sample:
bool_: bool
int_: int
float_: float
str_: str
@datafile('../tmp/sample.json', manual=True)
class SampleAsJSON:
bool_: bool
int_: int
float_: float
str_: str
@datafile('../tmp/sample.yml', manual=True)
class SampleWithCustomFields:
included: str
    excluded: str
class Meta:
datafile_attrs = {'included': String}
@datafile('../tmp/sample.yml', manual=True)
class SampleWithDefaults:
without_default: str
with_default: str = 'foo'
@dataclass
class _NestedSample1:
name: str
score: float
@datafile('../tmp/sample.yml', manual=True)
class SampleWithNesting:
name: str
score: float
nested: _NestedSample1
@dataclass
class _NestedSample2:
name: str = 'b'
score: float = 3.4
@datafile('../tmp/sample.yml', manual=True)
class SampleWithNestingAndDefaults:
name: str
score: float = 1.2
nested: _NestedSample2 = field(default_factory=_NestedSample2)
@datafile('../tmp/sample.yml', manual=True)
class SampleWithList:
items: List[float]
@datafile('../tmp/sample.yml', manual=True)
class SampleWithListAndDefaults:
items: List[float] = field(default_factory=list)
@datafile('../tmp/sample.yml', manual=True)
class SampleWithListOfDataclasses:
items: List[_NestedSample1] = field(default_factory=list)
@datafile('../tmp/sample.yml', manual=True)
class SampleWithOptionals:
required: float
optional: Optional[float]
| 19.607143 | 66 | 0.705525 |
7942b8a2544450ff15cdefc310a949f2423f8b4d | 3,514 | py | Python | bindings/python/ensmallen/datasets/string/agrobacteriumtumefaciensf2.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 5 | 2021-02-17T00:44:45.000Z | 2021-08-09T16:41:47.000Z | bindings/python/ensmallen/datasets/string/agrobacteriumtumefaciensf2.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/agrobacteriumtumefaciensf2.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z | """
This file offers the methods to automatically retrieve the graph Agrobacterium tumefaciens F2.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def AgrobacteriumTumefaciensF2(
directed: bool = False,
preprocess: bool = True,
load_nodes: bool = True,
verbose: int = 2,
cache: bool = True,
cache_path: str = "graphs/string",
version: str = "links.v11.5",
**additional_graph_kwargs: Dict
) -> Graph:
"""Return new instance of the Agrobacterium tumefaciens F2 graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
    directed: bool = False
        Whether to load the graph as directed or undirected.
        By default False.
    preprocess: bool = True
        Whether to preprocess the graph to be loaded in
        optimal time and memory.
    load_nodes: bool = True
        Whether to load the nodes vocabulary or treat the nodes
        simply as a numeric range.
    verbose: int = 2
        Whether to show loading bars during the retrieval and building
        of the graph.
cache: bool = True
Whether to use cache, i.e. download files only once
and preprocess them only once.
cache_path: str = "graphs"
Where to store the downloaded graphs.
version: str = "links.v11.5"
The version of the graph to retrieve.
The available versions are:
- homology.v11.5
- physical.links.v11.5
- links.v11.5
additional_graph_kwargs: Dict
Additional graph kwargs.
Returns
-----------------------
    Instance of the Agrobacterium tumefaciens F2 graph.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
return AutomaticallyRetrievedGraph(
graph_name="AgrobacteriumTumefaciensF2",
repository="string",
version=version,
directed=directed,
preprocess=preprocess,
load_nodes=load_nodes,
verbose=verbose,
cache=cache,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
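# Editor's sketch (not part of the generated module): minimal, hedged usage.
# Calling the retriever downloads the STRING data (or reuses the cache) and
# returns an ensmallen ``Graph``; printing it shows a textual report. The
# helper name below is hypothetical, for illustration only.
def _example_usage():
    graph = AgrobacteriumTumefaciensF2(directed=False)
    print(graph)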
| 33.466667 | 223 | 0.680706 |
7942b96ec8c562a46a9a74e6820fee388eca74fc | 766 | py | Python | hist.py | aaavinash85/OpenCV_image-processing | ea9bf85454a9919ced82567c99d997314598cc5c | [
"MIT"
] | null | null | null | hist.py | aaavinash85/OpenCV_image-processing | ea9bf85454a9919ced82567c99d997314598cc5c | [
"MIT"
] | null | null | null | hist.py | aaavinash85/OpenCV_image-processing | ea9bf85454a9919ced82567c99d997314598cc5c | [
"MIT"
] | 1 | 2021-07-25T08:17:03.000Z | 2021-07-25T08:17:03.000Z | import cv2
import numpy as np
import matplotlib.pyplot as plt
img = cv2.imread('images/wave.png')
cv2.imshow('img', img)
''' cv2 has a function to calculate the histogram:
cv2.calcHist([img], [channels], mask, [histSize], [ranges])
channels - corresponds to B, G, R; we can calculate the histogram for each
channel individually, or for all three at once by using a loop
'''
# Calculating Histogram for Single channel
col0 = 'b'
col1 = 'g'
col2 = 'r'
hist = cv2.calcHist([img], [2], None, [256], [0, 256])  # channel index 2 == red
plt.plot(hist, color=col2)
plt.show()
# Calculating Histogram for All three channels
color = ('b', 'g', 'r')
for i, col in enumerate(color):
    hist1 = cv2.calcHist([img], [i], None, [128], [0, 256])
    plt.plot(hist1, color=col)
plt.show()
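# Editor's sketch (not part of the original script): the same calcHist call on
# a single-channel image -- convert to grayscale first, then pass channel [0].
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
hist_gray = cv2.calcHist([gray], [0], None, [256], [0, 256])
plt.plot(hist_gray, color='k')
plt.show()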
cv2.waitKey(0)
cv2.destroyAllWindows() | 25.533333 | 67 | 0.673629 |
7942ba397542cb0b0822df17abdba205fbdc9e7a | 100,873 | py | Python | tests/myapp/tests.py | jonasundderwolf/django-mptt | 688f2dd3a2ff7957f2ba2f3c59349232cc1fa4b2 | [
"MIT"
] | null | null | null | tests/myapp/tests.py | jonasundderwolf/django-mptt | 688f2dd3a2ff7957f2ba2f3c59349232cc1fa4b2 | [
"MIT"
] | null | null | null | tests/myapp/tests.py | jonasundderwolf/django-mptt | 688f2dd3a2ff7957f2ba2f3c59349232cc1fa4b2 | [
"MIT"
] | null | null | null | import io
import os
import re
import sys
import unittest
from django.apps import apps
from django.contrib.admin import ModelAdmin, site
from django.contrib.admin.views.main import ChangeList
from django.contrib.auth.models import Group, User
from django.db.models import Q
from django.db.models.query_utils import DeferredAttribute
from django.template import Context, Template, TemplateSyntaxError
from django.test import RequestFactory, TestCase
from mptt.admin import TreeRelatedFieldListFilter
from mptt.querysets import TreeQuerySet
try:
from mock_django import mock_signal_receiver
except ImportError:
mock_signal_receiver = None
from myapp.models import (
AutoNowDateFieldModel,
Book,
Category,
ConcreteModel,
CustomPKName,
CustomTreeManager,
CustomTreeQueryset,
DoubleProxyModel,
Genre,
Item,
MultipleManagerModel,
Node,
NullableDescOrderedInsertionModel,
NullableOrderedInsertionModel,
OrderedInsertion,
Person,
SingleProxyModel,
Student,
SubItem,
UniqueTogetherModel,
UUIDNode,
)
from mptt.exceptions import CantDisableUpdates, InvalidMove
from mptt.managers import TreeManager
from mptt.models import MPTTModel
from mptt.signals import node_moved
from mptt.templatetags.mptt_tags import cache_tree_children
from mptt.utils import print_debug_info
def get_tree_details(nodes):
"""
Creates pertinent tree details for the given list of nodes.
The fields are:
id parent_id tree_id level left right
"""
if hasattr(nodes, "order_by"):
nodes = list(nodes.order_by("tree_id", "lft", "pk"))
nodes = list(nodes)
opts = nodes[0]._mptt_meta
return "\n".join(
[
"%s %s %s %s %s %s"
% (
n.pk,
getattr(n, "%s_id" % opts.parent_attr) or "-",
getattr(n, opts.tree_id_attr),
getattr(n, opts.level_attr),
getattr(n, opts.left_attr),
getattr(n, opts.right_attr),
)
for n in nodes
]
)
leading_whitespace_re = re.compile(r"^\s+", re.MULTILINE)
def tree_details(text):
"""
Trims leading whitespace from the given text specifying tree details
so triple-quoted strings can be used to provide tree details in a
readable format (says who?), to be compared with the result of using
the ``get_tree_details`` function.
"""
return leading_whitespace_re.sub("", text.rstrip())
class TreeTestCase(TestCase):
def assertTreeEqual(self, tree1, tree2):
if not isinstance(tree1, str):
tree1 = get_tree_details(tree1)
tree1 = tree_details(tree1)
if not isinstance(tree2, str):
tree2 = get_tree_details(tree2)
tree2 = tree_details(tree2)
return self.assertEqual(tree1, tree2, "\n%r\n != \n%r" % (tree1, tree2))
class DocTestTestCase(TreeTestCase):
def test_run_doctest(self):
import doctest
class DummyStream:
content = ""
encoding = "utf8"
def write(self, text):
self.content += text
def flush(self):
pass
dummy_stream = DummyStream()
before = sys.stdout
sys.stdout = dummy_stream
doctest.testfile(
os.path.join(os.path.dirname(__file__), "doctests.txt"),
module_relative=False,
optionflags=doctest.IGNORE_EXCEPTION_DETAIL | doctest.ELLIPSIS,
encoding="utf-8",
)
sys.stdout = before
content = dummy_stream.content
if content:
before.write(content + "\n")
self.fail()
# genres.json defines the following tree structure
#
# 1 - 1 0 1 16 action
# 2 1 1 1 2 9 +-- platformer
# 3 2 1 2 3 4 | |-- platformer_2d
# 4 2 1 2 5 6 | |-- platformer_3d
# 5 2 1 2 7 8 | +-- platformer_4d
# 6 1 1 1 10 15 +-- shmup
# 7 6 1 2 11 12 |-- shmup_vertical
# 8 6 1 2 13 14 +-- shmup_horizontal
# 9 - 2 0 1 6 rpg
# 10 9 2 1 2 3 |-- arpg
# 11 9 2 1 4 5 +-- trpg
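# A hedged aside (not part of the original test file): the lft/rght pairs
# above encode ancestry as nested intervals -- node B descends from node A
# iff they share a tree_id and A.lft < B.lft and B.rght < A.rght. Shown as
# comments only, since module-level queries would run at import time:
#
# action = Genre.objects.get(id=1)   # tree_id=1, lft=1,  rght=16
# shmup_v = Genre.objects.get(id=7)  # tree_id=1, lft=11, rght=12
# assert action.tree_id == shmup_v.tree_id
# assert action.lft < shmup_v.lft < shmup_v.rght < action.rght  # descendant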
class ReparentingTestCase(TreeTestCase):
"""
Test that trees are in the appropriate state after reparenting and
that reparented items have the correct tree attributes defined,
should they be required for use after a save.
"""
fixtures = ["genres.json"]
def test_new_root_from_subtree(self):
shmup = Genre.objects.get(id=6)
shmup.parent = None
shmup.save()
self.assertTreeEqual([shmup], "6 - 3 0 1 6")
self.assertTreeEqual(
Genre.objects.all(),
"""
1 - 1 0 1 10
2 1 1 1 2 9
3 2 1 2 3 4
4 2 1 2 5 6
5 2 1 2 7 8
9 - 2 0 1 6
10 9 2 1 2 3
11 9 2 1 4 5
6 - 3 0 1 6
7 6 3 1 2 3
8 6 3 1 4 5
""",
)
def test_new_root_from_leaf_with_siblings(self):
platformer_2d = Genre.objects.get(id=3)
platformer_2d.parent = None
platformer_2d.save()
self.assertTreeEqual([platformer_2d], "3 - 3 0 1 2")
self.assertTreeEqual(
Genre.objects.all(),
"""
1 - 1 0 1 14
2 1 1 1 2 7
4 2 1 2 3 4
5 2 1 2 5 6
6 1 1 1 8 13
7 6 1 2 9 10
8 6 1 2 11 12
9 - 2 0 1 6
10 9 2 1 2 3
11 9 2 1 4 5
3 - 3 0 1 2
""",
)
def test_new_child_from_root(self):
action = Genre.objects.get(id=1)
rpg = Genre.objects.get(id=9)
action.parent = rpg
action.save()
self.assertTreeEqual([action], "1 9 2 1 6 21")
self.assertTreeEqual([rpg], "9 - 2 0 1 22")
self.assertTreeEqual(
Genre.objects.all(),
"""
9 - 2 0 1 22
10 9 2 1 2 3
11 9 2 1 4 5
1 9 2 1 6 21
2 1 2 2 7 14
3 2 2 3 8 9
4 2 2 3 10 11
5 2 2 3 12 13
6 1 2 2 15 20
7 6 2 3 16 17
8 6 2 3 18 19
""",
)
def test_move_leaf_to_other_tree(self):
shmup_horizontal = Genre.objects.get(id=8)
rpg = Genre.objects.get(id=9)
shmup_horizontal.parent = rpg
shmup_horizontal.save()
self.assertTreeEqual([shmup_horizontal], "8 9 2 1 6 7")
self.assertTreeEqual([rpg], "9 - 2 0 1 8")
self.assertTreeEqual(
Genre.objects.all(),
"""
1 - 1 0 1 14
2 1 1 1 2 9
3 2 1 2 3 4
4 2 1 2 5 6
5 2 1 2 7 8
6 1 1 1 10 13
7 6 1 2 11 12
9 - 2 0 1 8
10 9 2 1 2 3
11 9 2 1 4 5
8 9 2 1 6 7
""",
)
def test_move_subtree_to_other_tree(self):
shmup = Genre.objects.get(id=6)
trpg = Genre.objects.get(id=11)
shmup.parent = trpg
shmup.save()
self.assertTreeEqual([shmup], "6 11 2 2 5 10")
self.assertTreeEqual([trpg], "11 9 2 1 4 11")
self.assertTreeEqual(
Genre.objects.all(),
"""
1 - 1 0 1 10
2 1 1 1 2 9
3 2 1 2 3 4
4 2 1 2 5 6
5 2 1 2 7 8
9 - 2 0 1 12
10 9 2 1 2 3
11 9 2 1 4 11
6 11 2 2 5 10
7 6 2 3 6 7
8 6 2 3 8 9
""",
)
def test_move_child_up_level(self):
shmup_horizontal = Genre.objects.get(id=8)
action = Genre.objects.get(id=1)
shmup_horizontal.parent = action
shmup_horizontal.save()
self.assertTreeEqual([shmup_horizontal], "8 1 1 1 14 15")
self.assertTreeEqual([action], "1 - 1 0 1 16")
self.assertTreeEqual(
Genre.objects.all(),
"""
1 - 1 0 1 16
2 1 1 1 2 9
3 2 1 2 3 4
4 2 1 2 5 6
5 2 1 2 7 8
6 1 1 1 10 13
7 6 1 2 11 12
8 1 1 1 14 15
9 - 2 0 1 6
10 9 2 1 2 3
11 9 2 1 4 5
""",
)
def test_move_subtree_down_level(self):
shmup = Genre.objects.get(id=6)
platformer = Genre.objects.get(id=2)
shmup.parent = platformer
shmup.save()
self.assertTreeEqual([shmup], "6 2 1 2 9 14")
self.assertTreeEqual([platformer], "2 1 1 1 2 15")
self.assertTreeEqual(
Genre.objects.all(),
"""
1 - 1 0 1 16
2 1 1 1 2 15
3 2 1 2 3 4
4 2 1 2 5 6
5 2 1 2 7 8
6 2 1 2 9 14
7 6 1 3 10 11
8 6 1 3 12 13
9 - 2 0 1 6
10 9 2 1 2 3
11 9 2 1 4 5
""",
)
def test_move_to(self):
rpg = Genre.objects.get(pk=9)
action = Genre.objects.get(pk=1)
rpg.move_to(action)
rpg.save()
self.assertEqual(rpg.parent, action)
def test_invalid_moves(self):
# A node may not be made a child of itself
action = Genre.objects.get(id=1)
action.parent = action
platformer = Genre.objects.get(id=2)
platformer.parent = platformer
self.assertRaises(InvalidMove, action.save)
self.assertRaises(InvalidMove, platformer.save)
# A node may not be made a child of any of its descendants
platformer_4d = Genre.objects.get(id=5)
action.parent = platformer_4d
platformer.parent = platformer_4d
self.assertRaises(InvalidMove, action.save)
self.assertRaises(InvalidMove, platformer.save)
# New parent is still set when an error occurs
self.assertEqual(action.parent, platformer_4d)
self.assertEqual(platformer.parent, platformer_4d)
class ConcurrencyTestCase(TreeTestCase):
"""
Test that the tree structure remains intact when saving nodes (without
setting a new parent) after the tree structure has been changed.
"""
def setUp(self):
fruit = ConcreteModel.objects.create(name="Fruit")
vegie = ConcreteModel.objects.create(name="Vegie")
ConcreteModel.objects.create(name="Apple", parent=fruit)
ConcreteModel.objects.create(name="Pear", parent=fruit)
ConcreteModel.objects.create(name="Tomato", parent=vegie)
ConcreteModel.objects.create(name="Carrot", parent=vegie)
# sanity check
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
1 - 1 0 1 6
3 1 1 1 2 3
4 1 1 1 4 5
2 - 2 0 1 6
5 2 2 1 2 3
6 2 2 1 4 5
""",
)
def _modify_tree(self):
fruit = ConcreteModel.objects.get(name="Fruit")
vegie = ConcreteModel.objects.get(name="Vegie")
vegie.move_to(fruit)
def _assert_modified_tree_state(self):
carrot = ConcreteModel.objects.get(id=6)
self.assertTreeEqual([carrot], "6 2 1 2 5 6")
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
1 - 1 0 1 12
2 1 1 1 2 7
5 2 1 2 3 4
6 2 1 2 5 6
3 1 1 1 8 9
4 1 1 1 10 11
""",
)
def test_node_save_after_tree_restructuring(self):
carrot = ConcreteModel.objects.get(id=6)
self._modify_tree()
carrot.name = "Purple carrot"
carrot.save()
self._assert_modified_tree_state()
def test_node_save_after_tree_restructuring_with_update_fields(self):
"""
Test that model is saved properly when passing update_fields
"""
carrot = ConcreteModel.objects.get(id=6)
self._modify_tree()
# update with kwargs
carrot.name = "Won't change"
carrot.ghosts = "Will get updated"
carrot.save(update_fields=["ghosts"])
self._assert_modified_tree_state()
updated_carrot = ConcreteModel.objects.get(id=6)
self.assertEqual(updated_carrot.ghosts, carrot.ghosts)
self.assertNotEqual(updated_carrot.name, carrot.name)
# update with positional arguments
carrot.name = "Will change"
carrot.ghosts = "Will not be updated"
carrot.save(False, False, None, ["name"])
updated_carrot = ConcreteModel.objects.get(id=6)
self.assertNotEqual(updated_carrot.ghosts, carrot.ghosts)
self.assertEqual(updated_carrot.name, carrot.name)
def test_update_fields_positional(self):
"""
Test that update_fields works as a positional argument
Test for https://github.com/django-mptt/django-mptt/issues/384
"""
carrot = ConcreteModel.objects.get(id=6)
# Why would you do it this way? Meh.
carrot.save(False, False, None, None)
# categories.json defines the following tree structure:
#
# 1 - 1 0 1 20 games
# 2 1 1 1 2 7 +-- wii
# 3 2 1 2 3 4 | |-- wii_games
# 4 2 1 2 5 6 | +-- wii_hardware
# 5 1 1 1 8 13 +-- xbox360
# 6 5 1 2 9 10 | |-- xbox360_games
# 7 5 1 2 11 12 | +-- xbox360_hardware
# 8 1 1 1 14 19 +-- ps3
# 9 8 1 2 15 16 |-- ps3_games
# 10 8 1 2 17 18 +-- ps3_hardware
class DeletionTestCase(TreeTestCase):
"""
Tests that the tree structure is maintained appropriately in various
deletion scenarios.
"""
fixtures = ["categories.json"]
def test_delete_root_node(self):
# Add a few other roots to verify that they aren't affected
Category(name="Preceding root").insert_at(
Category.objects.get(id=1), "left", save=True
)
Category(name="Following root").insert_at(
Category.objects.get(id=1), "right", save=True
)
self.assertTreeEqual(
Category.objects.all(),
"""
11 - 1 0 1 2
1 - 2 0 1 20
2 1 2 1 2 7
3 2 2 2 3 4
4 2 2 2 5 6
5 1 2 1 8 13
6 5 2 2 9 10
7 5 2 2 11 12
8 1 2 1 14 19
9 8 2 2 15 16
10 8 2 2 17 18
12 - 3 0 1 2
""",
)
Category.objects.get(id=1).delete()
self.assertTreeEqual(
Category.objects.all(),
"""
11 - 1 0 1 2
12 - 3 0 1 2
""",
)
def test_delete_last_node_with_siblings(self):
Category.objects.get(id=9).delete()
self.assertTreeEqual(
Category.objects.all(),
"""
1 - 1 0 1 18
2 1 1 1 2 7
3 2 1 2 3 4
4 2 1 2 5 6
5 1 1 1 8 13
6 5 1 2 9 10
7 5 1 2 11 12
8 1 1 1 14 17
10 8 1 2 15 16
""",
)
def test_delete_last_node_with_descendants(self):
Category.objects.get(id=8).delete()
self.assertTreeEqual(
Category.objects.all(),
"""
1 - 1 0 1 14
2 1 1 1 2 7
3 2 1 2 3 4
4 2 1 2 5 6
5 1 1 1 8 13
6 5 1 2 9 10
7 5 1 2 11 12
""",
)
def test_delete_node_with_siblings(self):
child = Category.objects.get(id=6)
parent = child.parent
self.assertEqual(parent.get_descendant_count(), 2)
child.delete()
self.assertTreeEqual(
Category.objects.all(),
"""
1 - 1 0 1 18
2 1 1 1 2 7
3 2 1 2 3 4
4 2 1 2 5 6
5 1 1 1 8 11
7 5 1 2 9 10
8 1 1 1 12 17
9 8 1 2 13 14
10 8 1 2 15 16
""",
)
self.assertEqual(parent.get_descendant_count(), 1)
parent = Category.objects.get(pk=parent.pk)
self.assertEqual(parent.get_descendant_count(), 1)
def test_delete_node_with_descendants_and_siblings(self):
"""
Regression test for Issue 23 - we used to use pre_delete, which
resulted in tree cleanup being performed for every node being
deleted, rather than just the node on which ``delete()`` was
called.
"""
Category.objects.get(id=5).delete()
self.assertTreeEqual(
Category.objects.all(),
"""
1 - 1 0 1 14
2 1 1 1 2 7
3 2 1 2 3 4
4 2 1 2 5 6
8 1 1 1 8 13
9 8 1 2 9 10
10 8 1 2 11 12
""",
)
def test_delete_multiple_nodes(self):
"""Regression test for Issue 576."""
queryset = Category.objects.filter(id__in=[6, 7])
for category in queryset:
category.delete()
self.assertTreeEqual(
Category.objects.all(),
"""
1 - 1 0 1 16
2 1 1 1 2 7
3 2 1 2 3 4
4 2 1 2 5 6
5 1 1 1 8 9
8 1 1 1 10 15
9 8 1 2 11 12
10 8 1 2 13 14""",
)
class IntraTreeMovementTestCase(TreeTestCase):
pass
class InterTreeMovementTestCase(TreeTestCase):
pass
class PositionedInsertionTestCase(TreeTestCase):
pass
class CustomPKNameTestCase(TreeTestCase):
def setUp(self):
manager = CustomPKName.objects
c1 = manager.create(name="c1")
manager.create(name="c11", parent=c1)
manager.create(name="c12", parent=c1)
c2 = manager.create(name="c2")
manager.create(name="c21", parent=c2)
manager.create(name="c22", parent=c2)
manager.create(name="c3")
def test_get_next_sibling(self):
root = CustomPKName.objects.get(name="c12")
sib = root.get_next_sibling()
self.assertTrue(sib is None)
class DisabledUpdatesTestCase(TreeTestCase):
def setUp(self):
self.a = ConcreteModel.objects.create(name="a")
self.b = ConcreteModel.objects.create(name="b", parent=self.a)
self.c = ConcreteModel.objects.create(name="c", parent=self.a)
self.d = ConcreteModel.objects.create(name="d")
# state is now:
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 1 1 1 4 5
4 - 2 0 1 2
""",
)
def test_single_proxy(self):
self.assertTrue(ConcreteModel._mptt_updates_enabled)
self.assertTrue(SingleProxyModel._mptt_updates_enabled)
self.assertRaises(
CantDisableUpdates,
SingleProxyModel.objects.disable_mptt_updates().__enter__,
)
self.assertTrue(ConcreteModel._mptt_updates_enabled)
self.assertTrue(SingleProxyModel._mptt_updates_enabled)
with ConcreteModel.objects.disable_mptt_updates():
self.assertFalse(ConcreteModel._mptt_updates_enabled)
self.assertFalse(SingleProxyModel._mptt_updates_enabled)
self.assertTrue(ConcreteModel._mptt_updates_enabled)
self.assertTrue(SingleProxyModel._mptt_updates_enabled)
def test_double_proxy(self):
self.assertTrue(ConcreteModel._mptt_updates_enabled)
self.assertTrue(DoubleProxyModel._mptt_updates_enabled)
self.assertRaises(
CantDisableUpdates,
DoubleProxyModel.objects.disable_mptt_updates().__enter__,
)
self.assertTrue(ConcreteModel._mptt_updates_enabled)
self.assertTrue(DoubleProxyModel._mptt_updates_enabled)
with ConcreteModel.objects.disable_mptt_updates():
self.assertFalse(ConcreteModel._mptt_updates_enabled)
self.assertFalse(DoubleProxyModel._mptt_updates_enabled)
self.assertTrue(ConcreteModel._mptt_updates_enabled)
self.assertTrue(DoubleProxyModel._mptt_updates_enabled)
def test_insert_child(self):
with self.assertNumQueries(2):
with ConcreteModel.objects.disable_mptt_updates():
# 1 query here:
with self.assertNumQueries(1):
ConcreteModel.objects.create(name="e", parent=self.d)
# 2nd query here:
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 1 1 1 4 5
4 - 2 0 1 2
5 4 2 1 2 3
""",
)
# yes, this is wrong. that's what disable_mptt_updates() does :/
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 1 1 1 4 5
4 - 2 0 1 2
5 4 2 1 2 3
""",
)
def test_insert_root(self):
with self.assertNumQueries(2):
with ConcreteModel.objects.disable_mptt_updates():
with self.assertNumQueries(1):
# 1 query here:
ConcreteModel.objects.create(name="e")
# 2nd query here:
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
5 - 0 0 1 2
1 - 1 0 1 6
2 1 1 1 2 3
3 1 1 1 4 5
4 - 2 0 1 2
""",
)
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
5 - 0 0 1 2
1 - 1 0 1 6
2 1 1 1 2 3
3 1 1 1 4 5
4 - 2 0 1 2
""",
)
def test_move_node_same_tree(self):
with self.assertNumQueries(2):
with ConcreteModel.objects.disable_mptt_updates():
with self.assertNumQueries(1):
# 2 queries here:
# (django does a query to determine if the row is in the db yet)
self.c.parent = self.b
self.c.save()
# 3rd query here:
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 2 1 1 4 5
4 - 2 0 1 2
""",
)
# yes, this is wrong. that's what disable_mptt_updates() does :/
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 2 1 1 4 5
4 - 2 0 1 2
""",
)
def test_move_node_different_tree(self):
with self.assertNumQueries(2):
with ConcreteModel.objects.disable_mptt_updates():
with self.assertNumQueries(1):
# 1 update query
self.c.parent = self.d
self.c.save()
# query 2 here:
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 4 1 1 4 5
4 - 2 0 1 2
""",
)
# yes, this is wrong. that's what disable_mptt_updates() does :/
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 4 1 1 4 5
4 - 2 0 1 2
""",
)
def test_move_node_to_root(self):
with self.assertNumQueries(2):
with ConcreteModel.objects.disable_mptt_updates():
with self.assertNumQueries(1):
# 1 update query
self.c.parent = None
self.c.save()
# query 2 here:
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 - 1 1 4 5
4 - 2 0 1 2
""",
)
# yes, this is wrong. that's what disable_mptt_updates() does :/
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 - 1 1 4 5
4 - 2 0 1 2
""",
)
def test_move_root_to_child(self):
with self.assertNumQueries(2):
with ConcreteModel.objects.disable_mptt_updates():
with self.assertNumQueries(1):
# 1 update query
self.d.parent = self.c
self.d.save()
# query 2 here:
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 1 1 1 4 5
4 3 2 0 1 2
""",
)
# yes, this is wrong. that's what disable_mptt_updates() does :/
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 1 1 1 4 5
4 3 2 0 1 2
""",
)
class DelayedUpdatesTestCase(TreeTestCase):
def setUp(self):
self.a = ConcreteModel.objects.create(name="a")
self.b = ConcreteModel.objects.create(name="b", parent=self.a)
self.c = ConcreteModel.objects.create(name="c", parent=self.a)
self.d = ConcreteModel.objects.create(name="d")
self.z = ConcreteModel.objects.create(name="z")
# state is now:
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 1 1 1 4 5
4 - 2 0 1 2
5 - 3 0 1 2
""",
)
def test_proxy(self):
self.assertFalse(ConcreteModel._mptt_is_tracking)
self.assertFalse(SingleProxyModel._mptt_is_tracking)
self.assertRaises(
CantDisableUpdates, SingleProxyModel.objects.delay_mptt_updates().__enter__
)
self.assertFalse(ConcreteModel._mptt_is_tracking)
self.assertFalse(SingleProxyModel._mptt_is_tracking)
with ConcreteModel.objects.delay_mptt_updates():
self.assertTrue(ConcreteModel._mptt_is_tracking)
self.assertTrue(SingleProxyModel._mptt_is_tracking)
self.assertFalse(ConcreteModel._mptt_is_tracking)
self.assertFalse(SingleProxyModel._mptt_is_tracking)
def test_double_context_manager(self):
with ConcreteModel.objects.delay_mptt_updates():
self.assertTrue(ConcreteModel._mptt_is_tracking)
with ConcreteModel.objects.delay_mptt_updates():
self.assertTrue(ConcreteModel._mptt_is_tracking)
self.assertTrue(ConcreteModel._mptt_is_tracking)
self.assertFalse(ConcreteModel._mptt_is_tracking)
def test_insert_child(self):
with self.assertNumQueries(8):
with ConcreteModel.objects.delay_mptt_updates():
with self.assertNumQueries(2):
# 1 query for target stale check,
# 1 query to save node.
ConcreteModel.objects.create(name="e", parent=self.d)
# 3rd query here:
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 1 1 1 4 5
4 - 2 0 1 2
6 4 2 1 2 3
5 - 3 0 1 2
""",
)
# remaining queries (4 through 8) are the partial rebuild process.
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 1 1 1 4 5
4 - 2 0 1 4
6 4 2 1 2 3
5 - 3 0 1 2
""",
)
def test_insert_root(self):
with self.assertNumQueries(3):
with ConcreteModel.objects.delay_mptt_updates():
with self.assertNumQueries(2):
# 2 queries required here:
# (one to get the correct tree_id, then one to insert)
ConcreteModel.objects.create(name="e")
# 3rd query here:
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 1 1 1 4 5
4 - 2 0 1 2
5 - 3 0 1 2
6 - 4 0 1 2
""",
)
# no partial rebuild necessary, as no trees were modified
# (newly created tree is already okay)
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 1 1 1 4 5
4 - 2 0 1 2
5 - 3 0 1 2
6 - 4 0 1 2
""",
)
def test_move_node_same_tree(self):
with self.assertNumQueries(10):
with ConcreteModel.objects.delay_mptt_updates():
with self.assertNumQueries(2):
# 1 query to ensure target fields aren't stale
# 1 update query
self.c.parent = self.b
self.c.save()
# query 3 here:
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 2 1 2 3 4
4 - 2 0 1 2
5 - 3 0 1 2
""",
)
# the remaining 7 queries are the partial rebuild.
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 5
3 2 1 2 3 4
4 - 2 0 1 2
5 - 3 0 1 2
""",
)
def test_move_node_different_tree(self):
with self.assertNumQueries(12):
with ConcreteModel.objects.delay_mptt_updates():
with self.assertNumQueries(2):
# 2 queries here:
# 1. update the node
# 2. collapse old tree since it is now empty.
self.d.parent = self.c
self.d.save()
# query 3 here:
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 1 1 1 4 5
4 3 1 2 5 6
5 - 2 0 1 2
""",
)
# the other 9 queries are the partial rebuild
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
1 - 1 0 1 8
2 1 1 1 2 3
3 1 1 1 4 7
4 3 1 2 5 6
5 - 2 0 1 2
""",
)
def test_move_node_to_root(self):
with self.assertNumQueries(4):
with ConcreteModel.objects.delay_mptt_updates():
with self.assertNumQueries(3):
# 3 queries here!
# 1. find the next tree_id to move to
# 2. update the tree_id on all nodes to the right of that
# 3. update tree fields on self.c
self.c.parent = None
self.c.save()
# 4th query here:
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
4 - 2 0 1 2
5 - 3 0 1 2
3 - 4 0 1 2
""",
)
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
4 - 2 0 1 2
5 - 3 0 1 2
3 - 4 0 1 2
""",
)
def test_move_root_to_child(self):
with self.assertNumQueries(12):
with ConcreteModel.objects.delay_mptt_updates():
with self.assertNumQueries(2):
# 2 queries here:
# 1. update the node
# 2. collapse old tree since it is now empty.
self.d.parent = self.c
self.d.save()
# query 3 here:
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 1 1 1 4 5
4 3 1 2 5 6
5 - 2 0 1 2
""",
)
# the remaining 9 queries are the partial rebuild.
self.assertTreeEqual(
ConcreteModel.objects.all(),
"""
1 - 1 0 1 8
2 1 1 1 2 3
3 1 1 1 4 7
4 3 1 2 5 6
5 - 2 0 1 2
""",
)
class OrderedInsertionSortingTestCase(TestCase):
def test_insert_unordered_stuff(self):
root = OrderedInsertion.objects.create(name="")
# "b" gets inserted first,
b = OrderedInsertion.objects.create(name="b", parent=root)
# "a" gets inserted later,
a = OrderedInsertion.objects.create(name="a", parent=root)
# ... but specifying OrderedInsertion.MPTTMeta.order_insertion_by
# tells django-mptt to order added items by name. So basically
# instance "a", added later, will get the first place in the
# tree. So what exactly is the problem?
#
# The problem is that item "b" will not get refreshed in any
# way. We need to reload it manually or else there will be problems
# like the one demonstrated below:
self.assertIn(a, a.get_ancestors(include_self=True))
# This will raise an AssertionError, unless we reload the item from
# the database. Until we come up with a sensible way
# of reloading all Django instances pointing to a given row in the
# database...
# self.assertIn(b, b.get_ancestors(include_self=True))
self.assertRaises(
AssertionError, self.assertIn, b, b.get_ancestors(include_self=True)
)
# ... we need to reload it properly ourselves:
b.refresh_from_db()
self.assertIn(b, b.get_ancestors(include_self=True))
class OrderedInsertionDelayedUpdatesTestCase(TreeTestCase):
def setUp(self):
self.c = OrderedInsertion.objects.create(name="c")
self.d = OrderedInsertion.objects.create(name="d", parent=self.c)
self.e = OrderedInsertion.objects.create(name="e", parent=self.c)
self.f = OrderedInsertion.objects.create(name="f")
self.z = OrderedInsertion.objects.create(name="z")
# state is now:
self.assertTreeEqual(
OrderedInsertion.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 1 1 1 4 5
4 - 2 0 1 2
5 - 3 0 1 2
""",
)
def test_insert_child(self):
with self.assertNumQueries(12):
with OrderedInsertion.objects.delay_mptt_updates():
with self.assertNumQueries(2):
# 1 query here:
OrderedInsertion.objects.create(name="dd", parent=self.c)
# 2nd query here:
self.assertTreeEqual(
OrderedInsertion.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 1 1 1 4 5
6 1 1 1 6 7
4 - 2 0 1 2
5 - 3 0 1 2
""",
)
# remaining 9 queries are the partial rebuild process.
self.assertTreeEqual(
OrderedInsertion.objects.all(),
"""
1 - 1 0 1 8
2 1 1 1 2 3
6 1 1 1 4 5
3 1 1 1 6 7
4 - 2 0 1 2
5 - 3 0 1 2
""",
)
def test_insert_root(self):
with self.assertNumQueries(4):
with OrderedInsertion.objects.delay_mptt_updates():
with self.assertNumQueries(3):
# 3 queries required here:
# 1. get correct tree_id (delay_mptt_updates doesn't handle
# root-level ordering when using ordered insertion)
# 2. increment tree_id of all following trees
# 3. insert the object
OrderedInsertion.objects.create(name="ee")
# 4th query here:
self.assertTreeEqual(
OrderedInsertion.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 1 1 1 4 5
6 - 2 0 1 2
4 - 3 0 1 2
5 - 4 0 1 2
""",
)
# no partial rebuild is required
self.assertTreeEqual(
OrderedInsertion.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 1 1 1 4 5
6 - 2 0 1 2
4 - 3 0 1 2
5 - 4 0 1 2
""",
)
def test_move_node_same_tree(self):
with self.assertNumQueries(9):
with OrderedInsertion.objects.delay_mptt_updates():
with self.assertNumQueries(1):
# 1 update query
self.e.name = "before d"
self.e.save()
# query 2 here:
self.assertTreeEqual(
OrderedInsertion.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 1 1 1 4 5
4 - 2 0 1 2
5 - 3 0 1 2
""",
)
# the remaining 7 queries are the partial rebuild.
self.assertTreeEqual(
OrderedInsertion.objects.all(),
"""
1 - 1 0 1 6
3 1 1 1 2 3
2 1 1 1 4 5
4 - 2 0 1 2
5 - 3 0 1 2
""",
)
def test_move_node_different_tree(self):
with self.assertNumQueries(12):
with OrderedInsertion.objects.delay_mptt_updates():
with self.assertNumQueries(2):
# 2 queries here:
# 1. update the node
# 2. collapse old tree since it is now empty.
self.f.parent = self.c
self.f.name = "dd"
self.f.save()
# query 3 here:
self.assertTreeEqual(
OrderedInsertion.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
4 1 1 1 2 3
3 1 1 1 4 5
5 - 2 0 1 2
""",
)
# the remaining 9 queries are the partial rebuild
self.assertTreeEqual(
OrderedInsertion.objects.all(),
"""
1 - 1 0 1 8
2 1 1 1 2 3
4 1 1 1 4 5
3 1 1 1 6 7
5 - 2 0 1 2
""",
)
def test_move_node_to_root(self):
with self.assertNumQueries(4):
with OrderedInsertion.objects.delay_mptt_updates():
with self.assertNumQueries(3):
# 3 queries here!
# 1. find the next tree_id to move to
# 2. update the tree_id on all nodes to the right of that
# 3. update tree fields on self.c
self.e.parent = None
self.e.save()
# query 4 here:
self.assertTreeEqual(
OrderedInsertion.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 - 2 0 1 2
4 - 3 0 1 2
5 - 4 0 1 2
""",
)
self.assertTreeEqual(
OrderedInsertion.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 - 2 0 1 2
4 - 3 0 1 2
5 - 4 0 1 2
""",
)
def test_move_root_to_child(self):
with self.assertNumQueries(12):
with OrderedInsertion.objects.delay_mptt_updates():
with self.assertNumQueries(2):
# 2 queries here:
# 1. update the node
# 2. collapse old tree since it is now empty.
self.f.parent = self.e
self.f.save()
# query 3 here:
self.assertTreeEqual(
OrderedInsertion.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 1 1 1 4 5
4 3 1 2 5 6
5 - 2 0 1 2
""",
)
# the remaining 9 queries are the partial rebuild.
self.assertTreeEqual(
OrderedInsertion.objects.all(),
"""
1 - 1 0 1 8
2 1 1 1 2 3
3 1 1 1 4 7
4 3 1 2 5 6
5 - 2 0 1 2
""",
)
class ManagerTests(TreeTestCase):
fixtures = ["categories.json", "genres.json", "persons.json"]
def test_all_managers_are_different(self):
# all tree managers should be different. otherwise, possible infinite recursion.
seen = {}
for model in apps.get_models():
if not issubclass(model, MPTTModel):
continue
tm = model._tree_manager
if id(tm) in seen:
self.fail(
"Tree managers for %s and %s are the same manager"
% (model.__name__, seen[id(tm)].__name__)
)
seen[id(tm)] = model
def test_manager_multi_table_inheritance(self):
self.assertIs(Student._tree_manager.model, Student)
self.assertIs(Student._tree_manager.tree_model, Person)
self.assertIs(Person._tree_manager.model, Person)
self.assertIs(Person._tree_manager.tree_model, Person)
def test_all_managers_have_correct_model(self):
# all tree managers should have the correct model.
for model in apps.get_models():
if not issubclass(model, MPTTModel):
continue
self.assertEqual(model._tree_manager.model, model)
def test_base_manager_infinite_recursion(self):
# repeatedly calling _base_manager should eventually return None
for model in apps.get_models():
if not issubclass(model, MPTTModel):
continue
manager = model._tree_manager
for i in range(20):
manager = manager._base_manager
if manager is None:
break
else:
self.fail(
"Detected infinite recursion in %s._tree_manager._base_manager"
% model
)
def test_proxy_custom_manager(self):
self.assertIsInstance(SingleProxyModel._tree_manager, CustomTreeManager)
self.assertIsInstance(SingleProxyModel._tree_manager._base_manager, TreeManager)
self.assertIsInstance(SingleProxyModel.objects, CustomTreeManager)
self.assertIsInstance(SingleProxyModel.objects._base_manager, TreeManager)
def test_get_queryset_descendants(self):
def get_desc_names(qs, include_self=False):
desc = qs.model.objects.get_queryset_descendants(
qs, include_self=include_self
)
return list(desc.values_list("name", flat=True).order_by("name"))
qs = Category.objects.filter(Q(name="Nintendo Wii") | Q(name="PlayStation 3"))
self.assertEqual(
get_desc_names(qs),
["Games", "Games", "Hardware & Accessories", "Hardware & Accessories"],
)
self.assertEqual(
get_desc_names(qs, include_self=True),
[
"Games",
"Games",
"Hardware & Accessories",
"Hardware & Accessories",
"Nintendo Wii",
"PlayStation 3",
],
)
qs = Genre.objects.filter(parent=None)
self.assertEqual(
get_desc_names(qs),
[
"2D Platformer",
"3D Platformer",
"4D Platformer",
"Action RPG",
"Horizontal Scrolling Shootemup",
"Platformer",
"Shootemup",
"Tactical RPG",
"Vertical Scrolling Shootemup",
],
)
self.assertEqual(
get_desc_names(qs, include_self=True),
[
"2D Platformer",
"3D Platformer",
"4D Platformer",
"Action",
"Action RPG",
"Horizontal Scrolling Shootemup",
"Platformer",
"Role-playing Game",
"Shootemup",
"Tactical RPG",
"Vertical Scrolling Shootemup",
],
)
def _get_anc_names(self, qs, include_self=False):
anc = qs.model.objects.get_queryset_ancestors(qs, include_self=include_self)
return list(anc.values_list("name", flat=True).order_by("name"))
def test_get_queryset_ancestors(self):
qs = Category.objects.filter(Q(name="Nintendo Wii") | Q(name="PlayStation 3"))
self.assertEqual(self._get_anc_names(qs), ["PC & Video Games"])
self.assertEqual(
self._get_anc_names(qs, include_self=True),
["Nintendo Wii", "PC & Video Games", "PlayStation 3"],
)
qs = Genre.objects.filter(parent=None)
self.assertEqual(self._get_anc_names(qs), [])
self.assertEqual(
self._get_anc_names(qs, include_self=True), ["Action", "Role-playing Game"]
)
def test_get_queryset_ancestors_regression_379(self):
# https://github.com/django-mptt/django-mptt/issues/379
qs = Genre.objects.all()
self.assertEqual(
self._get_anc_names(qs, include_self=True),
list(Genre.objects.values_list("name", flat=True).order_by("name")),
)
def test_custom_querysets(self):
"""
Test that a custom manager also provides custom querysets.
"""
self.assertTrue(isinstance(Person.objects.all(), CustomTreeQueryset))
self.assertTrue(
isinstance(Person.objects.all()[0].get_children(), CustomTreeQueryset)
)
self.assertTrue(hasattr(Person.objects.none(), "custom_method"))
# Check that empty querysets get custom methods
self.assertTrue(
hasattr(Person.objects.all()[0].get_children().none(), "custom_method")
)
self.assertEqual(type(Person.objects.all()), type(Person.objects.root_nodes()))
def test_manager_from_custom_queryset(self):
"""
Test that a manager created from a custom queryset works.
Regression test for #378.
"""
TreeManager.from_queryset(CustomTreeQueryset)().contribute_to_class(
Genre, "my_manager"
)
self.assertIsInstance(Genre.my_manager.get_queryset(), CustomTreeQueryset)
def test_num_queries_on_get_queryset_descendants(self):
"""
Test the number of queries to access descendants
is not O(n).
At the moment it is O(1)+1.
Ideally we should aim for O(1).
"""
with self.assertNumQueries(2):
qs = Category.objects.get_queryset_descendants(
Category.objects.all(), include_self=True
)
self.assertEqual(len(qs), 10)
def test_default_manager_with_multiple_managers(self):
"""
Test that a model with multiple managers defined always uses the
default manager as the tree manager.
"""
self.assertEqual(type(MultipleManagerModel._tree_manager), TreeManager)
class CacheTreeChildrenTestCase(TreeTestCase):
"""
Tests for the ``cache_tree_children`` template filter.
"""
fixtures = ["categories.json"]
def test_cache_tree_children_caches_parents(self):
"""
Ensures that each node's parent is cached by ``cache_tree_children``.
"""
# Ensure only 1 query is used during this test
with self.assertNumQueries(1):
roots = cache_tree_children(Category.objects.all())
games = roots[0]
wii = games.get_children()[0]
wii_games = wii.get_children()[0]
# Ensure that ``wii`` is cached as ``parent`` on ``wii_games``, and
# likewise for ``games`` being ``parent`` on the attached ``wii``
self.assertEqual(wii, wii_games.parent)
self.assertEqual(games, wii_games.parent.parent)
def test_cache_tree_children_with_invalid_ordering(self):
"""
Ensures that ``cache_tree_children`` fails with a ``ValueError`` when
passed a list which is not in tree order.
"""
with self.assertNumQueries(1):
with self.assertRaises(ValueError):
cache_tree_children(list(Category.objects.order_by("-id")))
# Passing a list with correct ordering should work, though.
with self.assertNumQueries(1):
cache_tree_children(list(Category.objects.all()))
# The exact ordering tuple doesn't matter, as long as the nodes end up in depth-first order.
cache_tree_children(Category.objects.order_by("tree_id", "lft", "name"))
cache_tree_children(Category.objects.filter(tree_id=1).order_by("lft"))
class RecurseTreeTestCase(TreeTestCase):
"""
Tests for the ``recursetree`` template filter.
"""
fixtures = ["categories.json"]
template = re.sub(
r"(?m)^[\s]+",
"",
"""
{% load mptt_tags %}
<ul>
{% recursetree nodes %}
<li>
{{ node.name }}
{% if not node.is_leaf_node %}
<ul class="children">
{{ children }}
</ul>
{% endif %}
</li>
{% endrecursetree %}
</ul>
""",
)
def test_leaf_html(self):
html = (
Template(self.template)
.render(
Context(
{
"nodes": Category.objects.filter(pk=10),
}
)
)
.replace("\n", "")
)
self.assertEqual(html, "<ul><li>Hardware & Accessories</li></ul>")
def test_nonleaf_html(self):
qs = Category.objects.get(pk=8).get_descendants(include_self=True)
html = (
Template(self.template)
.render(
Context(
{
"nodes": qs,
}
)
)
.replace("\n", "")
)
self.assertEqual(
html,
(
'<ul><li>PlayStation 3<ul class="children">'
"<li>Games</li><li>Hardware & Accessories</li></ul></li></ul>"
),
)
def test_parsing_fail(self):
self.assertRaises(
TemplateSyntaxError,
Template,
"{% load mptt_tags %}{% recursetree %}{% endrecursetree %}",
)
def test_cached_ancestors(self):
template = Template(
"""
{% load mptt_tags %}
{% recursetree nodes %}
{{ node.get_ancestors|join:" > " }} {{ node.name }}
{% if not node.is_leaf_node %}
{{ children }}
{% endif %}
{% endrecursetree %}
"""
)
with self.assertNumQueries(1):
qs = Category.objects.all()
template.render(Context({"nodes": qs}))
class TreeInfoTestCase(TreeTestCase):
fixtures = ["genres.json"]
template = re.sub(
r"(?m)^[\s]+",
"",
"""
{% load mptt_tags %}
{% for node, structure in nodes|tree_info %}
{% if structure.new_level %}<ul><li>{% else %}</li><li>{% endif %}
{{ node.pk }}
{% for level in structure.closed_levels %}</li></ul>{% endfor %}
{% endfor %}""",
)
template_with_ancestors = re.sub(
r"(?m)^[\s]+",
"",
"""
{% load mptt_tags %}
{% for node, structure in nodes|tree_info:"ancestors" %}
{% if structure.new_level %}<ul><li>{% else %}</li><li>{% endif %}
{{ node.pk }}
{% for ancestor in structure.ancestors %}
{% if forloop.first %}A:{% endif %}
{{ ancestor }}{% if not forloop.last %},{% endif %}
{% endfor %}
{% for level in structure.closed_levels %}</li></ul>{% endfor %}
{% endfor %}""",
)
def test_tree_info_html(self):
html = (
Template(self.template)
.render(
Context(
{
"nodes": Genre.objects.all(),
}
)
)
.replace("\n", "")
)
self.assertEqual(
html,
"<ul><li>1<ul><li>2<ul><li>3</li><li>4</li><li>5</li></ul></li>"
"<li>6<ul><li>7</li><li>8</li></ul></li></ul></li><li>9<ul>"
"<li>10</li><li>11</li></ul></li></ul>",
)
html = (
Template(self.template)
.render(
Context(
{
"nodes": Genre.objects.filter(
**{
"%s__gte" % Genre._mptt_meta.level_attr: 1,
"%s__lte" % Genre._mptt_meta.level_attr: 2,
}
),
}
)
)
.replace("\n", "")
)
self.assertEqual(
html,
"<ul><li>2<ul><li>3</li><li>4</li><li>5</li></ul></li><li>6<ul>"
"<li>7</li><li>8</li></ul></li><li>10</li><li>11</li></ul>",
)
html = (
Template(self.template_with_ancestors)
.render(
Context(
{
"nodes": Genre.objects.filter(
**{
"%s__gte" % Genre._mptt_meta.level_attr: 1,
"%s__lte" % Genre._mptt_meta.level_attr: 2,
}
),
}
)
)
.replace("\n", "")
)
self.assertEqual(
html,
"<ul><li>2<ul><li>3A:Platformer</li><li>4A:Platformer</li>"
"<li>5A:Platformer</li></ul></li><li>6<ul><li>7A:Shootemup</li>"
"<li>8A:Shootemup</li></ul></li><li>10</li><li>11</li></ul>",
)
class FullTreeTestCase(TreeTestCase):
fixtures = ["genres.json"]
template = re.sub(
r"(?m)^[\s]+",
"",
"""
{% load mptt_tags %}
{% full_tree_for_model myapp.Genre as tree %}
{% for node in tree %}{{ node.pk }},{% endfor %}
""",
)
def test_full_tree_html(self):
html = Template(self.template).render(Context({})).replace("\n", "")
self.assertEqual(html, "1,2,3,4,5,6,7,8,9,10,11,")
class DrilldownTreeTestCase(TreeTestCase):
fixtures = ["genres.json"]
template = re.sub(
r"(?m)^[\s]+",
"",
"""
{% load mptt_tags %}
{% drilldown_tree_for_node node as tree count myapp.Game.genre in game_count %}
{% for n in tree %}
{% if n == node %}[{% endif %}
{{ n.pk }}:{{ n.game_count }}
{% if n == node %}]{% endif %}{% if not forloop.last %},{% endif %}
{% endfor %}
""",
)
def render_for_node(self, pk, cumulative=False, m2m=False, all_descendants=False):
template = self.template
if all_descendants:
template = template.replace(
" count myapp.Game.genre in game_count ", " all_descendants "
)
if cumulative:
template = template.replace(" count ", " cumulative count ")
if m2m:
template = template.replace("Game.genre", "Game.genres_m2m")
return (
Template(template)
.render(
Context(
{
"node": Genre.objects.get(pk=pk),
}
)
)
.replace("\n", "")
)
def test_drilldown_html(self):
for idx, genre in enumerate(Genre.objects.all()):
for i in range(idx):
game = genre.game_set.create(name="Game %s" % i)
genre.games_m2m.add(game)
self.assertEqual(self.render_for_node(1), "[1:],2:1,6:5")
self.assertEqual(self.render_for_node(2), "1:,[2:],3:2,4:3,5:4")
self.assertEqual(self.render_for_node(1, cumulative=True), "[1:],2:10,6:18")
self.assertEqual(
self.render_for_node(2, cumulative=True), "1:,[2:],3:2,4:3,5:4"
)
self.assertEqual(self.render_for_node(1, m2m=True), "[1:],2:1,6:5")
self.assertEqual(self.render_for_node(2, m2m=True), "1:,[2:],3:2,4:3,5:4")
self.assertEqual(
self.render_for_node(1, cumulative=True, m2m=True), "[1:],2:10,6:18"
)
self.assertEqual(
self.render_for_node(2, cumulative=True, m2m=True), "1:,[2:],3:2,4:3,5:4"
)
self.assertEqual(
self.render_for_node(1, all_descendants=True), "[1:],2:,3:,4:,5:,6:,7:,8:"
)
self.assertEqual(
self.render_for_node(2, all_descendants=True), "1:,[2:],3:,4:,5:"
)
class TestAutoNowDateFieldModel(TreeTestCase):
# https://github.com/django-mptt/django-mptt/issues/175
def test_save_auto_now_date_field_model(self):
a = AutoNowDateFieldModel()
a.save()
class RegisteredRemoteModel(TreeTestCase):
def test_save_registered_model(self):
g1 = Group.objects.create(name="group 1")
g1.save()
class TestAltersData(TreeTestCase):
def test_alters_data(self):
node = Node()
output = Template("{{ node.save }}").render(
Context(
{
"node": node,
}
)
)
self.assertEqual(output, "")
self.assertEqual(node.pk, None)
node.save()
self.assertNotEqual(node.pk, None)
output = Template("{{ node.delete }}").render(
Context(
{
"node": node,
}
)
)
self.assertEqual(node, Node.objects.get(pk=node.pk))
class TestDebugInfo(TreeTestCase):
fixtures = ["categories.json"]
def test_debug_info(self):
with io.StringIO() as out:
print_debug_info(Category.objects.all(), file=out)
output = out.getvalue()
self.assertIn("1,0,,1,1,20", output)
def test_debug_info_with_non_ascii_representations(self):
Category.objects.create(name="El niño")
with io.StringIO() as out:
print_debug_info(Category.objects.all(), file=out)
output = out.getvalue()
self.assertIn("El niño", output)
class AdminBatch(TreeTestCase):
fixtures = ["categories.json"]
def test_changelist(self):
user = User.objects.create_superuser("admin", "[email protected]", "p")
self.client.login(username=user.username, password="p")
response = self.client.get("/admin/myapp/category/")
self.assertContains(response, 'name="_selected_action"', 10)
mptt_opts = Category._mptt_meta
self.assertSequenceEqual(
response.context["cl"].result_list.query.order_by[:2],
[mptt_opts.tree_id_attr, mptt_opts.left_attr],
)
data = {
"action": "delete_selected",
"_selected_action": ["5", "8", "9"],
}
response = self.client.post("/admin/myapp/category/", data)
self.assertRegex(response.rendered_content, r'value="Yes, I(\'|’)m sure"')
data["post"] = "yes"
response = self.client.post("/admin/myapp/category/", data)
self.assertRedirects(response, "/admin/myapp/category/")
self.assertEqual(Category.objects.count(), 4)
# Batch deletion has not clobbered MPTT values, because our method
# delete_selected_tree has been used.
self.assertTreeEqual(
Category.objects.all(),
"""
1 - 1 0 1 8
2 1 1 1 2 7
3 2 1 2 3 4
4 2 1 2 5 6
""",
)
class TestUnsaved(TreeTestCase):
def test_unsaved(self):
for method in [
"get_ancestors",
"get_family",
"get_children",
"get_descendants",
"get_leafnodes",
"get_next_sibling",
"get_previous_sibling",
"get_root",
"get_siblings",
]:
self.assertRaisesRegex(
ValueError,
"Cannot call %s on unsaved Genre instances" % method,
getattr(Genre(), method),
)
class QuerySetTests(TreeTestCase):
fixtures = ["categories.json"]
def test_get_ancestors(self):
self.assertEqual(
[
c.pk
for c in Category.objects.get(name="Nintendo Wii").get_ancestors(
include_self=False
)
],
[
c.pk
for c in Category.objects.filter(name="Nintendo Wii").get_ancestors(
include_self=False
)
],
)
self.assertEqual(
[
c.pk
for c in Category.objects.get(name="Nintendo Wii").get_ancestors(
include_self=True
)
],
[
c.pk
for c in Category.objects.filter(name="Nintendo Wii").get_ancestors(
include_self=True
)
],
)
def test_get_descendants(self):
self.assertEqual(
[
c.pk
for c in Category.objects.get(name="Nintendo Wii").get_descendants(
include_self=False
)
],
[
c.pk
for c in Category.objects.filter(name="Nintendo Wii").get_descendants(
include_self=False
)
],
)
self.assertEqual(
[
c.pk
for c in Category.objects.get(name="Nintendo Wii").get_descendants(
include_self=True
)
],
[
c.pk
for c in Category.objects.filter(name="Nintendo Wii").get_descendants(
include_self=True
)
],
)
def test_as_manager(self):
self.assertTrue(issubclass(TreeQuerySet.as_manager().__class__, TreeManager))
class TreeManagerTestCase(TreeTestCase):
fixtures = ["categories.json", "items.json", "subitems.json"]
def test_add_related_count_with_fk_to_natural_key(self):
# Regression test for #284
queryset = Category.objects.filter(name="Xbox 360").order_by("id")
# Test using FK that doesn't point to a primary key
for c in Category.objects.add_related_count(
queryset, Item, "category_fk", "item_count", cumulative=False
):
self.assertEqual(c.item_count, c.items_by_pk.count())
# Also works when using the FK that *does* point to a primary key
for c in Category.objects.add_related_count(
queryset, Item, "category_pk", "item_count", cumulative=False
):
self.assertEqual(c.item_count, c.items_by_pk.count())
def test_add_related_count_multistep(self):
queryset = Category.objects.filter(name="Xbox 360").order_by("id")
topqueryset = Category.objects.filter(name="PC & Video Games").order_by("id")
# Test using FK that doesn't point to a primary key
for c in Category.objects.add_related_count(
queryset, SubItem, "item__category_fk", "subitem_count", cumulative=False
):
self.assertEqual(c.subitem_count, 1)
for topc in Category.objects.add_related_count(
topqueryset, SubItem, "item__category_fk", "subitem_count", cumulative=False
):
self.assertEqual(topc.subitem_count, 1)
# Also works when using the FK that *does* point to a primary key
for c in Category.objects.add_related_count(
queryset, SubItem, "item__category_pk", "subitem_count", cumulative=False
):
self.assertEqual(c.subitem_count, 1)
for topc in Category.objects.add_related_count(
topqueryset, SubItem, "item__category_pk", "subitem_count", cumulative=False
):
self.assertEqual(topc.subitem_count, 1)
# Test using FK that doesn't point to a primary key, cumulative
for c in Category.objects.add_related_count(
queryset, SubItem, "item__category_fk", "subitem_count", cumulative=True
):
self.assertEqual(c.subitem_count, 1)
for topc in Category.objects.add_related_count(
topqueryset, SubItem, "item__category_fk", "subitem_count", cumulative=True
):
self.assertEqual(topc.subitem_count, 2)
# Also works when using the FK that *does* point to a primary key, cumulative
for c in Category.objects.add_related_count(
queryset, SubItem, "item__category_pk", "subitem_count", cumulative=True
):
self.assertEqual(c.subitem_count, 1)
for topc in Category.objects.add_related_count(
topqueryset, SubItem, "item__category_pk", "subitem_count", cumulative=True
):
self.assertEqual(topc.subitem_count, 2)
def test_add_related_count_with_extra_filters(self):
""" Test that filtering by extra_filters works """
queryset = Category.objects.all()
# Test using FK that doesn't point to a primary key
for c in Category.objects.add_related_count(
queryset,
Item,
"category_fk",
"item_count",
cumulative=False,
extra_filters={"name": "Halo: Reach"},
):
if c.pk == 5:
self.assertEqual(c.item_count, 1)
else:
self.assertEqual(c.item_count, 0)
# Also works when using the FK that *does* point to a primary key
for c in Category.objects.add_related_count(
queryset,
Item,
"category_pk",
"item_count",
cumulative=False,
extra_filters={"name": "Halo: Reach"},
):
if c.pk == 5:
self.assertEqual(c.item_count, 1)
else:
self.assertEqual(c.item_count, 0)
# Test using FK that doesn't point to a primary key
for c in Category.objects.add_related_count(
queryset,
Item,
"category_fk",
"item_count",
cumulative=True,
extra_filters={"name": "Halo: Reach"},
):
if c.pk in (5, 1):
self.assertEqual(c.item_count, 1)
else:
self.assertEqual(c.item_count, 0)
# Also works when using the FK that *does* point to a primary key
for c in Category.objects.add_related_count(
queryset,
Item,
"category_pk",
"item_count",
cumulative=True,
extra_filters={"name": "Halo: Reach"},
):
if c.pk in (5, 1):
self.assertEqual(c.item_count, 1)
else:
self.assertEqual(c.item_count, 0)
class TestOrderedInsertionBFS(TreeTestCase):
def test_insert_ordered_DFS_backwards_root_nodes(self):
rock = OrderedInsertion.objects.create(name="Rock")
OrderedInsertion.objects.create(name="Led Zeppelin", parent=rock)
OrderedInsertion.objects.create(name="Classical")
self.assertTreeEqual(
OrderedInsertion.objects.all(),
"""
3 - 1 0 1 2
1 - 2 0 1 4
2 1 2 1 2 3
""",
)
def test_insert_ordered_BFS_backwards_root_nodes(self):
rock = OrderedInsertion.objects.create(name="Rock")
self.assertTreeEqual(
OrderedInsertion.objects.all(),
"""
1 - 1 0 1 2
""",
)
OrderedInsertion.objects.create(name="Classical")
self.assertTreeEqual(
OrderedInsertion.objects.all(),
"""
2 - 1 0 1 2
1 - 2 0 1 2
""",
)
# This tends to fail if it uses `rock.tree_id`, which is 1, although
# in the database Rock's tree_id has been updated to 2.
OrderedInsertion.objects.create(name="Led Zeppelin", parent=rock)
self.assertTreeEqual(
OrderedInsertion.objects.all(),
"""
2 - 1 0 1 2
1 - 2 0 1 4
3 1 2 1 2 3
""",
)
def test_insert_ordered_DFS_backwards_nonroot_nodes(self):
music = OrderedInsertion.objects.create(name="music")
rock = OrderedInsertion.objects.create(name="Rock", parent=music)
OrderedInsertion.objects.create(name="Led Zeppelin", parent=rock)
OrderedInsertion.objects.create(name="Classical", parent=music)
self.assertTreeEqual(
OrderedInsertion.objects.all(),
"""
1 - 1 0 1 8
4 1 1 1 2 3
2 1 1 1 4 7
3 2 1 2 5 6
""",
)
def test_insert_ordered_BFS_backwards_nonroot_nodes(self):
music = OrderedInsertion.objects.create(name="music")
rock = OrderedInsertion.objects.create(name="Rock", parent=music)
OrderedInsertion.objects.create(name="Classical", parent=music)
self.assertTreeEqual(
OrderedInsertion.objects.all(),
"""
1 - 1 0 1 6
3 1 1 1 2 3
2 1 1 1 4 5
""",
)
OrderedInsertion.objects.create(name="Led Zeppelin", parent=rock)
self.assertTreeEqual(
OrderedInsertion.objects.all(),
"""
1 - 1 0 1 8
3 1 1 1 2 3
2 1 1 1 4 7
4 2 1 2 5 6
""",
)
class CacheChildrenTestCase(TreeTestCase):
"""
Tests that the queryset function `get_cached_trees` results in a minimum
number of database queries.
"""
fixtures = ["genres.json"]
def test_genre_iter(self):
"""
Test a query with two root nodes.
"""
with self.assertNumQueries(1):
root_nodes = Genre.objects.all().get_cached_trees()
# `get_cached_trees` should only return the root nodes
self.assertEqual(len(root_nodes), 2)
# Getting the children of each node should not result in db hits.
with self.assertNumQueries(0):
for genre in root_nodes:
self.assertIsInstance(genre, Genre)
for child in genre.get_children():
self.assertIsInstance(child, Genre)
for child2 in child.get_children():
self.assertIsInstance(child2, Genre)
def test_hide_nodes(self):
"""
Test that caching a tree with missing nodes works
"""
root = Category.objects.create(name="Root", visible=False)
child = Category.objects.create(name="Child", parent=root)
root2 = Category.objects.create(name="Root2")
self.assertEqual(list(Category.objects.all().get_cached_trees()), [root, root2])
self.assertEqual(
list(Category.objects.filter(visible=True).get_cached_trees()), [child, root2]
)
@unittest.skipUnless(
mock_signal_receiver, "Signals tests require mock_django installed"
)
class Signals(TestCase):
fixtures = ["categories.json"]
def setUp(self):
self.signal = node_moved
self.wii = Category.objects.get(pk=2)
self.ps3 = Category.objects.get(pk=8)
def test_signal_should_not_be_sent_when_parent_hasnt_changed(self):
with mock_signal_receiver(self.signal, sender=Category) as receiver:
self.wii.name = "Woo"
self.wii.save()
self.assertEqual(receiver.call_count, 0)
def test_signal_should_not_be_sent_when_model_created(self):
with mock_signal_receiver(self.signal, sender=Category) as receiver:
Category.objects.create(name="Descriptive name")
self.assertEqual(receiver.call_count, 0)
def test_move_by_using_move_to_should_send_signal(self):
with mock_signal_receiver(self.signal, sender=Category) as receiver:
self.wii.move_to(self.ps3)
receiver.assert_called_once_with(
instance=self.wii,
signal=self.signal,
target=self.ps3,
sender=Category,
position="first-child",
)
def test_move_by_changing_parent_should_send_signal(self):
"""position is not set when sent from save(). I assume it
would be the default (first-child) but didn't feel comfortable
setting it.
"""
with mock_signal_receiver(self.signal, sender=Category) as receiver:
self.wii.parent = self.ps3
self.wii.save()
receiver.assert_called_once_with(
instance=self.wii, signal=self.signal, target=self.ps3, sender=Category
)
class DeferredAttributeTests(TreeTestCase):
"""
Regression tests for #176 and #424
"""
def setUp(self):
OrderedInsertion.objects.create(name="a")
def test_deferred_order_insertion_by(self):
qs = OrderedInsertion.objects.defer("name")
with self.assertNumQueries(1):
nodes = list(qs)
with self.assertNumQueries(0):
self.assertTreeEqual(
nodes,
"""
1 - 1 0 1 2
""",
)
def test_deferred_cached_field_undeferred(self):
obj = OrderedInsertion.objects.defer("name").get()
self.assertEqual(obj._mptt_cached_fields["name"], DeferredAttribute)
with self.assertNumQueries(1):
obj.name
with self.assertNumQueries(3):
# does a node move, since the order_insertion_by field changed
obj.save()
self.assertEqual(obj._mptt_cached_fields["name"], "a")
def test_deferred_cached_field_change(self):
obj = OrderedInsertion.objects.defer("name").get()
self.assertEqual(obj._mptt_cached_fields["name"], DeferredAttribute)
with self.assertNumQueries(0):
obj.name = "b"
with self.assertNumQueries(3):
# does a node move, since the order_insertion_by field changed
obj.save()
self.assertEqual(obj._mptt_cached_fields["name"], "b")
class DraggableMPTTAdminTestCase(TreeTestCase):
def setUp(self):
self.user = User.objects.create_superuser("admin", "[email protected]", "p")
self.client.login(username=self.user.username, password="p")
def test_changelist(self):
p1 = Person.objects.create(name="Franz")
p2 = Person.objects.create(name="Fritz")
p3 = Person.objects.create(name="Hans")
self.assertNotEqual(p1._mpttfield("tree_id"), p2._mpttfield("tree_id"))
response = self.client.get("/admin/myapp/person/")
self.assertContains(response, 'class="drag-handle"', 3)
self.assertContains(response, 'style="text-indent:0px"', 3)
self.assertContains(
response,
'src="/static/mptt/draggable-admin.js" data-context="{"',
)
self.assertContains(response, '}" id="draggable-admin-context"></script>')
response = self.client.post(
"/admin/myapp/person/",
{
"cmd": "move_node",
"cut_item": p1.pk,
"pasted_on": p2.pk,
"position": "last-child",
},
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 200)
p1.refresh_from_db()
p2.refresh_from_db()
self.assertEqual(p1.parent, p2)
self.assertTreeEqual(
Person.objects.all(),
"""
2 - 2 0 1 4
1 2 2 1 2 3
3 - 3 0 1 2
""",
)
response = self.client.get("/admin/myapp/person/")
self.assertContains(response, 'style="text-indent:0px"', 2)
self.assertContains(response, 'style="text-indent:20px"', 1)
response = self.client.post(
"/admin/myapp/person/",
{
"cmd": "move_node",
"cut_item": p3.pk,
"pasted_on": p1.pk,
"position": "left",
},
HTTP_X_REQUESTED_WITH="XMLHttpRequest",
)
self.assertEqual(response.status_code, 200)
self.assertTreeEqual(
Person.objects.all(),
"""
2 - 2 0 1 6
3 2 2 1 2 3
1 2 2 1 4 5
""",
)
response = self.client.post(
"/admin/myapp/person/",
{
"action": "delete_selected",
"_selected_action": [1],
},
)
self.assertContains(response, "Are you sure?")
response = self.client.post(
"/admin/myapp/person/",
{
"action": "delete_selected",
"_selected_action": [1],
"post": "yes",
},
)
self.assertRedirects(response, "/admin/myapp/person/")
self.assertTreeEqual(
Person.objects.all(),
"""
2 - 2 0 1 4
3 2 2 1 2 3
""",
)
class BookAdmin(ModelAdmin):
list_filter = (
("fk", TreeRelatedFieldListFilter),
("m2m", TreeRelatedFieldListFilter),
)
ordering = ("id",)
class CategoryAdmin(ModelAdmin):
list_filter = (
("books_fk", TreeRelatedFieldListFilter),
("books_m2m", TreeRelatedFieldListFilter),
)
ordering = ("id",)
class ListFiltersTests(TestCase):
def setUp(self):
self.user = User.objects.create_superuser("admin", "[email protected]", "p")
self.request_factory = RequestFactory()
self.parent_category = Category.objects.create(name="Parent category")
self.child_category1 = Category.objects.create(
name="Child category1", parent=self.parent_category
)
self.child_category2 = Category.objects.create(
name="Child category2", parent=self.parent_category
)
self.simple_category = Category.objects.create(name="Simple category")
self.book1 = Book.objects.create(name="book1", fk=self.child_category1)
self.book2 = Book.objects.create(
name="book2", fk=self.parent_category, parent=self.book1
)
self.book3 = Book.objects.create(
name="book3", fk=self.simple_category, parent=self.book1
)
self.book4 = Book.objects.create(name="book4")
self.book1.m2m.add(self.child_category1)
self.book2.m2m.add(self.parent_category)
self.book3.m2m.add(self.simple_category)
def get_request(self, path, params=None):
req = self.request_factory.get(path, params)
req.user = self.user
return req
def get_changelist(self, request, model, modeladmin):
args = [
request,
model,
modeladmin.list_display,
modeladmin.list_display_links,
modeladmin.list_filter,
modeladmin.date_hierarchy,
modeladmin.search_fields,
modeladmin.list_select_related,
modeladmin.list_per_page,
modeladmin.list_max_show_all,
modeladmin.list_editable,
modeladmin,
]
if hasattr(modeladmin, "sortable_by"):
# New in Django 2.1
args.append(modeladmin.sortable_by)
return ChangeList(*args)
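# The custom `inhierarchy` lookup exercised below matches the selected
# node together with all of its descendants, so filtering on a parent
# category is expected to return books attached anywhere in that subtree.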
def test_treerelatedfieldlistfilter_foreignkey(self):
modeladmin = BookAdmin(Book, site)
request = self.get_request("/")
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure that all categories are present in the referencing model's list filter
filterspec = changelist.get_filters(request)[0][0]
expected = [
(
self.parent_category.pk,
self.parent_category.name,
' style="padding-left:0px"',
),
(
self.child_category1.pk,
self.child_category1.name,
' style="padding-left:10px"',
),
(
self.child_category2.pk,
self.child_category2.name,
' style="padding-left:10px"',
),
(
self.simple_category.pk,
self.simple_category.name,
' style="padding-left:0px"',
),
]
self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected))
request = self.get_request("/", {"fk__isnull": "True"})
changelist = self.get_changelist(request, Book, modeladmin)
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.book4])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][0]
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]["selected"], True)
self.assertEqual(choices[-1]["query_string"], "?fk__isnull=True")
# Make sure child's categories books included
request = self.get_request(
"/", {"fk__id__inhierarchy": self.parent_category.pk}
)
changelist = self.get_changelist(request, Book, modeladmin)
queryset = changelist.get_queryset(request)
self.assertEqual((list(queryset)), [self.book1, self.book2])
# Make sure filter for child category works as expected
request = self.get_request(
"/", {"fk__id__inhierarchy": self.child_category1.pk}
)
changelist = self.get_changelist(request, Book, modeladmin)
queryset = changelist.get_queryset(request)
self.assertEqual((list(queryset)), [self.book1])
# Make sure filter for empty category works as expected
request = self.get_request(
"/", {"fk__id__inhierarchy": self.child_category2.pk}
)
changelist = self.get_changelist(request, Book, modeladmin)
queryset = changelist.get_queryset(request)
self.assertEqual(queryset.count(), 0)
# Make sure filter for simple category with no hierarchy works as expected
request = self.get_request(
"/", {"fk__id__inhierarchy": self.simple_category.pk}
)
changelist = self.get_changelist(request, Book, modeladmin)
queryset = changelist.get_queryset(request)
self.assertEqual((list(queryset)), [self.book3])
def test_treerelatedfieldlistfilter_manytomany(self):
modeladmin = BookAdmin(Book, site)
request = self.get_request("/")
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure that all categories are present in the referencing model's list filter
filterspec = changelist.get_filters(request)[0][1]
expected = [
(
self.parent_category.pk,
self.parent_category.name,
' style="padding-left:0px"',
),
(
self.child_category1.pk,
self.child_category1.name,
' style="padding-left:10px"',
),
(
self.child_category2.pk,
self.child_category2.name,
' style="padding-left:10px"',
),
(
self.simple_category.pk,
self.simple_category.name,
' style="padding-left:0px"',
),
]
self.assertEqual(sorted(filterspec.lookup_choices), sorted(expected))
request = self.get_request("/", {"m2m__isnull": "True"})
changelist = self.get_changelist(request, Book, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.book4])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][1]
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]["selected"], True)
self.assertEqual(choices[-1]["query_string"], "?m2m__isnull=True")
# Make sure child's categories books included
request = self.get_request(
"/", {"m2m__id__inhierarchy": self.parent_category.pk}
)
changelist = self.get_changelist(request, Book, modeladmin)
queryset = changelist.get_queryset(request)
self.assertEqual((list(queryset)), [self.book1, self.book2])
# Make sure filter for child category works as expected
request = self.get_request(
"/", {"m2m__id__inhierarchy": self.child_category1.pk}
)
changelist = self.get_changelist(request, Book, modeladmin)
queryset = changelist.get_queryset(request)
self.assertEqual((list(queryset)), [self.book1])
# Make sure filter for empty category works as expected
request = self.get_request(
"/", {"fk__id__inhierarchy": self.child_category2.pk}
)
changelist = self.get_changelist(request, Book, modeladmin)
queryset = changelist.get_queryset(request)
self.assertEqual(queryset.count(), 0)
# Make sure filter for simple category with no hierarchy works as expected
request = self.get_request(
"/", {"m2m__id__inhierarchy": self.simple_category.pk}
)
changelist = self.get_changelist(request, Book, modeladmin)
queryset = changelist.get_queryset(request)
self.assertEqual((list(queryset)), [self.book3])
def test_treerelatedfieldlistfilter_reverse_relationships(self):
modeladmin = CategoryAdmin(Category, site)
# FK relationship -----
request = self.get_request("/", {"books_fk__isnull": "True"})
changelist = self.get_changelist(request, Category, modeladmin)
# Make sure the correct queryset is returned
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.child_category2])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][0]
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]["selected"], True)
self.assertEqual(choices[-1]["query_string"], "?books_fk__isnull=True")
# Make sure child's books categories included
request = self.get_request("/", {"books_fk__id__inhierarchy": self.book1.pk})
changelist = self.get_changelist(request, Category, modeladmin)
queryset = changelist.get_queryset(request)
self.assertEqual(
(list(queryset)),
[self.parent_category, self.child_category1, self.simple_category],
)
# Make sure filter for child book works as expected
request = self.get_request("/", {"books_fk__id__inhierarchy": self.book2.pk})
changelist = self.get_changelist(request, Category, modeladmin)
queryset = changelist.get_queryset(request)
self.assertEqual((list(queryset)), [self.parent_category])
# Make sure filter for book with no category works as expected
request = self.get_request("/", {"books_fk__id__inhierarchy": self.book4.pk})
changelist = self.get_changelist(request, Category, modeladmin)
queryset = changelist.get_queryset(request)
self.assertEqual(queryset.count(), 0)
# M2M relationship -----
request = self.get_request("/", {"books_m2m__isnull": "True"})
changelist = self.get_changelist(request, Category, modeladmin)
queryset = changelist.get_queryset(request)
self.assertEqual(list(queryset), [self.child_category2])
# Make sure the last choice is None and is selected
filterspec = changelist.get_filters(request)[0][1]
choices = list(filterspec.choices(changelist))
self.assertEqual(choices[-1]["selected"], True)
self.assertEqual(choices[-1]["query_string"], "?books_m2m__isnull=True")
# Make sure child's books categories included
request = self.get_request("/", {"books_m2m__id__inhierarchy": self.book1.pk})
changelist = self.get_changelist(request, Category, modeladmin)
queryset = changelist.get_queryset(request)
self.assertEqual(
(list(queryset)),
[self.parent_category, self.child_category1, self.simple_category],
)
# Make sure filter for child book works as expected
request = self.get_request("/", {"books_m2m__id__inhierarchy": self.book2.pk})
changelist = self.get_changelist(request, Category, modeladmin)
queryset = changelist.get_queryset(request)
self.assertEqual((list(queryset)), [self.parent_category])
# Make sure filter for book with no category works as expected
request = self.get_request("/", {"books_m2m__id__inhierarchy": self.book4.pk})
changelist = self.get_changelist(request, Category, modeladmin)
queryset = changelist.get_queryset(request)
self.assertEqual(queryset.count(), 0)
class UUIDPrimaryKey(TreeTestCase):
def test_save_uuid_model(self):
n1 = UUIDNode.objects.create(name="node")
n2 = UUIDNode.objects.create(name="sub_node", parent=n1)
self.assertEqual(n1.name, "node")
self.assertEqual(n1.tree_id, n2.tree_id)
self.assertEqual(n2.parent, n1)
def test_move_uuid_node(self):
n1 = UUIDNode.objects.create(name="n1")
n2 = UUIDNode.objects.create(name="n2", parent=n1)
n3 = UUIDNode.objects.create(name="n3", parent=n1)
self.assertEqual(list(n1.get_children()), [n2, n3])
n3.move_to(n2, "left")
self.assertEqual(list(n1.get_children()), [n3, n2])
def test_move_root_node(self):
root1 = UUIDNode.objects.create(name="n1")
child = UUIDNode.objects.create(name="n2", parent=root1)
root2 = UUIDNode.objects.create(name="n3")
self.assertEqual(list(root1.get_children()), [child])
root2.move_to(child, "left")
self.assertEqual(list(root1.get_children()), [root2, child])
def test_move_child_node(self):
root1 = UUIDNode.objects.create(name="n1")
child1 = UUIDNode.objects.create(name="n2", parent=root1)
root2 = UUIDNode.objects.create(name="n3")
child2 = UUIDNode.objects.create(name="n4", parent=root2)
self.assertEqual(list(root1.get_children()), [child1])
child2.move_to(child1, "left")
self.assertEqual(list(root1.get_children()), [child2, child1])
class DirectParentAssignment(TreeTestCase):
def test_assignment(self):
"""Regression test for #428"""
n1 = Node.objects.create()
n2 = Node.objects.create()
n1.parent_id = n2.id
n1.save()
class MovingNodeWithUniqueConstraint(TreeTestCase):
def test_unique_together_move_to_same_parent_change_code(self):
"""Regression test for #466 1"""
UniqueTogetherModel.objects.all().delete()
a = UniqueTogetherModel.objects.create(code="a", parent=None)
b = UniqueTogetherModel.objects.create(code="b", parent=None)
a1 = UniqueTogetherModel.objects.create(code="1", parent=a)
b1 = UniqueTogetherModel.objects.create(code="1", parent=b)
b1.parent, b1.code = a, "2" # b1 -> a2
b1.save()
self.assertTreeEqual(
UniqueTogetherModel.objects.all(),
"""
1 - 1 0 1 6
3 1 1 1 2 3
4 1 1 1 4 5
2 - 2 0 1 2
""",
)
def test_unique_together_move_to_same_code_change_parent(self):
"""Regression test for #466 1"""
UniqueTogetherModel.objects.all().delete()
a = UniqueTogetherModel.objects.create(code="a", parent=None)
b = UniqueTogetherModel.objects.create(code="b", parent=None)
a1 = UniqueTogetherModel.objects.create(code="1", parent=a)
a2 = UniqueTogetherModel.objects.create(code="2", parent=a)
a2.parent, a2.code = b, "1" # a2 -> b1
a2.save()
self.assertTreeEqual(
UniqueTogetherModel.objects.all(),
"""
1 - 1 0 1 4
3 1 1 1 2 3
2 - 2 0 1 4
4 2 2 1 2 3
""",
)
class NullableOrderedInsertion(TreeTestCase):
def test_nullable_ordered_insertion(self):
genreA = NullableOrderedInsertionModel.objects.create(name="A", parent=None)
genreA1 = NullableOrderedInsertionModel.objects.create(name="A1", parent=genreA)
genreAnone = NullableOrderedInsertionModel.objects.create(
name=None, parent=genreA
)
self.assertTreeEqual(
NullableOrderedInsertionModel.objects.all(),
"""
1 - 1 0 1 6
3 1 1 1 2 3
2 1 1 1 4 5
""",
)
def test_nullable_ordered_insertion_desc(self):
genreA = NullableDescOrderedInsertionModel.objects.create(name="A", parent=None)
genreA1 = NullableDescOrderedInsertionModel.objects.create(
name="A1", parent=genreA
)
genreAnone = NullableDescOrderedInsertionModel.objects.create(
name=None, parent=genreA
)
self.assertTreeEqual(
NullableDescOrderedInsertionModel.objects.all(),
"""
1 - 1 0 1 6
2 1 1 1 2 3
3 1 1 1 4 5
""",
)
class ModelMetaIndexes(TreeTestCase):
def test_no_index_set(self):
class SomeModel(MPTTModel):
class Meta:
app_label = "myapp"
tree_id_attr = getattr(SomeModel._mptt_meta, "tree_id_attr")
self.assertTrue(SomeModel._meta.get_field(tree_id_attr).db_index)
for key in ("right_attr", "left_attr", "level_attr"):
field_name = getattr(SomeModel._mptt_meta, key)
self.assertFalse(SomeModel._meta.get_field(field_name).db_index)
def test_index_together(self):
already_idx = [["tree_id", "lft"], ("tree_id", "lft")]
no_idx = [tuple(), list()]
some_idx = [["tree_id"], ("tree_id",), [["tree_id"]], (("tree_id",),)]
for idx, case in enumerate(already_idx + no_idx + some_idx):
class Meta:
index_together = case
app_label = "myapp"
# Use type() here and in test_index_together_different_attr over
# an explicit class X(MPTTModel):, as this throws a warning that
# re-registering models with the same name (which is what an explicit
# class does) could cause errors. Kind of... weird, but surprisingly
# effective.
SomeModel = type(
str("model_{}".format(idx)),
(MPTTModel,),
{
"Meta": Meta,
"__module__": __name__,
},
)
self.assertIn(("tree_id", "lft"), SomeModel._meta.index_together)
def test_index_together_different_attr(self):
already_idx = [["abc", "def"], ("abc", "def")]
no_idx = [tuple(), list()]
some_idx = [["abc"], ("abc",), [["abc"]], (("abc",),)]
for idx, case in enumerate(already_idx + no_idx + some_idx):
class MPTTMeta:
tree_id_attr = "abc"
left_attr = "def"
class Meta:
index_together = case
app_label = "myapp"
SomeModel = type(
str("model__different_attr_{}".format(idx)),
(MPTTModel,),
{"MPTTMeta": MPTTMeta, "Meta": Meta, "__module__": str(__name__)},
)
self.assertIn(("abc", "def"), SomeModel._meta.index_together)
class BulkLoadTests(TestCase):
fixtures = ["categories.json"]
def setUp(self):
self.games = {
"id": 11,
"name": "Role-playing",
"children": [
{
"id": 12,
"parent_id": 11,
"name": "Single-player",
},
{
"id": 13,
"parent_id": 11,
"name": "Multi-player",
},
],
}
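# build_tree_nodes() returns unsaved instances with nested-set values
# precomputed: each parent's (lft, rght) interval encloses the intervals
# of its children, which is what the assertions below check.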
def test_bulk_root(self):
data = {
"id": 11,
"name": "Enterprise Software",
"children": [
{
"id": 12,
"parent_id": 11,
"name": "Databases",
},
{
"id": 13,
"parent_id": 11,
"name": "Timekeeping",
},
],
}
records = Category.objects.build_tree_nodes(data)
self.assertEqual(len(records), 3)
self.assertEqual((records[0].lft, records[0].rght), (1, 6))
self.assertEqual((records[1].lft, records[1].rght), (2, 3))
self.assertEqual((records[2].lft, records[2].rght), (4, 5))
def test_bulk_last_child(self):
games = Category.objects.get(id=3)
records = Category.objects.build_tree_nodes(self.games, target=games)
self.assertEqual(len(records), 3)
for record in records:
self.assertEqual(record.tree_id, games.tree_id)
self.assertEqual((records[0].lft, records[0].rght), (4, 9))
self.assertEqual((records[1].lft, records[1].rght), (5, 6))
self.assertEqual((records[2].lft, records[2].rght), (7, 8))
games.refresh_from_db()
self.assertEqual((games.lft, games.rght), (3, 10))
def test_bulk_left(self):
games = Category.objects.get(id=3)
records = Category.objects.build_tree_nodes(
self.games, target=games, position="left"
)
self.assertEqual(len(records), 3)
for record in records:
self.assertEqual(record.tree_id, games.tree_id)
self.assertEqual((records[0].lft, records[0].rght), (3, 8))
self.assertEqual((records[1].lft, records[1].rght), (4, 5))
self.assertEqual((records[2].lft, records[2].rght), (6, 7))
games.refresh_from_db()
self.assertEqual((games.lft, games.rght), (9, 10))
| 32.750974 | 97 | 0.530647 |
7942bac5969c49790f808c39a3fd13a23d7be837 | 1,827 | py | Python | examples/features/mask_sensitive.py | vedkharche538/pandas-profiling | ba0a6df3a3dd126ab36045f5a69878f31627d450 | [
"MIT"
] | null | null | null | examples/features/mask_sensitive.py | vedkharche538/pandas-profiling | ba0a6df3a3dd126ab36045f5a69878f31627d450 | [
"MIT"
] | null | null | null | examples/features/mask_sensitive.py | vedkharche538/pandas-profiling | ba0a6df3a3dd126ab36045f5a69878f31627d450 | [
"MIT"
] | null | null | null | from pathlib import Path
import pandas as pd
from pandas_profiling import ProfileReport
from pandas_profiling.utils.cache import cache_file
if __name__ == "__main__":
file_name = cache_file("auto2.dta", "http://www.stata-press.com/data/r15/auto2.dta")
df = pd.read_stata(file_name)
# In case that a sample of the real data (cars) would disclose sensitive information, we can replace it with
# mock data. For illustrative purposes, we use data based on cars from a popular game series.
mock_data = pd.DataFrame(
{
"make": ["Blista Kanjo", "Sentinel", "Burrito"],
"price": [58000, 95000, 65000],
"mpg": [20, 30, 22],
"rep78": ["Average", "Excellent", "Fair"],
"headroom": [2.5, 3.0, 1.5],
"trunk": [8, 10, 4],
"weight": [1050, 1600, 2500],
"length": [165, 170, 180],
"turn": [40, 50, 32],
"displacement": [80, 100, 60],
"gear_ratio": [2.74, 3.51, 2.41],
"foreign": ["Domestic", "Domestic", "Foreign"],
}
)
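# `sensitive=True` asks pandas-profiling to suppress row-level output
# drawn from the real dataframe; the `sample` argument below substitutes
# the mock rows so the rendered report still shows an illustrative preview.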
report = ProfileReport(
df.sample(frac=0.25),
title="Masked data",
dataset=dict(
description="This profiling report was generated using a sample of 5% of the original dataset.",
copyright_holder="StataCorp LLC",
copyright_year="2020",
url="http://www.stata-press.com/data/r15/auto2.dta",
),
sensitive=True,
sample=dict(
name="Mock data sample",
data=mock_data,
caption="Disclaimer: this is synthetic data generated based on the format of the data in this table.",
),
vars=dict(cat=dict(unicode=True)),
interactions=None,
)
report.to_file(Path("masked_report.html"))
| 36.54 | 114 | 0.576355 |
7942bb1bad80012d5558602a969aa4128d6f41bf | 82,021 | py | Python | sympy/geometry/polygon.py | utkarshdeorah/sympy | dcdf59bbc6b13ddbc329431adf72fcee294b6389 | [
"BSD-3-Clause"
] | 1 | 2020-09-09T20:40:17.000Z | 2020-09-09T20:40:17.000Z | sympy/geometry/polygon.py | utkarshdeorah/sympy | dcdf59bbc6b13ddbc329431adf72fcee294b6389 | [
"BSD-3-Clause"
] | 14 | 2018-02-08T10:11:03.000Z | 2019-04-16T10:32:46.000Z | sympy/geometry/polygon.py | utkarshdeorah/sympy | dcdf59bbc6b13ddbc329431adf72fcee294b6389 | [
"BSD-3-Clause"
] | 1 | 2022-02-04T13:50:29.000Z | 2022-02-04T13:50:29.000Z | from sympy.core import Expr, S, oo, pi, sympify
from sympy.core.evalf import N
from sympy.core.sorting import default_sort_key, ordered
from sympy.core.symbol import _symbol, Dummy, symbols, Symbol
from sympy.functions.elementary.complexes import sign
from sympy.functions.elementary.piecewise import Piecewise
from sympy.functions.elementary.trigonometric import cos, sin, tan
from .ellipse import Circle
from .entity import GeometryEntity, GeometrySet
from .exceptions import GeometryError
from .line import Line, Segment, Ray
from .point import Point
from sympy.logic import And
from sympy.matrices import Matrix
from sympy.simplify.simplify import simplify
from sympy.solvers.solvers import solve
from sympy.utilities.iterables import has_dups, has_variety, uniq, rotate_left, least_rotation
from sympy.utilities.misc import as_int, func_name
from mpmath.libmp.libmpf import prec_to_dps
import warnings
class Polygon(GeometrySet):
"""A two-dimensional polygon.
A simple polygon in space. Can be constructed from a sequence of points
or from a center, radius, number of sides and rotation angle.
Parameters
==========
vertices : sequence of Points
Optional parameters
==========
n : If > 0, an n-sided RegularPolygon is created. See below.
Default value is 0.
Attributes
==========
area
angles
perimeter
vertices
centroid
sides
Raises
======
GeometryError
If all parameters are not Points.
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.Segment, Triangle
Notes
=====
Polygons are treated as closed paths rather than 2D areas so
some calculations can be negative or positive (e.g., area)
based on the orientation of the points.
Any consecutive identical points are reduced to a single point
and any points collinear and between two points will be removed
unless they are needed to define an explicit intersection (see examples).
A Triangle, Segment or Point will be returned when there are 3 or
fewer points provided.
Examples
========
>>> from sympy import Polygon, pi
>>> p1, p2, p3, p4, p5 = [(0, 0), (1, 0), (5, 1), (0, 1), (3, 0)]
>>> Polygon(p1, p2, p3, p4)
Polygon(Point2D(0, 0), Point2D(1, 0), Point2D(5, 1), Point2D(0, 1))
>>> Polygon(p1, p2)
Segment2D(Point2D(0, 0), Point2D(1, 0))
>>> Polygon(p1, p2, p5)
Segment2D(Point2D(0, 0), Point2D(3, 0))
The area of a polygon is calculated as positive when vertices are
traversed in a ccw direction. When the sides of a polygon cross the
area will have positive and negative contributions. The following
defines a Z shape where the bottom right connects back to the top
left.
>>> Polygon((0, 2), (2, 2), (0, 0), (2, 0)).area
0
When the keyword `n` is used to define the number of sides of the
Polygon then a RegularPolygon is created and the other arguments are
interpreted as center, radius and rotation. The unrotated RegularPolygon
will always have a vertex at Point(r, 0) where `r` is the radius of the
circle that circumscribes the RegularPolygon. Its method `spin` can be
used to increment that angle.
>>> p = Polygon((0,0), 1, n=3)
>>> p
RegularPolygon(Point2D(0, 0), 1, 3, 0)
>>> p.vertices[0]
Point2D(1, 0)
>>> p.args[0]
Point2D(0, 0)
>>> p.spin(pi/2)
>>> p.vertices[0]
Point2D(0, 1)
"""
def __new__(cls, *args, n=0, **kwargs):
if n:
args = list(args)
# return a virtual polygon with n sides
if len(args) == 2: # center, radius
args.append(n)
elif len(args) == 3: # center, radius, rotation
args.insert(2, n)
return RegularPolygon(*args, **kwargs)
vertices = [Point(a, dim=2, **kwargs) for a in args]
# remove consecutive duplicates
nodup = []
for p in vertices:
if nodup and p == nodup[-1]:
continue
nodup.append(p)
if len(nodup) > 1 and nodup[-1] == nodup[0]:
nodup.pop() # last point was same as first
# remove collinear points
i = -3
while i < len(nodup) - 3 and len(nodup) > 2:
a, b, c = nodup[i], nodup[i + 1], nodup[i + 2]
if Point.is_collinear(a, b, c):
nodup.pop(i + 1)
if a == c:
nodup.pop(i)
else:
i += 1
vertices = list(nodup)
if len(vertices) > 3:
return GeometryEntity.__new__(cls, *vertices, **kwargs)
elif len(vertices) == 3:
return Triangle(*vertices, **kwargs)
elif len(vertices) == 2:
return Segment(*vertices, **kwargs)
else:
return Point(*vertices, **kwargs)
@property
def area(self):
"""
The area of the polygon.
Notes
=====
The area calculation can be positive or negative based on the
orientation of the points. If any side of the polygon crosses
any other side, there will be areas having opposite signs.
See Also
========
sympy.geometry.ellipse.Ellipse.area
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.area
3
In the Z shaped polygon (with the lower right connecting back
to the upper left) the areas cancel out:
>>> Z = Polygon((0, 1), (1, 1), (0, 0), (1, 0))
>>> Z.area
0
In the M shaped polygon, areas do not cancel because no side
crosses any other (though there is a point of contact).
>>> M = Polygon((0, 0), (0, 1), (2, 0), (3, 1), (3, 0))
>>> M.area
-3/2
"""
area = 0
args = self.args
for i in range(len(args)):
x1, y1 = args[i - 1].args
x2, y2 = args[i].args
area += x1*y2 - x2*y1
return simplify(area) / 2
@staticmethod
def _isright(a, b, c):
"""Return True/False for cw/ccw orientation.
Examples
========
>>> from sympy import Point, Polygon
>>> a, b, c = [Point(i) for i in [(0, 0), (1, 1), (1, 0)]]
>>> Polygon._isright(a, b, c)
True
>>> Polygon._isright(a, c, b)
False
"""
ba = b - a
ca = c - a
t_area = simplify(ba.x*ca.y - ca.x*ba.y)
res = t_area.is_nonpositive
if res is None:
raise ValueError("Can't determine orientation")
return res
@property
def angles(self):
"""The internal angle at each vertex.
Returns
=======
angles : dict
A dictionary where each key is a vertex and each value is the
internal angle at that vertex. The vertices are represented as
Points.
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.LinearEntity.angle_between
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.angles[p1]
pi/2
>>> poly.angles[p2]
acos(-4*sqrt(17)/17)
"""
# Determine orientation of points
args = self.vertices
cw = self._isright(args[-1], args[0], args[1])
ret = {}
for i in range(len(args)):
a, b, c = args[i - 2], args[i - 1], args[i]
ang = Ray(b, a).angle_between(Ray(b, c))
if cw ^ self._isright(a, b, c):
ret[b] = 2*S.Pi - ang
else:
ret[b] = ang
return ret
@property
def ambient_dimension(self):
return self.vertices[0].ambient_dimension
@property
def perimeter(self):
"""The perimeter of the polygon.
Returns
=======
perimeter : number or Basic instance
See Also
========
sympy.geometry.line.Segment.length
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.perimeter
sqrt(17) + 7
"""
p = 0
args = self.vertices
for i in range(len(args)):
p += args[i - 1].distance(args[i])
return simplify(p)
@property
def vertices(self):
"""The vertices of the polygon.
Returns
=======
vertices : list of Points
Notes
=====
When iterating over the vertices, it is more efficient to index self
rather than to request the vertices and index them. Only use the
vertices when you want to process all of them at once. This is even
more important with RegularPolygons that calculate each vertex.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.vertices
[Point2D(0, 0), Point2D(1, 0), Point2D(5, 1), Point2D(0, 1)]
>>> poly.vertices[0]
Point2D(0, 0)
"""
return list(self.args)
@property
def centroid(self):
"""The centroid of the polygon.
Returns
=======
centroid : Point
See Also
========
sympy.geometry.point.Point, sympy.geometry.util.centroid
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.centroid
Point2D(31/18, 11/18)
"""
A = 1/(6*self.area)
cx, cy = 0, 0
args = self.args
for i in range(len(args)):
x1, y1 = args[i - 1].args
x2, y2 = args[i].args
v = x1*y2 - x2*y1
cx += v*(x1 + x2)
cy += v*(y1 + y2)
return Point(simplify(A*cx), simplify(A*cy))
def second_moment_of_area(self, point=None):
"""Returns the second moment and product moment of area of a two dimensional polygon.
Parameters
==========
point : Point, two-tuple of sympifyable objects, or None(default=None)
point is the point about which second moment of area is to be found.
If "point=None" it will be calculated about the axis passing through the
centroid of the polygon.
Returns
=======
I_xx, I_yy, I_xy : number or SymPy expression
I_xx, I_yy are second moment of area of a two dimensional polygon.
I_xy is product moment of area of a two dimensional polygon.
Examples
========
>>> from sympy import Polygon, symbols
>>> a, b = symbols('a, b')
>>> p1, p2, p3, p4, p5 = [(0, 0), (a, 0), (a, b), (0, b), (a/3, b/3)]
>>> rectangle = Polygon(p1, p2, p3, p4)
>>> rectangle.second_moment_of_area()
(a*b**3/12, a**3*b/12, 0)
>>> rectangle.second_moment_of_area(p5)
(a*b**3/9, a**3*b/9, a**2*b**2/36)
References
==========
.. [1] https://en.wikipedia.org/wiki/Second_moment_of_area
"""
I_xx, I_yy, I_xy = 0, 0, 0
args = self.vertices
for i in range(len(args)):
x1, y1 = args[i-1].args
x2, y2 = args[i].args
v = x1*y2 - x2*y1
I_xx += (y1**2 + y1*y2 + y2**2)*v
I_yy += (x1**2 + x1*x2 + x2**2)*v
I_xy += (x1*y2 + 2*x1*y1 + 2*x2*y2 + x2*y1)*v
A = self.area
c_x = self.centroid[0]
c_y = self.centroid[1]
# parallel axis theorem
I_xx_c = (I_xx/12) - (A*(c_y**2))
I_yy_c = (I_yy/12) - (A*(c_x**2))
I_xy_c = (I_xy/24) - (A*(c_x*c_y))
if point is None:
return I_xx_c, I_yy_c, I_xy_c
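# shift from the centroidal axes to the requested point, again by the
# parallel axis theorem: I = I_c + A*d**2 (and I_xy = I_xy_c + A*dx*dy)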
I_xx = (I_xx_c + A*((point[1]-c_y)**2))
I_yy = (I_yy_c + A*((point[0]-c_x)**2))
I_xy = (I_xy_c + A*((point[0]-c_x)*(point[1]-c_y)))
return I_xx, I_yy, I_xy
def first_moment_of_area(self, point=None):
"""
Returns the first moment of area of a two-dimensional polygon with
respect to a certain point of interest.
First moment of area is a measure of the distribution of the area
of a polygon in relation to an axis. The first moment of area of
the entire polygon about its own centroid is always zero. Therefore,
here it is calculated for an area, above or below a certain point
of interest, that makes up a smaller portion of the polygon. This
area is bounded by the point of interest and the extreme end
(top or bottom) of the polygon. The first moment for this area is
then determined about the centroidal axis of the initial polygon.
References
==========
.. [1] https://skyciv.com/docs/tutorials/section-tutorials/calculating-the-statical-or-first-moment-of-area-of-beam-sections/?cc=BMD
.. [2] https://mechanicalc.com/reference/cross-sections
Parameters
==========
point: Point, two-tuple of sympifyable objects, or None (default=None)
point is the point above or below which the area of interest lies
If ``point=None`` then the centroid acts as the point of interest.
Returns
=======
Q_x, Q_y: number or SymPy expressions
Q_x is the first moment of area about the x-axis
Q_y is the first moment of area about the y-axis
A negative sign indicates that the first moment of area is
determined for a section below (or left of) the centroidal axis
Examples
========
>>> from sympy import Point, Polygon
>>> a, b = 50, 10
>>> p1, p2, p3, p4 = [(0, b), (0, 0), (a, 0), (a, b)]
>>> p = Polygon(p1, p2, p3, p4)
>>> p.first_moment_of_area()
(625, 3125)
>>> p.first_moment_of_area(point=Point(30, 7))
(525, 3000)
"""
if point:
xc, yc = self.centroid
else:
point = self.centroid
xc, yc = point
h_line = Line(point, slope=0)
v_line = Line(point, slope=S.Infinity)
h_poly = self.cut_section(h_line)
v_poly = self.cut_section(v_line)
poly_1 = h_poly[0] if h_poly[0].area <= h_poly[1].area else h_poly[1]
poly_2 = v_poly[0] if v_poly[0].area <= v_poly[1].area else v_poly[1]
Q_x = (poly_1.centroid.y - yc)*poly_1.area
Q_y = (poly_2.centroid.x - xc)*poly_2.area
return Q_x, Q_y
def polar_second_moment_of_area(self):
"""Returns the polar modulus of a two-dimensional polygon
It is a constituent of the second moment of area, linked through
the perpendicular axis theorem. While the planar second moment of
area describes an object's resistance to deflection (bending) when
subjected to a force applied to a plane parallel to the central
axis, the polar second moment of area describes an object's
resistance to deflection when subjected to a moment applied in a
plane perpendicular to the object's central axis (i.e. parallel to
the cross-section).
Examples
========
>>> from sympy import Polygon, symbols
>>> a, b = symbols('a, b')
>>> rectangle = Polygon((0, 0), (a, 0), (a, b), (0, b))
>>> rectangle.polar_second_moment_of_area()
a**3*b/12 + a*b**3/12
References
==========
.. [1] https://en.wikipedia.org/wiki/Polar_moment_of_inertia
"""
second_moment = self.second_moment_of_area()
return second_moment[0] + second_moment[1]
def section_modulus(self, point=None):
"""Returns a tuple with the section modulus of a two-dimensional
polygon.
Section modulus is a geometric property of a polygon defined as the
ratio of second moment of area to the distance of the extreme end of
the polygon from the centroidal axis.
Parameters
==========
point : Point, two-tuple of sympifyable objects, or None(default=None)
point is the point at which section modulus is to be found.
If "point=None" it will be calculated for the point farthest from the
centroidal axis of the polygon.
Returns
=======
S_x, S_y: numbers or SymPy expressions
S_x is the section modulus with respect to the x-axis
S_y is the section modulus with respect to the y-axis
A negative sign indicates that the section modulus is
determined for a point below the centroidal axis
Examples
========
>>> from sympy import symbols, Polygon, Point
>>> a, b = symbols('a, b', positive=True)
>>> rectangle = Polygon((0, 0), (a, 0), (a, b), (0, b))
>>> rectangle.section_modulus()
(a*b**2/6, a**2*b/6)
>>> rectangle.section_modulus(Point(a/4, b/4))
(-a*b**2/3, -a**2*b/3)
References
==========
.. [1] https://en.wikipedia.org/wiki/Section_modulus
"""
x_c, y_c = self.centroid
if point is None:
# taking x and y as maximum distances from centroid
x_min, y_min, x_max, y_max = self.bounds
y = max(y_c - y_min, y_max - y_c)
x = max(x_c - x_min, x_max - x_c)
else:
# taking x and y as distances of the given point from the centroid
y = point.y - y_c
x = point.x - x_c
second_moment = self.second_moment_of_area()
S_x = second_moment[0]/y
S_y = second_moment[1]/x
return S_x, S_y
@property
def sides(self):
"""The directed line segments that form the sides of the polygon.
Returns
=======
sides : list of sides
Each side is a directed Segment.
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.Segment
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.sides
[Segment2D(Point2D(0, 0), Point2D(1, 0)),
Segment2D(Point2D(1, 0), Point2D(5, 1)),
Segment2D(Point2D(5, 1), Point2D(0, 1)), Segment2D(Point2D(0, 1), Point2D(0, 0))]
"""
res = []
args = self.vertices
for i in range(-len(args), 0):
res.append(Segment(args[i], args[i + 1]))
return res
@property
def bounds(self):
"""Return a tuple (xmin, ymin, xmax, ymax) representing the bounding
rectangle for the geometric figure.
"""
verts = self.vertices
xs = [p.x for p in verts]
ys = [p.y for p in verts]
return (min(xs), min(ys), max(xs), max(ys))
def is_convex(self):
"""Is the polygon convex?
A polygon is convex if all its interior angles are less than 180
degrees and there are no intersections between sides.
Returns
=======
is_convex : boolean
True if this polygon is convex, False otherwise.
See Also
========
sympy.geometry.util.convex_hull
Examples
========
>>> from sympy import Point, Polygon
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly = Polygon(p1, p2, p3, p4)
>>> poly.is_convex()
True
"""
# Determine orientation of points
args = self.vertices
cw = self._isright(args[-2], args[-1], args[0])
for i in range(1, len(args)):
if cw ^ self._isright(args[i - 2], args[i - 1], args[i]):
return False
# check for intersecting sides
sides = self.sides
for i, si in enumerate(sides):
pts = si.args
# exclude the sides connected to si
for j in range(1 if i == len(sides) - 1 else 0, i - 1):
sj = sides[j]
if sj.p1 not in pts and sj.p2 not in pts:
hit = si.intersection(sj)
if hit:
return False
return True
def encloses_point(self, p):
"""
Return True if p is enclosed by (is inside of) self.
Notes
=====
Being on the border of self is considered False.
Parameters
==========
p : Point
Returns
=======
encloses_point : True, False or None
See Also
========
sympy.geometry.point.Point, sympy.geometry.ellipse.Ellipse.encloses_point
Examples
========
>>> from sympy import Polygon, Point
>>> p = Polygon((0, 0), (4, 0), (4, 4))
>>> p.encloses_point(Point(2, 1))
True
>>> p.encloses_point(Point(2, 2))
False
>>> p.encloses_point(Point(5, 5))
False
References
==========
.. [1] http://paulbourke.net/geometry/polygonmesh/#insidepoly
"""
p = Point(p, dim=2)
if p in self.vertices or any(p in s for s in self.sides):
return False
# move to p, checking that the result is numeric
lit = []
for v in self.vertices:
lit.append(v - p) # the difference is simplified
if lit[-1].free_symbols:
return None
poly = Polygon(*lit)
# polygon closure is assumed in the following test but Polygon removes duplicate pts so
# the last point has to be added so all sides are computed. Using Polygon.sides is
# not good since Segments are unordered.
args = poly.args
indices = list(range(-len(args), 1))
if poly.is_convex():
orientation = None
for i in indices:
a = args[i]
b = args[i + 1]
test = ((-a.y)*(b.x - a.x) - (-a.x)*(b.y - a.y)).is_negative
if orientation is None:
orientation = test
elif test is not orientation:
return False
return True
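# non-convex case: ray casting. The vertices were translated by -p
# above, so cast a ray from the origin along the positive x-axis and
# toggle on every edge crossing; an odd count means p is inside.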
hit_odd = False
p1x, p1y = args[0].args
for i in indices[1:]:
p2x, p2y = args[i].args
if 0 > min(p1y, p2y):
if 0 <= max(p1y, p2y):
if 0 <= max(p1x, p2x):
if p1y != p2y:
xinters = (-p1y)*(p2x - p1x)/(p2y - p1y) + p1x
if p1x == p2x or 0 <= xinters:
hit_odd = not hit_odd
p1x, p1y = p2x, p2y
return hit_odd
def arbitrary_point(self, parameter='t'):
"""A parameterized point on the polygon.
The parameter, varying from 0 to 1, assigns points to the position on
the perimeter that is that fraction of the total perimeter. So the
point evaluated at t=1/2 would return the point from the first vertex
that is 1/2 way around the polygon.
Parameters
==========
parameter : str, optional
Default value is 't'.
Returns
=======
arbitrary_point : Point
Raises
======
ValueError
When `parameter` already appears in the Polygon's definition.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy import Polygon, Symbol
>>> t = Symbol('t', real=True)
>>> tri = Polygon((0, 0), (1, 0), (1, 1))
>>> p = tri.arbitrary_point('t')
>>> perimeter = tri.perimeter
>>> s1, s2 = [s.length for s in tri.sides[:2]]
>>> p.subs(t, (s1 + s2/2)/perimeter)
Point2D(1, 1/2)
"""
t = _symbol(parameter, real=True)
if t.name in (f.name for f in self.free_symbols):
raise ValueError('Symbol %s already appears in object and cannot be used as a parameter.' % t.name)
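# map t in [0, 1) onto the perimeter: each side owns a sub-interval
# proportional to its length, and t is rescaled to the side's own
# parameter before substitution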
sides = []
perimeter = self.perimeter
perim_fraction_start = 0
for s in self.sides:
side_perim_fraction = s.length/perimeter
perim_fraction_end = perim_fraction_start + side_perim_fraction
pt = s.arbitrary_point(parameter).subs(
t, (t - perim_fraction_start)/side_perim_fraction)
sides.append(
(pt, (And(perim_fraction_start <= t, t < perim_fraction_end))))
perim_fraction_start = perim_fraction_end
return Piecewise(*sides)
def parameter_value(self, other, t):
if not isinstance(other,GeometryEntity):
other = Point(other, dim=self.ambient_dimension)
if not isinstance(other,Point):
raise ValueError("other must be a point")
if other.free_symbols:
raise NotImplementedError('non-numeric coordinates')
unknown = False
T = Dummy('t', real=True)
p = self.arbitrary_point(T)
for pt, cond in p.args:
sol = solve(pt - other, T, dict=True)
if not sol:
continue
value = sol[0][T]
if simplify(cond.subs(T, value)) == True:
return {t: value}
unknown = True
if unknown:
raise ValueError("Given point may not be on %s" % func_name(self))
raise ValueError("Given point is not on %s" % func_name(self))
def plot_interval(self, parameter='t'):
"""The plot interval for the default geometric plot of the polygon.
Parameters
==========
parameter : str, optional
Default value is 't'.
Returns
=======
plot_interval : list (plot interval)
[parameter, lower_bound, upper_bound]
Examples
========
>>> from sympy import Polygon
>>> p = Polygon((0, 0), (1, 0), (1, 1))
>>> p.plot_interval()
[t, 0, 1]
"""
t = Symbol(parameter, real=True)
return [t, 0, 1]
def intersection(self, o):
"""The intersection of polygon and geometry entity.
The intersection may be empty and can contain individual Points and
complete Line Segments.
Parameters
==========
other: GeometryEntity
Returns
=======
intersection : list
The list of Segments and Points
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.Segment
Examples
========
>>> from sympy import Point, Polygon, Line
>>> p1, p2, p3, p4 = map(Point, [(0, 0), (1, 0), (5, 1), (0, 1)])
>>> poly1 = Polygon(p1, p2, p3, p4)
>>> p5, p6, p7 = map(Point, [(3, 2), (1, -1), (0, 2)])
>>> poly2 = Polygon(p5, p6, p7)
>>> poly1.intersection(poly2)
[Point2D(1/3, 1), Point2D(2/3, 0), Point2D(9/5, 1/5), Point2D(7/3, 1)]
>>> poly1.intersection(Line(p1, p2))
[Segment2D(Point2D(0, 0), Point2D(1, 0))]
>>> poly1.intersection(p1)
[Point2D(0, 0)]
"""
intersection_result = []
k = o.sides if isinstance(o, Polygon) else [o]
for side in self.sides:
for side1 in k:
intersection_result.extend(side.intersection(side1))
intersection_result = list(uniq(intersection_result))
points = [entity for entity in intersection_result if isinstance(entity, Point)]
segments = [entity for entity in intersection_result if isinstance(entity, Segment)]
if points and segments:
points_in_segments = list(uniq([point for point in points for segment in segments if point in segment]))
if points_in_segments:
for i in points_in_segments:
points.remove(i)
return list(ordered(segments + points))
else:
return list(ordered(intersection_result))
def cut_section(self, line):
"""
Returns a tuple of two polygon segments that lie above and below
the intersecting line respectively.
Parameters
==========
line: Line object of geometry module
line which cuts the Polygon. The part of the Polygon that lies
above and below this line is returned.
Returns
=======
upper_polygon, lower_polygon: Polygon objects or None
upper_polygon is the polygon that lies above the given line.
lower_polygon is the polygon that lies below the given line.
upper_polygon and lower polygon are ``None`` when no polygon
exists above the line or below the line.
Raises
======
ValueError: When the line does not intersect the polygon
Examples
========
>>> from sympy import Polygon, Line
>>> a, b = 20, 10
>>> p1, p2, p3, p4 = [(0, b), (0, 0), (a, 0), (a, b)]
>>> rectangle = Polygon(p1, p2, p3, p4)
>>> t = rectangle.cut_section(Line((0, 5), slope=0))
>>> t
(Polygon(Point2D(0, 10), Point2D(0, 5), Point2D(20, 5), Point2D(20, 10)),
Polygon(Point2D(0, 5), Point2D(0, 0), Point2D(20, 0), Point2D(20, 5)))
>>> upper_segment, lower_segment = t
>>> upper_segment.area
100
>>> upper_segment.centroid
Point2D(10, 15/2)
>>> lower_segment.centroid
Point2D(10, 5/2)
References
==========
.. [1] https://github.com/sympy/sympy/wiki/A-method-to-return-a-cut-section-of-any-polygon-geometry
"""
intersection_points = self.intersection(line)
if not intersection_points:
raise ValueError("This line does not intersect the polygon")
points = list(self.vertices)
points.append(points[0])
x, y = symbols('x, y', real=True, cls=Dummy)
eq = line.equation(x, y)
# considering the equation of the line to be a*x + b*y + c = 0
a = eq.coeff(x)
b = eq.coeff(y)
upper_vertices = []
lower_vertices = []
# prev is true when previous point is above the line
prev = True
prev_point = None
for point in points:
# when coefficient of y is 0, right side of the line is
# considered
compare = eq.subs({x: point.x, y: point.y})/b if b \
else eq.subs(x, point.x)/a
# if point lies above line
if compare > 0:
if not prev:
# if previous point lies below the line, the intersection
# point of the polygon edge and the line has to be included
edge = Line(point, prev_point)
new_point = edge.intersection(line)
upper_vertices.append(new_point[0])
lower_vertices.append(new_point[0])
upper_vertices.append(point)
prev = True
else:
if prev and prev_point:
edge = Line(point, prev_point)
new_point = edge.intersection(line)
upper_vertices.append(new_point[0])
lower_vertices.append(new_point[0])
lower_vertices.append(point)
prev = False
prev_point = point
upper_polygon, lower_polygon = None, None
if upper_vertices and isinstance(Polygon(*upper_vertices), Polygon):
upper_polygon = Polygon(*upper_vertices)
if lower_vertices and isinstance(Polygon(*lower_vertices), Polygon):
lower_polygon = Polygon(*lower_vertices)
return upper_polygon, lower_polygon
def distance(self, o):
"""
Returns the shortest distance between self and o.
If o is a point, then self does not need to be convex.
If o is another polygon self and o must be convex.
Examples
========
>>> from sympy import Point, Polygon, RegularPolygon
>>> p1, p2 = map(Point, [(0, 0), (7, 5)])
>>> poly = Polygon(*RegularPolygon(p1, 1, 3).vertices)
>>> poly.distance(p2)
sqrt(61)
"""
if isinstance(o, Point):
dist = oo
for side in self.sides:
current = side.distance(o)
if current == 0:
return S.Zero
elif current < dist:
dist = current
return dist
elif isinstance(o, Polygon) and self.is_convex() and o.is_convex():
return self._do_poly_distance(o)
raise NotImplementedError()
def _do_poly_distance(self, e2):
"""
Calculates the least distance between the exteriors of two
convex polygons e1 and e2. Does not check for the convexity
of the polygons as this is checked by Polygon.distance.
Notes
=====
- Prints a warning if the two polygons possibly intersect as the return
value will not be valid in such a case. For a more thorough test of
intersection use intersection().
See Also
========
sympy.geometry.point.Point.distance
Examples
========
>>> from sympy.geometry import Point, Polygon
>>> square = Polygon(Point(0, 0), Point(0, 1), Point(1, 1), Point(1, 0))
>>> triangle = Polygon(Point(1, 2), Point(2, 2), Point(2, 1))
>>> square._do_poly_distance(triangle)
sqrt(2)/2
Description of method used
==========================
Method:
[1] http://cgm.cs.mcgill.ca/~orm/mind2p.html
Uses rotating calipers:
[2] https://en.wikipedia.org/wiki/Rotating_calipers
and antipodal points:
[3] https://en.wikipedia.org/wiki/Antipodal_point
"""
e1 = self
'''Tests for a possible intersection between the polygons and outputs a warning'''
e1_center = e1.centroid
e2_center = e2.centroid
e1_max_radius = S.Zero
e2_max_radius = S.Zero
for vertex in e1.vertices:
r = Point.distance(e1_center, vertex)
if e1_max_radius < r:
e1_max_radius = r
for vertex in e2.vertices:
r = Point.distance(e2_center, vertex)
if e2_max_radius < r:
e2_max_radius = r
center_dist = Point.distance(e1_center, e2_center)
if center_dist <= e1_max_radius + e2_max_radius:
warnings.warn("Polygons may intersect producing erroneous output")
'''
Find the upper rightmost vertex of e1 and the lowest leftmost vertex of e2
'''
e1_ymax = Point(0, -oo)
e2_ymin = Point(0, oo)
for vertex in e1.vertices:
if vertex.y > e1_ymax.y or (vertex.y == e1_ymax.y and vertex.x > e1_ymax.x):
e1_ymax = vertex
for vertex in e2.vertices:
if vertex.y < e2_ymin.y or (vertex.y == e2_ymin.y and vertex.x < e2_ymin.x):
e2_ymin = vertex
min_dist = Point.distance(e1_ymax, e2_ymin)
'''
Produce a dictionary with vertices of e1 as the keys and, for each vertex, the points
to which the vertex is connected as its value. The same is then done for e2.
'''
e1_connections = {}
e2_connections = {}
for side in e1.sides:
if side.p1 in e1_connections:
e1_connections[side.p1].append(side.p2)
else:
e1_connections[side.p1] = [side.p2]
if side.p2 in e1_connections:
e1_connections[side.p2].append(side.p1)
else:
e1_connections[side.p2] = [side.p1]
for side in e2.sides:
if side.p1 in e2_connections:
e2_connections[side.p1].append(side.p2)
else:
e2_connections[side.p1] = [side.p2]
if side.p2 in e2_connections:
e2_connections[side.p2].append(side.p1)
else:
e2_connections[side.p2] = [side.p1]
e1_current = e1_ymax
e2_current = e2_ymin
support_line = Line(Point(S.Zero, S.Zero), Point(S.One, S.Zero))
'''
Determine which point in e1 and e2 will be selected after e2_ymin and e1_ymax,
this information combined with the above produced dictionaries determines the
path that will be taken around the polygons
'''
point1 = e1_connections[e1_ymax][0]
point2 = e1_connections[e1_ymax][1]
angle1 = support_line.angle_between(Line(e1_ymax, point1))
angle2 = support_line.angle_between(Line(e1_ymax, point2))
if angle1 < angle2:
e1_next = point1
elif angle2 < angle1:
e1_next = point2
elif Point.distance(e1_ymax, point1) > Point.distance(e1_ymax, point2):
e1_next = point2
else:
e1_next = point1
point1 = e2_connections[e2_ymin][0]
point2 = e2_connections[e2_ymin][1]
angle1 = support_line.angle_between(Line(e2_ymin, point1))
angle2 = support_line.angle_between(Line(e2_ymin, point2))
if angle1 > angle2:
e2_next = point1
elif angle2 > angle1:
e2_next = point2
elif Point.distance(e2_ymin, point1) > Point.distance(e2_ymin, point2):
e2_next = point2
else:
e2_next = point1
'''
Loop which determines the distance between anti-podal pairs and updates the
minimum distance accordingly. It repeats until it reaches the starting position.
'''
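# rotating calipers: advance the support line around whichever polygon
# turns through the smaller angle, measuring the distance from the new
# supporting edge to the current vertex of the other polygon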
while True:
e1_angle = support_line.angle_between(Line(e1_current, e1_next))
e2_angle = pi - support_line.angle_between(Line(
e2_current, e2_next))
if (e1_angle < e2_angle) is True:
support_line = Line(e1_current, e1_next)
e1_segment = Segment(e1_current, e1_next)
min_dist_current = e1_segment.distance(e2_current)
if min_dist_current.evalf() < min_dist.evalf():
min_dist = min_dist_current
if e1_connections[e1_next][0] != e1_current:
e1_current = e1_next
e1_next = e1_connections[e1_next][0]
else:
e1_current = e1_next
e1_next = e1_connections[e1_next][1]
elif (e1_angle > e2_angle) is True:
support_line = Line(e2_next, e2_current)
e2_segment = Segment(e2_current, e2_next)
min_dist_current = e2_segment.distance(e1_current)
if min_dist_current.evalf() < min_dist.evalf():
min_dist = min_dist_current
if e2_connections[e2_next][0] != e2_current:
e2_current = e2_next
e2_next = e2_connections[e2_next][0]
else:
e2_current = e2_next
e2_next = e2_connections[e2_next][1]
else:
support_line = Line(e1_current, e1_next)
e1_segment = Segment(e1_current, e1_next)
e2_segment = Segment(e2_current, e2_next)
min1 = e1_segment.distance(e2_next)
min2 = e2_segment.distance(e1_next)
min_dist_current = min(min1, min2)
if min_dist_current.evalf() < min_dist.evalf():
min_dist = min_dist_current
if e1_connections[e1_next][0] != e1_current:
e1_current = e1_next
e1_next = e1_connections[e1_next][0]
else:
e1_current = e1_next
e1_next = e1_connections[e1_next][1]
if e2_connections[e2_next][0] != e2_current:
e2_current = e2_next
e2_next = e2_connections[e2_next][0]
else:
e2_current = e2_next
e2_next = e2_connections[e2_next][1]
if e1_current == e1_ymax and e2_current == e2_ymin:
break
return min_dist
def _svg(self, scale_factor=1., fill_color="#66cc99"):
"""Returns SVG path element for the Polygon.
Parameters
==========
scale_factor : float
Multiplication factor for the SVG stroke-width. Default is 1.
fill_color : str, optional
Hex string for fill color. Default is "#66cc99".
"""
verts = map(N, self.vertices)
coords = ["{},{}".format(p.x, p.y) for p in verts]
path = "M {} L {} z".format(coords[0], " L ".join(coords[1:]))
return (
'<path fill-rule="evenodd" fill="{2}" stroke="#555555" '
'stroke-width="{0}" opacity="0.6" d="{1}" />'
).format(2. * scale_factor, path, fill_color)
def _hashable_content(self):
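# canonicalize the vertex cycle so polygons that differ only by a
# rotation or a reversal of their vertex list compare (and hash) equal:
# relabel the points, take the lexicographically least rotation of the
# label sequence and of its reversal, and keep the smaller of the two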
D = {}
def ref_list(point_list):
kee = {}
for i, p in enumerate(ordered(set(point_list))):
kee[p] = i
D[i] = p
return [kee[p] for p in point_list]
S1 = ref_list(self.args)
r_nor = rotate_left(S1, least_rotation(S1))
S2 = ref_list(list(reversed(self.args)))
r_rev = rotate_left(S2, least_rotation(S2))
if r_nor < r_rev:
r = r_nor
else:
r = r_rev
canonical_args = [ D[order] for order in r ]
return tuple(canonical_args)
def __contains__(self, o):
"""
Return True if o is contained within the boundary lines of self.
Parameters
==========
o : GeometryEntity
Returns
=======
contained in : bool
The points (and sides, if applicable) are contained in self.
See Also
========
sympy.geometry.entity.GeometryEntity.encloses
Examples
========
>>> from sympy import Line, Segment, Point
>>> p = Point(0, 0)
>>> q = Point(1, 1)
>>> s = Segment(p, q*2)
>>> l = Line(p, q)
>>> p in q
False
>>> p in s
True
>>> q*3 in s
False
>>> s in l
True
"""
if isinstance(o, Polygon):
return self == o
elif isinstance(o, Segment):
return any(o in s for s in self.sides)
elif isinstance(o, Point):
if o in self.vertices:
return True
for side in self.sides:
if o in side:
return True
return False
def bisectors(p, prec=None):
"""Returns angle bisectors of a polygon. If prec is given
then approximate the point defining the ray to that precision.
The distance between the points defining the bisector ray is 1.
Examples
========
>>> from sympy import Polygon, Point
>>> p = Polygon(Point(0, 0), Point(2, 0), Point(1, 1), Point(0, 3))
>>> p.bisectors(2)
{Point2D(0, 0): Ray2D(Point2D(0, 0), Point2D(0.71, 0.71)),
Point2D(0, 3): Ray2D(Point2D(0, 3), Point2D(0.23, 2.0)),
Point2D(1, 1): Ray2D(Point2D(1, 1), Point2D(0.19, 0.42)),
Point2D(2, 0): Ray2D(Point2D(2, 0), Point2D(1.1, 0.38))}
"""
b = {}
pts = list(p.args)
pts.append(pts[0]) # close it
cw = Polygon._isright(*pts[:3])
if cw:
pts = list(reversed(pts))
for v, a in p.angles.items():
i = pts.index(v)
p1, p2 = Point._normalize_dimension(pts[i], pts[i + 1])
ray = Ray(p1, p2).rotate(a/2, v)
dir = ray.direction
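# rescale so the two points defining the ray are exactly distance 1 apart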
ray = Ray(ray.p1, ray.p1 + dir/dir.distance((0, 0)))
if prec is not None:
ray = Ray(ray.p1, ray.p2.n(prec))
b[v] = ray
return b
class RegularPolygon(Polygon):
"""
A regular polygon.
Such a polygon has all internal angles equal and all sides the same length.
Parameters
==========
center : Point
radius : number or Basic instance
The distance from the center to a vertex
n : int
The number of sides
Attributes
==========
vertices
center
radius
rotation
apothem
interior_angle
exterior_angle
circumcircle
incircle
angles
Raises
======
GeometryError
If the `center` is not a Point, or the `radius` is not a number or Basic
instance, or the number of sides, `n`, is less than three.
Notes
=====
A RegularPolygon can be instantiated with Polygon with the kwarg n.
Regular polygons are instantiated with a center, radius, number of sides
and a rotation angle. Whereas the arguments of a Polygon are vertices, the
vertices of the RegularPolygon must be obtained with the vertices method.
See Also
========
sympy.geometry.point.Point, Polygon
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> r = RegularPolygon(Point(0, 0), 5, 3)
>>> r
RegularPolygon(Point2D(0, 0), 5, 3, 0)
>>> r.vertices[0]
Point2D(5, 0)
"""
__slots__ = ('_n', '_center', '_radius', '_rot')
def __new__(cls, c, r, n, rot=0, **kwargs):
r, n, rot = map(sympify, (r, n, rot))
c = Point(c, dim=2, **kwargs)
if not isinstance(r, Expr):
raise GeometryError("r must be an Expr object, not %s" % r)
if n.is_Number:
as_int(n) # let an error raise if necessary
if n < 3:
raise GeometryError("n must be a >= 3, not %s" % n)
obj = GeometryEntity.__new__(cls, c, r, n, **kwargs)
obj._n = n
obj._center = c
obj._radius = r
obj._rot = rot % (2*S.Pi/n) if rot.is_number else rot
return obj
def _eval_evalf(self, prec=15, **options):
c, r, n, a = self.args
dps = prec_to_dps(prec)
c, r, a = [i.evalf(n=dps, **options) for i in (c, r, a)]
return self.func(c, r, n, a)
@property
def args(self):
"""
Returns the center point, the radius,
the number of sides, and the orientation angle.
Examples
========
>>> from sympy import RegularPolygon, Point
>>> r = RegularPolygon(Point(0, 0), 5, 3)
>>> r.args
(Point2D(0, 0), 5, 3, 0)
"""
return self._center, self._radius, self._n, self._rot
def __str__(self):
return 'RegularPolygon(%s, %s, %s, %s)' % tuple(self.args)
def __repr__(self):
return 'RegularPolygon(%s, %s, %s, %s)' % tuple(self.args)
@property
def area(self):
"""Returns the area.
Examples
========
>>> from sympy.geometry import RegularPolygon
>>> square = RegularPolygon((0, 0), 1, 4)
>>> square.area
2
>>> _ == square.length**2
True
"""
c, r, n, rot = self.args
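# n congruent triangles around the center give area = n*s**2/(4*tan(pi/n));
# sign(r) preserves the sign convention for a negative radius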
return sign(r)*n*self.length**2/(4*tan(pi/n))
@property
def length(self):
"""Returns the length of the sides.
The half-length of the side and the apothem form two legs
of a right triangle whose hypotenuse is the radius of the
regular polygon.
Examples
========
>>> from sympy.geometry import RegularPolygon
>>> from sympy import sqrt
>>> s = square_in_unit_circle = RegularPolygon((0, 0), 1, 4)
>>> s.length
sqrt(2)
>>> sqrt((_/2)**2 + s.apothem**2) == s.radius
True
"""
return self.radius*2*sin(pi/self._n)
@property
def center(self):
"""The center of the RegularPolygon
This is also the center of the circumscribing circle.
Returns
=======
center : Point
See Also
========
sympy.geometry.point.Point, sympy.geometry.ellipse.Ellipse.center
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 5, 4)
>>> rp.center
Point2D(0, 0)
"""
return self._center
centroid = center
@property
def circumcenter(self):
"""
Alias for center.
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 5, 4)
>>> rp.circumcenter
Point2D(0, 0)
"""
return self.center
@property
def radius(self):
"""Radius of the RegularPolygon
This is also the radius of the circumscribing circle.
Returns
=======
radius : number or instance of Basic
See Also
========
sympy.geometry.line.Segment.length, sympy.geometry.ellipse.Circle.radius
Examples
========
>>> from sympy import Symbol
>>> from sympy.geometry import RegularPolygon, Point
>>> radius = Symbol('r')
>>> rp = RegularPolygon(Point(0, 0), radius, 4)
>>> rp.radius
r
"""
return self._radius
@property
def circumradius(self):
"""
Alias for radius.
Examples
========
>>> from sympy import Symbol
>>> from sympy.geometry import RegularPolygon, Point
>>> radius = Symbol('r')
>>> rp = RegularPolygon(Point(0, 0), radius, 4)
>>> rp.circumradius
r
"""
return self.radius
@property
def rotation(self):
"""CCW angle by which the RegularPolygon is rotated
Returns
=======
rotation : number or instance of Basic
Examples
========
>>> from sympy import pi
>>> from sympy.abc import a
>>> from sympy.geometry import RegularPolygon, Point
>>> RegularPolygon(Point(0, 0), 3, 4, pi/4).rotation
pi/4
Numerical rotation angles are made canonical:
>>> RegularPolygon(Point(0, 0), 3, 4, a).rotation
a
>>> RegularPolygon(Point(0, 0), 3, 4, pi).rotation
0
"""
return self._rot
@property
def apothem(self):
"""The inradius of the RegularPolygon.
The apothem/inradius is the radius of the inscribed circle.
Returns
=======
apothem : number or instance of Basic
See Also
========
sympy.geometry.line.Segment.length, sympy.geometry.ellipse.Circle.radius
Examples
========
>>> from sympy import Symbol
>>> from sympy.geometry import RegularPolygon, Point
>>> radius = Symbol('r')
>>> rp = RegularPolygon(Point(0, 0), radius, 4)
>>> rp.apothem
sqrt(2)*r/2
"""
return self.radius * cos(S.Pi/self._n)
@property
def inradius(self):
"""
Alias for apothem.
Examples
========
>>> from sympy import Symbol
>>> from sympy.geometry import RegularPolygon, Point
>>> radius = Symbol('r')
>>> rp = RegularPolygon(Point(0, 0), radius, 4)
>>> rp.inradius
sqrt(2)*r/2
"""
return self.apothem
@property
def interior_angle(self):
"""Measure of the interior angles.
Returns
=======
interior_angle : number
See Also
========
sympy.geometry.line.LinearEntity.angle_between
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 4, 8)
>>> rp.interior_angle
3*pi/4
"""
return (self._n - 2)*S.Pi/self._n
@property
def exterior_angle(self):
"""Measure of the exterior angles.
Returns
=======
exterior_angle : number
See Also
========
sympy.geometry.line.LinearEntity.angle_between
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 4, 8)
>>> rp.exterior_angle
pi/4
"""
return 2*S.Pi/self._n
@property
def circumcircle(self):
"""The circumcircle of the RegularPolygon.
Returns
=======
circumcircle : Circle
See Also
========
circumcenter, sympy.geometry.ellipse.Circle
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 4, 8)
>>> rp.circumcircle
Circle(Point2D(0, 0), 4)
"""
return Circle(self.center, self.radius)
@property
def incircle(self):
"""The incircle of the RegularPolygon.
Returns
=======
incircle : Circle
See Also
========
inradius, sympy.geometry.ellipse.Circle
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 4, 7)
>>> rp.incircle
Circle(Point2D(0, 0), 4*cos(pi/7))
"""
return Circle(self.center, self.apothem)
@property
def angles(self):
"""
Returns a dictionary with keys, the vertices of the Polygon,
and values, the interior angle at each vertex.
Examples
========
>>> from sympy import RegularPolygon, Point
>>> r = RegularPolygon(Point(0, 0), 5, 3)
>>> r.angles
{Point2D(-5/2, -5*sqrt(3)/2): pi/3,
Point2D(-5/2, 5*sqrt(3)/2): pi/3,
Point2D(5, 0): pi/3}
"""
ret = {}
ang = self.interior_angle
for v in self.vertices:
ret[v] = ang
return ret
def encloses_point(self, p):
"""
Return True if p is enclosed by (is inside of) self.
Notes
=====
Being on the border of self is considered False.
The general Polygon.encloses_point method is called only if
a point is not within or beyond the incircle or circumcircle,
respectively.
Parameters
==========
p : Point
Returns
=======
encloses_point : True, False or None
See Also
========
sympy.geometry.ellipse.Ellipse.encloses_point
Examples
========
>>> from sympy import RegularPolygon, S, Point, Symbol
>>> p = RegularPolygon((0, 0), 3, 4)
>>> p.encloses_point(Point(0, 0))
True
>>> r, R = p.inradius, p.circumradius
>>> p.encloses_point(Point((r + R)/2, 0))
True
>>> p.encloses_point(Point(R/2, R/2 + (R - r)/10))
False
>>> t = Symbol('t', real=True)
>>> p.encloses_point(p.arbitrary_point().subs(t, S.Half))
False
>>> p.encloses_point(Point(5, 5))
False
"""
c = self.center
d = Segment(c, p).length
if d >= self.radius:
return False
elif d < self.inradius:
return True
else:
# now enumerate the RegularPolygon like a general polygon.
return Polygon.encloses_point(self, p)
def spin(self, angle):
"""Increment *in place* the virtual Polygon's rotation by ccw angle.
See also: rotate method which moves the center.
>>> from sympy import Polygon, Point, pi
>>> r = Polygon(Point(0,0), 1, n=3)
>>> r.vertices[0]
Point2D(1, 0)
>>> r.spin(pi/6)
>>> r.vertices[0]
Point2D(sqrt(3)/2, 1/2)
See Also
========
rotation
rotate : Creates a copy of the RegularPolygon rotated about a Point
"""
self._rot += angle
def rotate(self, angle, pt=None):
"""Override GeometryEntity.rotate to first rotate the RegularPolygon
about its center.
>>> from sympy import Point, RegularPolygon, pi
>>> t = RegularPolygon(Point(1, 0), 1, 3)
>>> t.vertices[0] # vertex on x-axis
Point2D(2, 0)
>>> t.rotate(pi/2).vertices[0] # vertex on y axis now
Point2D(0, 2)
See Also
========
rotation
spin : Rotates a RegularPolygon in place
"""
r = type(self)(*self.args) # need a copy or else changes are in-place
r._rot += angle
return GeometryEntity.rotate(r, angle, pt)
def scale(self, x=1, y=1, pt=None):
"""Override GeometryEntity.scale since it is the radius that must be
scaled (if x == y) or else a new Polygon must be returned.
>>> from sympy import RegularPolygon
Symmetric scaling returns a RegularPolygon:
>>> RegularPolygon((0, 0), 1, 4).scale(2, 2)
RegularPolygon(Point2D(0, 0), 2, 4, 0)
Asymmetric scaling returns a kite as a Polygon:
>>> RegularPolygon((0, 0), 1, 4).scale(2, 1)
Polygon(Point2D(2, 0), Point2D(0, 1), Point2D(-2, 0), Point2D(0, -1))
"""
if pt:
pt = Point(pt, dim=2)
return self.translate(*(-pt).args).scale(x, y).translate(*pt.args)
if x != y:
return Polygon(*self.vertices).scale(x, y)
c, r, n, rot = self.args
r *= x
return self.func(c, r, n, rot)
def reflect(self, line):
"""Override GeometryEntity.reflect since this is not made of only
points.
Examples
========
>>> from sympy import RegularPolygon, Line
>>> RegularPolygon((0, 0), 1, 4).reflect(Line((0, 1), slope=-2))
RegularPolygon(Point2D(4/5, 2/5), -1, 4, atan(4/3))
"""
c, r, n, rot = self.args
v = self.vertices[0]
d = v - c
cc = c.reflect(line)
vv = v.reflect(line)
dd = vv - cc
# calculate rotation about the new center
# which will align the vertices
l1 = Ray((0, 0), dd)
l2 = Ray((0, 0), d)
ang = l1.closing_angle(l2)
rot += ang
# change sign of radius as point traversal is reversed
return self.func(cc, -r, n, rot)
@property
def vertices(self):
"""The vertices of the RegularPolygon.
Returns
=======
vertices : list
Each vertex is a Point.
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy.geometry import RegularPolygon, Point
>>> rp = RegularPolygon(Point(0, 0), 5, 4)
>>> rp.vertices
[Point2D(5, 0), Point2D(0, 5), Point2D(-5, 0), Point2D(0, -5)]
"""
c = self._center
r = abs(self._radius)
rot = self._rot
v = 2*S.Pi/self._n
return [Point(c.x + r*cos(k*v + rot), c.y + r*sin(k*v + rot))
for k in range(self._n)]
def __eq__(self, o):
if not isinstance(o, Polygon):
return False
elif not isinstance(o, RegularPolygon):
return Polygon.__eq__(o, self)
return self.args == o.args
def __hash__(self):
return super().__hash__()
class Triangle(Polygon):
"""
A polygon with three vertices and three sides.
Parameters
==========
points : sequence of Points
keyword: asa, sas, or sss to specify sides/angles of the triangle
Attributes
==========
vertices
altitudes
orthocenter
circumcenter
circumradius
circumcircle
inradius
incircle
exradii
medians
medial
nine_point_circle
Raises
======
GeometryError
If the number of vertices is not equal to three, or one of the vertices
is not a Point, or a valid keyword is not given.
See Also
========
sympy.geometry.point.Point, Polygon
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> Triangle(Point(0, 0), Point(4, 0), Point(4, 3))
Triangle(Point2D(0, 0), Point2D(4, 0), Point2D(4, 3))
Keywords sss, sas, or asa can be used to give the desired
side lengths (in order) and interior angles (in degrees) that
define the triangle:
>>> Triangle(sss=(3, 4, 5))
Triangle(Point2D(0, 0), Point2D(3, 0), Point2D(3, 4))
>>> Triangle(asa=(30, 1, 30))
Triangle(Point2D(0, 0), Point2D(1, 0), Point2D(1/2, sqrt(3)/6))
>>> Triangle(sas=(1, 45, 2))
Triangle(Point2D(0, 0), Point2D(2, 0), Point2D(sqrt(2)/2, sqrt(2)/2))
"""
def __new__(cls, *args, **kwargs):
if len(args) != 3:
if 'sss' in kwargs:
return _sss(*[simplify(a) for a in kwargs['sss']])
if 'asa' in kwargs:
return _asa(*[simplify(a) for a in kwargs['asa']])
if 'sas' in kwargs:
return _sas(*[simplify(a) for a in kwargs['sas']])
msg = "Triangle instantiates with three points or a valid keyword."
raise GeometryError(msg)
vertices = [Point(a, dim=2, **kwargs) for a in args]
# remove consecutive duplicates
nodup = []
for p in vertices:
if nodup and p == nodup[-1]:
continue
nodup.append(p)
if len(nodup) > 1 and nodup[-1] == nodup[0]:
nodup.pop() # last point was same as first
# remove collinear points
i = -3
while i < len(nodup) - 3 and len(nodup) > 2:
a, b, c = sorted(
[nodup[i], nodup[i + 1], nodup[i + 2]], key=default_sort_key)
if Point.is_collinear(a, b, c):
nodup[i] = a
nodup[i + 1] = None
nodup.pop(i + 1)
i += 1
vertices = list(filter(lambda x: x is not None, nodup))
if len(vertices) == 3:
return GeometryEntity.__new__(cls, *vertices, **kwargs)
elif len(vertices) == 2:
return Segment(*vertices, **kwargs)
else:
return Point(*vertices, **kwargs)
@property
def vertices(self):
"""The triangle's vertices
Returns
=======
vertices : tuple
Each element in the tuple is a Point
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> t = Triangle(Point(0, 0), Point(4, 0), Point(4, 3))
>>> t.vertices
(Point2D(0, 0), Point2D(4, 0), Point2D(4, 3))
"""
return self.args
def is_similar(t1, t2):
"""Is another triangle similar to this one.
Two triangles are similar if one can be uniformly scaled to the other.
Parameters
==========
other: Triangle
Returns
=======
is_similar : boolean
See Also
========
sympy.geometry.entity.GeometryEntity.is_similar
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> t1 = Triangle(Point(0, 0), Point(4, 0), Point(4, 3))
>>> t2 = Triangle(Point(0, 0), Point(-4, 0), Point(-4, -3))
>>> t1.is_similar(t2)
True
>>> t2 = Triangle(Point(0, 0), Point(-4, 0), Point(-4, -4))
>>> t1.is_similar(t2)
False
"""
if not isinstance(t2, Polygon):
return False
s1_1, s1_2, s1_3 = [side.length for side in t1.sides]
s2 = [side.length for side in t2.sides]
def _are_similar(u1, u2, u3, v1, v2, v3):
e1 = simplify(u1/v1)
e2 = simplify(u2/v2)
e3 = simplify(u3/v3)
return bool(e1 == e2) and bool(e2 == e3)
# There's only 6 permutations, so write them out
return _are_similar(s1_1, s1_2, s1_3, *s2) or \
_are_similar(s1_1, s1_3, s1_2, *s2) or \
_are_similar(s1_2, s1_1, s1_3, *s2) or \
_are_similar(s1_2, s1_3, s1_1, *s2) or \
_are_similar(s1_3, s1_1, s1_2, *s2) or \
_are_similar(s1_3, s1_2, s1_1, *s2)
def is_equilateral(self):
"""Are all the sides the same length?
Returns
=======
is_equilateral : boolean
See Also
========
sympy.geometry.entity.GeometryEntity.is_similar, RegularPolygon
is_isosceles, is_right, is_scalene
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> t1 = Triangle(Point(0, 0), Point(4, 0), Point(4, 3))
>>> t1.is_equilateral()
False
>>> from sympy import sqrt
>>> t2 = Triangle(Point(0, 0), Point(10, 0), Point(5, 5*sqrt(3)))
>>> t2.is_equilateral()
True
"""
return not has_variety(s.length for s in self.sides)
def is_isosceles(self):
"""Are two or more of the sides the same length?
Returns
=======
is_isosceles : boolean
See Also
========
is_equilateral, is_right, is_scalene
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> t1 = Triangle(Point(0, 0), Point(4, 0), Point(2, 4))
>>> t1.is_isosceles()
True
"""
return has_dups(s.length for s in self.sides)
def is_scalene(self):
"""Are all the sides of the triangle of different lengths?
Returns
=======
is_scalene : boolean
See Also
========
is_equilateral, is_isosceles, is_right
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> t1 = Triangle(Point(0, 0), Point(4, 0), Point(1, 4))
>>> t1.is_scalene()
True
"""
return not has_dups(s.length for s in self.sides)
def is_right(self):
"""Is the triangle right-angled.
Returns
=======
is_right : boolean
See Also
========
sympy.geometry.line.LinearEntity.is_perpendicular
is_equilateral, is_isosceles, is_scalene
Examples
========
>>> from sympy.geometry import Triangle, Point
>>> t1 = Triangle(Point(0, 0), Point(4, 0), Point(4, 3))
>>> t1.is_right()
True
"""
s = self.sides
return Segment.is_perpendicular(s[0], s[1]) or \
Segment.is_perpendicular(s[1], s[2]) or \
Segment.is_perpendicular(s[0], s[2])
@property
def altitudes(self):
"""The altitudes of the triangle.
An altitude of a triangle is a segment through a vertex,
perpendicular to the opposite side, with length being the
height of the vertex measured from the line containing the side.
Returns
=======
altitudes : dict
The dictionary consists of keys which are vertices and values
which are Segments.
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.Segment.length
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.altitudes[p1]
Segment2D(Point2D(0, 0), Point2D(1/2, 1/2))
"""
s = self.sides
v = self.vertices
return {v[0]: s[1].perpendicular_segment(v[0]),
v[1]: s[2].perpendicular_segment(v[1]),
v[2]: s[0].perpendicular_segment(v[2])}
@property
def orthocenter(self):
"""The orthocenter of the triangle.
The orthocenter is the intersection of the altitudes of a triangle.
It may lie inside, outside or on the triangle.
Returns
=======
orthocenter : Point
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.orthocenter
Point2D(0, 0)
"""
a = self.altitudes
v = self.vertices
return Line(a[v[0]]).intersection(Line(a[v[1]]))[0]
@property
def circumcenter(self):
"""The circumcenter of the triangle
The circumcenter is the center of the circumcircle.
Returns
=======
circumcenter : Point
See Also
========
sympy.geometry.point.Point
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.circumcenter
Point2D(1/2, 1/2)
"""
a, b, c = [x.perpendicular_bisector() for x in self.sides]
        return a.intersection(b)[0]
@property
def circumradius(self):
"""The radius of the circumcircle of the triangle.
Returns
=======
circumradius : number of Basic instance
See Also
========
sympy.geometry.ellipse.Circle.radius
Examples
========
>>> from sympy import Symbol
>>> from sympy.geometry import Point, Triangle
>>> a = Symbol('a')
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, a)
>>> t = Triangle(p1, p2, p3)
>>> t.circumradius
sqrt(a**2/4 + 1/4)
"""
return Point.distance(self.circumcenter, self.vertices[0])
@property
def circumcircle(self):
"""The circle which passes through the three vertices of the triangle.
Returns
=======
circumcircle : Circle
See Also
========
sympy.geometry.ellipse.Circle
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.circumcircle
Circle(Point2D(1/2, 1/2), sqrt(2)/2)
"""
return Circle(self.circumcenter, self.circumradius)
def bisectors(self):
"""The angle bisectors of the triangle.
An angle bisector of a triangle is a straight line through a vertex
which cuts the corresponding angle in half.
Returns
=======
bisectors : dict
Each key is a vertex (Point) and each value is the corresponding
bisector (Segment).
See Also
========
sympy.geometry.point.Point, sympy.geometry.line.Segment
Examples
========
>>> from sympy.geometry import Point, Triangle, Segment
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> from sympy import sqrt
>>> t.bisectors()[p2] == Segment(Point(1, 0), Point(0, sqrt(2) - 1))
True
"""
# use lines containing sides so containment check during
# intersection calculation can be avoided, thus reducing
# the processing time for calculating the bisectors
s = [Line(l) for l in self.sides]
v = self.vertices
c = self.incenter
l1 = Segment(v[0], Line(v[0], c).intersection(s[1])[0])
l2 = Segment(v[1], Line(v[1], c).intersection(s[2])[0])
l3 = Segment(v[2], Line(v[2], c).intersection(s[0])[0])
return {v[0]: l1, v[1]: l2, v[2]: l3}
@property
def incenter(self):
"""The center of the incircle.
The incircle is the circle which lies inside the triangle and touches
all three sides.
Returns
=======
incenter : Point
See Also
========
incircle, sympy.geometry.point.Point
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.incenter
Point2D(1 - sqrt(2)/2, 1 - sqrt(2)/2)
"""
s = self.sides
l = Matrix([s[i].length for i in [1, 2, 0]])
p = sum(l)
v = self.vertices
x = simplify(l.dot(Matrix([vi.x for vi in v]))/p)
y = simplify(l.dot(Matrix([vi.y for vi in v]))/p)
return Point(x, y)
@property
def inradius(self):
"""The radius of the incircle.
Returns
=======
inradius : number of Basic instance
See Also
========
incircle, sympy.geometry.ellipse.Circle.radius
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(4, 0), Point(0, 3)
>>> t = Triangle(p1, p2, p3)
>>> t.inradius
1
"""
return simplify(2 * self.area / self.perimeter)
@property
def incircle(self):
"""The incircle of the triangle.
The incircle is the circle which lies inside the triangle and touches
all three sides.
Returns
=======
incircle : Circle
See Also
========
sympy.geometry.ellipse.Circle
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(2, 0), Point(0, 2)
>>> t = Triangle(p1, p2, p3)
>>> t.incircle
Circle(Point2D(2 - sqrt(2), 2 - sqrt(2)), 2 - sqrt(2))
"""
return Circle(self.incenter, self.inradius)
@property
def exradii(self):
"""The radius of excircles of a triangle.
An excircle of the triangle is a circle lying outside the triangle,
tangent to one of its sides and tangent to the extensions of the
other two.
Returns
=======
exradii : dict
See Also
========
sympy.geometry.polygon.Triangle.inradius
Examples
========
The exradius touches the side of the triangle to which it is keyed, e.g.
the exradius touching side 2 is:
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(6, 0), Point(0, 2)
>>> t = Triangle(p1, p2, p3)
>>> t.exradii[t.sides[2]]
-2 + sqrt(10)
References
==========
.. [1] http://mathworld.wolfram.com/Exradius.html
.. [2] http://mathworld.wolfram.com/Excircles.html
"""
side = self.sides
a = side[0].length
b = side[1].length
c = side[2].length
s = (a+b+c)/2
area = self.area
exradii = {self.sides[0]: simplify(area/(s-a)),
self.sides[1]: simplify(area/(s-b)),
self.sides[2]: simplify(area/(s-c))}
return exradii
@property
def excenters(self):
"""Excenters of the triangle.
An excenter is the center of a circle that is tangent to a side of the
triangle and the extensions of the other two sides.
Returns
=======
excenters : dict
Examples
========
        The excenters are keyed to the side of the triangle to which their
        corresponding excircle is tangent, e.g. the excenter of the circle
        touching side 0 is:
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(6, 0), Point(0, 2)
>>> t = Triangle(p1, p2, p3)
>>> t.excenters[t.sides[0]]
        Point2D(10 + 2*sqrt(10), 2/3 + sqrt(10)/3)
See Also
========
sympy.geometry.polygon.Triangle.exradii
References
==========
.. [1] http://mathworld.wolfram.com/Excircles.html
"""
s = self.sides
v = self.vertices
a = s[0].length
b = s[1].length
c = s[2].length
x = [v[0].x, v[1].x, v[2].x]
y = [v[0].y, v[1].y, v[2].y]
        # excenter = (-a*A + b*B + c*C)/(-a + b + c) and its two cyclic
        # variants; the whole weighted sum is divided by the coefficient sum
        exc_coords = {
            "x1": simplify((-a*x[0] + b*x[1] + c*x[2])/(-a + b + c)),
            "x2": simplify((a*x[0] - b*x[1] + c*x[2])/(a - b + c)),
            "x3": simplify((a*x[0] + b*x[1] - c*x[2])/(a + b - c)),
            "y1": simplify((-a*y[0] + b*y[1] + c*y[2])/(-a + b + c)),
            "y2": simplify((a*y[0] - b*y[1] + c*y[2])/(a - b + c)),
            "y3": simplify((a*y[0] + b*y[1] - c*y[2])/(a + b - c))
        }
excenters = {
s[0]: Point(exc_coords["x1"], exc_coords["y1"]),
s[1]: Point(exc_coords["x2"], exc_coords["y2"]),
s[2]: Point(exc_coords["x3"], exc_coords["y3"])
}
return excenters
@property
def medians(self):
"""The medians of the triangle.
A median of a triangle is a straight line through a vertex and the
midpoint of the opposite side, and divides the triangle into two
equal areas.
Returns
=======
medians : dict
Each key is a vertex (Point) and each value is the median (Segment)
at that point.
See Also
========
sympy.geometry.point.Point.midpoint, sympy.geometry.line.Segment.midpoint
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.medians[p1]
Segment2D(Point2D(0, 0), Point2D(1/2, 1/2))
"""
s = self.sides
v = self.vertices
return {v[0]: Segment(v[0], s[1].midpoint),
v[1]: Segment(v[1], s[2].midpoint),
v[2]: Segment(v[2], s[0].midpoint)}
@property
def medial(self):
"""The medial triangle of the triangle.
The triangle which is formed from the midpoints of the three sides.
Returns
=======
medial : Triangle
See Also
========
sympy.geometry.line.Segment.midpoint
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.medial
Triangle(Point2D(1/2, 0), Point2D(1/2, 1/2), Point2D(0, 1/2))
"""
s = self.sides
return Triangle(s[0].midpoint, s[1].midpoint, s[2].midpoint)
@property
def nine_point_circle(self):
"""The nine-point circle of the triangle.
Nine-point circle is the circumcircle of the medial triangle, which
passes through the feet of altitudes and the middle points of segments
connecting the vertices and the orthocenter.
Returns
=======
nine_point_circle : Circle
See also
========
sympy.geometry.line.Segment.midpoint
sympy.geometry.polygon.Triangle.medial
sympy.geometry.polygon.Triangle.orthocenter
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.nine_point_circle
Circle(Point2D(1/4, 1/4), sqrt(2)/4)
"""
return Circle(*self.medial.vertices)
@property
def eulerline(self):
"""The Euler line of the triangle.
The line which passes through circumcenter, centroid and orthocenter.
Returns
=======
eulerline : Line (or Point for equilateral triangles in which case all
centers coincide)
Examples
========
>>> from sympy.geometry import Point, Triangle
>>> p1, p2, p3 = Point(0, 0), Point(1, 0), Point(0, 1)
>>> t = Triangle(p1, p2, p3)
>>> t.eulerline
Line2D(Point2D(0, 0), Point2D(1/2, 1/2))
"""
if self.is_equilateral():
return self.orthocenter
return Line(self.orthocenter, self.circumcenter)
def rad(d):
"""Return the radian value for the given degrees (pi = 180 degrees)."""
return d*pi/180
def deg(r):
"""Return the degree value for the given radians (pi = 180 degrees)."""
return r/pi*180
def _slope(d):
rv = tan(rad(d))
return rv
def _asa(d1, l, d2):
"""Return triangle having side with length l on the x-axis."""
xy = Line((0, 0), slope=_slope(d1)).intersection(
Line((l, 0), slope=_slope(180 - d2)))[0]
return Triangle((0, 0), (l, 0), xy)
def _sss(l1, l2, l3):
"""Return triangle having side of length l1 on the x-axis."""
c1 = Circle((0, 0), l3)
c2 = Circle((l1, 0), l2)
inter = [a for a in c1.intersection(c2) if a.y.is_nonnegative]
if not inter:
return None
pt = inter[0]
return Triangle((0, 0), (l1, 0), pt)
def _sas(l1, d, l2):
"""Return triangle having side with length l2 on the x-axis."""
p1 = Point(0, 0)
p2 = Point(l2, 0)
p3 = Point(cos(rad(d))*l1, sin(rad(d))*l1)
return Triangle(p1, p2, p3)
| 28.449879 | 140 | 0.532303 |
7942bc3c710ec4fee3cff8f67abd7eb681f9dfbc | 11,179 | py | Python | acapy_plugin_toolbox/holder.py | swcurran/aries-acapy-plugin-toolbox | f656e4d05dcd18c03085af765e6013b2a8ff2ff2 | [
"Apache-2.0"
] | 1 | 2019-11-20T19:10:25.000Z | 2019-11-20T19:10:25.000Z | acapy_plugin_toolbox/holder.py | swcurran/aries-acapy-plugin-toolbox | f656e4d05dcd18c03085af765e6013b2a8ff2ff2 | [
"Apache-2.0"
] | null | null | null | acapy_plugin_toolbox/holder.py | swcurran/aries-acapy-plugin-toolbox | f656e4d05dcd18c03085af765e6013b2a8ff2ff2 | [
"Apache-2.0"
] | 2 | 2020-01-24T19:00:49.000Z | 2021-06-06T10:45:14.000Z | """Define messages for credential holder admin protocols."""
# pylint: disable=invalid-name
# pylint: disable=too-few-public-methods
from marshmallow import fields
from aries_cloudagent.config.injection_context import InjectionContext
from aries_cloudagent.core.protocol_registry import ProtocolRegistry
from aries_cloudagent.messaging.base_handler import BaseHandler, BaseResponder, RequestContext
from aries_cloudagent.protocols.issue_credential.v1_0.routes import (
V10CredentialExchangeListResultSchema,
V10CredentialProposalRequestMandSchema
)
from aries_cloudagent.protocols.issue_credential.v1_0.models.credential_exchange import (
V10CredentialExchange,
V10CredentialExchangeSchema
)
from aries_cloudagent.protocols.issue_credential.v1_0.messages.credential_proposal import (
CredentialProposal,
)
from aries_cloudagent.protocols.issue_credential.v1_0.manager import CredentialManager
from aries_cloudagent.protocols.present_proof.v1_0.routes import (
V10PresentationExchangeListSchema,
V10PresentationProposalRequestSchema
)
from aries_cloudagent.protocols.present_proof.v1_0.models.presentation_exchange import (
V10PresentationExchange,
V10PresentationExchangeSchema
)
from aries_cloudagent.protocols.present_proof.v1_0.messages.presentation_proposal import (
PresentationProposal,
)
from aries_cloudagent.protocols.present_proof.v1_0.manager import PresentationManager
from aries_cloudagent.connections.models.connection_record import ConnectionRecord
from aries_cloudagent.storage.error import StorageNotFoundError
from aries_cloudagent.protocols.problem_report.message import ProblemReport
from .util import generate_model_schema, admin_only
PROTOCOL = 'did:sov:BzCbsNYhMrjHiqZDTUASHg;spec/admin-holder/0.1'
SEND_CRED_PROPOSAL = '{}/send-credential-proposal'.format(PROTOCOL)
CRED_EXCHANGE = '{}/credential-exchange'.format(PROTOCOL)
SEND_PRES_PROPOSAL = '{}/send-presentation-proposal'.format(PROTOCOL)
PRES_EXCHANGE = '{}/presentation-exchange'.format(PROTOCOL)
CREDENTIALS_GET_LIST = '{}/credentials-get-list'.format(PROTOCOL)
CREDENTIALS_LIST = '{}/credentials-list'.format(PROTOCOL)
PRESENTATIONS_GET_LIST = '{}/presentations-get-list'.format(PROTOCOL)
PRESENTATIONS_LIST = '{}/presentations-list'.format(PROTOCOL)
MESSAGE_TYPES = {
SEND_CRED_PROPOSAL:
'acapy_plugin_toolbox.holder.SendCredProposal',
SEND_PRES_PROPOSAL:
'acapy_plugin_toolbox.holder.SendPresProposal',
CREDENTIALS_GET_LIST:
'acapy_plugin_toolbox.holder.CredGetList',
CREDENTIALS_LIST:
'acapy_plugin_toolbox.holder.CredList',
PRESENTATIONS_GET_LIST:
'acapy_plugin_toolbox.holder.PresGetList',
PRESENTATIONS_LIST:
'acapy_plugin_toolbox.holder.PresList',
}
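# Each entry above names the module path of a message class generated below;
# generate_model_schema (from .util) builds an AgentMessage subclass plus its
# marshmallow schema from the given name/handler/msg_type/schema quadruple.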
async def setup(context: InjectionContext):
"""Setup the holder plugin."""
protocol_registry = await context.inject(ProtocolRegistry)
protocol_registry.register_message_types(
MESSAGE_TYPES
)
SendCredProposal, SendCredProposalSchema = generate_model_schema(
name='SendCredProposal',
handler='acapy_plugin_toolbox.holder.SendCredProposalHandler',
msg_type=SEND_CRED_PROPOSAL,
schema=V10CredentialProposalRequestMandSchema
)
CredExchange, CredExchangeSchema = generate_model_schema(
name='CredExchange',
handler='acapy_plugin_toolbox.util.PassHandler',
msg_type=CRED_EXCHANGE,
schema=V10CredentialExchangeSchema
)
class SendCredProposalHandler(BaseHandler):
"""Handler for received send proposal request."""
@admin_only
async def handle(self, context: RequestContext, responder: BaseResponder):
"""Handle received send proposal request."""
connection_id = str(context.message.connection_id)
credential_definition_id = context.message.credential_definition_id
comment = context.message.comment
credential_manager = CredentialManager(context)
try:
connection_record = await ConnectionRecord.retrieve_by_id(
context,
connection_id
)
except StorageNotFoundError:
report = ProblemReport(
explain_ltxt='Connection not found.',
who_retries='none'
)
report.assign_thread_from(context.message)
await responder.send_reply(report)
return
if not connection_record.is_ready:
report = ProblemReport(
explain_ltxt='Connection invalid.',
who_retries='none'
)
report.assign_thread_from(context.message)
await responder.send_reply(report)
return
credential_exchange_record = await credential_manager.create_proposal(
connection_id,
comment=comment,
credential_preview=context.message.credential_proposal,
credential_definition_id=credential_definition_id
)
await responder.send(
CredentialProposal(
comment=context.message.comment,
credential_proposal=context.message.credential_proposal,
cred_def_id=context.message.credential_definition_id
),
connection_id=connection_id
)
cred_exchange = CredExchange(**credential_exchange_record.serialize())
cred_exchange.assign_thread_from(context.message)
await responder.send_reply(cred_exchange)
SendPresProposal, SendPresProposalSchema = generate_model_schema(
name='SendPresProposal',
handler='acapy_plugin_toolbox.holder.SendPresProposalHandler',
msg_type=SEND_PRES_PROPOSAL,
schema=V10PresentationProposalRequestSchema
)
PresExchange, PresExchangeSchema = generate_model_schema(
name='PresExchange',
handler='acapy_plugin_toolbox.util.PassHandler',
msg_type=PRES_EXCHANGE,
schema=V10PresentationExchangeSchema
)
class SendPresProposalHandler(BaseHandler):
"""Handler for received send presentation proposal request."""
@admin_only
async def handle(self, context: RequestContext, responder: BaseResponder):
"""Handle received send presentation proposal request."""
connection_id = str(context.message.connection_id)
try:
connection_record = await ConnectionRecord.retrieve_by_id(
context,
connection_id
)
except StorageNotFoundError:
report = ProblemReport(
explain_ltxt='Connection not found.',
who_retries='none'
)
report.assign_thread_from(context.message)
await responder.send_reply(report)
return
if not connection_record.is_ready:
report = ProblemReport(
explain_ltxt='Connection invalid.',
who_retries='none'
)
report.assign_thread_from(context.message)
await responder.send_reply(report)
return
comment = context.message.comment
# Aries#0037 calls it a proposal in the proposal struct but it's of type preview
presentation_proposal = PresentationProposal(
comment=comment,
presentation_proposal=context.message.presentation_proposal
)
auto_present = (
context.message.auto_present or
context.settings.get("debug.auto_respond_presentation_request")
)
presentation_manager = PresentationManager(context)
presentation_exchange_record = (
await presentation_manager.create_exchange_for_proposal(
connection_id=connection_id,
presentation_proposal_message=presentation_proposal,
auto_present=auto_present
)
)
await responder.send(presentation_proposal, connection_id=connection_id)
pres_exchange = PresExchange(**presentation_exchange_record.serialize())
pres_exchange.assign_thread_from(context.message)
await responder.send_reply(pres_exchange)
CredGetList, CredGetListSchema = generate_model_schema(
name='CredGetList',
handler='acapy_plugin_toolbox.holder.CredGetListHandler',
msg_type=CREDENTIALS_GET_LIST,
schema={
'connection_id': fields.Str(required=False),
'credential_definition_id': fields.Str(required=False),
'schema_id': fields.Str(required=False)
}
)
CredList, CredListSchema = generate_model_schema(
name='CredList',
handler='acapy_plugin_toolbox.util.PassHandler',
msg_type=CREDENTIALS_LIST,
schema=V10CredentialExchangeListResultSchema
# schema={
# 'results': fields.List(fields.Dict())
# }
)
class CredGetListHandler(BaseHandler):
"""Handler for received get cred list request."""
@admin_only
async def handle(self, context: RequestContext, responder: BaseResponder):
"""Handle received get cred list request."""
# holder: BaseHolder = await context.inject(BaseHolder)
# credentials = await holder.get_credentials(0, 100, {})
# cred_list = CredList(results=credentials)
# await responder.send_reply(cred_list)
post_filter = dict(
filter(lambda item: item[1] is not None, {
# 'state': V10CredentialExchange.STATE_CREDENTIAL_RECEIVED,
'role': V10CredentialExchange.ROLE_HOLDER,
'connection_id': context.message.connection_id,
'credential_definition_id': context.message.credential_definition_id,
'schema_id': context.message.schema_id
}.items())
)
records = await V10CredentialExchange.query(context, {}, post_filter)
cred_list = CredList(results=records)
await responder.send_reply(cred_list)
PresGetList, PresGetListSchema = generate_model_schema(
name='PresGetList',
handler='acapy_plugin_toolbox.holder.PresGetListHandler',
msg_type=PRESENTATIONS_GET_LIST,
schema={
'connection_id': fields.Str(required=False),
'verified': fields.Str(required=False),
}
)
PresList, PresListSchema = generate_model_schema(
name='PresList',
handler='acapy_plugin_toolbox.util.PassHandler',
msg_type=PRESENTATIONS_LIST,
schema=V10PresentationExchangeListSchema
# schema={
# 'results': fields.List(fields.Dict())
# }
)
class PresGetListHandler(BaseHandler):
"""Handler for received get cred list request."""
@admin_only
async def handle(self, context: RequestContext, responder: BaseResponder):
"""Handle received get cred list request."""
post_filter = dict(
filter(lambda item: item[1] is not None, {
                # 'state': V10PresentationExchange.STATE_CREDENTIAL_RECEIVED,
'role': V10PresentationExchange.ROLE_PROVER,
'connection_id': context.message.connection_id,
'verified': context.message.verified,
}.items())
)
records = await V10PresentationExchange.query(context, {}, post_filter)
        pres_list = PresList(results=records)
        await responder.send_reply(pres_list)
| 36.413681 | 94 | 0.709813 |
7942bc43e13900b2da80684c2b42799ff8c73d27 | 594 | py | Python | pydocx/openxml/packaging/image_part.py | botzill/pydocx | 98c6aa626d875278240eabea8f86a914840499b3 | [
"Apache-2.0"
] | 127 | 2015-01-12T22:35:34.000Z | 2022-01-20T06:24:18.000Z | pydocx/openxml/packaging/image_part.py | turbo-q/pydocx | 98c6aa626d875278240eabea8f86a914840499b3 | [
"Apache-2.0"
] | 156 | 2015-01-05T19:55:56.000Z | 2020-10-14T07:01:42.000Z | pydocx/openxml/packaging/image_part.py | turbo-q/pydocx | 98c6aa626d875278240eabea8f86a914840499b3 | [
"Apache-2.0"
] | 45 | 2015-02-22T18:52:08.000Z | 2021-06-14T08:05:47.000Z | # coding: utf-8
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
from pydocx.openxml.packaging.open_xml_part import OpenXmlPart
class ImagePart(OpenXmlPart):
'''
Represents an image part relationship within a Word document container.
See also: http://msdn.microsoft.com/en-us/library/documentformat.openxml.packaging.imagepart%28v=office.14%29.aspx # noqa
'''
relationship_type = '/'.join([
'http://schemas.openxmlformats.org',
'officeDocument',
'2006',
'relationships',
'image',
])
| 23.76 | 126 | 0.673401 |
7942bd530c3701ec0254a4c73b2f9954d22baa8f | 460 | py | Python | aliyun/api/rest/Ess20140828CreateScalingRuleRequest.py | snowyxx/aliyun-python-demo | ed40887ddff440b85b77f9b2a1fcda11cca55c8b | [
"Apache-2.0"
] | null | null | null | aliyun/api/rest/Ess20140828CreateScalingRuleRequest.py | snowyxx/aliyun-python-demo | ed40887ddff440b85b77f9b2a1fcda11cca55c8b | [
"Apache-2.0"
] | null | null | null | aliyun/api/rest/Ess20140828CreateScalingRuleRequest.py | snowyxx/aliyun-python-demo | ed40887ddff440b85b77f9b2a1fcda11cca55c8b | [
"Apache-2.0"
] | null | null | null | '''
Created by auto_sdk on 2014.10.14
'''
from aliyun.api.base import RestApi
class Ess20140828CreateScalingRuleRequest(RestApi):
def __init__(self,domain='ess.aliyuncs.com',port=80):
RestApi.__init__(self,domain, port)
self.AdjustmentType = None
self.AdjustmentValue = None
self.Cooldown = None
self.ScalingGroupId = None
self.ScalingRuleName = None
def getapiname(self):
return 'ess.aliyuncs.com.CreateScalingRule.2014-08-28'
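# Hypothetical usage sketch (request construction only; the signing and HTTP
# plumbing are inherited from aliyun.api.base.RestApi and assumed here):
#   req = Ess20140828CreateScalingRuleRequest()
#   req.ScalingGroupId = 'asg-xxxxxxxx'
#   req.ScalingRuleName = 'scale-out-by-one'
#   req.AdjustmentType = 'QuantityChangeInCapacity'
#   req.AdjustmentValue = 1
#   resp = req.getResponse()  # assumed RestApi helper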
| 28.75 | 57 | 0.743478 |
7942be45180b74cccc6b9cf62d846a30509bc52e | 2,057 | py | Python | qiime2/metadata/base.py | emollier/qiime2 | bc9167d844fb4b42eccba8d30ace1eb10ab093c3 | [
"BSD-3-Clause"
] | 1 | 2020-11-23T02:24:42.000Z | 2020-11-23T02:24:42.000Z | qiime2/metadata/base.py | emollier/qiime2 | bc9167d844fb4b42eccba8d30ace1eb10ab093c3 | [
"BSD-3-Clause"
] | 1 | 2020-07-13T19:47:48.000Z | 2020-07-13T19:47:48.000Z | qiime2/metadata/base.py | emollier/qiime2 | bc9167d844fb4b42eccba8d30ace1eb10ab093c3 | [
"BSD-3-Clause"
] | null | null | null | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2020, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
SUPPORTED_COLUMN_TYPES = {'categorical', 'numeric'}
SUPPORTED_ID_HEADERS = {
'case_insensitive': {
'id', 'sampleid', 'sample id', 'sample-id', 'featureid',
'feature id', 'feature-id'
},
# For backwards-compatibility with existing formats.
'exact_match': {
# QIIME 1 mapping files. "#Sample ID" was never supported, but
# we're including it here for symmetry with the other supported
# headers that allow a space between words.
'#SampleID', '#Sample ID',
# biom-format: observation metadata and "classic" (TSV) OTU tables.
'#OTUID', '#OTU ID',
# Qiita sample/prep information files.
'sample_name'
}
}
FORMATTED_ID_HEADERS = "Case-insensitive: %s\n\nCase-sensitive: %s" % (
', '.join(repr(e) for e in sorted(
SUPPORTED_ID_HEADERS['case_insensitive'])),
', '.join(repr(e) for e in sorted(
SUPPORTED_ID_HEADERS['exact_match']))
)
def is_id_header(name):
"""Determine if a name is a valid ID column header.
This function may be used to determine if a value in a metadata file is a
valid ID column header, or if a pandas ``Index.name`` matches the ID header
requirements. The "ID header" corresponds to the ``Metadata.id_header``
and ``MetadataColumn.id_header`` properties.
Parameters
----------
name : string or None
Name to check against ID header requirements.
Returns
-------
bool
``True`` if `name` is a valid ID column header, ``False`` otherwise.
"""
    return bool(name and (name in SUPPORTED_ID_HEADERS['exact_match'] or
                          name.lower() in
                          SUPPORTED_ID_HEADERS['case_insensitive']))
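# Illustrative behaviour (follows directly from the sets above):
#   is_id_header('Sample ID')  -> True   (case-insensitive match)
#   is_id_header('#OTU ID')    -> True   (exact-match header)
#   is_id_header('#otu id')    -> False  (exact-match headers are
#                                         case-sensitive)
#   is_id_header(None)         -> False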
| 33.721311 | 79 | 0.595041 |
7942c16849a2f33fba611b9b95250460bd1c8784 | 7,144 | py | Python | main2.py | leimao/detr | cd88c4ea01257831ac677b6268e1aef7cd37eca4 | [
"Apache-2.0"
] | null | null | null | main2.py | leimao/detr | cd88c4ea01257831ac677b6268e1aef7cd37eca4 | [
"Apache-2.0"
] | null | null | null | main2.py | leimao/detr | cd88c4ea01257831ac677b6268e1aef7cd37eca4 | [
"Apache-2.0"
] | 2 | 2021-04-28T08:19:32.000Z | 2021-06-15T11:26:30.000Z | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import datetime
import json
import random
import time
from pathlib import Path
import numpy as np
import torch
from torch.utils.data import DataLoader, DistributedSampler
import datasets
import util.misc as utils
from datasets import build_dataset, get_coco_api_from_dataset
from engine import evaluate, train_one_epoch
from models import build_model
def get_args_parser():
parser = argparse.ArgumentParser('Set transformer detector', add_help=False)
parser.add_argument('--lr', default=1e-4, type=float)
parser.add_argument('--lr_backbone', default=1e-5, type=float)
parser.add_argument('--batch_size', default=2, type=int)
parser.add_argument('--weight_decay', default=1e-4, type=float)
parser.add_argument('--epochs', default=300, type=int)
parser.add_argument('--lr_drop', default=200, type=int)
parser.add_argument('--clip_max_norm', default=0.1, type=float,
help='gradient clipping max norm')
# Model parameters
parser.add_argument('--frozen_weights', type=str, default=None,
help="Path to the pretrained model. If set, only the mask head will be trained")
# * Backbone
parser.add_argument('--backbone', default='resnet50', type=str,
help="Name of the convolutional backbone to use")
parser.add_argument('--dilation', action='store_true',
help="If true, we replace stride with dilation in the last convolutional block (DC5)")
parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),
help="Type of positional embedding to use on top of the image features")
# * Transformer
parser.add_argument('--enc_layers', default=6, type=int,
help="Number of encoding layers in the transformer")
parser.add_argument('--dec_layers', default=6, type=int,
help="Number of decoding layers in the transformer")
parser.add_argument('--dim_feedforward', default=2048, type=int,
help="Intermediate size of the feedforward layers in the transformer blocks")
parser.add_argument('--hidden_dim', default=256, type=int,
help="Size of the embeddings (dimension of the transformer)")
parser.add_argument('--dropout', default=0.1, type=float,
help="Dropout applied in the transformer")
parser.add_argument('--nheads', default=8, type=int,
help="Number of attention heads inside the transformer's attentions")
parser.add_argument('--num_queries', default=100, type=int,
help="Number of query slots")
parser.add_argument('--pre_norm', action='store_true')
# * Segmentation
parser.add_argument('--masks', action='store_true',
help="Train segmentation head if the flag is provided")
# Loss
parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',
help="Disables auxiliary decoding losses (loss at each layer)")
# * Matcher
parser.add_argument('--set_cost_class', default=1, type=float,
help="Class coefficient in the matching cost")
parser.add_argument('--set_cost_bbox', default=5, type=float,
help="L1 box coefficient in the matching cost")
parser.add_argument('--set_cost_giou', default=2, type=float,
help="giou box coefficient in the matching cost")
# * Loss coefficients
parser.add_argument('--mask_loss_coef', default=1, type=float)
parser.add_argument('--dice_loss_coef', default=1, type=float)
parser.add_argument('--bbox_loss_coef', default=5, type=float)
parser.add_argument('--giou_loss_coef', default=2, type=float)
parser.add_argument('--eos_coef', default=0.1, type=float,
help="Relative classification weight of the no-object class")
# dataset parameters
parser.add_argument('--dataset_file', default='coco')
parser.add_argument('--coco_path', type=str)
parser.add_argument('--coco_panoptic_path', type=str)
parser.add_argument('--remove_difficult', action='store_true')
parser.add_argument('--output_dir', default='',
help='path where to save, empty for no saving')
parser.add_argument('--device', default='cuda',
help='device to use for training / testing')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--resume', default='', help='resume from checkpoint')
parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
help='start epoch')
parser.add_argument('--eval', action='store_true')
parser.add_argument('--num_workers', default=2, type=int)
# distributed training parameters
parser.add_argument('--world_size', default=1, type=int,
help='number of distributed processes')
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
return parser
def main(args):
utils.init_distributed_mode(args)
print("git:\n {}\n".format(utils.get_sha()))
if args.frozen_weights is not None:
assert args.masks, "Frozen training is meant for segmentation only"
print(args)
device = torch.device(args.device)
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
# dataset_train = build_dataset(image_set='train', args=args)
dataset_val = build_dataset(image_set='val', args=args)
# if args.distributed:
# sampler_train = DistributedSampler(dataset_train)
# sampler_val = DistributedSampler(dataset_val, shuffle=False)
# else:
# sampler_train = torch.utils.data.RandomSampler(dataset_train)
# sampler_val = torch.utils.data.SequentialSampler(dataset_val)
# batch_sampler_train = torch.utils.data.BatchSampler(
# sampler_train, args.batch_size, drop_last=True)
sampler_val = torch.utils.data.SequentialSampler(dataset_val)
# data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train,
# collate_fn=utils.collate_fn, num_workers=args.num_workers)
data_loader_val = DataLoader(dataset_val, args.batch_size, sampler=sampler_val,
drop_last=False, collate_fn=utils.collate_fn_leimao, num_workers=args.num_workers)
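    # NOTE: collate_fn_leimao is specific to this fork's util.misc (upstream
    # DETR's collate_fn returns a NestedTensor); the plain .shape access in
    # the loop below suggests it batches images into a single tensor.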
for inputs, labels in data_loader_val:
print("---------------------")
print(inputs.shape)
print(labels)
# for input_m in inputs.tensors:
# print(input_m.shape)
if __name__ == '__main__':
parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()])
args = parser.parse_args()
if args.output_dir:
Path(args.output_dir).mkdir(parents=True, exist_ok=True)
main(args)
| 45.794872 | 115 | 0.665454 |
7942c2d04eddd4181c6f1e30a6c9fc174383b843 | 301 | py | Python | preorder_iterative_bst.py | udyaluashok/Python-practice-questions | eed4ad27b663e7d0c0f7ff314ab5c524cecec62e | [
"MIT"
] | null | null | null | preorder_iterative_bst.py | udyaluashok/Python-practice-questions | eed4ad27b663e7d0c0f7ff314ab5c524cecec62e | [
"MIT"
] | null | null | null | preorder_iterative_bst.py | udyaluashok/Python-practice-questions | eed4ad27b663e7d0c0f7ff314ab5c524cecec62e | [
"MIT"
] | 1 | 2020-08-21T04:08:42.000Z | 2020-08-21T04:08:42.000Z | def preOrderTraversal(root):
    if root is None:
        return
    stack = [root]
    while len(stack) > 0:
        current = stack.pop()
        print(current.value)
        # Push the right child first so the left child sits on top of the
        # (LIFO) stack and is visited first: root -> left -> right.
        right = current.right
        if right is not None:
            stack.append(right)
        left = current.left
        if left is not None:
            stack.append(left)
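# Minimal usage sketch (hypothetical -- the original file defines no Node
# class; any binary-tree node with .value, .left and .right works):
#
#     class Node:
#         def __init__(self, value, left=None, right=None):
#             self.value = value
#             self.left = left
#             self.right = right
#
#     #       1
#     #      / \
#     #     2   3
#     tree = Node(1, Node(2), Node(3))
#     preOrderTraversal(tree)   # prints 1, 2, 3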
| 20.066667 | 28 | 0.621262 |
7942c3f73f500869a307c33dee1351f0af1d7ad0 | 5,176 | py | Python | test/ibmq/test_basic_server_paths.py | npoirie1/qiskit-ibmq-provider | ffea455f0c03bf7f074b72a20197aecbc3b5c101 | [
"Apache-2.0"
] | 1 | 2021-09-03T12:26:08.000Z | 2021-09-03T12:26:08.000Z | test/ibmq/test_basic_server_paths.py | chahatagarwal/qiskit-ibmq-provider | 2d32a1ddbd043c22e7693fbcdd8dfb18a3d43139 | [
"Apache-2.0"
] | null | null | null | test/ibmq/test_basic_server_paths.py | chahatagarwal/qiskit-ibmq-provider | 2d32a1ddbd043c22e7693fbcdd8dfb18a3d43139 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Tests that hit all the basic server endpoints using both a public and premium provider."""
import time
from qiskit.test import slow_test
from qiskit.providers.ibmq import least_busy
from qiskit.providers.ibmq.exceptions import IBMQBackendJobLimitError
from ..decorators import requires_providers
from ..ibmqtestcase import IBMQTestCase
from ..utils import cancel_job, bell_in_qobj
class TestBasicServerPaths(IBMQTestCase):
"""Test the basic server endpoints using both a public and premium provider."""
@classmethod
@requires_providers
def setUpClass(cls, providers):
# pylint: disable=arguments-differ
super().setUpClass()
cls.providers = providers # Dict[str, AccountProvider]
@slow_test
def test_job_submission(self):
"""Test running a job against a device."""
for desc, provider in self.providers.items():
backend = least_busy(provider.backends(
simulator=False, open_pulse=False,
filters=lambda b: b.configuration().n_qubits >= 5))
with self.subTest(desc=desc, backend=backend):
qobj = bell_in_qobj(backend)
job = self._submit_job_with_retry(qobj, backend)
# Fetch the results.
result = job.result()
self.assertTrue(result.success)
# Fetch the qobj.
qobj_downloaded = job.qobj()
self.assertEqual(qobj_downloaded.to_dict(), qobj.to_dict())
def test_job_backend_properties_and_status(self):
"""Test the backend properties and status of a job."""
for desc, provider in self.providers.items():
backend = provider.backends(
simulator=False, operational=True,
filters=lambda b: b.configuration().n_qubits >= 5)[0]
with self.subTest(desc=desc, backend=backend):
qobj = bell_in_qobj(backend)
job = self._submit_job_with_retry(qobj, backend)
self.assertIsNotNone(job.properties())
self.assertTrue(job.status())
# Cancel job so it doesn't consume more resources.
cancel_job(job, verify=True)
def test_retrieve_jobs(self):
"""Test retrieving jobs."""
backend_name = 'ibmq_qasm_simulator'
for desc, provider in self.providers.items():
backend = provider.get_backend(backend_name)
with self.subTest(desc=desc, backend=backend):
qobj = bell_in_qobj(backend)
job = self._submit_job_with_retry(qobj, backend)
job_id = job.job_id()
retrieved_jobs = provider.backends.jobs(backend_name=backend_name)
self.assertGreaterEqual(len(retrieved_jobs), 1)
retrieved_job_ids = {job.job_id() for job in retrieved_jobs}
self.assertIn(job_id, retrieved_job_ids)
def test_device_properties_and_defaults(self):
"""Test the properties and defaults for an open pulse device."""
for desc, provider in self.providers.items():
pulse_backends = provider.backends(open_pulse=True, operational=True)
if not pulse_backends:
raise self.skipTest('Skipping pulse test since no pulse backend '
'found for "{}"'.format(desc))
pulse_backend = pulse_backends[0]
with self.subTest(desc=desc, backend=pulse_backend):
self.assertIsNotNone(pulse_backend.properties())
self.assertIsNotNone(pulse_backend.defaults())
def test_device_status_and_job_limit(self):
"""Test the status and job limit for a device."""
for desc, provider in self.providers.items():
backend = provider.backends(simulator=False, operational=True)[0]
with self.subTest(desc=desc, backend=backend):
self.assertTrue(backend.status())
job_limit = backend.job_limit()
if desc == 'public_provider':
self.assertEqual(job_limit.maximum_jobs, 5)
self.assertTrue(job_limit)
def _submit_job_with_retry(self, qobj, backend, max_retry=5):
"""Retry submitting a job if limit is reached."""
limit_error = None
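        # IBMQBackendJobLimitError is transient -- it clears once queued
        # jobs finish -- so back off for a second and retry instead of
        # failing the test immediately.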
for _ in range(max_retry):
try:
job = backend.run(qobj, validate_qobj=True)
return job
except IBMQBackendJobLimitError as err:
limit_error = err
time.sleep(1)
self.fail("Unable to submit job after {} retries: {}".format(max_retry, limit_error))
| 42.42623 | 93 | 0.634467 |
7942c62fb5b4d8d3734ddf8c23a21b0f379d76ec | 759 | py | Python | nni/tools/trial_tool/url_utils.py | dutxubo/nni | c16f4e1c89b54b8b80661ef0072433d255ad2d24 | [
"MIT"
] | 9,680 | 2019-05-07T01:42:30.000Z | 2022-03-31T16:48:33.000Z | nni/tools/trial_tool/url_utils.py | dutxubo/nni | c16f4e1c89b54b8b80661ef0072433d255ad2d24 | [
"MIT"
] | 1,957 | 2019-05-06T21:44:21.000Z | 2022-03-31T09:21:53.000Z | nni/tools/trial_tool/url_utils.py | dutxubo/nni | c16f4e1c89b54b8b80661ef0072433d255ad2d24 | [
"MIT"
] | 1,571 | 2019-05-07T06:42:55.000Z | 2022-03-31T03:19:24.000Z | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from .constants import API_ROOT_URL, BASE_URL, STDOUT_API, NNI_TRIAL_JOB_ID, NNI_EXP_ID, VERSION_API, PARAMETER_META_API
def gen_send_stdout_url(ip, port):
'''Generate send stdout url'''
return '{0}:{1}{2}{3}/{4}/{5}'.format(BASE_URL.format(ip), port, API_ROOT_URL, STDOUT_API, NNI_EXP_ID, NNI_TRIAL_JOB_ID)
def gen_send_version_url(ip, port):
    '''Generate send version url'''
return '{0}:{1}{2}{3}/{4}/{5}'.format(BASE_URL.format(ip), port, API_ROOT_URL, VERSION_API, NNI_EXP_ID, NNI_TRIAL_JOB_ID)
def gen_parameter_meta_url(ip, port):
    '''Generate parameter meta url'''
return '{0}:{1}{2}{3}'.format(BASE_URL.format(ip), port, API_ROOT_URL, PARAMETER_META_API)
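# Shape of the generated URLs (angle brackets stand for the imported
# constants, whose concrete values live in .constants):
#   gen_send_stdout_url(ip, port)
#   -> '<BASE_URL % ip>:<port><API_ROOT_URL><STDOUT_API>/<NNI_EXP_ID>/<NNI_TRIAL_JOB_ID>'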
| 37.95 | 125 | 0.722003 |
7942c677758dda6a5f783b7492a1ff7bf5632cc7 | 1,425 | py | Python | setup.py | Skyge/brownie | 01785c803155d340fd0ea1bcfdf1b5e2670470d5 | [
"MIT"
] | null | null | null | setup.py | Skyge/brownie | 01785c803155d340fd0ea1bcfdf1b5e2670470d5 | [
"MIT"
] | 156 | 2020-07-20T21:23:47.000Z | 2021-07-27T21:21:46.000Z | setup.py | Skyge/brownie | 01785c803155d340fd0ea1bcfdf1b5e2670470d5 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
from setuptools import find_packages, setup
with open("README.md", "r") as fh:
long_description = fh.read()
with open("requirements.txt", "r") as f:
requirements = list(map(str.strip, f.read().split("\n")))[:-1]
setup(
name="eth-brownie",
packages=find_packages(),
version="1.7.3", # don't change this manually, use bumpversion instead
license="MIT",
description="A Python framework for Ethereum smart contract deployment, testing and interaction.", # noqa: E501
long_description=long_description,
long_description_content_type="text/markdown",
author="Ben Hauser",
author_email="[email protected]",
url="https://github.com/iamdefinitelyahuman/brownie",
keywords=["brownie"],
install_requires=requirements,
entry_points={
"console_scripts": ["brownie=brownie._cli.__main__:main"],
"pytest11": ["pytest-brownie=brownie.test.plugin"],
},
include_package_data=True,
python_requires=">=3.6,<4",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Topic :: Software Development :: Build Tools",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
)
| 34.756098 | 116 | 0.650526 |
7942c79486b930a5715ad5c48a7366edcf919c29 | 1,562 | py | Python | xlsxwriter/test/comparison/test_tutorial01.py | Rippling/XlsxWriter-1 | be8d1cb8f8b156cf87bbe5d591f1f5475804be44 | [
"BSD-2-Clause"
] | null | null | null | xlsxwriter/test/comparison/test_tutorial01.py | Rippling/XlsxWriter-1 | be8d1cb8f8b156cf87bbe5d591f1f5475804be44 | [
"BSD-2-Clause"
] | null | null | null | xlsxwriter/test/comparison/test_tutorial01.py | Rippling/XlsxWriter-1 | be8d1cb8f8b156cf87bbe5d591f1f5475804be44 | [
"BSD-2-Clause"
] | null | null | null | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2021, John McNamara, [email protected]
#
from ..excel_comparison_test import ExcelComparisonTest
from ...workbook import Workbook
class TestCompareXLSXFiles(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename('tutorial01.xlsx')
self.ignore_files = ['xl/calcChain.xml',
'[Content_Types].xml',
'xl/_rels/workbook.xml.rels']
def test_create_file(self):
"""Example spreadsheet used in the tutorial."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
# Some data we want to write to the worksheet.
expenses = (
['Rent', 1000],
['Gas', 100],
['Food', 300],
['Gym', 50],
)
# Start from the first cell. Rows and columns are zero indexed.
row = 0
col = 0
# Iterate over the data and write it out row by row.
for item, cost in (expenses):
worksheet.write(row, col, item)
worksheet.write(row, col + 1, cost)
row += 1
# Write a total using a formula.
worksheet.write(row, 0, 'Total')
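        # Passing None for the format and 1450 as the cached formula result
        # lets readers that don't recalculate formulas still see the value.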
worksheet.write(row, 1, '=SUM(B1:B4)', None, 1450)
workbook.close()
self.assertExcelEqual()
| 26.931034 | 79 | 0.544174 |
7942c8dedb0c58afe333ef52825ad9a807a2eefd | 9,094 | py | Python | bin/yb-prof.py | skahler-yuga/yugabyte-db | 3a69097e0230bc064c260dfddb0f8fddca2c60f1 | [
"Apache-2.0",
"CC0-1.0"
] | 1 | 2020-06-23T20:28:25.000Z | 2020-06-23T20:28:25.000Z | bin/yb-prof.py | skahler-yuga/yugabyte-db | 3a69097e0230bc064c260dfddb0f8fddca2c60f1 | [
"Apache-2.0",
"CC0-1.0"
] | 189 | 2021-02-19T01:23:31.000Z | 2021-04-02T01:03:14.000Z | bin/yb-prof.py | skahler-yuga/yugabyte-db | 3a69097e0230bc064c260dfddb0f8fddca2c60f1 | [
"Apache-2.0",
"CC0-1.0"
] | 1 | 2020-12-30T00:43:38.000Z | 2020-12-30T00:43:38.000Z | #!/usr/bin/env python3
#
# Copyright (c) YugaByte, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations
# under the License.
#
import getopt
import os
import re
import subprocess
import sys
class YBProf:
file_prefix_ = ""
total_in_use_count_ = 0
total_in_use_bytes_ = 0
total_alloc_count_ = 0
total_alloc_bytes_ = 0
records_ = []
symbols_ = {}
def __init__(self, output_prefix, pprof_url, seconds):
self.file_prefix_ = output_prefix
self.pprof_url_ = pprof_url
self.seconds_ = seconds
def parse_header_line(self, line):
print('header: %s' % line)
m = re.match(r"heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+)\]", line)
if m:
self.total_in_use_count_ = int(m.group(1))
self.total_in_use_bytes_ = int(m.group(2))
self.total_alloc_count_ = int(m.group(3))
self.total_alloc_bytes_ = int(m.group(4))
else:
print("Unexpected header format: %s" % line)
def parse_stack_line(self, line):
# print('body: %s' % line)
m = re.match(r" *(\d+): *(\d+) *\[ *(\d+): *(\d+)\] \@ (.*)\n", line)
if m:
in_use_count = int(m.group(1))
in_use_bytes = int(m.group(2))
alloc_count = int(m.group(3))
alloc_bytes = int(m.group(4))
stack = m.group(5)
functions = re.split(" ", stack)
# Add to a set of function addresses that we will symbolize later.
for f in functions:
self.symbols_[f] = ""
# Remember this record (the call stack and associated counters).
self.records_.append({"in_use_count": in_use_count,
"in_use_bytes": in_use_bytes,
"alloc_count": alloc_count,
"alloc_bytes": alloc_bytes,
"stack": functions})
else:
print("Unexpected format in line: %s" % line)
def invoke_heap_profile_handler(self):
heap_profile_url = self.pprof_url_ + "/heap?seconds=" + str(self.seconds_)
raw_output_file = self.file_prefix_ + ".raw.txt"
print("Invoking heap profile handler: " + heap_profile_url)
output_fhd = open(raw_output_file, "w")
result = subprocess.call(["curl", heap_profile_url], stdout=output_fhd)
print("Raw output: " + raw_output_file)
self.parse_heap_file(raw_output_file)
def symbolize_all(self):
print("Total Symbols " + str(len(self.symbols_)) + " symbols...")
# Symbolize the above symbols few at a time.
chunk_of_symbols = []
cnt = 0
for key in self.symbols_.keys():
chunk_of_symbols.append(key)
cnt = cnt + 1
if (len(chunk_of_symbols) == 25):
self.symbolize(chunk_of_symbols)
chunk_of_symbols = []
print("Completed symbolizing %d/%d symbols." % (cnt, len(self.symbols_)))
if (len(chunk_of_symbols) > 0):
self.symbolize(chunk_of_symbols)
def symbolize(self, symbols):
arg = "+".join(symbols)
result = subprocess.check_output(
["curl",
"--silent",
"-d",
arg,
self.pprof_url_ + "/symbol"])
lines = result.split("\n")
for line in lines:
# Use whitespace as the delimiter, and produce no more than two parts
# (i.e. max of 1 split). The symbolized function name may itself contain
# spaces. A few examples below (the latter two are example with spaces
# in the name of the function).
#
# 0x7fff950fcd23 std::__1::basic_string<>::append()
# 0x7fff950f943e operator new()
# 0x102296ceb yb::tserver::(anonymous namespace)::SetLastRow()
parts = line.split(None, 1)
num_parts = len(parts)
if num_parts == 0:
continue
elif num_parts == 2:
addr = parts[0]
symbol = parts[1]
self.symbols_[addr] = symbol
else:
print("Unexpected output line: " + line)
def print_records(self, sort_metric, filename):
max_call_stacks = 1000
idx = 0
print("Writing output to " + filename)
fhd = open(filename, "w")
fhd.write("<html>\n")
fhd.write("<title>Top Call Stacks By: " + sort_metric + "</title>\n")
fhd.write("<body style=\"font-family: sans-serif\">\n")
fhd.write("<b>Top " + str(max_call_stacks) + " Call Stacks By: " + sort_metric + "</b>\n")
fhd.write("<p>\n")
fhd.write("<table style=\"border-collapse: collapse\" border=1 cellpadding=5>\n")
fhd.write("<tr>\n")
fhd.write("<th>In Use Cnt</th>\n")
fhd.write("<th>In Use Bytes</th>\n")
fhd.write("<th>In Use Avg Size</th>\n")
fhd.write("<th>Alloc Cnt</th>\n")
fhd.write("<th>Alloc Bytes</th>\n")
fhd.write("<th>Alloc Avg Size</th>\n")
fhd.write("<th>Call Stack</th>\n")
fhd.write("</tr>\n")
sorted_records = sorted(self.records_, key=lambda k: k[sort_metric], reverse=True)
for record in sorted_records:
if (idx == max_call_stacks):
break
idx = idx + 1
functions = []
for addr in record.get("stack"):
fname = self.symbols_[addr]
# If the symbolization didn't happen for some reason,
# print the address itself for the function name.
if fname == "":
fname = addr
functions.append(fname)
fhd.write("<tr>\n")
in_use_count = record.get("in_use_count")
in_use_bytes = record.get("in_use_bytes")
in_use_avg = 0 if in_use_count == 0 else in_use_bytes * 1.0 / in_use_count
alloc_count = record.get("alloc_count")
alloc_bytes = record.get("alloc_bytes")
alloc_avg = 0 if alloc_count == 0 else alloc_bytes * 1.0 / alloc_count
stack = "\n".join(functions)
fhd.write(("<td>%d</td><td>%d</td><td>%.2f</td><td>%d</td>" +
"<td>%d</td><td>%.2f</td><td><pre>%s</pre></td>") %
(in_use_count, in_use_bytes, in_use_avg,
alloc_count, alloc_bytes, alloc_avg,
stack))
fhd.write("</tr>\n")
fhd.write("</table>")
fhd.write("</body>")
fhd.write("</html>\n")
fhd.close()
def parse_heap_file(self, filename):
line_num = 0
with open(filename) as f:
for line in f:
line_num = line_num + 1
if line_num == 1:
self.parse_header_line(line)
elif line == "\n":
continue
elif line == "MAPPED_LIBRARIES:\n":
print("End of stacks...")
break
else:
self.parse_stack_line(line)
self.symbolize_all()
self.print_records("in_use_bytes", self.file_prefix_ + ".in_use_bytes.html")
self.print_records("alloc_bytes", self.file_prefix_ + ".alloc_bytes.html")
def print_usage():
print("Usage:")
print('yb-prof.py --profile_url=<url> --output_file_prefix=<file_prefix> ' +
'--seconds=<time_in_seconds>')
print("**********************")
def main(argv):
profile_url = ''
    output_file_prefix = ''
seconds = '20'
try:
opts, args = getopt.getopt(argv, "h:", ["profile_url=", "output_file_prefix=", "seconds="])
except getopt.GetoptError:
print("Incorrect getopt options")
print_usage()
sys.exit(1)
for opt, arg in opts:
if opt == '-h':
print_usage()
sys.exit(1)
elif opt in ("--profile_url"):
profile_url = arg
elif opt in ("--output_file_prefix"):
output_file_prefix = arg
elif opt in ("--seconds"):
seconds = int(arg)
else:
assert False, "** Unhandled option. **"
if ((profile_url == '') or (output_file_prefix == '')):
print_usage()
sys.exit(1)
print("Profile Base URL:" + profile_url)
print("Output File Prefix:" + output_file_prefix)
heap_prof = YBProf(output_file_prefix, profile_url, seconds)
heap_prof.invoke_heap_profile_handler()
if __name__ == "__main__":
main(sys.argv[1:])
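# Example invocation (URL and output prefix are hypothetical):
#   ./yb-prof.py --profile_url=http://127.0.0.1:9000/pprof \
#       --output_file_prefix=/tmp/yb-heap --seconds=30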
| 37.423868 | 99 | 0.548384 |
7942c97269e99877322e727866750e500fa15e38 | 16,936 | py | Python | ontospy/core/entities.py | lambdamusic/Ontospy | 0b2c06b5aedcc0b755ba17af935558c70a4a03fa | [
"MIT"
] | 134 | 2017-04-09T01:39:07.000Z | 2022-03-31T13:40:05.000Z | ontospy/core/entities.py | lambdamusic/Ontospy | 0b2c06b5aedcc0b755ba17af935558c70a4a03fa | [
"MIT"
] | 76 | 2017-04-14T09:45:42.000Z | 2022-03-11T18:58:59.000Z | ontospy/core/entities.py | lambdamusic/Ontospy | 0b2c06b5aedcc0b755ba17af935558c70a4a03fa | [
"MIT"
] | 39 | 2017-06-04T01:51:42.000Z | 2022-03-08T14:53:09.000Z | # !/usr/bin/env python
# -*- coding: UTF-8 -*-
from __future__ import print_function
from colorama import Fore, Style
import rdflib
from itertools import count
# http://stackoverflow.com/questions/8628123/counting-instances-of-a-class
from . import *
from .utils import *
class RdfEntity(object):
"""
Pythonic representation of an RDF resource - normally not instantiated but used for
inheritance purposes
<triples> : a structure like this:
[(rdflib.term.URIRef(u'http://xmlns.com/foaf/0.1/OnlineChatAccount'),
rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#comment'),
rdflib.term.Literal(u'An online chat account.')),
(rdflib.term.URIRef(u'http://xmlns.com/foaf/0.1/OnlineChatAccount'),
rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#subClassOf')]
"""
_ids = count(0)
def __repr__(self):
return "<Ontospy: RdfEntity object for uri *%s*>" % (self.uri)
def __init__(self,
uri,
rdftype=None,
namespaces=None,
ext_model=False,
is_Bnode=False,
pref_title="qname",
pref_lang="en",
):
"""
Init ontology object. Load the graph in memory, then setup all necessary attributes.
2017-07-23
ext_model: flag to mark entities that are instantiated but are not part
of the main model (eg xsd:String range values)
is_Bnode: flag to identify Bnodes
"""
self.id = next(self._ids)
self.uri = uri # rdflib.Uriref
self.locale = inferURILocalSymbol(self.uri)[0]
self.ext_model = ext_model
self.is_Bnode = is_Bnode
self._pref_title = pref_title
self._pref_lang = pref_lang
self.slug = None
self.rdftype = rdftype
self.triples = None
self.rdflib_graph = rdflib.Graph()
self.namespaces = namespaces
self.all_shapes = []
self.qname = self._build_qname()
self.rdftype_qname = self._build_qname(rdftype)
self._children = []
self._parents = []
# self.siblings = []
def rdf_source(self, format="turtle"):
""" xml, n3, turtle, nt, pretty-xml, trix are built in"""
if self.triples:
if not self.rdflib_graph:
self._buildGraph()
s = self.rdflib_graph.serialize(format=format)
if isinstance(s, bytes):
s = s.decode('utf-8')
return s
else:
return None
def printSerialize(self, format="turtle"):
printInfo("\n" + self.rdf_source(format))
def printTriples(self):
""" display triples """
printInfo(Fore.RED + self.uri + Style.RESET_ALL)
for x in self.triples:
            printInfo(Fore.BLACK + "=> " + str(x[1]))
            printInfo(Style.DIM + ".... " + str(x[2]) + Fore.RESET)
printInfo("")
def _build_qname(self, uri=None, namespaces=None):
""" extracts a qualified name for a uri """
if not uri:
uri = self.uri
if not namespaces:
namespaces = self.namespaces
return uri2niceString(uri, namespaces)
def _buildGraph(self):
"""
transforms the triples list into a proper rdflib graph
(which can be used later for querying)
"""
for n in self.namespaces:
self.rdflib_graph.bind(n[0], rdflib.Namespace(n[1]))
if self.triples:
for terzetto in self.triples:
self.rdflib_graph.add(terzetto)
# methods added to RdfEntity even though they apply only to some subs
def ancestors(self, cl=None, noduplicates=True):
""" returns all ancestors in the taxonomy """
if not cl:
cl = self
if cl.parents():
bag = []
for x in cl.parents():
if x.uri != cl.uri: # avoid circular relationships
bag += [x] + self.ancestors(x, noduplicates)
else:
bag += [x]
# finally:
if noduplicates:
return remove_duplicates(bag)
else:
return bag
else:
return []
def descendants(self, cl=None, noduplicates=True):
""" returns all descendants in the taxonomy """
if not cl:
cl = self
if cl.children():
bag = []
for x in cl.children():
if x.uri != cl.uri: # avoid circular relationships
bag += [x] + self.descendants(x, noduplicates)
else:
bag += [x]
# finally:
if noduplicates:
return remove_duplicates(bag)
else:
return bag
else:
return []
def parents(self):
"""wrapper around property"""
return self._parents
def children(self):
"""wrapper around property"""
return self._children
def getValuesForProperty(self, aPropURIRef):
"""
generic way to extract some prop value eg
In [11]: c.getValuesForProperty(rdflib.RDF.type)
Out[11]:
[rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#Class'),
rdflib.term.URIRef(u'http://www.w3.org/2000/01/rdf-schema#Class')]
"""
if not type(aPropURIRef) == rdflib.URIRef:
aPropURIRef = rdflib.URIRef(aPropURIRef)
return list(self.rdflib_graph.objects(None, aPropURIRef))
def bestLabel(self, prefLanguage="", qname_allowed=True, quotes=False):
"""
facility for extrating the best available label for an entity
..This checks RFDS.label, SKOS.prefLabel and finally the qname local component
"""
test = self.getValuesForProperty(rdflib.RDFS.label)
out = ""
if not prefLanguage:
prefLanguage = self._pref_lang
if test:
out = firstStringInList(test, prefLanguage)
else:
test = self.getValuesForProperty(rdflib.namespace.SKOS.prefLabel)
if test:
out = firstStringInList(test, prefLanguage)
else:
if qname_allowed:
out = self.locale
if quotes and out:
return addQuotes(out)
else:
return out
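    # Illustrative resolution order for bestLabel (hypothetical entity):
    # an rdfs:label "Person"@en wins; otherwise skos:prefLabel is tried;
    # failing both, the qname local component is returned when
    # qname_allowed (e.g. "Person" from foaf:Person).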
def bestDescription(self, prefLanguage="", quotes=False):
"""
facility for extracting a human readable description for an entity
"""
test_preds = [
rdflib.RDFS.comment, rdflib.namespace.DCTERMS.description,
rdflib.namespace.DC.description, rdflib.namespace.SKOS.definition
]
if not prefLanguage:
prefLanguage = self._pref_lang
for pred in test_preds:
test = self.getValuesForProperty(pred)
# printInfo(str(test), "red")
if test:
if quotes:
return addQuotes(joinStringsInList(test, prefLanguage))
else:
return joinStringsInList(test, prefLanguage)
return ""
@property
def title(self):
"""Entity title - used for display purposes only.
Can be set by user once ontospy is created.
Values allowed: 'qname' or 'label'
Defaults to 'qname'.
"""
if self._pref_title == "qname":
out = self.qname
elif self._pref_title == "label":
out = self.bestLabel()
else:
return self.qname
return out
class Ontology(RdfEntity):
"""
Pythonic representation of an OWL ontology
"""
def __repr__(self):
return "<Ontospy: Ontology object for uri *%s*>" % (self.uri)
def __init__(self,
uri,
rdftype=None,
namespaces=None,
pref_prefix="",
ext_model=False,
pref_title="qname",
pref_lang="en",
):
"""
Init ontology object. Load the graph in memory, then setup all necessary attributes.
"""
super().__init__(uri, rdftype, namespaces, ext_model, pref_title=pref_title, pref_lang=pref_lang)
# self.uri = uri # rdflib.Uriref
self.prefix = pref_prefix
self.slug = "ontology-" + slugify(self.qname)
self.all_classes = []
self.all_properties = []
self.all_skos_concepts = []
def annotations(self, qname=True):
"""
wrapper that returns all triples for an onto.
By default resources URIs are transformed into qnames
"""
if qname:
return sorted([(uri2niceString(x, self.namespaces)
), (uri2niceString(y, self.namespaces)), z]
for x, y, z in self.triples)
else:
return sorted(self.triples)
def describe(self):
""" shotcut to pull out useful info for interactive use """
# self.printGenericTree()
# self.printTriples()
printInfo(self.uri, "green")
self.stats()
def stats(self):
""" shotcut to pull out useful info for interactive use """
printInfo("Classes.....: %d" % len(self.all_classes))
printInfo("Properties..: %d" % len(self.all_properties))
class OntoClass(RdfEntity):
"""
Python representation of a generic class within an ontology.
Includes methods for representing and querying RDFS/OWL classes
domain_of_inferred: a list of dict
[{<Class *http://xmlns.com/foaf/0.1/Person*>:
[<Property *http://xmlns.com/foaf/0.1/currentProject*>,<Property *http://xmlns.com/foaf/0.1/familyName*>,
etc....]},
{<Class *http://www.w3.org/2003/01/geo/wgs84_pos#SpatialThing*>:
[<Property *http://xmlns.com/foaf/0.1/based_near*>, etc...]},
]
"""
def __init__(self, uri, rdftype=None, namespaces=None,
ext_model=False, pref_title="qname", pref_lang="en"):
"""
...
"""
super().__init__(uri, rdftype, namespaces, ext_model,
pref_title=pref_title, pref_lang=pref_lang)
self.slug = "class-" + slugify(self.qname)
self.domain_of = []
self.range_of = []
self.domain_of_inferred = []
self.range_of_inferred = []
self.ontology = None
self._instances = False # calc on demand at runtime
self.sparqlHelper = None # the original graph the class derives from
self.shapedProperties = [
] #properties of this class that belong to a shape
def __repr__(self):
return "<Class *%s*>" % (self.uri)
@property
def instances(self): # = all instances
if self._instances == False:
# calculate and set
self._instances = []
if self.sparqlHelper:
qres = self.sparqlHelper.getClassInstances(self.uri)
for uri in [x[0] for x in qres]:
instance = RdfEntity(uri, self.uri, self.namespaces)
instance.triples = self.sparqlHelper.entityTriples(
instance.uri)
instance._buildGraph() # force construction of mini graph
self._instances += [instance]
return self._instances
else:
# it's been calc already, hence return
return self._instances
def count(self):
return len(self.instances)
def printStats(self):
""" shortcut to pull out useful info for interactive use """
printInfo("----------------")
printInfo("Parents......: %d" % len(self.parents()))
printInfo("Children.....: %d" % len(self.children()))
printInfo("Ancestors....: %d" % len(self.ancestors()))
printInfo("Descendants..: %d" % len(self.descendants()))
printInfo("Domain of....: %d" % len(self.domain_of))
printInfo("Range of.....: %d" % len(self.range_of))
printInfo("Instances....: %d" % self.count())
printInfo("----------------")
def printGenericTree(self):
printGenericTree(self)
def describe(self):
""" shotcut to pull out useful info for interactive use """
# self.printTriples()
printInfo(self.uri, "green")
self.printStats()
# self.printGenericTree()
class OntoProperty(RdfEntity):
"""
Python representation of a generic RDF/OWL property.
rdftype is one of:
rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#ObjectProperty')
rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#DatatypeProperty')
rdflib.term.URIRef(u'http://www.w3.org/2002/07/owl#AnnotationProperty')
rdflib.term.URIRef(u'http://www.w3.org/1999/02/22-rdf-syntax-ns#Property')
"""
def __init__(self, uri, rdftype=None, namespaces=None, ext_model=False,
pref_title="qname", pref_lang="en"):
"""
...
"""
super().__init__(uri, rdftype, namespaces, ext_model, pref_title=pref_title, pref_lang=pref_lang)
self.slug = "prop-" + slugify(self.qname)
self.rdftype = inferMainPropertyType(rdftype)
self.domains = []
self.ranges = []
self.ontology = None
def __repr__(self):
return "<Property *%s*>" % (self.uri)
def printGenericTree(self):
printGenericTree(self)
def printStats(self):
""" shotcut to pull out useful info for interactive use """
printInfo("----------------")
printInfo("Parents......: %d" % len(self.parents()))
printInfo("Children.....: %d" % len(self.children()))
printInfo("Ancestors....: %d" % len(self.ancestors()))
printInfo("Descendants..: %d" % len(self.descendants()))
printInfo("Has Domain...: %d" % len(self.domains))
printInfo("Has Range....: %d" % len(self.ranges))
printInfo("----------------")
def describe(self):
""" shotcut to pull out useful info for interactive use """
# self.printTriples()
printInfo(self.uri, "green")
self.printStats()
# self.printGenericTree()
class OntoSKOSConcept(RdfEntity):
"""
Python representation of a generic SKOS concept within an ontology.
@todo: complete methods..
"""
def __init__(self, uri, rdftype=None, namespaces=None, ext_model=False, pref_title="qname", pref_lang="en"):
"""
...
"""
super().__init__(uri, rdftype, namespaces,
ext_model, pref_title=pref_title, pref_lang=pref_lang)
self.slug = "concept-" + slugify(self.qname)
self.instance_of = []
self.ontology = None
self.sparqlHelper = None # the original graph the class derives from
def __repr__(self):
return "<SKOS Concept *%s*>" % (self.uri)
def printStats(self):
""" shotcut to pull out useful info for interactive use """
printInfo("----------------")
printInfo("Parents......: %d" % len(self.parents()))
printInfo("Children.....: %d" % len(self.children()))
printInfo("Ancestors....: %d" % len(self.ancestors()))
printInfo("Descendants..: %d" % len(self.descendants()))
printInfo("----------------")
def printGenericTree(self):
printGenericTree(self)
def describe(self):
""" shotcut to pull out useful info for interactive use """
# self.printTriples()
printInfo(self.uri, "green")
self.printStats()
self.printGenericTree()
class OntoShape(RdfEntity):
"""
Python representation of a SHACL shape.
"""
def __init__(self, uri, rdftype=None, namespaces=None, ext_model=False, pref_title="qname", pref_lang="en"):
"""
...
"""
super().__init__(uri, rdftype, namespaces, ext_model, pref_title=pref_title, pref_lang=pref_lang)
self.slug = "shape-" + slugify(self.qname)
self.ontology = None
self.targetClasses = []
self.sparqlHelper = None # the original graph the class derives from
def __repr__(self):
return "<SHACL shape *%s*>" % (self.uri)
def printStats(self):
""" shotcut to pull out useful info for interactive use """
printInfo("----------------")
printInfo("Parents......: %d" % len(self.parents()))
printInfo("Children.....: %d" % len(self.children()))
printInfo("Ancestors....: %d" % len(self.ancestors()))
printInfo("Descendants..: %d" % len(self.descendants()))
printInfo("----------------")
def describe(self):
""" shotcut to pull out useful info for interactive use """
# self.printTriples()
self.printStats()
| 33.404339 | 117 | 0.564478 |
7942caf3031338dcf1da4a72b4cce99e8dafa940 | 5,443 | py | Python | alipay/aop/api/domain/RoyaltyDetailInfos.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/RoyaltyDetailInfos.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/RoyaltyDetailInfos.py | articuly/alipay-sdk-python-all | 0259cd28eca0f219b97dac7f41c2458441d5e7a6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import simplejson as json
from alipay.aop.api.constant.ParamConstants import *
class RoyaltyDetailInfos(object):
def __init__(self):
self._amount = None
self._amount_percentage = None
self._batch_no = None
self._desc = None
self._out_relation_id = None
self._serial_no = None
self._trans_in = None
self._trans_in_type = None
self._trans_out = None
self._trans_out_type = None
@property
def amount(self):
return self._amount
@amount.setter
def amount(self, value):
self._amount = value
@property
def amount_percentage(self):
return self._amount_percentage
@amount_percentage.setter
def amount_percentage(self, value):
self._amount_percentage = value
@property
def batch_no(self):
return self._batch_no
@batch_no.setter
def batch_no(self, value):
self._batch_no = value
@property
def desc(self):
return self._desc
@desc.setter
def desc(self, value):
self._desc = value
@property
def out_relation_id(self):
return self._out_relation_id
@out_relation_id.setter
def out_relation_id(self, value):
self._out_relation_id = value
@property
def serial_no(self):
return self._serial_no
@serial_no.setter
def serial_no(self, value):
self._serial_no = value
@property
def trans_in(self):
return self._trans_in
@trans_in.setter
def trans_in(self, value):
self._trans_in = value
@property
def trans_in_type(self):
return self._trans_in_type
@trans_in_type.setter
def trans_in_type(self, value):
self._trans_in_type = value
@property
def trans_out(self):
return self._trans_out
@trans_out.setter
def trans_out(self, value):
self._trans_out = value
@property
def trans_out_type(self):
return self._trans_out_type
@trans_out_type.setter
def trans_out_type(self, value):
self._trans_out_type = value
def to_alipay_dict(self):
params = dict()
if self.amount:
if hasattr(self.amount, 'to_alipay_dict'):
params['amount'] = self.amount.to_alipay_dict()
else:
params['amount'] = self.amount
if self.amount_percentage:
if hasattr(self.amount_percentage, 'to_alipay_dict'):
params['amount_percentage'] = self.amount_percentage.to_alipay_dict()
else:
params['amount_percentage'] = self.amount_percentage
if self.batch_no:
if hasattr(self.batch_no, 'to_alipay_dict'):
params['batch_no'] = self.batch_no.to_alipay_dict()
else:
params['batch_no'] = self.batch_no
if self.desc:
if hasattr(self.desc, 'to_alipay_dict'):
params['desc'] = self.desc.to_alipay_dict()
else:
params['desc'] = self.desc
if self.out_relation_id:
if hasattr(self.out_relation_id, 'to_alipay_dict'):
params['out_relation_id'] = self.out_relation_id.to_alipay_dict()
else:
params['out_relation_id'] = self.out_relation_id
if self.serial_no:
if hasattr(self.serial_no, 'to_alipay_dict'):
params['serial_no'] = self.serial_no.to_alipay_dict()
else:
params['serial_no'] = self.serial_no
if self.trans_in:
if hasattr(self.trans_in, 'to_alipay_dict'):
params['trans_in'] = self.trans_in.to_alipay_dict()
else:
params['trans_in'] = self.trans_in
if self.trans_in_type:
if hasattr(self.trans_in_type, 'to_alipay_dict'):
params['trans_in_type'] = self.trans_in_type.to_alipay_dict()
else:
params['trans_in_type'] = self.trans_in_type
if self.trans_out:
if hasattr(self.trans_out, 'to_alipay_dict'):
params['trans_out'] = self.trans_out.to_alipay_dict()
else:
params['trans_out'] = self.trans_out
if self.trans_out_type:
if hasattr(self.trans_out_type, 'to_alipay_dict'):
params['trans_out_type'] = self.trans_out_type.to_alipay_dict()
else:
params['trans_out_type'] = self.trans_out_type
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = RoyaltyDetailInfos()
if 'amount' in d:
o.amount = d['amount']
if 'amount_percentage' in d:
o.amount_percentage = d['amount_percentage']
if 'batch_no' in d:
o.batch_no = d['batch_no']
if 'desc' in d:
o.desc = d['desc']
if 'out_relation_id' in d:
o.out_relation_id = d['out_relation_id']
if 'serial_no' in d:
o.serial_no = d['serial_no']
if 'trans_in' in d:
o.trans_in = d['trans_in']
if 'trans_in_type' in d:
o.trans_in_type = d['trans_in_type']
if 'trans_out' in d:
o.trans_out = d['trans_out']
if 'trans_out_type' in d:
o.trans_out_type = d['trans_out_type']
return o
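# Minimal round-trip sketch (field values are hypothetical):
#   info = RoyaltyDetailInfos.from_alipay_dict(
#       {'amount': '0.10', 'trans_in': '2088101126708402',
#        'trans_in_type': 'userId'})
#   payload = json.dumps(info.to_alipay_dict())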
| 30.926136 | 85 | 0.589197 |
7942cc0241fe32295029812ef9568b7c57e7bb80 | 2,838 | py | Python | hasher-matcher-actioner/scripts/submitter.py | ekmixon/ThreatExchange | 947737affbdfabc67bb12021366683895c817de8 | [
"BSD-3-Clause"
] | null | null | null | hasher-matcher-actioner/scripts/submitter.py | ekmixon/ThreatExchange | 947737affbdfabc67bb12021366683895c817de8 | [
"BSD-3-Clause"
] | null | null | null | hasher-matcher-actioner/scripts/submitter.py | ekmixon/ThreatExchange | 947737affbdfabc67bb12021366683895c817de8 | [
"BSD-3-Clause"
] | null | null | null | #! /usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import time
import threading
import uuid
import datetime
import typing as t
from hma_client_lib import DeployedInstanceClient
class Submitter(threading.Thread):
def __init__(
self,
client: DeployedInstanceClient,
batch_size: int,
seconds_between_batches: int,
filepaths: t.List[str] = [],
**kwargs,
):
super(Submitter, self).__init__(**kwargs)
self.daemon = True
self._stop_signal = threading.Event()
self._lock = threading.Lock()
self.client = client
self.batch_size = batch_size
self.seconds_between_batches = seconds_between_batches
self.filepaths = filepaths
self.total_submitted = 0
def stop(self):
self._stop_signal.set()
def stopped(self):
return self._stop_signal.is_set()
def run(self):
while not self.stopped() and self._lock.acquire():
if self.stopped():
self._lock.release()
return
try:
batch_prefix = f"soak-test-{str(uuid.uuid4())}"
for i in range(self.batch_size):
content_id = f"{batch_prefix}{i}-time-{datetime.datetime.now().isoformat()}-time-"
if self.filepaths:
self.client.submit_test_content(
content_id, filepath=self.filepaths[i % len(self.filepaths)]
)
else:
self.client.submit_test_content(content_id)
self.total_submitted += 1
finally:
self._lock.release()
time.sleep(self.seconds_between_batches)
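    # Design note: run() holds the lock for a whole batch so the setters
    # below can safely retune batch_size / cadence from another thread
    # between batches; stopped() is re-checked after acquiring the lock
    # so a shutdown request never triggers one final batch.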
def get_total_submit_count(self) -> int:
with self._lock:
return self.total_submitted
def get_current_values(self) -> t.Tuple[int, int, int]:
with self._lock:
return (self.batch_size, self.seconds_between_batches, self.total_submitted)
def set_batch_size(self, batch_size: int):
with self._lock:
self.batch_size = batch_size
def set_seconds_between_batches(self, seconds_between_batches: int):
with self._lock:
self.seconds_between_batches = seconds_between_batches
if __name__ == "__main__":
API_URL = ""
TOKEN = ""
api_url = os.environ.get(
"HMA_API_URL",
API_URL,
)
token = os.environ.get(
"HMA_TOKEN",
TOKEN,
)
client = DeployedInstanceClient(api_url, token)
submitter = Submitter(client, batch_size=5, seconds_between_batches=5)
submitter.start()
cmd = ""
while cmd != "q":
cmd = input("Enter 'q' to shutdown: ")
submitter.stop()
| 28.959184 | 102 | 0.593728 |
7942cc4b50013f1c47123d5ebadedf0900d8aabc | 1,915 | py | Python | data/train/python/7942cc4b50013f1c47123d5ebadedf0900d8aabcControllerManager.py | harshp8l/deep-learning-lang-detection | 2a54293181c1c2b1a2b840ddee4d4d80177efb33 | [
"MIT"
] | 84 | 2017-10-25T15:49:21.000Z | 2021-11-28T21:25:54.000Z | data/train/python/7942cc4b50013f1c47123d5ebadedf0900d8aabcControllerManager.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 5 | 2018-03-29T11:50:46.000Z | 2021-04-26T13:33:18.000Z | data/train/python/7942cc4b50013f1c47123d5ebadedf0900d8aabcControllerManager.py | vassalos/deep-learning-lang-detection | cbb00b3e81bed3a64553f9c6aa6138b2511e544e | [
"MIT"
] | 24 | 2017-11-22T08:31:00.000Z | 2022-03-27T01:22:31.000Z |
import os
import sys
import ControllerBase
import Settings
gControllerMan = False
def GetControllerManager():
global gControllerMan
if gControllerMan == False:
gControllerMan = ControllerManager()
return gControllerMan
class ControllerManager:
modules = {}
controllers = {}
def __init__(self):
self.ImportAvailableController()
def ImportAvailableController(self):
controllersPath = Settings.Settings["PATH"] + "/Controllers"
sys.path.append(controllersPath)
for controllerName in os.listdir(controllersPath):
if controllerName[0] != '.' and controllerName.endswith(".py"):
controllerName = controllerName[:controllerName.find(".")]
self.modules[controllerName] = __import__(controllerName)
print "Loaded controller: " + controllerName
print ""
def StartConfiguredControllers(self):
configPath = Settings.Settings["PATH"] + "/Config"
sys.path.append(configPath)
for configFile in os.listdir(configPath):
if configFile[0] != '.' and configFile.endswith(".controller"):
name = configFile[:configFile.find(".")]
settingsFile = open(configPath + "/" + configFile, 'r')
lines = settingsFile.readlines()
settingsFile.close()
settings = {}
for line in lines:
line = line.strip("\n").strip(" ")
if len(line) > 0:
if line[0] != '#':
settingName = line[:line.find("=")]
settingValue = line[line.find("=")+1:]
settings[settingName] = settingValue
if not settings.has_key("CONTROLLER"):
print configFile + " has no CONTROLLER defined, skipping"
elif not self.modules.has_key(settings["CONTROLLER"]):
print settings["CONTROLLER"] + " is not a recognized controller name, skipping"
else:
self.controllers[name] = self.modules[settings["CONTROLLER"]].Controller(settings)
print "Started controller: " + settings["CONTROLLER"] + " with Config/" + configFile
print ""
| 28.58209 | 89 | 0.692428 |
7942cc86a5514339b4f0a555add7b02d3648f827 | 4,856 | py | Python | Bot/src/tools/emoji.py | AryamanSrii/Mecha-Karen | 4a5c7318f8c458495eee72a13be5db8a0113ed28 | [
"Apache-2.0"
] | 181 | 2021-05-26T17:37:40.000Z | 2022-02-26T08:36:07.000Z | Bot/src/tools/emoji.py | AryamanSrii/Mecha-Karen | 4a5c7318f8c458495eee72a13be5db8a0113ed28 | [
"Apache-2.0"
] | 24 | 2021-05-14T19:47:34.000Z | 2021-09-06T17:16:17.000Z | Bot/src/tools/emoji.py | AryamanSrii/Mecha-Karen | 4a5c7318f8c458495eee72a13be5db8a0113ed28 | [
"Apache-2.0"
] | 16 | 2021-07-02T09:40:56.000Z | 2022-01-21T10:07:08.000Z | # !/usr/bin/python
"""
Copyright ©️: 2020 Seniatical / _-*™#7519
License: Apache 2.0
A permissive license whose main conditions require preservation of copyright and license notices.
Contributors provide an express grant of patent rights.
Licensed works, modifications, and larger works may be distributed under different terms and without source code.
FULL LICENSE CAN BE FOUND AT:
https://www.apache.org/licenses/LICENSE-2.0.html
Any violation to the license, will result in moderate action
You are legally required to mention (original author, license, source and any changes made)
"""
import discord
from discord.ext import commands
import aiohttp
from io import BytesIO
import datetime
class Emoji(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.session = aiohttp.ClientSession()
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
async def enlarge(self, ctx, emoji: str):
try:
_id = emoji.split(':')[-1][:-1]
except IndexError:
return await ctx.send('Invalid emoji provided')
endpoint = 'https://cdn.discordapp.com/emojis/'
try:
endpoint += '{}.{}?v=1'.format(_id, 'png' if emoji[1].lower() != 'a' else 'gif')
except IndexError:
return await ctx.send('Invalid emoji provided')
try:
embed = discord.Embed(title=emoji.split(':')[-2], url=endpoint, colour=discord.Colour.green())
except IndexError:
return await ctx.send('Invalid emoji provided')
embed.set_image(url=endpoint)
await ctx.send(embed=embed)
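        # e.g. "<a:party:123456789012345678>" (a hypothetical emoji)
        # resolves to
        # https://cdn.discordapp.com/emojis/123456789012345678.gif?v=1;
        # index 1 of the raw mention is 'a' only for animated emojis.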
@commands.command()
@commands.cooldown(1, 10, commands.BucketType.user)
@commands.bot_has_guild_permissions(manage_emojis=True)
@commands.has_guild_permissions(manage_emojis=True)
async def steal(self, ctx, emoji: str = None, *, name: str = None):
message = ctx.message
if len(ctx.guild.emojis) == ctx.guild.emoji_limit:
return await ctx.send(embed=discord.Embed(description='<a:nope:787764352387776523> You have reached the maximum allowance for emojis!', colour=discord.Colour.red()))
if not message.attachments and not emoji:
return await ctx.send(embed=discord.Embed(description='<a:nope:787764352387776523> Need to give an emoji to add', colour=discord.Colour.red()))
if message.attachments:
if not emoji:
return await ctx.send(embed=discord.Embed(description='<a:nope:787764352387776523> Need to give a name for your emoji', colour=discord.Colour.red()))
url = message.attachments[0].url
try:
data = await (await self.session.get(url)).read()
except Exception:
return await ctx.send(embed=discord.Embed(description='<a:nope:787764352387776523> Invalid attachment provided', colour=discord.Colour.red()))
try:
cxn = await ctx.guild.create_custom_emoji(name=emoji[:32], image=data, reason=f'{ctx.author} has used the steal command')
except Exception:
return await ctx.send(embed=discord.Embed(description='<a:nope:787764352387776523> Image was too large. Maximum filesize for emojis is 256.0 kb!', colour=discord.Colour.red()))
else:
try:
_id = int(emoji.split(':')[-1][:-1])
endpoint = 'https://cdn.discordapp.com/emojis/'
endpoint += '{}.{}?v=1'.format(_id, 'png' if emoji[1].lower() != 'a' else 'gif')
name = name or emoji.split(':')[-2]
data = await (await self.session.get(endpoint)).read()
except ValueError:
try:
data = await (await self.session.get(emoji)).read()
except Exception:
return await ctx.send(embed=discord.Embed(description='<a:nope:787764352387776523> Invalid Image URL provided', colour=discord.Colour.red()))
if not name:
return await ctx.send(embed=discord.Embed(description='<a:nope:787764352387776523> Need to provide a name for your emoji', colour=discord.Colour.red()))
try:
cxn = await ctx.guild.create_custom_emoji(name=name[:32].replace(' ', '_'), image=data, reason=f'{ctx.author} has used the steal command')
except Exception:
return await ctx.send(embed=discord.Embed(description='<a:nope:787764352387776523> Image was too large. Maximum filesize for emojis is 256.0 kb!', colour=discord.Colour.red()))
await ctx.send(embed=discord.Embed(description=f'{cxn} | New [emoji]({cxn.url}) created using the name `:{cxn.name}:`', colour=discord.Colour.green()))
def setup(bot):
bot.add_cog(Emoji(bot))
| 50.061856 | 192 | 0.636944 |
7942cce3e667098fe17f991ebe432a46ace2c61e | 18,326 | py | Python | research/vrgripper/vrgripper_env_models.py | IronOnet/tensor2robot | 351cecbf76b71d09b56a766b981e1a15f85d9528 | [
"Apache-2.0"
] | 2 | 2021-10-31T01:06:00.000Z | 2021-11-08T09:43:25.000Z | research/vrgripper/vrgripper_env_models.py | IronOnet/tensor2robot | 351cecbf76b71d09b56a766b981e1a15f85d9528 | [
"Apache-2.0"
] | null | null | null | research/vrgripper/vrgripper_env_models.py | IronOnet/tensor2robot | 351cecbf76b71d09b56a766b981e1a15f85d9528 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2019 The Tensor2Robot Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""T2RModels for VRGripper env tasks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
import numpy as np
from tensor2robot.google import distortion
from tensor2robot.layers import mdn
from tensor2robot.layers import vision_layers
from tensor2robot.meta_learning import meta_tfdata
from tensor2robot.models import abstract_model
from tensor2robot.models import regression_model
from tensor2robot.preprocessors import abstract_preprocessor
from tensor2robot.utils import tensorspec_utils
import tensorflow.compat.v1 as tf # tf
import tensorflow_probability as tfp
from typing import Callable, Dict, List, Optional, Text, Tuple
from tensorflow.contrib import layers as contrib_layers
TensorSpec = tensorspec_utils.ExtendedTensorSpec
TRAIN = tf.estimator.ModeKeys.TRAIN
PREDICT = tf.estimator.ModeKeys.PREDICT
FLOAT_DTYPES = [tf.bfloat16, tf.float32, tf.float64]
@gin.configurable
class DefaultVRGripperPreprocessor(abstract_preprocessor.AbstractPreprocessor):
"""The default VRGripperEnv preprocessor."""
def __init__(self,
src_img_res = (220, 300),
crop_size = (200, 280),
mixup_alpha = 0.0,
**kwargs):
"""Construct the preprocessor.
Args:
src_img_res: The true height and width of the image data. If the model
expects images of a different size, we automatically resize the images.
crop_size: Before resizing the image, take a crop of the image to this
height and width. Is a no-op if equal to src_img_res. Crop is done
randomly at train time, and is take from the center otherwise.
mixup_alpha: If > 0., turns on Mixup data augmentation for features and
labels.
**kwargs: Keyword args passed to parent class.
"""
super(DefaultVRGripperPreprocessor, self).__init__(**kwargs)
self._src_img_res = src_img_res
self._crop_size = crop_size
self._mixup_alpha = mixup_alpha
def get_in_feature_specification(self, mode
):
"""See base class."""
feature_spec = tensorspec_utils.copy_tensorspec(
self._model_feature_specification_fn(mode))
# Don't want to parse the original_image, since we don't want to parse it
# and we are adding this feature in preprocess_fn to satisfy the model's
# inputs.
if mode != PREDICT and 'original_image' in feature_spec:
del feature_spec['original_image']
if 'image' in feature_spec:
true_img_shape = feature_spec.image.shape.as_list()
# Overwrite the H, W dimensions.
true_img_shape[-3:-1] = self._src_img_res
feature_spec.image = TensorSpec.from_spec(
feature_spec.image, shape=true_img_shape, dtype=tf.uint8)
return tensorspec_utils.flatten_spec_structure(feature_spec)
def get_in_label_specification(self, mode
):
"""See base class."""
return tensorspec_utils.flatten_spec_structure(
self._model_label_specification_fn(mode))
def get_out_feature_specification(self, mode
):
"""See base class."""
return tensorspec_utils.flatten_spec_structure(
self._model_feature_specification_fn(mode))
def get_out_label_specification(self, mode
):
"""See base class."""
return tensorspec_utils.flatten_spec_structure(
self._model_label_specification_fn(mode))
def _preprocess_fn(
self, features,
labels,
mode
):
"""Resize images and convert them from uint8 -> float32."""
if 'image' in features:
ndim = len(features.image.shape)
is_sequence = (ndim > 4)
input_size = self._src_img_res
target_size = self._crop_size
features.original_image = features.image
features.image = distortion.preprocess_image(features.image, mode,
is_sequence, input_size,
target_size)
features.image = tf.image.convert_image_dtype(features.image, tf.float32)
out_feature_spec = self.get_out_feature_specification(mode)
if out_feature_spec.image.shape != features.image.shape:
features.image = meta_tfdata.multi_batch_apply(
tf.image.resize_images, 2, features.image,
out_feature_spec.image.shape.as_list()[-3:-1])
if self._mixup_alpha > 0. and labels and mode == TRAIN:
lmbda = tfp.distributions.Beta(
self._mixup_alpha, self._mixup_alpha).sample()
for key, x in features.items():
if x.dtype in FLOAT_DTYPES:
features[key] = lmbda * x + (1-lmbda)*tf.reverse(x, axis=[0])
if labels is not None:
for key, x in labels.items():
if x.dtype in FLOAT_DTYPES:
labels[key] = lmbda * x + (1 - lmbda) * tf.reverse(x, axis=[0])
return features, labels
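  # Standalone sketch of the Mixup blend applied above: every float
  # feature/label tensor x becomes
  #   lmbda * x + (1 - lmbda) * tf.reverse(x, axis=[0])
  # with lmbda ~ Beta(mixup_alpha, mixup_alpha), i.e. each example is
  # blended with its mirror-position partner in the batch.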
@gin.configurable
class VRGripperRegressionModel(regression_model.RegressionModel):
"""Continuous regression output model for VRGripper Env."""
def __init__(self,
use_gripper_input = True,
normalize_outputs = False,
output_mean = None,
output_stddev = None,
outer_loss_multiplier = 1.,
num_mixture_components = 1,
output_mixture_sample = False,
condition_mixture_stddev = False,
episode_length = 40,
**kwargs):
"""Initialize the VRGripperRegressionModel.
Args:
use_gripper_input: If True, concatenate gripper pose with input to the
fully connected layers when predicting actions.
normalize_outputs: If True, scale actions by `output_stddev` and
translate by `output_mean`.
output_mean: The empirical mean of demonstration actions.
output_stddev: The empirical standard deviation of demonstration actions.
outer_loss_multiplier: A scaling factor for the outer loss.
num_mixture_components: The number of gaussian mixture components. Use 1
for standard mean squared error regression.
output_mixture_sample: If True (and num_mixture_components > 1), output
actions by sampling from a gaussian mixture. Otherwise, we use the mean
of the most likely component.
condition_mixture_stddev: If True, the mixture standard deviations will be
output from a neural net and thus conditioned on image/state. Otherwise,
they will simply be learned variables (unconditioned on image/state).
episode_length: The fixed length of an episode in the data.
**kwargs: Passed to parent.
Raises:
ValueError: If `output_mean` or `output_stddev` have incorrect length.
"""
super(VRGripperRegressionModel, self).__init__(**kwargs)
self._use_gripper_input = use_gripper_input
self._normalize_outputs = normalize_outputs
self._output_mean = None
self._output_stddev = None
self._outer_loss_multiplier = outer_loss_multiplier
self._num_mixture_components = num_mixture_components
self._output_mixture_sample = output_mixture_sample
self._condition_mixture_stddev = condition_mixture_stddev
self._episode_length = episode_length
if output_mean and output_stddev:
if not len(output_mean) == len(output_stddev) == self.action_size:
raise ValueError(
'Output mean and stddev have lengths {:d} and {:d}.'.format(
len(output_mean), len(output_stddev)))
self._output_mean = np.array([output_mean])
self._output_stddev = np.array([output_stddev])
@property
def default_preprocessor_cls(self):
return DefaultVRGripperPreprocessor
def get_feature_specification(self, mode):
del mode
image_spec = TensorSpec(
shape=(100, 100, 3),
dtype=tf.float32,
name='image0',
data_format='jpeg')
gripper_pose_spec = TensorSpec(
shape=(14,), dtype=tf.float32, name='world_pose_gripper')
tspec = tensorspec_utils.TensorSpecStruct(
image=image_spec, gripper_pose=gripper_pose_spec)
return tensorspec_utils.copy_tensorspec(
tspec, batch_size=self._episode_length)
def get_label_specification(self, mode):
del mode
action_spec = TensorSpec(
shape=(self._action_size,), dtype=tf.float32, name='action_world')
tspec = tensorspec_utils.TensorSpecStruct(action=action_spec)
return tensorspec_utils.copy_tensorspec(
tspec, batch_size=self._episode_length)
@property
def action_size(self):
return self._action_size
def _single_batch_a_func(self,
features,
scope,
mode,
context_fn=None,
reuse=tf.AUTO_REUSE):
"""A state -> action regression function that expects a single batch dim."""
gripper_pose = features.gripper_pose if self._use_gripper_input else None
with tf.variable_scope(scope, reuse=reuse, use_resource=True):
with tf.variable_scope('state_features', reuse=reuse, use_resource=True):
feature_points, end_points = vision_layers.BuildImagesToFeaturesModel(
features.image,
is_training=(mode == TRAIN),
normalizer_fn=contrib_layers.layer_norm)
if context_fn:
feature_points = context_fn(feature_points)
fc_input = tf.concat([feature_points, gripper_pose], -1)
outputs = {}
if self._num_mixture_components > 1:
dist_params = mdn.predict_mdn_params(
fc_input,
self._num_mixture_components,
self._action_size,
condition_sigmas=self._condition_mixture_stddev)
gm = mdn.get_mixture_distribution(
dist_params, self._num_mixture_components, self._action_size,
self._output_mean if self._normalize_outputs else None)
if self._output_mixture_sample:
# Output a mixture sample as action.
action = gm.sample()
else:
action = mdn.gaussian_mixture_approximate_mode(gm)
outputs['dist_params'] = dist_params
else:
action, _ = vision_layers.BuildImageFeaturesToPoseModel(
fc_input, num_outputs=self._action_size)
        # Respect the normalize_outputs flag (mirrors the MDN branch above).
        if self._normalize_outputs:
          action = self._output_mean + self._output_stddev * action
outputs.update({
'inference_output': action,
'image': features.image,
'feature_points': feature_points,
'softmax': end_points['softmax']
})
return outputs
def a_func(self,
features,
scope,
mode,
context_fn=None,
reuse=tf.AUTO_REUSE,
config=None,
params=None):
"""A (state) regression function.
This function can return a stochastic or a deterministic tensor.
Args:
features: This is the first item returned from the input_fn and parsed by
tensorspec_utils.validate_and_pack. A spec_structure which fulfills the
requirements of the self.get_feature_spefication.
scope: String specifying variable scope.
mode: (ModeKeys) Specifies if this is training, evaluation or prediction.
context_fn: Optional python function that takes in features and returns
new features of same shape. For merging information like in RL^2.
reuse: Whether or not to reuse variables under variable scope 'scope'.
config: Optional configuration object. Will receive what is passed to
Estimator in config parameter, or the default config. Allows updating
things in your model_fn based on configuration such as num_ps_replicas,
or model_dir.
params: An optional dict of hyper parameters that will be passed into
input_fn and model_fn. Keys are names of parameters, values are basic
python types. There are reserved keys for TPUEstimator, including
'batch_size'.
Returns:
outputs: A {key: Tensor} mapping. The key 'action' is required.
"""
del config, params
return meta_tfdata.multi_batch_apply(self._single_batch_a_func, 2, features,
scope, mode, context_fn, reuse)
def loss_fn(self, labels, inference_outputs, mode, params=None):
"""This implements outer loss and configurable inner losses."""
if params and params.get('is_outer_loss', False):
pass
if self._num_mixture_components > 1:
gm = mdn.get_mixture_distribution(
inference_outputs['dist_params'], self._num_mixture_components,
self._action_size,
self._output_mean if self._normalize_outputs else None)
return -tf.reduce_mean(gm.log_prob(labels.action))
else:
return self._outer_loss_multiplier * tf.losses.mean_squared_error(
labels=labels.action,
predictions=inference_outputs['inference_output'])
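  # Note: with num_mixture_components > 1 this is the negative
  # log-likelihood of the demonstrated action under the predicted
  # Gaussian mixture; with a single component it reduces to plain MSE
  # scaled by outer_loss_multiplier.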
@gin.configurable
class VRGripperDomainAdaptiveModel(VRGripperRegressionModel):
"""Base model which uses a learned loss to do domain adaptive imitation.
The model conditions on video only (no actions or gripper pose).
"""
def __init__(self,
predict_con_gripper_pose = False,
learned_loss_conv1d_layers = (10, 10,
6),
**kwargs):
"""Initialize the model.
Args:
predict_con_gripper_pose: If True, predict the condition gripper pose
input from the image features. Otherwise, set to zeros.
learned_loss_conv1d_layers: A tuple describing the conv1d layers of the
learned loss. If None, the learned loss won't use conv1d layers.
**kwargs: Passed to parent.
"""
super(VRGripperDomainAdaptiveModel, self).__init__(**kwargs)
self._predict_con_gripper_pose = predict_con_gripper_pose
self._learned_loss_conv1d_layers = learned_loss_conv1d_layers
def _predict_gripper_pose(self, feature_points):
"""Predict the condition gripper pose from feature points."""
out = feature_points
out = tf.layers.dense(out, 40, activation=tf.nn.relu, use_bias=False)
out = contrib_layers.layer_norm(out)
out = tf.layers.dense(out, 14, activation=None)
return out
def single_batch_a_func(
self, features, scope,
mode,
context_fn, reuse,
config,
params):
"""Single step action predictor when there is a single batch dim."""
del config
with tf.variable_scope(scope, reuse=reuse, use_resource=True):
with tf.variable_scope('state_features', reuse=reuse, use_resource=True):
feature_points, end_points = vision_layers.BuildImagesToFeaturesModel(
features.image,
is_training=(mode == TRAIN),
normalizer_fn=contrib_layers.layer_norm)
if context_fn:
feature_points = context_fn(feature_points)
if params and params.get('is_inner_loop', False):
if self._predict_con_gripper_pose:
gripper_pose = self._predict_gripper_pose(feature_points)
else:
gripper_pose = tf.zeros_like(features.gripper_pose)
else:
gripper_pose = features.gripper_pose
action, _ = vision_layers.BuildImageFeaturesToPoseModel(
feature_points, aux_input=gripper_pose, num_outputs=self._action_size)
      if self._normalize_outputs:
        action = self._output_mean + self._output_stddev * action
return {
'inference_output': action,
'image': features.image,
'feature_points': feature_points,
'softmax': end_points['softmax'],
}
def a_func(self,
features,
scope,
mode,
context_fn = None,
reuse=tf.AUTO_REUSE,
config = None,
params = None
):
"""Single step action predictor. See parent class."""
return meta_tfdata.multi_batch_apply(self.single_batch_a_func, 2, features,
scope, mode, context_fn, reuse, config,
params)
def model_train_fn(self,
features,
labels,
inference_outputs,
mode,
config = None,
params = None
):
"""Output learned loss if inner loop, or behavior clone if outer loop."""
if params and params.get('is_outer_loss', False):
# Outer loss case: use standard RegressionModel loss.
return self.loss_fn(labels, inference_outputs, mode, params)
# Inner loss case: compute learned loss function.
with tf.variable_scope(
'learned_loss', reuse=tf.AUTO_REUSE, use_resource=True):
predicted_action, _ = meta_tfdata.multi_batch_apply(
vision_layers.BuildImageFeaturesToPoseModel,
2,
inference_outputs['feature_points'],
num_outputs=self._action_size)
if self._learned_loss_conv1d_layers is None:
        # The inference outputs dict uses the key 'inference_output' (see
        # single_batch_a_func above), not 'action'.
        return tf.losses.mean_squared_error(predicted_action,
                                            inference_outputs['inference_output'])
ll_input = tf.concat([
predicted_action, inference_outputs['feature_points'],
inference_outputs['inference_output']
], -1)
net = ll_input
for num_filters in self._learned_loss_conv1d_layers[:-1]:
net = tf.layers.conv1d(
net, num_filters, 10, activation=tf.nn.relu, use_bias=False)
net = contrib_layers.layer_norm(net)
net = tf.layers.conv1d(net, self._learned_loss_conv1d_layers[-1],
1) # 1x1 convolution.
return tf.reduce_mean(tf.reduce_sum(tf.square(net), axis=(1, 2)))
| 40.815145 | 80 | 0.669213 |
7942cd64f350b473262cdcaf73d3170ccc72bcf4 | 147 | py | Python | vote/apps.py | jjmutumi/djangotutorial | e0855f229cecea9413c3306e71782d6885eb02f7 | [
"MIT"
] | null | null | null | vote/apps.py | jjmutumi/djangotutorial | e0855f229cecea9413c3306e71782d6885eb02f7 | [
"MIT"
] | null | null | null | vote/apps.py | jjmutumi/djangotutorial | e0855f229cecea9413c3306e71782d6885eb02f7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.apps import AppConfig
class VoteConfig(AppConfig):
name = 'vote'
| 18.375 | 39 | 0.727891 |
7942ce81f4808492e8b56a0975055d4b55dbe328 | 4,995 | py | Python | bokeh/sphinxext/bokeh_gallery.py | RiccardoGiro/bokeh | 7e1bfeea4f3d7a6296aabfeec96e79e1f5a28467 | [
"BSD-3-Clause"
] | 1 | 2020-07-02T06:06:18.000Z | 2020-07-02T06:06:18.000Z | bokeh/sphinxext/bokeh_gallery.py | Deng-Fankang/bokeh | 894731860c53b7c9ddd0057dee85cf064278dc0e | [
"BSD-3-Clause"
] | 12 | 2020-08-26T20:19:29.000Z | 2020-08-26T20:19:52.000Z | bokeh/sphinxext/bokeh_gallery.py | Deng-Fankang/bokeh | 894731860c53b7c9ddd0057dee85cf064278dc0e | [
"BSD-3-Clause"
] | 1 | 2020-03-06T07:38:50.000Z | 2020-03-06T07:38:50.000Z | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
""" Generate a gallery of Bokeh plots from a configuration file.
"""
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
import json
import os
from os.path import abspath, dirname, exists, getmtime, isdir, isfile, join
# External imports
from sphinx.errors import SphinxError
from sphinx.util import ensuredir, status_iterator
# Bokeh imports
from .bokeh_directive import BokehDirective
from .templates import GALLERY_DETAIL, GALLERY_PAGE
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'BokehGalleryDirective',
'setup',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class BokehGalleryDirective(BokehDirective):
has_content = False
required_arguments = 1
def run(self):
env = self.state.document.settings.env
docdir = dirname(env.doc2path(env.docname))
gallery_file = join(docdir, self.arguments[0])
gallery_dir = join(dirname(dirname(gallery_file)), "gallery")
        if not (exists(gallery_dir) and isdir(gallery_dir)):
raise SphinxError("gallery dir %r missing for gallery file %r" % (gallery_dir, gallery_file))
spec = json.load(open(gallery_file))
        names = [detail['name'] for detail in spec['details']]
rst_text = GALLERY_PAGE.render(names=names)
return self._parse(rst_text, "<bokeh-gallery>")
def config_inited_handler(app, config):
gallery_dir = join(app.srcdir, config.bokeh_gallery_dir)
gallery_file = gallery_dir + ".json"
    if not (exists(gallery_file) and isfile(gallery_file)):
raise SphinxError("could not find gallery file %r for configured gallery dir %r" % (gallery_file, gallery_dir))
gallery_file_mtime = getmtime(gallery_file)
ensuredir(gallery_dir)
# we will remove each file we process from this set and see if anything is
# left at the end (and remove it in that case)
extras = set(os.listdir(gallery_dir))
# app.env.note_dependency(specpath)
spec = json.load(open(gallery_file))
details = spec['details']
names = set(x['name'] for x in details)
if len(names) < len(details):
raise SphinxError("gallery file %r has duplicate names" % gallery_file)
details_iter = status_iterator(details,
'creating gallery file entries... ',
'brown',
len(details),
app.verbosity,
stringify_func=lambda x: x['name'] + ".rst")
for detail in details_iter:
detail_file_name = detail['name'] + ".rst"
detail_file_path = join(gallery_dir, detail_file_name)
if detail_file_path in extras:
extras.remove(detail_file_path)
# if the gallery detail file is newer than the gallery file, assume it is up to date
if exists(detail_file_path) and getmtime(detail_file_path) > gallery_file_mtime:
continue
with open(detail_file_path, "w") as f:
source_path = abspath(join(app.srcdir, "..", "..", detail['path']))
f.write(GALLERY_DETAIL.render(filename=detail['name']+'.py', source_path=source_path))
for extra_file in extras:
os.remove(join(gallery_dir, extra_file))
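# Illustrative gallery .json spec consumed above (names and paths are
# hypothetical; each "name" must be unique and "path" is resolved
# relative to the repo root two levels above app.srcdir):
#   {"details": [{"name": "line_plot", "path": "examples/line_plot.py"}]}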
def setup(app):
''' Required Sphinx extension setup function. '''
app.add_config_value('bokeh_gallery_dir', join("docs", "gallery"), 'html')
app.connect('config-inited', config_inited_handler)
app.add_directive('bokeh-gallery', BokehGalleryDirective)
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| 37 | 119 | 0.498098 |
7942cf7ea891b9d7257503dbc179c71352c4f93a | 11,193 | py | Python | nssrc/com/citrix/netscaler/nitro/resource/config/wi/wisite_translationinternalip_binding.py | mahabs/nitro | be74e1e177f5c205c16126bc9b023f2348788409 | [
"Apache-2.0"
] | null | null | null | nssrc/com/citrix/netscaler/nitro/resource/config/wi/wisite_translationinternalip_binding.py | mahabs/nitro | be74e1e177f5c205c16126bc9b023f2348788409 | [
"Apache-2.0"
] | null | null | null | nssrc/com/citrix/netscaler/nitro/resource/config/wi/wisite_translationinternalip_binding.py | mahabs/nitro | be74e1e177f5c205c16126bc9b023f2348788409 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class wisite_translationinternalip_binding(base_resource) :
""" Binding class showing the translationinternalip that can be bound to wisite.
"""
def __init__(self) :
self._translationinternalip = ""
self._accesstype = ""
self._translationinternalport = 0
self._translationexternalip = ""
self._translationexternalport = 0
self._sitepath = ""
self.___count = 0
@property
def sitepath(self) :
"""Path to the Web Interface site.<br/>Minimum length = 1<br/>Maximum length = 250.
"""
try :
return self._sitepath
except Exception as e:
raise e
@sitepath.setter
def sitepath(self, sitepath) :
"""Path to the Web Interface site.<br/>Minimum length = 1<br/>Maximum length = 250
"""
try :
self._sitepath = sitepath
except Exception as e:
raise e
@property
def accesstype(self) :
"""Type of access to the XenApp or XenDesktop server.
Available settings function as follows:
* User Device - Clients can use the translated address of the mapping entry to connect to the XenApp or XenDesktop server.
* Gateway - Access Gateway can use the translated address of the mapping entry to connect to the XenApp or XenDesktop server.
* User Device and Gateway - Both clients and Access Gateway can use the translated address of the mapping entry to connect to the XenApp or XenDesktop server.<br/>Default value: UserDevice<br/>Possible values = UserDevice, Gateway, UserDeviceAndGateway.
"""
try :
return self._accesstype
except Exception as e:
raise e
@accesstype.setter
def accesstype(self, accesstype) :
"""Type of access to the XenApp or XenDesktop server.
Available settings function as follows:
* User Device - Clients can use the translated address of the mapping entry to connect to the XenApp or XenDesktop server.
* Gateway - Access Gateway can use the translated address of the mapping entry to connect to the XenApp or XenDesktop server.
* User Device and Gateway - Both clients and Access Gateway can use the translated address of the mapping entry to connect to the XenApp or XenDesktop server.<br/>Default value: UserDevice<br/>Possible values = UserDevice, Gateway, UserDeviceAndGateway
"""
try :
self._accesstype = accesstype
except Exception as e:
raise e
@property
def translationexternalport(self) :
"""External port number associated with the server's port number.<br/>Range 1 - 65535.
"""
try :
return self._translationexternalport
except Exception as e:
raise e
@translationexternalport.setter
def translationexternalport(self, translationexternalport) :
"""External port number associated with the server's port number.<br/>Range 1 - 65535
"""
try :
self._translationexternalport = translationexternalport
except Exception as e:
raise e
@property
def translationinternalip(self) :
"""IP address of the server for which you want to associate an external IP address. (Clients access the server through the associated external address and port.).<br/>Default value: 0.
"""
try :
return self._translationinternalip
except Exception as e:
raise e
@translationinternalip.setter
def translationinternalip(self, translationinternalip) :
"""IP address of the server for which you want to associate an external IP address. (Clients access the server through the associated external address and port.).<br/>Default value: 0
"""
try :
self._translationinternalip = translationinternalip
except Exception as e:
raise e
@property
def translationexternalip(self) :
"""External IP address associated with server's IP address.
"""
try :
return self._translationexternalip
except Exception as e:
raise e
@translationexternalip.setter
def translationexternalip(self, translationexternalip) :
"""External IP address associated with server's IP address.
"""
try :
self._translationexternalip = translationexternalip
except Exception as e:
raise e
@property
def translationinternalport(self) :
"""Port number of the server for which you want to associate an external port. (Clients access the server through the associated external address and port.).<br/>Range 1 - 65535.
"""
try :
return self._translationinternalport
except Exception as e:
raise e
@translationinternalport.setter
def translationinternalport(self, translationinternalport) :
"""Port number of the server for which you want to associate an external port. (Clients access the server through the associated external address and port.).<br/>Range 1 - 65535
"""
try :
self._translationinternalport = translationinternalport
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(wisite_translationinternalip_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.wisite_translationinternalip_binding
except Exception as e :
raise e
def _get_object_name(self) :
""" Returns the value of object identifier argument
"""
try :
if (self.sitepath) :
return str(self.sitepath)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
try :
if resource and type(resource) is not list :
updateresource = wisite_translationinternalip_binding()
updateresource.sitepath = resource.sitepath
updateresource.translationinternalip = resource.translationinternalip
updateresource.translationexternalip = resource.translationexternalip
updateresource.accesstype = resource.accesstype
return updateresource.update_resource(client)
else :
if resource and len(resource) > 0 :
updateresources = [wisite_translationinternalip_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].sitepath = resource[i].sitepath
updateresources[i].translationinternalip = resource[i].translationinternalip
updateresources[i].translationexternalip = resource[i].translationexternalip
updateresources[i].accesstype = resource[i].accesstype
return cls.update_bulk_request(client, updateresources)
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
try :
if resource and type(resource) is not list :
deleteresource = wisite_translationinternalip_binding()
deleteresource.sitepath = resource.sitepath
deleteresource.translationinternalip = resource.translationinternalip
deleteresource.translationexternalip = resource.translationexternalip
return deleteresource.delete_resource(client)
else :
if resource and len(resource) > 0 :
deleteresources = [wisite_translationinternalip_binding() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].sitepath = resource[i].sitepath
deleteresources[i].translationinternalip = resource[i].translationinternalip
deleteresources[i].translationexternalip = resource[i].translationexternalip
return cls.delete_bulk_request(client, deleteresources)
except Exception as e :
raise e
@classmethod
def get(cls, service, sitepath) :
""" Use this API to fetch wisite_translationinternalip_binding resources.
"""
try :
obj = wisite_translationinternalip_binding()
obj.sitepath = sitepath
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, sitepath, filter_) :
""" Use this API to fetch filtered set of wisite_translationinternalip_binding resources.
		Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = wisite_translationinternalip_binding()
obj.sitepath = sitepath
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, sitepath) :
""" Use this API to count wisite_translationinternalip_binding resources configued on NetScaler.
"""
try :
obj = wisite_translationinternalip_binding()
obj.sitepath = sitepath
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, sitepath, filter_) :
""" Use this API to count the filtered set of wisite_translationinternalip_binding resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = wisite_translationinternalip_binding()
obj.sitepath = sitepath
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
class Accessmethod:
Direct = "Direct"
Alternate = "Alternate"
Translated = "Translated"
GatewayDirect = "GatewayDirect"
GatewayAlternate = "GatewayAlternate"
GatewayTranslated = "GatewayTranslated"
class Accesstype:
UserDevice = "UserDevice"
Gateway = "Gateway"
UserDeviceAndGateway = "UserDeviceAndGateway"
class Transport:
HTTP = "HTTP"
HTTPS = "HTTPS"
SSLRELAY = "SSLRELAY"
class Loadbalance:
ON = "ON"
OFF = "OFF"
class Recoveryfarm:
ON = "ON"
OFF = "OFF"
class wisite_translationinternalip_binding_response(base_response) :
def __init__(self, length=1) :
self.wisite_translationinternalip_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.wisite_translationinternalip_binding = [wisite_translationinternalip_binding() for _ in range(length)]
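# A minimal usage sketch (illustrative): fetch and print the bindings for one
# Web Interface site. The authenticated nitro_service session "client" and the
# site path below are assumptions for the example.
#
#     bindings = wisite_translationinternalip_binding.get(client, "/Citrix/XenApp/")
#     for binding in bindings:
#         print(binding.translationinternalip, "->", binding.translationexternalip)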
| 35.087774 | 255 | 0.748146 |
7942cfe9f8c43f8bdb0e8d12d458ed41450fb5e7 | 5,877 | py | Python | voila/exporter.py | zappyzac/voila | 68e5e660226312638b13db90e249eec967cb72d2 | ["BSD-3-Clause"] | null | null | null | voila/exporter.py | zappyzac/voila | 68e5e660226312638b13db90e249eec967cb72d2 | ["BSD-3-Clause"] | null | null | null | voila/exporter.py | zappyzac/voila | 68e5e660226312638b13db90e249eec967cb72d2 | ["BSD-3-Clause"] | null | null | null |
#############################################################################
# Copyright (c) 2018, Voila Contributors #
# Copyright (c) 2018, QuantStack #
# #
# Distributed under the terms of the BSD 3-Clause License. #
# #
# The full license is in the file LICENSE, distributed with this software. #
#############################################################################
import mimetypes
import traitlets
from traitlets.config import Config
from jinja2 import contextfilter
import jinja2
from nbconvert.filters.markdown_mistune import IPythonRenderer, MarkdownWithMath
from nbconvert.exporters.html import HTMLExporter
from nbconvert.exporters.templateexporter import TemplateExporter
from nbconvert.filters.highlight import Highlight2HTML
class VoilaMarkdownRenderer(IPythonRenderer):
"""Custom markdown renderer that inlines images"""
def image(self, src, title, text):
contents_manager = self.options['contents_manager']
if contents_manager.file_exists(src):
content = contents_manager.get(src, format='base64')
data = content['content'].replace('\n', '') # remove the newline
mime_type, encoding = mimetypes.guess_type(src)
src = 'data:{mime_type};base64,{data}'.format(mime_type=mime_type, data=data)
return super(VoilaMarkdownRenderer, self).image(src, title, text)
class VoilaExporter(HTMLExporter):
"""Custom HTMLExporter that inlines the images using VoilaMarkdownRenderer"""
base_url = traitlets.Unicode(help="Base url for resources").tag(config=True)
markdown_renderer_class = traitlets.Type('mistune.Renderer').tag(config=True)
# The voila exporter overrides the markdown renderer from the HTMLExporter
# to inline images.
@contextfilter
def markdown2html(self, context, source):
cell = context['cell']
attachments = cell.get('attachments', {})
cls = self.markdown_renderer_class
renderer = cls(escape=False, attachments=attachments,
contents_manager=self.contents_manager,
anchor_link_text=self.anchor_link_text)
return MarkdownWithMath(renderer=renderer).render(source)
# The voila exporter disables the CSSHTMLHeaderPreprocessor from the HTMLExporter.
@property
def default_config(self):
c = Config({
'CSSHTMLHeaderPreprocessor': {
'enabled': False
},
'VoilaExporter': {
'markdown_renderer_class': 'voila.exporter.VoilaMarkdownRenderer'
}
})
c.merge(super(VoilaExporter, self).default_config)
return c
# Instead, we use the VoilaCSSPreprocessor.
@traitlets.default('preprocessors')
def _default_preprocessors(self):
return ['voila.csspreprocessor.VoilaCSSPreprocessor']
# Overriding the default template file.
@traitlets.default('template_file')
def default_template_file(self):
return 'index.html.j2'
async def generate_from_notebook_node(self, nb, resources=None, extra_context={}, **kw):
# this replaces from_notebook_node, but calls template.generate instead of template.render
langinfo = nb.metadata.get('language_info', {})
lexer = langinfo.get('pygments_lexer', langinfo.get('name', None))
highlight_code = self.filters.get('highlight_code', Highlight2HTML(pygments_lexer=lexer, parent=self))
self.register_filter('highlight_code', highlight_code)
# NOTE: we don't call HTML or TemplateExporter' from_notebook_node
nb_copy, resources = super(TemplateExporter, self).from_notebook_node(nb, resources, **kw)
resources.setdefault('raw_mimetypes', self.raw_mimetypes)
resources['global_content_filter'] = {
'include_code': not self.exclude_code_cell,
'include_markdown': not self.exclude_markdown,
'include_raw': not self.exclude_raw,
'include_unknown': not self.exclude_unknown,
'include_input': not self.exclude_input,
'include_output': not self.exclude_output,
'include_input_prompt': not self.exclude_input_prompt,
'include_output_prompt': not self.exclude_output_prompt,
'no_prompt': self.exclude_input_prompt and self.exclude_output_prompt,
}
async for output in self.template.generate_async(nb=nb_copy, resources=resources, **extra_context):
yield (output, resources)
@property
def environment(self):
# enable Jinja async template execution
self.enable_async = True
        # super(type(self), self) recurses infinitely under subclassing;
        # name the class explicitly instead.
        env = super(VoilaExporter, self).environment
if 'jinja2.ext.do' not in env.extensions:
env.add_extension('jinja2.ext.do')
return env
def get_template_paths(self):
return self.template_path
def _init_resources(self, resources):
def include_css(name):
code = """<link rel="stylesheet" type="text/css" href="%svoila/%s">""" % (self.base_url, name)
return jinja2.Markup(code)
def include_js(name):
code = """<script src="%svoila/%s"></script>""" % (self.base_url, name)
return jinja2.Markup(code)
def include_url(name):
url = "%svoila/%s" % (self.base_url, name)
return jinja2.Markup(url)
resources = super(VoilaExporter, self)._init_resources(resources)
resources['include_css'] = include_css
resources['include_js'] = include_js
resources['include_url'] = include_url
return resources
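# A minimal usage sketch (illustrative): drive the async generator above. The
# notebook node "nb", the contents manager wiring, and the output buffer are
# assumed to be supplied by the caller.
#
#     exporter = VoilaExporter(base_url='/')
#     async for html, resources in exporter.generate_from_notebook_node(nb):
#         buffer.write(html)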
| 42.89781 | 110 | 0.627021 |
7942d0d94f80cc55e261ea1d6244feb44cf7437e | 10,763 | py | Python | analemmatic.py | cmcqueen/sundials | 0b7fbf7bcdef4687c1805436bdac006d777b23f2 | ["MIT"] | null | null | null | analemmatic.py | cmcqueen/sundials | 0b7fbf7bcdef4687c1805436bdac006d777b23f2 | ["MIT"] | null | null | null | analemmatic.py | cmcqueen/sundials | 0b7fbf7bcdef4687c1805436bdac006d777b23f2 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
"""
Calculation and generation of analemmatic sundial.
References:
Plus Magazine http://pass.maths.org.uk/issue11/features/sundials/index.html
Wikipedia http://en.wikipedia.org/wiki/Analemmatic_sundial
Calculations have been done according to the Plus Magazine reference.
Dependencies:
    - Python 3.x
- NumPy
- matplotlib
"""
import datetime
import logging
from collections import namedtuple
import sys
import matplotlib
#matplotlib.use('pdf')
#matplotlib.use('svg')
from matplotlib import pyplot as plt
from matplotlib import lines
from matplotlib import text
import matplotlib.patches
import numpy as np
import sun_declination
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
# Named tuple to hold geographic location
Location = namedtuple('Location', 'latitude, longitude, timezone, location')
if True:
LOCATION = Location(-37.81, 144.96, 10, 'Melbourne, Victoria, Australia')
HOUR_LINE_MIN = 5
HOUR_LINE_MAX = 20
EXTENT_MAJOR = 1.2
EXTENT_MINOR = 0.75
elif True:
LOCATION = Location(35.10, 138.86, 9, 'Numazu, Japan')
HOUR_LINE_MIN = 4
HOUR_LINE_MAX = 19
EXTENT_MAJOR = 1.2
EXTENT_MINOR = 0.75
else:
LOCATION = Location(51.3809, -2.3603, 0, 'Bath, England')
HOUR_LINE_MIN = 3
HOUR_LINE_MAX = 21
EXTENT_MAJOR = 1.2
EXTENT_MINOR = 1.1
NUMERAL_OFFSET = 1.1
DATE_SCALE_X_EXTENT = 0.15
DATE_SCALE_TICK_X = 0.1
DATE_SCALE_TEXT_X = 0.025
def equatorial_hour_angle(hour, location):
"""Midnight is angle 0.
6 am is angle pi/2.
midday is angle pi.
etc."""
equatorial_angle = (hour - location.timezone) * 2 * np.pi / 24 + (np.deg2rad(location.longitude))
logging.getLogger("hour.angle.equ").debug("For hour %d, equatorial angle %g" % (hour, np.rad2deg(equatorial_angle)))
return equatorial_angle
def rotated_equatorial_hour_angle(hour, location):
"""Angles rotated so midday is up on mathematical angle range.
Midday is pi/2.
6 am is pi.
6 pm is 0.
etc."""
equatorial_angle = equatorial_hour_angle(hour, location)
equatorial_angle_from_solar_noon = equatorial_angle - np.pi
# Angle currently is angle referenced from solar noon, positive (pm) towards the east.
# Change to mathematical angle, anticlockwise from 0 in the east.
return np.pi / 2 - equatorial_angle_from_solar_noon
def analemmatic_horiz_hour_angle(hour, location):
equatorial_angle = equatorial_hour_angle(hour, location)
equatorial_angle_from_solar_noon = equatorial_angle - np.pi
logging.getLogger("hour.angle.equ.noon").debug("For hour %d, equatorial angle from solar noon %g" % (hour, equatorial_angle_from_solar_noon * 180 / np.pi))
# negative (am) is towards the west; positive (pm) towards the east
a_x = np.cos(equatorial_angle_from_solar_noon)
a_y = np.sin(equatorial_angle_from_solar_noon)
horiz_angle_from_solar_noon = np.arctan2(a_y, a_x * np.sin(np.deg2rad(location.latitude)))
logging.getLogger("hour.angle.horiz.noon").debug("For hour %d, horiz angle from solar noon %g" % (hour, np.rad2deg(horiz_angle_from_solar_noon)))
# Angle currently is angle referenced from solar noon, positive (pm) towards the east.
# Change to mathematical angle, anticlockwise from 0 in the east.
return np.pi / 2 - horiz_angle_from_solar_noon
def analemmatic_horiz_hour_position(hour, location):
rotated_equatorial_angle = rotated_equatorial_hour_angle(hour, location)
a_x = np.cos(rotated_equatorial_angle)
a_y = np.sin(rotated_equatorial_angle)
a_y *= np.sin(np.deg2rad(location.latitude))
logging.getLogger("hour.pos").debug("For hour %d, x-y position (%g, %g)" % (hour, a_x, a_y))
return (a_x, a_y)
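# A minimal sanity sketch (illustrative, not part of the dial construction):
# ignoring the equation of time, local solar noon falls at roughly
# timezone + 12 - longitude / 15 hours, and its horizontal hour angle should
# be vertical: pi/2 in the northern hemisphere, -pi/2 in the southern (the
# southern case is rotated by 180 degrees later, in main()).
def _solar_noon_sanity(location=LOCATION):
    solar_noon = location.timezone + 12 - location.longitude / 15.0
    angle = analemmatic_horiz_hour_angle(solar_noon, location)
    assert abs(abs(angle) - np.pi / 2) < 1e-6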
def main():
fig = plt.figure(num=LOCATION.location)
# ax1 = fig.add_subplot(111, aspect='equal')
ax1 = fig.add_axes([0,0,1.0,1.0], aspect='equal')
# Calculate ellipse parameters
ellipse_major_axis = 1.0
ellipse_minor_axis = ellipse_major_axis * np.sin(np.deg2rad(LOCATION.latitude))
ellipse_foci_offset = np.sqrt(ellipse_major_axis**2 - ellipse_minor_axis**2)
ellipse_logger = logging.getLogger("ellipse")
ellipse_logger.info("Ellipse semimajor axis length %g" % ellipse_major_axis)
ellipse_logger.info("Ellipse semiminor axis length %g" % ellipse_minor_axis)
ellipse_logger.info("Ellipse foci x offset %g" % ellipse_foci_offset)
# Draw an ellipse arc
ellipse_pos_min = analemmatic_horiz_hour_position(HOUR_LINE_MIN, LOCATION)
ellipse_angle_min = np.arctan2(ellipse_pos_min[1], ellipse_pos_min[0])
ellipse_pos_max = analemmatic_horiz_hour_position(HOUR_LINE_MAX, LOCATION)
ellipse_angle_max = np.arctan2(ellipse_pos_max[1], ellipse_pos_max[0])
ellipse_rotation = 0
if LOCATION.latitude < 0:
# For southern hemisphere, rotate the whole thing around by 180
# degrees, so "up" is consistently from the sundial viewer's
# perspective with the sun behind their shoulder.
ellipse_rotation = 180
ellipse = matplotlib.patches.Arc(xy=(0,0), # centre of ellipse
width=2 * ellipse_major_axis,
height=2 * ellipse_minor_axis,
angle=ellipse_rotation,
theta1=np.rad2deg(ellipse_angle_max),
theta2=np.rad2deg(ellipse_angle_min),
)
ax1.add_patch(ellipse)
analemmatic_positions_x = []
analemmatic_positions_y = []
for hour in range(HOUR_LINE_MIN, HOUR_LINE_MAX + 1):
analemmatic_angle = analemmatic_horiz_hour_angle(hour, LOCATION)
(analemmatic_position_x, analemmatic_position_y) = analemmatic_horiz_hour_position(hour, LOCATION)
if LOCATION.latitude < 0:
# For southern hemisphere, rotate the whole thing around by 180
# degrees, so "up" is consistently from the sundial viewer's
# perspective with the sun behind their shoulder.
analemmatic_angle += np.deg2rad(180)
(analemmatic_position_x, analemmatic_position_y) = (-analemmatic_position_x, -analemmatic_position_y)
logging.getLogger("hour.angle.horiz").info("For hour %d, horiz angle %g" % (hour, np.rad2deg(analemmatic_angle)))
logging.getLogger("hour.pos").info("For hour %d, x-y position (%g, %g)" % (hour, analemmatic_position_x, analemmatic_position_y))
line = lines.Line2D([0, np.cos(analemmatic_angle)], [0, np.sin(analemmatic_angle)])
# ax1.add_line(line)
# ax1.plot(analemmatic_position_x, analemmatic_position_y, '.')
analemmatic_positions_x.append(analemmatic_position_x)
analemmatic_positions_y.append(analemmatic_position_y)
hour_text = "%d" % ((hour - 1) % 12 + 1)
# ax1.add_artist(text.Text(np.cos(analemmatic_angle) * NUMERAL_OFFSET, np.sin(analemmatic_angle) * NUMERAL_OFFSET, hour_text, ha='center', va='center'))
ax1.add_artist(text.Text(analemmatic_position_x * NUMERAL_OFFSET, analemmatic_position_y * NUMERAL_OFFSET, hour_text, ha='center', va='center'))
ax1.plot(analemmatic_positions_x, analemmatic_positions_y, '.')
# Draw date scale
datescale_logger = logging.getLogger("datescale")
# Max and min lines
dates_y = []
for sun_angle in [-sun_declination.SUN_OBLIQUITY, sun_declination.SUN_OBLIQUITY]:
date_y = np.tan(sun_angle) * np.cos(np.deg2rad(LOCATION.latitude))
dates_y.append(date_y)
line = lines.Line2D([-DATE_SCALE_X_EXTENT, DATE_SCALE_X_EXTENT], [date_y, date_y])
ax1.add_line(line)
# Draw vertical line of date scale
line = lines.Line2D([0,0], dates_y)
ax1.add_line(line)
datescale_logger.info("Date scale max and min y positions at %g and %g" % tuple(dates_y))
# Draw month ticks and month labels on date scale
DATE_SOLSTICE = datetime.date(2008, 12, 21)
month_starts_y = []
month_start_slopes = []
for month_number in range(1, 12 + 1):
month_start = datetime.date(2009, month_number, 1)
day_number = matplotlib.dates.date2num(month_start) - matplotlib.dates.date2num(DATE_SOLSTICE)
sun_angle = sun_declination.sun_declination(day_number)
sun_angle2 = sun_declination.sun_declination(day_number + 0.001)
month_start_slope = 1 if sun_angle2 >= sun_angle else -1
month_start_slopes.append(month_start_slope)
if LOCATION.latitude < 0:
sun_angle = -sun_angle
sun_angle2 = -sun_angle2
# month_start_slope = 1 if sun_angle2 >= sun_angle else -1
# month_start_slopes.append(month_start_slope)
month_start_y = np.tan(sun_angle) * np.cos(np.deg2rad(LOCATION.latitude))
month_starts_y.append(month_start_y)
month_name = month_start.strftime("%b")
datescale_logger.info("For beginning of %s, y position %g" % (month_name, month_start_y))
month_starts_y.append(month_starts_y[0])
month_start_slopes.append(month_start_slopes[0])
for month_number in range(1, 12 + 1):
month_start_y = month_starts_y[month_number - 1]
month_end_y = month_starts_y[month_number]
month_start_slope = month_start_slopes[month_number - 1]
month_end_slope = month_start_slopes[month_number]
# Add tick mark for month start
line = lines.Line2D([0, month_start_slope * DATE_SCALE_TICK_X], [month_start_y, month_start_y])
ax1.add_line(line)
# Add text for month name, in the middle of the month
if month_start_slope == month_end_slope:
text_y = (month_start_y + month_end_y) / 2
month_name = datetime.date(2009,month_number,1).strftime("%b")
ha = 'left' if month_start_slope >= 0 else 'right'
ax1.add_artist(text.Text(DATE_SCALE_TEXT_X * month_start_slope, text_y, month_name, ha=ha, va='center'))
# Draw a compass arrow
if LOCATION.latitude >= 0:
# Up for northern hemisphere
ax1.add_artist(text.Text(0.5, 0.15, "N", ha='center', va='center'))
arrow = matplotlib.patches.Arrow(0.5, -0.15, 0, 0.25, width=0.08, edgecolor='none')
ax1.add_patch(arrow)
else:
# Down for the southern hemisphere
ax1.add_artist(text.Text(0.5, -0.15, "N", ha='center', va='center'))
arrow = matplotlib.patches.Arrow(0.5, 0.15, 0, -0.25, width=0.08, edgecolor='none')
ax1.add_patch(arrow)
# plt.axis('tight')
plt.axis('off')
plt.xlim(-EXTENT_MAJOR, EXTENT_MAJOR)
plt.ylim(-EXTENT_MINOR, EXTENT_MINOR)
# plt.savefig('analemmatic.pdf')
# plt.savefig('analemmatic.svg')
# plt.savefig('analemmatic.png')
plt.show()
if __name__ == '__main__':
main()
| 43.574899 | 159 | 0.692929 |
7942d189b0df95d6e61d99380d8fa33cd19549ea | 469 | py | Python | src/fchecker/exceptions/exceptions.py | IncognitoCoding/fchecker | bbc70685174c70b6c396e1c93864028bffd3e22e | ["MIT"] | null | null | null | src/fchecker/exceptions/exceptions.py | IncognitoCoding/fchecker | bbc70685174c70b6c396e1c93864028bffd3e22e | ["MIT"] | null | null | null | src/fchecker/exceptions/exceptions.py | IncognitoCoding/fchecker | bbc70685174c70b6c396e1c93864028bffd3e22e | ["MIT"] | null | null | null |
__author__ = "IncognitoCoding"
__copyright__ = "Copyright 2022, exceptions"
__credits__ = ["IncognitoCoding"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "IncognitoCoding"
__status__ = "Beta"
class InputFailure(Exception):
"""Exception raised for an input exception message."""
__module__ = "builtins"
pass
class InvalidKeyError(Exception):
"""Exception raised for an invalid dictionary key."""
__module__ = "builtins"
pass
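# A minimal usage sketch (illustrative):
#
#     if not isinstance(value, str):
#         raise InputFailure('value must be a str')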
| 21.318182 | 58 | 0.720682 |
7942d1c2e6e54680695703f8d16232ea4c5d00f0 | 8,123 | py | Python | socksOhttp/socksohttp/AES/blockfeeder.py | skelsec/socksohttp | c1713bfcd44f29790325dbb0578590bceb93499f | ["MIT"] | 37 | 2018-10-12T22:44:50.000Z | 2021-09-13T09:51:55.000Z | socksOhttp/socksohttp/AES/blockfeeder.py | skelsec/socksohttp | c1713bfcd44f29790325dbb0578590bceb93499f | ["MIT"] | 1 | 2021-05-11T09:05:34.000Z | 2021-05-11T09:05:34.000Z | socksOhttp/socksohttp/AES/blockfeeder.py | skelsec/socksohttp | c1713bfcd44f29790325dbb0578590bceb93499f | ["MIT"] | 4 | 2018-10-13T15:07:20.000Z | 2020-05-15T02:04:04.000Z |
# The MIT License (MIT)
#
# Copyright (c) 2014 Richard Moore
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from .AES import AESBlockModeOfOperation, AESSegmentModeOfOperation, AESStreamModeOfOperation
from .util import append_PKCS7_padding, strip_PKCS7_padding, to_bufferable
# First we inject three functions to each of the modes of operations
#
# _can_consume(size)
# - Given a size, determine how many bytes could be consumed in
# a single call to either the decrypt or encrypt method
#
# _final_encrypt(data, padding = PADDING_DEFAULT)
# - call and return encrypt on this (last) chunk of data,
# padding as necessary; this will always be at least 16
# bytes unless the total incoming input was less than 16
# bytes
#
# _final_decrypt(data, padding = PADDING_DEFAULT)
# - same as _final_encrypt except for decrypt, for
# stripping off padding
#
PADDING_NONE = 'none'
PADDING_DEFAULT = 'default'
# @TODO: Ciphertext stealing and explicit PKCS#7
# PADDING_CIPHERTEXT_STEALING
# PADDING_PKCS7
# ECB and CBC are block-only ciphers
def _block_can_consume(self, size):
if size >= 16: return 16
return 0
# After padding, we may have more than one block
def _block_final_encrypt(self, data, padding = PADDING_DEFAULT):
if padding == PADDING_DEFAULT:
data = append_PKCS7_padding(data)
elif padding == PADDING_NONE:
if len(data) != 16:
raise Exception('invalid data length for final block')
else:
raise Exception('invalid padding option')
if len(data) == 32:
return self.encrypt(data[:16]) + self.encrypt(data[16:])
return self.encrypt(data)
def _block_final_decrypt(self, data, padding = PADDING_DEFAULT):
if padding == PADDING_DEFAULT:
return strip_PKCS7_padding(self.decrypt(data))
if padding == PADDING_NONE:
if len(data) != 16:
raise Exception('invalid data length for final block')
return self.decrypt(data)
raise Exception('invalid padding option')
AESBlockModeOfOperation._can_consume = _block_can_consume
AESBlockModeOfOperation._final_encrypt = _block_final_encrypt
AESBlockModeOfOperation._final_decrypt = _block_final_decrypt
# CFB is a segment cipher
def _segment_can_consume(self, size):
return self.segment_bytes * int(size // self.segment_bytes)
# CFB can handle a non-segment-sized block at the end using the remaining cipherblock
def _segment_final_encrypt(self, data, padding = PADDING_DEFAULT):
if padding != PADDING_DEFAULT:
raise Exception('invalid padding option')
faux_padding = (chr(0) * (self.segment_bytes - (len(data) % self.segment_bytes)))
padded = data + to_bufferable(faux_padding)
return self.encrypt(padded)[:len(data)]
# CFB can handle a non-segment-sized block at the end using the remaining cipherblock
def _segment_final_decrypt(self, data, padding = PADDING_DEFAULT):
if padding != PADDING_DEFAULT:
raise Exception('invalid padding option')
faux_padding = (chr(0) * (self.segment_bytes - (len(data) % self.segment_bytes)))
padded = data + to_bufferable(faux_padding)
return self.decrypt(padded)[:len(data)]
AESSegmentModeOfOperation._can_consume = _segment_can_consume
AESSegmentModeOfOperation._final_encrypt = _segment_final_encrypt
AESSegmentModeOfOperation._final_decrypt = _segment_final_decrypt
# OFB and CTR are stream ciphers
def _stream_can_consume(self, size):
return size
def _stream_final_encrypt(self, data, padding = PADDING_DEFAULT):
if padding not in [PADDING_NONE, PADDING_DEFAULT]:
raise Exception('invalid padding option')
return self.encrypt(data)
def _stream_final_decrypt(self, data, padding = PADDING_DEFAULT):
if padding not in [PADDING_NONE, PADDING_DEFAULT]:
raise Exception('invalid padding option')
return self.decrypt(data)
AESStreamModeOfOperation._can_consume = _stream_can_consume
AESStreamModeOfOperation._final_encrypt = _stream_final_encrypt
AESStreamModeOfOperation._final_decrypt = _stream_final_decrypt
class BlockFeeder(object):
'''The super-class for objects to handle chunking a stream of bytes
into the appropriate block size for the underlying mode of operation
and applying (or stripping) padding, as necessary.'''
def __init__(self, mode, feed, final, padding = PADDING_DEFAULT):
self._mode = mode
self._feed = feed
self._final = final
self._buffer = to_bufferable("")
self._padding = padding
def feed(self, data = None):
'''Provide bytes to encrypt (or decrypt), returning any bytes
possible from this or any previous calls to feed.
Call with None or an empty string to flush the mode of
operation and return any final bytes; no further calls to
feed may be made.'''
if self._buffer is None:
raise ValueError('already finished feeder')
# Finalize; process the spare bytes we were keeping
if data is None:
result = self._final(self._buffer, self._padding)
self._buffer = None
return result
self._buffer += to_bufferable(data)
# We keep 16 bytes around so we can determine padding
result = to_bufferable('')
while len(self._buffer) > 16:
can_consume = self._mode._can_consume(len(self._buffer) - 16)
if can_consume == 0: break
result += self._feed(self._buffer[:can_consume])
self._buffer = self._buffer[can_consume:]
return result
class Encrypter(BlockFeeder):
'Accepts bytes of plaintext and returns encrypted ciphertext.'
def __init__(self, mode, padding = PADDING_DEFAULT):
BlockFeeder.__init__(self, mode, mode.encrypt, mode._final_encrypt, padding)
class Decrypter(BlockFeeder):
'Accepts bytes of ciphertext and returns decrypted plaintext.'
def __init__(self, mode, padding = PADDING_DEFAULT):
BlockFeeder.__init__(self, mode, mode.decrypt, mode._final_decrypt, padding)
# 8kb blocks
BLOCK_SIZE = (1 << 13)
def _feed_stream(feeder, in_stream, out_stream, block_size = BLOCK_SIZE):
'Uses feeder to read and convert from in_stream and write to out_stream.'
while True:
chunk = in_stream.read(block_size)
if not chunk:
break
converted = feeder.feed(chunk)
out_stream.write(converted)
converted = feeder.feed()
out_stream.write(converted)
def encrypt_stream(mode, in_stream, out_stream, block_size = BLOCK_SIZE, padding = PADDING_DEFAULT):
'Encrypts a stream of bytes from in_stream to out_stream using mode.'
encrypter = Encrypter(mode, padding = padding)
_feed_stream(encrypter, in_stream, out_stream, block_size)
def decrypt_stream(mode, in_stream, out_stream, block_size = BLOCK_SIZE, padding = PADDING_DEFAULT):
'Decrypts a stream of bytes from in_stream to out_stream using mode.'
decrypter = Decrypter(mode, padding = padding)
_feed_stream(decrypter, in_stream, out_stream, block_size)
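# A minimal round-trip sketch (illustrative): encrypt and decrypt an in-memory
# stream with CBC mode via the helpers above. It assumes the sibling AES module
# exposes AESModeOfOperationCBC, as in upstream pyaes; the key and IV values
# are placeholders for the example, not anything used by this codebase.
def _example_roundtrip():
    import io
    from .AES import AESModeOfOperationCBC

    key = b'0123456789abcdef'  # 16-byte AES-128 key (example only)
    iv = b'fedcba9876543210'   # 16-byte initialisation vector (example only)

    source = io.BytesIO(b'attack at dawn')
    encrypted = io.BytesIO()
    encrypt_stream(AESModeOfOperationCBC(key, iv=iv), source, encrypted)

    encrypted.seek(0)
    decrypted = io.BytesIO()
    decrypt_stream(AESModeOfOperationCBC(key, iv=iv), encrypted, decrypted)
    assert decrypted.getvalue() == b'attack at dawn'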
| 35.627193 | 100 | 0.721408 |
7942d2074f849d13d13c7e74e732fe4c23283c96 | 1,192 | py | Python | intro_exercise.py | ebayandelger/MSDS600 | 7bec34531f3fc661a0bfdfe42a974fcfd8f1f2c4 | ["MIT"] | null | null | null | intro_exercise.py | ebayandelger/MSDS600 | 7bec34531f3fc661a0bfdfe42a974fcfd8f1f2c4 | ["MIT"] | null | null | null | intro_exercise.py | ebayandelger/MSDS600 | 7bec34531f3fc661a0bfdfe42a974fcfd8f1f2c4 | ["MIT"] | null | null | null |
# Lines starting with # are comments and are not run by Python.
"""
Multi-line comments are possible with triple quotes like this.
"""
# pandas is a common library for working with data in Python, we usually import it like so:
import pandas as pd
import matplotlib.pyplot as plt
# This data comes from the UCI ML repository:
# https://archive.ics.uci.edu/ml/datasets/Bike+Sharing+Dataset
# It is the daily number of users from a bike share program
df = pd.read_csv('day.csv')
# shows a preview of the data
df.head()
# shows some basic stats of the data
df.describe()
# Use the examples in the jupyter notebook to help you here.
# calculate the mean and standard deviation of the daily ride counts (the 'cnt' column)
# mean
daily = df["cnt"]
print("Mean:", daily.mean())
#___
# standard deviation
print("Standard Deviation:", daily.std())
#___
# plot the counts ('cnt' column)
plt.plot(daily)  # plot the daily count series
plt.xlabel("Day")  # label x axis
plt.ylabel("Daily Ride Count")  # label y axis
plt.title('Daily Ride Counts')  # title the plot
plt.show()  # show the plot
#___
| 28.380952 | 91 | 0.700503 |
7942d2aa9655b4d764ff6b740324b2088c8e4f9b | 11,045 | py | Python | tests/unit_tests/test_tethys_apps/test_models/test_TethysApp.py | quyendong/tethys | 99bcb524d5b2021b88d5fa15b7ed6b8acb460997 | ["BSD-2-Clause"] | 1 | 2020-10-08T20:38:33.000Z | 2020-10-08T20:38:33.000Z | tests/unit_tests/test_tethys_apps/test_models/test_TethysApp.py | quyendong/tethys | 99bcb524d5b2021b88d5fa15b7ed6b8acb460997 | ["BSD-2-Clause"] | 1 | 2018-04-14T19:40:54.000Z | 2018-04-14T19:40:54.000Z | tests/unit_tests/test_tethys_apps/test_models/test_TethysApp.py | quyendong/tethys | 99bcb524d5b2021b88d5fa15b7ed6b8acb460997 | ["BSD-2-Clause"] | 1 | 2021-09-07T14:47:11.000Z | 2021-09-07T14:47:11.000Z |
"""
********************************************************************************
* Name: test_TethysApp
* Author: nswain
* Created On: August 15, 2018
* Copyright: (c) Aquaveo 2018
********************************************************************************
"""
from tethys_sdk.testing import TethysTestCase
from tethys_apps.models import TethysApp, TethysAppSetting
from tethys_services.models import PersistentStoreService, SpatialDatasetService, DatasetService, WebProcessingService
class TethysAppTests(TethysTestCase):
def set_up(self):
self.test_app = TethysApp.objects.get(package='test_app')
self.wps = WebProcessingService(
name='test_wps',
endpoint='http://localhost/wps/WebProcessingService',
username='foo',
password='password'
)
self.wps.save()
self.sds = SpatialDatasetService(
name='test_sds',
endpoint='http://localhost/geoserver/rest/',
username='foo',
password='password'
)
self.sds.save()
self.ds = DatasetService(
name='test_ds',
endpoint='http://localhost/api/3/action/',
apikey='foo',
)
self.ds.save()
self.ps = PersistentStoreService(
name='test_ps',
host='localhost',
port='5432',
username='foo',
password='password'
)
self.ps.save()
def tear_down(self):
self.wps.delete()
self.ps.delete()
self.ds.delete()
self.sds.delete()
def test_str(self):
ret = str(self.test_app)
self.assertEqual('Test App', ret)
def test_add_settings(self):
new_setting = TethysAppSetting(
name='new_setting',
required=False
)
self.test_app.add_settings([new_setting])
app = TethysApp.objects.get(package='test_app')
settings = app.settings_set.filter(name='new_setting')
self.assertEqual(1, len(settings))
def test_add_settings_add_same_setting_twice(self):
new_setting = TethysAppSetting(
name='new_setting',
required=False
)
new_setting_same_name = TethysAppSetting(
name='new_setting',
required=False
)
self.test_app.add_settings([new_setting, new_setting_same_name])
app = TethysApp.objects.get(package='test_app')
settings = app.settings_set.filter(name='new_setting')
self.assertEqual(1, len(settings))
def test_settings_prop(self):
ret = self.test_app.settings
self.assertEqual(12, len(ret))
for r in ret:
self.assertIsInstance(r, TethysAppSetting)
def test_custom_settings_prop(self):
custom_setting = self.test_app.settings_set.select_subclasses().get(name='default_name')
custom_setting.value = 'foo'
custom_setting.save()
ret = self.test_app.custom_settings
for r in ret:
self.assertIsInstance(r, TethysAppSetting)
if r.name == 'default_name':
self.assertEqual('foo', r.value)
def test_dataset_service_settings_prop(self):
ds_setting = self.test_app.settings_set.select_subclasses().get(name='primary_ckan')
ds_setting.dataset_service = self.ds
ds_setting.save()
ret = self.test_app.dataset_service_settings
for r in ret:
self.assertIsInstance(r, TethysAppSetting)
if r.name == 'primary_ckan':
self.assertEqual('test_ds', r.dataset_service.name)
self.assertEqual('foo', r.dataset_service.apikey)
self.assertEqual('http://localhost/api/3/action/', r.dataset_service.endpoint)
def test_spatial_dataset_service_settings_prop(self):
sds_setting = self.test_app.settings_set.select_subclasses().get(name='primary_geoserver')
sds_setting.spatial_dataset_service = self.sds
sds_setting.save()
ret = self.test_app.spatial_dataset_service_settings
for r in ret:
self.assertIsInstance(r, TethysAppSetting)
if r.name == 'primary_geoserver':
self.assertEqual('test_sds', r.spatial_dataset_service.name)
self.assertEqual('http://localhost/geoserver/rest/', r.spatial_dataset_service.endpoint)
self.assertEqual('foo', r.spatial_dataset_service.username)
self.assertEqual('password', r.spatial_dataset_service.password)
def test_wps_services_settings_prop(self):
wps_setting = self.test_app.settings_set.select_subclasses().get(name='primary_52n')
wps_setting.web_processing_service = self.wps
wps_setting.save()
ret = self.test_app.wps_services_settings
for r in ret:
self.assertIsInstance(r, TethysAppSetting)
if r.name == 'primary_52n':
self.assertEqual('test_wps', r.web_processing_service.name)
self.assertEqual('http://localhost/wps/WebProcessingService', r.web_processing_service.endpoint)
self.assertEqual('foo', r.web_processing_service.username)
self.assertEqual('password', r.web_processing_service.password)
def test_persistent_store_connection_settings_prop(self):
ps_setting = self.test_app.settings_set.select_subclasses().get(name='primary')
ps_setting.persistent_store_service = self.ps
ps_setting.save()
ret = self.test_app.persistent_store_connection_settings
for r in ret:
self.assertIsInstance(r, TethysAppSetting)
if r.name == 'primary':
self.assertEqual('test_ps', r.persistent_store_service.name)
self.assertEqual('localhost', r.persistent_store_service.host)
self.assertEqual(5432, r.persistent_store_service.port)
self.assertEqual('foo', r.persistent_store_service.username)
self.assertEqual('password', r.persistent_store_service.password)
def test_persistent_store_database_settings_prop(self):
ps_setting = self.test_app.settings_set.select_subclasses().get(name='spatial_db')
ps_setting.persistent_store_service = self.ps
ps_setting.save()
ret = self.test_app.persistent_store_database_settings
for r in ret:
self.assertIsInstance(r, TethysAppSetting)
if r.name == 'spatial_db':
self.assertEqual('test_ps', r.persistent_store_service.name)
self.assertEqual('localhost', r.persistent_store_service.host)
self.assertEqual(5432, r.persistent_store_service.port)
self.assertEqual('foo', r.persistent_store_service.username)
self.assertEqual('password', r.persistent_store_service.password)
def test_configured_prop_required_and_set(self):
# See: test_app.app for expected settings configuration
# Set required settings
custom_setting = self.test_app.settings_set.select_subclasses().get(name='default_name')
custom_setting.value = 'foo'
custom_setting.save()
ds_setting = self.test_app.settings_set.select_subclasses().get(name='primary_ckan')
ds_setting.dataset_service = self.ds
ds_setting.save()
sds_setting = self.test_app.settings_set.select_subclasses().get(name='primary_geoserver')
sds_setting.spatial_dataset_service = self.sds
sds_setting.save()
wps_setting = self.test_app.settings_set.select_subclasses().get(name='primary_52n')
wps_setting.web_processing_service = self.wps
wps_setting.save()
ps_setting = self.test_app.settings_set.select_subclasses().get(name='primary')
ps_setting.persistent_store_service = self.ps
ps_setting.save()
ps_db_setting = self.test_app.settings_set.select_subclasses().get(name='spatial_db')
ps_db_setting.persistent_store_service = self.ps
ps_db_setting.save()
ret = self.test_app.configured
self.assertTrue(ret)
def test_configured_prop_required_no_value(self):
# See: test_app.app for expected settings configuration
# Set required settings
custom_setting = self.test_app.settings_set.select_subclasses().get(name='default_name')
custom_setting.value = '' # <-- NOT SET / NO VALUE
custom_setting.save()
ds_setting = self.test_app.settings_set.select_subclasses().get(name='primary_ckan')
ds_setting.dataset_service = self.ds
ds_setting.save()
sds_setting = self.test_app.settings_set.select_subclasses().get(name='primary_geoserver')
sds_setting.spatial_dataset_service = self.sds
sds_setting.save()
wps_setting = self.test_app.settings_set.select_subclasses().get(name='primary_52n')
wps_setting.web_processing_service = self.wps
wps_setting.save()
ps_setting = self.test_app.settings_set.select_subclasses().get(name='primary')
ps_setting.persistent_store_service = self.ps
ps_setting.save()
psd_setting = self.test_app.settings_set.select_subclasses().get(name='spatial_db')
psd_setting.persistent_store_service = self.ps
psd_setting.save()
ret = self.test_app.configured
self.assertFalse(ret)
def test_configured_prop_not_assigned_exception(self):
# See: test_app.app for expected settings configuration
custom_setting = self.test_app.settings_set.select_subclasses().get(name='default_name')
custom_setting.value = ''
custom_setting.save()
ds_setting = self.test_app.settings_set.select_subclasses().get(name='primary_ckan')
ds_setting.dataset_service = None
ds_setting.save()
sds_setting = self.test_app.settings_set.select_subclasses().get(name='primary_geoserver')
sds_setting.spatial_dataset_service = None
sds_setting.save()
wps_setting = self.test_app.settings_set.select_subclasses().get(name='primary_52n')
wps_setting.web_processing_service = None
wps_setting.save()
ps_setting = self.test_app.settings_set.select_subclasses().get(name='primary')
ps_setting.persistent_store_service = None
ps_setting.save()
psd_setting = self.test_app.settings_set.select_subclasses().get(name='spatial_db')
psd_setting.persistent_store_service = None
psd_setting.save()
ret = self.test_app.configured
self.assertFalse(ret)
class TethysAppNoSettingsTests(TethysTestCase):
def set_up(self):
self.test_app = TethysApp.objects.get(package='test_app')
# See: test_app.app for expected settings configuration
for setting in self.test_app.settings_set.all():
setting.delete()
def test_configured_prop_no_settings(self):
ret = self.test_app.configured
self.assertTrue(ret)
| 38.350694 | 118 | 0.657764 |
7942d32c11f6010092a0886fb170579f7116a515 | 34,081 | py | Python | zipline/test_algorithms.py | mmilutinovic1313/zipline-with-algorithms | fb005461670e60e1182a7bd746abf98b54929f69 | ["Apache-2.0"] | 1 | 2015-07-22T06:12:56.000Z | 2015-07-22T06:12:56.000Z | zipline/test_algorithms.py | mmilutinovic1313/zipline-with-algorithms | fb005461670e60e1182a7bd746abf98b54929f69 | ["Apache-2.0"] | null | null | null | zipline/test_algorithms.py | mmilutinovic1313/zipline-with-algorithms | fb005461670e60e1182a7bd746abf98b54929f69 | ["Apache-2.0"] | null | null | null |
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Algorithm Protocol
===================
For a class to be passed as a trading algorithm to the
:py:class:`zipline.lines.SimulatedTrading` zipline it must follow an
implementation protocol. Examples of this algorithm protocol are provided
below.
The algorithm must expose methods:
- initialize: method that takes no args, no returns. Simply called to
enable the algorithm to set any internal state needed.
- get_sid_filter: method that takes no args, and returns a list of valid
sids. List must have a length between 1 and 10. If None is returned the
filter will block all events.
- handle_data: method that accepts a :py:class:`zipline.protocol.BarData`
of the current state of the simulation universe. An example data object:
.. This outputs the table as an HTML table but for some reason there
       is no bounding box. Make the previous paragraph ending colon a
double-colon to turn this back into blockquoted table in ASCII art.
+-----------------+--------------+----------------+-------------------+
| | sid(133) | sid(134) | sid(135) |
+=================+==============+================+===================+
| price | $10.10 | $22.50 | $13.37 |
+-----------------+--------------+----------------+-------------------+
| volume | 10,000 | 5,000 | 50,000 |
+-----------------+--------------+----------------+-------------------+
| mvg_avg_30 | $9.97 | $22.61 | $13.37 |
+-----------------+--------------+----------------+-------------------+
| dt | 6/30/2012 | 6/30/2011 | 6/29/2012 |
+-----------------+--------------+----------------+-------------------+
- set_order: method that accepts a callable. Will be set as the value of the
order method of trading_client. An algorithm can then place orders with a
valid sid and a number of shares::
self.order(sid(133), share_count)
- set_performance: property which can be set equal to the
cumulative_trading_performance property of the trading_client. An
algorithm can then check position information with the
Portfolio object::
self.Portfolio[sid(133)]['cost_basis']
- set_transact_setter: method that accepts a callable. Will
be set as the value of the set_transact_setter method of
the trading_client. This allows an algorithm to change the
slippage model used to predict transactions based on orders
and trade events.
"""
from copy import deepcopy
import numpy as np
from nose.tools import assert_raises
from six.moves import range
from six import itervalues
from zipline.algorithm import TradingAlgorithm
from zipline.api import (
FixedSlippage,
order,
set_slippage,
record,
sid,
)
from zipline.errors import UnsupportedOrderParameters
from zipline.assets import Future, Equity
from zipline.finance.execution import (
LimitOrder,
MarketOrder,
StopLimitOrder,
StopOrder,
)
from zipline.finance.controls import AssetDateBounds
from zipline.transforms import BatchTransform, batch_transform
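# A minimal sketch of the protocol described in the module docstring: an
# algorithm that buys a fixed number of shares of one asset on every bar.
# The sid value 0 and the share count are illustrative assumptions; the
# framework supplies the rest of the protocol machinery.
class MinimalProtocolAlgorithm(TradingAlgorithm):
    def initialize(self):
        # Set any internal state the algorithm needs.
        self.shares_per_bar = 1

    def handle_data(self, data):
        # Place a market order for the configured number of shares.
        self.order(self.sid(0), self.shares_per_bar)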
class TestAlgorithm(TradingAlgorithm):
"""
This algorithm will send a specified number of orders, to allow unit tests
to verify the orders sent/received, transactions created, and positions
at the close of a simulation.
"""
def initialize(self,
sid,
amount,
order_count,
sid_filter=None,
slippage=None,
commission=None):
self.count = order_count
self.asset = self.sid(sid)
self.amount = amount
self.incr = 0
if sid_filter:
self.sid_filter = sid_filter
else:
self.sid_filter = [self.asset.sid]
if slippage is not None:
self.set_slippage(slippage)
if commission is not None:
self.set_commission(commission)
def handle_data(self, data):
# place an order for amount shares of sid
if self.incr < self.count:
self.order(self.asset, self.amount)
self.incr += 1
class HeavyBuyAlgorithm(TradingAlgorithm):
"""
This algorithm will send a specified number of orders, to allow unit tests
to verify the orders sent/received, transactions created, and positions
at the close of a simulation.
"""
def initialize(self, sid, amount):
self.asset = self.sid(sid)
self.amount = amount
self.incr = 0
def handle_data(self, data):
        # place an order for self.amount shares of the asset
self.order(self.asset, self.amount)
self.incr += 1
class NoopAlgorithm(TradingAlgorithm):
"""
    Does nothing ("dolce far niente").
"""
def get_sid_filter(self):
return []
def initialize(self):
pass
def set_transact_setter(self, txn_sim_callable):
pass
def handle_data(self, data):
pass
class ExceptionAlgorithm(TradingAlgorithm):
"""
Throw an exception from the method name specified in the
constructor.
"""
def initialize(self, throw_from, sid):
self.throw_from = throw_from
self.asset = self.sid(sid)
if self.throw_from == "initialize":
raise Exception("Algo exception in initialize")
else:
pass
def set_portfolio(self, portfolio):
if self.throw_from == "set_portfolio":
raise Exception("Algo exception in set_portfolio")
else:
pass
def handle_data(self, data):
if self.throw_from == "handle_data":
raise Exception("Algo exception in handle_data")
else:
pass
def get_sid_filter(self):
if self.throw_from == "get_sid_filter":
raise Exception("Algo exception in get_sid_filter")
else:
return [self.asset]
def set_transact_setter(self, txn_sim_callable):
pass
class DivByZeroAlgorithm(TradingAlgorithm):
def initialize(self, sid):
self.asset = self.sid(sid)
self.incr = 0
def handle_data(self, data):
self.incr += 1
if self.incr > 4:
5 / 0
pass
class TooMuchProcessingAlgorithm(TradingAlgorithm):
def initialize(self, sid):
self.asset = self.sid(sid)
def handle_data(self, data):
# Unless we're running on some sort of
# supercomputer this will hit timeout.
for i in range(1000000000):
self.foo = i
class TimeoutAlgorithm(TradingAlgorithm):
def initialize(self, sid):
self.asset = self.sid(sid)
self.incr = 0
def handle_data(self, data):
if self.incr > 4:
import time
time.sleep(100)
pass
class RecordAlgorithm(TradingAlgorithm):
def initialize(self):
self.incr = 0
def handle_data(self, data):
self.incr += 1
self.record(incr=self.incr)
name = 'name'
self.record(name, self.incr)
record(name, self.incr, 'name2', 2, name3=self.incr)
class TestOrderAlgorithm(TradingAlgorithm):
def initialize(self):
self.incr = 0
def handle_data(self, data):
if self.incr == 0:
assert 0 not in self.portfolio.positions
else:
assert self.portfolio.positions[0]['amount'] == \
self.incr, "Orders not filled immediately."
assert self.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
self.incr += 1
self.order(self.sid(0), 1)
class TestOrderInstantAlgorithm(TradingAlgorithm):
def initialize(self):
self.incr = 0
self.last_price = None
def handle_data(self, data):
if self.incr == 0:
assert 0 not in self.portfolio.positions
else:
assert self.portfolio.positions[0]['amount'] == \
self.incr, "Orders not filled immediately."
assert self.portfolio.positions[0]['last_sale_price'] == \
self.last_price, "Orders was not filled at last price."
self.incr += 2
self.order_value(self.sid(0), data[0].price * 2.)
self.last_price = data[0].price
class TestOrderStyleForwardingAlgorithm(TradingAlgorithm):
"""
Test Algorithm for verifying that ExecutionStyles are properly forwarded by
order API helper methods. Pass the name of the method to be tested as a
string parameter to this algorithm's constructor.
"""
def __init__(self, *args, **kwargs):
self.method_name = kwargs.pop('method_name')
super(TestOrderStyleForwardingAlgorithm, self)\
.__init__(*args, **kwargs)
def initialize(self):
self.incr = 0
self.last_price = None
def handle_data(self, data):
if self.incr == 0:
assert len(self.portfolio.positions.keys()) == 0
method_to_check = getattr(self, self.method_name)
method_to_check(self.sid(0),
data[0].price,
style=StopLimitOrder(10, 10))
assert len(self.blotter.open_orders[0]) == 1
result = self.blotter.open_orders[0][0]
assert result.limit == 10
assert result.stop == 10
self.incr += 1
class TestOrderValueAlgorithm(TradingAlgorithm):
def initialize(self):
self.incr = 0
self.sale_price = None
def handle_data(self, data):
if self.incr == 0:
assert 0 not in self.portfolio.positions
else:
assert self.portfolio.positions[0]['amount'] == \
self.incr, "Orders not filled immediately."
assert self.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
self.incr += 2
multiplier = 2.
if isinstance(self.sid(0), Future):
multiplier *= self.sid(0).contract_multiplier
self.order_value(self.sid(0), data[0].price * multiplier)
class TestTargetAlgorithm(TradingAlgorithm):
def initialize(self):
self.target_shares = 0
self.sale_price = None
def handle_data(self, data):
if self.target_shares == 0:
assert 0 not in self.portfolio.positions
else:
assert self.portfolio.positions[0]['amount'] == \
self.target_shares, "Orders not filled immediately."
assert self.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
self.target_shares = np.random.randint(1, 30)
self.order_target(self.sid(0), self.target_shares)
class TestOrderPercentAlgorithm(TradingAlgorithm):
def initialize(self):
self.target_shares = 0
self.sale_price = None
def handle_data(self, data):
if self.target_shares == 0:
assert 0 not in self.portfolio.positions
self.order(self.sid(0), 10)
self.target_shares = 10
return
else:
assert self.portfolio.positions[0]['amount'] == \
self.target_shares, "Orders not filled immediately."
assert self.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
self.order_percent(self.sid(0), .001)
if isinstance(self.sid(0), Equity):
self.target_shares += np.floor(
(.001 * self.portfolio.portfolio_value) / data[0].price
)
if isinstance(self.sid(0), Future):
self.target_shares += np.floor(
(.001 * self.portfolio.portfolio_value) /
(data[0].price * self.sid(0).contract_multiplier)
)
class TestTargetPercentAlgorithm(TradingAlgorithm):
def initialize(self):
self.target_shares = 0
self.sale_price = None
def handle_data(self, data):
if self.target_shares == 0:
assert 0 not in self.portfolio.positions
self.target_shares = 1
else:
assert np.round(self.portfolio.portfolio_value * 0.002) == \
self.portfolio.positions[0]['amount'] * self.sale_price, \
"Orders not filled correctly."
assert self.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
self.sale_price = data[0].price
self.order_target_percent(self.sid(0), .002)
class TestTargetValueAlgorithm(TradingAlgorithm):
def initialize(self):
self.target_shares = 0
self.sale_price = None
def handle_data(self, data):
if self.target_shares == 0:
assert 0 not in self.portfolio.positions
self.order(self.sid(0), 10)
self.target_shares = 10
return
else:
print(self.portfolio)
assert self.portfolio.positions[0]['amount'] == \
self.target_shares, "Orders not filled immediately."
assert self.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
self.order_target_value(self.sid(0), 20)
self.target_shares = np.round(20 / data[0].price)
if isinstance(self.sid(0), Equity):
self.target_shares = np.round(20 / data[0].price)
if isinstance(self.sid(0), Future):
self.target_shares = np.round(
20 / (data[0].price * self.sid(0).contract_multiplier))
############################
# AccountControl Test Algos#
############################
class SetMaxLeverageAlgorithm(TradingAlgorithm):
def initialize(self, max_leverage=None):
self.set_max_leverage(max_leverage=max_leverage)
############################
# TradingControl Test Algos#
############################
class SetMaxPositionSizeAlgorithm(TradingAlgorithm):
def initialize(self, sid=None, max_shares=None, max_notional=None):
self.order_count = 0
self.set_max_position_size(sid=sid,
max_shares=max_shares,
max_notional=max_notional)
class SetMaxOrderSizeAlgorithm(TradingAlgorithm):
def initialize(self, sid=None, max_shares=None, max_notional=None):
self.order_count = 0
self.set_max_order_size(sid=sid,
max_shares=max_shares,
max_notional=max_notional)
class SetDoNotOrderListAlgorithm(TradingAlgorithm):
def initialize(self, sid=None, restricted_list=None):
self.order_count = 0
self.set_do_not_order_list(restricted_list)
class SetMaxOrderCountAlgorithm(TradingAlgorithm):
def initialize(self, count):
self.order_count = 0
self.set_max_order_count(count)
class SetLongOnlyAlgorithm(TradingAlgorithm):
def initialize(self):
self.order_count = 0
self.set_long_only()
class SetAssetDateBoundsAlgorithm(TradingAlgorithm):
"""
Algorithm that tries to order 1 share of sid 0 on every bar and has an
AssetDateBounds() trading control in place.
"""
def initialize(self):
self.register_trading_control(AssetDateBounds())
def handle_data(algo, data):
algo.order(algo.sid(0), 1)
class TestRegisterTransformAlgorithm(TradingAlgorithm):
def initialize(self, *args, **kwargs):
self.set_slippage(FixedSlippage())
def handle_data(self, data):
pass
class AmbitiousStopLimitAlgorithm(TradingAlgorithm):
"""
Algorithm that tries to buy with extremely low stops/limits and tries to
sell with extremely high versions of same. Should not end up with any
positions for reasonable data.
"""
def initialize(self, *args, **kwargs):
self.asset = self.sid(kwargs.pop('sid'))
def handle_data(self, data):
########
# Buys #
########
# Buy with low limit, shouldn't trigger.
self.order(self.asset, 100, limit_price=1)
# Buy with high stop, shouldn't trigger.
self.order(self.asset, 100, stop_price=10000000)
# Buy with high limit (should trigger) but also high stop (should
# prevent trigger).
self.order(self.asset, 100, limit_price=10000000, stop_price=10000000)
# Buy with low stop (should trigger), but also low limit (should
# prevent trigger).
self.order(self.asset, 100, limit_price=1, stop_price=1)
#########
# Sells #
#########
# Sell with high limit, shouldn't trigger.
self.order(self.asset, -100, limit_price=1000000)
# Sell with low stop, shouldn't trigger.
self.order(self.asset, -100, stop_price=1)
# Sell with high stop (should trigger), but also high limit (should
# prevent trigger).
self.order(self.asset, -100, limit_price=1000000, stop_price=1000000)
# Sell with low limit (should trigger), but also low stop (should
# prevent trigger).
self.order(self.asset, -100, limit_price=1, stop_price=1)
###################
# Rounding Checks #
###################
self.order(self.asset, 100, limit_price=.00000001)
self.order(self.asset, -100, stop_price=.00000001)
##########################################
# Algorithm using simple batch transforms
class ReturnPriceBatchTransform(BatchTransform):
def get_value(self, data):
assert data.shape[1] == self.window_length, \
"data shape={0} does not equal window_length={1} for data={2}".\
format(data.shape[1], self.window_length, data)
return data.price
@batch_transform
def return_price_batch_decorator(data):
return data.price
@batch_transform
def return_args_batch_decorator(data, *args, **kwargs):
return args, kwargs
@batch_transform
def return_data(data, *args, **kwargs):
return data
@batch_transform
def uses_ufunc(data, *args, **kwargs):
# ufuncs like np.log should not crash
return np.log(data)
@batch_transform
def price_multiple(data, multiplier, extra_arg=1):
return data.price * multiplier * extra_arg
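# Note: zipline's @batch_transform turns each function above into a factory;
# calling it with window parameters, e.g. price_multiple(refresh_period=1,
# window_length=3), returns a transform object whose handle_data(data, ...)
# is invoked once per bar; see BatchTransformAlgorithm.initialize below.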
class BatchTransformAlgorithm(TradingAlgorithm):
def initialize(self, *args, **kwargs):
self.refresh_period = kwargs.pop('refresh_period', 1)
self.window_length = kwargs.pop('window_length', 3)
self.args = args
self.kwargs = kwargs
self.history_return_price_class = []
self.history_return_price_decorator = []
self.history_return_args = []
self.history_return_arbitrary_fields = []
self.history_return_nan = []
self.history_return_sid_filter = []
self.history_return_field_filter = []
self.history_return_field_no_filter = []
self.history_return_ticks = []
self.history_return_not_full = []
self.return_price_class = ReturnPriceBatchTransform(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False
)
self.return_price_decorator = return_price_batch_decorator(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False
)
self.return_args_batch = return_args_batch_decorator(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False
)
self.return_arbitrary_fields = return_data(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False
)
self.return_nan = return_price_batch_decorator(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=True
)
self.return_sid_filter = return_price_batch_decorator(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=True,
sids=[0]
)
self.return_field_filter = return_data(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=True,
fields=['price']
)
self.return_field_no_filter = return_data(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=True
)
self.return_not_full = return_data(
refresh_period=1,
window_length=self.window_length,
compute_only_full=False
)
self.uses_ufunc = uses_ufunc(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False
)
self.price_multiple = price_multiple(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False
)
self.iter = 0
self.set_slippage(FixedSlippage())
def handle_data(self, data):
self.history_return_price_class.append(
self.return_price_class.handle_data(data))
self.history_return_price_decorator.append(
self.return_price_decorator.handle_data(data))
self.history_return_args.append(
self.return_args_batch.handle_data(
data, *self.args, **self.kwargs))
self.history_return_not_full.append(
self.return_not_full.handle_data(data))
self.uses_ufunc.handle_data(data)
# check that calling transforms with the same arguments
# is idempotent
self.price_multiple.handle_data(data, 1, extra_arg=1)
if self.price_multiple.full:
pre = self.price_multiple.rolling_panel.get_current().shape[0]
result1 = self.price_multiple.handle_data(data, 1, extra_arg=1)
post = self.price_multiple.rolling_panel.get_current().shape[0]
assert pre == post, "batch transform is appending redundant events"
result2 = self.price_multiple.handle_data(data, 1, extra_arg=1)
assert result1 is result2, "batch transform is not idempotent"
# check that calling transform with the same data, but
# different supplemental arguments results in new
# results.
result3 = self.price_multiple.handle_data(data, 2, extra_arg=1)
assert result1 is not result3, \
"batch transform is not updating for new args"
result4 = self.price_multiple.handle_data(data, 1, extra_arg=2)
assert result1 is not result4,\
"batch transform is not updating for new kwargs"
new_data = deepcopy(data)
for sidint in new_data:
new_data[sidint]['arbitrary'] = 123
self.history_return_arbitrary_fields.append(
self.return_arbitrary_fields.handle_data(new_data))
# nan every second event price
if self.iter % 2 == 0:
self.history_return_nan.append(
self.return_nan.handle_data(data))
else:
nan_data = deepcopy(data)
nan_data.price = np.nan
self.history_return_nan.append(
self.return_nan.handle_data(nan_data))
self.iter += 1
# Add a new sid to check that it does not get included
extra_sid_data = deepcopy(data)
extra_sid_data[1] = extra_sid_data[0]
self.history_return_sid_filter.append(
self.return_sid_filter.handle_data(extra_sid_data)
)
# Add a field to check that it does not get included
extra_field_data = deepcopy(data)
extra_field_data[0]['ignore'] = extra_sid_data[0]['price']
self.history_return_field_filter.append(
self.return_field_filter.handle_data(extra_field_data)
)
self.history_return_field_no_filter.append(
self.return_field_no_filter.handle_data(extra_field_data)
)
class BatchTransformAlgorithmMinute(TradingAlgorithm):
def initialize(self, *args, **kwargs):
self.refresh_period = kwargs.pop('refresh_period', 1)
self.window_length = kwargs.pop('window_length', 3)
self.args = args
self.kwargs = kwargs
self.history = []
self.batch_transform = return_price_batch_decorator(
refresh_period=self.refresh_period,
window_length=self.window_length,
clean_nans=False,
bars='minute'
)
def handle_data(self, data):
self.history.append(self.batch_transform.handle_data(data))
class SetPortfolioAlgorithm(TradingAlgorithm):
"""
An algorithm that tries to set the portfolio directly.
The portfolio should be treated as a read-only object
within the algorithm.
"""
def initialize(self, *args, **kwargs):
pass
def handle_data(self, data):
self.portfolio = 3
class TALIBAlgorithm(TradingAlgorithm):
"""
An algorithm that applies a TA-Lib transform. The transform object can be
passed at initialization with the 'talib' keyword argument. The results are
stored in the talib_results array.
"""
def initialize(self, *args, **kwargs):
if 'talib' not in kwargs:
raise KeyError('No TA-LIB transform specified '
'(use keyword \'talib\').')
elif not isinstance(kwargs['talib'], (list, tuple)):
self.talib_transforms = (kwargs['talib'],)
else:
self.talib_transforms = kwargs['talib']
self.talib_results = dict((t, []) for t in self.talib_transforms)
def handle_data(self, data):
for t in self.talib_transforms:
result = t.handle_data(data)
if result is None:
if len(t.talib_fn.output_names) == 1:
result = np.nan
else:
result = (np.nan,) * len(t.talib_fn.output_names)
self.talib_results[t].append(result)
class EmptyPositionsAlgorithm(TradingAlgorithm):
"""
An algorithm that ensures that 'phantom' positions do not appear in
portfolio.positions in the case that a position has been entered
and fully exited.
"""
def initialize(self, *args, **kwargs):
self.ordered = False
self.exited = False
def handle_data(self, data):
if not self.ordered:
for s in data:
self.order(self.sid(s), 100)
self.ordered = True
if not self.exited:
amounts = [pos.amount for pos
in itervalues(self.portfolio.positions)]
if (
all([(amount == 100) for amount in amounts]) and
(len(amounts) == len(data.keys()))
):
for stock in self.portfolio.positions:
self.order(self.sid(stock), -100)
self.exited = True
# Should be 0 when all positions are exited.
self.record(num_positions=len(self.portfolio.positions))
class InvalidOrderAlgorithm(TradingAlgorithm):
"""
An algorithm that tries to make various invalid order calls, verifying that
appropriate exceptions are raised.
"""
def initialize(self, *args, **kwargs):
self.asset = self.sid(kwargs.pop('sids')[0])
def handle_data(self, data):
from zipline.api import (
order_percent,
order_target,
order_target_percent,
order_target_value,
order_value,
)
for style in [MarketOrder(), LimitOrder(10),
StopOrder(10), StopLimitOrder(10, 10)]:
with assert_raises(UnsupportedOrderParameters):
order(self.asset, 10, limit_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order(self.asset, 10, stop_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_value(self.asset, 300, limit_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_value(self.asset, 300, stop_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_percent(self.asset, .1, limit_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_percent(self.asset, .1, stop_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_target(self.asset, 100, limit_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_target(self.asset, 100, stop_price=10, style=style)
with assert_raises(UnsupportedOrderParameters):
order_target_value(self.asset, 100,
limit_price=10,
style=style)
with assert_raises(UnsupportedOrderParameters):
order_target_value(self.asset, 100,
stop_price=10,
style=style)
with assert_raises(UnsupportedOrderParameters):
order_target_percent(self.asset, .2,
limit_price=10,
style=style)
with assert_raises(UnsupportedOrderParameters):
order_target_percent(self.asset, .2,
stop_price=10,
style=style)
##############################
# Quantopian style algorithms
# Noop algo
def initialize_noop(context):
pass
def handle_data_noop(context, data):
pass
# API functions
def initialize_api(context):
context.incr = 0
context.sale_price = None
set_slippage(FixedSlippage())
def handle_data_api(context, data):
if context.incr == 0:
assert 0 not in context.portfolio.positions
else:
assert context.portfolio.positions[0]['amount'] == \
context.incr, "Orders not filled immediately."
assert context.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
context.incr += 1
order(sid(0), 1)
record(incr=context.incr)
###########################
# AlgoScripts as strings
noop_algo = """
# Noop algo
def initialize(context):
pass
def handle_data(context, data):
pass
"""
api_algo = """
from zipline.api import (order,
set_slippage,
FixedSlippage,
record,
sid)
def initialize(context):
context.incr = 0
context.sale_price = None
set_slippage(FixedSlippage())
def handle_data(context, data):
if context.incr == 0:
assert 0 not in context.portfolio.positions
else:
assert context.portfolio.positions[0]['amount'] == \
context.incr, "Orders not filled immediately."
assert context.portfolio.positions[0]['last_sale_price'] == \
data[0].price, "Orders not filled at current price."
context.incr += 1
order(sid(0), 1)
record(incr=context.incr)
"""
api_get_environment_algo = """
from zipline.api import get_environment, order, symbol
def initialize(context):
context.environment = get_environment()
handle_data = lambda context, data: order(symbol(0), 1)
"""
api_symbol_algo = """
from zipline.api import (order,
symbol)
def initialize(context):
pass
def handle_data(context, data):
order(symbol(0), 1)
"""
call_order_in_init = """
from zipline.api import (order)
def initialize(context):
order(0, 10)
pass
def handle_data(context, data):
pass
"""
access_portfolio_in_init = """
def initialize(context):
var = context.portfolio.cash
pass
def handle_data(context, data):
pass
"""
access_account_in_init = """
def initialize(context):
var = context.account.settled_cash
pass
def handle_data(context, data):
pass
"""
call_all_order_methods = """
from zipline.api import (order,
order_value,
order_percent,
order_target,
order_target_value,
order_target_percent,
sid)
def initialize(context):
pass
def handle_data(context, data):
order(sid(0), 10)
order_value(sid(0), 300)
order_percent(sid(0), .1)
order_target(sid(0), 100)
order_target_value(sid(0), 100)
order_target_percent(sid(0), .2)
"""
record_variables = """
from zipline.api import record
def initialize(context):
context.stocks = [0, 1]
context.incr = 0
def handle_data(context, data):
context.incr += 1
record(incr=context.incr)
"""
record_float_magic = """
from zipline.api import record
def initialize(context):
context.stocks = [0, 1]
context.incr = 0
def handle_data(context, data):
context.incr += 1
record(data=float('%s'))
"""
| 31.181153 | 79 | 0.607347 |
7942d36247cc0ab01377b2798b873a841c07bfc7 | 3,234 | py | Python | mpf/devices/new_device_template.py | xsdk/mpf | f99fdfb9cbe7f13d42a914e6ace13e05214c8739 | [
"MIT"
] | null | null | null | mpf/devices/new_device_template.py | xsdk/mpf | f99fdfb9cbe7f13d42a914e6ace13e05214c8739 | [
"MIT"
] | null | null | null | mpf/devices/new_device_template.py | xsdk/mpf | f99fdfb9cbe7f13d42a914e6ace13e05214c8739 | [
"MIT"
] | null | null | null | """ Template file for a new device driver."""
# Documentation and more info at http://missionpinball.com/framework
# Search this file for 'YourNewDevice' and replace with your device name
import logging
from mpf.system.devices import Device
class YourNewDevice(Device):
config_section = 'YourNewDevices'
collection = 'yournewdevices'
""" The two class attributes above control how devices based on this class
are configured and how they're presented to the MPF.
`config_section` is the name of the section in the machine configuration
files that contains settings for this type of device. The game programmer
would then create subsections for each device of this type, with individual
settings under each one.
For example, in the machine configuration files:
YourNewDevices:
device1:
setting1: foo
setting2: bar
tags: tag2, tag3
label: A plain english description of this device
device2:
setting1: foo
setting2: bar
tags: tag1, tag2
label: A plain english description of this device
`collection` is the DeviceCollection instance that will be created to hold
all the devices of this new type. For example, if collection is
'yournewdevices', a collection will be created which is accessible via
self.machine.yournewdevices. """
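# Illustrative access pattern (device names taken from the docstring example
# above; exact DeviceCollection semantics may vary):
#   device1 = self.machine.yournewdevices['device1']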
@classmethod
def device_class_init(cls, machine):
"""This @classmethod is optional, but is called automatically before
individual devices based on this device class are created. You can use
it for any system-wide settings, configurations, or objects that you
might need for these types of devices outside of the individual devices
themselves.
For example, led.py uses this to make sure the global fade_ms default
fade time is a float. The EM score reels devices use this to set up the
score controller that has to exist to manage them.
You can safely delete this method if your device doesn't need it. (Most
don't need it.)
"""
pass
def __init__(self, machine, name, config, collection=None):
self.log = logging.getLogger('YourNewDevice.' + name)
super(YourNewDevice, self).__init__(machine, name, config, collection)
# Since this new device class is a subclass of Device and you're calling
# super(), several attributes are available to you, including:
# self.machine - a reference to the main machine controller object
# self.name - a string of the name of this device ('device1', 'device2', etc.)
# self.tags - any tags that were specified in the machine config files
# self.label - a plain english description from the machine config files
# Next, set config defaults
# Typically you'd want to configure the default settings so your device
# works even if the game programmer doesn't specify all the options for this
# device in their machine configuration files.
# For example:
# if 'foo' not in self.config:
# self.config['foo'] = 'bar'
# Finally, add the event handlers, methods, and attributes you need for your
# new device. | 39.439024 | 82 | 0.694187 |
7942d378242cb6e3c6f0e8917fcaa171e5de402f | 656 | py | Python | Course3/Lab4/validations.py | DeniSaputra-art/it-cert-automation-practice | 4bdfdcd8aedc85735b71bfa980e525a92690e950 | [
"Apache-2.0"
] | null | null | null | Course3/Lab4/validations.py | DeniSaputra-art/it-cert-automation-practice | 4bdfdcd8aedc85735b71bfa980e525a92690e950 | [
"Apache-2.0"
] | null | null | null | Course3/Lab4/validations.py | DeniSaputra-art/it-cert-automation-practice | 4bdfdcd8aedc85735b71bfa980e525a92690e950 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import re
def validate_user(username, minlen):
"""Checks if the received username matches the required conditions."""
if type(username) != str:
raise TypeError("username must be a string")
if minlen < 1:
raise ValueError("minlen must be at least 1")
# Usernames can't be shorter than minlen
if len(username) > minlen:
return False
# Usernames can only use letters, numbers, dots and underscores
if not re.match('^[a-z0-9._]*$', username):
return False
# Usernames can't begin with a number
if username[0].isnumeric():
return False
return True
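# Usage sketch (expected results, assuming the corrected length check above):
#   validate_user("blue.kale", 3)  # True
#   validate_user("Blue.kale", 3)  # False: uppercase letters not allowed
#   validate_user("1kale", 3)      # False: can't begin with a number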
| 26.24 | 74 | 0.646341 |
7942d424f593883c757a32fe159a7e374b440403 | 25,068 | py | Python | backend/controller/dir.py | artontech/ArtonFileManager | b099c5294ab731b0a0f1eb7dbe35397df4515863 | [
"Apache-2.0"
] | 1 | 2020-11-17T12:45:47.000Z | 2020-11-17T12:45:47.000Z | backend/controller/dir.py | artontech/ArtonFileManager | b099c5294ab731b0a0f1eb7dbe35397df4515863 | [
"Apache-2.0"
] | null | null | null | backend/controller/dir.py | artontech/ArtonFileManager | b099c5294ab731b0a0f1eb7dbe35397df4515863 | [
"Apache-2.0"
] | null | null | null | ''' dir '''
import json
import logging
import os
import shutil
import tornado.gen
from tornado.concurrent import run_on_executor
from backend.controller.default import (DefaultHandler, DefaultWSHandler)
from backend.model.attribute import Attribute
from backend.model.dir import Dir
from backend.model.file import File
from backend.service import workspace
from backend.util import (image, io, string)
class DirWebSocket(DefaultWSHandler):
name = "dir"
def open(self):
logging.info("[ws] Dir WebSocket opened")
def on_message(self, msg):
msg_json = json.loads(msg, strict=False)
msg_type = msg_json.get("type", None)
if msg_type == "init":
wid = msg_json.get("wid", None)
logging.info("[ws] dir init: wid=%s", wid)
if wid is None:
self.write_json(msg_type="init", err="no_wid")
return
self.space = workspace.get_by_id(wid)
if self.space is None or not self.space.enabled:
self.write_json(msg_type="init", err="no_workspace")
return
self.space.add_ws(self)
self.write_json(msg_type="init", status="success",
data=self.space.serializable())
def on_close(self):
logging.info("[ws] close")
if hasattr(self, "space") and self.space is not None:
self.space.del_ws(self)
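# Expected client handshake, inferred from on_message above:
#   {"type": "init", "wid": "<workspace id>"}
# On success the server replies with msg_type="init", status="success" and
# the serialized workspace; a missing wid gets err="no_wid" and an unknown
# workspace gets err="no_workspace".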
class Import(DefaultHandler):
''' import dir '''
def data_received(self, chunk):
pass
def get(self):
''' get '''
self.post()
@tornado.gen.coroutine
def post(self):
''' post '''
wid = self.get_arg("wid")
path = self.get_arg("path")
base_id = int(self.get_arg("current"))
delete = self.get_arg("delete")
encrypt = self.get_arg("encrypt")
yield self.import_dir(wid, path, base_id, delete, encrypt)
@run_on_executor
def import_dir(self, wid, path, base_id, delete, encrypt):
# trim input
if not isinstance(encrypt, str) or encrypt.strip() == "":
encrypt = None
# get workspace first
space = workspace.get_by_id(wid)
if space is None or not space.enabled:
self.write_json(err="no_workspace")
return
# dup image backup dir
bak_path = os.path.join(space.data_path, "bak")
os.makedirs(bak_path, exist_ok=True)
# collect missing (reusable) ids for each table
miss_attribute_id = space.driver.get_miss_ids("attribute")
miss_dir_id = space.driver.get_miss_ids("dir")
miss_file_id = space.driver.get_miss_ids("file")
# get directory structure, write to database table `dir` & `file`
dir_id_map = {}
id_dir_map = {}
dir_len = 0
for current, _, files in os.walk(path):
if current not in dir_id_map:
dir_name = os.path.basename(current)
parent = os.path.dirname(current)
parent_id = dir_id_map.get(parent, {}).get("id", base_id)
# check exist
dir_list = space.driver.get_dirs(
parent=parent_id, delete=0, name=dir_name)
if len(dir_list) > 0:
current_id = dir_list[0].id
dir_ok = True
logging.info("existing dir %s:%s", current_id, current)
else:
# add dir to table `dir`
dir_model = Dir()
# if a missing id is available, reuse it
if len(miss_dir_id) > 0:
dir_model.id = miss_dir_id[0]
dir_model.parent = parent_id
dir_model.name = dir_name
current_id, dir_ok = space.driver.add_dir(dir_model)
logging.info("add dir %s:%s", current_id, current)
# remove id
if dir_ok and current_id in miss_dir_id:
miss_dir_id.remove(current_id)
dir_id_map[current] = {"id": current_id, "parent": parent_id}
id_dir_map[current_id] = current
# add files to table `file`
file_ok = True
if len(files) > 0:
# build a lookup of files that already exist in this dir
file_model = File()
file_model.none()
file_model.dir = current_id
file_model.delete = 0
file_list, _ = space.driver.get_files(file_model)
exist_files = {}
for obj in file_list:
f = obj.name + obj.ext
exist_files[f] = True
# add files
for f in files:
if not exist_files.get(f, False):
sp = os.path.splitext(f)
ext = sp[1].lower() if sp[1] is not None else ""
file = File()
# if a missing id is available, reuse it
if len(miss_file_id) > 0:
file.id = miss_file_id[0]
file.dir = current_id
file.name = sp[0]
file.ext = ext
file_id, file_ok = space.driver.add_file(file)
if not file_ok:
break
if file_id in miss_file_id:
miss_file_id.remove(file_id)
if dir_ok and file_ok:
space.driver.commit()
else:
space.driver.rollback()
space.send_ws(name="dir", msg_type="import", err="db", data={
"type": "msg", "msg": "structure_fail"
})
self.write_json(err="structure_fail")
return
dir_len += 1
if dir_len % 100 == 0:
space.send_ws(name="dir", msg_type="import", status="run", data={
"type": "dir", "now": dir_len
})
space.send_ws(name="dir", msg_type="import", status="run", data={
"type": "msg", "msg": "structure_done"
})
# get file list (not processed)
file_model = File()
file_model.none()
file_model.delete = 0
file_list, _ = space.driver.get_files(file_model, attr_null=True)
file_list_len = len(file_list)
step = int(file_list_len / 100) + 1
for i in range(file_list_len):
file_id = file_list[i].id
dir_id = file_list[i].dir
file_ext = file_list[i].ext
file_name = file_list[i].name
file_path = os.path.join(id_dir_map.get(
dir_id, None), string.join(file_name, file_ext))
logging.info("Now: %d, %s", file_id, file_path)
# open file & find dup CRC & SHA
with open(file_path, "rb") as f:
file_data = f.read()
file_size = io.get_file_size(file_path)
file_crc32 = io.get_crc_32(file_data)
file_sha256 = io.get_sha_256(file_data)
# if attr exist
hash_file_name = io.format_file_name(
file_size, file_crc32, file_sha256, None)
move_file_ok = True
attr_list = space.driver.get_attrs(
size=file_size, crc32=file_crc32, sha256=file_sha256)
attr = None
if len(attr_list) > 0:
attr = attr_list[0]
logging.warning("[import] duplicate file %s with %s(%s)",
file_path, attr.file, attr.id)
space.send_ws(name="dir", msg_type="import", status="run", data={
"type": "warn", "msg": "dup_file", "file": file_path, "files": [attr.file, file_id]
})
attr_ok = True
else:
# add attr
attr = Attribute()
# if a missing id is available, reuse it
if len(miss_attribute_id) > 0:
attr.id = miss_attribute_id[0]
attr.file = file_id
attr.type = 0
attr.size = file_size
attr.crc32 = file_crc32
attr.sha256 = file_sha256
attr.ext = file_ext
attr.encrypt = encrypt
attr.key = io.random_key(32)
# load image info
if io.is_cv_support(attr.ext):
try:
img = image.parse_image(file_data)
attr.height = img.shape[0]
attr.width = img.shape[1]
except Exception as e:
logging.error(
"[import] unable to load image %s. %s", file_path, e)
space.send_ws(name="dir", msg_type="import", status="run", data={
"type": "warn", "msg": "load_img_fail", "file": file_path})
try:
attr.ahash = image.a_hash(img)
attr.dhash = image.d_hash(img)
attr.phash = image.p_hash(img)
except Exception as e:
logging.error(
"[import] unable to calc image hash %s. %s", file_path, e)
space.send_ws(name="dir", msg_type="import", status="run", data={
"type": "warn", "msg": "calc_hash_fail", "file": file_path})
# move file
new_file_path = os.path.abspath(
os.path.join(space.data_path, hash_file_name))
if os.path.exists(new_file_path):
bak_file_path = os.path.join(
bak_path, os.path.basename(new_file_path))
logging.warning(
"[import] dst path exist: %s, copy to: %s", new_file_path, bak_file_path)
shutil.copy(new_file_path, bak_file_path)
try:
if attr.encrypt is not None:
io.encrypt_data_to(
file_data, attr.key, new_file_path)
if delete:
os.remove(file_path)
else:
if delete:
os.rename(file_path, new_file_path)
else:
shutil.copy(file_path, new_file_path)
except IOError as e:
move_file_ok = False
logging.error("[import] unable to copy file. %s", e)
space.send_ws(name="dir", msg_type="import", data={
"type": "msg", "msg": "io_error", "file": file_path, "error": e.strerror})
# add `attribute`
attr.id, attr_ok = space.driver.add_attribute(attr)
# remove id
if attr_ok and attr.id in miss_attribute_id:
miss_attribute_id.remove(attr.id)
# update attribute id and hash file name to `file`
file = File()
file.none()
file.id = file_id
file.attribute = attr.id
update_ok = space.driver.update_file(file)
if attr_ok and update_ok and move_file_ok:
space.driver.commit()
else:
space.driver.rollback()
space.send_ws(name="dir", msg_type="import", data={
"type": "msg", "msg": "attr_fail"})
self.write_json(err="attr_fail")
return
if i % step == 0:
space.send_ws(name="dir", msg_type="import", status="run", data={
"type": "files_progress", "total": file_list_len, "now": i})
space.send_ws(name="dir", msg_type="import", status="success", data={
"type": "msg", "msg": "import_done"
})
self.write_json(status="success", data=file_list_len)
class Export(DefaultHandler):
''' export dir '''
def data_received(self, chunk):
pass
def get(self):
''' get '''
self.post()
def post(self):
''' post '''
wid = self.get_arg("wid")
current = self.get_arg("current")
name = self.get_arg("name")
path = os.path.abspath(self.get_arg("path"))
# get workspace first
space = workspace.get_by_id(wid)
if space is None or not space.enabled:
self.write_json(err="no_workspace")
return
# process
id_dir_map = {
current: os.path.join(path, name)
}
dir_stack = [current]
while len(dir_stack) > 0:
dir_id = dir_stack.pop()
dir_path = id_dir_map.get(dir_id, None)
if dir_path is None:
logging.warning("[export] dir_path is none %s", dir_id)
continue
os.makedirs(dir_path, exist_ok=True)
# fetch from db
result_list, _ = space.driver.get_dirs_files(current=dir_id)
for obj in result_list:
if obj.type == "dir":
if id_dir_map.get(obj.id, None) is not None:
continue
id_dir_map[obj.id] = os.path.join(dir_path, obj.name)
dir_stack.append(obj.id)
elif obj.type == "file":
# export
file_path = os.path.join(dir_path, obj.name + obj.ext)
if os.path.exists(file_path):
continue
# get attr
attr_list = space.driver.get_attrs(item_id=obj.attribute)
if len(attr_list) <= 0:
self.write_json(err="no_attr")
return
attr = attr_list[0]
hash_file_name = io.format_file_name(
attr.size, attr.crc32, attr.sha256, None)
# path
hash_file_path = os.path.join(
space.data_path, hash_file_name)
if not os.path.exists(hash_file_path):
self.write_json(err="no_data", data={
"file": hash_file_path})
return
# decrypt
if attr.encrypt is not None and attr.key is not None:
io.decrypt_file_to(hash_file_path, attr.key, file_path)
else:
shutil.copy(hash_file_path, file_path)
self.write_json(status="success", data=current)
class List(DefaultHandler):
''' list dir '''
def data_received(self, chunk):
pass
def get(self):
''' get '''
self.post()
def post(self):
''' post '''
wid = self.get_arg("wid")
current = int(self.get_arg("current"))
show_thumb = string.str2bool(self.get_arg("thumb"))
get_dir = string.str2bool(self.get_arg("dir"))
get_file = string.str2bool(self.get_arg("file"))
page_no = self.get_arg("page_no", default=None)
page_size = self.get_arg("page_size", default=None)
# get workspace first
space = workspace.get_by_id(wid)
if space is None or not space.enabled:
self.write_json(err="no_workspace")
return
# fetch from db
result_list = []
total = None
if get_dir and get_file:
result_list, total = space.driver.get_dirs_files(
current, page_no, page_size)
elif get_dir:
dir_list = space.driver.get_dirs(parent=current, delete=0)
result_list.extend(dir_list)
elif get_file:
file_model = File()
file_model.none()
file_model.dir = current
file_model.delete = 0
file_list, _ = space.driver.get_files(file_model)
result_list.extend(file_list)
# process
for obj in result_list:
if obj.type == "dir":
obj.icon = self.static_url("folder.png", include_version=False)
# get attribute tag
obj.tags = space.driver.union_attribute_tags(
target=obj.id, type_id=2)
elif obj.type == "file":
obj.icon = self.static_url(io.get_icon_name(
obj.ext) + ".png", include_version=False)
if show_thumb and io.is_web_img(obj.ext):
obj.thumb = "/media/link?wid=%s&attribute=%s&filename=thumb" % (
wid, obj.attribute)
# get attr
attr_list = space.driver.get_attrs(item_id=obj.attribute)
if len(attr_list) <= 0:
self.write_json(err="no_attr")
return
obj.attr = attr_list[0]
# get attribute tag by attribute id
obj.tags = space.driver.union_attribute_tags(
target=obj.attribute, type_id=1)
self.write_json(status="success", data={
"list": result_list, "total": total})
class Detail(DefaultHandler):
''' file or dir detail '''
def data_received(self, chunk):
pass
def get(self):
''' get '''
self.post()
def post(self):
''' post '''
wid = self.get_arg("wid")
target = int(self.get_arg("target"))
target_type = self.get_arg("type")
# get workspace first
space = workspace.get_by_id(wid)
if space is None or not space.enabled:
self.write_json(err="no_workspace")
return
if target_type == "file":
# get attr
attr_list = space.driver.get_attrs(item_id=target)
if len(attr_list) <= 0:
self.write_json(err="no_attr")
return
attr = attr_list[0]
self.write_json(status="success", data=attr)
return
elif target_type == "dir":
result = {}
size = 0
file_count = 0
# get dirs
dir_list = [target]
space.driver.enum_dirs(dir_list, target)
# enum files
file_model = File()
for dir_id in dir_list:
file_model.none()
file_model.dir = dir_id
file_model.delete = 0
file_list, _ = space.driver.get_files(file_model)
if len(file_list) <= 0:
continue
file_count += len(file_list)
for obj in file_list:
# get attr
attr_list = space.driver.get_attrs(item_id=obj.attribute)
if len(attr_list) <= 0:
self.write_json(err="no_attr")
return
attr = attr_list[0]
size += attr.size
result["dir_count"] = len(dir_list) - 1
result["file_count"] = file_count
result["size"] = size
self.write_json(status="success", data=result)
return
self.write_json(err="unknown_type")
class Create(DefaultHandler):
''' create dir '''
def data_received(self, chunk):
pass
def get(self):
''' get '''
self.post()
def post(self):
''' post '''
wid = self.get_arg("wid")
current = self.get_arg("current")
dir_name = self.get_arg("name")
# get workspace first
space = workspace.get_by_id(wid)
if space is None or not space.enabled:
self.write_json(err="no_workspace")
return
# check exist
dir_list = space.driver.get_dirs(
parent=current, delete=0, name=dir_name)
if len(dir_list) > 0:
current_id = dir_list[0].id
logging.info("existing dir %s:%s", current_id, current)
self.write_json(err="dir_exist")
return
else:
# add dir to table `dir`
dir_model = Dir()
dir_model.parent = current
dir_model.name = dir_name
current_id, dir_ok = space.driver.add_dir(dir_model)
logging.info("add dir %s:%s", current_id, current)
# commit to db
if dir_ok:
space.driver.commit()
else:
space.driver.rollback()
self.write_json(err="db")
return
self.write_json(status="success", data=current_id)
class MoveTo(DefaultHandler):
''' move to dir '''
def data_received(self, chunk):
pass
def get(self):
''' get '''
self.post()
def post(self):
''' post '''
wid = self.get_arg("wid")
target_type = self.get_arg("type")
id_from = self.get_arg("from")
id_to = self.get_arg("to")
# get workspace first
space = workspace.get_by_id(wid)
if space is None or not space.enabled:
self.write_json(err="no_workspace")
return
update_ok = False
if target_type == "file":
# update dir id of file
file = File()
file.none()
file.id = id_from
file.dir = id_to
update_ok = space.driver.update_file(file)
elif target_type == "dir":
# update parent of dir
dir_model = Dir()
dir_model.none()
dir_model.id = id_from
dir_model.parent = id_to
update_ok = space.driver.update_dir(dir_model)
else:
self.write_json(err="unknown_type")
return
# commit to db
if update_ok:
space.driver.commit()
else:
space.driver.rollback()
self.write_json(err="db")
return
self.write_json(status="success", data=id_to)
class Update(DefaultHandler):
''' update '''
def data_received(self, chunk):
pass
def get(self):
''' get '''
self.post()
def post(self):
''' post '''
wid = self.get_arg("wid")
types = str(self.get_arg("type")).split(',')
targets = [int(s) for s in str(self.get_arg("target")).split(',')]
name = self.get_arg("name", default=None)
ext = self.get_arg("ext", default=None)
delete = self.get_arg("delete", default=None)
# check param
total = len(types)
if total != len(targets):
self.write_json(err="param")
return
# get workspace first
space = workspace.get_by_id(wid)
if space is None or not space.enabled:
self.write_json(err="no_workspace")
return
update_ok = False
for i in range(total):
target_type = types[i]
target = targets[i]
if target_type == "file":
# update dir id of file
file = File()
file.none()
file.id = target
file.name = name
file.ext = ext
file.delete = delete
update_ok = space.driver.update_file(file)
elif target_type == "dir":
# update parent of dir
dir_model = Dir()
dir_model.none()
dir_model.id = target
dir_model.name = name
dir_model.delete = delete
update_ok = space.driver.update_dir(dir_model)
else:
self.write_json(err="unknown_type")
return
if not update_ok:
break
# commit to db
if update_ok:
space.driver.commit()
else:
space.driver.rollback()
self.write_json(err="db")
return
self.write_json(status="success", data=len(targets))
| 36.17316 | 104 | 0.482129 |
7942d48fd50dc39669e67fba02a3d5f22e5abe49 | 1,710 | py | Python | tests/test_server_functionality.py | liannnix/ansible-samba | e1a5d1347c5336daa92dfb1b543f2a64e0e7252f | [
"MIT"
] | null | null | null | tests/test_server_functionality.py | liannnix/ansible-samba | e1a5d1347c5336daa92dfb1b543f2a64e0e7252f | [
"MIT"
] | 12 | 2018-06-15T10:47:50.000Z | 2020-03-03T12:45:16.000Z | tests/test_server_functionality.py | liannnix/ansible-samba | e1a5d1347c5336daa92dfb1b543f2a64e0e7252f | [
"MIT"
] | 9 | 2018-05-31T08:10:11.000Z | 2021-10-01T11:20:11.000Z | # coding=utf-8
"""Domain controllers feature tests."""
from pytest_bdd import (
given,
scenario,
then,
when,
)
from conftest import assert_cmd
import pytest
@scenario('features/server_functionality.feature', 'Each domain controller can see each other')
def test_each_domain_controller_can_see_each_other(ssh_dcs, dcs):
"""Each domain controller can see each other."""
pass
@given('From any domain controller')
def from_any_domain_controller():
"""From any domain controller."""
@when('I ping other controllers')
def i_ping_other_controllers(ssh_dcs, dcs):
"""I ping other controllers."""
ssh_dcs.exec_command('ping -c1 -w1 {}'.format(dcs))
@then('They reply to ping request')
def they_reply_to_ping_request(ssh_dcs):
"""They reply to ping request."""
assert ssh_dcs.res['rc'] == 0, "[{}]: {} failed.".format(ssh_dcs.host, ssh_dcs.res['cmd'])
@scenario('features/server_functionality.feature', 'Users list')
def test_users_list(ssh_dcs):
"""Users list."""
pass
@when('I try to get users list')
def i_try_to_get_users_list(ssh_dcs):
"""I try to get users list."""
# expect = lambda res: res['rc'] == 0 and len(res['stdout']) > 0 and 'Administrator' in res['stdout']
# assert_cmd(expect, ssh_dcs.exec_command('samba-tool user list'))
ssh_dcs.exec_command('sudo samba-tool user list')
@then('It returns non empty list that contains Administrator user')
def it_returns_non_empty_list_that_contains_administrator_user(ssh_dcs):
"""It returns non empty list that contains Administrator user."""
res = ssh_dcs.res
assert res['rc'] == 0
assert len(res['stdout']) > 0
assert 'Administrator' in res['stdout'].split('\n')
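# The Gherkin these steps bind to (reconstructed from the decorators above;
# the real feature file may word it slightly differently):
#   Scenario: Users list
#     Given From any domain controller
#     When I try to get users list
#     Then It returns non empty list that contains Administrator user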
| 30 | 104 | 0.704678 |
7942d5227845433639071f76f7e3440907f51ee2 | 384 | py | Python | dataset/main.py | Datalab-AUTH/MSc---Lampridis---MANIFEST | 9c13018313f2681dd27ef56ac0eb8470319a1749 | [
"Apache-2.0"
] | 3 | 2021-03-28T20:13:11.000Z | 2021-08-23T05:52:27.000Z | dataset/main.py | Datalab-AUTH/MSc---Lampridis---MANIFEST | 9c13018313f2681dd27ef56ac0eb8470319a1749 | [
"Apache-2.0"
] | null | null | null | dataset/main.py | Datalab-AUTH/MSc---Lampridis---MANIFEST | 9c13018313f2681dd27ef56ac0eb8470319a1749 | [
"Apache-2.0"
] | null | null | null | import create_dataset
def main():
data_separated = create_dataset.get_separated_dataset()
data_combined = create_dataset.get_combined_dataset()
print(data_separated)
print(data_combined)
data_separated.to_csv('data_csv/data_separated.csv', index=False)
data_combined.to_csv('data_csv/data_combined.csv', index=False)
if __name__ == "__main__":
main()
| 24 | 69 | 0.755208 |
7942d558e88fc199a45bf86984a08b4e8641bb48 | 8,761 | py | Python | oneflow/python/test/ops/test_nn_conv2d_padding_dynamic.py | wanghongsheng01/framework_enflame | debf613e05e3f5ea8084c3e79b60d0dd9e349526 | [
"Apache-2.0"
] | 1 | 2021-04-14T03:19:35.000Z | 2021-04-14T03:19:35.000Z | oneflow/python/test/ops/test_nn_conv2d_padding_dynamic.py | wanghongsheng01/framework_enflame | debf613e05e3f5ea8084c3e79b60d0dd9e349526 | [
"Apache-2.0"
] | 1 | 2021-06-16T08:37:50.000Z | 2021-06-16T08:37:50.000Z | oneflow/python/test/ops/test_nn_conv2d_padding_dynamic.py | wanghongsheng01/framework_enflame | debf613e05e3f5ea8084c3e79b60d0dd9e349526 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import os
from collections import OrderedDict
import numpy as np
import oneflow as flow
import tensorflow as tf
from test_util import GenArgList
import oneflow.typing as oft
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
global_storage = {}
def global_storage_setter(name):
global global_storage
def _set(x):
global_storage[name] = x
return _set
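# Usage note: flow.watch(blob, global_storage_setter("x")) stores the watched
# value under global_storage["x"] when the job executes; DynamicConvJob below
# relies on this to read back weights, gradients and losses after each run.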
def compare_with_tensorflow(
device_type,
x_shape,
filters,
kernel_size,
groups,
of_padding="SAME",
tf_padding="SAME",
stride=1,
data_format="NCHW",
):
assert device_type in ["gpu", "cpu"]
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.mirrored_view())
if data_format == "NCHW":
xy_data_transpose = (0, 2, 3, 1)
weight_data_transpose = (2, 3, 1, 0)
else:
xy_data_transpose = (0, 1, 2, 3)
weight_data_transpose = (1, 2, 3, 0)
@flow.global_function(type="train", function_config=func_config)
def DynamicConvJob(x: oft.ListNumpy.Placeholder((10, 3, 100, 100))):
with flow.scope.placement(device_type, "0:0"):
x_var = flow.get_variable(
name="v1",
shape=(1,),
dtype=flow.float,
initializer=flow.zeros_initializer(),
)
x_var = flow.cast_to_current_logical_view(x_var)
x += x_var
if data_format == "NCHW":
weight_shape = (filters, x_shape[1] // groups, kernel_size, kernel_size)
else:
weight_shape = (filters, kernel_size, kernel_size, x_shape[3] // groups)
weight = flow.get_variable(
"conv-weight",
shape=weight_shape,
dtype=flow.float,
initializer=flow.random_uniform_initializer(minval=0, maxval=100),
)
weight = flow.cast_to_current_logical_view(weight)
loss = flow.nn.conv2d(
x,
weight,
strides=[stride, stride],
padding=of_padding,
data_format=data_format,
dilations=[1, 1],
groups=groups,
)
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
).minimize(loss)
flow.watch(x, global_storage_setter("x"))
flow.watch_diff(x, global_storage_setter("x_diff"))
flow.watch(weight, global_storage_setter("weight"))
flow.watch_diff(weight, global_storage_setter("weight_diff"))
flow.watch(loss, global_storage_setter("loss"))
flow.watch_diff(loss, global_storage_setter("loss_diff"))
return loss
# OneFlow
data = [np.random.rand(*x_shape).astype(np.float32)]
of_out = DynamicConvJob(data).get().numpy_list()[0]
# TensorFlow
with tf.GradientTape(persistent=True) as tape:
x = tf.Variable(data[0].transpose(xy_data_transpose))
assert groups > 0
assert x_shape[1] % groups == 0
assert filters % groups == 0
weight = tf.Variable(
global_storage["weight"].numpy().transpose(weight_data_transpose)
)
tf_out = tf.nn.conv2d(
x,
weight,
strides=[1, stride, stride, 1],
padding=tf_padding,
data_format="NHWC",
)
idx = np.where(np.abs(of_out.transpose(xy_data_transpose) - tf_out.numpy()) > 5e-4)
assert np.allclose(
of_out.transpose(xy_data_transpose), tf_out.numpy(), rtol=1e-3, atol=1e-3,
)
loss_diff = global_storage["loss_diff"].numpy_list()[0].transpose(xy_data_transpose)
tf_x_diff = tape.gradient(tf_out, x, loss_diff)
tf_weight_diff = tape.gradient(tf_out, weight, loss_diff)
rtol = 1e-4
atol = 1e-4
if device_type == "cpu":
rtol *= 100
atol *= 100
assert np.allclose(
global_storage["x_diff"].numpy_list()[0].transpose(xy_data_transpose),
tf_x_diff.numpy(),
rtol=rtol,
atol=atol,
), (
global_storage["x_diff"].numpy_list()[0].transpose(xy_data_transpose)
- tf_x_diff.numpy()
)
assert np.allclose(
global_storage["weight_diff"].numpy().transpose(weight_data_transpose),
tf_weight_diff.numpy(),
rtol=5e-3,
atol=5e-3,
)
@flow.unittest.skip_unless_1n1d()
@unittest.skip("skip_for_ci")
class TestNnConv2dPaddingDynamic(flow.unittest.TestCase):
def test_padding_valid(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["gpu"]
arg_dict["x_shape"] = [(10, 3, 10, 10), (10, 3, 11, 11)]
arg_dict["filters"] = [64]
arg_dict["kernel_size"] = [3, 2]
arg_dict["groups"] = [1]
arg_dict["of_padding"] = ["VALID"]
arg_dict["tf_padding"] = ["VALID"]
arg_dict["stride"] = [1, 2]
arg_dict["data_format"] = ["NCHW"]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
def test_padding_same(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["gpu"]
arg_dict["x_shape"] = [(10, 3, 10, 10), (10, 3, 11, 11)]
arg_dict["filters"] = [64]
arg_dict["kernel_size"] = [3, 2]
arg_dict["groups"] = [1]
arg_dict["of_padding"] = ["SAME_UPPER"]
arg_dict["tf_padding"] = ["SAME"]
arg_dict["stride"] = [1, 2]
arg_dict["data_format"] = ["NCHW"]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
def test_pad_list1(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["gpu"]
arg_dict["x_shape"] = [(10, 3, 10, 10), (10, 3, 11, 11)]
arg_dict["filters"] = [64]
arg_dict["kernel_size"] = [3, 2]
arg_dict["groups"] = [1]
arg_dict["of_padding"] = [[[0, 0], [0, 0], [0, 1], [1, 0]]]
arg_dict["tf_padding"] = [[[0, 0], [0, 1], [1, 0], [0, 0]]]
arg_dict["stride"] = [1, 2]
arg_dict["data_format"] = ["NCHW"]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
def test_pad_list2(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["gpu"]
arg_dict["x_shape"] = [(10, 3, 10, 10), (10, 3, 11, 11)]
arg_dict["filters"] = [64]
arg_dict["kernel_size"] = [3, 2]
arg_dict["groups"] = [1]
arg_dict["of_padding"] = [[[0, 0], [0, 0], [1, 1], [1, 1]]]
arg_dict["tf_padding"] = [[[0, 0], [1, 1], [1, 1], [0, 0]]]
arg_dict["stride"] = [1, 2]
arg_dict["data_format"] = ["NCHW"]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
def test_pad_list3(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["gpu"]
arg_dict["x_shape"] = [(10, 3, 10, 10), (10, 3, 11, 11)]
arg_dict["filters"] = [64]
arg_dict["kernel_size"] = [3, 2]
arg_dict["groups"] = [1]
arg_dict["of_padding"] = [[[0, 0], [0, 0], [1, 0], [1, 0]]]
arg_dict["tf_padding"] = [[[0, 0], [1, 0], [1, 0], [0, 0]]]
arg_dict["stride"] = [1, 2]
arg_dict["data_format"] = ["NCHW"]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
def test_pad_list4(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["gpu"]
arg_dict["x_shape"] = [(10, 3, 10, 10), (10, 3, 11, 11)]
arg_dict["filters"] = [64]
arg_dict["kernel_size"] = [3, 2]
arg_dict["groups"] = [1]
arg_dict["of_padding"] = [[[0, 0], [0, 0], [10, 2], [10, 2]]]
arg_dict["tf_padding"] = [[[0, 0], [10, 2], [10, 2], [0, 0]]]
arg_dict["stride"] = [1, 2]
arg_dict["data_format"] = ["NCHW"]
for arg in GenArgList(arg_dict):
compare_with_tensorflow(*arg)
if __name__ == "__main__":
unittest.main()
| 34.904382 | 88 | 0.583267 |
7942d697df2653883968f241b4d85459c86b7a48 | 3,792 | py | Python | WatchDogs_Visualisation/oldApps/pie-chart/venv/dash-test-3.py | tnreddy09/WatchDogs_StockMarketAnalysis | 0c72430da633785fcb14e40d8b007c86081d515d | [
"Apache-2.0"
] | 4 | 2020-02-05T11:26:47.000Z | 2021-05-26T07:48:46.000Z | WatchDogs_Visualisation/oldApps/pie-chart/venv/dash-test-3.py | prashanth-thipparthi/WatchDogs_StockMarketAnalysis | 0c72430da633785fcb14e40d8b007c86081d515d | [
"Apache-2.0"
] | null | null | null | WatchDogs_Visualisation/oldApps/pie-chart/venv/dash-test-3.py | prashanth-thipparthi/WatchDogs_StockMarketAnalysis | 0c72430da633785fcb14e40d8b007c86081d515d | [
"Apache-2.0"
] | null | null | null | import dash
import dash_html_components as html
import dash_core_components as dcc
from dash.dependencies import Output, Event, Input
import plotly
import plotly.graph_objs as go
from WatchDogs_MongoWrapper import MongoWrapper
app = dash.Dash(__name__)
app.layout = html.Div([
html.Div(children=[
html.Div(
dcc.Dropdown(
id='my_dropdown',
placeholder='Select a stock',
options=[
{'label': 'Microsoft', 'value': 'Microsoft'},
{'label': 'Facebook', 'value': 'Facebook'},
{'label': 'Visa', 'value': 'Visa'},
{'label': 'Nvidia', 'value': 'Nvidia'},
{'label': 'Google', 'value': 'Google'},
{'label': 'Nike', 'value': 'Nike'},
{'label': 'Alibaba', 'value': 'Alibaba'},
{'label': 'Netflix', 'value': 'Netflix'},
{'label': 'PayPal', 'value': 'PayPal'},
{'label': 'Ebay', 'value': 'Ebay'},
{'label': 'Tesla', 'value': 'Tesla'},
{'label': 'Twitter', 'value': 'Twitter'},
{'label': 'Disney', 'value': 'Disney'},
{'label': 'Pepsi', 'value': 'Pepsi'},
{'label': 'Lyft', 'value': 'Lyft'},
{'label': 'Chevron', 'value': 'Chevron'},
{'label': 'Cisco', 'value': 'Cisco'},
{'label': 'Intel', 'value': 'Intel'},
{'label': 'Verizon', 'value': 'Verizon'},
{'label': 'AT&T', 'value': 'AT&T'},
{'label': 'Nokia', 'value': 'Nokia'},
{'label': 'Comcast', 'value': 'Comcast'},
{'label': 'Kroger', 'value': 'Kroger'},
{'label': 'Boeing', 'value': 'Boeing'},
{'label': 'Starbucks', 'value': 'Starbucks'},
{'label': 'Walmart', 'value': 'Walmart'},
{'label': 'Adobe', 'value': 'Adobe'},
{'label': 'Dell', 'value': 'Dell'},
{'label': 'Ford', 'value': 'Ford'},
{'label': 'Samsung', 'value': 'Samsung'},
]
)
)
]),
html.Div(id='live-update-graph'),
dcc.Interval(
id='interval-update',
interval=2000
)
])
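# The Interval component above fires every 2000 ms; each tick re-triggers the
# callback below, which re-queries MongoDB and redraws the sentiment pie chart
# for the currently selected stock.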
@app.callback(Output('live-update-graph', 'children'),
[Input('my_dropdown', 'value')],
events=[Event('interval-update', 'interval')])
def update_graph_live(value):
mongo = MongoWrapper()
neg_sentiment, neutral_sentiment, pos_sentiment = mongo.get_polarity_tweets_of_stock(value)
negs = neg_sentiment.count()
poss = pos_sentiment.count()
neuu = neutral_sentiment.count()
tots = negs + poss + neuu
posPercentage = (poss / tots) * 100
neuPercentage = (neuu / tots) * 100
negPercentage = (negs / tots) * 100
bigGraph = dcc.Graph(
# animate=True,
figure={
'data': [{
'type': 'pie',
'labels': ['Positive', 'Neutral' ,'Negative'],
'values': [posPercentage, neuPercentage, negPercentage],
'marker': {'colors': ['#32C85A', '#4C93B1', '#FA4632'],
# 'values': [posPercentage, negPercentage],
# 'marker': {'colors': ['#32C85A', '#FA4632'],
},
}],
'layout':{
'title':"Twitter Sentiment for {}\n".format(value)
}
}
)
afterText = html.Div(children='Total tweets pulled for search word: {}.'.format(tots))
return (bigGraph, afterText)
if __name__ == '__main__':
app.run_server(debug=True) | 36.461538 | 95 | 0.466772 |
7942d6d122427027400b58e02483e0f1f027043e | 16,524 | py | Python | sdk/python/pulumi_azure_nextgen/insights/v20180416/outputs.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/insights/v20180416/outputs.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/insights/v20180416/outputs.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'AlertingActionResponse',
'AzNsActionGroupResponse',
'CriteriaResponse',
'DimensionResponse',
'LogMetricTriggerResponse',
'LogToMetricActionResponse',
'ScheduleResponse',
'SourceResponse',
'TriggerConditionResponse',
]
@pulumi.output_type
class AlertingActionResponse(dict):
"""
Specify action need to be taken when rule type is Alert
"""
def __init__(__self__, *,
odata_type: str,
severity: str,
trigger: 'outputs.TriggerConditionResponse',
azns_action: Optional['outputs.AzNsActionGroupResponse'] = None,
throttling_in_min: Optional[int] = None):
"""
Specify action need to be taken when rule type is Alert
:param str odata_type: Specifies the action. Supported values - AlertingAction, LogToMetricAction
:param str severity: Severity of the alert
:param 'TriggerConditionResponseArgs' trigger: The trigger condition that results in the alert rule being.
:param 'AzNsActionGroupResponseArgs' azns_action: Azure action group reference.
:param int throttling_in_min: time (in minutes) for which Alerts should be throttled or suppressed.
"""
pulumi.set(__self__, "odata_type", 'Microsoft.WindowsAzure.Management.Monitoring.Alerts.Models.Microsoft.AppInsights.Nexus.DataContracts.Resources.ScheduledQueryRules.AlertingAction')
pulumi.set(__self__, "severity", severity)
pulumi.set(__self__, "trigger", trigger)
if azns_action is not None:
pulumi.set(__self__, "azns_action", azns_action)
if throttling_in_min is not None:
pulumi.set(__self__, "throttling_in_min", throttling_in_min)
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> str:
"""
Specifies the action. Supported values - AlertingAction, LogToMetricAction
"""
return pulumi.get(self, "odata_type")
@property
@pulumi.getter
def severity(self) -> str:
"""
Severity of the alert
"""
return pulumi.get(self, "severity")
@property
@pulumi.getter
def trigger(self) -> 'outputs.TriggerConditionResponse':
"""
The trigger condition that results in the alert rule being.
"""
return pulumi.get(self, "trigger")
@property
@pulumi.getter(name="aznsAction")
def azns_action(self) -> Optional['outputs.AzNsActionGroupResponse']:
"""
Azure action group reference.
"""
return pulumi.get(self, "azns_action")
@property
@pulumi.getter(name="throttlingInMin")
def throttling_in_min(self) -> Optional[int]:
"""
time (in minutes) for which Alerts should be throttled or suppressed.
"""
return pulumi.get(self, "throttling_in_min")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
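# Illustrative only (constructor arguments are assumptions; real instances are
# materialized by the Pulumi engine from Azure API responses):
#   action = AlertingActionResponse(odata_type="...", severity="3",
#                                   trigger=trigger_condition)
#   action.severity  # -> "3"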
@pulumi.output_type
class AzNsActionGroupResponse(dict):
"""
Azure action group
"""
def __init__(__self__, *,
action_group: Optional[Sequence[str]] = None,
custom_webhook_payload: Optional[str] = None,
email_subject: Optional[str] = None):
"""
Azure action group
:param Sequence[str] action_group: Azure Action Group reference.
:param str custom_webhook_payload: Custom payload to be sent for all webhook URI in Azure action group
:param str email_subject: Custom subject override for all email ids in Azure action group
"""
if action_group is not None:
pulumi.set(__self__, "action_group", action_group)
if custom_webhook_payload is not None:
pulumi.set(__self__, "custom_webhook_payload", custom_webhook_payload)
if email_subject is not None:
pulumi.set(__self__, "email_subject", email_subject)
@property
@pulumi.getter(name="actionGroup")
def action_group(self) -> Optional[Sequence[str]]:
"""
Azure Action Group reference.
"""
return pulumi.get(self, "action_group")
@property
@pulumi.getter(name="customWebhookPayload")
def custom_webhook_payload(self) -> Optional[str]:
"""
Custom payload to be sent for all webhook URI in Azure action group
"""
return pulumi.get(self, "custom_webhook_payload")
@property
@pulumi.getter(name="emailSubject")
def email_subject(self) -> Optional[str]:
"""
Custom subject override for all email ids in Azure action group
"""
return pulumi.get(self, "email_subject")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class CriteriaResponse(dict):
"""
Specifies the criteria for converting log to metric.
"""
def __init__(__self__, *,
metric_name: str,
dimensions: Optional[Sequence['outputs.DimensionResponse']] = None):
"""
Specifies the criteria for converting log to metric.
:param str metric_name: Name of the metric
:param Sequence['DimensionResponseArgs'] dimensions: List of Dimensions for creating metric
"""
pulumi.set(__self__, "metric_name", metric_name)
if dimensions is not None:
pulumi.set(__self__, "dimensions", dimensions)
@property
@pulumi.getter(name="metricName")
def metric_name(self) -> str:
"""
Name of the metric
"""
return pulumi.get(self, "metric_name")
@property
@pulumi.getter
def dimensions(self) -> Optional[Sequence['outputs.DimensionResponse']]:
"""
List of Dimensions for creating metric
"""
return pulumi.get(self, "dimensions")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DimensionResponse(dict):
"""
Specifies the criteria for converting log to metric.
"""
def __init__(__self__, *,
name: str,
operator: str,
values: Sequence[str]):
"""
Specifies the criteria for converting log to metric.
:param str name: Name of the dimension
:param str operator: Operator for dimension values
:param Sequence[str] values: List of dimension values
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "operator", operator)
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def name(self) -> str:
"""
Name of the dimension
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def operator(self) -> str:
"""
Operator for dimension values
"""
return pulumi.get(self, "operator")
@property
@pulumi.getter
def values(self) -> Sequence[str]:
"""
List of dimension values
"""
return pulumi.get(self, "values")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
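# Illustrative sketch (hypothetical metric and dimension values): a
# CriteriaResponse pairs a metric name with optional DimensionResponse
# entries, matching the signatures defined above.
#
#     criteria = CriteriaResponse(
#         metric_name="AverageDuration",
#         dimensions=[DimensionResponse(name="Computer", operator="Include", values=["*"])],
#     )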
@pulumi.output_type
class LogMetricTriggerResponse(dict):
"""
A log metrics trigger descriptor.
"""
def __init__(__self__, *,
metric_column: Optional[str] = None,
metric_trigger_type: Optional[str] = None,
threshold: Optional[float] = None,
threshold_operator: Optional[str] = None):
"""
A log metrics trigger descriptor.
:param str metric_column: Evaluation of metric on a particular column
:param str metric_trigger_type: Metric Trigger Type - 'Consecutive' or 'Total'
:param float threshold: The threshold of the metric trigger.
        :param str threshold_operator: Evaluation operation for Metric - 'GreaterThan' or 'LessThan' or 'Equal'.
"""
if metric_column is not None:
pulumi.set(__self__, "metric_column", metric_column)
if metric_trigger_type is not None:
pulumi.set(__self__, "metric_trigger_type", metric_trigger_type)
if threshold is not None:
pulumi.set(__self__, "threshold", threshold)
if threshold_operator is not None:
pulumi.set(__self__, "threshold_operator", threshold_operator)
@property
@pulumi.getter(name="metricColumn")
def metric_column(self) -> Optional[str]:
"""
Evaluation of metric on a particular column
"""
return pulumi.get(self, "metric_column")
@property
@pulumi.getter(name="metricTriggerType")
def metric_trigger_type(self) -> Optional[str]:
"""
Metric Trigger Type - 'Consecutive' or 'Total'
"""
return pulumi.get(self, "metric_trigger_type")
@property
@pulumi.getter
def threshold(self) -> Optional[float]:
"""
The threshold of the metric trigger.
"""
return pulumi.get(self, "threshold")
@property
@pulumi.getter(name="thresholdOperator")
def threshold_operator(self) -> Optional[str]:
"""
        Evaluation operation for Metric - 'GreaterThan' or 'LessThan' or 'Equal'.
"""
return pulumi.get(self, "threshold_operator")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class LogToMetricActionResponse(dict):
"""
    Specifies the action that needs to be taken when the rule type is converting log to metric.
"""
def __init__(__self__, *,
criteria: Sequence['outputs.CriteriaResponse'],
odata_type: str):
"""
        Specifies the action that needs to be taken when the rule type is converting log to metric.
:param Sequence['CriteriaResponseArgs'] criteria: Criteria of Metric
:param str odata_type: Specifies the action. Supported values - AlertingAction, LogToMetricAction
"""
pulumi.set(__self__, "criteria", criteria)
pulumi.set(__self__, "odata_type", 'Microsoft.WindowsAzure.Management.Monitoring.Alerts.Models.Microsoft.AppInsights.Nexus.DataContracts.Resources.ScheduledQueryRules.LogToMetricAction')
@property
@pulumi.getter
def criteria(self) -> Sequence['outputs.CriteriaResponse']:
"""
Criteria of Metric
"""
return pulumi.get(self, "criteria")
@property
@pulumi.getter(name="odataType")
def odata_type(self) -> str:
"""
Specifies the action. Supported values - AlertingAction, LogToMetricAction
"""
return pulumi.get(self, "odata_type")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ScheduleResponse(dict):
"""
Defines how often to run the search and the time interval.
"""
def __init__(__self__, *,
frequency_in_minutes: int,
time_window_in_minutes: int):
"""
Defines how often to run the search and the time interval.
        :param int frequency_in_minutes: Frequency (in minutes) at which the rule condition should be evaluated.
:param int time_window_in_minutes: Time window for which data needs to be fetched for query (should be greater than or equal to frequencyInMinutes).
"""
pulumi.set(__self__, "frequency_in_minutes", frequency_in_minutes)
pulumi.set(__self__, "time_window_in_minutes", time_window_in_minutes)
@property
@pulumi.getter(name="frequencyInMinutes")
def frequency_in_minutes(self) -> int:
"""
        Frequency (in minutes) at which the rule condition should be evaluated.
"""
return pulumi.get(self, "frequency_in_minutes")
@property
@pulumi.getter(name="timeWindowInMinutes")
def time_window_in_minutes(self) -> int:
"""
Time window for which data needs to be fetched for query (should be greater than or equal to frequencyInMinutes).
"""
return pulumi.get(self, "time_window_in_minutes")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
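# Illustrative sketch: per the docstrings above, time_window_in_minutes should
# be greater than or equal to frequency_in_minutes (the values here are made up).
#
#     schedule = ScheduleResponse(frequency_in_minutes=5, time_window_in_minutes=10)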
@pulumi.output_type
class SourceResponse(dict):
"""
Specifies the log search query.
"""
def __init__(__self__, *,
data_source_id: str,
authorized_resources: Optional[Sequence[str]] = None,
query: Optional[str] = None,
query_type: Optional[str] = None):
"""
Specifies the log search query.
:param str data_source_id: The resource uri over which log search query is to be run.
        :param Sequence[str] authorized_resources: List of resources referred to in the query
:param str query: Log search query. Required for action type - AlertingAction
        :param str query_type: Set value to 'ResultCount'.
"""
pulumi.set(__self__, "data_source_id", data_source_id)
if authorized_resources is not None:
pulumi.set(__self__, "authorized_resources", authorized_resources)
if query is not None:
pulumi.set(__self__, "query", query)
if query_type is not None:
pulumi.set(__self__, "query_type", query_type)
@property
@pulumi.getter(name="dataSourceId")
def data_source_id(self) -> str:
"""
The resource uri over which log search query is to be run.
"""
return pulumi.get(self, "data_source_id")
@property
@pulumi.getter(name="authorizedResources")
def authorized_resources(self) -> Optional[Sequence[str]]:
"""
        List of resources referred to in the query
"""
return pulumi.get(self, "authorized_resources")
@property
@pulumi.getter
def query(self) -> Optional[str]:
"""
Log search query. Required for action type - AlertingAction
"""
return pulumi.get(self, "query")
@property
@pulumi.getter(name="queryType")
def query_type(self) -> Optional[str]:
"""
        Set value to 'ResultCount'.
"""
return pulumi.get(self, "query_type")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
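# Illustrative sketch (hypothetical resource id and query): a SourceResponse
# as it might look for an AlertingAction rule, where `query` is required.
#
#     source = SourceResponse(
#         data_source_id="/subscriptions/.../workspaces/my-workspace",
#         query="Heartbeat | summarize count() by bin(TimeGenerated, 5m)",
#         query_type="ResultCount",
#     )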
@pulumi.output_type
class TriggerConditionResponse(dict):
"""
    The condition that results in the Log Search rule being triggered.
"""
def __init__(__self__, *,
threshold: float,
threshold_operator: str,
metric_trigger: Optional['outputs.LogMetricTriggerResponse'] = None):
"""
        The condition that results in the Log Search rule being triggered.
:param float threshold: Result or count threshold based on which rule should be triggered.
        :param str threshold_operator: Evaluation operation for rule - 'GreaterThan' or 'LessThan'.
:param 'LogMetricTriggerResponseArgs' metric_trigger: Trigger condition for metric query rule
"""
pulumi.set(__self__, "threshold", threshold)
pulumi.set(__self__, "threshold_operator", threshold_operator)
if metric_trigger is not None:
pulumi.set(__self__, "metric_trigger", metric_trigger)
@property
@pulumi.getter
def threshold(self) -> float:
"""
Result or count threshold based on which rule should be triggered.
"""
return pulumi.get(self, "threshold")
@property
@pulumi.getter(name="thresholdOperator")
def threshold_operator(self) -> str:
"""
        Evaluation operation for rule - 'GreaterThan' or 'LessThan'.
"""
return pulumi.get(self, "threshold_operator")
@property
@pulumi.getter(name="metricTrigger")
def metric_trigger(self) -> Optional['outputs.LogMetricTriggerResponse']:
"""
Trigger condition for metric query rule
"""
return pulumi.get(self, "metric_trigger")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
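# Illustrative sketch (made-up thresholds): a trigger condition with the
# optional metric trigger, built from the two classes defined above.
#
#     trigger = TriggerConditionResponse(
#         threshold=3,
#         threshold_operator="GreaterThan",
#         metric_trigger=LogMetricTriggerResponse(
#             metric_trigger_type="Consecutive",
#             threshold=1,
#             threshold_operator="GreaterThan",
#         ),
#     )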
| 35.008475 | 194 | 0.643912 |
7942d715dc06e7e47e52d8966b8c73f0a458082f | 2,372 | py | Python | src/chatbot/aiml/Utils.py | johnmelodyme/chatbot | 0abe20bdc291eec7dbf1df95c208f995a5b17106 | ["MIT"] | 1 | 2018-12-13T07:50:26.000Z | 2018-12-13T07:50:26.000Z | src/chatbot/aiml/Utils.py | c4pt000/Sophia-bot | 91c85d2bb58cf0cea54dacb3958ce2b966a40616 | ["MIT"] | null | null | null | src/chatbot/aiml/Utils.py | c4pt000/Sophia-bot | 91c85d2bb58cf0cea54dacb3958ce2b966a40616 | ["MIT"] | null | null | null |
"""
Copyright 2003-2010 Cort Stratton. All rights reserved.
Copyright 2015, 2016 Hanson Robotics
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FREEBSD PROJECT OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
"""This file contains assorted general utility functions used by other
modules in the PyAIML package.
"""
def sentences(s):
"""Split the string s into a list of sentences."""
try:
s + ""
    except TypeError:
        raise TypeError("s must be a string")
pos = 0
sentenceList = []
l = len(s)
while pos < l:
try:
p = s.index('.', pos)
        except ValueError:
p = l + 1
try:
q = s.index('?', pos)
        except ValueError:
q = l + 1
try:
e = s.index('!', pos)
        except ValueError:
e = l + 1
end = min(p, q, e)
sentenceList.append(s[pos:end].strip())
pos = end + 1
# If no sentences were found, return a one-item list containing
# the entire input string.
if len(sentenceList) == 0:
sentenceList.append(s)
return sentenceList
# Self test
if __name__ == "__main__":
# sentences
sents = sentences(
"First. Second, still? Third and Final! Well, not really")
assert(len(sents) == 4)
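    # added check (illustrative): input without terminal punctuation falls
    # back to a one-item list holding the whole string
    sents = sentences("no terminal punctuation here")
    assert(len(sents) == 1)
    assert(sents[0] == "no terminal punctuation here")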
| 34.376812 | 71 | 0.676644 |
7942d7dc5c754c363d2c6d88ad5a20066777a93a | 3,154 | py | Python | examples/data/ogbl_collab.py | amznero/graph-learn | edb359edc0ce2bfdd5651cff62c592ded40a0849 | ["Apache-2.0"] | null | null | null | examples/data/ogbl_collab.py | amznero/graph-learn | edb359edc0ce2bfdd5651cff62c592ded40a0849 | ["Apache-2.0"] | null | null | null | examples/data/ogbl_collab.py | amznero/graph-learn | edb359edc0ce2bfdd5651cff62c592ded40a0849 | ["Apache-2.0"] | null | null | null |
# Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""ogbl_collab dataset.
"""
import numpy as np
from ogb.linkproppred import LinkPropPredDataset
# load data
dataset = LinkPropPredDataset(name='ogbl-collab')
split_edge = dataset.get_edge_split()
train_edge, valid_edge, test_edge = split_edge['train'], split_edge['valid'], split_edge['test']
# train_edge['edge'], (1179052, 2)
# train_edge['weight'], (1179052,)
# train_edge['year'], (1179052,)
# valid_edge, 60084
# test_edge, 46329
graph = dataset[0]
num_nodes = graph['num_nodes'] # 235868
node_feat = graph['node_feat'] # shape(235868, 128)
# dump to disk
root = 'ogbl_collab/'
train_table = root + 'ogbl_collab_train_edge'
val_table = root + 'ogbl_collab_val_edge'
test_table = root + 'ogbl_collab_test_edge'
node_table = root + 'ogbl_collab_node'
val_neg_table = root + 'ogbl_collab_val_edge_neg'
test_neg_table = root + 'ogbl_collab_test_edge_neg'
with open(train_table, 'w') as f:
f.write('src_id:int64' + '\t' + 'dst_id:int64' + '\t' + 'weight:double\n')
for i in range(len(train_edge['edge'])):
f.write(str(train_edge['edge'][i, 0]) + '\t' + str(train_edge['edge'][i, 1])
+ '\t' + str(train_edge['weight'][i]) + '\n')
with open(val_table, 'w') as f:
f.write('src_id:int64' + '\t' + 'dst_id:int64' + '\t' + 'weight:double\n')
for i in range(len(valid_edge['edge'])):
f.write(str(valid_edge['edge'][i, 0]) + '\t' + str(valid_edge['edge'][i, 1])
+ '\t' + str(valid_edge['weight'][i]) + '\n')
with open(test_table, 'w') as f:
f.write('src_id:int64' + '\t' + 'dst_id:int64' + '\t' + 'weight:double\n')
for i in range(len(test_edge['edge'])):
f.write(str(test_edge['edge'][i, 0]) + '\t' + str(test_edge['edge'][i, 1])
+ '\t' + str(test_edge['weight'][i]) + '\n')
with open(node_table, 'w') as f:
f.write('id:int64' + '\t' + 'feature:string\n')
for i in range(num_nodes):
f.write(str(i) + '\t' + str(':'.join(map(str, node_feat[i]))) + '\n')
with open(val_neg_table, 'w') as f:
f.write('src_id:int64' + '\t' + 'dst_id:int64' + '\t' + 'weight:double\n')
for i in range(len(valid_edge['edge_neg'])):
f.write(str(valid_edge['edge_neg'][i, 0]) + '\t' + str(valid_edge['edge_neg'][i, 1])
+ '\t' + '1.0\n')
with open(test_neg_table, 'w') as f:
f.write('src_id:int64' + '\t' + 'dst_id:int64' + '\t' + 'weight:double\n')
for i in range(len(test_edge['edge_neg'])):
f.write(str(test_edge['edge_neg'][i, 0]) + '\t' + str(test_edge['edge_neg'][i, 1])
+ '\t' + '1.0\n')
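# Expected on-disk layout (row values are illustrative): every table written
# above is a tab-separated file with a typed header, e.g. ogbl_collab_train_edge:
#
#     src_id:int64    dst_id:int64    weight:double
#     0               1               3.0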
| 40.961039 | 96 | 0.63792 |
7942d81e67b9c30c4896188d9a96ec3281eb2b22 | 2,987 | py | Python | tests/core/test_core_run.py | askanna-io/askanna-python | b6a1dec3b9911888f3d769cef46ce8c2d5cb0dfe | ["Apache-2.0"] | 1 | 2021-02-22T15:53:47.000Z | 2021-02-22T15:53:47.000Z | tests/core/test_core_run.py | askanna-io/askanna-python | b6a1dec3b9911888f3d769cef46ce8c2d5cb0dfe | ["Apache-2.0"] | null | null | null | tests/core/test_core_run.py | askanna-io/askanna-python | b6a1dec3b9911888f3d769cef46ce8c2d5cb0dfe | ["Apache-2.0"] | null | null | null |
import unittest
import responses
from askanna.core import client
from askanna.core.run import RunGateway
a_sample_run_response = {
"message_type": "status",
"status": "queued",
"uuid": "8b73cb55-1ea3-49e1-a294-590a16b60f3f",
"short_uuid": "4F8q-nTCK-M2nA-aXUg",
"name": "Run with name",
"created": "2021-04-22T10:31:29.249069Z",
"updated": "2021-04-22T10:31:29.249091Z",
"finished": None,
"job": {
"relation": "jobdef",
"name": "test-payload",
"uuid": "85632085-5a62-4cf3-9142-34237f5f32d1",
"short_uuid": "43hH-OthG-zG6G-9OVK",
},
"project": {
"relation": "project",
"name": "AskAnna Sandbox",
"uuid": "f1e2144a-87f9-4936-8562-4304c51332ea",
"short_uuid": "7MQT-6309-9g3t-R5QR",
},
"workspace": {
"relation": "workspace",
"name": "AskAnna",
"uuid": "695fcc8b-ba8c-4575-a1e0-f0fcfc70a349",
"short_uuid": "3Cpy-QMzd-MVko-1rDQ",
},
"environment": {
"name": "",
"description": None,
"label": None,
"image": {
"name": "python",
"tag": "3.8-slim",
"digest": "sha256:9b0d7419e2811710aacee87c40a2c94693e2b6810c3e7e466b8c7fc5bde4cd66",
},
"timezone": "UTC",
},
"next_url": "https://beta-api.askanna.eu/v1/status/4F8q-nTCK-M2nA-aXUg/",
}
class SDKRunTest(unittest.TestCase):
def setUp(self):
self.base_url = client.config.remote
self.responses = responses.RequestsMock()
self.responses.start()
self.responses.add(
responses.POST,
url=self.base_url + "run/abcd-abcd-abcd-abcd/",
json=a_sample_run_response,
)
self.responses.add(
responses.POST,
url=self.base_url + "run/abcd-abcd-abcd-abcd/?name=new+name",
json=a_sample_run_response,
)
self.responses.add(
responses.POST,
url=self.base_url + "run/abcd-abcd-abcd-abcd/?description=new+description",
json=a_sample_run_response,
)
self.responses.add(
responses.POST,
url=self.base_url
+ "run/abcd-abcd-abcd-abcd/?name=new+name&description=new+description",
json=a_sample_run_response,
)
def tearDown(self):
        self.responses.stop()
        self.responses.reset()
def test_run_with_name(self):
rgw = RunGateway()
rgw.start("abcd-abcd-abcd-abcd", name="new name")
def test_run_with_description(self):
rgw = RunGateway()
rgw.start("abcd-abcd-abcd-abcd", description="new description")
def test_run_with_name_and_description(self):
rgw = RunGateway()
rgw.start(
"abcd-abcd-abcd-abcd",
name="new name",
description="new description",
)
def test_run_default(self):
rgw = RunGateway()
rgw.start("abcd-abcd-abcd-abcd")
| 29 | 96 | 0.577837 |
7942daeb499906d60245b81d805be6cea334d742 | 250 | py | Python | sdf4sim/__init__.py | sglumac/sdf4sim | 3e8fc3ca577c6badd7f729bd52378b4210d7fba1 | ["MIT"] | 1 | 2020-04-29T10:34:54.000Z | 2020-04-29T10:34:54.000Z | sdf4sim/__init__.py | sglumac/sdf4sim | 3e8fc3ca577c6badd7f729bd52378b4210d7fba1 | ["MIT"] | 1 | 2021-11-15T17:49:09.000Z | 2021-11-15T17:49:09.000Z | sdf4sim/__init__.py | sglumac/sdf4sim | 3e8fc3ca577c6badd7f729bd52378b4210d7fba1 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""Top-level package for sdf4sim."""
__author__ = """Slaven Glumac"""
__email__ = '[email protected]'
__version__ = '0.6.0'
from . import sdf, cs, example, autoconfig
__all__ = ['sdf', 'cs', 'example', 'autoconfig']
| 20.833333 | 48 | 0.644 |
7942dba361e69c113041e6f460f4b838fd197e2a | 3,440 | py | Python | homeassistant/components/guardian/binary_sensor.py | ccatterina/core | 36789cfc310f270bf343676eb94d123e5d0dfa83 | ["Apache-2.0"] | 6 | 2016-11-25T06:36:27.000Z | 2021-11-16T11:20:23.000Z | homeassistant/components/guardian/binary_sensor.py | ccatterina/core | 36789cfc310f270bf343676eb94d123e5d0dfa83 | ["Apache-2.0"] | 45 | 2020-10-15T06:47:06.000Z | 2022-03-31T06:26:16.000Z | homeassistant/components/guardian/binary_sensor.py | ccatterina/core | 36789cfc310f270bf343676eb94d123e5d0dfa83 | ["Apache-2.0"] | 2 | 2020-11-17T09:19:47.000Z | 2020-12-16T03:56:09.000Z |
"""Binary sensors for the Elexa Guardian integration."""
from typing import Callable, Dict
from aioguardian import Client
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_CONNECTIVITY,
DEVICE_CLASS_MOISTURE,
BinarySensorEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from . import GuardianEntity
from .const import (
API_SYSTEM_ONBOARD_SENSOR_STATUS,
API_WIFI_STATUS,
DATA_CLIENT,
DATA_COORDINATOR,
DOMAIN,
)
ATTR_CONNECTED_CLIENTS = "connected_clients"
SENSOR_KIND_AP_INFO = "ap_enabled"
SENSOR_KIND_LEAK_DETECTED = "leak_detected"
SENSORS = [
(SENSOR_KIND_AP_INFO, "Onboard AP Enabled", DEVICE_CLASS_CONNECTIVITY),
(SENSOR_KIND_LEAK_DETECTED, "Leak Detected", DEVICE_CLASS_MOISTURE),
]
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: Callable
) -> None:
"""Set up Guardian switches based on a config entry."""
async_add_entities(
[
GuardianBinarySensor(
entry,
hass.data[DOMAIN][DATA_CLIENT][entry.entry_id],
hass.data[DOMAIN][DATA_COORDINATOR][entry.entry_id],
kind,
name,
device_class,
)
for kind, name, device_class in SENSORS
],
True,
)
class GuardianBinarySensor(GuardianEntity, BinarySensorEntity):
"""Define a generic Guardian sensor."""
def __init__(
self,
entry: ConfigEntry,
client: Client,
coordinators: Dict[str, DataUpdateCoordinator],
kind: str,
name: str,
device_class: str,
) -> None:
"""Initialize."""
super().__init__(entry, client, coordinators, kind, name, device_class, None)
self._is_on = True
@property
def available(self) -> bool:
"""Return whether the entity is available."""
if self._kind == SENSOR_KIND_AP_INFO:
return self._coordinators[API_WIFI_STATUS].last_update_success
if self._kind == SENSOR_KIND_LEAK_DETECTED:
return self._coordinators[
API_SYSTEM_ONBOARD_SENSOR_STATUS
].last_update_success
return False
@property
def is_on(self) -> bool:
"""Return True if the binary sensor is on."""
return self._is_on
async def _async_internal_added_to_hass(self) -> None:
if self._kind == SENSOR_KIND_AP_INFO:
self.async_add_coordinator_update_listener(API_WIFI_STATUS)
elif self._kind == SENSOR_KIND_LEAK_DETECTED:
self.async_add_coordinator_update_listener(API_SYSTEM_ONBOARD_SENSOR_STATUS)
@callback
def _async_update_from_latest_data(self) -> None:
"""Update the entity."""
if self._kind == SENSOR_KIND_AP_INFO:
self._is_on = self._coordinators[API_WIFI_STATUS].data["station_connected"]
self._attrs.update(
{
ATTR_CONNECTED_CLIENTS: self._coordinators[API_WIFI_STATUS].data[
"ap_clients"
]
}
)
elif self._kind == SENSOR_KIND_LEAK_DETECTED:
self._is_on = self._coordinators[API_SYSTEM_ONBOARD_SENSOR_STATUS].data[
"wet"
]
| 31.559633 | 88 | 0.652035 |
7942dc57faa0c4594e7b6b3df0e8ce3dfef73ec4 | 934 | py | Python | setup.py | RRisto/simple_classifiy | 81fcc5c6bdac39cda207526a6117bac2767d1255 | ["MIT"] | 1 | 2019-10-10T15:57:12.000Z | 2019-10-10T15:57:12.000Z | setup.py | RRisto/simple_classifiy | 81fcc5c6bdac39cda207526a6117bac2767d1255 | ["MIT"] | 3 | 2018-09-04T10:12:13.000Z | 2018-09-07T06:38:09.000Z | setup.py | RRisto/simple_classifiy | 81fcc5c6bdac39cda207526a6117bac2767d1255 | ["MIT"] | null | null | null |
from setuptools import setup
def readme():
with open('readme.md') as f:
return f.read()
setup(name='TextClass',
version='0.2',
description='Classes and methods for cleaning, cleaning and classifyng texts',
long_description=readme(),
classifiers=[
'Development Status :: 4 - Beta',
'Programming Language :: Python :: 3.5',
'Topic :: Text Processing :: General',
],
keywords='nlp classification',
url='https://github.com/RRisto/simple_classifiy',
author='Risto Hinno',
author_email='[email protected]',
license='MIT',
packages=['TextClass'],
install_requires=['nltk','sklearn','imblearn','gensim','matplotlib','pyldavis','pandas','pytest','openpyxl',
'mock', 'fasttext==0.8.3'],
test_suite='pytest-runner',
tests_require=['pytest'],
include_package_data=True,
      zip_safe=False)
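# Typical local workflow (assuming pip is available on the machine):
#   pip install .            # install the TextClass package
#   python setup.py pytest   # run the test suite via pytest-runner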
| 34.592593 | 114 | 0.61349 |
7942dccd68bb1bc52dca7d1c09de6aa0a77709ca | 137,053 | py | Python | sdk/hdinsight/azure-mgmt-hdinsight/azure/mgmt/hdinsight/models/_models_py3.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | ["MIT"] | 3 | 2020-06-23T02:25:27.000Z | 2021-09-07T18:48:11.000Z | sdk/hdinsight/azure-mgmt-hdinsight/azure/mgmt/hdinsight/models/_models_py3.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | ["MIT"] | 510 | 2019-07-17T16:11:19.000Z | 2021-08-02T08:38:32.000Z | sdk/hdinsight/azure-mgmt-hdinsight/azure/mgmt/hdinsight/models/_models_py3.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | ["MIT"] | 5 | 2019-09-04T12:51:37.000Z | 2020-09-16T07:28:40.000Z |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import datetime
from typing import Any, Dict, List, Optional, Union
from azure.core.exceptions import HttpResponseError
import msrest.serialization
from ._hd_insight_management_client_enums import *
class AaddsResourceDetails(msrest.serialization.Model):
"""The Azure active directory domain service resource details.
:param domain_name: The Azure active directory domain service name.
:type domain_name: str
:param initial_sync_complete: This indicates whether initial sync complete or not.
:type initial_sync_complete: bool
:param ldaps_enabled: This indicates whether enable ldaps or not.
:type ldaps_enabled: bool
:param ldaps_public_certificate_in_base64: The base 64 format string of public ldap
certificate.
:type ldaps_public_certificate_in_base64: str
:param resource_id: The resource id of azure active directory domain service.
:type resource_id: str
:param subnet_id: The subnet resource id.
:type subnet_id: str
    :param tenant_id: The tenant id of azure active directory domain service.
:type tenant_id: str
"""
_attribute_map = {
'domain_name': {'key': 'domainName', 'type': 'str'},
'initial_sync_complete': {'key': 'initialSyncComplete', 'type': 'bool'},
'ldaps_enabled': {'key': 'ldapsEnabled', 'type': 'bool'},
'ldaps_public_certificate_in_base64': {'key': 'ldapsPublicCertificateInBase64', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'subnet_id': {'key': 'subnetId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
}
def __init__(
self,
*,
domain_name: Optional[str] = None,
initial_sync_complete: Optional[bool] = None,
ldaps_enabled: Optional[bool] = None,
ldaps_public_certificate_in_base64: Optional[str] = None,
resource_id: Optional[str] = None,
subnet_id: Optional[str] = None,
tenant_id: Optional[str] = None,
**kwargs
):
super(AaddsResourceDetails, self).__init__(**kwargs)
self.domain_name = domain_name
self.initial_sync_complete = initial_sync_complete
self.ldaps_enabled = ldaps_enabled
self.ldaps_public_certificate_in_base64 = ldaps_public_certificate_in_base64
self.resource_id = resource_id
self.subnet_id = subnet_id
self.tenant_id = tenant_id
class Resource(msrest.serialization.Model):
"""The core properties of ARM resources.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(Resource, self).__init__(**kwargs)
self.id = None
self.name = None
self.type = None
class ProxyResource(Resource):
"""The resource model definition for a ARM proxy resource. It will have everything other than required location and tags.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ProxyResource, self).__init__(**kwargs)
class Application(ProxyResource):
"""The HDInsight cluster application.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:param etag: The ETag for the application.
:type etag: str
:param tags: A set of tags. The tags for the application.
:type tags: dict[str, str]
:param properties: The properties of the application.
:type properties: ~azure.mgmt.hdinsight.models.ApplicationProperties
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'properties': {'key': 'properties', 'type': 'ApplicationProperties'},
}
def __init__(
self,
*,
etag: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
properties: Optional["ApplicationProperties"] = None,
**kwargs
):
super(Application, self).__init__(**kwargs)
self.etag = etag
self.tags = tags
self.properties = properties
class ApplicationGetEndpoint(msrest.serialization.Model):
"""Gets the application SSH endpoint.
:param location: The location of the endpoint.
:type location: str
:param destination_port: The destination port to connect to.
:type destination_port: int
:param public_port: The public port to connect to.
:type public_port: int
:param private_ip_address: The private ip address of the endpoint.
:type private_ip_address: str
"""
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'destination_port': {'key': 'destinationPort', 'type': 'int'},
'public_port': {'key': 'publicPort', 'type': 'int'},
'private_ip_address': {'key': 'privateIPAddress', 'type': 'str'},
}
def __init__(
self,
*,
location: Optional[str] = None,
destination_port: Optional[int] = None,
public_port: Optional[int] = None,
private_ip_address: Optional[str] = None,
**kwargs
):
super(ApplicationGetEndpoint, self).__init__(**kwargs)
self.location = location
self.destination_port = destination_port
self.public_port = public_port
self.private_ip_address = private_ip_address
class ApplicationGetHttpsEndpoint(msrest.serialization.Model):
"""Gets the application HTTP endpoints.
Variables are only populated by the server, and will be ignored when sending a request.
:param access_modes: The list of access modes for the application.
:type access_modes: list[str]
:ivar location: The location of the endpoint.
:vartype location: str
:param destination_port: The destination port to connect to.
:type destination_port: int
:ivar public_port: The public port to connect to.
:vartype public_port: int
:param private_ip_address: The private ip address of the endpoint.
:type private_ip_address: str
:param sub_domain_suffix: The subdomain suffix of the application.
:type sub_domain_suffix: str
:param disable_gateway_auth: The value indicates whether to disable GatewayAuth.
:type disable_gateway_auth: bool
"""
_validation = {
'location': {'readonly': True},
'public_port': {'readonly': True},
}
_attribute_map = {
'access_modes': {'key': 'accessModes', 'type': '[str]'},
'location': {'key': 'location', 'type': 'str'},
'destination_port': {'key': 'destinationPort', 'type': 'int'},
'public_port': {'key': 'publicPort', 'type': 'int'},
'private_ip_address': {'key': 'privateIPAddress', 'type': 'str'},
'sub_domain_suffix': {'key': 'subDomainSuffix', 'type': 'str'},
'disable_gateway_auth': {'key': 'disableGatewayAuth', 'type': 'bool'},
}
def __init__(
self,
*,
access_modes: Optional[List[str]] = None,
destination_port: Optional[int] = None,
private_ip_address: Optional[str] = None,
sub_domain_suffix: Optional[str] = None,
disable_gateway_auth: Optional[bool] = None,
**kwargs
):
super(ApplicationGetHttpsEndpoint, self).__init__(**kwargs)
self.access_modes = access_modes
self.location = None
self.destination_port = destination_port
self.public_port = None
self.private_ip_address = private_ip_address
self.sub_domain_suffix = sub_domain_suffix
self.disable_gateway_auth = disable_gateway_auth
class ApplicationListResult(msrest.serialization.Model):
"""Result of the request to list cluster Applications. It contains a list of operations and a URL link to get the next set of results.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: The list of HDInsight applications installed on HDInsight cluster.
:type value: list[~azure.mgmt.hdinsight.models.Application]
:ivar next_link: The URL to get the next set of operation list results if there are any.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Application]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["Application"]] = None,
**kwargs
):
super(ApplicationListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class ApplicationProperties(msrest.serialization.Model):
"""The HDInsight cluster application GET response.
Variables are only populated by the server, and will be ignored when sending a request.
:param compute_profile: The list of roles in the cluster.
:type compute_profile: ~azure.mgmt.hdinsight.models.ComputeProfile
:param install_script_actions: The list of install script actions.
:type install_script_actions: list[~azure.mgmt.hdinsight.models.RuntimeScriptAction]
:param uninstall_script_actions: The list of uninstall script actions.
:type uninstall_script_actions: list[~azure.mgmt.hdinsight.models.RuntimeScriptAction]
:param https_endpoints: The list of application HTTPS endpoints.
:type https_endpoints: list[~azure.mgmt.hdinsight.models.ApplicationGetHttpsEndpoint]
:param ssh_endpoints: The list of application SSH endpoints.
:type ssh_endpoints: list[~azure.mgmt.hdinsight.models.ApplicationGetEndpoint]
:ivar provisioning_state: The provisioning state of the application.
:vartype provisioning_state: str
:param application_type: The application type.
:type application_type: str
:ivar application_state: The application state.
:vartype application_state: str
:param errors: The list of errors.
:type errors: list[~azure.mgmt.hdinsight.models.Errors]
:ivar created_date: The application create date time.
:vartype created_date: str
:ivar marketplace_identifier: The marketplace identifier.
:vartype marketplace_identifier: str
"""
_validation = {
'provisioning_state': {'readonly': True},
'application_state': {'readonly': True},
'created_date': {'readonly': True},
'marketplace_identifier': {'readonly': True},
}
_attribute_map = {
'compute_profile': {'key': 'computeProfile', 'type': 'ComputeProfile'},
'install_script_actions': {'key': 'installScriptActions', 'type': '[RuntimeScriptAction]'},
'uninstall_script_actions': {'key': 'uninstallScriptActions', 'type': '[RuntimeScriptAction]'},
'https_endpoints': {'key': 'httpsEndpoints', 'type': '[ApplicationGetHttpsEndpoint]'},
'ssh_endpoints': {'key': 'sshEndpoints', 'type': '[ApplicationGetEndpoint]'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'application_type': {'key': 'applicationType', 'type': 'str'},
'application_state': {'key': 'applicationState', 'type': 'str'},
'errors': {'key': 'errors', 'type': '[Errors]'},
'created_date': {'key': 'createdDate', 'type': 'str'},
'marketplace_identifier': {'key': 'marketplaceIdentifier', 'type': 'str'},
}
def __init__(
self,
*,
compute_profile: Optional["ComputeProfile"] = None,
install_script_actions: Optional[List["RuntimeScriptAction"]] = None,
uninstall_script_actions: Optional[List["RuntimeScriptAction"]] = None,
https_endpoints: Optional[List["ApplicationGetHttpsEndpoint"]] = None,
ssh_endpoints: Optional[List["ApplicationGetEndpoint"]] = None,
application_type: Optional[str] = None,
errors: Optional[List["Errors"]] = None,
**kwargs
):
super(ApplicationProperties, self).__init__(**kwargs)
self.compute_profile = compute_profile
self.install_script_actions = install_script_actions
self.uninstall_script_actions = uninstall_script_actions
self.https_endpoints = https_endpoints
self.ssh_endpoints = ssh_endpoints
self.provisioning_state = None
self.application_type = application_type
self.application_state = None
self.errors = errors
self.created_date = None
self.marketplace_identifier = None
class AsyncOperationResult(msrest.serialization.Model):
"""The azure async operation response.
:param status: The async operation state. Possible values include: "InProgress", "Succeeded",
"Failed".
:type status: str or ~azure.mgmt.hdinsight.models.AsyncOperationState
:param error: The operation error information.
:type error: ~azure.mgmt.hdinsight.models.Errors
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'error': {'key': 'error', 'type': 'Errors'},
}
def __init__(
self,
*,
status: Optional[Union[str, "AsyncOperationState"]] = None,
error: Optional["Errors"] = None,
**kwargs
):
super(AsyncOperationResult, self).__init__(**kwargs)
self.status = status
self.error = error
class Autoscale(msrest.serialization.Model):
"""The autoscale request parameters.
:param capacity: Parameters for load-based autoscale.
:type capacity: ~azure.mgmt.hdinsight.models.AutoscaleCapacity
:param recurrence: Parameters for schedule-based autoscale.
:type recurrence: ~azure.mgmt.hdinsight.models.AutoscaleRecurrence
"""
_attribute_map = {
'capacity': {'key': 'capacity', 'type': 'AutoscaleCapacity'},
'recurrence': {'key': 'recurrence', 'type': 'AutoscaleRecurrence'},
}
def __init__(
self,
*,
capacity: Optional["AutoscaleCapacity"] = None,
recurrence: Optional["AutoscaleRecurrence"] = None,
**kwargs
):
super(Autoscale, self).__init__(**kwargs)
self.capacity = capacity
self.recurrence = recurrence
class AutoscaleCapacity(msrest.serialization.Model):
"""The load-based autoscale request parameters.
:param min_instance_count: The minimum instance count of the cluster.
:type min_instance_count: int
:param max_instance_count: The maximum instance count of the cluster.
:type max_instance_count: int
"""
_attribute_map = {
'min_instance_count': {'key': 'minInstanceCount', 'type': 'int'},
'max_instance_count': {'key': 'maxInstanceCount', 'type': 'int'},
}
def __init__(
self,
*,
min_instance_count: Optional[int] = None,
max_instance_count: Optional[int] = None,
**kwargs
):
super(AutoscaleCapacity, self).__init__(**kwargs)
self.min_instance_count = min_instance_count
self.max_instance_count = max_instance_count
class AutoscaleConfigurationUpdateParameter(msrest.serialization.Model):
"""The autoscale configuration update parameter.
:param autoscale: The autoscale configuration.
:type autoscale: ~azure.mgmt.hdinsight.models.Autoscale
"""
_attribute_map = {
'autoscale': {'key': 'autoscale', 'type': 'Autoscale'},
}
def __init__(
self,
*,
autoscale: Optional["Autoscale"] = None,
**kwargs
):
super(AutoscaleConfigurationUpdateParameter, self).__init__(**kwargs)
self.autoscale = autoscale
class AutoscaleRecurrence(msrest.serialization.Model):
"""Schedule-based autoscale request parameters.
:param time_zone: The time zone for the autoscale schedule times.
:type time_zone: str
:param schedule: Array of schedule-based autoscale rules.
:type schedule: list[~azure.mgmt.hdinsight.models.AutoscaleSchedule]
"""
_attribute_map = {
'time_zone': {'key': 'timeZone', 'type': 'str'},
'schedule': {'key': 'schedule', 'type': '[AutoscaleSchedule]'},
}
def __init__(
self,
*,
time_zone: Optional[str] = None,
schedule: Optional[List["AutoscaleSchedule"]] = None,
**kwargs
):
super(AutoscaleRecurrence, self).__init__(**kwargs)
self.time_zone = time_zone
self.schedule = schedule
class AutoscaleSchedule(msrest.serialization.Model):
"""Parameters for a schedule-based autoscale rule, consisting of an array of days + a time and capacity.
:param days: Days of the week for a schedule-based autoscale rule.
:type days: list[str or ~azure.mgmt.hdinsight.models.DaysOfWeek]
:param time_and_capacity: Time and capacity for a schedule-based autoscale rule.
:type time_and_capacity: ~azure.mgmt.hdinsight.models.AutoscaleTimeAndCapacity
"""
_attribute_map = {
'days': {'key': 'days', 'type': '[str]'},
'time_and_capacity': {'key': 'timeAndCapacity', 'type': 'AutoscaleTimeAndCapacity'},
}
def __init__(
self,
*,
days: Optional[List[Union[str, "DaysOfWeek"]]] = None,
time_and_capacity: Optional["AutoscaleTimeAndCapacity"] = None,
**kwargs
):
super(AutoscaleSchedule, self).__init__(**kwargs)
self.days = days
self.time_and_capacity = time_and_capacity
class AutoscaleTimeAndCapacity(msrest.serialization.Model):
"""Time and capacity request parameters.
:param time: 24-hour time in the form xx:xx.
:type time: str
:param min_instance_count: The minimum instance count of the cluster.
:type min_instance_count: int
:param max_instance_count: The maximum instance count of the cluster.
:type max_instance_count: int
"""
_attribute_map = {
'time': {'key': 'time', 'type': 'str'},
'min_instance_count': {'key': 'minInstanceCount', 'type': 'int'},
'max_instance_count': {'key': 'maxInstanceCount', 'type': 'int'},
}
def __init__(
self,
*,
time: Optional[str] = None,
min_instance_count: Optional[int] = None,
max_instance_count: Optional[int] = None,
**kwargs
):
super(AutoscaleTimeAndCapacity, self).__init__(**kwargs)
self.time = time
self.min_instance_count = min_instance_count
self.max_instance_count = max_instance_count
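# Illustrative sketch (not part of the SDK surface): assembling a
# schedule-based autoscale payload from the classes above. Day and time
# values are made up; `days` accepts plain strings per the type hint.
#
#     autoscale = Autoscale(
#         recurrence=AutoscaleRecurrence(
#             time_zone="Pacific Standard Time",
#             schedule=[
#                 AutoscaleSchedule(
#                     days=["Monday"],
#                     time_and_capacity=AutoscaleTimeAndCapacity(
#                         time="09:00", min_instance_count=4, max_instance_count=4
#                     ),
#                 )
#             ],
#         )
#     )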
class AzureMonitorRequest(msrest.serialization.Model):
"""The azure monitor parameters.
:param workspace_id: The Log Analytics workspace ID.
:type workspace_id: str
:param primary_key: The Log Analytics workspace key.
:type primary_key: str
:param selected_configurations: The selected configurations.
:type selected_configurations: ~azure.mgmt.hdinsight.models.AzureMonitorSelectedConfigurations
"""
_attribute_map = {
'workspace_id': {'key': 'workspaceId', 'type': 'str'},
'primary_key': {'key': 'primaryKey', 'type': 'str'},
'selected_configurations': {'key': 'selectedConfigurations', 'type': 'AzureMonitorSelectedConfigurations'},
}
def __init__(
self,
*,
workspace_id: Optional[str] = None,
primary_key: Optional[str] = None,
selected_configurations: Optional["AzureMonitorSelectedConfigurations"] = None,
**kwargs
):
super(AzureMonitorRequest, self).__init__(**kwargs)
self.workspace_id = workspace_id
self.primary_key = primary_key
self.selected_configurations = selected_configurations
class AzureMonitorResponse(msrest.serialization.Model):
"""The azure monitor status response.
:param cluster_monitoring_enabled: The status of the monitor on the HDInsight cluster.
:type cluster_monitoring_enabled: bool
:param workspace_id: The workspace ID of the monitor on the HDInsight cluster.
:type workspace_id: str
:param selected_configurations: The selected configurations.
:type selected_configurations: ~azure.mgmt.hdinsight.models.AzureMonitorSelectedConfigurations
"""
_attribute_map = {
'cluster_monitoring_enabled': {'key': 'clusterMonitoringEnabled', 'type': 'bool'},
'workspace_id': {'key': 'workspaceId', 'type': 'str'},
'selected_configurations': {'key': 'selectedConfigurations', 'type': 'AzureMonitorSelectedConfigurations'},
}
def __init__(
self,
*,
cluster_monitoring_enabled: Optional[bool] = None,
workspace_id: Optional[str] = None,
selected_configurations: Optional["AzureMonitorSelectedConfigurations"] = None,
**kwargs
):
super(AzureMonitorResponse, self).__init__(**kwargs)
self.cluster_monitoring_enabled = cluster_monitoring_enabled
self.workspace_id = workspace_id
self.selected_configurations = selected_configurations
class AzureMonitorSelectedConfigurations(msrest.serialization.Model):
"""The selected configurations for azure monitor.
:param configuration_version: The configuration version.
:type configuration_version: str
:param global_configurations: The global configurations of selected configurations.
:type global_configurations: dict[str, str]
:param table_list: The table list.
:type table_list: list[~azure.mgmt.hdinsight.models.AzureMonitorTableConfiguration]
"""
_attribute_map = {
'configuration_version': {'key': 'configurationVersion', 'type': 'str'},
'global_configurations': {'key': 'globalConfigurations', 'type': '{str}'},
'table_list': {'key': 'tableList', 'type': '[AzureMonitorTableConfiguration]'},
}
def __init__(
self,
*,
configuration_version: Optional[str] = None,
global_configurations: Optional[Dict[str, str]] = None,
table_list: Optional[List["AzureMonitorTableConfiguration"]] = None,
**kwargs
):
super(AzureMonitorSelectedConfigurations, self).__init__(**kwargs)
self.configuration_version = configuration_version
self.global_configurations = global_configurations
self.table_list = table_list
class AzureMonitorTableConfiguration(msrest.serialization.Model):
"""The table configuration for the Log Analytics integration.
:param name: The name.
:type name: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
**kwargs
):
super(AzureMonitorTableConfiguration, self).__init__(**kwargs)
self.name = name
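# Illustrative sketch (hypothetical workspace id, key and table name): an
# AzureMonitorRequest combining the selected-configuration classes above.
#
#     request = AzureMonitorRequest(
#         workspace_id="00000000-0000-0000-0000-000000000000",
#         primary_key="<workspace-key>",
#         selected_configurations=AzureMonitorSelectedConfigurations(
#             configuration_version="1.0",
#             table_list=[AzureMonitorTableConfiguration(name="HDInsightSparkLogs")],
#         ),
#     )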
class BillingMeters(msrest.serialization.Model):
"""The billing meters.
:param meter_parameter: The virtual machine sizes.
:type meter_parameter: str
:param meter: The HDInsight meter guid.
:type meter: str
:param unit: The unit of meter, VMHours or CoreHours.
:type unit: str
"""
_attribute_map = {
'meter_parameter': {'key': 'meterParameter', 'type': 'str'},
'meter': {'key': 'meter', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
}
def __init__(
self,
*,
meter_parameter: Optional[str] = None,
meter: Optional[str] = None,
unit: Optional[str] = None,
**kwargs
):
super(BillingMeters, self).__init__(**kwargs)
self.meter_parameter = meter_parameter
self.meter = meter
self.unit = unit
class BillingResources(msrest.serialization.Model):
"""The billing resources.
:param region: The region or location.
:type region: str
:param billing_meters: The billing meter information.
:type billing_meters: list[~azure.mgmt.hdinsight.models.BillingMeters]
:param disk_billing_meters: The managed disk billing information.
:type disk_billing_meters: list[~azure.mgmt.hdinsight.models.DiskBillingMeters]
"""
_attribute_map = {
'region': {'key': 'region', 'type': 'str'},
'billing_meters': {'key': 'billingMeters', 'type': '[BillingMeters]'},
'disk_billing_meters': {'key': 'diskBillingMeters', 'type': '[DiskBillingMeters]'},
}
def __init__(
self,
*,
region: Optional[str] = None,
billing_meters: Optional[List["BillingMeters"]] = None,
disk_billing_meters: Optional[List["DiskBillingMeters"]] = None,
**kwargs
):
super(BillingResources, self).__init__(**kwargs)
self.region = region
self.billing_meters = billing_meters
self.disk_billing_meters = disk_billing_meters
class BillingResponseListResult(msrest.serialization.Model):
"""The response for the operation to get regional billingSpecs for a subscription.
Variables are only populated by the server, and will be ignored when sending a request.
:param vm_sizes: The virtual machine sizes to include or exclude.
:type vm_sizes: list[str]
:param vm_sizes_with_encryption_at_host: The vm sizes which enable encryption at host.
:type vm_sizes_with_encryption_at_host: list[str]
    :param vm_size_filters: The virtual machine filtering mode. Effectively this can enable or
     disable the virtual machine sizes in a particular set.
:type vm_size_filters: list[~azure.mgmt.hdinsight.models.VmSizeCompatibilityFilterV2]
:ivar vm_size_properties: The vm size properties.
:vartype vm_size_properties: list[~azure.mgmt.hdinsight.models.VmSizeProperty]
:param billing_resources: The billing and managed disk billing resources for a region.
:type billing_resources: list[~azure.mgmt.hdinsight.models.BillingResources]
"""
_validation = {
'vm_size_properties': {'readonly': True},
}
_attribute_map = {
'vm_sizes': {'key': 'vmSizes', 'type': '[str]'},
'vm_sizes_with_encryption_at_host': {'key': 'vmSizesWithEncryptionAtHost', 'type': '[str]'},
'vm_size_filters': {'key': 'vmSizeFilters', 'type': '[VmSizeCompatibilityFilterV2]'},
'vm_size_properties': {'key': 'vmSizeProperties', 'type': '[VmSizeProperty]'},
'billing_resources': {'key': 'billingResources', 'type': '[BillingResources]'},
}
def __init__(
self,
*,
vm_sizes: Optional[List[str]] = None,
vm_sizes_with_encryption_at_host: Optional[List[str]] = None,
vm_size_filters: Optional[List["VmSizeCompatibilityFilterV2"]] = None,
billing_resources: Optional[List["BillingResources"]] = None,
**kwargs
):
super(BillingResponseListResult, self).__init__(**kwargs)
self.vm_sizes = vm_sizes
self.vm_sizes_with_encryption_at_host = vm_sizes_with_encryption_at_host
self.vm_size_filters = vm_size_filters
self.vm_size_properties = None
self.billing_resources = billing_resources
class CapabilitiesResult(msrest.serialization.Model):
"""The Get Capabilities operation response.
Variables are only populated by the server, and will be ignored when sending a request.
:param versions: The version capability.
:type versions: dict[str, ~azure.mgmt.hdinsight.models.VersionsCapability]
:param regions: The virtual machine size compatibility features.
:type regions: dict[str, ~azure.mgmt.hdinsight.models.RegionsCapability]
:param vmsizes: The virtual machine sizes.
:type vmsizes: dict[str, ~azure.mgmt.hdinsight.models.VmSizesCapability]
:param vmsize_filters: The virtual machine size compatibility filters.
:type vmsize_filters: list[~azure.mgmt.hdinsight.models.VmSizeCompatibilityFilter]
:param features: The capability features.
:type features: list[str]
:ivar quota: The quota capability.
:vartype quota: ~azure.mgmt.hdinsight.models.QuotaCapability
"""
_validation = {
'quota': {'readonly': True},
}
_attribute_map = {
'versions': {'key': 'versions', 'type': '{VersionsCapability}'},
'regions': {'key': 'regions', 'type': '{RegionsCapability}'},
'vmsizes': {'key': 'vmsizes', 'type': '{VmSizesCapability}'},
'vmsize_filters': {'key': 'vmsize_filters', 'type': '[VmSizeCompatibilityFilter]'},
'features': {'key': 'features', 'type': '[str]'},
'quota': {'key': 'quota', 'type': 'QuotaCapability'},
}
def __init__(
self,
*,
versions: Optional[Dict[str, "VersionsCapability"]] = None,
regions: Optional[Dict[str, "RegionsCapability"]] = None,
vmsizes: Optional[Dict[str, "VmSizesCapability"]] = None,
vmsize_filters: Optional[List["VmSizeCompatibilityFilter"]] = None,
features: Optional[List[str]] = None,
**kwargs
):
super(CapabilitiesResult, self).__init__(**kwargs)
self.versions = versions
self.regions = regions
self.vmsizes = vmsizes
self.vmsize_filters = vmsize_filters
self.features = features
self.quota = None
class ClientGroupInfo(msrest.serialization.Model):
"""The information of AAD security group.
:param group_name: The AAD security group name.
:type group_name: str
:param group_id: The AAD security group id.
:type group_id: str
"""
_attribute_map = {
'group_name': {'key': 'groupName', 'type': 'str'},
'group_id': {'key': 'groupId', 'type': 'str'},
}
def __init__(
self,
*,
group_name: Optional[str] = None,
group_id: Optional[str] = None,
**kwargs
):
super(ClientGroupInfo, self).__init__(**kwargs)
self.group_name = group_name
self.group_id = group_id
class TrackedResource(Resource):
"""The resource model definition for a ARM tracked top level resource.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:param location: The Azure Region where the resource lives.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(TrackedResource, self).__init__(**kwargs)
self.location = location
self.tags = tags
class Cluster(TrackedResource):
"""The HDInsight cluster.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar id: Fully qualified resource Id for the resource.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:param location: The Azure Region where the resource lives.
:type location: str
:param tags: A set of tags. Resource tags.
:type tags: dict[str, str]
:param etag: The ETag for the resource.
:type etag: str
:param properties: The properties of the cluster.
:type properties: ~azure.mgmt.hdinsight.models.ClusterGetProperties
:param identity: The identity of the cluster, if configured.
:type identity: ~azure.mgmt.hdinsight.models.ClusterIdentity
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'etag': {'key': 'etag', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'ClusterGetProperties'},
'identity': {'key': 'identity', 'type': 'ClusterIdentity'},
}
def __init__(
self,
*,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
etag: Optional[str] = None,
properties: Optional["ClusterGetProperties"] = None,
identity: Optional["ClusterIdentity"] = None,
**kwargs
):
super(Cluster, self).__init__(location=location, tags=tags, **kwargs)
self.etag = etag
self.properties = properties
self.identity = identity
class ClusterConfigurations(msrest.serialization.Model):
"""The configuration object for the specified cluster.
:param configurations: The configuration object for the specified configuration for the
specified cluster.
:type configurations: dict[str, dict[str, str]]
"""
_attribute_map = {
'configurations': {'key': 'configurations', 'type': '{{str}}'},
}
def __init__(
self,
*,
configurations: Optional[Dict[str, Dict[str, str]]] = None,
**kwargs
):
super(ClusterConfigurations, self).__init__(**kwargs)
self.configurations = configurations
class ClusterCreateParametersExtended(msrest.serialization.Model):
"""The CreateCluster request parameters.
:param location: The location of the cluster.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:param properties: The cluster create parameters.
:type properties: ~azure.mgmt.hdinsight.models.ClusterCreateProperties
:param identity: The identity of the cluster, if configured.
:type identity: ~azure.mgmt.hdinsight.models.ClusterIdentity
"""
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'properties': {'key': 'properties', 'type': 'ClusterCreateProperties'},
'identity': {'key': 'identity', 'type': 'ClusterIdentity'},
}
def __init__(
self,
*,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
properties: Optional["ClusterCreateProperties"] = None,
identity: Optional["ClusterIdentity"] = None,
**kwargs
):
super(ClusterCreateParametersExtended, self).__init__(**kwargs)
self.location = location
self.tags = tags
self.properties = properties
self.identity = identity
class ClusterCreateProperties(msrest.serialization.Model):
"""The cluster create parameters.
:param cluster_version: The version of the cluster.
:type cluster_version: str
:param os_type: The type of operating system. Possible values include: "Windows", "Linux".
:type os_type: str or ~azure.mgmt.hdinsight.models.OSType
:param tier: The cluster tier. Possible values include: "Standard", "Premium".
:type tier: str or ~azure.mgmt.hdinsight.models.Tier
:param cluster_definition: The cluster definition.
:type cluster_definition: ~azure.mgmt.hdinsight.models.ClusterDefinition
:param kafka_rest_properties: The cluster kafka rest proxy configuration.
:type kafka_rest_properties: ~azure.mgmt.hdinsight.models.KafkaRestProperties
:param security_profile: The security profile.
:type security_profile: ~azure.mgmt.hdinsight.models.SecurityProfile
:param compute_profile: The compute profile.
:type compute_profile: ~azure.mgmt.hdinsight.models.ComputeProfile
:param storage_profile: The storage profile.
:type storage_profile: ~azure.mgmt.hdinsight.models.StorageProfile
:param disk_encryption_properties: The disk encryption properties.
:type disk_encryption_properties: ~azure.mgmt.hdinsight.models.DiskEncryptionProperties
:param encryption_in_transit_properties: The encryption-in-transit properties.
:type encryption_in_transit_properties:
~azure.mgmt.hdinsight.models.EncryptionInTransitProperties
:param min_supported_tls_version: The minimal supported TLS version.
:type min_supported_tls_version: str
:param network_properties: The network properties.
:type network_properties: ~azure.mgmt.hdinsight.models.NetworkProperties
:param compute_isolation_properties: The compute isolation properties.
:type compute_isolation_properties: ~azure.mgmt.hdinsight.models.ComputeIsolationProperties
"""
_attribute_map = {
'cluster_version': {'key': 'clusterVersion', 'type': 'str'},
'os_type': {'key': 'osType', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'cluster_definition': {'key': 'clusterDefinition', 'type': 'ClusterDefinition'},
'kafka_rest_properties': {'key': 'kafkaRestProperties', 'type': 'KafkaRestProperties'},
'security_profile': {'key': 'securityProfile', 'type': 'SecurityProfile'},
'compute_profile': {'key': 'computeProfile', 'type': 'ComputeProfile'},
'storage_profile': {'key': 'storageProfile', 'type': 'StorageProfile'},
'disk_encryption_properties': {'key': 'diskEncryptionProperties', 'type': 'DiskEncryptionProperties'},
'encryption_in_transit_properties': {'key': 'encryptionInTransitProperties', 'type': 'EncryptionInTransitProperties'},
'min_supported_tls_version': {'key': 'minSupportedTlsVersion', 'type': 'str'},
'network_properties': {'key': 'networkProperties', 'type': 'NetworkProperties'},
'compute_isolation_properties': {'key': 'computeIsolationProperties', 'type': 'ComputeIsolationProperties'},
}
def __init__(
self,
*,
cluster_version: Optional[str] = None,
os_type: Optional[Union[str, "OSType"]] = None,
tier: Optional[Union[str, "Tier"]] = None,
cluster_definition: Optional["ClusterDefinition"] = None,
kafka_rest_properties: Optional["KafkaRestProperties"] = None,
security_profile: Optional["SecurityProfile"] = None,
compute_profile: Optional["ComputeProfile"] = None,
storage_profile: Optional["StorageProfile"] = None,
disk_encryption_properties: Optional["DiskEncryptionProperties"] = None,
encryption_in_transit_properties: Optional["EncryptionInTransitProperties"] = None,
min_supported_tls_version: Optional[str] = None,
network_properties: Optional["NetworkProperties"] = None,
compute_isolation_properties: Optional["ComputeIsolationProperties"] = None,
**kwargs
):
super(ClusterCreateProperties, self).__init__(**kwargs)
self.cluster_version = cluster_version
self.os_type = os_type
self.tier = tier
self.cluster_definition = cluster_definition
self.kafka_rest_properties = kafka_rest_properties
self.security_profile = security_profile
self.compute_profile = compute_profile
self.storage_profile = storage_profile
self.disk_encryption_properties = disk_encryption_properties
self.encryption_in_transit_properties = encryption_in_transit_properties
self.min_supported_tls_version = min_supported_tls_version
self.network_properties = network_properties
self.compute_isolation_properties = compute_isolation_properties
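# Illustrative sketch (not part of the generated model code): assembling the payload
# for a minimal Linux cluster create call from the models in this module. The
# location, versions, role name, and VM size are hypothetical placeholders; a real
# request additionally needs a storage profile and gateway configuration.
def _example_cluster_create_parameters():
    return ClusterCreateParametersExtended(
        location="eastus",
        tags={"environment": "dev"},
        properties=ClusterCreateProperties(
            cluster_version="4.0",
            os_type="Linux",
            tier="Standard",
            cluster_definition=ClusterDefinition(kind="Hadoop"),
            compute_profile=ComputeProfile(
                roles=[
                    Role(
                        name="headnode",
                        target_instance_count=2,
                        hardware_profile=HardwareProfile(vm_size="Standard_D3_v2"),
                    )
                ]
            ),
        ),
    )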
class ClusterCreateRequestValidationParameters(ClusterCreateParametersExtended):
"""The cluster create request specification.
:param location: The location of the cluster.
:type location: str
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
:param properties: The cluster create parameters.
:type properties: ~azure.mgmt.hdinsight.models.ClusterCreateProperties
:param identity: The identity of the cluster, if configured.
:type identity: ~azure.mgmt.hdinsight.models.ClusterIdentity
:param name: The cluster name.
:type name: str
:param type: The resource type.
:type type: str
:param tenant_id: The tenant id.
:type tenant_id: str
:param fetch_aadds_resource: This indicates whether to fetch the Aadds resource or not.
:type fetch_aadds_resource: bool
"""
_attribute_map = {
'location': {'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
'properties': {'key': 'properties', 'type': 'ClusterCreateProperties'},
'identity': {'key': 'identity', 'type': 'ClusterIdentity'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'fetch_aadds_resource': {'key': 'fetchAaddsResource', 'type': 'bool'},
}
def __init__(
self,
*,
location: Optional[str] = None,
tags: Optional[Dict[str, str]] = None,
properties: Optional["ClusterCreateProperties"] = None,
identity: Optional["ClusterIdentity"] = None,
name: Optional[str] = None,
type: Optional[str] = None,
tenant_id: Optional[str] = None,
fetch_aadds_resource: Optional[bool] = None,
**kwargs
):
super(ClusterCreateRequestValidationParameters, self).__init__(location=location, tags=tags, properties=properties, identity=identity, **kwargs)
self.name = name
self.type = type
self.tenant_id = tenant_id
self.fetch_aadds_resource = fetch_aadds_resource
class ClusterCreateValidationResult(msrest.serialization.Model):
"""The response of cluster create request validation.
:param validation_errors: The validation errors.
:type validation_errors: list[~azure.mgmt.hdinsight.models.ValidationErrorInfo]
:param validation_warnings: The validation warnings.
:type validation_warnings: list[~azure.mgmt.hdinsight.models.ValidationErrorInfo]
:param estimated_creation_duration: The estimated creation duration.
:type estimated_creation_duration: ~datetime.timedelta
:param aadds_resources_details: The Azure active directory domain service resource details.
:type aadds_resources_details: list[~azure.mgmt.hdinsight.models.AaddsResourceDetails]
"""
_attribute_map = {
'validation_errors': {'key': 'validationErrors', 'type': '[ValidationErrorInfo]'},
'validation_warnings': {'key': 'validationWarnings', 'type': '[ValidationErrorInfo]'},
'estimated_creation_duration': {'key': 'estimatedCreationDuration', 'type': 'duration'},
'aadds_resources_details': {'key': 'aaddsResourcesDetails', 'type': '[AaddsResourceDetails]'},
}
def __init__(
self,
*,
validation_errors: Optional[List["ValidationErrorInfo"]] = None,
validation_warnings: Optional[List["ValidationErrorInfo"]] = None,
estimated_creation_duration: Optional[datetime.timedelta] = None,
aadds_resources_details: Optional[List["AaddsResourceDetails"]] = None,
**kwargs
):
super(ClusterCreateValidationResult, self).__init__(**kwargs)
self.validation_errors = validation_errors
self.validation_warnings = validation_warnings
self.estimated_creation_duration = estimated_creation_duration
self.aadds_resources_details = aadds_resources_details
class ClusterDefinition(msrest.serialization.Model):
"""The cluster definition.
:param blueprint: The link to the blueprint.
:type blueprint: str
:param kind: The type of cluster.
:type kind: str
:param component_version: The versions of different services in the cluster.
:type component_version: dict[str, str]
:param configurations: The cluster configurations.
:type configurations: any
"""
_attribute_map = {
'blueprint': {'key': 'blueprint', 'type': 'str'},
'kind': {'key': 'kind', 'type': 'str'},
'component_version': {'key': 'componentVersion', 'type': '{str}'},
'configurations': {'key': 'configurations', 'type': 'object'},
}
def __init__(
self,
*,
blueprint: Optional[str] = None,
kind: Optional[str] = None,
component_version: Optional[Dict[str, str]] = None,
configurations: Optional[Any] = None,
**kwargs
):
super(ClusterDefinition, self).__init__(**kwargs)
self.blueprint = blueprint
self.kind = kind
self.component_version = component_version
self.configurations = configurations
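# Illustrative sketch (not part of the generated model code): a Spark cluster
# definition. The kind and component version are hypothetical placeholders;
# ``configurations`` accepts any JSON-serializable object, here a nested dict.
def _example_cluster_definition():
    return ClusterDefinition(
        kind="Spark",
        component_version={"Spark": "2.4"},
        configurations={"gateway": {"restAuthCredential.isEnabled": "true"}},
    )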
class ClusterDiskEncryptionParameters(msrest.serialization.Model):
"""The Disk Encryption Cluster request parameters.
:param vault_uri: Base key vault URI where the customer's key is located, e.g.
https://myvault.vault.azure.net.
:type vault_uri: str
:param key_name: Key name that is used for enabling disk encryption.
:type key_name: str
:param key_version: Specific key version that is used for enabling disk encryption.
:type key_version: str
"""
_attribute_map = {
'vault_uri': {'key': 'vaultUri', 'type': 'str'},
'key_name': {'key': 'keyName', 'type': 'str'},
'key_version': {'key': 'keyVersion', 'type': 'str'},
}
def __init__(
self,
*,
vault_uri: Optional[str] = None,
key_name: Optional[str] = None,
key_version: Optional[str] = None,
**kwargs
):
super(ClusterDiskEncryptionParameters, self).__init__(**kwargs)
self.vault_uri = vault_uri
self.key_name = key_name
self.key_version = key_version
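# Illustrative sketch (not part of the generated model code): the payload for
# rotating a cluster's disk encryption key. The vault URI follows the form given in
# the docstring above; the key name and version are placeholders.
def _example_disk_encryption_rotation():
    return ClusterDiskEncryptionParameters(
        vault_uri="https://myvault.vault.azure.net",
        key_name="clusterkey",
        key_version="00000000000000000000000000000000",
    )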
class ClusterGetProperties(msrest.serialization.Model):
"""The properties of cluster.
All required parameters must be populated in order to send to Azure.
:param cluster_version: The version of the cluster.
:type cluster_version: str
:param cluster_hdp_version: The hdp version of the cluster.
:type cluster_hdp_version: str
:param os_type: The type of operating system. Possible values include: "Windows", "Linux".
:type os_type: str or ~azure.mgmt.hdinsight.models.OSType
:param tier: The cluster tier. Possible values include: "Standard", "Premium".
:type tier: str or ~azure.mgmt.hdinsight.models.Tier
:param cluster_id: The cluster id.
:type cluster_id: str
:param cluster_definition: Required. The cluster definition.
:type cluster_definition: ~azure.mgmt.hdinsight.models.ClusterDefinition
:param kafka_rest_properties: The cluster kafka rest proxy configuration.
:type kafka_rest_properties: ~azure.mgmt.hdinsight.models.KafkaRestProperties
:param security_profile: The security profile.
:type security_profile: ~azure.mgmt.hdinsight.models.SecurityProfile
:param compute_profile: The compute profile.
:type compute_profile: ~azure.mgmt.hdinsight.models.ComputeProfile
:param provisioning_state: The provisioning state, which only appears in the response. Possible
values include: "InProgress", "Failed", "Succeeded", "Canceled", "Deleting".
:type provisioning_state: str or ~azure.mgmt.hdinsight.models.HDInsightClusterProvisioningState
:param created_date: The date on which the cluster was created.
:type created_date: str
:param cluster_state: The state of the cluster.
:type cluster_state: str
:param quota_info: The quota information.
:type quota_info: ~azure.mgmt.hdinsight.models.QuotaInfo
:param errors: The list of errors.
:type errors: list[~azure.mgmt.hdinsight.models.Errors]
:param connectivity_endpoints: The list of connectivity endpoints.
:type connectivity_endpoints: list[~azure.mgmt.hdinsight.models.ConnectivityEndpoint]
:param disk_encryption_properties: The disk encryption properties.
:type disk_encryption_properties: ~azure.mgmt.hdinsight.models.DiskEncryptionProperties
:param encryption_in_transit_properties: The encryption-in-transit properties.
:type encryption_in_transit_properties:
~azure.mgmt.hdinsight.models.EncryptionInTransitProperties
:param storage_profile: The storage profile.
:type storage_profile: ~azure.mgmt.hdinsight.models.StorageProfile
:param min_supported_tls_version: The minimal supported TLS version.
:type min_supported_tls_version: str
:param excluded_services_config: The excluded services config.
:type excluded_services_config: ~azure.mgmt.hdinsight.models.ExcludedServicesConfig
:param network_properties: The network properties.
:type network_properties: ~azure.mgmt.hdinsight.models.NetworkProperties
:param compute_isolation_properties: The compute isolation properties.
:type compute_isolation_properties: ~azure.mgmt.hdinsight.models.ComputeIsolationProperties
"""
_validation = {
'cluster_definition': {'required': True},
}
_attribute_map = {
'cluster_version': {'key': 'clusterVersion', 'type': 'str'},
'cluster_hdp_version': {'key': 'clusterHdpVersion', 'type': 'str'},
'os_type': {'key': 'osType', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
'cluster_id': {'key': 'clusterId', 'type': 'str'},
'cluster_definition': {'key': 'clusterDefinition', 'type': 'ClusterDefinition'},
'kafka_rest_properties': {'key': 'kafkaRestProperties', 'type': 'KafkaRestProperties'},
'security_profile': {'key': 'securityProfile', 'type': 'SecurityProfile'},
'compute_profile': {'key': 'computeProfile', 'type': 'ComputeProfile'},
'provisioning_state': {'key': 'provisioningState', 'type': 'str'},
'created_date': {'key': 'createdDate', 'type': 'str'},
'cluster_state': {'key': 'clusterState', 'type': 'str'},
'quota_info': {'key': 'quotaInfo', 'type': 'QuotaInfo'},
'errors': {'key': 'errors', 'type': '[Errors]'},
'connectivity_endpoints': {'key': 'connectivityEndpoints', 'type': '[ConnectivityEndpoint]'},
'disk_encryption_properties': {'key': 'diskEncryptionProperties', 'type': 'DiskEncryptionProperties'},
'encryption_in_transit_properties': {'key': 'encryptionInTransitProperties', 'type': 'EncryptionInTransitProperties'},
'storage_profile': {'key': 'storageProfile', 'type': 'StorageProfile'},
'min_supported_tls_version': {'key': 'minSupportedTlsVersion', 'type': 'str'},
'excluded_services_config': {'key': 'excludedServicesConfig', 'type': 'ExcludedServicesConfig'},
'network_properties': {'key': 'networkProperties', 'type': 'NetworkProperties'},
'compute_isolation_properties': {'key': 'computeIsolationProperties', 'type': 'ComputeIsolationProperties'},
}
def __init__(
self,
*,
cluster_definition: "ClusterDefinition",
cluster_version: Optional[str] = None,
cluster_hdp_version: Optional[str] = None,
os_type: Optional[Union[str, "OSType"]] = None,
tier: Optional[Union[str, "Tier"]] = None,
cluster_id: Optional[str] = None,
kafka_rest_properties: Optional["KafkaRestProperties"] = None,
security_profile: Optional["SecurityProfile"] = None,
compute_profile: Optional["ComputeProfile"] = None,
provisioning_state: Optional[Union[str, "HDInsightClusterProvisioningState"]] = None,
created_date: Optional[str] = None,
cluster_state: Optional[str] = None,
quota_info: Optional["QuotaInfo"] = None,
errors: Optional[List["Errors"]] = None,
connectivity_endpoints: Optional[List["ConnectivityEndpoint"]] = None,
disk_encryption_properties: Optional["DiskEncryptionProperties"] = None,
encryption_in_transit_properties: Optional["EncryptionInTransitProperties"] = None,
storage_profile: Optional["StorageProfile"] = None,
min_supported_tls_version: Optional[str] = None,
excluded_services_config: Optional["ExcludedServicesConfig"] = None,
network_properties: Optional["NetworkProperties"] = None,
compute_isolation_properties: Optional["ComputeIsolationProperties"] = None,
**kwargs
):
super(ClusterGetProperties, self).__init__(**kwargs)
self.cluster_version = cluster_version
self.cluster_hdp_version = cluster_hdp_version
self.os_type = os_type
self.tier = tier
self.cluster_id = cluster_id
self.cluster_definition = cluster_definition
self.kafka_rest_properties = kafka_rest_properties
self.security_profile = security_profile
self.compute_profile = compute_profile
self.provisioning_state = provisioning_state
self.created_date = created_date
self.cluster_state = cluster_state
self.quota_info = quota_info
self.errors = errors
self.connectivity_endpoints = connectivity_endpoints
self.disk_encryption_properties = disk_encryption_properties
self.encryption_in_transit_properties = encryption_in_transit_properties
self.storage_profile = storage_profile
self.min_supported_tls_version = min_supported_tls_version
self.excluded_services_config = excluded_services_config
self.network_properties = network_properties
self.compute_isolation_properties = compute_isolation_properties
class ClusterIdentity(msrest.serialization.Model):
"""Identity for the cluster.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: The principal id of cluster identity. This property will only be provided
for a system assigned identity.
:vartype principal_id: str
:ivar tenant_id: The tenant id associated with the cluster. This property will only be provided
for a system assigned identity.
:vartype tenant_id: str
:param type: The type of identity used for the cluster. The type 'SystemAssigned, UserAssigned'
includes both an implicitly created identity and a set of user assigned identities. Possible
values include: "SystemAssigned", "UserAssigned", "SystemAssigned, UserAssigned", "None".
:type type: str or ~azure.mgmt.hdinsight.models.ResourceIdentityType
:param user_assigned_identities: The list of user identities associated with the cluster. The
user identity dictionary key references will be ARM resource ids in the form:
'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}'.
:type user_assigned_identities: dict[str,
~azure.mgmt.hdinsight.models.ComponentsC51Ht8SchemasClusteridentityPropertiesUserassignedidentitiesAdditionalproperties]
"""
_validation = {
'principal_id': {'readonly': True},
'tenant_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'user_assigned_identities': {'key': 'userAssignedIdentities', 'type': '{ComponentsC51Ht8SchemasClusteridentityPropertiesUserassignedidentitiesAdditionalproperties}'},
}
def __init__(
self,
*,
type: Optional[Union[str, "ResourceIdentityType"]] = None,
user_assigned_identities: Optional[Dict[str, "ComponentsC51Ht8SchemasClusteridentityPropertiesUserassignedidentitiesAdditionalproperties"]] = None,
**kwargs
):
super(ClusterIdentity, self).__init__(**kwargs)
self.principal_id = None
self.tenant_id = None
self.type = type
self.user_assigned_identities = user_assigned_identities
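# Illustrative sketch (not part of the generated model code): attaching a single
# user-assigned identity. The dictionary key is an ARM resource id of the form shown
# in the docstring above; the value model carries only server-populated fields plus
# an optional tenant id, so an empty instance is sufficient here.
def _example_cluster_identity():
    msi_id = (
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}"
        "/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}"
    )
    return ClusterIdentity(
        type="UserAssigned",
        user_assigned_identities={
            msi_id: ComponentsC51Ht8SchemasClusteridentityPropertiesUserassignedidentitiesAdditionalproperties()
        },
    )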
class ClusterListPersistedScriptActionsResult(msrest.serialization.Model):
"""The ListPersistedScriptActions operation response.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: The list of Persisted Script Actions.
:type value: list[~azure.mgmt.hdinsight.models.RuntimeScriptAction]
:ivar next_link: The link (url) to the next page of results.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[RuntimeScriptAction]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["RuntimeScriptAction"]] = None,
**kwargs
):
super(ClusterListPersistedScriptActionsResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class ClusterListResult(msrest.serialization.Model):
"""The List Cluster operation response.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: The list of Clusters.
:type value: list[~azure.mgmt.hdinsight.models.Cluster]
:ivar next_link: The link (url) to the next page of results.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[Cluster]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["Cluster"]] = None,
**kwargs
):
super(ClusterListResult, self).__init__(**kwargs)
self.value = value
self.next_link = None
class ClusterListRuntimeScriptActionDetailResult(msrest.serialization.Model):
"""The list runtime script action detail response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value:
:vartype value: list[~azure.mgmt.hdinsight.models.RuntimeScriptActionDetail]
:ivar next_link: The link (url) to the next page of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[RuntimeScriptActionDetail]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ClusterListRuntimeScriptActionDetailResult, self).__init__(**kwargs)
self.value = None
self.next_link = None
class ClusterListRuntimeScriptActionDetailResultAutoGenerated(msrest.serialization.Model):
"""The list runtime script action detail response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of persisted script action details for the cluster.
:vartype value: list[~azure.mgmt.hdinsight.models.RuntimeScriptActionDetail]
:ivar next_link: The link (url) to the next page of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[RuntimeScriptActionDetail]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ClusterListRuntimeScriptActionDetailResultAutoGenerated, self).__init__(**kwargs)
self.value = None
self.next_link = None
class ClusterMonitoringRequest(msrest.serialization.Model):
"""The cluster monitor parameters.
:param workspace_id: The cluster monitor workspace ID.
:type workspace_id: str
:param primary_key: The cluster monitor workspace key.
:type primary_key: str
"""
_attribute_map = {
'workspace_id': {'key': 'workspaceId', 'type': 'str'},
'primary_key': {'key': 'primaryKey', 'type': 'str'},
}
def __init__(
self,
*,
workspace_id: Optional[str] = None,
primary_key: Optional[str] = None,
**kwargs
):
super(ClusterMonitoringRequest, self).__init__(**kwargs)
self.workspace_id = workspace_id
self.primary_key = primary_key
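# Illustrative sketch (not part of the generated model code): the payload for
# enabling Log Analytics monitoring on a cluster. Both the workspace id and the
# primary key below are placeholders.
def _example_monitoring_request():
    return ClusterMonitoringRequest(
        workspace_id="00000000-0000-0000-0000-000000000000",
        primary_key="<workspace-primary-key>",
    )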
class ClusterMonitoringResponse(msrest.serialization.Model):
"""The cluster monitoring status response.
:param cluster_monitoring_enabled: The status of the monitor on the HDInsight cluster.
:type cluster_monitoring_enabled: bool
:param workspace_id: The workspace ID of the monitor on the HDInsight cluster.
:type workspace_id: str
"""
_attribute_map = {
'cluster_monitoring_enabled': {'key': 'clusterMonitoringEnabled', 'type': 'bool'},
'workspace_id': {'key': 'workspaceId', 'type': 'str'},
}
def __init__(
self,
*,
cluster_monitoring_enabled: Optional[bool] = None,
workspace_id: Optional[str] = None,
**kwargs
):
super(ClusterMonitoringResponse, self).__init__(**kwargs)
self.cluster_monitoring_enabled = cluster_monitoring_enabled
self.workspace_id = workspace_id
class ClusterPatchParameters(msrest.serialization.Model):
"""The PatchCluster request parameters.
:param tags: A set of tags. The resource tags.
:type tags: dict[str, str]
"""
_attribute_map = {
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(
self,
*,
tags: Optional[Dict[str, str]] = None,
**kwargs
):
super(ClusterPatchParameters, self).__init__(**kwargs)
self.tags = tags
class ClusterResizeParameters(msrest.serialization.Model):
"""The Resize Cluster request parameters.
:param target_instance_count: The target instance count for the operation.
:type target_instance_count: int
"""
_attribute_map = {
'target_instance_count': {'key': 'targetInstanceCount', 'type': 'int'},
}
def __init__(
self,
*,
target_instance_count: Optional[int] = None,
**kwargs
):
super(ClusterResizeParameters, self).__init__(**kwargs)
self.target_instance_count = target_instance_count
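# Illustrative sketch (not part of the generated model code): a resize payload. The
# count below is a placeholder; the resize operation itself determines which role
# (typically the worker node role) the new count applies to.
def _example_resize_parameters():
    return ClusterResizeParameters(target_instance_count=4)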
class ComponentsC51Ht8SchemasClusteridentityPropertiesUserassignedidentitiesAdditionalproperties(msrest.serialization.Model):
"""ComponentsC51Ht8SchemasClusteridentityPropertiesUserassignedidentitiesAdditionalproperties.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar principal_id: The principal id of user assigned identity.
:vartype principal_id: str
:ivar client_id: The client id of user assigned identity.
:vartype client_id: str
:param tenant_id: The tenant id of user assigned identity.
:type tenant_id: str
"""
_validation = {
'principal_id': {'readonly': True},
'client_id': {'readonly': True},
}
_attribute_map = {
'principal_id': {'key': 'principalId', 'type': 'str'},
'client_id': {'key': 'clientId', 'type': 'str'},
'tenant_id': {'key': 'tenantId', 'type': 'str'},
}
def __init__(
self,
*,
tenant_id: Optional[str] = None,
**kwargs
):
super(ComponentsC51Ht8SchemasClusteridentityPropertiesUserassignedidentitiesAdditionalproperties, self).__init__(**kwargs)
self.principal_id = None
self.client_id = None
self.tenant_id = tenant_id
class ComputeIsolationProperties(msrest.serialization.Model):
"""The compute isolation properties.
:param enable_compute_isolation: The flag that indicates whether to enable compute isolation or not.
:type enable_compute_isolation: bool
:param host_sku: The host sku.
:type host_sku: str
"""
_attribute_map = {
'enable_compute_isolation': {'key': 'enableComputeIsolation', 'type': 'bool'},
'host_sku': {'key': 'hostSku', 'type': 'str'},
}
def __init__(
self,
*,
enable_compute_isolation: Optional[bool] = False,
host_sku: Optional[str] = None,
**kwargs
):
super(ComputeIsolationProperties, self).__init__(**kwargs)
self.enable_compute_isolation = enable_compute_isolation
self.host_sku = host_sku
class ComputeProfile(msrest.serialization.Model):
"""Describes the compute profile.
:param roles: The list of roles in the cluster.
:type roles: list[~azure.mgmt.hdinsight.models.Role]
"""
_attribute_map = {
'roles': {'key': 'roles', 'type': '[Role]'},
}
def __init__(
self,
*,
roles: Optional[List["Role"]] = None,
**kwargs
):
super(ComputeProfile, self).__init__(**kwargs)
self.roles = roles
class ConnectivityEndpoint(msrest.serialization.Model):
"""The connectivity properties.
:param name: The name of the endpoint.
:type name: str
:param protocol: The protocol of the endpoint.
:type protocol: str
:param location: The location of the endpoint.
:type location: str
:param port: The port to connect to.
:type port: int
:param private_ip_address: The private ip address of the endpoint.
:type private_ip_address: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'protocol': {'key': 'protocol', 'type': 'str'},
'location': {'key': 'location', 'type': 'str'},
'port': {'key': 'port', 'type': 'int'},
'private_ip_address': {'key': 'privateIPAddress', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
protocol: Optional[str] = None,
location: Optional[str] = None,
port: Optional[int] = None,
private_ip_address: Optional[str] = None,
**kwargs
):
super(ConnectivityEndpoint, self).__init__(**kwargs)
self.name = name
self.protocol = protocol
self.location = location
self.port = port
self.private_ip_address = private_ip_address
class DataDisksGroups(msrest.serialization.Model):
"""The data disks groups for the role.
Variables are only populated by the server, and will be ignored when sending a request.
:param disks_per_node: The number of disks per node.
:type disks_per_node: int
:ivar storage_account_type: ReadOnly. The storage account type. Do not set this value.
:vartype storage_account_type: str
:ivar disk_size_gb: ReadOnly. The DiskSize in GB. Do not set this value.
:vartype disk_size_gb: int
"""
_validation = {
'storage_account_type': {'readonly': True},
'disk_size_gb': {'readonly': True},
}
_attribute_map = {
'disks_per_node': {'key': 'disksPerNode', 'type': 'int'},
'storage_account_type': {'key': 'storageAccountType', 'type': 'str'},
'disk_size_gb': {'key': 'diskSizeGB', 'type': 'int'},
}
def __init__(
self,
*,
disks_per_node: Optional[int] = None,
**kwargs
):
super(DataDisksGroups, self).__init__(**kwargs)
self.disks_per_node = disks_per_node
self.storage_account_type = None
self.disk_size_gb = None
class Dimension(msrest.serialization.Model):
"""The definition of Dimension.
:param name: The name of the dimension.
:type name: str
:param display_name: The display name of the dimension.
:type display_name: str
:param internal_name: The internal name of the dimension.
:type internal_name: str
:param to_be_exported_for_shoebox: The flag indicates whether the metric will be exported for
shoebox or not.
:type to_be_exported_for_shoebox: bool
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'internal_name': {'key': 'internalName', 'type': 'str'},
'to_be_exported_for_shoebox': {'key': 'toBeExportedForShoebox', 'type': 'bool'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display_name: Optional[str] = None,
internal_name: Optional[str] = None,
to_be_exported_for_shoebox: Optional[bool] = None,
**kwargs
):
super(Dimension, self).__init__(**kwargs)
self.name = name
self.display_name = display_name
self.internal_name = internal_name
self.to_be_exported_for_shoebox = to_be_exported_for_shoebox
class DiskBillingMeters(msrest.serialization.Model):
"""The disk billing meters.
:param disk_rp_meter: The managed disk meter GUID.
:type disk_rp_meter: str
:param sku: The managed disk billing sku, P30 or S30.
:type sku: str
:param tier: The managed disk billing tier, Standard or Premium. Possible values include:
"Standard", "Premium".
:type tier: str or ~azure.mgmt.hdinsight.models.Tier
"""
_attribute_map = {
'disk_rp_meter': {'key': 'diskRpMeter', 'type': 'str'},
'sku': {'key': 'sku', 'type': 'str'},
'tier': {'key': 'tier', 'type': 'str'},
}
def __init__(
self,
*,
disk_rp_meter: Optional[str] = None,
sku: Optional[str] = None,
tier: Optional[Union[str, "Tier"]] = None,
**kwargs
):
super(DiskBillingMeters, self).__init__(**kwargs)
self.disk_rp_meter = disk_rp_meter
self.sku = sku
self.tier = tier
class DiskEncryptionProperties(msrest.serialization.Model):
"""The disk encryption properties.
:param vault_uri: Base key vault URI where the customer's key is located, e.g.
https://myvault.vault.azure.net.
:type vault_uri: str
:param key_name: Key name that is used for enabling disk encryption.
:type key_name: str
:param key_version: Specific key version that is used for enabling disk encryption.
:type key_version: str
:param encryption_algorithm: Algorithm identifier for encryption, default RSA-OAEP. Possible
values include: "RSA-OAEP", "RSA-OAEP-256", "RSA1_5".
:type encryption_algorithm: str or ~azure.mgmt.hdinsight.models.JsonWebKeyEncryptionAlgorithm
:param msi_resource_id: Resource ID of Managed Identity that is used to access the key vault.
:type msi_resource_id: str
:param encryption_at_host: Indicates whether or not resource disk encryption is enabled.
:type encryption_at_host: bool
"""
_attribute_map = {
'vault_uri': {'key': 'vaultUri', 'type': 'str'},
'key_name': {'key': 'keyName', 'type': 'str'},
'key_version': {'key': 'keyVersion', 'type': 'str'},
'encryption_algorithm': {'key': 'encryptionAlgorithm', 'type': 'str'},
'msi_resource_id': {'key': 'msiResourceId', 'type': 'str'},
'encryption_at_host': {'key': 'encryptionAtHost', 'type': 'bool'},
}
def __init__(
self,
*,
vault_uri: Optional[str] = None,
key_name: Optional[str] = None,
key_version: Optional[str] = None,
encryption_algorithm: Optional[Union[str, "JsonWebKeyEncryptionAlgorithm"]] = None,
msi_resource_id: Optional[str] = None,
encryption_at_host: Optional[bool] = False,
**kwargs
):
super(DiskEncryptionProperties, self).__init__(**kwargs)
self.vault_uri = vault_uri
self.key_name = key_name
self.key_version = key_version
self.encryption_algorithm = encryption_algorithm
self.msi_resource_id = msi_resource_id
self.encryption_at_host = encryption_at_host
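# Illustrative sketch (not part of the generated model code): create-time
# customer-managed-key disk encryption settings. The vault URI follows the form in
# the docstring above; the key name/version and managed identity resource id are
# placeholders.
def _example_disk_encryption_properties():
    return DiskEncryptionProperties(
        vault_uri="https://myvault.vault.azure.net",
        key_name="clusterkey",
        key_version="00000000000000000000000000000000",
        encryption_algorithm="RSA-OAEP",
        msi_resource_id=(
            "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}"
            "/providers/Microsoft.ManagedIdentity/userAssignedIdentities/{identityName}"
        ),
    )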
class EncryptionInTransitProperties(msrest.serialization.Model):
"""The encryption-in-transit properties.
:param is_encryption_in_transit_enabled: Indicates whether or not communication between
cluster nodes is encrypted in transit.
:type is_encryption_in_transit_enabled: bool
"""
_attribute_map = {
'is_encryption_in_transit_enabled': {'key': 'isEncryptionInTransitEnabled', 'type': 'bool'},
}
def __init__(
self,
*,
is_encryption_in_transit_enabled: Optional[bool] = False,
**kwargs
):
super(EncryptionInTransitProperties, self).__init__(**kwargs)
self.is_encryption_in_transit_enabled = is_encryption_in_transit_enabled
class ErrorResponse(msrest.serialization.Model):
"""Describes the format of Error response.
:param code: Error code.
:type code: str
:param message: Error message indicating why the operation failed.
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
code: Optional[str] = None,
message: Optional[str] = None,
**kwargs
):
super(ErrorResponse, self).__init__(**kwargs)
self.code = code
self.message = message
class Errors(msrest.serialization.Model):
"""The error message associated with the cluster creation.
:param code: The error code.
:type code: str
:param message: The error message.
:type message: str
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
code: Optional[str] = None,
message: Optional[str] = None,
**kwargs
):
super(Errors, self).__init__(**kwargs)
self.code = code
self.message = message
class ExcludedServicesConfig(msrest.serialization.Model):
"""The configuration that services will be excluded when creating cluster.
:param excluded_services_config_id: The config id of excluded services.
:type excluded_services_config_id: str
:param excluded_services_list: The list of excluded services.
:type excluded_services_list: str
"""
_attribute_map = {
'excluded_services_config_id': {'key': 'excludedServicesConfigId', 'type': 'str'},
'excluded_services_list': {'key': 'excludedServicesList', 'type': 'str'},
}
def __init__(
self,
*,
excluded_services_config_id: Optional[str] = None,
excluded_services_list: Optional[str] = None,
**kwargs
):
super(ExcludedServicesConfig, self).__init__(**kwargs)
self.excluded_services_config_id = excluded_services_config_id
self.excluded_services_list = excluded_services_list
class ExecuteScriptActionParameters(msrest.serialization.Model):
"""The parameters for the script actions to execute on a running cluster.
All required parameters must be populated in order to send to Azure.
:param script_actions: The list of run time script actions.
:type script_actions: list[~azure.mgmt.hdinsight.models.RuntimeScriptAction]
:param persist_on_success: Required. Gets or sets whether the scripts need to be persisted.
:type persist_on_success: bool
"""
_validation = {
'persist_on_success': {'required': True},
}
_attribute_map = {
'script_actions': {'key': 'scriptActions', 'type': '[RuntimeScriptAction]'},
'persist_on_success': {'key': 'persistOnSuccess', 'type': 'bool'},
}
def __init__(
self,
*,
persist_on_success: bool,
script_actions: Optional[List["RuntimeScriptAction"]] = None,
**kwargs
):
super(ExecuteScriptActionParameters, self).__init__(**kwargs)
self.script_actions = script_actions
self.persist_on_success = persist_on_success
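# Illustrative sketch (not part of the generated model code): running one script
# action on the head and worker nodes and persisting it on success. The script
# name, URI, and role names are placeholders; RuntimeScriptAction is defined later
# in this module.
def _example_execute_script_action():
    action = RuntimeScriptAction(
        name="install-deps",
        uri="https://example.com/scripts/install.sh",
        roles=["headnode", "workernode"],
    )
    return ExecuteScriptActionParameters(
        persist_on_success=True,
        script_actions=[action],
    )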
class Extension(msrest.serialization.Model):
"""Cluster monitoring extensions.
:param workspace_id: The workspace ID for the cluster monitoring extension.
:type workspace_id: str
:param primary_key: The certificate for the cluster monitoring extensions.
:type primary_key: str
"""
_attribute_map = {
'workspace_id': {'key': 'workspaceId', 'type': 'str'},
'primary_key': {'key': 'primaryKey', 'type': 'str'},
}
def __init__(
self,
*,
workspace_id: Optional[str] = None,
primary_key: Optional[str] = None,
**kwargs
):
super(Extension, self).__init__(**kwargs)
self.workspace_id = workspace_id
self.primary_key = primary_key
class GatewaySettings(msrest.serialization.Model):
"""Gateway settings.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar is_credential_enabled: Indicates whether or not gateway settings-based authorization
is enabled.
:vartype is_credential_enabled: str
:ivar user_name: The gateway settings user name.
:vartype user_name: str
:ivar password: The gateway settings user password.
:vartype password: str
"""
_validation = {
'is_credential_enabled': {'readonly': True},
'user_name': {'readonly': True},
'password': {'readonly': True},
}
_attribute_map = {
'is_credential_enabled': {'key': 'restAuthCredential\\.isEnabled', 'type': 'str'},
'user_name': {'key': 'restAuthCredential\\.username', 'type': 'str'},
'password': {'key': 'restAuthCredential\\.password', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(GatewaySettings, self).__init__(**kwargs)
self.is_credential_enabled = None
self.user_name = None
self.password = None
class HardwareProfile(msrest.serialization.Model):
"""The hardware profile.
:param vm_size: The size of the VM.
:type vm_size: str
"""
_attribute_map = {
'vm_size': {'key': 'vmSize', 'type': 'str'},
}
def __init__(
self,
*,
vm_size: Optional[str] = None,
**kwargs
):
super(HardwareProfile, self).__init__(**kwargs)
self.vm_size = vm_size
class HostInfo(msrest.serialization.Model):
"""The cluster host information.
:param name: The host name.
:type name: str
:param fqdn: The fully qualified domain name of the host.
:type fqdn: str
:param effective_disk_encryption_key_url: The effective disk encryption key URL used by the
host.
:type effective_disk_encryption_key_url: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'fqdn': {'key': 'fqdn', 'type': 'str'},
'effective_disk_encryption_key_url': {'key': 'effectiveDiskEncryptionKeyUrl', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
fqdn: Optional[str] = None,
effective_disk_encryption_key_url: Optional[str] = None,
**kwargs
):
super(HostInfo, self).__init__(**kwargs)
self.name = name
self.fqdn = fqdn
self.effective_disk_encryption_key_url = effective_disk_encryption_key_url
class KafkaRestProperties(msrest.serialization.Model):
"""The kafka rest proxy configuration which contains AAD security group information.
:param client_group_info: The information of AAD security group.
:type client_group_info: ~azure.mgmt.hdinsight.models.ClientGroupInfo
:param configuration_override: The configurations that need to be overridden.
:type configuration_override: dict[str, str]
"""
_attribute_map = {
'client_group_info': {'key': 'clientGroupInfo', 'type': 'ClientGroupInfo'},
'configuration_override': {'key': 'configurationOverride', 'type': '{str}'},
}
def __init__(
self,
*,
client_group_info: Optional["ClientGroupInfo"] = None,
configuration_override: Optional[Dict[str, str]] = None,
**kwargs
):
super(KafkaRestProperties, self).__init__(**kwargs)
self.client_group_info = client_group_info
self.configuration_override = configuration_override
class LinuxOperatingSystemProfile(msrest.serialization.Model):
"""The ssh username, password, and ssh public key.
:param username: The username.
:type username: str
:param password: The password.
:type password: str
:param ssh_profile: The SSH profile.
:type ssh_profile: ~azure.mgmt.hdinsight.models.SshProfile
"""
_attribute_map = {
'username': {'key': 'username', 'type': 'str'},
'password': {'key': 'password', 'type': 'str'},
'ssh_profile': {'key': 'sshProfile', 'type': 'SshProfile'},
}
def __init__(
self,
*,
username: Optional[str] = None,
password: Optional[str] = None,
ssh_profile: Optional["SshProfile"] = None,
**kwargs
):
super(LinuxOperatingSystemProfile, self).__init__(**kwargs)
self.username = username
self.password = password
self.ssh_profile = ssh_profile
class LocalizedName(msrest.serialization.Model):
"""The details about the localizable name of a type of usage.
:param value: The name of the used resource.
:type value: str
:param localized_value: The localized name of the used resource.
:type localized_value: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': 'str'},
'localized_value': {'key': 'localizedValue', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[str] = None,
localized_value: Optional[str] = None,
**kwargs
):
super(LocalizedName, self).__init__(**kwargs)
self.value = value
self.localized_value = localized_value
class MetricSpecifications(msrest.serialization.Model):
"""The details of metric specifications.
:param name: The name of the metric specification.
:type name: str
:param display_name: The display name of the metric specification.
:type display_name: str
:param display_description: The display description of the metric specification.
:type display_description: str
:param unit: The unit of the metric specification.
:type unit: str
:param aggregation_type: The aggregation type of the metric specification.
:type aggregation_type: str
:param supported_aggregation_types: The supported aggregation types of the metric
specification.
:type supported_aggregation_types: list[str]
:param supported_time_grain_types: The supported time grain types of the metric specification.
:type supported_time_grain_types: list[str]
:param enable_regional_mdm_account: The flag that indicates whether to enable the regional
MDM account or not.
:type enable_regional_mdm_account: bool
:param source_mdm_account: The source mdm account.
:type source_mdm_account: str
:param source_mdm_namespace: The source mdm namespace.
:type source_mdm_namespace: str
:param metric_filter_pattern: The metric filter pattern.
:type metric_filter_pattern: str
:param fill_gap_with_zero: The flag that indicates whether to fill gaps with zero.
:type fill_gap_with_zero: bool
:param category: The category of the metric.
:type category: str
:param resource_id_dimension_name_override: The override for the resource id dimension name.
:type resource_id_dimension_name_override: str
:param is_internal: The flag indicates whether the metric is internal or not.
:type is_internal: bool
:param delegate_metric_name_override: The override name of delegate metric.
:type delegate_metric_name_override: str
:param dimensions: The dimensions of the metric specification.
:type dimensions: list[~azure.mgmt.hdinsight.models.Dimension]
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'display_description': {'key': 'displayDescription', 'type': 'str'},
'unit': {'key': 'unit', 'type': 'str'},
'aggregation_type': {'key': 'aggregationType', 'type': 'str'},
'supported_aggregation_types': {'key': 'supportedAggregationTypes', 'type': '[str]'},
'supported_time_grain_types': {'key': 'supportedTimeGrainTypes', 'type': '[str]'},
'enable_regional_mdm_account': {'key': 'enableRegionalMdmAccount', 'type': 'bool'},
'source_mdm_account': {'key': 'sourceMdmAccount', 'type': 'str'},
'source_mdm_namespace': {'key': 'sourceMdmNamespace', 'type': 'str'},
'metric_filter_pattern': {'key': 'metricFilterPattern', 'type': 'str'},
'fill_gap_with_zero': {'key': 'fillGapWithZero', 'type': 'bool'},
'category': {'key': 'category', 'type': 'str'},
'resource_id_dimension_name_override': {'key': 'resourceIdDimensionNameOverride', 'type': 'str'},
'is_internal': {'key': 'isInternal', 'type': 'bool'},
'delegate_metric_name_override': {'key': 'delegateMetricNameOverride', 'type': 'str'},
'dimensions': {'key': 'dimensions', 'type': '[Dimension]'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display_name: Optional[str] = None,
display_description: Optional[str] = None,
unit: Optional[str] = None,
aggregation_type: Optional[str] = None,
supported_aggregation_types: Optional[List[str]] = None,
supported_time_grain_types: Optional[List[str]] = None,
enable_regional_mdm_account: Optional[bool] = None,
source_mdm_account: Optional[str] = None,
source_mdm_namespace: Optional[str] = None,
metric_filter_pattern: Optional[str] = None,
fill_gap_with_zero: Optional[bool] = None,
category: Optional[str] = None,
resource_id_dimension_name_override: Optional[str] = None,
is_internal: Optional[bool] = None,
delegate_metric_name_override: Optional[str] = None,
dimensions: Optional[List["Dimension"]] = None,
**kwargs
):
super(MetricSpecifications, self).__init__(**kwargs)
self.name = name
self.display_name = display_name
self.display_description = display_description
self.unit = unit
self.aggregation_type = aggregation_type
self.supported_aggregation_types = supported_aggregation_types
self.supported_time_grain_types = supported_time_grain_types
self.enable_regional_mdm_account = enable_regional_mdm_account
self.source_mdm_account = source_mdm_account
self.source_mdm_namespace = source_mdm_namespace
self.metric_filter_pattern = metric_filter_pattern
self.fill_gap_with_zero = fill_gap_with_zero
self.category = category
self.resource_id_dimension_name_override = resource_id_dimension_name_override
self.is_internal = is_internal
self.delegate_metric_name_override = delegate_metric_name_override
self.dimensions = dimensions
class NameAvailabilityCheckRequestParameters(msrest.serialization.Model):
"""The request spec of checking name availability.
:param name: The resource name.
:type name: str
:param type: The resource type.
:type type: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
type: Optional[str] = None,
**kwargs
):
super(NameAvailabilityCheckRequestParameters, self).__init__(**kwargs)
self.name = name
self.type = type
class NameAvailabilityCheckResult(msrest.serialization.Model):
"""The response spec of checking name availability.
Variables are only populated by the server, and will be ignored when sending a request.
:param name_available: This indicates whether the name is available.
:type name_available: bool
:ivar reason: The reason for the result.
:vartype reason: str
:ivar message: The related message.
:vartype message: str
"""
_validation = {
'reason': {'readonly': True},
'message': {'readonly': True},
}
_attribute_map = {
'name_available': {'key': 'nameAvailable', 'type': 'bool'},
'reason': {'key': 'reason', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
name_available: Optional[bool] = None,
**kwargs
):
super(NameAvailabilityCheckResult, self).__init__(**kwargs)
self.name_available = name_available
self.reason = None
self.message = None
class NetworkProperties(msrest.serialization.Model):
"""The network properties.
:param resource_provider_connection: The direction for the resource provider connection.
Possible values include: "Inbound", "Outbound".
:type resource_provider_connection: str or
~azure.mgmt.hdinsight.models.ResourceProviderConnection
:param private_link: Indicates whether or not private link is enabled. Possible values include:
"Disabled", "Enabled".
:type private_link: str or ~azure.mgmt.hdinsight.models.PrivateLink
"""
_attribute_map = {
'resource_provider_connection': {'key': 'resourceProviderConnection', 'type': 'str'},
'private_link': {'key': 'privateLink', 'type': 'str'},
}
def __init__(
self,
*,
resource_provider_connection: Optional[Union[str, "ResourceProviderConnection"]] = None,
private_link: Optional[Union[str, "PrivateLink"]] = None,
**kwargs
):
super(NetworkProperties, self).__init__(**kwargs)
self.resource_provider_connection = resource_provider_connection
self.private_link = private_link
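# Illustrative sketch (not part of the generated model code): network settings for a
# cluster that connects to the resource provider outbound and has private link
# enabled, using the enum string values listed in the docstring above.
def _example_network_properties():
    return NetworkProperties(
        resource_provider_connection="Outbound",
        private_link="Enabled",
    )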
class Operation(msrest.serialization.Model):
"""The HDInsight REST API operation.
:param name: The operation name: {provider}/{resource}/{operation}.
:type name: str
:param display: The object that represents the operation.
:type display: ~azure.mgmt.hdinsight.models.OperationDisplay
:param properties: The operation properties.
:type properties: ~azure.mgmt.hdinsight.models.OperationProperties
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'display': {'key': 'display', 'type': 'OperationDisplay'},
'properties': {'key': 'properties', 'type': 'OperationProperties'},
}
def __init__(
self,
*,
name: Optional[str] = None,
display: Optional["OperationDisplay"] = None,
properties: Optional["OperationProperties"] = None,
**kwargs
):
super(Operation, self).__init__(**kwargs)
self.name = name
self.display = display
self.properties = properties
class OperationDisplay(msrest.serialization.Model):
"""The object that represents the operation.
:param provider: The service provider: Microsoft.HDInsight.
:type provider: str
:param resource: The resource on which the operation is performed: Cluster, Applications, etc.
:type resource: str
:param operation: The operation type: read, write, delete, etc.
:type operation: str
:param description: Localized friendly description for the operation.
:type description: str
"""
_attribute_map = {
'provider': {'key': 'provider', 'type': 'str'},
'resource': {'key': 'resource', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'description': {'key': 'description', 'type': 'str'},
}
def __init__(
self,
*,
provider: Optional[str] = None,
resource: Optional[str] = None,
operation: Optional[str] = None,
description: Optional[str] = None,
**kwargs
):
super(OperationDisplay, self).__init__(**kwargs)
self.provider = provider
self.resource = resource
self.operation = operation
self.description = description
class OperationListResult(msrest.serialization.Model):
"""Result of the request to list HDInsight operations. It contains a list of operations and a URL link to get the next set of results.
:param value: The list of HDInsight operations supported by the HDInsight resource provider.
:type value: list[~azure.mgmt.hdinsight.models.Operation]
:param next_link: The URL to get the next set of operation list results if there are any.
:type next_link: str
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Operation]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["Operation"]] = None,
next_link: Optional[str] = None,
**kwargs
):
super(OperationListResult, self).__init__(**kwargs)
self.value = value
self.next_link = next_link
class OperationProperties(msrest.serialization.Model):
"""The details of operation.
:param service_specification: The specification of the service.
:type service_specification: ~azure.mgmt.hdinsight.models.ServiceSpecification
"""
_attribute_map = {
'service_specification': {'key': 'serviceSpecification', 'type': 'ServiceSpecification'},
}
def __init__(
self,
*,
service_specification: Optional["ServiceSpecification"] = None,
**kwargs
):
super(OperationProperties, self).__init__(**kwargs)
self.service_specification = service_specification
class OsProfile(msrest.serialization.Model):
"""The Linux operation systems profile.
:param linux_operating_system_profile: The Linux OS profile.
:type linux_operating_system_profile: ~azure.mgmt.hdinsight.models.LinuxOperatingSystemProfile
"""
_attribute_map = {
'linux_operating_system_profile': {'key': 'linuxOperatingSystemProfile', 'type': 'LinuxOperatingSystemProfile'},
}
def __init__(
self,
*,
linux_operating_system_profile: Optional["LinuxOperatingSystemProfile"] = None,
**kwargs
):
super(OsProfile, self).__init__(**kwargs)
self.linux_operating_system_profile = linux_operating_system_profile
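# Illustrative sketch (not part of the generated model code): an OS profile with
# password-based SSH credentials. The username and password are placeholders; an
# SshProfile with public keys could be supplied instead.
def _example_os_profile():
    return OsProfile(
        linux_operating_system_profile=LinuxOperatingSystemProfile(
            username="sshuser",
            password="<ssh-password>",
        )
    )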
class QuotaCapability(msrest.serialization.Model):
"""The regional quota capability.
:param cores_used: The number of cores used in the subscription.
:type cores_used: long
:param max_cores_allowed: The maximum number of cores allowed by the subscription.
:type max_cores_allowed: long
:param regional_quotas: The list of region quota capabilities.
:type regional_quotas: list[~azure.mgmt.hdinsight.models.RegionalQuotaCapability]
"""
_attribute_map = {
'cores_used': {'key': 'cores_used', 'type': 'long'},
'max_cores_allowed': {'key': 'max_cores_allowed', 'type': 'long'},
'regional_quotas': {'key': 'regionalQuotas', 'type': '[RegionalQuotaCapability]'},
}
def __init__(
self,
*,
cores_used: Optional[int] = None,
max_cores_allowed: Optional[int] = None,
regional_quotas: Optional[List["RegionalQuotaCapability"]] = None,
**kwargs
):
super(QuotaCapability, self).__init__(**kwargs)
self.cores_used = cores_used
self.max_cores_allowed = max_cores_allowed
self.regional_quotas = regional_quotas
class QuotaInfo(msrest.serialization.Model):
"""The quota properties for the cluster.
:param cores_used: The cores used by the cluster.
:type cores_used: int
"""
_attribute_map = {
'cores_used': {'key': 'coresUsed', 'type': 'int'},
}
def __init__(
self,
*,
cores_used: Optional[int] = None,
**kwargs
):
super(QuotaInfo, self).__init__(**kwargs)
self.cores_used = cores_used
class RegionalQuotaCapability(msrest.serialization.Model):
"""The regional quota capacity.
:param region_name: The region name.
:type region_name: str
:param cores_used: The number of cores used in the region.
:type cores_used: long
:param cores_available: The number of cores available in the region.
:type cores_available: long
"""
_attribute_map = {
'region_name': {'key': 'region_name', 'type': 'str'},
'cores_used': {'key': 'cores_used', 'type': 'long'},
'cores_available': {'key': 'cores_available', 'type': 'long'},
}
def __init__(
self,
*,
region_name: Optional[str] = None,
cores_used: Optional[int] = None,
cores_available: Optional[int] = None,
**kwargs
):
super(RegionalQuotaCapability, self).__init__(**kwargs)
self.region_name = region_name
self.cores_used = cores_used
self.cores_available = cores_available
class RegionsCapability(msrest.serialization.Model):
"""The regions capability.
:param available: The list of region capabilities.
:type available: list[str]
"""
_attribute_map = {
'available': {'key': 'available', 'type': '[str]'},
}
def __init__(
self,
*,
available: Optional[List[str]] = None,
**kwargs
):
super(RegionsCapability, self).__init__(**kwargs)
self.available = available
class Role(msrest.serialization.Model):
"""Describes a role on the cluster.
:param name: The name of the role.
:type name: str
:param min_instance_count: The minimum instance count of the cluster.
:type min_instance_count: int
:param target_instance_count: The target instance count of the cluster.
:type target_instance_count: int
:param vm_group_name: The name of the virtual machine group.
:type vm_group_name: str
:param autoscale_configuration: The autoscale configurations.
:type autoscale_configuration: ~azure.mgmt.hdinsight.models.Autoscale
:param hardware_profile: The hardware profile.
:type hardware_profile: ~azure.mgmt.hdinsight.models.HardwareProfile
:param os_profile: The operating system profile.
:type os_profile: ~azure.mgmt.hdinsight.models.OsProfile
:param virtual_network_profile: The virtual network profile.
:type virtual_network_profile: ~azure.mgmt.hdinsight.models.VirtualNetworkProfile
:param data_disks_groups: The data disks groups for the role.
:type data_disks_groups: list[~azure.mgmt.hdinsight.models.DataDisksGroups]
:param script_actions: The list of script actions on the role.
:type script_actions: list[~azure.mgmt.hdinsight.models.ScriptAction]
:param encrypt_data_disks: Indicates whether to encrypt the data disks.
:type encrypt_data_disks: bool
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'min_instance_count': {'key': 'minInstanceCount', 'type': 'int'},
'target_instance_count': {'key': 'targetInstanceCount', 'type': 'int'},
'vm_group_name': {'key': 'VMGroupName', 'type': 'str'},
'autoscale_configuration': {'key': 'autoscale', 'type': 'Autoscale'},
'hardware_profile': {'key': 'hardwareProfile', 'type': 'HardwareProfile'},
'os_profile': {'key': 'osProfile', 'type': 'OsProfile'},
'virtual_network_profile': {'key': 'virtualNetworkProfile', 'type': 'VirtualNetworkProfile'},
'data_disks_groups': {'key': 'dataDisksGroups', 'type': '[DataDisksGroups]'},
'script_actions': {'key': 'scriptActions', 'type': '[ScriptAction]'},
'encrypt_data_disks': {'key': 'encryptDataDisks', 'type': 'bool'},
}
def __init__(
self,
*,
name: Optional[str] = None,
min_instance_count: Optional[int] = None,
target_instance_count: Optional[int] = None,
vm_group_name: Optional[str] = None,
autoscale_configuration: Optional["Autoscale"] = None,
hardware_profile: Optional["HardwareProfile"] = None,
os_profile: Optional["OsProfile"] = None,
virtual_network_profile: Optional["VirtualNetworkProfile"] = None,
data_disks_groups: Optional[List["DataDisksGroups"]] = None,
script_actions: Optional[List["ScriptAction"]] = None,
encrypt_data_disks: Optional[bool] = False,
**kwargs
):
super(Role, self).__init__(**kwargs)
self.name = name
self.min_instance_count = min_instance_count
self.target_instance_count = target_instance_count
self.vm_group_name = vm_group_name
self.autoscale_configuration = autoscale_configuration
self.hardware_profile = hardware_profile
self.os_profile = os_profile
self.virtual_network_profile = virtual_network_profile
self.data_disks_groups = data_disks_groups
self.script_actions = script_actions
self.encrypt_data_disks = encrypt_data_disks
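# Illustrative sketch (not part of the generated model code): a worker node role
# with two attached data disks per node. The role name, instance count, VM size, and
# disk count are placeholders.
def _example_worker_role():
    return Role(
        name="workernode",
        target_instance_count=3,
        hardware_profile=HardwareProfile(vm_size="Standard_D3_v2"),
        data_disks_groups=[DataDisksGroups(disks_per_node=2)],
    )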
class RuntimeScriptAction(msrest.serialization.Model):
"""Describes a script action on a running cluster.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the script action.
:type name: str
:param uri: Required. The URI to the script.
:type uri: str
:param parameters: The parameters for the script.
:type parameters: str
:param roles: Required. The list of roles where the script will be executed.
:type roles: list[str]
:ivar application_name: The application name of the script action, if any.
:vartype application_name: str
"""
_validation = {
'name': {'required': True},
'uri': {'required': True},
'roles': {'required': True},
'application_name': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'uri': {'key': 'uri', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': 'str'},
'roles': {'key': 'roles', 'type': '[str]'},
'application_name': {'key': 'applicationName', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
uri: str,
roles: List[str],
parameters: Optional[str] = None,
**kwargs
):
super(RuntimeScriptAction, self).__init__(**kwargs)
self.name = name
self.uri = uri
self.parameters = parameters
self.roles = roles
self.application_name = None
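# Illustrative usage (a minimal sketch): only name, uri and roles are required, and
# application_name is read-only, so it stays None until the server populates it.
# The URI and role names below are hypothetical.
#
#   action = RuntimeScriptAction(
#       name="configure-hive",
#       uri="https://example.org/configure-hive.sh",
#       roles=["headnode", "workernode"],
#       parameters="--verbose",
#   )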
class RuntimeScriptActionDetail(RuntimeScriptAction):
"""The execution details of a script action.
Variables are only populated by the server, and will be ignored when sending a request.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the script action.
:type name: str
:param uri: Required. The URI to the script.
:type uri: str
:param parameters: The parameters for the script.
:type parameters: str
:param roles: Required. The list of roles where the script will be executed.
:type roles: list[str]
:ivar application_name: The application name of the script action, if any.
:vartype application_name: str
:ivar script_execution_id: The execution id of the script action.
:vartype script_execution_id: long
:ivar start_time: The start time of script action execution.
:vartype start_time: str
:ivar end_time: The end time of script action execution.
:vartype end_time: str
:ivar status: The current execution status of the script action.
:vartype status: str
:ivar operation: The reason why the script action was executed.
:vartype operation: str
:ivar execution_summary: The summary of script action execution result.
:vartype execution_summary: list[~azure.mgmt.hdinsight.models.ScriptActionExecutionSummary]
:ivar debug_information: The script action execution debug information.
:vartype debug_information: str
"""
_validation = {
'name': {'required': True},
'uri': {'required': True},
'roles': {'required': True},
'application_name': {'readonly': True},
'script_execution_id': {'readonly': True},
'start_time': {'readonly': True},
'end_time': {'readonly': True},
'status': {'readonly': True},
'operation': {'readonly': True},
'execution_summary': {'readonly': True},
'debug_information': {'readonly': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'uri': {'key': 'uri', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': 'str'},
'roles': {'key': 'roles', 'type': '[str]'},
'application_name': {'key': 'applicationName', 'type': 'str'},
'script_execution_id': {'key': 'scriptExecutionId', 'type': 'long'},
'start_time': {'key': 'startTime', 'type': 'str'},
'end_time': {'key': 'endTime', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'operation': {'key': 'operation', 'type': 'str'},
'execution_summary': {'key': 'executionSummary', 'type': '[ScriptActionExecutionSummary]'},
'debug_information': {'key': 'debugInformation', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
uri: str,
roles: List[str],
parameters: Optional[str] = None,
**kwargs
):
super(RuntimeScriptActionDetail, self).__init__(name=name, uri=uri, parameters=parameters, roles=roles, **kwargs)
self.script_execution_id = None
self.start_time = None
self.end_time = None
self.status = None
self.operation = None
self.execution_summary = None
self.debug_information = None
class ScriptAction(msrest.serialization.Model):
"""Describes a script action on role on the cluster.
All required parameters must be populated in order to send to Azure.
:param name: Required. The name of the script action.
:type name: str
:param uri: Required. The URI to the script.
:type uri: str
:param parameters: Required. The parameters for the script.
:type parameters: str
"""
_validation = {
'name': {'required': True},
'uri': {'required': True},
'parameters': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'uri': {'key': 'uri', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
uri: str,
parameters: str,
**kwargs
):
super(ScriptAction, self).__init__(**kwargs)
self.name = name
self.uri = uri
self.parameters = parameters
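# Illustrative usage (a hedged sketch): unlike RuntimeScriptAction, ScriptAction
# also requires `parameters`, so pass an empty string when the script takes no
# arguments. The URI below is a placeholder.
#
#   sa = ScriptAction(
#       name="install-giraph",
#       uri="https://example.org/giraph-installer.sh",
#       parameters="",
#   )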
class ScriptActionExecutionHistoryList(msrest.serialization.Model):
"""The list script execution history response.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar value: The list of persisted script action details for the cluster.
:vartype value: list[~azure.mgmt.hdinsight.models.RuntimeScriptActionDetail]
:ivar next_link: The link (url) to the next page of results.
:vartype next_link: str
"""
_validation = {
'value': {'readonly': True},
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[RuntimeScriptActionDetail]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
**kwargs
):
super(ScriptActionExecutionHistoryList, self).__init__(**kwargs)
self.value = None
self.next_link = None
class ScriptActionExecutionSummary(msrest.serialization.Model):
"""The execution summary of a script action.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar status: The status of script action execution.
:vartype status: str
:ivar instance_count: The instance count for a given script action execution status.
:vartype instance_count: int
"""
_validation = {
'status': {'readonly': True},
'instance_count': {'readonly': True},
}
_attribute_map = {
'status': {'key': 'status', 'type': 'str'},
'instance_count': {'key': 'instanceCount', 'type': 'int'},
}
def __init__(
self,
**kwargs
):
super(ScriptActionExecutionSummary, self).__init__(**kwargs)
self.status = None
self.instance_count = None
class ScriptActionPersistedGetResponseSpec(msrest.serialization.Model):
"""The persisted script action for cluster.
:param name: The name of script action.
:type name: str
:param uri: The URI to the script.
:type uri: str
:param parameters: The parameters for the script.
:type parameters: str
:param roles: The list of roles where the script will be executed.
:type roles: list[str]
:param application_name: The application name for the script action.
:type application_name: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'uri': {'key': 'uri', 'type': 'str'},
'parameters': {'key': 'parameters', 'type': 'str'},
'roles': {'key': 'roles', 'type': '[str]'},
'application_name': {'key': 'applicationName', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
uri: Optional[str] = None,
parameters: Optional[str] = None,
roles: Optional[List[str]] = None,
application_name: Optional[str] = None,
**kwargs
):
super(ScriptActionPersistedGetResponseSpec, self).__init__(**kwargs)
self.name = name
self.uri = uri
self.parameters = parameters
self.roles = roles
self.application_name = application_name
class ScriptActionsList(msrest.serialization.Model):
"""The persisted script action for the cluster.
Variables are only populated by the server, and will be ignored when sending a request.
:param value: The list of persisted script action details for the cluster.
:type value: list[~azure.mgmt.hdinsight.models.RuntimeScriptActionDetail]
:ivar next_link: The link (url) to the next page of results.
:vartype next_link: str
"""
_validation = {
'next_link': {'readonly': True},
}
_attribute_map = {
'value': {'key': 'value', 'type': '[RuntimeScriptActionDetail]'},
'next_link': {'key': 'nextLink', 'type': 'str'},
}
def __init__(
self,
*,
value: Optional[List["RuntimeScriptActionDetail"]] = None,
**kwargs
):
super(ScriptActionsList, self).__init__(**kwargs)
self.value = value
self.next_link = None
class SecurityProfile(msrest.serialization.Model):
"""The security profile which contains Ssh public key for the HDInsight cluster.
Variables are only populated by the server, and will be ignored when sending a request.
:ivar directory_type: The directory type. Default value: "ActiveDirectory".
:vartype directory_type: str
:param domain: The organization's active directory domain.
:type domain: str
:param organizational_unit_dn: The organizational unit within the Active Directory to place the
cluster and service accounts.
:type organizational_unit_dn: str
:param ldaps_urls: The LDAPS protocol URLs to communicate with the Active Directory.
:type ldaps_urls: list[str]
:param domain_username: The domain user account that will have admin privileges on the cluster.
:type domain_username: str
:param domain_user_password: The domain admin password.
:type domain_user_password: str
:param cluster_users_group_d_ns: Optional. The Distinguished Names for cluster user groups.
:type cluster_users_group_d_ns: list[str]
:param aadds_resource_id: The resource ID of the user's Azure Active Directory Domain Service.
:type aadds_resource_id: str
:param msi_resource_id: User assigned identity that has permissions to read and create
cluster-related artifacts in the user's AADDS.
:type msi_resource_id: str
"""
_validation = {
'directory_type': {'constant': True},
}
_attribute_map = {
'directory_type': {'key': 'directoryType', 'type': 'str'},
'domain': {'key': 'domain', 'type': 'str'},
'organizational_unit_dn': {'key': 'organizationalUnitDN', 'type': 'str'},
'ldaps_urls': {'key': 'ldapsUrls', 'type': '[str]'},
'domain_username': {'key': 'domainUsername', 'type': 'str'},
'domain_user_password': {'key': 'domainUserPassword', 'type': 'str'},
'cluster_users_group_d_ns': {'key': 'clusterUsersGroupDNs', 'type': '[str]'},
'aadds_resource_id': {'key': 'aaddsResourceId', 'type': 'str'},
'msi_resource_id': {'key': 'msiResourceId', 'type': 'str'},
}
directory_type = "ActiveDirectory"
def __init__(
self,
*,
domain: Optional[str] = None,
organizational_unit_dn: Optional[str] = None,
ldaps_urls: Optional[List[str]] = None,
domain_username: Optional[str] = None,
domain_user_password: Optional[str] = None,
cluster_users_group_d_ns: Optional[List[str]] = None,
aadds_resource_id: Optional[str] = None,
msi_resource_id: Optional[str] = None,
**kwargs
):
super(SecurityProfile, self).__init__(**kwargs)
self.domain = domain
self.organizational_unit_dn = organizational_unit_dn
self.ldaps_urls = ldaps_urls
self.domain_username = domain_username
self.domain_user_password = domain_user_password
self.cluster_users_group_d_ns = cluster_users_group_d_ns
self.aadds_resource_id = aadds_resource_id
self.msi_resource_id = msi_resource_id
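# Illustrative usage (a hedged sketch of an Enterprise Security Package style
# configuration; every name and resource ID below is a placeholder, not a real
# resource). Note that directory_type is a class-level constant ("ActiveDirectory")
# and is not accepted by the constructor.
#
#   security = SecurityProfile(
#       domain="contoso.onmicrosoft.com",
#       ldaps_urls=["ldaps://contoso.onmicrosoft.com:636"],
#       domain_username="clusteradmin@contoso.onmicrosoft.com",
#       domain_user_password="<password>",
#       cluster_users_group_d_ns=["hdi-cluster-users"],
#       aadds_resource_id="/subscriptions/<sub>/resourceGroups/<rg>"
#                         "/providers/Microsoft.AAD/domainServices/<name>",
#   )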
class ServiceSpecification(msrest.serialization.Model):
"""The specification of the service.
:param metric_specifications: The metric specifications.
:type metric_specifications: list[~azure.mgmt.hdinsight.models.MetricSpecifications]
"""
_attribute_map = {
'metric_specifications': {'key': 'metricSpecifications', 'type': '[MetricSpecifications]'},
}
def __init__(
self,
*,
metric_specifications: Optional[List["MetricSpecifications"]] = None,
**kwargs
):
super(ServiceSpecification, self).__init__(**kwargs)
self.metric_specifications = metric_specifications
class SshProfile(msrest.serialization.Model):
"""The list of SSH public keys.
:param public_keys: The list of SSH public keys.
:type public_keys: list[~azure.mgmt.hdinsight.models.SshPublicKey]
"""
_attribute_map = {
'public_keys': {'key': 'publicKeys', 'type': '[SshPublicKey]'},
}
def __init__(
self,
*,
public_keys: Optional[List["SshPublicKey"]] = None,
**kwargs
):
super(SshProfile, self).__init__(**kwargs)
self.public_keys = public_keys
class SshPublicKey(msrest.serialization.Model):
"""The SSH public key for the cluster nodes.
:param certificate_data: The certificate for SSH.
:type certificate_data: str
"""
_attribute_map = {
'certificate_data': {'key': 'certificateData', 'type': 'str'},
}
def __init__(
self,
*,
certificate_data: Optional[str] = None,
**kwargs
):
super(SshPublicKey, self).__init__(**kwargs)
self.certificate_data = certificate_data
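# Illustrative usage (a minimal sketch): an SshProfile wraps a list of SshPublicKey
# objects, each carrying the key material as a plain string. The key below is
# truncated placeholder data.
#
#   ssh = SshProfile(
#       public_keys=[SshPublicKey(certificate_data="ssh-rsa AAAAB3... user@host")]
#   )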
class StorageAccount(msrest.serialization.Model):
"""The storage Account.
:param name: The name of the storage account.
:type name: str
:param is_default: Whether or not the storage account is the default storage account.
:type is_default: bool
:param container: The container in the storage account, only to be specified for WASB storage
accounts.
:type container: str
:param file_system: The filesystem, only to be specified for Azure Data Lake Storage Gen 2.
:type file_system: str
:param key: The storage account access key.
:type key: str
:param resource_id: The resource ID of storage account, only to be specified for Azure Data
Lake Storage Gen 2.
:type resource_id: str
:param msi_resource_id: The managed identity (MSI) that is allowed to access the storage
account, only to be specified for Azure Data Lake Storage Gen 2.
:type msi_resource_id: str
:param saskey: The shared access signature key.
:type saskey: str
:param fileshare: The file share name.
:type fileshare: str
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'is_default': {'key': 'isDefault', 'type': 'bool'},
'container': {'key': 'container', 'type': 'str'},
'file_system': {'key': 'fileSystem', 'type': 'str'},
'key': {'key': 'key', 'type': 'str'},
'resource_id': {'key': 'resourceId', 'type': 'str'},
'msi_resource_id': {'key': 'msiResourceId', 'type': 'str'},
'saskey': {'key': 'saskey', 'type': 'str'},
'fileshare': {'key': 'fileshare', 'type': 'str'},
}
def __init__(
self,
*,
name: Optional[str] = None,
is_default: Optional[bool] = None,
container: Optional[str] = None,
file_system: Optional[str] = None,
key: Optional[str] = None,
resource_id: Optional[str] = None,
msi_resource_id: Optional[str] = None,
saskey: Optional[str] = None,
fileshare: Optional[str] = None,
**kwargs
):
super(StorageAccount, self).__init__(**kwargs)
self.name = name
self.is_default = is_default
self.container = container
self.file_system = file_system
self.key = key
self.resource_id = resource_id
self.msi_resource_id = msi_resource_id
self.saskey = saskey
self.fileshare = fileshare
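# Illustrative usage (a hedged sketch contrasting the two storage shapes the
# docstring above describes; account names, keys and resource IDs are placeholders):
#
#   # WASB (blob) storage: specify `container` and an access `key`.
#   wasb = StorageAccount(
#       name="mystorage.blob.core.windows.net",
#       is_default=True,
#       container="cluster-container",
#       key="<storage-account-key>",
#   )
#
#   # Azure Data Lake Storage Gen 2: specify `file_system`, the account's
#   # `resource_id`, and the managed identity allowed to access it.
#   adls2 = StorageAccount(
#       name="mystorage.dfs.core.windows.net",
#       is_default=True,
#       file_system="cluster-fs",
#       resource_id="/subscriptions/<sub>/resourceGroups/<rg>"
#                   "/providers/Microsoft.Storage/storageAccounts/mystorage",
#       msi_resource_id="/subscriptions/<sub>/resourceGroups/<rg>/providers"
#                       "/Microsoft.ManagedIdentity/userAssignedIdentities/<id>",
#   )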
class StorageProfile(msrest.serialization.Model):
"""The storage profile.
:param storageaccounts: The list of storage accounts in the cluster.
:type storageaccounts: list[~azure.mgmt.hdinsight.models.StorageAccount]
"""
_attribute_map = {
'storageaccounts': {'key': 'storageaccounts', 'type': '[StorageAccount]'},
}
def __init__(
self,
*,
storageaccounts: Optional[List["StorageAccount"]] = None,
**kwargs
):
super(StorageProfile, self).__init__(**kwargs)
self.storageaccounts = storageaccounts
class UpdateClusterIdentityCertificateParameters(msrest.serialization.Model):
"""The update cluster identity certificate request parameters.
:param application_id: The application id.
:type application_id: str
:param certificate: The certificate in base64 encoded format.
:type certificate: str
:param certificate_password: The password of the certificate.
:type certificate_password: str
"""
_attribute_map = {
'application_id': {'key': 'applicationId', 'type': 'str'},
'certificate': {'key': 'certificate', 'type': 'str'},
'certificate_password': {'key': 'certificatePassword', 'type': 'str'},
}
def __init__(
self,
*,
application_id: Optional[str] = None,
certificate: Optional[str] = None,
certificate_password: Optional[str] = None,
**kwargs
):
super(UpdateClusterIdentityCertificateParameters, self).__init__(**kwargs)
self.application_id = application_id
self.certificate = certificate
self.certificate_password = certificate_password
class UpdateGatewaySettingsParameters(msrest.serialization.Model):
"""The update gateway settings request parameters.
:param is_credential_enabled: Indicates whether or not gateway settings-based authorization
is enabled.
:type is_credential_enabled: bool
:param user_name: The gateway settings user name.
:type user_name: str
:param password: The gateway settings user password.
:type password: str
"""
_attribute_map = {
'is_credential_enabled': {'key': 'restAuthCredential\\.isEnabled', 'type': 'bool'},
'user_name': {'key': 'restAuthCredential\\.username', 'type': 'str'},
'password': {'key': 'restAuthCredential\\.password', 'type': 'str'},
}
def __init__(
self,
*,
is_credential_enabled: Optional[bool] = True,
user_name: Optional[str] = None,
password: Optional[str] = None,
**kwargs
):
super(UpdateGatewaySettingsParameters, self).__init__(**kwargs)
self.is_credential_enabled = is_credential_enabled
self.user_name = user_name
self.password = password
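# Serialization note (an observation on the attribute map above): the escaped dots
# in keys such as 'restAuthCredential\.isEnabled' tell the msrest serializer to
# treat the dot as a literal character in a flat JSON property name rather than as
# a nested-object path. A request body would therefore look roughly like this
# (a hedged sketch):
#
#   params = UpdateGatewaySettingsParameters(user_name="admin", password="<pw>")
#   params.serialize()
#   # => {"restAuthCredential.isEnabled": True,
#   #     "restAuthCredential.username": "admin",
#   #     "restAuthCredential.password": "<pw>"}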
class Usage(msrest.serialization.Model):
"""The details about the usage of a particular limited resource.
Variables are only populated by the server, and will be ignored when sending a request.
:param unit: The type of measurement for usage.
:type unit: str
:param current_value: The current usage.
:type current_value: long
:param limit: The maximum allowed usage.
:type limit: long
:ivar name: The details about the localizable name of the used resource.
:vartype name: ~azure.mgmt.hdinsight.models.LocalizedName
"""
_validation = {
'name': {'readonly': True},
}
_attribute_map = {
'unit': {'key': 'unit', 'type': 'str'},
'current_value': {'key': 'currentValue', 'type': 'long'},
'limit': {'key': 'limit', 'type': 'long'},
'name': {'key': 'name', 'type': 'LocalizedName'},
}
def __init__(
self,
*,
unit: Optional[str] = None,
current_value: Optional[int] = None,
limit: Optional[int] = None,
**kwargs
):
super(Usage, self).__init__(**kwargs)
self.unit = unit
self.current_value = current_value
self.limit = limit
self.name = None
class UsagesListResult(msrest.serialization.Model):
"""The response for the operation to get regional usages for a subscription.
:param value: The list of usages.
:type value: list[~azure.mgmt.hdinsight.models.Usage]
"""
_attribute_map = {
'value': {'key': 'value', 'type': '[Usage]'},
}
def __init__(
self,
*,
value: Optional[List["Usage"]] = None,
**kwargs
):
super(UsagesListResult, self).__init__(**kwargs)
self.value = value
class ValidationErrorInfo(msrest.serialization.Model):
"""The validation error information.
:param code: The error code.
:type code: str
:param message: The error message.
:type message: str
:param error_resource: The error resource.
:type error_resource: str
:param message_arguments: The message arguments.
:type message_arguments: list[str]
"""
_attribute_map = {
'code': {'key': 'code', 'type': 'str'},
'message': {'key': 'message', 'type': 'str'},
'error_resource': {'key': 'errorResource', 'type': 'str'},
'message_arguments': {'key': 'messageArguments', 'type': '[str]'},
}
def __init__(
self,
*,
code: Optional[str] = None,
message: Optional[str] = None,
error_resource: Optional[str] = None,
message_arguments: Optional[List[str]] = None,
**kwargs
):
super(ValidationErrorInfo, self).__init__(**kwargs)
self.code = code
self.message = message
self.error_resource = error_resource
self.message_arguments = message_arguments
class VersionsCapability(msrest.serialization.Model):
"""The version capability.
:param available: The list of version capabilities.
:type available: list[~azure.mgmt.hdinsight.models.VersionSpec]
"""
_attribute_map = {
'available': {'key': 'available', 'type': '[VersionSpec]'},
}
def __init__(
self,
*,
available: Optional[List["VersionSpec"]] = None,
**kwargs
):
super(VersionsCapability, self).__init__(**kwargs)
self.available = available
class VersionSpec(msrest.serialization.Model):
"""The version properties.
:param friendly_name: The friendly name.
:type friendly_name: str
:param display_name: The display name.
:type display_name: str
:param is_default: Whether or not the version is the default version.
:type is_default: bool
:param component_versions: The component version property.
:type component_versions: dict[str, str]
"""
_attribute_map = {
'friendly_name': {'key': 'friendlyName', 'type': 'str'},
'display_name': {'key': 'displayName', 'type': 'str'},
'is_default': {'key': 'isDefault', 'type': 'bool'},
'component_versions': {'key': 'componentVersions', 'type': '{str}'},
}
def __init__(
self,
*,
friendly_name: Optional[str] = None,
display_name: Optional[str] = None,
is_default: Optional[bool] = None,
component_versions: Optional[Dict[str, str]] = None,
**kwargs
):
super(VersionSpec, self).__init__(**kwargs)
self.friendly_name = friendly_name
self.display_name = display_name
self.is_default = is_default
self.component_versions = component_versions
class VirtualNetworkProfile(msrest.serialization.Model):
"""The virtual network properties.
:param id: The ID of the virtual network.
:type id: str
:param subnet: The name of the subnet.
:type subnet: str
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'subnet': {'key': 'subnet', 'type': 'str'},
}
def __init__(
self,
*,
id: Optional[str] = None,
subnet: Optional[str] = None,
**kwargs
):
super(VirtualNetworkProfile, self).__init__(**kwargs)
self.id = id
self.subnet = subnet
class VmSizeCompatibilityFilter(msrest.serialization.Model):
"""The virtual machine type compatibility filter.
:param filter_mode: The mode for the filter.
:type filter_mode: str
:param regions: The list of regions.
:type regions: list[str]
:param cluster_flavors: The list of cluster types available.
:type cluster_flavors: list[str]
:param node_types: The list of node types.
:type node_types: list[str]
:param cluster_versions: The list of cluster versions.
:type cluster_versions: list[str]
:param os_type: The list of OS types.
:type os_type: list[str]
:param vm_sizes: The list of virtual machine sizes.
:type vm_sizes: list[str]
:param esp_applied: Whether the filter applies to ESP clusters. 'true' means it applies only to
ESP clusters, 'false' means only to non-ESP clusters; null, an empty string, or any other value
means it applies to both.
:type esp_applied: str
:param compute_isolation_supported: Whether compute isolation is supported. 'true' means the
filter applies only to clusters with compute isolation enabled, 'false' means only to regular
clusters.
:type compute_isolation_supported: str
"""
_attribute_map = {
'filter_mode': {'key': 'FilterMode', 'type': 'str'},
'regions': {'key': 'Regions', 'type': '[str]'},
'cluster_flavors': {'key': 'ClusterFlavors', 'type': '[str]'},
'node_types': {'key': 'NodeTypes', 'type': '[str]'},
'cluster_versions': {'key': 'ClusterVersions', 'type': '[str]'},
'os_type': {'key': 'OsType', 'type': '[str]'},
'vm_sizes': {'key': 'VMSizes', 'type': '[str]'},
'esp_applied': {'key': 'ESPApplied', 'type': 'str'},
'compute_isolation_supported': {'key': 'ComputeIsolationSupported', 'type': 'str'},
}
def __init__(
self,
*,
filter_mode: Optional[str] = None,
regions: Optional[List[str]] = None,
cluster_flavors: Optional[List[str]] = None,
node_types: Optional[List[str]] = None,
cluster_versions: Optional[List[str]] = None,
os_type: Optional[List[str]] = None,
vm_sizes: Optional[List[str]] = None,
esp_applied: Optional[str] = None,
compute_isolation_supported: Optional[str] = None,
**kwargs
):
super(VmSizeCompatibilityFilter, self).__init__(**kwargs)
self.filter_mode = filter_mode
self.regions = regions
self.cluster_flavors = cluster_flavors
self.node_types = node_types
self.cluster_versions = cluster_versions
self.os_type = os_type
self.vm_sizes = vm_sizes
self.esp_applied = esp_applied
self.compute_isolation_supported = compute_isolation_supported
class VmSizeCompatibilityFilterV2(msrest.serialization.Model):
"""This class represent a single filter object that defines a multidimensional set. The dimensions of this set are Regions, ClusterFlavors, NodeTypes and ClusterVersions. The constraint should be defined based on the following: FilterMode (Exclude vs Include), VMSizes (the vm sizes in affect of exclusion/inclusion) and the ordering of the Filters. Later filters override previous settings if conflicted.
:param filter_mode: The filtering mode. Effectively, this can enable or disable the VM sizes in
a particular set. Possible values include: "Exclude", "Include", "Recommend", "Default".
:type filter_mode: str or ~azure.mgmt.hdinsight.models.FilterMode
:param regions: The list of regions under the effect of the filter.
:type regions: list[str]
:param cluster_flavors: The list of cluster flavors under the effect of the filter.
:type cluster_flavors: list[str]
:param node_types: The list of node types affected by the filter.
:type node_types: list[str]
:param cluster_versions: The list of cluster versions affected in Major.Minor format.
:type cluster_versions: list[str]
:param os_type: The OSType affected, Windows or Linux.
:type os_type: list[str or ~azure.mgmt.hdinsight.models.OSType]
:param vm_sizes: The list of virtual machine sizes to include or exclude.
:type vm_sizes: list[str]
"""
_attribute_map = {
'filter_mode': {'key': 'filterMode', 'type': 'str'},
'regions': {'key': 'regions', 'type': '[str]'},
'cluster_flavors': {'key': 'clusterFlavors', 'type': '[str]'},
'node_types': {'key': 'nodeTypes', 'type': '[str]'},
'cluster_versions': {'key': 'clusterVersions', 'type': '[str]'},
'os_type': {'key': 'osType', 'type': '[str]'},
'vm_sizes': {'key': 'vmSizes', 'type': '[str]'},
}
def __init__(
self,
*,
filter_mode: Optional[Union[str, "FilterMode"]] = None,
regions: Optional[List[str]] = None,
cluster_flavors: Optional[List[str]] = None,
node_types: Optional[List[str]] = None,
cluster_versions: Optional[List[str]] = None,
os_type: Optional[List[Union[str, "OSType"]]] = None,
vm_sizes: Optional[List[str]] = None,
**kwargs
):
super(VmSizeCompatibilityFilterV2, self).__init__(**kwargs)
self.filter_mode = filter_mode
self.regions = regions
self.cluster_flavors = cluster_flavors
self.node_types = node_types
self.cluster_versions = cluster_versions
self.os_type = os_type
self.vm_sizes = vm_sizes
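# Illustrative usage (a hedged sketch of the ordering semantics described in the
# class docstring): a broad Exclude filter followed by a narrower Include filter
# re-enables the size for the later, more specific dimension set. The region and
# VM size names are placeholders.
#
#   exclude_everywhere = VmSizeCompatibilityFilterV2(
#       filter_mode="Exclude",
#       vm_sizes=["Standard_A1"],
#   )
#   allow_in_west_us = VmSizeCompatibilityFilterV2(
#       filter_mode="Include",
#       regions=["westus"],
#       vm_sizes=["Standard_A1"],
#   )
#   filters = [exclude_everywhere, allow_in_west_us]  # later entries win on conflict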
class VmSizeProperty(msrest.serialization.Model):
"""The vm size property.
:param name: The vm size name.
:type name: str
:param cores: The number of cores that the vm size has.
:type cores: int
:param data_disk_storage_tier: The data disk storage tier of the vm size.
:type data_disk_storage_tier: str
:param label: The label of the vm size.
:type label: str
:param max_data_disk_count: The max data disk count of the vm size.
:type max_data_disk_count: long
:param memory_in_mb: The memory of the vm size, in MB.
:type memory_in_mb: long
:param supported_by_virtual_machines: Indicates whether this vm size is supported by virtual
machines.
:type supported_by_virtual_machines: bool
:param supported_by_web_worker_roles: Indicates whether this vm size is supported by web worker
roles.
:type supported_by_web_worker_roles: bool
:param virtual_machine_resource_disk_size_in_mb: The virtual machine resource disk size of the
vm size, in MB.
:type virtual_machine_resource_disk_size_in_mb: long
:param web_worker_resource_disk_size_in_mb: The web worker resource disk size of the vm size,
in MB.
:type web_worker_resource_disk_size_in_mb: long
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'cores': {'key': 'cores', 'type': 'int'},
'data_disk_storage_tier': {'key': 'dataDiskStorageTier', 'type': 'str'},
'label': {'key': 'label', 'type': 'str'},
'max_data_disk_count': {'key': 'maxDataDiskCount', 'type': 'long'},
'memory_in_mb': {'key': 'memoryInMb', 'type': 'long'},
'supported_by_virtual_machines': {'key': 'supportedByVirtualMachines', 'type': 'bool'},
'supported_by_web_worker_roles': {'key': 'supportedByWebWorkerRoles', 'type': 'bool'},
'virtual_machine_resource_disk_size_in_mb': {'key': 'virtualMachineResourceDiskSizeInMb', 'type': 'long'},
'web_worker_resource_disk_size_in_mb': {'key': 'webWorkerResourceDiskSizeInMb', 'type': 'long'},
}
def __init__(
self,
*,
name: Optional[str] = None,
cores: Optional[int] = None,
data_disk_storage_tier: Optional[str] = None,
label: Optional[str] = None,
max_data_disk_count: Optional[int] = None,
memory_in_mb: Optional[int] = None,
supported_by_virtual_machines: Optional[bool] = None,
supported_by_web_worker_roles: Optional[bool] = None,
virtual_machine_resource_disk_size_in_mb: Optional[int] = None,
web_worker_resource_disk_size_in_mb: Optional[int] = None,
**kwargs
):
super(VmSizeProperty, self).__init__(**kwargs)
self.name = name
self.cores = cores
self.data_disk_storage_tier = data_disk_storage_tier
self.label = label
self.max_data_disk_count = max_data_disk_count
self.memory_in_mb = memory_in_mb
self.supported_by_virtual_machines = supported_by_virtual_machines
self.supported_by_web_worker_roles = supported_by_web_worker_roles
self.virtual_machine_resource_disk_size_in_mb = virtual_machine_resource_disk_size_in_mb
self.web_worker_resource_disk_size_in_mb = web_worker_resource_disk_size_in_mb
class VmSizesCapability(msrest.serialization.Model):
"""The virtual machine sizes capability.
:param available: The list of virtual machine size capabilities.
:type available: list[str]
"""
_attribute_map = {
'available': {'key': 'available', 'type': '[str]'},
}
def __init__(
self,
*,
available: Optional[List[str]] = None,
**kwargs
):
super(VmSizesCapability, self).__init__(**kwargs)
self.available = available