ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M)
---|---|---|
py | b401507294192a75a8b8caad0fab46c4d11f1607 | # $Filename$
# $Authors$
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
This module contains all custom widgets for the DataFinder GUIs.
"""
import functools
from PyQt4 import QtCore, QtGui
from PyQt4.QtCore import Qt
__version__ = "$Revision-Id$"
class _Tab(object):
"""
Tab class to store tab information.
Only used in the L{datafinder.gui.user.ouput.decorator.TabWidgetDecorator}.
"""
def __init__(self, tabText, tabToolTip, tabWhatsThis, tabIcon, widget, shown = True):
"""
Constructor.
@param tabText: Text of the tab.
@type tabText: C{string}
@param tabToolTip: ToolTip of the tab.
@type tabToolTip: C{string}
@param tabWhatsThis: "What's this" text of the tab.
@type tabWhatsThis: C{string}
@param tabIcon: Icon of the tab.
@type tabIcon: C{QtGui.QIcon}
@param widget: Widget of the tab.
@type widget: C{QtGui.QWidget}
@param shown: True = The tab is visible, False = the tab is removed.
@type shown: C{bool}
"""
self.text = tabText
self.toolTip = tabToolTip
self.whatsThis = tabWhatsThis
self.icon = tabIcon
self.widget = widget
self.shown = shown
class HideableTabWidget(QtGui.QTabWidget):
"""
Extension of the QTabWidget class that allows changing the visibility of individual tabs.
"""
def __init__(self, parent=None):
"""
Constructor.
@param parent: Parent of this L{QtCore.QObject}.
@type parent: C{QtCore.QObject}
"""
QtGui.QTabWidget.__init__(self, parent)
self.__tabs = list()
self.tabBar().setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
QtCore.QObject.connect(self.tabBar(),
QtCore.SIGNAL("customContextMenuRequested(QPoint)"),
self.showTabBarContextMenuSlot)
def fetchTabs(self, index=0):
"""
Fetches all tab information and stores it in an internal list.
This is necessary because tabs cannot be hidden without losing their tab information otherwise.
Has to be called after setting up new tabs that should be hideable.
@param index: The index at which the tab was inserted.
@type index: C{int}
"""
count = self.count()
self.__tabs = self.__tabs[:index]
for i in range(index, count):
tab = _Tab(self.tabText(i), self.tabToolTip(i), self.tabWhatsThis(i), self.tabIcon(i), self.widget(i))
self.__tabs.append(tab)
def setTabShown(self, tab, shown):
"""
Show or hide the tab at the given index.
@param tab: Index of the tab.
@type tab: C{int}
@param shown: True = show, False = hide.
@type shown: C{bool}
"""
index = tab
#Index correction.
for i in range(tab):
if not self.__tabs[i].shown:
index -= 1
#Set the tab visible.
if shown is True:
self.insertTab(index,
self.__tabs[tab].widget,
self.__tabs[tab].icon,
self.__tabs[tab].text)
self.setTabToolTip(index, self.__tabs[tab].toolTip)
self.setTabWhatsThis(index, self.__tabs[tab].whatsThis)
self.setCurrentIndex(index)
#Hide the tab.
else:
self.removeTab(index)
#Set the tab visibility status.
self.__tabs[tab].shown = shown
#Hide the tabwidget if there is no tab anymore.
shown = self.count() > 0
#Sending signal on visibility change.
if self.isHidden() == shown:
self.emit(QtCore.SIGNAL("shownChangedSignal(bool)"), shown)
self.setShown(shown)
def showTabBarContextMenuSlot(self):
"""
This slot is called when a context menu is requested on the tab bar.
"""
menu = QtGui.QMenu(self)
for i, tab in enumerate(self.__tabs):
action = menu.addAction(tab.icon, tab.text)
action.setCheckable(True)
action.setChecked(tab.shown)
self.connect(action, QtCore.SIGNAL("triggered(bool)"),
functools.partial(self.setTabShown, i))
menu.exec_(QtGui.QCursor.pos())
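# A minimal usage sketch (hypothetical, not part of this module): fetchTabs()
# has to be called once after the tabs are set up so that the widget caches the
# tab information it needs to re-insert hidden tabs later, e.g.:
#
#     tabWidget = HideableTabWidget(parent=mainWindow)
#     tabWidget.addTab(logWidget, "Log")
#     tabWidget.addTab(searchWidget, "Search")
#     tabWidget.fetchTabs()
#     tabWidget.setTabShown(1, False)  # hides the "Search" tab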
class DefaultTreeView(QtGui.QTreeView):
"""
Customized version of L{QtGui.QTreeView}.
"""
def __init__(self, parent=None):
"""
Constructor.
@param parent: Parent widget of this tree view.
@type parent: C{QtGui.QWidget}
"""
QtGui.QTreeView.__init__(self, parent)
self.setSelectionBehavior(QtGui.QAbstractItemView.SelectItems)
self.setEditTriggers(QtGui.QAbstractItemView.SelectedClicked |
QtGui.QAbstractItemView.EditKeyPressed)
self.header().hide()
self.header().setSortIndicator(0, QtCore.Qt.AscendingOrder)
self.setSortingEnabled(True)
self.connect(self, QtCore.SIGNAL("expanded(QModelIndex)"), self._resizeColumnsSlot)
self.connect(self, QtCore.SIGNAL("collapsed(QModelIndex)"), self._resizeColumnsSlot)
def _resizeColumnsSlot(self, index):
"""
Resizes the corresponding column when a node is expanded or collapsed.
@param index: Index of the column that has to be resized.
@type index: C{QtCore.QModelIndex}
"""
if index.isValid():
self.resizeColumnToContents(index.column())
class DefaultTableView(QtGui.QTableView):
"""
Customized version of L{QtGui.QTableView}.
"""
def __init__(self, parent=None):
"""
Constructor.
@param parent: Parent widget of this table view.
@type parent: C{QtGui.QWidget}
"""
QtGui.QTableView.__init__(self, parent)
self.__gridStyles = [(self.tr('Solid'), QtCore.Qt.SolidLine),
(self.tr('Dashed'), QtCore.Qt.DashLine),
(self.tr('Dotted'), QtCore.Qt.DotLine),
(self.tr('Dashed Dotted'), QtCore.Qt.DashDotLine)]
self.verticalHeader().hide()
self.verticalHeader().setDefaultSectionSize(22)
self.horizontalHeader().setSortIndicatorShown(True)
self.horizontalHeader().setClickable(True)
self.horizontalHeader().setStretchLastSection(True)
self.horizontalHeader().setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.horizontalHeader().setMovable(True)
self.horizontalHeader().setHighlightSections(False)
self.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
self.setGridStyle(QtCore.Qt.DotLine)
self.connect(self.horizontalHeader(),
QtCore.SIGNAL("customContextMenuRequested(QPoint)"),
self.showHeaderMenu)
self.installEventFilter(self)
def eventFilter(self, _, event):
""" Custom event filter which:
- emits a "returnPressed" event with additional currently selected index
if the Qt.Key_Return key is pressed.
- ensures that the content of the current cell is copied to the
clipboard if <Ctrl>+C is pressed
"""
if event.type() == QtCore.QEvent.KeyPress:
if event.key() == Qt.Key_Return:
self.emit(QtCore.SIGNAL("returnPressed"), self.currentIndex())
elif event.type() == QtCore.QEvent.KeyRelease:
if event.matches(QtGui.QKeySequence.Copy):
QtGui.QApplication.clipboard().setText(self.currentIndex().data().toString())
return False
def showHeaderMenu(self, _):
"""
Shows the header content menu at the current cursor position.
"""
#Generates the menu for changing the visibility of the headers.
menu = QtGui.QMenu(self)
lastCheckedAction = None
numberOfCheckActions = 0
for section in range(self.model().columnCount(QtCore.QModelIndex())):
text = self.model().headerData(section, QtCore.Qt.Horizontal, QtCore.Qt.DisplayRole).toString()
action = menu.addAction(text)
action.setCheckable(True)
if self.isColumnHidden(section):
action.setChecked(False)
action.connect(action, QtCore.SIGNAL("triggered(bool)"),
functools.partial(self.showColumn, section))
else:
action.setChecked(True)
action.connect(action, QtCore.SIGNAL("triggered(bool)"),
functools.partial(self.hideColumn, section))
lastCheckedAction = action
numberOfCheckActions += 1
action.setEnabled(True)
if not lastCheckedAction is None and numberOfCheckActions == 1:
lastCheckedAction.setEnabled(False)
#Generates the menu for the grid style.
gridMenu = QtGui.QMenu(self.tr('Grid'), menu)
styleGroup = QtGui.QActionGroup(menu)
for name, style in self.__gridStyles:
action = gridMenu.addAction(name)
action.setCheckable(True)
action.setChecked(style == self.gridStyle())
action.setEnabled(self.showGrid())
styleGroup.addAction(action)
self.connect(action, QtCore.SIGNAL("triggered(bool)"),
functools.partial(self.setGridStyle, style))
gridMenu.addSeparator()
action = gridMenu.addAction(self.tr('Show'))
action.setCheckable(True)
action.setChecked(self.showGrid())
self.connect(action, QtCore.SIGNAL("triggered(bool)"), self.setShowGrid)
menu.addSeparator()
menu.addMenu(gridMenu)
menu.exec_(QtGui.QCursor.pos())
class DefaultListView(QtGui.QListView):
"""
Customized version of L{QtGui.QListView}.
"""
def __init__(self, parent=None):
"""
Constructor.
@param parent: Parent widget of this list view.
@type parent: C{QtGui.QWidget}
"""
QtGui.QListView.__init__(self, parent)
self.__verticalOffset = 0
def keyPressEvent(self, keyEvent):
""" Signals that the return key is pressed and provides the specific the current model index. """
if keyEvent.key() == Qt.Key_Return:
self.emit(QtCore.SIGNAL("returnPressed"), self.selectionModel().currentIndex())
QtGui.QListView.keyPressEvent(self, keyEvent)
def setViewMode(self, mode):
"""
@see: QtGui.QListView#setViewMode
"""
size = QtCore.QSize(-1, -1)
self.__verticalOffset = 0
if mode == QtGui.QListView.IconMode:
size = QtCore.QSize(115, 80)
self.__verticalOffset = -10
self.setGridSize(size)
QtGui.QListView.setViewMode(self, mode)
def visualRect(self, index):
"""
@see: QtCore.QAbstractItemView#visualRect
"""
rect = self.rectForIndex(index)
dx = -1 * self.horizontalOffset()
dy = -1 * self.verticalOffset() - self.__verticalOffset
rect.adjust(dx, dy, dx, dy)
return rect
class ActionTooltipMenu(QtGui.QMenu):
""" Implements a menu which shows the tool tip of the active action. """
def __init__(self, parent=None):
""" Constructor. """
QtGui.QMenu.__init__(self, parent)
def event(self, event):
"""
@see: L{event<PyQt4.QtGui.QWidget.event>}
Used for displaying action-dependent tool tips.
"""
if event.type() == QtCore.QEvent.ToolTip:
if not self.activeAction() is None:
QtGui.QToolTip.showText(event.globalPos(), self.activeAction().toolTip())
else:
QtGui.QToolTip.hideText()
return QtGui.QMenu.event(self, event)
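# A minimal usage sketch (hypothetical, not part of this module): the menu
# shows the tool tip of whichever action is currently highlighted, e.g.:
#
#     menu = ActionTooltipMenu(parentWidget)
#     deleteAction = menu.addAction("Delete")
#     deleteAction.setToolTip("Permanently deletes the selected items.")
#     menu.exec_(QtGui.QCursor.pos())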
|
py | b40150f4c2ed10e6aa6d54b488fee109f8a9c544 | import sklearn.datasets
import sklearn.metrics
from ray.tune.schedulers import ASHAScheduler
from sklearn.model_selection import train_test_split
import xgboost as xgb
from ray import tune
from ray.tune.integration.xgboost import TuneReportCheckpointCallback
def train_breast_cancer(config):
# Load dataset
data, labels = sklearn.datasets.load_breast_cancer(return_X_y=True)
# Split into train and test set
train_x, test_x, train_y, test_y = train_test_split(
data, labels, test_size=0.25)
# Build input matrices for XGBoost
train_set = xgb.DMatrix(train_x, label=train_y)
test_set = xgb.DMatrix(test_x, label=test_y)
# Train the classifier
xgb.train(
config,
train_set,
evals=[(test_set, "eval")],
verbose_eval=False,
callbacks=[TuneReportCheckpointCallback(filename="model.xgb")])
if __name__ == "__main__":
config = {
"objective": "binary:logistic",
"eval_metric": ["logloss", "error"],
"max_depth": tune.randint(1, 9),
"min_child_weight": tune.choice([1, 2, 3]),
"subsample": tune.uniform(0.5, 1.0),
"eta": tune.loguniform(1e-4, 1e-1)
}
scheduler = ASHAScheduler(
max_t=10, # 10 training iterations
grace_period=1,
reduction_factor=2)
analysis = tune.run(
train_breast_cancer,
metric="eval-logloss",
mode="min",
resources_per_trial={"cpu": 1, "gpu": 0.1},  # 0.1 GPU per trial lets ten trials share one GPU
config=config,
num_samples=100,
scheduler=scheduler)
# Load the best model checkpoint
import os
best_bst = xgb.Booster()
best_bst.load_model(os.path.join(analysis.best_checkpoint, "model.xgb"))
accuracy = 1. - analysis.best_result["eval-error"]
print(f"Best model parameters: {analysis.best_config}")
print(f"Best model total accuracy: {accuracy:.4f}")
# best_bst.predict(...) can be used in the same way for further predictions
# on new samples. |
py | b401535280f4d49232e8efb811f4903c8b8fcc3a | import os
class Config :
'''
General configuration parent class
'''
NEWS_HIGHLIGHT_API_BASE_URL = 'https://newsapi.org/v2/sources?apiKey={}'
TOP_HEADLINES_URL = 'https://newsapi.org/v2/top-headlines?sources={}&apikey={}'
EVERYTHING_URL = 'https://newsapi.org/v2/everything?q=trending&language=en&apiKey={}'
NEWS_HIGHLIGHT_API_KEY = os.environ.get('NEWS_HIGHLIGHT_API_KEY')
class ProdConfig(Config) :
'''
Production configuration child class
Args:
config : The parent configuration class with General configuration settings
'''
pass
class DevConfig(Config) :
'''
Development configuration child class
'''
DEBUG = True #enable debug mode in my app
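# Typical usage (a hypothetical sketch, not part of this module): an application
# factory would look the environment up in config_options (defined below) and
# apply it to the Flask app, e.g.:
#
#     app = Flask(__name__)
#     app.config.from_object(config_options['development'])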
config_options = {
'development' : DevConfig,
'production' : ProdConfig
} |
py | b401548ad79b3ef0fbcfb9d8e9e98050d6e0920c | from __future__ import absolute_import
from __future__ import print_function
from typing import Dict, List, Optional, Set
import re
from collections import defaultdict
from .template_parser import (
tokenize,
Token,
)
class HtmlBranchesException(Exception):
# TODO: Have callers pass in line numbers.
pass
class HtmlTreeBranch(object):
"""
For <p><div id='yo'>bla<span class='bar'></span></div></p>, store a
representation of the tags all the way down to the leaf, which would
conceptually be something like "p div(#yo) span(.bar)".
"""
def __init__(self, tags, fn):
# type: (List[TagInfo], Optional[str]) -> None
self.tags = tags
self.fn = fn
self.line = tags[-1].token.line
self.words = set() # type: Set[str]
for tag in tags:
for word in tag.words:
self.words.add(word)
def staircase_text(self):
# type: () -> str
"""
produces representation of a node in staircase-like format:
html
body.main-section
p#intro
"""
res = '\n'
indent = ' ' * 4
for t in self.tags:
res += indent + t.text() + '\n'
indent += ' ' * 4
return res
def text(self):
# type: () -> str
"""
produces one-line representation of branch:
html body.main-section p#intro
"""
return ' '.join(t.text() for t in self.tags)
class Node(object):
def __init__(self, token, parent): # FIXME parent parameter is not used!
# type: (Token, Optional[Node]) -> None
self.token = token
self.children = [] # type: List[Node]
self.parent = None # type: Optional[Node]
class TagInfo(object):
def __init__(self, tag, classes, ids, token):
# type: (str, List[str], List[str], Token) -> None
self.tag = tag
self.classes = classes
self.ids = ids
self.token = token
self.words = \
[self.tag] + \
['.' + s for s in classes] + \
['#' + s for s in ids]
def text(self):
# type: () -> str
s = self.tag
if self.classes:
s += '.' + '.'.join(self.classes)
if self.ids:
s += '#' + '#'.join(self.ids)
return s
def get_tag_info(token):
# type: (Token) -> TagInfo
s = token.s
tag = token.tag
classes = [] # type: List[str]
ids = [] # type: List[str]
searches = [
(classes, ' class="(.*?)"'),
(classes, " class='(.*?)'"),
(ids, ' id="(.*?)"'),
(ids, " id='(.*?)'"),
]
for lst, regex in searches:
m = re.search(regex, s)
if m:
for g in m.groups():
lst += split_for_id_and_class(g)
return TagInfo(tag=tag, classes=classes, ids=ids, token=token)
def split_for_id_and_class(element):
# type: (str) -> List[str]
# Here we split a given string which is expected to contain id or class
# attributes from HTML tags. This also takes care of template variables
# in the string during the splitting process, e.g. 'red black {{ a|b|c }}'
# is split as ['red', 'black', '{{ a|b|c }}'].
outside_braces = True # type: bool
lst = []
s = ''
for ch in element:
if ch == '{':
outside_braces = False
if ch == '}':
outside_braces = True
if ch == ' ' and outside_braces:
if not s == '':
lst.append(s)
s = ''
else:
s += ch
if not s == '':
lst.append(s)
return lst
def html_branches(text, fn=None):
# type: (str, Optional[str]) -> List[HtmlTreeBranch]
tree = html_tag_tree(text)
branches = [] # type: List[HtmlTreeBranch]
def walk(node, tag_info_list=None):
# type: (Node, Optional[List[TagInfo]]) -> None
info = get_tag_info(node.token)
if tag_info_list is None:
tag_info_list = [info]
else:
tag_info_list = tag_info_list[:] + [info]
if node.children:
for child in node.children:
walk(node=child, tag_info_list=tag_info_list)
else:
tree_branch = HtmlTreeBranch(tags=tag_info_list, fn=fn)
branches.append(tree_branch)
for node in tree.children:
walk(node, None)
return branches
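# A minimal usage sketch (hypothetical, not part of this module):
#
#     for branch in html_branches('<div id="yo"><span class="bar"></span></div>'):
#         print(branch.text())   # -> div#yo span.bar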
def html_tag_tree(text):
# type: (str) -> Node
tokens = tokenize(text)
top_level = Node(token=None, parent=None)
stack = [top_level]
for token in tokens:
# Add tokens to the Node tree first (conditionally).
if token.kind in ('html_start', 'html_singleton'):
parent = stack[-1]
node = Node(token=token, parent=parent)
parent.children.append(node)
# Then update the stack to have the next node that
# we will be appending to at the top.
if token.kind == 'html_start':
stack.append(node)
elif token.kind == 'html_end':
stack.pop()
return top_level
def build_id_dict(templates):
# type: (List[str]) -> (Dict[str,List[str]])
template_id_dict = defaultdict(list) # type: (Dict[str,List[str]])
for fn in templates:
text = open(fn).read()
list_tags = tokenize(text)
for tag in list_tags:
info = get_tag_info(tag)
for ids in info.ids:
template_id_dict[ids].append("Line " + str(info.token.line) + ":" + fn)
return template_id_dict
|
py | b40154ac34a1d9454cc9b4ba7587e9641871164d | from .python.ad3 import *
from .python.simple_inference import simple_grid, general_graph
|
py | b40154b8740e33335583ac5f91e8bf713958655e | # -*- coding: utf-8 -*-
"""Make prediction and compute confusion matrix for modified input data"""
"""PART B2 : Input random noise to electrode column of emg data"""
import numpy as np
import tensorflow as tf
import random
import os
os.environ['PYTHONHASHSEED'] = '0'
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(1234)
random.seed(12345)
#
session_conf = tf.ConfigProto(
intra_op_parallelism_threads=1,
inter_op_parallelism_threads=1
)
from keras import backend as K
#
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
sess.run(tf.global_variables_initializer())
K.set_session(sess)
##############################################################################
import sys
import matplotlib.pyplot as plt
from keras import optimizers, initializers, regularizers, constraints
from tensorflow.keras.callbacks import TensorBoard
from keras.utils import plot_model
from utils import *
from datageneratordb5_b import *
import preprocessing_db5
import json
import pandas as pd
import seaborn as sns
from sklearn import metrics
import scipy.io
from keras.models import model_from_json
from sklearn.metrics import confusion_matrix
with open('DB5_vgg16_b2.json') as json_file:
config_data = json.load(json_file)
MODEL_WEIGHTS_SAVE_FILE = os.path.abspath(
'models_vgg16_db5') + '/'+'_DB5_vgg16'+ '_{}.h5'
MODEL_SAVE_FILE = os.path.abspath(
'models_vgg16_db5') + '/'+'_DB5_vgg16'+ '_{}.json'
PARAMS_MODEL = config_data['model']
PARAMS_DATASET = config_data['dataset']
PARAMS_TEST_GENERATOR = DEFAULT_GENERATOR_PARAMS.copy()
params_gen = PARAMS_DATASET.get('test_generator', {}).copy()
for key in params_gen.keys():
PARAMS_TEST_GENERATOR[key] = params_gen[key]
input_directory = r'C:/Users/Marina/Desktop/ninapro-db5/Ninapro-DB5_Preprocessed'
PARAMS_TEST_GENERATOR['preprocess_function'] = [preprocessing_db5.lpf]
PARAMS_TEST_GENERATOR['preprocess_function_extra'] = [{'fs':200}]
PARAMS_TEST_GENERATOR['data_type'] = 'rms'
PARAMS_TEST_GENERATOR['classes'] = [i for i in range(13)]
PARAMS_TEST_GENERATOR.pop('input_directory', '')
test_generator = DataGeneratorB(input_directory=input_directory,**PARAMS_TEST_GENERATOR)
X_test, Y_test, test_reps = test_generator.get_data()
y_test = np.argmax(Y_test, axis=1)
# load json and create model
with open(MODEL_SAVE_FILE,'r') as f:
json = f.read()
loaded_model = model_from_json(json)
loaded_model.load_weights(MODEL_WEIGHTS_SAVE_FILE)
print("Loaded model from disk")
# evaluate loaded model on test data
loaded_model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
score = loaded_model.evaluate(X_test, Y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
Y_pred = loaded_model.predict(X_test)
y_pred = np.argmax(Y_pred, axis=1)
# Display confusion matrix
cm = confusion_matrix(y_test, y_pred)
print(cm)
plt.imshow(cm)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.show()
|
py | b401550d92686cf2f02c90b077c0a532ef270cd5 | """Unit test for the bibcheck.checker module."""
import unittest
import bibcheck.checker
class LineTest(unittest.TestCase):
"""Test the Line class."""
def test_line(self):
"""Test the Line class."""
line = bibcheck.checker.Line("Lorum ipsum delor", "references.bib", 34)
self.assertEqual(line.text, "Lorum ipsum delor")
self.assertEqual(line.file_path, "references.bib")
self.assertEqual(line.line_number, 34)
class IssueTest(unittest.TestCase):
"""Test the Issue class."""
def test_issue(self):
"""Test the Issue class."""
issue = bibcheck.checker.Issue("references.bib", 34)
self.assertEqual(issue.file_path, "references.bib")
self.assertEqual(issue.line_number, 34)
self.assertTrue(issue)
|
py | b401560a76e0b2db81f0e0b1f6e47e5d53e3b7e6 | # (C) Datadog, Inc. 2019-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from datadog_checks.dev import TempDir
from datadog_checks.dev.utils import ensure_parent_dir_exists, path_join, write_file
from .utils import get_spec
pytestmark = pytest.mark.conf
def test_cache():
spec = get_spec('')
spec.data = 'test'
spec.load()
spec.load()
assert spec.data == 'test'
def test_invalid_yaml():
spec = get_spec(
"""
foo:
- bar
baz: oops
"""
)
spec.load()
assert spec.errors[0].startswith('test: Unable to parse the configuration specification')
def test_not_map():
spec = get_spec('- foo')
spec.load()
assert 'test: Configuration specifications must be a mapping object' in spec.errors
def test_no_name():
spec = get_spec(
"""
foo:
- bar
"""
)
spec.load()
assert 'test: Configuration specifications must contain a top-level `name` attribute' in spec.errors
def test_name_not_string():
spec = get_spec(
"""
name: 123
"""
)
spec.load()
assert 'test: The top-level `name` attribute must be a string' in spec.errors
def test_no_version():
spec = get_spec(
"""
name: foo
"""
)
spec.load()
assert 'test: Configuration specifications must contain a top-level `version` attribute' in spec.errors
def test_version_not_string():
spec = get_spec(
"""
name: foo
version: 123
"""
)
spec.load()
assert 'test: The top-level `version` attribute must be a string' in spec.errors
def test_version_loaded():
spec = get_spec(
"""
name: foo
""",
version='0.0.0',
)
spec.load()
assert 'test: Configuration specifications must contain a top-level `files` attribute' in spec.errors
def test_no_files():
spec = get_spec(
"""
name: foo
version: 0.0.0
"""
)
spec.load()
assert 'test: Configuration specifications must contain a top-level `files` attribute' in spec.errors
def test_files_not_array():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
foo: bar
"""
)
spec.load()
assert 'test: The top-level `files` attribute must be an array' in spec.errors
def test_file_not_map():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- 5
- baz
"""
)
spec.load()
assert 'test, file #1: File attribute must be a mapping object' in spec.errors
assert 'test, file #2: File attribute must be a mapping object' in spec.errors
def test_file_no_name():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- foo: bar
"""
)
spec.load()
assert (
'test, file #1: Every file must contain a `name` attribute representing the final destination the Agent loads'
) in spec.errors
def test_file_name_duplicate():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
- name: test.yaml
"""
)
spec.load()
assert 'test, file #2: File name `test.yaml` already used by file #1' in spec.errors
def test_example_file_name_duplicate():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
- name: bar.yaml
example_name: test.yaml.example
"""
)
spec.load()
assert 'test, file #2: Example file name `test.yaml.example` already used by file #1' in spec.errors
def test_file_name_not_string():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: 123
example_name: test.yaml.example
"""
)
spec.load()
assert 'test, file #1: Attribute `name` must be a string' in spec.errors
def test_example_file_name_not_string():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: 123
"""
)
spec.load()
assert 'test, file #1: Attribute `example_name` must be a string' in spec.errors
def test_file_name_standard_incorrect():
spec = get_spec(
"""
name: IBM Db2
version: 0.0.0
files:
- name: foo.yaml
""",
source='IBM Db2',
)
spec.load()
assert 'IBM Db2, file #1: File name `foo.yaml` should be `ibm_db2.yaml`' in spec.errors
def test_example_file_name_autodiscovery_incorrect():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: auto_conf.yaml
example_name: test.yaml.example
"""
)
spec.load()
assert 'test, file #1: Example file name `test.yaml.example` should be `auto_conf.yaml`' in spec.errors
def test_example_file_name_standard_default():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
"""
)
spec.load()
assert spec.data['files'][0]['example_name'] == 'conf.yaml.example'
def test_example_file_name_autodiscovery_default():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: auto_conf.yaml
"""
)
spec.load()
assert spec.data['files'][0]['example_name'] == 'auto_conf.yaml'
def test_no_options():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
"""
)
spec.load()
assert 'test, test.yaml: Every file must contain an `options` attribute' in spec.errors
def test_sections_not_array():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
foo: bar
"""
)
spec.load()
assert 'test, test.yaml: The `options` attribute must be an array' in spec.errors
def test_section_not_map():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- 5
- baz
"""
)
spec.load()
assert 'test, test.yaml, option #1: Option attribute must be a mapping object' in spec.errors
assert 'test, test.yaml, option #2: Option attribute must be a mapping object' in spec.errors
def test_section_no_name():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- foo: bar
"""
)
spec.load()
assert 'test, test.yaml, option #1: Every option must contain a `name` attribute' in spec.errors
def test_section_name_not_string():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: 123
"""
)
spec.load()
assert 'test, test.yaml, option #1: Attribute `name` must be a string' in spec.errors
def test_section_name_duplicate():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
- name: instances
"""
)
spec.load()
assert 'test, test.yaml, option #2: Option name `instances` already used by option #1' in spec.errors
def test_options_not_array():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
foo: bar
"""
)
spec.load()
assert 'test, test.yaml, instances: The `options` attribute must be an array' in spec.errors
def test_option_not_map():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- 5
- baz
"""
)
spec.load()
assert 'test, test.yaml, instances, option #1: Option attribute must be a mapping object' in spec.errors
assert 'test, test.yaml, instances, option #2: Option attribute must be a mapping object' in spec.errors
def test_option_no_name():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- foo: bar
"""
)
spec.load()
assert 'test, test.yaml, instances, option #1: Every option must contain a `name` attribute' in spec.errors
def test_option_name_not_string():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: 123
"""
)
spec.load()
assert 'test, test.yaml, instances, option #1: Attribute `name` must be a string' in spec.errors
def test_option_name_duplicate():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: server
- name: server
"""
)
spec.load()
assert 'test, test.yaml, instances, option #2: Option name `server` already used by option #1' in spec.errors
def test_option_no_description():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Every option must contain a `description` attribute' in spec.errors
def test_option_description_not_string():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: 123
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Attribute `description` must be a string' in spec.errors
def test_option_required_not_boolean():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
required: nope
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Attribute `required` must be true or false' in spec.errors
def test_option_required_default():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
"""
)
spec.load()
assert spec.data['files'][0]['options'][0]['options'][0]['required'] is False
def test_option_hidden_not_boolean():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
hidden: nope
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Attribute `hidden` must be true or false' in spec.errors
def test_option_hidden_default():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
"""
)
spec.load()
assert spec.data['files'][0]['options'][0]['options'][0]['hidden'] is False
def test_option_deprecation_not_mapping():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
deprecation: nope
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Attribute `deprecation` must be a mapping object' in spec.errors
def test_option_deprecation_value_not_string():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
deprecation:
test: 5
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Key `test` for attribute `deprecation` must be a string' in spec.errors
def test_option_deprecation_default():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
"""
)
spec.load()
assert spec.data['files'][0]['options'][0]['options'][0]['deprecation'] == {}
def test_option_deprecation_ok():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
deprecation:
test: foo
"""
)
spec.load()
assert spec.data['files'][0]['options'][0]['options'][0]['deprecation'] == {'test': 'foo'}
def test_option_metadata_tags_not_array():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
metadata_tags: nope
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Attribute `metadata_tags` must be an array' in spec.errors
def test_option_metadata_tags_value_not_string():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
metadata_tags:
- 5
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Attribute `metadata_tags` must only contain strings' in spec.errors
def test_option_metadata_tags_default():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
"""
)
spec.load()
assert spec.data['files'][0]['options'][0]['options'][0]['metadata_tags'] == []
def test_option_metadata_tags_ok():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
metadata_tags:
- test:foo
"""
)
spec.load()
assert spec.data['files'][0]['options'][0]['options'][0]['metadata_tags'] == ['test:foo']
def test_option_no_value_nor_options():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
"""
)
spec.load()
assert not spec.errors
def test_option_value_and_options():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
options:
value:
"""
)
spec.load()
assert (
'test, test.yaml, instances, foo: An option cannot contain both `value` and `options` attributes'
) in spec.errors
def test_option_value_not_map():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
- foo
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Attribute `value` must be a mapping object' in spec.errors
def test_option_secret_not_boolean():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
secret: nope
value:
type: string
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Attribute `secret` must be true or false' in spec.errors
def test_option_secret_default():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: string
"""
)
spec.load()
assert spec.data['files'][0]['options'][0]['options'][0]['secret'] is False
def test_value_no_type():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
foo: bar
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Every value must contain a `type` attribute' in spec.errors
def test_value_type_string_valid_basic():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: string
"""
)
spec.load()
assert not spec.errors
def test_value_type_not_string():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: 123
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Attribute `type` must be a string' in spec.errors
def test_value_type_string_example_default_no_depth():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: string
"""
)
spec.load()
assert spec.data['files'][0]['options'][0]['options'][0]['value']['example'] == '<FOO>'
def test_value_type_string_example_default_nested():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: array
items:
type: string
"""
)
spec.load()
assert not spec.errors
assert 'example' not in spec.data['files'][0]['options'][0]['options'][0]['value']['items']
def test_value_type_string_example_not_string():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: string
example: 123
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Attribute `example` for `type` string must be a string' in spec.errors
def test_value_type_string_example_valid():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: string
example: bar
"""
)
spec.load()
assert not spec.errors
def test_value_type_string_pattern_not_string():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: string
pattern: 123
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Attribute `pattern` for `type` string must be a string' in spec.errors
def test_value_type_integer_valid_basic():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: integer
"""
)
spec.load()
assert not spec.errors
def test_value_type_integer_example_default_no_depth():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: integer
"""
)
spec.load()
assert spec.data['files'][0]['options'][0]['options'][0]['value']['example'] == '<FOO>'
def test_value_type_integer_example_default_nested():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: array
items:
type: integer
"""
)
spec.load()
assert not spec.errors
assert 'example' not in spec.data['files'][0]['options'][0]['options'][0]['value']['items']
def test_value_type_integer_example_not_number():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: integer
example: bar
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Attribute `example` for `type` integer must be a number' in spec.errors
def test_value_type_integer_example_valid():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: integer
example: 5
"""
)
spec.load()
assert not spec.errors
def test_value_type_integer_correct_minimum():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: integer
minimum: 5
"""
)
spec.load()
assert not spec.errors
def test_value_type_integer_incorrect_minimum():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: integer
minimum: "5"
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Attribute `minimum` for `type` integer must be a number' in spec.errors
def test_value_type_integer_correct_maximum():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: integer
maximum: 5
"""
)
spec.load()
assert not spec.errors
def test_value_type_integer_incorrect_maximum():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: integer
maximum: "5"
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Attribute `maximum` for `type` integer must be a number' in spec.errors
def test_value_type_integer_correct_minimum_maximum():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: integer
minimum: 4
maximum: 5
"""
)
spec.load()
assert not spec.errors
def test_value_type_integer_incorrect_minimum_maximum():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: integer
minimum: 5
maximum: 5
"""
)
spec.load()
assert (
'test, test.yaml, instances, foo: Attribute `maximum` for '
'`type` integer must be greater than attribute `minimum`'
) in spec.errors
def test_value_type_number_valid_basic():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: number
"""
)
spec.load()
assert not spec.errors
def test_value_type_number_example_default_no_depth():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: number
"""
)
spec.load()
assert spec.data['files'][0]['options'][0]['options'][0]['value']['example'] == '<FOO>'
def test_value_type_number_example_default_nested():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: array
items:
type: number
"""
)
spec.load()
assert not spec.errors
assert 'example' not in spec.data['files'][0]['options'][0]['options'][0]['value']['items']
def test_value_type_number_example_not_number():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: number
example: bar
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Attribute `example` for `type` number must be a number' in spec.errors
def test_value_type_number_example_valid():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: number
example: 5
"""
)
spec.load()
assert not spec.errors
def test_value_type_number_correct_minimum():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: number
minimum: 5
"""
)
spec.load()
assert not spec.errors
def test_value_type_number_incorrect_minimum():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: number
minimum: "5"
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Attribute `minimum` for `type` number must be a number' in spec.errors
def test_value_type_number_correct_maximum():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: number
maximum: 5
"""
)
spec.load()
assert not spec.errors
def test_value_type_number_incorrect_maximum():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: number
maximum: "5"
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Attribute `maximum` for `type` number must be a number' in spec.errors
def test_value_type_number_correct_minimum_maximum():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: number
minimum: 4
maximum: 5
"""
)
spec.load()
assert not spec.errors
def test_value_type_number_incorrect_minimum_maximum():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: number
minimum: 5
maximum: 5
"""
)
spec.load()
assert (
'test, test.yaml, instances, foo: Attribute `maximum` for '
'`type` number must be greater than attribute `minimum`'
) in spec.errors
def test_value_type_boolean_example_default_no_depth():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: boolean
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Every boolean must contain a default `example` attribute' in spec.errors
def test_value_type_boolean_example_default_nested():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: array
items:
type: boolean
"""
)
spec.load()
assert not spec.errors
assert 'example' not in spec.data['files'][0]['options'][0]['options'][0]['value']['items']
def test_value_type_boolean_example_not_boolean():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: boolean
example: "true"
"""
)
spec.load()
assert (
'test, test.yaml, instances, foo: Attribute `example` for `type` boolean must be true or false'
) in spec.errors
def test_value_type_boolean_example_valid():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: boolean
example: true
"""
)
spec.load()
assert not spec.errors
def test_value_type_array_example_default_no_depth():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: array
items:
type: string
"""
)
spec.load()
assert spec.data['files'][0]['options'][0]['options'][0]['value']['example'] == []
def test_value_type_array_example_default_nested():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: array
items:
type: array
items:
type: string
"""
)
spec.load()
assert not spec.errors
assert 'example' not in spec.data['files'][0]['options'][0]['options'][0]['value']['items']
def test_value_type_array_example_not_array():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: array
example: 123
items:
type: string
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Attribute `example` for `type` array must be an array' in spec.errors
def test_value_type_array_example_valid():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: array
example:
- foo
- bar
items:
type: string
"""
)
spec.load()
assert not spec.errors
def test_value_type_array_no_items():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: array
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Every array must contain an `items` attribute' in spec.errors
def test_value_type_array_items_not_array():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: array
items: 123
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Attribute `items` for `type` array must be a mapping object' in spec.errors
def test_value_type_array_unique_items_not_boolean():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: array
items:
type: string
uniqueItems: yup
"""
)
spec.load()
assert (
'test, test.yaml, instances, foo: Attribute `uniqueItems` for `type` array must be true or false'
) in spec.errors
def test_value_type_array_correct_min_items():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: array
items:
type: string
minItems: 5
"""
)
spec.load()
assert not spec.errors
def test_value_type_array_incorrect_min_items():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: array
items:
type: string
minItems: 5.5
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Attribute `minItems` for `type` array must be an integer' in spec.errors
def test_value_type_array_correct_max_items():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: array
items:
type: string
maxItems: 5
"""
)
spec.load()
assert not spec.errors
def test_value_type_array_incorrect_max_items():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: array
items:
type: string
maxItems: 5.5
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Attribute `maxItems` for `type` array must be an integer' in spec.errors
def test_value_type_array_correct_min_items_max_items():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: array
items:
type: string
minItems: 4
maxItems: 5
"""
)
spec.load()
assert not spec.errors
def test_value_type_array_incorrect_min_items_max_items():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: array
items:
type: array
items:
type: string
minItems: 5
maxItems: 5
"""
)
spec.load()
assert (
'test, test.yaml, instances, foo: Attribute `maxItems` for '
'`type` array must be greater than attribute `minItems`'
) in spec.errors
def test_value_type_object_example_default_no_depth():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: object
"""
)
spec.load()
assert not spec.errors
assert spec.data['files'][0]['options'][0]['options'][0]['value']['example'] == {}
def test_value_type_object_example_default_nested():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: array
items:
type: object
"""
)
spec.load()
assert not spec.errors
assert 'example' not in spec.data['files'][0]['options'][0]['options'][0]['value']['items']
def test_value_type_object_example_not_map():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: object
example: 123
"""
)
spec.load()
assert (
'test, test.yaml, instances, foo: Attribute `example` for `type` object must be a mapping object'
) in spec.errors
def test_value_type_object_example_valid():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: object
example:
foo: bar
items:
type: string
"""
)
spec.load()
assert not spec.errors
def test_value_type_object_required_not_array():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: object
required: {}
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Attribute `required` for `type` object must be an array' in spec.errors
def test_value_type_object_required_empty():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: object
required: []
"""
)
spec.load()
assert (
'test, test.yaml, instances, foo: Remove attribute `required` for `type` object if no properties are required'
) in spec.errors
def test_value_type_object_required_not_unique():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: object
required:
- foo
- foo
"""
)
spec.load()
assert (
'test, test.yaml, instances, foo: All entries in attribute `required` for `type` object must be unique'
) in spec.errors
def test_value_type_object_properties_default():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: object
"""
)
spec.load()
assert not spec.errors
assert spec.data['files'][0]['options'][0]['options'][0]['value']['properties'] == []
def test_value_type_object_properties_not_array():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: object
properties: {}
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Attribute `properties` for `type` object must be an array' in spec.errors
def test_value_type_object_properties_entry_not_map():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: object
properties:
- foo
"""
)
spec.load()
assert (
'test, test.yaml, instances, foo: Every entry in `properties` for `type` object must be a mapping object'
) in spec.errors
def test_value_type_object_properties_entry_no_name():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: object
properties:
- type: string
"""
)
spec.load()
assert (
'test, test.yaml, instances, foo: Every entry in `properties` for `type` object must contain a `name` attribute'
) in spec.errors
def test_value_type_object_properties_entry_name_not_string():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: object
properties:
- name: 123
type: string
"""
)
spec.load()
assert 'test, test.yaml, instances, foo: Attribute `name` for `type` object must be a string' in spec.errors
def test_value_type_object_properties_valid():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: object
properties:
- name: bar
type: string
"""
)
spec.load()
assert not spec.errors
def test_value_type_object_properties_entry_name_not_unique():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: object
properties:
- name: bar
type: string
- name: bar
type: string
"""
)
spec.load()
assert (
'test, test.yaml, instances, foo: All entries in attribute '
'`properties` for `type` object must have unique names'
) in spec.errors
def test_value_type_object_properties_required_not_met():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: object
properties:
- name: bar
type: string
required:
- foo
- bar
"""
)
spec.load()
assert (
'test, test.yaml, instances, foo: All entries in attribute `required` '
'for `type` object must be defined in the`properties` attribute'
) in spec.errors
def test_value_type_unknown():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: custom
"""
)
spec.load()
assert (
"test, test.yaml, instances, foo: Unknown type `custom`, "
"valid types are array | boolean | integer | number | object | string" in spec.errors
)
def test_option_no_section():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: ad_identifiers
description: words
value:
type: array
items:
type: string
"""
)
spec.load()
assert not spec.errors
def test_multiple_default():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: foo
description: words
options:
- name: bar
description: words
value:
type: string
"""
)
spec.load()
assert spec.data['files'][0]['options'][0]['multiple'] is False
def test_multiple_not_boolean():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: foo
description: words
multiple: nope
options:
- name: bar
description: words
value:
type: string
"""
)
spec.load()
assert 'test, test.yaml, foo: Attribute `multiple` must be true or false' in spec.errors
def test_template_unknown():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: string
- template: unknown
- name: bar
description: words
value:
type: string
"""
)
spec.load()
assert 'test, test.yaml, instances, option #2: Template `unknown` does not exist' in spec.errors
def test_template_mapping():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: string
- template: instances/tags
- name: bar
description: words
value:
type: string
"""
)
spec.load()
assert not spec.errors
options = spec.data['files'][0]['options'][0]['options']
assert options[0]['name'] == 'foo'
assert options[1] == {
'name': 'tags',
'value': {'example': ['<KEY_1>:<VALUE_1>', '<KEY_2>:<VALUE_2>'], 'type': 'array', 'items': {'type': 'string'}},
'description': (
'A list of tags to attach to every metric and service check emitted by this instance.\n'
'\n'
'Learn more about tagging at https://docs.datadoghq.com/tagging\n'
),
# Defaults should be post-populated
'required': False,
'hidden': False,
'deprecation': {},
'metadata_tags': [],
'secret': False,
}
assert options[2]['name'] == 'bar'
def test_template_array():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: string
- template: instances/http
- name: bar
description: words
value:
type: string
"""
)
spec.load()
assert not spec.errors
options = spec.data['files'][0]['options'][0]['options']
option_names = [option['name'] for option in options]
assert option_names == [
'foo',
'proxy',
'skip_proxy',
'auth_type',
'username',
'password',
'ntlm_domain',
'kerberos_auth',
'kerberos_delegate',
'kerberos_force_initiate',
'kerberos_hostname',
'kerberos_principal',
'kerberos_keytab',
'aws_region',
'aws_host',
'aws_service',
'tls_verify',
'tls_ignore_warning',
'tls_cert',
'tls_private_key',
'tls_ca_cert',
'headers',
'extra_headers',
'timeout',
'connect_timeout',
'read_timeout',
'log_requests',
'persist_connections',
'bar',
]
def test_template_array_empty():
with TempDir() as d:
template_file = path_join(d, 'empty.yaml')
ensure_parent_dir_exists(template_file)
write_file(template_file, '[]')
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: string
- template: empty
- name: bar
description: words
value:
type: string
""",
template_paths=[d],
)
spec.load()
assert 'test, test.yaml, instances, option #2: Template refers to an empty array' in spec.errors
def test_template_array_primitive():
with TempDir() as d:
template_file = path_join(d, 'primitive.yaml')
ensure_parent_dir_exists(template_file)
write_file(template_file, '- foo')
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: string
- template: primitive
- name: bar
description: words
value:
type: string
""",
template_paths=[d],
)
spec.load()
assert 'test, test.yaml, instances, option #2: Template option must be a mapping object' in spec.errors
def test_template_primitive():
spec = get_spec(
"""
name: foo
version: 0.0.0
files:
- name: test.yaml
example_name: test.yaml.example
options:
- name: instances
description: words
options:
- name: foo
description: words
value:
type: string
- template: instances/http.proxy.description
- name: bar
description: words
value:
type: string
"""
)
spec.load()
assert 'test, test.yaml, instances, option #2: Template does not refer to a mapping object nor array' in spec.errors
|
py | b4015669b53ab9fda44fefae56fea40cc2b22425 | import pytest
import uuid
from django.core.exceptions import ValidationError
from django.test.utils import override_settings
from unittest import mock
from olympia.amo.tests import TestCase, addon_factory
from olympia.constants.scanners import (
ABORTED,
ABORTING,
COMPLETED,
CUSTOMS,
FALSE_POSITIVE,
MAD,
NEW,
RUNNING,
SCANNERS,
SCHEDULED,
UNKNOWN,
WAT,
YARA,
)
from olympia.files.models import FileUpload
from olympia.scanners.models import (
ImproperScannerQueryRuleStateError, ScannerQueryResult, ScannerQueryRule,
ScannerResult, ScannerRule
)
class FakeYaraMatch(object):
def __init__(self, rule, tags, meta):
self.rule = rule
self.tags = tags
self.meta = meta
class TestScannerResultMixin:
__test__ = False
def create_customs_result(self):
return self.model.objects.create(scanner=CUSTOMS)
def create_wat_result(self):
return self.model.objects.create(scanner=WAT)
def create_mad_result(self):
return self.model.objects.create(scanner=MAD)
def create_fake_yara_match(
self, rule='some-yara-rule', tags=None, description='some description',
filename='some/file.js'
):
return FakeYaraMatch(
rule=rule,
tags=tags or [],
meta={
'description': description,
'filename': filename,
}
)
def create_yara_result(self):
return self.model.objects.create(scanner=YARA)
def test_add_yara_result(self):
result = self.create_yara_result()
match = self.create_fake_yara_match()
result.add_yara_result(
rule=match.rule, tags=match.tags, meta=match.meta
)
assert result.results == [
{'rule': match.rule, 'tags': match.tags, 'meta': match.meta}
]
def test_save_set_has_matches(self):
result = self.create_yara_result()
rule = self.rule_model.objects.create(
name='some rule name', scanner=result.scanner
)
result.has_matches = None
result.save()
assert result.has_matches is False
result.has_matches = None
result.results = [{'rule': rule.name}] # Fake match
result.save()
assert result.has_matches is True
def test_save_ignores_disabled_rules(self):
result = self.create_yara_result()
rule = self.rule_model.objects.create(
name='some rule name', scanner=result.scanner, is_active=False
)
result.has_matches = None
result.results = [{'rule': rule.name}] # Fake match
result.save()
assert result.has_matches is False
def test_extract_rule_names_with_no_yara_results(self):
result = self.create_yara_result()
assert result.extract_rule_names() == []
def test_extract_rule_names_with_yara_results(self):
result = self.create_yara_result()
rule1 = 'rule-1'
rule2 = 'rule-2'
for rule in [rule1, rule2]:
match = self.create_fake_yara_match(rule=rule)
result.add_yara_result(
rule=match.rule, tags=match.tags, meta=match.meta
)
assert result.extract_rule_names() == [rule1, rule2]
def test_extract_rule_names_returns_unique_list(self):
result = self.create_yara_result()
rule1 = 'rule-1'
rule2 = 'rule-2'
for rule in [rule1, rule2, rule1, rule2]:
match = self.create_fake_yara_match(rule=rule)
result.add_yara_result(
rule=match.rule, tags=match.tags, meta=match.meta
)
assert result.extract_rule_names() == [rule1, rule2]
def test_extract_rule_names_returns_empty_list_for_unsupported_scanner(
self
):
result = self.create_wat_result()
assert result.extract_rule_names() == []
def test_extract_rule_names_with_no_customs_matched_rules_attribute(self):
result = self.create_customs_result()
result.results = {}
assert result.extract_rule_names() == []
def test_extract_rule_names_with_no_customs_results(self):
result = self.create_customs_result()
result.results = {'matchedRules': []}
assert result.extract_rule_names() == []
def test_extract_rule_names_with_customs_results(self):
result = self.create_customs_result()
rules = ['rule-1', 'rule-2']
result.results = {'matchedRules': rules}
assert result.extract_rule_names() == rules
def test_get_scanner_name(self):
result = self.create_customs_result()
assert result.get_scanner_name() == 'customs'
def test_get_pretty_results(self):
result = self.create_customs_result()
result.results = {'foo': 'bar'}
assert result.get_pretty_results() == '{\n "foo": "bar"\n}'
def test_get_customs_git_repository(self):
result = self.create_customs_result()
git_repo = 'some git repo'
with override_settings(CUSTOMS_GIT_REPOSITORY=git_repo):
assert result.get_git_repository() == git_repo
def test_get_yara_git_repository(self):
result = self.create_yara_result()
git_repo = 'some git repo'
with override_settings(YARA_GIT_REPOSITORY=git_repo):
assert result.get_git_repository() == git_repo
def test_get_git_repository_returns_none_if_not_supported(self):
result = self.create_wat_result()
assert result.get_git_repository() is None
def test_can_report_feedback(self):
result = self.create_customs_result()
assert result.can_report_feedback()
def test_can_report_feedback_is_false_when_state_is_not_unknown(self):
result = self.create_customs_result()
result.state = FALSE_POSITIVE
assert not result.can_report_feedback()
def test_can_report_feedback_is_false_when_scanner_is_wat(self):
result = self.create_wat_result()
assert not result.can_report_feedback()
def test_can_report_feedback_is_false_when_scanner_is_mad(self):
result = self.create_mad_result()
assert not result.can_report_feedback()
def test_can_revert_feedback_for_triaged_result(self):
result = self.create_yara_result()
result.state = FALSE_POSITIVE
assert result.can_revert_feedback()
def test_cannot_revert_feedback_for_untriaged_result(self):
result = self.create_yara_result()
assert result.state == UNKNOWN
assert not result.can_revert_feedback()
def test_get_files_by_matched_rules_for_wat(self):
result = self.create_wat_result()
assert result.get_files_by_matched_rules() == {}
def test_get_files_by_matched_rules_with_no_yara_results(self):
result = self.create_yara_result()
assert result.get_files_by_matched_rules() == {}
def test_get_files_by_matched_rules_for_yara(self):
result = self.create_yara_result()
rule1 = 'rule-1'
file1 = 'file/1.js'
match1 = self.create_fake_yara_match(rule=rule1, filename=file1)
result.add_yara_result(
rule=match1.rule, tags=match1.tags, meta=match1.meta
)
rule2 = 'rule-2'
file2 = 'file/2.js'
match2 = self.create_fake_yara_match(rule=rule2, filename=file2)
result.add_yara_result(
rule=match2.rule, tags=match2.tags, meta=match2.meta
)
# rule1 with file2
match3 = self.create_fake_yara_match(rule=rule1, filename=file2)
result.add_yara_result(
rule=match3.rule, tags=match3.tags, meta=match3.meta
)
assert result.get_files_by_matched_rules() == {
rule1: [file1, file2],
rule2: [file2],
}
def test_get_files_by_matched_rules_no_file_somehow(self):
result = self.create_yara_result()
rule = self.rule_model.objects.create(name='foobar', scanner=YARA)
result.add_yara_result(rule=rule.name)
result.save()
assert result.get_files_by_matched_rules() == {
'foobar': ['???'],
}
def test_get_files_by_matched_rules_with_no_customs_results(self):
result = self.create_customs_result()
result.results = {'matchedRules': []}
assert result.get_files_by_matched_rules() == {}
def test_get_files_by_matched_rules_for_customs(self):
result = self.create_customs_result()
file1 = 'file/1.js'
rule1 = 'rule1'
file2 = 'file/2.js'
rule2 = 'rule2'
file3 = 'file/3.js'
rule3 = 'rule3'
file4 = 'file/4.js'
result.results = {
'scanMap': {
file1: {
rule1: {
'RULE_HAS_MATCHED': True,
},
rule2: {},
# no rule3
},
file2: {
rule1: {
'RULE_HAS_MATCHED': False,
},
rule2: {},
# no rule3
},
file3: {
rule1: {},
rule2: {},
rule3: {
'RULE_HAS_MATCHED': True,
},
},
file4: {
# no rule1 or rule2
rule3: {
'RULE_HAS_MATCHED': True,
},
},
}
}
assert result.get_files_by_matched_rules() == {
rule1: [file1],
rule3: [file3, file4],
}
class TestScannerResult(TestScannerResultMixin, TestCase):
__test__ = True
model = ScannerResult
rule_model = ScannerRule
def create_file_upload(self):
addon = addon_factory()
return FileUpload.objects.create(addon=addon)
def create_customs_result(self):
upload = self.create_file_upload()
return self.model.objects.create(upload=upload, scanner=CUSTOMS)
def create_wat_result(self):
upload = self.create_file_upload()
return self.model.objects.create(upload=upload, scanner=WAT)
def create_yara_result(self):
upload = self.create_file_upload()
return self.model.objects.create(upload=upload, scanner=YARA)
def test_create(self):
upload = self.create_file_upload()
result = self.model.objects.create(upload=upload, scanner=CUSTOMS)
assert result.id is not None
assert result.upload == upload
assert result.scanner == CUSTOMS
assert result.results == []
assert result.version is None
assert result.has_matches is False
def test_create_different_entries_for_a_single_upload(self):
upload = self.create_file_upload()
customs_result = self.model.objects.create(
upload=upload, scanner=CUSTOMS
)
wat_result = self.model.objects.create(upload=upload, scanner=WAT)
assert customs_result.scanner == CUSTOMS
assert wat_result.scanner == WAT
def test_upload_constraint(self):
upload = self.create_file_upload()
result = self.model.objects.create(upload=upload, scanner=CUSTOMS)
upload.delete()
result.refresh_from_db()
assert result.upload is None
class TestScannerQueryResult(TestScannerResultMixin, TestCase):
__test__ = True
model = ScannerQueryResult
rule_model = ScannerQueryRule
class TestScannerRuleMixin:
__test__ = False
def test_clean_raises_for_yara_rule_without_a_definition(self):
rule = self.model(name='some_rule', scanner=YARA)
with pytest.raises(ValidationError, match=r'should have a definition'):
rule.clean()
def test_clean_raises_for_yara_rule_without_same_rule_name(self):
rule = self.model(
name='some_rule', scanner=YARA, definition='rule x {}'
)
with pytest.raises(ValidationError, match=r'should match the name of'):
rule.clean()
def test_clean_raises_when_yara_rule_has_two_rules(self):
rule = self.model(
name='some_rule',
scanner=YARA,
definition='rule some_rule {} rule foo {}',
)
with pytest.raises(ValidationError, match=r'Only one Yara rule'):
rule.clean()
def test_clean_raises_when_yara_rule_is_invalid(self):
rule = self.model(
name='some_rule',
scanner=YARA,
# Invalid because there is no `condition`.
definition='rule some_rule {}',
)
with pytest.raises(
ValidationError, match=r'The definition is not valid: line 1'
):
rule.clean()
def test_clean_supports_our_external_variables(self):
externals = self.model.get_yara_externals()
assert externals
conditions = ' and '.join(externals)
rule = self.model(
name='some_rule',
scanner=YARA,
definition='rule some_rule { condition: %s}' % conditions,
)
rule.clean() # Shouldn't raise, the externals are automatically added.
@mock.patch('yara.compile')
def test_clean_raises_generic_error_when_yara_compile_failed(
self, yara_compile_mock
):
rule = self.model(
name='some_rule',
scanner=YARA,
definition='rule some_rule { condition: true }'
)
yara_compile_mock.side_effect = Exception()
with pytest.raises(ValidationError, match=r'An error occurred'):
rule.clean()
class TestScannerRule(TestScannerRuleMixin, TestCase):
__test__ = True
model = ScannerRule
def test_scanner_choices(self):
field = self.model._meta.get_field('scanner')
assert field.choices == SCANNERS.items()
class TestScannerQueryRule(TestScannerRuleMixin, TestCase):
__test__ = True
model = ScannerQueryRule
def test_scanner_choices(self):
# Code search only supports yara for now.
field = self.model._meta.get_field('scanner')
assert field.choices == ((YARA, 'yara'),)
assert field.default == YARA
@mock.patch('olympia.amo.celery.app.GroupResult.restore')
def test_completed_task_count(self, restore_mock):
restore_mock.return_value.completed_count.return_value = 42
rule = ScannerQueryRule(
state=RUNNING, celery_group_result_id=str(uuid.uuid4()))
assert rule._get_completed_tasks_count() == 42
restore_mock.return_value = None
assert rule._get_completed_tasks_count() is None
def test_completed_task_count_no_group_id(self):
rule = ScannerQueryRule(state=RUNNING, celery_group_result_id=None)
assert rule._get_completed_tasks_count() is None
@mock.patch.object(ScannerQueryRule, '_get_completed_tasks_count')
def test_completion_rate(self, _get_completed_tasks_count_mock):
rule = ScannerQueryRule(state=RUNNING, task_count=10000)
_get_completed_tasks_count_mock.return_value = None
assert rule.completion_rate() is None
_get_completed_tasks_count_mock.return_value = 0
assert rule.completion_rate() == '0.00%'
_get_completed_tasks_count_mock.return_value = 1000
assert rule.completion_rate() == '10.00%'
_get_completed_tasks_count_mock.return_value = 3333
assert rule.completion_rate() == '33.33%'
_get_completed_tasks_count_mock.return_value = 10000
assert rule.completion_rate() == '100.00%'
rule.task_count = 0
assert rule.completion_rate() is None
def test_completion_rate_not_running(self):
rule = ScannerQueryRule(state=NEW, task_count=10000)
assert rule.completion_rate() is None
rule.state = SCHEDULED
assert rule.completion_rate() is None
rule.state = ABORTING
assert rule.completion_rate() is None
rule.state = ABORTED
assert rule.completion_rate() is None
@pytest.mark.django_db
@pytest.mark.parametrize('current_state,target_state', [
(NEW, SCHEDULED),
(SCHEDULED, RUNNING),
(NEW, ABORTING), # Technically not exposed through the admin yet.
(SCHEDULED, ABORTING), # Technically not exposed through the admin yet.
(RUNNING, ABORTING),
(ABORTING, ABORTED),
(RUNNING, COMPLETED),
])
def test_query_rule_change_state_to_valid(current_state, target_state):
rule = ScannerQueryRule(name='some_rule', scanner=YARA)
rule.state = current_state
rule.change_state_to(target_state)
@pytest.mark.django_db
@pytest.mark.parametrize('current_state,target_state', [
(NEW, RUNNING), # Should go through SCHEDULED first to work.
(NEW, ABORTED), # Should go through ABORTING first to work.
(NEW, COMPLETED), # Should go through RUNNING first to work.
(SCHEDULED, NEW), # Can't reset to NEW.
(SCHEDULED, ABORTED), # Should go through ABORTING first to work.
(SCHEDULED, COMPLETED), # Should go through RUNNING first to work.
(RUNNING, NEW), # Can't reset to NEW.
(RUNNING, ABORTED), # Should go through ABORTING first to work.
(RUNNING, SCHEDULED), # Can't reset to SCHEDULED
(ABORTING, NEW), # Can't reset to NEW.
(ABORTING, RUNNING), # Can't reset to RUNNING
(ABORTING, SCHEDULED), # Can't reset to SCHEDULED
(ABORTED, NEW), # Can't reset to NEW.
(ABORTED, RUNNING), # Can't reset to RUNNING.
(ABORTED, SCHEDULED), # Can't reset to SCHEDULED
(COMPLETED, NEW), # Can't reset to... anything, it's completed!
(COMPLETED, RUNNING), # As above.
(COMPLETED, ABORTED), # As above.
(COMPLETED, ABORTING), # As above.
(COMPLETED, SCHEDULED), # As above.
])
def test_query_rule_change_state_to_invalid(current_state, target_state):
rule = ScannerQueryRule(name='some_rule', scanner=YARA)
rule.state = current_state
with pytest.raises(ImproperScannerQueryRuleStateError):
rule.change_state_to(target_state)
|
py | b4015782d911a16350a41f2a47d28be3711fdffe | from spike import ColorSensor
from uartremote import *
u=UartRemote(port.A)
c=ColorSensor("B")
u.send_receive('neoinit',8)
def getcolor():
r=c.get_red()>>3
g=c.get_green()>>3
b=c.get_blue()>>3
return [r,g,b]
while True:
q=u.send_receive('neosa','B',[0,8]+getcolor()*8)
q=u.send_receive('neow')
|
py | b40157976b3d2dfa62add8eba2a1acdb5772b001 | """
TODO
- add description about the file.
"""
import pygame
from Board import Board
class Screen(Board):
"""
    sets the caption and stores the variables used to initialize the window (width and height)
:return: none
"""
def __init__(self):
super().__init__()
# create instantiation of board
self.board = Board()
# set caption
pygame.display.set_caption("Chess")
# set icon
icon = pygame.image.load('chess_png/icon.png')
pygame.display.set_icon(icon)
# initial screen
self._width = self.width_chess_board
self._height = self.height_chess_board
self._screen = pygame.display.set_mode((self._width, self._height))
@staticmethod
def on():
"""
initialize pygame
:return: none
"""
pygame.init()
def update(self):
"""
screen and sprite_group update
:return: none
"""
self.board.update_sprites(self._screen)
self.board.update_sprites(self._screen)
pygame.display.flip()
@staticmethod
def quit():
"""
screen quit
:return: none
"""
pygame.display.quit()
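# Hedged usage sketch (not part of the original file; the event loop below is an
# assumption about how this class could be driven, kept as comments only):
#
# if __name__ == '__main__':
#     Screen.on()
#     screen = Screen()
#     running = True
#     while running:
#         for event in pygame.event.get():
#             if event.type == pygame.QUIT:
#                 running = False
#         screen.update()
#     Screen.quit()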
|
py | b40157bfe2080eb0b2301254e1157fa0adf52d9b | from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from django.contrib.auth.models import (Group, User,
SiteProfileNotAvailable, UserManager)
@override_settings(USE_TZ=False, AUTH_PROFILE_MODULE='')
class ProfileTestCase(TestCase):
def test_site_profile_not_available(self):
user = User.objects.create(username='testclient')
# calling get_profile without AUTH_PROFILE_MODULE set
del settings.AUTH_PROFILE_MODULE
with self.assertRaisesRegexp(SiteProfileNotAvailable,
"You need to set AUTH_PROFILE_MODULE in your project"):
user.get_profile()
# Bad syntax in AUTH_PROFILE_MODULE:
settings.AUTH_PROFILE_MODULE = 'foobar'
with self.assertRaisesRegexp(SiteProfileNotAvailable,
"app_label and model_name should be separated by a dot"):
user.get_profile()
# module that doesn't exist
settings.AUTH_PROFILE_MODULE = 'foo.bar'
with self.assertRaisesRegexp(SiteProfileNotAvailable,
"Unable to load the profile model"):
user.get_profile()
@override_settings(USE_TZ=False)
class NaturalKeysTestCase(TestCase):
fixtures = ['authtestdata.json']
def test_user_natural_key(self):
staff_user = User.objects.get(username='staff')
self.assertEqual(User.objects.get_by_natural_key('staff'), staff_user)
self.assertEqual(staff_user.natural_key(), ('staff',))
def test_group_natural_key(self):
users_group = Group.objects.create(name='users')
self.assertEqual(Group.objects.get_by_natural_key('users'), users_group)
@override_settings(USE_TZ=False)
class LoadDataWithoutNaturalKeysTestCase(TestCase):
fixtures = ['regular.json']
def test_user_is_created_and_added_to_group(self):
user = User.objects.get(username='my_username')
group = Group.objects.get(name='my_group')
self.assertEqual(group, user.groups.get())
@override_settings(USE_TZ=False)
class LoadDataWithNaturalKeysTestCase(TestCase):
fixtures = ['natural.json']
def test_user_is_created_and_added_to_group(self):
user = User.objects.get(username='my_username')
group = Group.objects.get(name='my_group')
self.assertEqual(group, user.groups.get())
class UserManagerTestCase(TestCase):
def test_create_user(self):
email_lowercase = '[email protected]'
user = User.objects.create_user('user', email_lowercase)
self.assertEqual(user.email, email_lowercase)
self.assertEqual(user.username, 'user')
self.assertEqual(user.password, '!')
def test_create_user_email_domain_normalize_rfc3696(self):
# According to http://tools.ietf.org/html/rfc3696#section-3
# the "@" symbol can be part of the local part of an email address
returned = UserManager.normalize_email(r'Abc\@[email protected]')
self.assertEqual(returned, r'Abc\@[email protected]')
def test_create_user_email_domain_normalize(self):
returned = UserManager.normalize_email('[email protected]')
self.assertEqual(returned, '[email protected]')
def test_create_user_email_domain_normalize_with_whitespace(self):
returned = UserManager.normalize_email('email\ [email protected]')
self.assertEqual(returned, 'email\ [email protected]')
def test_empty_username(self):
self.assertRaisesMessage(ValueError,
'The given username must be set',
User.objects.create_user, username='')
|
py | b40158dd2dee1ab35da9d3c2179d1dec5ba16cda | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import absolute_import
import ew as ew_core
import ew.jinja2_ew as ew
from allura.lib import validators as V
from .form_fields import AutoResizeTextarea
from .forms import ForgeForm
class OAuthApplicationForm(ForgeForm):
submit_text = 'Register new application'
style = 'wide'
class fields(ew_core.NameList):
application_name = ew.TextField(label='Application Name',
validator=V.UniqueOAuthApplicationName())
application_description = AutoResizeTextarea(
label='Application Description')
class OAuthRevocationForm(ForgeForm):
submit_text = 'Revoke Access'
fields = []
class fields(ew_core.NameList):
_id = ew.HiddenField()
|
py | b40158e20535e82350a4c1b5c39a19e2f85acf51 | # qubit number=4
# total number=35
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += X(3) # number=1
prog += H(0) # number=18
prog += X(1) # number=28
prog += CZ(3,0) # number=19
prog += H(2) # number=24
prog += H(0) # number=20
prog += RX(-1.8378317023500288,1) # number=25
prog += Z(3) # number=14
prog += CNOT(3,0) # number=15
prog += H(1) # number=2
prog += H(3) # number=16
prog += H(2) # number=3
prog += H(3) # number=4
prog += H(0) # number=5
prog += H(1) # number=6
prog += H(2) # number=7
prog += H(3) # number=8
prog += H(0) # number=9
prog += H(3) # number=29
prog += CZ(0,3) # number=30
prog += H(3) # number=31
prog += X(3) # number=22
prog += H(3) # number=32
prog += CZ(0,3) # number=33
prog += H(3) # number=34
prog += Z(1) # number=26
prog += X(2) # number=11
prog += X(2) # number=12
prog += Z(1) # number=27
# circuit end
return prog
def summarise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil2677.csv","w")
    print(summarise_results(bitstrings), file=writefile)
writefile.close()
|
py | b40159d7262b459a41514a884adf15963bcd3476 | import json
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework import generics, mixins, permissions
from rest_framework.authentication import SessionAuthentication
from status.models import StatusModel
from status.api.serializers import StatusSerializer
from accounts.api.permission import IsOwnerOrReadOnly
from django.shortcuts import get_object_or_404
from .utils import is_json
"""
Class-based views for Create and List, plus Update, Delete, and Retrieve.
"""
class StatusApiView(mixins.CreateModelMixin, generics.ListAPIView):
permission_classes = [permissions.IsAuthenticated, IsOwnerOrReadOnly]
# authentication_classes = [SessionAuthentication]
serializer_class = StatusSerializer
def get_queryset(self):
queryset = StatusModel.objects.all()
query = self.request.GET.get("q")
# print(self.request.user)
if query is not None:
queryset = queryset.filter(content__icontains=query)
return queryset
def post(self, request, *args, **kwargs):
return self.create(request, *args, **kwargs)
def perform_create(self, serializer):
serializer.save(user=self.request.user)
class StatusApiDetailView(mixins.DestroyModelMixin, mixins.UpdateModelMixin, generics.RetrieveAPIView):
# authentication_classes = []
permission_classes = [permissions.IsAuthenticated]
serializer_class = StatusSerializer
queryset = StatusModel.objects.all()
def put(self, request, *args, **kwargs):
return self.update(request, *args, **kwargs)
def detail(self, request, *args, **kwargs):
return self.retrieve(request, *args, **kwargs)
def delete(self, request, *args, **kwargs):
return self.destroy(request, *args, **kwargs)
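# Illustrative only: a minimal URL configuration these views could be wired into.
# The module path and route names below are assumptions, not part of this project.
#
# from django.urls import path
# from .views import StatusApiView, StatusApiDetailView
#
# urlpatterns = [
#     path('status/', StatusApiView.as_view()),                 # GET lists, POST creates
#     path('status/<int:pk>/', StatusApiDetailView.as_view()),  # GET, PUT, DELETE
# ]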
|
py | b40159e65f3bedbe36d29b2d66a67bc3dc6d2e4d | class Solution(object):
def subarraySum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
presum = {0: 1}
s = res = 0
for num in nums:
s += num
res += presum.get(s - k, 0)
presum[s] = presum.get(s, 0) + 1
return res
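# Minimal usage sketch (the sample input is an assumption added for illustration):
# each prefix sum s contributes presum[s - k] subarrays ending at that position.
if __name__ == '__main__':
    # [1, 1, 1] with k = 2 has two subarrays summing to 2: nums[0:2] and nums[1:3].
    print(Solution().subarraySum([1, 1, 1], 2))  # expected output: 2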
|
py | b4015a7a26af1956519d535e21fa7946971ec836 | # Generated by Django 3.2.8 on 2022-02-11 14:49
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('netbox_ddns', '0007_zone_meta'),
]
operations = [
migrations.AddField(
model_name='server',
name='server_port',
field=models.PositiveIntegerField(default=53, validators=[django.core.validators.MinValueValidator(53), django.core.validators.MaxValueValidator(65535)]),
),
]
|
py | b4015b0353f6d83c70c6385377bcad6bdbc37abb | # -*- coding: utf-8 -*-
# gomory_hu.py - function for computing Gomory Hu trees
#
# Copyright 2017-2019 NetworkX developers.
#
# This file is part of NetworkX.
#
# NetworkX is distributed under a BSD license; see LICENSE.txt for more
# information.
#
# Author: Jordi Torrents <[email protected]>
"""
Gomory-Hu tree of undirected Graphs.
"""
import networkx as nx
from networkx.utils import not_implemented_for
from .edmondskarp import edmonds_karp
from .utils import build_residual_network
default_flow_func = edmonds_karp
__all__ = ['gomory_hu_tree']
@not_implemented_for('directed')
def gomory_hu_tree(G, capacity='capacity', flow_func=None):
r"""Returns the Gomory-Hu tree of an undirected graph G.
A Gomory-Hu tree of an undirected graph with capacities is a
weighted tree that represents the minimum s-t cuts for all s-t
pairs in the graph.
It only requires `n-1` minimum cut computations instead of the
obvious `n(n-1)/2`. The tree represents all s-t cuts as the
minimum cut value among any pair of nodes is the minimum edge
weight in the shortest path between the two nodes in the
Gomory-Hu tree.
The Gomory-Hu tree also has the property that removing the
edge with the minimum weight in the shortest path between
any two nodes leaves two connected components that form
a partition of the nodes in G that defines the minimum s-t
cut.
See Examples section below for details.
Parameters
----------
G : NetworkX graph
Undirected graph
capacity : string
Edges of the graph G are expected to have an attribute capacity
that indicates how much flow the edge can support. If this
attribute is not present, the edge is considered to have
infinite capacity. Default value: 'capacity'.
flow_func : function
Function to perform the underlying flow computations. Default value
:func:`edmonds_karp`. This function performs better in sparse graphs
with right tailed degree distributions.
:func:`shortest_augmenting_path` will perform better in denser
graphs.
Returns
-------
Tree : NetworkX graph
A NetworkX graph representing the Gomory-Hu tree of the input graph.
Raises
------
NetworkXNotImplemented : Exception
Raised if the input graph is directed.
NetworkXError: Exception
Raised if the input graph is an empty Graph.
Examples
--------
>>> G = nx.karate_club_graph()
>>> nx.set_edge_attributes(G, 1, 'capacity')
>>> T = nx.gomory_hu_tree(G)
>>> # The value of the minimum cut between any pair
... # of nodes in G is the minimum edge weight in the
... # shortest path between the two nodes in the
... # Gomory-Hu tree.
... def minimum_edge_weight_in_shortest_path(T, u, v):
... path = nx.shortest_path(T, u, v, weight='weight')
... return min((T[u][v]['weight'], (u,v)) for (u, v) in zip(path, path[1:]))
>>> u, v = 0, 33
>>> cut_value, edge = minimum_edge_weight_in_shortest_path(T, u, v)
>>> cut_value
10
>>> nx.minimum_cut_value(G, u, v)
10
    >>> # The Gomory-Hu tree also has the property that removing the
... # edge with the minimum weight in the shortest path between
... # any two nodes leaves two connected components that form
... # a partition of the nodes in G that defines the minimum s-t
... # cut.
... cut_value, edge = minimum_edge_weight_in_shortest_path(T, u, v)
>>> T.remove_edge(*edge)
>>> U, V = list(nx.connected_components(T))
>>> # Thus U and V form a partition that defines a minimum cut
... # between u and v in G. You can compute the edge cut set,
... # that is, the set of edges that if removed from G will
... # disconnect u from v in G, with this information:
... cutset = set()
>>> for x, nbrs in ((n, G[n]) for n in U):
... cutset.update((x, y) for y in nbrs if y in V)
>>> # Because we have set the capacities of all edges to 1
... # the cutset contains ten edges
... len(cutset)
10
>>> # You can use any maximum flow algorithm for the underlying
... # flow computations using the argument flow_func
... from networkx.algorithms import flow
>>> T = nx.gomory_hu_tree(G, flow_func=flow.boykov_kolmogorov)
>>> cut_value, edge = minimum_edge_weight_in_shortest_path(T, u, v)
>>> cut_value
10
>>> nx.minimum_cut_value(G, u, v, flow_func=flow.boykov_kolmogorov)
10
Notes
-----
    This implementation is based on the Gusfield approach [1]_ to compute
    Gomory-Hu trees, which does not require node contractions and has
    the same computational complexity as the original method.
See also
--------
:func:`minimum_cut`
:func:`maximum_flow`
References
----------
.. [1] Gusfield D: Very simple methods for all pairs network flow analysis.
SIAM J Comput 19(1):143-155, 1990.
"""
if flow_func is None:
flow_func = default_flow_func
if len(G) == 0: # empty graph
msg = 'Empty Graph does not have a Gomory-Hu tree representation'
raise nx.NetworkXError(msg)
# Start the tree as a star graph with an arbitrary node at the center
tree = {}
labels = {}
iter_nodes = iter(G)
root = next(iter_nodes)
for n in iter_nodes:
tree[n] = root
# Reuse residual network
R = build_residual_network(G, capacity)
# For all the leaves in the star graph tree (that is n-1 nodes).
for source in tree:
# Find neighbor in the tree
target = tree[source]
# compute minimum cut
cut_value, partition = nx.minimum_cut(G, source, target,
capacity=capacity,
flow_func=flow_func,
residual=R)
labels[(source, target)] = cut_value
# Update the tree
# Source will always be in partition[0] and target in partition[1]
for node in partition[0]:
if node != source and node in tree and tree[node] == target:
tree[node] = source
labels[node, source] = labels.get((node, target), cut_value)
#
if target != root and tree[target] in partition[0]:
labels[source, tree[target]] = labels[target, tree[target]]
labels[target, source] = cut_value
tree[source] = tree[target]
tree[target] = source
# Build the tree
T = nx.Graph()
T.add_nodes_from(G)
T.add_weighted_edges_from(((u, v, labels[u, v]) for u, v in tree.items()))
return T
|
py | b4015b136ab3deb9675397509df89d871d80557e | import os
from pyaedt.generic.general_methods import aedt_exception_handler, is_ironpython
from pyaedt.modeler.Model3DLayout import Modeler3DLayout
from pyaedt.modules.Mesh3DLayout import Mesh3d
from pyaedt.modules.SetupTemplates import SetupKeys
from pyaedt.modules.SolveSetup import Setup3DLayout
from pyaedt.application.Analysis import Analysis
if is_ironpython:
from pyaedt.modules.PostProcessor import PostProcessor
else:
from pyaedt.modules.AdvancedPostProcessing import PostProcessor
class FieldAnalysis3DLayout(Analysis):
"""Manages 3D field analysis setup in HFSS 3D Layout.
This class is automatically initialized by an application call from this
3D tool. See the application function for parameter definitions.
Parameters
----------
application : str
3D application that is to initialize the call.
projectname : str, optional
Name of the project to select or the full path to the project
or AEDTZ archive to open. The default is ``None``, in which
case an attempt is made to get an active project. If no
projects are present, an empty project is created.
designname : str, optional
Name of the design to select. The default is ``None``, in
which case an attempt is made to get an active design. If no
designs are present, an empty design is created.
solution_type : str, optional
Solution type to apply to the design. The default is
``None``, in which case the default type is applied.
setup_name : str, optional
Name of the setup to use as the nominal. The default is
``None``, in which case the active setup is used or
nothing is used.
specified_version : str, optional
Version of AEDT to use. The default is ``None``, in which case
the active version or latest installed version is used.
NG : bool, optional
Whether to run AEDT in the non-graphical mode. The default
is ``False``, in which case AEDT is launched in the graphical mode.
new_desktop_session : bool, optional
Whether to launch an instance of AEDT in a new thread, even if
another instance of the ``specified_version`` is active on the
machine. The default is ``True``.
close_on_exit : bool, optional
Whether to release AEDT on exit. The default is ``False``.
student_version : bool, optional
Whether to enable the student version of AEDT. The default
is ``False``.
"""
def __init__(
self,
application,
projectname,
designname,
solution_type,
setup_name=None,
specified_version=None,
non_graphical=False,
new_desktop_session=False,
close_on_exit=False,
student_version=False,
):
Analysis.__init__(
self,
application,
projectname,
designname,
solution_type,
setup_name,
specified_version,
non_graphical,
new_desktop_session,
close_on_exit,
student_version,
)
self.osolution = self._odesign.GetModule("SolveSetups")
self.oexcitation = self._odesign.GetModule("Excitations")
self.oboundary = self._odesign.GetModule("Excitations")
self.logger.info("Analysis Loaded")
self._modeler = Modeler3DLayout(self)
self._modeler.primitives.init_padstacks()
self.logger.info("Modeler Loaded")
self._mesh = Mesh3d(self)
self._post = PostProcessor(self)
# self._post = PostProcessor(self)
@property
def mesh(self):
"""Mesh.
Returns
-------
:class:`pyaedt.modules.Mesh3DLayout.Mesh3d`
"""
return self._mesh
@property
def get_excitations_name(self):
"""Excitation names.
Returns
-------
list
            List of all excitation names.
"""
return list(self.oboundary.GetAllPortsList())
@property
def get_all_sparameter_list(self, excitation_names=[]):
"""List of all S parameters for a list of excitations.
Parameters
----------
excitation_names : list, optional
List of excitations. The default is ``[]``, in which case
the S parameters for all excitations are to be provided.
For example, ``["1", "2"]``.
Returns
-------
list
List of strings representing the S parameters of the excitations.
For example, ``["S(1, 1)", "S(1, 2)", S(2, 2)]``.
"""
if not excitation_names:
excitation_names = self.get_excitations_name
spar = []
k = 0
for i in excitation_names:
k = excitation_names.index(i)
while k < len(excitation_names):
spar.append("S({},{})".format(i, excitation_names[k]))
k += 1
return spar
@aedt_exception_handler
def export_mesh_stats(self, setup_name, variation_string="", mesh_path=None):
"""Export mesh statistics to a file.
Parameters
----------
        setup_name : str
Setup name.
variation_string : str, optional
Variation List.
mesh_path : str, optional
Full path to mesh statistics file.
Returns
-------
str
File Path.
"""
if not mesh_path:
mesh_path = os.path.join(self.project_path, "meshstats.ms")
self.odesign.ExportMeshStats(setup_name, variation_string, mesh_path)
return mesh_path
@aedt_exception_handler
def get_all_return_loss_list(self, excitation_names=[], excitation_name_prefix=""):
"""Retrieve a list of all return losses for a list of excitations.
Parameters
----------
excitation_names : list, optional
List of excitations. The default is ``[]``, in which case
the return losses for all excitations are to be provided.
For example, ``["1", "2"]``.
excitation_name_prefix : string, optional
Prefix to add to the excitation names. The default is ``""``.
Returns
-------
list
List of strings representing the return losses of the excitations.
For example, ``["S(1, 1)", "S(2, 2)"]``.
"""
if not excitation_names:
excitation_names = self.get_excitations_name
if excitation_name_prefix:
excitation_names = [i for i in excitation_names if excitation_name_prefix.lower() in i.lower()]
spar = []
for i in excitation_names:
spar.append("S({},{})".format(i, i))
return spar
@aedt_exception_handler
def get_all_insertion_loss_list(self, trlist=[], reclist=[], tx_prefix="", rx_prefix=""):
"""Retrieve a list of all insertion losses from two lists of excitations (driver and receiver).
Parameters
----------
trlist : list, optional
List of drivers. The default is ``[]``. For example, ``["1"]``.
reclist : list, optional
List of receivers. The default is ``[]``. The number of drivers equals
the number of receivers. For example, ``["2"]``.
tx_prefix : str, optional
Prefix to add to driver names. For example, ``"DIE"``. The default is ``""``.
rx_prefix : str, optional
Prefix to add to receiver names. For example, ``"BGA"``. The default is ``""``.
Returns
-------
list
List of strings representing insertion losses of the excitations.
For example, ``["S(1, 2)"]``.
"""
spar = []
if not trlist:
trlist = [i for i in self.get_excitations_name if tx_prefix in i]
if not reclist:
reclist = [i for i in self.get_excitations_name if rx_prefix in i]
if len(trlist) != len(reclist):
self.logger.error("The TX and RX lists should be same length.")
return False
for i, j in zip(trlist, reclist):
spar.append("S({},{})".format(i, j))
return spar
@aedt_exception_handler
def get_next_xtalk_list(self, trlist=[], tx_prefix=""):
"""Retrieve a list of all the near end XTalks from a list of excitations (driver and receiver).
Parameters
----------
trlist : list, optional
List of drivers. The default is ``[]``. For example, ``["1", "2", "3"]``.
tx_prefix : str, optional
Prefix to add to driver names. For example, ``"DIE"``. The default is ``""``.
Returns
-------
list
List of strings representing near end XTalks of the excitations.
For example, ``["S(1, 2)", "S(1, 3)", "S(2, 3)"]``.
"""
next = []
if not trlist:
trlist = [i for i in self.get_excitations_name if tx_prefix in i]
for i in trlist:
k = trlist.index(i) + 1
while k < len(trlist):
next.append("S({},{})".format(i, trlist[k]))
k += 1
return next
@aedt_exception_handler
def get_fext_xtalk_list(self, trlist=[], reclist=[], tx_prefix="", rx_prefix="", skip_same_index_couples=True):
"""Retrieve a list of all the far end XTalks from two lists of exctitations (driver and receiver).
Parameters
----------
trlist : list, optional
List of drivers. The default is ``[]``. For example, ``["1", "2"]``.
reclist : list, optional
List of receivers. The default is ``[]``. For example, ``["3", "4"]``.
tx_prefix : str, optional
Prefix to add to the driver names. For example, ``"DIE"``. The default is ``""``.
rx_prefix : str, optional
            Prefix to add to the receiver names. For example, ``"BGA"``. The default is ``""``.
skip_same_index_couples : bool, optional
Whether to skip driver and receiver couples with the same index position.
The default is ``True``, in which case the drivers and receivers
with the same index position are considered insertion losses and
excluded from the list.
Returns
-------
list
List of strings representing the far end XTalks of the excitations.
For example, ``["S(1, 4)", "S(2, 3)"]``.
"""
fext = []
if not trlist:
trlist = [i for i in self.get_excitations_name if tx_prefix in i]
if not reclist:
reclist = [i for i in self.get_excitations_name if rx_prefix in i]
for i in trlist:
for k in reclist:
if not skip_same_index_couples or reclist.index(k) != trlist.index(i):
fext.append("S({},{})".format(i, k))
return fext
@property
def modeler(self):
"""Modeler object."""
return self._modeler
@property
def port_list(self):
"""Port list."""
return self.oexcitation.GetAllPortsList()
@property
def existing_analysis_setups(self):
"""Existing analysis setups in the design.
Returns
-------
list
List of names of all analysis setups in the design.
"""
setups = list(self.oanalysis.GetSetups())
return setups
@aedt_exception_handler
def create_setup(self, setupname="MySetupAuto", setuptype=None, props={}):
"""Create a setup.
Parameters
----------
setupname : str, optional
Name of the new setup. The default is ``"MySetupAuto"``.
setuptype : str, optional
Type of the setup. The default is ``None``, in which case
the default type is applied.
props : dict, optional
Dictionary of properties with values. The default is ``{}``.
Returns
-------
:class:`pyaedt.modules.SolveSetup.Setup3DLayout`
"""
if setuptype is None:
setuptype = SetupKeys.defaultSetups[self.solution_type]
name = self.generate_unique_setup_name(setupname)
setup = Setup3DLayout(self, setuptype, name)
setup.create()
if props:
for el in props:
setup.props[el] = props[el]
setup.update()
self.analysis_setup = name
self.setups.append(setup)
return setup
@aedt_exception_handler
def get_setup(self, setupname, setuptype=None):
"""Retrieve a setup.
Parameters
----------
setupname : str
Name of the setup.
setuptype : SETUPS, optional
Type of the setup. The default is ``None``, in which case
the default type is applied.
Returns
-------
:class:`pyaedt.modules.SolveSetup.Setup3DLayout`
Setup object.
"""
if setuptype is None:
setuptype = SetupKeys.defaultSetups[self.solution_type]
for setup in self.setups:
if setupname == setup.name:
return setup
setup = Setup3DLayout(self, setuptype, setupname, isnewsetup=False)
self.analysis_setup = setupname
return setup
@aedt_exception_handler
def delete_setup(self, setupname):
"""Delete a setup.
Parameters
----------
setupname : str
Name of the setup.
Returns
-------
bool
``True`` when successful, ``False`` when failed.
Examples
--------
Create a setup and then delete it.
>>> import pyaedt
>>> hfss3dlayout = pyaedt.Hfss3dLayout()
>>> setup1 = hfss3dlayout.create_setup(setupname='Setup1')
>>> hfss3dlayout.delete_setup(setupname='Setup1')
...
pyaedt info: Sweep was deleted correctly.
"""
if setupname in self.existing_analysis_setups:
self.osolution.Delete(setupname)
for s in self.setups:
if s.name == setupname:
self.setups.remove(s)
return True
return False
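# Hedged usage sketch (kept as comments; the project, design, and setup names are
# assumptions, and a concrete subclass such as pyaedt.Hfss3dLayout is needed):
#
# from pyaedt import Hfss3dLayout
# app = Hfss3dLayout("MyProject", "MyDesign")
# setup = app.create_setup(setupname="Setup1")  # returns a Setup3DLayout object
# losses = app.get_all_insertion_loss_list(tx_prefix="DIE", rx_prefix="BGA")
# app.delete_setup("Setup1")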
|
py | b4015b8be4b2629dfbbc4f44260a72ba0b44d7a5 | # Copyright (c) 2021, omar jaber and contributors
# For license information, please see license.txt
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.utils import nowdate, add_to_date, cstr, cint, getdate, get_link_to_form
class RosterEmployeeActions(Document):
def autoname(self):
self.name = self.start_date + "|" + self.end_date + "|" + self.action_type + "|" + self.supervisor
def after_insert(self):
# send notification to supervisor
user_id = frappe.db.get_value("Employee", self.supervisor, ["user_id"])
if user_id:
link = get_link_to_form(self.doctype, self.name)
subject = _("New Action to {action_type}.".format(action_type=self.action_type))
message = _("""
You have been issued a Roster Employee Action.<br>
Please review the employees assigned to you, take necessary actions and update the status.<br>
Link: {link}""".format(link=link))
frappe.sendmail([user_id], subject=subject, message=message, reference_doctype=self.doctype, reference_name=self.name)
|
py | b4015c2327d662c9d2d21696f370f70064c80b50 | """
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import json
import logging
from contextlib import closing
from typing import Any, Dict
import pkg_resources
import yaml
from bravado_core.spec import Spec
from bravado_core.validate import validate_object as bravado_validate
EVENT_REGISTRY = 'event_registry'
SWAGGER_SPEC = 'swagger_spec'
BRAVADO_SPEC = 'bravado_spec'
MODULE = 'module'
FILENAME = 'filename'
DEFINITIONS = 'definitions'
class EventValidator(object):
"""
    Validates events against the swagger definitions registered for EventD.
"""
def __init__(self, config: Dict[str, Any]):
self.event_registry = config[EVENT_REGISTRY]
self.specs_by_filename = self._load_specs_from_registry()
def validate_event(self, raw_event: str, event_type: str) -> None:
"""
Checks if an event is registered and validates it based on
a registered schema.
Args:
raw_event: The event to be validated, as a JSON-encoded string
event_type: The type of an event, which corresponds
to a generated model
Returns:
Does not return, but throws exceptions if validation fails.
"""
event = json.loads(raw_event)
# Event not in registry
if event_type not in self.event_registry:
logging.debug(
'Event type %s not among registered event types (%s)',
event_type, self.event_registry)
raise KeyError(
'Event type {} not registered, '
'please add it to the EventD config'.format(event_type))
filename = self.event_registry[event_type][FILENAME]
bravado_validate(
self.specs_by_filename[filename][BRAVADO_SPEC],
self.specs_by_filename[filename][SWAGGER_SPEC][event_type],
event)
def _load_specs_from_registry(self) -> Dict[str, Any]:
"""
Loads all swagger definitions from the files specified in the
event registry.
"""
specs_by_filename = {}
for event_type, info in self.event_registry.items():
filename = info[FILENAME]
if filename in specs_by_filename:
# Spec for this file is already registered
self._check_event_exists_in_spec(
specs_by_filename[filename][SWAGGER_SPEC],
filename,
event_type,
)
continue
module = '{}.swagger.specs'.format(info[MODULE])
if not pkg_resources.resource_exists(module, filename):
raise LookupError(
'File {} not found under {}/swagger, please ensure that '
'it exists'.format(filename, info[MODULE]))
stream = pkg_resources.resource_stream(module, filename)
with closing(stream) as spec_file:
swagger_spec = yaml.safe_load(spec_file)
self._check_event_exists_in_spec(
swagger_spec[DEFINITIONS], filename, event_type)
config = {'validate_swagger_spec': False}
bravado_spec = Spec.from_dict(swagger_spec, config=config)
specs_by_filename[filename] = {
SWAGGER_SPEC: swagger_spec[DEFINITIONS],
BRAVADO_SPEC: bravado_spec,
}
return specs_by_filename
@staticmethod
def _check_event_exists_in_spec(
swagger_definitions: Dict[str, Any],
filename: str,
event_type: str,
):
"""
Throw a KeyError if the event_type does not exist in swagger_definitions
"""
if event_type not in swagger_definitions:
raise KeyError(
'Event type {} is not defined in {}, '
'please add the definition and re-generate '
'swagger specifications'.format(event_type, filename))
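# Hedged configuration sketch (module and file names are assumptions added for
# illustration): the registry maps an event type to the swagger spec defining it.
#
# config = {
#     'event_registry': {
#         'my_custom_event': {
#             'module': 'my_service',          # hypothetical package with a swagger/specs resource
#             'filename': 'my_events.v1.yml',  # hypothetical spec containing a my_custom_event definition
#         },
#     },
# }
# validator = EventValidator(config)
# validator.validate_event('{"some_field": "some_value"}', 'my_custom_event')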
|
py | b4015c3a47bab2727a4330aa1f3dba0739158314 | """
Example Kernels
---------------
Plot three One-dimensional kernels: the Gaussian, Exponential, and Top-Hat
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Compute Kernels.
x = np.linspace(-5, 5, 10000)
dx = x[1] - x[0]
gauss = (1. / np.sqrt(2 * np.pi)) * np.exp(-0.5 * x ** 2)
exp = 0.5 * np.exp(-abs(x))
tophat = 0.5 * np.ones_like(x)
tophat[abs(x) > 1] = 0
#------------------------------------------------------------
# Plot the kernels
fig = plt.figure(figsize=(5, 3.75))
ax = fig.add_subplot(111)
ax.plot(x, gauss, '-', c='black', lw=3, label='Gaussian')
ax.plot(x, exp, '-', c='#666666', lw=2, label='Exponential')
ax.plot(x, tophat, '-', c='#999999', lw=1, label='Top-hat')
ax.legend(loc=1)
ax.set_xlabel('$u$')
ax.set_ylabel('$K(u)$')
ax.set_xlim(-5, 5)
ax.set_ylim(0, 0.6001)
plt.show()
|
py | b4015dc7f0711f33401619f89cbda598afc8b348 | """
WSGI config for HappyOrMad project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "HappyOrMad.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
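# --- Illustrative sketch (assumption: not part of the original project) ---
# A minimal example of the kind of WSGI middleware the comment above refers
# to: it adds one (made-up) response header and then delegates to Django.
class _AddHeaderMiddleware:
    def __init__(self, application):
        self.application = application

    def __call__(self, environ, start_response):
        def _start_response(status, headers, exc_info=None):
            headers.append(("X-Served-By", "HappyOrMad"))
            return start_response(status, headers, exc_info)
        return self.application(environ, _start_response)

# To enable it, wrap the application as in the commented example above:
# application = _AddHeaderMiddleware(application)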
|
py | b4015de989ba861b896696cd95e14e8298a8d2b7 |
def merge_sort(array):
if len(array) < 2:
return array
mid = len(array) // 2
left = merge_sort(array[:mid])
right = merge_sort(array[mid:])
return merge(left, right)
def merge(left, right):
result = []
i, j = 0, 0
while i < len(left) or j < len(right):
if left[i] <= right[j]:
result.append(left[i])
i += 1
else:
result.append(right[j])
j += 1
if i == len(left) or j == len(right):
result.extend(left[i:] or right[j:])
break
return result
array = [i for i in range(1, 20)]
print(array)
print(merge_sort(array))
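# Illustrative extra check (assumption: not part of the original script). The
# list above is already sorted, so it barely exercises merge(); a shuffled
# input demonstrates the algorithm more convincingly.
import random

shuffled = list(range(1, 20))
random.shuffle(shuffled)
print(shuffled)
print(merge_sort(shuffled))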
|
py | b4015df1f48a30e0a9e55722ad1c001bbc4ce7e0 | """
Implements classes representing elements of the ExploreCourses catalog
Includes:
- Course
- LearningObjective
- Section
- Schedule
- Instructor
- Attribute
- AdministrativeInformation
- Tag
- School
- Department
"""
from dataclasses import dataclass
from functools import total_ordering
import html
from typing import FrozenSet, Optional, Tuple
from xml.etree.ElementTree import Element
def _bool_or_none(condition: str, true: str, false: str) -> Optional[bool]:
if condition == true:
return True
if condition == false:
return False
return None
@dataclass(frozen=True)
class Department:
"""A department at the university"""
longname: str
name: str
@classmethod
def from_xml(cls, elem: Element):
"""Construct new Department from an XML element"""
return cls(elem.get("longname"), elem.get("name"))
@dataclass(frozen=True)
class School:
"""A school at the university"""
name: str
departments: FrozenSet[Department]
@classmethod
def from_xml(cls, elem: Element):
"""Construct new School from an XML element"""
return cls(
elem.get("name"),
frozenset(Department.from_xml(dept) for dept in elem.findall("department")),
)
def department(self, name: str) -> Department:
"""
Find department within the school
Args:
name (str): Full name or subject code identifying the department
Returns:
            Department: The matching department
"""
lname = name.lower()
for dept in self.departments:
if lname in (dept.longname.lower(), dept.name.lower()):
return dept
raise ValueError(f"no department named '{name}'")
@dataclass(frozen=True)
class LearningObjective:
"""A learning objective for a course"""
requirement_code: str
description: str
@classmethod
def from_xml(cls, elem: Element):
"""Construct new LearningObjective from an XML element"""
return cls(elem.findtext("requirementCode"), elem.findtext("description"))
@dataclass(frozen=True)
class Instructor:
"""An instructor for a section"""
name: str
first_name: str
middle_name: str
last_name: str
sunet: str
role: str
@classmethod
def from_xml(cls, elem: Element):
"""Construct new Instructor from an XML element"""
return cls(
elem.findtext("name"),
elem.findtext("firstName"),
elem.findtext("middleName"),
elem.findtext("lastName"),
elem.findtext("sunet"),
elem.findtext("role"),
)
@dataclass(frozen=True)
class Schedule:
"""A schedule for a section"""
start_date: str
end_date: str
start_time: str
end_time: str
location: str
    days: Tuple[str, ...]
instructors: FrozenSet[Instructor]
@classmethod
def from_xml(cls, elem: Element):
"""Construct new Schedule from an XML element"""
return cls(
elem.findtext("startDate"),
elem.findtext("endDate"),
elem.findtext("startTime"),
elem.findtext("endTime"),
elem.findtext("location"),
tuple(elem.findtext("days").split()),
frozenset(Instructor.from_xml(instr) for instr in elem.find("instructors")),
)
@dataclass(frozen=True)
class Attribute:
"""An attribute of a course or section"""
name: str
value: str
description: str
catalog_print: bool
schedule_print: bool
@classmethod
def from_xml(cls, elem: Element):
"""Construct new Attribute from an XML element"""
return cls(
elem.findtext("name"),
elem.findtext("value"),
elem.findtext("description"),
elem.findtext("catalogPrint") == "true",
elem.findtext("schedulePrint") == "true",
)
@dataclass(frozen=True)
class Section:
"""A section of a course"""
class_id: int
term: str
term_id: int
subject: str
code: str
units: str
section_number: str
component: str
num_enrolled: int
max_enrolled: int
num_waitlist: int
max_waitlist: int
enroll_status: str
add_consent: str
drop_consent: str
instruction_mode: str
course_id: int
schedules: FrozenSet[Schedule]
# current_class_size: int # Redundant, possibly deprecated
# max_class_size: int
# current_waitlist_size: int
# max_waitlist_size: int
notes: str
attributes: FrozenSet[Attribute]
@classmethod
def from_xml(cls, elem: Element):
"""Construct new Section from an XML element"""
return cls(
int(elem.findtext("classId")),
elem.findtext("term"),
int(elem.findtext("termId")),
elem.findtext("subject"),
elem.findtext("code"),
elem.findtext("units"),
elem.findtext("sectionNumber"),
elem.findtext("component"),
int(elem.findtext("numEnrolled")),
int(elem.findtext("maxEnrolled")),
int(elem.findtext("numWaitlist")),
int(elem.findtext("maxWaitlist")),
elem.findtext("enrollStatus"),
elem.findtext("addConsent"),
elem.findtext("dropConsent"),
elem.findtext("instructionMode"),
int(elem.findtext("courseId")),
frozenset(Schedule.from_xml(sched) for sched in elem.find("schedules")),
# int(elem.findtext("currentClassSize")), # Redundant, possibly deprecated
# int(elem.findtext("maxClassSize")),
# int(elem.findtext("currentWaitlistSize")),
# int(elem.findtext("maxWaitlistSize")),
elem.findtext("notes"),
frozenset(Attribute.from_xml(attr) for attr in elem.find("attributes")),
)
@dataclass(frozen=True)
class AdministrativeInformation:
"""Administrative information about a course"""
course_id: int
effective_status: str
offer_number: int
academic_group: str
academic_organization: str
academic_career: str
final_exam_flag: Optional[bool]
catalog_print: bool
schedule_print: bool
max_units_repeat: int
max_times_repeat: int
@classmethod
def from_xml(cls, elem: Element):
"""Construct new AdministrativeInformation from an XML element"""
return cls(
int(elem.findtext("courseId")),
elem.findtext("effectiveStatus"),
int(elem.findtext("offerNumber")),
elem.findtext("academicGroup"),
elem.findtext("academicOrganization"),
elem.findtext("academicCareer"),
_bool_or_none(elem.findtext("finalExamFlag"), "Y", "N"),
elem.findtext("catalogPrint") == "Y",
elem.findtext("schedulePrint") == "Y",
int(elem.findtext("maxUnitsRepeat")),
int(elem.findtext("maxTimesRepeat")),
)
@dataclass(frozen=True)
class Tag:
"""A tag for a course"""
organization: str
name: str
@classmethod
def from_xml(cls, elem: Element):
"""Construct new Tag from an XML element"""
return cls(elem.findtext("organization"), elem.findtext("name"))
@total_ordering
@dataclass(frozen=True)
class Course:
"""A course from the catalog"""
year: str
subject: str
code: str
title: str
description: str
gers: FrozenSet[str]
repeatable: bool
grading: str
units_min: int
units_max: int
remote: Optional[bool]
learning_objectives: FrozenSet[LearningObjective]
sections: FrozenSet[Section]
administrative_information: AdministrativeInformation
attributes: FrozenSet[Attribute]
tags: FrozenSet[Tag]
@classmethod
def from_xml(cls, elem: Element):
"""Construct new AdministrativeInformation from an XML element"""
return cls(
elem.findtext("year"),
elem.findtext("subject"),
elem.findtext("code"),
elem.findtext("title"),
html.unescape(html.unescape(elem.findtext("description"))),
frozenset(elem.findtext("gers").split(", ")),
elem.findtext("repeatable") == "true",
elem.findtext("grading"),
int(elem.findtext("unitsMin")),
int(elem.findtext("unitsMax")),
_bool_or_none(elem.findtext("remote"), "true", "false"),
frozenset(
LearningObjective.from_xml(lo) for lo in elem.find("learningObjectives")
),
frozenset(Section.from_xml(section) for section in elem.find("sections")),
AdministrativeInformation.from_xml(elem.find("administrativeInformation")),
frozenset(Attribute.from_xml(attr) for attr in elem.find("attributes")),
frozenset(Tag.from_xml(tag) for tag in elem.find("tags")),
)
@property
def course_code(self):
"""Course code"""
return f"{self.subject} {self.code}"
@property
def course_id(self):
"""Unique course id"""
return self.administrative_information.course_id
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return (self.year, self.course_code) == (other.year, other.course_code)
def __lt__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
return (self.year, self.course_code) < (other.year, other.course_code)
def __hash__(self):
return hash((self.year, self.course_code))
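# --- Illustrative usage (assumption: not part of the catalog module) ---
# A minimal sketch of School.from_xml and School.department on a hand-written
# XML fragment; the school and department names below are made up.
if __name__ == "__main__":
    from xml.etree.ElementTree import fromstring

    _example = fromstring(
        '<school name="School of Engineering">'
        '<department longname="Computer Science" name="CS"/>'
        '<department longname="Electrical Engineering" name="EE"/>'
        '</school>'
    )
    _school = School.from_xml(_example)
    print(_school.department("cs"))  # lookup is case-insensitive on either name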
|
py | b4015e279b69cc24c3c64d5af18eb5812ebf31bb | from rest_framework.test import APITestCase
from core import Constants
from imc.models import IMCCurve
from imc.curves import IMCCurveMale, IMCCurveFemale
class IMCCurveTestCase(APITestCase):
"""
    Unit tests for the IMC-based growth curve
"""
def setUp(self):
"""
This method will run before any test.
"""
self.male = IMCCurveMale().make()
self.female = IMCCurveFemale().make()
def test_imc_curve_male(self):
"""
        Test to verify that the graphic is constructed correctly for the MALE gender
"""
graphic = IMCCurve(gender=Constants.MALE)
self.assertEqual(
graphic.make(),
self.male
)
self.assertEqual(
graphic.make(IMCCurve.TITLE),
self.male['title']
)
def test_imc_curve_female(self):
"""
        Test to verify that the graphic is constructed correctly for the FEMALE gender.
"""
graphic = IMCCurve(gender=Constants.FEMALE)
self.assertEqual(
graphic.make(),
self.female
)
self.assertEqual(
graphic.make(IMCCurve.TITLE),
self.female['title']
)
def test_result_ok(self):
"""
        Test to check that the result for a valid age is correct.
"""
graphic = IMCCurve(gender=Constants.MALE)
# percentis_3
self.assertEqual(graphic.result(13.82, 2), -1)
self.assertEqual(graphic.result(13.83, 2), 0)
self.assertEqual(graphic.result(13.84, 2), 0)
self.assertEqual(graphic.result(15.36, 10), -1)
self.assertEqual(graphic.result(15.37, 10), 0)
self.assertEqual(graphic.result(15.38, 10), 0)
# percentis_97
self.assertEqual(graphic.result(19.70, 2), 0)
self.assertEqual(graphic.result(19.71, 2), 0)
self.assertEqual(graphic.result(19.72, 2), 1)
self.assertEqual(graphic.result(28.26, 10), 0)
self.assertEqual(graphic.result(28.27, 10), 0)
self.assertEqual(graphic.result(28.28, 10), 1)
def test_result_invalid(self):
"""
        Test to check that out-of-range ages are reported as invalid.
"""
graphic = IMCCurve(gender=Constants.MALE)
# percentis_3
self.assertEqual(graphic.result(13.83, -2), "Invalid age")
self.assertEqual(graphic.result(13.83, 0), "Invalid age")
self.assertEqual(graphic.result(13.83, 1), "Invalid age")
self.assertEqual(graphic.result(13.83, 2), 0)
self.assertEqual(graphic.result(19.48, 18), 0)
self.assertEqual(graphic.result(19.48, 19), "Invalid age")
|
py | b4015ee9dfe2884c514a92c863d7f9ed98556aa5 | # -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# Authenticated API
# ----------------------------------------------------------------------
# Copyright (C) 2007-2016 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
from __future__ import absolute_import
import operator
from threading import Lock
# Third-party modules
import cachetools
# NOC modules
from .api import APIRequestHandler
from noc.aaa.models.user import User
user_lock = Lock()
class AuthAPIRequestHandler(APIRequestHandler):
_user_cache = cachetools.TTLCache(maxsize=1000, ttl=60)
@classmethod
@cachetools.cachedmethod(operator.attrgetter("_user_cache"), lock=lambda _: user_lock)
def get_user_by_name(cls, name):
try:
return User.objects.get(username=name)
except User.DoesNotExist:
return None
def get_current_user(self):
return self.get_user_by_name(self.request.headers.get("Remote-User"))
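# --- Illustrative note (assumption: not part of the original handler) ---
# Because get_user_by_name is wrapped in a class-level TTL cache, repeated
# requests carrying the same Remote-User header within the 60-second TTL
# reuse the cached User object (up to 1000 entries) instead of querying the
# database; user_lock serialises access to the shared cache across threads.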
|
py | b4015f1f54d139beb2c1f43f88bf31f2e881b751 | import numpy as np
def affine_forward(x, w, b):
"""
Computes the forward pass for an affine (fully-connected) layer.
The input x has shape (N, d_1, ..., d_k) and contains a mini-batch of N
examples, where each example x[i] has shape (d_1, ..., d_k). We will
reshape each input into a vector of dimension D = d_1 * ... * d_k, and
then transform it to an output vector of dimension M.
Inputs:
:param x: A numpy array containing input data, of shape (N, d_1, ..., d_k)
:param w: A numpy array of weights, of shape (D, M)
:param b: A numpy array of biases, of shape (M,)
:return out: output, of shape (N, M)
:return cache: (x, w, b)
"""
x_reshaped = np.reshape(x, (x.shape[0], -1))
out = x_reshaped.dot(w) + b
cache = (x, w, b)
return out, cache
def affine_backward(d_out, cache):
"""
Computes the backward pass for an affine layer.
Inputs:
:param d_out: Upstream derivative, of shape (N, M)
:param cache: Tuple of:
- x: Input data, of shape (N, d_1, ... d_k)
- w: Weights, of shape (D, M)
    - b: A numpy array of biases, of shape (M,)
:return dx: Gradient with respect to x, of shape (N, d1, ..., d_k)
:return dw: Gradient with respect to w, of shape (D, M)
:return db: Gradient with respect to b, of shape (M,)
"""
x, w, b = cache
dw = np.reshape(x, (x.shape[0], -1)).T.dot(d_out)
dw = np.reshape(dw, w.shape)
db = np.sum(d_out, axis=0, keepdims=False)
dx = d_out.dot(w.T)
dx = np.reshape(dx, x.shape)
return dx, dw, db
class Sigmoid:
def __init__(self):
pass
def forward(self, x):
"""
:param x: Inputs, of any shape
:return out: Output, of the same shape as x
:return cache: Cache, for backward computation, of the same shape as x
"""
outputs = 1 / (1 + np.exp(-x))
cache = outputs
return outputs, cache
def backward(self, d_out, cache):
"""
:return: dx: the gradient w.r.t. input X, of the same shape as X
"""
dx = d_out * cache * (1 - cache)
return dx
class Relu:
def __init__(self):
pass
def forward(self, x):
"""
:param x: Inputs, of any shape
:return out: Output, of the same shape as x
:return cache: Cache, for backward computation, of the same shape as x
"""
########################################################################
# TODO: #
# Implement the forward pass of Relu activation function #
########################################################################
outputs = np.maximum(x, 0)
cache = outputs
########################################################################
# END OF YOUR CODE #
########################################################################
return outputs, cache
def backward(self, d_out, cache):
"""
:return: dx: the gradient w.r.t. input X, of the same shape as X
"""
########################################################################
# TODO: #
# Implement the backward pass of Relu activation function #
########################################################################
        # cache stores the ReLU outputs, so positions that were clipped to
        # zero (inputs <= 0) must receive zero gradient; only strictly
        # positive outputs pass the upstream gradient through unchanged.
        dx = np.zeros_like(cache)
        dx[cache > 0] = 1
        dx = d_out * dx
########################################################################
# END OF YOUR CODE #
########################################################################
return dx
class LeakyRelu:
def __init__(self, slope=0.01):
self.slope = slope
def forward(self, x):
"""
:param x: Inputs, of any shape
:return out: Output, of the same shape as x
:return cache: Cache, for backward computation, of the same shape as x
"""
########################################################################
# TODO: #
# Implement the forward pass of LeakyRelu activation function #
########################################################################
outputs = x.copy()
outputs[outputs < 0] *= self.slope
cache = outputs
########################################################################
# END OF YOUR CODE #
########################################################################
return outputs, cache
def backward(self, d_out, cache):
"""
:return: dx: the gradient w.r.t. input X, of the same shape as X
"""
########################################################################
# TODO: #
# Implement the backward pass of LeakyRelu activation function #
########################################################################
dx = cache.copy()
dx[dx >= 0] = 1
dx[dx < 0] = self.slope
dx = d_out * dx
########################################################################
# END OF YOUR CODE #
########################################################################
return dx
class Tanh:
def __init__(self):
pass
def forward(self, x):
"""
:param x: Inputs, of any shape
:return out: Output, of the same shape as x
:return cache: Cache, for backward computation, of the same shape as x
"""
########################################################################
# TODO: #
# Implement the forward pass of Tanh activation function #
########################################################################
exp_x = np.exp(x)
exp_neg_x = np.exp(-x)
outputs = (exp_x - exp_neg_x) / (exp_x + exp_neg_x)
cache = outputs
########################################################################
# END OF YOUR CODE #
########################################################################
return outputs, cache
def backward(self, d_out, cache):
"""
:return: dx: the gradient w.r.t. input X, of the same shape as X
"""
########################################################################
# TODO: #
# Implement the backward pass of Tanh activation function #
########################################################################
dx = d_out * (1 - cache * cache)
########################################################################
# END OF YOUR CODE #
########################################################################
return dx
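# ---------------------------------------------------------------------------
# Illustrative numerical gradient check (assumption: not part of the original
# assignment file). It compares the analytic dw from affine_backward against
# a central-difference estimate; the shapes and epsilon are arbitrary.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    x = rng.randn(4, 3, 2)   # N=4 examples, D = 3 * 2 = 6 after flattening
    w = rng.randn(6, 5)
    b = rng.randn(5)
    out, cache = affine_forward(x, w, b)
    d_out = rng.randn(*out.shape)
    dx, dw, db = affine_backward(d_out, cache)
    eps = 1e-6
    dw_num = np.zeros_like(w)
    for idx in np.ndindex(*w.shape):
        w_plus, w_minus = w.copy(), w.copy()
        w_plus[idx] += eps
        w_minus[idx] -= eps
        f_plus = np.sum(affine_forward(x, w_plus, b)[0] * d_out)
        f_minus = np.sum(affine_forward(x, w_minus, b)[0] * d_out)
        dw_num[idx] = (f_plus - f_minus) / (2 * eps)
    print("max |dw - dw_num|:", np.max(np.abs(dw - dw_num)))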
|
py | b4015f58f4a11293848229de19e1b97e45c97952 | from __future__ import print_function
import mxnext as X
import mxnet as mx
from models.FPN.builder import FPNRpnHead, FPNRoiExtractor
from models.FPN import assign_layer_fpn, get_topk_proposal
from models.maskrcnn import bbox_post_processing
class MaskFasterRcnn(object):
def __init__(self):
pass
@staticmethod
def get_train_symbol(backbone, neck, rpn_head, roi_extractor, mask_roi_extractor, bbox_head, mask_head):
gt_bbox = X.var("gt_bbox")
gt_poly = X.var("gt_poly")
im_info = X.var("im_info")
rpn_cls_label = X.var("rpn_cls_label")
rpn_reg_target = X.var("rpn_reg_target")
rpn_reg_weight = X.var("rpn_reg_weight")
rpn_feat = backbone.get_rpn_feature()
rcnn_feat = backbone.get_rcnn_feature()
rpn_feat = neck.get_rpn_feature(rpn_feat)
rcnn_feat = neck.get_rcnn_feature(rcnn_feat)
rpn_loss = rpn_head.get_loss(rpn_feat, rpn_cls_label, rpn_reg_target, rpn_reg_weight)
proposal, bbox_cls, bbox_target, bbox_weight, mask_proposal, mask_target = \
rpn_head.get_sampled_proposal(rpn_feat, gt_bbox, gt_poly, im_info)
roi_feat = roi_extractor.get_roi_feature(rcnn_feat, proposal)
mask_roi_feat = mask_roi_extractor.get_roi_feature(rcnn_feat, mask_proposal)
bbox_loss = bbox_head.get_loss(roi_feat, bbox_cls, bbox_target, bbox_weight)
mask_loss = mask_head.get_loss(mask_roi_feat, mask_target)
return X.group(rpn_loss + bbox_loss + mask_loss)
@staticmethod
def get_test_symbol(backbone, neck, rpn_head, roi_extractor, mask_roi_extractor, bbox_head, mask_head, bbox_post_processor):
im_info = X.var("im_info")
im_id = X.var("im_id")
rec_id = X.var("rec_id")
rpn_feat = backbone.get_rpn_feature()
rcnn_feat = backbone.get_rcnn_feature()
rpn_feat = neck.get_rpn_feature(rpn_feat)
rcnn_feat = neck.get_rcnn_feature(rcnn_feat)
proposal = rpn_head.get_all_proposal(rpn_feat, im_info)
roi_feat = roi_extractor.get_roi_feature(rcnn_feat, proposal)
cls_score, bbox_xyxy = bbox_head.get_prediction(roi_feat, im_info, proposal)
post_cls_score, post_bbox_xyxy, post_cls = bbox_post_processor.get_post_processing(cls_score, bbox_xyxy)
mask_roi_feat = mask_roi_extractor.get_roi_feature(rcnn_feat, post_bbox_xyxy)
mask = mask_head.get_prediction(mask_roi_feat)
return X.group([rec_id, im_id, im_info, post_cls_score, post_bbox_xyxy, post_cls, mask])
class BboxPostProcessor(object):
def __init__(self, pTest):
super(BboxPostProcessor, self).__init__()
self.p = pTest
def get_post_processing(self, cls_score, bbox_xyxy):
p = self.p
max_det_per_image = p.max_det_per_image
min_det_score = p.min_det_score
nms_type = p.nms.type
nms_thr = p.nms.thr
post_cls_score, post_bbox_xyxy, post_cls = mx.sym.Custom(
cls_score=cls_score,
bbox_xyxy=bbox_xyxy,
max_det_per_image = max_det_per_image,
min_det_score = min_det_score,
nms_type = nms_type,
nms_thr = nms_thr,
op_type='BboxPostProcessing')
return post_cls_score, post_bbox_xyxy, post_cls
class MaskFPNRpnHead(FPNRpnHead):
def __init__(self, pRpn, pMask):
super(MaskFPNRpnHead, self).__init__(pRpn)
self.pMask = pMask
def get_sampled_proposal(self, conv_fpn_feat, gt_bbox, gt_poly, im_info):
p = self.p
batch_image = p.batch_image
proposal_wo_gt = p.subsample_proposal.proposal_wo_gt
image_roi = p.subsample_proposal.image_roi
fg_fraction = p.subsample_proposal.fg_fraction
fg_thr = p.subsample_proposal.fg_thr
bg_thr_hi = p.subsample_proposal.bg_thr_hi
bg_thr_lo = p.subsample_proposal.bg_thr_lo
post_nms_top_n = p.proposal.post_nms_top_n
num_reg_class = p.bbox_target.num_reg_class
class_agnostic = p.bbox_target.class_agnostic
bbox_target_weight = p.bbox_target.weight
bbox_target_mean = p.bbox_target.mean
bbox_target_std = p.bbox_target.std
mask_size = self.pMask.resolution
proposal = self.get_all_proposal(conv_fpn_feat, im_info)
(bbox, label, bbox_target, bbox_weight, match_gt_iou, mask_target) = mx.sym.ProposalMaskTarget(
rois=proposal,
gt_boxes=gt_bbox,
gt_polys=gt_poly,
mask_size=mask_size,
num_classes=num_reg_class,
class_agnostic=class_agnostic,
batch_images=batch_image,
proposal_without_gt=proposal_wo_gt,
image_rois=image_roi,
fg_fraction=fg_fraction,
fg_thresh=fg_thr,
bg_thresh_hi=bg_thr_hi,
bg_thresh_lo=bg_thr_lo,
bbox_weight=bbox_target_weight,
bbox_mean=bbox_target_mean,
bbox_std=bbox_target_std,
output_iou=True,
name="subsample_proposal"
)
label = X.reshape(label, (-3, -2))
bbox_target = X.reshape(bbox_target, (-3, -2))
bbox_weight = X.reshape(bbox_weight, (-3, -2))
mask_target = X.reshape(mask_target, (-3, -2))
num_fg_rois_per_img = int(image_roi * fg_fraction)
mask_proposal = mx.sym.slice_axis(
bbox,
axis=1,
begin=0,
end=num_fg_rois_per_img)
return bbox, label, bbox_target, bbox_weight, mask_proposal, mask_target
class MaskFasterRcnnHead(object):
def __init__(self, pBbox, pMask, pMaskRoi):
self.pBbox = pBbox
self.pMask = pMask
self.pMaskRoi = pMaskRoi
self._head_feat = None
def _get_mask_head_logit(self, conv_feat):
        raise NotImplementedError
def get_output(self, conv_feat):
pBbox = self.pBbox
num_class = pBbox.num_class
head_feat = self._get_mask_head_logit(conv_feat)
msra_init = mx.init.Xavier(rnd_type="gaussian", factor_type="out", magnitude=2)
if self.pMask:
head_feat = X.to_fp32(head_feat, name="mask_head_to_fp32")
mask_fcn_logit = X.conv(
head_feat,
filter=num_class,
name="mask_fcn_logit",
no_bias=False,
init=msra_init
)
return mask_fcn_logit
def get_prediction(self, conv_feat):
"""
input: conv_feat, (1 * num_box, channel, pool_size, pool_size)
"""
mask_fcn_logit = self.get_output(conv_feat)
mask_prob = mx.symbol.Activation(
data=mask_fcn_logit,
act_type='sigmoid',
name="mask_prob")
return mask_prob
def get_loss(self, conv_feat, mask_target):
pBbox = self.pBbox
pMask = self.pMask
batch_image = pBbox.batch_image
mask_fcn_logit = self.get_output(conv_feat)
scale_loss_shift = 128.0 if pMask.fp16 else 1.0
mask_fcn_logit = X.reshape(
mask_fcn_logit,
shape=(1, -1),
name="mask_fcn_logit_reshape"
)
mask_target = X.reshape(
mask_target,
shape=(1, -1),
name="mask_target_reshape"
)
mask_loss = mx.sym.contrib.SigmoidCrossEntropy(
mask_fcn_logit,
mask_target,
grad_scale=1.0 * scale_loss_shift,
name="mask_loss"
)
return (mask_loss,)
class MaskFasterRcnn4ConvHead(MaskFasterRcnnHead):
def __init__(self, pBbox, pMask, pMaskRoi):
super(MaskFasterRcnn4ConvHead, self).__init__(pBbox, pMask, pMaskRoi)
def _get_mask_head_logit(self, conv_feat):
if self._head_feat is not None:
return self._head_feat
up_stride = int(self.pMask.resolution // self.pMaskRoi.out_size)
dim_reduced = self.pMask.dim_reduced
msra_init = mx.init.Xavier(rnd_type="gaussian", factor_type="out", magnitude=2)
current = conv_feat
for i in range(4):
current = X.convrelu(
current,
name="mask_fcn_conv{}".format(i),
filter=dim_reduced,
kernel=3,
no_bias=False,
init=msra_init
)
mask_up = current
for i in range(up_stride // 2):
weight = X.var(
name="mask_up{}_weight".format(i),
init=msra_init,
lr_mult=1,
wd_mult=1)
mask_up = mx.sym.Deconvolution(
mask_up,
kernel=(2, 2),
stride=(2, 2),
num_filter=dim_reduced,
no_bias=False,
weight=weight,
name="mask_up{}".format(i)
)
mask_up = X.relu(
mask_up,
name="mask_up{}_relu".format(i))
mask_up = X.to_fp32(mask_up, name='mask_up_to_fp32')
self._head_feat = mask_up
return self._head_feat
|
py | b4015fbfa3de63833a230726bb40f331a6bb2837 | from sympy import solve, simplify, Eq, symbols
x1, x2, x3, X1, X2, X3 = symbols('x1 x2 x3 X1 X2 X3')
def get_inverse(eq1, eq2, eq3):
inverse = solve([Eq(x1, eq1), Eq(x2, eq2), Eq(x3, eq3)], [X1, X2, X3])
return inverse
def get_Lagrange(eq1, eq2, eq3):
U1 = simplify(eq1 - X1)
U2 = simplify(eq2 - X2)
U3 = simplify(eq3 - X3)
U = [U1, U2, U3]
return U
def get_Euler(inverse):
u1 = simplify(x1 - inverse[X1])
u2 = simplify(x2 - inverse[X2])
u3 = simplify(x3 - inverse[X3])
u = [u1, u2, u3]
return u
# Testing
#from testdata import eq1, eq2, eq3
#inverse = get_inverse(eq1, eq2, eq3)
#print(get_Lagrange(eq1, eq2, eq3))
#print(get_Euler(inverse))
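# Illustrative run (assumption: the testdata module referenced above is not
# available here). A simple invertible motion chosen purely for demonstration.
if __name__ == "__main__":
    eq1 = X1 + 0.5 * X2
    eq2 = X2
    eq3 = X3
    inverse = get_inverse(eq1, eq2, eq3)
    print(get_Lagrange(eq1, eq2, eq3))  # displacement field in material coordinates
    print(get_Euler(inverse))           # displacement field in spatial coordinates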
|
py | b40160f4877e8d8ce89f14c58f7b34480badbc25 | import json
import os
import pika
import logging
import ssl
import subprocess
import requests
import tempfile
logging.basicConfig(
format='%(asctime)s %(message)s',
filename='logs/check.log',
level=logging.INFO
)
context = ssl.create_default_context()
ssl_options = pika.SSLOptions(context, os.environ['RABBITMQ_HOST'])
credentials = pika.PlainCredentials(
os.environ['RABBITMQ_USER'],
os.environ['RABBITMQ_PASSWORD']
)
parameters = pika.ConnectionParameters(
host=os.environ['RABBITMQ_HOST'],
port=5671,
virtual_host='/',
credentials=credentials,
ssl_options=ssl_options
)
try:
with pika.BlockingConnection(parameters) as conn:
channel = conn.channel()
channel.queue_declare(queue='hpc-jobs')
# Check if there is an incoming job
result = channel.basic_get('hpc-jobs')
while result[0]:
# Handle the incoming job in Slurm
logging.info("Job received from queue")
# Get settings for Slurm
message = json.loads(result[2].decode('utf-8'))
inputs = message['inputs']
settings = message['settings']
n_cpu = settings['CPUs']
mem = settings['Maximum memory (MB)']
time = settings['Maximum time (D-HH:MM)']
mail = settings['Slurm Notification Email']
# Authenticate User
auth_url = "https://{}/api/auth/token/".format(os.environ["RODAN_HOST"])
logging.info("Attempting to authenticate at {}...".format(auth_url))
payload = {'username': os.environ['RODAN_USER'], 'password': os.environ['RODAN_PASSWORD']}
response = requests.post(auth_url, data=payload)
if not response.ok:
logging.error("Bad response from server (" + response.url + ")")
logging.error(response.text)
quit()
else:
logging.info("Received code " + str(response.text) + " on authorization")
settings['token'] = response.json()['token']
logging.info("Token: " + settings['token'])
message['settings'] = settings
gpu_req = "--gres=gpu:1"
if mem > 128000 and mem <= 192000:
gpu_req = "--gres=gpu:v100l:1"
elif mem > 192000:
gpu_req = "--gres=gpu:p100l:4"
# Output the JSON body contents
with tempfile.NamedTemporaryFile(dir=".", delete=False) as f:
f.write(json.dumps(message).encode('utf-8'))
run_array = [
'sbatch',
'--cpus-per-task='+str(n_cpu),
gpu_req,
'--mem='+str(mem)+'M',
'--time='+str(time),
'hpc_training.sh',
f.name,
result[1].reply_to,
result[1].correlation_id
]
logging.info("Reply queue: " + result[1].reply_to)
if len(mail) > 0:
run_array.insert(1, '--mail-type=ALL')
run_array.insert(1, '--mail-user=' + mail)
sub_result = subprocess.run(run_array, check=True, capture_output=True, text=True)
logging.info(sub_result.stdout)
job_id = sub_result.stdout.split(' ')[-1].strip()
logging.info("Preparing to submit dependency for job " + job_id)
subprocess.run([
'sbatch',
'--dependency=afterany:' + job_id,
'handle_failure',
job_id,
result[1].correlation_id,
result[1].reply_to
], check=True)
logging.info("Dependency Submitted")
channel.basic_ack(result[0].delivery_tag)
result = channel.basic_get('hpc-jobs') # Check for additional unscheduled jobs.
else:
logging.info("No job present.")
except pika.exceptions.AMQPConnectionError:
logging.info("Could not connect.")
except Exception as e:
logging.error("EXCEPTION")
logging.error(e)
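# --- Illustrative note (assumption: not part of this checker script) ---
# A producer would publish to 'hpc-jobs' a JSON body shaped like the one
# consumed above, with reply_to and correlation_id set as AMQP properties;
# all values below are made up:
#
#     {
#         "inputs": {...},
#         "settings": {
#             "CPUs": 4,
#             "Maximum memory (MB)": 16000,
#             "Maximum time (D-HH:MM)": "0-12:00",
#             "Slurm Notification Email": ""
#         }
#     }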
|
py | b401612b1cf2940e61bf27a48e77da830e836297 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple
from google.api_core import grpc_helpers # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.aiplatform_v1beta1.types import model
from google.cloud.aiplatform_v1beta1.types import model as gca_model
from google.cloud.aiplatform_v1beta1.types import model_evaluation
from google.cloud.aiplatform_v1beta1.types import model_evaluation_slice
from google.cloud.aiplatform_v1beta1.types import model_service
from google.longrunning import operations_pb2 as operations # type: ignore
from .base import ModelServiceTransport, DEFAULT_CLIENT_INFO
class ModelServiceGrpcTransport(ModelServiceTransport):
"""gRPC backend transport for ModelService.
A service for managing AI Platform's machine learning Models.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "aiplatform.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=self._ssl_channel_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
self._stubs = {} # type: Dict[str, Callable]
self._operations_client = None
# Run the base constructor.
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
client_info=client_info,
)
@classmethod
def create_channel(
cls,
host: str = "aiplatform.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
address (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
scopes = scopes or cls.AUTH_SCOPES
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service."""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
return self._operations_client
@property
def upload_model(
self,
) -> Callable[[model_service.UploadModelRequest], operations.Operation]:
r"""Return a callable for the upload model method over gRPC.
Uploads a Model artifact into AI Platform.
Returns:
Callable[[~.UploadModelRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "upload_model" not in self._stubs:
self._stubs["upload_model"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.ModelService/UploadModel",
request_serializer=model_service.UploadModelRequest.serialize,
response_deserializer=operations.Operation.FromString,
)
return self._stubs["upload_model"]
@property
def get_model(self) -> Callable[[model_service.GetModelRequest], model.Model]:
r"""Return a callable for the get model method over gRPC.
Gets a Model.
Returns:
Callable[[~.GetModelRequest],
~.Model]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_model" not in self._stubs:
self._stubs["get_model"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.ModelService/GetModel",
request_serializer=model_service.GetModelRequest.serialize,
response_deserializer=model.Model.deserialize,
)
return self._stubs["get_model"]
@property
def list_models(
self,
) -> Callable[[model_service.ListModelsRequest], model_service.ListModelsResponse]:
r"""Return a callable for the list models method over gRPC.
Lists Models in a Location.
Returns:
Callable[[~.ListModelsRequest],
~.ListModelsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_models" not in self._stubs:
self._stubs["list_models"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.ModelService/ListModels",
request_serializer=model_service.ListModelsRequest.serialize,
response_deserializer=model_service.ListModelsResponse.deserialize,
)
return self._stubs["list_models"]
@property
def update_model(
self,
) -> Callable[[model_service.UpdateModelRequest], gca_model.Model]:
r"""Return a callable for the update model method over gRPC.
Updates a Model.
Returns:
Callable[[~.UpdateModelRequest],
~.Model]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_model" not in self._stubs:
self._stubs["update_model"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.ModelService/UpdateModel",
request_serializer=model_service.UpdateModelRequest.serialize,
response_deserializer=gca_model.Model.deserialize,
)
return self._stubs["update_model"]
@property
def delete_model(
self,
) -> Callable[[model_service.DeleteModelRequest], operations.Operation]:
r"""Return a callable for the delete model method over gRPC.
Deletes a Model.
Note: Model can only be deleted if there are no
DeployedModels created from it.
Returns:
Callable[[~.DeleteModelRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_model" not in self._stubs:
self._stubs["delete_model"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.ModelService/DeleteModel",
request_serializer=model_service.DeleteModelRequest.serialize,
response_deserializer=operations.Operation.FromString,
)
return self._stubs["delete_model"]
@property
def export_model(
self,
) -> Callable[[model_service.ExportModelRequest], operations.Operation]:
r"""Return a callable for the export model method over gRPC.
Exports a trained, exportable, Model to a location specified by
the user. A Model is considered to be exportable if it has at
least one [supported export
format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats].
Returns:
Callable[[~.ExportModelRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "export_model" not in self._stubs:
self._stubs["export_model"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.ModelService/ExportModel",
request_serializer=model_service.ExportModelRequest.serialize,
response_deserializer=operations.Operation.FromString,
)
return self._stubs["export_model"]
@property
def get_model_evaluation(
self,
) -> Callable[
[model_service.GetModelEvaluationRequest], model_evaluation.ModelEvaluation
]:
r"""Return a callable for the get model evaluation method over gRPC.
Gets a ModelEvaluation.
Returns:
Callable[[~.GetModelEvaluationRequest],
~.ModelEvaluation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_model_evaluation" not in self._stubs:
self._stubs["get_model_evaluation"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.ModelService/GetModelEvaluation",
request_serializer=model_service.GetModelEvaluationRequest.serialize,
response_deserializer=model_evaluation.ModelEvaluation.deserialize,
)
return self._stubs["get_model_evaluation"]
@property
def list_model_evaluations(
self,
) -> Callable[
[model_service.ListModelEvaluationsRequest],
model_service.ListModelEvaluationsResponse,
]:
r"""Return a callable for the list model evaluations method over gRPC.
Lists ModelEvaluations in a Model.
Returns:
Callable[[~.ListModelEvaluationsRequest],
~.ListModelEvaluationsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_model_evaluations" not in self._stubs:
self._stubs["list_model_evaluations"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.ModelService/ListModelEvaluations",
request_serializer=model_service.ListModelEvaluationsRequest.serialize,
response_deserializer=model_service.ListModelEvaluationsResponse.deserialize,
)
return self._stubs["list_model_evaluations"]
@property
def get_model_evaluation_slice(
self,
) -> Callable[
[model_service.GetModelEvaluationSliceRequest],
model_evaluation_slice.ModelEvaluationSlice,
]:
r"""Return a callable for the get model evaluation slice method over gRPC.
Gets a ModelEvaluationSlice.
Returns:
Callable[[~.GetModelEvaluationSliceRequest],
~.ModelEvaluationSlice]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_model_evaluation_slice" not in self._stubs:
self._stubs["get_model_evaluation_slice"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.ModelService/GetModelEvaluationSlice",
request_serializer=model_service.GetModelEvaluationSliceRequest.serialize,
response_deserializer=model_evaluation_slice.ModelEvaluationSlice.deserialize,
)
return self._stubs["get_model_evaluation_slice"]
@property
def list_model_evaluation_slices(
self,
) -> Callable[
[model_service.ListModelEvaluationSlicesRequest],
model_service.ListModelEvaluationSlicesResponse,
]:
r"""Return a callable for the list model evaluation slices method over gRPC.
Lists ModelEvaluationSlices in a ModelEvaluation.
Returns:
Callable[[~.ListModelEvaluationSlicesRequest],
~.ListModelEvaluationSlicesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_model_evaluation_slices" not in self._stubs:
self._stubs["list_model_evaluation_slices"] = self.grpc_channel.unary_unary(
"/google.cloud.aiplatform.v1beta1.ModelService/ListModelEvaluationSlices",
request_serializer=model_service.ListModelEvaluationSlicesRequest.serialize,
response_deserializer=model_service.ListModelEvaluationSlicesResponse.deserialize,
)
return self._stubs["list_model_evaluation_slices"]
__all__ = ("ModelServiceGrpcTransport",)
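# --- Illustrative sketch (assumption: not part of the generated client) ---
# With application default credentials available in the environment, the
# transport can be constructed directly and its cached clients reused:
#
#     transport = ModelServiceGrpcTransport(host="aiplatform.googleapis.com")
#     ops = transport.operations_client   # created lazily, then cached
#     get_model = transport.get_model     # unary-unary stub for GetModel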
|
py | b40161c87e4ef2698049223ec2b4e00a906c888a | #!/usr/bin/env python3
# Copyright © 2012-13 Qtrac Ltd. All rights reserved.
# This program or module is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version. It is provided for
# educational purposes and is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import collections
import json
import os
import re
import subprocess
import sys
UTF8 = "utf-8"
TRANSFORM, SUMMARIZE = ("TRANSFORM", "SUMMARIZE")
Code = collections.namedtuple("Code", "name code kind")
def main():
genome = 3 * GENOME
for i, code in enumerate(CODE):
context = dict(genome=genome, target="G[AC]{2}TT", replace="TCGA")
execute(code, context)
if sys.version_info[:2] > (3, 1):
def execute(code, context):
module, offset = create_module(code.code, context)
with subprocess.Popen([sys.executable, "-"], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE) as process:
communicate(process, code, module, offset)
else:
def execute(code, context):
module, offset = create_module(code.code, context)
process = subprocess.Popen([sys.executable, "-"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
communicate(process, code, module, offset)
def create_module(code, context):
lines = ["import json", "result = error = None"]
for key, value in context.items():
lines.append("{} = {!r}".format(key, value))
offset = len(lines) + 1
outputLine = "\nprint(json.dumps((result, error)))"
return "\n".join(lines) + "\n" + code + outputLine, offset
def communicate(process, code, module, offset):
stdout, stderr = process.communicate(module.encode(UTF8))
if stderr:
stderr = stderr.decode(UTF8).lstrip().replace(", in <module>", ":")
        stderr = re.sub(r", line (\d+)",
lambda match: str(int(match.group(1)) - offset), stderr)
print(re.sub(r'File."[^"]+?"', "'{}' has an error on line "
.format(code.name), stderr))
return
if stdout:
result, error = json.loads(stdout.decode(UTF8))
handle_result(code, result, error)
return
print("'{}' produced no result\n".format(code.name))
def handle_result(code, result, error):
if error is not None:
print("'{}' error: {}".format(code.name, error))
elif result is None:
print("'{}' produced no result".format(code.name))
elif code.kind == TRANSFORM:
genome = result
try:
print("'{}' produced a genome of length {}".format(code.name,
len(genome)))
except TypeError as err:
print("'{}' error: expected a sequence result: {}".format(
code.name, err))
elif code.kind == SUMMARIZE:
print("'{}' produced a result of {}".format(code.name, result))
print()
CODE = (
Code("Count",
"""
import re
matches = re.findall(target, genome)
if matches:
result = len(matches)
else:
error = "'{}' not found".format(target)
""", SUMMARIZE)
,
Code("Replace",
"""
import re
result, count = re.subn(target, replace, genome)
if not count:
error = "no '{}' replacements made".format(target)
""", TRANSFORM)
,
Code("Exception Test",
"""
result = 0
for i in range(len(genome)):
if genome[i] = "A":
result += 1
""", SUMMARIZE)
,
Code("Error Test",
"""
import re
matches = re.findall(target * 5, genome)
if matches:
result = len(matches)
else:
error = "'{}' not found".format(target)
""", TRANSFORM)
,
Code("No Result Test",
"""
# No result
""", TRANSFORM)
,
Code("Wrong Kind Test",
"""
result = len(genome)
""", TRANSFORM)
,
Code("Termination Test",
"""
import sys
result = "terminating"
sys.exit()
""", SUMMARIZE)
,
Code("Length",
"""
result = len(genome)
""", SUMMARIZE)
)
GENOME = """TGTTAGTCGCTCCTCGGTCTAAGACATCAAAGTCGGTCTGCGCGGCTGCTCCCTTAGCGCTG
CATAAGAGCGGGGCAGAGAGAGATAGGCGTTTTGACCGTGGCGAGCAAGGCGCGTCATAGTGTCGCCGTGACTG
ATCCTACTGGGTTCTTGCTACTGCCCGGGTCGCAATCCAAAATCTCCACGCGCTGCCACCCCGAAGAAGATATA
TGTCACTGAATTGTATTGGTAACATAGTCGAATTGGGTTCAGGTAAGTTAGTCGTTTAGCCGCTGCGACAGTGG
TGGAAGGGCGAATAGTGTAAAATTTCGCCTGTTAGTGAACATTATCAGGCTGCCATCGTTGATCGCCCCTCTTA
AACTCAGTCTTAAATGAGTTCCCGCCTAAGGTCATTCGTGCCTTGATGATTGATAGCTCGATTGGTCCCTTATG
AAACCGGACCAGAAATGTACCCGCTGAACCGGTGTCATAAGTGTCGCCGTCCCTACGATCGACACTTCCTGAGC
ACGAACGATTTGCGACGCTGTAATGCCACGAGGACTGCATTGAAGATTTTTTGTCCTAGGTGTATGTGCTTCTC
AGGAAGATGCACTACGCACTCCCCTTATCACGGGTGTGACCATCAGGTAGCGTAGGAAGATTAAGACCGCGTAA
CTATCCCTTTCCGTCGCACTCCGACGTCTCAGCACATGTGCGGGGGCCCCTAATTGAGAAACAGTCCATGGTTG
TCCGTAAGTTTCGGAAATCAACTTCACTGCTAGATGGTTGGACGCCAAGGCTCAATAGGTTGGACTCTAAGAAG
""".replace("\n", "")
if __name__ == "__main__":
main()
|
py | b40165f62aef66d4126028d61137ef20c50ddc9b | import sys
from apache_beam.options.pipeline_options import TypeOptions
# Suppress a spurious warning that happens when you import apache_beam
from pipe_tools.beam import logging_monkeypatch
from pipe_tools.options import validate_options
from pipe_tools.options import LoggingOptions
from pipe_segment.options.segment import SegmentOptions
from pipe_segment import pipeline
def run(args):
args = args or []
args.append('--no_pipeline_type_check')
options = validate_options(args=args, option_classes=[LoggingOptions,SegmentOptions])
options.view_as(LoggingOptions).configure_logging()
return pipeline.run(options)
if __name__ == '__main__':
sys.exit(run(args=sys.argv))
|
py | b40166f71553883d881cb07368f98b053047d385 | # Copyright (c) 2012 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron.api.v2 import attributes as attr
from neutron import context
from neutron.db import db_base_plugin_v2
from neutron.db import portsecurity_db
from neutron.db import securitygroups_db
from neutron.extensions import portsecurity as psec
from neutron.extensions import securitygroup as ext_sg
from neutron import manager
from neutron.tests.unit import test_db_plugin
from neutron.tests.unit import test_extension_security_group
DB_PLUGIN_KLASS = ('neutron.tests.unit.test_extension_portsecurity.'
'PortSecurityTestPlugin')
class PortSecurityTestCase(
test_extension_security_group.SecurityGroupsTestCase,
test_db_plugin.NeutronDbPluginV2TestCase):
def setUp(self, plugin=None):
ext_mgr = (
test_extension_security_group.SecurityGroupTestExtensionManager())
super(PortSecurityTestCase, self).setUp(plugin=plugin, ext_mgr=ext_mgr)
# Check if a plugin supports security groups
plugin_obj = manager.NeutronManager.get_plugin()
self._skip_security_group = ('security-group' not in
plugin_obj.supported_extension_aliases)
def tearDown(self):
super(PortSecurityTestCase, self).tearDown()
self._skip_security_group = None
class PortSecurityTestPlugin(db_base_plugin_v2.NeutronDbPluginV2,
securitygroups_db.SecurityGroupDbMixin,
portsecurity_db.PortSecurityDbMixin):
"""Test plugin that implements necessary calls on create/delete port for
associating ports with security groups and port security.
"""
supported_extension_aliases = ["security-group", "port-security"]
def create_network(self, context, network):
tenant_id = self._get_tenant_id_for_create(context, network['network'])
self._ensure_default_security_group(context, tenant_id)
with context.session.begin(subtransactions=True):
neutron_db = super(PortSecurityTestPlugin, self).create_network(
context, network)
neutron_db.update(network['network'])
self._process_network_port_security_create(
context, network['network'], neutron_db)
return neutron_db
def update_network(self, context, id, network):
with context.session.begin(subtransactions=True):
neutron_db = super(PortSecurityTestPlugin, self).update_network(
context, id, network)
if psec.PORTSECURITY in network['network']:
self._process_network_port_security_update(
context, network['network'], neutron_db)
return neutron_db
def get_network(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
net = super(PortSecurityTestPlugin, self).get_network(
context, id)
return self._fields(net, fields)
def create_port(self, context, port):
p = port['port']
with context.session.begin(subtransactions=True):
p[ext_sg.SECURITYGROUPS] = self._get_security_groups_on_port(
context, port)
neutron_db = super(PortSecurityTestPlugin, self).create_port(
context, port)
p.update(neutron_db)
(port_security, has_ip) = self._determine_port_security_and_has_ip(
context, p)
p[psec.PORTSECURITY] = port_security
self._process_port_port_security_create(context, p, neutron_db)
if (attr.is_attr_set(p.get(ext_sg.SECURITYGROUPS)) and
not (port_security and has_ip)):
raise psec.PortSecurityAndIPRequiredForSecurityGroups()
# Port requires ip and port_security enabled for security group
if has_ip and port_security:
self._ensure_default_security_group_on_port(context, port)
if (p.get(ext_sg.SECURITYGROUPS) and p[psec.PORTSECURITY]):
self._process_port_create_security_group(
context, p, p[ext_sg.SECURITYGROUPS])
return port['port']
def update_port(self, context, id, port):
delete_security_groups = self._check_update_deletes_security_groups(
port)
has_security_groups = self._check_update_has_security_groups(port)
with context.session.begin(subtransactions=True):
ret_port = super(PortSecurityTestPlugin, self).update_port(
context, id, port)
# copy values over - but not fixed_ips
port['port'].pop('fixed_ips', None)
ret_port.update(port['port'])
# populate port_security setting
if psec.PORTSECURITY not in ret_port:
ret_port[psec.PORTSECURITY] = self._get_port_security_binding(
context, id)
has_ip = self._ip_on_port(ret_port)
# checks if security groups were updated adding/modifying
# security groups, port security is set and port has ip
if (has_security_groups and (not ret_port[psec.PORTSECURITY]
or not has_ip)):
raise psec.PortSecurityAndIPRequiredForSecurityGroups()
# Port security/IP was updated off. Need to check that no security
# groups are on port.
if ret_port[psec.PORTSECURITY] is not True or not has_ip:
if has_security_groups:
raise psec.PortSecurityAndIPRequiredForSecurityGroups()
# get security groups on port
filters = {'port_id': [id]}
security_groups = (super(PortSecurityTestPlugin, self).
_get_port_security_group_bindings(
context, filters))
if security_groups and not delete_security_groups:
raise psec.PortSecurityPortHasSecurityGroup()
if (delete_security_groups or has_security_groups):
# delete the port binding and read it with the new rules.
self._delete_port_security_group_bindings(context, id)
sgids = self._get_security_groups_on_port(context, port)
# process port create sec groups needs port id
port['id'] = id
self._process_port_create_security_group(context,
ret_port, sgids)
if psec.PORTSECURITY in port['port']:
self._process_port_port_security_update(
context, port['port'], ret_port)
return ret_port
class PortSecurityDBTestCase(PortSecurityTestCase):
def setUp(self, plugin=None):
plugin = plugin or DB_PLUGIN_KLASS
super(PortSecurityDBTestCase, self).setUp(plugin)
class TestPortSecurity(PortSecurityDBTestCase):
def test_create_network_with_portsecurity_mac(self):
res = self._create_network('json', 'net1', True)
net = self.deserialize('json', res)
self.assertEqual(net['network'][psec.PORTSECURITY], True)
def test_create_network_with_portsecurity_false(self):
res = self._create_network('json', 'net1', True,
arg_list=('port_security_enabled',),
port_security_enabled=False)
net = self.deserialize('json', res)
self.assertEqual(net['network'][psec.PORTSECURITY], False)
def test_updating_network_port_security(self):
res = self._create_network('json', 'net1', True,
port_security_enabled='True')
net = self.deserialize('json', res)
self.assertEqual(net['network'][psec.PORTSECURITY], True)
update_net = {'network': {psec.PORTSECURITY: False}}
req = self.new_update_request('networks', update_net,
net['network']['id'])
net = self.deserialize('json', req.get_response(self.api))
self.assertEqual(net['network'][psec.PORTSECURITY], False)
req = self.new_show_request('networks', net['network']['id'])
net = self.deserialize('json', req.get_response(self.api))
self.assertEqual(net['network'][psec.PORTSECURITY], False)
def test_create_port_default_true(self):
with self.network() as net:
res = self._create_port('json', net['network']['id'])
port = self.deserialize('json', res)
self.assertEqual(port['port'][psec.PORTSECURITY], True)
self._delete('ports', port['port']['id'])
def test_create_port_passing_true(self):
res = self._create_network('json', 'net1', True,
arg_list=('port_security_enabled',),
port_security_enabled=True)
net = self.deserialize('json', res)
res = self._create_port('json', net['network']['id'])
port = self.deserialize('json', res)
self.assertEqual(port['port'][psec.PORTSECURITY], True)
self._delete('ports', port['port']['id'])
def test_create_port_on_port_security_false_network(self):
res = self._create_network('json', 'net1', True,
arg_list=('port_security_enabled',),
port_security_enabled=False)
net = self.deserialize('json', res)
res = self._create_port('json', net['network']['id'])
port = self.deserialize('json', res)
self.assertEqual(port['port'][psec.PORTSECURITY], False)
self._delete('ports', port['port']['id'])
def test_create_port_security_overrides_network_value(self):
res = self._create_network('json', 'net1', True,
arg_list=('port_security_enabled',),
port_security_enabled=False)
net = self.deserialize('json', res)
res = self._create_port('json', net['network']['id'],
arg_list=('port_security_enabled',),
port_security_enabled=True)
port = self.deserialize('json', res)
self.assertEqual(port['port'][psec.PORTSECURITY], True)
self._delete('ports', port['port']['id'])
def test_create_port_fails_with_secgroup_and_port_security_false(self):
if self._skip_security_group:
self.skipTest("Plugin does not support security groups")
with self.network() as net:
with self.subnet(network=net):
security_group = self.deserialize(
'json',
self._create_security_group(self.fmt, 'asdf', 'asdf'))
security_group_id = security_group['security_group']['id']
res = self._create_port('json', net['network']['id'],
arg_list=('security_groups',
'port_security_enabled'),
security_groups=[security_group_id],
port_security_enabled=False)
self.assertEqual(res.status_int, 400)
def test_create_port_with_default_security_group(self):
if self._skip_security_group:
self.skipTest("Plugin does not support security groups")
with self.network() as net:
with self.subnet(network=net):
res = self._create_port('json', net['network']['id'])
port = self.deserialize('json', res)
self.assertEqual(port['port'][psec.PORTSECURITY], True)
self.assertEqual(len(port['port'][ext_sg.SECURITYGROUPS]), 1)
self._delete('ports', port['port']['id'])
def test_create_port_with_security_group_and_net_sec_false(self):
# This tests that port_security_enabled is true when creating
# a port on a network that is marked as port_security_enabled=False
        # that has a subnet, and security_groups are passed in.
if self._skip_security_group:
self.skipTest("Plugin does not support security groups")
res = self._create_network('json', 'net1', True,
arg_list=('port_security_enabled',),
port_security_enabled=False)
net = self.deserialize('json', res)
self._create_subnet('json', net['network']['id'], '10.0.0.0/24')
security_group = self.deserialize(
'json', self._create_security_group(self.fmt, 'asdf', 'asdf'))
security_group_id = security_group['security_group']['id']
res = self._create_port('json', net['network']['id'],
arg_list=('security_groups',),
security_groups=[security_group_id])
port = self.deserialize('json', res)
self.assertEqual(port['port'][psec.PORTSECURITY], True)
self.assertEqual(port['port']['security_groups'], [security_group_id])
self._delete('ports', port['port']['id'])
def test_update_port_security_off_with_security_group(self):
if self._skip_security_group:
self.skipTest("Plugin does not support security groups")
with self.network() as net:
with self.subnet(network=net):
res = self._create_port('json', net['network']['id'])
port = self.deserialize('json', res)
self.assertEqual(port['port'][psec.PORTSECURITY], True)
update_port = {'port': {psec.PORTSECURITY: False}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
res = req.get_response(self.api)
self.assertEqual(res.status_int, 409)
# remove security group on port
update_port = {'port': {ext_sg.SECURITYGROUPS: None}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
self.deserialize('json', req.get_response(self.api))
self._delete('ports', port['port']['id'])
def test_update_port_remove_port_security_security_group(self):
if self._skip_security_group:
self.skipTest("Plugin does not support security groups")
with self.network() as net:
with self.subnet(network=net):
res = self._create_port('json', net['network']['id'],
arg_list=('port_security_enabled',),
port_security_enabled=True)
port = self.deserialize('json', res)
self.assertEqual(port['port'][psec.PORTSECURITY], True)
# remove security group on port
update_port = {'port': {ext_sg.SECURITYGROUPS: None,
psec.PORTSECURITY: False}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
port = self.deserialize('json', req.get_response(self.api))
self.assertEqual(port['port'][psec.PORTSECURITY], False)
self.assertEqual(len(port['port'][ext_sg.SECURITYGROUPS]), 0)
self._delete('ports', port['port']['id'])
def test_update_port_remove_port_security_security_group_read(self):
if self._skip_security_group:
self.skipTest("Plugin does not support security groups")
with self.network() as net:
with self.subnet(network=net):
res = self._create_port('json', net['network']['id'],
arg_list=('port_security_enabled',),
port_security_enabled=True)
port = self.deserialize('json', res)
self.assertEqual(port['port'][psec.PORTSECURITY], True)
# remove security group on port
update_port = {'port': {ext_sg.SECURITYGROUPS: None,
psec.PORTSECURITY: False}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
self.deserialize('json', req.get_response(self.api))
sg_id = port['port'][ext_sg.SECURITYGROUPS]
update_port = {'port': {ext_sg.SECURITYGROUPS: [sg_id[0]],
psec.PORTSECURITY: True}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
port = self.deserialize('json', req.get_response(self.api))
self.assertEqual(port['port'][psec.PORTSECURITY], True)
self.assertEqual(len(port['port'][ext_sg.SECURITYGROUPS]), 1)
self._delete('ports', port['port']['id'])
def test_create_port_security_off_shared_network(self):
with self.network(shared=True) as net:
with self.subnet(network=net):
res = self._create_port('json', net['network']['id'],
arg_list=('port_security_enabled',),
port_security_enabled=False,
tenant_id='not_network_owner',
set_context=True)
self.deserialize('json', res)
self.assertEqual(res.status_int, 403)
def test_update_port_security_off_shared_network(self):
with self.network(shared=True, do_delete=False) as net:
with self.subnet(network=net, do_delete=False):
res = self._create_port('json', net['network']['id'],
tenant_id='not_network_owner',
set_context=True)
port = self.deserialize('json', res)
# remove security group on port
update_port = {'port': {ext_sg.SECURITYGROUPS: None,
psec.PORTSECURITY: False}}
req = self.new_update_request('ports', update_port,
port['port']['id'])
req.environ['neutron.context'] = context.Context(
'', 'not_network_owner')
res = req.get_response(self.api)
# TODO(salvatore-orlando): Expected error is 404 because
# the current API controller always returns this error
# code for any policy check failures on update.
# It should be 404 when the caller cannot access the whole
# resource, and 403 when it cannot access a single attribute
self.assertEqual(res.status_int, 404)
|
py | b401675b558a4d7deb5a74f39674d66891749599 | from __future__ import absolute_import, division, print_function
import logging
import os
import time
logger = logging.getLogger(__name__)
LOG_LEVELS = {'CRITICAL': logging.CRITICAL, 'ERROR': logging.ERROR, 'WARNING': logging.WARNING, 'INFO': logging.INFO,
'DEBUG': logging.DEBUG}
PROPERTIES = {
'server': '',
'port': '5000',
'log_level': 'INFO',
'stop_timeout': '1'
}
class Config(object):
"""
    Main configuration class.
    It is populated once from the environment at start-up and never written to
    afterwards, which keeps it thread safe.
"""
def __init__(self):
start = time.time()
self.properties = {}
for v in os.environ:
if v.startswith('GINDROP_'):
k = v.replace('GINDROP_', '').lower()
if k in PROPERTIES:
PROPERTIES[k] = os.environ[v]
else:
print("Unknown property: [{}]".format(k))
for p in PROPERTIES:
self.properties[p] = PROPERTIES[p]
logging.basicConfig(
format="%(asctime)s | %(process)5d |[%(threadName)10s] | %(levelname)9s | %(name)s:%(funcName)s() "
"| %(message)s",
level=LOG_LEVELS[self.log_level.upper()])
stop = time.time()
logger.info('configuration loaded in ' + str(stop - start) + "s")
def __iter__(self):
for p in self.properties:
yield p
def __str__(self):
return str(self.properties)
def __getitem__(self, item):
if item not in self.properties:
raise KeyError
return self.properties[item]
def __getattr__(self, item):
if item in self.properties:
return self.properties[item]
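# Example (based on the lookup above): exporting GINDROP_PORT=8080 before
# start-up overrides the 'port' default, so Config().port (or Config()['port'])
# returns '8080'; GINDROP_* variables that do not match a known property are
# only reported as "Unknown property".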
class Orchestrator(object):
__slots__ = ["_obj", "__weakref__"]
def __init__(self, obj):
object.__setattr__(self, "_obj", obj)
def __getattribute__(self, name):
return getattr(object.__getattribute__(self, "_obj"), name)
def __delattr__(self, name):
delattr(object.__getattribute__(self, "_obj"), name)
def __setattr__(self, name, value):
setattr(object.__getattribute__(self, "_obj"), name, value)
def __nonzero__(self):
return bool(object.__getattribute__(self, "_obj"))
def __str__(self):
return str(object.__getattribute__(self, "_obj"))
def __repr__(self):
return repr(object.__getattribute__(self, "_obj"))
#
# factories
#
_special_names = [
'__abs__', '__add__', '__and__', '__call__', '__cmp__', '__coerce__',
'__contains__', '__delitem__', '__delslice__', '__div__', '__divmod__',
'__eq__', '__float__', '__floordiv__', '__ge__', '__getitem__',
'__getslice__', '__gt__', '__hash__', '__hex__', '__iadd__', '__iand__',
'__idiv__', '__idivmod__', '__ifloordiv__', '__ilshift__', '__imod__',
'__imul__', '__int__', '__invert__', '__ior__', '__ipow__', '__irshift__',
'__isub__', '__iter__', '__itruediv__', '__ixor__', '__le__', '__len__',
'__long__', '__lshift__', '__lt__', '__mod__', '__mul__', '__ne__',
'__neg__', '__oct__', '__or__', '__pos__', '__pow__', '__radd__',
'__rand__', '__rdiv__', '__rdivmod__', '__reduce__', '__reduce_ex__',
'__repr__', '__reversed__', '__rfloorfiv__', '__rlshift__', '__rmod__',
'__rmul__', '__ror__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__',
'__rtruediv__', '__rxor__', '__setitem__', '__setslice__', '__sub__',
'__truediv__', '__xor__', 'next',
]
@classmethod
def _create_class_proxy(cls, theclass):
"""creates a proxy for the given class"""
def make_method(name):
def method(self, *args, **kw):
return getattr(object.__getattribute__(self, "_obj"), name)(*args, **kw)
return method
namespace = {}
for name in cls._special_names:
if hasattr(theclass, name):
namespace[name] = make_method(name)
return type("%s(%s)" % (cls.__name__, theclass.__name__), (cls,), namespace)
def __new__(cls, obj, *args, **kwargs):
"""
creates an proxy instance referencing `obj`. (obj, *args, **kwargs) are
passed to this class' __init__, so deriving classes can define an
__init__ method of their own.
note: _class_proxy_cache is unique per deriving class (each deriving
class must hold its own cache)
"""
try:
cache = cls.__dict__["_class_proxy_cache"]
except KeyError:
cls._class_proxy_cache = cache = {}
try:
theclass = cache[obj.__class__]
except KeyError:
cache[obj.__class__] = theclass = cls._create_class_proxy(obj.__class__)
ins = object.__new__(theclass)
theclass.__init__(ins, obj, *args, **kwargs)
return ins
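# Illustrative use of the proxy (a sketch; the wrapped object is arbitrary):
#   cfg = Config()
#   orch = Orchestrator(cfg)
#   orch.port         # attribute access is delegated to cfg via __getattribute__
#   str(orch)         # special methods are delegated through the generated proxy class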
|
py | b40167d2dda90aecc45caae5b66c50a23baaf498 | # Copyright 2016 Osvaldo Santana Neto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from datetime import date, datetime, timedelta
from decimal import Decimal
from typing import Dict, List, Optional, Tuple, Union, cast # noqa: F401
from PIL import Image
from .. import exceptions
from ..utils import get_resource_path, to_decimal
from .address import Address, ZipCode
from .data import (
EXTRA_SERVICE_VD_PAC,
EXTRA_SERVICE_VD_SEDEX,
FREIGHT_ERROR_FINAL_ZIPCODE_RESTRICTED,
FREIGHT_ERROR_INITIAL_AND_FINAL_ZIPCODE_RESTRICTED,
FREIGHT_ERROR_INITIAL_ZIPCODE_RESTRICTED,
INSURANCE_PERCENTUAL_COST,
INSURANCE_VALUE_THRESHOLD_PAC,
INSURANCE_VALUE_THRESHOLD_SEDEX,
SERVICE_PAC,
SERVICE_PAC_INDUSTRIAL,
SERVICE_SEDEX,
SERVICE_SEDEX_INDUSTRIAL,
TRACKING_EVENT_TYPES,
TRACKING_STATUS,
)
from .user import Contract # noqa: F401
from .user import ExtraService, PostingCard, Service
TRACKING_CODE_SIZE = 13
TRACKING_CODE_NUMBER_SIZE = 8
TRACKING_CODE_PREFIX_SIZE = 2
TRACKING_CODE_SUFFIX_SIZE = 2
IATA_COEFICIENT = 6.0
VOLUMETRIC_WEIGHT_THRESHOLD = 5000 # g
MIN_WIDTH, MAX_WIDTH = 11, 105 # cm
MIN_HEIGHT, MAX_HEIGHT = 2, 105 # cm
MIN_LENGTH, MAX_LENGTH = 16, 105 # cm
MIN_DIAMETER, MAX_DIAMETER = 5, 91 # cm
MIN_CYLINDER_LENGTH, MAX_CYLINDER_LENGTH = 18, 105 # cm
MIN_SIZE, MAX_SIZE = 29, 200 # cm
MIN_CYLINDER_SIZE, MAX_CYLINDER_SIZE = 28, 200 # cm
MAX_MECHANIZABLE_PACKAGE_SIZE = 70 # cm
NON_MECHANIZABLE_COST = Decimal("79.0")
INSURANCE_VALUE_THRESHOLDS = {
Service.get(SERVICE_PAC).code: INSURANCE_VALUE_THRESHOLD_PAC,
Service.get(SERVICE_PAC_INDUSTRIAL).code: INSURANCE_VALUE_THRESHOLD_PAC,
Service.get(SERVICE_SEDEX).code: INSURANCE_VALUE_THRESHOLD_SEDEX,
Service.get(SERVICE_SEDEX_INDUSTRIAL).code: INSURANCE_VALUE_THRESHOLD_SEDEX,
}
class EventStatus:
def __init__(self, event_type: str, status_code: Union[str, int]) -> None:
event_type = event_type.upper()
status_code = int(status_code)
event_status_data = self._get_event_status_data(event_type, status_code)
self.type = event_type
self.status = status_code
self.category = event_status_data[0]
self.description = event_status_data[1]
self.detail = event_status_data[2]
self.action = event_status_data[3]
def _get_event_status_data(self, event_type, status_code):
if event_type not in TRACKING_EVENT_TYPES:
raise exceptions.InvalidEventStatusError("{} is not valid".format(event_type))
try:
return TRACKING_STATUS[event_type, status_code]
except KeyError:
raise exceptions.InvalidEventStatusError("{}/{} is not valid".format(event_type, status_code))
@property
def display_event_type(self):
return TRACKING_EVENT_TYPES[self.type]
def __str__(self):
return "({}, {})".format(self.type, self.status)
def __repr__(self):
return "<EventStatus({!r}, {!r})>".format(self.type, self.status)
class ErrorEventStatus(EventStatus):
def __init__(self):
super().__init__("ERROR", 0)
class TrackingEvent:
timestamp_format = "%d/%m/%Y %H:%M"
def __init__(
self,
timestamp: datetime,
status: Union[Tuple[str, Union[str, int]], EventStatus],
location_zip_code: Union[str, ZipCode] = "",
location: str = "",
receiver: str = "",
city: str = "",
state: str = "",
document: str = "",
comment: str = "",
description: str = "",
details: str = "",
) -> None:
self.timestamp = timestamp
self.location = location
self.receiver = receiver
self.city = city
self.state = state
self.document = document
self.comment = comment
self.description = description
self.details = details
if location_zip_code:
location_zip_code = ZipCode.create(location_zip_code)
self.location_zip_code = location_zip_code
if isinstance(status, tuple):
status = EventStatus(*status)
self.status = status
def __str__(self):
return "{} - {} - {}/{}".format(self.description, self.location, self.city, self.state)
def __repr__(self):
timestamp = self.timestamp.strftime(self.timestamp_format)
return "<TrackingEvent({!s}, {!s})>".format(self.status, timestamp)
class NotFoundTrackingEvent(TrackingEvent):
def __init__(self, timestamp: datetime, comment) -> None:
super().__init__(timestamp=timestamp, status=ErrorEventStatus(), comment=comment)
class TrackingCode:
def __init__(self, code: str) -> None:
self.prefix = code[:2].upper()
self.number = "".join(d for d in code[2:10] if d.isdigit())
self.suffix = code[-2:].upper()
self._digit = None
if len(code) == TRACKING_CODE_SIZE and code[10:11] != " ":
self._digit = int(code[10:11])
self._validate()
# filled by tracking service
self.category = None # type: Optional[str]
self.name = None # type: Optional[str]
self.initials = None # type: Optional[str]
self.events = [] # type: List[TrackingEvent]
def _validate(self):
if len(self.prefix) != TRACKING_CODE_PREFIX_SIZE or not self.prefix.isalpha():
raise exceptions.InvalidTrackingCodeError("Invalid tracking code prefix {}".format(self.prefix))
if len(self.suffix) != TRACKING_CODE_SUFFIX_SIZE or not self.suffix.isalpha():
raise exceptions.InvalidTrackingCodeError("Invalid tracking code suffix {}".format(self.suffix))
if len(self.number) != TRACKING_CODE_NUMBER_SIZE or not self.number.isnumeric():
raise exceptions.InvalidTrackingCodeError("Invalid tracking code number {}".format(self.number))
if self._digit is not None and self._digit != self.calculate_digit(self.number):
raise exceptions.InvalidTrackingCodeError(
"Invalid tracking code number {} or digit {} (must be {})".format(
self.number, self._digit, self.calculate_digit(self.number)
)
)
@classmethod
def create(cls, tracking_code: Union[str, "TrackingCode"]):
if isinstance(tracking_code, cls):
return tracking_code
tracking_code = cast(str, tracking_code)
return cls(tracking_code)
@classmethod
def calculate_digit(cls, number: str) -> int:
numbers = [int(c) for c in number if c.isdigit()]
multipliers = [8, 6, 4, 2, 3, 5, 9, 7]
mod = sum(multipliers[i] * digit for i, digit in enumerate(numbers)) % 11
if not mod:
return 5
if mod == 1:
return 0
return 11 - mod
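    # Worked example (not from the original source): for number "12345678" the
    # weighted sum is 1*8 + 2*6 + 3*4 + 4*2 + 5*3 + 6*5 + 7*9 + 8*7 = 204,
    # and 204 % 11 == 6, so calculate_digit("12345678") returns 11 - 6 = 5.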
@classmethod
def create_range(cls, start: Union[str, "TrackingCode"], end: Union[str, "TrackingCode"]):
if not isinstance(start, TrackingCode):
start = TrackingCode(start)
if not isinstance(end, TrackingCode):
end = TrackingCode(end)
if start.prefix != end.prefix:
raise exceptions.InvalidTrackingCodeError(
"Different tracking code prefixes: {} != {}".format(start.prefix, end.prefix)
)
if start.suffix != end.suffix:
raise exceptions.InvalidTrackingCodeError(
"Different tracking code suffixes: {} != {}".format(start.suffix, end.suffix)
)
start_number = int(start.number)
end_number = int(end.number)
if start_number > end_number:
raise exceptions.InvalidTrackingCodeError("Invalid range numbers: {} > {}".format(start_number, end_number))
code_range = range(int(start.number), int(end.number) + 1)
return [TrackingCode(start.prefix + "{:08}".format(n) + start.suffix) for n in code_range]
@property
def digit(self):
if self._digit is None:
self._digit = self.calculate_digit(self.number)
return self._digit
@property
def code(self):
return self.prefix + self.number + str(self.digit) + self.suffix
@property
def nodigit(self):
return "{}{} {}".format(self.prefix, self.number, self.suffix)
@property
def short(self):
return "{}{}{}".format(self.prefix, self.number, self.suffix)
@property
def splitted(self):
code = self.code
return "{!s} {!s} {!s} {!s} {!s}".format(code[:2], code[2:5], code[5:8], code[8:11], code[11:])
def add_event(self, event: TrackingEvent):
self.events.append(event)
def __str__(self):
return self.code
def __repr__(self):
return "<TrackingCode code={!r}>".format(self.code)
class Package:
TYPE_ENVELOPE = 1 # type: int
TYPE_BOX = 2 # type: int
TYPE_CYLINDER = 3 # type: int
freight_package_types = {TYPE_BOX: 1, TYPE_CYLINDER: 2, TYPE_ENVELOPE: 3} # type: Dict[int, int]
def __init__(
self,
package_type: int = TYPE_BOX,
width: Union[float, int] = 0, # cm
height: Union[float, int] = 0, # cm
length: Union[float, int] = 0, # cm
diameter: Union[float, int] = 0, # cm
weight: Union[float, int] = 0, # g
sequence=(1, 1),
service: Optional[Union[Service, str, int]] = None,
) -> None:
if service:
service = Service.get(service)
Package.validate(package_type, width, height, length, diameter, service, weight)
if len(sequence) != 2 or sequence[0] > sequence[1]:
raise exceptions.InvalidPackageSequenceError("Package must be a tuple with 2 elements: (number, total)")
self.package_type = package_type
self.real_width = width # cm
self.real_height = height # cm
self.real_length = length # cm
self.real_diameter = diameter # cm
self.real_weight = weight # g
self.sequence = sequence
self.service = service
@property
def width(self) -> int:
return max(MIN_WIDTH, int(math.ceil(self.real_width)))
@width.setter
def width(self, width):
Package._validate_dimension("width", width, MAX_WIDTH)
self.real_width = width
@property
def height(self) -> int:
return max(MIN_HEIGHT, int(math.ceil(self.real_height)))
@height.setter
def height(self, height):
Package._validate_dimension("height", height, MAX_HEIGHT)
self.real_height = height
@property
def length(self) -> int:
return max(MIN_LENGTH, int(math.ceil(self.real_length)))
@length.setter
def length(self, length):
Package._validate_dimension("length", length, MAX_LENGTH)
self.real_length = length
@property
def diameter(self) -> int:
if self.package_type != Package.TYPE_CYLINDER:
return 0
return max(MIN_DIAMETER, int(math.ceil(self.real_diameter)))
@diameter.setter
def diameter(self, diameter):
Package._validate_dimension("diameter", diameter, MAX_DIAMETER)
self.real_diameter = diameter
@property
def weight(self) -> int:
return int(math.ceil(self.real_weight))
@weight.setter
def weight(self, weight):
Package._validate_weight(weight, self.service)
self.real_weight = weight
@property
def volumetric_weight(self) -> int:
return Package.calculate_volumetric_weight(self.width, self.height, self.length)
@property
def posting_list_volumetric_weight(self) -> Decimal:
return Decimal("0.00")
@property
def posting_weight(self) -> int:
return Package.calculate_posting_weight(self.weight, self.volumetric_weight)
@property
def freight_package_type(self) -> int:
"""
        The SIGEP API and the FreightResponse API use different codes to identify package types:
        SIGEP | Freight | Type
        ------+---------+----------
          1   |    3    | Envelope
          2   |    1    | Box
          3   |    2    | Cylinder
"""
return self.freight_package_types[self.package_type]
@property
def is_mechanizable(self) -> bool:
if self.package_type == Package.TYPE_CYLINDER:
return False
return max(self.width, self.height, self.length) <= MAX_MECHANIZABLE_PACKAGE_SIZE
@property
def non_mechanizable_cost(self):
return Decimal("0.0") if self.is_mechanizable else NON_MECHANIZABLE_COST
@classmethod
def calculate_volumetric_weight(cls, width, height, length) -> int:
return int(math.ceil((width * height * length) / IATA_COEFICIENT))
@classmethod
def calculate_posting_weight(cls, weight, volumetric_weight) -> int:
if volumetric_weight <= VOLUMETRIC_WEIGHT_THRESHOLD:
return weight
return int(math.ceil(max(volumetric_weight, weight)))
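    # Worked example (illustrative): a 50 x 40 x 30 cm box has volumetric weight
    # ceil(50*40*30 / 6.0) = 10000 g; since that exceeds the 5000 g threshold,
    # calculate_posting_weight(2000, 10000) returns 10000 rather than the real
    # 2000 g weight.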
@classmethod
def calculate_insurance(
cls, per_unit_value: Union[int, float, Decimal], service: Union[Service, int, str], quantity: int = 1
) -> Decimal:
value = Decimal("0.00")
per_unit_value = Decimal(per_unit_value)
service_code = Service.get(service).code
insurance_value_threshold = INSURANCE_VALUE_THRESHOLDS.get(service_code, per_unit_value)
if per_unit_value > insurance_value_threshold:
value = (per_unit_value - insurance_value_threshold) * INSURANCE_PERCENTUAL_COST
return to_decimal(value * quantity)
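    # Illustration with hypothetical numbers (the real threshold and
    # INSURANCE_PERCENTUAL_COST come from .data): if the threshold for the
    # service were 18.00 and the percentual cost 0.01, a declared value of
    # 118.00 would add (118.00 - 18.00) * 0.01 = 1.00 of insurance per unit.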
@classmethod
def validate(
cls,
package_type: int,
width: Union[float, int] = 0,
height: Union[float, int] = 0,
length: Union[float, int] = 0,
diameter: Union[float, int] = 0,
service: Optional[Union[Service, str, int]] = None,
weight: Union[float, int] = 0,
) -> None:
width = int(math.ceil(width))
height = int(math.ceil(height))
length = int(math.ceil(length))
diameter = int(math.ceil(diameter))
weight = int(math.ceil(weight))
if service:
service = Service.get(service)
Package._validate_weight(weight, service)
if package_type == Package.TYPE_ENVELOPE:
if any([width, height, length, diameter]):
raise exceptions.InvalidPackageDimensionsError(
"Invalid dimensions: {}x{}x{}".format(width, height, length)
)
return
if package_type == Package.TYPE_BOX:
if diameter:
raise exceptions.InvalidPackageDimensionsError("Package does not use diameter: {}".format(diameter))
cls._validate_dimension("width", width, MAX_WIDTH)
cls._validate_dimension("height", height, MAX_HEIGHT)
cls._validate_dimension("length", length, MAX_LENGTH)
cls._validate_dimension("sum of dimensions", width + height + length, MAX_SIZE)
return
# Volume.TYPE_CYLINDER
if width or height:
raise exceptions.InvalidPackageDimensionsError(
"Cylinder does not use width/height: {}x{}".format(width, height)
)
cls._validate_dimension("length", length, MAX_CYLINDER_LENGTH)
cls._validate_dimension("diameter", diameter, MAX_DIAMETER)
cls._validate_dimension("cylinder size", length + 2 * diameter, MAX_CYLINDER_SIZE)
@classmethod
def _validate_dimension(cls, name, value, maximum):
msg = "Invalid {} (range 0~{}): {}".format(name, maximum, value)
if value <= 0:
raise exceptions.InvalidMinPackageDimensionsError(msg)
if value > maximum:
raise exceptions.InvalidMaxPackageDimensionsError(msg)
@classmethod
def _validate_weight(cls, weight, service: Optional[Union[Service, str, int]] = None) -> None:
if weight <= 0:
raise exceptions.InvalidMinPackageWeightError("Invalid weight {!r}g".format(weight))
if not service:
return
service = Service.get(service)
if service.max_weight is None:
return
if weight > service.max_weight:
message = "Max weight exceeded for service {!r}: {!r}g (max. {!r}g)".format(
                str(service), weight, service.max_weight
)
raise exceptions.InvalidMaxPackageWeightError(message)
class Receipt:
STATUS_UNPROCESSED = 0
STATUS_PROCESSED = 1
def __init__(self, number: Union[int, str], post_date: Union[str, date], value: Union[str, Decimal]) -> None:
self.number = int(number)
self.real_post_date = post_date
if not isinstance(post_date, date):
post_date = datetime.strptime(post_date, "%Y%m%d").date()
self.post_date = post_date
self.real_value = value
if not isinstance(value, Decimal):
value = to_decimal(value)
self.value = value
def __eq__(self, other):
return all(
[
isinstance(other, Receipt),
self.number == other.number,
self.post_date == other.post_date,
self.value == other.value,
]
)
def __repr__(self):
return (
"<Receipt("
"number={number}, "
"post_date={post_date}, "
"value={value}"
")>".format(number=self.number, post_date=self.post_date, value=self.value)
)
class ShippingLabel:
variable_data_identifier = 51 # Variable data identifier for package
invoice_template = "{!s}"
contract_number_template = "{!s}"
order_template = "{!s}"
service_name_template = "{!s}"
package_template = "{!s}/{!s}"
weight_template = "{!s}g"
receipt_template = (
"Recebedor: ___________________________________________<br/>"
"Assinatura: __________________ Documento: _______________"
)
sender_header = "DESTINATÁRIO"
carrier_logo = str(get_resource_path("carrier_logo_bw.png"))
receiver_data_template = (
"{receiver.label_name!s:>.50}<br/>"
"{receiver.label_address!s:>.95}<br/>"
"<b>{receiver.zip_code_display}</b> {receiver.city}/{receiver.state}"
)
sender_data_template = (
"<b>Remetente:</b> {sender.label_name!s:>.40}<br/>"
"{sender.label_address!s:>.95}<br/>"
"<b>{sender.zip_code_display}</b> {sender.city}-{sender.state}"
)
def __init__(
self,
posting_card: PostingCard,
sender: Address,
receiver: Address,
service: Union[Service, int],
tracking_code: Union[TrackingCode, str],
package: Package,
extra_services: Optional[List[Union[ExtraService, int]]] = None,
logo: Optional[Union[str, Image.Image]] = None,
order: Optional[str] = "",
invoice_number: Optional[str] = "",
invoice_series: Optional[str] = "",
invoice_type: Optional[str] = "",
value: Optional[Decimal] = Decimal("0.00"),
billing: Optional[Decimal] = Decimal("0.00"),
text: Optional[str] = "",
latitude: Optional[float] = 0.0,
longitude: Optional[float] = 0.0,
receipt: Optional[Receipt] = None,
) -> None:
if sender == receiver:
raise exceptions.InvalidAddressesError("Sender and receiver cannot be the same")
if logo is None:
logo = str(get_resource_path("default_logo.png"))
if isinstance(logo, str):
logo = Image.open(logo)
self.posting_card = posting_card
self.sender = sender
self.receiver = receiver
self.service = Service.get(service)
self.tracking_code = TrackingCode.create(tracking_code)
self.package = package
self.logo = logo
self.order = order
self.invoice_number = invoice_number
self.invoice_series = invoice_series
self.invoice_type = invoice_type
self.real_value = value
self.billing = billing
self.text = text
self.latitude = latitude
self.longitude = longitude
self.carrier_logo = Image.open(self.carrier_logo)
self.extra_services = self.service.default_extra_services[:]
if extra_services:
self.add_extra_services(extra_services)
self.posting_list = None # type: Optional[PostingList]
self.posting_list_group = 0
self.receipt = receipt
def __repr__(self):
return "<ShippingLabel tracking={!r}>".format(str(self.tracking_code))
def add_extra_services(self, extra_services: List[Union["ExtraService", int]]):
for extra_service in extra_services:
self.add_extra_service(extra_service)
def add_extra_service(self, extra_service: Union["ExtraService", int]):
extra_service = ExtraService.get(extra_service)
if extra_service.is_declared_value():
self.service.validate_declared_value(self.value)
self.extra_services.append(extra_service)
@property
def posted(self) -> bool:
return self.receipt is not None
@property
def value(self) -> Decimal:
return max(self.service.min_declared_value, self.real_value) # type: ignore
@property
def symbol(self):
return self.service.symbol_image
@property
def contract(self):
return self.posting_card.contract
@property
def posting_weight(self):
return self.package.posting_weight
def has_declared_value(self):
return any([ExtraService.get(EXTRA_SERVICE_VD_PAC) in self, ExtraService.get(EXTRA_SERVICE_VD_SEDEX) in self])
def get_order(self):
return self.order_template.format(self.order)
def get_invoice(self):
return self.invoice_template.format(self.invoice_number)
def get_contract_number(self):
return self.contract_number_template.format(self.posting_card.get_contract_number())
def get_service_name(self):
return self.service_name_template.format(self.service.display_name)
def get_package_sequence(self):
return self.package_template.format(*self.package.sequence)
def get_weight(self):
return self.weight_template.format(self.package.weight)
def get_symbol_filename(self, extension="gif"):
return self.service.get_symbol_filename(extension)
def get_tracking_code(self):
return self.tracking_code.splitted
def get_receiver_data(self):
return self.receiver_data_template.format(receiver=self.receiver)
def get_sender_data(self):
return self.sender_data_template.format(sender=self.sender)
def _get_extra_service_info(self) -> str:
extra_services_numbers = ["00" for _ in range(6)]
for i, extra_service in enumerate(self.extra_services[:6]):
extra_services_numbers[i] = "{:02d}".format(extra_service.number)
return "".join(extra_services_numbers)
def get_datamatrix_info(self):
receiver_number = self.receiver.number
if receiver_number.isnumeric():
receiver_number = receiver_number.rjust(5, "0")
else:
receiver_number = receiver_number.rjust(5)
parts = [
"{!s:>08}".format(self.receiver.zip_code),
"{}".format(self.receiver.zip_complement.rjust(5, "0")),
"{!s:>08}".format(self.sender.zip_code),
"{}".format(self.sender.zip_complement.rjust(5, "0")),
"{!s:>01}".format(self.receiver.zip_code.digit),
"{!s:>02}".format(self.variable_data_identifier),
"{!s:>13}".format(self.tracking_code),
"{!s:>12}".format(self._get_extra_service_info()),
"{!s:>010}".format(self.posting_card.number),
"{!s:>05}".format(self.service.code),
"{!s:>02}".format(self.posting_list_group),
"{}".format(receiver_number),
"{!s:<20}".format(self.receiver.complement_safe_display[:20].rjust(20, "0")),
"{!s:>05}".format(0 if self.value is None else int(self.value * 100)),
"{}".format(str(self.receiver.phone)[:12].rjust(12, "0") or "0" * 12),
"{:+010.6f}".format(self.latitude),
"{:+010.6f}".format(self.longitude),
"|",
"{!s:<30}".format(self.text[:30]),
]
return "".join(parts)
def __contains__(self, extra_service: ExtraService):
return extra_service in self.extra_services
class PostingList:
def __init__(self, custom_id: int, logo: Optional[Union[str, Image.Image]] = None) -> None:
# will be filled by close_posting_list
self.number = None # type: Optional[int]
if logo is None:
logo = str(get_resource_path("carrier_logo.png"))
if isinstance(logo, str):
logo = Image.open(logo)
self.logo = logo
self.custom_id = custom_id
self.shipping_labels = {} # type: Dict[str, ShippingLabel]
# filled by the first shipping label
self.initial_shipping_label = None # type: Optional[ShippingLabel]
self.posting_card = None # type: Optional[PostingCard]
self.contract = None # type: Optional[Contract]
self.sender = None # type: Optional[Address]
def add_shipping_label(self, shipping_label: ShippingLabel):
if not self.initial_shipping_label:
self.initial_shipping_label = shipping_label
self.posting_card = shipping_label.posting_card
self.contract = shipping_label.contract
self.sender = shipping_label.sender
if shipping_label.tracking_code.short in self.shipping_labels:
raise exceptions.PostingListError("Shipping label {!r} already in posting list".format(shipping_label))
if shipping_label.posting_card != self.posting_card:
raise exceptions.PostingListError(
"Invalid posting card: {} != {}".format(shipping_label.posting_card, self.posting_card)
)
self.shipping_labels[shipping_label.tracking_code.short] = shipping_label
shipping_label.posting_list = self
def get_tracking_codes(self):
return list(self.shipping_labels.keys())
def close_with_id(self, number: int):
self.number = number
@property
def closed(self):
return self.number is not None
class PostalUnit:
def __init__(self, code: str, description: str) -> None:
self.code = code
self.description = description
class PostInfo:
def __init__(self, postal_unit: PostalUnit, posting_list: PostingList, value: Union[Decimal, float, str]) -> None:
self.postal_unit = postal_unit
self.posting_list = posting_list
self.real_value = value
if not isinstance(value, Decimal):
value = to_decimal(value)
self.value = value
def __repr__(self):
return (
"<PostInfo("
"postal_unit={self.postal_unit}, "
"posting_list={self.posting_list}, "
"value={self.value}"
")>".format(self=self)
)
class FreightResponse:
restricted_address_error_code = (
FREIGHT_ERROR_INITIAL_ZIPCODE_RESTRICTED,
FREIGHT_ERROR_FINAL_ZIPCODE_RESTRICTED,
FREIGHT_ERROR_INITIAL_AND_FINAL_ZIPCODE_RESTRICTED,
)
def __init__(
self,
service: Union[Service, int],
delivery_time: Union[int, timedelta],
value: Union[Decimal, float, int, str],
declared_value: Union[Decimal, float, int, str] = 0.00,
mp_value: Union[Decimal, float, int, str] = 0.00,
ar_value: Union[Decimal, float, int, str] = 0.00,
saturday: bool = False,
home: bool = False,
error_code: int = 0,
error_message: str = "",
) -> None:
self.service = Service.get(service)
if not isinstance(delivery_time, timedelta):
delivery_time = timedelta(days=delivery_time)
self.delivery_time = delivery_time
if not isinstance(value, Decimal):
value = to_decimal(value)
self.value = value
if not isinstance(declared_value, Decimal):
declared_value = to_decimal(declared_value)
self.declared_value = declared_value
if not isinstance(mp_value, Decimal):
mp_value = to_decimal(mp_value)
self.mp_value = mp_value
if not isinstance(ar_value, Decimal):
ar_value = to_decimal(ar_value)
self.ar_value = ar_value
if not isinstance(saturday, bool):
saturday = saturday == "S"
self.saturday = saturday
if not isinstance(home, bool):
home = home == "S"
self.home = home
self.error_code = error_code
self.error_message = error_message
@property
def total(self) -> Decimal:
return self.value + self.declared_value + self.ar_value + self.mp_value
def is_error(self):
return self.error_code != 0
def is_restricted_address(self):
return self.error_code in self.restricted_address_error_code
|
py | b4016940b8623f8478f6a9bea06f6acae03e093d | import sqlite3
import time
import datetime
import logging
logging.basicConfig(level=logging.INFO)
def get_logger():
""" Get named logger """
return logging.getLogger(__name__)
def get_db_name():
return 'recv_db.db'
def dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
def connect():
conn = sqlite3.connect(get_db_name())
conn.row_factory = dict_factory
return conn.cursor(), conn
def close(conn):
conn.commit()
conn.close()
def create_dbs():
c, conn = connect()
c.execute('''CREATE TABLE rec_account (
rec_acc text,
pool_account_id text,
user_data text,
create_root_acc text,
acc_idx int,
create_wallet_id text,
created_time int,
status text,
updated_time int
)''')
get_logger().info("DB table rec_account created")
close(conn)
def upgrade1():
# add columns create_root_acc, acc_idx. Copy the table for proper column order
c, conn = connect()
get_logger().info("Upgrading table rec_account")
c.execute("ALTER TABLE rec_account RENAME TO rec_account_old;")
c.execute("CREATE TABLE rec_account (rec_acc text, pool_account_id text, user_data text, create_root_acc text, acc_idx int, create_wallet_id text, created_time int, status text, updated_time int);")
c.execute("INSERT INTO rec_account SELECT rec_acc, pool_account_id, user_data, '', -1, create_wallet_id, created_time, status, updated_time FROM rec_account_old ORDER BY created_time ASC;")
c.execute("DROP TABLE rec_account_old;")
get_logger().info("Upgrade complete")
close(conn)
def add_new_rec_account(rec_acc, pool_account_id, user_data, root_acc, acc_idx, wallet_id):
c, conn = connect()
now = str(int(time.time()))
c.execute("INSERT INTO rec_account VALUES ('" +
str(rec_acc) + "', '" +
str(pool_account_id) + "', '" +
str(user_data) + "', '" +
str(root_acc) + "', '" +
str(acc_idx) + "', '" +
str(wallet_id) + "', '" +
now + "', 'ACTIVE', '" +
now + "');")
get_logger().info("Inserted into table rec_account, " + str(rec_acc))
close(conn)
def get_all_accounts():
c, conn = connect()
c.execute("SELECT * FROM rec_account;")
ret = c.fetchall()
close(conn)
return ret
def get_account(account):
c, conn = connect()
c.execute("SELECT * FROM rec_account WHERE rec_acc='" + str(account) + "';")
ret = c.fetchall()
close(conn)
return ret
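# The queries above build SQL by string concatenation.  sqlite3 also supports
# parameterized queries ('?' placeholders), which avoid quoting problems; a
# sketch of an equivalent lookup (illustrative, not part of the original API):
def get_account_param(account):
    c, conn = connect()
    c.execute("SELECT * FROM rec_account WHERE rec_acc=?", (str(account),))
    ret = c.fetchall()
    close(conn)
    return ret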
|
py | b4016a46b46a6ac56042e020ba3cce2efb2a73d3 | # File Name : BBS_Rand.py
# Description : BBS random generator
# Author : Ganyuan Cao
import random
# check whether p satisfies the Blum condition p % 4 == 3 (primality itself is not checked here)
def blumprime(p):
if p % 4 == 3:
return 1
else:
return 0
# determine prime p,q
# To have larger prime, adjust numBits
def findPrime(numBits=8):
candidate = 1
# check if candidate is a blum prime
flag = blumprime(candidate)
    # iterate until a candidate satisfying the Blum condition is found
while flag != 1:
candidate = random.getrandbits(numBits)
flag = blumprime(candidate)
return candidate
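# Note: blumprime() only tests the congruence p % 4 == 3; a true Blum prime
# must also be prime.  A minimal trial-division check that could be combined
# with the congruence test (an assumed addition, not part of the original):
def _is_blum_prime(p):
    if p < 3 or p % 4 != 3:
        return False
    d = 2
    while d * d <= p:
        if p % d == 0:
            return False
        d += 1
    return True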
## parity of x_i
def parity(x):
if x % 2 == 0:
return 0
else:
return 1
# define the algorithm
def bbsAlgorithm(limit):
# n = pq where p,q are primes
p = findPrime()
q = findPrime()
n = p * q
print "n = pq =", p, "*", q, "=", n
# choose a seed s between (1, n-1)
s = random.randrange(1, n-1)
print "The random seed s =", s
# initialize the sequence with z_0 which is the parity of x_0
x_0 = s * s % n
z_0 = parity(x_0)
print "i = 0", ", x_i =", x_0, ", z_i =", z_0
# initialize the resulting sequence
rlt_seq = [z_0]
# Initialize the result, then keep adding z_i * 2^i to result
result = z_0
tmp = x_0
# begin iterating to obtain the sequence z_1, z_2, z_3 ....
for i in range(1, limit):
x_i = tmp * tmp % n
z_i = parity(x_i)
print "i =", i, ", x_i =", x_i, ", z_i =", z_i
tmp = x_i
expo = 2 ** i
result = result + z_i * expo
tmp_seq = [z_i]
rlt_seq = rlt_seq + tmp_seq
rlt_seq.reverse()
print "The resulting sequence is: ", rlt_seq
return result
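# Tiny worked trace (illustrative): with p = 7, q = 11 (both primes congruent
# to 3 mod 4), n = 77 and seed s = 3: x_0 = 9 (z_0 = 1), x_1 = 81 % 77 = 4
# (z_1 = 0), x_2 = 16 (z_2 = 0), x_3 = 256 % 77 = 25 (z_3 = 1),
# x_4 = 625 % 77 = 9 (z_4 = 1), giving the bit sequence 1, 0, 0, 1, 1.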
def main():
print "This program illustrates the algorithm of Blum Blum Shub pseudorandom number generator"
print "--------------------------------------------------------------------------------------"
print "This program will prompt you to enter a round number i.e., the digit of the binary sequence to be generated"
print "------------------------------------------------------------------------------------------------------"
lim = input("Enter the round number: ")
num = bbsAlgorithm(lim)
print "Generated random integer is:", num
if __name__ == "__main__":
main()
|
py | b4016b14ebcaddb0bf448d41ad1ec5b29a0af973 | import os.path
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
def ABS_PATH(*path):
return os.path.abspath(os.path.join(ROOT_DIR, *path))
DEBUG = False
TEMPLATE_DEBUG = DEBUG
ADMINS = (
('Senko Rasic', '[email protected]'),
)
MANAGERS = ADMINS
import dj_database_url
DATABASES = {
'default': dj_database_url.config(default='sqlite://:memory:')
}
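# dj_database_url reads the DATABASE_URL environment variable when present,
# e.g. DATABASE_URL=postgres://user:password@localhost:5432/dbname, and falls
# back to the in-memory SQLite default given above.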
ALLOWED_HOSTS = []
TIME_ZONE = 'Europe/Zagreb'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = False
USE_L10N = False
USE_TZ = False
MEDIA_ROOT = ABS_PATH('media')
MEDIA_URL = '/media/'
STATIC_ROOT = ABS_PATH('static')
STATIC_URL = '/static/'
STATICFILES_DIRS = (
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
SECRET_KEY = 'o9o1br26s7bzr*^o56ck=h=89zeo$yv3i4b7)y0&=d_%xl#@nc'
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'email_checker.urls'
WSGI_APPLICATION = 'email_checker.wsgi.application'
TEMPLATE_DIRS = (
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'checker'
)
SESSION_SERIALIZER = 'django.contrib.sessions.serializers.JSONSerializer'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
py | b4016b663ee8143ccdd58886d71d2a179962466c | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
"""A utility module with a factory of standard QueryBuilder instances for Calculation nodes."""
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from aiida.common.lang import classproperty
from aiida.cmdline.utils.query.mapping import CalculationProjectionMapper
class CalculationQueryBuilder(object): # pylint: disable=useless-object-inheritance
"""Utility class to construct a QueryBuilder instance for Calculation nodes and project the query set."""
# This tuple serves to mark compound projections that cannot explicitly be projected in the QueryBuilder, but will
# have to be manually projected from composing its individual projection constituents
_compound_projections = ('state',)
_default_projections = ('pk', 'ctime', 'process_label', 'state', 'process_status')
_valid_projections = ('pk', 'uuid', 'ctime', 'mtime', 'state', 'process_state', 'process_status', 'exit_status',
'sealed', 'process_label', 'label', 'description', 'node_type', 'paused', 'process_type',
'job_state', 'scheduler_state')
def __init__(self, mapper=None):
if mapper is None:
self._mapper = CalculationProjectionMapper(self._valid_projections)
else:
self._mapper = mapper
@property
def mapper(self):
return self._mapper
@classproperty
def default_projections(self):
return self._default_projections
@classproperty
def valid_projections(self):
return self._valid_projections
def get_filters(self,
all_entries=False,
process_state=None,
process_label=None,
exit_status=None,
failed=False,
node_types=None):
"""
Return a set of QueryBuilder filters based on typical command line options.
:param node_types: a tuple of node classes to filter for (must be sub classes of Calculation)
:param all_entries: boolean to negate filtering for process state
:param process_state: filter for this process state attribute
:param process_label: filter for this process label attribute
:param exit_status: filter for this exit status
:param failed: boolean to filter only failed processes
:return: dictionary of filters suitable for a QueryBuilder.append() call
"""
# pylint: disable=too-many-arguments
from aiida.engine import ProcessState
exit_status_attribute = self.mapper.get_attribute('exit_status')
process_label_attribute = self.mapper.get_attribute('process_label')
process_state_attribute = self.mapper.get_attribute('process_state')
filters = {}
if node_types is not None:
filters['or'] = []
for node_class in node_types:
filters['or'].append({'type': node_class.class_node_type})
if process_state and not all_entries:
filters[process_state_attribute] = {'in': process_state}
if process_label is not None:
filters[process_label_attribute] = process_label
if failed:
filters[process_state_attribute] = {'==': ProcessState.FINISHED.value}
filters[exit_status_attribute] = {'>': 0}
if exit_status is not None:
filters[process_state_attribute] = {'==': ProcessState.FINISHED.value}
filters[exit_status_attribute] = {'==': exit_status}
return filters
def get_query_set(self, relationships=None, filters=None, order_by=None, past_days=None, limit=None):
"""
Return the query set of calculations for the given filters and query parameters
:param relationships: a mapping of relationships to join on, e.g. {'with_node': Group} to join on a Group. The
keys in this dictionary should be the keyword used in the `append` method of the `QueryBuilder` to join the
entity on that is defined as the value.
:param filters: rules to filter query results with
:param order_by: order the query set by this criterion
:param past_days: only include entries from the last past days
:param limit: limit the query set to this number of entries
:return: the query set, a list of dictionaries
"""
import datetime
from aiida import orm
from aiida.common import timezone
# Define the list of projections for the QueryBuilder, which are all valid minus the compound projections
projected_attributes = [
self.mapper.get_attribute(projection)
for projection in self._valid_projections
if projection not in self._compound_projections
]
if filters is None:
filters = {}
if past_days is not None:
filters['ctime'] = {'>': timezone.now() - datetime.timedelta(days=past_days)}
builder = orm.QueryBuilder()
builder.append(cls=orm.ProcessNode, filters=filters, project=projected_attributes, tag='process')
if relationships is not None:
for tag, entity in relationships.items():
builder.append(cls=type(entity), filters={'id': entity.id}, **{tag: 'process'})
if order_by is not None:
builder.order_by({'process': order_by})
else:
builder.order_by({'process': {'ctime': 'asc'}})
if limit is not None:
builder.limit(limit)
return builder.iterdict()
def get_projected(self, query_set, projections):
"""
Project the query set for the given set of projections
"""
header = [self.mapper.get_label(projection) for projection in projections]
result = [header]
for query_result in query_set:
result_row = [self.mapper.format(projection, query_result['process']) for projection in projections]
result.append(result_row)
return result
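# Typical use of this builder (a sketch based only on the methods above):
#   qb = CalculationQueryBuilder()
#   filters = qb.get_filters(process_state=('running',))
#   query_set = qb.get_query_set(filters=filters, past_days=1, limit=10)
#   table = qb.get_projected(query_set, qb.default_projections)
# where `table` is a list of rows whose first entry holds the column labels.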
|
py | b4016fe03efbebf91b9d93d09f2813003602c2bc | import pygame
import pygame.gfxdraw
import random
import enum
from copy import deepcopy
# Graphical size settings
SQUARE_SIZE = 100
DISC_SIZE_RATIO = 0.8
# Colours
BLUE_COLOR = (23, 93, 222)
YELLOW_COLOR = (255, 240, 0)
RED_COLOR = (255, 0, 0)
BACKGROUND_COLOR = (19, 72, 162)
BLACK_COLOR = (0, 0, 0)
WHITE_COLOR = (255, 255, 255)
class Event(enum.Enum):
PIECE_PLACED = 1
GAME_WON = 2
GAME_RESET = 3
class Observer:
def __init__(self):
pass
def update(self, obj, event, *argv):
pass
class Observable:
def __init__(self):
self._observers = []
def notify(self, event, *argv):
for obs in self._observers:
obs.update(self, event, *argv)
def add_observer(self, obs):
self._observers.append(obs)
def remove_observer(self, obs):
if obs in self._observers:
self._observers.remove(obs)
class Connect4Game(Observable):
def __init__(self, rows=6, cols=7):
super().__init__()
self._rows = rows
self._cols = cols
self._board = None
self._turn = None
self._won = None
self.reset_game()
def reset_game(self):
"""
Resets the game state (board and variables)
"""
self._board = [[0 for _ in range(self._rows)] for _ in range(self._cols)]
self._turn = random.randint(1, 2)
self._won = None
self.notify(Event.GAME_RESET)
def place(self, c):
"""
Tries to place the playing colour on the cth column
:param c: column to place on
:return: position of placed colour or None if not placeable
"""
for r in range(self._rows):
if self._board[c][r] == 0:
self._board[c][r] = self._turn
if self._turn == 1:
self._turn = 2
else:
self._turn = 1
self.notify(Event.PIECE_PLACED, (c, r))
self.check_win((c, r))
return c, r
return None
def check_win(self, pos):
"""
Checks for win/draw from newly added disc
:param pos: position from which to check the win
:return: player number if a win occurs, 0 if a draw occurs, None otherwise
"""
c = pos[0]
r = pos[1]
player = self._board[c][r]
min_col = max(c-3, 0)
max_col = min(c+3, self._cols-1)
min_row = max(r - 3, 0)
max_row = min(r + 3, self._rows - 1)
# Horizontal check
count = 0
for ci in range(min_col, max_col + 1):
if self._board[ci][r] == player:
count += 1
else:
count = 0
if count == 4:
self._won = player
self.notify(Event.GAME_WON, self._won)
return self._won
# Vertical check
count = 0
for ri in range(min_row, max_row + 1):
if self._board[c][ri] == player:
count += 1
else:
count = 0
if count == 4:
self._won = player
self.notify(Event.GAME_WON, self._won)
return self._won
count1 = 0
count2 = 0
# Diagonal check
for i in range(-3, 4):
# bottom-left -> top-right
if 0 <= c + i < self._cols and 0 <= r + i < self._rows:
if self._board[c + i][r + i] == player:
count1 += 1
else:
count1 = 0
if count1 == 4:
self._won = player
self.notify(Event.GAME_WON, self._won)
return self._won
            # bottom-right -> top-left
if 0 <= c + i < self._cols and 0 <= r - i < self._rows:
if self._board[c + i][r - i] == player:
count2 += 1
else:
count2 = 0
if count2 == 4:
self._won = player
self.notify(Event.GAME_WON, self._won)
return self._won
# Draw check
if sum([x.count(0) for x in self._board]) == 0:
self._won = 0
self.notify(Event.GAME_WON, self._won)
return self._won
self._won = None
return self._won
def get_cols(self):
"""
:return: The number of columns of the game
"""
return self._cols
def get_rows(self):
"""
:return: The number of rows of the game
"""
return self._rows
def get_win(self):
"""
:return: If one play won or not
"""
return self._won
def get_turn(self):
"""
:return: To which player is the turn
"""
return self._turn
def get_board(self):
"""
:return: A copy of the game board
"""
return self._board.copy()
def board_at(self, c, r):
"""
:param: c, the column
:param: r, the row
:return: What value is held at column c, row r in the board
"""
return self._board[c][r]
def copy_state(self):
"""
Use this instead of the copy() method. Useful as we don't want our graphical interface (viewed as an Observer in this class)
to be updated when we are playing moves in our tree search.
"""
        # Temporarily remove the observers so they are not carried into the deep copy
temporary_observers = self._observers
self._observers = []
new_one = deepcopy(self)
new_one._observers.clear() # Clear observers, such as GUI in our case.
# Reassign the observers after deepcopy
self._observers = temporary_observers
return new_one
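# Sketch of how copy_state() supports move simulation (e.g. in a tree search)
# without notifying GUI observers attached to the real game:
#   state = game.copy_state()
#   state.place(3)        # play a move on the detached copy only
#   state.get_win()       # inspect the simulated outcome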
class Connect4Viewer(Observer):
def __init__(self, game):
super(Observer, self).__init__()
assert game is not None
self._game = game
self._game.add_observer(self)
self._screen = None
self._font = None
def initialize(self):
"""
Initialises the view window
"""
pygame.init()
icon = pygame.image.load("icon.png")
pygame.display.set_icon(icon)
pygame.display.set_caption("Connect Four")
self._font = pygame.font.SysFont(None, 80)
self._screen = pygame.display.set_mode([self._game.get_cols() * SQUARE_SIZE, self._game.get_rows() * SQUARE_SIZE])
self.draw_board()
def draw_board(self):
"""
Draws board[c][r] with c = 0 and r = 0 being bottom left
0 = empty (background colour)
1 = yellow
2 = red
"""
self._screen.fill(BLUE_COLOR)
for r in range(self._game.get_rows()):
for c in range(self._game.get_cols()):
colour = BACKGROUND_COLOR
if self._game.board_at(c, r) == 1:
colour = YELLOW_COLOR
if self._game.board_at(c, r) == 2:
colour = RED_COLOR
# Anti-aliased circle drawing
pygame.gfxdraw.aacircle(self._screen, c * SQUARE_SIZE + SQUARE_SIZE // 2,
self._game.get_rows() * SQUARE_SIZE - r * SQUARE_SIZE - SQUARE_SIZE // 2,
int(DISC_SIZE_RATIO * SQUARE_SIZE / 2),
colour)
pygame.gfxdraw.filled_circle(self._screen, c * SQUARE_SIZE + SQUARE_SIZE // 2,
self._game.get_rows() * SQUARE_SIZE - r * SQUARE_SIZE - SQUARE_SIZE // 2,
int(DISC_SIZE_RATIO * SQUARE_SIZE / 2),
colour)
pygame.display.update()
def update(self, obj, event, *argv):
"""
Called when notified. Updates the view.
"""
if event == Event.GAME_WON:
won = argv[0]
self.draw_win_message(won)
elif event == Event.GAME_RESET:
self.draw_board()
elif event == Event.PIECE_PLACED:
self.draw_board()
def draw_win_message(self, won):
"""
Displays win message on top of the board
"""
if won == 1:
img = self._font.render("Yellow won", True, BLACK_COLOR, YELLOW_COLOR)
elif won == 2:
img = self._font.render("Red won", True, WHITE_COLOR, RED_COLOR)
else:
img = self._font.render("Draw", True, WHITE_COLOR, BLUE_COLOR)
rect = img.get_rect()
rect.center = ((self._game.get_cols() * SQUARE_SIZE) // 2, (self._game.get_rows() * SQUARE_SIZE) // 2)
self._screen.blit(img, rect)
pygame.display.update()
if __name__ == '__main__':
game = Connect4Game()
game.reset_game()
view = Connect4Viewer(game=game)
view.initialize()
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == pygame.MOUSEBUTTONUP and event.button == 1:
if game.get_win() is None:
game.place(pygame.mouse.get_pos()[0] // SQUARE_SIZE)
else:
game.reset_game()
pygame.quit()
|
py | b4016ff19965a0d90ad996ffbf72ed95a0835062 | from __future__ import print_function
from __future__ import unicode_literals
import re
import time
from netmiko.cisco_base_connection import CiscoSSHConnection
class HPProcurveSSH(CiscoSSHConnection):
def session_preparation(self):
"""
Prepare the session after the connection has been established.
Procurve uses - 'Press any key to continue'
"""
delay_factor = self.select_delay_factor(delay_factor=0)
time.sleep(2 * delay_factor)
self.write_channel("\n")
time.sleep(2 * delay_factor)
self.write_channel("\n")
time.sleep(2 * delay_factor)
# HP output contains VT100 escape codes
self.ansi_escape_codes = True
self.set_base_prompt()
self.disable_paging(command="\nno page\n")
self.set_terminal_width(command='terminal width 511')
def enable(self, cmd='enable', pattern='password', re_flags=re.IGNORECASE,
default_username='manager'):
"""Enter enable mode"""
debug = False
output = self.send_command_timing(cmd)
if 'username' in output.lower():
output += self.send_command_timing(default_username)
if 'password' in output.lower():
output += self.send_command_timing(self.secret)
if debug:
print(output)
self.clear_buffer()
return output
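# A minimal usage sketch (host and credentials are placeholders; assumes netmiko
# is installed). This class is normally selected through netmiko's ConnectHandler
# by passing device_type='hp_procurve':
#
#     from netmiko import ConnectHandler
#
#     conn = ConnectHandler(device_type='hp_procurve', host='192.0.2.10',
#                           username='manager', password='password', secret='password')
#     conn.enable()                        # uses the enable() override above
#     print(conn.send_command('show system'))
#     conn.disconnect()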
|
py | b4017029ff5301981821d6bc07ed6bbf09bdea12 | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
# -----------------------------------------------------------------------------
try:
basestring # Python 3
except NameError:
basestring = str # Python 2
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class bybit(Exchange):
def describe(self):
return self.deep_extend(super(bybit, self).describe(), {
'id': 'bybit',
'name': 'Bybit',
'countries': ['VG'], # British Virgin Islands
'version': 'v2',
'userAgent': None,
'rateLimit': 100,
'hostname': 'bybit.com', # bybit.com, bytick.com
'has': {
'margin': False,
'swap': True,
'future': True,
'cancelAllOrders': True,
'cancelOrder': True,
'CORS': True,
'createOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRates': False,
'fetchClosedOrders': True,
'fetchDeposits': True,
'fetchFundingRate': True,
'fetchFundingRateHistory': False,
'fetchIndexOHLCV': True,
'fetchLedger': True,
'fetchMarkets': True,
'fetchMarkOHLCV': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchOrderTrades': True,
'fetchPositions': True,
'fetchPremiumIndexOHLCV': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTransactions': None,
'fetchWithdrawals': True,
'setLeverage': True,
'setMarginMode': True,
},
'timeframes': {
'1m': '1',
'3m': '3',
'5m': '5',
'15m': '15',
'30m': '30',
'1h': '60',
'2h': '120',
'4h': '240',
'6h': '360',
'12h': '720',
'1d': 'D',
'1w': 'W',
'1M': 'M',
'1y': 'Y',
},
'urls': {
'test': {
'spot': 'https://api-testnet.{hostname}',
'futures': 'https://api-testnet.{hostname}',
'v2': 'https://api-testnet.{hostname}',
'public': 'https://api-testnet.{hostname}',
'private': 'https://api-testnet.{hostname}',
},
'logo': 'https://user-images.githubusercontent.com/51840849/76547799-daff5b80-649e-11ea-87fb-3be9bac08954.jpg',
'api': {
'spot': 'https://api.{hostname}',
'futures': 'https://api.{hostname}',
'v2': 'https://api.{hostname}',
'public': 'https://api.{hostname}',
'private': 'https://api.{hostname}',
},
'www': 'https://www.bybit.com',
'doc': [
'https://bybit-exchange.github.io/docs/inverse/',
'https://bybit-exchange.github.io/docs/linear/',
'https://github.com/bybit-exchange',
],
'fees': 'https://help.bybit.com/hc/en-us/articles/360039261154',
'referral': 'https://www.bybit.com/app/register?ref=X7Prm',
},
'api': {
'spot': {
'public': {
'get': [
'symbols',
],
},
'quote': {
'get': [
'depth',
'depth/merged',
'trades',
'kline',
'ticker/24hr',
'ticker/price',
'ticker/book_ticker',
],
},
'private': {
'get': [
'order',
'open-orders',
'history-orders',
'myTrades',
'account',
'time',
],
'post': [
'order',
],
'delete': [
'order',
'order/fast',
],
},
'order': {
'delete': [
'batch-cancel',
'batch-fast-cancel',
'batch-cancel-by-ids',
],
},
},
'futures': {
'private': {
'get': [
'order/list',
'order',
'stop-order/list',
'stop-order',
'position/list',
'execution/list',
'trade/closed-pnl/list',
],
'post': [
'order/create',
'order/cancel',
'order/cancelAll',
'order/replace',
'stop-order/create',
'stop-order/cancel',
'stop-order/cancelAll',
'stop-order/replace',
'position/change-position-margin',
'position/trading-stop',
'position/leverage/save',
'position/switch-mode',
'position/switch-isolated',
'position/risk-limit',
],
},
},
'v2': {
'public': {
'get': [
'orderBook/L2',
'kline/list',
'tickers',
'trading-records',
'symbols',
'liq-records',
'mark-price-kline',
'index-price-kline',
'premium-index-kline',
'open-interest',
'big-deal',
'account-ratio',
'time',
'announcement',
'funding/prev-funding-rate',
'risk-limit/list',
],
},
'private': {
'get': [
'order/list',
'order',
'stop-order/list',
'stop-order',
'position/list',
'execution/list',
'trade/closed-pnl/list',
'funding/prev-funding-rate',
'funding/prev-funding',
'funding/predicted-funding',
'account/api-key',
'account/lcp',
'wallet/balance',
'wallet/fund/records',
'wallet/withdraw/list',
'exchange-order/list',
],
'post': [
'order/create',
'order/cancel',
'order/cancelAll',
'order/replace',
'stop-order/create',
'stop-order/cancel',
'stop-order/cancelAll',
'stop-order/replace',
'position/change-position-margin',
'position/trading-stop',
'position/leverage/save',
'position/switch-mode',
'position/switch-isolated',
'position/risk-limit',
],
},
},
'public': {
'linear': {
'get': [
'kline',
'recent-trading-records',
'funding/prev-funding-rate',
'mark-price-kline',
'index-price-kline',
'premium-index-kline',
'risk-limit',
],
},
},
'private': {
'linear': {
'get': [
'order/list',
'order/search',
'stop-order/list',
'stop-order/search',
'position/list',
'trade/execution/list',
'trade/closed-pnl/list',
'funding/predicted-funding',
'funding/prev-funding',
],
'post': [
'order/create',
'order/cancel',
'order/cancel-all',
'order/replace',
'stop-order/create',
'stop-order/cancel',
'stop-order/cancel-all',
'stop-order/replace',
'position/set-auto-add-margin',
'position/switch-isolated',
'tpsl/switch-mode',
'position/add-margin',
'position/set-leverage',
'position/trading-stop',
'position/set-risk',
],
},
},
},
'httpExceptions': {
'403': RateLimitExceeded, # Forbidden -- You request too many times
},
'exceptions': {
'exact': {
'-2015': AuthenticationError, # Invalid API-key, IP, or permissions for action.
'10001': BadRequest, # parameter error
'10002': InvalidNonce, # request expired, check your timestamp and recv_window
'10003': AuthenticationError, # Invalid apikey
'10004': AuthenticationError, # invalid sign
'10005': PermissionDenied, # permission denied for current apikey
'10006': RateLimitExceeded, # too many requests
'10007': AuthenticationError, # api_key not found in your request parameters
'10010': PermissionDenied, # request ip mismatch
'10017': BadRequest, # request path not found or request method is invalid
'10018': RateLimitExceeded, # exceed ip rate limit
'20001': OrderNotFound, # Order not exists
'20003': InvalidOrder, # missing parameter side
'20004': InvalidOrder, # invalid parameter side
'20005': InvalidOrder, # missing parameter symbol
'20006': InvalidOrder, # invalid parameter symbol
'20007': InvalidOrder, # missing parameter order_type
'20008': InvalidOrder, # invalid parameter order_type
'20009': InvalidOrder, # missing parameter qty
'20010': InvalidOrder, # qty must be greater than 0
'20011': InvalidOrder, # qty must be an integer
'20012': InvalidOrder, # qty must be greater than zero and less than 1 million
'20013': InvalidOrder, # missing parameter price
'20014': InvalidOrder, # price must be greater than 0
'20015': InvalidOrder, # missing parameter time_in_force
'20016': InvalidOrder, # invalid value for parameter time_in_force
'20017': InvalidOrder, # missing parameter order_id
'20018': InvalidOrder, # invalid date format
'20019': InvalidOrder, # missing parameter stop_px
'20020': InvalidOrder, # missing parameter base_price
'20021': InvalidOrder, # missing parameter stop_order_id
'20022': BadRequest, # missing parameter leverage
'20023': BadRequest, # leverage must be a number
'20031': BadRequest, # leverage must be greater than zero
'20070': BadRequest, # missing parameter margin
'20071': BadRequest, # margin must be greater than zero
'20084': BadRequest, # order_id or order_link_id is required
'30001': BadRequest, # order_link_id is repeated
'30003': InvalidOrder, # qty must be more than the minimum allowed
'30004': InvalidOrder, # qty must be less than the maximum allowed
'30005': InvalidOrder, # price exceeds maximum allowed
'30007': InvalidOrder, # price exceeds minimum allowed
'30008': InvalidOrder, # invalid order_type
'30009': ExchangeError, # no position found
'30010': InsufficientFunds, # insufficient wallet balance
'30011': PermissionDenied, # operation not allowed as position is undergoing liquidation
'30012': PermissionDenied, # operation not allowed as position is undergoing ADL
'30013': PermissionDenied, # position is in liq or adl status
'30014': InvalidOrder, # invalid closing order, qty should not greater than size
'30015': InvalidOrder, # invalid closing order, side should be opposite
'30016': ExchangeError, # TS and SL must be cancelled first while closing position
'30017': InvalidOrder, # estimated fill price cannot be lower than current Buy liq_price
'30018': InvalidOrder, # estimated fill price cannot be higher than current Sell liq_price
'30019': InvalidOrder, # cannot attach TP/SL params for non-zero position when placing non-opening position order
'30020': InvalidOrder, # position already has TP/SL params
'30021': InvalidOrder, # cannot afford estimated position_margin
'30022': InvalidOrder, # estimated buy liq_price cannot be higher than current mark_price
'30023': InvalidOrder, # estimated sell liq_price cannot be lower than current mark_price
'30024': InvalidOrder, # cannot set TP/SL/TS for zero-position
'30025': InvalidOrder, # trigger price should bigger than 10% of last price
'30026': InvalidOrder, # price too high
'30027': InvalidOrder, # price set for Take profit should be higher than Last Traded Price
'30028': InvalidOrder, # price set for Stop loss should be between Liquidation price and Last Traded Price
'30029': InvalidOrder, # price set for Stop loss should be between Last Traded Price and Liquidation price
'30030': InvalidOrder, # price set for Take profit should be lower than Last Traded Price
'30031': InsufficientFunds, # insufficient available balance for order cost
'30032': InvalidOrder, # order has been filled or cancelled
'30033': RateLimitExceeded, # The number of stop orders exceeds maximum limit allowed
'30034': OrderNotFound, # no order found
'30035': RateLimitExceeded, # too fast to cancel
'30036': ExchangeError, # the expected position value after order execution exceeds the current risk limit
'30037': InvalidOrder, # order already cancelled
'30041': ExchangeError, # no position found
'30042': InsufficientFunds, # insufficient wallet balance
'30043': InvalidOrder, # operation not allowed as position is undergoing liquidation
'30044': InvalidOrder, # operation not allowed as position is undergoing ADL
'30045': InvalidOrder, # operation not allowed as position is not normal status
'30049': InsufficientFunds, # insufficient available balance
'30050': ExchangeError, # any adjustments made will trigger immediate liquidation
'30051': ExchangeError, # due to risk limit, cannot adjust leverage
'30052': ExchangeError, # leverage can not less than 1
'30054': ExchangeError, # position margin is invalid
'30057': ExchangeError, # requested quantity of contracts exceeds risk limit
'30063': ExchangeError, # reduce-only rule not satisfied
'30067': InsufficientFunds, # insufficient available balance
'30068': ExchangeError, # exit value must be positive
'30074': InvalidOrder, # can't create the stop order, because you expect the order will be triggered when the LastPrice(or IndexPrice、 MarkPrice, determined by trigger_by) is raising to stop_px, but the LastPrice(or IndexPrice、 MarkPrice) is already equal to or greater than stop_px, please adjust base_price or stop_px
'30075': InvalidOrder, # can't create the stop order, because you expect the order will be triggered when the LastPrice(or IndexPrice、 MarkPrice, determined by trigger_by) is falling to stop_px, but the LastPrice(or IndexPrice、 MarkPrice) is already equal to or less than stop_px, please adjust base_price or stop_px
'33004': AuthenticationError, # apikey already expired
'34026': ExchangeError, # the limit is no change
},
'broad': {
'unknown orderInfo': OrderNotFound, # {"ret_code":-1,"ret_msg":"unknown orderInfo","ext_code":"","ext_info":"","result":null,"time_now":"1584030414.005545","rate_limit_status":99,"rate_limit_reset_ms":1584030414003,"rate_limit":100}
'invalid api_key': AuthenticationError, # {"ret_code":10003,"ret_msg":"invalid api_key","ext_code":"","ext_info":"","result":null,"time_now":"1599547085.415797"}
},
},
'precisionMode': TICK_SIZE,
'options': {
'marketTypes': {
'BTC/USDT': 'linear',
'ETH/USDT': 'linear',
'BNB/USDT': 'linear',
'ADA/USDT': 'linear',
'DOGE/USDT': 'linear',
'XRP/USDT': 'linear',
'DOT/USDT': 'linear',
'UNI/USDT': 'linear',
'BCH/USDT': 'linear',
'LTC/USDT': 'linear',
'SOL/USDT': 'linear',
'LINK/USDT': 'linear',
'MATIC/USDT': 'linear',
'ETC/USDT': 'linear',
'FIL/USDT': 'linear',
'EOS/USDT': 'linear',
'AAVE/USDT': 'linear',
'XTZ/USDT': 'linear',
'SUSHI/USDT': 'linear',
'XEM/USDT': 'linear',
'BTC/USD': 'inverse',
'ETH/USD': 'inverse',
'EOS/USD': 'inverse',
'XRP/USD': 'inverse',
},
'defaultType': 'linear', # linear, inverse, futures
'code': 'BTC',
'cancelAllOrders': {
# 'method': 'v2PrivatePostOrderCancelAll', # v2PrivatePostStopOrderCancelAll
},
'recvWindow': 5 * 1000, # 5 sec default
'timeDifference': 0, # the difference between system clock and exchange server clock
'adjustForTimeDifference': False, # controls the adjustment logic upon instantiation
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'taker': 0.00075,
'maker': -0.00025,
},
'funding': {
'tierBased': False,
'percentage': False,
'withdraw': {},
'deposit': {},
},
},
})
def nonce(self):
return self.milliseconds() - self.options['timeDifference']
async def load_time_difference(self, params={}):
serverTime = await self.fetch_time(params)
after = self.milliseconds()
self.options['timeDifference'] = after - serverTime
return self.options['timeDifference']
async def fetch_time(self, params={}):
response = await self.v2PublicGetTime(params)
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: {},
# time_now: '1583933682.448826'
# }
#
return self.safe_timestamp(response, 'time_now')
async def fetch_markets(self, params={}):
if self.options['adjustForTimeDifference']:
await self.load_time_difference()
response = await self.v2PublicGetSymbols(params)
#
# {
# "ret_code":0,
# "ret_msg":"OK",
# "ext_code":"",
# "ext_info":"",
# "result":[
# {
# "name":"BTCUSD",
# "alias":"BTCUSD",
# "status":"Trading",
# "base_currency":"BTC",
# "quote_currency":"USD",
# "price_scale":2,
# "taker_fee":"0.00075",
# "maker_fee":"-0.00025",
# "leverage_filter":{"min_leverage":1,"max_leverage":100,"leverage_step":"0.01"},
# "price_filter":{"min_price":"0.5","max_price":"999999.5","tick_size":"0.5"},
# "lot_size_filter":{"max_trading_qty":1000000,"min_trading_qty":1,"qty_step":1}
# },
# {
# "name":"BTCUSDT",
# "alias":"BTCUSDT",
# "status":"Trading",
# "base_currency":"BTC",
# "quote_currency":"USDT",
# "price_scale":2,
# "taker_fee":"0.00075",
# "maker_fee":"-0.00025",
# "leverage_filter":{"min_leverage":1,"max_leverage":100,"leverage_step":"0.01"},
# "price_filter":{"min_price":"0.5","max_price":"999999.5","tick_size":"0.5"},
# "lot_size_filter":{"max_trading_qty":100,"min_trading_qty":0.001,"qty_step":0.001}
# },
# ],
# "time_now":"1610539664.818033"
# }
#
markets = self.safe_value(response, 'result', [])
options = self.safe_value(self.options, 'fetchMarkets', {})
linearQuoteCurrencies = self.safe_value(options, 'linear', {'USDT': True})
result = []
for i in range(0, len(markets)):
market = markets[i]
id = self.safe_string_2(market, 'name', 'symbol')
baseId = self.safe_string(market, 'base_currency')
quoteId = self.safe_string(market, 'quote_currency')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
linear = (quote in linearQuoteCurrencies)
inverse = not linear
symbol = base + '/' + quote
baseQuote = base + quote
type = 'swap'
if baseQuote != id:
symbol = id
type = 'futures'
lotSizeFilter = self.safe_value(market, 'lot_size_filter', {})
priceFilter = self.safe_value(market, 'price_filter', {})
precision = {
'amount': self.safe_number(lotSizeFilter, 'qty_step'),
'price': self.safe_number(priceFilter, 'tick_size'),
}
leverage = self.safe_value(market, 'leverage_filter', {})
status = self.safe_string(market, 'status')
active = None
if status is not None:
active = (status == 'Trading')
spot = (type == 'spot')
swap = (type == 'swap')
futures = (type == 'futures')
option = (type == 'option')
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'active': active,
'precision': precision,
'taker': self.safe_number(market, 'taker_fee'),
'maker': self.safe_number(market, 'maker_fee'),
'type': type,
'spot': spot,
'swap': swap,
'futures': futures,
'option': option,
'linear': linear,
'inverse': inverse,
'limits': {
'amount': {
'min': self.safe_number(lotSizeFilter, 'min_trading_qty'),
'max': self.safe_number(lotSizeFilter, 'max_trading_qty'),
},
'price': {
'min': self.safe_number(priceFilter, 'min_price'),
'max': self.safe_number(priceFilter, 'max_price'),
},
'cost': {
'min': None,
'max': None,
},
'leverage': {
'max': self.safe_number(leverage, 'max_leverage', 1),
},
},
'info': market,
})
return result
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# symbol: 'BTCUSD',
# bid_price: '7680',
# ask_price: '7680.5',
# last_price: '7680.00',
# last_tick_direction: 'MinusTick',
# prev_price_24h: '7870.50',
# price_24h_pcnt: '-0.024204',
# high_price_24h: '8035.00',
# low_price_24h: '7671.00',
# prev_price_1h: '7780.00',
# price_1h_pcnt: '-0.012853',
# mark_price: '7683.27',
# index_price: '7682.74',
# open_interest: 188829147,
# open_value: '23670.06',
# total_turnover: '25744224.90',
# turnover_24h: '102997.83',
# total_volume: 225448878806,
# volume_24h: 809919408,
# funding_rate: '0.0001',
# predicted_funding_rate: '0.0001',
# next_funding_time: '2020-03-12T00:00:00Z',
# countdown_hour: 7
# }
#
timestamp = None
marketId = self.safe_string(ticker, 'symbol')
symbol = self.safe_symbol(marketId, market)
last = self.safe_number(ticker, 'last_price')
open = self.safe_number(ticker, 'prev_price_24h')
percentage = self.safe_number(ticker, 'price_24h_pcnt')
if percentage is not None:
percentage *= 100
change = None
average = None
if (last is not None) and (open is not None):
change = last - open
average = self.sum(open, last) / 2
baseVolume = self.safe_number(ticker, 'turnover_24h')
quoteVolume = self.safe_number(ticker, 'volume_24h')
vwap = self.vwap(baseVolume, quoteVolume)
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'high_price_24h'),
'low': self.safe_number(ticker, 'low_price_24h'),
'bid': self.safe_number(ticker, 'bid_price'),
'bidVolume': None,
'ask': self.safe_number(ticker, 'ask_price'),
'askVolume': None,
'vwap': vwap,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': average,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = await self.v2PublicGetTickers(self.extend(request, params))
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {
# symbol: 'BTCUSD',
# bid_price: '7680',
# ask_price: '7680.5',
# last_price: '7680.00',
# last_tick_direction: 'MinusTick',
# prev_price_24h: '7870.50',
# price_24h_pcnt: '-0.024204',
# high_price_24h: '8035.00',
# low_price_24h: '7671.00',
# prev_price_1h: '7780.00',
# price_1h_pcnt: '-0.012853',
# mark_price: '7683.27',
# index_price: '7682.74',
# open_interest: 188829147,
# open_value: '23670.06',
# total_turnover: '25744224.90',
# turnover_24h: '102997.83',
# total_volume: 225448878806,
# volume_24h: 809919408,
# funding_rate: '0.0001',
# predicted_funding_rate: '0.0001',
# next_funding_time: '2020-03-12T00:00:00Z',
# countdown_hour: 7
# }
# ],
# time_now: '1583948195.818255'
# }
#
result = self.safe_value(response, 'result', [])
first = self.safe_value(result, 0)
timestamp = self.safe_timestamp(response, 'time_now')
ticker = self.parse_ticker(first, market)
ticker['timestamp'] = timestamp
ticker['datetime'] = self.iso8601(timestamp)
return ticker
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.v2PublicGetTickers(params)
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {
# symbol: 'BTCUSD',
# bid_price: '7680',
# ask_price: '7680.5',
# last_price: '7680.00',
# last_tick_direction: 'MinusTick',
# prev_price_24h: '7870.50',
# price_24h_pcnt: '-0.024204',
# high_price_24h: '8035.00',
# low_price_24h: '7671.00',
# prev_price_1h: '7780.00',
# price_1h_pcnt: '-0.012853',
# mark_price: '7683.27',
# index_price: '7682.74',
# open_interest: 188829147,
# open_value: '23670.06',
# total_turnover: '25744224.90',
# turnover_24h: '102997.83',
# total_volume: 225448878806,
# volume_24h: 809919408,
# funding_rate: '0.0001',
# predicted_funding_rate: '0.0001',
# next_funding_time: '2020-03-12T00:00:00Z',
# countdown_hour: 7
# }
# ],
# time_now: '1583948195.818255'
# }
#
result = self.safe_value(response, 'result', [])
tickers = {}
for i in range(0, len(result)):
ticker = self.parse_ticker(result[i])
symbol = ticker['symbol']
tickers[symbol] = ticker
return self.filter_by_array(tickers, 'symbol', symbols)
def parse_ohlcv(self, ohlcv, market=None):
#
# inverse perpetual BTC/USD
#
# {
# symbol: 'BTCUSD',
# interval: '1',
# open_time: 1583952540,
# open: '7760.5',
# high: '7764',
# low: '7757',
# close: '7763.5',
# volume: '1259766',
# turnover: '162.32773718999994'
# }
#
# linear perpetual BTC/USDT
#
# {
# "id":143536,
# "symbol":"BTCUSDT",
# "period":"15",
# "start_at":1587883500,
# "volume":1.035,
# "open":7540.5,
# "high":7541,
# "low":7540.5,
# "close":7541
# }
#
return [
self.safe_timestamp_2(ohlcv, 'open_time', 'start_at'),
self.safe_number(ohlcv, 'open'),
self.safe_number(ohlcv, 'high'),
self.safe_number(ohlcv, 'low'),
self.safe_number(ohlcv, 'close'),
self.safe_number_2(ohlcv, 'volume', 'turnover'),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
price = self.safe_string(params, 'price')
params = self.omit(params, 'price')
request = {
'symbol': market['id'],
'interval': self.timeframes[timeframe],
}
duration = self.parse_timeframe(timeframe)
now = self.seconds()
if since is None:
if limit is None:
raise ArgumentsRequired(self.id + ' fetchOHLCV() requires a since argument or a limit argument')
else:
request['from'] = now - limit * duration
else:
request['from'] = int(since / 1000)
if limit is not None:
request['limit'] = limit # max 200, default 200
method = 'v2PublicGetKlineList'
if price == 'mark':
method = 'v2PublicGetMarkPriceKline'
elif price == 'index':
method = 'v2PublicGetIndexPriceKline'
elif price == 'premiumIndex':
method = 'v2PublicGetPremiumIndexKline'
elif market['linear']:
method = 'publicLinearGetKline'
response = await getattr(self, method)(self.extend(request, params))
#
# inverse perpetual BTC/USD
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {
# symbol: 'BTCUSD',
# interval: '1',
# open_time: 1583952540,
# open: '7760.5',
# high: '7764',
# low: '7757',
# close: '7763.5',
# volume: '1259766',
# turnover: '162.32773718999994'
# },
# ],
# time_now: '1583953082.397330'
# }
#
# linear perpetual BTC/USDT
#
# {
# "ret_code":0,
# "ret_msg":"OK",
# "ext_code":"",
# "ext_info":"",
# "result":[
# {
# "id":143536,
# "symbol":"BTCUSDT",
# "period":"15",
# "start_at":1587883500,
# "volume":1.035,
# "open":7540.5,
# "high":7541,
# "low":7540.5,
# "close":7541
# }
# ],
# "time_now":"1587884120.168077"
# }
#
result = self.safe_value(response, 'result', {})
return self.parse_ohlcvs(result, market, timeframe, since, limit)
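# A minimal usage sketch (symbols and numbers are illustrative; 'exchange' is an
# instantiated bybit object). Either since or limit must be supplied; with only a
# limit, 'from' is derived above as now - limit * duration, and passing
# params={'price': 'mark'} switches to the mark-price kline endpoint:
#
#     candles = await exchange.fetch_ohlcv('BTC/USDT', '15m', limit=200)
#     mark = await exchange.fetch_ohlcv('BTC/USD', '1h', limit=24, params={'price': 'mark'})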
async def fetch_funding_rate(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
method = 'publicLinearGetFundingPrevFundingRate' if market['linear'] else 'v2PublicGetFundingPrevFundingRate'
response = await getattr(self, method)(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "symbol": "BTCUSD",
# "funding_rate": "0.00010000",
# "funding_rate_timestamp": 1577433600
# },
# "ext_info": null,
# "time_now": "1577445586.446797",
# "rate_limit_status": 119,
# "rate_limit_reset_ms": 1577445586454,
# "rate_limit": 120
# }
#
result = self.safe_value(response, 'result')
nextFundingRate = self.safe_number(result, 'funding_rate')
previousFundingTime = self.safe_integer(result, 'funding_rate_timestamp') * 1000
nextFundingTime = previousFundingTime + (8 * 3600000)
currentTime = self.milliseconds()
return {
'info': result,
'symbol': symbol,
'markPrice': None,
'indexPrice': None,
'interestRate': None,
'estimatedSettlePrice': None,
'timestamp': currentTime,
'datetime': self.iso8601(currentTime),
'previousFundingRate': None,
'nextFundingRate': nextFundingRate,
'previousFundingTimestamp': previousFundingTime,
'nextFundingTimestamp': nextFundingTime,
'previousFundingDatetime': self.iso8601(previousFundingTime),
'nextFundingDatetime': self.iso8601(nextFundingTime),
}
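# Worked example of the timestamp arithmetic above, using the sample response:
# funding_rate_timestamp 1577433600 is 2019-12-27T08:00:00Z, so
# previousFundingTimestamp = 1577433600000 ms and nextFundingTimestamp adds
# 8 * 3600000 ms, giving 2019-12-27T16:00:00Z.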
async def fetch_index_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if since is None and limit is None:
raise ArgumentsRequired(self.id + ' fetchIndexOHLCV() requires a since argument or a limit argument')
request = {
'price': 'index',
}
return await self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
async def fetch_mark_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if since is None and limit is None:
raise ArgumentsRequired(self.id + ' fetchMarkOHLCV() requires a since argument or a limit argument')
request = {
'price': 'mark',
}
return await self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
async def fetch_premium_index_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
if since is None and limit is None:
raise ArgumentsRequired(self.id + ' fetchPremiumIndexOHLCV() requires a since argument or a limit argument')
request = {
'price': 'premiumIndex',
}
return await self.fetch_ohlcv(symbol, timeframe, since, limit, self.extend(request, params))
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "id": "44275042152",
# "symbol": "AAVEUSDT",
# "price": "256.35",
# "qty": "0.1",
# "side": "Buy",
# "time": "2021-11-30T12:46:14.000Z",
# "trade_time_ms": "1638276374312"
# }
#
# fetchMyTrades, fetchOrderTrades(private)
#
# {
# "order_id": "b020b4bc-6fe2-45b5-adbc-dd07794f9746",
# "order_link_id": "",
# "side": "Buy",
# "symbol": "AAVEUSDT",
# "exec_id": "09abe8f0-aea6-514e-942b-7da8cb935120",
# "price": "269.3",
# "order_price": "269.3",
# "order_qty": "0.1",
# "order_type": "Market",
# "fee_rate": "0.00075",
# "exec_price": "256.35",
# "exec_type": "Trade",
# "exec_qty": "0.1",
# "exec_fee": "0.01922625",
# "exec_value": "25.635",
# "leaves_qty": "0",
# "closed_size": "0",
# "last_liquidity_ind": "RemovedLiquidity",
# "trade_time": "1638276374",
# "trade_time_ms": "1638276374312"
# }
#
id = self.safe_string_2(trade, 'id', 'exec_id')
marketId = self.safe_string(trade, 'symbol')
market = self.safe_market(marketId, market)
symbol = market['symbol']
amountString = self.safe_string_2(trade, 'qty', 'exec_qty')
priceString = self.safe_string_2(trade, 'exec_price', 'price')
costString = self.safe_string(trade, 'exec_value')
timestamp = self.parse8601(self.safe_string(trade, 'time'))
if timestamp is None:
timestamp = self.safe_integer(trade, 'trade_time_ms')
side = self.safe_string_lower(trade, 'side')
lastLiquidityInd = self.safe_string(trade, 'last_liquidity_ind')
takerOrMaker = 'maker' if (lastLiquidityInd == 'AddedLiquidity') else 'taker'
feeCostString = self.safe_string(trade, 'exec_fee')
fee = None
if feeCostString is not None:
feeCurrencyCode = market['base'] if market['inverse'] else market['quote']
fee = {
'cost': feeCostString,
'currency': feeCurrencyCode,
'rate': self.safe_string(trade, 'fee_rate'),
}
return self.safe_trade({
'id': id,
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': self.safe_string(trade, 'order_id'),
'type': self.safe_string_lower(trade, 'order_type'),
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': costString,
'fee': fee,
}, market)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
# 'from': 123, # from id
}
if limit is not None:
request['count'] = limit # default 500, max 1000
method = 'publicLinearGetRecentTradingRecords' if market['linear'] else 'v2PublicGetTradingRecords'
response = await getattr(self, method)(self.extend(request, params))
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {
# id: 43785688,
# symbol: 'BTCUSD',
# price: 7786,
# qty: 67,
# side: 'Sell',
# time: '2020-03-11T19:18:30.123Z'
# },
# ],
# time_now: '1583954313.393362'
# }
#
result = self.safe_value(response, 'result', {})
return self.parse_trades(result, market, since, limit)
def parse_order_book(self, orderbook, symbol, timestamp=None, bidsKey='Buy', asksKey='Sell', priceKey='price', amountKey='size'):
bids = []
asks = []
for i in range(0, len(orderbook)):
bidask = orderbook[i]
side = self.safe_string(bidask, 'side')
if side == 'Buy':
bids.append(self.parse_bid_ask(bidask, priceKey, amountKey))
elif side == 'Sell':
asks.append(self.parse_bid_ask(bidask, priceKey, amountKey))
else:
raise ExchangeError(self.id + ' parseOrderBook encountered an unrecognized bidask format: ' + self.json(bidask))
return {
'symbol': symbol,
'bids': self.sort_by(bids, 0, True),
'asks': self.sort_by(asks, 0),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'nonce': None,
}
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = await self.v2PublicGetOrderBookL2(self.extend(request, params))
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [
# {symbol: 'BTCUSD', price: '7767.5', size: 677956, side: 'Buy'},
# {symbol: 'BTCUSD', price: '7767', size: 580690, side: 'Buy'},
# {symbol: 'BTCUSD', price: '7766.5', size: 475252, side: 'Buy'},
# {symbol: 'BTCUSD', price: '7768', size: 330847, side: 'Sell'},
# {symbol: 'BTCUSD', price: '7768.5', size: 97159, side: 'Sell'},
# {symbol: 'BTCUSD', price: '7769', size: 6508, side: 'Sell'},
# ],
# time_now: '1583954829.874823'
# }
#
result = self.safe_value(response, 'result', [])
timestamp = self.safe_timestamp(response, 'time_now')
return self.parse_order_book(result, symbol, timestamp, 'Buy', 'Sell', 'price', 'size')
async def fetch_balance(self, params={}):
# note: any funds in the 'spot' account will not be returned or visible from this endpoint
await self.load_markets()
request = {}
coin = self.safe_string(params, 'coin')
code = self.safe_string(params, 'code')
if coin is not None:
request['coin'] = coin
elif code is not None:
currency = self.currency(code)
request['coin'] = currency['id']
response = await self.v2PrivateGetWalletBalance(self.extend(request, params))
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: {
# BTC: {
# equity: 0,
# available_balance: 0,
# used_margin: 0,
# order_margin: 0,
# position_margin: 0,
# occ_closing_fee: 0,
# occ_funding_fee: 0,
# wallet_balance: 0,
# realised_pnl: 0,
# unrealised_pnl: 0,
# cum_realised_pnl: 0,
# given_cash: 0,
# service_cash: 0
# }
# },
# time_now: '1583937810.370020',
# rate_limit_status: 119,
# rate_limit_reset_ms: 1583937810367,
# rate_limit: 120
# }
#
result = {
'info': response,
}
balances = self.safe_value(response, 'result', {})
currencyIds = list(balances.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
balance = balances[currencyId]
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'available_balance')
account['used'] = self.safe_string(balance, 'used_margin')
account['total'] = self.safe_string(balance, 'equity')
result[code] = account
return self.parse_balance(result)
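# A minimal usage sketch (credentials are placeholders). fetch_balance reads an
# optional 'coin' or 'code' from params to query a single currency, as implemented
# above:
#
#     exchange = bybit({'apiKey': 'YOUR_KEY', 'secret': 'YOUR_SECRET'})
#     all_balances = await exchange.fetch_balance()
#     btc_only = await exchange.fetch_balance({'coin': 'BTC'})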
def parse_order_status(self, status):
statuses = {
# basic orders
'Created': 'open',
'Rejected': 'rejected', # order is triggered but failed upon being placed
'New': 'open',
'PartiallyFilled': 'open',
'Filled': 'closed',
'Cancelled': 'canceled',
'PendingCancel': 'canceling', # the engine has received the cancellation but there is no guarantee that it will be successful
# conditional orders
'Active': 'open', # order is triggered and placed successfully
'Untriggered': 'open', # order waits to be triggered
'Triggered': 'closed', # order is triggered
# 'Cancelled': 'canceled', # order is cancelled
# 'Rejected': 'rejected', # order is triggered but fail to be placed
'Deactivated': 'canceled', # conditional order was cancelled before triggering
}
return self.safe_string(statuses, status, status)
def parse_time_in_force(self, timeInForce):
timeInForces = {
'GoodTillCancel': 'GTC',
'ImmediateOrCancel': 'IOC',
'FillOrKill': 'FOK',
'PostOnly': 'PO',
}
return self.safe_string(timeInForces, timeInForce, timeInForce)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "user_id": 1,
# "order_id": "335fd977-e5a5-4781-b6d0-c772d5bfb95b",
# "symbol": "BTCUSD",
# "side": "Buy",
# "order_type": "Limit",
# "price": 8800,
# "qty": 1,
# "time_in_force": "GoodTillCancel",
# "order_status": "Created",
# "last_exec_time": 0,
# "last_exec_price": 0,
# "leaves_qty": 1,
# "cum_exec_qty": 0, # in contracts, where 1 contract = 1 quote currency unit(USD for inverse contracts)
# "cum_exec_value": 0, # in contract's underlying currency(BTC for inverse contracts)
# "cum_exec_fee": 0,
# "reject_reason": "",
# "order_link_id": "",
# "created_at": "2019-11-30T11:03:43.452Z",
# "updated_at": "2019-11-30T11:03:43.455Z"
# }
#
# fetchOrder
#
# {
# "user_id" : 599946,
# "symbol" : "BTCUSD",
# "side" : "Buy",
# "order_type" : "Limit",
# "price" : "7948",
# "qty" : 10,
# "time_in_force" : "GoodTillCancel",
# "order_status" : "Filled",
# "ext_fields" : {
# "o_req_num" : -1600687220498,
# "xreq_type" : "x_create"
# },
# "last_exec_time" : "1588150113.968422",
# "last_exec_price" : "7948",
# "leaves_qty" : 0,
# "leaves_value" : "0",
# "cum_exec_qty" : 10,
# "cum_exec_value" : "0.00125817",
# "cum_exec_fee" : "-0.00000031",
# "reject_reason" : "",
# "cancel_type" : "",
# "order_link_id" : "",
# "created_at" : "2020-04-29T08:45:24.399146Z",
# "updated_at" : "2020-04-29T08:48:33.968422Z",
# "order_id" : "dd2504b9-0157-406a-99e1-efa522373944"
# }
#
# conditional order
#
# {
# "user_id":##,
# "symbol":"BTCUSD",
# "side":"Buy",
# "order_type":"Market",
# "price":0,
# "qty":10,
# "time_in_force":"GoodTillCancel",
# "stop_order_type":"Stop",
# "trigger_by":"LastPrice",
# "base_price":11833,
# "order_status":"Untriggered",
# "ext_fields":{
# "stop_order_type":"Stop",
# "trigger_by":"LastPrice",
# "base_price":11833,
# "expected_direction":"Rising",
# "trigger_price":12400,
# "close_on_trigger":true,
# "op_from":"api",
# "remark":"x.x.x.x",
# "o_req_num":0
# },
# "leaves_qty":10,
# "leaves_value":0.00080645,
# "reject_reason":null,
# "cross_seq":-1,
# "created_at":"2020-08-21T09:18:48.000Z",
# "updated_at":"2020-08-21T09:18:48.000Z",
# "trigger_price":12400,
# "stop_order_id":"3f3b54b1-3379-42c7-8510-44f4d9915be0"
# }
#
marketId = self.safe_string(order, 'symbol')
market = self.safe_market(marketId, market)
symbol = market['symbol']
feeCurrency = None
timestamp = self.parse8601(self.safe_string(order, 'created_at'))
id = self.safe_string_2(order, 'order_id', 'stop_order_id')
type = self.safe_string_lower(order, 'order_type')
price = self.safe_string(order, 'price')
average = self.safe_string(order, 'average_price')
amount = self.safe_string(order, 'qty')
cost = self.safe_string(order, 'cum_exec_value')
filled = self.safe_string(order, 'cum_exec_qty')
remaining = self.safe_string(order, 'leaves_qty')
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_string(marketTypes, symbol)
if market is not None:
if marketType == 'linear':
feeCurrency = market['quote']
else:
feeCurrency = market['base']
lastTradeTimestamp = self.safe_timestamp(order, 'last_exec_time')
if lastTradeTimestamp == 0:
lastTradeTimestamp = None
status = self.parse_order_status(self.safe_string_2(order, 'order_status', 'stop_order_status'))
side = self.safe_string_lower(order, 'side')
feeCostString = Precise.string_abs(self.safe_string(order, 'cum_exec_fee'))
fee = None
if feeCostString is not None:
fee = {
'cost': feeCostString,
'currency': feeCurrency,
}
clientOrderId = self.safe_string(order, 'order_link_id')
if (clientOrderId is not None) and (len(clientOrderId) < 1):
clientOrderId = None
timeInForce = self.parse_time_in_force(self.safe_string(order, 'time_in_force'))
stopPrice = self.safe_number_2(order, 'trigger_price', 'stop_px')
postOnly = (timeInForce == 'PO')
return self.safe_order2({
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': lastTradeTimestamp,
'symbol': symbol,
'type': type,
'timeInForce': timeInForce,
'postOnly': postOnly,
'side': side,
'price': price,
'stopPrice': stopPrice,
'amount': amount,
'cost': cost,
'average': average,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': None,
}, market)
async def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
# 'order_link_id': 'string', # one of order_id, stop_order_id or order_link_id is required
# regular orders ---------------------------------------------
# 'order_id': id, # one of order_id or order_link_id is required for regular orders
# conditional orders ---------------------------------------------
# 'stop_order_id': id, # one of stop_order_id or order_link_id is required for conditional orders
}
method = None
if market['swap']:
if market['linear']:
method = 'privateLinearGetOrderSearch'
elif market['inverse']:
method = 'v2PrivateGetOrder'
elif market['futures']:
method = 'futuresPrivateGetOrder'
stopOrderId = self.safe_string(params, 'stop_order_id')
if stopOrderId is None:
orderLinkId = self.safe_string(params, 'order_link_id')
if orderLinkId is None:
request['order_id'] = id
else:
if market['swap']:
if market['linear']:
method = 'privateLinearGetStopOrderSearch'
elif market['inverse']:
method = 'v2PrivateGetStopOrder'
elif market['futures']:
method = 'futuresPrivateGetStopOrder'
response = await getattr(self, method)(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "OK",
# "ext_code": "",
# "ext_info": "",
# "result": {
# "user_id": 1,
# "symbol": "BTCUSD",
# "side": "Sell",
# "order_type": "Limit",
# "price": "8083",
# "qty": 10,
# "time_in_force": "GoodTillCancel",
# "order_status": "New",
# "ext_fields": {"o_req_num": -308787, "xreq_type": "x_create", "xreq_offset": 4154640},
# "leaves_qty": 10,
# "leaves_value": "0.00123716",
# "cum_exec_qty": 0,
# "reject_reason": "",
# "order_link_id": "",
# "created_at": "2019-10-21T07:28:19.396246Z",
# "updated_at": "2019-10-21T07:28:19.396246Z",
# "order_id": "efa44157-c355-4a98-b6d6-1d846a936b93"
# },
# "time_now": "1571651135.291930",
# "rate_limit_status": 99, # The remaining number of accesses in one minute
# "rate_limit_reset_ms": 1580885703683,
# "rate_limit": 100
# }
#
# conditional orders
#
# {
# "ret_code": 0,
# "ret_msg": "OK",
# "ext_code": "",
# "ext_info": "",
# "result": {
# "user_id": 1,
# "symbol": "BTCUSD",
# "side": "Buy",
# "order_type": "Limit",
# "price": "8000",
# "qty": 1,
# "time_in_force": "GoodTillCancel",
# "order_status": "Untriggered",
# "ext_fields": {},
# "leaves_qty": 1,
# "leaves_value": "0.00013333",
# "cum_exec_qty": 0,
# "cum_exec_value": null,
# "cum_exec_fee": null,
# "reject_reason": "",
# "order_link_id": "",
# "created_at": "2019-12-27T19:56:24.052194Z",
# "updated_at": "2019-12-27T19:56:24.052194Z",
# "order_id": "378a1bbc-a93a-4e75-87f4-502ea754ba36"
# },
# "time_now": "1577476584.386958",
# "rate_limit_status": 99,
# "rate_limit_reset_ms": 1580885703683,
# "rate_limit": 100
# }
#
result = self.safe_value(response, 'result')
return self.parse_order(result, market)
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
qty = self.amount_to_precision(symbol, amount)
if market['inverse']:
qty = int(qty)
else:
qty = float(qty)
request = {
# orders ---------------------------------------------------------
'side': self.capitalize(side),
'symbol': market['id'],
'order_type': self.capitalize(type),
'qty': qty, # order quantity in USD, integer only
# 'price': float(self.price_to_precision(symbol, price)), # required for limit orders
'time_in_force': 'GoodTillCancel', # ImmediateOrCancel, FillOrKill, PostOnly
# 'take_profit': 123.45, # take profit price, only take effect upon opening the position
# 'stop_loss': 123.45, # stop loss price, only take effect upon opening the position
# 'reduce_only': False, # reduce only, required for linear orders
# when creating a closing order, bybit recommends a True value for
# close_on_trigger to avoid failing due to insufficient available margin
# 'close_on_trigger': False, required for linear orders
# 'order_link_id': 'string', # unique client order id, max 36 characters
# conditional orders ---------------------------------------------
# base_price is used to compare with the value of stop_px, to decide
# whether your conditional order will be triggered by crossing trigger
# price from upper side or lower side, mainly used to identify the
# expected direction of the current conditional order
# 'base_price': 123.45, # required for conditional orders
# 'stop_px': 123.45, # trigger price, required for conditional orders
# 'trigger_by': 'LastPrice', # IndexPrice, MarkPrice
}
priceIsRequired = False
if type == 'limit':
priceIsRequired = True
if priceIsRequired:
if price is not None:
request['price'] = float(self.price_to_precision(symbol, price))
else:
raise ArgumentsRequired(self.id + ' createOrder() requires a price argument for a ' + type + ' order')
clientOrderId = self.safe_string_2(params, 'order_link_id', 'clientOrderId')
if clientOrderId is not None:
request['order_link_id'] = clientOrderId
params = self.omit(params, ['order_link_id', 'clientOrderId'])
stopPx = self.safe_value_2(params, 'stop_px', 'stopPrice')
basePrice = self.safe_value(params, 'base_price')
method = None
if market['swap']:
if market['linear']:
method = 'privateLinearPostOrderCreate'
request['reduce_only'] = False
request['close_on_trigger'] = False
elif market['inverse']:
method = 'v2PrivatePostOrderCreate'
elif market['futures']:
method = 'futuresPrivatePostOrderCreate'
if stopPx is not None:
if basePrice is None:
raise ArgumentsRequired(self.id + ' createOrder() requires both the stop_px and base_price params for a conditional ' + type + ' order')
else:
if market['swap']:
if market['linear']:
method = 'privateLinearPostStopOrderCreate'
elif market['inverse']:
method = 'v2PrivatePostStopOrderCreate'
elif market['futures']:
method = 'futuresPrivatePostStopOrderCreate'
request['stop_px'] = float(self.price_to_precision(symbol, stopPx))
request['base_price'] = float(self.price_to_precision(symbol, basePrice))
params = self.omit(params, ['stop_px', 'stopPrice', 'base_price'])
elif basePrice is not None:
raise ArgumentsRequired(self.id + ' createOrder() requires both the stop_px and base_price params for a conditional ' + type + ' order')
response = await getattr(self, method)(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "OK",
# "ext_code": "",
# "ext_info": "",
# "result": {
# "user_id": 1,
# "order_id": "335fd977-e5a5-4781-b6d0-c772d5bfb95b",
# "symbol": "BTCUSD",
# "side": "Buy",
# "order_type": "Limit",
# "price": 8800,
# "qty": 1,
# "time_in_force": "GoodTillCancel",
# "order_status": "Created",
# "last_exec_time": 0,
# "last_exec_price": 0,
# "leaves_qty": 1,
# "cum_exec_qty": 0,
# "cum_exec_value": 0,
# "cum_exec_fee": 0,
# "reject_reason": "",
# "order_link_id": "",
# "created_at": "2019-11-30T11:03:43.452Z",
# "updated_at": "2019-11-30T11:03:43.455Z"
# },
# "time_now": "1575111823.458705",
# "rate_limit_status": 98,
# "rate_limit_reset_ms": 1580885703683,
# "rate_limit": 100
# }
#
# conditional orders
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "user_id": 1,
# "symbol": "BTCUSD",
# "side": "Buy",
# "order_type": "Limit",
# "price": 8000,
# "qty": 1,
# "time_in_force": "GoodTillCancel",
# "stop_order_type": "Stop",
# "trigger_by": "LastPrice",
# "base_price": 7000,
# "order_status": "Untriggered",
# "ext_fields": {
# "stop_order_type": "Stop",
# "trigger_by": "LastPrice",
# "base_price": 7000,
# "expected_direction": "Rising",
# "trigger_price": 7500,
# "op_from": "api",
# "remark": "127.0.01",
# "o_req_num": 0
# },
# "leaves_qty": 1,
# "leaves_value": 0.00013333,
# "reject_reason": null,
# "cross_seq": -1,
# "created_at": "2019-12-27T12:48:24.000Z",
# "updated_at": "2019-12-27T12:48:24.000Z",
# "stop_px": 7500,
# "stop_order_id": "a85cd1c0-a9a4-49d3-a1bd-bab5ebe946d5"
# },
# "ext_info": null,
# "time_now": "1577450904.327654",
# "rate_limit_status": 99,
# "rate_limit_reset_ms": 1577450904335,
# "rate_limit": "100"
# }
#
result = self.safe_value(response, 'result')
return self.parse_order(result, market)
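# A minimal usage sketch (credentials and prices are placeholders; 'exchange' is
# an instantiated bybit object). Regular orders only need the standard arguments;
# conditional orders pass stop_px and base_price through params, as described in
# the request comments above:
#
#     order = await exchange.create_order('BTC/USD', 'limit', 'buy', 1, 8800)
#     stop = await exchange.create_order('BTC/USD', 'limit', 'buy', 1, 8000,
#                                        {'stop_px': 7500, 'base_price': 7000})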
async def edit_order(self, id, symbol, type, side, amount=None, price=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' editOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
# 'order_id': id, # only for non-conditional orders
'symbol': market['id'],
# 'p_r_qty': self.amount_to_precision(symbol, amount), # new order quantity, optional
# 'p_r_price': self.price_to_precision(symbol, price), # new order price, optional
# ----------------------------------------------------------------
# conditional orders
# 'stop_order_id': id, # only for conditional orders
# 'p_r_trigger_price': 123.45, # new trigger price also known as stop_px
}
method = None
if market['swap']:
if market['linear']:
method = 'privateLinearPostOrderReplace'
elif market['inverse']:
method = 'v2PrivatePostOrderReplace'
elif market['futures']:
method = 'futuresPrivatePostOrderReplace'
stopOrderId = self.safe_string(params, 'stop_order_id')
if stopOrderId is not None:
if market['swap']:
if market['linear']:
method = 'privateLinearPostStopOrderReplace'
elif market['inverse']:
method = 'v2PrivatePostStopOrderReplace'
elif market['futures']:
method = 'futuresPrivatePostStopOrderReplace'
request['stop_order_id'] = stopOrderId
params = self.omit(params, ['stop_order_id'])
else:
request['order_id'] = id
if amount is not None:
qty = self.amount_to_precision(symbol, amount)
if market['inverse']:
qty = int(qty)
else:
qty = float(qty)
request['p_r_qty'] = qty
if price is not None:
request['p_r_price'] = float(self.price_to_precision(symbol, price))
response = await getattr(self, method)(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {"order_id": "efa44157-c355-4a98-b6d6-1d846a936b93"},
# "time_now": "1539778407.210858",
# "rate_limit_status": 99, # remaining number of accesses in one minute
# "rate_limit_reset_ms": 1580885703683,
# "rate_limit": 100
# }
#
# conditional orders
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {"stop_order_id": "378a1bbc-a93a-4e75-87f4-502ea754ba36"},
# "ext_info": null,
# "time_now": "1577475760.604942",
# "rate_limit_status": 96,
# "rate_limit_reset_ms": 1577475760612,
# "rate_limit": "100"
# }
#
result = self.safe_value(response, 'result', {})
return {
'info': response,
'id': self.safe_string_2(result, 'order_id', 'stop_order_id'),
'order_id': self.safe_string(result, 'order_id'),
'stop_order_id': self.safe_string(result, 'stop_order_id'),
}
async def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
# 'order_link_id': 'string', # one of order_id, stop_order_id or order_link_id is required
# regular orders ---------------------------------------------
# 'order_id': id, # one of order_id or order_link_id is required for regular orders
# conditional orders ---------------------------------------------
# 'stop_order_id': id, # one of stop_order_id or order_link_id is required for conditional orders
}
method = None
if market['swap']:
if market['linear']:
method = 'privateLinearPostOrderCancel'
elif market['inverse']:
method = 'v2PrivatePostOrderCancel'
elif market['futures']:
method = 'futuresPrivatePostOrderCancel'
stopOrderId = self.safe_string(params, 'stop_order_id')
if stopOrderId is None:
orderLinkId = self.safe_string(params, 'order_link_id')
if orderLinkId is None:
request['order_id'] = id
else:
if market['swap']:
if market['linear']:
method = 'privateLinearPostStopOrderCancel'
elif market['inverse']:
method = 'v2PrivatePostStopOrderCancel'
elif market['futures']:
method = 'futuresPrivatePostStopOrderCancel'
response = await getattr(self, method)(self.extend(request, params))
result = self.safe_value(response, 'result', {})
return self.parse_order(result, market)
async def cancel_all_orders(self, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelAllOrders() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
options = self.safe_value(self.options, 'cancelAllOrders', {})
defaultMethod = None
if market['swap']:
if market['linear']:
defaultMethod = 'privateLinearPostOrderCancelAll'
elif market['inverse']:
defaultMethod = 'v2PrivatePostOrderCancelAll'
elif market['futures']:
defaultMethod = 'futuresPrivatePostOrderCancelAll'
method = self.safe_string(options, 'method', defaultMethod)
response = await getattr(self, method)(self.extend(request, params))
result = self.safe_value(response, 'result', [])
return self.parse_orders(result, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'order_id': 'string'
# 'order_link_id': 'string', # unique client order id, max 36 characters
# 'symbol': market['id'], # default BTCUSD
# 'order': 'desc', # asc
# 'page': 1,
# 'limit': 20, # max 50
# 'order_status': 'Created,New'
# conditional orders ---------------------------------------------
# 'stop_order_id': 'string',
# 'stop_order_status': 'Untriggered',
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if limit is not None:
request['limit'] = limit
options = self.safe_value(self.options, 'fetchOrders', {})
defaultType = self.safe_string(self.options, 'defaultType', 'linear')
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_string(marketTypes, symbol, defaultType)
defaultMethod = None
marketDefined = (market is not None)
linear = (marketDefined and market['linear']) or (marketType == 'linear')
inverse = (marketDefined and market['swap'] and market['inverse']) or (marketType == 'inverse')
futures = (marketDefined and market['futures']) or (marketType == 'futures')
if linear:
defaultMethod = 'privateLinearGetOrderList'
elif inverse:
defaultMethod = 'v2PrivateGetOrderList'
elif futures:
defaultMethod = 'futuresPrivateGetOrderList'
query = params
if ('stop_order_id' in params) or ('stop_order_status' in params):
stopOrderStatus = self.safe_value(params, 'stop_order_status')
if stopOrderStatus is not None:
if isinstance(stopOrderStatus, list):
stopOrderStatus = ','.join(stopOrderStatus)
request['stop_order_status'] = stopOrderStatus
query = self.omit(params, 'stop_order_status')
if linear:
defaultMethod = 'privateLinearGetStopOrderList'
elif inverse:
defaultMethod = 'v2PrivateGetStopOrderList'
elif futures:
defaultMethod = 'futuresPrivateGetStopOrderList'
method = self.safe_string(options, 'method', defaultMethod)
response = await getattr(self, method)(self.extend(request, query))
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "current_page": 1,
# "last_page": 6,
# "data": [
# {
# "user_id": 1,
# "symbol": "BTCUSD",
# "side": "Sell",
# "order_type": "Market",
# "price": 7074,
# "qty": 2,
# "time_in_force": "ImmediateOrCancel",
# "order_status": "Filled",
# "ext_fields": {
# "close_on_trigger": True,
# "orig_order_type": "BLimit",
# "prior_x_req_price": 5898.5,
# "op_from": "pc",
# "remark": "127.0.0.1",
# "o_req_num": -34799032763,
# "xreq_type": "x_create"
# },
# "last_exec_time": "1577448481.696421",
# "last_exec_price": 7070.5,
# "leaves_qty": 0,
# "leaves_value": 0,
# "cum_exec_qty": 2,
# "cum_exec_value": 0.00028283,
# "cum_exec_fee": 0.00002,
# "reject_reason": "NoError",
# "order_link_id": "",
# "created_at": "2019-12-27T12:08:01.000Z",
# "updated_at": "2019-12-27T12:08:01.000Z",
# "order_id": "f185806b-b801-40ff-adec-52289370ed62"
# }
# ]
# },
# "ext_info": null,
# "time_now": "1577448922.437871",
# "rate_limit_status": 98,
# "rate_limit_reset_ms": 1580885703683,
# "rate_limit": 100
# }
#
# conditional orders
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "current_page": 1,
# "last_page": 1,
# "data": [
# {
# "user_id": 1,
# "stop_order_status": "Untriggered",
# "symbol": "BTCUSD",
# "side": "Buy",
# "order_type": "Limit",
# "price": 8000,
# "qty": 1,
# "time_in_force": "GoodTillCancel",
# "stop_order_type": "Stop",
# "trigger_by": "LastPrice",
# "base_price": 7000,
# "order_link_id": "",
# "created_at": "2019-12-27T12:48:24.000Z",
# "updated_at": "2019-12-27T12:48:24.000Z",
# "stop_px": 7500,
# "stop_order_id": "a85cd1c0-a9a4-49d3-a1bd-bab5ebe946d5"
# },
# ]
# },
# "ext_info": null,
# "time_now": "1577451658.755468",
# "rate_limit_status": 599,
# "rate_limit_reset_ms": 1577451658762,
# "rate_limit": 600
# }
#
result = self.safe_value(response, 'result', {})
data = self.safe_value(result, 'data', [])
return self.parse_orders(data, market, since, limit)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
defaultStatuses = [
'Rejected',
'Filled',
'Cancelled',
# conditional orders
# 'Active',
# 'Triggered',
# 'Cancelled',
# 'Rejected',
# 'Deactivated',
]
options = self.safe_value(self.options, 'fetchClosedOrders', {})
status = self.safe_value(options, 'order_status', defaultStatuses)
if isinstance(status, list):
status = ','.join(status)
request = {}
stopOrderStatus = self.safe_value(params, 'stop_order_status')
if stopOrderStatus is None:
request['order_status'] = status
else:
request['stop_order_status'] = stopOrderStatus
return await self.fetch_orders(symbol, since, limit, self.extend(request, params))
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
defaultStatuses = [
'Created',
'New',
'PartiallyFilled',
'PendingCancel',
# conditional orders
# 'Untriggered',
]
options = self.safe_value(self.options, 'fetchOpenOrders', {})
status = self.safe_value(options, 'order_status', defaultStatuses)
if isinstance(status, list):
status = ','.join(status)
request = {}
stopOrderStatus = self.safe_value(params, 'stop_order_status')
if stopOrderStatus is None:
request['order_status'] = status
else:
request['stop_order_status'] = stopOrderStatus
return await self.fetch_orders(symbol, since, limit, self.extend(request, params))
async def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
request = {
'order_id': id,
}
return await self.fetch_my_trades(symbol, since, limit, self.extend(request, params))
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
request = {
# 'order_id': 'f185806b-b801-40ff-adec-52289370ed62', # if not provided will return user's trading records
# 'symbol': market['id'],
# 'start_time': int(since / 1000),
# 'page': 1,
# 'limit' 20, # max 50
}
market = None
orderId = self.safe_string(params, 'order_id')
if orderId is not None:
request['order_id'] = orderId
params = self.omit(params, 'order_id')
market = self.market(symbol)
request['symbol'] = market['id']
if since is not None:
request['start_time'] = since
if limit is not None:
request['limit'] = limit # default 20, max 50
defaultType = self.safe_string(self.options, 'defaultType', 'linear')
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_string(marketTypes, symbol, defaultType)
marketDefined = (market is not None)
linear = (marketDefined and market['linear']) or (marketType == 'linear')
inverse = (marketDefined and market['swap'] and market['inverse']) or (marketType == 'inverse')
futures = (marketDefined and market['futures']) or (marketType == 'futures')
method = None
if linear:
method = 'privateLinearGetTradeExecutionList'
elif inverse:
method = 'v2PrivateGetExecutionList'
elif futures:
method = 'futuresPrivateGetExecutionList'
response = await getattr(self, method)(self.extend(request, params))
#
# inverse
#
# {
# "ret_code": 0,
# "ret_msg": "OK",
# "ext_code": "",
# "ext_info": "",
# "result": {
# "order_id": "Abandonednot !", # Abandonednot !
# "trade_list": [
# {
# "closed_size": 0,
# "cross_seq": 277136382,
# "exec_fee": "0.0000001",
# "exec_id": "256e5ef8-abfe-5772-971b-f944e15e0d68",
# "exec_price": "8178.5",
# "exec_qty": 1,
# "exec_time": "1571676941.70682",
# "exec_type": "Trade", #Exec Type Enum
# "exec_value": "0.00012227",
# "fee_rate": "0.00075",
# "last_liquidity_ind": "RemovedLiquidity", #Liquidity Enum
# "leaves_qty": 0,
# "nth_fill": 2,
# "order_id": "7ad50cb1-9ad0-4f74-804b-d82a516e1029",
# "order_link_id": "",
# "order_price": "8178",
# "order_qty": 1,
# "order_type": "Market", #Order Type Enum
# "side": "Buy", #Side Enum
# "symbol": "BTCUSD", #Symbol Enum
# "user_id": 1
# }
# ]
# },
# "time_now": "1577483699.281488",
# "rate_limit_status": 118,
# "rate_limit_reset_ms": 1577483699244737,
# "rate_limit": 120
# }
#
# linear
#
# {
# "ret_code":0,
# "ret_msg":"OK",
# "ext_code":"",
# "ext_info":"",
# "result":{
# "current_page":1,
# "data":[
# {
# "order_id":"b59418ec-14d4-4ef9-b9f4-721d5d576974",
# "order_link_id":"",
# "side":"Sell",
# "symbol":"BTCUSDT",
# "exec_id":"0327284d-faec-5191-bd89-acc5b4fafda9",
# "price":0.5,
# "order_price":0.5,
# "order_qty":0.01,
# "order_type":"Market",
# "fee_rate":0.00075,
# "exec_price":9709.5,
# "exec_type":"Trade",
# "exec_qty":0.01,
# "exec_fee":0.07282125,
# "exec_value":97.095,
# "leaves_qty":0,
# "closed_size":0.01,
# "last_liquidity_ind":"RemovedLiquidity",
# "trade_time":1591648052,
# "trade_time_ms":1591648052861
# }
# ]
# },
# "time_now":"1591736501.979264",
# "rate_limit_status":119,
# "rate_limit_reset_ms":1591736501974,
# "rate_limit":120
# }
#
result = self.safe_value(response, 'result', {})
trades = self.safe_value_2(result, 'trade_list', 'data', [])
return self.parse_trades(trades, market, since, limit)
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'coin': currency['id'],
# 'currency': currency['id'], # alias
# 'start_date': self.iso8601(since),
# 'end_date': self.iso8601(till),
'wallet_fund_type': 'Deposit', # Deposit, Withdraw, RealisedPNL, Commission, Refund, Prize, ExchangeOrderWithdraw, ExchangeOrderDeposit
# 'page': 1,
# 'limit': 20, # max 50
}
currency = None
if code is not None:
currency = self.currency(code)
request['coin'] = currency['id']
if since is not None:
request['start_date'] = self.yyyymmdd(since)
if limit is not None:
request['limit'] = limit
response = await self.v2PrivateGetWalletFundRecords(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "data": [
# {
# "id": 234467,
# "user_id": 1,
# "coin": "BTC",
# "wallet_id": 27913,
# "type": "Realized P&L",
# "amount": "-0.00000006",
# "tx_id": "",
# "address": "BTCUSD",
# "wallet_balance": "0.03000330",
# "exec_time": "2019-12-09T00:00:25.000Z",
# "cross_seq": 0
# }
# ]
# },
# "ext_info": null,
# "time_now": "1577481867.115552",
# "rate_limit_status": 119,
# "rate_limit_reset_ms": 1577481867122,
# "rate_limit": 120
# }
#
result = self.safe_value(response, 'result', {})
data = self.safe_value(result, 'data', [])
return self.parse_transactions(data, currency, since, limit, {'type': 'deposit'})
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'coin': currency['id'],
# 'start_date': self.iso8601(since),
# 'end_date': self.iso8601(till),
# 'status': 'Pending', # ToBeConfirmed, UnderReview, Pending, Success, CancelByUser, Reject, Expire
# 'page': 1,
# 'limit': 20, # max 50
}
currency = None
if code is not None:
currency = self.currency(code)
request['coin'] = currency['id']
if since is not None:
request['start_date'] = self.yyyymmdd(since)
if limit is not None:
request['limit'] = limit
response = await self.v2PrivateGetWalletWithdrawList(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "data": [
# {
# "id": 137,
# "user_id": 1,
# "coin": "XRP", # Coin Enum
# "status": "Pending", # Withdraw Status Enum
# "amount": "20.00000000",
# "fee": "0.25000000",
# "address": "rH7H595XYEVTEHU2FySYsWnmfACBnZS9zM",
# "tx_id": "",
# "submited_at": "2019-06-11T02:20:24.000Z",
# "updated_at": "2019-06-11T02:20:24.000Z"
# },
# ],
# "current_page": 1,
# "last_page": 1
# },
# "ext_info": null,
# "time_now": "1577482295.125488",
# "rate_limit_status": 119,
# "rate_limit_reset_ms": 1577482295132,
# "rate_limit": 120
# }
#
result = self.safe_value(response, 'result', {})
data = self.safe_value(result, 'data', [])
return self.parse_transactions(data, currency, since, limit, {'type': 'withdrawal'})
def parse_transaction_status(self, status):
statuses = {
'ToBeConfirmed': 'pending',
'UnderReview': 'pending',
'Pending': 'pending',
'Success': 'ok',
'CancelByUser': 'canceled',
'Reject': 'rejected',
'Expire': 'expired',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# fetchWithdrawals
#
# {
# "id": 137,
# "user_id": 1,
# "coin": "XRP", # Coin Enum
# "status": "Pending", # Withdraw Status Enum
# "amount": "20.00000000",
# "fee": "0.25000000",
# "address": "rH7H595XYEVTEHU2FySYsWnmfACBnZS9zM",
# "tx_id": "",
# "submited_at": "2019-06-11T02:20:24.000Z",
# "updated_at": "2019-06-11T02:20:24.000Z"
# }
#
# fetchDeposits ledger entries
#
# {
# "id": 234467,
# "user_id": 1,
# "coin": "BTC",
# "wallet_id": 27913,
# "type": "Realized P&L",
# "amount": "-0.00000006",
# "tx_id": "",
# "address": "BTCUSD",
# "wallet_balance": "0.03000330",
# "exec_time": "2019-12-09T00:00:25.000Z",
# "cross_seq": 0
# }
#
currencyId = self.safe_string(transaction, 'coin')
code = self.safe_currency_code(currencyId, currency)
timestamp = self.parse8601(self.safe_string_2(transaction, 'submited_at', 'exec_time'))
updated = self.parse8601(self.safe_string(transaction, 'updated_at'))
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
address = self.safe_string(transaction, 'address')
feeCost = self.safe_number(transaction, 'fee')
type = self.safe_string_lower(transaction, 'type')
fee = None
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': code,
}
return {
'info': transaction,
'id': self.safe_string(transaction, 'id'),
'txid': self.safe_string(transaction, 'tx_id'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': address,
'addressTo': None,
'addressFrom': None,
'tag': None,
'tagTo': None,
'tagFrom': None,
'type': type,
'amount': self.safe_number(transaction, 'amount'),
'currency': code,
'status': status,
'updated': updated,
'fee': fee,
}
async def fetch_ledger(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
# 'coin': currency['id'],
# 'currency': currency['id'], # alias
# 'start_date': self.iso8601(since),
# 'end_date': self.iso8601(till),
# 'wallet_fund_type': 'Deposit', # Withdraw, RealisedPNL, Commission, Refund, Prize, ExchangeOrderWithdraw, ExchangeOrderDeposit
# 'page': 1,
# 'limit': 20, # max 50
}
currency = None
if code is not None:
currency = self.currency(code)
request['coin'] = currency['id']
if since is not None:
request['start_date'] = self.yyyymmdd(since)
if limit is not None:
request['limit'] = limit
response = await self.v2PrivateGetWalletFundRecords(self.extend(request, params))
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": {
# "data": [
# {
# "id": 234467,
# "user_id": 1,
# "coin": "BTC",
# "wallet_id": 27913,
# "type": "Realized P&L",
# "amount": "-0.00000006",
# "tx_id": "",
# "address": "BTCUSD",
# "wallet_balance": "0.03000330",
# "exec_time": "2019-12-09T00:00:25.000Z",
# "cross_seq": 0
# }
# ]
# },
# "ext_info": null,
# "time_now": "1577481867.115552",
# "rate_limit_status": 119,
# "rate_limit_reset_ms": 1577481867122,
# "rate_limit": 120
# }
#
result = self.safe_value(response, 'result', {})
data = self.safe_value(result, 'data', [])
return self.parse_ledger(data, currency, since, limit)
def parse_ledger_entry(self, item, currency=None):
#
# {
# "id": 234467,
# "user_id": 1,
# "coin": "BTC",
# "wallet_id": 27913,
# "type": "Realized P&L",
# "amount": "-0.00000006",
# "tx_id": "",
# "address": "BTCUSD",
# "wallet_balance": "0.03000330",
# "exec_time": "2019-12-09T00:00:25.000Z",
# "cross_seq": 0
# }
#
currencyId = self.safe_string(item, 'coin')
code = self.safe_currency_code(currencyId, currency)
amount = self.safe_number(item, 'amount')
after = self.safe_number(item, 'wallet_balance')
direction = 'out' if (amount < 0) else 'in'
before = None
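        # the record only carries the balance after the entry,
        # so reconstruct the balance before it from the signed amount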
if after is not None and amount is not None:
difference = amount if (direction == 'out') else -amount
before = self.sum(after, difference)
timestamp = self.parse8601(self.safe_string(item, 'exec_time'))
type = self.parse_ledger_entry_type(self.safe_string(item, 'type'))
id = self.safe_string(item, 'id')
referenceId = self.safe_string(item, 'tx_id')
return {
'id': id,
'currency': code,
'account': self.safe_string(item, 'wallet_id'),
'referenceAccount': None,
'referenceId': referenceId,
'status': None,
'amount': amount,
'before': before,
'after': after,
'fee': None,
'direction': direction,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'type': type,
'info': item,
}
def parse_ledger_entry_type(self, type):
types = {
'Deposit': 'transaction',
'Withdraw': 'transaction',
'RealisedPNL': 'trade',
'Commission': 'fee',
'Refund': 'cashback',
'Prize': 'prize', # ?
'ExchangeOrderWithdraw': 'transaction',
'ExchangeOrderDeposit': 'transaction',
}
return self.safe_string(types, type, type)
async def fetch_positions(self, symbols=None, params={}):
await self.load_markets()
request = {}
if isinstance(symbols, list):
length = len(symbols)
if length != 1:
raise ArgumentsRequired(self.id + ' fetchPositions takes an array with exactly one symbol')
request['symbol'] = self.market_id(symbols[0])
defaultType = self.safe_string(self.options, 'defaultType', 'linear')
type = self.safe_string(params, 'type', defaultType)
params = self.omit(params, 'type')
response = None
if type == 'linear':
response = await self.privateLinearGetPositionList(self.extend(request, params))
elif type == 'inverse':
response = await self.v2PrivateGetPositionList(self.extend(request, params))
elif type == 'inverseFuture':
response = await self.futuresPrivateGetPositionList(self.extend(request, params))
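        # the endpoint may return a JSON-encoded string instead of a parsed
        # structure, so decode it before extracting the result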
if (isinstance(response, basestring)) and self.is_json_encoded_object(response):
response = json.loads(response)
#
# {
# ret_code: 0,
# ret_msg: 'OK',
# ext_code: '',
# ext_info: '',
# result: [] or {} depending on the request
# }
#
return self.safe_value(response, 'result')
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
type = self.safe_string(api, 0)
section = self.safe_string(api, 1)
if type == 'spot':
if section == 'public':
section = 'v1'
else:
section += '/v1'
url = self.implode_hostname(self.urls['api'][type])
request = '/' + type + '/' + section + '/' + path
if (type == 'spot') or (type == 'quote'):
if params:
request += '?' + self.rawencode(params)
elif section == 'public':
if params:
request += '?' + self.rawencode(params)
elif type == 'public':
if params:
request += '?' + self.rawencode(params)
else:
self.check_required_credentials()
timestamp = self.nonce()
query = self.extend(params, {
'api_key': self.apiKey,
'recv_window': self.options['recvWindow'],
'timestamp': timestamp,
})
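            # private requests are signed with an HMAC of the key-sorted,
            # raw-encoded query string, keyed with the account secret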
sortedQuery = self.keysort(query)
auth = self.rawencode(sortedQuery)
signature = self.hmac(self.encode(auth), self.encode(self.secret))
if method == 'POST':
body = self.json(self.extend(query, {
'sign': signature,
}))
headers = {
'Content-Type': 'application/json',
}
else:
request += '?' + self.urlencode(sortedQuery) + '&sign=' + signature
url += request
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if not response:
return # fallback to default error handler
#
# {
# ret_code: 10001,
# ret_msg: 'ReadMapCB: expect {or n, but found \u0000, error ' +
# 'found in #0 byte of ...||..., bigger context ' +
# '...||...',
# ext_code: '',
# ext_info: '',
# result: null,
# time_now: '1583934106.590436'
# }
#
errorCode = self.safe_string(response, 'ret_code')
if errorCode != '0':
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], body, feedback)
raise ExchangeError(feedback) # unknown message
async def set_margin_mode(self, marginType, symbol=None, params={}):
#
# {
# "ret_code": 0,
# "ret_msg": "ok",
# "ext_code": "",
# "result": null,
# "ext_info": null,
# "time_now": "1577477968.175013",
# "rate_limit_status": 74,
# "rate_limit_reset_ms": 1577477968183,
# "rate_limit": 75
# }
#
leverage = self.safe_value(params, 'leverage')
if leverage is None:
            raise ArgumentsRequired(self.id + ' setMarginMode() requires a leverage parameter')
marginType = marginType.upper()
if (marginType != 'ISOLATED') and (marginType != 'CROSSED'):
raise BadRequest(self.id + ' marginType must be either isolated or crossed')
await self.load_markets()
market = self.market(symbol)
method = None
defaultType = self.safe_string(self.options, 'defaultType', 'linear')
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_string(marketTypes, symbol, defaultType)
linear = market['linear'] or (marketType == 'linear')
inverse = (market['swap'] and market['inverse']) or (marketType == 'inverse')
futures = market['futures'] or (marketType == 'futures')
if linear:
method = 'privateLinearPostPositionSwitchIsolated'
elif inverse:
method = 'v2PrivatePostPositionSwitchIsolated'
elif futures:
method = 'privateFuturesPostPositionSwitchIsolated'
isIsolated = (marginType == 'ISOLATED')
request = {
'symbol': market['id'],
'is_isolated': isIsolated,
'buy_leverage': leverage,
'sell_leverage': leverage,
}
return await getattr(self, method)(self.extend(request, params))
async def set_leverage(self, leverage, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' setLeverage() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
# WARNING: THIS WILL INCREASE LIQUIDATION PRICE FOR OPEN ISOLATED LONG POSITIONS
# AND DECREASE LIQUIDATION PRICE FOR OPEN ISOLATED SHORT POSITIONS
defaultType = self.safe_string(self.options, 'defaultType', 'linear')
marketTypes = self.safe_value(self.options, 'marketTypes', {})
marketType = self.safe_string(marketTypes, symbol, defaultType)
linear = market['linear'] or (marketType == 'linear')
inverse = (market['swap'] and market['inverse']) or (marketType == 'inverse')
futures = market['futures'] or (marketType == 'futures')
method = None
if linear:
method = 'privateLinearPostPositionSetLeverage'
elif inverse:
method = 'v2PrivatePostPositionLeverageSave'
elif futures:
method = 'privateFuturesPostPositionLeverageSave'
buy_leverage = leverage
sell_leverage = leverage
        if params.get('buy_leverage') and params.get('sell_leverage') and linear:
buy_leverage = params['buy_leverage']
sell_leverage = params['sell_leverage']
elif not leverage:
if linear:
raise ArgumentsRequired(self.id + ' setLeverage() requires either the parameter leverage or params["buy_leverage"] and params["sell_leverage"] for linear contracts')
else:
raise ArgumentsRequired(self.id + ' setLeverage() requires parameter leverage for inverse and futures contracts')
if (buy_leverage < 1) or (buy_leverage > 100) or (sell_leverage < 1) or (sell_leverage > 100):
raise BadRequest(self.id + ' leverage should be between 1 and 100')
request = {
'symbol': market['id'],
'leverage_only': True,
}
if not linear:
request['leverage'] = buy_leverage
else:
request['buy_leverage'] = buy_leverage
request['sell_leverage'] = sell_leverage
return await getattr(self, method)(self.extend(request, params))
|
py | b4017070592c4036ca9b8ccccaa9a9d7c67bb4bc | from jira import JIRA
import pandas as pd
from pandas import ExcelWriter
from openpyxl import load_workbook
import configparser
path = "configuration.ini"
config = configparser.ConfigParser()
config.read(path)
result_slug = str(config.get("DEFAULT", "result_slug"))
data_path = str(config.get("DEFAULT", "data_path"))
jql = str(config.get("DEFAULT", "jql"))
workbook_path = str(config.get("DEFAULT", "workbook_path"))
jira_url = str(config.get("DEFAULT", "jira_url"))
login = str(config.get("DEFAULT", "login"))
password = str(config.get("DEFAULT", "password"))
workbook_full_path = workbook_path
workbook = load_workbook(filename = workbook_full_path)
sheet = workbook.active
values = sheet.values
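# the workbook is expected to hold two columns: assignee username and monthly salary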
salarydf = pd.DataFrame(values)
salarydf = pd.DataFrame(salarydf.values, columns = ["assigneeuser", "salary_user"])
salarydf.set_index("assigneeuser",inplace=True)
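# hourly rate is derived from the monthly salary assuming a 160-hour working month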
salarydf["salary_per_hour"] = salarydf["salary_user"]/160
print('Salary per user:')
print(salarydf)
allissues = []
print("Connecting to jira, jql filter is", jql)
# Defines a function for connecting to Jira
def connect_jira(jira_server, jira_user, jira_password):
'''
Connect to JIRA. Return None on error
'''
try:
print("Connecting to JIRA: %s" % jira_server)
jira_options = {'server': jira_server}
jira = JIRA(options=jira_options, basic_auth=(jira_user, jira_password))
# ^--- Note the tuple
return jira
    except Exception as e:
        print("Failed to connect to JIRA: %s" % e)
return None
jira = connect_jira(jira_url, login, password)
issues = jira.search_issues(jql)
for issue in issues:
allissues.append(issue)
print ('Tasks found:', len(allissues))
issues = pd.DataFrame()
for issue in allissues:
issue = jira.issue(issue.key)
WorkLog = jira.worklogs(issue)
if len(WorkLog) > 0:
for i in range(len(WorkLog)):
d = {
'assigneeuser': str(issue.fields.assignee.name),
'timespent': WorkLog[i].timeSpentSeconds / 3600
}
issues = issues.append(d, ignore_index=True)
issues.set_index('assigneeuser',inplace=True)
grouped = issues.groupby('assigneeuser').mean()
print('Time spent per user:')
print(grouped)
df_merge = pd.merge(salarydf, grouped, on="assigneeuser")
df_merge['summ'] = df_merge['salary_per_hour'] * df_merge['timespent']
print(df_merge)
TaskCost = round(df_merge['summ'].sum(axis = 0, skipna = True))
print('Task cost is',TaskCost, 'rur')
new_path = data_path + '-tasks-'+result_slug+'.xlsx'
writer = ExcelWriter(new_path)
df_merge.to_excel(writer,'jiratasks', index=False, engine='xlsxwriter')
writer.save()
print('Saved to', new_path)
|
py | b40172f042205d697baa952e677b1d3ecdc4f0f5 | """
generates the muco_temp synthetic dataset. In order to use this script, you already have to have generated
the sequence metadata files in 'sequence_meta.pkl' and the ground-truth poses. The scripts can be found in mpi_inf_3dhp.ipynb
"""
from databases import mpii_3dhp, muco_temp
from databases.joint_sets import MuPoTSJoints
import numpy as np
from util.misc import ensuredir, load
import os
import cv2
from multiprocessing import Pool
NUM_FRAMES = 2000
def generate_vid_frames(cam, vid_id):
print(cam, vid_id)
metas = sequence_metas[cam][vid_id]
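    # 50 fps source sequences are stepped by 2 so that every source advances at 25 fps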
steps = [2 if mpii_3dhp.get_train_fps(meta[0], meta[1]) == 50 else 1 for meta in metas]
out_folder = os.path.join(muco_temp.MUCO_TEMP_PATH, 'frames/cam_%d/vid_%d' % (cam, vid_id))
ensuredir(out_folder)
gt_poses = load(os.path.join(muco_temp.MUCO_TEMP_PATH, 'frames/cam_%d/gt.pkl' % cam))[vid_id]['annot3']
hip_ind = MuPoTSJoints().index_of('hip')
for i in range(NUM_FRAMES):
# generate frame
depths = gt_poses[i, :, hip_ind, 2]
ordered_poses = np.argsort(depths)[::-1] # poses ordered by depth in decreasing order
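        # the pose farthest from the camera supplies the background frame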
bg_ind = ordered_poses[0]
img = mpii_3dhp.get_image(metas[bg_ind][0], metas[bg_ind][1], cam, metas[bg_ind][2] + i * steps[bg_ind], rgb=False)
img = img.astype('float32')
# add new pose onto image
for pose_ind in ordered_poses[1:]:
sub, seq, start = metas[pose_ind]
pose_img = mpii_3dhp.get_image(sub, seq, cam, start + i * steps[pose_ind], rgb=False)
# mask is 0 at greenscreen bg, 1 at foreground (body, chair)
mask = mpii_3dhp.get_mask(sub, seq, cam, start + i * steps[pose_ind], 'FGmasks')[:, :, 2] / 255.
mask = cv2.GaussianBlur(mask, (0, 0), 2)[:, :, np.newaxis]
# chair_mask is 0 at chair, 1 everywhere else
chair_mask = mpii_3dhp.get_mask(sub, seq, cam, start + i * steps[pose_ind], 'ChairMasks')[:, :, [2]] / 255
img = chair_mask * img + (1 - chair_mask) * pose_img
img = mask * pose_img + (1 - mask) * img
img = img.astype('uint8')
cv2.imwrite(os.path.join(out_folder, 'img_%04d.jpg' % i), img, [cv2.IMWRITE_JPEG_QUALITY, 80])
if __name__ == '__main__':
sequence_metas = muco_temp.get_metadata()
p = Pool(6)
params = [(cam, vid) for cam in range(11) for vid in range(0, 7)]
p.starmap(generate_vid_frames, params)
|
py | b401732969104f2e718e5eb71f0176dc56cfe56a | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-08-06 01:07
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('library', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='item',
name='available',
),
migrations.RemoveField(
model_name='loan',
name='returned',
),
migrations.AddField(
model_name='item',
name='item_status',
field=models.CharField(choices=[('on_loan', 'Item is on loan'), ('requested', 'Item has been requested'), ('available', 'Item is available')], default='available', max_length=9),
),
]
|
py | b401733525bd566e210c1fcefecc737613eabd84 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for state updating ops that may have benign race conditions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class AssignOpTest(test.TestCase):
  # NOTE(mrry): We exclude these tests from the TSAN TAP target, because they
# contain benign and deliberate data races when multiple threads update
# the same parameters without a lock.
def testParallelUpdateWithoutLocking(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
ones_t = array_ops.fill([1024, 1024], 1.0)
p = variables.Variable(array_ops.zeros([1024, 1024]))
adds = [
state_ops.assign_add(
p, ones_t, use_locking=False) for _ in range(20)
]
self.evaluate(variables.global_variables_initializer())
def run_add(add_op):
self.evaluate(add_op)
threads = [
self.checkedThread(
target=run_add, args=(add_op,)) for add_op in adds
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = self.evaluate(p)
ones = np.ones((1024, 1024)).astype(np.float32)
self.assertTrue((vals >= ones).all())
self.assertTrue((vals <= ones * 20).all())
def testParallelAssignWithoutLocking(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
ones_t = array_ops.fill([1024, 1024], float(1))
p = variables.Variable(array_ops.zeros([1024, 1024]))
assigns = [
state_ops.assign(p, math_ops.multiply(ones_t, float(i)), False)
for i in range(1, 21)
]
self.evaluate(variables.global_variables_initializer())
def run_assign(assign_op):
self.evaluate(assign_op)
threads = [
self.checkedThread(
target=run_assign, args=(assign_op,)) for assign_op in assigns
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = self.evaluate(p)
# Assert every element is taken from one of the assignments.
self.assertTrue((vals > 0).all())
self.assertTrue((vals <= 20).all())
# NOTE(skyewm): We exclude these tests from the TSAN TAP target, because they
# contain non-benign but known data races between the variable assignment and
# returning the output tensors. This issue will be resolved with the new
# resource variables.
def testParallelUpdateWithLocking(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
zeros_t = array_ops.fill([1024, 1024], 0.0)
ones_t = array_ops.fill([1024, 1024], 1.0)
p = variables.Variable(zeros_t)
adds = [
state_ops.assign_add(
p, ones_t, use_locking=True) for _ in range(20)
]
self.evaluate(p.initializer)
def run_add(add_op):
self.evaluate(add_op)
threads = [
self.checkedThread(
target=run_add, args=(add_op,)) for add_op in adds
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = self.evaluate(p)
ones = np.ones((1024, 1024)).astype(np.float32)
self.assertAllEqual(vals, ones * 20)
def testParallelAssignWithLocking(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
zeros_t = array_ops.fill([1024, 1024], 0.0)
ones_t = array_ops.fill([1024, 1024], 1.0)
p = variables.Variable(zeros_t)
assigns = [
state_ops.assign(
p, math_ops.multiply(ones_t, float(i)), use_locking=True)
for i in range(1, 21)
]
self.evaluate(p.initializer)
def run_assign(assign_op):
self.evaluate(assign_op)
threads = [
self.checkedThread(
target=run_assign, args=(assign_op,)) for assign_op in assigns
]
for t in threads:
t.start()
for t in threads:
t.join()
vals = self.evaluate(p)
# Assert every element is the same, and taken from one of the assignments.
self.assertTrue(vals[0, 0] > 0)
self.assertTrue(vals[0, 0] <= 20)
self.assertAllEqual(vals, np.ones([1024, 1024]) * vals[0, 0])
if __name__ == "__main__":
test.main()
|
py | b401735be5fc8a882c470f5684890bbbb201257f | import random
import torch
import logging
import multiprocessing
import numpy as np
logger = logging.getLogger(__name__)
def add_args(parser):
parser.add_argument("--task", type=str, required=True,
choices=['summarize', 'concode', 'translate', 'refine', 'defect', 'clone'])
parser.add_argument("--sub_task", type=str, default='')
parser.add_argument("--lang", type=str, default='')
parser.add_argument("--eval_task", type=str, default='')
parser.add_argument("--model_type", default="codet5", type=str, choices=['roberta', 'bart', 'codet5'])
parser.add_argument("--add_lang_ids", action='store_true')
parser.add_argument("--data_num", default=-1, type=int)
parser.add_argument("--start_epoch", default=0, type=int)
parser.add_argument("--num_train_epochs", default=100, type=int)
parser.add_argument("--patience", default=5, type=int)
parser.add_argument("--tokenizer_path", type=str, required=True)
parser.add_argument("--cache_path", type=str, required=True)
parser.add_argument("--summary_dir", type=str, required=True)
parser.add_argument("--data_dir", type=str, required=True)
parser.add_argument("--res_dir", type=str, required=True)
parser.add_argument("--res_fn", type=str, default='')
parser.add_argument("--add_task_prefix", action='store_true', help="Whether to add task prefix for t5 and codet5")
parser.add_argument("--save_last_checkpoints", action='store_true')
parser.add_argument("--always_save_model", action='store_true')
parser.add_argument("--do_eval_bleu", action='store_true', help="Whether to evaluate bleu on dev set.")
## Required parameters
parser.add_argument("--model_name_or_path", default="roberta-base", type=str,
help="Path to pre-trained model: e.g. roberta-base")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model predictions and checkpoints will be written.")
parser.add_argument("--load_model_path", default=None, type=str,
help="Path to trained model: Should contain the .bin files")
## Other parameters
parser.add_argument("--train_filename", default=None, type=str,
help="The train filename. Should contain the .jsonl files for this task.")
parser.add_argument("--dev_filename", default=None, type=str,
help="The dev filename. Should contain the .jsonl files for this task.")
parser.add_argument("--test_filename", default=None, type=str,
help="The test filename. Should contain the .jsonl files for this task.")
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default="roberta-base", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--max_source_length", default=64, type=int,
help="The maximum total source sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--max_target_length", default=32, type=int,
help="The maximum total target sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run eval on the train set.")
parser.add_argument("--do_eval", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_test", action='store_true',
help="Whether to run eval on the dev set.")
parser.add_argument("--do_lower_case", action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--no_cuda", action='store_true',
help="Avoid using CUDA when available")
parser.add_argument("--train_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for training.")
parser.add_argument("--eval_batch_size", default=8, type=int,
help="Batch size per GPU/CPU for evaluation.")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--beam_size", default=10, type=int,
help="beam size for beam search")
parser.add_argument("--weight_decay", default=0.0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--save_steps", default=-1, type=int, )
parser.add_argument("--log_steps", default=-1, type=int, )
parser.add_argument("--max_steps", default=-1, type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.")
parser.add_argument("--eval_steps", default=-1, type=int,
help="")
parser.add_argument("--train_steps", default=-1, type=int,
help="")
parser.add_argument("--warmup_steps", default=100, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument("--local_rank", type=int, default=-1,
help="For distributed training: local_rank")
parser.add_argument('--seed', type=int, default=1234,
help="random seed for initialization")
args = parser.parse_args()
if args.task in ['summarize']:
args.lang = args.sub_task
elif args.task in ['refine', 'concode', 'clone']:
args.lang = 'java'
elif args.task == 'defect':
args.lang = 'c'
elif args.task == 'translate':
args.lang = 'c_sharp' if args.sub_task == 'java-cs' else 'java'
return args
def set_dist(args):
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
else:
# Setup for distributed data parallel
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
args.n_gpu = 1
cpu_cont = multiprocessing.cpu_count()
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, cpu count: %d",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), cpu_cont)
args.device = device
args.cpu_cont = cpu_cont
def set_seed(args):
"""set random seed."""
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
|
py | b40173b5946ca11ce4ae1cce86c722d3452875fd | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class DelegatedSubnetServiceOperations(object):
"""DelegatedSubnetServiceOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~dnc.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def get_details(
self,
resource_group_name, # type: str
resource_name, # type: str
**kwargs # type: Any
):
# type: (...) -> "models.DelegatedSubnet"
"""Gets details about the specified dnc DelegatedSubnet Link.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the resource. It must be a minimum of 3 characters, and a
maximum of 63.
:type resource_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: DelegatedSubnet, or the result of cls(response)
:rtype: ~dnc.models.DelegatedSubnet
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.DelegatedSubnet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-15"
accept = "application/json"
# Construct URL
url = self.get_details.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=3, pattern=r'^[a-z][a-z0-9]*$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DelegatedSubnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_details.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DelegatedNetwork/delegatedSubnets/{resourceName}'} # type: ignore
def _put_details_initial(
self,
resource_group_name, # type: str
resource_name, # type: str
parameters, # type: "models.DelegatedSubnet"
**kwargs # type: Any
):
# type: (...) -> "models.DelegatedSubnet"
cls = kwargs.pop('cls', None) # type: ClsType["models.DelegatedSubnet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-15"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._put_details_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=3, pattern=r'^[a-z][a-z0-9]*$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'DelegatedSubnet')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('DelegatedSubnet', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('DelegatedSubnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_put_details_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DelegatedNetwork/delegatedSubnets/{resourceName}'} # type: ignore
def begin_put_details(
self,
resource_group_name, # type: str
resource_name, # type: str
parameters, # type: "models.DelegatedSubnet"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.DelegatedSubnet"]
"""Put delegated subnet resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the resource. It must be a minimum of 3 characters, and a
maximum of 63.
:type resource_name: str
:param parameters: Delegated subnet details.
:type parameters: ~dnc.models.DelegatedSubnet
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either DelegatedSubnet or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~dnc.models.DelegatedSubnet]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.DelegatedSubnet"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._put_details_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DelegatedSubnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=3, pattern=r'^[a-z][a-z0-9]*$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_put_details.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DelegatedNetwork/delegatedSubnets/{resourceName}'} # type: ignore
def _patch_details_initial(
self,
resource_group_name, # type: str
resource_name, # type: str
parameters, # type: "models.ResourceUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> "models.DelegatedSubnet"
cls = kwargs.pop('cls', None) # type: ClsType["models.DelegatedSubnet"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-15"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._patch_details_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=3, pattern=r'^[a-z][a-z0-9]*$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ResourceUpdateParameters')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('DelegatedSubnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_patch_details_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DelegatedNetwork/delegatedSubnets/{resourceName}'} # type: ignore
def begin_patch_details(
self,
resource_group_name, # type: str
resource_name, # type: str
parameters, # type: "models.ResourceUpdateParameters"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.DelegatedSubnet"]
"""Patch delegated subnet resource.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the resource. It must be a minimum of 3 characters, and a
maximum of 63.
:type resource_name: str
:param parameters: Delegated subnet details.
:type parameters: ~dnc.models.ResourceUpdateParameters
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either DelegatedSubnet or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~dnc.models.DelegatedSubnet]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.DelegatedSubnet"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._patch_details_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('DelegatedSubnet', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=3, pattern=r'^[a-z][a-z0-9]*$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_patch_details.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DelegatedNetwork/delegatedSubnets/{resourceName}'} # type: ignore
def _delete_details_initial(
self,
resource_group_name, # type: str
resource_name, # type: str
force_delete=None, # type: Optional[bool]
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-15"
accept = "application/json"
# Construct URL
url = self._delete_details_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=3, pattern=r'^[a-z][a-z0-9]*$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if force_delete is not None:
query_parameters['forceDelete'] = self._serialize.query("force_delete", force_delete, 'bool')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_details_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DelegatedNetwork/delegatedSubnets/{resourceName}'} # type: ignore
def begin_delete_details(
self,
resource_group_name, # type: str
resource_name, # type: str
force_delete=None, # type: Optional[bool]
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Delete dnc DelegatedSubnet.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:param resource_name: The name of the resource. It must be a minimum of 3 characters, and a
maximum of 63.
:type resource_name: str
:param force_delete: Force delete resource.
:type force_delete: bool
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_details_initial(
resource_group_name=resource_group_name,
resource_name=resource_name,
force_delete=force_delete,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'resourceName': self._serialize.url("resource_name", resource_name, 'str', max_length=63, min_length=3, pattern=r'^[a-z][a-z0-9]*$'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete_details.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DelegatedNetwork/delegatedSubnets/{resourceName}'} # type: ignore
def list_by_subscription(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["models.DelegatedSubnets"]
"""Get all the DelegatedSubnets resources in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DelegatedSubnets or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~dnc.models.DelegatedSubnets]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.DelegatedSubnets"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-15"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_subscription.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('DelegatedSubnets', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.DelegatedNetwork/delegatedSubnets'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.DelegatedSubnets"]
"""Get all the DelegatedSubnets resources in a resource group.
:param resource_group_name: The name of the resource group. The name is case insensitive.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either DelegatedSubnets or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~dnc.models.DelegatedSubnets]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.DelegatedSubnets"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-03-15"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str', min_length=1),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('DelegatedSubnets', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize(models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DelegatedNetwork/delegatedSubnets'} # type: ignore
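# Hedged usage sketch (not part of the generated code): how the LRO delete and the
# pagers defined above are typically consumed. "operations" stands for an instance of
# this operations class obtained from whatever service client exposes it; the client
# wiring itself is not shown here and is an assumption.
def _delegated_subnets_usage_sketch(operations, resource_group_name, resource_name):
    # start the long-running delete and block until the service reports completion
    poller = operations.begin_delete_details(resource_group_name, resource_name, force_delete=True)
    poller.result()
    # the pagers are plain iterables; iterating them drives the paging requests
    return [subnet for subnet in operations.list_by_subscription()]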
|
py | b40174808c08ca85d4b66f1f3af3d9de0769211e | # coding:utf-8
from __future__ import absolute_import, division, print_function
import urllib.request
import re
import os
class Spider:
def __init__(self):
self.siteURL = 'http://disi.unitn.it/~sartori/datasets/deviantart-dataset/'
self.save_dir = 'save/'
self.save_error = 'save/_____error_url.txt'
self.error_url = []
def getPage(self, url):
request = urllib.request.Request(url)
try:
response = urllib.request.urlopen(request)
return response.read().decode('ISO-8859-1')
except urllib.request.URLError as e:
self.error_url.append(str(url))
            print(e, u'Found a link that could not be opened:', self.error_url[-1])
def getImgUrl(self):
page = self.getPage(url=self.siteURL)
pattern = re.compile('<td width="657" valign="top"><font size="2"><a href="(.*?)">.*?</a></font></td>', re.S)
items = re.findall(pattern, page)
contents = []
for item in items:
contents.append(item)
return contents
def getImg(self):
img_list = self.getImgUrl()
idx = 1
for img_url in img_list:
page = self.getPage(url=img_url)
pattern = re.compile('<meta name="twitter:image" content="(.*?)">', re.S)
try:
items = re.findall(pattern, page)
print(items[0])
file_name = self.save_dir + str(items[0].split('/')[-1])
if os.path.exists(file_name):
                    print('Image %d (%s) already exists' % (idx, file_name))
pass
else:
self.saveImg(imageURL=items[0], fileName=file_name)
                    print(u'Image %d (%s) saved' % (idx, file_name))
idx += 1
except:
pass
f = open(self.save_error, 'w')
for error_url in self.error_url:
f.write(error_url)
f.write('\n')
f.close()
        print(u'Error links have been saved to %s' % self.save_error)
def saveImg(self, imageURL, fileName):
u = urllib.request.urlopen(imageURL)
data = u.read()
f = open(fileName, 'wb')
f.write(data)
f.close()
if __name__ == '__main__':
Spider().getImg()
|
py | b401758e0d620d20613c7c33c53e1292e208fdfa | from unittest import TestCase
from rexpro._compat import PY2, xrange
from nose.plugins.attrib import attr
import os
if PY2:
from rexpro.connectors.rgevent import RexProGeventConnection, RexProGeventSocket, RexProGeventConnectionPool
import gevent
def slow_start_simulation(ref):
gevent.sleep(1)
conn = ref.get_connection()
return conn
def spawn_slow_network_and_query_slow_response(ref, script, sleep_time, data):
conn = slow_start_simulation(ref)
return conn.execute(script=script, params={'sleep_length': sleep_time, 'data': data})
@attr('concurrency', 'gevent', 'python2')
class TestGeventConcurrency(TestCase):
SOCKET_CLASS = RexProGeventSocket
CONN_CLASS = RexProGeventConnection
POOL_CLASS = RexProGeventConnectionPool
host = os.getenv('TITAN_HOST', 'localhost')
port = int(os.getenv('TITAN_REXPRO_PORT', 8184))
default_graphname = 'graph'
username = 'rexster'
password = 'rexster'
timeout = 30
test_graphs = [
#'emptygraph', # Tinkergraph
'graph', # Tinkergraph
#'emptysailgraph', # in memory sail graph
#'sailgraph', #sail graph
#'orientdbsample', # OrientDB
#'neo4jsample', # Neo4j
#'dexsample', # DexGraph
#'titangraph', # Titan
]
NUM_ITER = 10
SLOW_NETWORK_QUERY = """def test_slow_query(sleep_length, data) {
sleep sleep_length
return data
}
test_slow_query(sleep_length, data)
"""
def get_connection(self, host=None, port=None, graphname=None, username=None, password=None, timeout=None):
return self.CONN_CLASS(
host or self.host,
port or self.port,
graphname or self.default_graphname,
username=username or self.username,
password=password or self.password,
timeout=timeout or self.timeout
)
def test_start_many_connections(self):
""" Test starting up many connections """
gevent.joinall([gevent.spawn(self.get_connection) for _ in xrange(self.NUM_ITER)], timeout=3)
def test_start_many_slow_connections(self):
""" Test starting many slow connections """
gevent.joinall([gevent.spawn(slow_start_simulation, self) for _ in xrange(self.NUM_ITER)], timeout=3)
def test_many_network_calls(self):
""" Test known responses on a network that should be slow, we should get them all asynchronously """
threads = []
for i in xrange(self.NUM_ITER):
threads.append(gevent.spawn(spawn_slow_network_and_query_slow_response,
self,
self.SLOW_NETWORK_QUERY,
1,
{'value': i, i: 'value'}
)
)
gevent.joinall(threads, timeout=5)
|
py | b401775f5af0e9b7b7978646db33631b271d516f | #!/usr/bin/env python3
import os
import sys
import textwrap
self_path = os.path.dirname(os.path.realpath(__file__));
f = open(self_path + "/unicode/CaseFolding.txt", "r")
status_list = [ "C", "F" ]
folding_list = [ dict(), dict(), dict() ]
# Filter the foldings for "full" folding.
for line in f:
comment_off = line.find("#")
if comment_off >= 0:
line = line[:comment_off]
line = line.strip()
if not line:
continue
raw_codepoint, status, raw_mapping, ignored_tail = line.split(";", 3)
if not status.strip() in status_list:
continue
codepoint = int(raw_codepoint.strip(), 16)
mapping = [int(it, 16) for it in raw_mapping.strip().split(" ")]
mapping_len = len(mapping)
if mapping_len in range(1, 4):
folding_list[mapping_len-1][codepoint] = mapping
else:
assert(False)
f.close()
# If we assume that (index0 ... index-1) makes a range (as defined below),
# check that the newly provided index is compatible with the range too; i.e.
# verify that the range can be extended without breaking its properties.
#
# Currently, we can handle ranges which:
#
# (1) either form consecutive sequence of codepoints and which map that range
# to other consecutive range of codepoints (of the same length);
#
# (2) or a consecutive sequence of codepoints with step 2 where each codepoint
# CP is mapped to the codepoint CP+1
# (e.g. 0x1234 -> 0x1235; 0x1236 -> 0x1237; 0x1238 -> 0x1239; ...).
#
# Note: When the codepoints in the range are mapped to multiple codepoints,
# only the 1st mapped codepoint is considered. All the other ones have to be
# shared by all the mappings covered by the range.
def is_range_compatible(folding, codepoint_list, index0, index):
N = index - index0
codepoint0 = codepoint_list[index0]
codepoint1 = codepoint_list[index0+1]
codepointN = codepoint_list[index]
mapping0 = folding[codepoint0]
mapping1 = folding[codepoint1]
mappingN = folding[codepointN]
# Check the range type (1):
if codepoint1 - codepoint0 == 1 and codepointN - codepoint0 == N \
and mapping1[0] - mapping0[0] == 1 and mapping1[1:] == mapping0[1:] \
and mappingN[0] - mapping0[0] == N and mappingN[1:] == mapping0[1:]:
return True
# Check the range type (2):
if codepoint1 - codepoint0 == 2 and codepointN - codepoint0 == 2 * N \
and mapping0[0] - codepoint0 == 1 \
and mapping1[0] - codepoint1 == 1 and mapping1[1:] == mapping0[1:] \
and mappingN[0] - codepointN == 1 and mappingN[1:] == mapping0[1:]:
return True
return False
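def _range_type_2_example():
    # Hedged illustration of range type (2) described above, using made-up codepoints
    # rather than data from CaseFolding.txt: consecutive even codepoints that each fold
    # to codepoint + 1 can be collapsed into a single range record.
    demo_folding = {0x1234: [0x1235], 0x1236: [0x1237], 0x1238: [0x1239]}
    return is_range_compatible(demo_folding, list(demo_folding), 0, 2)  # True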
def mapping_str(list, mapping):
return ",".join("0x{:04x}".format(x) for x in mapping)
for mapping_len in range(1, 4):
folding = folding_list[mapping_len-1]
codepoint_list = list(folding)
index0 = 0
count = len(folding)
records = list()
data_records = list()
while index0 < count:
index1 = index0 + 1
while index1 < count and is_range_compatible(folding, codepoint_list, index0, index1):
index1 += 1
if index1 - index0 > 2:
# Range of codepoints
records.append("R(0x{:04x},0x{:04x})".format(codepoint_list[index0], codepoint_list[index1-1]))
data_records.append(mapping_str(data_records, folding[codepoint_list[index0]]))
data_records.append(mapping_str(data_records, folding[codepoint_list[index1-1]]))
index0 = index1
else:
# Single codepoint
records.append("S(0x{:04x})".format(codepoint_list[index0]))
data_records.append(mapping_str(data_records, folding[codepoint_list[index0]]))
index0 += 1
sys.stdout.write("static const unsigned FOLD_MAP_{}[] = {{\n".format(mapping_len))
sys.stdout.write("\n".join(textwrap.wrap(", ".join(records), 110,
initial_indent = " ", subsequent_indent=" ")))
sys.stdout.write("\n};\n")
sys.stdout.write("static const unsigned FOLD_MAP_{}_DATA[] = {{\n".format(mapping_len))
sys.stdout.write("\n".join(textwrap.wrap(", ".join(data_records), 110,
initial_indent = " ", subsequent_indent=" ")))
sys.stdout.write("\n};\n")
|
py | b40177b686c4f36ff52bccde43e53d7cd1cb962b | from collections import namedtuple # for type hints
import enum # for type hints
import os
import struct
from types import ModuleType
from typing import Dict
from . import base
from . import id_software
from . import lumps
GoldSrcLumpHeader = namedtuple("GoldSrcLumpHeader", ["offset", "length"])
class GoldSrcBsp(id_software.IdTechBsp): # TODO: QuakeBsp subclass?
file_magic = None
# https://github.com/ValveSoftware/halflife/blob/master/utils/common/bspfile.h
# http://hlbsp.sourceforge.net/index.php?content=bspdef
def __repr__(self):
version = f"(version {self.bsp_version})" # no file_magic
branch_script = ".".join(self.branch.__name__.split(".")[-2:])
return f"<{self.__class__.__name__} '{self.filename}' {branch_script} {version}>"
def _preload(self):
self.file = open(os.path.join(self.folder, self.filename), "rb")
self.bsp_version = int.from_bytes(self.file.read(4), "little")
self.file.seek(0, 2) # move cursor to end of file
self.bsp_file_size = self.file.tell()
self.headers = dict()
self.loading_errors: Dict[str, Exception] = dict()
for LUMP_enum in self.branch.LUMP:
LUMP_NAME = LUMP_enum.name
self.file.seek(self.branch.lump_header_address[LUMP_enum])
offset, length = struct.unpack("2I", self.file.read(8))
lump_header = GoldSrcLumpHeader(offset, length)
self.headers[LUMP_NAME] = lump_header
if length == 0:
continue # empty lump
try:
if LUMP_NAME in self.branch.LUMP_CLASSES:
LumpClass = self.branch.LUMP_CLASSES[LUMP_NAME]
BspLump = lumps.create_BspLump(self.file, lump_header, LumpClass)
elif LUMP_NAME in self.branch.SPECIAL_LUMP_CLASSES:
SpecialLumpClass = self.branch.SPECIAL_LUMP_CLASSES[LUMP_NAME]
self.file.seek(offset)
BspLump = SpecialLumpClass(self.file.read(length))
elif LUMP_NAME in self.branch.BASIC_LUMP_CLASSES:
LumpClass = self.branch.BASIC_LUMP_CLASSES[LUMP_NAME]
BspLump = lumps.create_BasicBspLump(self.file, lump_header, LumpClass)
else:
BspLump = lumps.create_RawBspLump(self.file, lump_header)
except Exception as exc:
self.loading_errors[LUMP_NAME] = exc
BspLump = lumps.create_RawBspLump(self.file, lump_header)
# NOTE: doesn't decompress LZMA, fix that
setattr(self, LUMP_NAME, BspLump)
def _read_header(self, LUMP: enum.Enum) -> GoldSrcLumpHeader:
"""Reads bytes of lump"""
self.file.seek(self.branch.lump_header_address[LUMP])
offset, length = struct.unpack("2I", self.file.read(8))
header = GoldSrcLumpHeader(offset, length)
return header
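# Hedged usage sketch: loading a GoldSrc-era .bsp. It assumes the inherited IdTechBsp
# constructor takes (branch, filename) like ValveBsp below does; the concrete branch
# module to pass is not defined in this file and is left as a parameter.
def _goldsrc_usage_sketch(branch_module, filename):
    bsp = GoldSrcBsp(branch_module, filename)
    return bsp.bsp_version, bsp.headers, bsp.loading_errors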
class ValveBsp(base.Bsp):
# https://developer.valvesoftware.com/wiki/Source_BSP_File_Format
file_magic = b"VBSP"
def __init__(self, branch: ModuleType, filename: str = "untitled.bsp", autoload: bool = True):
super(ValveBsp, self).__init__(branch, filename, autoload)
# TODO: migrate Source specific functionality from base.Bsp to ValveBsp
def _read_header(self, LUMP: enum.Enum) -> namedtuple: # any LumpHeader
"""Get LUMP from self.branch.LUMP; e.g. self.branch.LUMP.ENTITIES"""
# NOTE: each branch of VBSP has unique headers,
# -- so branch.read_lump_header function is used
# TODO: move to a system of using header LumpClasses instead of the above
return self.branch.read_lump_header(self.file, LUMP)
def save_as(self, filename: str = None):
raise NotImplementedError()
# # TODO: get LumpHeaderClass from branch
# lump_order = sorted([L for L in self.branch.LUMP],
# key=lambda L: (self.headers[L.name].offset, self.headers[L.name].length))
# # ^ {"lump.name": LumpHeader / ExternalLumpHeader}
# # NOTE: messes up on empty lumps, so we can't get an exact 1:1 copy /;
# raw_lumps: Dict[str, bytes] = dict()
# # ^ {"LUMP.name": b"raw lump data]"}
# for LUMP in self.branch.LUMP:
# lump_bytes = self.lump_as_bytes(LUMP.name)
# if lump_bytes != b"": # don't write empty lumps
# raw_lumps[LUMP.name] = lump_bytes
# # recalculate headers
# current_offset = 0
# headers = dict()
# for LUMP in lump_order:
# if LUMP.name not in raw_lumps: # lump is not present
# version = self.headers[LUMP.name].version # PHYSICS_LEVEL needs version preserved
# headers[LUMP.name] = LumpHeader(current_offset, 0, version, 0)
# continue
        # # weird hack to align unused lump offsets correctly
# if current_offset == 0:
# current_offset = 16 + (16 * 128) # first byte after headers
# offset = current_offset
# length = len(raw_lumps[LUMP.name])
# version = self.headers[LUMP.name].version
# fourCC = 0 # fourCC is always 0 because we aren't encoding
# header = LumpHeader(offset, length, version, fourCC)
# headers[LUMP.name] = header # recorded for noting padding
# current_offset += length
# # pad to start at the next multiple of 4 bytes
# # TODO: note the padding so we can remove it when writing .bsp_lump
# if current_offset % 4 != 0:
# current_offset += 4 - current_offset % 4
# del current_offset
# if "GAME_LUMP" in raw_lumps:
# raw_lumps["GAME_LUMP"] = self.GAME_LUMP.as_bytes(headers["GAME_LUMP"].offset)
# # make file
# os.makedirs(os.path.dirname(os.path.realpath(filename)), exist_ok=True)
# outfile = open(filename, "wb")
# bsp_version = self.bsp_version
# if isinstance(self.bsp_version, tuple):
# bsp_version = bsp_version[0] + bsp_version[1] << 16
# outfile.write(struct.pack("4s2I", self.file_magic, bsp_version, self.revision))
# # write headers
# for LUMP in self.branch.LUMP:
# header = headers[LUMP.name]
# outfile.write(struct.pack("4I", header.offset, header.length, header.version, header.fourCC))
# # write lump contents (cannot be done until headers allocate padding)
# for LUMP in lump_order:
# if LUMP.name not in raw_lumps:
# continue
# padding_length = headers[LUMP.name].offset - outfile.tell()
# if padding_length > 0: # NOTE: padding_length should not exceed 3
# outfile.write(b"\0" * padding_length)
# outfile.write(raw_lumps[LUMP.name])
# # final padding
# end = outfile.tell()
# padding_length = 0
# if end % 4 != 0:
# padding_length = 4 - end % 4
# outfile.write(b"\0" * padding_length)
# outfile.close() # main .bsp is written
|
py | b4017809184aff686ec36fb7086022567a8d584d | # -*- coding: utf-8 -*-
# Copyright (c) 2016-2021 by University of Kassel and Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
import numpy as np
import pytest
import pandapower as pp
try:
import pplog as logging
except ImportError:
import logging
def test_cost_pol_gen():
""" Testing a very simple network for the resulting cost value
constraints with OPF """
# boundaries:
vm_max = 1.05
vm_min = 0.95
# create net
net = pp.create_empty_network()
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
pp.create_gen(net, 1, p_mw=0.1, controllable=True, min_p_mw=0.005, max_p_mw=0.15, max_q_mvar=0.05,
min_q_mvar=-0.05)
pp.create_ext_grid(net, 0)
pp.create_load(net, 1, p_mw=0.02, controllable=False)
pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
max_loading_percent=100 * 690)
pp.create_poly_cost(net, 0, "gen", cp1_eur_per_mw=1)
pp.runopp(net)
assert net["OPF_converged"]
assert np.isclose(net.res_cost, net.res_gen.p_mw.values)
net.poly_cost.cp1_eur_per_mw.at[0] = 0
net.poly_cost.cp2_eur_per_mw2.at[0] = 1
# run OPF
pp.runopp(net)
assert net["OPF_converged"]
assert np.isclose(net.res_cost, net.res_gen.p_mw.values**2)
def test_cost_pol_all_elements():
""" Testing a very simple network for the resulting cost value
constraints with OPF """
# boundaries:
vm_max = 1.05
vm_min = 0.95
# create net
net = pp.create_empty_network()
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
pp.create_gen(net, 1, p_mw=0.1, controllable=True, min_p_mw=0.005, max_p_mw=0.15, max_q_mvar=0.05,
min_q_mvar=-0.05)
pp.create_sgen(net, 1, p_mw=0.1, controllable=True, min_p_mw=0.005, max_p_mw=0.15, max_q_mvar=0.05,
min_q_mvar=-0.05)
pp.create_ext_grid(net, 0)
pp.create_load(net, 1, p_mw=0.02, controllable=False)
pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
max_loading_percent=100 * 690)
pp.create_poly_cost(net, 0, "gen", cp1_eur_per_mw=1)
pp.create_poly_cost(net, 0, "sgen", cp1_eur_per_mw=1)
# run OPF
pp.runopp(net)
assert net["OPF_converged"]
assert abs(net.res_cost - (net.res_gen.p_mw.values + net.res_sgen.p_mw.values)) < 1e-2
net.poly_cost.cp1_eur_per_mw.at[0] = 0
net.poly_cost.cp2_eur_per_mw2.at[0] = 1
pp.runopp(net)
assert net["OPF_converged"]
assert np.isclose(net.res_cost, net.res_gen.p_mw.values**2 + net.res_sgen.p_mw.values)
def test_cost_pol_q():
""" Testing a very simple network for the resulting cost value
constraints with OPF """
# boundaries:
vm_max = 1.05
vm_min = 0.95
# create net
net = pp.create_empty_network()
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=10.)
pp.create_bus(net, max_vm_pu=vm_max, min_vm_pu=vm_min, vn_kv=.4)
pp.create_sgen(net, 1, p_mw=0.1, controllable=True, min_p_mw=0.005, max_p_mw=0.15, max_q_mvar=0.05,
min_q_mvar=-0.05)
pp.create_ext_grid(net, 0)
pp.create_load(net, 1, p_mw=0.02, controllable=False)
pp.create_line_from_parameters(net, 0, 1, 50, name="line2", r_ohm_per_km=0.876,
c_nf_per_km=260.0, max_i_ka=0.123, x_ohm_per_km=0.1159876,
max_loading_percent=100 * 690)
pp.create_poly_cost(net, 0, "sgen", cp1_eur_per_mw=0, cq1_eur_per_mvar=-1)
# run OPF
pp.runopp(net)
assert net["OPF_converged"]
assert abs(net.res_cost + (net.res_sgen.q_mvar.values)) < 1e-2
net.poly_cost.cq1_eur_per_mvar.at[0] = 0
net.poly_cost.cq2_eur_per_mvar2.at[0] = 1
# net.poly_cost.c.at[0] = np.array([[1, 0, 0]])
# run OPF
pp.runopp(net)
assert net["OPF_converged"]
assert np.isclose(net.res_cost, net.res_sgen.q_mvar.values**2)
if __name__ == "__main__":
logger = logging.getLogger(__name__)
logger.setLevel("DEBUG")
pytest.main(["test_costs_pol.py", "-xs"]) |
py | b401786762f1421175987ecce2532ed56b5b256f | # -*- coding: utf-8 -*-
#################################################################################
# Author : Webkul Software Pvt. Ltd. (<https://webkul.com/>)
# Copyright(c): 2015-Present Webkul Software Pvt. Ltd.
# License URL : https://store.webkul.com/license.html/
# All Rights Reserved.
#
#
#
# This program is copyright property of the author mentioned above.
# You can't redistribute it and/or modify it.
#
#
# You should have received a copy of the License along with this program.
# If not, see <https://store.webkul.com/license.html/>
#################################################################################
from odoo import api, fields, models, _
from odoo.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
class ResUsers(models.Model):
_inherit = 'res.users'
@api.model
def signup(self, values, token=None):
""" """
context = dict(self._context)
if values.get('is_seller', False):
context["is_seller"] = values.get('is_seller', False)
values.pop("is_seller")
return super(ResUsers, self.with_context(context)).signup(values, token)
@api.multi
def copy(self, default=None):
self.ensure_one()
user_obj = super(ResUsers, self).copy(default=default)
if self._context.get('is_seller', False):
# Set Default fields for seller (i.e: payment_methods, commission, location_id, etc...)
wk_valse = {
"payment_method": [(6, 0, user_obj.partner_id._set_payment_method())],
"commission": self.env['ir.default'].get('res.config.settings', 'global_commission'),
"location_id": self.env['ir.default'].get('res.config.settings', 'warehouse_location_id') or False,
"warehouse_id": self.env['ir.default'].get('res.config.settings', 'mp_default_warehouse_id') or False,
"auto_product_approve": self.env['ir.default'].get('res.config.settings', 'auto_product_approve'),
"seller_payment_limit": self.env['ir.default'].get('res.config.settings', 'seller_payment_limit'),
"next_payment_request": self.env['ir.default'].get('res.config.settings', 'next_payment_requset'),
"auto_approve_qty": self.env['ir.default'].get('res.config.settings', 'auto_approve_qty'),
"show_seller_since": self.env['ir.default'].get('res.config.settings', 'seller_since'),
"show_seller_address": self.env['ir.default'].get('res.config.settings', 'shipping_address'),
"show_product_count": self.env['ir.default'].get('res.config.settings', 'product_count'),
"show_sale_count": self.env['ir.default'].get('res.config.settings', 'sale_count'),
"show_return_policy": self.env['ir.default'].get('res.config.settings', 'return_policy'),
"show_shipping_policy": self.env['ir.default'].get('res.config.settings', 'shipping_policy'),
"show_seller_review": self.env['ir.default'].get('res.config.settings', 'seller_review'),
"seller" : True,
}
user_obj.partner_id.write(wk_valse)
# Add user to Pending seller group
# user_obj.partner_id.seller = True
draft_seller_group_id = self.env['ir.model.data'].get_object_reference('odoo_marketplace', 'marketplace_draft_seller_group')[1]
groups_obj = self.env["res.groups"].browse(draft_seller_group_id)
if groups_obj:
for group_obj in groups_obj:
group_obj.write({"users": [(4, user_obj.id, 0)]})
return user_obj
@api.multi
def notification_on_partner_as_a_seller(self):
        # Here ids must refer to a single user id
for user_obj in self:
if user_obj.partner_id.seller:
template = self.env['mail.template']
config_setting_obj = self.env['res.config.settings'].get_values()
if config_setting_obj["enable_notify_admin_4_new_seller"] and config_setting_obj.get("notify_admin_4_new_seller_m_tmpl_id",False) and config_setting_obj["notify_admin_4_new_seller_m_tmpl_id"]:
# Notify to admin by admin on new seller creation
temp_id = config_setting_obj["notify_admin_4_new_seller_m_tmpl_id"]
if temp_id:
template.browse(temp_id).send_mail(user_obj.partner_id.id, True)
if config_setting_obj["enable_notify_seller_4_new_seller"] and config_setting_obj.get("notify_seller_4_new_seller_m_tmpl_id",False) and config_setting_obj["notify_seller_4_new_seller_m_tmpl_id"]:
# Notify to Seller by admin on new seller creation
temp_id2 = config_setting_obj["notify_seller_4_new_seller_m_tmpl_id"]
if temp_id2:
template.browse(temp_id2).send_mail(user_obj.partner_id.id, True)
# @api.model
# def create(self, vals):
# print("cals----------------", vals)
# return super(ResUsers, self).create(vals)
|
py | b401786f10a4ac1e2b47ee98bc4ca0e2e4352ddc | import os, sys, math, time
import numpy as np
import open3d as o3d
from skimage import io
from constants import *
## HELPER FUNCTIONS ##
# detects pixels along the laser line in the image
# RETURNS: list of laser pixels as (x, y) tuples
def detect_laser_pixels(image, laser_threshold, window_len):
# extract element-wise channel diff intensity
nonred = (image[...,1] >> 1) + (image[...,2] >> 1)
intensity = np.maximum(image[...,0], nonred) - nonred
rows, cols = intensity.shape
# apply hamming window to smooth intensity rows
window = np.hamming(window_len)
intensity = np.reshape(intensity, (rows*cols,))
filtered = np.convolve(intensity, window, mode='same')
# compute row-wise local max intensity pixels
    mask = np.zeros((rows*cols,), dtype=bool)
mask[1:-1] = np.diff(np.sign(np.diff(filtered))) < 0
mask &= intensity > laser_threshold
mask = np.reshape(mask, (rows,cols))
return np.ndarray.tolist(np.argwhere(mask))
# converts a list of pixels to corresponding screen points
# in world space as a numpy array
# RETURNS: list of screen points
def pixels_to_screen_points(pixels, dim, pixel_skip):
camera_sensor_height = CAMERA_SENSOR_WIDTH * (dim[0] / dim[1])
iw, ih = 1.0 / dim[1], 1.0 / dim[0]
# convert pixels to camera space
camera_points = np.zeros((4, len(pixels)))
for i in range(len(pixels)):
px, py = pixels[i][1] + 0.5, pixels[i][0] * pixel_skip + 0.5
nx, ny = px * iw - 0.5, py * ih - 0.5
cx, cy = nx * CAMERA_SENSOR_WIDTH, ny * camera_sensor_height
camera_points[:,i] = np.array([cx, -cy, -CAMERA_FOCAL_LENGTH, 1.00])
# convert pixels to world space
screen_points = (CAMERA_TO_WORLD @ camera_points)[0:3,:].T
return np.ndarray.tolist(screen_points)
# Perform ray plane intersection from the camera origin through
# each screen point to the laser plane to determine the point of
# the laser on the object in world space. This function removes
# points relating to the background by checking the Z coordinate.
# RETURNS: list of world points
def screen_points_to_laser_plane(screen_points):
normal, center, origin = LASER_N, LASER_P, CAMERA_POS
numerator = np.dot(center - origin, normal)
world_points = []
for point in screen_points:
direction = norm(point - CAMERA_POS)
denom = np.dot(normal, direction)
if abs(denom) < 1.0e-6: continue
time = numerator / denom
world = CAMERA_POS + direction * time
if time < 0 or time > CAMERA_POS[1] + 1.5 or world[2] < 0: continue
world_points.append(world)
return world_points
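def _ray_plane_example():
    # Hedged numeric illustration of the intersection used above
    # (t = dot(center - origin, normal) / dot(normal, direction)) with made-up values,
    # not the scanner's real CAMERA_POS / LASER_N / LASER_P constants: a ray from the
    # origin along +X meets the plane x = 2 at t = 2.
    origin = np.array([0.0, 0.0, 0.0])
    direction = np.array([1.0, 0.0, 0.0])
    normal = np.array([1.0, 0.0, 0.0])
    center = np.array([2.0, 0.0, 0.0])
    t = np.dot(center - origin, normal) / np.dot(normal, direction)
    return origin + direction * t  # -> array([2., 0., 0.])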
# Reverse rotate points along the center axis based on
# image index. This gets the corresponding point on the object.
# RETURNS: list of object points
def world_points_to_object_points(world_points, angle):
cosine, sine = math.cos(angle), math.sin(angle)
rotate = np.array( \
[[cosine, -sine, 0.0],
[ sine, cosine, 0.0],
[ 0.0, 0.0, 1.0]])
object_points = []
for world_point in world_points:
object_points.append( \
np.ndarray.flatten((rotate @ world_point.reshape((-1, 1)))))
return object_points
## POINT CLOUD GENERATION SCRIPT ##
# generate all points for point cloud from scan
def generate_points(scan_dir, laser_threshold, window_len, pixel_skip, image_skip, verbose):
_, _, image_names = next(os.walk(scan_dir), (None, None, []))
image_names.sort()
image_num = len(image_names)
base_angle = (2.0 * math.pi) / image_num
time_pixels = 0
time_screen_points = 0
time_world_points = 0
time_object_points = 0
points = []
for i in range(0, image_num, image_skip):
image_name = os.path.join(scan_dir, image_names[i])
if verbose: print("processing image %d, file %s..." % (i, image_name))
# read a scan image
image = io.imread(image_name)
dim = image.shape
image = image[::pixel_skip,...]
angle = -base_angle * i
# generate points for that image and add to point cloud
start = time.time()
pixels = detect_laser_pixels(image, laser_threshold, window_len)
time_pixels += time.time() - start
start = time.time()
screen_points = pixels_to_screen_points(pixels, dim, pixel_skip)
time_screen_points += time.time() - start
start = time.time()
world_points = screen_points_to_laser_plane(screen_points)
time_world_points += time.time() - start
start = time.time()
object_points = world_points_to_object_points(world_points, angle)
time_object_points += time.time() - start
points.extend(object_points)
if verbose:
print("time_pixels =", time_pixels)
print("time_screen_points =", time_screen_points)
print("time_world_points =", time_world_points)
print("time_object_points =", time_object_points)
return points
def run_points(scan_dir, out_filename, laser_threshold, window_len, pixel_skip, image_skip, verbose, display):
print(scan_dir, out_filename, laser_threshold, window_len, pixel_skip, image_skip, verbose)
points = generate_points(scan_dir, laser_threshold, window_len, pixel_skip, image_skip, verbose)
if verbose: print("%d points generated" % len(points))
np_points = np.asarray(points, dtype=np.float64)
pcl = o3d.geometry.PointCloud()
pcl.points = o3d.utility.Vector3dVector(np_points)
if verbose: print("writing pcd file %s..." % out_filename)
o3d.io.write_point_cloud(out_filename, pcl)
if display:
pcd = o3d.io.read_point_cloud(out_filename)
o3d.visualization.draw_geometries([pcd], width=1280, height=720)
def main():
if len(sys.argv) != 3:
print("Usage: python points.py <scan_dir> <output_filename>")
exit(-1)
scan_dir = sys.argv[1]
output_filename = sys.argv[2]
if not os.path.isdir(scan_dir):
print("Error: scan_dir argument (%s) is not a valid directory" % scan_dir)
exit(-1)
if len(os.listdir(scan_dir)) == 0:
print("Error: scan_dir argument (%s) does not contain any files" % scan_dir)
exit(-1)
print("Using image scan directory " + scan_dir)
run_points(scan_dir, output_filename, DEFAULT_LASER_THRESHOLD, DEFAULT_WINDOW_LEN,
DEFAULT_PIXEL_SKIP, 1, DEBUG, True)
if __name__ == "__main__":
main()
# def add_debugging_visualizations(points):
# # add camera position to point cloud
# points.append(CAMERA_POS)
# # add laser plane normal to point cloud
# step = 5.0 / 100.0
# ray = LASER_N
# for i in range(100):
# time = i * step
# points.append(LASER_P + ray * time)
# # add laser plane grid to point cloud
# step1 = 5.0 / 10.0
# ray1 = norm(LASER_POS - LASER_N * np.dot(LASER_POS, LASER_N))
# ray2 = norm(np.cross(LASER_N, ray1))
# for i in range(10):
# for j in range(10):
# x, y = j * step1 - 2.5, i * step1 - 2.5
# points.append(LASER_P + x * ray1 + y * ray2)
# # add xyz axes to point cloud
# stepxyz = 5.0 / 20.0
# rayx = np.array([1.0, 0.0, 0.0])
# rayy = np.array([0.0, 1.0, 0.0])
# rayz = np.array([0.0, 0.0, 1.0])
# for i in range(20):
# amt = i * stepxyz - 2.5
# points.append(amt * rayx)
# points.append(amt * rayy)
# points.append(amt * rayz)
# return points |
py | b40179145fc23338a73570ab5d172fa650ac59fe | from __future__ import unicode_literals
from .responses import ResourceGroupsResponse
url_bases = ["https?://resource-groups(-fips)?.(.+).amazonaws.com"]
url_paths = {
"{0}/groups$": ResourceGroupsResponse.dispatch,
"{0}/groups/(?P<resource_group_name>[^/]+)$": ResourceGroupsResponse.dispatch,
"{0}/groups/(?P<resource_group_name>[^/]+)/query$": ResourceGroupsResponse.dispatch,
"{0}/groups-list$": ResourceGroupsResponse.dispatch,
"{0}/resources/(?P<resource_arn>[^/]+)/tags$": ResourceGroupsResponse.dispatch,
}
|
py | b4017a3819a62f2f3157c0799466d044dad9e3da | #####################################
# LICENSE #
#####################################
#
# Copyright (C) 2020 Elmar Glaubauf
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
This script will create a Takes from Existing Cameras
Twitter: @eglaubauf
Web: www.elmar-glaubauf.at
"""
import hou
class TakesFromCams():
"""Creates Takes from the Selected Cams for the First RS-ROP found"""
def __init__(self):
self.count = 0
self.cams = self.get_cams()
self.rop = self.setup_rop()
self.create_takes()
self.display_message()
def get_cams(self):
"""Calls the Selected Camera Nodes"""
cams = []
for c in hou.selectedNodes():
if c.type().name() == "cam":
cams.append(c)
return cams
def setup_rop(self):
"""Gets the current Redshift ROP"""
out = hou.node("/out")
for o in out.children():
if o.type().name() == "Redshift_ROP":
return o
rop = out.createNode("Redshift_ROP")
return rop
def create_takes(self):
"""Iterates over all selected Nodes and creates Takes"""
if not self.cams: # Return if there are no Cam Nodes
return
master_take = hou.takes.currentTake()
for c in self.cams:
# Check against OBJ-Level Nodes and Subnets
child = master_take.addChildTake('take_' + c.name())
hou.takes.setCurrentTake(child)
child.addParmTuple(self.rop.parm("RS_renderCamera").tuple())
self.rop.parm("RS_renderCamera").set(c.path())
self.count += 1
hou.takes.setCurrentTake(master_take)
def display_message(self):
"""Displays the Count of Created Materials to the User"""
if self.count > 0:
hou.ui.displayMessage(str(self.count) + ' Takes have been created')
else:
hou.ui.displayMessage('Please select Camera-Nodes to create Takes')
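# Hedged usage note: the class does all of its work in __init__, so from a Houdini
# Python shell (with camera OBJ nodes selected) a run is just the instantiation:
#     TakesFromCams()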
|
py | b4017a646326c2e3ed60212e16037cfcb08a3b0d | """
Destroy the database for the specified user
(who must not be siteUserAdmin)
"""
import pymongo
from pymongo import MongoClient
import sys
import secrets.admin_secrets
import secrets.client_secrets
MONGO_ADMIN_URL = "mongodb://{}:{}@{}:{}/admin".format(
secrets.admin_secrets.admin_user,
secrets.admin_secrets.admin_pw,
secrets.admin_secrets.host,
secrets.admin_secrets.port)
try:
dbclient = MongoClient(MONGO_ADMIN_URL)
db = getattr(dbclient, secrets.client_secrets.db)
print("Got database")
print("Attempting drop users")
# db.command( {"dropAllUsersFromDatabase": 1 } )
db.remove_user(secrets.client_secrets.db_user)
print("Dropped database users for {}".format(secrets.client_secrets.db))
db.command( {"dropDatabase": 1 } )
print("Dropped database {}".format(secrets.client_secrets.db))
except Exception as err:
print("Failed")
print(err)
|
py | b4017c64f97e84e257d2c92cad2c91288f476540 | """ Locale support.
The module provides low-level access to the C lib's locale APIs
and adds high level number formatting APIs as well as a locale
aliasing engine to complement these.
The aliasing engine includes support for many commonly used locale
names and maps them to values suitable for passing to the C lib's
setlocale() function. It also includes default encodings for all
supported locale names.
"""
import sys
import encodings
import encodings.aliases
import re
import collections
from builtins import str as _builtin_str
import functools
# Try importing the _locale module.
#
# If this fails, fall back on a basic 'C' locale emulation.
# Yuck: LC_MESSAGES is non-standard: can't tell whether it exists before
# trying the import. So __all__ is also fiddled at the end of the file.
__all__ = ["getlocale", "getdefaultlocale", "getpreferredencoding", "Error",
"setlocale", "resetlocale", "localeconv", "strcoll", "strxfrm",
"str", "atof", "atoi", "format", "format_string", "currency",
"normalize", "LC_CTYPE", "LC_COLLATE", "LC_TIME", "LC_MONETARY",
"LC_NUMERIC", "LC_ALL", "CHAR_MAX"]
def _strcoll(a,b):
""" strcoll(string,string) -> int.
Compares two strings according to the locale.
"""
return (a > b) - (a < b)
def _strxfrm(s):
""" strxfrm(string) -> string.
Returns a string that behaves for cmp locale-aware.
"""
return s
try:
from _locale import *
except ImportError:
# Locale emulation
CHAR_MAX = 127
LC_ALL = 6
LC_COLLATE = 3
LC_CTYPE = 0
LC_MESSAGES = 5
LC_MONETARY = 4
LC_NUMERIC = 1
LC_TIME = 2
Error = ValueError
def localeconv():
""" localeconv() -> dict.
Returns numeric and monetary locale-specific parameters.
"""
# 'C' locale default values
return {'grouping': [127],
'currency_symbol': '',
'n_sign_posn': 127,
'p_cs_precedes': 127,
'n_cs_precedes': 127,
'mon_grouping': [],
'n_sep_by_space': 127,
'decimal_point': '.',
'negative_sign': '',
'positive_sign': '',
'p_sep_by_space': 127,
'int_curr_symbol': '',
'p_sign_posn': 127,
'thousands_sep': '',
'mon_thousands_sep': '',
'frac_digits': 127,
'mon_decimal_point': '',
'int_frac_digits': 127}
def setlocale(category, value=None):
""" setlocale(integer,string=None) -> string.
Activates/queries locale processing.
"""
if value not in (None, '', 'C'):
raise Error('_locale emulation only supports "C" locale')
return 'C'
# These may or may not exist in _locale, so be sure to set them.
if 'strxfrm' not in globals():
strxfrm = _strxfrm
if 'strcoll' not in globals():
strcoll = _strcoll
_localeconv = localeconv
# With this dict, you can override some items of localeconv's return value.
# This is useful for testing purposes.
_override_localeconv = {}
@functools.wraps(_localeconv)
def localeconv():
d = _localeconv()
if _override_localeconv:
d.update(_override_localeconv)
return d
### Number formatting APIs
# Author: Martin von Loewis
# improved by Georg Brandl
# Iterate over grouping intervals
def _grouping_intervals(grouping):
last_interval = None
for interval in grouping:
# if grouping is -1, we are done
if interval == CHAR_MAX:
return
# 0: re-use last group ad infinitum
if interval == 0:
if last_interval is None:
raise ValueError("invalid grouping")
while True:
yield last_interval
yield interval
last_interval = interval
#perform the grouping from right to left
def _group(s, monetary=False):
conv = localeconv()
thousands_sep = conv[monetary and 'mon_thousands_sep' or 'thousands_sep']
grouping = conv[monetary and 'mon_grouping' or 'grouping']
if not grouping:
return (s, 0)
if s[-1] == ' ':
stripped = s.rstrip()
right_spaces = s[len(stripped):]
s = stripped
else:
right_spaces = ''
left_spaces = ''
groups = []
for interval in _grouping_intervals(grouping):
if not s or s[-1] not in "0123456789":
# only non-digit characters remain (sign, spaces)
left_spaces = s
s = ''
break
groups.append(s[-interval:])
s = s[:-interval]
if s:
groups.append(s)
groups.reverse()
return (
left_spaces + thousands_sep.join(groups) + right_spaces,
len(thousands_sep) * (len(groups) - 1)
)
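def _group_example():
    # Hedged illustration of the right-to-left grouping above. It forces an en_US-style
    # localeconv via the _override_localeconv test hook so the result does not depend
    # on the active locale.
    _override_localeconv.update({'thousands_sep': ',', 'grouping': [3, 3, 0]})
    try:
        return _group('123456789')  # -> ('123,456,789', 2)
    finally:
        _override_localeconv.clear()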
# Strip a given amount of excess padding from the given string
def _strip_padding(s, amount):
lpos = 0
while amount and s[lpos] == ' ':
lpos += 1
amount -= 1
rpos = len(s) - 1
while amount and s[rpos] == ' ':
rpos -= 1
amount -= 1
return s[lpos:rpos+1]
_percent_re = re.compile(r'%(?:\((?P<key>.*?)\))?'
r'(?P<modifiers>[-#0-9 +*.hlL]*?)[eEfFgGdiouxXcrs%]')
def format(percent, value, grouping=False, monetary=False, *additional):
"""Returns the locale-aware substitution of a %? specifier
(percent).
additional is for format strings which contain one or more
'*' modifiers."""
# this is only for one-percent-specifier strings and this should be checked
match = _percent_re.match(percent)
if not match or len(match.group())!= len(percent):
raise ValueError(("format() must be given exactly one %%char "
"format specifier, %s not valid") % repr(percent))
return _format(percent, value, grouping, monetary, *additional)
def _format(percent, value, grouping=False, monetary=False, *additional):
if additional:
formatted = percent % ((value,) + additional)
else:
formatted = percent % value
# floats and decimal ints need special action!
if percent[-1] in 'eEfFgG':
seps = 0
parts = formatted.split('.')
if grouping:
parts[0], seps = _group(parts[0], monetary=monetary)
decimal_point = localeconv()[monetary and 'mon_decimal_point'
or 'decimal_point']
formatted = decimal_point.join(parts)
if seps:
formatted = _strip_padding(formatted, seps)
elif percent[-1] in 'diu':
seps = 0
if grouping:
formatted, seps = _group(formatted, monetary=monetary)
if seps:
formatted = _strip_padding(formatted, seps)
return formatted
def format_string(f, val, grouping=False):
"""Formats a string in the same way that the % formatting would use,
but takes the current locale into account.
Grouping is applied if the third parameter is true."""
percents = list(_percent_re.finditer(f))
new_f = _percent_re.sub('%s', f)
if isinstance(val, collections.Mapping):
new_val = []
for perc in percents:
if perc.group()[-1]=='%':
new_val.append('%')
else:
new_val.append(format(perc.group(), val, grouping))
else:
if not isinstance(val, tuple):
val = (val,)
new_val = []
i = 0
for perc in percents:
if perc.group()[-1]=='%':
new_val.append('%')
else:
starcount = perc.group('modifiers').count('*')
new_val.append(_format(perc.group(),
val[i],
grouping,
False,
*val[i+1:i+1+starcount]))
i += (1 + starcount)
val = tuple(new_val)
return new_f % val
def currency(val, symbol=True, grouping=False, international=False):
"""Formats val according to the currency settings
in the current locale."""
conv = localeconv()
# check for illegal values
digits = conv[international and 'int_frac_digits' or 'frac_digits']
if digits == 127:
raise ValueError("Currency formatting is not possible using "
"the 'C' locale.")
s = format('%%.%if' % digits, abs(val), grouping, monetary=True)
# '<' and '>' are markers if the sign must be inserted between symbol and value
s = '<' + s + '>'
if symbol:
smb = conv[international and 'int_curr_symbol' or 'currency_symbol']
precedes = conv[val<0 and 'n_cs_precedes' or 'p_cs_precedes']
separated = conv[val<0 and 'n_sep_by_space' or 'p_sep_by_space']
if precedes:
s = smb + (separated and ' ' or '') + s
else:
s = s + (separated and ' ' or '') + smb
sign_pos = conv[val<0 and 'n_sign_posn' or 'p_sign_posn']
sign = conv[val<0 and 'negative_sign' or 'positive_sign']
if sign_pos == 0:
s = '(' + s + ')'
elif sign_pos == 1:
s = sign + s
elif sign_pos == 2:
s = s + sign
elif sign_pos == 3:
s = s.replace('<', sign)
elif sign_pos == 4:
s = s.replace('>', sign)
else:
# the default if nothing specified;
# this should be the most fitting sign position
s = sign + s
return s.replace('<', '').replace('>', '')
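def _currency_example():
    # Hedged illustration only: assumes an 'en_US.UTF-8' locale is installed on the
    # system; the symbol and separators come from the C library's locale database,
    # so results vary between platforms.
    setlocale(LC_ALL, 'en_US.UTF-8')
    return currency(1234.56, grouping=True)  # typically '$1,234.56'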
def str(val):
"""Convert float to integer, taking the locale into account."""
return format("%.12g", val)
def delocalize(string):
"Parses a string as a normalized number according to the locale settings."
#First, get rid of the grouping
ts = localeconv()['thousands_sep']
if ts:
string = string.replace(ts, '')
#next, replace the decimal point with a dot
dd = localeconv()['decimal_point']
if dd:
string = string.replace(dd, '.')
return string
def atof(string, func=float):
"Parses a string as a float according to the locale settings."
return func(delocalize(string))
def atoi(string):
"Converts a string to an integer according to the locale settings."
return int(delocalize(string))
def _test():
setlocale(LC_ALL, "")
#do grouping
s1 = format("%d", 123456789,1)
print(s1, "is", atoi(s1))
#standard formatting
s1 = str(3.14)
print(s1, "is", atof(s1))
### Locale name aliasing engine
# Author: Marc-Andre Lemburg, [email protected]
# Various tweaks by Fredrik Lundh <[email protected]>
# store away the low-level version of setlocale (it's
# overridden below)
_setlocale = setlocale
def _replace_encoding(code, encoding):
if '.' in code:
langname = code[:code.index('.')]
else:
langname = code
# Convert the encoding to a C lib compatible encoding string
norm_encoding = encodings.normalize_encoding(encoding)
#print('norm encoding: %r' % norm_encoding)
norm_encoding = encodings.aliases.aliases.get(norm_encoding.lower(),
norm_encoding)
#print('aliased encoding: %r' % norm_encoding)
encoding = norm_encoding
norm_encoding = norm_encoding.lower()
if norm_encoding in locale_encoding_alias:
encoding = locale_encoding_alias[norm_encoding]
else:
norm_encoding = norm_encoding.replace('_', '')
norm_encoding = norm_encoding.replace('-', '')
if norm_encoding in locale_encoding_alias:
encoding = locale_encoding_alias[norm_encoding]
#print('found encoding %r' % encoding)
return langname + '.' + encoding
def _append_modifier(code, modifier):
if modifier == 'euro':
if '.' not in code:
return code + '.ISO8859-15'
_, _, encoding = code.partition('.')
if encoding in ('ISO8859-15', 'UTF-8'):
return code
if encoding == 'ISO8859-1':
return _replace_encoding(code, 'ISO8859-15')
return code + '@' + modifier
def normalize(localename):
""" Returns a normalized locale code for the given locale
name.
The returned locale code is formatted for use with
setlocale().
If normalization fails, the original name is returned
unchanged.
If the given encoding is not known, the function defaults to
the default encoding for the locale code just like setlocale()
does.
"""
# Normalize the locale name and extract the encoding and modifier
code = localename.lower()
if ':' in code:
# ':' is sometimes used as encoding delimiter.
code = code.replace(':', '.')
if '@' in code:
code, modifier = code.split('@', 1)
else:
modifier = ''
if '.' in code:
langname, encoding = code.split('.')[:2]
else:
langname = code
encoding = ''
# First lookup: fullname (possibly with encoding and modifier)
lang_enc = langname
if encoding:
norm_encoding = encoding.replace('-', '')
norm_encoding = norm_encoding.replace('_', '')
lang_enc += '.' + norm_encoding
lookup_name = lang_enc
if modifier:
lookup_name += '@' + modifier
code = locale_alias.get(lookup_name, None)
if code is not None:
return code
#print('first lookup failed')
if modifier:
# Second try: fullname without modifier (possibly with encoding)
code = locale_alias.get(lang_enc, None)
if code is not None:
#print('lookup without modifier succeeded')
if '@' not in code:
return _append_modifier(code, modifier)
if code.split('@', 1)[1].lower() == modifier:
return code
#print('second lookup failed')
if encoding:
# Third try: langname (without encoding, possibly with modifier)
lookup_name = langname
if modifier:
lookup_name += '@' + modifier
code = locale_alias.get(lookup_name, None)
if code is not None:
#print('lookup without encoding succeeded')
if '@' not in code:
return _replace_encoding(code, encoding)
code, modifier = code.split('@', 1)
return _replace_encoding(code, encoding) + '@' + modifier
if modifier:
# Fourth try: langname (without encoding and modifier)
code = locale_alias.get(langname, None)
if code is not None:
#print('lookup without modifier and encoding succeeded')
if '@' not in code:
code = _replace_encoding(code, encoding)
return _append_modifier(code, modifier)
code, defmod = code.split('@', 1)
if defmod.lower() == modifier:
return _replace_encoding(code, encoding) + '@' + defmod
return localename
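def _normalize_examples():
    # Hedged illustration of the alias engine; the exact results depend on the bundled
    # locale_alias table, but these are the typical mappings.
    return normalize('de_DE'), normalize('en_US.utf8')
    # commonly ('de_DE.ISO8859-1', 'en_US.UTF-8')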
def _parse_localename(localename):
""" Parses the locale code for localename and returns the
result as tuple (language code, encoding).
The localename is normalized and passed through the locale
alias engine. A ValueError is raised in case the locale name
cannot be parsed.
The language code corresponds to RFC 1766. code and encoding
can be None in case the values cannot be determined or are
unknown to this implementation.
"""
code = normalize(localename)
if '@' in code:
# Deal with locale modifiers
code, modifier = code.split('@', 1)
if modifier == 'euro' and '.' not in code:
# Assume Latin-9 for @euro locales. This is bogus,
# since some systems may use other encodings for these
# locales. Also, we ignore other modifiers.
return code, 'iso-8859-15'
if '.' in code:
return tuple(code.split('.')[:2])
elif code == 'C':
return None, None
raise ValueError('unknown locale: %s' % localename)
def _build_localename(localetuple):
""" Builds a locale code from the given tuple (language code,
encoding).
No aliasing or normalizing takes place.
"""
try:
language, encoding = localetuple
if language is None:
language = 'C'
if encoding is None:
return language
else:
return language + '.' + encoding
except (TypeError, ValueError):
raise TypeError('Locale must be None, a string, or an iterable of two strings -- language code, encoding.')
def getdefaultlocale(envvars=('LC_ALL', 'LC_CTYPE', 'LANG', 'LANGUAGE')):
""" Tries to determine the default locale settings and returns
them as tuple (language code, encoding).
According to POSIX, a program which has not called
setlocale(LC_ALL, "") runs using the portable 'C' locale.
Calling setlocale(LC_ALL, "") lets it use the default locale as
defined by the LANG variable. Since we don't want to interfere
with the current locale setting we thus emulate the behavior
in the way described above.
To maintain compatibility with other platforms, not only the
LANG variable is tested, but a list of variables given as
envvars parameter. The first found to be defined will be
used. envvars defaults to the search path used in GNU gettext;
it must always contain the variable name 'LANG'.
Except for the code 'C', the language code corresponds to RFC
1766. code and encoding can be None in case the values cannot
be determined.
"""
try:
# check if it's supported by the _locale module
import _locale
code, encoding = _locale._getdefaultlocale()
except (ImportError, AttributeError):
pass
else:
# make sure the code/encoding values are valid
if sys.platform == "win32" and code and code[:2] == "0x":
# map windows language identifier to language name
code = windows_locale.get(int(code, 0))
# ...add other platform-specific processing here, if
# necessary...
return code, encoding
# fall back on POSIX behaviour
import os
lookup = os.environ.get
for variable in envvars:
localename = lookup(variable,None)
if localename:
if variable == 'LANGUAGE':
localename = localename.split(':')[0]
break
else:
localename = 'C'
return _parse_localename(localename)
def getlocale(category=LC_CTYPE):
""" Returns the current setting for the given locale category as
tuple (language code, encoding).
category may be one of the LC_* value except LC_ALL. It
defaults to LC_CTYPE.
Except for the code 'C', the language code corresponds to RFC
1766. code and encoding can be None in case the values cannot
be determined.
"""
localename = _setlocale(category)
if category == LC_ALL and ';' in localename:
raise TypeError('category LC_ALL is not supported')
return _parse_localename(localename)
def setlocale(category, locale=None):
""" Set the locale for the given category. The locale can be
a string, an iterable of two strings (language code and encoding),
or None.
Iterables are converted to strings using the locale aliasing
engine. Locale strings are passed directly to the C lib.
category may be given as one of the LC_* values.
"""
if locale and not isinstance(locale, _builtin_str):
# convert to string
locale = normalize(_build_localename(locale))
return _setlocale(category, locale)
def resetlocale(category=LC_ALL):
""" Sets the locale for category to the default setting.
The default setting is determined by calling
getdefaultlocale(). category defaults to LC_ALL.
"""
_setlocale(category, _build_localename(getdefaultlocale()))
if sys.platform.startswith("win"):
# On Win32, this will return the ANSI code page
def getpreferredencoding(do_setlocale = True):
"""Return the charset that the user is likely using."""
import _bootlocale
return _bootlocale.getpreferredencoding(False)
else:
# On Unix, if CODESET is available, use that.
try:
CODESET
except NameError:
# Fall back to parsing environment variables :-(
def getpreferredencoding(do_setlocale = True):
"""Return the charset that the user is likely using,
by looking at environment variables."""
res = getdefaultlocale()[1]
if res is None:
# LANG not set, default conservatively to ASCII
res = 'ascii'
return res
else:
def getpreferredencoding(do_setlocale = True):
"""Return the charset that the user is likely using,
according to the system configuration."""
import _bootlocale
if do_setlocale:
oldloc = setlocale(LC_CTYPE)
try:
setlocale(LC_CTYPE, "")
except Error:
pass
result = _bootlocale.getpreferredencoding(False)
if do_setlocale:
setlocale(LC_CTYPE, oldloc)
return result
### Database
#
# The following data was extracted from the locale.alias file which
# comes with X11 and then hand edited removing the explicit encoding
# definitions and adding some more aliases. The file is usually
# available as /usr/lib/X11/locale/locale.alias.
#
#
# The local_encoding_alias table maps lowercase encoding alias names
# to C locale encoding names (case-sensitive). Note that normalize()
# first looks up the encoding in the encodings.aliases dictionary and
# then applies this mapping to find the correct C lib name for the
# encoding.
#
locale_encoding_alias = {
# Mappings for non-standard encoding names used in locale names
'437': 'C',
'c': 'C',
'en': 'ISO8859-1',
'jis': 'JIS7',
'jis7': 'JIS7',
'ajec': 'eucJP',
'koi8c': 'KOI8-C',
'microsoftcp1251': 'CP1251',
'microsoftcp1255': 'CP1255',
'microsoftcp1256': 'CP1256',
'88591': 'ISO8859-1',
'88592': 'ISO8859-2',
'88595': 'ISO8859-5',
'885915': 'ISO8859-15',
# Mappings from Python codec names to C lib encoding names
'ascii': 'ISO8859-1',
'latin_1': 'ISO8859-1',
'iso8859_1': 'ISO8859-1',
'iso8859_10': 'ISO8859-10',
'iso8859_11': 'ISO8859-11',
'iso8859_13': 'ISO8859-13',
'iso8859_14': 'ISO8859-14',
'iso8859_15': 'ISO8859-15',
'iso8859_16': 'ISO8859-16',
'iso8859_2': 'ISO8859-2',
'iso8859_3': 'ISO8859-3',
'iso8859_4': 'ISO8859-4',
'iso8859_5': 'ISO8859-5',
'iso8859_6': 'ISO8859-6',
'iso8859_7': 'ISO8859-7',
'iso8859_8': 'ISO8859-8',
'iso8859_9': 'ISO8859-9',
'iso2022_jp': 'JIS7',
'shift_jis': 'SJIS',
'tactis': 'TACTIS',
'euc_jp': 'eucJP',
'euc_kr': 'eucKR',
'utf_8': 'UTF-8',
'koi8_r': 'KOI8-R',
'koi8_t': 'KOI8-T',
'koi8_u': 'KOI8-U',
'kz1048': 'RK1048',
'cp1251': 'CP1251',
'cp1255': 'CP1255',
'cp1256': 'CP1256',
# XXX This list is still incomplete. If you know more
# mappings, please file a bug report. Thanks.
}
for k, v in sorted(locale_encoding_alias.items()):
k = k.replace('_', '')
locale_encoding_alias.setdefault(k, v)
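# Illustrative sketch (added for clarity, not part of the original table): the
# mapping above is applied by normalize() after the encodings.aliases lookup, so
# a Python codec name inside a locale string is rewritten to its C library
# spelling. The helper name is hypothetical and the expected values simply
# restate entries of the tables defined in this module.
def _example_encoding_alias():
    # 'utf8' -> 'utf_8' via encodings.aliases, then 'UTF-8' via locale_encoding_alias
    assert normalize('de_DE.utf8') == 'de_DE.UTF-8'
    # '88591' resolves directly through locale_encoding_alias
    assert normalize('en_US.88591') == 'en_US.ISO8859-1'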
#
# The locale_alias table maps lowercase alias names to C locale names
# (case-sensitive). Encodings are always separated from the locale
# name using a dot ('.'); they should only be given in case the
# language name is needed to interpret the given encoding alias
# correctly (CJK codes often have this need).
#
# Note that the normalize() function which uses this tables
# removes '_' and '-' characters from the encoding part of the
# locale name before doing the lookup. This saves a lot of
# space in the table.
#
# MAL 2004-12-10:
# Updated alias mapping to most recent locale.alias file
# from X.org distribution using makelocalealias.py.
#
# These are the differences compared to the old mapping (Python 2.4
# and older):
#
# updated 'bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
# updated 'bg_bg' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
# updated 'bulgarian' -> 'bg_BG.ISO8859-5' to 'bg_BG.CP1251'
# updated 'cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'cz_cz' -> 'cz_CZ.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'czech' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'dutch' -> 'nl_BE.ISO8859-1' to 'nl_NL.ISO8859-1'
# updated 'et' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15'
# updated 'et_ee' -> 'et_EE.ISO8859-4' to 'et_EE.ISO8859-15'
# updated 'fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15'
# updated 'fi_fi' -> 'fi_FI.ISO8859-1' to 'fi_FI.ISO8859-15'
# updated 'iw' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'iw_il' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'japanese' -> 'ja_JP.SJIS' to 'ja_JP.eucJP'
# updated 'lt' -> 'lt_LT.ISO8859-4' to 'lt_LT.ISO8859-13'
# updated 'lv' -> 'lv_LV.ISO8859-4' to 'lv_LV.ISO8859-13'
# updated 'sl' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2'
# updated 'slovene' -> 'sl_CS.ISO8859-2' to 'sl_SI.ISO8859-2'
# updated 'th_th' -> 'th_TH.TACTIS' to 'th_TH.ISO8859-11'
# updated 'zh_cn' -> 'zh_CN.eucCN' to 'zh_CN.gb2312'
# updated 'zh_cn.big5' -> 'zh_TW.eucTW' to 'zh_TW.big5'
# updated 'zh_tw' -> 'zh_TW.eucTW' to 'zh_TW.big5'
#
# MAL 2008-05-30:
# Updated alias mapping to most recent locale.alias file
# from X.org distribution using makelocalealias.py.
#
# These are the differences compared to the old mapping (Python 2.5
# and older):
#
# updated 'cs_cs.iso88592' -> 'cs_CZ.ISO8859-2' to 'cs_CS.ISO8859-2'
# updated 'serbocroatian' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sh' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sh_hr.iso88592' -> 'sh_HR.ISO8859-2' to 'hr_HR.ISO8859-2'
# updated 'sh_sp' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sh_yu' -> 'sh_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sp' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sp_yu' -> 'sp_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_sp' -> 'sr_SP.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sr_yu' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_yu.cp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251'
# updated 'sr_yu.iso88592' -> 'sr_YU.ISO8859-2' to 'sr_CS.ISO8859-2'
# updated 'sr_yu.iso88595' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_yu.iso88595@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
# updated 'sr_yu.microsoftcp1251@cyrillic' -> 'sr_YU.CP1251' to 'sr_CS.CP1251'
# updated 'sr_yu.utf8@cyrillic' -> 'sr_YU.UTF-8' to 'sr_CS.UTF-8'
# updated 'sr_yu@cyrillic' -> 'sr_YU.ISO8859-5' to 'sr_CS.ISO8859-5'
#
# AP 2010-04-12:
# Updated alias mapping to most recent locale.alias file
# from X.org distribution using makelocalealias.py.
#
# These are the differences compared to the old mapping (Python 2.6.5
# and older):
#
# updated 'ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8'
# updated 'ru_ru' -> 'ru_RU.ISO8859-5' to 'ru_RU.UTF-8'
# updated 'serbocroatian' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sh' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sh_yu' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sr' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8'
# updated 'sr@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8'
# updated 'sr@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sr_cs.utf8@latn' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8@latin'
# updated 'sr_cs@latn' -> 'sr_CS.ISO8859-2' to 'sr_RS.UTF-8@latin'
# updated 'sr_yu' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8@latin'
# updated 'sr_yu.utf8@cyrillic' -> 'sr_CS.UTF-8' to 'sr_RS.UTF-8'
# updated 'sr_yu@cyrillic' -> 'sr_CS.ISO8859-5' to 'sr_RS.UTF-8'
#
# SS 2013-12-20:
# Updated alias mapping to most recent locale.alias file
# from X.org distribution using makelocalealias.py.
#
# These are the differences compared to the old mapping (Python 3.3.3
# and older):
#
# updated 'a3' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C'
# updated 'a3_az' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C'
# updated 'a3_az.koi8c' -> 'a3_AZ.KOI8-C' to 'az_AZ.KOI8-C'
# updated 'cs_cs.iso88592' -> 'cs_CS.ISO8859-2' to 'cs_CZ.ISO8859-2'
# updated 'hebrew' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'hebrew.iso88598' -> 'iw_IL.ISO8859-8' to 'he_IL.ISO8859-8'
# updated 'sd' -> '[email protected]' to 'sd_IN.UTF-8'
# updated 'sr@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin'
# updated 'sr_cs' -> 'sr_RS.UTF-8' to 'sr_CS.UTF-8'
# updated 'sr_cs.utf8@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin'
# updated 'sr_cs@latn' -> 'sr_RS.UTF-8@latin' to 'sr_CS.UTF-8@latin'
#
# SS 2014-10-01:
# Updated alias mapping with glibc 2.19 supported locales.
locale_alias = {
'a3': 'az_AZ.KOI8-C',
'a3_az': 'az_AZ.KOI8-C',
'a3_az.koic': 'az_AZ.KOI8-C',
'aa_dj': 'aa_DJ.ISO8859-1',
'aa_er': 'aa_ER.UTF-8',
'aa_et': 'aa_ET.UTF-8',
'af': 'af_ZA.ISO8859-1',
'af_za': 'af_ZA.ISO8859-1',
'am': 'am_ET.UTF-8',
'am_et': 'am_ET.UTF-8',
'american': 'en_US.ISO8859-1',
'an_es': 'an_ES.ISO8859-15',
'ar': 'ar_AA.ISO8859-6',
'ar_aa': 'ar_AA.ISO8859-6',
'ar_ae': 'ar_AE.ISO8859-6',
'ar_bh': 'ar_BH.ISO8859-6',
'ar_dz': 'ar_DZ.ISO8859-6',
'ar_eg': 'ar_EG.ISO8859-6',
'ar_in': 'ar_IN.UTF-8',
'ar_iq': 'ar_IQ.ISO8859-6',
'ar_jo': 'ar_JO.ISO8859-6',
'ar_kw': 'ar_KW.ISO8859-6',
'ar_lb': 'ar_LB.ISO8859-6',
'ar_ly': 'ar_LY.ISO8859-6',
'ar_ma': 'ar_MA.ISO8859-6',
'ar_om': 'ar_OM.ISO8859-6',
'ar_qa': 'ar_QA.ISO8859-6',
'ar_sa': 'ar_SA.ISO8859-6',
'ar_sd': 'ar_SD.ISO8859-6',
'ar_sy': 'ar_SY.ISO8859-6',
'ar_tn': 'ar_TN.ISO8859-6',
'ar_ye': 'ar_YE.ISO8859-6',
'arabic': 'ar_AA.ISO8859-6',
'as': 'as_IN.UTF-8',
'as_in': 'as_IN.UTF-8',
'ast_es': 'ast_ES.ISO8859-15',
'ayc_pe': 'ayc_PE.UTF-8',
'az': 'az_AZ.ISO8859-9E',
'az_az': 'az_AZ.ISO8859-9E',
'az_az.iso88599e': 'az_AZ.ISO8859-9E',
'be': 'be_BY.CP1251',
'be@latin': 'be_BY.UTF-8@latin',
'be_bg.utf8': 'bg_BG.UTF-8',
'be_by': 'be_BY.CP1251',
'be_by@latin': 'be_BY.UTF-8@latin',
'bem_zm': 'bem_ZM.UTF-8',
'ber_dz': 'ber_DZ.UTF-8',
'ber_ma': 'ber_MA.UTF-8',
'bg': 'bg_BG.CP1251',
'bg_bg': 'bg_BG.CP1251',
'bho_in': 'bho_IN.UTF-8',
'bn_bd': 'bn_BD.UTF-8',
'bn_in': 'bn_IN.UTF-8',
'bo_cn': 'bo_CN.UTF-8',
'bo_in': 'bo_IN.UTF-8',
'bokmal': 'nb_NO.ISO8859-1',
'bokm\xe5l': 'nb_NO.ISO8859-1',
'br': 'br_FR.ISO8859-1',
'br_fr': 'br_FR.ISO8859-1',
'brx_in': 'brx_IN.UTF-8',
'bs': 'bs_BA.ISO8859-2',
'bs_ba': 'bs_BA.ISO8859-2',
'bulgarian': 'bg_BG.CP1251',
'byn_er': 'byn_ER.UTF-8',
'c': 'C',
'c-french': 'fr_CA.ISO8859-1',
'c.ascii': 'C',
'c.en': 'C',
'c.iso88591': 'en_US.ISO8859-1',
'c.utf8': 'en_US.UTF-8',
'c_c': 'C',
'c_c.c': 'C',
'ca': 'ca_ES.ISO8859-1',
'ca_ad': 'ca_AD.ISO8859-1',
'ca_es': 'ca_ES.ISO8859-1',
'ca_es@valencia': 'ca_ES.ISO8859-15@valencia',
'ca_fr': 'ca_FR.ISO8859-1',
'ca_it': 'ca_IT.ISO8859-1',
'catalan': 'ca_ES.ISO8859-1',
'cextend': 'en_US.ISO8859-1',
'chinese-s': 'zh_CN.eucCN',
'chinese-t': 'zh_TW.eucTW',
'crh_ua': 'crh_UA.UTF-8',
'croatian': 'hr_HR.ISO8859-2',
'cs': 'cs_CZ.ISO8859-2',
'cs_cs': 'cs_CZ.ISO8859-2',
'cs_cz': 'cs_CZ.ISO8859-2',
'csb_pl': 'csb_PL.UTF-8',
'cv_ru': 'cv_RU.UTF-8',
'cy': 'cy_GB.ISO8859-1',
'cy_gb': 'cy_GB.ISO8859-1',
'cz': 'cs_CZ.ISO8859-2',
'cz_cz': 'cs_CZ.ISO8859-2',
'czech': 'cs_CZ.ISO8859-2',
'da': 'da_DK.ISO8859-1',
'da_dk': 'da_DK.ISO8859-1',
'danish': 'da_DK.ISO8859-1',
'dansk': 'da_DK.ISO8859-1',
'de': 'de_DE.ISO8859-1',
'de_at': 'de_AT.ISO8859-1',
'de_be': 'de_BE.ISO8859-1',
'de_ch': 'de_CH.ISO8859-1',
'de_de': 'de_DE.ISO8859-1',
'de_li.utf8': 'de_LI.UTF-8',
'de_lu': 'de_LU.ISO8859-1',
'deutsch': 'de_DE.ISO8859-1',
'doi_in': 'doi_IN.UTF-8',
'dutch': 'nl_NL.ISO8859-1',
'dutch.iso88591': 'nl_BE.ISO8859-1',
'dv_mv': 'dv_MV.UTF-8',
'dz_bt': 'dz_BT.UTF-8',
'ee': 'ee_EE.ISO8859-4',
'ee_ee': 'ee_EE.ISO8859-4',
'eesti': 'et_EE.ISO8859-1',
'el': 'el_GR.ISO8859-7',
'el_cy': 'el_CY.ISO8859-7',
'el_gr': 'el_GR.ISO8859-7',
'el_gr@euro': 'el_GR.ISO8859-15',
'en': 'en_US.ISO8859-1',
'en_ag': 'en_AG.UTF-8',
'en_au': 'en_AU.ISO8859-1',
'en_be': 'en_BE.ISO8859-1',
'en_bw': 'en_BW.ISO8859-1',
'en_ca': 'en_CA.ISO8859-1',
'en_dk': 'en_DK.ISO8859-1',
'en_dl.utf8': 'en_DL.UTF-8',
'en_gb': 'en_GB.ISO8859-1',
'en_hk': 'en_HK.ISO8859-1',
'en_ie': 'en_IE.ISO8859-1',
'en_in': 'en_IN.ISO8859-1',
'en_ng': 'en_NG.UTF-8',
'en_nz': 'en_NZ.ISO8859-1',
'en_ph': 'en_PH.ISO8859-1',
'en_sg': 'en_SG.ISO8859-1',
'en_uk': 'en_GB.ISO8859-1',
'en_us': 'en_US.ISO8859-1',
'en_us@euro@euro': 'en_US.ISO8859-15',
'en_za': 'en_ZA.ISO8859-1',
'en_zm': 'en_ZM.UTF-8',
'en_zw': 'en_ZW.ISO8859-1',
'en_zw.utf8': 'en_ZS.UTF-8',
'eng_gb': 'en_GB.ISO8859-1',
'english': 'en_EN.ISO8859-1',
'english_uk': 'en_GB.ISO8859-1',
'english_united-states': 'en_US.ISO8859-1',
'english_united-states.437': 'C',
'english_us': 'en_US.ISO8859-1',
'eo': 'eo_XX.ISO8859-3',
'eo.utf8': 'eo.UTF-8',
'eo_eo': 'eo_EO.ISO8859-3',
'eo_us.utf8': 'eo_US.UTF-8',
'eo_xx': 'eo_XX.ISO8859-3',
'es': 'es_ES.ISO8859-1',
'es_ar': 'es_AR.ISO8859-1',
'es_bo': 'es_BO.ISO8859-1',
'es_cl': 'es_CL.ISO8859-1',
'es_co': 'es_CO.ISO8859-1',
'es_cr': 'es_CR.ISO8859-1',
'es_cu': 'es_CU.UTF-8',
'es_do': 'es_DO.ISO8859-1',
'es_ec': 'es_EC.ISO8859-1',
'es_es': 'es_ES.ISO8859-1',
'es_gt': 'es_GT.ISO8859-1',
'es_hn': 'es_HN.ISO8859-1',
'es_mx': 'es_MX.ISO8859-1',
'es_ni': 'es_NI.ISO8859-1',
'es_pa': 'es_PA.ISO8859-1',
'es_pe': 'es_PE.ISO8859-1',
'es_pr': 'es_PR.ISO8859-1',
'es_py': 'es_PY.ISO8859-1',
'es_sv': 'es_SV.ISO8859-1',
'es_us': 'es_US.ISO8859-1',
'es_uy': 'es_UY.ISO8859-1',
'es_ve': 'es_VE.ISO8859-1',
'estonian': 'et_EE.ISO8859-1',
'et': 'et_EE.ISO8859-15',
'et_ee': 'et_EE.ISO8859-15',
'eu': 'eu_ES.ISO8859-1',
'eu_es': 'eu_ES.ISO8859-1',
'eu_fr': 'eu_FR.ISO8859-1',
'fa': 'fa_IR.UTF-8',
'fa_ir': 'fa_IR.UTF-8',
'fa_ir.isiri3342': 'fa_IR.ISIRI-3342',
'ff_sn': 'ff_SN.UTF-8',
'fi': 'fi_FI.ISO8859-15',
'fi_fi': 'fi_FI.ISO8859-15',
'fil_ph': 'fil_PH.UTF-8',
'finnish': 'fi_FI.ISO8859-1',
'fo': 'fo_FO.ISO8859-1',
'fo_fo': 'fo_FO.ISO8859-1',
'fr': 'fr_FR.ISO8859-1',
'fr_be': 'fr_BE.ISO8859-1',
'fr_ca': 'fr_CA.ISO8859-1',
'fr_ch': 'fr_CH.ISO8859-1',
'fr_fr': 'fr_FR.ISO8859-1',
'fr_lu': 'fr_LU.ISO8859-1',
'fran\xe7ais': 'fr_FR.ISO8859-1',
'fre_fr': 'fr_FR.ISO8859-1',
'french': 'fr_FR.ISO8859-1',
'french.iso88591': 'fr_CH.ISO8859-1',
'french_france': 'fr_FR.ISO8859-1',
'fur_it': 'fur_IT.UTF-8',
'fy_de': 'fy_DE.UTF-8',
'fy_nl': 'fy_NL.UTF-8',
'ga': 'ga_IE.ISO8859-1',
'ga_ie': 'ga_IE.ISO8859-1',
'galego': 'gl_ES.ISO8859-1',
'galician': 'gl_ES.ISO8859-1',
'gd': 'gd_GB.ISO8859-1',
'gd_gb': 'gd_GB.ISO8859-1',
'ger_de': 'de_DE.ISO8859-1',
'german': 'de_DE.ISO8859-1',
'german.iso88591': 'de_CH.ISO8859-1',
'german_germany': 'de_DE.ISO8859-1',
'gez_er': 'gez_ER.UTF-8',
'gez_et': 'gez_ET.UTF-8',
'gl': 'gl_ES.ISO8859-1',
'gl_es': 'gl_ES.ISO8859-1',
'greek': 'el_GR.ISO8859-7',
'gu_in': 'gu_IN.UTF-8',
'gv': 'gv_GB.ISO8859-1',
'gv_gb': 'gv_GB.ISO8859-1',
'ha_ng': 'ha_NG.UTF-8',
'he': 'he_IL.ISO8859-8',
'he_il': 'he_IL.ISO8859-8',
'hebrew': 'he_IL.ISO8859-8',
'hi': 'hi_IN.ISCII-DEV',
'hi_in': 'hi_IN.ISCII-DEV',
'hi_in.isciidev': 'hi_IN.ISCII-DEV',
'hne': 'hne_IN.UTF-8',
'hne_in': 'hne_IN.UTF-8',
'hr': 'hr_HR.ISO8859-2',
'hr_hr': 'hr_HR.ISO8859-2',
'hrvatski': 'hr_HR.ISO8859-2',
'hsb_de': 'hsb_DE.ISO8859-2',
'ht_ht': 'ht_HT.UTF-8',
'hu': 'hu_HU.ISO8859-2',
'hu_hu': 'hu_HU.ISO8859-2',
'hungarian': 'hu_HU.ISO8859-2',
'hy_am': 'hy_AM.UTF-8',
'hy_am.armscii8': 'hy_AM.ARMSCII_8',
'ia': 'ia.UTF-8',
'ia_fr': 'ia_FR.UTF-8',
'icelandic': 'is_IS.ISO8859-1',
'id': 'id_ID.ISO8859-1',
'id_id': 'id_ID.ISO8859-1',
'ig_ng': 'ig_NG.UTF-8',
'ik_ca': 'ik_CA.UTF-8',
'in': 'id_ID.ISO8859-1',
'in_id': 'id_ID.ISO8859-1',
'is': 'is_IS.ISO8859-1',
'is_is': 'is_IS.ISO8859-1',
'iso-8859-1': 'en_US.ISO8859-1',
'iso-8859-15': 'en_US.ISO8859-15',
'iso8859-1': 'en_US.ISO8859-1',
'iso8859-15': 'en_US.ISO8859-15',
'iso_8859_1': 'en_US.ISO8859-1',
'iso_8859_15': 'en_US.ISO8859-15',
'it': 'it_IT.ISO8859-1',
'it_ch': 'it_CH.ISO8859-1',
'it_it': 'it_IT.ISO8859-1',
'italian': 'it_IT.ISO8859-1',
'iu': 'iu_CA.NUNACOM-8',
'iu_ca': 'iu_CA.NUNACOM-8',
'iu_ca.nunacom8': 'iu_CA.NUNACOM-8',
'iw': 'he_IL.ISO8859-8',
'iw_il': 'he_IL.ISO8859-8',
'iw_il.utf8': 'iw_IL.UTF-8',
'ja': 'ja_JP.eucJP',
'ja_jp': 'ja_JP.eucJP',
'ja_jp.euc': 'ja_JP.eucJP',
'ja_jp.mscode': 'ja_JP.SJIS',
'ja_jp.pck': 'ja_JP.SJIS',
'japan': 'ja_JP.eucJP',
'japanese': 'ja_JP.eucJP',
'japanese-euc': 'ja_JP.eucJP',
'japanese.euc': 'ja_JP.eucJP',
'jp_jp': 'ja_JP.eucJP',
'ka': 'ka_GE.GEORGIAN-ACADEMY',
'ka_ge': 'ka_GE.GEORGIAN-ACADEMY',
'ka_ge.georgianacademy': 'ka_GE.GEORGIAN-ACADEMY',
'ka_ge.georgianps': 'ka_GE.GEORGIAN-PS',
'ka_ge.georgianrs': 'ka_GE.GEORGIAN-ACADEMY',
'kk_kz': 'kk_KZ.RK1048',
'kl': 'kl_GL.ISO8859-1',
'kl_gl': 'kl_GL.ISO8859-1',
'km_kh': 'km_KH.UTF-8',
'kn': 'kn_IN.UTF-8',
'kn_in': 'kn_IN.UTF-8',
'ko': 'ko_KR.eucKR',
'ko_kr': 'ko_KR.eucKR',
'ko_kr.euc': 'ko_KR.eucKR',
'kok_in': 'kok_IN.UTF-8',
'korean': 'ko_KR.eucKR',
'korean.euc': 'ko_KR.eucKR',
'ks': 'ks_IN.UTF-8',
'ks_in': 'ks_IN.UTF-8',
'[email protected]': 'ks_IN.UTF-8@devanagari',
'ku_tr': 'ku_TR.ISO8859-9',
'kw': 'kw_GB.ISO8859-1',
'kw_gb': 'kw_GB.ISO8859-1',
'ky': 'ky_KG.UTF-8',
'ky_kg': 'ky_KG.UTF-8',
'lb_lu': 'lb_LU.UTF-8',
'lg_ug': 'lg_UG.ISO8859-10',
'li_be': 'li_BE.UTF-8',
'li_nl': 'li_NL.UTF-8',
'lij_it': 'lij_IT.UTF-8',
'lithuanian': 'lt_LT.ISO8859-13',
'lo': 'lo_LA.MULELAO-1',
'lo_la': 'lo_LA.MULELAO-1',
'lo_la.cp1133': 'lo_LA.IBM-CP1133',
'lo_la.ibmcp1133': 'lo_LA.IBM-CP1133',
'lo_la.mulelao1': 'lo_LA.MULELAO-1',
'lt': 'lt_LT.ISO8859-13',
'lt_lt': 'lt_LT.ISO8859-13',
'lv': 'lv_LV.ISO8859-13',
'lv_lv': 'lv_LV.ISO8859-13',
'mag_in': 'mag_IN.UTF-8',
'mai': 'mai_IN.UTF-8',
'mai_in': 'mai_IN.UTF-8',
'mg_mg': 'mg_MG.ISO8859-15',
'mhr_ru': 'mhr_RU.UTF-8',
'mi': 'mi_NZ.ISO8859-1',
'mi_nz': 'mi_NZ.ISO8859-1',
'mk': 'mk_MK.ISO8859-5',
'mk_mk': 'mk_MK.ISO8859-5',
'ml': 'ml_IN.UTF-8',
'ml_in': 'ml_IN.UTF-8',
'mn_mn': 'mn_MN.UTF-8',
'mni_in': 'mni_IN.UTF-8',
'mr': 'mr_IN.UTF-8',
'mr_in': 'mr_IN.UTF-8',
'ms': 'ms_MY.ISO8859-1',
'ms_my': 'ms_MY.ISO8859-1',
'mt': 'mt_MT.ISO8859-3',
'mt_mt': 'mt_MT.ISO8859-3',
'my_mm': 'my_MM.UTF-8',
'nan_tw@latin': 'nan_TW.UTF-8@latin',
'nb': 'nb_NO.ISO8859-1',
'nb_no': 'nb_NO.ISO8859-1',
'nds_de': 'nds_DE.UTF-8',
'nds_nl': 'nds_NL.UTF-8',
'ne_np': 'ne_NP.UTF-8',
'nhn_mx': 'nhn_MX.UTF-8',
'niu_nu': 'niu_NU.UTF-8',
'niu_nz': 'niu_NZ.UTF-8',
'nl': 'nl_NL.ISO8859-1',
'nl_aw': 'nl_AW.UTF-8',
'nl_be': 'nl_BE.ISO8859-1',
'nl_nl': 'nl_NL.ISO8859-1',
'nn': 'nn_NO.ISO8859-1',
'nn_no': 'nn_NO.ISO8859-1',
'no': 'no_NO.ISO8859-1',
'no@nynorsk': 'ny_NO.ISO8859-1',
'no_no': 'no_NO.ISO8859-1',
'no_no.iso88591@bokmal': 'no_NO.ISO8859-1',
'no_no.iso88591@nynorsk': 'no_NO.ISO8859-1',
'norwegian': 'no_NO.ISO8859-1',
'nr': 'nr_ZA.ISO8859-1',
'nr_za': 'nr_ZA.ISO8859-1',
'nso': 'nso_ZA.ISO8859-15',
'nso_za': 'nso_ZA.ISO8859-15',
'ny': 'ny_NO.ISO8859-1',
'ny_no': 'ny_NO.ISO8859-1',
'nynorsk': 'nn_NO.ISO8859-1',
'oc': 'oc_FR.ISO8859-1',
'oc_fr': 'oc_FR.ISO8859-1',
'om_et': 'om_ET.UTF-8',
'om_ke': 'om_KE.ISO8859-1',
'or': 'or_IN.UTF-8',
'or_in': 'or_IN.UTF-8',
'os_ru': 'os_RU.UTF-8',
'pa': 'pa_IN.UTF-8',
'pa_in': 'pa_IN.UTF-8',
'pa_pk': 'pa_PK.UTF-8',
'pap_an': 'pap_AN.UTF-8',
'pd': 'pd_US.ISO8859-1',
'pd_de': 'pd_DE.ISO8859-1',
'pd_us': 'pd_US.ISO8859-1',
'ph': 'ph_PH.ISO8859-1',
'ph_ph': 'ph_PH.ISO8859-1',
'pl': 'pl_PL.ISO8859-2',
'pl_pl': 'pl_PL.ISO8859-2',
'polish': 'pl_PL.ISO8859-2',
'portuguese': 'pt_PT.ISO8859-1',
'portuguese_brazil': 'pt_BR.ISO8859-1',
'posix': 'C',
'posix-utf2': 'C',
'pp': 'pp_AN.ISO8859-1',
'pp_an': 'pp_AN.ISO8859-1',
'ps_af': 'ps_AF.UTF-8',
'pt': 'pt_PT.ISO8859-1',
'pt_br': 'pt_BR.ISO8859-1',
'pt_pt': 'pt_PT.ISO8859-1',
'ro': 'ro_RO.ISO8859-2',
'ro_ro': 'ro_RO.ISO8859-2',
'romanian': 'ro_RO.ISO8859-2',
'ru': 'ru_RU.UTF-8',
'ru_ru': 'ru_RU.UTF-8',
'ru_ua': 'ru_UA.KOI8-U',
'rumanian': 'ro_RO.ISO8859-2',
'russian': 'ru_RU.ISO8859-5',
'rw': 'rw_RW.ISO8859-1',
'rw_rw': 'rw_RW.ISO8859-1',
'sa_in': 'sa_IN.UTF-8',
'sat_in': 'sat_IN.UTF-8',
'sc_it': 'sc_IT.UTF-8',
'sd': 'sd_IN.UTF-8',
'sd_in': 'sd_IN.UTF-8',
'[email protected]': 'sd_IN.UTF-8@devanagari',
'sd_pk': 'sd_PK.UTF-8',
'se_no': 'se_NO.UTF-8',
'serbocroatian': 'sr_RS.UTF-8@latin',
'sh': 'sr_RS.UTF-8@latin',
'sh_ba.iso88592@bosnia': 'sr_CS.ISO8859-2',
'sh_hr': 'sh_HR.ISO8859-2',
'sh_hr.iso88592': 'hr_HR.ISO8859-2',
'sh_sp': 'sr_CS.ISO8859-2',
'sh_yu': 'sr_RS.UTF-8@latin',
'shs_ca': 'shs_CA.UTF-8',
'si': 'si_LK.UTF-8',
'si_lk': 'si_LK.UTF-8',
'sid_et': 'sid_ET.UTF-8',
'sinhala': 'si_LK.UTF-8',
'sk': 'sk_SK.ISO8859-2',
'sk_sk': 'sk_SK.ISO8859-2',
'sl': 'sl_SI.ISO8859-2',
'sl_cs': 'sl_CS.ISO8859-2',
'sl_si': 'sl_SI.ISO8859-2',
'slovak': 'sk_SK.ISO8859-2',
'slovene': 'sl_SI.ISO8859-2',
'slovenian': 'sl_SI.ISO8859-2',
'so_dj': 'so_DJ.ISO8859-1',
'so_et': 'so_ET.UTF-8',
'so_ke': 'so_KE.ISO8859-1',
'so_so': 'so_SO.ISO8859-1',
'sp': 'sr_CS.ISO8859-5',
'sp_yu': 'sr_CS.ISO8859-5',
'spanish': 'es_ES.ISO8859-1',
'spanish_spain': 'es_ES.ISO8859-1',
'sq': 'sq_AL.ISO8859-2',
'sq_al': 'sq_AL.ISO8859-2',
'sq_mk': 'sq_MK.UTF-8',
'sr': 'sr_RS.UTF-8',
'sr@cyrillic': 'sr_RS.UTF-8',
'sr@latn': 'sr_CS.UTF-8@latin',
'sr_cs': 'sr_CS.UTF-8',
'sr_cs.iso88592@latn': 'sr_CS.ISO8859-2',
'sr_cs@latn': 'sr_CS.UTF-8@latin',
'sr_me': 'sr_ME.UTF-8',
'sr_rs': 'sr_RS.UTF-8',
'sr_rs@latn': 'sr_RS.UTF-8@latin',
'sr_sp': 'sr_CS.ISO8859-2',
'sr_yu': 'sr_RS.UTF-8@latin',
'sr_yu.cp1251@cyrillic': 'sr_CS.CP1251',
'sr_yu.iso88592': 'sr_CS.ISO8859-2',
'sr_yu.iso88595': 'sr_CS.ISO8859-5',
'sr_yu.iso88595@cyrillic': 'sr_CS.ISO8859-5',
'sr_yu.microsoftcp1251@cyrillic': 'sr_CS.CP1251',
'sr_yu.utf8': 'sr_RS.UTF-8',
'sr_yu.utf8@cyrillic': 'sr_RS.UTF-8',
'sr_yu@cyrillic': 'sr_RS.UTF-8',
'ss': 'ss_ZA.ISO8859-1',
'ss_za': 'ss_ZA.ISO8859-1',
'st': 'st_ZA.ISO8859-1',
'st_za': 'st_ZA.ISO8859-1',
'sv': 'sv_SE.ISO8859-1',
'sv_fi': 'sv_FI.ISO8859-1',
'sv_se': 'sv_SE.ISO8859-1',
'sw_ke': 'sw_KE.UTF-8',
'sw_tz': 'sw_TZ.UTF-8',
'swedish': 'sv_SE.ISO8859-1',
'szl_pl': 'szl_PL.UTF-8',
'ta': 'ta_IN.TSCII-0',
'ta_in': 'ta_IN.TSCII-0',
'ta_in.tscii': 'ta_IN.TSCII-0',
'ta_in.tscii0': 'ta_IN.TSCII-0',
'ta_lk': 'ta_LK.UTF-8',
'te': 'te_IN.UTF-8',
'te_in': 'te_IN.UTF-8',
'tg': 'tg_TJ.KOI8-C',
'tg_tj': 'tg_TJ.KOI8-C',
'th': 'th_TH.ISO8859-11',
'th_th': 'th_TH.ISO8859-11',
'th_th.tactis': 'th_TH.TIS620',
'th_th.tis620': 'th_TH.TIS620',
'thai': 'th_TH.ISO8859-11',
'ti_er': 'ti_ER.UTF-8',
'ti_et': 'ti_ET.UTF-8',
'tig_er': 'tig_ER.UTF-8',
'tk_tm': 'tk_TM.UTF-8',
'tl': 'tl_PH.ISO8859-1',
'tl_ph': 'tl_PH.ISO8859-1',
'tn': 'tn_ZA.ISO8859-15',
'tn_za': 'tn_ZA.ISO8859-15',
'tr': 'tr_TR.ISO8859-9',
'tr_cy': 'tr_CY.ISO8859-9',
'tr_tr': 'tr_TR.ISO8859-9',
'ts': 'ts_ZA.ISO8859-1',
'ts_za': 'ts_ZA.ISO8859-1',
'tt': 'tt_RU.TATAR-CYR',
'tt_ru': 'tt_RU.TATAR-CYR',
'tt_ru.tatarcyr': 'tt_RU.TATAR-CYR',
'tt_ru@iqtelif': 'tt_RU.UTF-8@iqtelif',
'turkish': 'tr_TR.ISO8859-9',
'ug_cn': 'ug_CN.UTF-8',
'uk': 'uk_UA.KOI8-U',
'uk_ua': 'uk_UA.KOI8-U',
'univ': 'en_US.UTF-8',
'universal': 'en_US.UTF-8',
'universal.utf8@ucs4': 'en_US.UTF-8',
'unm_us': 'unm_US.UTF-8',
'ur': 'ur_PK.CP1256',
'ur_in': 'ur_IN.UTF-8',
'ur_pk': 'ur_PK.CP1256',
'uz': 'uz_UZ.UTF-8',
'uz_uz': 'uz_UZ.UTF-8',
'uz_uz@cyrillic': 'uz_UZ.UTF-8',
've': 've_ZA.UTF-8',
've_za': 've_ZA.UTF-8',
'vi': 'vi_VN.TCVN',
'vi_vn': 'vi_VN.TCVN',
'vi_vn.tcvn': 'vi_VN.TCVN',
'vi_vn.tcvn5712': 'vi_VN.TCVN',
'vi_vn.viscii': 'vi_VN.VISCII',
'vi_vn.viscii111': 'vi_VN.VISCII',
'wa': 'wa_BE.ISO8859-1',
'wa_be': 'wa_BE.ISO8859-1',
'wae_ch': 'wae_CH.UTF-8',
'wal_et': 'wal_ET.UTF-8',
'wo_sn': 'wo_SN.UTF-8',
'xh': 'xh_ZA.ISO8859-1',
'xh_za': 'xh_ZA.ISO8859-1',
'yi': 'yi_US.CP1255',
'yi_us': 'yi_US.CP1255',
'yo_ng': 'yo_NG.UTF-8',
'yue_hk': 'yue_HK.UTF-8',
'zh': 'zh_CN.eucCN',
'zh_cn': 'zh_CN.gb2312',
'zh_cn.big5': 'zh_TW.big5',
'zh_cn.euc': 'zh_CN.eucCN',
'zh_hk': 'zh_HK.big5hkscs',
'zh_hk.big5hk': 'zh_HK.big5hkscs',
'zh_sg': 'zh_SG.GB2312',
'zh_sg.gbk': 'zh_SG.GBK',
'zh_tw': 'zh_TW.big5',
'zh_tw.euc': 'zh_TW.eucTW',
'zh_tw.euctw': 'zh_TW.eucTW',
'zu': 'zu_ZA.ISO8859-1',
'zu_za': 'zu_ZA.ISO8859-1',
}
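# Illustrative sketch (added for clarity, not part of the original table): a bare
# language name is expanded to a full C locale string through locale_alias above;
# normalize() performs this lookup after lowercasing its argument. The helper
# name is hypothetical and the expected strings restate entries of the table.
def _example_locale_alias_lookup():
    assert locale_alias['japanese'] == 'ja_JP.eucJP'
    assert normalize('en_US') == 'en_US.ISO8859-1'   # via the 'en_us' entry above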
#
# This maps Windows language identifiers to locale strings.
#
# This list has been updated from
# http://msdn.microsoft.com/library/default.asp?url=/library/en-us/intl/nls_238z.asp
# to include every locale up to Windows Vista.
#
# NOTE: this mapping is incomplete. If your language is missing, please
# submit a bug report to the Python bug tracker at http://bugs.python.org/
# Make sure you include the missing language identifier and the suggested
# locale code.
#
windows_locale = {
0x0436: "af_ZA", # Afrikaans
0x041c: "sq_AL", # Albanian
0x0484: "gsw_FR",# Alsatian - France
0x045e: "am_ET", # Amharic - Ethiopia
0x0401: "ar_SA", # Arabic - Saudi Arabia
0x0801: "ar_IQ", # Arabic - Iraq
0x0c01: "ar_EG", # Arabic - Egypt
0x1001: "ar_LY", # Arabic - Libya
0x1401: "ar_DZ", # Arabic - Algeria
0x1801: "ar_MA", # Arabic - Morocco
0x1c01: "ar_TN", # Arabic - Tunisia
0x2001: "ar_OM", # Arabic - Oman
0x2401: "ar_YE", # Arabic - Yemen
0x2801: "ar_SY", # Arabic - Syria
0x2c01: "ar_JO", # Arabic - Jordan
0x3001: "ar_LB", # Arabic - Lebanon
0x3401: "ar_KW", # Arabic - Kuwait
0x3801: "ar_AE", # Arabic - United Arab Emirates
0x3c01: "ar_BH", # Arabic - Bahrain
0x4001: "ar_QA", # Arabic - Qatar
0x042b: "hy_AM", # Armenian
0x044d: "as_IN", # Assamese - India
0x042c: "az_AZ", # Azeri - Latin
0x082c: "az_AZ", # Azeri - Cyrillic
0x046d: "ba_RU", # Bashkir
    0x042d: "eu_ES", # Basque - Spain
0x0423: "be_BY", # Belarusian
    0x0445: "bn_IN", # Bengali
0x201a: "bs_BA", # Bosnian - Cyrillic
0x141a: "bs_BA", # Bosnian - Latin
0x047e: "br_FR", # Breton - France
0x0402: "bg_BG", # Bulgarian
# 0x0455: "my_MM", # Burmese - Not supported
0x0403: "ca_ES", # Catalan
0x0004: "zh_CHS",# Chinese - Simplified
0x0404: "zh_TW", # Chinese - Taiwan
0x0804: "zh_CN", # Chinese - PRC
0x0c04: "zh_HK", # Chinese - Hong Kong S.A.R.
0x1004: "zh_SG", # Chinese - Singapore
0x1404: "zh_MO", # Chinese - Macao S.A.R.
0x7c04: "zh_CHT",# Chinese - Traditional
0x0483: "co_FR", # Corsican - France
0x041a: "hr_HR", # Croatian
0x101a: "hr_BA", # Croatian - Bosnia
0x0405: "cs_CZ", # Czech
0x0406: "da_DK", # Danish
0x048c: "gbz_AF",# Dari - Afghanistan
0x0465: "div_MV",# Divehi - Maldives
0x0413: "nl_NL", # Dutch - The Netherlands
0x0813: "nl_BE", # Dutch - Belgium
0x0409: "en_US", # English - United States
0x0809: "en_GB", # English - United Kingdom
0x0c09: "en_AU", # English - Australia
0x1009: "en_CA", # English - Canada
0x1409: "en_NZ", # English - New Zealand
0x1809: "en_IE", # English - Ireland
0x1c09: "en_ZA", # English - South Africa
0x2009: "en_JA", # English - Jamaica
    0x2409: "en_CB", # English - Caribbean
0x2809: "en_BZ", # English - Belize
0x2c09: "en_TT", # English - Trinidad
0x3009: "en_ZW", # English - Zimbabwe
0x3409: "en_PH", # English - Philippines
0x4009: "en_IN", # English - India
0x4409: "en_MY", # English - Malaysia
0x4809: "en_IN", # English - Singapore
0x0425: "et_EE", # Estonian
0x0438: "fo_FO", # Faroese
0x0464: "fil_PH",# Filipino
0x040b: "fi_FI", # Finnish
0x040c: "fr_FR", # French - France
0x080c: "fr_BE", # French - Belgium
0x0c0c: "fr_CA", # French - Canada
0x100c: "fr_CH", # French - Switzerland
0x140c: "fr_LU", # French - Luxembourg
0x180c: "fr_MC", # French - Monaco
0x0462: "fy_NL", # Frisian - Netherlands
0x0456: "gl_ES", # Galician
0x0437: "ka_GE", # Georgian
0x0407: "de_DE", # German - Germany
0x0807: "de_CH", # German - Switzerland
0x0c07: "de_AT", # German - Austria
0x1007: "de_LU", # German - Luxembourg
0x1407: "de_LI", # German - Liechtenstein
0x0408: "el_GR", # Greek
0x046f: "kl_GL", # Greenlandic - Greenland
0x0447: "gu_IN", # Gujarati
0x0468: "ha_NG", # Hausa - Latin
0x040d: "he_IL", # Hebrew
0x0439: "hi_IN", # Hindi
0x040e: "hu_HU", # Hungarian
0x040f: "is_IS", # Icelandic
0x0421: "id_ID", # Indonesian
0x045d: "iu_CA", # Inuktitut - Syllabics
0x085d: "iu_CA", # Inuktitut - Latin
0x083c: "ga_IE", # Irish - Ireland
0x0410: "it_IT", # Italian - Italy
0x0810: "it_CH", # Italian - Switzerland
0x0411: "ja_JP", # Japanese
0x044b: "kn_IN", # Kannada - India
0x043f: "kk_KZ", # Kazakh
0x0453: "kh_KH", # Khmer - Cambodia
0x0486: "qut_GT",# K'iche - Guatemala
0x0487: "rw_RW", # Kinyarwanda - Rwanda
0x0457: "kok_IN",# Konkani
0x0412: "ko_KR", # Korean
0x0440: "ky_KG", # Kyrgyz
0x0454: "lo_LA", # Lao - Lao PDR
0x0426: "lv_LV", # Latvian
0x0427: "lt_LT", # Lithuanian
0x082e: "dsb_DE",# Lower Sorbian - Germany
0x046e: "lb_LU", # Luxembourgish
0x042f: "mk_MK", # FYROM Macedonian
0x043e: "ms_MY", # Malay - Malaysia
0x083e: "ms_BN", # Malay - Brunei Darussalam
0x044c: "ml_IN", # Malayalam - India
0x043a: "mt_MT", # Maltese
0x0481: "mi_NZ", # Maori
0x047a: "arn_CL",# Mapudungun
0x044e: "mr_IN", # Marathi
0x047c: "moh_CA",# Mohawk - Canada
0x0450: "mn_MN", # Mongolian - Cyrillic
0x0850: "mn_CN", # Mongolian - PRC
0x0461: "ne_NP", # Nepali
0x0414: "nb_NO", # Norwegian - Bokmal
0x0814: "nn_NO", # Norwegian - Nynorsk
0x0482: "oc_FR", # Occitan - France
0x0448: "or_IN", # Oriya - India
0x0463: "ps_AF", # Pashto - Afghanistan
0x0429: "fa_IR", # Persian
0x0415: "pl_PL", # Polish
0x0416: "pt_BR", # Portuguese - Brazil
0x0816: "pt_PT", # Portuguese - Portugal
0x0446: "pa_IN", # Punjabi
0x046b: "quz_BO",# Quechua (Bolivia)
0x086b: "quz_EC",# Quechua (Ecuador)
0x0c6b: "quz_PE",# Quechua (Peru)
0x0418: "ro_RO", # Romanian - Romania
0x0417: "rm_CH", # Romansh
0x0419: "ru_RU", # Russian
0x243b: "smn_FI",# Sami Finland
0x103b: "smj_NO",# Sami Norway
0x143b: "smj_SE",# Sami Sweden
0x043b: "se_NO", # Sami Northern Norway
0x083b: "se_SE", # Sami Northern Sweden
0x0c3b: "se_FI", # Sami Northern Finland
0x203b: "sms_FI",# Sami Skolt
0x183b: "sma_NO",# Sami Southern Norway
0x1c3b: "sma_SE",# Sami Southern Sweden
0x044f: "sa_IN", # Sanskrit
0x0c1a: "sr_SP", # Serbian - Cyrillic
0x1c1a: "sr_BA", # Serbian - Bosnia Cyrillic
0x081a: "sr_SP", # Serbian - Latin
0x181a: "sr_BA", # Serbian - Bosnia Latin
0x045b: "si_LK", # Sinhala - Sri Lanka
0x046c: "ns_ZA", # Northern Sotho
0x0432: "tn_ZA", # Setswana - Southern Africa
0x041b: "sk_SK", # Slovak
0x0424: "sl_SI", # Slovenian
0x040a: "es_ES", # Spanish - Spain
0x080a: "es_MX", # Spanish - Mexico
0x0c0a: "es_ES", # Spanish - Spain (Modern)
0x100a: "es_GT", # Spanish - Guatemala
0x140a: "es_CR", # Spanish - Costa Rica
0x180a: "es_PA", # Spanish - Panama
0x1c0a: "es_DO", # Spanish - Dominican Republic
0x200a: "es_VE", # Spanish - Venezuela
0x240a: "es_CO", # Spanish - Colombia
0x280a: "es_PE", # Spanish - Peru
0x2c0a: "es_AR", # Spanish - Argentina
0x300a: "es_EC", # Spanish - Ecuador
0x340a: "es_CL", # Spanish - Chile
0x380a: "es_UR", # Spanish - Uruguay
0x3c0a: "es_PY", # Spanish - Paraguay
0x400a: "es_BO", # Spanish - Bolivia
0x440a: "es_SV", # Spanish - El Salvador
0x480a: "es_HN", # Spanish - Honduras
0x4c0a: "es_NI", # Spanish - Nicaragua
0x500a: "es_PR", # Spanish - Puerto Rico
0x540a: "es_US", # Spanish - United States
# 0x0430: "", # Sutu - Not supported
0x0441: "sw_KE", # Swahili
0x041d: "sv_SE", # Swedish - Sweden
0x081d: "sv_FI", # Swedish - Finland
0x045a: "syr_SY",# Syriac
0x0428: "tg_TJ", # Tajik - Cyrillic
0x085f: "tmz_DZ",# Tamazight - Latin
0x0449: "ta_IN", # Tamil
0x0444: "tt_RU", # Tatar
0x044a: "te_IN", # Telugu
0x041e: "th_TH", # Thai
0x0851: "bo_BT", # Tibetan - Bhutan
0x0451: "bo_CN", # Tibetan - PRC
0x041f: "tr_TR", # Turkish
0x0442: "tk_TM", # Turkmen - Cyrillic
0x0480: "ug_CN", # Uighur - Arabic
0x0422: "uk_UA", # Ukrainian
0x042e: "wen_DE",# Upper Sorbian - Germany
0x0420: "ur_PK", # Urdu
0x0820: "ur_IN", # Urdu - India
0x0443: "uz_UZ", # Uzbek - Latin
0x0843: "uz_UZ", # Uzbek - Cyrillic
0x042a: "vi_VN", # Vietnamese
0x0452: "cy_GB", # Welsh
0x0488: "wo_SN", # Wolof - Senegal
0x0434: "xh_ZA", # Xhosa - South Africa
0x0485: "sah_RU",# Yakut - Cyrillic
0x0478: "ii_CN", # Yi - PRC
0x046a: "yo_NG", # Yoruba - Nigeria
0x0435: "zu_ZA", # Zulu
}
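# Illustrative sketch (added for clarity, not part of the original table):
# getdefaultlocale() turns the hex language identifier reported by
# _locale._getdefaultlocale() on Windows (e.g. '0x0409') into a locale name via
# windows_locale. The helper name and the literal LCID below are only examples.
def _example_windows_locale_lookup():
    lcid_string = "0x0409"
    assert windows_locale.get(int(lcid_string, 0)) == "en_US"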
def _print_locale():
""" Test function.
"""
categories = {}
def _init_categories(categories=categories):
for k,v in globals().items():
if k[:3] == 'LC_':
categories[k] = v
_init_categories()
del categories['LC_ALL']
print('Locale defaults as determined by getdefaultlocale():')
print('-'*72)
lang, enc = getdefaultlocale()
print('Language: ', lang or '(undefined)')
print('Encoding: ', enc or '(undefined)')
print()
print('Locale settings on startup:')
print('-'*72)
for name,category in categories.items():
print(name, '...')
lang, enc = getlocale(category)
print(' Language: ', lang or '(undefined)')
print(' Encoding: ', enc or '(undefined)')
print()
print()
print('Locale settings after calling resetlocale():')
print('-'*72)
resetlocale()
for name,category in categories.items():
print(name, '...')
lang, enc = getlocale(category)
print(' Language: ', lang or '(undefined)')
print(' Encoding: ', enc or '(undefined)')
print()
try:
setlocale(LC_ALL, "")
except:
print('NOTE:')
print('setlocale(LC_ALL, "") does not support the default locale')
print('given in the OS environment variables.')
else:
print()
print('Locale settings after calling setlocale(LC_ALL, ""):')
print('-'*72)
for name,category in categories.items():
print(name, '...')
lang, enc = getlocale(category)
print(' Language: ', lang or '(undefined)')
print(' Encoding: ', enc or '(undefined)')
print()
###
try:
LC_MESSAGES
except NameError:
pass
else:
__all__.append("LC_MESSAGES")
if __name__=='__main__':
print('Locale aliasing:')
print()
_print_locale()
print()
print('Number formatting:')
print()
_test()
|
py | b4017c6a0fc65c882bacb14cdc416f24d29dbc71 | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 10 09:54:44 2019
@author: vikash
"""
from distutils.core import setup
setup(
name = 'bin_boundary', # How you named your package folder (MyLib)
  packages = ['bin_boundary'],   # Choose the same as "name"
version = '0.1', # Start with a small number and increase it with every change you make
  license='MIT',        # Choose a license from here: https://help.github.com/articles/licensing-a-repository
  description = 'This package performs the bin-by-boundaries smoothing method of binning. It asks for a file name and a column name.',   # Give a short description about your library
author = 'VIKASH SINGH', # Type in your name
author_email = '[email protected]', # Type in your E-Mail
url = 'https://github.com/Vikash29Singh/bin_boundary.git', # Provide either the link to your github or to your website
download_url = 'https://github.com/Vikash29Singh/bin_boundary/archive/v0.1.tar.gz', # I explain this later on
keywords = ['bin', 'boundary', 'python'], # Keywords that define your package best
install_requires=[ # I get to this in a second
'pandas',
'numpy',
],
classifiers=[
    'Development Status :: 3 - Alpha',      # Choose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
'Intended Audience :: Developers', # Define that your audience are developers
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License', # Again, pick a license
'Programming Language :: Python :: 3', #Specify which python versions that you want to support
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
)
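# Hedged usage note (added for illustration, not part of the original script):
# with a setup script like the one above, a source distribution is typically
# built and published along these lines; twine is an assumption here, any PyPI
# upload tool works:
#
#     python setup.py sdist
#     twine upload dist/*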
|
py | b4017d10baebaafa6e5853afd7b87d3a97eca147 | # This sample tests the type checker's handling of named tuples.
from collections import namedtuple
from typing import NamedTuple
NamedTuple1 = namedtuple("NamedTuple1", "field1 field2")
NamedTuple1(1, 2)
NamedTuple1(field2=1, field1=2)
# This should generate an error because there
# is no field called field3.
NamedTuple1(field1=1, field2=3, field3=2)
# This should generate an error because there
# should be two parameters.
NamedTuple1(1)
# This should generate an error because there
# should be two parameters.
NamedTuple1(1, 2, 3)
NamedTuple2 = namedtuple("NamedTuple2", "field1, field2")
NamedTuple2.__new__.__defaults__ = ([], )
NamedTuple2()
NamedTuple2(1)
NamedTuple2(field1=1, field2=3)
# This should generate an error because there
# should be two or fewer parameters.
NamedTuple2(1, 2, 3)
NamedTuple3 = NamedTuple("NamedTuple3", [
('field1', 'str'), # 'str' should be treated as forward reference
('field2', int)
])
NamedTuple3('hello', 2)
# This should generate an error because of a
# type mismatch.
NamedTuple3('1', '2')
# This should generate an error because of a
# type mismatch.
NamedTuple3(field2=1, field1=2)
|
py | b4017d1b3b90a157a06215b1804a0c83b04888f7 | from flask import Blueprint
main = Blueprint('home', __name__)
@main.route('/')
def home_page():
return 'Home page' |
py | b4017d205391fdf143d970bd3b80ca1f7a237acc | """
Launch app with `streamlit run main.py --server.port 8000`.
"""
import google.oauth2.credentials
import pandas_gbq
from datetime import date
import os
from google.cloud import storage
import pandas as pd
import numpy as np
import streamlit as st
from fbprophet import Prophet
import plotly.graph_objs as go
#fsspec
#gcsfs
storage_client = storage.Client()
def is_exist(bucket_name,object):
bucket = storage_client.bucket(bucket_name)
blob = bucket.get_blob(object)
try:
return blob.exists(storage_client)
except:
return False
#@st.cache(allow_output_mutation=True) # This function will be cached
def dataset(n):
"""
    Load the daily installs dataset. The original BigQuery query is kept below
    (commented out); currently the data is read from a CSV in Cloud Storage.
"""
'''
credentials = google.oauth2.credentials.Credentials(
'xxxx')
project_id = "al-bi-bq-prod"
final_date = date.today()
sql_query = f"""
select date as ds, sum(total_installs) as y from `al-bi-bq-prod.dwh.fact_daily_stats`
where _partitiondate between '2020-11-01' and '{final_date}'
group by 1
order by 1"""
df_init = pandas_gbq.read_gbq(sql_query, project_id=project_id)
df_init['ds'] = df_init['ds'].dt.strftime('%Y-%m-%d')'''
df_init = pd.read_csv('gs://axiomm/installs.csv')
df_init.drop(df_init.tail(n).index, inplace=True)
return df_init
def prediction(dataset):
"""
Modeling and prediction making.
:param dataset: imported dataset
:return: predicted metrics value for the next period, graph of the model performance
"""
#with open('serialized_model.json', 'r') as fin:
# model = model_from_json(json.load(fin)) # Load model
model = Prophet(daily_seasonality=True, yearly_seasonality=True)
model.fit(dataset)
future = model.make_future_dataframe(periods=7, freq='d')
forecast = model.predict(future)
forecast['ds'] = forecast['ds'].dt.strftime('%Y-%m-%d')
#fig = fbprophet.plot.plot_plotly(model, forecast, xlabel='Date', ylabel='Metric_value')
fig = go.Figure()
fig.add_trace(go.Scatter(x=dataset['ds'], y=dataset['y'], name='Actual', ))
fig.add_trace(go.Scatter(x=forecast['ds'], y=forecast['yhat'], name='Prediction', ))
fig.add_trace(go.Scatter(x=forecast['ds'], y=forecast['trend'], name='Trend', ))
# fig.add_trace(go.Scatter(x=forecast['ds'], y=forecast['rain'], name='Rain',))
# fig.add_trace(go.Scatter(x=forecast['ds'], y=forecast['temp'], name='Temp',))
# fig.add_trace(go.Scatter(x=forecast['ds'], y=forecast['holidays'], name='Holidays', ))
# fig.add_trace(go.Scatter(x=forecast['ds'], y=forecast['yearly'], name='Yearly', ))
fig.add_trace(go.Scatter(x=forecast['ds'], y=forecast['weekly'], name='Weekly', ))
forecast = forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']]
return forecast, model, fig
def anomaly(data_new, forecast_old):
df_merged = pd.merge(forecast_old, data_new, on='ds', how='left')
df_merged['Anomaly?'] = np.where(
(df_merged['y'] < df_merged['yhat_lower']) | (df_merged['y'] > df_merged['yhat_upper']),
'Yes', 'No')
df_merged = df_merged[['ds','yhat_lower','yhat_upper','yhat','y','Anomaly?']]
df_merged.columns = ['date', 'Lowest possible value', 'Highest possible value','Actual prediction','Actual value', 'Anomaly?']
df_merged["Lowest possible value"] = df_merged["Lowest possible value"].astype(int)
df_merged["Highest possible value"] = df_merged["Highest possible value"].astype(int)
df_merged["Actual prediction"] = df_merged["Actual prediction"].astype(int)
df_merged["Actual value"] = df_merged["Actual value"].fillna("0").astype(int)
df_merged.to_csv('forecast_merged.csv')
storage_client.get_bucket('axiomm').blob('forecast_merged.csv').upload_from_filename(
'forecast_merged.csv')
return df_merged
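# Hedged sketch (added for illustration, not part of the original app): the
# anomaly rule used in anomaly() shown on a tiny hand-made frame. A point is
# flagged when the observed value falls outside Prophet's
# [yhat_lower, yhat_upper] interval; the function name and numbers are invented.
def _anomaly_rule_example():
    toy = pd.DataFrame({
        'y':          [100, 250,  90],
        'yhat_lower': [ 80,  90,  95],
        'yhat_upper': [120, 130, 140],
    })
    toy['Anomaly?'] = np.where(
        (toy['y'] < toy['yhat_lower']) | (toy['y'] > toy['yhat_upper']),
        'Yes', 'No')
    return toy  # the second and third rows are flagged: 250 > 130 and 90 < 95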
def forecast_horizons(data_new, forecast_for_today):
merged_table = anomaly(data_new, forecast_for_today).reset_index()
merged_table_month = merged_table[len(merged_table) - 31:]
merged_table_week = merged_table[len(merged_table) - 7:]
merged_table_day = merged_table[len(merged_table) - 7: len(merged_table) - 6]
return merged_table_day, merged_table_week, merged_table_month, merged_table
def color_survived(val):
color = 'red' if val == 'Yes' else 'white'
return f'background-color: {color}'
def main():
if is_exist('axiomm','forecast.csv') == False:
#os.path.exists('gs://axiomm/forecast.csv'):
data_first = dataset(1)
forecast_for_today = prediction(data_first)[0]
#gstorage
forecast_for_today.to_csv('forecast.csv')
storage_client.get_bucket('axiomm').blob('forecast.csv').upload_from_filename('forecast.csv')
main()
else:
#daily_iterations
data_new = dataset(0)
#gstorage
forecast_for_today = pd.read_csv('gs://axiomm/forecast.csv')
forecast_for_tomorrow = prediction(data_new)[0]
#Safe update
last_date1 = forecast_for_today['ds'].iloc[-1]
last_date2 = forecast_for_tomorrow['ds'].iloc[-1]
if last_date1 != last_date2:
# gstorage
forecast_for_tomorrow.to_csv('forecast.csv')
storage_client.get_bucket('axiomm').blob('forecast.csv').upload_from_filename(
'forecast.csv')
# gstorage
forecast_for_today.to_csv('forecast_for_spammers.csv')
storage_client.get_bucket('axiomm').blob('forecast_for_spammers.csv').upload_from_filename(
'forecast_for_spammers.csv')
else:
st.text('No new updates')
# gstorage
forecast_for_today = pd.read_csv('gs://axiomm/forecast_for_spammers.csv')
# output
st.write('# Today')
st.table(forecast_horizons(data_new, forecast_for_today)[0].style.applymap(color_survived, subset=['Anomaly?']))
st.write('# Weekly forecast')
st.table(forecast_horizons(data_new, forecast_for_today)[1].style.applymap(color_survived, subset=['Anomaly?']))
st.write('# Monthly performance')
st.table(forecast_horizons(data_new, forecast_for_today)[2].style.applymap(color_survived, subset=['Anomaly?']))
st.write('# Anomaly visual')
st.plotly_chart(prediction(data_new)[2])
if __name__ == "__main__":
main()
#Milestones:
# send to slack
# AWS chalice
# test on datetime
# separate model training from prediction: move it to a different function (with cross val and pickle, then load model)
|
py | b4017ea498e3c88254ffba6697860a31f66b7bf0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import shlex, sys, subprocess, os
import Env, Regions
def optimizeRegion(region):
optDir = Regions.getRegionMergedTileDir(region)
if os.path.isdir(optDir):
command = "python %stiles_opt.py %s" %(Env.tilersToolsDir, optDir)
print command
thisone = subprocess.Popen(shlex.split(command))
thisone.wait()
else:
print "Region tiles don't exist... run BatchRegionMerger.py first"
if __name__== "__main__":
    if len(sys.argv) != 2:
print "You must supply a region:"
Regions.printRegionList()
sys.exit()
else:
optimizeRegion(sys.argv[1]) |
py | b4017f0d46733e0aa7facc668e775eb3c38a6a75 | # SPDX-License-Identifier: Apache-2.0
"""
tf2onnx.rewriter.loop_rewriter_base
"""
import logging
from collections import OrderedDict
from tf2onnx import utils
from tf2onnx.graph_matcher import OpTypePattern, GraphMatcher
from tf2onnx.utils import is_tf_loopcond_op, is_tf_tensor_array_op
from tf2onnx.utils import is_tf_tensor_array_gather_op, is_tf_tensor_array_write_op, is_tf_tensor_array_read_op
from tf2onnx.rewriter.rnn_utils import REWRITER_RESULT
from tf2onnx.utils import TensorValueInfo
logger = logging.getLogger(__name__)
INVALID_INPUT_ID = utils.make_name("invalid_input_id")
# todo(pengwa) remove protected-access with changes to Graph/Node later.
# pylint: disable=missing-docstring,invalid-name,unused-argument,using-constant-test,protected-access
class Context(object):
def __init__(self):
self.while_context_scope = None
self.loop_properties = LoopProperties()
self.loop_cond = None
self.cell_graph = None # GraphInfo of cell graph
self.cond_graph = None # GraphInfo of condition graph
class GraphInfo(object):
def __init__(self, ops, inputs, outputs):
self.nodes = ops
self.inputs = inputs # list of TensorValueInfo in order
self.outputs = outputs # list of TensorValueInfo in order
self.dependent_vars = None
class LoopProperties(object):
def __init__(self):
        # use the Enter node's name as key; these are the initial inputs.
        # we don't use enter_input_id because it might be
        # used as the initial input for more than one Enter node.
self.state_variables = OrderedDict()
self.scan_variables = OrderedDict()
self.unneeded_scan_variables = OrderedDict()
self.tensor_array_inputs = [] # list of type InputTensorArray
def add_variable(self, var):
utils.make_sure(var.enter_name not in self.scan_variables,
"variable %s already exists as scan variable.", var.enter_name)
utils.make_sure(var.enter_name not in self.state_variables,
"variable %s already exists as state variable.", var.enter_name)
if var.tensor_array_type == TensorArrayVariableType.READ_LAST:
# If the variable just returns the last value of the constructed tensor array, it doesn't need to be
# a scan output
self.unneeded_scan_variables[var.enter_name] = var
elif var.tensor_array_type == TensorArrayVariableType.GATHER_ALL:
self.scan_variables[var.enter_name] = var
else:
self.state_variables[var.enter_name] = var
def get_variables(self, checker):
if not checker:
return self.all_variables.values()
return [v for v in self.all_variables.values() if checker(v)]
@property
def all_variables(self):
items = self.state_variables.copy()
items.update(self.scan_variables)
items.update(self.unneeded_scan_variables)
return items
    # state inputs and outputs come in pairs; even though some outputs do not depend on the corresponding input,
    # we leave the input id as None.
@property
def state_inputs(self):
return [v.switch_true_identity_output for v in self.state_variables.values()]
@property
def state_inputs_initial_values(self):
return [v.enter_input_id for v in self.state_variables.values()]
@property
def state_outputs(self):
return [v.next_iteration_input for v in self.state_variables.values()]
@property
def state_outputs_exits(self):
return [v.exit_output for v in self.state_variables.values()]
# scan output (e.g. tensor array) won't be used by next iteration calculation
@property
def scan_outputs(self):
return [v.next_iteration_input for v in self.scan_variables.values()]
@property
def scan_outputs_exits(self):
return [v.exit_output for v in self.scan_variables.values()]
# treat input tensor array as scan inputs
def add_scan_input(self, input_tensor_array):
self.tensor_array_inputs.append(input_tensor_array)
# usually it is called TensorArrayReadV3
@property
def scan_inputs(self):
return [i.consumer for i in self.tensor_array_inputs]
@property
def scan_inputs_initial_values(self):
return [i.data_input_id for i in self.tensor_array_inputs]
def has_variable_with_ta_type(self, tensor_array_type):
for variable in self.all_variables.values():
if variable.tensor_array_type == tensor_array_type:
return True
return False
class TensorArrayVariableType:
GATHER_ALL = "GATHER_ALL"
READ_LAST = "READ_LAST"
class LoopVariable(object):
"""In TensorFlow loop, all loop variables are listed both in iteration body graph's inputs, and outputs.
Loop (state variable 1, state variable 2) {
# do the calculation
           # the updated state variable 1 does not necessarily depend only on state variable 1; it may depend
           # on zero, one or more state variables.
           # So if it depends on no state variable at all, switch_true_identity_output.id is None. For this case,
# during conversion, a fake input for ONNX Loop body graph is created, but not consumed by any node.
return (updated) state variable 1, (updated) state variable 2, scan variable 1, scan variable 2
}
Here we take the perspective of body graph's outputs:
1. start from the iteration body graph's output (e.g. next_iteration_input.id)
2. find body graph generating it (those node between NextIteration and Switch)
3. find the variable initial value (e.g. enter_input_id)
4. check whether it is a tensor array
5. the body graph output might go to next iteration as corresponding input
(e.g. switch_true_identity_output.id).
"""
def __init__(self, enter_name, enter_input_id, next_iteration_input_id,
switch_true_identity_output_id, exit_output_id, tensor_array_type, ta_index_id, g):
self.enter_name = enter_name
self.enter_input_id = enter_input_id
# the output of iteration body graph for this variable
# should not be None
utils.make_sure(next_iteration_input_id, "next_iteration_input_id should not be None")
self.next_iteration_input = TensorValueInfo(next_iteration_input_id, g)
# the starting point of iteration body graph,
# might be None when this variable value (either initial value or last iteration output value)
        # is not consumed by any iteration body graph node.
self.switch_true_identity_output = TensorValueInfo(switch_true_identity_output_id, g)
        # the switch_false branch ends with Exit, which is a boundary for the loop;
        # might be None when there are no consumers for the variable output.
self.exit_output = TensorValueInfo(exit_output_id, g)
# only applicable for tensor array variable
self.tensor_array_type = tensor_array_type
        # todo: need to check that ta's index variable is a scalar starting from 1 and increasing by 1 each iteration;
        # then we can be sure this is equivalent to scan output behavior.
self.ta_index_id = ta_index_id
class InputTensorArray(object):
def __init__(self, data_input_id, index_input_id, consumer_id, g):
self.index_input_id = index_input_id
self.data_input_id = data_input_id
# tensor array is unstacked before being used in loop, consumer_id is the node
# (in the iteration body graph) consuming one of the element of tensor array.
self.consumer = TensorValueInfo(consumer_id, g)
class LoopRewriterBase(object):
def __init__(self, g):
self.g = g
self.ta_read_input_pattern = \
OpTypePattern("TensorArrayReadV3", name="ta_read", inputs=[
OpTypePattern("Enter", name="ta_enter", inputs=[
OpTypePattern("TensorArrayV3")
]),
OpTypePattern("Identity", name="ta_index"),
OpTypePattern("Enter", name="ta_scatter_enter", inputs=[
OpTypePattern("TensorArrayScatterV3", name="ta_input_scatter")
]),
])
def create_context(self):
return Context()
def need_rewrite(self, context):
return False
def rewrite(self, context):
return REWRITER_RESULT.FAIL
def run_internal(self, allow_ta_read_last=False):
loopcond_ops = []
for op in self.g.get_nodes():
if is_tf_loopcond_op(op):
loopcond_ops.append(op)
# self.g.get_nodes may change inside this loop so that we parse all LoopCond first
for op in loopcond_ops:
logger.debug("======================\n handling loop cond node called %s", op.name)
context = self.create_context()
context.loop_cond = op
self._check_in_read_only_mode(context) # parses loop variables
loop_properties = context.loop_properties
if not allow_ta_read_last and loop_properties.has_variable_with_ta_type(TensorArrayVariableType.READ_LAST):
continue
if self.need_rewrite(context):
# cut off connection between cell/cond graphs and useless nodes like Merge, NextIteration.
self._cut_off_connection_for_cell(context)
context.cell_graph = self._crop_loop_body_sub_graph(context)
context.cond_graph = self._crop_loop_condition_sub_graph(context)
_result = self.rewrite(context)
if _result == REWRITER_RESULT.OK:
logger.debug("rewrite successfully")
elif _result == REWRITER_RESULT.SKIP:
logger.debug("rewrite skipped for LoopCond called %s", op.name)
continue
elif _result == REWRITER_RESULT.FAIL:
raise ValueError("rewrite failed, so just fast fail it")
if self.g.outputs:
# clean the graph based on output names.
self.g.delete_unused_nodes(self.g.outputs)
return self.g.get_nodes()
def _check_in_read_only_mode(self, context):
self._parse_loop_variables(context)
self._parse_input_ta(context)
def _parse_loop_variables(self, context):
loop_cond_op = context.loop_cond
parts = loop_cond_op.name.split('/')
context.while_context_scope = '/'.join(parts[0:-1]) + "/"
logger.debug("found while loop scope %s", context.while_context_scope)
switch_nodes = self.g.find_output_consumers(loop_cond_op.output[0])
for s in switch_nodes:
if s.type != 'Switch':
raise ValueError("LoopCond's output node should be followed with a Switch node")
loop_var = self._get_loop_var_from_switch(s)
context.loop_properties.add_variable(loop_var)
def inputs_equal(inp1, inp2):
# Checks input equality with an exception for a Select pattern in some LSTM nodes
if inp1 == inp2:
return True
node1 = self.g.get_node_by_output(inp1)
node2 = self.g.get_node_by_output(inp2)
if node1.type != "Select" or node2.type != "Select":
return False
if node1.inputs[0].type != "Tile" or node2.inputs[0].type != "Tile":
return False
if node1.inputs[0].input[0] != node2.inputs[0].input[0]:
return False
# Ignore the tile input. It gets its shape from different nodes but is actually the same.
return node1.input[1:] == node2.input[1:]
for unneeded_scan_variable in context.loop_properties.unneeded_scan_variables.values():
for state_variable in context.loop_properties.state_variables.values():
if inputs_equal(unneeded_scan_variable.next_iteration_input.id, state_variable.next_iteration_input.id):
unneeded_scan_variable.equivalent_state_variable = state_variable
break
def _parse_input_ta(self, context):
graph_inputs = [v.switch_true_identity_output.id for v in context.loop_properties.all_variables.values()
if v.switch_true_identity_output.id]
matcher = GraphMatcher(self.ta_read_input_pattern, allow_reorder=False)
match_results = matcher.match_ops(self.g.get_nodes())
match_results = [r for r in match_results if r.get_op("ta_index").output[0] in graph_inputs]
for match in match_results:
ta_input_scatter = match.get_op("ta_input_scatter")
# the 3rd input of scatter is the value
data_input_id = ta_input_scatter.input[2]
ta_read_node = match.get_op("ta_read")
            # todo: need to check that ta's index variable is a scalar starting from 1 and increasing by 1 each iteration;
            # then we can be sure this is equivalent to scan input behavior.
index_input_id = ta_read_node.input[1]
unstacked_ta_consumer = match.get_op("ta_read").output[0]
ta = InputTensorArray(data_input_id, index_input_id, unstacked_ta_consumer, self.g)
context.loop_properties.add_scan_input(ta)
def _crop_loop_body_sub_graph(self, context):
# according to input and output, find the body graph
loop_props = context.loop_properties
inputs = loop_props.state_inputs + loop_props.scan_inputs
input_ids = [input_tensor_value_info.id for input_tensor_value_info in inputs]
outputs = loop_props.state_outputs + loop_props.scan_outputs
output_ids = [out_tensor_value_info.id for out_tensor_value_info in outputs]
ops, enter_nodes, _ = self.find_subgraph(set(input_ids), set(output_ids), self.g, merge_as_end=False)
for enter_node in enter_nodes:
# connect Enter's output to Enter's input
self.g.replace_all_inputs(enter_node.output[0], enter_node.input[0], ops=ops)
return GraphInfo(ops, inputs, outputs)
def _crop_loop_condition_sub_graph(self, context):
input_ids = []
output_ids = [context.loop_cond.input[0]]
outputs = [TensorValueInfo(o, self.g) for o in output_ids]
ops, enter_nodes, merge_nodes = self.find_subgraph(set(input_ids), set(output_ids), self.g, merge_as_end=True)
for enter_node in enter_nodes:
# connect Enter's output to Enter's input
self.g.replace_all_inputs(enter_node.output[0], enter_node.input[0], ops=ops)
dependent_vars = []
for merge_node in merge_nodes:
enter_node = [n for n in merge_node.inputs if n.type == "Enter"][0]
loop_var = context.loop_properties.all_variables[enter_node.name]
# cut off connection between condition graph and Merge node.
            # replace condition graph's inputs with cell graph's outputs, because we want the condition graph
            # to consume cell graph outputs.
non_switch_consumers = [n for n in self.g.find_output_consumers(merge_node.output[0]) if n.type != "Switch"]
self.g.replace_all_inputs(merge_node.output[0], loop_var.next_iteration_input.id,
ops=non_switch_consumers)
dependent_vars.append(loop_var)
# cut off connection between condition graph and LoopCond node.
self.g.replace_all_inputs(context.loop_cond.output[0], INVALID_INPUT_ID, ops=[context.loop_cond])
graph_info = GraphInfo(ops, [], outputs)
graph_info.dependent_vars = dependent_vars
return graph_info
def _cut_off_connection_for_cell(self, context):
for val in context.loop_properties.all_variables.values():
if val.switch_true_identity_output.id:
# remove the node to cut off a starting node of the cell (e.g. loop body).
n = self.g.get_node_by_output(val.switch_true_identity_output.id)
self.g.remove_node(n.name)
if val.tensor_array_type == TensorArrayVariableType.GATHER_ALL:
# connect NextIteration to an invalid node, to cut off an ending node of the cell.
ta_write_nodes = [n for n in self.g.get_nodes() if is_tf_tensor_array_write_op(n)]
self.g.replace_all_inputs(val.next_iteration_input.id, INVALID_INPUT_ID, ops=ta_write_nodes)
else:
# connect NextIteration to an invalid node, to cut off an ending node of the cell.
next_iter_nodes = [n for n in self.g.get_nodes() if n.type == "NextIteration"]
self.g.replace_all_inputs(val.next_iteration_input.id, INVALID_INPUT_ID, ops=next_iter_nodes)
for scan_input in context.loop_properties.scan_inputs:
# remove the node to cut off connection between scan_input and the cell.
self.g.remove_node(self.g.get_node_by_output(scan_input.id).name)
def _get_loop_var_from_switch(self, switch_node):
if switch_node.type != 'Switch':
logger.error("not a switch node, skip")
return None
# the first input is data
merge_node = switch_node.inputs[0]
if merge_node.type != "Merge":
logger.error("switch node does not has Merge as its first input")
return None
# find the output_true consumers
switch_consumers = self.g.find_output_consumers(switch_node.output[1])
switch_true_consumer_cnt = len(switch_consumers)
if switch_true_consumer_cnt == 0:
switch_true_identity_output = None
elif switch_true_consumer_cnt == 1:
if switch_consumers[0].type == "Identity":
switch_true_identity_output = switch_consumers[0].output[0]
else:
# using grappler there is not necessarily an identity behind switch
switch_true_identity_output = switch_node.output[1]
else:
# insert identity if there are 2 or more consumers. This can happen on tf-1.15.
switch_true_identity_output = self.g.make_node("Identity", [switch_node.output[1]],
shapes=[switch_node.output_shapes[1]],
dtypes=[switch_node.output_dtypes[1]])
switch_true_identity_output = switch_true_identity_output.output[0]
for n in switch_consumers:
for i, nn in enumerate(n.input):
if nn == switch_node.output[1]:
n.input[i] = switch_true_identity_output
target_node_input_id = None
enter_node = [n for n in merge_node.inputs if n.type == 'Enter'][0]
target_node_input_id = enter_node.input[0]
logger.debug("a Switch >> Merge >> Enter is found called %s", enter_node.inputs[0].name)
next_iteration_node = [n for n in merge_node.inputs if n.type == 'NextIteration'][0]
last_iteration_output_id = next_iteration_node.input[0]
# find the output_false consumers to see whether there is consumer for this var
switch_false_consumers = self.g.find_output_consumers(switch_node.output[0])
false_consumer_count = len(switch_false_consumers)
exit_output_id = None
if false_consumer_count == 1:
exit_node = switch_false_consumers[0]
if exit_node.type != "Exit":
raise ValueError("switch false branch is followed by non-Exit")
exit_output_id = exit_node.output[0]
elif false_consumer_count == 0:
            # sometimes the variable output is not used as an input to the next iteration.
exit_output_id = None
else:
raise ValueError("unexpected number of switch false consumers")
ta_type = None
ta_index_id = None
if is_tf_tensor_array_op(self.g.get_node_by_output(target_node_input_id)):
ta_write_node = self.g.get_node_by_output(last_iteration_output_id)
            utils.make_sure(is_tf_tensor_array_write_op(ta_write_node), "TensorArray NextIteration does not follow a TensorArray write op")
last_iteration_output_id = ta_write_node.input[2]
ta_index_id = ta_write_node.input[1]
# here we parse patterns generated by
# ta.write(), then ta.stack(), because this is the most frequent usage pattern.
if exit_output_id:
exit_consumers = self.g.find_output_consumers(exit_output_id)
ta_access_node = [n for n in exit_consumers if is_tf_tensor_array_gather_op(n) or \
is_tf_tensor_array_read_op(n)][0]
if is_tf_tensor_array_read_op(ta_access_node):
ta_type = TensorArrayVariableType.READ_LAST
else:
ta_type = TensorArrayVariableType.GATHER_ALL
# update exit output id, treat the gather output as ta's output
exit_output_id = ta_access_node.output[0]
loop_var = LoopVariable(enter_node.name, target_node_input_id, last_iteration_output_id,
switch_true_identity_output, exit_output_id, ta_type, ta_index_id, self.g)
return loop_var
@staticmethod
def find_subgraph(input_ids, output_ids, g, merge_as_end=False):
logger.debug("input ids %s ", input_ids)
logger.debug("output ids %s ", output_ids)
enter_nodes = set()
merge_nodes = set()
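        # The backward search from output_ids stops at Enter nodes, at Merge
        # nodes when merge_as_end is set, at constants, and at any node whose
        # output is listed in input_ids; the nodes visited up to those
        # boundaries form the returned subgraph.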
def find_input_boundary(node):
if node.type == "Enter":
enter_nodes.add(node)
logger.debug("terminate the input search at %s", node.name)
return False
if merge_as_end is True and node.type == "Merge":
merge_nodes.add(node)
logger.debug("terminate the input search at %s", node.name)
return False
if node.is_const():
logger.debug("terminate search at const node %s", node.name)
return False
for o in node.output:
if o in input_ids:
return False
return True
nodes = g.extract_sub_graph_nodes(output_ids, input_checker=find_input_boundary)
return nodes, enter_nodes, merge_nodes
@staticmethod
def construct_graph_from_nodes(parent_g, nodes, outputs):
return utils.construct_graph_from_nodes(
parent_g,
nodes,
[out.id for out in outputs],
[out.shape for out in outputs],
[out.dtype for out in outputs]
)
|
py | b4017f3a4645b695805b83acb7f4425129d328c4 | # Copyright (C) 2018 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# disable Invalid constant name pylint warning for mandatory Alembic variables.
# pylint: disable=invalid-name
"""
Fix tracking columns.
These helpers are used by the following migrations:
* ggrc.migrations.verisions.20160728120017_29c8b9c5d34b;
* ggrc_basic_permissions.migrations.verisions.20160728142641_4e105fc39b25;
* ggrc_gdrive_integration.migrations.verisions.20160804095642_395186a2d8;
* ggrc_risk_assessments.migrations.verisions.20160804101106_4d4b04a5b9c6;
* ggrc_risks.migrations.verisions.20160804095405_3d2acc8a4425;
* ggrc_workflows.migrations.verisions.20160728142921_4cb78ab9a321.
"""
import sqlalchemy as sa
from alembic import op
tables = {
"ggrc": [
"access_groups",
"assessment_templates",
"assessments",
"audit_objects",
"audits",
"background_tasks",
"categories",
"categorizations",
"clauses",
"comments",
"contexts",
"controls",
"custom_attribute_definitions",
"custom_attribute_values",
"data_assets",
"directives",
"documents",
"events",
"facilities",
"helps",
"issues",
"markets",
"meetings",
"notification_configs",
"notification_types",
"notifications",
"object_owners",
"object_people",
"objectives",
"options",
"org_groups",
"people",
"products",
"programs",
"projects",
"relationships",
"requests",
"revisions",
"sections",
"systems",
"vendors",
],
"ggrc_gdrive_integration": [
"object_events",
"object_files",
"object_folders",
],
"ggrc_basic_permissions": [
"context_implications",
"contexts",
"roles",
"user_roles",
],
"ggrc_risks": [
"risks",
"risk_objects",
"threats",
],
"ggrc_risk_assessments": [
"risk_assessments",
],
"ggrc_workflows": [
"cycle_task_entries",
"cycle_task_group_object_tasks",
"cycle_task_group_objects",
"cycle_task_groups",
"cycles",
"notification_types",
"task_group_objects",
"task_group_tasks",
"task_groups",
"workflow_people",
"workflows",
],
}
def upgrade_tables(module):
"""Updgrade tables from given module."""
for table in tables[module]:
op.execute("""
UPDATE %s
SET
created_at = IF(
created_at,
created_at,
IF(updated_at, updated_at, now())
),
updated_at = IF(
updated_at,
updated_at,
IF(created_at, created_at, now())
)
WHERE created_at IS NULL OR updated_at IS NULL
""" % table)
op.alter_column(table, "created_at", type_=sa.DateTime, nullable=False)
op.alter_column(table, "updated_at", type_=sa.DateTime, nullable=False)
def downgrade_tables(module):
"""Downgrade tables from given module."""
for table in tables[module]:
op.alter_column(table, "created_at", type_=sa.DateTime, nullable=True)
op.alter_column(table, "updated_at", type_=sa.DateTime, nullable=True)
|
py | b401801bd4a6793bde8ec30788e9d9451822025e | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Contains the core layers: Dense, Dropout.
Also contains their functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import warnings
from tensorflow.python.keras import layers as keras_layers
from tensorflow.python.keras.legacy_tf_layers import base
from tensorflow.python.ops import init_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export(v1=['layers.Dense'])
class Dense(keras_layers.Dense, base.Layer):
"""Densely-connected layer class.
This layer implements the operation:
`outputs = activation(inputs * kernel + bias)`
Where `activation` is the activation function passed as the `activation`
argument (if not `None`), `kernel` is a weights matrix created by the layer,
and `bias` is a bias vector created by the layer
(only if `use_bias` is `True`).
Arguments:
units: Integer or Long, dimensionality of the output space.
activation: Activation function (callable). Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer function for the weight matrix.
If `None` (default), weights are initialized using the default
initializer used by `tf.compat.v1.get_variable`.
bias_initializer: Initializer function for the bias.
kernel_regularizer: Regularizer function for the weight matrix.
bias_regularizer: Regularizer function for the bias.
activity_regularizer: Regularizer function for the output.
kernel_constraint: An optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: An optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: String, the name of the layer. Layers with the same name will
share weights, but to avoid mistakes we require reuse=True in such cases.
_reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Properties:
units: Python integer, dimensionality of the output space.
activation: Activation function (callable).
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer instance (or name) for the kernel matrix.
bias_initializer: Initializer instance (or name) for the bias.
kernel_regularizer: Regularizer instance for the kernel matrix (callable)
bias_regularizer: Regularizer instance for the bias (callable).
activity_regularizer: Regularizer instance for the output (callable)
kernel_constraint: Constraint function for the kernel matrix.
bias_constraint: Constraint function for the bias.
kernel: Weight matrix (TensorFlow variable or tensor).
bias: Bias vector, if applicable (TensorFlow variable or tensor).
"""
def __init__(self, units,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
**kwargs):
super(Dense, self).__init__(units=units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
**kwargs)
@tf_export(v1=['layers.dense'])
def dense(
inputs, units,
activation=None,
use_bias=True,
kernel_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
trainable=True,
name=None,
reuse=None):
"""Functional interface for the densely-connected layer.
This layer implements the operation:
`outputs = activation(inputs * kernel + bias)`
where `activation` is the activation function passed as the `activation`
argument (if not `None`), `kernel` is a weights matrix created by the layer,
and `bias` is a bias vector created by the layer
(only if `use_bias` is `True`).
Arguments:
inputs: Tensor input.
units: Integer or Long, dimensionality of the output space.
activation: Activation function (callable). Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
kernel_initializer: Initializer function for the weight matrix.
If `None` (default), weights are initialized using the default
initializer used by `tf.compat.v1.get_variable`.
bias_initializer: Initializer function for the bias.
kernel_regularizer: Regularizer function for the weight matrix.
bias_regularizer: Regularizer function for the bias.
activity_regularizer: Regularizer function for the output.
kernel_constraint: An optional projection function to be applied to the
kernel after being updated by an `Optimizer` (e.g. used to implement
norm constraints or value constraints for layer weights). The function
must take as input the unprojected variable and must return the
projected variable (which must have the same shape). Constraints are
not safe to use when doing asynchronous distributed training.
bias_constraint: An optional projection function to be applied to the
bias after being updated by an `Optimizer`.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).
name: String, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor the same shape as `inputs` except the last dimension is of
size `units`.
Raises:
ValueError: if eager execution is enabled.
"""
warnings.warn('`tf.layers.dense` is deprecated and '
'will be removed in a future version. '
'Please use `tf.keras.layers.Dense` instead.')
layer = Dense(units,
activation=activation,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
bias_initializer=bias_initializer,
kernel_regularizer=kernel_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
kernel_constraint=kernel_constraint,
bias_constraint=bias_constraint,
trainable=trainable,
name=name,
_scope=name,
_reuse=reuse)
return layer.apply(inputs)
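# Usage sketch (illustrative, assuming TF1-style graph mode):
#     x = tf.compat.v1.placeholder(shape=(None, 32), dtype='float32')
#     y = dense(x, units=64, activation=tf.nn.relu)
#     # `y` has shape `(None, 64)`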
@tf_export(v1=['layers.Dropout'])
class Dropout(keras_layers.Dropout, base.Layer):
"""Applies Dropout to the input.
Dropout consists in randomly setting a fraction `rate` of input units to 0
at each update during training time, which helps prevent overfitting.
The units that are kept are scaled by `1 / (1 - rate)`, so that their
sum is unchanged at training time and inference time.
Arguments:
rate: The dropout rate, between 0 and 1. E.g. `rate=0.1` would drop out
10% of input units.
noise_shape: 1D tensor of type `int32` representing the shape of the
binary dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)`, and you want the dropout mask
to be the same for all timesteps, you can use
`noise_shape=[batch_size, 1, features]`.
seed: A Python integer. Used to create random seeds. See
      `tf.compat.v1.set_random_seed` for behavior.
name: The name of the layer (string).
"""
def __init__(self, rate=0.5,
noise_shape=None,
seed=None,
name=None,
**kwargs):
super(Dropout, self).__init__(rate=rate,
noise_shape=noise_shape,
seed=seed,
name=name,
**kwargs)
def call(self, inputs, training=False):
return super(Dropout, self).call(inputs, training=training)
@tf_export(v1=['layers.dropout'])
def dropout(inputs,
rate=0.5,
noise_shape=None,
seed=None,
training=False,
name=None):
"""Applies Dropout to the input.
Dropout consists in randomly setting a fraction `rate` of input units to 0
at each update during training time, which helps prevent overfitting.
The units that are kept are scaled by `1 / (1 - rate)`, so that their
sum is unchanged at training time and inference time.
Arguments:
inputs: Tensor input.
    rate: The dropout rate, between 0 and 1. E.g. `rate=0.1` would drop out
10% of input units.
noise_shape: 1D tensor of type `int32` representing the shape of the
binary dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)`, and you want the dropout mask
to be the same for all timesteps, you can use
`noise_shape=[batch_size, 1, features]`.
seed: A Python integer. Used to create random seeds. See
`tf.compat.v1.set_random_seed`
for behavior.
training: Either a Python boolean, or a TensorFlow boolean scalar tensor
(e.g. a placeholder). Whether to return the output in training mode
(apply dropout) or in inference mode (return the input untouched).
name: The name of the layer (string).
Returns:
Output tensor.
Raises:
ValueError: if eager execution is enabled.
"""
warnings.warn('`tf.layers.dropout` is deprecated and '
'will be removed in a future version. '
'Please use `tf.keras.layers.Dropout` instead.')
layer = Dropout(rate, noise_shape=noise_shape, seed=seed, name=name)
return layer.apply(inputs, training=training)
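# Usage sketch (illustrative): `training` is typically fed as a boolean
# placeholder so the same graph can run with dropout on or off:
#     is_training = tf.compat.v1.placeholder_with_default(False, shape=())
#     y = dropout(x, rate=0.4, training=is_training)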
@tf_export(v1=['layers.Flatten'])
class Flatten(keras_layers.Flatten, base.Layer):
"""Flattens an input tensor while preserving the batch axis (axis 0).
Arguments:
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, ..., channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, ...)`.
Examples:
```
x = tf.compat.v1.placeholder(shape=(None, 4, 4), dtype='float32')
y = Flatten()(x)
# now `y` has shape `(None, 16)`
x = tf.compat.v1.placeholder(shape=(None, 3, None), dtype='float32')
y = Flatten()(x)
# now `y` has shape `(None, None)`
```
"""
pass
@tf_export(v1=['layers.flatten'])
def flatten(inputs, name=None, data_format='channels_last'):
"""Flattens an input tensor while preserving the batch axis (axis 0).
Arguments:
inputs: Tensor input.
name: The name of the layer (string).
data_format: A string, one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, height, width, channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, height, width)`.
Returns:
Reshaped tensor.
Examples:
```
x = tf.compat.v1.placeholder(shape=(None, 4, 4), dtype='float32')
y = flatten(x)
# now `y` has shape `(None, 16)`
x = tf.compat.v1.placeholder(shape=(None, 3, None), dtype='float32')
y = flatten(x)
# now `y` has shape `(None, None)`
```
"""
warnings.warn('`tf.layers.flatten` is deprecated and '
'will be removed in a future version. '
'Please use `tf.keras.layers.Flatten` instead.')
layer = Flatten(name=name, data_format=data_format)
return layer.apply(inputs)
# Aliases
FullyConnected = Dense
fully_connected = dense
|
py | b40181123818f383f073bc77256fdda2eae8b94f | # coding=utf-8
from humansms.service.MultipleMessageService import MultipleMessageService
send = MultipleMessageService('conta.integracao', 'senha.integracao')
res = send.sendMultipleFileCSV(r"C:\Users\teste\Desktop\arquivo.csv")
for msgResponse in res:
print msgResponse.getCode() + " - " + msgResponse.getDescription() |
py | b401811b8943a5f823b905443908725a10f7973b | def nomenclature_prediction(q="", k=10, threshold=0, ft_model=[]):
labels, probas = ft_model.predict(q, k, threshold)
return [{"nomenclature": l.replace("__label__", "").replace("__", ""), "probability": p} for l, p in zip(labels, probas)]
|
py | b401814dd40bbdd94c1177ca4d1847e7daaafc24 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The TMIcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the invalidateblock RPC."""
from test_framework.test_framework import TMIcoinTestFramework
from test_framework.util import *
class InvalidateTest(TMIcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self):
self.setup_nodes()
def run_test(self):
self.log.info("Make sure we repopulate setBlockIndexCandidates after InvalidateBlock:")
self.log.info("Mine 4 blocks on Node 0")
self.nodes[0].generate(4)
assert(self.nodes[0].getblockcount() == 4)
besthash = self.nodes[0].getbestblockhash()
self.log.info("Mine competing 6 blocks on Node 1")
self.nodes[1].generate(6)
assert(self.nodes[1].getblockcount() == 6)
self.log.info("Connect nodes to force a reorg")
connect_nodes_bi(self.nodes,0,1)
sync_blocks(self.nodes[0:2])
assert(self.nodes[0].getblockcount() == 6)
badhash = self.nodes[1].getblockhash(2)
self.log.info("Invalidate block 2 on node 0 and verify we reorg to node 0's original chain")
self.nodes[0].invalidateblock(badhash)
newheight = self.nodes[0].getblockcount()
newhash = self.nodes[0].getbestblockhash()
if (newheight != 4 or newhash != besthash):
raise AssertionError("Wrong tip for node0, hash %s, height %d"%(newhash,newheight))
self.log.info("Make sure we won't reorg to a lower work chain:")
connect_nodes_bi(self.nodes,1,2)
self.log.info("Sync node 2 to node 1 so both have 6 blocks")
sync_blocks(self.nodes[1:3])
assert(self.nodes[2].getblockcount() == 6)
self.log.info("Invalidate block 5 on node 1 so its tip is now at 4")
self.nodes[1].invalidateblock(self.nodes[1].getblockhash(5))
assert(self.nodes[1].getblockcount() == 4)
self.log.info("Invalidate block 3 on node 2, so its tip is now 2")
self.nodes[2].invalidateblock(self.nodes[2].getblockhash(3))
assert(self.nodes[2].getblockcount() == 2)
self.log.info("..and then mine a block")
self.nodes[2].generate(1)
self.log.info("Verify all nodes are at the right height")
time.sleep(5)
assert_equal(self.nodes[2].getblockcount(), 3)
assert_equal(self.nodes[0].getblockcount(), 4)
node1height = self.nodes[1].getblockcount()
if node1height < 4:
raise AssertionError("Node 1 reorged to a lower height: %d"%node1height)
if __name__ == '__main__':
InvalidateTest().main()
|
py | b40181b0290705f5356794b30814bced1b317042 | from celescope.rna_virus.__init__ import __ASSAY__
from celescope.tools.multi import Multi
class Multi_rna_virus(Multi):
def star_virus(self, sample):
step = 'star_virus'
fq = f'{self.outdir_dic[sample]["cutadapt"]}/{sample}_clean_2.fq{self.fq_suffix}'
cmd_line = self.get_cmd_line(step, sample)
cmd = (
f'{cmd_line} '
f'--fq {fq} '
)
self.process_cmd(cmd, step, sample, m=self.args.starMem, x=self.args.thread)
def count_virus(self, sample):
step = 'count_virus'
barcode_file = f'{self.outdir_dic[sample]["count"]}/{sample}_cell_matrix_10X/barcodes.tsv'
virus_bam = f'{self.outdir_dic[sample]["star_virus"]}/{sample}_virus_Aligned.sortedByCoord.out.bam'
cmd_line = self.get_cmd_line(step, sample)
cmd = (
f'{cmd_line} '
f'--virus_bam {virus_bam} '
f'--barcode_file {barcode_file} '
)
self.process_cmd(cmd, step, sample, m=5, x=1)
def analysis_rna_virus(self, sample):
step = 'analysis_rna_virus'
virus_file = f'{self.outdir_dic[sample]["count_virus"]}/{sample}_virus_UMI_count.tsv'
matrix_file = f'{self.outdir_dic[sample]["count"]}/{sample}_matrix.tsv.gz'
cmd_line = self.get_cmd_line(step, sample)
cmd = (
f'{cmd_line} '
f'--virus_file {virus_file} '
f'--matrix_file {matrix_file} '
)
self.process_cmd(cmd, step, sample, m=15, x=1)
def main():
multi = Multi_rna_virus(__ASSAY__)
multi.run()
if __name__ == '__main__':
main()
|
py | b40181b59f6aea5b2b499f0e857ea6a975c3f1bc | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
from enigma import eConsoleAppContainer
from Screens.Screen import Screen
from Components.ActionMap import ActionMap
from Components.ScrollLabel import ScrollLabel
from Components.Sources.StaticText import StaticText
from Screens.MessageBox import MessageBox
from enigma import getDesktop
def getDesktopSize():
s = getDesktop(0).size()
return (s.width(), s.height())
def isHD():
desktopSize = getDesktopSize()
return desktopSize[0] == 1280
class Console2(Screen):
if isHD():
skin = '''<screen position="17,center" size="1245,681" title="Command execution..." backgroundColor="#16000000" flags="wfNoBorder">
<widget name="text" position="9,48" size="1237,587" backgroundColor="#16000000" foregroundColor="#00ffffff" font="Console;24"/>
<eLabel text="Command execution..." font="Regular;30" size="1000,40" position="8,3" foregroundColor="#00ffffff" backgroundColor="#16000000" zPosition="4"/>
<eLabel position="10,674" size="165,5" backgroundColor="#00ff2525" zPosition="1"/>
<eLabel position="238,674" size="165,5" backgroundColor="#00389416" zPosition="1"/>
<eLabel position="1068,674" size="165,5" backgroundColor="#000080ff" zPosition="1"/>
<eLabel text="Cancel" position="10,646" zPosition="2" size="165,30" font="Regular;24" halign="center" valign="center" backgroundColor="#16000000" foregroundColor="#00ffffff" transparent="1"/>
<eLabel text="Hide/Show" position="238,646" zPosition="2" size="165,30" font="Regular;24" halign="center" valign="center" backgroundColor="#16000000" foregroundColor="#00ffffff" transparent="1"/>
<eLabel text="Restart GUI" position="1068,646" zPosition="2" size="165,30" font="Regular;24" halign="center" valign="center" backgroundColor="#16000000" foregroundColor="#00ffffff" transparent="1"/>
</screen>'''
else:
skin = '''<screen position="center,center" size="1886,1051" title="Command execution..." backgroundColor="#16000000" flags="wfNoBorder">
<widget name="text" position="9,93" size="1868,897" backgroundColor="#16000000" foregroundColor="#00ffffff" font="Console;33"/>
<eLabel text="Command execution..." font="Regular;45" size="1163,80" position="8,3" foregroundColor="#00ffffff" backgroundColor="#16000000" zPosition="4"/>
<eLabel position="10,1043" size="250,5" backgroundColor="#00ff2525" zPosition="1"/>
<eLabel position="353,1043" size="250,5" backgroundColor="#00389416" zPosition="1"/>
<eLabel position="1626,1043" size="250,5" backgroundColor="#000080ff" zPosition="1"/>
<eLabel text="Cancel" position="10,1004" zPosition="2" size="250,40" font="Regular;28" halign="center" valign="center" backgroundColor="#16000000" foregroundColor="#00ffffff" transparent="1"/>
<eLabel text="Hide/Show" render="Label" position="353,1004" zPosition="2" size="250,40" font="Regular;28" halign="center" valign="center" backgroundColor="#16000000" foregroundColor="#00ffffff" transparent="1"/>
<eLabel text="Restart GUI" position="1626,1004" zPosition="2" size="250,40" font="Regular;28" halign="center" valign="center" backgroundColor="#16000000" foregroundColor="#00ffffff" transparent="1"/>
</screen>'''
def __init__(self, session, title = 'Console', cmdlist = None, finishedCallback = None, closeOnSuccess = False, showStartStopText = True, skin = None):
Screen.__init__(self, session)
self.finishedCallback = finishedCallback
self.closeOnSuccess = closeOnSuccess
self.showStartStopText = showStartStopText
if skin:
self.skinName = [skin, 'Console2']
self.errorOcurred = False
self['text'] = ScrollLabel('')
self['key_red'] = StaticText(_('Cancel'))
self['key_green'] = StaticText(_('Hide'))
self["actions"] = ActionMap(["WizardActions", "DirectionActions",'ColorActions'],
{
"ok": self.cancel,
"up": self["text"].pageUp,
"down": self["text"].pageDown,
"red": self.cancel,
"green": self.toggleHideShow,
"blue": self.restartenigma,
"exit": self.cancel,
}, -1)
self.cmdlist = isinstance(cmdlist, list) and cmdlist or [cmdlist]
self.newtitle = title == 'Console' and _('Console') or title
self.cancel_msg = None
self.onShown.append(self.updateTitle)
self.container = eConsoleAppContainer()
self.run = 0
self.finished = False
try: ## DreamOS By RAED
self.container.appClosed.append(self.runFinished)
self.container.dataAvail.append(self.dataAvail)
except:
self.container.appClosed_conn = self.container.appClosed.connect(self.runFinished)
self.container.dataAvail_conn = self.container.dataAvail.connect(self.dataAvail)
self.onLayoutFinish.append(self.startRun)
def updateTitle(self):
self.setTitle(self.newtitle)
def startRun(self):
if self.showStartStopText:
self['text'].setText(_('Execution progress:') + '\n\n')
print('[Console] executing in run', self.run, ' the command:', self.cmdlist[self.run])
if self.container.execute(self.cmdlist[self.run]):
self.runFinished(-1)
def runFinished(self, retval):
if retval:
self.errorOcurred = True
self.show()
self.run += 1
if self.run != len(self.cmdlist):
if self.container.execute(self.cmdlist[self.run]):
self.runFinished(-1)
else:
self.show()
self.finished = True
try:
lastpage = self['text'].isAtLastPage()
except:
lastpage = self['text']
if self.cancel_msg:
self.cancel_msg.close()
if self.showStartStopText:
self['text'].appendText(_('Execution finished!!'))
if self.finishedCallback is not None:
self.finishedCallback()
if not self.errorOcurred and self.closeOnSuccess:
self.closeConsole()
else:
self['text'].appendText(_('\nPress OK or Exit to abort!'))
self['key_red'].setText(_('Exit'))
self['key_green'].setText('')
def toggleHideShow(self):
if self.finished:
return
if self.shown:
self.hide()
else:
self.show()
def cancel(self):
if self.finished:
self.closeConsole()
else:
self.cancel_msg = self.session.openWithCallback(self.cancelCallback, MessageBox, _('Cancel execution?'), type=MessageBox.TYPE_YESNO, default=False)
def cancelCallback(self, ret = None):
self.cancel_msg = None
if ret:
try: ## DreamOS By RAED
self.container.appClosed.remove(self.runFinished)
self.container.dataAvail.remove(self.dataAvail)
except:
self.container.appClosed_conn = None
self.container.dataAvail_conn = None
self.container.kill()
self.close()
def closeConsole(self):
if self.finished:
try: ## DreamOS By RAED
self.container.appClosed.remove(self.runFinished)
self.container.dataAvail.remove(self.dataAvail)
except:
self.container.appClosed_conn = None
self.container.dataAvail_conn = None
self.close()
else:
self.show()
def dataAvail(self, str):
self['text'].appendText(str)
def restartenigma(self):
from Screens.Standby import TryQuitMainloop
self.session.open(TryQuitMainloop, 3) |
py | b4018270602c4db637fed916281e352a66709838 | from __future__ import print_function
import os
import sys
import argparse
import time
import math
import os.path as osp
import tensorboard_logger as tb_logger
import torch
import torch.backends.cudnn as cudnn
from torchvision import transforms, datasets
from glob import glob
from util import TwoCropTransform, AverageMeter
from util import adjust_learning_rate, warmup_learning_rate
from util import set_optimizer, save_model
from networks.resnet_big import SupConResNet
from losses import SupConLoss
from torch.utils.data import Dataset
from data import build
from data.datasets import init_dataset, ImageDataset
import numpy as np
import matplotlib.pyplot as plt
import math
import time
from scipy.spatial.distance import pdist
import torch
from torch.optim.optimizer import Optimizer, required
import re
from PIL import Image
from reid.evaluators import Evaluator
from collections import deque
import random
import os
try:
import apex
from apex import amp, optimizers
except ImportError:
pass
def parse_option():
parser = argparse.ArgumentParser('argument for training')
parser.add_argument('--print_freq', type=int, default=10,
help='print frequency')
parser.add_argument('--save_freq', type=int, default=50,
help='save frequency')
parser.add_argument('--batch_size', type=int, default=256,
help='batch_size')
parser.add_argument('--num_workers', type=int, default=16,
help='num of workers to use')
parser.add_argument('--epochs', type=int, default=100,
help='number of training epochs')
# optimization
parser.add_argument('--learning_rate', type=float, default=0.05,
help='learning rate')
parser.add_argument('--lr_decay_epochs', type=str, default='700,800,900',
help='where to decay lr, can be a list')
parser.add_argument('--lr_decay_rate', type=float, default=0.1,
help='decay rate for learning rate')
parser.add_argument('--weight_decay', type=float, default=1e-4,
help='weight decay')
parser.add_argument('--momentum', type=float, default=0.9,
help='momentum')
# model dataset
parser.add_argument('--model', type=str, default='resnet50')
parser.add_argument('--dataset', type=str, default='cifar10',
choices=['cifar10', 'cifar100', 'path'], help='dataset')
parser.add_argument('--mean', type=str, help='mean of dataset in path in form of str tuple')
parser.add_argument('--std', type=str, help='std of dataset in path in form of str tuple')
parser.add_argument('--data_folder', type=str, default=None, help='path to custom dataset')
parser.add_argument('--size', type=int, default=32, help='parameter for RandomResizedCrop')
# method
parser.add_argument('--method', type=str, default='SupCon',
choices=['SupCon', 'SimCLR'], help='choose method')
# temperature
parser.add_argument('--temp', type=float, default=0.05,
help='temperature for loss function')
# other setting
parser.add_argument('--cosine', action='store_true',
help='using cosine annealing')
parser.add_argument('--syncBN', action='store_true',
help='using synchronized batch normalization')
parser.add_argument('--warm', action='store_true',
help='warm-up for large batch training')
parser.add_argument('--trial', type=str, default='0',
help='id for recording multiple runs')
opt = parser.parse_args()
# check if dataset is path that passed required arguments
if opt.dataset == 'path':
assert opt.data_folder is not None \
and opt.mean is not None \
and opt.std is not None
# set the path according to the environment
if opt.data_folder is None:
opt.data_folder = './datasets/'
opt.model_path = './save/SupCon/{}_models'.format(opt.dataset)
opt.tb_path = './save/SupCon/{}_tensorboard'.format(opt.dataset)
iterations = opt.lr_decay_epochs.split(',')
opt.lr_decay_epochs = list([])
for it in iterations:
opt.lr_decay_epochs.append(int(it))
opt.model_name = '{}_{}_{}_lr_{}_decay_{}_bsz_{}_temp_{}_trial_{}'.\
format(opt.method, opt.dataset, opt.model, opt.learning_rate,
opt.weight_decay, opt.batch_size, opt.temp, opt.trial)
if opt.cosine:
opt.model_name = '{}_cosine'.format(opt.model_name)
# warm-up for large-batch training,
if opt.batch_size > 256:
opt.warm = True
if opt.warm:
opt.model_name = '{}_warm'.format(opt.model_name)
opt.warmup_from = 0.01
opt.warm_epochs = 10
if opt.cosine:
eta_min = opt.learning_rate * (opt.lr_decay_rate ** 3)
opt.warmup_to = eta_min + (opt.learning_rate - eta_min) * (
1 + math.cos(math.pi * opt.warm_epochs / opt.epochs)) / 2
else:
opt.warmup_to = opt.learning_rate
opt.tb_folder = os.path.join(opt.tb_path, opt.model_name)
if not os.path.isdir(opt.tb_folder):
os.makedirs(opt.tb_folder)
opt.save_folder = os.path.join(opt.model_path, opt.model_name)
if not os.path.isdir(opt.save_folder):
os.makedirs(opt.save_folder)
return opt
EETA_DEFAULT = 0.001
class LARS(Optimizer):
"""
Layer-wise Adaptive Rate Scaling for large batch training.
Introduced by "Large Batch Training of Convolutional Networks" by Y. You,
I. Gitman, and B. Ginsburg. (https://arxiv.org/abs/1708.03888)
"""
def __init__(
self,
params,
lr=required,
momentum=0.9,
use_nesterov=False,
weight_decay=0.0,
exclude_from_weight_decay=None,
exclude_from_layer_adaptation=None,
classic_momentum=True,
eeta=EETA_DEFAULT,
):
"""Constructs a LARSOptimizer.
Args:
lr: A `float` for learning rate.
momentum: A `float` for momentum.
use_nesterov: A 'Boolean' for whether to use nesterov momentum.
weight_decay: A `float` for weight decay.
exclude_from_weight_decay: A list of `string` for variable screening, if
any of the string appears in a variable's name, the variable will be
excluded for computing weight decay. For example, one could specify
the list like ['batch_normalization', 'bias'] to exclude BN and bias
from weight decay.
exclude_from_layer_adaptation: Similar to exclude_from_weight_decay, but
for layer adaptation. If it is None, it will be defaulted the same as
exclude_from_weight_decay.
classic_momentum: A `boolean` for whether to use classic (or popular)
                momentum. The learning rate is applied during the momentum update in
classic momentum, but after momentum for popular momentum.
eeta: A `float` for scaling of learning rate when computing trust ratio.
name: The name for the scope.
"""
self.epoch = 0
defaults = dict(
lr=lr,
momentum=momentum,
use_nesterov=use_nesterov,
weight_decay=weight_decay,
exclude_from_weight_decay=exclude_from_weight_decay,
exclude_from_layer_adaptation=exclude_from_layer_adaptation,
classic_momentum=classic_momentum,
eeta=eeta,
)
super(LARS, self).__init__(params, defaults)
self.lr = lr
self.momentum = momentum
self.weight_decay = weight_decay
self.use_nesterov = use_nesterov
self.classic_momentum = classic_momentum
self.eeta = eeta
self.exclude_from_weight_decay = exclude_from_weight_decay
# exclude_from_layer_adaptation is set to exclude_from_weight_decay if the
# arg is None.
if exclude_from_layer_adaptation:
self.exclude_from_layer_adaptation = exclude_from_layer_adaptation
else:
self.exclude_from_layer_adaptation = exclude_from_weight_decay
def step(self, epoch=None, closure=None):
loss = None
if closure is not None:
loss = closure()
if epoch is None:
epoch = self.epoch
self.epoch += 1
for group in self.param_groups:
weight_decay = group["weight_decay"]
momentum = group["momentum"]
eeta = group["eeta"]
lr = group["lr"]
for p in group["params"]:
if p.grad is None:
continue
param = p.data
grad = p.grad.data
param_state = self.state[p]
# TODO: get param names
# if self._use_weight_decay(param_name):
grad += self.weight_decay * param
if self.classic_momentum:
trust_ratio = 1.0
# TODO: get param names
# if self._do_layer_adaptation(param_name):
w_norm = torch.norm(param)
g_norm = torch.norm(grad)
device = g_norm.get_device()
trust_ratio = torch.where(
w_norm.ge(0),
torch.where(
g_norm.ge(0),
(self.eeta * w_norm / g_norm),
torch.Tensor([1.0]).to(device),
),
torch.Tensor([1.0]).to(device),
).item()
scaled_lr = lr * trust_ratio
if "momentum_buffer" not in param_state:
next_v = param_state["momentum_buffer"] = torch.zeros_like(
p.data
)
else:
next_v = param_state["momentum_buffer"]
next_v.mul_(momentum).add_(scaled_lr, grad)
if self.use_nesterov:
update = (self.momentum * next_v) + (scaled_lr * grad)
else:
update = next_v
p.data.add_(-update)
else:
raise NotImplementedError
return loss
def _use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if not self.weight_decay:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
def _do_layer_adaptation(self, param_name):
"""Whether to do layer-wise learning rate adaptation for `param_name`."""
if self.exclude_from_layer_adaptation:
for r in self.exclude_from_layer_adaptation:
if re.search(r, param_name) is not None:
return False
return True
def load_optimizer(model,batch_size):
scheduler = None
# optimized using LARS with linear learning rate scaling
# (i.e. LearningRate = 0.3 × BatchSize/256) and weight decay of 10−6.
learning_rate = 0.3 #* batch_size / 256
optimizer = LARS(
model.parameters(),
lr=learning_rate,
weight_decay=1.5e-6,
exclude_from_weight_decay=["batch_normalization", "bias"],
)
# "decay the learning rate with the cosine decay schedule without restarts"
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, 20, eta_min=0, last_epoch=-1)
return optimizer, scheduler
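# Usage sketch (illustrative): LARS behaves like any torch optimizer; the
# per-layer trust ratio eeta * ||w|| / ||g|| rescales the base learning rate.
#     optimizer, scheduler = load_optimizer(model, batch_size=256)
#     loss.backward()
#     optimizer.step()
#     scheduler.step()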
class Market(object):
def __init__(self, root):
self.images_dir = osp.join(root)
self.camstyle_path = 'bounding_box_train_camstyle'
self.camstyle = []
self.num_camstyle_ids = 0
self.load()
def preprocess(self, path, relabel=True):
pattern = re.compile(r'([-\d]+)_c(\d)')
all_pids = {}
ret = []
fpaths = sorted(glob(osp.join(self.images_dir, path, '*.jpg')))
for fpath in fpaths:
fname = osp.basename(fpath)
pid, cam = map(int, pattern.search(fname).groups())
if pid == -1: continue
if relabel:
if pid not in all_pids:
all_pids[pid] = len(all_pids)
else:
if pid not in all_pids:
all_pids[pid] = pid
pid = all_pids[pid]
cam -= 1
ret.append((fname, pid, cam))
return ret, int(len(all_pids))
def load(self):
self.camstyle, self.num_camstyle_ids = self.preprocess(self.camstyle_path)
print(" camstyle | {:5d} | {:8d}"
.format(self.num_camstyle_ids, len(self.camstyle)))
class Preprocessor(object):
def __init__(self, dataset, root=None, transform=None):
super(Preprocessor, self).__init__()
self.dataset = dataset
self.root = root
self.transform = transform
def __len__(self):
return len(self.dataset)
def __getitem__(self, indices):
if isinstance(indices, (tuple, list)):
return [self._get_single_item(index) for index in indices]
return self._get_single_item(indices)
def _get_single_item(self, index):
fname, pid, camid = self.dataset[index]
fpath = fname
if self.root is not None:
fpath = osp.join(self.root, fname)
img = Image.open(fpath).convert('RGB')
if self.transform is not None:
img = self.transform(img)
return img, fname, pid, camid
class RandomErasing(object):
def __init__(self, p=0.5, sl=0.02, sh=0.4, r1=0.3, r2=3):
self.p = p
self.sl = sl
self.sh = sh
self.r1 = r1
self.r2 = r2
def __call__(self, img):
if np.random.rand() > self.p:
return img
img = np.array(img)
while True:
img_h, img_w, img_c = img.shape
img_area = img_h * img_w
mask_area = np.random.uniform(self.sl, self.sh) * img_area
mask_aspect_ratio = np.random.uniform(self.r1, self.r2)
mask_w = int(np.sqrt(mask_area / mask_aspect_ratio))
mask_h = int(np.sqrt(mask_area * mask_aspect_ratio))
mask = np.random.rand(mask_h, mask_w, img_c) * 255
left = np.random.randint(0, img_w)
top = np.random.randint(0, img_h)
right = left + mask_w
bottom = top + mask_h
if right <= img_w and bottom <= img_h:
break
img[top:bottom, left:right, :] = mask
return Image.fromarray(img)
class RandomPatch(object):
"""Random patch data augmentation.
    Input: an HWC image with pixel values in 0-255.
    Similar to random erasing: both occlude a block of pixels. The difference
    is that the occluded region is not a gray block but a randomly sized patch
    cropped from other images in the batch.
    There is a patch pool that stores randomly extracted patches from person images.
For each input image, RandomPatch
1) extracts a random patch and stores the patch in the patch pool;
2) randomly selects a patch from the patch pool and pastes it on the
input (at random position) to simulate occlusion.
Reference:
- Zhou et al. Omni-Scale Feature Learning for Person Re-Identification. ICCV, 2019.
- Zhou et al. Learning Generalisable Omni-Scale Representations
for Person Re-Identification. arXiv preprint, 2019.
    min_sample_size is tied to the batch size: with batch 64 and
    min_sample_size=60, roughly 61 images stay unchanged and 3 are patched.
"""
def __init__(self, prob_happen=1, pool_capacity=50000, min_sample_size=5,
patch_min_area=0.01, patch_max_area=0.5, patch_min_ratio=0.1,
prob_rotate=0.5, prob_flip_leftright=0.5,
):
self.prob_happen = prob_happen
self.patch_min_area = patch_min_area
self.patch_max_area = patch_max_area
self.patch_min_ratio = patch_min_ratio
self.prob_rotate = prob_rotate
self.prob_flip_leftright = prob_flip_leftright
self.patchpool = deque(maxlen=pool_capacity)
self.min_sample_size = min_sample_size
def generate_wh(self, W, H):
area = W * H
for attempt in range(100):
target_area = random.uniform(self.patch_min_area, self.patch_max_area) * area
aspect_ratio = random.uniform(self.patch_min_ratio, 1. / self.patch_min_ratio)
h = int(round(math.sqrt(target_area * aspect_ratio)))
w = int(round(math.sqrt(target_area / aspect_ratio)))
if w < W and h < H:
return w, h
return None, None
def transform_patch(self, patch):
if random.uniform(0, 1) > self.prob_flip_leftright:
patch = patch.transpose(Image.FLIP_LEFT_RIGHT)
if random.uniform(0, 1) > self.prob_rotate:
patch = patch.rotate(random.randint(-10, 10))
return patch
def __call__(self, img):
W, H = img.size # original image size
# collect new patch
w, h = self.generate_wh(W, H)
if w is not None and h is not None:
x1 = random.randint(0, W - w)
y1 = random.randint(0, H - h)
            new_patch = img.crop((x1, y1, x1 + w, y1 + h)) # crop a patch from the image
self.patchpool.append(new_patch)
#print("**************************")
if len(self.patchpool) < self.min_sample_size:
#print(len(self.patchpool))
# print(np.self.patchpool)
#print(self.min_sample_size)
return img
if random.uniform(0, 1) > self.prob_happen:
return img
# paste a randomly selected patch on a random position
patch = random.sample(self.patchpool, 1)[0]
patchW, patchH = patch.size
x1 = random.randint(0, W - patchW)
y1 = random.randint(0, H - patchH)
patch = self.transform_patch(patch)
img.paste(patch, (x1, y1))
return img
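# Usage sketch (illustrative): both augmentations operate on PIL images and
# can be chained ahead of ToTensor(), e.g.
#     aug = transforms.Compose([RandomPatch(), RandomErasing(), transforms.ToTensor()])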
def set_loader(opt):
# construct data loader
'''if opt.dataset == 'cifar10':
mean = (0.485,0.456,0.406)
std = (0.229,0.224,0.225)
elif opt.dataset == 'cifar100':
mean = (0.5071, 0.4867, 0.4408)
std = (0.2675, 0.2565, 0.2761)
elif opt.dataset == 'path':
mean = eval(opt.mean)
std = eval(opt.mean)
else:
raise ValueError('dataset not supported: {}'.format(opt.dataset))'''
    # load the CamStyle dataset
#root = 'C:\\Users\\DELL\\Desktop\\SupContrast-master\\data\\market1501'
#CamStyle_dataset = Market(root)
mean = (0.485,0.456,0.406)
std = (0.229,0.224,0.225)
size = (256,128)
normalize = transforms.Normalize(mean=mean, std=std)
train_transform = transforms.Compose([
        #transforms.Resize(size=(256,256)), # first resize to the fake-image size (uniform size) so RandomPatch is easier to apply
        #RandomPatch(), # random patch occlusion
        transforms.RandomResizedCrop(size=size), # random resized crop
        transforms.RandomHorizontalFlip(), # random horizontal flip
        transforms.RandomRotation(180), # random rotation
        #transforms.Resize(size=size), #resize
        transforms.RandomGrayscale(p=0.2), # convert to grayscale with probability p
        RandomErasing(), # random erasing
transforms.ToTensor(),
normalize,
])
source_transform = transforms.Compose([
transforms.Resize(size=size),
transforms.ToTensor(),
normalize,
])
NAMES = 'market1501'
DIR = os.getcwd()
ROOT_DIR = DIR+'\\data'
dataset = init_dataset(NAMES, root=ROOT_DIR)
    # load CamStyle images through ImageDataset
train_dataset = ImageDataset(dataset.train,TwoCropTransform(train_transform,source_transform))
'''if opt.dataset == 'cifar10':
train_dataset = datasets.CIFAR10(root=opt.data_folder,
transform=TwoCropTransform(train_transform),
download=True)
elif opt.dataset == 'cifar100':
train_dataset = datasets.CIFAR100(root=opt.data_folder,
transform=TwoCropTransform(train_transform),
download=True)
elif opt.dataset == 'path':
train_dataset = datasets.ImageFolder(root=opt.data_folder,
transform=TwoCropTransform(train_transform))
else:
raise ValueError(opt.dataset)'''
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=opt.batch_size, shuffle=(train_sampler is None),
num_workers=opt.num_workers, pin_memory=True, sampler=train_sampler)
query_loader = torch.utils.data.DataLoader(
Preprocessor(dataset.query,
root=osp.join(dataset.dataset_dir, dataset.query_dir), transform=source_transform),
batch_size=opt.batch_size, num_workers=opt.num_workers,
shuffle=False, pin_memory=True)
gallery_loader = torch.utils.data.DataLoader(
Preprocessor(dataset.gallery,
root=osp.join(dataset.dataset_dir, dataset.gallery_dir), transform=source_transform),
batch_size=opt.batch_size, num_workers=opt.num_workers,
shuffle=False, pin_memory=True)
return train_loader,query_loader,gallery_loader,dataset
def set_model(opt):
#model
model = SupConResNet(name=opt.model)
#loss
criterion = SupConLoss(temperature=opt.temp)
if torch.cuda.is_available():
if torch.cuda.device_count() > 1:
model.encoder = torch.nn.DataParallel(model.encoder)
model = model.cuda()
criterion = criterion.cuda()
cudnn.benchmark = True
return model, criterion
def train(train_loader, model, criterion, optimizer, epoch, opt):
"""one epoch training"""
model.train()
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
end = time.time()
    # idx: batch index; images holds three views per sample (two augmented, one original); labels: ground-truth ids (unused here)
for idx,(images, labels,camera_id,image_path) in enumerate(train_loader):
data_time.update(time.time() - end)
        # augmented views, 2*bs images in total
images_1 = torch.cat([images[0], images[1]], dim=0)
if torch.cuda.is_available():
images_1 = images_1.cuda(non_blocking=True)
labels = labels.cuda(non_blocking=True)
bsz = labels.shape[0]
# warm-up learning rate
if epoch <= 2:
warmup_learning_rate(opt, epoch, idx, len(train_loader), optimizer)
# compute loss
features = model(images_1)
f1, f2 = torch.split(features, [bsz, bsz], dim=0)
features = torch.cat([f1.unsqueeze(1), f2.unsqueeze(1)], dim=1)
        # original images; pseudo-labels are produced by the clustering algorithm
real_image = images[2]
if torch.cuda.is_available():
real_image = real_image.cuda(non_blocking=True)
features_realimage = model(real_image)
features_realimage = features_realimage.cpu()
features_realimage = features_realimage.detach().numpy()
features_realimage = np.mat(features_realimage).transpose()
clusters, clusterNum = dbscan(features_realimage, 0.75, 1)
labels = torch.Tensor(clusters)
        # test the loss (disabled code below)
'''features = torch.Tensor([[[1,2],[4,3]],[[1,1],[2,2]]])
features.cuda()
labels = torch.Tensor([1,2])
labels.cuda()'''
if clusterNum != bsz:
print(clusterNum)
if opt.method == 'SupCon':
loss = criterion(features, labels)
elif opt.method == 'SimCLR':
loss = criterion(features)
else:
raise ValueError('contrastive method not supported: {}'.
format(opt.method))
# update metric
losses.update(loss.item(), bsz)
#
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print info
if (idx + 1) % opt.print_freq == 0:
print('Train: [{0}][{1}/{2}]\t'
'BT {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'DT {data_time.val:.3f} ({data_time.avg:.3f})\t'
'loss {loss.val:.3f} ({loss.avg:.3f})'.format(
epoch, idx + 1, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses))
sys.stdout.flush()
return losses.avg
UNCLASSIFIED = False
NOISE = 0
def dist(a, b):
#v1 = math.sqrt(np.power(a - b, 2).sum())
#return v1
up=np.double(np.bitwise_and((a != b),np.bitwise_or(a != 0, b != 0)).sum())
down=np.double(np.bitwise_or(a != 0, b != 0).sum())
d1=(up/down)
return d1
#X=np.vstack([a,b])
    #d2=pdist(X,'jaccard') # this is the Jaccard distance; use 1 - d2 if the Jaccard similarity is needed
#return d2
def eps_neighbor(a, b, eps):
return dist(a, b) < eps
def region_query(data, pointId, eps):
nPoints = data.shape[1]
seeds = []
for i in range(nPoints):
if eps_neighbor(data[:, pointId], data[:, i], eps):
seeds.append(i)
return seeds
def expand_cluster(data, clusterResult, pointId, clusterId, eps, minPts):
seeds = region_query(data, pointId, eps)
    if len(seeds) < minPts: # points that fail the minPts condition are noise
clusterResult[pointId] = NOISE
return False
else:
        clusterResult[pointId] = clusterId # assign the point to this cluster
for seedId in seeds:
clusterResult[seedId] = clusterId
        while len(seeds) > 0: # keep expanding the cluster
currentPoint = seeds[0]
queryResults = region_query(data, currentPoint, eps)
if len(queryResults) >= minPts:
for i in range(len(queryResults)):
resultPoint = queryResults[i]
if clusterResult[resultPoint] == UNCLASSIFIED:
seeds.append(resultPoint)
clusterResult[resultPoint] = clusterId
elif clusterResult[resultPoint] == NOISE:
clusterResult[resultPoint] = clusterId
seeds = seeds[1:]
return True
def dbscan(data, eps, minPts):
clusterId = 1
nPoints = data.shape[1]
clusterResult = [UNCLASSIFIED] * nPoints
for pointId in range(nPoints):
point=data[:, pointId]
if clusterResult[pointId] == UNCLASSIFIED:
if expand_cluster(data, clusterResult, pointId, clusterId, eps, minPts):
clusterId = clusterId + 1
return clusterResult, clusterId - 1
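# Usage sketch (illustrative): dbscan() treats each column of `data` as one
# sample and compares columns with the Jaccard-style distance in dist();
# eps bounds that distance and minPts is the density threshold.
#     toy = np.mat([[1, 1, 0],
#                   [0, 0, 1],
#                   [1, 1, 0]])            # three samples stored as columns
#     clusters, num_clusters = dbscan(toy, 0.5, 1)  # e.g. [1, 1, 2], 2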
def main():
    # parse command-line configuration
    opt = parse_option()
    # load the training / query / gallery data
    train_loader,query_loader,gallery_loader,dataset = set_loader(opt)
    # build the model and loss; ResNet50 encoder with ImageNet pre-trained weights
    model, criterion = set_model(opt)
    # build the optimizer
#optimizer = set_optimizer(opt, model)
optimizer, scheduler = load_optimizer(model,opt.batch_size)
# tensorboard
logger = tb_logger.Logger(logdir=opt.tb_folder, flush_secs=2)
#evaluator = Evaluator(model)
#evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery, 2048, True)
# training routine
for epoch in range(1, opt.epochs + 1):
#adjust_learning_rate(opt, optimizer, epoch)
# train for one epoch
time1 = time.time()
        # clustering at every epoch is time-consuming (the blocks below are disabled)
'''for idx in train_loader:
labels = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]'''
'''for inx in train_loader.dataset:
inx[1] = 0'''
'''for idx,(images, labels,camera_id,image_path) in enumerate(train_loader):
labels = torch.Tensor([int(0),int(0),int(0),int(0),int(0),int(0),int(0),int(0)])
print(idx)'''
'''features = []
for (image1,image2,image),label,camera_id,path in train_loader.dataset:
v1 = torch.unsqueeze(image,0)
v1 = v1.cuda(non_blocking=True)
feature = model(v1)
feature = feature.cpu()
feature = feature.detach().numpy()
feature = feature[0]
features.append(feature)
#if len(features) == 100:
#break
#print(len(features))
features = np.mat(features).transpose()
clusters, clusterNum = dbscan(features, 0.75, 1)'''
loss = train(train_loader, model, criterion, optimizer, epoch, opt)
        # update the learning rate
scheduler.step()
time2 = time.time()
print('epoch {}, total time {:.2f}'.format(epoch, time2 - time1))
# tensorboard logger
logger.log_value('loss', loss, epoch)
logger.log_value('learning_rate', optimizer.param_groups[0]['lr'], epoch)
if epoch == 1 or epoch % 10 == 0:
evaluator = Evaluator(model)
with torch.no_grad():
evaluator.evaluate(query_loader, gallery_loader, dataset.query, dataset.gallery, 2048, True)
if epoch % opt.save_freq == 0:
save_file = os.path.join(
opt.save_folder, 'ckpt_epoch_{epoch}.pth'.format(epoch=epoch))
save_model(model, optimizer, opt, epoch, save_file)
# save the last model
save_file = os.path.join(
opt.save_folder, 'last.pth')
save_model(model, optimizer, opt, opt.epochs, save_file)
if __name__ == '__main__':
main()
|
py | b40182fb6d3df2d31072176b6ddbf2402c966960 | #
# Autogenerated by Frugal Compiler (3.14.2)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
import sys
import traceback
from thrift.Thrift import TApplicationException
from thrift.Thrift import TMessageType
from thrift.Thrift import TType
from tornado import gen
from frugal.exceptions import TApplicationExceptionType
from frugal.middleware import Method
from frugal.subscription import FSubscription
from frugal.transport import TMemoryOutputBuffer
from .ttypes import *
class EventsSubscriber(object):
"""
This docstring gets added to the generated code because it has
the @ sign. Prefix specifies topic prefix tokens, which can be static or
variable.
"""
_DELIMITER = '.'
def __init__(self, provider, middleware=None):
"""
Create a new EventsSubscriber.
Args:
provider: FScopeProvider
middleware: ServiceMiddleware or list of ServiceMiddleware
"""
middleware = middleware or []
if middleware and not isinstance(middleware, list):
middleware = [middleware]
middleware += provider.get_middleware()
self._middleware = middleware
self._provider = provider
@gen.coroutine
def subscribe_EventCreated(self, user, EventCreated_handler):
"""
This is a docstring.
Args:
user: string
EventCreated_handler: function which takes FContext and Event
"""
op = 'EventCreated'
prefix = 'foo.{}.'.format(user)
topic = '{}Events{}{}'.format(prefix, self._DELIMITER, op)
transport, protocol_factory = self._provider.new_subscriber()
yield transport.subscribe(topic, self._recv_EventCreated(protocol_factory, op, EventCreated_handler))
raise gen.Return(FSubscription(topic, transport))
def _recv_EventCreated(self, protocol_factory, op, handler):
method = Method(handler, self._middleware)
def callback(transport):
iprot = protocol_factory.get_protocol(transport)
ctx = iprot.read_request_headers()
mname, _, _ = iprot.readMessageBegin()
if mname != op:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
raise TApplicationException(TApplicationExceptionType.UNKNOWN_METHOD)
req = Event()
req.read(iprot)
iprot.readMessageEnd()
try:
method([ctx, req])
except:
traceback.print_exc()
sys.exit(1)
return callback
@gen.coroutine
def subscribe_SomeInt(self, user, SomeInt_handler):
"""
Args:
user: string
SomeInt_handler: function which takes FContext and i64
"""
op = 'SomeInt'
prefix = 'foo.{}.'.format(user)
topic = '{}Events{}{}'.format(prefix, self._DELIMITER, op)
transport, protocol_factory = self._provider.new_subscriber()
yield transport.subscribe(topic, self._recv_SomeInt(protocol_factory, op, SomeInt_handler))
raise gen.Return(FSubscription(topic, transport))
def _recv_SomeInt(self, protocol_factory, op, handler):
method = Method(handler, self._middleware)
def callback(transport):
iprot = protocol_factory.get_protocol(transport)
ctx = iprot.read_request_headers()
mname, _, _ = iprot.readMessageBegin()
if mname != op:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
raise TApplicationException(TApplicationExceptionType.UNKNOWN_METHOD)
req = iprot.readI64()
iprot.readMessageEnd()
try:
method([ctx, req])
except:
traceback.print_exc()
sys.exit(1)
return callback
@gen.coroutine
def subscribe_SomeStr(self, user, SomeStr_handler):
"""
Args:
user: string
SomeStr_handler: function which takes FContext and string
"""
op = 'SomeStr'
prefix = 'foo.{}.'.format(user)
topic = '{}Events{}{}'.format(prefix, self._DELIMITER, op)
transport, protocol_factory = self._provider.new_subscriber()
yield transport.subscribe(topic, self._recv_SomeStr(protocol_factory, op, SomeStr_handler))
raise gen.Return(FSubscription(topic, transport))
def _recv_SomeStr(self, protocol_factory, op, handler):
method = Method(handler, self._middleware)
def callback(transport):
iprot = protocol_factory.get_protocol(transport)
ctx = iprot.read_request_headers()
mname, _, _ = iprot.readMessageBegin()
if mname != op:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
raise TApplicationException(TApplicationExceptionType.UNKNOWN_METHOD)
req = iprot.readString()
iprot.readMessageEnd()
try:
method([ctx, req])
except:
traceback.print_exc()
sys.exit(1)
return callback
@gen.coroutine
def subscribe_SomeList(self, user, SomeList_handler):
"""
Args:
user: string
SomeList_handler: function which takes FContext and list<map<id,Event>>
"""
op = 'SomeList'
prefix = 'foo.{}.'.format(user)
topic = '{}Events{}{}'.format(prefix, self._DELIMITER, op)
transport, protocol_factory = self._provider.new_subscriber()
yield transport.subscribe(topic, self._recv_SomeList(protocol_factory, op, SomeList_handler))
raise gen.Return(FSubscription(topic, transport))
def _recv_SomeList(self, protocol_factory, op, handler):
method = Method(handler, self._middleware)
def callback(transport):
iprot = protocol_factory.get_protocol(transport)
ctx = iprot.read_request_headers()
mname, _, _ = iprot.readMessageBegin()
if mname != op:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
raise TApplicationException(TApplicationExceptionType.UNKNOWN_METHOD)
req = []
(_, elem73) = iprot.readListBegin()
for _ in range(elem73):
elem74 = {}
(_, _, elem75) = iprot.readMapBegin()
for _ in range(elem75):
elem77 = iprot.readI64()
elem76 = Event()
elem76.read(iprot)
elem74[elem77] = elem76
iprot.readMapEnd()
req.append(elem74)
iprot.readListEnd()
iprot.readMessageEnd()
try:
method([ctx, req])
except:
traceback.print_exc()
sys.exit(1)
return callback
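# Usage sketch (not part of the generated code; names below are illustrative):
# each subscribe_* coroutine above registers one handler per pub/sub operation,
# e.g. a SomeStr handler receives the FContext plus the decoded string payload:
#
#   def on_some_str(ctx, value):
#       print(value)
#
#   # inside a coroutine, with `subscriber` an instance of this subscriber class:
#   # yield subscriber.subscribe_SomeStr("user1", on_some_str)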
|
py | b4018353e1788228301d8c41f7247106b8170010 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Model to parse manifest files."""
from lxml import etree
from lxml import objectify
class MavenPom:
"""Model for Maven POM (Project Object Model)."""
def __init__(self, document=None):
"""Initialize constructor for MavenPom class.
:param document: content of the pom.xml manifest to be parsed.
:returns: None
"""
if not document:
raise ValueError("No content is provided for parsing")
self.document = document.strip()
if not isinstance(self.document, (bytes, bytearray)):
self.document = self.document.encode()
self.root = objectify.fromstring(self.document)
# create a dependencies element if doesn't exist
if getattr(self.root, 'dependencies', None) is None:
_prev = getattr(self.root, 'dependencyManagement', None)\
or getattr(self.root, 'properties', None)\
or getattr(self.root, 'name', None)
if _prev is not None:
_prev.addnext(objectify.Element('dependencies'))
else:
self.root.dependencies = objectify.ObjectifiedElement()
self.root = self._reload(self.root)
self.dependency_set = set([self.Dependency(d) for d in getattr(
self.root.dependencies, 'dependency', [])])
def __getitem__(self, key):
"""Return the value for attr key."""
attr = getattr(self.root, key, None)
objectify.deannotate(self.root)
return attr
def __setitem__(self, key, value):
"""Set value for attr key."""
_prev = getattr(self.root, 'modelVersion', None)
if key in ('groupId', 'artifactId', 'name', 'version', 'packaging') and _prev is not None:
# add these tags just after modelVersion tag.
element = etree.Element(key)
element.text = value
_prev.addnext(element)
else:
setattr(self.root, key, value)
objectify.deannotate(self.root)
self._reload(self.root)
def add_element(self, data={}, parent=None, next_to=None):
"""Add element to POM.
data: dict, list/tuple, bytes, or scalar text to attach
parent: etree.Element or string (tag name of the element to create)
next_to: etree.Element or string naming an existing element to insert after (optional)
return: None
"""
_prev = None
if next_to is not None:
if isinstance(next_to, (str, bytes)):
_prev = getattr(self.root, next_to, None)
else:
_prev = next_to
if isinstance(parent, (str, bytes)):
if _prev is not None:
parent = etree.Element(parent)
_prev.addnext(parent)
else:
parent = etree.SubElement(self.root, parent)
if isinstance(data, dict):
for key, value in data.items():
self.add_element(value, etree.SubElement(parent, key))
elif isinstance(data, (tuple, list)):
for value in data:
self.add_element(value, parent)
elif isinstance(data, (bytes, bytearray)):
parent._setText(data.decode())
else:
parent._setText(data)
def add_dependency(self, dependency):
"""Add dependency to POM.
dependency: dict
return: None
"""
self.dependency_set.add(self.Dependency(dependency))
def add_dependencies(self, dependencies):
"""Add dependency to POM.
dependencies: list
return: None
"""
self.dependency_set.update({self.Dependency(dep)
for dep in dependencies})
def remove_dependency(self, dependency):
"""Remove dependency to POM.
dependency: dict
return: None
"""
self.dependency_set.remove(self.Dependency(dependency))
def __contains__(self, dependency):
"""Check for dependency exists or not.
dependency: dict
return: bool
"""
return self.Dependency(dependency) in self.dependency_set
def get_dependencies(self):
"""Return list of all the dependencies.
return: generator
"""
for dep in self.dependency_set:
yield dep
def _commit(self):
"""Commit the changes to the XML root object."""
for dep in self.dependency_set:
self.root.dependencies.append(MavenPom.to_objectify(dep))
self.root = self._reload(self.root)
@staticmethod
def tostring(obj, decoding=False):
"""Convert the xml object into string.
:returns: String
"""
if getattr(obj, '_commit', None) is not None:
obj._commit()
objectify.deannotate(obj.root, xsi_nil=True,
pytype=False, xsi=False, cleanup_namespaces=True)
_str = etree.tostring(obj.root, pretty_print=True)
if decoding:
return _str.decode()
return _str
@staticmethod
def to_objectify(obj):
"""Convert the object into ObjectifiedElement.
:returns: ObjectifiedElement
"""
return obj.root
@staticmethod
def _reload(obj):
obj = objectify.fromstring(etree.tostring(obj))
objectify.deannotate(obj, xsi_nil=True, cleanup_namespaces=True)
return obj
class Dependency:
"""Dependency class of outer class MavenPom."""
def __init__(self, dependency=None):
"""Initialize constructor for Dependency class.
:returns: None
"""
self.Exclusion = MavenPom.Exclusion
if dependency is not None:
if not isinstance(dependency, objectify.ObjectifiedElement):
self.root = objectify.Element('dependency')
else:
self.root = dependency
for k, v in dependency.items():
if k == 'exclusions' and len(v) > 0:
self.root.exclusions = objectify.ObjectifiedElement()
for excl in v:
self.root.exclusions.append(
MavenPom.to_objectify(self.Exclusion(excl)))
else:
setattr(self.root, k, v)
def __repr__(self):
"""Representation of an Dependency object in string."""
return "groupId: {}\nartifactId: {}"\
.format(self.root.groupId, self.root.artifactId)
def __eq__(self, other):
"""Check equality of dependency object.
other: Dependency
Return: boolean
"""
return (self.root.groupId, self.root.artifactId) ==\
(other.root.groupId, other.root.artifactId)
def __ne__(self, other):
"""Check non-equality of Dependency object.
other: Dependency
Return: boolean
"""
return not self.__eq__(other)
def __getitem__(self, key):
"""Return the value for attr key."""
attr = getattr(self.root, key, None)
objectify.deannotate(self.root)
return attr
def __setitem__(self, key, value):
"""Set value for attr key."""
attr = setattr(self.root, key, value)
objectify.deannotate(self.root)
return attr
def __hash__(self):
"""Return hash for String representation of an Dependency object."""
return hash(self.__repr__())
class Exclusion:
"""Exclusion class of outer class MavenPom."""
def __init__(self, exclusion=None):
"""Initialize constructor for Exclusion class.
:returns: None
"""
if exclusion is not None:
if not isinstance(exclusion, objectify.ObjectifiedElement):
self.root = objectify.Element('exclusion')
else:
self.root = exclusion
for k, v in exclusion.items():
setattr(self.root, k, v)
def __eq__(self, other):
"""Check equality of Exclusion object.
other: Exclusion
Return: boolean
"""
return (self.root.groupId, self.root.artifactId) ==\
(other.root.groupId, other.root.artifactId)
def __ne__(self, other):
"""Check non-equality of Exclusion object.
other: Exclusion
Return: boolean
"""
return not self.__eq__(other)
def __getitem__(self, key):
"""Return the value for attr key."""
return getattr(self.root, key, None)
def __setitem__(self, key, value):
"""Set value for attr key."""
return setattr(self.root, key, value)
class Properties:
"""Properties class of outer class MavenPom."""
pass
class Plugin:
"""Plugin class of outer class MavenPom."""
pass
class PypiRequirements:
"""Model for pip requirements.txt."""
def __init__(self):
"""Initialize constructor for PypiRequirements class.
:returns: None
"""
raise NotImplementedError
class NpmPackage:
"""Model for NPM package.json."""
def __init__(self):
"""Initialize constructor for NpmPackage class.
:returns: None
"""
raise NotImplementedError
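# Minimal usage sketch (not part of the original module); the sample POM and the
# dependency coordinates below are illustrative only.
if __name__ == "__main__":
    _sample_pom = "<project><modelVersion>4.0.0</modelVersion><name>demo</name></project>"
    _pom = MavenPom(_sample_pom)
    _pom.add_dependency({'groupId': 'junit', 'artifactId': 'junit', 'version': '4.12'})
    print(MavenPom.tostring(_pom, decoding=True))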
|
py | b40183fc20d65ff719df87448de51c40b9627bc8 | from project.rooms.room import Room
class AloneOld(Room):
room_cost = 10
def __init__(self, name: str, pension: float):
super().__init__(name, pension, 1)
self.room_cost = 10
|
py | b40184b2918e54fde17b97401932878f8feb6e56 | # coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tf_agents.bandits.agents.linalg."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tf_agents.bandits.policies import linalg
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import # TF internal
tfd = tfp.distributions
tf.compat.v1.enable_v2_behavior()
def test_cases():
return parameterized.named_parameters(
{
'testcase_name': '_batch1_contextdim10',
'batch_size': 1,
'context_dim': 10,
}, {
'testcase_name': '_batch4_contextdim5',
'batch_size': 4,
'context_dim': 5,
})
class LinalgTest(tf.test.TestCase, parameterized.TestCase):
@test_cases()
def testAInvUpdate(self, batch_size, context_dim):
a_array = 2 * np.eye(context_dim) + np.array(
range(context_dim * context_dim)).reshape((context_dim, context_dim))
a_array = a_array + a_array.T
a_inv_array = np.linalg.inv(a_array)
x_array = np.array(range(batch_size * context_dim)).reshape(
(batch_size, context_dim))
expected_a_inv_updated_array = np.linalg.inv(
a_array + np.matmul(np.transpose(x_array), x_array))
a_inv = tf.constant(
a_inv_array, dtype=tf.float32, shape=[context_dim, context_dim])
x = tf.constant(x_array, dtype=tf.float32, shape=[batch_size, context_dim])
a_inv_update = linalg.update_inverse(a_inv, x)
self.assertAllClose(expected_a_inv_updated_array,
self.evaluate(a_inv + a_inv_update))
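# Note: the expected value above is the exact inverse of (A + X^T X), so this
# test checks that update_inverse returns the additive correction term; the
# implementation is presumably an application of the Sherman-Morrison-Woodbury
# identity, (A + X^T X)^-1 = A^-1 - A^-1 X^T (I + X A^-1 X^T)^-1 X A^-1.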
@test_cases()
def testAInvUpdateEmptyObservations(self, batch_size, context_dim):
a_array = 2 * np.eye(context_dim) + np.array(
range(context_dim * context_dim)).reshape((context_dim, context_dim))
a_array = a_array + a_array.T
a_inv_array = np.linalg.inv(a_array)
expected_a_inv_update_array = np.zeros([context_dim, context_dim],
dtype=np.float32)
a_inv = tf.constant(
a_inv_array, dtype=tf.float32, shape=[context_dim, context_dim])
x = tf.constant([], dtype=tf.float32, shape=[0, context_dim])
a_inv_update = linalg.update_inverse(a_inv, x)
self.assertAllClose(expected_a_inv_update_array,
self.evaluate(a_inv_update))
def cg_test_cases():
return parameterized.named_parameters(
{
'testcase_name': '_n_1',
'n': 1,
'rhs': 1,
}, {
'testcase_name': '_n_10',
'n': 10,
'rhs': 1,
}, {
'testcase_name': '_n_100',
'n': 100,
'rhs': 5,
})
@test_util.run_all_in_graph_and_eager_modes
class ConjugateGradientTest(tf.test.TestCase, parameterized.TestCase):
@cg_test_cases()
def testConjugateGradientBasic(self, n, rhs):
x_obs = tf.constant(np.random.rand(n, 2), dtype=tf.float32, shape=[n, 2])
a_mat = tf.eye(n) + tf.matmul(x_obs, tf.linalg.matrix_transpose(x_obs))
x_exact = tf.constant(np.random.rand(n), dtype=tf.float32, shape=[n, 1])
b = tf.matmul(a_mat, x_exact)
x_approx = self.evaluate(linalg.conjugate_gradient(a_mat, b))
x_exact_numpy = self.evaluate(x_exact)
self.assertAllClose(x_exact_numpy, x_approx, rtol=1e-4, atol=1e-4)
@cg_test_cases()
def testConjugateGradientMultipleRHS(self, n, rhs):
x_obs = tf.constant(np.random.rand(n, 2), dtype=tf.float32, shape=[n, 2])
a_mat = tf.eye(n) + tf.matmul(x_obs, tf.linalg.matrix_transpose(x_obs))
x_exact = tf.constant(
np.random.rand(n, rhs), dtype=tf.float32, shape=[n, rhs])
b_mat = tf.matmul(a_mat, x_exact)
x_approx = self.evaluate(
linalg.conjugate_gradient_solve(a_mat, b_mat))
x_exact_numpy = self.evaluate(x_exact)
self.assertAllClose(x_exact_numpy, x_approx, rtol=1e-4, atol=1e-4)
@cg_test_cases()
def testConjugateGradientMultipleRHSPlaceholders(self, n, rhs):
# Test the case where a_mat and b_mat are placeholders and they have unknown
# dimension values.
if tf.executing_eagerly():
return
x_obs = tf.constant(np.random.rand(n, 2), dtype=tf.float32, shape=[n, 2])
a_mat = tf.eye(n) + tf.matmul(x_obs, tf.linalg.matrix_transpose(x_obs))
a_mat_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, None))
a_mat_value = self.evaluate(a_mat)
x_exact = tf.constant(
np.random.rand(n, rhs), dtype=tf.float32, shape=[n, rhs])
b_mat = tf.matmul(a_mat, x_exact)
b_mat_ph = tf.compat.v1.placeholder(tf.float32, shape=(None, None))
b_mat_value = self.evaluate(b_mat)
x_exact_numpy = self.evaluate(x_exact)
with self.cached_session() as sess:
x_approx = linalg.conjugate_gradient_solve(a_mat_ph, b_mat_ph)
x_approx_value = sess.run(
x_approx,
feed_dict={a_mat_ph: a_mat_value, b_mat_ph: b_mat_value})
self.assertAllClose(x_exact_numpy, x_approx_value, rtol=1e-4, atol=1e-4)
if __name__ == '__main__':
tf.test.main()
|
py | b40184e45225ede484fcf432ef353d1d69682dfd | import uvicorn
from fastapi import FastAPI
from mealie.core.config import APP_VERSION, settings
from mealie.core.root_logger import get_logger
from mealie.routes import backup_routes, debug_routes, migration_routes, theme_routes, utility_routes
from mealie.routes.about import about_router
from mealie.routes.groups import groups_router
from mealie.routes.mealplans import meal_plan_router
from mealie.routes.media import media_router
from mealie.routes.recipe import recipe_router
from mealie.routes.shopping_list import shopping_list_router
from mealie.routes.site_settings import settings_router
from mealie.routes.users import user_router
from mealie.services.events import create_general_event
logger = get_logger()
app = FastAPI(
title="Mealie",
description="A place for all your recipes",
version=APP_VERSION,
docs_url=settings.DOCS_URL,
redoc_url=settings.REDOC_URL,
)
def start_scheduler():
import mealie.services.scheduler.scheduled_jobs # noqa: F401
def api_routers():
# Authentication
app.include_router(user_router)
app.include_router(groups_router)
app.include_router(shopping_list_router)
# Recipes
app.include_router(recipe_router)
app.include_router(media_router)
app.include_router(about_router)
# Meal Routes
app.include_router(meal_plan_router)
# Settings Routes
app.include_router(settings_router)
app.include_router(theme_routes.public_router)
app.include_router(theme_routes.user_router)
# Backups/Imports Routes
app.include_router(backup_routes.router)
# Migration Routes
app.include_router(migration_routes.router)
# Debug routes
app.include_router(debug_routes.public_router)
app.include_router(debug_routes.admin_router)
# Utility routes
app.include_router(utility_routes.router)
api_routers()
@app.on_event("startup")
def system_startup():
start_scheduler()
logger.info("-----SYSTEM STARTUP----- \n")
logger.info("------APP SETTINGS------")
logger.info(
settings.json(
indent=4,
exclude={
"SECRET",
"DEFAULT_PASSWORD",
"SFTP_PASSWORD",
"SFTP_USERNAME",
"DB_URL", # replace by DB_URL_PUBLIC for logs
"POSTGRES_USER",
"POSTGRES_PASSWORD",
},
)
)
create_general_event("Application Startup", f"Mealie API started on port {settings.API_PORT}")
def main():
uvicorn.run(
"app:app",
host="0.0.0.0",
port=settings.API_PORT,
reload=True,
reload_dirs=["mealie"],
debug=True,
log_level="info",
log_config=None,
workers=1,
forwarded_allow_ips="*",
)
if __name__ == "__main__":
main()
|
py | b40184fc6b4d6a9cff9e0b819e4bad1344f9e8af | # Copyright (c) Facebook, Inc. All Rights Reserved
# -*- coding: utf-8 -*- #
"""*********************************************************************************************"""
# FileName [ upstream/wav2vec/expert.py ]
# Synopsis [ the wav2vec wrapper ]
# Author [ S3PRL ]
# Copyright [ Copyleft(c), Speech Lab, NTU, Taiwan ]
"""*********************************************************************************************"""
import argparse
from packaging import version
import torch
from torch.nn.utils.rnn import pad_sequence
import fairseq
from fairseq.models.wav2vec import Wav2VecModel
from ..interfaces import UpstreamBase
SAMPLE_RATE = 16000
EXAMPLE_SEC = 5
class UpstreamExpert(UpstreamBase):
"""
The wav2vec wrapper
"""
def __init__(self, ckpt, **kwargs):
super().__init__(**kwargs)
if version.parse(fairseq.__version__) > version.parse("0.10.2"):
cp = torch.load(ckpt, map_location=None if torch.cuda.is_available() else torch.device("cpu"))
args = cp["args"]
base_wav2vec_architecture(args)
self.model = Wav2VecModel.build_model(args, task=None)
self.model.load_state_dict(cp["model"])
elif version.parse(fairseq.__version__) == version.parse("0.10.2"):
cp = torch.load(ckpt)
self.model = Wav2VecModel.build_model(cp["args"], task=None)
self.model.load_state_dict(cp["model"])
else:
raise NotImplementedError
if len(self.hooks) == 0:
self.add_hook(
"self.model.feature_extractor",
lambda input, output: output.transpose(1, 2),
)
self.add_hook(
"self.model.feature_aggregator",
lambda input, output: output.transpose(1, 2),
)
module_name = "self.model.feature_aggregator.conv_layers"
for conv_id in range(len(eval(module_name)) - 1):
self.add_hook(
f"{module_name}[{conv_id + 1}]",
lambda input, output: input[0].transpose(1, 2),
)
def get_downsample_rates(self, key: str) -> int:
return 160
def forward(self, wavs):
"""
Code snippet modified from fairseq
"""
result = {}
padded_wav = pad_sequence(wavs, batch_first=True)
features = self.model.feature_extractor(padded_wav)
result["z"] = features.transpose(1, 2).contiguous()
if self.model.vector_quantizer:
q_res = self.model.vector_quantizer(features, produce_targets=True)
result["codewords"] = q_res["x"].transpose(1, 2).contiguous()
result["codeids"] = q_res["targets"]
features = q_res["x"]
x = self.model.dropout_feats(features)
x = self.model.feature_aggregator(x)
result["c"] = x.transpose(1, 2).contiguous()
result["default"] = result["c"]
# The keys "hidden_states" and "last_hidden_state" are handled by UpstreamBase's hooks
return result
def base_wav2vec_architecture(args):
conv_feature_layers = "[(512, 10, 5)]"
conv_feature_layers += " + [(512, 8, 4)]"
conv_feature_layers += " + [(512, 4, 2)] * 3"
args.conv_feature_layers = getattr(args, "conv_feature_layers", conv_feature_layers)
args.conv_aggregator_layers = getattr(
args, "conv_aggregator_layers", "[(512, 3, 1)] * 9"
)
args.prediction_steps = getattr(args, "prediction_steps", 12)
args.num_negatives = getattr(args, "num_negatives", 1)
args.sample_distance = getattr(args, "sample_distance", None)
args.cross_sample_negatives = getattr(args, "cross_sample_negatives", 0)
args.dropout = getattr(args, "dropout", 0.0)
args.dropout_features = getattr(args, "dropout_features", 0.0)
args.dropout_agg = getattr(args, "dropout_agg", 0.0)
args.encoder = getattr(args, "encoder", "cnn")
args.aggregator = getattr(args, "aggregator", "cnn")
args.skip_connections_feat = getattr(args, "skip_connections_feat", False)
args.skip_connections_agg = getattr(args, "skip_connections_agg", False)
args.residual_scale = getattr(args, "residual_scale", 0.5)
args.gru_dim = getattr(args, "gru_dim", 512)
args.no_conv_bias = getattr(args, "no_conv_bias", False)
args.agg_zero_pad = getattr(args, "agg_zero_pad", False)
args.log_compression = getattr(args, "log_compression", False)
args.balanced_classes = getattr(args, "balanced_classes", False)
args.infonce = getattr(args, "infonce", False)
args.project_features = getattr(args, "project_features", "none")
args.non_affine_group_norm = getattr(args, "non_affine_group_norm", False)
args.offset = getattr(args, "offset", "auto")
args.activation = getattr(args, "activation", "relu")
args.vq_type = getattr(args, "vq_type", "none")
args.vq_vars = getattr(args, "vq_vars", 320)
args.vq_groups = getattr(args, "vq_groups", 2)
args.vq_dim = getattr(args, "vq_dim", 0)
args.vq_depth = getattr(args, "vq_depth", 1)
args.combine_groups = getattr(args, "combine_groups", False)
args.vq_temp = getattr(args, "vq_temp", "(2.0, 0.5, 0.999995)")
args.vq_gamma = getattr(args, "vq_gamma", 0.25)
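# Usage sketch (not part of the original wrapper; the checkpoint path is a
# placeholder): the expert takes a list of 1-D waveform tensors and returns a
# dict of framewise features, one frame per 160 input samples (see
# get_downsample_rates above).
#
#   expert = UpstreamExpert("/path/to/wav2vec_large.pt")
#   outputs = expert([torch.randn(SAMPLE_RATE * EXAMPLE_SEC)])
#   features = outputs["default"]  # shape: (batch, frames, channels)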
|
py | b401850c0a5c1b9a22cd0cff16a63d0d1b31f51d | import numpy as np
import torch
import torch.nn.functional as F
import os
from skimage import io
import shutil
def save_proxies(cfg, filename, proxies, label_map):
try:
os.mkdir('../proxies/model_{}'.format(cfg.dataset))
except:
pass
try:
os.mkdir('{}'.format('../proxies'+cfg.resume))
except:
pass
data = {'proxies': proxies, 'label_map': label_map}
torch.save(data,'../proxies'+cfg.resume+'/{}.pth'.format(filename))
def l2_norm(input):
input_size = input.size()
buffer = torch.pow(input, 2)
normp = torch.sum(buffer, 1).add_(1e-12)
norm = torch.sqrt(normp)
_output = torch.div(input, norm.view(-1, 1).expand_as(input))
output = _output.view(input_size)
return output
def calc_recall_at_k(T, Y, k):
"""
T : [nb_samples] (target labels)
Y : [nb_samples x k] (k predicted labels/neighbours)
"""
# print('T.shape',T.shape,'Y.shape',T.shape)
s = 0
for t,y in zip(T,Y):
if t in torch.Tensor(y).long()[:k]:
s += 1
return s / (1. * len(T))
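# Worked example: with T = [0, 1] and Y = [[0, 2], [2, 3]], only the first
# sample has its true label among its top-1 neighbours, so
# calc_recall_at_k(T, Y, 1) == 0.5, and recall@2 is still 0.5 because the
# second sample's label 1 never appears in [2, 3].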
def predict_batchwise(model, dataloader, device):
model_is_training = model.training
model.eval()
ds = dataloader.dataset
A = [[] for i in range(len(ds[0]))]
with torch.no_grad():
# extract batches (A becomes list of samples)
for batch_id, batch in enumerate(dataloader):
for i, J in enumerate(batch):
# i = 0: sz_batch * images
# i = 1: sz_batch * labels
# i = 2: sz_batch * indices
if i == 0:
# move images to device of model (approximate device)
J = J.to(device)
J = model(J) #.cuda())
for j in J:
A[i].append(j)
model.train()
model.train(model_is_training) # revert to previous training state
return [torch.stack(A[i]) for i in range(len(A)) if i!=2]
def proxy_init_calc(model, dataloader):
nb_classes = dataloader.dataset.nb_classes()
X, T, *_ = predict_batchwise(model, dataloader)
proxy_mean = torch.stack([X[T==class_idx].mean(0) for class_idx in range(nb_classes)])
return proxy_mean
def evaluate_cos(model, dataloader, nearest_neighbours, device):
# calculate embeddings with model and get targets
X, T = predict_batchwise(model, dataloader, device)
X = l2_norm(X)
# get predictions by assigning nearest 8 neighbors with cosine
K = nearest_neighbours
Y = []
cos_sim = F.linear(X, X)
Y = T[cos_sim.topk(1 + K)[1][:,1:]]
Y = Y.float().cpu()
recall = []
for k in [1, 2, 4, 8, 16, 32]:
r_at_k = calc_recall_at_k(T, Y, k)
recall.append(r_at_k)
# print("R@{} : {:.3f}".format(k, 100 * r_at_k))
return recall
# def generate_candidate_proxies(dl_cand):
def save_debug_images(cfg, models_dir, dl, mode, range_ = 1):
try:
shutil.rmtree('{}/{}'.format(cfg.debug_images,models_dir.split('/')[-1]))
except:
pass
try:
os.mkdir(cfg.debug_images)
except:
pass
try:
os.mkdir('{}/{}'.format(cfg.debug_images,models_dir.split('/')[-1]))
# print('{}/{}'.format(cfg.debug_images,models_dir.split('/')[-1]))
except:
pass
try:
os.mkdir('{}/{}/{}'.format(cfg.debug_images,models_dir.split('/')[-1],mode))
# print('{}/{}/{}'.format(cfg.debug_images,models_dir.split('/')[-1],mode))
except:
pass
for batch_idx, (x, y, y_str) in enumerate(dl):
if batch_idx>range_:
break
for idx in range(10):
io.imsave('{}/{}/{}/batchid@{}_id@{}_label@{}.png'.format(cfg.debug_images,
models_dir.split('/')[-1],
mode,batch_idx,
idx,y_str[idx]),
(x[idx].permute(1,2,0).numpy()*255).astype(np.uint8)) |
py | b40185268e0a445adfb14d3394e88107d6997757 | print('Tabuada!')
n = 7
for a in range(1, 11):
print(a, 'x', n, '=', a*n)
print('FIM') |
py | b401857a16bb4408e0c8f7988a5ee9fddb199e1c | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayTradeBuyerCreditQueryModel(object):
def __init__(self):
self._buyer_credit_source = None
self._buyer_user_id = None
self._credit_scene = None
self._merchant_credit_source = None
self._merchant_user_id = None
@property
def buyer_credit_source(self):
return self._buyer_credit_source
@buyer_credit_source.setter
def buyer_credit_source(self, value):
self._buyer_credit_source = value
@property
def buyer_user_id(self):
return self._buyer_user_id
@buyer_user_id.setter
def buyer_user_id(self, value):
self._buyer_user_id = value
@property
def credit_scene(self):
return self._credit_scene
@credit_scene.setter
def credit_scene(self, value):
self._credit_scene = value
@property
def merchant_credit_source(self):
return self._merchant_credit_source
@merchant_credit_source.setter
def merchant_credit_source(self, value):
self._merchant_credit_source = value
@property
def merchant_user_id(self):
return self._merchant_user_id
@merchant_user_id.setter
def merchant_user_id(self, value):
self._merchant_user_id = value
def to_alipay_dict(self):
params = dict()
if self.buyer_credit_source:
if hasattr(self.buyer_credit_source, 'to_alipay_dict'):
params['buyer_credit_source'] = self.buyer_credit_source.to_alipay_dict()
else:
params['buyer_credit_source'] = self.buyer_credit_source
if self.buyer_user_id:
if hasattr(self.buyer_user_id, 'to_alipay_dict'):
params['buyer_user_id'] = self.buyer_user_id.to_alipay_dict()
else:
params['buyer_user_id'] = self.buyer_user_id
if self.credit_scene:
if hasattr(self.credit_scene, 'to_alipay_dict'):
params['credit_scene'] = self.credit_scene.to_alipay_dict()
else:
params['credit_scene'] = self.credit_scene
if self.merchant_credit_source:
if hasattr(self.merchant_credit_source, 'to_alipay_dict'):
params['merchant_credit_source'] = self.merchant_credit_source.to_alipay_dict()
else:
params['merchant_credit_source'] = self.merchant_credit_source
if self.merchant_user_id:
if hasattr(self.merchant_user_id, 'to_alipay_dict'):
params['merchant_user_id'] = self.merchant_user_id.to_alipay_dict()
else:
params['merchant_user_id'] = self.merchant_user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayTradeBuyerCreditQueryModel()
if 'buyer_credit_source' in d:
o.buyer_credit_source = d['buyer_credit_source']
if 'buyer_user_id' in d:
o.buyer_user_id = d['buyer_user_id']
if 'credit_scene' in d:
o.credit_scene = d['credit_scene']
if 'merchant_credit_source' in d:
o.merchant_credit_source = d['merchant_credit_source']
if 'merchant_user_id' in d:
o.merchant_user_id = d['merchant_user_id']
return o
|
py | b40185d4e1f19f58de817517f8dfefd462280547 | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : Functional test - tablespace
Case Name : Move an index to a different tablespace so that the table and the index no longer share a tablespace; the index works normally after the change
Description :
1. Create tablespace1 with relative location1 and tablespace2 with relative location2
2. Create a table and an index in tablespace1
3. Insert data into the table created in tablespace1
4. Query the tablespace of the table and the physical location of the table file
5. Query the tablespace of the index and the physical location of the index file
6. Query data
7. Change the tablespace of the index to tablespace2
8. Insert data into the table created in tablespace1
9. Query the tablespace of the table and the physical location of the table file
10. Query the tablespace of the index and the physical location of the index file
11. Query data
Expect :
1. Create tablespace1 with relative location1 and tablespace2 with relative location2: created successfully
2. Create a table and an index in tablespace1: created successfully
3. Insert data into the table created in tablespace1: insert succeeds
4. Query the tablespace of the table and the physical location of the table file: result is correct
5. Query the tablespace of the index and the physical location of the index file: result is correct
6. Query data: the index is used normally
7. Change the tablespace of the index to tablespace2: change succeeds
8. Insert data into the table created in tablespace1: insert succeeds
9. Query the tablespace of the table and the physical location of the table file: result unchanged
10. Query the tablespace of the index and the physical location of the index file: the index tablespace has changed
11. Query data: the index is used normally
History :
"""
import os
import unittest
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
from yat.test import Node
from yat.test import macro
class Tablespace(unittest.TestCase):
def setUp(self):
self.log = Logger()
self.log.info(f'-----{os.path.basename(__file__)} start-----')
self.sh = CommonSH('PrimaryDbUser')
self.pri_root = Node(node='PrimaryRoot')
self.constant = Constant()
self.tbspc_name1 = 'tsp_tbspc0036_1'
self.tbspc_location1 = 'tbspc0036_1'
self.tbspc_name2 = 'tsp_tbspc0036_2'
self.tbspc_location2 = 'tbspc0036_2'
self.table_name = 't_tbspc0036'
self.index_name = 'idx_tbspc0036'
self.create_sql = f"drop table if exists {self.table_name};" \
f"create table {self.table_name} (id int,name varchar(100)) " \
f"tablespace {self.tbspc_name1};" \
f"create index {self.index_name} on {self.table_name}(id) " \
f"tablespace {self.tbspc_name1};"
self.insert_sql = f"insert into {self.table_name} " \
f"select generate_series(1, 100000)," \
f"'name-'||generate_series(1, 100000);" \
f"analyze {self.table_name};"
self.select_sql = f"set enable_indexscan=on;" \
f"set enable_bitmapscan=off;" \
f"explain select * from {self.table_name} " \
f"where id=80000;"
def test_main(self):
step_txt = '----step1:创建tablespace1指定相对路径为location1;' \
'创建tablespace2指定相对路径为location2 expect:创建成功----'
self.log.info(step_txt)
create_sql = f"drop tablespace if exists {self.tbspc_name1}; " \
f"create tablespace {self.tbspc_name1} " \
f"relative location '{self.tbspc_location1}' ;" \
f"drop tablespace if exists {self.tbspc_name2}; " \
f"create tablespace {self.tbspc_name2} " \
f"relative location '{self.tbspc_location2}' ;"
create_result = self.sh.execut_db_sql(create_sql)
self.log.info(create_result)
assert_flag = create_result.splitlines().count(
self.constant.TABLESPCE_CREATE_SUCCESS)
self.assertEqual(assert_flag, 2, "执行失败" + step_txt)
self.log.info('--查询tablespace1 oid--')
select_sql = f"select oid from pg_tablespace where " \
f"spcname = '{self.tbspc_name1}';"
tbspc1_oid = self.sh.execut_db_sql(select_sql).splitlines()[
-2].strip()
self.log.info(tbspc1_oid)
self.log.info('--查询tablespace2 oid--')
select_sql = f"select oid from pg_tablespace where " \
f"spcname = '{self.tbspc_name2}';"
tbspc2_oid = self.sh.execut_db_sql(select_sql).splitlines()[
-2].strip()
self.log.info(tbspc2_oid)
step_txt = '----step2:在tablespace1上创建表及索引; expect:创建成功----'
self.log.info(step_txt)
create_result = self.sh.execut_db_sql(self.create_sql)
self.log.info(create_result)
self.assertIn(self.constant.CREATE_INDEX_SUCCESS_MSG, create_result,
"执行失败" + step_txt)
step_txt = '----step3:在tablespace1上创建的表插入数据; expect:插入成功----'
self.log.info(step_txt)
insert_result = self.sh.execut_db_sql(self.insert_sql)
self.log.info(insert_result)
self.assertIn(self.constant.INSERT_SUCCESS_MSG, insert_result,
"执行失败" + step_txt)
self.assertIn(self.constant.ANALYZE_SUCCESS_MSG, insert_result,
"执行失败" + step_txt)
step_txt = '----step4:查询表对应的表空间及表文件物理位置; expect:查询结果正确----'
self.log.info(step_txt)
self.log.info('--查询pg_class系统表中表对应的tablespace--')
table_info_sql = f"select oid,reltablespace from pg_class where " \
f"relname = '{self.table_name}';"
tmp_result = self.sh.execut_db_sql(table_info_sql).splitlines()[
-2].split('|')
self.log.info(tmp_result)
tb_tbspc_oid = tmp_result[1].strip()
self.assertEqual(tb_tbspc_oid, tbspc1_oid, "执行失败" + step_txt)
self.log.info('--查询表对应的tablespace位置--')
check_tb_1 = self.check_ob_tbspc(self.table_name)
self.log.info('--表文件所在的路径为tablespace1相对路径--')
self.assertIn(self.tbspc_location1, check_tb_1[0],
"执行失败" + step_txt)
step_txt = '----step5:查询索引对应的表空间及索引文件物理位置; expect:查询结果正确----'
self.log.info(step_txt)
self.log.info('--查询pg_class系统表中索引对应的tablespace--')
index_info_sql = f"select oid,reltablespace from pg_class where " \
f"relname = '{self.index_name}';"
tmp_result = self.sh.execut_db_sql(index_info_sql).splitlines()[
-2].split('|')
self.log.info(tmp_result)
idx_tbspc_oid = tmp_result[1].strip()
self.assertEqual(idx_tbspc_oid, tbspc1_oid, "执行失败" + step_txt)
self.log.info('--查询索引对应的tablespace位置--')
check_idx_1 = self.check_ob_tbspc(self.index_name)
self.log.info('--索引文件所在的路径为tablespace1相对路径--')
self.assertIn(self.tbspc_location1, check_idx_1[0],
"执行失败" + step_txt)
step_txt = '----step6:查询数据; expect:正常使用索引----'
self.log.info(step_txt)
select_result = self.sh.execut_db_sql(self.select_sql)
self.log.info(select_result)
self.assertIn('Index Scan using', select_result, "执行失败" + step_txt)
step_txt = '----step7:变更索引的表空间为tablespace2; expect:变更成功----'
self.log.info(step_txt)
alter_sql = f"alter index {self.index_name} " \
f"set tablespace {self.tbspc_name2};"
alter_result = self.sh.execut_db_sql(alter_sql)
self.log.info(alter_result)
self.assertIn(self.constant.ALTER_INDEX_SUCCESS_MSG, alter_result,
"执行失败" + step_txt)
step_txt = '----step8:在tablespace1上创建的表插入数据; expect:插入成功----'
self.log.info(step_txt)
insert_result = self.sh.execut_db_sql(self.insert_sql)
self.log.info(insert_result)
self.assertIn(self.constant.INSERT_SUCCESS_MSG, insert_result,
"执行失败" + step_txt)
self.assertIn(self.constant.ANALYZE_SUCCESS_MSG, insert_result,
"执行失败" + step_txt)
step_txt = '----step9:查询表对应的表空间及表文件物理位置; expect:查询结果不变----'
self.log.info(step_txt)
self.log.info('--查询pg_class系统表中表对应的tablespace--')
tmp_result = self.sh.execut_db_sql(table_info_sql).splitlines()[
-2].split('|')
self.log.info(tmp_result)
tb_tbspc_oid = tmp_result[1].strip()
self.assertEqual(tb_tbspc_oid, tbspc1_oid, "执行失败" + step_txt)
self.log.info('--查询表对应的tablespace位置--')
check_tb_1 = self.check_ob_tbspc(self.table_name)
self.log.info('--表文件所在的路径为tablespace1相对路径--')
self.assertIn(self.tbspc_location1, check_tb_1[0],
"执行失败" + step_txt)
step_txt = '----step10:查询索引对应的表空间及索引文件物理位置; expect:表空间变更----'
self.log.info(step_txt)
self.log.info('--查询pg_class系统表中索引对应的tablespace--')
tmp_result = self.sh.execut_db_sql(index_info_sql).splitlines()[
-2].split('|')
self.log.info(tmp_result)
idx_tbspc_oid = tmp_result[1].strip()
self.assertEqual(idx_tbspc_oid, tbspc2_oid, "执行失败" + step_txt)
self.log.info('--查询索引对应的tablespace位置--')
check_idx_1 = self.check_ob_tbspc(self.index_name)
self.log.info('--索引文件所在的路径为tablespace2相对路径--')
self.assertIn(self.tbspc_location2, check_idx_1[0],
"执行失败" + step_txt)
step_txt = '----step11:查询数据; expect:正常使用索引----'
self.log.info(step_txt)
select_result = self.sh.execut_db_sql(self.select_sql)
self.log.info(select_result)
self.assertIn('Index Scan using', select_result, "执行失败" + step_txt)
def check_ob_tbspc(self, object_name):
"""
:param object_name: name of the database object, e.g. a table or index name
:return: actual file path of the database object and its size on disk
"""
location_sql = f"select pg_relation_filepath(" \
f"(select oid from pg_class where relname = '{object_name}')" \
f"::regclass);"
t_link = self.sh.execut_db_sql(location_sql).splitlines()[-2].strip()
self.log.info('数据库对象文件链接路径:' + t_link)
t_link_dir = os.path.dirname(
os.path.join(macro.DB_INSTANCE_PATH, t_link))
t_file_name = os.path.basename(t_link)
ls_cmd = f'cd $(readlink -f {t_link_dir}) && ' \
f'pwd && ' \
f'ls -al . && ' \
f'du -b {t_file_name}'
self.log.info(ls_cmd)
ls_result = self.pri_root.sh(ls_cmd).result()
self.log.info(ls_result)
self.log.info('--数据库对象文件所在的路径--')
file_location = ls_result.splitlines()[0].strip()
self.log.info(file_location)
rel_file = os.path.join(file_location, t_file_name)
self.log.info(rel_file)
self.log.info('--数据库对象文件所占的大小--')
file_size = ls_result.splitlines()[-1].split()[0].strip()
self.log.info(file_size)
return rel_file, file_size
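# Note: for a relation stored in a non-default tablespace, pg_relation_filepath
# typically returns a relative path of the form
# 'pg_tblspc/<tablespace oid>/<version dir>/<database oid>/<relfilenode>', which
# is why the method above resolves the symlink under the instance directory
# before checking the physical location.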
def tearDown(self):
self.log.info('----this is teardown----')
step1_txt = '----清理表空间及用户; expect:成功----'
self.log.info(step1_txt)
clean_sql = f"drop table if exists {self.table_name};" \
f"drop tablespace if exists {self.tbspc_name1}; " \
f"drop tablespace if exists {self.tbspc_name2};"
clean_result = self.sh.execut_db_sql(clean_sql)
self.log.info(clean_result)
self.log.info(f'-----{os.path.basename(__file__)} end-----')
drop_tbspc = clean_result.count(self.constant.TABLESPCE_DROP_SUCCESS)
self.assertEqual(2, drop_tbspc, "执行失败" + step1_txt)
|
py | b4018955ab0af28af03aa74284bf38fbef5f30d8 | import os
import time
import webbrowser
import tkinter as tk
from tkinter import *
from itertools import permutations
# wrap the full word-finding routine in a function so the GUI callbacks can call it
def WordScrape(userLet):
#define counter and empty lists
filterWords = []
finalList = []
comboPerms = []
allPerms = []
#function to convert list to string
def convert(list):
s = [str(i) for i in list]
res = str(", ".join(s))
return res
#removes duplicates from list
def noRepeats(x):
return list(dict.fromkeys(x))
#function to remove 2 and 1 letter words
def removeShortPerms(wordss):
return [i for i in wordss if len(i) >= 3]
#interpret user input
userLen = len(userLet)
#open words doc and split words into list
with open("everyword.rtf") as f:
words = f.read().split()
words = [x.lower() for x in words]
wordsLen = len(words)
#find all permutations of user input
perms = [''.join(p) for p in permutations(userLet)]
permsLen = len(perms)
print("\nLoading permutations...")
#print("All permutations: ", perms)
#filter out words longer than the user-submitted letters(or too short)
for z in range(0, wordsLen):
if 3 <= len(words[z]) <= userLen:
filterWords.append(words[z])
filterLen = len(filterWords)
#create smaller permutations
def shorterPerms(num):
newPerms = []
for y in range(0, permsLen):
miniPerm = perms[y]
miniPerm = miniPerm[num : : ]
newPerms.append(miniPerm)
return newPerms
#add all perm lists depending on number of letters
if 1 <= userLen <= 3:
allPerms = perms
if 4 <= userLen <= 9:
for i in range(1, userLen):
comboPerms+= list(dict.fromkeys(shorterPerms(i)))
allPerms = perms + comboPerms
allPerms = list(dict.fromkeys(allPerms))
allPerms = removeShortPerms(allPerms)
allPermsLen = len(allPerms)
#find match with perms from word doc
print("Finding matches...\n")
for x in range(0, allPermsLen):
for y in range(0, filterLen):
if allPerms[x] == filterWords[y]:
finalList.append(allPerms[x])
outputList = noRepeats(finalList)
return outputList
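# Illustrative example (assuming everyword.rtf contains the usual dictionary
# entries): WordScrape("tea") returns the words of three or more letters that
# can be formed from those letters, e.g. ['tea', 'eat', 'ate'].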
window = Tk()
window.configure(background='#EEE')
window.title("WordScrape")
window.geometry('325x350')
window.resizable(width=False, height=False)
n = 14
fonty = "Courier"
def submitbtn():
wordys = WordScrape(txt.get())
lbl1.configure(text=wordys, wraplength=280, justify=LEFT)
def clearbtn():
txt.delete(0, END)
txt.insert(0, "")
lbl1.configure(text="")
def tagbtn():
webbrowser.open("http://sahasramesh.com")
#center tkinter window
window.eval('tk::PlaceWindow %s center' % window.winfo_toplevel())
fr = Frame(window, bg='#EEE')
fr.grid(column=0, row=0, padx=(10, 0), pady=(10, 10), sticky=W)
#(0,0) letter prompt text
lbl = Label(fr, text="Enter Letters:", bg='#EEE', font=(fonty, n))
lbl.pack(side=LEFT)
#(1,0) source folder text box
txt = Entry(fr, width=20, bg='#EEE', font=(fonty, n))
txt.pack(side=LEFT)
#(1,1) output label
lbl1 = Label(window, text="", bg='#EEE', font=(fonty, n))
lbl1.grid(column=0, row=1, padx=(10,0), sticky=W)
fr1 = Frame(window, bg='#EEE')
fr1.grid(column=0, row=2, padx=(10, 0), pady=(10, 0), sticky=W)
#(0,2) submit button
btn = Button(fr1, text="Submit", fg="#FF4500", font=(fonty, n), command=submitbtn)
btn.pack(side=LEFT)
lbl2 = Label(fr1, text=" ", bg='#EEE', font=(fonty, n))
lbl2.pack(side=LEFT)
btn1 = Button(fr1, text="Clear", font=(fonty, n), command=clearbtn)
btn1.pack(side=LEFT)
fr2 = Frame(window, bg='#EEE')
fr2.grid(column=0, row=3, padx=(10, 0), pady=(10, 0), sticky=W)
tag = Label(fr2, text="An original project by", fg='#737373', bg='#EEE', font=(fonty, 10))
tag.pack(side=LEFT)
btn2 = Button(fr2, text="Sahas Ramesh", fg="#FF4500", bd=0, activebackground='#EEE', highlightbackground='#EEE', highlightcolor='#EEE', highlightthickness=0, font=(fonty, 10), command=tagbtn)
btn2.pack(side=LEFT)
window.mainloop()
'''
pyinstaller --onefile --windowed --add-binary='/System/Library/Frameworks/Tk.framework/Tk':'tk' --add-binary='/System/Library/Frameworks/Tcl.framework/Tcl':'tcl' word_finder.py
'''
|
py | b401895867a07f3c01422242f20a2c9d20be02b5 | import pytest
from udb_py.common import *
from udb_py.index.udb_hash_multivalued_index import UdbHashMultivaluedIndex
class UdbHashMultivaluedIndexTest(UdbHashMultivaluedIndex):
@property
def index(self):
return self._hash
def test_should_delete():
i = UdbHashMultivaluedIndexTest(['a', 'b', 'c'])
i.insert('123', 123).insert('123', 123).insert('123', 333).delete('123', 123)
assert i.index.get('123', 123) == {333}
def test_should_insert():
i = UdbHashMultivaluedIndexTest(['a', 'b', 'c'])
i.insert('123', 123).insert('123', 123).insert('123', 333)
assert i.index.get('123') == {123, 333}
def test_should_insert_by_schema():
i = UdbHashMultivaluedIndexTest(['a', 'b', 'c'])
i.insert_by_schema({'a': 1, 'b': 2, 'c': 3}, 123)
assert i.index.get(''.join(type_formatter_iter([1, 2, 3]))) == {123}
def test_should_insert_by_schema_with_default_value():
i = UdbHashMultivaluedIndexTest((('a', required), ('b', 1), ('c', required)))
i.insert_by_schema({'a': 1, 'c': 3}, 123)
assert i.index.get(''.join(type_formatter_iter([1, 1, 3]))) == {123}
def test_should_insert_by_schema_with_default_value_as_callable():
i = UdbHashMultivaluedIndexTest((('a', required), ('b', lambda key, values: 1), ('c', required)))
i.insert_by_schema({'a': 1, 'c': 3}, 123)
assert i.index.get(''.join(type_formatter_iter([1, 1, 3]))) == {123}
def test_should_upsert():
i = UdbHashMultivaluedIndexTest(['a', 'b', 'c'])
i.insert('123', 123).insert('123', 123).insert('123', 111).upsert('123', '321', 123)
assert i.index.get('321') == {123}
def test_should_upsert_deleting_old_key():
i = UdbHashMultivaluedIndexTest(['a', 'b', 'c'])
i.insert('123', 123).insert('123', 123).insert('123', 111).upsert('123', '321', 123)
assert i.index.get('123') == {111}
def test_should_search_by_key():
i = UdbHashMultivaluedIndexTest(['a', 'b', 'c'])
i.insert('123', 123).insert('123', 123).insert('123', 333).insert('321', 321).insert('111', 111).insert('333', 333)
assert list(i.search_by_key('123')) == [123, 333]
def test_should_search_by_key_in():
i = UdbHashMultivaluedIndexTest(['a', 'b', 'c'])
i.insert('123', 123).insert('123', 123).insert('123', 333).insert('321', 321).insert('111', 111).insert('333', 333)
assert list(i.search_by_key_in(['123', '111'])) == [123, 333, 111]
|
py | b4018b297b3a3078782581c15902fab98916cf3d | from direct.directnotify import DirectNotifyGlobal
from direct.fsm import StateData
import CogHQLoader
from toontown.toonbase import ToontownGlobals
from direct.gui import DirectGui
from toontown.toonbase import TTLocalizer
from toontown.toon import Toon
from direct.fsm import State
from toontown.coghq import BossbotHQExterior
from toontown.coghq import BossbotHQBossBattle
from toontown.coghq import BossbotOfficeExterior
from toontown.coghq import CountryClubInterior
from panda3d.core import DecalEffect, TextEncoder
import random
# ZoneUtil and StageInterior are referenced below but were not imported; the
# module paths assumed here follow the surrounding toontown package layout:
from toontown.hood import ZoneUtil
from toontown.coghq import StageInterior
aspectSF = 0.7227
class BossbotCogHQLoader(CogHQLoader.CogHQLoader):
notify = DirectNotifyGlobal.directNotify.newCategory('BossbotCogHQLoader')
def __init__(self, hood, parentFSMState, doneEvent):
CogHQLoader.CogHQLoader.__init__(self, hood, parentFSMState, doneEvent)
self.fsm.addState(State.State('countryClubInterior', self.enterCountryClubInterior, self.exitCountryClubInterior, ['quietZone', 'cogHQExterior']))
self.fsm.addState(State.State('golfcourse', self.enterGolfCourse, self.exitGolfCourse, ['quietZone', 'cogHQExterior']))
for stateName in ['start', 'cogHQExterior', 'quietZone']:
state = self.fsm.getStateNamed(stateName)
state.addTransition('countryClubInterior')
state.addTransition('golfcourse')
self.musicFile = random.choice(['phase_12/audio/bgm/Bossbot_Entry_v1.ogg', 'phase_12/audio/bgm/Bossbot_Entry_v2.ogg', 'phase_12/audio/bgm/Bossbot_Entry_v3.ogg'])
self.cogHQExteriorModelPath = 'phase_14/models/neighborhoods/CogGolfCourtyard'
self.cogHQLobbyModelPath = 'phase_12/models/bossbotHQ/CogGolfCourtyard'
self.geom = None
return
def load(self, zoneId):
CogHQLoader.CogHQLoader.load(self, zoneId)
Toon.loadBossbotHQAnims()
def unloadPlaceGeom(self):
if self.geom:
self.geom.removeNode()
self.geom = None
self.stopCollisionDetection()
CogHQLoader.CogHQLoader.unloadPlaceGeom(self)
return
def loadPlaceGeom(self, zoneId):
self.notify.info('loadPlaceGeom: %s' % zoneId)
zoneId = zoneId - zoneId % 100
self.notify.debug('zoneId = %d ToontownGlobals.BossbotHQ=%d' % (zoneId, ToontownGlobals.BossbotHQ))
if zoneId == ToontownGlobals.BossbotHQ:
self.geom = loader.loadModel(self.cogHQExteriorModelPath)
self.geom.find('**/ground').setBin('ground', -10)
self.post = loader.loadModel('phase_6/models/golf/golf_construction_sign')
self.post.reparentTo(self.geom.find('**/sign_post'))
gzLinkTunnel = self.geom.find('**/LinkTunnel1')
gzLinkTunnel.setName('linktunnel_gz_17000_DNARoot')
self.makeSigns()
top = self.geom.find('**/TunnelEntrance')
origin = top.find('**/tunnel_origin')
origin.setH(-33.33)
elif zoneId == ToontownGlobals.BossbotLobby:
if base.config.GetBool('want-qa-regression', 0):
self.notify.info('QA-REGRESSION: COGHQ: Visit BossbotLobby')
self.notify.debug('cogHQLobbyModelPath = %s' % self.cogHQLobbyModelPath)
self.geom = loader.loadModel(self.cogHQLobbyModelPath)
else:
self.notify.warning('loadPlaceGeom: unclassified zone %s' % zoneId)
self.startCollisionDetection()
CogHQLoader.CogHQLoader.loadPlaceGeom(self, zoneId)
def makeSigns(self):
def makeSign(topStr, signStr, textId, scale=TTLocalizer.BCHQLsignText):
top = self.geom.find('**/' + topStr)
sign = top.find('**/' + signStr)
locator = top.find('**/sign_origin')
signText = DirectGui.OnscreenText(text=TextEncoder.upper(TTLocalizer.GlobalStreetNames[textId][(-1)]), font=ToontownGlobals.getSuitFont(), scale=scale, fg=(0, 0, 0, 1), parent=sign)
signText.setPosHpr(locator, 0, -0.1, -0.25, 0, 0, 0)
signText.setDepthWrite(0)
makeSign('Gate_1', 'Sign_3', 10400)
makeSign('Gate_2', 'Sign_6', 10700)
makeSign('TunnelEntrance', 'Sign_2', 1000)
makeSign('Gate_3', 'Sign_3', 10600)
makeSign('Gate_4', 'Sign_4', 10500)
makeSign('GateHouse', 'Sign_5', 10200)
makeSign('Gate_5', 'Sign_3', 10800, scale=0.87)
def unload(self):
CogHQLoader.CogHQLoader.unload(self)
Toon.unloadSellbotHQAnims()
def enterStageInterior(self, requestStatus):
self.placeClass = StageInterior.StageInterior
self.stageId = requestStatus['stageId']
self.enterPlace(requestStatus)
def exitStageInterior(self):
self.exitPlace()
self.placeClass = None
return
def getExteriorPlaceClass(self):
self.notify.debug('getExteriorPlaceClass')
return BossbotHQExterior.BossbotHQExterior
def getBossPlaceClass(self):
self.notify.debug('getBossPlaceClass')
return BossbotHQBossBattle.BossbotHQBossBattle
def enterFactoryExterior(self, requestStatus):
self.placeClass = BossbotOfficeExterior.BossbotOfficeExterior
self.enterPlace(requestStatus)
def exitFactoryExterior(self):
taskMgr.remove('titleText')
self.hood.hideTitleText()
self.exitPlace()
self.placeClass = None
return
def enterCogHQBossBattle(self, requestStatus):
self.notify.debug('BossbotCogHQLoader.enterCogHQBossBattle')
CogHQLoader.CogHQLoader.enterCogHQBossBattle(self, requestStatus)
base.cr.forbidCheesyEffects(1)
def exitCogHQBossBattle(self):
self.notify.debug('BossbotCogHQLoader.exitCogHQBossBattle')
CogHQLoader.CogHQLoader.exitCogHQBossBattle(self)
base.cr.forbidCheesyEffects(0)
def enterCountryClubInterior(self, requestStatus):
self.placeClass = CountryClubInterior.CountryClubInterior
self.notify.info('enterCountryClubInterior, requestStatus=%s' % requestStatus)
self.countryClubId = requestStatus['countryClubId']
self.enterPlace(requestStatus)
def exitCountryClubInterior(self):
self.exitPlace()
self.placeClass = None
del self.countryClubId
return
def enteringARace(self, status):
if not status['where'] == 'golfcourse':
return 0
else:
if ZoneUtil.isDynamicZone(status['zoneId']):
return status['hoodId'] == self.hood.hoodId
return ZoneUtil.getHoodId(status['zoneId']) == self.hood.hoodId
def enteringAGolfCourse(self, status):
if not status['where'] == 'golfcourse':
return 0
else:
if ZoneUtil.isDynamicZone(status['zoneId']):
return status['hoodId'] == self.hood.hoodId
return ZoneUtil.getHoodId(status['zoneId']) == self.hood.hoodId
def enterGolfCourse(self, requestStatus):
if requestStatus.has_key('courseId'):
self.golfCourseId = requestStatus['courseId']
else:
self.golfCourseId = 0
self.accept('raceOver', self.handleRaceOver)
self.accept('leavingGolf', self.handleLeftGolf)
base.transitions.irisOut(t=0.2)
def exitGolfCourse(self):
del self.golfCourseId
def handleRaceOver(self):
print 'you done!!'
def handleLeftGolf(self):
self.loadPlaceGeom(ToontownGlobals.BossbotHQ)
req = {'loader': 'cogHQLoader', 'where': 'cogHQExterior',
'how': 'teleportIn',
'zoneId': self.hood.hoodId,
'hoodId': self.hood.hoodId,
'shardId': None}
self.fsm.request('quietZone', [req])
return
def __riverDamageTick(self, task):
base.localAvatar.b_squish(20, True)
task.delayTime = 1.0
return task.again
def startRiverDamage(self, collision):
taskMgr.add(self.__riverDamageTick, 'oil-river-tick')
def stopRiverDamage(self, collision):
taskMgr.remove('oil-river-tick')
def startCollisionDetection(self):
self.accept('enterouch', self.startRiverDamage)
self.accept('exitouch', self.stopRiverDamage)
def stopCollisionDetection(self):
taskMgr.remove('oil-river-tick')
self.ignore('enterouch')
self.ignore('exitouch') |
py | b4018c922129fb0996d89112763c4b5d3f82a941 | # coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListProcessesRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'instance_id': 'str',
'db_user_id': 'str',
'user': 'str',
'database': 'str',
'offset': 'int',
'limit': 'int',
'x_language': 'str'
}
attribute_map = {
'instance_id': 'instance_id',
'db_user_id': 'db_user_id',
'user': 'user',
'database': 'database',
'offset': 'offset',
'limit': 'limit',
'x_language': 'X-Language'
}
def __init__(self, instance_id=None, db_user_id=None, user=None, database=None, offset=None, limit=None, x_language=None):
"""ListProcessesRequest - a model defined in huaweicloud sdk"""
self._instance_id = None
self._db_user_id = None
self._user = None
self._database = None
self._offset = None
self._limit = None
self._x_language = None
self.discriminator = None
self.instance_id = instance_id
self.db_user_id = db_user_id
if user is not None:
self.user = user
if database is not None:
self.database = database
if offset is not None:
self.offset = offset
if limit is not None:
self.limit = limit
if x_language is not None:
self.x_language = x_language
@property
def instance_id(self):
"""Gets the instance_id of this ListProcessesRequest.
Instance ID
:return: The instance_id of this ListProcessesRequest.
:rtype: str
"""
return self._instance_id
@instance_id.setter
def instance_id(self, instance_id):
"""Sets the instance_id of this ListProcessesRequest.
Instance ID
:param instance_id: The instance_id of this ListProcessesRequest.
:type: str
"""
self._instance_id = instance_id
@property
def db_user_id(self):
"""Gets the db_user_id of this ListProcessesRequest.
Database user ID
:return: The db_user_id of this ListProcessesRequest.
:rtype: str
"""
return self._db_user_id
@db_user_id.setter
def db_user_id(self, db_user_id):
"""Sets the db_user_id of this ListProcessesRequest.
Database user ID
:param db_user_id: The db_user_id of this ListProcessesRequest.
:type: str
"""
self._db_user_id = db_user_id
@property
def user(self):
"""Gets the user of this ListProcessesRequest.
User
:return: The user of this ListProcessesRequest.
:rtype: str
"""
return self._user
@user.setter
def user(self, user):
"""Sets the user of this ListProcessesRequest.
User
:param user: The user of this ListProcessesRequest.
:type: str
"""
self._user = user
@property
def database(self):
"""Gets the database of this ListProcessesRequest.
Database
:return: The database of this ListProcessesRequest.
:rtype: str
"""
return self._database
@database.setter
def database(self, database):
"""Sets the database of this ListProcessesRequest.
Database
:param database: The database of this ListProcessesRequest.
:type: str
"""
self._database = database
@property
def offset(self):
"""Gets the offset of this ListProcessesRequest.
Offset. The query starts after skipping this many records; defaults to 0 (meaning the query starts from the first record). Must be a number and cannot be negative.
:return: The offset of this ListProcessesRequest.
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this ListProcessesRequest.
Offset. The query starts after skipping this many records; defaults to 0 (meaning the query starts from the first record). Must be a number and cannot be negative.
:param offset: The offset of this ListProcessesRequest.
:type: int
"""
self._offset = offset
@property
def limit(self):
"""Gets the limit of this ListProcessesRequest.
Number of records per page. Defaults to 20; the maximum value is 100.
:return: The limit of this ListProcessesRequest.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListProcessesRequest.
Number of records per page. Defaults to 20; the maximum value is 100.
:param limit: The limit of this ListProcessesRequest.
:type: int
"""
self._limit = limit
@property
def x_language(self):
"""Gets the x_language of this ListProcessesRequest.
Language
:return: The x_language of this ListProcessesRequest.
:rtype: str
"""
return self._x_language
@x_language.setter
def x_language(self, x_language):
"""Sets the x_language of this ListProcessesRequest.
Language
:param x_language: The x_language of this ListProcessesRequest.
:type: str
"""
self._x_language = x_language
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListProcessesRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
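# Usage sketch (not part of the generated SDK file; identifiers are placeholders):
#
#   request = ListProcessesRequest(instance_id="<instance_id>",
#                                  db_user_id="<db_user_id>",
#                                  offset=0, limit=20)
#   # the populated request is then passed to the corresponding client call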
|
py | b4018d3430ef66298a70745f3403b92561424b05 | """Javascript Object Signing and Encryption (JOSE).
This package is a Python implementation of the standards developed by
IETF `Javascript Object Signing and Encryption (Active WG)`_, in
particular the following RFCs:
- `JSON Web Algorithms (JWA)`_
- `JSON Web Key (JWK)`_
- `JSON Web Signature (JWS)`_
Originally developed as part of the ACME_ protocol implementation.
.. _`Javascript Object Signing and Encryption (Active WG)`:
https://tools.ietf.org/wg/jose/
.. _`JSON Web Algorithms (JWA)`:
https://datatracker.ietf.org/doc/draft-ietf-jose-json-web-algorithms/
.. _`JSON Web Key (JWK)`:
https://datatracker.ietf.org/doc/draft-ietf-jose-json-web-key/
.. _`JSON Web Signature (JWS)`:
https://datatracker.ietf.org/doc/draft-ietf-jose-json-web-signature/
.. _ACME: https://pypi.python.org/pypi/acme
"""
import sys
import warnings
# flake8: noqa
from josepy.b64 import (
b64decode,
b64encode,
)
from josepy.errors import (
DeserializationError,
SerializationError,
Error,
UnrecognizedTypeError,
)
from josepy.interfaces import JSONDeSerializable
from josepy.json_util import (
Field,
JSONObjectWithFields,
TypedJSONObjectWithFields,
decode_b64jose,
decode_cert,
decode_csr,
decode_hex16,
encode_b64jose,
encode_cert,
encode_csr,
encode_hex16,
)
from josepy.jwa import (
HS256,
HS384,
HS512,
JWASignature,
PS256,
PS384,
PS512,
RS256,
RS384,
RS512,
)
from josepy.jwk import (
JWK,
JWKRSA,
)
from josepy.jws import (
Header,
JWS,
Signature,
)
from josepy.util import (
ComparableX509,
ComparableKey,
ComparableRSAKey,
ImmutableMap,
)
for (major, minor) in [(2, 6), (3, 3)]:
if sys.version_info[:2] == (major, minor):
warnings.warn(
"Python {0}.{1} support will be dropped in the next release of "
"josepy. Please upgrade your Python version.".format(major, minor),
DeprecationWarning,
)
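# Example (not part of the upstream package): the re-exported helpers can be
# used directly, e.g. the URL-safe base64 variant used throughout JOSE
# round-trips cleanly:
#
#   assert b64decode(b64encode(b'payload')) == b'payload'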
|
py | b4018ddb3a20d8aba2451fe0824a8176154d852d | import glob
import tqdm
import subprocess
import os
import shutil
ORIGINAL_DIR = '/Users/max/git/scitech/assets/'
## You can edit as you wish. You need to install all of these first.
convert = "/usr/local/bin/convert"
quality = 100
optimizers = {
"jpgs": ['**/*.jpg', '/usr/local/bin/guetzli --nomemlimit --quality 100 input output'],
"jpges": ['**/*.jpeg', '/usr/local/bin/guetzli --nomemlimit --quality 100 input output'],
"pngs": ['**/*.png', '/usr/local/bin/pngcrush input output'],
}
for filetype, options in optimizers.items():
print(filetype)
glob_opt = options[0]
print(glob_opt)
cli_opt = options[1]
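# Note: cli_opt (the guetzli/pngcrush command template) is read here but never
# executed below; only the ImageMagick `convert` calls that produce the .webp
# and .jp2 copies are actually run.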
files = glob.iglob(ORIGINAL_DIR + glob_opt, recursive=True)
for each_file in files:
if "screenshot-github.jpg" in each_file:
continue
base_filename = each_file.split('.')[0]
print(base_filename)
if not os.path.isfile(base_filename + ".webp"):
subprocess.run(convert + " " + each_file + " -quality "+
str(quality) +" " + str(base_filename) + ".webp", shell=True)
if not os.path.isfile(base_filename + ".jp2"):
subprocess.run(convert + " " + each_file + " -quality "+
str(quality) +" " + str(base_filename) + ".jp2", shell=True)
|
py | b4019101094267c0b1960091becf9fd4a95d331a | from django.conf import settings
TRACK_AJAX_REQUESTS = getattr(settings, 'TRACK_AJAX_REQUESTS', False)
TRACK_ANONYMOUS_USERS = getattr(settings, 'TRACK_ANONYMOUS_USERS', True)
TRACK_PAGEVIEWS = getattr(settings, 'TRACK_PAGEVIEWS', False)
TRACK_IGNORE_URLS = getattr(settings, 'TRACK_IGNORE_URLS', (
r'^(favicon\.ico|robots\.txt)$',
))
TRACK_IGNORE_STATUS_CODES = getattr(settings, 'TRACK_IGNORE_STATUS_CODES', [])
TRACK_USING_GEOIP = getattr(settings, 'TRACK_USING_GEOIP', False)
if hasattr(settings, 'TRACKING_USE_GEOIP'):
raise DeprecationWarning('TRACKING_USE_GEOIP has been renamed to TRACK_USING_GEOIP')
TRACK_REFERER = getattr(settings, 'TRACK_REFERER', False)
TRACK_QUERY_STRING = getattr(settings, 'TRACK_QUERY_STRING', False) |
py | b401914f58fa7e4c910d7c9b1b67b472e339dd38 | # -*- coding: iso-8859-15 -*-
# =============================================================================
# Copyright (c) 2004, 2006 Sean C. Gillies
# Copyright (c) 2005 Nuxeo SARL <http://nuxeo.com>
#
# Authors : Sean Gillies <[email protected]>
# Julien Anguenot <[email protected]>
#
# Contact email: [email protected]
# =============================================================================
"""
API for Web Map Service (WMS) methods and metadata.
Support for version 1.1.1 of the WMS protocol.
"""
from __future__ import (absolute_import, division, print_function)
import cgi
import urllib2
from urllib import urlencode
import warnings
from bcube_owslib.etree import etree
from bcube_owslib.util import openURL, testXMLValue, extract_xml_list, xmltag_split
from bcube_owslib.fgdc import Metadata
from bcube_owslib.iso import MD_Metadata
class ServiceException(Exception):
"""WMS ServiceException
Attributes:
message -- short error message
xml -- full xml error message from server
"""
def __init__(self, message, xml):
self.message = message
self.xml = xml
def __str__(self):
return repr(self.message)
class WebMapService_1_1_1(object):
"""Abstraction for OGC Web Map Service (WMS)
Implements IWebMapService
"""
def __getitem__(self,name):
''' check contents dictionary to allow dict like access to service layers'''
if name in self.__getattribute__('contents').keys():
return self.__getattribute__('contents')[name]
else:
raise KeyError("No content named %s" % name)
def __init__(self, url, version='1.1.1', xml=None,
username=None, password=None, parse_remote_metadata=False
):
"""Initialize."""
self.url = url
self.username = username
self.password = password
self.version = '1.1.1'
self._capabilities = None
# Authentication handled by Reader
reader = WMSCapabilitiesReader(
self.version, url=self.url, un=self.username, pw=self.password
)
if xml: # read from stored xml
self._capabilities = reader.readString(xml)
else: # read from server
self._capabilities = reader.read(self.url)
# avoid building capabilities metadata if the response is a ServiceExceptionReport
se = self._capabilities.find('ServiceException')
if se is not None:
err_message = str(se.text).strip()
raise ServiceException(err_message, xml)
# build metadata objects
self._buildMetadata(parse_remote_metadata)
def _getcapproperty(self):
if not self._capabilities:
reader = WMSCapabilitiesReader(
self.version, url=self.url, un=self.username, pw=self.password
)
self._capabilities = ServiceMetadata(reader.read(self.url))
return self._capabilities
def _buildMetadata(self, parse_remote_metadata=False):
''' set up capabilities metadata objects '''
#serviceIdentification metadata
serviceelem=self._capabilities.find('Service')
self.identification=ServiceIdentification(serviceelem, self.version)
#serviceProvider metadata
self.provider=ServiceProvider(serviceelem)
#serviceOperations metadata
self.operations=[]
for elem in self._capabilities.find('Capability/Request')[:]:
self.operations.append(OperationMetadata(elem))
#serviceContents metadata: our assumption is that services use a top-level
#layer as a metadata organizer, nothing more.
self.contents={}
caps = self._capabilities.find('Capability')
#recursively gather content metadata for all layer elements.
#To the WebMapService.contents store only metadata of named layers.
def gather_layers(parent_elem, parent_metadata):
for index, elem in enumerate(parent_elem.findall('Layer')):
cm = ContentMetadata(elem, parent=parent_metadata, index=index+1, parse_remote_metadata=parse_remote_metadata)
if cm.id:
if cm.id in self.contents:
warnings.warn('Content metadata for layer "%s" already exists. Using child layer' % cm.id)
self.contents[cm.id] = cm
gather_layers(elem, cm)
gather_layers(caps, None)
#exceptions
self.exceptions = [f.text for f \
in self._capabilities.findall('Capability/Exception/Format')]
def items(self):
'''supports dict-like items() access'''
items=[]
for item in self.contents:
items.append((item,self.contents[item]))
return items
def getcapabilities(self):
"""Request and return capabilities document from the WMS as a
file-like object.
NOTE: this is effectively redundant now"""
reader = WMSCapabilitiesReader(
self.version, url=self.url, un=self.username, pw=self.password
)
u = self._open(reader.capabilities_url(self.url))
# check for service exceptions, and return
if u.info().gettype() == 'application/vnd.ogc.se_xml':
se_xml = u.read()
se_tree = etree.fromstring(se_xml)
err_message = str(se_tree.find('ServiceException').text).strip()
raise ServiceException(err_message, se_xml)
return u
def getmap(self, layers=None, styles=None, srs=None, bbox=None,
format=None, size=None, time=None, transparent=False,
bgcolor='#FFFFFF',
exceptions='application/vnd.ogc.se_xml',
method='Get',
**kwargs
):
"""Request and return an image from the WMS as a file-like object.
Parameters
----------
layers : list
List of content layer names.
styles : list
Optional list of named styles, must be the same length as the
layers list.
srs : string
A spatial reference system identifier.
bbox : tuple
(left, bottom, right, top) in srs units.
format : string
Output image format such as 'image/jpeg'.
size : tuple
(width, height) in pixels.
transparent : bool
Optional. Transparent background if True.
bgcolor : string
Optional. Image background color.
method : string
Optional. HTTP DCP method name: Get or Post.
**kwargs : extra arguments
anything else e.g. vendor specific parameters
Example
-------
>>> wms = WebMapService('http://giswebservices.massgis.state.ma.us/geoserver/wms', version='1.1.1')
>>> img = wms.getmap(layers=['massgis:GISDATA.SHORELINES_ARC'],\
styles=[''],\
srs='EPSG:4326',\
bbox=(-70.8, 42, -70, 42.8),\
size=(300, 300),\
format='image/jpeg',\
transparent=True)
        >>> out = open('example.jpg', 'wb')
>>> out.write(img.read())
>>> out.close()
"""
try:
base_url = next((m.get('url') for m in self.getOperationByName('GetMap').methods if m.get('type').lower() == method.lower()))
except StopIteration:
base_url = self.url
request = {'version': self.version, 'request': 'GetMap'}
# check layers and styles
assert len(layers) > 0
request['layers'] = ','.join(layers)
if styles:
assert len(styles) == len(layers)
request['styles'] = ','.join(styles)
else:
request['styles'] = ''
# size
request['width'] = str(size[0])
request['height'] = str(size[1])
request['srs'] = str(srs)
request['bbox'] = ','.join([repr(x) for x in bbox])
request['format'] = str(format)
request['transparent'] = str(transparent).upper()
request['bgcolor'] = '0x' + bgcolor[1:7]
request['exceptions'] = str(exceptions)
if time is not None:
request['time'] = str(time)
if kwargs:
for kw in kwargs:
request[kw]=kwargs[kw]
data = urlencode(request)
u = openURL(base_url, data, method, username = self.username, password = self.password)
# check for service exceptions, and return
if u.info()['Content-Type'] == 'application/vnd.ogc.se_xml':
se_xml = u.read()
se_tree = etree.fromstring(se_xml)
err_message = unicode(se_tree.find('ServiceException').text).strip()
raise ServiceException(err_message, se_xml)
return u
def getServiceXML(self):
xml = None
if self._capabilities is not None:
xml = etree.tostring(self._capabilities)
return xml
def getfeatureinfo(self):
raise NotImplementedError
def getOperationByName(self, name):
"""Return a named content item."""
for item in self.operations:
if item.name == name:
return item
raise KeyError("No operation named %s" % name)
class ServiceIdentification(object):
''' Implements IServiceIdentificationMetadata '''
def __init__(self, infoset, version):
self._root = infoset
self.type = testXMLValue(self._root.find('Name'))
self.version = version
self.title = testXMLValue(self._root.find('Title'))
self.abstract = testXMLValue(self._root.find('Abstract'))
self.keywords = extract_xml_list(self._root.findall('KeywordList/Keyword'))
self.accessconstraints = testXMLValue(self._root.find('AccessConstraints'))
self.fees = testXMLValue(self._root.find('Fees'))
class ServiceProvider(object):
''' Implements IServiceProviderMetatdata '''
def __init__(self, infoset):
self._root = infoset
name = self._root.find('ContactInformation/ContactPersonPrimary/ContactOrganization')
if name is not None:
self.name = name.text
else:
self.name = None
self.url = self._root.find('OnlineResource').attrib.get('{http://www.w3.org/1999/xlink}href', '')
# contact metadata
contact = self._root.find('ContactInformation')
# sometimes there is a contact block that is empty, so make
# sure there are children to parse
if contact is not None and contact[:] != []:
self.contact = ContactMetadata(contact)
else:
self.contact = None
def getContentByName(self, name):
"""Return a named content item."""
for item in self.contents:
if item.name == name:
return item
raise KeyError("No content named %s" % name)
def getOperationByName(self, name):
"""Return a named content item."""
for item in self.operations:
if item.name == name:
return item
class ContentMetadata:
"""
Abstraction for WMS layer metadata.
Implements IContentMetadata.
"""
def __init__(self, elem, parent=None, index=0, parse_remote_metadata=False, timeout=30):
if elem.tag != 'Layer':
raise ValueError('%s should be a Layer' % (elem,))
self.parent = parent
if parent:
self.index = "%s.%d" % (parent.index, index)
else:
self.index = str(index)
self.id = self.name = testXMLValue(elem.find('Name'))
# layer attributes
self.queryable = int(elem.attrib.get('queryable', 0))
self.cascaded = int(elem.attrib.get('cascaded', 0))
self.opaque = int(elem.attrib.get('opaque', 0))
self.noSubsets = int(elem.attrib.get('noSubsets', 0))
self.fixedWidth = int(elem.attrib.get('fixedWidth', 0))
self.fixedHeight = int(elem.attrib.get('fixedHeight', 0))
# title is mandatory property
self.title = None
title = testXMLValue(elem.find('Title'))
if title is not None:
self.title = title.strip()
self.abstract = testXMLValue(elem.find('Abstract'))
# bboxes
boxes = elem.findall('BoundingBox')
self.boundingBoxes = []
for b in boxes:
try:
# sometimes the SRS attribute is (wrongly) not provided
srs = b.attrib['SRS']
except KeyError:
srs = None
self.boundingBoxes.append((
float(b.attrib['minx']),
float(b.attrib['miny']),
float(b.attrib['maxx']),
float(b.attrib['maxy']),
srs,
))
if self.parent:
if hasattr(self.parent, 'boundingBox'):
self.boundingBoxes.append(self.parent.boundingBox)
# ScaleHint
sh = elem.find('ScaleHint')
self.scaleHint = None
if sh is not None:
if 'min' in sh.attrib and 'max' in sh.attrib:
self.scaleHint = {'min': sh.attrib['min'], 'max': sh.attrib['max']}
attribution = elem.find('Attribution')
self.attribution = {}
if attribution is not None:
title = attribution.find('Title')
url = attribution.find('OnlineResource')
logo = attribution.find('LogoURL')
if title is not None:
self.attribution['title'] = title.text
if url is not None:
self.attribution['url'] = url.attrib['{http://www.w3.org/1999/xlink}href']
if logo is not None:
self.attribution['logo_size'] = (
int(logo.attrib['width']), int(logo.attrib['height'])
)
self.attribution['logo_url'] = logo.find(
'OnlineResource'
).attrib['{http://www.w3.org/1999/xlink}href']
b = elem.find('LatLonBoundingBox')
if b is not None:
self.boundingBoxWGS84 = (
float(b.attrib['minx']),
float(b.attrib['miny']),
float(b.attrib['maxx']),
float(b.attrib['maxy']),
)
elif self.parent:
self.boundingBoxWGS84 = self.parent.boundingBoxWGS84
else:
self.boundingBoxWGS84 = None
# SRS options
self.crsOptions = []
# Copy any parent SRS options (they are inheritable properties)
if self.parent:
self.crsOptions = list(self.parent.crsOptions)
# Look for SRS option attached to this layer
if elem.find('SRS') is not None:
# some servers found in the wild use a single SRS
# tag containing a whitespace separated list of SRIDs
# instead of several SRS tags. hence the inner loop
for srslist in map(lambda x: x.text, elem.findall('SRS')):
if srslist:
for srs in srslist.split():
self.crsOptions.append(srs)
# Get rid of duplicate entries
self.crsOptions = list(set(self.crsOptions))
#Set self.crsOptions to None if the layer (and parents) had no SRS options
if len(self.crsOptions) == 0:
#raise ValueError('%s no SRS available!?' % (elem,))
#Comment by D Lowe.
#Do not raise ValueError as it is possible that a layer is purely a parent layer and does not have SRS specified. Instead set crsOptions to None
# Comment by Jachym:
# Do not set it to None, but to [], which will make the code
# work further. Fixed by anthonybaxter
self.crsOptions=[]
#Styles
self.styles = {}
#Copy any parent styles (they are inheritable properties)
if self.parent:
self.styles = self.parent.styles.copy()
#Get the styles for this layer (items with the same name are replaced)
for s in elem.findall('Style'):
name = s.find('Name')
title = s.find('Title')
if name is None or title is None:
raise ValueError('%s missing name or title' % (s,))
style = { 'title' : title.text }
# legend url
legend = s.find('LegendURL/OnlineResource')
if legend is not None:
style['legend'] = legend.attrib['{http://www.w3.org/1999/xlink}href']
self.styles[name.text] = style
# keywords
self.keywords = [f.text for f in elem.findall('KeywordList/Keyword')]
# timepositions - times for which data is available.
self.timepositions=None
self.defaulttimeposition = None
for extent in elem.findall('Extent'):
if extent.attrib.get("name").lower() =='time':
if extent.text:
self.timepositions=extent.text.split(',')
self.defaulttimeposition = extent.attrib.get("default")
break
# Elevations - available vertical levels
self.elevations=None
for extent in elem.findall('Extent'):
if extent.attrib.get("name").lower() =='elevation':
if extent.text:
self.elevations=extent.text.split(',')
break
# MetadataURLs
self.metadataUrls = []
for m in elem.findall('MetadataURL'):
metadataUrl = {
'type': testXMLValue(m.attrib['type'], attrib=True),
'format': testXMLValue(m.find('Format')),
'url': testXMLValue(m.find('OnlineResource').attrib['{http://www.w3.org/1999/xlink}href'], attrib=True)
}
if metadataUrl['url'] is not None and parse_remote_metadata: # download URL
try:
content = urllib2.urlopen(metadataUrl['url'], timeout=timeout)
doc = etree.parse(content)
if metadataUrl['type'] is not None:
if metadataUrl['type'] == 'FGDC':
metadataUrl['metadata'] = Metadata(doc)
if metadataUrl['type'] == 'TC211':
metadataUrl['metadata'] = MD_Metadata(doc)
except Exception:
metadataUrl['metadata'] = None
self.metadataUrls.append(metadataUrl)
# DataURLs
self.dataUrls = []
for m in elem.findall('DataURL'):
dataUrl = {
'format': m.find('Format').text.strip(),
'url': m.find('OnlineResource').attrib['{http://www.w3.org/1999/xlink}href']
}
self.dataUrls.append(dataUrl)
self.layers = []
for child in elem.findall('Layer'):
self.layers.append(ContentMetadata(child, self))
def __str__(self):
return 'Layer Name: %s Title: %s' % (self.name, self.title)
class OperationMetadata:
"""Abstraction for WMS OperationMetadata.
Implements IOperationMetadata.
"""
def __init__(self, elem):
"""."""
self.name = xmltag_split(elem.tag)
# formatOptions
self.formatOptions = [f.text for f in elem.findall('Format')]
self.methods = []
for verb in elem.findall('DCPType/HTTP/*'):
url = verb.find('OnlineResource').attrib['{http://www.w3.org/1999/xlink}href']
self.methods.append({'type' : xmltag_split(verb.tag), 'url': url})
class ContactMetadata:
"""Abstraction for contact details advertised in GetCapabilities.
"""
def __init__(self, elem):
name = elem.find('ContactPersonPrimary/ContactPerson')
if name is not None:
self.name=name.text
else:
self.name=None
email = elem.find('ContactElectronicMailAddress')
if email is not None:
self.email=email.text
else:
self.email=None
self.address = self.city = self.region = None
self.postcode = self.country = None
address = elem.find('ContactAddress')
if address is not None:
street = address.find('Address')
if street is not None: self.address = street.text
city = address.find('City')
if city is not None: self.city = city.text
region = address.find('StateOrProvince')
if region is not None: self.region = region.text
postcode = address.find('PostCode')
if postcode is not None: self.postcode = postcode.text
country = address.find('Country')
if country is not None: self.country = country.text
organization = elem.find('ContactPersonPrimary/ContactOrganization')
if organization is not None: self.organization = organization.text
else:self.organization = None
position = elem.find('ContactPosition')
if position is not None: self.position = position.text
else: self.position = None
class WMSCapabilitiesReader:
"""Read and parse capabilities document into a lxml.etree infoset
"""
def __init__(self, version='1.1.1', url=None, un=None, pw=None):
"""Initialize"""
self.version = version
self._infoset = None
self.url = url
self.username = un
self.password = pw
#if self.username and self.password:
## Provide login information in order to use the WMS server
## Create an OpenerDirector with support for Basic HTTP
## Authentication...
#passman = HTTPPasswordMgrWithDefaultRealm()
#passman.add_password(None, self.url, self.username, self.password)
#auth_handler = HTTPBasicAuthHandler(passman)
#opener = build_opener(auth_handler)
#self._open = opener.open
def capabilities_url(self, service_url):
"""Return a capabilities url
"""
qs = []
if service_url.find('?') != -1:
qs = cgi.parse_qsl(service_url.split('?')[1])
params = [x[0] for x in qs]
if 'service' not in params:
qs.append(('service', 'WMS'))
if 'request' not in params:
qs.append(('request', 'GetCapabilities'))
if 'version' not in params:
qs.append(('version', self.version))
urlqs = urlencode(tuple(qs))
return service_url.split('?')[0] + '?' + urlqs
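    # Worked example (illustrative): for
    # service_url = 'http://example.com/wms?map=vmap0', the existing 'map'
    # pair is kept and the missing pairs are appended, giving roughly
    # 'http://example.com/wms?map=vmap0&service=WMS&request=GetCapabilities&version=1.1.1'.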
def read(self, service_url):
"""Get and parse a WMS capabilities document, returning an
elementtree instance
service_url is the base url, to which is appended the service,
version, and request parameters
"""
getcaprequest = self.capabilities_url(service_url)
#now split it up again to use the generic openURL function...
spliturl=getcaprequest.split('?')
u = openURL(spliturl[0], spliturl[1], method='Get', username = self.username, password = self.password)
return etree.fromstring(u.read())
def readString(self, st):
"""Parse a WMS capabilities document, returning an elementtree instance
string should be an XML capabilities document
"""
if not isinstance(st, str):
raise ValueError("String must be of type string, not %s" % type(st))
return etree.fromstring(st) |
py | b40192142c8541ac53409cfe849195564c5b741d | #!/usr/bin/env python
"""
demo - standalone script
This is a standalone script that cannot be unit tested: It just executes.
"""
def say_hello(name):
print('hello, {}.'.format(name))
def say_goodbye(name):
print('goodbye, {}.'.format(name))
say_hello('mark rosewater')
say_goodbye('john finkel')
|
py | b40192a08cf1499872377607928286d1c929b238 | import _plotly_utils.basevalidators
class MarkerValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="marker", parent_name="scattergeo.unselected", **kwargs
):
super(MarkerValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Marker"),
data_docs=kwargs.pop(
"data_docs",
"""
color
Sets the marker color of unselected points,
applied only when a selection exists.
opacity
Sets the marker opacity of unselected points,
applied only when a selection exists.
size
Sets the marker size of unselected points,
applied only when a selection exists.
""",
),
**kwargs
)
|
py | b401936f34c39b249b925886f65a584c221b099c | """*****************************************************************************
* Copyright (C) 2019 Microchip Technology Inc. and its subsidiaries.
*
* Subject to your compliance with these terms, you may use Microchip software
* and any derivatives exclusively with Microchip products. It is your
* responsibility to comply with third party license terms applicable to your
* use of third party software (including open source software) that may
* accompany Microchip software.
*
* THIS SOFTWARE IS SUPPLIED BY MICROCHIP "AS IS". NO WARRANTIES, WHETHER
* EXPRESS, IMPLIED OR STATUTORY, APPLY TO THIS SOFTWARE, INCLUDING ANY IMPLIED
* WARRANTIES OF NON-INFRINGEMENT, MERCHANTABILITY, AND FITNESS FOR A
* PARTICULAR PURPOSE.
*
* IN NO EVENT WILL MICROCHIP BE LIABLE FOR ANY INDIRECT, SPECIAL, PUNITIVE,
* INCIDENTAL OR CONSEQUENTIAL LOSS, DAMAGE, COST OR EXPENSE OF ANY KIND
* WHATSOEVER RELATED TO THE SOFTWARE, HOWEVER CAUSED, EVEN IF MICROCHIP HAS
* BEEN ADVISED OF THE POSSIBILITY OR THE DAMAGES ARE FORESEEABLE. TO THE
* FULLEST EXTENT ALLOWED BY LAW, MICROCHIP'S TOTAL LIABILITY ON ALL CLAIMS IN
* ANY WAY RELATED TO THIS SOFTWARE WILL NOT EXCEED THE AMOUNT OF FEES, IF ANY,
* THAT YOU HAVE PAID DIRECTLY TO MICROCHIP FOR THIS SOFTWARE.
*****************************************************************************"""
###################################################################################################
#################################### Global Variables #############################################
###################################################################################################
global interruptsChildren
interruptsChildren = ATDF.getNode('/avr-tools-device-file/devices/device/interrupts').getChildren()
###################################################################################################
######################################### Functions ###############################################
###################################################################################################
def getIRQnumber(string):
for param in interruptsChildren:
name = param.getAttribute("name")
if string == name:
irq_index = param.getAttribute("index")
break
return irq_index
def _get_enblReg_parms(vectorNumber):
    # Takes the vector index of an interrupt and returns the name of the
    # corresponding IECx interrupt-enable register (only the register name is
    # returned; no mask or bit position).
index = int(vectorNumber / 32)
regName = "IEC" + str(index)
return regName
def _get_statReg_parms(vectorNumber):
    # Takes the vector index of an interrupt and returns the name of the
    # corresponding IFSx interrupt-flag register (only the register name is
    # returned; no mask or bit position).
index = int(vectorNumber / 32)
regName = "IFS" + str(index)
return regName
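# Worked example for the two helpers above: a vector number of 41 gives
# index int(41 / 32) == 1, so the enable register is "IEC1" and the status
# register is "IFS1".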
def setI2CInterruptData(status):
for id in InterruptVector:
Database.setSymbolValue("core", id, status, 1)
for id in InterruptHandlerLock:
Database.setSymbolValue("core", id, status, 1)
for id in InterruptHandler:
interruptName = id.split("_INTERRUPT_HANDLER")[0]
if status == True:
Database.setSymbolValue("core", id, interruptName + "_InterruptHandler", 1)
else:
Database.setSymbolValue("core", id, interruptName + "_Handler", 1)
###################################################################################################
########################################## Callbacks #############################################
###################################################################################################
def updateI2CInterruptData(symbol, event):
status = False
for id in InterruptVectorUpdate:
id = id.replace("core.", "")
if Database.getSymbolValue("core", id) == True:
status = True
break
if status == True:
symbol.setVisible(True)
else:
symbol.setVisible(False)
# Calculates BRG value
def baudRateCalc(clk, baud):
    # Equation from the FRM:
    # I2CxBRG = [PBCLK/(2*FSCK) - (PBCLK*TPGD)/2] - 1
    # where TPGD is the pulse gobbler delay; the constant used below is 104 ns.
I2CxBRG = (clk / (2 * baud) - (clk * 0.000000104) / 2) - 1
if I2CxBRG >= 3 and I2CxBRG < 65536:
i2cmSym_BaudError_Comment.setVisible(False)
else:
i2cmSym_BaudError_Comment.setVisible(True)
if I2CxBRG < 3:
I2CxBRG = 3
return int(I2CxBRG)
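# Worked example for baudRateCalc() with hypothetical values: clk = 100000000
# (100 MHz peripheral clock) and baud = 400000 give
# I2CxBRG = 100e6/(2*400e3) - (100e6*0.000000104)/2 - 1 = 125 - 5.2 - 1 = 118.8,
# so the function returns 118, which is inside the valid 3..65535 range.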
def baudRateTrigger(symbol, event):
clk = int(Database.getSymbolValue("core", i2cInstanceName.getValue() + "_CLOCK_FREQUENCY"))
baud = int(i2cSym_BAUD.getValue())
brgVal = baudRateCalc(clk, baud)
symbol.setValue(brgVal, 2)
def i2cSourceFreq(symbol, event):
symbol.setValue(int(Database.getSymbolValue("core", i2cInstanceName.getValue() + "_CLOCK_FREQUENCY")), 2)
def updateI2CClockWarningStatus(symbol, event):
symbol.setVisible(not event["value"])
###################################################################################################
########################################## Component #############################################
###################################################################################################
def instantiateComponent(i2cComponent):
global i2cInstanceName
global InterruptVector
global InterruptHandlerLock
global InterruptHandler
global InterruptVectorUpdate
global i2cSym_BAUD
InterruptVector = []
InterruptHandler = []
InterruptHandlerLock = []
InterruptVectorUpdate = []
i2cInstanceName = i2cComponent.createStringSymbol("I2C_INSTANCE_NAME", None)
i2cInstanceName.setVisible(False)
i2cInstanceName.setDefaultValue(i2cComponent.getID().upper())
#Clock enable
Database.setSymbolValue("core", i2cInstanceName.getValue() + "_CLOCK_ENABLE", True, 1)
## I2C Clock Frequency
i2cSym_ClkValue = i2cComponent.createIntegerSymbol("I2C_CLOCK_FREQ", None)
i2cSym_ClkValue.setLabel("I2C Clock Frequency")
i2cSym_ClkValue.setMin(0)
i2cSym_ClkValue.setReadOnly(True)
i2cSym_ClkValue.setVisible(False)
i2cSym_ClkValue.setDefaultValue(int(Database.getSymbolValue("core", i2cInstanceName.getValue() + "_CLOCK_FREQUENCY")))
i2cSym_ClkValue.setDependencies(i2cSourceFreq, ["core." + i2cInstanceName.getValue() + "_CLOCK_FREQUENCY"])
#DISSLW: Slew Rate Control Disable bit
i2cSym_SlewRateControl = i2cComponent.createBooleanSymbol("I2C_DISSLW", None)
i2cSym_SlewRateControl.setLabel("Disable Slew Rate Control")
#SMEN: SMBus Input Levels bit
i2cSym_SMBusInputLevels = i2cComponent.createBooleanSymbol("I2C_SMEN", None)
i2cSym_SMBusInputLevels.setLabel("SMBus Input Levels")
#SIDL: Stop in Idle Mode bit
i2cSym_StopInIdleMode = i2cComponent.createBooleanSymbol("I2C_SIDL", None)
i2cSym_StopInIdleMode.setLabel("Stop in Idle Mode bit")
#Baud Rate
i2cSym_BAUD = i2cComponent.createLongSymbol("I2C_CLOCK_SPEED", None)
i2cSym_BAUD.setLabel("I2C Baud Rate (Hz)")
i2cSym_BAUD.setDefaultValue(50000)
i2cSym_BAUD.setMin(1)
i2cSym_BAUD.setMax(1000000)
#I2C Baud Rate not supported comment
global i2cmSym_BaudError_Comment
i2cmSym_BaudError_Comment = i2cComponent.createCommentSymbol("I2C_BAUD_ERROR_COMMENT", None)
i2cmSym_BaudError_Comment.setLabel("********** WARNING!: Baud Rate is out of range **********")
i2cmSym_BaudError_Comment.setVisible(False)
## Baud Rate Frequency dependency
i2cSym_BRGValue = i2cComponent.createIntegerSymbol("BRG_VALUE", None)
i2cSym_BRGValue.setVisible(False)
i2cSym_BRGValue.setDependencies(baudRateTrigger, ["I2C_CLOCK_SPEED", "core." + i2cInstanceName.getValue() + "_CLOCK_FREQUENCY"])
#Use setValue instead of setDefaultValue to store symbol value in default.xml
i2cSym_BRGValue.setValue(baudRateCalc(i2cSym_ClkValue.getValue(), i2cSym_BAUD.getValue()) , 1)
## Master Interrupt Setup
i2cMasterInt = i2cInstanceName.getValue() + "_MASTER"
InterruptVector.append(i2cMasterInt + "_INTERRUPT_ENABLE")
InterruptHandler.append(i2cMasterInt + "_INTERRUPT_HANDLER")
InterruptHandlerLock.append(i2cMasterInt + "_INTERRUPT_HANDLER_LOCK")
InterruptVectorUpdate.append("core." + i2cMasterInt + "_INTERRUPT_ENABLE_UPDATE")
MasterVectorNum = int(getIRQnumber(i2cMasterInt))
enblRegName = _get_enblReg_parms(MasterVectorNum)
statRegName = _get_statReg_parms(MasterVectorNum)
#IEC REG
i2cMasterIntIEC = i2cComponent.createStringSymbol("I2C_MASTER_IEC_REG", None)
i2cMasterIntIEC.setDefaultValue(enblRegName)
i2cMasterIntIEC.setVisible(False)
#IFS REG
i2cMasterIntIFS = i2cComponent.createStringSymbol("I2C_MASTER_IFS_REG", None)
i2cMasterIntIFS.setDefaultValue(statRegName)
i2cMasterIntIFS.setVisible(False)
## Slave Interrupt Setup
i2cSlaveInt = i2cInstanceName.getValue() + "_SLAVE"
SlaveVectorNum = int(getIRQnumber(i2cSlaveInt))
enblRegName = _get_enblReg_parms(SlaveVectorNum)
statRegName = _get_statReg_parms(SlaveVectorNum)
#IEC REG
i2cSlaveIntIEC = i2cComponent.createStringSymbol("I2C_SLAVE_IEC_REG", None)
i2cSlaveIntIEC.setDefaultValue(enblRegName)
i2cSlaveIntIEC.setVisible(False)
#IFS REG
i2cSlaveIntIFS = i2cComponent.createStringSymbol("I2C_SLAVE_IFS_REG", None)
i2cSlaveIntIFS.setDefaultValue(statRegName)
i2cSlaveIntIFS.setVisible(False)
## Bus Error Interrupt Setup
i2cBusInt = i2cInstanceName.getValue() + "_BUS"
InterruptVector.append(i2cBusInt + "_INTERRUPT_ENABLE")
InterruptHandler.append(i2cBusInt + "_INTERRUPT_HANDLER")
InterruptHandlerLock.append(i2cBusInt + "_INTERRUPT_HANDLER_LOCK")
InterruptVectorUpdate.append("core." + i2cBusInt + "_INTERRUPT_ENABLE_UPDATE")
BusVectorNum = int(getIRQnumber(i2cBusInt))
enblRegName = _get_enblReg_parms(BusVectorNum)
statRegName = _get_statReg_parms(BusVectorNum)
#IEC REG
i2cBusIntIEC = i2cComponent.createStringSymbol("I2C_BUS_IEC_REG", None)
i2cBusIntIEC.setDefaultValue(enblRegName)
i2cBusIntIEC.setVisible(False)
#IFS REG
i2cBusIntIFS = i2cComponent.createStringSymbol("I2C_BUS_IFS_REG", None)
i2cBusIntIFS.setDefaultValue(statRegName)
i2cBusIntIFS.setVisible(False)
# Clock Warning status
i2cSym_ClkEnComment = i2cComponent.createCommentSymbol("I2C_CLOCK_ENABLE_COMMENT", None)
i2cSym_ClkEnComment.setLabel("Warning!!! " + i2cInstanceName.getValue() + " Peripheral Clock is Disabled in Clock Manager")
i2cSym_ClkEnComment.setVisible(False)
i2cSym_ClkEnComment.setDependencies(updateI2CClockWarningStatus, ["core." + i2cInstanceName.getValue() + "_CLOCK_ENABLE"])
############################################################################
#### Dependency ####
############################################################################
## EVIC Interrupt Dynamic settings
setI2CInterruptData(True)
i2cSymIntEnComment = i2cComponent.createCommentSymbol("I2C_INTRRUPT_ENABLE_COMMENT", None)
i2cSymIntEnComment.setLabel("Warning!!! " + i2cInstanceName.getValue() + " Interrupt is Disabled in Interrupt Manager")
i2cSymIntEnComment.setVisible(False)
i2cSymIntEnComment.setDependencies(updateI2CInterruptData, InterruptVectorUpdate)
###################################################################################################
####################################### Driver Symbols ############################################
###################################################################################################
#I2C API Prefix
i2cSym_API_Prefix = i2cComponent.createStringSymbol("I2C_PLIB_API_PREFIX", None)
i2cSym_API_Prefix.setDefaultValue(i2cInstanceName.getValue())
i2cSym_API_Prefix.setVisible(False)
###################################################################################################
####################################### Code Generation ##########################################
###################################################################################################
configName = Variables.get("__CONFIGURATION_NAME")
i2cHeaderFile = i2cComponent.createFileSymbol("I2C_HEADER", None)
i2cHeaderFile.setSourcePath("../peripheral/i2c_01441/templates/plib_i2c.h.ftl")
i2cHeaderFile.setOutputName("plib_" + i2cInstanceName.getValue().lower() + ".h")
i2cHeaderFile.setDestPath("peripheral/i2c/")
i2cHeaderFile.setProjectPath("config/" + configName +"/peripheral/i2c/")
i2cHeaderFile.setType("HEADER")
i2cHeaderFile.setMarkup(True)
i2cGlobalHeaderFile = i2cComponent.createFileSymbol("I2C_GLOBALHEADER", None)
i2cGlobalHeaderFile.setSourcePath("../peripheral/i2c_01441/plib_i2c_master.h")
i2cGlobalHeaderFile.setOutputName("plib_i2c_master.h")
i2cGlobalHeaderFile.setDestPath("peripheral/i2c/")
i2cGlobalHeaderFile.setProjectPath("config/" + configName +"/peripheral/i2c/")
i2cGlobalHeaderFile.setType("HEADER")
i2cSource1File = i2cComponent.createFileSymbol("I2C_SOURCE", None)
i2cSource1File.setSourcePath("../peripheral/i2c_01441/templates/plib_i2c.c.ftl")
i2cSource1File.setOutputName("plib_" + i2cInstanceName.getValue().lower() + ".c")
i2cSource1File.setDestPath("peripheral/i2c/")
i2cSource1File.setProjectPath("config/" + configName +"/peripheral/i2c/")
i2cSource1File.setType("SOURCE")
i2cSource1File.setMarkup(True)
i2cSystemInitFile = i2cComponent.createFileSymbol("I2C_INIT", None)
i2cSystemInitFile.setType("STRING")
i2cSystemInitFile.setOutputName("core.LIST_SYSTEM_INIT_C_SYS_INITIALIZE_PERIPHERALS")
i2cSystemInitFile.setSourcePath("../peripheral/i2c_01441/templates/system/initialization.c.ftl")
i2cSystemInitFile.setMarkup(True)
i2cSystemDefFile = i2cComponent.createFileSymbol("I2C_DEF", None)
i2cSystemDefFile.setType("STRING")
i2cSystemDefFile.setOutputName("core.LIST_SYSTEM_DEFINITIONS_H_INCLUDES")
i2cSystemDefFile.setSourcePath("../peripheral/i2c_01441/templates/system/definitions.h.ftl")
i2cSystemDefFile.setMarkup(True)
|
py | b40193f5a14edaca3329a9d115cb7fbd979cc3a5 | #!/usr/bin/python -u
#
# this tests the Expand() API of the xmlTextReader interface
# this extract the Dragon bibliography entries from the XML specification
#
import libxml2
import sys
# Memory debug specific
libxml2.debugMemory(1)
expect="""<bibl id="Aho" key="Aho/Ullman">Aho, Alfred V.,
Ravi Sethi, and Jeffrey D. Ullman.
<emph>Compilers: Principles, Techniques, and Tools</emph>.
Reading: Addison-Wesley, 1986, rpt. corr. 1988.</bibl>"""
f = open('../../test/valid/REC-xml-19980210.xml', 'rb')
input = libxml2.inputBuffer(f)
reader = input.newTextReader("REC")
res=""
while reader.Read() > 0:
while reader.Name() == 'bibl':
node = reader.Expand() # expand the subtree
if node.xpathEval("@id = 'Aho'"): # use XPath on it
res = res + node.serialize()
if reader.Next() != 1: # skip the subtree
            break
if res != expect:
print("Error: didn't get the expected output")
print("got '%s'" % (res))
print("expected '%s'" % (expect))
#
# cleanup
#
del input
del reader
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print("OK")
else:
print("Memory leak %d bytes" % (libxml2.debugMemory(1)))
libxml2.dumpMemory()
|
py | b40195382138bf022fe0f50902befacbb0906a2f | from builtins import range
import numpy as np
def affine_forward(x, w, b):
"""
Computes the forward pass for an affine (fully-connected) layer.
The input x has shape (N, d_1, ..., d_k) and contains a minibatch of N
examples, where each example x[i] has shape (d_1, ..., d_k). We will
reshape each input into a vector of dimension D = d_1 * ... * d_k, and
then transform it to an output vector of dimension M.
Inputs:
- x: A numpy array containing input data, of shape (N, d_1, ..., d_k)
- w: A numpy array of weights, of shape (D, M)
- b: A numpy array of biases, of shape (M,)
Returns a tuple of:
- out: output, of shape (N, M)
- cache: (x, w, b)
"""
out = None
###########################################################################
# TODO: Implement the affine forward pass. Store the result in out. You #
# will need to reshape the input into rows. #
###########################################################################
flat_x = x.reshape(x.shape[0], -1)
out = np.dot(flat_x, w) + b
###########################################################################
# END OF YOUR CODE #
###########################################################################
cache = (x, w, b)
return out, cache
def affine_backward(dout, cache):
"""
Computes the backward pass for an affine layer.
Inputs:
- dout: Upstream derivative, of shape (N, M)
- cache: Tuple of:
- x: Input data, of shape (N, d_1, ... d_k)
      - w: Weights, of shape (D, M)
      - b: Biases, of shape (M,)
Returns a tuple of:
- dx: Gradient with respect to x, of shape (N, d1, ..., d_k)
- dw: Gradient with respect to w, of shape (D, M)
- db: Gradient with respect to b, of shape (M,)
"""
x, w, b = cache
dx, dw, db = None, None, None
###########################################################################
# TODO: Implement the affine backward pass. #
###########################################################################
input_shape = x.shape
flat_x = x.reshape(input_shape[0], -1)
dw = np.dot(flat_x.T, dout)
db = np.sum(dout, axis=0)
dx = np.dot(dout, w.T)
dx = dx.reshape(input_shape)
###########################################################################
# END OF YOUR CODE #
###########################################################################
return dx, dw, db
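# A hedged sanity-check sketch for the affine layer: it spot-checks one entry
# of dw against a centered finite difference on a tiny random input. All
# shapes and the step size h are arbitrary choices, and the helper is not
# called anywhere in this file.
def _affine_gradient_check_sketch(h=1e-6):
    x = np.random.randn(3, 4)
    w = np.random.randn(4, 5)
    b = np.random.randn(5)
    dout = np.random.randn(3, 5)
    _, cache = affine_forward(x, w, b)
    dx, dw, db = affine_backward(dout, cache)
    # Numerical gradient of sum(out * dout) with respect to w[0, 0].
    w_plus, w_minus = w.copy(), w.copy()
    w_plus[0, 0] += h
    w_minus[0, 0] -= h
    numeric = (np.sum(affine_forward(x, w_plus, b)[0] * dout) -
               np.sum(affine_forward(x, w_minus, b)[0] * dout)) / (2 * h)
    return dw[0, 0], numeric  # the two values should agree closely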
def relu_forward(x):
"""
Computes the forward pass for a layer of rectified linear units (ReLUs).
Input:
- x: Inputs, of any shape
Returns a tuple of:
- out: Output, of the same shape as x
- cache: x
"""
out = None
###########################################################################
# TODO: Implement the ReLU forward pass. #
###########################################################################
out = np.maximum(0, x)
###########################################################################
# END OF YOUR CODE #
###########################################################################
cache = x
return out, cache
def relu_backward(dout, cache):
"""
Computes the backward pass for a layer of rectified linear units (ReLUs).
Input:
- dout: Upstream derivatives, of any shape
- cache: Input x, of same shape as dout
Returns:
- dx: Gradient with respect to x
"""
dx, x = None, cache
###########################################################################
# TODO: Implement the ReLU backward pass. #
###########################################################################
    # Build dx without mutating the upstream gradient buffer in place.
    dx = np.where(x > 0, dout, 0)
###########################################################################
# END OF YOUR CODE #
###########################################################################
return dx
def batchnorm_forward(x, gamma, beta, bn_param):
"""
Forward pass for batch normalization.
During training the sample mean and (uncorrected) sample variance are
computed from minibatch statistics and used to normalize the incoming data.
During training we also keep an exponentially decaying running mean of the
mean and variance of each feature, and these averages are used to normalize
data at test-time.
At each timestep we update the running averages for mean and variance using
an exponential decay based on the momentum parameter:
running_mean = momentum * running_mean + (1 - momentum) * sample_mean
running_var = momentum * running_var + (1 - momentum) * sample_var
Note that the batch normalization paper suggests a different test-time
behavior: they compute sample mean and variance for each feature using a
large number of training images rather than using a running average. For
this implementation we have chosen to use running averages instead since
they do not require an additional estimation step; the torch7
implementation of batch normalization also uses running averages.
Input:
- x: Data of shape (N, D)
- gamma: Scale parameter of shape (D,)
    - beta: Shift parameter of shape (D,)
- bn_param: Dictionary with the following keys:
- mode: 'train' or 'test'; required
- eps: Constant for numeric stability
- momentum: Constant for running mean / variance.
- running_mean: Array of shape (D,) giving running mean of features
- running_var Array of shape (D,) giving running variance of features
Returns a tuple of:
- out: of shape (N, D)
- cache: A tuple of values needed in the backward pass
"""
mode = bn_param['mode']
eps = bn_param.get('eps', 1e-5)
momentum = bn_param.get('momentum', 0.9)
N, D = x.shape
running_mean = bn_param.get('running_mean', np.zeros(D, dtype=x.dtype))
running_var = bn_param.get('running_var', np.zeros(D, dtype=x.dtype))
out, cache = None, None
if mode == 'train':
#######################################################################
# TODO: Implement the training-time forward pass for batch norm. #
# Use minibatch statistics to compute the mean and variance, use #
# these statistics to normalize the incoming data, and scale and #
# shift the normalized data using gamma and beta. #
# #
# You should store the output in the variable out. Any intermediates #
# that you need for the backward pass should be stored in the cache #
# variable. #
# #
# You should also use your computed sample mean and variance together #
# with the momentum variable to update the running mean and running #
# variance, storing your result in the running_mean and running_var #
# variables. #
#######################################################################
# Step 1 - compute mini-bacth mean mu (D, )
mu = np.mean(x, axis=0)
# print('mu', mu.shape)
# Step 2 - compute xmu (N, D)
xmu = x - mu
# print('xmu', xmu.shape)
# Step 3 - compute xmu2 (N, D)
xmu2 = xmu ** 2
# print('xmu2', xmu2.shape)
# Step 4 - compute var (D, )
var = np.mean(xmu2, axis=0)
# print('var', var.shape)
# Step 5 - compute sqrtvar (D, )
sqrtvar = np.sqrt(var + eps)
# print('sqrtvar', sqrtvar.shape)
# Step 6 - compute invsqrtvar (D, )
invsqrtvar = 1.0 / sqrtvar
# print('invsqrtvar', invsqrtvar.shape)
# Step 7 - compute normalized (N, D)
normalized = xmu * invsqrtvar
# print('normalized', normalized.shape)
# Step 8 - compute scaled (N, D)
scaled = gamma * normalized
# print('scaled', scaled.shape)
# Step 9 - compute out (N, D)
out = beta + scaled
# print('out', out.shape)
# compute running_mean & running_var
running_mean = momentum * running_mean + (1 - momentum) * mu
running_var = momentum * running_var + (1 - momentum) * var
cache = (x, mu, xmu, xmu2, var, sqrtvar, invsqrtvar, normalized, scaled, out, gamma, beta, eps)
#######################################################################
# END OF YOUR CODE #
#######################################################################
elif mode == 'test':
#######################################################################
# TODO: Implement the test-time forward pass for batch normalization. #
# Use the running mean and variance to normalize the incoming data, #
# then scale and shift the normalized data using gamma and beta. #
# Store the result in the out variable. #
#######################################################################
# batch normalization in test mode
out = x - running_mean # center
out *= 1.0 / np.sqrt(running_var + eps) # normalize
out *= gamma # scale
out += beta # shift
#######################################################################
# END OF YOUR CODE #
#######################################################################
else:
raise ValueError('Invalid forward batchnorm mode "%s"' % mode)
# Store the updated running means back into bn_param
bn_param['running_mean'] = running_mean
bn_param['running_var'] = running_var
return out, cache
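# Worked example of the running-average update above (illustrative numbers):
# with momentum = 0.9, running_mean = 0.0 and a minibatch mean of 2.0, the
# update gives 0.9 * 0.0 + 0.1 * 2.0 = 0.2; over many minibatches the running
# mean drifts toward the long-run average of the per-batch means.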
def batchnorm_backward(dout, cache):
"""
Backward pass for batch normalization.
For this implementation, you should write out a computation graph for
batch normalization on paper and propagate gradients backward through
intermediate nodes.
Inputs:
- dout: Upstream derivatives, of shape (N, D)
- cache: Variable of intermediates from batchnorm_forward.
Returns a tuple of:
- dx: Gradient with respect to inputs x, of shape (N, D)
- dgamma: Gradient with respect to scale parameter gamma, of shape (D,)
- dbeta: Gradient with respect to shift parameter beta, of shape (D,)
"""
dx, dgamma, dbeta = None, None, None
###########################################################################
# TODO: Implement the backward pass for batch normalization. Store the #
# results in the dx, dgamma, and dbeta variables. #
###########################################################################
x, mu, xmu, xmu2, var, sqrtvar, invsqrtvar, normalized, scaled, out, gamma, beta, eps = cache
N, D = dout.shape
# Backward step 9
dbeta = np.sum(dout, axis=0)
dscaled = dout
# Backward step 8
dgamma = np.sum(dscaled * normalized, axis=0)
dnormalized = gamma * dscaled
# Backward step 7
dxmu = invsqrtvar * dnormalized
dinvsqrtvar = np.sum(xmu * dnormalized, axis=0)
# Backward step 6
dsqrtvar = -1.0 / (sqrtvar ** 2) * dinvsqrtvar
# Backward step 5
dvar = 0.5 / sqrtvar * dsqrtvar
# Backward step 4
dxmu2 = np.ones((xmu2.shape)) / float(N) * dvar
# Backward step 3
dxmu += 2 * xmu * dxmu2
# Backward step 2
dx = dxmu
dmu = -1.0 * np.sum(dxmu, axis=0)
# Backward step 1
dx += np.ones((x.shape)) / float(N) * dmu
###########################################################################
# END OF YOUR CODE #
###########################################################################
return dx, dgamma, dbeta
def batchnorm_backward_alt(dout, cache):
"""
Alternative backward pass for batch normalization.
For this implementation you should work out the derivatives for the batch
normalizaton backward pass on paper and simplify as much as possible. You
should be able to derive a simple expression for the backward pass.
Note: This implementation should expect to receive the same cache variable
as batchnorm_backward, but might not use all of the values in the cache.
Inputs / outputs: Same as batchnorm_backward
"""
dx, dgamma, dbeta = None, None, None
##########################################################################
# TODO: Implement the backward pass for batch normalization. Store the #
# results in the dx, dgamma, and dbeta variables. #
# #
# After computing the gradient with respect to the centered inputs, you #
# should be able to compute gradients with respect to the inputs in a #
# single statement; our implementation fits on a single 80-character line. #
##########################################################################
x, mu, xmu, xmu2, var, sqrtvar, invsqrtvar, normalized, scaled, out, gamma, beta, eps = cache
N, D = dout.shape
dbeta = np.sum(dout, axis=0)
dgamma = np.sum((x - mu) * (var + eps)**(-1. / 2.) * dout, axis=0)
dx = (1. / N) * gamma * (var + eps)**(-1. / 2.) * (N * dout - np.sum(dout, axis=0)
- (x - mu) * (var + eps)**(-1.0) * np.sum(dout * (x - mu), axis=0))
return dx, dgamma, dbeta
def dropout_forward(x, dropout_param):
"""
Performs the forward pass for (inverted) dropout.
Inputs:
- x: Input data, of any shape
- dropout_param: A dictionary with the following keys:
- p: Dropout parameter. We drop each neuron output with probability p.
- mode: 'test' or 'train'. If the mode is train, then perform dropout;
if the mode is test, then just return the input.
- seed: Seed for the random number generator. Passing seed makes this
function deterministic, which is needed for gradient checking but not
in real networks.
Outputs:
- out: Array of the same shape as x.
- cache: tuple (dropout_param, mask). In training mode, mask is the dropout
mask that was used to multiply the input; in test mode, mask is None.
"""
p, mode = dropout_param['p'], dropout_param['mode']
if 'seed' in dropout_param:
np.random.seed(dropout_param['seed'])
mask = None
out = None
if mode == 'train':
#######################################################################
# TODO: Implement training phase forward pass for inverted dropout. #
# Store the dropout mask in the mask variable. #
#######################################################################
pass
#######################################################################
# END OF YOUR CODE #
#######################################################################
elif mode == 'test':
#######################################################################
# TODO: Implement the test phase forward pass for inverted dropout. #
#######################################################################
pass
#######################################################################
# END OF YOUR CODE #
#######################################################################
cache = (dropout_param, mask)
out = out.astype(x.dtype, copy=False)
return out, cache
def dropout_backward(dout, cache):
"""
Perform the backward pass for (inverted) dropout.
Inputs:
- dout: Upstream derivatives, of any shape
- cache: (dropout_param, mask) from dropout_forward.
"""
dropout_param, mask = cache
mode = dropout_param['mode']
dx = None
if mode == 'train':
#######################################################################
# TODO: Implement training phase backward pass for inverted dropout #
#######################################################################
pass
#######################################################################
# END OF YOUR CODE #
#######################################################################
elif mode == 'test':
dx = dout
return dx
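# A hedged sketch of inverted dropout, kept separate from the TODO stubs
# above. It assumes 'keep' is the probability of keeping a unit; if p in
# dropout_param is the drop probability described in the docstring, keep
# corresponds to 1 - p. The helper is not called anywhere in this file.
def _inverted_dropout_sketch(x, keep=0.5, train=True):
    if not train:
        return x, None
    mask = (np.random.rand(*x.shape) < keep) / keep  # scale at train time
    return x * mask, mask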
def conv_forward_naive(x, w, b, conv_param):
"""
A naive implementation of the forward pass for a convolutional layer.
The input consists of N data points, each with C channels, height H and
width W. We convolve each input with F different filters, where each filter
    spans all C channels and has height HH and width WW.
Input:
- x: Input data of shape (N, C, H, W)
- w: Filter weights of shape (F, C, HH, WW)
- b: Biases, of shape (F,)
- conv_param: A dictionary with the following keys:
- 'stride': The number of pixels between adjacent receptive fields in the
horizontal and vertical directions.
- 'pad': The number of pixels that will be used to zero-pad the input.
Returns a tuple of:
- out: Output data, of shape (N, F, H', W') where H' and W' are given by
H' = 1 + (H + 2 * pad - HH) / stride
W' = 1 + (W + 2 * pad - WW) / stride
- cache: (x, w, b, conv_param)
"""
out = None
###########################################################################
# TODO: Implement the convolutional forward pass. #
# Hint: you can use the function np.pad for padding. #
###########################################################################
pass
###########################################################################
# END OF YOUR CODE #
###########################################################################
cache = (x, w, b, conv_param)
return out, cache
def conv_backward_naive(dout, cache):
"""
A naive implementation of the backward pass for a convolutional layer.
Inputs:
- dout: Upstream derivatives.
- cache: A tuple of (x, w, b, conv_param) as in conv_forward_naive
Returns a tuple of:
- dx: Gradient with respect to x
- dw: Gradient with respect to w
- db: Gradient with respect to b
"""
dx, dw, db = None, None, None
###########################################################################
# TODO: Implement the convolutional backward pass. #
###########################################################################
pass
###########################################################################
# END OF YOUR CODE #
###########################################################################
return dx, dw, db
def max_pool_forward_naive(x, pool_param):
"""
A naive implementation of the forward pass for a max pooling layer.
Inputs:
- x: Input data, of shape (N, C, H, W)
- pool_param: dictionary with the following keys:
- 'pool_height': The height of each pooling region
- 'pool_width': The width of each pooling region
- 'stride': The distance between adjacent pooling regions
Returns a tuple of:
- out: Output data
- cache: (x, pool_param)
"""
out = None
###########################################################################
# TODO: Implement the max pooling forward pass #
###########################################################################
pass
###########################################################################
# END OF YOUR CODE #
###########################################################################
cache = (x, pool_param)
return out, cache
def max_pool_backward_naive(dout, cache):
"""
A naive implementation of the backward pass for a max pooling layer.
Inputs:
- dout: Upstream derivatives
- cache: A tuple of (x, pool_param) as in the forward pass.
Returns:
- dx: Gradient with respect to x
"""
dx = None
###########################################################################
# TODO: Implement the max pooling backward pass #
###########################################################################
pass
###########################################################################
# END OF YOUR CODE #
###########################################################################
return dx
def spatial_batchnorm_forward(x, gamma, beta, bn_param):
"""
Computes the forward pass for spatial batch normalization.
Inputs:
- x: Input data of shape (N, C, H, W)
- gamma: Scale parameter, of shape (C,)
- beta: Shift parameter, of shape (C,)
- bn_param: Dictionary with the following keys:
- mode: 'train' or 'test'; required
- eps: Constant for numeric stability
- momentum: Constant for running mean / variance. momentum=0 means that
old information is discarded completely at every time step, while
momentum=1 means that new information is never incorporated. The
default of momentum=0.9 should work well in most situations.
- running_mean: Array of shape (D,) giving running mean of features
- running_var Array of shape (D,) giving running variance of features
Returns a tuple of:
- out: Output data, of shape (N, C, H, W)
- cache: Values needed for the backward pass
"""
out, cache = None, None
###########################################################################
# TODO: Implement the forward pass for spatial batch normalization. #
# #
# HINT: You can implement spatial batch normalization using the vanilla #
# version of batch normalization defined above. Your implementation should#
# be very short; ours is less than five lines. #
###########################################################################
pass
###########################################################################
# END OF YOUR CODE #
###########################################################################
return out, cache
def spatial_batchnorm_backward(dout, cache):
"""
Computes the backward pass for spatial batch normalization.
Inputs:
- dout: Upstream derivatives, of shape (N, C, H, W)
- cache: Values from the forward pass
Returns a tuple of:
- dx: Gradient with respect to inputs, of shape (N, C, H, W)
- dgamma: Gradient with respect to scale parameter, of shape (C,)
- dbeta: Gradient with respect to shift parameter, of shape (C,)
"""
dx, dgamma, dbeta = None, None, None
###########################################################################
# TODO: Implement the backward pass for spatial batch normalization. #
# #
# HINT: You can implement spatial batch normalization using the vanilla #
# version of batch normalization defined above. Your implementation should#
# be very short; ours is less than five lines. #
###########################################################################
pass
###########################################################################
# END OF YOUR CODE #
###########################################################################
return dx, dgamma, dbeta
def svm_loss(x, y):
"""
Computes the loss and gradient using for multiclass SVM classification.
Inputs:
- x: Input data, of shape (N, C) where x[i, j] is the score for the jth
class for the ith input.
- y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
0 <= y[i] < C
Returns a tuple of:
- loss: Scalar giving the loss
- dx: Gradient of the loss with respect to x
"""
N = x.shape[0]
correct_class_scores = x[np.arange(N), y]
margins = np.maximum(0, x - correct_class_scores[:, np.newaxis] + 1.0)
margins[np.arange(N), y] = 0
loss = np.sum(margins) / N
num_pos = np.sum(margins > 0, axis=1)
dx = np.zeros_like(x)
dx[margins > 0] = 1
dx[np.arange(N), y] -= num_pos
dx /= N
return loss, dx
def softmax_loss(x, y):
"""
Computes the loss and gradient for softmax classification.
Inputs:
- x: Input data, of shape (N, C) where x[i, j] is the score for the jth
class for the ith input.
- y: Vector of labels, of shape (N,) where y[i] is the label for x[i] and
0 <= y[i] < C
Returns a tuple of:
- loss: Scalar giving the loss
- dx: Gradient of the loss with respect to x
"""
shifted_logits = x - np.max(x, axis=1, keepdims=True)
Z = np.sum(np.exp(shifted_logits), axis=1, keepdims=True)
log_probs = shifted_logits - np.log(Z)
probs = np.exp(log_probs)
N = x.shape[0]
loss = -np.sum(log_probs[np.arange(N), y]) / N
dx = probs.copy()
dx[np.arange(N), y] -= 1
dx /= N
return loss, dx
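# A hedged sanity check for softmax_loss (not called anywhere): with small
# random scores over C classes, the loss should come out close to log(C).
def _softmax_loss_sanity_sketch(N=64, C=10):
    x = 0.001 * np.random.randn(N, C)
    y = np.random.randint(C, size=N)
    loss, _ = softmax_loss(x, y)
    return loss, np.log(C)  # the two values should be roughly equal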
|
py | b401958c9e850bc11bc90acc5ed06701241b6bb5 | class DifferentStrings:
def minimize(self, A, B):
        def diff(s):
            # Count positions where A and the window s differ.
            return sum(a != b for a, b in zip(A, s))
        la = len(A)
        return min(diff(B[i : i + la]) for i in range(len(B) - la + 1))
|
py | b4019597c8ea49214f14db15a171ecc0922a6422 | from .fhirbase import fhirbase
class Address(fhirbase):
"""
An address expressed using postal conventions (as opposed to GPS or
other location definition formats). This data type may be used to
convey addresses for use in delivering mail as well as for visiting
locations which might not be valid for mail delivery. There are a
variety of postal address formats defined around the world.
Args:
use: The purpose of this address.
type: Distinguishes between physical addresses (those you can visit)
and mailing addresses (e.g. PO Boxes and care-of addresses). Most
addresses are both.
text: A full text representation of the address.
line: This component contains the house number, apartment number,
street name, street direction, P.O. Box number, delivery hints, and
similar address information.
city: The name of the city, town, village or other community or
delivery center.
district: The name of the administrative area (county).
state: Sub-unit of a country with limited sovereignty in a federally
organized country. A code may be used if codes are in common use (i.e.
US 2 letter state codes).
postalCode: A postal code designating a region defined by the postal
service.
country: Country - a nation as commonly understood or generally
accepted.
period: Time period when address was/is in use.
"""
__name__ = 'Address'
def __init__(self, dict_values=None):
self.use = None
# type: str
# possible values: home, work, temp, old
self.type = None
# type: str
# possible values: postal, physical, both
self.text = None
# type: str
self.line = None
# type: list
self.city = None
# type: str
self.district = None
# type: str
self.state = None
# type: str
self.postalCode = None
# type: str
self.country = None
# type: str
self.period = None
# reference to Period
self.object_id = None
# unique identifier for object class
if dict_values:
self.set_attributes(dict_values)
self.assert_type()
def assert_type(self):
        # self.use and self.type are plain strings, so validate each value
        # directly instead of iterating over its characters.
        if self.use is not None and self.use.lower() not in [
                'home', 'work', 'temp', 'old']:
            raise ValueError('"{}" does not match possible values: {}'.format(
                self.use, 'home, work, temp, old'))
        if self.type is not None and self.type.lower() not in [
                'postal', 'physical', 'both']:
            raise ValueError('"{}" does not match possible values: {}'.format(
                self.type, 'postal, physical, both'))
def get_relationships(self):
return [
{'parent_entity': 'Period',
'parent_variable': 'object_id',
'child_entity': 'Address',
'child_variable': 'period'},
]
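# A hedged usage sketch, assuming fhirbase.set_attributes() copies matching
# dictionary keys onto the instance (that behaviour is defined elsewhere and
# not shown here):
#
#     address = Address(dict_values={
#         'use': 'home',
#         'type': 'postal',
#         'line': ['42 Example Street'],
#         'city': 'Springfield',
#         'country': 'USA',
#     })
#
# assert_type() then checks that 'use' and 'type' fall within the value sets
# documented above.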
|