ext (string, 9 distinct values) | sha (string, length 40) | content (string, length 3 to 1.04M) |
---|---|---|
py | b408b82f3bb071b8e529087152927ef47e4da2ab | from __future__ import unicode_literals
import json
from werkzeug.exceptions import BadRequest
class ResourceNotFoundException(BadRequest):
def __init__(self, message):
super(ResourceNotFoundException, self).__init__()
self.description = json.dumps({
"message": message,
'__type': 'ResourceNotFoundException',
})
class ValidationException(BadRequest):
def __init__(self, message):
super(ValidationException, self).__init__()
self.description = json.dumps({
"message": message,
'__type': 'ValidationException',
})
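# Illustrative usage: a request handler can raise one of these so Werkzeug
# returns an HTTP 400 whose body is the JSON payload stored in `description`,
# e.g. raise ResourceNotFoundException("Requested resource not found")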
|
py | b408b88b1b9801a534cdd4536b9b5e91d81a1d11 | from packaging.version import Version
import os
import warnings
import yaml
import mxnet as mx
import numpy as np
import pandas as pd
import pytest
from mxnet import context as ctx
from mxnet.gluon import Trainer
from mxnet.gluon.data import DataLoader
from mxnet.gluon.nn import HybridSequential, Dense
import mlflow
import mlflow.gluon
import mlflow.pyfunc.scoring_server as pyfunc_scoring_server
from mlflow import pyfunc
from mlflow.models import infer_signature, Model
from mlflow.models.utils import _read_example
from mlflow.tracking.artifact_utils import _download_artifact_from_uri
from mlflow.utils.environment import _mlflow_conda_env
from mlflow.utils.file_utils import TempDir
from mlflow.utils.model_utils import _get_flavor_configuration
from tests.gluon.utils import get_estimator
from tests.helper_functions import (
pyfunc_serve_and_score_model,
_compare_conda_env_requirements,
_assert_pip_requirements,
_is_available_on_pypi,
)
if Version(mx.__version__) >= Version("2.0.0"):
array_module = mx.np
else:
array_module = mx.nd
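# If mxnet is not published on PyPI for this build, pass "--no-conda" so the
# pyfunc serving tests score against the current environment instead of a
# freshly created conda environment.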
EXTRA_PYFUNC_SERVING_TEST_ARGS = [] if _is_available_on_pypi("mxnet") else ["--no-conda"]
@pytest.fixture
def model_path(tmpdir):
return os.path.join(tmpdir.strpath, "model")
@pytest.fixture
def gluon_custom_env(tmpdir):
conda_env = os.path.join(str(tmpdir), "conda_env.yml")
_mlflow_conda_env(conda_env, additional_pip_deps=["mxnet", "pytest"])
return conda_env
@pytest.fixture(scope="module")
def model_data():
mnist = mx.test_utils.get_mnist()
train_data = array_module.array(mnist["train_data"].reshape(-1, 784))
train_label = array_module.array(mnist["train_label"])
test_data = array_module.array(mnist["test_data"].reshape(-1, 784))
return train_data, train_label, test_data
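# Module-scoped fixture: trains a small dense network on MNIST once and shares
# the fitted Gluon model across the tests below.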
@pytest.fixture(scope="module")
def gluon_model(model_data):
train_data, train_label, _ = model_data
dataset = mx.gluon.data.ArrayDataset(train_data, train_label)
train_data_loader = DataLoader(dataset, batch_size=128, last_batch="discard")
model = HybridSequential()
model.add(Dense(128, activation="relu"))
model.add(Dense(64, activation="relu"))
model.add(Dense(10))
model.initialize()
model.hybridize()
trainer = Trainer(
model.collect_params(), "adam", optimizer_params={"learning_rate": 0.001, "epsilon": 1e-07}
)
est = get_estimator(model, trainer)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
est.fit(train_data_loader, epochs=3)
return model
@pytest.mark.large
def test_model_save_load(gluon_model, model_data, model_path):
_, _, test_data = model_data
expected = array_module.argmax(gluon_model(test_data), axis=1)
mlflow.gluon.save_model(gluon_model, model_path)
# Loading Gluon model
model_loaded = mlflow.gluon.load_model(model_path, ctx.cpu())
actual = array_module.argmax(model_loaded(test_data), axis=1)
assert all(expected == actual)
# Loading pyfunc model
pyfunc_loaded = mlflow.pyfunc.load_model(model_path)
test_pyfunc_data = pd.DataFrame(test_data.asnumpy())
pyfunc_preds = pyfunc_loaded.predict(test_pyfunc_data)
assert all(np.argmax(pyfunc_preds.values, axis=1) == expected.asnumpy())
# test with numpy array input
pyfunc_preds = pyfunc_loaded.predict(test_pyfunc_data.values)
assert all(np.argmax(pyfunc_preds, axis=1) == expected.asnumpy())
@pytest.mark.large
def test_signature_and_examples_are_saved_correctly(gluon_model, model_data):
model = gluon_model
signature_ = infer_signature(model_data[0].asnumpy())
example_ = model_data[0].asnumpy()[
:3,
]
for signature in (None, signature_):
for example in (None, example_):
with TempDir() as tmp:
path = tmp.path("model")
mlflow.gluon.save_model(
model, path=path, signature=signature, input_example=example
)
mlflow_model = Model.load(path)
assert signature == mlflow_model.signature
if example is None:
assert mlflow_model.saved_input_example_info is None
else:
assert np.array_equal(_read_example(mlflow_model, path), example)
@pytest.mark.large
def test_model_log_load(gluon_model, model_data, model_path):
# pylint: disable=unused-argument
_, _, test_data = model_data
expected = array_module.argmax(gluon_model(test_data), axis=1)
artifact_path = "model"
with mlflow.start_run():
mlflow.gluon.log_model(gluon_model, artifact_path=artifact_path)
model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
# Loading Gluon model
model_loaded = mlflow.gluon.load_model(model_uri, ctx.cpu())
actual = array_module.argmax(model_loaded(test_data), axis=1)
assert all(expected == actual)
# Loading pyfunc model
pyfunc_loaded = mlflow.pyfunc.load_model(model_uri)
test_pyfunc_data = pd.DataFrame(test_data.asnumpy())
pyfunc_preds = pyfunc_loaded.predict(test_pyfunc_data)
assert all(np.argmax(pyfunc_preds.values, axis=1) == expected.asnumpy())
@pytest.mark.large
def test_model_save_persists_specified_conda_env_in_mlflow_model_directory(
gluon_model, model_path, gluon_custom_env
):
mlflow.gluon.save_model(gluon_model=gluon_model, path=model_path, conda_env=gluon_custom_env)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
assert os.path.exists(saved_conda_env_path)
assert saved_conda_env_path != gluon_custom_env
with open(gluon_custom_env, "r") as f:
gluon_custom_env_parsed = yaml.safe_load(f)
with open(saved_conda_env_path, "r") as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == gluon_custom_env_parsed
@pytest.mark.large
def test_model_save_persists_requirements_in_mlflow_model_directory(
gluon_model, model_path, gluon_custom_env
):
mlflow.gluon.save_model(gluon_model=gluon_model, path=model_path, conda_env=gluon_custom_env)
saved_pip_req_path = os.path.join(model_path, "requirements.txt")
_compare_conda_env_requirements(gluon_custom_env, saved_pip_req_path)
@pytest.mark.large
def test_save_model_with_pip_requirements(gluon_model, tmpdir):
# Path to a requirements file
tmpdir1 = tmpdir.join("1")
req_file = tmpdir.join("requirements.txt")
req_file.write("a")
mlflow.gluon.save_model(gluon_model, tmpdir1.strpath, pip_requirements=req_file.strpath)
_assert_pip_requirements(tmpdir1.strpath, ["mlflow", "a"], strict=True)
# List of requirements
tmpdir2 = tmpdir.join("2")
mlflow.gluon.save_model(
gluon_model, tmpdir2.strpath, pip_requirements=[f"-r {req_file.strpath}", "b"]
)
_assert_pip_requirements(tmpdir2.strpath, ["mlflow", "a", "b"], strict=True)
# Constraints file
tmpdir3 = tmpdir.join("3")
mlflow.gluon.save_model(
gluon_model, tmpdir3.strpath, pip_requirements=[f"-c {req_file.strpath}", "b"]
)
_assert_pip_requirements(
tmpdir3.strpath, ["mlflow", "b", "-c constraints.txt"], ["a"], strict=True
)
@pytest.mark.large
def test_save_model_with_extra_pip_requirements(gluon_model, tmpdir):
default_reqs = mlflow.gluon.get_default_pip_requirements()
# Path to a requirements file
tmpdir1 = tmpdir.join("1")
req_file = tmpdir.join("requirements.txt")
req_file.write("a")
mlflow.gluon.save_model(gluon_model, tmpdir1.strpath, extra_pip_requirements=req_file.strpath)
_assert_pip_requirements(tmpdir1.strpath, ["mlflow", *default_reqs, "a"])
# List of requirements
tmpdir2 = tmpdir.join("2")
mlflow.gluon.save_model(
gluon_model, tmpdir2.strpath, extra_pip_requirements=[f"-r {req_file.strpath}", "b"]
)
_assert_pip_requirements(tmpdir2.strpath, ["mlflow", *default_reqs, "a", "b"])
# Constraints file
tmpdir3 = tmpdir.join("3")
mlflow.gluon.save_model(
gluon_model, tmpdir3.strpath, extra_pip_requirements=[f"-c {req_file.strpath}", "b"]
)
_assert_pip_requirements(
tmpdir3.strpath, ["mlflow", *default_reqs, "b", "-c constraints.txt"], ["a"]
)
@pytest.mark.large
def test_model_save_accepts_conda_env_as_dict(gluon_model, model_path):
conda_env = dict(mlflow.gluon.get_default_conda_env())
conda_env["dependencies"].append("pytest")
mlflow.gluon.save_model(gluon_model=gluon_model, path=model_path, conda_env=conda_env)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
assert os.path.exists(saved_conda_env_path)
with open(saved_conda_env_path, "r") as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == conda_env
@pytest.mark.large
def test_log_model_persists_specified_conda_env_in_mlflow_model_directory(
gluon_model, gluon_custom_env
):
artifact_path = "model"
with mlflow.start_run():
mlflow.gluon.log_model(
gluon_model=gluon_model, artifact_path=artifact_path, conda_env=gluon_custom_env
)
model_path = _download_artifact_from_uri(
"runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
)
pyfunc_conf = _get_flavor_configuration(model_path=model_path, flavor_name=pyfunc.FLAVOR_NAME)
saved_conda_env_path = os.path.join(model_path, pyfunc_conf[pyfunc.ENV])
assert os.path.exists(saved_conda_env_path)
assert saved_conda_env_path != gluon_custom_env
with open(gluon_custom_env, "r") as f:
gluon_custom_env_parsed = yaml.safe_load(f)
with open(saved_conda_env_path, "r") as f:
saved_conda_env_parsed = yaml.safe_load(f)
assert saved_conda_env_parsed == gluon_custom_env_parsed
@pytest.mark.large
def test_model_log_persists_requirements_in_mlflow_model_directory(gluon_model, gluon_custom_env):
artifact_path = "model"
with mlflow.start_run():
mlflow.gluon.log_model(
gluon_model=gluon_model, artifact_path=artifact_path, conda_env=gluon_custom_env
)
model_path = _download_artifact_from_uri(
"runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
)
saved_pip_req_path = os.path.join(model_path, "requirements.txt")
_compare_conda_env_requirements(gluon_custom_env, saved_pip_req_path)
@pytest.mark.large
def test_gluon_model_serving_and_scoring_as_pyfunc(gluon_model, model_data):
_, _, test_data = model_data
expected = array_module.argmax(gluon_model(test_data), axis=1)
artifact_path = "model"
with mlflow.start_run():
mlflow.gluon.log_model(gluon_model, artifact_path=artifact_path)
model_uri = "runs:/{run_id}/{artifact_path}".format(
run_id=mlflow.active_run().info.run_id, artifact_path=artifact_path
)
scoring_response = pyfunc_serve_and_score_model(
model_uri=model_uri,
data=pd.DataFrame(test_data.asnumpy()),
content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON_SPLIT_ORIENTED,
extra_args=EXTRA_PYFUNC_SERVING_TEST_ARGS,
)
response_values = pd.read_json(scoring_response.content, orient="records").values.astype(
np.float32
)
assert all(np.argmax(response_values, axis=1) == expected.asnumpy())
|
py | b408b9abbd97ef9ede45faff4385cf9069f359f7 | from ..factory import Type
class updateNotification(Type):
notification_group_id = None # type: "int32"
notification = None # type: "notification"
|
py | b408bb16e9bcfeebc2169eca0eb9a387e2c4bccc | # encoding: utf-8
import datetime
import six
from collections import OrderedDict
import sqlalchemy as sa
from sqlalchemy import orm
from ckan.model import meta, core
__all__ = ['DomainObject', 'DomainObjectOperation']
class Enum(set):
'''Simple enumeration
e.g. Animal = Enum("dog", "cat", "horse")
joey = Animal.dog
'''
def __init__(self, *names):
super(Enum, self).__init__(names)
def __getattr__(self, name):
if name in self:
return name
raise AttributeError
DomainObjectOperation = Enum('new', 'changed', 'deleted')
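# Example: DomainObjectOperation.changed == 'changed'; accessing a name that
# was not passed to the constructor raises AttributeError.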
class DomainObject(object):
text_search_fields = []
Session = meta.Session
def __init__(self, **kwargs):
for k,v in kwargs.items():
setattr(self, k, v)
@classmethod
def count(cls):
return cls.Session.query(cls).count()
@classmethod
def by_name(cls, name, autoflush=True):
obj = meta.Session.query(cls).autoflush(autoflush)\
.filter_by(name=name).first()
return obj
@classmethod
def text_search(cls, query, term):
register = cls
make_like = lambda x,y: x.ilike('%' + y + '%')
q = None
for field in cls.text_search_fields:
attr = getattr(register, field)
q = sa.or_(q, make_like(attr, term))
return query.filter(q)
@classmethod
def active(cls):
return meta.Session.query(cls).filter_by(state=core.State.ACTIVE)
def save(self):
self.add()
self.commit()
def add(self):
self.Session.add(self)
def commit_remove(self):
self.commit()
self.remove()
def commit(self):
self.Session.commit()
def remove(self):
self.Session.remove()
def delete(self):
# stateful objects have this method overridden - see
# core.StatefulObjectMixin
self.Session.delete(self)
def purge(self):
self.Session().autoflush = False
self.Session.delete(self)
def as_dict(self):
_dict = OrderedDict()
table = orm.class_mapper(self.__class__).mapped_table
for col in table.c:
val = getattr(self, col.name)
if isinstance(val, datetime.date):
val = str(val)
if isinstance(val, datetime.datetime):
val = val.isoformat()
_dict[col.name] = val
return _dict
def __lt__(self, other):
return self.name < other.name
def __str__(self):
return repr(self)
def __unicode__(self):
repr = u'<%s' % self.__class__.__name__
table = orm.class_mapper(self.__class__).mapped_table
for col in table.c:
try:
repr += u' %s=%s' % (col.name, getattr(self, col.name))
except Exception as inst:
repr += u' %s=%s' % (col.name, inst)
repr += '>'
return repr
def __repr__(self):
return six.ensure_str(self.__unicode__())
|
py | b408bb6fd01c8c8b25b1c47f0fbd5ed6ce089f48 | import os
import sys
from setuptools import find_packages
from setuptools import setup
version = '1.26.0.dev0'
install_requires = [
'dns-lexicon>=3.2.1',
'setuptools>=41.6.0',
]
if not os.environ.get('SNAP_BUILD'):
install_requires.extend([
# We specify the minimum acme and certbot version as the current plugin
# version for simplicity. See
# https://github.com/certbot/certbot/issues/8761 for more info.
f'acme>={version}',
f'certbot>={version}',
])
elif 'bdist_wheel' in sys.argv[1:]:
raise RuntimeError('Unset SNAP_BUILD when building wheels '
'to include certbot dependencies.')
if os.environ.get('SNAP_BUILD'):
install_requires.append('packaging')
docs_extras = [
'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags
'sphinx_rtd_theme',
]
setup(
name='certbot-dns-luadns',
version=version,
description="LuaDNS Authenticator plugin for Certbot",
url='https://github.com/certbot/certbot',
author="Certbot Project",
author_email='[email protected]',
license='Apache License 2.0',
python_requires='>=3.7',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Plugins',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Security',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Networking',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
],
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
extras_require={
'docs': docs_extras,
},
entry_points={
'certbot.plugins': [
'dns-luadns = certbot_dns_luadns._internal.dns_luadns:Authenticator',
],
},
)
|
py | b408bc3834a2c6f5e87e661e45119edbdd474c15 | # coredefs.py
r'''
This file contains the dictionary `defs' which has the following keys:
Families:
1. `commands': argument definitions for commands
2. `environments': argument definitions for environments
3. `declarations': argument definitions for declarations
Stop token definitions:
4. `blocks': keyed on species: `chapter', `item', etc.
5. `modes': keyed on genus: Alignment, FontStyle, FontSize, Language
Counter definitions:
6. `numbered': primary numbered species (and reset counters)
7. `numbered_like': species numbered with a primary numbered species (e.g. lemma 1, theorem 2, ...)
8. `marker_formats': how numbers are displayed (e.g. by \thechapter)
The Family classes are divided into Genera, and each species is allocated to
a specific Genus class. The Genus names are mostly arbitrary and are passed through to templates.
The Genus names are capitalized, which helps to distinguish them from species names,
which are taken from the corresponding latex command name and so are mostly lower case.
TODO: some species/genus/argument/character names are hard-wired in the parser
Custom: def, newcommand, newenvironment, ...
Number: arabic, alph, roman, ...
Verbatim: verbatim
We need to prevent these from being overwritten by a custom definitions file.
Declarations
"Declarations produce neither text nor space but either
affect the way LATEX prints the following text or provide
information for later use. Font size changes are an example
of declarations. \large will cause any text that follows to
appear in a larger type size. Declarations are often used
within a group to limit their scope."
Note: for every declaration Latex always provides an environment
of the same name eg
\begin{bf}hello\end{bf}
works, so do we need to specify these in the env definitions too?
1. For mode declarations eg \bf, the parser creates the corresponding
Declaration.FontStyle:bf
node, then processes tokens until the next mode declaration of
genus 'FontStyle' or another stop token is encountered, e.g.
(0,'sc'), (2, '}') or (0,'end')
Tex-style environments work in the same way, they just define blocks ...
2. TODO: directive declarations change the global parameters
Counters: eg \setcounter{section}
- change the value of the counter on-the-fly
Lengths: eg \setlength{\parskip}{1em}:
- record for write functions (in registry.lengths say)
'''
r'''
% \CharacterTable
% {Upper-case \A\B\C\D\E\F\G\H\I\J\K\L\M\N\O\P\Q\R\S\T\U\V\W\X\Y\Z
% Lower-case \a\b\c\d\e\f\g\h\i\j\k\l\m\n\o\p\q\r\s\t\u\v\w\x\y\z
% Digits \0\1\2\3\4\5\6\7\8\9
% Exclamation \! Double quote \" Hash (number) \#
% Dollar \$ Percent \% Ampersand \&
% Acute accent \' Left paren \( Right paren \)
% Asterisk \* Plus \+ Comma \,
% Minus \- Point \. Solidus \/
% Colon \: Semicolon \; Less than \<
% Equals \= Greater than \> Question mark \?
% Commercial at \@ Left bracket \[ Backslash \\
% Right bracket \] Circumflex \^ Underscore \_
% Grave accent \` Left brace \{ Vertical bar \|
% Right brace \} Tilde \~}
'''
# Active characters
# subclassed from ControlSequence (because they can have arguments e.g. "o in babel german)
# _ and ^ are also active characters (?)
# active_chars = ['~']
# Control character names based on the \CharacterTable
# Classes corresponding to control characters are named,
# the character itself is stored as an attribute (symbol).
# This is to avoid class names such as '[' or '!' because
# xml does not accept such element names.
character_names = {
'!': 'Exclamation',
'$': 'Dollar',
"'": 'Acute',
'*': 'Asterisk',
'-': 'Minus',
':': 'Colon',
'=': 'Equals',
'@': 'At',
']': 'Right_bracket',
'`': 'Grave',
'}': 'Right_brace',
'"': 'Double_quote',
'%': 'Percent',
'(': 'Left_paren',
'+': 'Plus',
'.': 'Point',
';': 'Semicolon',
'>': 'Greater_than',
'[': 'Left_bracket',
'^': 'Circumflex',
'{': 'Left_brace',
'~': 'Tilde',
'#': 'Hash',
'&': 'Ampersand',
')': 'Right_paren',
',': 'Comma',
'/': 'Solidus',
'<': 'Less_than',
'?': 'Question_mark',
'\\': 'Backslash',
'_': 'Underscore',
'|': 'Vertical_bar',
}
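# For example, the control symbol '\&' maps to the name 'Ampersand' (with the
# character itself kept in a `symbol` attribute), so the resulting node can be
# written out as a valid XML element name.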
# main definitions directory
defs = {
# ------------------------------
# active characters
'active': {
'default': [
'~', # non-breaking space
],
# 'german': [
# '"{char}', # umlaut (see babel)
# ],
},
# ------------------------------
# commands
'commands': {
'Accent': [
'"{char}', # umlaut \"o
"'{char}", # acute \'o
'`{char}', # grave \`o
'^{char}', # circumflex \^o
'.{char}', # dot over \.o
'={char}', # macron \=o
'c{char}', # cedilla
'k{char}', # ogonek
'b{char}', # bar under
'd{char}', # dot under
'r{char}', # ring over
'u{char}', # breve over
'v{char}', # caron over
],
'Bibtex': [
'bibliographystyle{style}',
'bibliography{bibtex_file}',
],
'Box': [
'fbox{contents}',
],
'Caption': [
'caption[*][lst-entry]{caption_text}',
],
'FontStyle': [
'emph{text}',
'textrm{text}',
'textit{text}',
'textbf{text}',
'textsl{text}',
'textsf{text}',
'texttt{text}',
'textsc{text}',
'texttt{text}',
'underline{text}',
],
'Footnote': [
'footnote{text}',
'mpfootnote{text}',
],
'Horizontal': [
' ', # space \<space>
',', # half-space \,
'!', # half-space back \!
'quad',
'qquad',
'noindent',
'mbox{contents}',
'hfill',
],
'Input': [
'input{file}',
'include{file}',
],
'Item': [
'item[marker]',
'bibitem[marker]{key}',
],
'Label': [
'label{key}',
],
'Macro': [
'def{name}[numargs]{def}',
'newcommand{name}[numargs][opt]{def}',
'renewcommand{name}[numargs][opt]{def}',
'providecommand{name}[numargs][opt]{def}',
'newenvironment{name}[numargs]{begdef}{enddef}',
'renewenvironment{name}[numargs]{begdef}{enddef}',
'newtheorem{name}[numbered_like]{caption}[numbered_within]',
],
'Maths': [
'[', # begin displaymath ($$)
']', # end displaymath ($$)
'(', # begin mathmode ($)
')', # end mathmode ($)
],
'Misc': [
'addtocontents{file}{text}',
'addcontentsline{file}{sec_unit}{entry}',
'address{return_address}',
],
'Numeral': [
'arabic{counter}',
'alph{counter}',
'Alph{counter}',
'roman{counter}',
'Roman{counter}',
'fnsymbol{counter}',
],
'Preamble': [
'title[short_title]{title}',
'author{names}',
'date{date}',
'usepackage[options]{name}',
'documentclass[options]{name}',
],
'Section': [
'chapter[*][short-title]{title}',
'section[*][short-title]{title}',
'subsection[*][short-title]{title}',
'subsubsection[*][short-title]{title}',
'paragraph[*][short-title]{title}',
'subparagraph[*][short-title]{title}',
],
'Special': [
'$', # dollar
'&', # ampersand
'%', # percent
'{', # left brace
'}', # right brace
'_', # underscore
],
'Symbol': [
'i', # dotless i
'j', # dotless j
'l', # barred l
'o', # slashed o
'dag', # dagger
'ddag', # double dagger
'S', # section
'P', # paragraph
'copyright', # copyright
'pounds', # sterling
],
'Tabular': [
'\\[*][length]', # line break for tabular
],
'Vertical': [
'par',
'smallskip',
'bigskip',
'vspace[*]{length}',
],
'Vspace': [
'addvspace{length}',
'bigskip',
],
'Xref': [
'ref{key}',
'cite[text]{key_list}',
'pageref{key}',
'eqref{key}',
],
'Camnotes': [ # move to camnotes.json
'includevideo[*][options]{url}',
],
'Cambi': [ # move to cambi.json
'bi', 'cy', 'en', 'fr', 'de',
'eng{text}',
'cym{text}',
'wel{text}',
],
'Graphicx': [ # move to graphicx.json
'includegraphics[*][options]{file}',
'graphicspath{paths}',
'DeclareGraphicsExtensions{ext_list}',
],
'Hyperref': [ # move to hyperref.json (and have separate genera 'xref' for internal and 'href' for external)
'autoref{key}',
'nameref{key}',
'hyperref[key]{text}',
'url{url}',
'href{url}{text}',
],
'Lipsum': [ # move to lipsumdef.json
'lipsum[num]',
],
},
# ------------------------------
# environments
'environments': {
'Document': [
'document',
],
'Tabular': [
'tabular[pos]{cols}',
'tabular*{width}[pos]{cols}',
],
'List': [
'list{label}{spacing}',
'itemize[options]',
'enumerate[options]',
'description[options]',
'trivlist',
'thebibliography{widest_label}',
],
'Float': [
'table[*][options]',
'figure[*][options]',
'video[*][options]', # should be in camnotesdef.json
],
'Picture': [
'picture[options]',
'tikzpicture[options]',
'pspicture[options]',
],
'Displaymath': [
'displaymath',
'equation[*]',
'eqnarray[*]',
'align[*]',
'gather[*]',
],
'Verbatim': [
'verbatim',
'lstlisting',
],
'Align': [
'center',
'flushleft',
'flushright',
],
'Box': [
'abstract[options]',
'quote[options]',
'minipage{width}[options]',
],
'Cambi': [ # move to cambidef.json
'english',
'cymraeg',
'welsh',
]
},
# ------------------------------
# declarations
'declarations': {
'Counters': [
'newcounter{name}[master]',
'addtocounter{counter}{value}',
'setcounter{counter}{value}',
'usecounter{counter}',
'value{counter}',
'counterwithin{name}{master}',
'counterwithout{name}{master}',
],
'Length': [
'addtolength{name}{len}',
'baselineskip',
'baselinestretch',
],
'Alignment': [
'centering', # equiv. to center env
'raggedleft', # equiv. to flushright env
'raggedright', # equiv. to flushleft env
],
'FontStyle': [
'rm', 'rmfamily',
'sf', 'sffamily',
'bf', 'bfseries',
'it', 'itshape',
'sl', 'slshape',
'sc', 'scshape',
'tt', 'ttshape',
'em',
'normalfont',
],
'FontSize': [
'tiny',
'scriptsize',
'footnotesize',
'small',
'normalsize',
'large',
'Large',
'LARGE',
'huge',
'Huge',
],
'Language': [ # move to cambi.json
'bi',
'cy',
'en',
'fr',
'de',
],
},
# ------------------------------
# block_declarations (stop tokens are all cmds of the same genus)
'block_declarations': [
'Alignment',
'FontStyle',
'FontSize',
'Language',
],
# ------------------------------
# stop tokens for block commands
'block_commands': {
"chapter": ["document"],
"section": ["chapter", "document"],
"subsection": ["section", "chapter", "document"],
'subsubsection': ["subsection", "section", "chapter", "document"],
'paragraph': ["subsubsection", "subsection", "section", "chapter", "document"],
'subparagraph': ["paragraph", "subsubsection", "subsection", "section", "chapter", "document"],
"item": ["itemize", "enumerate", "list"],
"bibitem": ["thebibliography"],
},
# ------------------------------
# numbered species and master counters
'numbered': {
'chapter': 'document',
'section': 'chapter',
'subsection': 'section',
'subsubsection': 'subsection',
'paragraph': 'subsubsection',
'subparagraph': 'paragraph',
'page': 'document',
'equation': 'chapter',
'figure': 'chapter',
'table': 'chapter',
'footnote': 'chapter',
'mpfootnote': 'chapter',
'enumi': 'document',
'enumii': 'enumi',
'enumiii': 'enumii',
'enumiv': 'enumiii',
'thebibliography': 'document',
'bibitem': 'thebibliography',
# package-specific (should be moved)
'subfigure': 'figure',
'subtable': 'table',
'video': 'chapter',
},
# 'counters': {
# 'enumerate': 'document',
# 'enumi': 'enumerate',
# 'enumii': 'enumi',
# 'enumiii': 'enumii',
# 'enumiv': 'enumiii',
# 'thebibliography': 'document',
# 'bibitem': 'thebibliography',
# },
# ------------------------------
# shared counters
'numbered_like': {
'eqnarray': 'equation',
'align': 'equation',
},
# ------------------------------
# default numeric labels
# TODO: choose relative to documentclass
'marker_formats': {
# 'chapter': '\\arabic{chapter}.',
'chapter': '',
'section': '\\arabic{section}',
# 'section': '\\Roman{section}',
'subsection': '\\thesection.\\arabic{subsection}',
# 'subsection': '\\thesection.\\alph{subsection}',
'subsubsection': '\\thesubsection.\\arabic{subsubsection}',
'paragraph': '\\thesubsubsection.\\arabic{paragraph}',
'subparagraph': '\\theparagraph.\\arabic{subparagraph}',
'equation': '\\thesection.\\arabic{equation}',
'figure': '\\arabic{figure}',
'subfigure': '\\alph{subfigure}',
'table': '\\arabic{table}',
'subtable': '\\alph{subtable}',
'page': '\\arabic{page}',
'footnote': '\\arabic{footnote}',
'mpfootnote': '\\alph{footnote}',
'enumi': '\\arabic{enumi}.',
'enumii': '(\\alph{enumii})',
'enumiii': '\\roman{enumiii}.',
'enumiv': '\\Alph{enumiv}.',
},
# names (as found in babel files)
'names': {
'videoname': {
'en': 'Video',
'cy': 'Fideo',
},
'prefacename': {
'en': 'Preface',
'cy': 'Rhagair',
},
'refname': {
'en': 'References',
'cy': 'Cyfeiriadau',
},
'abstractname': {
'en': 'Abstract',
'cy': 'Crynodeb',
},
'bibname': {
'en': 'Bibliography',
'cy': 'Llyfryddiaeth',
},
'chaptername': {
'en': 'Chapter',
'cy': 'Pennod',
},
'sectionname': {
'en': 'Section',
'cy': 'Adran',
},
'subsectionname': {
'en': 'Subsection',
'cy': 'Isadran',
},
'subsubsectionname': {
'en': 'Subsubsection',
'cy': 'Isisadran',
},
'paragraphname': {
'en': 'Paragraph',
'cy': 'Paragraff',
},
'subparagraphname': {
'en': 'Subparagraph',
'cy': 'Isbaragraff',
},
'appendixname': {
'en': 'Appendix',
'cy': 'Atodiad',
},
'contentsname': {
'en': 'Contents',
'cy': 'Cynnwys',
},
'listfigurename': {
'en': 'List of Figures',
'cy': 'Rhestr Ddarluniau',
},
'listtablename': {
'en': 'List of Tables',
'cy': 'Rhestr Dablau',
},
'indexname': {
'en': 'Index',
'cy': 'Mynegai',
},
'figurename': {
'en': 'Figure',
'cy': 'Darlun',
},
'tablename': {
'en': 'Table',
'cy': 'Tabl',
},
'partname': {
'en': 'Part',
'cy': 'Rhan',
},
'enclname': {
'en': 'encl',
'cy': 'amgae\"edig',
},
'ccname': {
'en': 'cc',
'cy': 'cop\\"\\i au',
},
'headtoname': {
'en': 'To',
'cy': 'At',
},
'pagename': {
'en': 'page',
'cy': 'tudalen',
},
'seename': {
'en': 'see',
'cy': 'gweler',
},
'alsoname': {
'en': 'see also',
'cy': 'gweler hefyd',
},
'proofname': {
'en': 'Proof',
'cy': 'Prawf',
},
'glossaryname': {
'en': 'Glossary',
'cy': 'Rhestr termau',
},
},
}
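# ------------------------------
# Illustrative sketch (the real parsing happens in the parser, not here): each
# entry above is an argument-spec string, e.g. 'newcommand{name}[numargs][opt]{def}',
# i.e. a command name followed by optional [...] and mandatory {...} argument
# slots. A minimal way to split such a spec under that simple grammar:
if __name__ == '__main__':
    import re
    spec = 'newcommand{name}[numargs][opt]{def}'
    name = re.match(r'[^[{]+', spec).group(0)           # 'newcommand'
    slots = re.findall(r'\[[^\]]*\]|\{[^}]*\}', spec)   # ['{name}', '[numargs]', '[opt]', '{def}']
    print(name, slots)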
# ==============================
# mathmode (not currently used)
# If we rely on MathJax to render mathmode elements, then provided the reconstruction
# via chars() is faithful we can recover the source and place this in the output file,
# which means we can avoid defining mathmode commands explicitly.
# One day we might want output in MathML format or similar ...
#
# ... so here are some anyway!
# ==============================
mathmode_defs = {
'commands': {
'accents': [
'hat{char}',
'widehat{chars}',
'check{char}',
'tilde{char}',
'widetilde{chars}',
'acute{char}',
'grave{char}',
'dot{char}',
'ddot{char}',
'breve{char}',
'bar{char}',
'vec{char}',
],
'dots': [
'cdots', 'ddots', 'ldots', 'vdots',
],
'font': [
'mathrm{char}',
'mathit{char}',
'mathbf{char}',
'mathcal{char}',
'boldmath',
'unboldmath',
],
'misc': [
'displaystyle',
'scriptstyle',
'backslash',
'frac{num}{den}',
'text{text}',
],
'tags': [ # amsmath
'tag{key}',
],
},
'environments': {
'tabular': [
'array[pos]{cols}',
'cases',
],
},
'symbols': {
'greek': [
'alpha',
'beta',
'gamma',
'delta',
'epsilon',
'varepsilon',
'zeta',
'eta',
'theta',
'vartheta',
'iota',
'kappa',
'lambda',
'mu',
'nu',
'xi',
'pi',
'varpi',
'rho',
'varrho',
'sigma',
'varsigma',
'tau',
'upsilon',
'phi',
'varphi',
'chi',
'psi',
'omega',
'Gamma',
'Delta',
'Theta',
'Lambda',
'Xi',
'Pi',
'Sigma',
'Upsilon',
'Phi',
'Psi',
'Omega',
],
'other': [
'aleph',
'hbar',
'imath',
'jmath',
'ell',
'wp',
'Re',
'Im',
'prime',
'nabla',
'surd',
'angle',
'forall',
'exists',
'backslash',
'partial',
'infty',
'triangle',
'Box',
'Diamond',
'flat',
'natural',
'sharp',
'clubsuit',
'diamondsuit',
'heartsuit',
'spadesuit',
],
},
} |
py | b408bc6deefd15312c46d321c6a2d396e0aa7998 | import time
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from urllib.parse import urlparse
from urllib.parse import quote_plus
from .bridge import BaseBridge
class YoutubeBridge(BaseBridge):
"""A bridge that interacts with and scrapes YouTube"""
def __init__(self, driver):
self.driver = driver
def __disable_autoplay(self):
self.driver.execute_script(
"document.getElementById('movie_player').setAutonavState(1);"
)
def __click_link(self, text, timeout=3):
try:
WebDriverWait(self.driver, timeout).until(
EC.visibility_of_element_located((By.PARTIAL_LINK_TEXT, text))
)
self.driver.find_element_by_link_text(text).click()
except Exception:
pass
def __get_player_state(self):
# -1 – unstarted
# 0 – ended
# 1 – playing
# 2 – paused
# 3 – buffering
# 5 – video cued
return self.driver.execute_script(
"return document.getElementById('movie_player').getPlayerState();"
)
def __play_video(self):
self.driver.execute_script(
"""
document.getElementById('movie_player').playVideo();
"""
)
def __get_player_data(self):
return self.driver.execute_script(
"""
return document.getElementById('movie_player').getVideoData()
"""
)
def __get_player_page_data(self):
return self.driver.execute_script(
"""
data = {};
player = document.querySelector("#movie_player");
try { data['is_live'] = player.getVideoData().isLive } catch(err) {};
try { data['is_listed'] = player.getVideoData().isListed } catch(err) {};
try { data['channel_url'] = document.querySelector('.ytd-channel-name a')['href'] } catch(err) {};
try { data['channel_sub_count'] = document.querySelector("#owner-sub-count").innerText } catch(err) {};
try { data['view_count'] = document.querySelector('#info #count .view-count').innerText } catch(err) {};
try { data['posted_on'] = document.querySelector('#info #date yt-formatted-string').innerText } catch(err) {};
try { data['like_count'] = document.querySelector("yt-formatted-string[aria-label*=' likes']").ariaLabel } catch(err) {};
try { data['dislike_count'] = document.querySelector("yt-formatted-string[aria-label*=' dislikes']").ariaLabel } catch(err) {};
return data;
"""
)
def __get_video_data(self):
data = self.__get_player_data()
player_page_data = self.__get_player_page_data()
video = {
**player_page_data,
"page_type": "video",
"title": data.get("title", None),
"id": data.get("video_id", None),
"channel_name": data.get("author", None),
"is_live": player_page_data.get("is_live", None),
"is_listed": player_page_data.get("is_listed", None),
"recommendations": self.__scrape_sidebar(),
"caption_tracks": self.driver.execute_script(
"""
player = document.querySelector("#movie_player");
player.loadModule("captions");
return player.getOption("captions", "tracklist");
"""
),
}
return video
def __get_page_data(self):
return self.driver.execute_script(
"""
try {
return window.getPageData().data.response
} catch(err) {
return ytInitialData
}
"""
)
def __scrape_sidebar(self):
recs = self.driver.execute_script(
"""
return [...document.querySelectorAll("#items.ytd-watch-next-secondary-results-renderer > *")].map((d, i) => {
data = {};
try { data['item_type'] = d.tagName; } catch(err) {};
try { data['position'] = i + 1; } catch(err) {};
try { data['title'] = d.querySelector("h3").innerText } catch(err) {};
try { data['url'] = d.querySelector("a.yt-simple-endpoint")['href'] } catch(err) {};
try { data['channel_name'] = d.querySelector(".ytd-channel-name").innerText } catch(err) {};
try { data['metadata'] = d.querySelector("#metadata-line").innerText } catch(err) {};
try { data['duration_text'] = d.querySelector("span.ytd-thumbnail-overlay-time-status-renderer").innerText.trim() } catch(err) {};
try { data['thumbnail_url'] = d.querySelector("img")['src'] } catch(err) {};
return data;
})
""" # noqa: E501
)
recs = [
rec for rec in recs if rec["item_type"] != "YTD-CONTINUATION-ITEM-RENDERER"
]
return recs
def __scrape_search_results(self):
recs = self.driver.execute_script(
"""
return [...document.querySelectorAll("#contents.ytd-item-section-renderer > *")].map((d, i) => {
data = {};
try { data['item_type'] = d.tagName; } catch(err) {};
try { data['position'] = i + 1; } catch(err) {};
try { data['thumbnail_url'] = d.querySelector("img")['src'] } catch(err) {};
try { data['title'] = d.querySelector("h3").innerText } catch(err) {};
try { data['url'] = d.querySelector("a.yt-simple-endpoint")['href'] } catch(err) {};
try { data['channel_name'] = d.querySelector(".ytd-channel-name a").innerText } catch(err) {};
try { data['channel_url'] = d.querySelector(".ytd-channel-name a")['href'] } catch(err) {};
try { data['metadata'] = d.querySelector("#metadata-line").innerText } catch(err) {};
try { data['metadata'] = d.querySelector(".movie-metadata-list").innerText } catch(err) {};
try { data['duration_text'] = d.querySelector("span.ytd-thumbnail-overlay-time-status-renderer").innerText } catch(err) {};
try { data['description'] = d.querySelector("#description-text ").innerText } catch(err) {};
return data;
})
""" # noqa: E501
)
recs = [
rec
for rec in recs
if rec["item_type"] != "YTD-HORIZONTAL-CARD-LIST-RENDERER"
]
return recs
def __scrape_homepage(self):
return self.driver.execute_script(
"""
return [...document.querySelectorAll("#contents.ytd-rich-grid-renderer > ytd-rich-item-renderer")].map((d, i) => {
data = {};
try { data['item_type'] = d.tagName; } catch(err) {};
try { data['position'] = i + 1; } catch(err) {};
try { data['thumbnail_url'] = d.querySelector("img")['src'] } catch(err) {};
try { data['title'] = d.querySelector("h3").innerText } catch(err) {};
try { data['url'] = d.querySelector("a.yt-simple-endpoint")['href'] } catch(err) {};
try { data['channel_name'] = d.querySelector(".ytd-channel-name a").innerText } catch(err) {};
try { data['channel_url'] = d.querySelector(".ytd-channel-name a")['href'] } catch(err) {};
try { data['metadata'] = d.querySelector("#metadata-line").innerText } catch(err) {};
try { data['metadata'] = d.querySelector(".movie-metadata-list").innerText } catch(err) {};
try { data['duration_text'] = d.querySelector("span.ytd-thumbnail-overlay-time-status-renderer").innerText } catch(err) {};
try { data['description'] = d.querySelector("#description-text ").innerText } catch(err) {};
return data;
})
""" # noqa: E501
)
def __attempt_ad_skip(self, delay=6):
time.sleep(delay)
self.driver.find_element_by_class_name("ytp-ad-skip-button-text").click()
def __wait_for_video_completion(self, skipahead=True):
try:
# Confirm it's a video page
self.driver.find_element_by_css_selector("#movie_player")
# Wait for the player to be ready
while True:
try:
# This will throw an exception if the player
# is not initialized
self.__get_player_state()
break
except:
time.sleep(1)
except:
pass
try:
# Wait for ad overlay to show up, then click the
# skip button.
WebDriverWait(self.driver, 3).until(
EC.visibility_of_element_located((By.CLASS_NAME, "ad-showing"))
)
self.__attempt_ad_skip()
except:
# No ad
pass
if self.__get_player_state() != 1:
self.__play_video()
if skipahead:
time.sleep(1)
self.driver.execute_script(
"""
let player = document.getElementById('movie_player');
player.seekTo(player.getDuration() - 2);
player.playVideo()
"""
)
# In case we do get stuck behind an ad, we'll wait for a long time
length = self.driver.execute_script(
"return document.getElementById('movie_player').getDuration()"
)
is_live = self.driver.execute_script(
"return document.getElementById('movie_player').getVideoData().isLive"
)
if length == 0 or is_live:
# It's live, just exit
time.sleep(1)
return
elif 'list' in self.driver.current_url:
# It's a playlist, just move on because
# it won't autostop
return
else:
try:
# Wait until it's done, or up to XX seconds
# max_wait = length + 20
max_wait = 10
WebDriverWait(self.driver, max_wait).until(
lambda s: self.__get_player_state() == 0
)
self.__disable_autoplay()
except:
return
def __get_page_type(self):
# TODO remove and replace
try:
key = list(self.__get_page_data()["contents"].keys())[0]
types = {
"twoColumnWatchNextResults": "video",
"twoColumnSearchResultsRenderer": "search_results",
"twoColumnBrowseResultsRenderer": "homepage",
}
return types[key]
except:
return "unknown"
def get_data(self):
page_type = self.__get_page_type()
if page_type == "unknown":
return { "page_type": page_type }
elif page_type == "video":
return self.__get_video_data()
elif page_type == "search_results":
return {
"page_type": page_type,
"term": self.driver.find_element_by_css_selector(
"input#search"
).get_attribute("value"),
"recommendations": self.__scrape_search_results(),
}
elif page_type == "homepage":
return {"page_type": page_type, "recommendations": self.__scrape_homepage()}
def run(self, url):
parsed = urlparse(url)
# Remove popups that might get in the way of clicking
try:
self.driver.execute_script(
"""
document.querySelectorAll('.ytd-popup-container').forEach(d=> d.remove());
""" # noqa: E501
)
except Exception:
pass
# Execute the command
if parsed.scheme in ["http", "https"]:
self.driver.get(url)
if self.__get_page_type() == "video":
self.__wait_for_video_completion()
elif parsed.path == "homepage":
self.driver.get("https://www.youtube.com/")
elif parsed.path == "search":
self.driver.get(
f"https://www.youtube.com/results?search_query={quote_plus(parsed.query)}" # noqa: E501
)
elif parsed.path == "next_up":
self.driver.find_element_by_css_selector(
"ytd-compact-autoplay-renderer h3"
).click()
self.__wait_for_video_completion()
elif parsed.path == "like":
self.driver.find_element_by_xpath(
'//button[starts-with(@aria-label, "Like")]'
).click()
elif parsed.path == "dislike":
self.driver.find_element_by_xpath(
'//button[starts-with(@aria-label, "Dislike")]'
).click()
elif parsed.path == "subscribe":
self.driver.find_element_by_xpath(
'//button[starts-with(@aria-label, "Subscribe to")]'
).click()
elif parsed.path == "unsubscribe":
self.driver.find_element_by_xpath(
'//button[starts-with(@aria-label, "Unsubscribe from")]'
).click()
time.sleep(0.25)
self.driver.find_element_by_xpath(
'//button[starts-with(@aria-label, "Unsubscribe")]'
).click()
elif parsed.path == "sign_in":
try:
self.driver.find_element_by_xpath('//*[@aria-label="Sign in"]').click()
except Exception:
pass
print("Please sign in manually, I'll wait up to 10 minutes")
WebDriverWait(self.driver, 10 * 60).until(
EC.visibility_of_element_located((By.ID, "avatar-btn"))
)
else:
raise Exception(f"unknown URL {url}")
return self.get_data()
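# Illustrative usage, assuming a configured Selenium WebDriver: run() visits
# http(s) URLs directly and treats other strings as commands parsed with
# urlparse, e.g.
#     bridge = YoutubeBridge(driver)
#     bridge.run("https://www.youtube.com/watch?v=<video_id>")
#     bridge.run("homepage")
#     bridge.run("search?cute cats")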
|
py | b408bc7a91a3cc57610394378e456254f0af6d3c | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""BERT finetuning runner with sentence piece tokenization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from npu_bridge.npu_init import *
import collections
import csv
import os
import six
from six.moves import zip
import tensorflow as tf
import modeling_google as modeling
import optimization_google as optimization
import tokenization_google as tokenization
flags = tf.flags
FLAGS = flags.FLAGS
## Required parameters
flags.DEFINE_string(
"data_dir", None,
"The input data dir. Should contain the .tsv files (or other data files) "
"for the task.")
flags.DEFINE_string(
"albert_config_file", None,
"The config json file corresponding to the pre-trained ALBERT model. "
"This specifies the model architecture.")
flags.DEFINE_string("task_name", None, "The name of the task to train.")
flags.DEFINE_string(
"vocab_file", None,
"The vocabulary file that the ALBERT model was trained on.")
flags.DEFINE_string("spm_model_file", None,
"The model file for sentence piece tokenization.")
flags.DEFINE_string(
"output_dir", None,
"The output directory where the model checkpoints will be written.")
## Other parameters
flags.DEFINE_string(
"init_checkpoint", None,
"Initial checkpoint (usually from a pre-trained ALBERT model).")
flags.DEFINE_bool(
"use_pooled_output", True, "Whether to use the CLS token outputs")
flags.DEFINE_bool(
"do_lower_case", True,
"Whether to lower case the input text. Should be True for uncased "
"models and False for cased models.")
flags.DEFINE_integer(
"max_seq_length", 512,
"The maximum total input sequence length after WordPiece tokenization. "
"Sequences longer than this will be truncated, and sequences shorter "
"than this will be padded.")
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.")
flags.DEFINE_bool(
"do_predict", False,
"Whether to run the model in inference mode on the test set.")
flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.")
flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.")
flags.DEFINE_integer("predict_batch_size", 8, "Total batch size for predict.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
"Total number of training epochs to perform.")
flags.DEFINE_float(
"warmup_proportion", 0.1,
"Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
"How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
"How many steps to make in each estimator call.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
"tpu_name", None,
"The Cloud TPU to use for training. This should be either the name "
"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
"url.")
tf.flags.DEFINE_string(
"tpu_zone", None,
"[Optional] GCE zone where the Cloud TPU is located in. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string(
"gcp_project", None,
"[Optional] Project name for the Cloud TPU-enabled project. If not "
"specified, we will attempt to automatically detect the GCE project from "
"metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
"num_tpu_cores", 8,
"Only used if `use_tpu` is True. Total number of TPU cores to use.")
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs an InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class PaddingInputExample(object):
"""Fake example so that the number of input examples is a multiple of the batch size.
When running eval/predict on the TPU, we need to pad the number of examples
to be a multiple of the batch size, because the TPU requires a fixed batch
size. The alternative is to drop the last batch, which is bad because it means
the entire output data won't be generated.
We use this class instead of `None` because treating `None` as padding
batches could cause silent errors.
"""
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self,
input_ids,
input_mask,
segment_ids,
label_id,
is_real_example=True):
self.input_ids = input_ids
self.input_mask = input_mask
self.segment_ids = segment_ids
self.label_id = label_id
self.is_real_example = is_real_example
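# Example: with predict_batch_size=8 and 21 real test examples, three
# PaddingInputExample instances would typically be appended so the final TPU
# batch is full; convert_single_example marks them with is_real_example=False
# so they can be filtered out of the output afterwards.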
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class XnliProcessor(DataProcessor):
"""Processor for the XNLI data set."""
def __init__(self):
self.language = "zh"
def get_train_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(
os.path.join(data_dir, "multinli",
"multinli.train.%s.tsv" % self.language))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "train-%d" % (i)
text_a = tokenization.convert_to_unicode(line[0])
text_b = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[2])
if label == tokenization.convert_to_unicode("contradictory"):
label = tokenization.convert_to_unicode("contradiction")
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "dev-%d" % (i)
language = tokenization.convert_to_unicode(line[0])
if language != tokenization.convert_to_unicode(self.language):
continue
text_a = tokenization.convert_to_unicode(line[6])
text_b = tokenization.convert_to_unicode(line[7])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
# Note(mingdachen): We will rely on this guid for GLUE submission.
guid = tokenization.preprocess_text(line[0], lower=FLAGS.do_lower_case)
text_a = tokenization.preprocess_text(line[8], lower=FLAGS.do_lower_case)
text_b = tokenization.preprocess_text(line[9], lower=FLAGS.do_lower_case)
if set_type == "test":
label = "contradiction"
else:
label = tokenization.preprocess_text(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class LCQMCPairClassificationProcessor(DataProcessor):
"""Processor for the internal data set (sentence pair classification)."""
def __init__(self):
self.language = "zh"
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.txt")), "train")
# dev_0827.tsv
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.txt")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.txt")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
print("length of lines:",len(lines))
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
try:
label = tokenization.convert_to_unicode(line[2])
text_a = tokenization.convert_to_unicode(line[0])
text_b = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
except Exception:
print('###error.i:', i, line)
return examples
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.preprocess_text(line[3], lower=FLAGS.do_lower_case)
text_b = tokenization.preprocess_text(line[4], lower=FLAGS.do_lower_case)
if set_type == "test":
guid = line[0]
label = "0"
else:
label = tokenization.preprocess_text(line[0])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# Only the test set has a header
if set_type == "test" and i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
guid = line[0]
text_a = tokenization.preprocess_text(
line[1], lower=FLAGS.do_lower_case)
label = "0"
else:
text_a = tokenization.preprocess_text(
line[3], lower=FLAGS.do_lower_case)
label = tokenization.preprocess_text(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
def convert_single_example(ex_index, example, label_list, max_seq_length,
tokenizer):
"""Converts a single `InputExample` into a single `InputFeatures`."""
if isinstance(example, PaddingInputExample):
return InputFeatures(
input_ids=[0] * max_seq_length,
input_mask=[0] * max_seq_length,
segment_ids=[0] * max_seq_length,
label_id=0,
is_real_example=False)
label_map = {}
for (i, label) in enumerate(label_list):
label_map[label] = i
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
if tokens_b:
# Modifies `tokens_a` and `tokens_b` in place so that the total
# length is less than the specified length.
# Account for [CLS], [SEP], [SEP] with "- 3"
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
# Account for [CLS] and [SEP] with "- 2"
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[0:(max_seq_length - 2)]
# The convention in ALBERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
if tokens_b:
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length.
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
tf.logging.info("*** Example ***")
tf.logging.info("guid: %s" % (example.guid))
tf.logging.info("tokens: %s" % " ".join(
[tokenization.printable_text(x) for x in tokens]))
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
tf.logging.info("label: %s (id = %d)" % (example.label, label_id))
feature = InputFeatures(
input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id,
is_real_example=True)
return feature
def file_based_convert_examples_to_features(
examples, label_list, max_seq_length, tokenizer, output_file):
"""Convert a set of `InputExample`s to a TFRecord file."""
writer = tf.python_io.TFRecordWriter(output_file)
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_int_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
features["label_ids"] = create_int_feature([feature.label_id])
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def file_based_input_fn_builder(input_file, seq_length, is_training,
drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
name_to_features = {
"input_ids": tf.FixedLenFeature([seq_length], tf.int64),
"input_mask": tf.FixedLenFeature([seq_length], tf.int64),
"segment_ids": tf.FixedLenFeature([seq_length], tf.int64),
"label_ids": tf.FixedLenFeature([], tf.int64),
"is_real_example": tf.FixedLenFeature([], tf.int64),
}
def _decode_record(record, name_to_features):
"""Decodes a record to a TensorFlow example."""
example = tf.parse_single_example(record, name_to_features)
# tf.Example only supports tf.int64, but the TPU only supports tf.int32.
# So cast all int64 to int32.
for name in list(example.keys()):
t = example[name]
if t.dtype == tf.int64:
t = tf.to_int32(t)
example[name] = t
return example
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
# For training, we want a lot of parallel reading and shuffling.
# For eval, we want no shuffling and parallel reading doesn't matter.
d = tf.data.TFRecordDataset(input_file)
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.apply(
tf.contrib.data.map_and_batch(
lambda record: _decode_record(record, name_to_features),
batch_size=batch_size,
drop_remainder=True))
return d
return input_fn
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
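# Worked example (hypothetical numbers): with max_length=5 and token lists of
# lengths (4, 3), the loop above pops once from tokens_a (4 -> 3) and once from
# tokens_b (3 -> 2), stopping when 3 + 2 <= 5.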
def create_model(albert_config, is_training, input_ids, input_mask, segment_ids,
labels, num_labels, use_one_hot_embeddings):
"""Creates a classification model."""
model = modeling.AlbertModel(
config=albert_config,
is_training=is_training,
input_ids=input_ids,
input_mask=input_mask,
token_type_ids=segment_ids,
use_one_hot_embeddings=use_one_hot_embeddings)
# In the demo, we are doing a simple classification task on the entire
# segment.
#
# If you want to use the token-level output, use model.get_sequence_output()
# instead.
if FLAGS.use_pooled_output:
tf.logging.info("using pooled output")
output_layer = model.get_pooled_output()
else:
tf.logging.info("using meaned output")
output_layer = tf.reduce_mean(model.get_sequence_output(), axis=1)
hidden_size = output_layer.shape[-1].value
output_weights = tf.get_variable(
"output_weights", [num_labels, hidden_size],
initializer=tf.truncated_normal_initializer(stddev=0.02))
output_bias = tf.get_variable(
"output_bias", [num_labels], initializer=tf.zeros_initializer())
with tf.variable_scope("loss"):
if is_training:
# I.e., 0.1 dropout
output_layer = npu_ops.dropout(output_layer, keep_prob=0.9)
logits = tf.matmul(output_layer, output_weights, transpose_b=True)
logits = tf.nn.bias_add(logits, output_bias)
predictions = tf.argmax(logits, axis=-1, output_type=tf.int32)
probabilities = tf.nn.softmax(logits, axis=-1)
log_probs = tf.nn.log_softmax(logits, axis=-1)
one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32)
per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1)
loss = tf.reduce_mean(per_example_loss)
return (loss, per_example_loss, probabilities, predictions)
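# Shape note for the tensors returned by create_model (B = batch size,
# num_labels = number of classes): loss is a scalar, per_example_loss is [B],
# probabilities is [B, num_labels], and predictions is [B].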
def model_fn_builder(albert_config, num_labels, init_checkpoint, learning_rate,
num_train_steps, num_warmup_steps, use_tpu,
use_one_hot_embeddings):
"""Returns `model_fn` closure for TPUEstimator."""
def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
"""The `model_fn` for TPUEstimator."""
tf.logging.info("*** Features ***")
for name in sorted(features.keys()):
tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape))
input_ids = features["input_ids"]
input_mask = features["input_mask"]
segment_ids = features["segment_ids"]
label_ids = features["label_ids"]
is_real_example = None
if "is_real_example" in features:
is_real_example = tf.cast(features["is_real_example"], dtype=tf.float32)
else:
is_real_example = tf.ones(tf.shape(label_ids), dtype=tf.float32)
is_training = (mode == tf.estimator.ModeKeys.TRAIN)
(total_loss, per_example_loss, probabilities, predictions) = \
create_model(albert_config, is_training, input_ids, input_mask,
segment_ids, label_ids, num_labels, use_one_hot_embeddings)
tvars = tf.trainable_variables()
initialized_variable_names = {}
scaffold_fn = None
if init_checkpoint:
(assignment_map, initialized_variable_names
) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)
if use_tpu:
def tpu_scaffold():
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
return tf.train.Scaffold()
scaffold_fn = tpu_scaffold
else:
tf.train.init_from_checkpoint(init_checkpoint, assignment_map)
tf.logging.info("**** Trainable Variables ****")
for var in tvars:
init_string = ""
if var.name in initialized_variable_names:
init_string = ", *INIT_FROM_CKPT*"
tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape,
init_string)
output_spec = None
if mode == tf.estimator.ModeKeys.TRAIN:
train_op = optimization.create_optimizer(
total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
elif mode == tf.estimator.ModeKeys.EVAL:
def metric_fn(per_example_loss, label_ids, predictions, is_real_example):
accuracy = tf.metrics.accuracy(
labels=label_ids, predictions=predictions, weights=is_real_example)
loss = tf.metrics.mean(values=per_example_loss, weights=is_real_example)
return {
"eval_accuracy": accuracy,
"eval_loss": loss,
}
eval_metrics = (metric_fn,
[per_example_loss, label_ids,
predictions, is_real_example])
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=total_loss,
eval_metrics=eval_metrics,
scaffold_fn=scaffold_fn)
else:
output_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
predictions={"probabilities": probabilities,
"predictions": predictions},
scaffold_fn=scaffold_fn)
return output_spec
return model_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def input_fn_builder(features, seq_length, is_training, drop_remainder):
"""Creates an `input_fn` closure to be passed to TPUEstimator."""
all_input_ids = []
all_input_mask = []
all_segment_ids = []
all_label_ids = []
for feature in features:
all_input_ids.append(feature.input_ids)
all_input_mask.append(feature.input_mask)
all_segment_ids.append(feature.segment_ids)
all_label_ids.append(feature.label_id)
def input_fn(params):
"""The actual input function."""
batch_size = params["batch_size"]
num_examples = len(features)
# This is for demo purposes and does NOT scale to large data sets. We do
# not use Dataset.from_generator() because that uses tf.py_func which is
# not TPU compatible. The right way to load data is with TFRecordReader.
d = tf.data.Dataset.from_tensor_slices({
"input_ids":
tf.constant(
all_input_ids, shape=[num_examples, seq_length],
dtype=tf.int32),
"input_mask":
tf.constant(
all_input_mask,
shape=[num_examples, seq_length],
dtype=tf.int32),
"segment_ids":
tf.constant(
all_segment_ids,
shape=[num_examples, seq_length],
dtype=tf.int32),
"label_ids":
tf.constant(all_label_ids, shape=[num_examples], dtype=tf.int32),
})
if is_training:
d = d.repeat()
d = d.shuffle(buffer_size=100)
d = d.batch(batch_size=batch_size, drop_remainder=True)
return d
return input_fn
# This function is not used by this file but is still used by the Colab and
# people who depend on it.
def convert_examples_to_features(examples, label_list, max_seq_length,
tokenizer):
"""Convert a set of `InputExample`s to a list of `InputFeatures`."""
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
tf.logging.info("Writing example %d of %d" % (ex_index, len(examples)))
feature = convert_single_example(ex_index, example, label_list,
max_seq_length, tokenizer)
features.append(feature)
return features
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
processors = {
"cola": ColaProcessor,
"mnli": MnliProcessor,
"mrpc": MrpcProcessor,
"xnli": XnliProcessor,
"lcqmc_pair": LCQMCPairClassificationProcessor
}
tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,
FLAGS.init_checkpoint)
if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
raise ValueError(
"At least one of `do_train`, `do_eval` or `do_predict' must be True.")
albert_config = modeling.AlbertConfig.from_json_file(FLAGS.albert_config_file)
if FLAGS.max_seq_length > albert_config.max_position_embeddings:
raise ValueError(
"Cannot use sequence length %d because the ALBERT model "
"was only trained up to sequence length %d" %
(FLAGS.max_seq_length, albert_config.max_position_embeddings))
tf.gfile.MakeDirs(FLAGS.output_dir)
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels()
tokenizer = tokenization.FullTokenizer(
vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case,
spm_model_file=FLAGS.spm_model_file)
tpu_cluster_resolver = None
if FLAGS.use_tpu and FLAGS.tpu_name:
tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(
FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
run_config = tf.contrib.tpu.RunConfig(
cluster=tpu_cluster_resolver,
master=FLAGS.master,
model_dir=FLAGS.output_dir,
save_checkpoints_steps=FLAGS.save_checkpoints_steps,
tpu_config=tf.contrib.tpu.TPUConfig(
iterations_per_loop=FLAGS.iterations_per_loop,
num_shards=FLAGS.num_tpu_cores,
per_host_input_for_training=is_per_host))
train_examples = None
num_train_steps = None
num_warmup_steps = None
if FLAGS.do_train:
train_examples = processor.get_train_examples(FLAGS.data_dir)
num_train_steps = int(
len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)
num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
model_fn = model_fn_builder(
albert_config=albert_config,
num_labels=len(label_list),
init_checkpoint=FLAGS.init_checkpoint,
learning_rate=FLAGS.learning_rate,
num_train_steps=num_train_steps,
num_warmup_steps=num_warmup_steps,
use_tpu=FLAGS.use_tpu,
use_one_hot_embeddings=FLAGS.use_tpu)
# If TPU is not available, this will fall back to normal Estimator on CPU
# or GPU.
estimator = tf.contrib.tpu.TPUEstimator(
use_tpu=False,
model_fn=model_fn,
config=npu_run_config_init(run_config=run_config),
train_batch_size=FLAGS.train_batch_size,
eval_batch_size=FLAGS.eval_batch_size,
predict_batch_size=FLAGS.predict_batch_size, eval_on_tpu=False, export_to_tpu=False)
if FLAGS.do_train:
train_file = os.path.join(FLAGS.output_dir, "train.tf_record")
file_based_convert_examples_to_features(
train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file)
tf.logging.info("***** Running training *****")
tf.logging.info(" Num examples = %d", len(train_examples))
tf.logging.info(" Batch size = %d", FLAGS.train_batch_size)
tf.logging.info(" Num steps = %d", num_train_steps)
train_input_fn = file_based_input_fn_builder(
input_file=train_file,
seq_length=FLAGS.max_seq_length,
is_training=True,
drop_remainder=True)
estimator.train(input_fn=train_input_fn, max_steps=num_train_steps, hooks=npu_hooks_append())
if FLAGS.do_eval:
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
num_actual_eval_examples = len(eval_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on. These do NOT count towards the metric (all tf.metrics
# support a per-instance weight, and these get a weight of 0.0).
while len(eval_examples) % FLAGS.eval_batch_size != 0:
eval_examples.append(PaddingInputExample())
eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record")
file_based_convert_examples_to_features(
eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file)
tf.logging.info("***** Running evaluation *****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(eval_examples), num_actual_eval_examples,
len(eval_examples) - num_actual_eval_examples)
tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size)
# This tells the estimator to run through the entire set.
eval_steps = None
# However, if running eval on the TPU, you will need to specify the
# number of steps.
if FLAGS.use_tpu:
assert len(eval_examples) % FLAGS.eval_batch_size == 0
eval_steps = int(len(eval_examples) // FLAGS.eval_batch_size)
eval_drop_remainder = True if FLAGS.use_tpu else False
eval_input_fn = file_based_input_fn_builder(
input_file=eval_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=eval_drop_remainder)
#######################################################################################################################
    # evaluate all checkpoints; you can use the checkpoint with the best dev accuracy
steps_and_files = []
filenames = tf.gfile.ListDirectory(FLAGS.output_dir)
for filename in filenames:
if filename.endswith(".index"):
ckpt_name = filename[:-6]
cur_filename = os.path.join(FLAGS.output_dir, ckpt_name)
global_step = int(cur_filename.split("-")[-1])
tf.logging.info("Add {} to eval list.".format(cur_filename))
steps_and_files.append([global_step, cur_filename])
steps_and_files = sorted(steps_and_files, key=lambda x: x[0])
output_eval_file = os.path.join(FLAGS.data_dir, "eval_results_albert_zh.txt")
print("output_eval_file:",output_eval_file)
tf.logging.info("output_eval_file:"+output_eval_file)
with tf.gfile.GFile(output_eval_file, "w") as writer:
for global_step, filename in sorted(steps_and_files, key=lambda x: x[0]):
result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps, checkpoint_path=filename)
tf.logging.info("***** Eval results %s *****" % (filename))
writer.write("***** Eval results %s *****\n" % (filename))
for key in sorted(result.keys()):
tf.logging.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
#######################################################################################################################
# result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps)
# output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt")
# with tf.gfile.GFile(output_eval_file, "w") as writer:
# tf.logging.info("***** Eval results *****")
# for key in sorted(result.keys()):
# tf.logging.info(" %s = %s", key, str(result[key]))
# writer.write("%s = %s\n" % (key, str(result[key])))
if FLAGS.do_predict:
predict_examples = processor.get_test_examples(FLAGS.data_dir)
num_actual_predict_examples = len(predict_examples)
if FLAGS.use_tpu:
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on.
while len(predict_examples) % FLAGS.predict_batch_size != 0:
predict_examples.append(PaddingInputExample())
predict_file = os.path.join(FLAGS.output_dir, "predict.tf_record")
file_based_convert_examples_to_features(predict_examples, label_list,
FLAGS.max_seq_length, tokenizer,
predict_file)
tf.logging.info("***** Running prediction*****")
tf.logging.info(" Num examples = %d (%d actual, %d padding)",
len(predict_examples), num_actual_predict_examples,
len(predict_examples) - num_actual_predict_examples)
tf.logging.info(" Batch size = %d", FLAGS.predict_batch_size)
predict_drop_remainder = True if FLAGS.use_tpu else False
predict_input_fn = file_based_input_fn_builder(
input_file=predict_file,
seq_length=FLAGS.max_seq_length,
is_training=False,
drop_remainder=predict_drop_remainder)
result = estimator.predict(input_fn=predict_input_fn)
output_predict_file = os.path.join(FLAGS.output_dir, "test_results.tsv")
output_submit_file = os.path.join(FLAGS.output_dir, "submit_results.tsv")
with tf.gfile.GFile(output_predict_file, "w") as pred_writer,\
tf.gfile.GFile(output_submit_file, "w") as sub_writer:
num_written_lines = 0
tf.logging.info("***** Predict results *****")
for (i, (example, prediction)) in\
enumerate(zip(predict_examples, result)):
probabilities = prediction["probabilities"]
if i >= num_actual_predict_examples:
break
output_line = "\t".join(
str(class_probability)
for class_probability in probabilities) + "\n"
pred_writer.write(output_line)
actual_label = label_list[int(prediction["predictions"])]
sub_writer.write(
six.ensure_str(example.guid) + "\t" + actual_label + "\n")
num_written_lines += 1
assert num_written_lines == num_actual_predict_examples
if __name__ == "__main__":
flags.mark_flag_as_required("data_dir")
flags.mark_flag_as_required("task_name")
flags.mark_flag_as_required("vocab_file")
flags.mark_flag_as_required("albert_config_file")
flags.mark_flag_as_required("output_dir")
tf.app.run() |
py | b408bd017d004036e0b21aa1042676ab3db7fe0b | import mmcv
import numpy as np
import os
from tools.data_converter.s3dis_data_utils import S3DISData, S3DISSegData
from tools.data_converter.scannet_data_utils import ScanNetData, ScanNetSegData
from tools.data_converter.sunrgbd_data_utils import SUNRGBDData
def create_indoor_info_file(data_path,
pkl_prefix='sunrgbd',
save_path=None,
use_v1=False,
workers=4):
"""Create indoor information file.
Get information of the raw data and save it to the pkl file.
Args:
data_path (str): Path of the data.
pkl_prefix (str): Prefix of the pkl to be saved. Default: 'sunrgbd'.
save_path (str): Path of the pkl to be saved. Default: None.
use_v1 (bool): Whether to use v1. Default: False.
workers (int): Number of threads to be used. Default: 4.
"""
assert os.path.exists(data_path)
assert pkl_prefix in ['sunrgbd', 'scannet', 's3dis'], \
f'unsupported indoor dataset {pkl_prefix}'
save_path = data_path if save_path is None else save_path
assert os.path.exists(save_path)
# generate infos for both detection and segmentation task
if pkl_prefix in ['sunrgbd', 'scannet']:
train_filename = os.path.join(save_path,
f'{pkl_prefix}_infos_train.pkl')
val_filename = os.path.join(save_path, f'{pkl_prefix}_infos_val.pkl')
if pkl_prefix == 'sunrgbd':
# SUN RGB-D has a train-val split
train_dataset = SUNRGBDData(
root_path=data_path, split='train', use_v1=use_v1)
val_dataset = SUNRGBDData(
root_path=data_path, split='val', use_v1=use_v1)
else:
# ScanNet has a train-val-test split
train_dataset = ScanNetData(root_path=data_path, split='train')
val_dataset = ScanNetData(root_path=data_path, split='val')
test_dataset = ScanNetData(root_path=data_path, split='test')
test_filename = os.path.join(save_path,
f'{pkl_prefix}_infos_test.pkl')
infos_train = train_dataset.get_infos(
num_workers=workers, has_label=True)
mmcv.dump(infos_train, train_filename, 'pkl')
print(f'{pkl_prefix} info train file is saved to {train_filename}')
infos_val = val_dataset.get_infos(num_workers=workers, has_label=True)
mmcv.dump(infos_val, val_filename, 'pkl')
print(f'{pkl_prefix} info val file is saved to {val_filename}')
if pkl_prefix == 'scannet':
infos_test = test_dataset.get_infos(
num_workers=workers, has_label=False)
mmcv.dump(infos_test, test_filename, 'pkl')
print(f'{pkl_prefix} info test file is saved to {test_filename}')
# generate infos for the semantic segmentation task
# e.g. re-sampled scene indexes and label weights
# scene indexes are used to re-sample rooms with different number of points
# label weights are used to balance classes with different number of points
if pkl_prefix == 'scannet':
# label weight computation function is adopted from
# https://github.com/charlesq34/pointnet2/blob/master/scannet/scannet_dataset.py#L24
train_dataset = ScanNetSegData(
data_root=data_path,
ann_file=train_filename,
split='train',
num_points=8192,
label_weight_func=lambda x: 1.0 / np.log(1.2 + x))
# TODO: do we need to generate on val set?
val_dataset = ScanNetSegData(
data_root=data_path,
ann_file=val_filename,
split='val',
num_points=8192,
label_weight_func=lambda x: 1.0 / np.log(1.2 + x))
# no need to generate for test set
train_dataset.get_seg_infos()
val_dataset.get_seg_infos()
elif pkl_prefix == 's3dis':
# S3DIS doesn't have a fixed train-val split
# it has 6 areas instead, so we generate info file for each of them
# in training, we will use dataset to wrap different areas
splits = [f'Area_{i}' for i in [1, 2, 3, 4, 5, 6]]
for split in splits:
dataset = S3DISData(root_path=data_path, split=split)
info = dataset.get_infos(num_workers=workers, has_label=True)
filename = os.path.join(save_path,
f'{pkl_prefix}_infos_{split}.pkl')
mmcv.dump(info, filename, 'pkl')
print(f'{pkl_prefix} info {split} file is saved to {filename}')
seg_dataset = S3DISSegData(
data_root=data_path,
ann_file=filename,
split=split,
num_points=4096,
label_weight_func=lambda x: 1.0 / np.log(1.2 + x))
seg_dataset.get_seg_infos()
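# Illustrative call (a sketch, not part of the original module; the path below
# is hypothetical and must already contain the extracted ScanNet files):
# create_indoor_info_file('./data/scannet', pkl_prefix='scannet', workers=4)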
|
py | b408bd8df6c4c92285d8f7d06a00a67ed574f2eb | # Copyright (c) Microsoft Corporation and contributors.
# Licensed under the MIT License.
import warnings
from collections.abc import Iterable
from functools import reduce
from pathlib import Path
from typing import Any, Dict, List, Tuple, Union
import networkx as nx
import numpy as np
import pandas as pd
import scipy.sparse
from scipy.optimize import linear_sum_assignment
from scipy.sparse import csgraph, csr_matrix, diags, isspmatrix_csr
from scipy.sparse.csgraph import connected_components
from sklearn.metrics import confusion_matrix
from sklearn.utils import check_array, check_consistent_length, column_or_1d
from sklearn.utils.multiclass import type_of_target, unique_labels
def import_graph(graph, copy=True):
"""
A function for reading a graph and returning a shared data type.
Parameters
----------
graph: object
Either array-like, shape (n_vertices, n_vertices) numpy array,
a scipy.sparse.csr_matrix, or an object of type networkx.Graph.
copy: bool, (default=True)
Whether to return a copied version of array. If False and input is np.array,
the output returns the original input.
Returns
-------
out: array-like, shape (n_vertices, n_vertices)
A graph.
See Also
--------
networkx.Graph, numpy.array
"""
if isinstance(graph, (nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)):
out = nx.to_numpy_array(graph, nodelist=sorted(graph.nodes), dtype=np.float)
elif isinstance(graph, (np.ndarray, np.memmap, csr_matrix)):
shape = graph.shape
if len(shape) > 3:
msg = "Input tensor must have at most 3 dimensions, not {}.".format(
len(shape)
)
raise ValueError(msg)
elif len(shape) == 3:
if shape[1] != shape[2]:
msg = "Input tensor must have same number of vertices."
raise ValueError(msg)
min_features = shape[1]
min_samples = 2
else:
min_features = np.max(shape)
min_samples = min_features
out = check_array(
graph,
dtype=[np.float64, np.float32],
accept_sparse=True,
ensure_2d=True,
allow_nd=True, # For omni tensor input
ensure_min_features=min_features,
ensure_min_samples=min_samples,
copy=copy,
)
else:
msg = "Input must be networkx.Graph, np.array, or scipy.sparse.csr_matrix,\
not {}.".format(
type(graph)
)
raise TypeError(msg)
return out
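# Minimal usage sketch for import_graph (illustrative values):
# >>> g = nx.path_graph(3)
# >>> import_graph(g)
# array([[0., 1., 0.],
#        [1., 0., 1.],
#        [0., 1., 0.]])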
def import_edgelist(
path, extension="edgelist", delimiter=None, nodetype=int, return_vertices=False
):
"""
Function for reading a single or multiple edgelists. When importing multiple
edgelists, the union of vertices from all graphs is computed so that each output
graph have matched vertex set. The order of nodes are sorted by node values.
Parameters
----------
path : str, Path object, or iterable
If ``path`` is a directory, then the importing order will be sorted in
alphabetical order.
extension : str, optional
If ``path`` is a directory, then the function will convert all files
with matching extension.
delimiter : str or None, default=None, optional
Delimiter of edgelist. If None, the delimiter is whitespace.
nodetype : int (default), float, str, Python type, optional
Convert node data from strings to specified type.
return_vertices : bool, default=False, optional
Returns the union of all individual edgelists.
Returns
-------
out : list of array-like, or array-like, shape (n_vertices, n_vertices)
If ``path`` is a directory, a list of arrays is returned. If ``path`` is a file,
an array is returned.
vertices : array-like, shape (n_vertices, )
        If ``return_vertices`` is True, then returns an array of all vertices that were
included in the output graphs.
"""
# p = Path(path)
if not isinstance(path, (str, Path, Iterable)):
msg = "path must be a string or Iterable, not {}".format(type(path))
raise TypeError(msg)
# get a list of files to import
if isinstance(path, (str, Path)):
p = Path(path)
if p.is_dir():
files = sorted(p.glob("*" + extension))
elif p.is_file():
files = [p]
else:
raise ValueError("No graphs founds to import.")
else: # path is an iterable
files = [Path(f) for f in path]
if len(files) == 0:
msg = "No files found with '{}' extension found.".format(extension)
raise ValueError(msg)
graphs = [
nx.read_weighted_edgelist(f, nodetype=nodetype, delimiter=delimiter)
for f in files
]
if all(len(G.nodes) == 0 for G in graphs):
msg = (
"All graphs have 0 vertices. Please double check if proper "
+ "'delimiter' is given."
)
warnings.warn(msg, UserWarning)
# Compute union of all vertices
vertices = np.sort(reduce(np.union1d, [G.nodes for G in graphs]))
for g in graphs:
g.add_nodes_from(vertices)
out = [nx.to_numpy_array(G, nodelist=vertices, dtype=np.float) for G in graphs]
# only return adjacency matrix if input is only 1 graph
if len(out) == 1:
out = out[0]
if return_vertices:
return out, vertices
else:
return out
def is_symmetric(X):
return abs(X - X.T).sum() == 0
def is_loopless(X):
return not np.any(np.diag(X) != 0)
def is_unweighted(
graph: Union[
np.ndarray,
scipy.sparse.csr_matrix,
nx.Graph,
nx.DiGraph,
nx.MultiGraph,
nx.MultiDiGraph,
],
weight_attribute: Any = "weight",
):
"""
Attempts to determine if the provided graph is weighted.
Parameters
----------
graph : Union[np.ndarray, scipy.sparse.csr_matrix, nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDigraph]
The graph to test for weightedness. If a networkx graph, we can just ask it directly by querying the weight
attribute specified on every edge. It's possible an individual edge can be weighted but the full graph is not.
If an adjacency matrix defined by a numpy.ndarray or scipy.sparse.csr_matrix, we check every value; if
they are only 0 and 1, we claim the graph is unweighted.
weight_attribute : Any
Default is ``weight``. Only used for networkx, and used on the edge data dictionary as a key to look up the
weight.
Returns
-------
bool
True if unweighted, False if weighted
Raises
------
TypeError
If the provided graph is not a numpy.ndarray, scipy.sparse.csr_matrix, or nx.Graph
"""
if isinstance(graph, np.ndarray):
return ((graph == 0) | (graph == 1)).all()
elif isinstance(graph, csr_matrix):
# brute force. if anyone has a better way, please PR
rows, columns = graph.nonzero()
for i in range(0, len(rows)):
if graph[rows[i], columns[i]] != 1 and graph[rows[i], columns[i]] != 0:
return False
return True
elif isinstance(graph, nx.Graph):
return nx.is_weighted(graph, weight=weight_attribute)
else:
raise TypeError(
"This function only works on numpy.ndarray or scipy.sparse.csr_matrix instances"
)
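# Quick sketch of is_unweighted on dense input (illustrative values):
# >>> is_unweighted(np.array([[0, 1], [1, 0]]))
# True
# >>> is_unweighted(np.array([[0, 2.5], [2.5, 0]]))
# False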
def is_almost_symmetric(X, atol=1e-15):
if (X.ndim != 2) or (X.shape[0] != X.shape[1]):
return False
if isinstance(X, (np.ndarray, scipy.sparse.spmatrix)):
return abs(X - X.T).max() <= atol
else:
raise TypeError("input a correct matrix type.")
def symmetrize(graph, method="avg"):
"""
A function for forcing symmetry upon a graph.
Parameters
----------
graph: object
Either array-like, (n_vertices, n_vertices) numpy matrix,
or an object of type networkx.Graph.
method: {'avg' (default), 'triu', 'tril',}, optional
An option indicating which half of the edges to
retain when symmetrizing.
- 'avg'
Retain the average weight between the upper and lower
right triangle, of the adjacency matrix.
- 'triu'
Retain the upper right triangle.
- 'tril'
Retain the lower left triangle.
Returns
-------
graph: array-like, shape (n_vertices, n_vertices)
Graph with asymmetries removed.
Examples
--------
>>> a = np.array([
... [0, 1, 1],
... [0, 0, 1],
... [0, 0, 1]])
>>> symmetrize(a, method="triu")
array([[0, 1, 1],
[1, 0, 1],
[1, 1, 1]])
"""
# graph = import_graph(graph)
sparse = isspmatrix_csr(graph)
pac = scipy.sparse if sparse else np
if method == "triu":
graph = pac.triu(graph)
elif method == "tril":
graph = pac.tril(graph)
elif method == "avg":
graph = (pac.triu(graph) + pac.tril(graph)) / 2
else:
msg = "You have not passed a valid parameter for the method."
raise ValueError(msg)
dia = diags(graph.diagonal()) if sparse else np.diag(np.diag(graph))
graph = graph + graph.T - dia
return graph
def remove_loops(graph):
"""
A function to remove loops from a graph.
Parameters
----------
graph: object
Either array-like, (n_vertices, n_vertices) numpy matrix,
or an object of type networkx.Graph.
Returns
-------
graph: array-like, shape(n_vertices, n_vertices)
the graph with self-loops (edges between the same node) removed.
"""
graph = import_graph(graph)
dia = diags(graph.diagonal()) if isspmatrix_csr(graph) else np.diag(np.diag(graph))
graph = graph - dia
return graph
def to_laplacian(graph, form="DAD", regularizer=None):
r"""
A function to convert graph adjacency matrix to graph Laplacian.
Currently supports I-DAD, DAD, and R-DAD Laplacians, where D is the diagonal
matrix of degrees of each node raised to the -1/2 power, I is the
identity matrix, and A is the adjacency matrix.
R-DAD is regularized Laplacian: where :math:`D_t = D + regularizer \times I`.
Parameters
----------
graph: object
Either array-like, (n_vertices, n_vertices) numpy array,
scipy.sparse.csr_matrix, or an object of type networkx.Graph.
    form: {'I-DAD', 'DAD' (default), 'R-DAD'}, string, optional
- 'I-DAD'
Computes :math:`L = I - D_i A D_i`
- 'DAD'
Computes :math:`L = D_o A D_i`
- 'R-DAD'
Computes :math:`L = D_o^r A D_i^r`
where :math:`D_o^r = D_o + regularizer \times I` and likewise for :math:`D_i`
regularizer: int, float or None, optional (default=None)
Constant to add to the degree vector(s). If None, average node degree is added.
If int or float, must be >= 0. Only used when ``form`` is 'R-DAD'.
Returns
-------
L : numpy.ndarray
2D (n_vertices, n_vertices) array representing graph
Laplacian of specified form
References
----------
.. [1] Qin, Tai, and Karl Rohe. "Regularized spectral clustering
under the degree-corrected stochastic blockmodel." In Advances
in Neural Information Processing Systems, pp. 3120-3128. 2013
.. [2] Rohe, Karl, Tai Qin, and Bin Yu. "Co-clustering directed graphs to discover
asymmetries and directional communities." Proceedings of the National Academy
of Sciences 113.45 (2016): 12679-12684.
Examples
--------
>>> a = np.array([
... [0, 1, 1],
... [1, 0, 0],
... [1, 0, 0]])
>>> to_laplacian(a, "DAD")
array([[0. , 0.70710678, 0.70710678],
[0.70710678, 0. , 0. ],
[0.70710678, 0. , 0. ]])
"""
valid_inputs = ["I-DAD", "DAD", "R-DAD"]
if form not in valid_inputs:
raise TypeError("Unsuported Laplacian normalization")
A = import_graph(graph)
in_degree = np.reshape(np.asarray(A.sum(axis=0)), (-1,))
out_degree = np.reshape(np.asarray(A.sum(axis=1)), (-1,))
# regularize laplacian with parameter
# set to average degree
if form == "R-DAD":
if regularizer is None:
regularizer = np.mean(out_degree)
elif not isinstance(regularizer, (int, float)):
raise TypeError(
"Regularizer must be a int or float, not {}".format(type(regularizer))
)
elif regularizer < 0:
raise ValueError("Regularizer must be greater than or equal to 0")
in_degree += regularizer
out_degree += regularizer
with np.errstate(divide="ignore"):
in_root = 1 / np.sqrt(in_degree) # this is 10x faster than ** -0.5
out_root = 1 / np.sqrt(out_degree)
diag = diags if isspmatrix_csr(graph) else np.diag
in_root[np.isinf(in_root)] = 0
out_root[np.isinf(out_root)] = 0
in_root = diag(in_root) # just change to sparse diag for sparse support
out_root = diag(out_root)
if form == "I-DAD":
L = diag(in_degree) - A
L = in_root @ L @ in_root
elif form == "DAD" or form == "R-DAD":
L = out_root @ A @ in_root
if is_symmetric(A):
return symmetrize(
L, method="avg"
) # sometimes machine prec. makes this necessary
return L
def is_fully_connected(graph):
r"""
Checks whether the input graph is fully connected in the undirected case
or weakly connected in the directed case.
Connected means one can get from any vertex :math:`u` to vertex :math:`v` by traversing
the graph. For a directed graph, weakly connected means that the graph
is connected after it is converted to an unweighted graph (ignore the
direction of each edge)
Parameters
----------
graph: nx.Graph, nx.DiGraph, nx.MultiDiGraph, nx.MultiGraph,
scipy.sparse.csr_matrix, np.ndarray
Input graph in any of the above specified formats. If np.ndarray,
interpreted as an :math:`n \times n` adjacency matrix
Returns
-------
boolean: True if the entire input graph is connected
References
----------
http://mathworld.wolfram.com/ConnectedGraph.html
http://mathworld.wolfram.com/WeaklyConnectedDigraph.html
Examples
--------
>>> a = np.array([
... [0, 1, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> is_fully_connected(a)
False
"""
if isinstance(graph, (np.ndarray, csr_matrix)):
directed = not is_symmetric(graph)
n_components = connected_components(
csgraph=graph, directed=directed, connection="weak", return_labels=False
)
return n_components == 1
else:
if type(graph) in [nx.Graph, nx.MultiGraph]:
return nx.is_connected(graph)
elif type(graph) in [nx.DiGraph, nx.MultiDiGraph]:
return nx.is_weakly_connected(graph)
def largest_connected_component(
graph: Union[
nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph, np.ndarray, csr_matrix
],
return_inds: bool = False,
) -> Union[
nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph, np.ndarray, csr_matrix
]:
r"""
Finds the largest connected component for the input graph.
The largest connected component is the fully connected subgraph
which has the most nodes.
Parameters
----------
graph: nx.Graph, nx.DiGraph, nx.MultiDiGraph, nx.MultiGraph, np.ndarray, scipy.sparse.csr_matrix
Input graph in any of the above specified formats. If np.ndarray or
scipy.sparse.csr_matrix interpreted as an :math:`n \times n` adjacency matrix.
return_inds: boolean, default: False
Whether to return a np.ndarray containing the indices/nodes in the original
adjacency matrix that were kept and are now in the returned graph.
Returns
-------
graph: nx.Graph, nx.DiGraph, nx.MultiDiGraph, nx.MultiGraph, np.ndarray, scipy.sparse.csr_matrix
New graph of the largest connected component, returned in the input format.
inds: (optional)
Indices/nodes from the original adjacency matrix that were kept after taking
the largest connected component.
Notes
-----
For networks input in ``scipy.sparse.csr_matrix`` format, explicit zeros are removed
prior to finding the largest connected component, thus they are not treated as
edges. This differs from the convention in
:func:`scipy.sparse.csgraph.connected_components`.
"""
if isinstance(graph, (nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph)):
return _largest_connected_component_networkx(graph, return_inds=return_inds)
elif isinstance(graph, (np.ndarray, csr_matrix)):
return _largest_connected_component_adjacency(graph, return_inds=return_inds)
else:
msg = (
"`graph` must either be a networkx graph or an adjacency matrix in"
" numpy ndarray or scipy csr_matrix format."
)
raise TypeError(msg)
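# Usage sketch: vertex 2 below is isolated, so only the 2x2 block over
# vertices 0 and 1 is kept (values are illustrative):
# >>> a = np.array([[0, 1, 0], [1, 0, 0], [0, 0, 0]])
# >>> largest_connected_component(a, return_inds=True)
# (array([[0, 1],
#        [1, 0]]), array([0, 1]))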
def _largest_connected_component_networkx(
graph: Union[nx.Graph, nx.DiGraph, nx.MultiGraph, nx.MultiDiGraph],
return_inds: bool = False,
):
if type(graph) in [nx.Graph, nx.MultiGraph]:
lcc_nodes = max(nx.connected_components(graph), key=len)
elif type(graph) in [nx.DiGraph, nx.MultiDiGraph]:
lcc_nodes = max(nx.weakly_connected_components(graph), key=len)
lcc = graph.subgraph(lcc_nodes).copy()
lcc.remove_nodes_from([n for n in lcc if n not in lcc_nodes])
    if return_inds:
        nodelist = np.array(list(lcc_nodes))
        return lcc, nodelist
    else:
        return lcc
def _largest_connected_component_adjacency(
adjacency: Union[np.ndarray, csr_matrix],
return_inds: bool = False,
):
if isinstance(adjacency, csr_matrix):
adjacency.eliminate_zeros()
# If you treat an undirected graph as directed and take the largest weakly connected
# component, you'll get the same answer as taking the largest connected component of
# that undirected graph. So I wrote it this way to avoid the cost of checking for
# directedness, and it makes the code simpler too.
n_components, labels = csgraph.connected_components(
adjacency, directed=True, connection="weak", return_labels=True
)
if n_components > 1:
unique_labels, counts = np.unique(labels, return_counts=True)
lcc_label_ind = np.argmax(counts) # LCC is the component with the most nodes,
# so it is the component label with the highest count in the label array
lcc_label = unique_labels[lcc_label_ind] # grab the component label for the LCC
lcc_mask = labels == lcc_label # create a boolean mask array for where the
# component labels equal that of the largest connected component
lcc = adjacency[lcc_mask][:, lcc_mask] # mask the adjacency matrix to only LCC
else:
lcc = adjacency
lcc_mask = np.ones(adjacency.shape[0], dtype=bool)
if return_inds:
all_inds = np.arange(adjacency.shape[0])
lcc_inds = all_inds[lcc_mask]
return lcc, lcc_inds
else:
return lcc
def multigraph_lcc_union(graphs, return_inds=False):
r"""
Finds the union of all multiple graphs, then compute the largest connected
component.
Parameters
----------
graphs: list or np.ndarray
List of array-like, (n_vertices, n_vertices), or list of np.ndarray
nx.Graph, nx.DiGraph, nx.MultiDiGraph, nx.MultiGraph.
return_inds: boolean, default: False
Whether to return a np.ndarray containing the indices in the original
adjacency matrix that were kept and are now in the returned graph.
Ignored when input is networkx object
Returns
-------
out : list or np.ndarray
        If the input was a list, a list of the input graphs restricted to the
        largest connected component of their union; if the input was an
        np.ndarray, the correspondingly restricted array.
"""
if isinstance(graphs, list):
if not isinstance(graphs[0], np.ndarray):
raise NotImplementedError
out = [import_graph(g) for g in graphs]
if len(set(map(np.shape, out))) != 1:
msg = "All input graphs must have the same size"
raise ValueError(msg)
bar = np.stack(out).mean(axis=0)
elif isinstance(graphs, np.ndarray):
shape = graphs.shape
if shape[1] != shape[2]:
msg = "Input graphs must be square"
raise ValueError(msg)
bar = graphs.mean(axis=0)
else:
msg = "Expected list or np.ndarray, but got {} instead.".format(type(graphs))
raise ValueError(msg)
_, idx = largest_connected_component(bar, return_inds=True)
idx = np.array(idx)
if isinstance(graphs, np.ndarray):
        # restrict the tensor of graphs to the LCC of the union (assign the result)
        graphs = graphs[:, idx[:, None], idx]
elif isinstance(graphs, list):
if isinstance(graphs[0], np.ndarray):
graphs = [g[idx[:, None], idx] for g in graphs]
if return_inds:
return graphs, idx
return graphs
def multigraph_lcc_intersection(graphs, return_inds=False):
r"""
    Finds the intersection of multiple graphs' largest connected components.
Computes the largest connected component for each graph that was input, and
takes the intersection over all of these resulting graphs. Note that this
does not guarantee finding the largest graph where every node is shared among
all of the input graphs.
Parameters
----------
graphs: list or np.ndarray
if list, each element must be an :math:`n \times n` np.ndarray adjacency matrix
return_inds: boolean, default: False
Whether to return a np.ndarray containing the indices in the original
adjacency matrix that were kept and are now in the returned graph.
Ignored when input is networkx object
Returns
-------
graph: nx.Graph, nx.DiGraph, nx.MultiDiGraph, nx.MultiGraph, np.ndarray
New graph of the largest connected component of the input parameter.
inds: (optional)
Indices from the original adjacency matrix that were kept after taking
the largest connected component
"""
lcc_by_graph = []
inds_by_graph = []
for graph in graphs:
lcc, inds = largest_connected_component(graph, return_inds=True)
lcc_by_graph.append(lcc)
inds_by_graph.append(inds)
inds_intersection = reduce(np.intersect1d, inds_by_graph)
new_graphs = []
for graph in graphs:
if type(graph) is np.ndarray:
lcc = graph[inds_intersection, :][:, inds_intersection]
else:
lcc = graph.subgraph(inds_intersection).copy()
lcc.remove_nodes_from([n for n in lcc if n not in inds_intersection])
new_graphs.append(lcc)
# this is not guaranteed be connected after one iteration because taking the
# intersection of nodes among graphs can cause some components to become
# disconnected, so, we check for this and run again if necessary
recurse = False
for new_graph in new_graphs:
if not is_fully_connected(new_graph):
recurse = True
break
if recurse:
new_graphs, new_inds_intersection = multigraph_lcc_intersection(
new_graphs, return_inds=True
)
# new inds intersection are the indices of new_graph that were kept on recurse
# need to do this because indices could have shifted during recursion
if type(graphs[0]) is np.ndarray:
inds_intersection = inds_intersection[new_inds_intersection]
else:
inds_intersection = new_inds_intersection
if type(graphs) != list:
new_graphs = np.stack(new_graphs)
if return_inds:
return new_graphs, inds_intersection
else:
return new_graphs
def augment_diagonal(graph, weight=1):
r"""
Replaces the diagonal of an adjacency matrix with :math:`\frac{d}{nverts - 1}` where
:math:`d` is the degree vector for an unweighted graph and the sum of magnitude of
edge weights for each node for a weighted graph. For a directed graph the in/out
:math:`d` is averaged.
Parameters
----------
graph: nx.Graph, nx.DiGraph, nx.MultiDiGraph, nx.MultiGraph, np.ndarray,
        scipy.sparse.csr_matrix.
Input graph in any of the above specified formats. If np.ndarray,
interpreted as an :math:`n \times n` adjacency matrix
weight: float/int
scalar value to multiply the new diagonal vector by
Returns
-------
graph : np.array
Adjacency matrix with average degrees added to the diagonal.
Examples
--------
>>> a = np.array([
... [0, 1, 1],
... [1, 0, 0],
... [1, 0, 0]])
>>> augment_diagonal(a)
array([[1. , 1. , 1. ],
[1. , 0.5, 0. ],
[1. , 0. , 0.5]])
"""
graph = import_graph(graph)
graph = remove_loops(graph)
divisor = graph.shape[0] - 1
in_degrees = np.squeeze(np.asarray(abs(graph).sum(axis=0)))
out_degrees = np.squeeze(np.asarray(abs(graph).sum(axis=1)))
degrees = (in_degrees + out_degrees) / 2
diag = weight * degrees / divisor
graph += diags(diag) if isspmatrix_csr(graph) else np.diag(diag)
return graph
def binarize(graph):
"""
Binarize the input adjacency matrix.
Parameters
----------
graph: nx.Graph, nx.DiGraph, nx.MultiDiGraph, nx.MultiGraph, np.ndarray
Input graph in any of the above specified formats. If np.ndarray,
interpreted as an :math:`n \times n` adjacency matrix
Returns
-------
graph : np.array
Adjacency matrix with all nonzero values transformed to one.
Examples
--------
>>> a = np.array([[0, 1, 2], [1, 0, 3], [2, 3, 0]])
>>> binarize(a)
array([[0., 1., 1.],
[1., 0., 1.],
[1., 1., 0.]])
"""
graph = import_graph(graph)
graph[graph != 0] = 1
return graph
def cartesian_product(*arrays):
"""
Compute the cartesian product of multiple arrays
"""
N = len(arrays)
return np.transpose(
np.meshgrid(*arrays, indexing="ij"), np.roll(np.arange(N + 1), -1)
).reshape(-1, N)
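# Example: cartesian_product(np.array([0, 1]), np.array([10, 20])) returns the
# four rows [0, 10], [0, 20], [1, 10], [1, 20] -- one row per combination.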
def fit_plug_in_variance_estimator(X):
"""
Takes in ASE of a graph and returns a function that estimates
the variance-covariance matrix at a given point using the
plug-in estimator from the RDPG Central Limit Theorem.
Parameters
----------
X : np.ndarray, shape (n, d)
adjacency spectral embedding of a graph
Returns
-------
    plug_in_variance_estimator: function
a function that estimates variance (see below)
"""
n = len(X)
# precompute the Delta and the middle term matrix part
delta = 1 / (n) * (X.T @ X)
delta_inverse = np.linalg.inv(delta)
middle_term_matrix = np.einsum("bi,bo->bio", X, X)
def plug_in_variance_estimator(x):
"""
Takes in a point of a matrix of points in R^d and returns an
estimated covariance matrix for each of the points
Parameters:
-----------
x: np.ndarray, shape (n, d)
points to estimate variance at
if 1-dimensional - reshaped to (1, d)
Returns:
-------
covariances: np.ndarray, shape (n, d, d)
n estimated variance-covariance matrices of the points provided
"""
if x.ndim < 2:
x = x.reshape(1, -1)
# the following two lines are a properly vectorized version of
# middle_term = 0
# for i in range(n):
# middle_term += np.multiply.outer((x @ X[i] - (x @ X[i]) ** 2),
# np.outer(X[i], X[i]))
# where the matrix part does not involve x and has been computed above
middle_term_scalar = x @ X.T - (x @ X.T) ** 2
middle_term = np.tensordot(middle_term_scalar, middle_term_matrix, axes=1)
covariances = delta_inverse @ (middle_term / n) @ delta_inverse
return covariances
return plug_in_variance_estimator
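# Usage sketch (X below is a hypothetical n x d embedding, e.g. produced by
# graspologic.embed.AdjacencySpectralEmbed().fit_transform(adjacency)):
# estimate_cov = fit_plug_in_variance_estimator(X)
# covariances = estimate_cov(X[:5])  # shape (5, d, d)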
def remove_vertices(graph, indices, return_removed=False):
"""
Remove a subgraph of adjacency vectors from an adjacency matrix, giving back the
truncated matrix and optionally the removed subgraph. Here, an adjacency vector
is the set of edge weights for a particular vertex.
Parameters
----------
graph: networkx.Graph or array-like, shape (n, n)
The adjacency matrix for some graph.
indices: int or array-like, length m
Index/indices of the adjacency vector(s) to be removed.
return_removed: bool, by default False (optional)
Whether to return the tuple ``(A, V)``,
where ``A`` is the truncated adjacency matrix,
``V`` is an array representing the removed subgraph.
Returns
-------
truncated_graph: np.ndarray
The truncated matrix.
This is a copy of `graph` of shape (k, k), with ``k=n-m``, without the ``m``
adjacency vectors given by `indices`.
removed_subgraph: np.ndarray or tuple, shape (m, k) (optional)
Array of removed adjacency vectors without edges to each other.
If directed, this is a tuple ``(V_1, V_2)``,
with ``V_1`` being an array of adjacency vectors from the removed subgraph to the truncated graph,
and ``V_2`` being an array of adjacency vectors from the truncated graph to the removed subgraph.
Examples
--------
# Undirected
>>> A = np.array([[0, 1, 2],
[1, 0, 3],
[2, 3, 0]])
>>> remove_vertices(A, 0)
array([[0., 3.],
           [3., 0.]])
>>> remove_vertices(A, 0, return_removed=True)
(array([[0., 3.],
[3., 0.]]),
array([1., 2.]))
# Directed
>>> B = np.array([[0, 1, 2, 3],
[4, 0, 5, 6],
[7, 8, 0, 9],
[10, 11, 12, 0]])
>>> remove_vertices(B, 0, return_removed=True)
(array([[ 0., 5., 6.],
[ 8., 0., 9.],
[11., 12., 0.]]),
(array([ 4., 7., 10.]), array([1., 2., 3.])))
>>> remove_vertices(B, [0, -1], return_removed=True)
(array([[0., 5.],
[8., 0.]]),
(array([[4., 7.],
[6., 9.]]),
array([[ 1., 2.],
[11., 12.]])))
"""
graph = import_graph(graph)
if isinstance(indices, list) and len(indices) >= len(graph):
raise IndexError("You must pass in fewer vertex indices than vertices.")
directed = not is_almost_symmetric(graph)
# truncate graph
mask = np.ones(graph.shape[0], dtype=bool)
mask[indices] = 0
A = graph[mask, :][:, mask]
if return_removed:
rows = graph[mask]
vertices = rows[:, indices].T
if directed:
cols = graph[:, mask]
vertices_right = cols[indices]
return A, (vertices, vertices_right)
return A, vertices
return A
def remap_labels(
y_true: Union[List, np.ndarray, pd.Series],
y_pred: Union[List, np.ndarray, pd.Series],
return_map: bool = False,
) -> np.ndarray:
"""
Remaps a categorical labeling (such as one predicted by a clustering algorithm) to
match the labels used by another similar labeling.
Given two :math:`n`-length vectors describing a categorical labeling of :math:`n`
samples, this method reorders the labels of the second vector (`y_pred`) so that as
many samples as possible from the two label vectors are in the same category.
Parameters
----------
y_true : array-like of shape (n_samples,)
Ground truth labels, or, labels to map to.
y_pred : array-like of shape (n_samples,)
Labels to remap to match the categorical labeling of `y_true`. The categorical
labeling of `y_pred` will be preserved exactly, but the labels used to
denote the categories will be changed to best match the categories used in
`y_true`.
return_map : bool, optional
Whether to return a dictionary where the keys are the original category labels
from `y_pred` and the values are the new category labels that they were mapped
to.
Returns
-------
remapped_y_pred : np.ndarray of shape (n_samples,)
Same categorical labeling as that of `y_pred`, but with the category labels
permuted to best match those of `y_true`.
label_map : dict
Mapping from the original labels of `y_pred` to the new labels which best
resemble those of `y_true`. Only returned if `return_map` was True.
Examples
--------
>>> y_true = np.array([0,0,1,1,2,2])
>>> y_pred = np.array([2,2,1,1,0,0])
>>> remap_labels(y_true, y_pred)
array([0, 0, 1, 1, 2, 2])
Notes
-----
This method will work well when the label vectors describe a somewhat similar
categorization of the data (as measured by metrics such as
:func:`sklearn.metrics.adjusted_rand_score`, for example). When the categorizations
are not similar, the remapping may not make sense (as such a remapping does not
exist).
For example, consider when one category in `y_true` is exactly split in half into
two categories in `y_pred`. If this is the case, it is impossible to say which of
the categories in `y_pred` match that original category from `y_true`.
"""
check_consistent_length(y_true, y_pred)
true_type = type_of_target(y_true)
pred_type = type_of_target(y_pred)
valid_target_types = {"binary", "multiclass"}
if (true_type not in valid_target_types) or (pred_type not in valid_target_types):
msg = "Elements of `y_true` and `y_pred` must represent a valid binary or "
msg += "multiclass labeling, see "
msg += "https://scikit-learn.org/stable/modules/generated/sklearn.utils.multiclass.type_of_target.html"
msg += " for more information."
raise ValueError(msg)
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if not isinstance(return_map, bool):
raise TypeError("return_map must be of type bool.")
labels = unique_labels(y_true, y_pred)
confusion_mat = confusion_matrix(y_true, y_pred, labels=labels)
row_inds, col_inds = linear_sum_assignment(confusion_mat, maximize=True)
label_map = dict(zip(labels[col_inds], labels[row_inds]))
remapped_y_pred = np.vectorize(label_map.get)(y_pred)
if return_map:
return remapped_y_pred, label_map
else:
return remapped_y_pred
def remap_node_ids(
graph: nx.Graph, weight_attribute: str = "weight", weight_default: float = 1.0
) -> Tuple[nx.Graph, Dict[Any, str]]:
"""
    Given a graph with arbitrarily typed node ids, return a new graph that contains the exact same edgelist
except the node ids are remapped to a string representation.
Parameters
----------
graph : nx.Graph
A graph that has node ids of arbitrary types.
weight_attribute : str,
Default is ``weight``. An optional attribute to specify which column in your graph contains the weight value.
weight_default : float,
Default edge weight to use if a weight is not found on an edge in the graph
Returns
-------
Tuple[nx.Graph, Dict[Any, str]]
A new graph that contains the same edges except the node ids are remapped to strings. The keys in
the dictionary are the old node ids and the values are the newly remapped node ids.
Raises
------
TypeError
"""
if not isinstance(graph, nx.Graph):
raise TypeError("graph must be of type nx.Graph")
if not nx.is_weighted(graph, weight=weight_attribute):
warnings.warn(
f'Graph has at least one unweighted edge using weight_attribute "{weight_attribute}". '
f'Defaulting unweighted edges to "{weight_default}"'
)
node_id_dict = dict()
graph_remapped = type(graph)()
for source, target, weight in graph.edges(
data=weight_attribute, default=weight_default
):
if source not in node_id_dict:
node_id_dict[source] = str(len(node_id_dict.keys()))
if target not in node_id_dict:
node_id_dict[target] = str(len(node_id_dict.keys()))
graph_remapped.add_edge(node_id_dict[source], node_id_dict[target])
graph_remapped[node_id_dict[source]][node_id_dict[target]][
weight_attribute
] = weight
return graph_remapped, node_id_dict
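# Usage sketch for remap_node_ids (node ids and weight are illustrative):
# >>> g = nx.Graph()
# >>> g.add_edge("alice", "bob", weight=2.0)
# >>> _, id_map = remap_node_ids(g)
# >>> id_map
# {'alice': '0', 'bob': '1'}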
def suppress_common_warnings():
"""
Suppresses common warnings that occur when using graspologic.
"""
warnings.filterwarnings("ignore", category=RuntimeWarning)
warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
warnings.simplefilter("always", category=UserWarning)
|
py | b408be1be64d1ff34ea5db37a1a5fcafd3f98ecb | import pytest
from app.common import merge_into
@pytest.mark.parametrize('initial, updates, expected', [(
{'name': 'Margot', 'integer': 1337},
{'integer': 777, 'robot': 'pupper'},
{'name': 'Margot', 'integer': 777, 'robot': 'pupper'}
),
(
{'name': 'Margot', 'integer': 1337, 'another_dict': {
"key1": "value1", "key2": "value2"
}},
{'integer': 777, 'robot': 'pupper', 'another_dict': {
"key2": "mod_value2", "key3": "value3"
}},
{'name': 'Margot', 'integer': 777, 'robot': 'pupper', 'another_dict': {
"key1": "value1", "key2": "mod_value2", "key3": "value3"
}}
)
])
def test_merge_into(initial, updates, expected):
result = merge_into(initial, updates)
assert result == expected
|
py | b408be25a415ab56001c2928f5078e98c0ea4140 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class FirewallPoliciesOperations(object):
"""FirewallPoliciesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
firewall_policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
firewall_policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified Firewall Policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param firewall_policy_name: The name of the Firewall Policy.
:type firewall_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
firewall_policy_name=firewall_policy_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}'} # type: ignore
def get(
self,
resource_group_name, # type: str
firewall_policy_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "_models.FirewallPolicy"
"""Gets the specified Firewall Policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param firewall_policy_name: The name of the Firewall Policy.
:type firewall_policy_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FirewallPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_08_01.models.FirewallPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FirewallPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('FirewallPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}'} # type: ignore
def update_tags(
self,
resource_group_name, # type: str
firewall_policy_name, # type: str
firewall_policy_parameters, # type: "_models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "_models.FirewallPolicy"
"""Updates a Firewall Policy Tags.
:param resource_group_name: The resource group name of the Firewall Policy.
:type resource_group_name: str
:param firewall_policy_name: The name of the Firewall Policy being updated.
:type firewall_policy_name: str
:param firewall_policy_parameters: Parameters supplied to Update Firewall Policy tags.
:type firewall_policy_parameters: ~azure.mgmt.network.v2019_08_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: FirewallPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_08_01.models.FirewallPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FirewallPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(firewall_policy_parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('FirewallPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
firewall_policy_name, # type: str
parameters, # type: "_models.FirewallPolicy"
**kwargs # type: Any
):
# type: (...) -> "_models.FirewallPolicy"
cls = kwargs.pop('cls', None) # type: ClsType["_models.FirewallPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'FirewallPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('FirewallPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('FirewallPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
firewall_policy_name, # type: str
parameters, # type: "_models.FirewallPolicy"
**kwargs # type: Any
):
# type: (...) -> LROPoller["_models.FirewallPolicy"]
"""Creates or updates the specified Firewall Policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param firewall_policy_name: The name of the Firewall Policy.
:type firewall_policy_name: str
:param parameters: Parameters supplied to the create or update Firewall Policy operation.
:type parameters: ~azure.mgmt.network.v2019_08_01.models.FirewallPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either FirewallPolicy or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2019_08_01.models.FirewallPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.FirewallPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
firewall_policy_name=firewall_policy_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('FirewallPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'firewallPolicyName': self._serialize.url("firewall_policy_name", firewall_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies/{firewallPolicyName}'} # type: ignore
def list(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.FirewallPolicyListResult"]
"""Lists all Firewall Policies in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either FirewallPolicyListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_08_01.models.FirewallPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FirewallPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('FirewallPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/firewallPolicies'} # type: ignore
def list_all(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["_models.FirewallPolicyListResult"]
"""Gets all the Firewall Policies in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either FirewallPolicyListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2019_08_01.models.FirewallPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.FirewallPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_all.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('FirewallPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/firewallPolicies'} # type: ignore
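# Illustrative sketch (editorial addition, not part of the generated code):
# these operations are normally reached through the versioned
# NetworkManagementClient, which attaches an instance of this class as
# ``client.firewall_policies``. The subscription, resource group and policy
# names below are hypothetical placeholders.
def _example_begin_delete():  # pragma: no cover
    from azure.identity import DefaultAzureCredential
    from azure.mgmt.network import NetworkManagementClient

    client = NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>")
    poller = client.firewall_policies.begin_delete("example-rg", "example-firewall-policy")
    poller.result()  # block until the long-running delete completes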
|
py | b408be2e405c1bcd491bfcf68649cf969d1fa843 | class BinaryTree:
def __init__(self, rootObj):
self.key = rootObj
self.left = None
self.right = None
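    # Note (editorial comment): insertLeft/insertRight do not overwrite an
    # existing child. If a child is already present, the new node is spliced
    # in between this node and the existing subtree, which becomes the new
    # node's child on the same side.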
def insertLeft(self, node):
if self.left is None:
self.left = BinaryTree(node)
else:
t = BinaryTree(node)
t.left = self.left
self.left = t
def insertRight(self, node):
if self.right is None:
self.right = BinaryTree(node)
else:
t = BinaryTree(node)
t.right = self.right
self.right = t
def getLeftChild(self):
return self.left
def getRightChild(self):
return self.right
def setRootVal(self, obj):
self.key = obj
def getRootVal(self):
return self.key
if __name__ == '__main__':
r = BinaryTree('a')
print(r.getRootVal())
print(r.getLeftChild())
r.insertLeft('b')
print(r.getLeftChild())
print(r.getLeftChild().getRootVal())
r.insertRight('c')
print(r.getRightChild())
print(r.getRightChild().getRootVal())
r.getRightChild().setRootVal('hello')
print(r.getRightChild().getRootVal()) |
py | b408be50389d72bd7d49c7bbafcfdcf07a8f46ad | # -*- coding: utf-8 -*-
# Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Robert Layton <[email protected]>
# Andreas Mueller <[email protected]>
# Philippe Gervais <[email protected]>
# Lars Buitinck
# Joel Nothman <[email protected]>
# License: BSD 3 clause
import itertools
from functools import partial
import warnings
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from joblib import Parallel, delayed, effective_n_jobs
from ..utils.validation import _num_samples
from ..utils.validation import check_non_negative
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches, get_chunk_n_rows
from ..utils import is_scalar_nan
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..utils._mask import _get_mask
from ._pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
from ..exceptions import DataConversionWarning
# Utility Functions
def _return_float_dtype(X, Y):
"""
1. If dtype of X and Y is float32, then dtype float32 is returned.
2. Else dtype float is returned.
"""
if not issparse(X) and not isinstance(X, np.ndarray):
X = np.asarray(X)
if Y is None:
Y_dtype = X.dtype
elif not issparse(Y) and not isinstance(Y, np.ndarray):
Y = np.asarray(Y)
Y_dtype = Y.dtype
else:
Y_dtype = Y.dtype
if X.dtype == Y_dtype == np.float32:
dtype = np.float32
else:
dtype = np.float64
return X, Y, dtype
def check_pairwise_arrays(X, Y, precomputed=False, dtype=None,
accept_sparse='csr', force_all_finite=True,
copy=False):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats (or dtype if provided). Finally, the function
checks that the size of the second dimension of the two arrays is equal, or
the equivalent check for a precomputed distance matrix.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
precomputed : bool
True if X is to be treated as precomputed distances to the samples in
Y.
dtype : string, type, list of types or None (default=None)
Data type required for X and Y. If None, the dtype will be an
appropriate float type selected by _return_float_dtype.
.. versionadded:: 0.18
accept_sparse : string, boolean or list/tuple of strings
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. If the input is sparse but not in the allowed format,
it will be converted to the first listed format. True allows the input
to be any format. False means that a sparse matrix input will
raise an error.
force_all_finite : boolean or 'allow-nan', (default=True)
Whether to raise an error on np.inf and np.nan in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accept both np.inf and np.nan in array.
- 'allow-nan': accept only np.nan values in array. Values cannot
be infinite.
.. versionadded:: 0.22
``force_all_finite`` accepts the string ``'allow-nan'``.
copy : bool
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
.. versionadded:: 0.22
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y, dtype_float = _return_float_dtype(X, Y)
estimator = 'check_pairwise_arrays'
if dtype is None:
dtype = dtype_float
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse=accept_sparse, dtype=dtype,
copy=copy, force_all_finite=force_all_finite,
estimator=estimator)
else:
X = check_array(X, accept_sparse=accept_sparse, dtype=dtype,
copy=copy, force_all_finite=force_all_finite,
estimator=estimator)
Y = check_array(Y, accept_sparse=accept_sparse, dtype=dtype,
copy=copy, force_all_finite=force_all_finite,
estimator=estimator)
if precomputed:
if X.shape[1] != Y.shape[0]:
raise ValueError("Precomputed metric requires shape "
"(n_queries, n_indexed). Got (%d, %d) "
"for %d indexed." %
(X.shape[0], X.shape[1], Y.shape[0]))
elif X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False,
X_norm_squared=None):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if one argument varies but the other remains unchanged, then
`dot(x, x)` and/or `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
May be ignored in some cases, see the note below.
squared : boolean, optional
Return squared Euclidean distances.
X_norm_squared : array-like of shape (n_samples,), optional
Pre-computed dot-products of vectors in X (e.g.,
``(X**2).sum(axis=1)``)
May be ignored in some cases, see the note below.
Notes
-----
To achieve better accuracy, `X_norm_squared` and `Y_norm_squared` may be
unused if they are passed as ``float32``.
Returns
-------
distances : array, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[0., 1.],
[1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[1. ],
[1.41421356]])
See also
--------
paired_distances : distances between pairs of elements of X and Y.
"""
X, Y = check_pairwise_arrays(X, Y)
# If norms are passed as float32, they are unused. If arrays are passed as
# float32, norms need to be recomputed on upcast chunks.
# TODO: use a float64 accumulator in row_norms to avoid the latter.
if X_norm_squared is not None:
XX = check_array(X_norm_squared)
if XX.shape == (1, X.shape[0]):
XX = XX.T
elif XX.shape != (X.shape[0], 1):
raise ValueError(
"Incompatible dimensions for X and X_norm_squared")
if XX.dtype == np.float32:
XX = None
elif X.dtype == np.float32:
XX = None
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
if X is Y and XX is not None:
# shortcut in the common case euclidean_distances(X, X)
YY = XX.T
elif Y_norm_squared is not None:
YY = np.atleast_2d(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
if YY.dtype == np.float32:
YY = None
elif Y.dtype == np.float32:
YY = None
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
if X.dtype == np.float32:
# To minimize precision issues with float32, we compute the distance
# matrix on chunks of X and Y upcast to float64
distances = _euclidean_distances_upcast(X, XX, Y, YY)
else:
# if dtype is already float64, no need to chunk and upcast
distances = - 2 * safe_sparse_dot(X, Y.T, dense_output=True)
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
if X is Y:
np.fill_diagonal(distances, 0)
return distances if squared else np.sqrt(distances, out=distances)
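# Illustrative sketch (editorial addition, not part of scikit-learn): verifies
# the expansion used above, dist(x, y)^2 = <x, x> - 2<x, y> + <y, y>, against a
# direct computation on a tiny dense input.
def _euclidean_expansion_example():  # pragma: no cover
    X = np.array([[0.0, 1.0], [1.0, 1.0]])
    Y = np.array([[2.0, 0.0]])
    direct = np.sqrt(((X[:, np.newaxis, :] - Y[np.newaxis, :, :]) ** 2).sum(-1))
    via_dot = euclidean_distances(X, Y)
    assert np.allclose(direct, via_dot)
    return via_dot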
def nan_euclidean_distances(X, Y=None, squared=False,
missing_values=np.nan, copy=True):
"""Calculate the euclidean distances in the presence of missing values.
Compute the euclidean distance between each pair of samples in X and Y,
where Y=X is assumed if Y=None. When calculating the distance between a
pair of samples, this formulation ignores feature coordinates with a
missing value in either sample and scales up the weight of the remaining
coordinates:
dist(x,y) = sqrt(weight * sq. distance from present coordinates)
where,
weight = Total # of coordinates / # of present coordinates
For example, the distance between ``[3, na, na, 6]`` and ``[1, na, 4, 5]``
is:
.. math::
\\sqrt{\\frac{4}{2}((3-1)^2 + (6-5)^2)}
If all the coordinates are missing or if there are no common present
coordinates then NaN is returned for that pair.
Read more in the :ref:`User Guide <metrics>`.
.. versionadded:: 0.22
Parameters
----------
X : array-like, shape=(n_samples_1, n_features)
Y : array-like, shape=(n_samples_2, n_features)
squared : bool, default=False
Return squared Euclidean distances.
missing_values : np.nan or int, default=np.nan
Representation of missing value
copy : boolean, default=True
Make and use a deep copy of X and Y (if Y exists)
Returns
-------
distances : array, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import nan_euclidean_distances
>>> nan = float("NaN")
>>> X = [[0, 1], [1, nan]]
>>> nan_euclidean_distances(X, X) # distance between rows of X
array([[0. , 1.41421356],
[1.41421356, 0. ]])
>>> # get distance to origin
>>> nan_euclidean_distances(X, [[0, 0]])
array([[1. ],
[1.41421356]])
References
----------
* John K. Dixon, "Pattern Recognition with Partly Missing Data",
IEEE Transactions on Systems, Man, and Cybernetics, Volume: 9, Issue:
10, pp. 617 - 621, Oct. 1979.
http://ieeexplore.ieee.org/abstract/document/4310090/
See also
--------
paired_distances : distances between pairs of elements of X and Y.
"""
force_all_finite = 'allow-nan' if is_scalar_nan(missing_values) else True
X, Y = check_pairwise_arrays(X, Y, accept_sparse=False,
force_all_finite=force_all_finite, copy=copy)
# Get missing mask for X
missing_X = _get_mask(X, missing_values)
# Get missing mask for Y
missing_Y = missing_X if Y is X else _get_mask(Y, missing_values)
# set missing values to zero
X[missing_X] = 0
Y[missing_Y] = 0
distances = euclidean_distances(X, Y, squared=True)
# Adjust distances for missing values
XX = X * X
YY = Y * Y
distances -= np.dot(XX, missing_Y.T)
distances -= np.dot(missing_X, YY.T)
np.clip(distances, 0, None, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
np.fill_diagonal(distances, 0.0)
present_X = 1 - missing_X
present_Y = present_X if Y is X else ~missing_Y
present_count = np.dot(present_X, present_Y.T)
distances[present_count == 0] = np.nan
# avoid divide by zero
np.maximum(1, present_count, out=present_count)
distances /= present_count
distances *= X.shape[1]
if not squared:
np.sqrt(distances, out=distances)
return distances
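# Illustrative sketch (editorial addition, not part of scikit-learn): checks the
# worked example from the docstring above, where the distance between
# [3, na, na, 6] and [1, na, 4, 5] is sqrt(4 / 2 * ((3 - 1)**2 + (6 - 5)**2)).
def _nan_euclidean_example():  # pragma: no cover
    nan = np.nan
    X = np.array([[3.0, nan, nan, 6.0]])
    Y = np.array([[1.0, nan, 4.0, 5.0]])
    expected = np.sqrt(4 / 2 * ((3 - 1) ** 2 + (6 - 5) ** 2))
    assert np.allclose(nan_euclidean_distances(X, Y), [[expected]])
    return expected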
def _euclidean_distances_upcast(X, XX=None, Y=None, YY=None, batch_size=None):
"""Euclidean distances between X and Y
Assumes X and Y have float32 dtype.
Assumes XX and YY have float64 dtype or are None.
X and Y are upcast to float64 in chunks, whose size is chosen to limit
memory increase by approximately 10% (at least 10MiB).
"""
n_samples_X = X.shape[0]
n_samples_Y = Y.shape[0]
n_features = X.shape[1]
distances = np.empty((n_samples_X, n_samples_Y), dtype=np.float32)
if batch_size is None:
x_density = X.nnz / np.prod(X.shape) if issparse(X) else 1
y_density = Y.nnz / np.prod(Y.shape) if issparse(Y) else 1
# Allow 10% more memory than X, Y and the distance matrix take (at
# least 10MiB)
maxmem = max(
((x_density * n_samples_X + y_density * n_samples_Y) * n_features
+ (x_density * n_samples_X * y_density * n_samples_Y)) / 10,
10 * 2 ** 17)
# The increase amount of memory in 8-byte blocks is:
# - x_density * batch_size * n_features (copy of chunk of X)
# - y_density * batch_size * n_features (copy of chunk of Y)
# - batch_size * batch_size (chunk of distance matrix)
# Hence x² + (xd+yd)kx = M, where x=batch_size, k=n_features, M=maxmem
# xd=x_density and yd=y_density
tmp = (x_density + y_density) * n_features
batch_size = (-tmp + np.sqrt(tmp ** 2 + 4 * maxmem)) / 2
batch_size = max(int(batch_size), 1)
x_batches = gen_batches(n_samples_X, batch_size)
for i, x_slice in enumerate(x_batches):
X_chunk = X[x_slice].astype(np.float64)
if XX is None:
XX_chunk = row_norms(X_chunk, squared=True)[:, np.newaxis]
else:
XX_chunk = XX[x_slice]
y_batches = gen_batches(n_samples_Y, batch_size)
for j, y_slice in enumerate(y_batches):
if X is Y and j < i:
# when X is Y the distance matrix is symmetric so we only need
# to compute half of it.
d = distances[y_slice, x_slice].T
else:
Y_chunk = Y[y_slice].astype(np.float64)
if YY is None:
YY_chunk = row_norms(Y_chunk, squared=True)[np.newaxis, :]
else:
YY_chunk = YY[:, y_slice]
d = -2 * safe_sparse_dot(X_chunk, Y_chunk.T, dense_output=True)
d += XX_chunk
d += YY_chunk
distances[x_slice, y_slice] = d.astype(np.float32, copy=False)
return distances
def _argmin_min_reduce(dist, start):
indices = dist.argmin(axis=1)
values = dist[np.arange(dist.shape[0]), indices]
return indices, values
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples1, n_features)
Array containing points.
Y : {array-like, sparse matrix}, shape (n_samples2, n_features)
Array containing points.
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
metric : string or callable, default 'euclidean'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
indices, values = zip(*pairwise_distances_chunked(
X, Y, reduce_func=_argmin_min_reduce, metric=metric,
**metric_kwargs))
indices = np.concatenate(indices)
values = np.concatenate(values)
return indices, values
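# Illustrative sketch (editorial addition, not part of scikit-learn): for each
# row of X, finds the index of the closest row of Y and the minimal distance,
# equivalent to pairwise_distances(X, Y).argmin(axis=1) / .min(axis=1).
def _argmin_min_example():  # pragma: no cover
    X = np.array([[0.0, 0.0], [10.0, 10.0]])
    Y = np.array([[1.0, 0.0], [9.0, 10.0]])
    indices, values = pairwise_distances_argmin_min(X, Y)
    # indices == [0, 1]; values == [1.0, 1.0]
    return indices, values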
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
----------
X : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
axis : int, optional, default 1
Axis along which the argmin and distances are to be computed.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
if metric_kwargs is None:
metric_kwargs = {}
return pairwise_distances_argmin_min(X, Y, axis, metric,
metric_kwargs=metric_kwargs)[0]
def haversine_distances(X, Y=None):
"""Compute the Haversine distance between samples in X and Y
The Haversine (or great circle) distance is the angular distance between
two points on the surface of a sphere. The first coordinate of each point is
assumed to be the latitude, the second is the longitude, given in radians.
The dimension of the data must be 2.
.. math::
D(x, y) = 2\\arcsin[\\sqrt{\\sin^2((x1 - y1) / 2)
+ \\cos(x1)\\cos(y1)\\sin^2((x2 - y2) / 2)}]
Parameters
----------
X : array_like, shape (n_samples_1, 2)
Y : array_like, shape (n_samples_2, 2), optional
Returns
-------
distance : {array}, shape (n_samples_1, n_samples_2)
Notes
-----
As the Earth is nearly spherical, the haversine formula provides a good
approximation of the distance between two points of the Earth surface, with
a less than 1% error on average.
Examples
--------
We want to calculate the distance between the Ezeiza Airport
(Buenos Aires, Argentina) and the Charles de Gaulle Airport (Paris, France)
>>> from sklearn.metrics.pairwise import haversine_distances
>>> from math import radians
>>> bsas = [-34.83333, -58.5166646]
>>> paris = [49.0083899664, 2.53844117956]
>>> bsas_in_radians = [radians(_) for _ in bsas]
>>> paris_in_radians = [radians(_) for _ in paris]
>>> result = haversine_distances([bsas_in_radians, paris_in_radians])
>>> result * 6371000/1000 # multiply by Earth radius to get kilometers
array([[ 0. , 11099.54035582],
[11099.54035582, 0. ]])
"""
from sklearn.neighbors import DistanceMetric
return DistanceMetric.get_metric('haversine').pairwise(X, Y)
def manhattan_distances(X, Y=None, sum_over_features=True):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (i.e. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Notes
-----
When X and/or Y are CSR sparse matrices and they are not already
in canonical format, this function modifies them in-place to
make them canonical.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances([[3]], [[3]])
array([[0.]])
>>> manhattan_distances([[3]], [[2]])
array([[1.]])
>>> manhattan_distances([[2]], [[3]])
array([[1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])
array([[0., 2.],
[4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = np.full((2, 2), 2.)
>>> manhattan_distances(X, y, sum_over_features=False)
array([[1., 1.],
[1., 1.]])
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
X.sum_duplicates() # this also sorts indices in-place
Y.sum_duplicates()
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine : dense matrices only
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
np.clip(S, 0, 2, out=S)
if X is Y or Y is None:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
S[np.diag_indices_from(S)] = 0.0
return S
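# Illustrative sketch (editorial addition, not part of scikit-learn): cosine
# distance is 1 - cosine similarity, so orthogonal vectors are at distance 1
# and a vector is at distance 0 from itself.
def _cosine_distance_example():  # pragma: no cover
    X = np.array([[1.0, 0.0], [0.0, 1.0]])
    D = cosine_distances(X)
    # D[0, 0] == 0.0 (same vector), D[0, 1] == 1.0 (orthogonal vectors)
    return D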
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return row_norms(X - Y)
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
diff = X - Y
if issparse(diff):
diff.data = np.abs(diff.data)
return np.squeeze(np.array(diff.sum(axis=1)))
else:
return np.abs(diff).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
-----
The cosine distance is equivalent to half the squared
euclidean distance if each sample is normalized to unit norm
"""
X, Y = check_paired_arrays(X, Y)
return .5 * row_norms(normalize(X) - normalize(Y), squared=True)
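# Illustrative sketch (editorial addition, not part of scikit-learn): checks the
# note above that, on unit-normalized rows, the cosine distance equals half the
# squared euclidean distance.
def _paired_cosine_note_example():  # pragma: no cover
    X = normalize(np.array([[1.0, 2.0], [3.0, 4.0]]))
    Y = normalize(np.array([[2.0, 1.0], [4.0, 3.0]]))
    lhs = paired_cosine_distances(X, Y)
    rhs = 0.5 * paired_euclidean_distances(X, Y) ** 2
    assert np.allclose(lhs, rhs)
    return lhs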
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : ndarray (n_samples, n_features)
Array 1 for distance computation.
Y : ndarray (n_samples, n_features)
Array 2 for distance computation.
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([0., 1.])
See also
--------
pairwise_distances : Computes the distance between every pair of samples
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None, dense_output=True):
"""
Compute the linear kernel between X and Y.
Read more in the :ref:`User Guide <linear_kernel>`.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.20
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=dense_output)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Read more in the :ref:`User Guide <polynomial_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
degree : int, default 3
gamma : float, default None
if None, defaults to 1.0 / n_features
coef0 : float, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
K **= degree
return K
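# Illustrative sketch (editorial addition, not part of scikit-learn): checks the
# formula K(X, Y) = (gamma * <X, Y> + coef0) ** degree element-wise on a tiny
# input with explicit parameters.
def _polynomial_kernel_example():  # pragma: no cover
    X = np.array([[1.0, 2.0]])
    Y = np.array([[3.0, 4.0]])
    K = polynomial_kernel(X, Y, degree=2, gamma=0.5, coef0=1)
    # <X, Y> = 11, so K = (0.5 * 11 + 1) ** 2 = 42.25
    assert np.allclose(K, [[42.25]])
    return K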
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Read more in the :ref:`User Guide <sigmoid_kernel>`.
Parameters
----------
X : ndarray of shape (n_samples_1, n_features)
Y : ndarray of shape (n_samples_2, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features
coef0 : float, default 1
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = safe_sparse_dot(X, Y.T, dense_output=True)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <rbf_kernel>`.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
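# Illustrative sketch (editorial addition, not part of scikit-learn): with
# gamma=1, K(x, y) = exp(-||x - y||^2), so two points at distance 1 have a
# kernel value of exp(-1).
def _rbf_kernel_example():  # pragma: no cover
    X = np.array([[0.0, 0.0], [1.0, 0.0]])
    K = rbf_kernel(X, gamma=1.0)
    assert np.allclose(K[0, 1], np.exp(-1.0))
    return K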
def laplacian_kernel(X, Y=None, gamma=None):
"""Compute the laplacian kernel between X and Y.
The laplacian kernel is defined as::
K(x, y) = exp(-gamma ||x-y||_1)
for each pair of rows x in X and y in Y.
Read more in the :ref:`User Guide <laplacian_kernel>`.
.. versionadded:: 0.17
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default None
If None, defaults to 1.0 / n_features
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = -gamma * manhattan_distances(X, Y)
np.exp(K, K) # exponentiate K in-place
return K
def cosine_similarity(X, Y=None, dense_output=True):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Read more in the :ref:`User Guide <cosine_similarity>`.
Parameters
----------
X : ndarray or sparse array, shape: (n_samples_X, n_features)
Input data.
Y : ndarray or sparse array, shape: (n_samples_Y, n_features)
Input data. If ``None``, the output will be the pairwise
similarities between all samples in ``X``.
dense_output : boolean (optional), default True
Whether to return dense output even when the input is sparse. If
``False``, the output is sparse if both input arrays are sparse.
.. versionadded:: 0.17
parameter ``dense_output`` for dense output.
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = safe_sparse_dot(X_normalized, Y_normalized.T,
dense_output=dense_output)
return K
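# Illustrative sketch (editorial addition, not part of scikit-learn): on
# L2-normalized rows, cosine_similarity coincides with linear_kernel, as noted
# in the docstring above.
def _cosine_vs_linear_example():  # pragma: no cover
    X = normalize(np.array([[1.0, 2.0], [3.0, 4.0]]))
    assert np.allclose(cosine_similarity(X), linear_kernel(X))
    return cosine_similarity(X)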
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
https://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Read more in the :ref:`User Guide <chi2_kernel>`.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
https://research.microsoft.com/en-us/um/people/manik/projects/trade-off/papers/ZhangIJCV06.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
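    Examples
    --------
    A minimal illustrative sketch (self-similarity is exp(0) == 1; exact array
    formatting may differ slightly between NumPy versions):
    >>> import numpy as np
    >>> from sklearn.metrics.pairwise import chi2_kernel
    >>> X = np.array([[1., 1.], [1., 0.]])
    >>> chi2_kernel(X).diagonal()
    array([1., 1.])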
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'haversine': haversine_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances,
'precomputed': None, # HACK: precomputed is always allowed, never called
'nan_euclidean': nan_euclidean_distances,
}
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'haversine' metrics.pairwise.haversine_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
'nan_euclidean' metrics.pairwise.nan_euclidean_distances
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
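    Examples
    --------
    A minimal illustrative sketch of looking up the mapping:
    >>> from sklearn.metrics.pairwise import distance_metrics
    >>> sorted(distance_metrics())[:3]
    ['cityblock', 'cosine', 'euclidean']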
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _dist_wrapper(dist_func, dist_matrix, slice_, *args, **kwargs):
"""Write in-place to a slice of a distance matrix"""
dist_matrix[:, slice_] = dist_func(*args, **kwargs)
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if Y is None:
Y = X
X, Y, dtype = _return_float_dtype(X, Y)
if effective_n_jobs(n_jobs) == 1:
return func(X, Y, **kwds)
# enforce a threading backend to prevent data communication overhead
fd = delayed(_dist_wrapper)
ret = np.empty((X.shape[0], Y.shape[0]), dtype=dtype, order='F')
Parallel(backend="threading", n_jobs=n_jobs)(
fd(func, ret, s, X, Y[s], **kwds)
for s in gen_even_slices(_num_samples(Y), effective_n_jobs(n_jobs)))
if (X is Y or Y is None) and func is euclidean_distances:
# zeroing diagonal for euclidean norm.
# TODO: do it also for other norms.
np.fill_diagonal(ret, 0)
return ret
def _pairwise_callable(X, Y, metric, force_all_finite=True, **kwds):
"""Handle the callable case for pairwise_{distances,kernels}
"""
X, Y = check_pairwise_arrays(X, Y, force_all_finite=force_all_finite)
if X is Y:
# Only calculate metric for upper triangle
out = np.zeros((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.combinations(range(X.shape[0]), 2)
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
# Make symmetric
# NB: out += out.T will produce incorrect results
out = out + out.T
# Calculate diagonal
# NB: nonzero diagonals are allowed for both metrics and kernels
for i in range(X.shape[0]):
x = X[i]
out[i, i] = metric(x, x, **kwds)
else:
# Calculate all cells
out = np.empty((X.shape[0], Y.shape[0]), dtype='float')
iterator = itertools.product(range(X.shape[0]), range(Y.shape[0]))
for i, j in iterator:
out[i, j] = metric(X[i], Y[j], **kwds)
return out
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski",
'nan_euclidean', 'haversine']
_NAN_METRICS = ['nan_euclidean']
def _check_chunk_size(reduced, chunk_size):
"""Checks chunk is a sequence of expected size or a tuple of same
"""
if reduced is None:
return
is_tuple = isinstance(reduced, tuple)
if not is_tuple:
reduced = (reduced,)
if any(isinstance(r, tuple) or not hasattr(r, '__iter__')
for r in reduced):
raise TypeError('reduce_func returned %r. '
'Expected sequence(s) of length %d.' %
(reduced if is_tuple else reduced[0], chunk_size))
if any(_num_samples(r) != chunk_size for r in reduced):
actual_size = tuple(_num_samples(r) for r in reduced)
raise ValueError('reduce_func returned object of length %s. '
'Expected same length as input: %d.' %
(actual_size if is_tuple else actual_size[0],
chunk_size))
def _precompute_metric_params(X, Y, metric=None, **kwds):
"""Precompute data-derived metric parameters if not provided
"""
if metric == "seuclidean" and 'V' not in kwds:
if X is Y:
V = np.var(X, axis=0, ddof=1)
else:
V = np.var(np.vstack([X, Y]), axis=0, ddof=1)
return {'V': V}
if metric == "mahalanobis" and 'VI' not in kwds:
if X is Y:
VI = np.linalg.inv(np.cov(X.T)).T
else:
VI = np.linalg.inv(np.cov(np.vstack([X, Y]).T)).T
return {'VI': VI}
return {}
def pairwise_distances_chunked(X, Y=None, reduce_func=None,
metric='euclidean', n_jobs=None,
working_memory=None, **kwds):
"""Generate a distance matrix chunk by chunk with optional reduction
In cases where not all of a pairwise distance matrix needs to be stored at
once, this is used to calculate pairwise distances in
``working_memory``-sized chunks. If ``reduce_func`` is given, it is run
on each chunk and its return values are concatenated into lists, arrays
or sparse matrices.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or,
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if
metric != "precomputed".
reduce_func : callable, optional
The function which is applied on each chunk of the distance matrix,
reducing it to needed values. ``reduce_func(D_chunk, start)``
is called repeatedly, where ``D_chunk`` is a contiguous vertical
slice of the pairwise distance matrix, starting at row ``start``.
It should return one of: None; an array, a list, or a sparse matrix
of length ``D_chunk.shape[0]``; or a tuple of such objects. Returning
None is useful for in-place operations, rather than reductions.
If None, pairwise_distances_chunked returns a generator of vertical
chunks of the distance matrix.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int or None, optional (default=None)
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
working_memory : int, optional
The sought maximum memory for temporary distance matrix chunks.
When None (default), the value of
``sklearn.get_config()['working_memory']`` is used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Yields
------
D_chunk : array or sparse matrix
A contiguous slice of distance matrix, optionally processed by
``reduce_func``.
Examples
--------
Without reduce_func:
>>> import numpy as np
>>> from sklearn.metrics import pairwise_distances_chunked
>>> X = np.random.RandomState(0).rand(5, 3)
>>> D_chunk = next(pairwise_distances_chunked(X))
>>> D_chunk
array([[0. ..., 0.29..., 0.41..., 0.19..., 0.57...],
[0.29..., 0. ..., 0.57..., 0.41..., 0.76...],
[0.41..., 0.57..., 0. ..., 0.44..., 0.90...],
[0.19..., 0.41..., 0.44..., 0. ..., 0.51...],
[0.57..., 0.76..., 0.90..., 0.51..., 0. ...]])
Retrieve all neighbors and average distance within radius r:
>>> r = .2
>>> def reduce_func(D_chunk, start):
... neigh = [np.flatnonzero(d < r) for d in D_chunk]
... avg_dist = (D_chunk * (D_chunk < r)).mean(axis=1)
... return neigh, avg_dist
>>> gen = pairwise_distances_chunked(X, reduce_func=reduce_func)
>>> neigh, avg_dist = next(gen)
>>> neigh
[array([0, 3]), array([1]), array([2]), array([0, 3]), array([4])]
>>> avg_dist
array([0.039..., 0. , 0. , 0.039..., 0. ])
Where r is defined per sample, we need to make use of ``start``:
>>> r = [.2, .4, .4, .3, .1]
>>> def reduce_func(D_chunk, start):
... neigh = [np.flatnonzero(d < r[i])
... for i, d in enumerate(D_chunk, start)]
... return neigh
>>> neigh = next(pairwise_distances_chunked(X, reduce_func=reduce_func))
>>> neigh
[array([0, 3]), array([0, 1]), array([2]), array([0, 3]), array([4])]
Force row-by-row generation by reducing ``working_memory``:
>>> gen = pairwise_distances_chunked(X, reduce_func=reduce_func,
... working_memory=0)
>>> next(gen)
[array([0, 3])]
>>> next(gen)
[array([0, 1])]
"""
n_samples_X = _num_samples(X)
if metric == 'precomputed':
slices = (slice(0, n_samples_X),)
else:
if Y is None:
Y = X
# We get as many rows as possible within our working_memory budget to
# store len(Y) distances in each row of output.
#
# Note:
# - this will get at least 1 row, even if 1 row of distances will
# exceed working_memory.
# - this does not account for any temporary memory usage while
# calculating distances (e.g. difference of vectors in manhattan
        #   distance).
chunk_n_rows = get_chunk_n_rows(row_bytes=8 * _num_samples(Y),
max_n_rows=n_samples_X,
working_memory=working_memory)
slices = gen_batches(n_samples_X, chunk_n_rows)
# precompute data-derived metric params
params = _precompute_metric_params(X, Y, metric=metric, **kwds)
kwds.update(**params)
for sl in slices:
if sl.start == 0 and sl.stop == n_samples_X:
X_chunk = X # enable optimised paths for X is Y
else:
X_chunk = X[sl]
D_chunk = pairwise_distances(X_chunk, Y, metric=metric,
n_jobs=n_jobs, **kwds)
if ((X is Y or Y is None)
and PAIRWISE_DISTANCE_FUNCTIONS.get(metric, None)
is euclidean_distances):
# zeroing diagonal, taking care of aliases of "euclidean",
# i.e. "l2"
D_chunk.flat[sl.start::_num_samples(X) + 1] = 0
if reduce_func is not None:
chunk_size = D_chunk.shape[0]
D_chunk = reduce_func(D_chunk, sl.start)
_check_chunk_size(D_chunk, chunk_size)
yield D_chunk
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=None,
force_all_finite=True, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix
inputs.
['nan_euclidean'] but it does not yet support sparse matrices.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
scikit-learn, see the __doc__ of the sklearn.pairwise.distance_metrics
function.
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features], optional
An optional second feature array. Only allowed if
metric != "precomputed".
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int or None, optional (default=None)
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
force_all_finite : boolean or 'allow-nan', (default=True)
Whether to raise an error on np.inf and np.nan in array. The
possibilities are:
- True: Force all values of array to be finite.
- False: accept both np.inf and np.nan in array.
- 'allow-nan': accept only np.nan values in array. Values cannot
be infinite.
.. versionadded:: 0.22
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
See also
--------
pairwise_distances_chunked : performs the same calculation as this
function, but returns a generator of chunks of the distance matrix, in
order to limit memory usage.
paired_distances : Computes the distances between corresponding
elements of two arrays
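    Examples
    --------
    A minimal illustrative sketch (Manhattan distances between two 2-d points;
    exact array formatting may differ slightly between NumPy versions):
    >>> import numpy as np
    >>> from sklearn.metrics import pairwise_distances
    >>> X = np.array([[0., 0.], [1., 1.]])
    >>> pairwise_distances(X, metric='manhattan')
    array([[0., 2.],
           [2., 0.]])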
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True,
force_all_finite=force_all_finite)
whom = ("`pairwise_distances`. Precomputed distance "
" need to have non-negative values.")
check_non_negative(X, whom=whom)
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric,
force_all_finite=force_all_finite, **kwds)
else:
if issparse(X) or issparse(Y):
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
dtype = bool if metric in PAIRWISE_BOOLEAN_FUNCTIONS else None
if (dtype == bool and
(X.dtype != bool or (Y is not None and Y.dtype != bool))):
msg = "Data was converted to boolean for metric %s" % metric
warnings.warn(msg, DataConversionWarning)
X, Y = check_pairwise_arrays(X, Y, dtype=dtype,
force_all_finite=force_all_finite)
# precompute data-derived metric params
params = _precompute_metric_params(X, Y, metric=metric, **kwds)
kwds.update(**params)
if effective_n_jobs(n_jobs) == 1 and X is Y:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
func = partial(distance.cdist, metric=metric, **kwds)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
# These distances require boolean arrays, when using scipy.spatial.distance
PAIRWISE_BOOLEAN_FUNCTIONS = [
'dice',
'jaccard',
'kulsinski',
'matching',
'rogerstanimoto',
'russellrao',
'sokalmichener',
'sokalsneath',
'yule',
]
# Helper functions - kernels
PAIRWISE_KERNEL_FUNCTIONS = {
    # If updating this dictionary, update the doc in both kernel_metrics()
    # and also in pairwise_kernels()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'laplacian': laplacian_kernel,
'sigmoid': sigmoid_kernel,
    'cosine': cosine_similarity,
}
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel functions.
    It exists, however, to allow for a verbose description of the mapping for
    each of the valid strings.
    The valid kernel functions, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'laplacian' sklearn.pairwise.laplacian_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
Read more in the :ref:`User Guide <metrics>`.
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": frozenset(["gamma"]),
"cosine": (),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"laplacian": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=None, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are:
['additive_chi2', 'chi2', 'linear', 'poly', 'polynomial', 'rbf',
'laplacian', 'sigmoid', 'cosine']
Read more in the :ref:`User Guide <metrics>`.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two rows from X as input and return the corresponding
kernel value as a single number. This means that callables from
:mod:`sklearn.metrics.pairwise` are not allowed, as they operate on
matrices, not single samples. Use the string identifying the kernel
instead.
filter_params : boolean
Whether to filter invalid parameters or not.
n_jobs : int or None, optional (default=None)
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
**kwds : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
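    Examples
    --------
    A minimal illustrative sketch (the linear kernel is the plain dot product;
    exact array formatting may differ slightly between NumPy versions):
    >>> import numpy as np
    >>> from sklearn.metrics.pairwise import pairwise_kernels
    >>> X = np.array([[1., 2.], [3., 4.]])
    >>> pairwise_kernels(X, metric='linear')
    array([[ 5., 11.],
           [11., 25.]])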
"""
# import GPKernel locally to prevent circular imports
from ..gaussian_process.kernels import Kernel as GPKernel
if metric == "precomputed":
X, _ = check_pairwise_arrays(X, Y, precomputed=True)
return X
elif isinstance(metric, GPKernel):
func = metric.__call__
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = {k: kwds[k] for k in kwds
if k in KERNEL_PARAMS[metric]}
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
elif callable(metric):
func = partial(_pairwise_callable, metric=metric, **kwds)
else:
raise ValueError("Unknown kernel %r" % metric)
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
|
py | b408bf12fbbc60aeda0747111d0a785febfdaf0b | """
A set of classes that can be subclassed or extended to allow automatically forwarding
method calls on the subclass to a remote RPC handler.
On the cloud side we will have a remote instance (with a remoteCall stub) that calls
the networking crossbar to send the request to the remote.
On the remote side we will have a RemoteHandler instance which dispatches received
messages to the registered handler classes.
"""
import inspect
import rpyc
from rtCommon.errors import RequestError, StateError
# Possibility A - the "has a" model, returns a 'remote' instance, nothing to do with the original class
class Remoteable(object):
"""
A class that can be subclassed to allow remote invocation.
When isRemote is True it returns a remote stub instance, when false it returns the real instance
"""
def __new__(cls, isRemote=False):
if isRemote is True:
# instance = RemoteStub(cls.__name__)
instance = RemoteStub(cls)
else:
instance = object.__new__(cls)
return instance
def __init__(self, isRemote=False):
self.isRemote = isRemote
class RemoteStub(object):
"""
A remote stub class where none of the attributes of the original class are defined.
Therefore __getattr__ will be called for all attributes (i.e. intercepting normal calls)
and this class overrides __getattr__ to forward the call request to a remote instance
via the registered communication channel function.
"""
def __init__(self, classType, isRemote=True):
assert isRemote is True
        self.isRemote = True # always true for the remote stub
self.classType = classType
self.classname = classType.__name__
self.commFunction = None
self.timeout = 5
def setRPCTimeout(self, timeout):
self.timeout = timeout
def registerCommFunction(self, commFunction):
# TODO - perhaps we register a channel instead which goes directly to one end point
self.commFunction = commFunction
def remoteCall(self, attribute, *args, **kwargs) -> any:
# args and kwargs may be of type rpyc.core.netref.type if rpyc was used to
# send this request from the client script to the projectServer; pull the actual object
args = rpyc.classic.obtain(args)
kwargs = rpyc.classic.obtain(kwargs)
callStruct = {'cmd': 'rpc', 'class': self.classname, 'attribute': attribute, 'args': args, 'kwargs': kwargs}
        # print(f'remoteCall: {callStruct}')
timeout = self.timeout
if 'rpc_timeout' in kwargs:
timeout = kwargs.pop('rpc_timeout')
return self.commFunction(callStruct, timeout=timeout)
def __getattr__(self, name):
# Previously just 'return self.remoteCall'
        # Create a closure function that populates the self and name args
def anonymous(*args, **kwargs):
return self.remoteCall(name, *args, **kwargs)
attr = getattr(self.classType, name, None)
if attr is None or not callable(attr):
# if attr is None it should be an instance variable
            # if attr is not callable it is a class variable
# call the closure function immediately and return the results
return anonymous()
# it is callable so return the function instance
return anonymous
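# Example usage sketch (hypothetical service class and comm function; the real
# comm function is whatever networking layer is registered at runtime):
#
#   stub = RemoteStub(SomeServiceClass)
#   stub.registerCommFunction(sendRequestToRemote)
#   result = stub.someMethod(1, 2, rpc_timeout=10)
#
# Every attribute access on the stub is packed into a callStruct dict and
# forwarded through the registered comm function to the remote handler.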
# Possibility B - the "is a" model, subclass the remoteable class
# Note - this just seems too complicated with the recursion of __getattribute__
class RemoteableExtensible(object):
"""
A class that can be subclassed to allow remote invocation. The remote and local versions
    are the same class type (not a stub). In the remote case, attributes can
    be registered as 'local', meaning calls to them are handled locally; all other calls
    are forwarded to the remote instance.
"""
def __init__(self, isRemote=False):
self.isRemote = isRemote
self.commFunction = None
self.timeout = 5
self.localAttributes = [
'localAttributes', 'commFunction', 'timeout',
'addLocalAttributes', 'registerCommFunction',
'setRPCTimeout', 'isRunningRemote', 'isRemote'
]
def isRunningRemote(self):
return self.isRemote
def setRPCTimeout(self, timeout):
self.timeout = timeout
def registerCommFunction(self, commFunction):
# TODO - perhaps we register a channel instead which goes directly to one end point
self.commFunction = commFunction
def remoteCall(self, attribute, *args, **kwargs) -> any:
# args and kwargs may be of type rpyc.core.netref.type if rpyc was used to
# send this request to the projectServer from the client script, pull the actual object
args = rpyc.classic.obtain(args)
kwargs = rpyc.classic.obtain(kwargs)
callStruct = {'cmd': 'rpc', 'class': type(self).__name__, 'attribute': attribute, 'args': args, 'kwargs': kwargs}
# print(f'### remoteCall callStruct: {callStruct}')
timeout = self.timeout
if 'rpc_timeout' in kwargs:
timeout = kwargs.pop('rpc_timeout')
# print(f'Remote call using timeout: {timeout}')
result = self.commFunction(callStruct, timeout=timeout)
# print(f'result: {type(result)}')
return result
def addLocalAttributes(self, methods):
if type(methods) is str:
self.localAttributes.append(methods)
elif type(methods) is list:
self.localAttributes.extend(methods)
def __getattribute__(self, name):
# callername = inspect.stack()[1][3]
# if callername in ('__getattribute__', 'remoteCall'):
# raise RecursionError('Remoteable __getattribute__ {name}: add all object attrs to localAttributes')
isremote = object.__getattribute__(self, 'isRemote')
if isremote:
localAttrs = object.__getattribute__(self, 'localAttributes')
if name not in localAttrs:
remoteCallFunc = object.__getattribute__(self, 'remoteCall')
def anonymous(*args, **kwargs):
return remoteCallFunc(name, *args, **kwargs)
attr = object.__getattribute__(self, name)
if attr is None or not callable(attr):
# if attr is None it should be an instance variable
# if attr is not callable it is a class variable
# call the closure function immediately and return the results
return anonymous()
return anonymous
# return super().__getattribute__(name)
# return super(RemoteableB, self).__getattribute__(name)
return object.__getattribute__(self, name)
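# Subclassing sketch (hypothetical names): a remote-capable class registers the
# attributes that must always run locally and lets everything else be forwarded:
#
#   class DataInterface(RemoteableExtensible):
#       def __init__(self, isRemote=False):
#           super().__init__(isRemote=isRemote)
#           self.addLocalAttributes(['cacheDir'])
#           self.cacheDir = '/tmp/cache'
#
#       def getData(self, name):
#           ...  # executed remotely when isRemote is True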
# TODO - support per client remote instances, either by having a per-client classInstanceDict
# or by supporting a 'new' function call, or by returning handles of the instances (although
# that might be more complex than needed)
class RemoteHandler:
"""
Class that runs at the remote and as message requests are received they are dispatched
to this class for processing.
"""
def __init__(self):
self.classInstanceDict = {}
def registerClassInstance(self, classType, classInstance):
self.classInstanceDict[classType.__name__] = classInstance
def registerClassNameInstance(self, className, classInstance):
self.classInstanceDict[className] = classInstance
def runRemoteCall(self, callDict):
# print(f'remoteCall {callDict}')
className = callDict.get('class')
attributeName = callDict.get('attribute')
if None in (className, attributeName):
raise RequestError(f'Malformed remote call struct: missing one of '
f'class {className}, attribute {attributeName}')
classInstance = self.classInstanceDict.get(className)
if classInstance is None:
raise StateError(f'RemoteHandler: class {className} not registered')
attributeInstance = getattr(classInstance, attributeName)
if not callable(attributeInstance):
return attributeInstance
args = callDict.get('args', ())
if args is None: # Can happen if key 'args' exists and is set to None
args = ()
kwargs = callDict.get('kwargs', {})
if kwargs is None:
kwargs = {}
res = attributeInstance(*args, **kwargs)
return res
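# Example dispatch sketch (hypothetical class and method names):
#
#   handler = RemoteHandler()
#   handler.registerClassInstance(SomeServiceClass, SomeServiceClass())
#   result = handler.runRemoteCall(
#       {'cmd': 'rpc', 'class': 'SomeServiceClass',
#        'attribute': 'someMethod', 'args': (1, 2), 'kwargs': {}})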
|
py | b408bfefdb1add00fe5a63d5b3c5c93e31a0c479 | class Solution:
def deleteNode(self, root, key):
if not root:
return
if key > root.val:
root.right = self.deleteNode(root.right, key)
elif key < root.val:
root.left = self.deleteNode(root.left, key)
        else:
            # Found the node to delete.
            if not root.left:
                # No left subtree: splice in the right child.
                return root.right
            else:
                # Replace the value with the in-order predecessor (the
                # right-most node of the left subtree), then delete that
                # predecessor from the left subtree.
                temp = root.left
                while temp.right:
                    temp = temp.right
                root.val = temp.val
                root.left = self.deleteNode(root.left, temp.val)
        return root
|
py | b408c06dc1390624dfb236a2a7e0e43f52e048d0 |
__version__ = "0.2020.4.1"
from .trioping import ping, multiPing
|
py | b408c085a38156d9e318943cbcfc8ae90bc5bfe0 | from fastapi import APIRouter
from api.endpoints import detections
api_router = APIRouter()
api_router.include_router(detections.router, tags=["detections"]) |
py | b408c14e2bc6446e1334ef369b7d21a20bf30cc6 | ## @ingroup Methods-Costs-Industrial_Costs
# compute_industrial_costs.py
#
# Created: Sep 2016, T. Orra
# Modified: Aug 2019, T. MacDonald
#
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
from SUAVE.Core import Units,Data
from SUAVE.Methods.Costs.Correlations.Industrial_Costs import ( estimate_escalation_factor, \
estimate_hourly_rates, \
distribute_non_recurring_cost )
lb_to_kg = 1*Units.lb
lbf_to_N = 1*Units.lbf
# ----------------------------------------------------------------------
# Compute costs to develop and produce the vehicle (only airplanes)
# ----------------------------------------------------------------------
## @ingroup Methods-Costs-Industrial_Costs
def compute_industrial_costs(vehicle,determine_cash_flow=True):
"""Computes costs for design, development, test, and manufacturing of an airplane program
Assumptions:
Production tooling is considered a non-recurring cost
Source:
"Airplane Design, Part VIII - Airplane Cost Estimation", Roskam
Inputs:
vehicle.costs.industrial. data dictionary with inputs for costs estimations:
reference_year [-] reference date for calculations
production_total_units [-] total units to be produced
units_to_amortize [-] number of units to amortize development costs
prototypes_units [-] number of prototypes used in flight test campaign
avionics_cost [$] user-informed avionics costs
test_facilities_cost [$] user-informed test facilities costs
manufacturing_facilities_cost [$] user-informed manufact. facilities costs
development_total_years [-] total years of development, for cash flow
aircraft_type <string> for interior costs: 'military' or 'general aviation' or 'regional' or 'commercial' or 'business'.
difficulty_factor [-] 1.0 for conventional tech., 1.5 for moderately advanc. tech., 2 for agressive use of adv. tech.
cad_factor [-] 1.2 for learning CAD, 1.0 for manual, 0.8 for experienced
stealth [-] 0 for non-stealth, 1 for stealth
material_factor [-] 1.0 for conventional Al, 1.5 for stainless steel, 2~2.5 for composites, 3 for carbon fiber
vehicle.mass_properties.empty [kg]
vehicle.networks.turbofan.
number_of_engines [-]
sealevel_static_thrust [N]
vehicle.passengers [-]
Outputs:
vehicle.costs.industrial.
unit_cost [$] total cost of each airplane produced
non_recurring.total [$]
non_recurring.breakdown.
airframe_engineering [$]
development_support [$]
flight_test [$]
engines [$]
avionics [$]
tooling_development [$]
tooling_production [$]
manufacturing_labor [$]
manufacturing_material [$]
quality_control [$]
test_facilities [$]
manufacturing_facilities [$]
total [$]
recurring.total [$]
recurring.breakdown.
airframe_engineering [$]
interior [$]
manufacturing_labor [$]
manufacturing_material [$]
quality_control [$]
engines [$]
avionics [$]
total [$]
Properties Used:
N/A
"""
# Unpack
costs = vehicle.costs.industrial
reference_year = costs.reference_year
total_production = costs.production_total_units
n_prototypes = costs.prototypes_units
development_total_years = vehicle.costs.industrial.development_total_years
ac_type = costs.aircraft_type.lower()
# define number of airplanes to amortize non-recurring costs
if costs.units_to_amortize:
amortize_units = costs.units_to_amortize
else:
amortize_units = costs.production_total_units
# user-defined costs
avionics_costs = costs.avionics_cost
test_facilities_cost = costs.test_facilities_cost
manuf_facilities_cost = costs.manufacturing_facilities_cost
    # factors to account for design-specific characteristics
    F_diff = costs.difficulty_factor # (1.0 for conventional, 1.5 for moderately advanc. tech., 2 for aggressive use of adv. tech.)
F_CAD = costs.cad_factor #(1.2 for learning, 1.0 for manual, 0.8 for experienced)
F_obs = 1 + 3. * costs.stealth #(0 for non-stealth, 1 for stealth)
F_mat = costs.material_factor #1 for conventional Al, 1.5 for stainless steel, 2~2.5 for composites, 3 for carbon fiber
# general airplane data
weight = 0.62 * vehicle.mass_properties.empty / lb_to_kg # correlation for AMPR weight, typical 62% * Empty weight
n_engines = vehicle.networks.turbofan.number_of_engines
sls_thrust = vehicle.networks.turbofan.sealevel_static_thrust / lbf_to_N
n_pax = vehicle.passengers
# estimate escalation factor
method_reference_year = 1970
escalation_factor = estimate_escalation_factor(reference_year) / estimate_escalation_factor(method_reference_year)
# estimate hourly rates
hourly_rates = estimate_hourly_rates(reference_year)
rates_engineering = hourly_rates.engineering
rates_tooling = hourly_rates.tooling
rates_manufacturing = hourly_rates.manufacturing
rates_quality_control = hourly_rates.quality_control
costs.hourly_rates = hourly_rates
# determine equivalent airspeed from MMO (assuming HP=35kft)
MMO = vehicle.envelope.maximum_mach_operational
speed = MMO * 321.32 # KEAS
# =============================================
# Non-recurring costs estimation - DT&E costs
# =============================================
# Airframe Engineering (DT&E)
AENGHD = 0.0396 * weight ** 0.791 * speed ** 1.526 * n_prototypes ** 0.183 * F_diff * F_CAD
AENGCD = AENGHD * rates_engineering # airframe eng costs development
# Development Support (DT&E)
DSC = 0.008325 * weight ** 0.873 * speed ** 1.89 * n_prototypes ** 0.346 * F_diff * escalation_factor
# Engine Cost for prototypes
eng_unit_cost = 520. * sls_thrust ** 0.8356 * escalation_factor * 0.235 # 0.235 to account for cost difference between 1970 and 1998 (roskam vs nicolai method)
ECD = eng_unit_cost * n_prototypes * n_engines * 1.10 #10% to account for spare engine
# Avionics cost for prototypes
AVCOST = avionics_costs * n_prototypes
# Manufacturing Labor (DT&E)
MLHD = 28.984 * weight ** 0.74 * speed ** 0.543 * n_prototypes ** 0.524 * F_diff
MLCD = MLHD * rates_manufacturing
# Manufacturing materials (DT&E)
MMED = 2.0 * 37.632 * F_mat * weight ** 0.689 * speed ** 0.624 * n_prototypes ** 0.792 * escalation_factor
# Tooling (DT&E)
THD = 4.0127 * weight**0.764 * speed ** 0.899 * n_prototypes**0.178*(0.33)**0.066 * F_diff
TCD = THD * rates_tooling # tooling costs for prototypes
# Tooling (Production)
THP = 4.0127 * weight**0.764 * speed ** 0.899 * total_production**0.178*(0.33)**0.066 * F_diff
TCP = THP * rates_tooling - TCD # tooling costs for total production
# Quality Control (DT&E)
QCHD = 0.130 * MLHD
QCCD = QCHD * rates_quality_control
# Flight Test Operations (DT&E)
FTC = 0.001244 * weight ** 1.16 * speed ** 1.371 * n_prototypes ** 1.281 * F_diff * F_obs * escalation_factor
# TOTAL NON-RECURRING COSTS
TNRCE = AENGCD # airframe engineering
TNRCDS = DSC # development support engineering
TNRCFT = FTC # flight test operation costs
TNRCEN = ECD # engine cost for prototypes
TNRCAV = AVCOST # avionics cost for prototypes
TNRCTD = TCD # tooling for development
TNRCTP = TCP # tooling for production
TNRCMN = MLCD # manufacturing labor
TNRCMT = MMED # manufacturing materials
TNRCQA = QCCD # quality and control
TNRTF = test_facilities_cost # test facilities cost
TNRMF = manuf_facilities_cost # manufacturing facilities costs
# sum all components above
TNRC = TNRCE + TNRCDS + TNRCFT + TNRCEN + TNRCAV + TNRCTD + TNRCTP + TNRCMN + TNRCMT + TNRCQA + TNRTF + TNRMF
# append in breakdown structure
cost = Data()
cost.non_recurring = Data()
nrec = cost.non_recurring
nrec.airframe_engineering = TNRCE
nrec.development_support = TNRCDS
nrec.flight_test = TNRCFT
nrec.engines = TNRCEN
nrec.avionics = TNRCAV
nrec.tooling_development = TNRCTD
nrec.tooling_production = TNRCTP
nrec.manufacturing_labor = TNRCMN
nrec.manufacturing_material = TNRCMT
nrec.quality_control = TNRCQA
nrec.test_facilities = TNRTF
nrec.manufacturing_facilities = TNRMF
nrec.total = TNRC
# ================================
# Recurring costs estimation
# ================================
# Airframe Engineering (Production)
AENGHP = 2.0 * (0.0396 * weight ** 0.791 * speed ** 1.526 *(n_prototypes + total_production)**0.183 * F_diff * F_CAD)
AENGCP = AENGHP * rates_engineering - AENGCD
# Engine Cost
ECP = eng_unit_cost * total_production * n_engines
# Avionics cost
AVCOSTR = avionics_costs * total_production
# Interiors cost
if ac_type == 'military':
interior_index = 0.
if ac_type == 'general aviation':
interior_index = 500.
if ac_type == 'regional':
interior_index = 1000.
if ac_type == 'commercial':
interior_index = 2000.
if ac_type == 'business':
interior_index = 3000.
INTRC = interior_index * n_pax * total_production * escalation_factor * 0.296
# Manufacturing Labor (Production)
MLHP = 1.3 * 28.984 * weight ** 0.74 * speed ** 0.543 * total_production ** 0.524 * F_diff
MLCP = MLHP * rates_manufacturing - MLCD
# Manufacturing materials and equipment (Production)
MMEP = 2.0 * 37.632 * F_mat * weight ** 0.689 * speed ** 0.624 * total_production ** 0.792 * escalation_factor
MMEP = MMEP - MMED
# Quality Control (Production)
QCHP = 0.130 * MLHP
QCCP = QCHP * rates_quality_control
# TOTAL RECURRING COSTS
TRCE = AENGCP # airframe engineering
TRCI = INTRC # interior costs
TRCMN = MLCP # manufacturing labor
TRCMT = MMEP # manufacturing materials
TRCQA = QCCP # quality and control
TRCEN = ECP # engine cost for production
TRCAV = AVCOSTR # avionics cost for production
# sum all components above
TRC = TRCE + TRCI + TRCMN + TRCMT + TRCQA + TRCEN + TRCAV
# store rec breakdown
cost.recurring = Data()
rec = cost.recurring
rec.airframe_engineering = TRCE
rec.interior = TRCI
rec.manufacturing_labor = TRCMN
rec.manufacturing_material = TRCMT
rec.quality_control = TRCQA
rec.engines = TRCEN
rec.avionics = TRCAV
rec.total = TRC
# Total cost per unit
unit_cost = TRC / total_production + TNRC / amortize_units
vehicle.costs.industrial.unit_cost = unit_cost
# packing results
vehicle.costs.industrial.non_recurring = Data()
vehicle.costs.industrial.non_recurring.total = TNRC
vehicle.costs.industrial.non_recurring.breakdown = nrec
vehicle.costs.industrial.recurring = Data()
vehicle.costs.industrial.recurring.total = TRC
vehicle.costs.industrial.recurring.breakdown = rec
# distribute non-recurring costs on time
if not development_total_years:
vehicle.costs.industrial.development_total_years = 5.
if determine_cash_flow:
distribute_non_recurring_cost(vehicle.costs) # returns costs.industrial.non_recurring.cash_flow
return
# ----------------------------------------------------------------------
# Module Tests
# ----------------------------------------------------------------------
# this will run from command line, put simple tests for your code here
## @ingroup Methods-Costs-Industrial_Costs
def call_print(config):
"""Prints precalculated costs for an airplane program.
Assumptions:
N/A
Source:
N/A
Inputs:
config.tag <string>
config.costs.industrial.
non_recurring.total [$]
unit_cost [$]
recurring.total [$]
production_total_units [$]
Outputs:
None
Properties Used:
N/A
"""
nrec = config.costs.industrial.non_recurring.total / 1e6
unit = config.costs.industrial.unit_cost / 1e6
abc = config.costs.industrial.recurring.total / config.costs.industrial.production_total_units / 1e6
print('{:10s} Total non reccuring USM: {:7.2f} , Unit: {:7.2f} , Materials:{:7.2f}'.format(config.tag,nrec,unit,abc))
if __name__ == '__main__':
import SUAVE
#==================================
config = SUAVE.Vehicle()
config.tag = 'B777-200'
#==================================
manufact_costs = config.costs.industrial
gt_engine = SUAVE.Components.Energy.Networks.Turbofan()
gt_engine.tag = 'turbofan'
gt_engine.number_of_engines = 2
gt_engine.sealevel_static_thrust = 110000 * Units.lbf
config.append_component(gt_engine)
config.mass_properties.empty = 326000 * Units.lb
config.envelope.maximum_mach_operational = 0.89
config.passengers = 250
manufact_costs.avionics_cost = 2500000.
manufact_costs.production_total_units = 500
manufact_costs.units_to_amortize = 500
manufact_costs.prototypes_units = 9
manufact_costs.reference_year = 2004
manufact_costs.first_flight_year = 1993
    manufact_costs.difficulty_factor = 1.75 # (1.0 for conventional, 1.5 for moderately advanc. tech., 2 for aggressive use of adv. tech.)
manufact_costs.cad_factor = 1.2 # (1.2 for learning, 1.0 for manual, 0.8 for experienced)
manufact_costs.stealth = 0.0 # (0 for non-stealth, 1 for stealth)
manufact_costs.material_factor = 1.0 # (1 for conventional Al, 1.5 for stainless steel, 2~2.5 for composites, 3 for carbon fiber)
manufact_costs.aircraft_type = 'commercial' # ('military','general aviation','regional','commercial','business')
compute_industrial_costs(config)
call_print(config)
|
py | b408c179732ae48518c3ecd8e9942a03df6f94c3 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "organisation.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
py | b408c1a1468da8c4b0278fe6776d94f34563ea25 | import matplotlib.pyplot as plt
import numpy as np
plt.figure()
# Split the figure into 2 rows and 1 column, and draw this plot in the first slot (the top half)
plt.subplot(2, 1, 1)
plt.plot([0,1], [0,1])
plt.subplot(2, 3, 4)
plt.plot([0,1], [0,2])
plt.subplot(2, 3, 5)
plt.plot([0,1], [0,3])
plt.subplot(2, 3, 6)
plt.plot([0,1], [0,4])
plt.show()
|
py | b408c1cf94e3c96701156e7aa55053f6df0335fc | """
Mad libs generator
_______________________________
"""
# Loop back to this point once the code finishes
loop=1
while(loop<10):
# All the questions that the program asks the user
noun = input("Choose a noun : ")
p_noun = input("Choose a plural noun : ")
noun2 = input("Choose a noun : ")
place = input("Choose a place : ")
adjective = input("Choose an adjective (Describing word): ")
noun3 = input("Choose a noun : ")
# Displays the story based on the users input
print("___________________________________")
print("Be kind to your",noun,"-footed",p_noun)
print("For a duck may be somebody's", noun2,",")
print("Be kind to your",p_noun,"in",place)
print("Where the weather is always",adjective,".")
print()
print("You may think that is this the", noun3,",")
print("Well it is.")
print("____________________________________")
# Loop back to "loop = 1"
loop=loop+1
|
py | b408c299ed34d2eaec28201abd319523928cb259 | # model settings
model = dict(
type='CenterNet',
pretrained='modelzoo://resnet18',
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_eval=False,
add_summay_every_n_step=200,
style='pytorch'),
neck=dict(type='None'),
bbox_head=dict(
type='CXTHead',
inplanes=(64, 128, 256, 512),
head_conv=128,
wh_conv=64,
hm_head_conv_num=2,
wh_head_conv_num=2,
ct_head_conv_num=1,
fovea_hm=True,
num_classes=81,
use_exp_wh=True,
shortcut_cfg=(1, 2, 3),
shortcut_attention=(True, True, True),
norm_cfg=dict(type='BN'),
fix_debug=True,
norm_wh=False,
only_merge=True,
hm_init_value=None,
giou_weight=5.,
merge_weight=1.,
hm_weight=1.,
ct_weight=1.))
cudnn_benchmark = True
# training and testing settings
train_cfg = dict(
vis_every_n_iters=100,
debug=False)
test_cfg = dict(
score_thr=0.05,
max_per_img=100)
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile', to_float32=True),
dict(type='LoadAnnotations', with_bbox=True),
dict(
type='PhotoMetricDistortion',
brightness_delta=32,
contrast_range=(0.5, 1.5),
saturation_range=(0.5, 1.5),
hue_delta=18),
dict(
type='Expand',
mean=img_norm_cfg['mean'],
to_rgb=img_norm_cfg['to_rgb'],
ratio_range=(1, 4)),
dict(
type='MinIoURandomCrop',
min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
min_crop_size=0.3),
dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(512, 512),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
data = dict(
imgs_per_gpu=16,
workers_per_gpu=4,
train=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_train2017.json',
img_prefix=data_root + 'train2017/',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/instances_val2017.json',
img_prefix=data_root + 'val2017/',
pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0002,
paramwise_options=dict(bias_decay_mult=0.))
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=1.0 / 5,
step=[20, 27])
checkpoint_config = dict(save_every_n_steps=200, max_to_keep=1, keep_every_n_epochs=40)
bbox_head_hist_config = dict(
model_type=['ConvModule', 'DeformConvPack'],
sub_modules=['bbox_head'],
save_every_n_steps=200)
# yapf:disable
log_config = dict(interval=20)
# yapf:enable
# runtime settings
total_epochs = 30
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = 'cxt18_sg_nofre_avgwhlog_onlymerge1_wh5_sgd_noaug_nobiaswd_deconv_3lr_warmup_wd2e4modot9_ext_hminit_fix_5x'
load_from = None
resume_from = None
workflow = [('train', 1)]
|
py | b408c35c23566cb8d7f8a50a9b1b89717acb477f | from app.api.base import base_name as names
from app.api.base.base_sql import Sql
class Provider:
@staticmethod
def get_print_form(args):
query = """
select *
from users u
join "Организация" org on u.id_user = org.id_user
where u.id_user = '{id_user}'
"""
return Sql.exec(query=query, args=args)
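    # Usage sketch (hypothetical argument values): Provider.get_print_form({'id_user': 42});
    # this assumes Sql.exec performs the {id_user} substitution when rendering the query.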
@staticmethod
def insert_print_form(args):
fields = ''
for f in names.print_form_fields:
if f == names.print_form_fields[0]:
fields += ' ' + f
else:
fields += ', ' + f
args[f] = args.get(f) or "''"
args['fields'] = fields
fields_values = ''
for f in names.print_form_fields:
if f == names.print_form_fields[0]:
fields_values += ' {' + f + '}'
else:
fields_values += ', {' + f + '}'
args['fields_values'] = fields_values
query = """
insert into "Организация" (
{fields}
)
VALUES (
%s
)
""" % fields_values
return Sql.exec(query=query, args=args)
@staticmethod
def update_print_form(args):
fields = ''
for f in names.print_form_fields:
a = args.get(f) or ''
args[f] = "'" + a + "'"
if f == names.print_form_fields[0]:
fields += ' ' + f + ' = ' + '{' + f + '}'
else:
fields += ', ' + f + ' = ' + '{' + f + '}'
query = """
update "Организация"
set
%s
where
id_user = {id_user}
""" % fields
return Sql.exec(query=query, args=args)
|
py | b408c420aeb4e6c61450d2dfd0be728d3f67aa7d | from sklearn_explain.tests.skl_datasets import skl_datasets_test as skltest
skltest.test_class_dataset_and_model("iris" , "XGBClassifier_13")
|
py | b408c4fd2d5e7fa5b1850bd21c55627dbd7fdcb9 | import numpy as np
from os.path import expanduser
from subprocess import check_call
BMatchingSolver_PATH = expanduser('~/Downloads/BMatchingSolver/Release/BMatchingSolver')
DEGREE_PATH = 'degree.txt'
MATRIX_PATH = 'D.txt'
RESULT_PATH = 'results.txt'
def hacky_b_matching(D, b, max_iter=5000, cache_size=None, fmt='%.8f'):
if cache_size is None:
cache_size = D.shape[0]//2
np.savetxt(DEGREE_PATH, np.zeros((D.shape[0],1), dtype=int)+b, fmt='%d')
np.savetxt(MATRIX_PATH, -D, fmt=fmt)
cmd = '%s -w %s -d %s -o %s -n %d -t 0 -v 0 -c %d -i %d >/dev/null' % (
BMatchingSolver_PATH, MATRIX_PATH, DEGREE_PATH,
RESULT_PATH, D.shape[0], cache_size, max_iter)
check_call(cmd, shell=True)
pairs = np.loadtxt(RESULT_PATH, dtype=int)
W = np.zeros_like(D, dtype=int)
W[pairs[:,0],pairs[:,1]] = 1
return W
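# Usage sketch (assumes BMatchingSolver is built at BMatchingSolver_PATH):
#
#   D = np.random.rand(10, 10)
#   D = (D + D.T) / 2             # symmetric cost matrix
#   W = hacky_b_matching(D, b=3)  # each row of W should contain b ones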
|
py | b408c5eb4f7f75d28d9d6752456d7686e5fc385e | import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "constant", cycle_length = 5, transform = "cumsum", sigma = 0.0, exog_count = 20, ar_order=0);
|
py | b408c61cb581f8dc0a3f4d218e99bc7c8d70f601 | # Copyright (c) 2020, Fabio Muratore, Honda Research Institute Europe GmbH, and
# Technical University of Darmstadt.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Fabio Muratore, Honda Research Institute Europe GmbH,
# or Technical University of Darmstadt, nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL FABIO MURATORE, HONDA RESEARCH INSTITUTE EUROPE GMBH,
# OR TECHNICAL UNIVERSITY OF DARMSTADT BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import pytest
from copy import deepcopy
from pyrado.domain_randomization.default_randomizers import create_default_randomizer
from pyrado.environment_wrappers.action_delay import ActDelayWrapper
from pyrado.environment_wrappers.action_noise import GaussianActNoiseWrapper
from pyrado.environment_wrappers.action_normalization import ActNormWrapper
from pyrado.environment_wrappers.domain_randomization import DomainRandWrapperBuffer
from pyrado.environment_wrappers.downsampling import DownsamplingWrapper
from pyrado.environment_wrappers.observation_normalization import ObsNormWrapper, ObsRunningNormWrapper
from pyrado.environment_wrappers.observation_partial import ObsPartialWrapper
from pyrado.environment_wrappers.utils import inner_env, remove_env, typed_env
from pyrado.environments.pysim.quanser_cartpole import QCartPoleSwingUpSim
from pyrado.environments.sim_base import SimEnv
from pyrado.policies.special.dummy import DummyPolicy
from pyrado.sampling.rollout import rollout
from pyrado.utils.data_types import RenderMode
from pyrado.domain_randomization.utils import wrap_like_other_env
@pytest.mark.wrapper
def test_combination():
env = QCartPoleSwingUpSim(dt=1 / 50.0, max_steps=20)
randomizer = create_default_randomizer(env)
env_r = DomainRandWrapperBuffer(env, randomizer)
env_r.fill_buffer(num_domains=3)
dp_before = []
dp_after = []
for i in range(4):
dp_before.append(env_r.domain_param)
rollout(env_r, DummyPolicy(env_r.spec), eval=True, seed=0, render_mode=RenderMode())
dp_after.append(env_r.domain_param)
assert dp_after[i] != dp_before[i]
assert dp_after[0] == dp_after[3]
env_rn = ActNormWrapper(env)
elb = {"x_dot": -213.0, "theta_dot": -42.0}
eub = {"x_dot": 213.0, "theta_dot": 42.0, "x": 0.123}
env_rn = ObsNormWrapper(env_rn, explicit_lb=elb, explicit_ub=eub)
alb, aub = env_rn.act_space.bounds
assert all(alb == -1)
assert all(aub == 1)
olb, oub = env_rn.obs_space.bounds
assert all(olb == -1)
assert all(oub == 1)
ro_r = rollout(env_r, DummyPolicy(env_r.spec), eval=True, seed=0, render_mode=RenderMode())
ro_rn = rollout(env_rn, DummyPolicy(env_rn.spec), eval=True, seed=0, render_mode=RenderMode())
assert np.allclose(env_rn._process_obs(ro_r.observations), ro_rn.observations)
env_rnp = ObsPartialWrapper(env_rn, idcs=["x_dot", r"cos_theta"])
ro_rnp = rollout(env_rnp, DummyPolicy(env_rnp.spec), eval=True, seed=0, render_mode=RenderMode())
env_rnpa = GaussianActNoiseWrapper(
env_rnp, noise_mean=0.5 * np.ones(env_rnp.act_space.shape), noise_std=0.1 * np.ones(env_rnp.act_space.shape)
)
ro_rnpa = rollout(env_rnpa, DummyPolicy(env_rnpa.spec), eval=True, seed=0, render_mode=RenderMode())
assert np.allclose(ro_rnp.actions, ro_rnpa.actions)
assert not np.allclose(ro_rnp.observations, ro_rnpa.observations)
env_rnpd = ActDelayWrapper(env_rnp, delay=3)
ro_rnpd = rollout(env_rnpd, DummyPolicy(env_rnpd.spec), eval=True, seed=0, render_mode=RenderMode())
assert np.allclose(ro_rnp.actions, ro_rnpd.actions)
assert not np.allclose(ro_rnp.observations, ro_rnpd.observations)
assert isinstance(inner_env(env_rnpd), QCartPoleSwingUpSim)
assert typed_env(env_rnpd, ObsPartialWrapper) is not None
assert isinstance(env_rnpd, ActDelayWrapper)
env_rnpdr = remove_env(env_rnpd, ActDelayWrapper)
assert not isinstance(env_rnpdr, ActDelayWrapper)
@pytest.mark.wrapper
@pytest.mark.parametrize(
"env",
[
"default_qbb",
],
ids=["qbb"],
indirect=True,
)
def test_wrap_like_other_env(env: SimEnv):
wenv_like = deepcopy(env)
wenv_like.dt /= 3
wenv = DownsamplingWrapper(env, factor=3)
assert type(wenv_like) != type(wenv)
wenv_like = wrap_like_other_env(wenv_like, wenv, use_downsampling=True)
assert type(wenv_like) == type(wenv)
wenv = ActNormWrapper(wenv)
assert type(wenv_like) != type(wenv)
wenv_like = wrap_like_other_env(wenv_like, wenv)
assert type(wenv_like) == type(wenv)
wenv = ObsNormWrapper(wenv)
assert type(wenv_like) != type(wenv)
wenv_like = wrap_like_other_env(wenv_like, wenv)
assert type(wenv_like) == type(wenv)
assert type(wenv_like.wrapped_env) == type(wenv.wrapped_env)
wenv = ObsRunningNormWrapper(wenv)
wenv_like = wrap_like_other_env(wenv_like, wenv)
assert type(wenv_like) == type(wenv)
assert type(wenv_like.wrapped_env) == type(wenv.wrapped_env)
wenv = ObsPartialWrapper(wenv, idcs=["x"])
wenv_like = wrap_like_other_env(wenv_like, wenv)
assert type(wenv_like) == type(wenv)
assert type(wenv_like.wrapped_env) == type(wenv.wrapped_env)
|
py | b408c66122e13a5d66f48b0fc07a1d845d7b2efb | import numpy as np
from utils import Isometry3d
class Feature(object):
# id for next feature
next_id = 0
# Takes a vector from the cam0 frame to the cam1 frame.
R_cam0_cam1 = None
t_cam0_cam1 = None
def __init__(self, new_id=0, optimization_config=None):
        # A unique identifier for the feature.
self.id = new_id
# Store the observations of the features in the
# state_id(key)-image_coordinates(value) manner.
self.observations = dict() # <StateID, vector4d>
        # 3d position of the feature in the world frame.
self.position = np.zeros(3)
        # An indicator to show if the 3d position of the feature
# has been initialized or not.
self.is_initialized = False
# Optimization configuration for solving the 3d position.
self.optimization_config = optimization_config
def cost(self, T_c0_ci, x, z):
"""
Compute the cost of the camera observations
Arguments:
            T_c0_ci: A rigid body transformation that takes a vector from the
                c0 frame to the ci frame. (Isometry3d)
x: The current estimation. (vec3)
z: The ith measurement of the feature j in ci frame. (vec2)
Returns:
e: The cost of this observation. (double)
"""
# Compute hi1, hi2, and hi3 as Equation (37).
alpha, beta, rho = x
h = T_c0_ci.R @ np.array([alpha, beta, 1.0]) + rho * T_c0_ci.t
# Predict the feature observation in ci frame.
z_hat = h[:2] / h[2]
# Compute the residual.
e = ((z_hat - z)**2).sum()
return e
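    # A hedged usage sketch, not part of the original module.  With the identity
    # transform, h = R @ [alpha, beta, 1] + rho * t reduces to [alpha, beta, 1],
    # so a perfect observation has zero cost.  Isometry3d(R, t) is assumed to be
    # the same constructor used elsewhere in this file:
    #
    #   T_identity = Isometry3d(np.identity(3), np.zeros(3))
    #   x = np.array([0.1, -0.2, 0.5])    # inverse-depth state (alpha, beta, rho)
    #   z = np.array([0.1, -0.2])         # ideal normalized image observation
    #   Feature().cost(T_identity, x, z)  # -> 0.0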
def jacobian(self, T_c0_ci, x, z):
"""
Compute the Jacobian of the camera observation
Arguments:
            T_c0_ci: A rigid body transformation that takes a vector from the
                c0 frame to the ci frame. (Isometry3d)
x: The current estimation. (vec3)
z: The ith measurement of the feature j in ci frame. (vec2)
Returns:
J: The computed Jacobian. (Matrix23)
r: The computed residual. (vec2)
w: Weight induced by huber kernel. (double)
"""
# Compute hi1, hi2, and hi3 as Equation (37).
alpha, beta, rho = x
h = T_c0_ci.R @ np.array([alpha, beta, 1.0]) + rho * T_c0_ci.t
h1, h2, h3 = h
# Compute the Jacobian.
W = np.zeros((3, 3))
W[:, :2] = T_c0_ci.R[:, :2]
W[:, 2] = T_c0_ci.t
J = np.zeros((2, 3))
J[0] = W[0]/h3 - W[2]*h1/(h3*h3)
J[1] = W[1]/h3 - W[2]*h2/(h3*h3)
# Compute the residual.
z_hat = np.array([h1/h3, h2/h3])
r = z_hat - z
# Compute the weight based on the residual.
e = np.linalg.norm(r)
if e <= self.optimization_config.huber_epsilon:
w = 1.0
else:
w = self.optimization_config.huber_epsilon / (2*e)
return J, r, w
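    # A hedged note, not part of the original module.  With W = [R[:, 0], R[:, 1], t]
    # we have h = W @ [alpha, beta, rho] + R[:, 2], and the quotient rule on
    # z_hat = h[:2] / h[2] gives exactly the rows J[i] = W[i]/h3 - W[2]*h_i/h3**2
    # built above.  A finite-difference sanity check (feature.optimization_config
    # is assumed to provide huber_epsilon) could look like:
    #
    #   J, r, w = feature.jacobian(T, x, z)
    #   eps = 1e-6
    #   J_num = np.column_stack([
    #       (feature.jacobian(T, x + eps * e_k, z)[1] - r) / eps
    #       for e_k in np.identity(3)
    #   ])
    #   assert np.allclose(J, J_num, atol=1e-4)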
def generate_initial_guess(self, T_c1_c2, z1, z2):
"""
Compute the initial guess of the feature's 3d position using
only two views.
Arguments:
            T_c1_c2: A rigid body transformation that takes a vector from the
                c1 frame to the c2 frame. (Isometry3d)
z1: feature observation in c1 frame. (vec2)
z2: feature observation in c2 frame. (vec2)
Returns:
p: Computed feature position in c1 frame. (vec3)
"""
# Construct a least square problem to solve the depth.
m = T_c1_c2.R @ np.array([*z1, 1.0])
a = m[:2] - z2*m[2] # vec2
b = z2*T_c1_c2.t[2] - T_c1_c2.t[:2] # vec2
# Solve for the depth.
depth = a @ b / (a @ a)
p = np.array([*z1, 1.0]) * depth
return p
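    # A hedged worked example, not part of the original module, assuming
    # Isometry3d(R, t) maps points of the z1 frame into the z2 frame:
    #
    #   T = Isometry3d(np.identity(3), np.array([-1.0, 0.0, 0.0]))  # c2 sits 1 unit right of c1
    #   z1 = np.array([0.0, 0.0])    # point on c1's optical axis
    #   z2 = np.array([-0.5, 0.0])   # the same point seen from c2
    #   Feature().generate_initial_guess(T, z1, z2)  # -> array([0., 0., 2.])
    #
    # i.e. the closed-form least-squares depth (a @ b) / (a @ a) recovers the
    # true depth of 2 exactly for noise-free observations.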
def check_motion(self, cam_states):
"""
Check the input camera poses to ensure there is enough translation
to triangulate the feature
Arguments:
cam_states: input camera poses. (dict of <CAMStateID, CAMState>)
Returns:
True if the translation between the input camera poses
is sufficient. (bool)
"""
if self.optimization_config.translation_threshold < 0:
return True
observation_ids = list(self.observations.keys())
first_id = observation_ids[0]
last_id = observation_ids[-1]
first_cam_pose = Isometry3d(
cam_states[first_id].orientation,
cam_states[first_id].position)
last_cam_pose = Isometry3d(
cam_states[last_id].orientation,
cam_states[last_id].position)
# Get the direction of the feature when it is first observed.
# This direction is represented in the world frame.
feature_direction = np.array([*self.observations[first_id][:2], 1.0])
feature_direction = feature_direction / np.linalg.norm(feature_direction)
feature_direction = first_cam_pose.R @ feature_direction
# Compute the translation between the first frame and the last frame.
# We assume the first frame and the last frame will provide the
# largest motion to speed up the checking process.
translation = last_cam_pose.t - first_cam_pose.t
parallel = translation @ feature_direction
orthogonal_translation = translation - parallel * feature_direction
return (np.linalg.norm(orthogonal_translation) >
self.optimization_config.translation_threshold)
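    # A hedged note, not part of the original module: only the component of the
    # camera translation orthogonal to the first viewing ray provides baseline
    # for triangulation.  For example, if the feature is first seen along the
    # world z-axis and the camera then moves by (0.3, 0.0, 5.0), the parallel
    # part (0, 0, 5) is discarded and |(0.3, 0, 0)| = 0.3 is compared against
    # translation_threshold.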
def initialize_position(self, cam_states):
"""
        Initialize the feature position based on all currently available
measurements.
The computed 3d position is used to set the position member variable.
        Note that the resulting position is in the world frame.
Arguments:
cam_states: A dict containing the camera poses with its ID as the
associated key value. (dict of <CAMStateID, CAMState>)
Returns:
True if the estimated 3d position of the feature is valid. (bool)
"""
cam_poses = [] # [Isometry3d]
measurements = [] # [vec2]
T_cam1_cam0 = Isometry3d(
Feature.R_cam0_cam1, Feature.t_cam0_cam1).inverse()
for cam_id, m in self.observations.items():
try:
cam_state = cam_states[cam_id]
except KeyError:
continue
# Add measurements.
measurements.append(m[:2])
measurements.append(m[2:])
# This camera pose will take a vector from this camera frame
# to the world frame.
cam0_pose = Isometry3d(cam_state.orientation, cam_state.position)
cam1_pose = cam0_pose * T_cam1_cam0
cam_poses.append(cam0_pose)
cam_poses.append(cam1_pose)
        # All camera poses should be modified such that each takes a vector
# from the first camera frame in the buffer to this camera frame.
T_c0_w = cam_poses[0]
cam_poses_tmp = []
for pose in cam_poses:
cam_poses_tmp.append(pose.inverse() * T_c0_w)
cam_poses = cam_poses_tmp
# Generate initial guess
initial_position = self.generate_initial_guess(
cam_poses[-2], measurements[0], measurements[-2])
solution = np.array([*initial_position[:2], 1.0]) / initial_position[2]
        # Apply the Levenberg-Marquardt method to solve for the 3d position.
lambd = self.optimization_config.initial_damping
inner_loop_count = 0
outer_loop_count = 0
is_cost_reduced = False
delta_norm = float('inf')
# Compute the initial cost.
total_cost = 0.0
# for i, cam_pose in enumerate(cam_poses):
for cam_pose, measurement in zip(cam_poses, measurements):
total_cost += self.cost(cam_pose, solution, measurement)
# Outer loop.
while (outer_loop_count <
self.optimization_config.outer_loop_max_iteration
and delta_norm >
self.optimization_config.estimation_precision):
A = np.zeros((3, 3))
b = np.zeros(3)
for cam_pose, measurement in zip(cam_poses, measurements):
J, r, w = self.jacobian(cam_pose, solution, measurement)
if w == 1.0:
A += J.T @ J
b += J.T @ r
else:
A += w * w * J.T @ J
b += w * w * J.T @ r
# Inner loop.
# Solve for the delta that can reduce the total cost.
while (inner_loop_count <
self.optimization_config.inner_loop_max_iteration
and not is_cost_reduced):
delta = np.linalg.solve(A + lambd * np.identity(3), b) # vec3
new_solution = solution - delta
delta_norm = np.linalg.norm(delta)
new_cost = 0.0
for cam_pose, measurement in zip(cam_poses, measurements):
new_cost += self.cost(
cam_pose, new_solution, measurement)
if new_cost < total_cost:
is_cost_reduced = True
solution = new_solution
total_cost = new_cost
lambd = max(lambd/10., 1e-10)
else:
is_cost_reduced = False
lambd = min(lambd*10., 1e12)
inner_loop_count += 1
inner_loop_count = 0
outer_loop_count += 1
        # Convert the feature position from inverse depth
# representation to its 3d coordinate.
final_position = np.array([*solution[:2], 1.0]) / solution[2]
# Check if the solution is valid. Make sure the feature
# is in front of every camera frame observing it.
is_valid_solution = True
for pose in cam_poses:
position = pose.R @ final_position + pose.t
if position[2] <= 0:
is_valid_solution = False
break
# Convert the feature position to the world frame.
self.position = T_c0_w.R @ final_position + T_c0_w.t
self.is_initialized = is_valid_solution
return is_valid_solution |
py | b408c768d3b71ba4808202d3f59d0e59948998dc | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import jsonfield.fields
import django.db.models.deletion
from django.conf import settings
import uuidfield.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Answer',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('answer_value', models.TextField()),
('answered_timestamp', models.DateTimeField()),
('displayed_timestamp', models.DateTimeField()),
],
),
migrations.CreateModel(
name='AnswerSet',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('uuid', uuidfield.fields.UUIDField(max_length=32)),
('iteration', models.IntegerField()),
('created_timestamp', models.DateTimeField()),
('delivery_timestamp', models.DateTimeField()),
('expiry_timestamp', models.DateTimeField()),
('completed_timestamp', models.DateTimeField()),
('uploaded_timestamp', models.DateTimeField(auto_now_add=True)),
],
),
migrations.CreateModel(
name='EventLog',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('description', models.CharField(max_length=255)),
('data', jsonfield.fields.JSONField()),
],
),
migrations.CreateModel(
name='Program',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('is_enabled', models.BooleanField(default=False)),
('contact_name', models.CharField(max_length=120)),
('contact_email', models.CharField(max_length=120)),
('contact_phone_number', models.CharField(max_length=60)),
],
),
migrations.CreateModel(
name='ProgramInvite',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('invitation_type', models.IntegerField(default=0)),
('first_name', models.CharField(max_length=20)),
('last_name', models.CharField(max_length=20)),
('email_address', models.EmailField(max_length=254)),
('phone_number', models.CharField(max_length=20, null=True, blank=True)),
('welcome_message', models.TextField(default=b'', blank=True)),
('is_existing_user', models.BooleanField(default=False)),
('username', models.CharField(max_length=20, null=True, blank=True)),
('password', models.CharField(max_length=32, null=True, blank=True)),
('sent_timestamp', models.DateTimeField(auto_now_add=True)),
('inviting_user', models.ForeignKey(related_name='invitations', to=settings.AUTH_USER_MODEL)),
('program', models.ForeignKey(related_name='invitations', to='sema2.Program')),
],
),
migrations.CreateModel(
name='ProgramParticipantBridge',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('status', models.BooleanField(default=True)),
('program', models.ForeignKey(related_name='program_profiles', to='sema2.Program')),
('user', models.ForeignKey(related_name='program_profiles', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='ProgramParticipantState',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('survey_uuid', models.UUIDField()),
('current_iteration', models.IntegerField(default=0)),
('active', models.BooleanField(default=True)),
('user', models.ForeignKey(related_name='participant_states', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='ProgramVersion',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('editing_status', models.IntegerField(default=0, choices=[(0, b'Draft'), (1, b'Published')])),
('display_name', models.CharField(max_length=80)),
('description', models.TextField(null=True, blank=True)),
('revision_number', models.IntegerField()),
('program', models.ForeignKey(related_name='versions', to='sema2.Program')),
],
),
migrations.CreateModel(
name='Question',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('uuid', uuidfield.fields.UUIDField(max_length=32)),
('randomise_option_order', models.BooleanField(default=False)),
('question_type', models.IntegerField(default=0, choices=[(0, b'Text'), (1, b'Mutichoice'), (2, b'Radio'), (3, b'Slider')])),
('question_text', models.CharField(max_length=255)),
('question_tag', models.CharField(max_length=60)),
('min_value', models.IntegerField(default=1)),
('min_label', models.CharField(default=b'Min', max_length=255)),
('max_value', models.IntegerField(default=5)),
('max_label', models.CharField(default=b'Max', max_length=255)),
],
),
migrations.CreateModel(
name='QuestionOption',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('label', models.CharField(max_length=255)),
('value', models.IntegerField(default=-1)),
('question', models.ForeignKey(related_name='options', to='sema2.Question')),
],
),
migrations.CreateModel(
name='QuestionSet',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('display_name', models.CharField(max_length=60)),
('randomise_question_order', models.BooleanField(default=False)),
('uuid', uuidfield.fields.UUIDField(max_length=32)),
],
),
migrations.CreateModel(
name='Schedule',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('uuid', uuidfield.fields.UUIDField(max_length=32)),
('display_name', models.CharField(max_length=20)),
('interval_minutes', models.IntegerField(default=120)),
('expiry_minutes', models.IntegerField(default=120)),
('start_time_hours', models.IntegerField(default=9)),
('start_time_minutes', models.IntegerField(default=0)),
('stop_time_hours', models.IntegerField(default=17)),
('stop_time_minutes', models.IntegerField(default=0)),
('offset_plus_minus_minutes', models.IntegerField(default=0)),
('allow_monday', models.BooleanField(default=True)),
('allow_tuesday', models.BooleanField(default=True)),
('allow_wednesday', models.BooleanField(default=True)),
('allow_thursday', models.BooleanField(default=True)),
('allow_friday', models.BooleanField(default=True)),
('allow_saturday', models.BooleanField(default=True)),
('allow_sunday', models.BooleanField(default=True)),
('program_version', models.ForeignKey(related_name='schedules', to='sema2.ProgramVersion')),
],
),
migrations.CreateModel(
name='Survey',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('display_name', models.CharField(max_length=60)),
('randomise_set_order', models.BooleanField(default=False)),
('trigger_mode', models.IntegerField(default=0, choices=[(0, b'Scheduled'), (1, b'Adhoc')])),
('max_iterations', models.IntegerField(default=-1)),
('uuid', uuidfield.fields.UUIDField(max_length=32)),
('participants', models.ManyToManyField(related_name='surveys', to=settings.AUTH_USER_MODEL)),
('program_version', models.ForeignKey(related_name='surveys', to='sema2.ProgramVersion')),
('schedule', models.ForeignKey(related_name='surveys', blank=True, to='sema2.Schedule', null=True)),
],
),
migrations.AddField(
model_name='questionset',
name='group',
field=models.ManyToManyField(related_name='question_sets', to='sema2.Survey'),
),
migrations.AddField(
model_name='questionset',
name='program_version',
field=models.ForeignKey(related_name='question_sets', to='sema2.ProgramVersion'),
),
migrations.AddField(
model_name='question',
name='set',
field=models.ForeignKey(related_name='questions', to='sema2.QuestionSet'),
),
migrations.AddField(
model_name='program',
name='active_version',
field=models.OneToOneField(related_name='parent', null=True, blank=True, to='sema2.ProgramVersion'),
),
migrations.AddField(
model_name='program',
name='admins',
field=models.ManyToManyField(related_name='administered_programs', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='program',
name='editing_version',
field=models.OneToOneField(related_name='editing_parent', null=True, blank=True, to='sema2.ProgramVersion'),
),
migrations.AddField(
model_name='program',
name='participants',
field=models.ManyToManyField(related_name='participated_programs', through='sema2.ProgramParticipantBridge', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='eventlog',
name='program_version',
field=models.ForeignKey(related_name='events', to='sema2.ProgramVersion'),
),
migrations.AddField(
model_name='answerset',
name='program_version',
field=models.ForeignKey(related_name='answer_sets', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='sema2.ProgramVersion', null=True),
),
migrations.AddField(
model_name='answerset',
name='survey',
field=models.ForeignKey(related_name='answer_sets', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='sema2.Survey', null=True),
),
migrations.AddField(
model_name='answerset',
name='user',
field=models.ForeignKey(related_name='answer_sets', on_delete=django.db.models.deletion.SET_NULL, blank=True, to=settings.AUTH_USER_MODEL, null=True),
),
migrations.AddField(
model_name='answer',
name='question',
field=models.ForeignKey(related_name='answer', on_delete=django.db.models.deletion.SET_NULL, blank=True, to='sema2.Question', null=True),
),
migrations.AddField(
model_name='answer',
name='set',
field=models.ForeignKey(related_name='answers', to='sema2.AnswerSet'),
),
]
|
py | b408c7806fd67a2323cabbd7cafe26a3483bb85e | # Generated by Django 2.2.10 on 2020-04-09 04:18
import django.contrib.postgres.fields.jsonb
import django.core.validators
from django.db import migrations, models
import wazimap_ng.datasets.models.upload
def copy_data_to_new_field(apps, schema_editor):
"""
    Copy data from the old array field "subindicators" to the new JSON field "subindicators_new".
"""
Indicator = apps.get_model('datasets', 'Indicator')
for indicator in Indicator.objects.all():
if indicator.subindicators:
groups = indicator.groups
data = []
if groups and len(groups) > 1:
for idx, subindicator in enumerate(indicator.subindicators):
subdata = {}
                    text_list = []
subindicators = subindicator.split("/")
for sidx, val in enumerate(groups):
subdata[val] = subindicators[sidx]
text_list.append(f"{val}: {subindicators[sidx]}")
data.append({
"groups": subdata,
"id": idx,
"label": " / ".join(text_list),
})
elif groups and len(groups) == 1:
for idx, subindicator in enumerate(indicator.subindicators):
data.append({
"groups" : {groups[0]: subindicator},
"id": idx,
"label": f"{groups[0]}: {subindicator}",
})
indicator.subindicators_new = data
indicator.save()
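# A hedged illustration of the transformation above, not part of the migration:
# an indicator with groups = ["gender", "age"] and
# subindicators = ["female/young", "male/old"] is rewritten to
#   [{"groups": {"gender": "female", "age": "young"}, "id": 0,
#     "label": "gender: female / age: young"},
#    {"groups": {"gender": "male", "age": "old"}, "id": 1,
#     "label": "gender: male / age: old"}]
# while a single-group indicator produces one
# {"groups": {group: value}, "id": i, "label": f"{group}: {value}"} entry per
# subindicator.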
class Migration(migrations.Migration):
dependencies = [
('datasets', '0064_merge_20200409_0325'),
]
operations = [
migrations.AddField(
model_name='indicator',
name='subindicators_new',
field=django.contrib.postgres.fields.jsonb.JSONField(default=list, blank=True),
),
migrations.RunPython(copy_data_to_new_field),
migrations.RemoveField(
model_name='indicator',
name='subindicators',
),
migrations.RenameField(
model_name='indicator',
old_name='subindicators_new',
new_name='subindicators',
),
]
|
py | b408c782da419c9d33dcbef17f8321d8f88d501c | """
cfg_loader
~~~~~~~~~~
A library to load configuration
:copyright: Copyright 2017 by ConsenSys France.
:license: BSD, see :ref:`license` for more details.
"""
from .loader import BaseConfigLoader, YamlConfigLoader
from .schema import ConfigSchema
__version__ = '0.3.0-dev'
__all__ = [
'ConfigSchema',
'BaseConfigLoader',
'YamlConfigLoader',
]
|
py | b408c7c9cd4c1dafc8f34b26dac2b5e3a6222596 | # Copyright Pincer 2021-Present
# Full MIT License can be found in `LICENSE` at the project root.
"""sent when a guild ban is removed."""
from __future__ import annotations
from typing import TYPE_CHECKING
from ..objects.events.guild import GuildBanRemoveEvent
from ..utils import Coro
from ..utils.conversion import construct_client_dict
if TYPE_CHECKING:
from ..client import Client
from ..core.gateway import Gateway
from ..core.gateway import GatewayDispatch
async def guild_ban_remove_middleware(
self: Client,
gateway: Gateway,
payload: GatewayDispatch
):
"""|coro|
Middleware for the ``on_guild_ban_remove`` event.
Parameters
----------
payload : :class:`~pincer.core.gateway.GatewayDispatch`
The data received from the guild ban remove event.
gateway : :class:`~pincer.core.gateway.Gateway`
The gateway for the current shard.
Returns
-------
Tuple[:class:`str`, :class:`~pincer.objects.events.guild.GuildBanRemoveEvent`]
        ``on_guild_ban_remove`` and a ``GuildBanRemoveEvent``
""" # noqa: E501
return (
"on_guild_ban_remove",
GuildBanRemoveEvent.from_dict(
construct_client_dict(self, payload.data)
),
)
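# A hedged usage note, not part of the original module: the tuple returned above
# is what the client dispatches on; the string "on_guild_ban_remove" selects the
# registered user handlers and the GuildBanRemoveEvent instance is handed to
# them.  export() below is the hook through which the middleware loader
# discovers this coroutine.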
def export() -> Coro:
return guild_ban_remove_middleware
|
py | b408c84502c62cb6d0a9fdcb5e51694b177b6563 | import torch
import torch.nn as nn
import numpy as np
from torch.nn import functional as F
class LinearNorm(nn.Module):
""" LinearNorm Projection """
def __init__(self, in_features, out_features, bias=False):
super(LinearNorm, self).__init__()
self.linear = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(self.linear.weight)
if bias:
nn.init.constant_(self.linear.bias, 0.0)
def forward(self, x):
x = self.linear(x)
return x
class Conv1DBlock(nn.Module):
""" 1D Convolutional Block """
def __init__(self, in_channels, out_channels, kernel_size, activation=None, dropout=None):
super(Conv1DBlock, self).__init__()
self.conv_layer = nn.Sequential()
self.conv_layer.add_module(
"conv_layer",
ConvNorm(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
padding=int((kernel_size - 1) / 2),
dilation=1,
w_init_gain="tanh",
),
)
if activation is not None:
self.conv_layer.add_module("activ", activation)
self.dropout = dropout
def forward(self, x, mask=None):
x = x.contiguous().transpose(1, 2)
x = self.conv_layer(x)
if self.dropout is not None:
x = F.dropout(x, self.dropout, self.training)
x = x.contiguous().transpose(1, 2)
if mask is not None:
x = x.masked_fill(mask.unsqueeze(-1), 0)
return x
class ConvNorm(nn.Module):
""" 1D Convolution """
def __init__(
self,
in_channels,
out_channels,
kernel_size=1,
stride=1,
padding=None,
dilation=1,
bias=True,
w_init_gain="linear",
):
super(ConvNorm, self).__init__()
if padding is None:
assert kernel_size % 2 == 1
padding = int(dilation * (kernel_size - 1) / 2)
self.conv = nn.Conv1d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias,
)
def forward(self, signal):
conv_signal = self.conv(signal)
return conv_signal
class FFTBlock(nn.Module):
""" FFT Block """
def __init__(self, d_model, n_head, d_k, d_v, d_inner, kernel_size, dropout=0.1, query_projection=False):
super(FFTBlock, self).__init__()
self.slf_attn = MultiHeadAttention(n_head, d_model, d_k, d_v, dropout=dropout)
self.pos_ffn = PositionwiseFeedForward(
d_model, d_inner, kernel_size, dropout=dropout
)
if query_projection:
self.query_linear = LinearNorm(d_model, d_model, bias=True)
def forward(self, enc_input, mask=None, slf_attn_mask=None, hidden_query=None):
enc_output, enc_slf_attn = self.slf_attn(
self.query_linear(enc_input + hidden_query) if hidden_query is not None else enc_input, \
enc_input, enc_input, mask=slf_attn_mask
)
if mask is not None:
enc_output = enc_output.masked_fill(mask.unsqueeze(-1), 0)
enc_output = self.pos_ffn(enc_output)
if mask is not None:
enc_output = enc_output.masked_fill(mask.unsqueeze(-1), 0)
return enc_output, enc_slf_attn
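    # A hedged shape sketch, not part of the original module.  For a batch of B
    # sequences of length T and model width d_model, a typical call is assumed
    # to look like:
    #
    #   block = FFTBlock(d_model=256, n_head=2, d_k=128, d_v=128,
    #                    d_inner=1024, kernel_size=(9, 1))
    #   out, attn = block(x, mask=pad_mask,
    #                     slf_attn_mask=pad_mask.unsqueeze(1).expand(-1, T, -1))
    #
    # with x of shape (B, T, 256) and pad_mask a (B, T) bool tensor marking
    # padded positions; out is (B, T, 256) and attn is (n_head * B, T, T).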
class MultiHeadAttention(nn.Module):
""" Multi-Head Attention """
def __init__(self, n_head, d_model, d_k, d_v, dropout=0.1):
super(MultiHeadAttention, self).__init__()
self.n_head = n_head
self.d_k = d_k
self.d_v = d_v
self.w_qs = LinearNorm(d_model, n_head * d_k)
self.w_ks = LinearNorm(d_model, n_head * d_k)
self.w_vs = LinearNorm(d_model, n_head * d_v)
self.attention = ScaledDotProductAttention(temperature=np.power(d_k, 0.5))
self.layer_norm = nn.LayerNorm(d_model)
self.fc = LinearNorm(n_head * d_v, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, q, k, v, mask=None):
d_k, d_v, n_head = self.d_k, self.d_v, self.n_head
sz_b, len_q, _ = q.size()
sz_b, len_k, _ = k.size()
sz_b, len_v, _ = v.size()
residual = q
q = self.w_qs(q).view(sz_b, len_q, n_head, d_k)
k = self.w_ks(k).view(sz_b, len_k, n_head, d_k)
v = self.w_vs(v).view(sz_b, len_v, n_head, d_v)
q = q.permute(2, 0, 1, 3).contiguous().view(-1, len_q, d_k) # (n*b) x lq x dk
k = k.permute(2, 0, 1, 3).contiguous().view(-1, len_k, d_k) # (n*b) x lk x dk
v = v.permute(2, 0, 1, 3).contiguous().view(-1, len_v, d_v) # (n*b) x lv x dv
        if mask is not None:
            mask = mask.repeat(n_head, 1, 1)  # (n*b) x .. x ..
output, attn = self.attention(q, k, v, mask=mask)
output = output.view(n_head, sz_b, len_q, d_v)
output = (
output.permute(1, 2, 0, 3).contiguous().view(sz_b, len_q, -1)
) # b x lq x (n*dv)
output = self.dropout(self.fc(output))
output = self.layer_norm(output + residual)
return output, attn
class ScaledDotProductAttention(nn.Module):
""" Scaled Dot-Product Attention """
def __init__(self, temperature):
super(ScaledDotProductAttention, self).__init__()
self.temperature = temperature
self.softmax = nn.Softmax(dim=2)
def forward(self, q, k, v, mask=None):
attn = torch.bmm(q, k.transpose(1, 2))
attn = attn / self.temperature
if mask is not None:
attn = attn.masked_fill(mask, -np.inf)
attn = self.softmax(attn)
output = torch.bmm(attn, v)
return output, attn
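    # Note (not in the original module): this is the standard
    # softmax(Q @ K^T / temperature) @ V attention, with temperature = sqrt(d_k)
    # supplied by MultiHeadAttention above; masked positions are set to -inf
    # before the softmax so they receive zero weight.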
class PositionwiseFeedForward(nn.Module):
""" A two-feed-forward-layer """
def __init__(self, d_in, d_hid, kernel_size, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
# Use Conv1D
# position-wise
self.w_1 = nn.Conv1d(
d_in,
d_hid,
kernel_size=kernel_size[0],
padding=(kernel_size[0] - 1) // 2,
)
# position-wise
self.w_2 = nn.Conv1d(
d_hid,
d_in,
kernel_size=kernel_size[1],
padding=(kernel_size[1] - 1) // 2,
)
self.layer_norm = nn.LayerNorm(d_in)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
residual = x
output = x.transpose(1, 2)
output = self.w_2(F.relu(self.w_1(output)))
output = output.transpose(1, 2)
output = self.dropout(output)
output = self.layer_norm(output + residual)
return output
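    # A hedged note, not part of the original module: unlike the purely linear
    # feed-forward layers of the original Transformer, this variant applies two
    # 1D convolutions along the time axis (hence the transposes), roughly
    #   y = LayerNorm(x + Dropout(Conv1d_2(ReLU(Conv1d_1(x)))))
    # which is the FastSpeech-style FFT block construction.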
|
py | b408c848ed02161f3d4185c213a3944c45191f96 | from poroto.test import TestVector
test_vectors = {
'VectorAddReduce':
TestVector(1,{
'N': [12],
'A': [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]],
'Out': [66],
}),
}
|
py | b408c95cf5632476f0e48fc211ecb2582e1ca000 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.9.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..api_client import ApiClient
class NetworkingV1Api(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_namespaced_network_policy(self, namespace, body, **kwargs):
"""
create a NetworkPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_namespaced_network_policy(namespace, body, async=True)
>>> result = thread.get()
:param async bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1NetworkPolicy body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1NetworkPolicy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.create_namespaced_network_policy_with_http_info(namespace, body, **kwargs)
else:
(data) = self.create_namespaced_network_policy_with_http_info(namespace, body, **kwargs)
return data
def create_namespaced_network_policy_with_http_info(self, namespace, body, **kwargs):
"""
create a NetworkPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_namespaced_network_policy_with_http_info(namespace, body, async=True)
>>> result = thread.get()
:param async bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1NetworkPolicy body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1NetworkPolicy
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'body', 'pretty']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_namespaced_network_policy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `create_namespaced_network_policy`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_namespaced_network_policy`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/networking.k8s.io/v1/namespaces/{namespace}/networkpolicies', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1NetworkPolicy',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_collection_namespaced_network_policy(self, namespace, **kwargs):
"""
delete collection of NetworkPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_collection_namespaced_network_policy(namespace, async=True)
>>> result = thread.get()
:param async bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server the server will respond with a 410 ResourceExpired error indicating the client must restart their list without the continue field. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.delete_collection_namespaced_network_policy_with_http_info(namespace, **kwargs)
else:
(data) = self.delete_collection_namespaced_network_policy_with_http_info(namespace, **kwargs)
return data
def delete_collection_namespaced_network_policy_with_http_info(self, namespace, **kwargs):
"""
delete collection of NetworkPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_collection_namespaced_network_policy_with_http_info(namespace, async=True)
>>> result = thread.get()
:param async bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server the server will respond with a 410 ResourceExpired error indicating the client must restart their list without the continue field. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'pretty', '_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_collection_namespaced_network_policy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_collection_namespaced_network_policy`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/networking.k8s.io/v1/namespaces/{namespace}/networkpolicies', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_namespaced_network_policy(self, name, namespace, body, **kwargs):
"""
delete a NetworkPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_namespaced_network_policy(name, namespace, body, async=True)
>>> result = thread.get()
:param async bool
:param str name: name of the NetworkPolicy (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1DeleteOptions body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.delete_namespaced_network_policy_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.delete_namespaced_network_policy_with_http_info(name, namespace, body, **kwargs)
return data
def delete_namespaced_network_policy_with_http_info(self, name, namespace, body, **kwargs):
"""
delete a NetworkPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_namespaced_network_policy_with_http_info(name, namespace, body, async=True)
>>> result = thread.get()
:param async bool
:param str name: name of the NetworkPolicy (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1DeleteOptions body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param int grace_period_seconds: The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.
:param bool orphan_dependents: Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.
:param str propagation_policy: Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.
:return: V1Status
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty', 'grace_period_seconds', 'orphan_dependents', 'propagation_policy']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_namespaced_network_policy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `delete_namespaced_network_policy`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `delete_namespaced_network_policy`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `delete_namespaced_network_policy`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'grace_period_seconds' in params:
query_params.append(('gracePeriodSeconds', params['grace_period_seconds']))
if 'orphan_dependents' in params:
query_params.append(('orphanDependents', params['orphan_dependents']))
if 'propagation_policy' in params:
query_params.append(('propagationPolicy', params['propagation_policy']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/networking.k8s.io/v1/namespaces/{namespace}/networkpolicies/{name}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1Status',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_api_resources(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_api_resources(async=True)
>>> result = thread.get()
:param async bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_api_resources_with_http_info(**kwargs)
else:
(data) = self.get_api_resources_with_http_info(**kwargs)
return data
def get_api_resources_with_http_info(self, **kwargs):
"""
get available resources
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_api_resources_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:return: V1APIResourceList
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_resources" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/networking.k8s.io/v1/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIResourceList',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_namespaced_network_policy(self, namespace, **kwargs):
"""
list or watch objects of kind NetworkPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_namespaced_network_policy(namespace, async=True)
>>> result = thread.get()
:param async bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server the server will respond with a 410 ResourceExpired error indicating the client must restart their list without the continue field. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1NetworkPolicyList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.list_namespaced_network_policy_with_http_info(namespace, **kwargs)
else:
(data) = self.list_namespaced_network_policy_with_http_info(namespace, **kwargs)
return data
def list_namespaced_network_policy_with_http_info(self, namespace, **kwargs):
"""
list or watch objects of kind NetworkPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_namespaced_network_policy_with_http_info(namespace, async=True)
>>> result = thread.get()
:param async bool
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server the server will respond with a 410 ResourceExpired error indicating the client must restart their list without the continue field. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1NetworkPolicyList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['namespace', 'pretty', '_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_namespaced_network_policy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `list_namespaced_network_policy`")
collection_formats = {}
path_params = {}
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/networking.k8s.io/v1/namespaces/{namespace}/networkpolicies', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1NetworkPolicyList',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_network_policy_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind NetworkPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_network_policy_for_all_namespaces(async=True)
>>> result = thread.get()
:param async bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server the server will respond with a 410 ResourceExpired error indicating the client must restart their list without the continue field. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1NetworkPolicyList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.list_network_policy_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_network_policy_for_all_namespaces_with_http_info(**kwargs)
return data
def list_network_policy_for_all_namespaces_with_http_info(self, **kwargs):
"""
list or watch objects of kind NetworkPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.list_network_policy_for_all_namespaces_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server the server will respond with a 410 ResourceExpired error indicating the client must restart their list without the continue field. This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param bool include_uninitialized: If true, partially initialized resources are included in the response.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1NetworkPolicyList
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['_continue', 'field_selector', 'include_uninitialized', 'label_selector', 'limit', 'pretty', 'resource_version', 'timeout_seconds', 'watch']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_network_policy_for_all_namespaces" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if '_continue' in params:
query_params.append(('continue', params['_continue']))
if 'field_selector' in params:
query_params.append(('fieldSelector', params['field_selector']))
if 'include_uninitialized' in params:
query_params.append(('includeUninitialized', params['include_uninitialized']))
if 'label_selector' in params:
query_params.append(('labelSelector', params['label_selector']))
if 'limit' in params:
query_params.append(('limit', params['limit']))
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'resource_version' in params:
query_params.append(('resourceVersion', params['resource_version']))
if 'timeout_seconds' in params:
query_params.append(('timeoutSeconds', params['timeout_seconds']))
if 'watch' in params:
query_params.append(('watch', params['watch']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf', 'application/json;stream=watch', 'application/vnd.kubernetes.protobuf;stream=watch'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/networking.k8s.io/v1/networkpolicies', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1NetworkPolicyList',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def patch_namespaced_network_policy(self, name, namespace, body, **kwargs):
"""
partially update the specified NetworkPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.patch_namespaced_network_policy(name, namespace, body, async=True)
>>> result = thread.get()
:param async bool
:param str name: name of the NetworkPolicy (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1NetworkPolicy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.patch_namespaced_network_policy_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.patch_namespaced_network_policy_with_http_info(name, namespace, body, **kwargs)
return data
def patch_namespaced_network_policy_with_http_info(self, name, namespace, body, **kwargs):
"""
partially update the specified NetworkPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.patch_namespaced_network_policy_with_http_info(name, namespace, body, async=True)
>>> result = thread.get()
:param async bool
:param str name: name of the NetworkPolicy (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param object body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1NetworkPolicy
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method patch_namespaced_network_policy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `patch_namespaced_network_policy`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `patch_namespaced_network_policy`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `patch_namespaced_network_policy`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json-patch+json', 'application/merge-patch+json', 'application/strategic-merge-patch+json'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/networking.k8s.io/v1/namespaces/{namespace}/networkpolicies/{name}', 'PATCH',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1NetworkPolicy',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_namespaced_network_policy(self, name, namespace, **kwargs):
"""
read the specified NetworkPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.read_namespaced_network_policy(name, namespace, async=True)
>>> result = thread.get()
:param async bool
:param str name: name of the NetworkPolicy (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1NetworkPolicy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.read_namespaced_network_policy_with_http_info(name, namespace, **kwargs)
else:
(data) = self.read_namespaced_network_policy_with_http_info(name, namespace, **kwargs)
return data
def read_namespaced_network_policy_with_http_info(self, name, namespace, **kwargs):
"""
read the specified NetworkPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.read_namespaced_network_policy_with_http_info(name, namespace, async=True)
>>> result = thread.get()
:param async bool
:param str name: name of the NetworkPolicy (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1NetworkPolicy
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'pretty', 'exact', 'export']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_namespaced_network_policy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `read_namespaced_network_policy`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `read_namespaced_network_policy`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
if 'exact' in params:
query_params.append(('exact', params['exact']))
if 'export' in params:
query_params.append(('export', params['export']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/networking.k8s.io/v1/namespaces/{namespace}/networkpolicies/{name}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1NetworkPolicy',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def replace_namespaced_network_policy(self, name, namespace, body, **kwargs):
"""
replace the specified NetworkPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_namespaced_network_policy(name, namespace, body, async=True)
>>> result = thread.get()
:param async bool
:param str name: name of the NetworkPolicy (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1NetworkPolicy body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1NetworkPolicy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.replace_namespaced_network_policy_with_http_info(name, namespace, body, **kwargs)
else:
(data) = self.replace_namespaced_network_policy_with_http_info(name, namespace, body, **kwargs)
return data
def replace_namespaced_network_policy_with_http_info(self, name, namespace, body, **kwargs):
"""
replace the specified NetworkPolicy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.replace_namespaced_network_policy_with_http_info(name, namespace, body, async=True)
>>> result = thread.get()
:param async bool
:param str name: name of the NetworkPolicy (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param V1NetworkPolicy body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1NetworkPolicy
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['name', 'namespace', 'body', 'pretty']
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method replace_namespaced_network_policy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'name' is set
if ('name' not in params) or (params['name'] is None):
raise ValueError("Missing the required parameter `name` when calling `replace_namespaced_network_policy`")
# verify the required parameter 'namespace' is set
if ('namespace' not in params) or (params['namespace'] is None):
raise ValueError("Missing the required parameter `namespace` when calling `replace_namespaced_network_policy`")
# verify the required parameter 'body' is set
if ('body' not in params) or (params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `replace_namespaced_network_policy`")
collection_formats = {}
path_params = {}
if 'name' in params:
path_params['name'] = params['name']
if 'namespace' in params:
path_params['namespace'] = params['namespace']
query_params = []
if 'pretty' in params:
query_params.append(('pretty', params['pretty']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['*/*'])
# Authentication setting
auth_settings = ['BearerToken']
return self.api_client.call_api('/apis/networking.k8s.io/v1/namespaces/{namespace}/networkpolicies/{name}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1NetworkPolicy',
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
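# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, not part of the generated
# client). It shows how the NetworkPolicy calls above are typically driven
# from user code, assuming a kubernetes-client release contemporary with this
# module (this vintage still uses the `async` kwarg, later renamed
# `async_req`) and a kubeconfig pointing at a reachable cluster.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from kubernetes import client, config
    config.load_kube_config()  # or config.load_incluster_config() inside a pod
    api = client.NetworkingV1Api()
    # Paginated listing: request small pages and follow the `continue` token.
    page = api.list_namespaced_network_policy('default', limit=10, watch=False)
    for policy in page.items:
        print(policy.metadata.namespace, policy.metadata.name)
    while page.metadata._continue:
        page = api.list_namespaced_network_policy(
            'default', limit=10, _continue=page.metadata._continue)
        for policy in page.items:
            print(policy.metadata.namespace, policy.metadata.name)
    # Reading a single policy ('default-deny' is a hypothetical policy name).
    print(api.read_namespaced_network_policy('default-deny', 'default').spec)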
|
py | b408caf09a462d356108fa53301affe89dde182f | # Copyright (c) 2022 RWTH Aachen - Werkzeugmaschinenlabor (WZL)
# Contact: Simon Cramer, [email protected]
from sklearn.svm import LinearSVR, SVR
from s3_smart_open import to_pckl, read_pckl, read_pd_fth
from absl import logging
import pandas as pd
from sklearn.utils.validation import check_X_y, check_array, check_is_fitted
from sklearn.base import BaseEstimator, RegressorMixin
from tensorflow import keras
class Regression(BaseEstimator, RegressorMixin):
"""Regression class built with sklearn
"""
def __init__(self, model_name, tolerance, C, verbose, max_iter, epsilon):
"""Initialize shared parameters for Support Vector Regression.
Args:
model_name (str): Name of the model to save/load.
tolerance (float): Tolerance for stopping criterion.
C (float): Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive.
verbose (int): Enable verbose output. Note that this setting takes advantage of a per-process runtime setting in libsvm/liblinear that, if enabled, may not work properly in a multithreaded context.
            max_iter (int): Hard limit on iterations within the solver, or -1 for no limit (-1 is only allowed for the SVR/epsilon method).
epsilon (float): Epsilon parameter in the epsilon-insensitive loss function. Note that the value of this parameter depends on the scale of the target variable y.
"""
self.tol = tolerance
assert epsilon >= 0
self.epsilon = epsilon
assert C >= 0
self.C = C
self.max_iter = max_iter
self.fitted_ = False
self.model_name = model_name
self.verbose = verbose
def build_model(self):
raise NotImplementedError
@staticmethod
def factoryRegression(model_name, method, tolerance, C, verbose, max_iter, kernel, degree, gamma, coef0, shrinking, cache_size, epsilon, loss, fit_intercept, intercept_scaling, dual, random_state):
"""Wehter to use sklearn.svm.SVR() or sklearn.svm.linearSVR() as model.
Args:
Please find descriptions for arguments in the subclasses
Returns:
[Regression object]: Epsilon(SVR) or Linear SV Regression object to use for fit, predict and evaluate.
"""
if method == 'SVR':
return SVRegression(model_name, tolerance , C, epsilon, verbose, max_iter, kernel, degree, gamma, coef0, shrinking, cache_size)
elif method == 'linearSVR':
return linearSVRegression(model_name, tolerance, C, epsilon, verbose, max_iter, loss, fit_intercept , intercept_scaling, dual, random_state)
else:
raise Exception('Method {} is invalid'.format(method))
def fit(self,X,y):
"""Build and fit the model to a given dataset X (features) and y (targets).
Args:
X (pd.DataFrame): features data
y (pd.DataFrame): target data
"""
X = X.values
y = y.values
X, y = check_X_y(X, y, accept_sparse=False, multi_output=True)
self.model = self.build_model()
logging.info('Begin fitting with '+str(X.shape)+' samples')
self.model.fit(X,y)
self.fitted_ = True
def predict(self,X):
"""Predict the targets given a set of inputs with already fitted model.
Args:
X (pd.DataFrame): Features
Returns:
[pd.DataFrame]: Predicted targets
"""
X = check_array(X, accept_sparse=False)
check_is_fitted(self, 'fitted_')
return self.model.predict(X)
def evaluate(self,X,y,metrics):
"""Evaluate a fitted model and return given keras metrics.
Args:
X (pd.DataFrame): Features
y (pd.DataFrame): Targets
metrics (list[str]): keras.metrics
Returns:
[dict]: Metrics in format {key,value}
"""
X = X.values
y = y.values
X, y = check_X_y(X, y, accept_sparse=False, multi_output=True)
check_is_fitted(self, 'fitted_')
y_pred = self.model.predict(X)
metrics = [keras.metrics.get(m) for m in metrics]
res = {}
for m in metrics:
m.update_state(y, y_pred)
res[m.name] = m.result().numpy().tolist()
return res
def save(self,output_path):
"""Saves the Regression object to disk or to s3 bucket.
Args:
output_path (str): Path to storage location.
"""
to_pckl(output_path,self.model_name+'.pckl',self)
@staticmethod
def load(input_path:str, model_name:str):
"""Loads the Regression object.
Args:
input_path (str): Path where the object is stored
model_name (str): Name of the object to load
Returns:
[sv_regression.Regression object]: Regression object to use for predict and evaluate.
"""
model = read_pckl(input_path, model_name+'.pckl')
return model
class SVRegression(Regression):
"""Scikit learn Epsilon Support Vector Regression Model
Args:
Regression (class): Regression class built with sklearn
"""
def __init__(self,model_name, tolerance, C, epsilon, verbose, max_iter, kernel, degree, gamma, coef0, shrinking, cache_size):
"""Initialize Epsilon Support Vector Regression
Args:
model_name (str): Name of the model to save/load.
tolerance (float): Tolerance for stopping criterion.
C (float): Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive.
verbose (int): Enable verbose output. Note that this setting takes advantage of a per-process runtime setting in libsvm/liblinear that, if enabled, may not work properly in a multithreaded context.
            max_iter (int): Hard limit on iterations within the solver, or -1 for no limit.
kernel (string): Specifies the kernel type to be used in the algorithm.
degree (int): Degree of the polynomial kernel function (‘poly’). Ignored by all other kernels.
gamma (float or str): Kernel coefficient for rbf, poly and sigmoid.
coef0 (float): Independent term in kernel function. It is only significant in ‘poly’ and ‘sigmoid’.
shrinking (bool): Whether to use the shrinking heuristic.
cache_size (float): Specify the size of the kernel cache (in MB).
            epsilon (float): Epsilon parameter in the epsilon-insensitive loss function. Note that the value of this parameter depends on the scale of the target variable y.
"""
assert (max_iter > 0 or max_iter == -1)
super().__init__(model_name, tolerance, C, verbose, max_iter, epsilon)
self.kernel = kernel
assert degree >= 0
self.degree = degree
self.gamma = gamma
self.coef0 = coef0
self.shrinking = shrinking
assert cache_size > 0
self.cache_size = cache_size
self.verbose = bool(self.verbose)
def build_model(self):
"""Builds model from sklearn.svm.SVR(),
"""
if self.gamma not in ['scale','auto']:
self.gamma = float(self.gamma)
assert self.gamma > 0
model = SVR(tol=self.tol,
C=self.C,
max_iter=self.max_iter,
kernel=self.kernel,
degree=self.degree,
gamma=self.gamma,
coef0=self.coef0,
epsilon=self.epsilon,
shrinking=self.shrinking,
cache_size=self.cache_size,
verbose=self.verbose
)
return model
class linearSVRegression(Regression):
"""Scikit learn linear Support Vector Regression Model
Args:
Regression (class): Regression class built with sklearn
"""
def __init__(self, model_name, tolerance, C, epsilon, verbose, max_iter, loss, fit_intercept, intercept_scaling, dual, random_state):
"""Initialize Linear Support Vector Regression
Args:
model_name (str): Name of the model to save/load.
tolerance (float): Tolerance for stopping criterion.
C (float): Regularization parameter. The strength of the regularization is inversely proportional to C. Must be strictly positive.
verbose (int): Enable verbose output. Note that this setting takes advantage of a per-process runtime setting in libsvm/liblinear that, if enabled, may not work properly in a multithreaded context.
            max_iter (int): Hard limit on iterations within the solver; must be positive (no -1/unlimited option for linear SVR).
epsilon (float): Epsilon parameter in the epsilon-insensitive loss function. Note that the value of this parameter depends on the scale of the target variable y.
loss (str): Specifies the loss function. The epsilon-insensitive loss (standard SVR) is the L1 loss, while the squared epsilon-insensitive loss (‘squared_epsilon_insensitive’) is the L2 loss.
fit_intercept (bool): Whether to calculate the intercept for this model.
intercept_scaling (float): To lessen the effect of regularization on synthetic feature weight (and therefore on the intercept) intercept_scaling has to be increased.
dual (bool): Select the algorithm to either solve the dual or primal optimization problem.
random_state (int): Controls the pseudo random number generation for shuffling the data. Pass an int for reproducible output across multiple function calls.
"""
assert max_iter > 0, 'No Limit for linear support vector regression is not allowed!'
super().__init__(model_name, tolerance, C, verbose, max_iter, epsilon)
self.loss = loss
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.dual = dual
self.random_state = random_state
self.verbose = verbose
def build_model(self):
"""Builds model from sklearn.svm.linearSVR(),
"""
model = LinearSVR(tol=self.tol,
C=self.C,
max_iter=self.max_iter,
epsilon=self.epsilon,
loss=self.loss,
fit_intercept=self.fit_intercept,
intercept_scaling=self.intercept_scaling,
dual=self.dual,
random_state=self.random_state,
verbose=self.verbose
)
return model
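# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition). A minimal end-to-end run of
# the factory above on synthetic in-memory data; every hyperparameter value
# below is an arbitrary example, not a recommendation.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import numpy as np
    X = pd.DataFrame(np.random.rand(200, 3), columns=['f1', 'f2', 'f3'])
    y = 2.0 * X['f1'] - 0.5 * X['f2'] + 0.1  # pandas Series target (a single-column DataFrame works too)
    reg = Regression.factoryRegression(
        model_name='svr_demo', method='SVR', tolerance=1e-3, C=1.0,
        verbose=0, max_iter=-1, kernel='rbf', degree=3, gamma='scale',
        coef0=0.0, shrinking=True, cache_size=200.0, epsilon=0.1,
        loss='epsilon_insensitive', fit_intercept=True,
        intercept_scaling=1.0, dual=True, random_state=0)
    reg.fit(X, y)
    print(reg.predict(X)[:5])
    # metric names must resolve to keras Metric classes (not the plain functions)
    print(reg.evaluate(X, y, metrics=['MeanAbsoluteError']))
    # reg.save('s3://my-bucket/models/')  # hypothetical path; requires s3_smart_open setup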
|
py | b408cc12dbfac8f6f07a29ec67baa2b8b02a540b | from django.contrib import admin
# Register your models here.
from userapp.models import User , CustomUser, Role
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import Group
from rest_framework.authtoken.models import Token
class AdminUserapp(admin.ModelAdmin):
list_display = ('id', 'email', 'username','first_name', 'middle_name','last_name' ,'active', 'admin')
fieldsets = ((_("Personal info"),{'fields':('username', 'first_name', 'middle_name','last_name','password', 'image','active')}),)
# readonly_fields = ('password',)
search_fields = ('username', )
def has_add_permission(self, request, obj=None):
if request.user.is_superuser:
return True
return False
def has_view_permission(self, request, obj=None):
if request.user.is_staff or request.user.is_superuser:
return True
def has_delete_permission(self, request, obj=None):
if request.user.is_superuser and request.user.is_staff:
return True
elif request.user.is_staff:
            return False
def has_change_permission(self, request, obj=None):
if request.user.is_superuser and request.user.is_staff:
return True
elif request.user.is_staff:
return False
admin.site.register(User, AdminUserapp)
class AdminRole(admin.ModelAdmin):
list_display = ('id', 'name')
def has_add_permission(self, request, obj=None):
if request.user.is_superuser:
return True
return False
def has_view_permission(self, request, obj=None):
if request.user.is_staff or request.user.is_superuser:
return True
def has_delete_permission(self, request, obj=None):
if request.user.is_superuser and request.user.is_staff:
return True
elif request.user.is_staff:
            return False
def has_change_permission(self, request, obj=None):
if request.user.is_superuser and request.user.is_staff:
return True
elif request.user.is_staff:
return False
admin.site.unregister(Group)
admin.site.unregister(Token)
admin.site.register(Role,AdminRole)
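# ---------------------------------------------------------------------------
# Illustrative refactoring sketch (editor's addition, hypothetical class).
# AdminUserapp and AdminRole above duplicate the same permission rules; a
# shared mixin such as the one below would state them once. It is not wired
# into the registrations above.
# ---------------------------------------------------------------------------
class SuperuserWritableStaffReadableMixin:
    """Superusers get full access, plain staff get read-only access."""
    def has_add_permission(self, request, obj=None):
        return request.user.is_superuser
    def has_view_permission(self, request, obj=None):
        return request.user.is_staff or request.user.is_superuser
    def has_delete_permission(self, request, obj=None):
        return request.user.is_superuser and request.user.is_staff
    def has_change_permission(self, request, obj=None):
        return request.user.is_superuser and request.user.is_staff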
|
py | b408cd33d91abb58c74e525b5e687e8f64e5f258 | '''
Copyright (c) 2021-2022 OVGU LIA
Author: Harish Kumar Pakala
This source code is licensed under the Apache License 2.0 (see LICENSE.txt).
This source code may use other Open Source software components (see LICENSE.txt).
'''
import abc
class AASEndPointHandler(object):
__metaclass__ = abc.ABCMeta
def __init__(self, pyAAS, msgHandler):
self.saas = pyAAS
self.ipaddressComdrv = "ipaddressComdrv"
self.portComdrv = "portComdrv"
self.msgHandler = msgHandler
@abc.abstractmethod
def configure(self):
pass
# @abc.abstractmethod
def update(self, channel):
pass
@abc.abstractmethod
def start(self, saas):
pass
@abc.abstractmethod
def stop(self):
pass
@abc.abstractmethod
def dispatchMessage(self, tMessage):
pass
@abc.abstractmethod
def retrieveMessage(self, testMesage):
pass
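# ---------------------------------------------------------------------------
# Illustrative sketch (editor's addition, hypothetical subclass). The class
# above only defines the endpoint-handler contract; a concrete transport
# implements the hooks roughly as outlined below. All names and behaviour in
# this sketch are invented for illustration and are not part of pyAAS.
# ---------------------------------------------------------------------------
class ExampleLoopbackEndPointHandler(AASEndPointHandler):
    """Minimal in-process handler that echoes dispatched messages back."""
    def configure(self):
        # a real handler would read host/port from the self.saas configuration
        self.ipaddressComdrv = "127.0.0.1"
        self.portComdrv = 0
    def start(self, saas):
        self.running = True   # a real handler would start its server/listener here
    def stop(self):
        self.running = False  # and shut it down here
    def dispatchMessage(self, tMessage):
        # outbound path; the loopback simply feeds the message back in
        self.retrieveMessage(tMessage)
    def retrieveMessage(self, testMesage):
        pass  # inbound path; a real handler would forward to self.msgHandler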
|
py | b408cd4804e2c7cdafc9ec80e1e6610e39cd4981 | import torch.nn.functional as F
import torch
import torch.nn as nn
import torch.optim as optim
import unittest
from label_set_loss_functions.loss import LeafDiceLoss
class TestLeafDiceLoss(unittest.TestCase):
def test_partial_label_2d(self):
num_classes = 4 # labels 0 to 3
labels_superset_map = {
4: [2, 3],
}
# Define 2d examples
target = torch.tensor(
[[0,0,0,0],
[0,1,1,0],
[0,4,4,0],
[0,0,0,0]]
)
# Add another dimension corresponding to the batch (batch size = 1 here)
target = target.unsqueeze(0) # shape (1, H, W)
target_2 = target.clone()
target_2[target_2 > 2] = 2 # inside super class 4
# Perfect segmentation
pred_very_good = 1000 * F.one_hot(
target_2, num_classes=num_classes).permute(0, 3, 1, 2).float()
target_1 = target.clone()
target_1[target_1 > 1] = 1 # outside super class 4
# Intermediate segmentation - wrong for partial labels (4)
pred_1_error = 1000 * F.one_hot(
target_1, num_classes=num_classes).permute(0, 3, 1, 2).float()
# Intermediate segmentation - wrong for all non background (0) labels
pred_3_errors = 1000 * F.one_hot(
torch.zeros_like(target), num_classes=num_classes).permute(0, 3, 1, 2).float()
# initialize the mean dice loss
loss = LeafDiceLoss(labels_superset_map=labels_superset_map)
# mean dice loss for pred_very_good should be close to 0.5 and not 0
# because for empty prediction and empty target segmentation
# the Dice loss value is 0 and not 1
true_res = 0.5
dice_loss_good = float(loss.forward(pred_very_good, target).cpu())
self.assertAlmostEqual(dice_loss_good, true_res, places=3)
true_res = 1 - (1. / 4.) * (1 + (2. / 3.) + 0 + 0)
dice_loss_1_error = float(loss.forward(pred_1_error, target).cpu())
self.assertAlmostEqual(dice_loss_1_error, true_res, places=3)
true_res = 1 - (1. / 4.) * (24. / (12 + 16) + 0 + 0 + 0)
dice_loss_3_error = float(loss.forward(pred_3_errors, target).cpu())
self.assertAlmostEqual(dice_loss_3_error, true_res, places=3)
def test_convergence_partial_label(self):
"""
The goal of this test is to assess if the gradient of the loss function
is correct by testing if we can train a one layer neural network
to segment one image.
We verify that the loss is decreasing in almost all SGD steps.
"""
learning_rate = 0.001
max_iter = 50
num_classes = 2 # labels 0 and 1
labels_superset_map = {
2: [0, 1],
}
# define a simple 3d example
target_seg = torch.tensor(
[
# raw 0
[[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]],
# raw 1
[[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 1, 2, 2],
[0, 0, 2, 2]],
# raw 2
[[0, 0, 0, 0],
[0, 1, 1, 0],
[0, 1, 1, 0],
[0, 0, 0, 0]]
]
)
target_seg = torch.unsqueeze(target_seg, dim=0)
image = 12 * target_seg + 27
image = image.float()
num_classes = 2
num_voxels = 3 * 4 * 4
# define a one layer model
class OnelayerNet(nn.Module):
def __init__(self):
super(OnelayerNet, self).__init__()
self.layer = nn.Linear(num_voxels, num_voxels * num_classes)
def forward(self, x):
x = x.view(-1, num_voxels)
x = self.layer(x)
x = x.view(-1, num_classes, 3, 4, 4)
return x
# initialise the network
net = OnelayerNet()
# initialize the loss
loss = LeafDiceLoss(labels_superset_map=labels_superset_map)
# initialize an SGD
optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9)
loss_history = []
# train the network
for _ in range(max_iter):
# set the gradient to zero
optimizer.zero_grad()
# forward pass
output = net(image)
loss_val = loss(output, target_seg)
# backward pass
loss_val.backward()
optimizer.step()
# stats
loss_history.append(loss_val.item())
# count the number of SGD steps in which the loss decreases
num_decreasing_steps = 0
for i in range(len(loss_history) - 1):
if loss_history[i] > loss_history[i+1]:
num_decreasing_steps += 1
decreasing_steps_ratio = float(num_decreasing_steps) / (len(loss_history) - 1)
# verify that the loss is decreasing for sufficiently many SGD steps
self.assertTrue(decreasing_steps_ratio > 0.5)
if __name__ == '__main__':
unittest.main()
|
py | b408d1943820c381c0359d9c6aac831a334f138e | # This code is part of Qiskit.
#
# (C) Copyright IBM 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=missing-module-docstring
from .fake_almaden import FakeAlmaden
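# ---------------------------------------------------------------------------
# Illustrative usage sketch (editor's addition, commented out so the package
# __init__ stays import-only). FakeAlmaden mimics the 20-qubit IBM Q Almaden
# device and is normally used as a transpilation or simulation target, e.g.:
#
#     from qiskit import QuantumCircuit, transpile
#     backend = FakeAlmaden()
#     qc = QuantumCircuit(2)
#     qc.h(0); qc.cx(0, 1); qc.measure_all()
#     print(transpile(qc, backend=backend).count_ops())
#
# Exact import paths for fake backends vary between Qiskit releases.
# ---------------------------------------------------------------------------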
|
py | b408d1b933f57f339df80eb415b923e50c9638ba | import json
import unittest
import pystac
from pystac import Item
from pystac.extensions.eo import Band
from tests.utils import (TestCases, test_to_from_dict)
class EOTest(unittest.TestCase):
LANDSAT_EXAMPLE_URI = TestCases.get_path('data-files/eo/eo-landsat-example.json')
BANDS_IN_ITEM_URI = TestCases.get_path('data-files/eo/sample-bands-in-item-properties.json')
def setUp(self):
self.maxDiff = None
def test_to_from_dict(self):
with open(self.LANDSAT_EXAMPLE_URI) as f:
item_dict = json.load(f)
test_to_from_dict(self, Item, item_dict)
def test_validate_eo(self):
item = pystac.read_file(self.LANDSAT_EXAMPLE_URI)
item2 = pystac.read_file(self.BANDS_IN_ITEM_URI)
item.validate()
item2.validate()
def test_bands(self):
eo_item = pystac.read_file(self.BANDS_IN_ITEM_URI)
# Get
self.assertIn("eo:bands", eo_item.properties)
bands = eo_item.ext.eo.bands
self.assertEqual(list(map(lambda x: x.name, bands)), ['band1', 'band2', 'band3', 'band4'])
# Set
new_bands = [
Band.create(name="red", description=Band.band_description("red")),
Band.create(name="green", description=Band.band_description("green")),
Band.create(name="blue", description=Band.band_description("blue")),
]
eo_item.ext.eo.bands = new_bands
self.assertEqual('Common name: red, Range: 0.6 to 0.7',
eo_item.properties['eo:bands'][0]['description'])
self.assertEqual(len(eo_item.ext.eo.bands), 3)
eo_item.validate()
def test_asset_bands(self):
eo_item = pystac.read_file(self.LANDSAT_EXAMPLE_URI)
# Get
b1_asset = eo_item.assets['B1']
asset_bands = eo_item.ext.eo.get_bands(b1_asset)
self.assertIsNot(None, asset_bands)
self.assertEqual(len(asset_bands), 1)
self.assertEqual(asset_bands[0].name, 'B1')
index_asset = eo_item.assets['index']
asset_bands = eo_item.ext.eo.get_bands(index_asset)
self.assertIs(None, asset_bands)
# Set
b2_asset = eo_item.assets['B2']
self.assertEqual(eo_item.ext.eo.get_bands(b2_asset)[0].name, "B2")
eo_item.ext.eo.set_bands(eo_item.ext.eo.get_bands(b1_asset), b2_asset)
new_b2_asset_bands = eo_item.ext.eo.get_bands(eo_item.assets['B2'])
self.assertEqual(new_b2_asset_bands[0].name, 'B1')
eo_item.validate()
# Check adding a new asset
new_bands = [
Band.create(name="red", description=Band.band_description("red")),
Band.create(name="green", description=Band.band_description("green")),
Band.create(name="blue", description=Band.band_description("blue")),
]
asset = pystac.Asset(href="some/path.tif", media_type=pystac.MediaType.GEOTIFF)
eo_item.ext.eo.set_bands(new_bands, asset)
eo_item.add_asset("test", asset)
self.assertEqual(len(eo_item.assets["test"].properties["eo:bands"]), 3)
def test_cloud_cover(self):
item = pystac.read_file(self.LANDSAT_EXAMPLE_URI)
# Get
self.assertIn("eo:cloud_cover", item.properties)
cloud_cover = item.ext.eo.cloud_cover
self.assertEqual(cloud_cover, 78)
# Set
item.ext.eo.cloud_cover = 50
self.assertEqual(item.properties['eo:cloud_cover'], 50)
# Get from Asset
b2_asset = item.assets['B2']
self.assertEqual(item.ext.eo.get_cloud_cover(b2_asset), item.ext.eo.get_cloud_cover())
b3_asset = item.assets['B3']
self.assertEqual(item.ext.eo.get_cloud_cover(b3_asset), 20)
# Set on Asset
item.ext.eo.set_cloud_cover(10, b2_asset)
self.assertEqual(item.ext.eo.get_cloud_cover(b2_asset), 10)
item.validate()
def test_read_pre_09_fields_into_common_metadata(self):
eo_item = pystac.read_file(
TestCases.get_path('data-files/examples/0.8.1/item-spec/examples/'
'landsat8-sample.json'))
self.assertEqual(eo_item.common_metadata.platform, "landsat-8")
self.assertEqual(eo_item.common_metadata.instruments, ["oli_tirs"])
def test_reads_asset_bands_in_pre_1_0_version(self):
eo_item = pystac.read_file(
TestCases.get_path('data-files/examples/0.9.0/item-spec/examples/'
'landsat8-sample.json'))
bands = eo_item.ext.eo.get_bands(eo_item.assets['B9'])
self.assertEqual(len(bands), 1)
self.assertEqual(bands[0].common_name, 'cirrus')
def test_reads_gsd_in_pre_1_0_version(self):
eo_item = pystac.read_file(
TestCases.get_path('data-files/examples/0.9.0/item-spec/examples/'
'landsat8-sample.json'))
self.assertEqual(eo_item.common_metadata.gsd, 30.0)
|
py | b408d2512706066e8910af6bc7d0cf3988a694cd | #!/usr/bin/env python
#
# ROS node to read Nao's sensors and torso odometry through the Aldebaran API.
# This code is currently compatible to NaoQI version 1.6
#
# Copyright 2009 Armin Hornung, University of Freiburg
# http://www.ros.org/wiki/nao
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# # Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# # Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# # Neither the name of the University of Freiburg nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import rospy
from sensor_msgs.msg import JointState
from sensor_msgs.msg import Imu
from nav_msgs.msg import Odometry
from naoqi_driver.naoqi_node import NaoqiNode
from tf import transformations
import tf
# NAOqi specific
import motion
class NaoqiJointStates(NaoqiNode):
def __init__(self):
NaoqiNode.__init__(self, 'naoqi_joint_states')
self.connectNaoQi()
# default sensor rate: 25 Hz (50 is max, stresses Nao's CPU)
self.sensorRate = rospy.Rate(rospy.get_param('~sensor_rate', 25.0))
self.dataNamesList = ["DCM/Time",
"Device/SubDeviceList/InertialSensor/AngleX/Sensor/Value","Device/SubDeviceList/InertialSensor/AngleY/Sensor/Value",
"Device/SubDeviceList/InertialSensor/AngleZ/Sensor/Value",
"Device/SubDeviceList/InertialSensor/GyroscopeX/Sensor/Value", "Device/SubDeviceList/InertialSensor/GyroscopeY/Sensor/Value",
"Device/SubDeviceList/InertialSensor/GyroscopeZ/Sensor/Value",
"Device/SubDeviceList/InertialSensor/AccelerometerX/Sensor/Value", "Device/SubDeviceList/InertialSensor/AccelerometerY/Sensor/Value",
"Device/SubDeviceList/InertialSensor/AccelerometerZ/Sensor/Value"]
tf_prefix_param_name = rospy.search_param('tf_prefix')
if tf_prefix_param_name:
self.tf_prefix = rospy.get_param(tf_prefix_param_name)
else:
self.tf_prefix = ""
self.base_frameID = rospy.get_param('~base_frame_id', "base_link")
if not(self.base_frameID[0] == '/'):
self.base_frameID = self.tf_prefix + '/' + self.base_frameID
# use sensor values or commanded (open-loop) values for joint angles
self.useJointSensors = rospy.get_param('~use_joint_sensors', True) # (set to False in simulation!)
# init. messages:
self.torsoOdom = Odometry()
self.torsoOdom.header.frame_id = rospy.get_param('~odom_frame_id', "odom")
if not(self.torsoOdom.header.frame_id[0] == '/'):
self.torsoOdom.header.frame_id = self.tf_prefix + '/' + self.torsoOdom.header.frame_id
self.torsoIMU = Imu()
self.torsoIMU.header.frame_id = self.base_frameID
self.jointState = JointState()
self.jointState.name = self.motionProxy.getJointNames('Body')
        # simulated model misses some joints, we need to fill:
if (len(self.jointState.name) == 22):
self.jointState.name.insert(6,"LWristYaw")
self.jointState.name.insert(7,"LHand")
self.jointState.name.append("RWristYaw")
self.jointState.name.append("RHand")
msg = "Nao joints found: "+ str(self.jointState.name)
rospy.logdebug(msg)
self.torsoOdomPub = rospy.Publisher("odom", Odometry)
self.torsoIMUPub = rospy.Publisher("imu", Imu)
self.jointStatePub = rospy.Publisher("joint_states", JointState)
self.tf_br = tf.TransformBroadcaster()
rospy.loginfo("nao_joint_states initialized")
# (re-) connect to NaoQI:
def connectNaoQi(self):
rospy.loginfo("Connecting to NaoQi at %s:%d", self.pip, self.pport)
self.motionProxy = self.get_proxy("ALMotion")
self.memProxy = self.get_proxy("ALMemory")
if self.motionProxy is None or self.memProxy is None:
exit(1)
def run(self):
""" Odometry thread code - collects and sends out odometry esimate. """
while self.is_looping():
#
# Build odometry:
#
timestamp = rospy.Time.now()
try:
memData = self.memProxy.getListData(self.dataNamesList)
# odometry data:
odomData = self.motionProxy.getPosition('Torso', motion.SPACE_WORLD, True)
positionData = self.motionProxy.getAngles('Body', self.useJointSensors)
except RuntimeError, e:
print "Error accessing ALMemory, exiting...\n"
print e
rospy.signal_shutdown("No NaoQI available anymore")
self.torsoOdom.header.stamp = timestamp
if len(odomData)==2:
odomData = odomData[1]
elif len(odomData)!=6:
print "Error getting odom data"
continue
self.torsoOdom.pose.pose.position.x = odomData[0]
self.torsoOdom.pose.pose.position.y = odomData[1]
self.torsoOdom.pose.pose.position.z = odomData[2]
q = transformations.quaternion_from_euler(odomData[3], odomData[4], odomData[5])
self.torsoOdom.pose.pose.orientation.x = q[0]
self.torsoOdom.pose.pose.orientation.y = q[1]
self.torsoOdom.pose.pose.orientation.z = q[2]
self.torsoOdom.pose.pose.orientation.w = q[3]
t = self.torsoOdom.pose.pose.position
q = self.torsoOdom.pose.pose.orientation
self.tf_br.sendTransform((t.x, t.y, t.z), (q.x, q.y, q.z, q.w),
timestamp, self.base_frameID, self.torsoOdom.header.frame_id)
self.torsoOdomPub.publish(self.torsoOdom)
# Replace 'None' values with 0
# (=> consistent behavior in 1.8 / 1.10 with 1.6)
# TODO: still required with 1.12 / 1.14?
for i, m in enumerate(memData):
if m is None:
memData[i] = 0
if len(memData) != len(self.dataNamesList):
print "memData length does not match expected length"
print memData
continue
# IMU data:
self.torsoIMU.header.stamp = timestamp
q = transformations.quaternion_from_euler(memData[1], memData[2], memData[3])
self.torsoIMU.orientation.x = q[0]
self.torsoIMU.orientation.y = q[1]
self.torsoIMU.orientation.z = q[2]
self.torsoIMU.orientation.w = q[3]
self.torsoIMU.angular_velocity.x = memData[4]
self.torsoIMU.angular_velocity.y = memData[5]
self.torsoIMU.angular_velocity.z = memData[6] # currently always 0
self.torsoIMU.linear_acceleration.x = memData[7]
self.torsoIMU.linear_acceleration.y = memData[8]
self.torsoIMU.linear_acceleration.z = memData[9]
# covariances unknown
# cf http://www.ros.org/doc/api/sensor_msgs/html/msg/Imu.html
self.torsoIMU.orientation_covariance[0] = -1
self.torsoIMU.angular_velocity_covariance[0] = -1
self.torsoIMU.linear_acceleration_covariance[0] = -1
self.torsoIMUPub.publish(self.torsoIMU)
# Send JointState:
self.jointState.header.stamp = timestamp
self.jointState.header.frame_id = self.base_frameID
self.jointState.position = positionData
# simulated model misses some joints, we need to fill:
if (len(self.jointState.position) == 22):
self.jointState.position.insert(6, 0.0)
self.jointState.position.insert(7, 0.0)
self.jointState.position.append(0.0)
self.jointState.position.append(0.0)
self.jointStatePub.publish(self.jointState)
self.sensorRate.sleep()
if __name__ == '__main__':
joint_states = NaoqiJointStates()
joint_states.start()
rospy.spin()
exit(0)
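# ---------------------------------------------------------------------------
# Illustrative companion sketch (editor's addition, commented out). A minimal
# consumer of the topics published above, assuming a running ROS master and
# this node; it is not part of the driver itself.
#
#     import rospy
#     from nav_msgs.msg import Odometry
#     rospy.init_node('nao_odom_listener')
#     rospy.Subscriber('odom', Odometry,
#                      lambda msg: rospy.loginfo(msg.pose.pose.position))
#     rospy.spin()
# ---------------------------------------------------------------------------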
|
py | b408d2ea0aaf6e957014621cfcc4a2aab193dcd1 | import argparse
import importlib
import os
import re
import signal
import subprocess
import sys
import time
import logging
from act.common import aCTLogger
from act.common.aCTConfig import aCTConfigAPP
from act.arc import aCTDBArc
class aCTReport:
'''Print summary info on jobs in DB. Use --web to print html that is
automatically refreshed. Add filenames to query more than one aCT DB'''
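    # Example invocations (editor's addition; the script name and paths are
    # hypothetical, a packaged deployment may expose an installed entry point):
    #   python aCTReport.py                          -> plain-text report for the default aCT DB
    #   python aCTReport.py --web /var/www/act.html  -> auto-refreshing HTML report written to a file
    #   python aCTReport.py prod.config dev.config   -> combined report over two aCT DBs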
def __init__(self, args):
self.output = ""
self.outfile = args.web
self.actconfs = args.conffiles or [''] # empty string for default behaviour
self.logger=aCTLogger.aCTLogger("aCTReport")
self.actlog=self.logger()
self.actlog.logger.setLevel(logging.INFO)
self.criticallogger = aCTLogger.aCTLogger('aCTCritical', arclog=False)
self.criticallog = self.criticallogger()
if self.outfile:
self.log('<META HTTP-EQUIV="refresh" CONTENT="60"><pre>')
self.log(time.asctime() + '\n')
self.db=aCTDBArc.aCTDBArc(self.actlog)
def log(self, message=''):
self.output += message + '\n'
def AppReport(self):
appconf = aCTConfigAPP()
apps = appconf.getList(["modules", "app"])
for app in apps:
try:
ap = importlib.import_module(f'{app}.aCTReport').report
self.log(ap(self.actconfs))
except ModuleNotFoundError as e:
self.actlog.info(f'No report in module {app}')
except AttributeError:
self.actlog.info(f'aCTReport.report() not found in {app}')
except Exception as e:
self.actlog.error(f'Exception running {app}.aCTReport.report: {e}')
def ProcessReport(self):
if self.actconfs != ['']:
return # don't print processes for combined report
actprocscmd = 'ps ax -ww -o pid,etime,args'
try:
out = subprocess.run(actprocscmd.split(), check=True, encoding='utf-8', stdout=subprocess.PIPE).stdout
except subprocess.CalledProcessError as e:
self.log('Error: could not run ps command: %s' % e.stderr)
return
# Group processes by cluster
cluster_procs = {}
longprocesses = []
for line in out.split('\n'):
reg = re.match(r'\s*(\d*)\s*(.*) .*python.* .*(aCT\w*)\.py\s?(\S*)', line)
if reg:
pid, runningtime, process, cluster = reg.groups()
# ignore Main and this process
if process in ['aCTReport', 'aCTMain', 'aCTHeartbeatWatchdog']:
continue
if cluster == '':
cluster = '(no cluster defined)'
elif not re.match(r'\d\d:\d\d$', runningtime):
# Check for overrunning processes
longprocesses.append((process, pid, cluster, runningtime))
if cluster in cluster_procs:
cluster_procs[cluster].append(process)
else:
cluster_procs[cluster] = [process]
for proc in longprocesses:
self.log('WARNING: %s (pid %s) for %s running for more than one hour (%s), this process will be killed' % proc)
# Kill process and log a critical message to send email
# Too many emails, disable
#self.criticallog.critical('Killing process %s (pid %s) for %s running for more than one hour (%s)' % proc)
try:
os.kill(int(proc[1]), signal.SIGKILL)
except OSError:
pass
self.log()
self.log('Active processes per cluster:')
for cluster in sorted(cluster_procs):
procs = cluster_procs[cluster]
procs.sort()
self.log(f'{cluster:>38.38}: {" ".join(procs)}')
self.log()
def ArcJobReport(self):
rep={}
rtot={}
states = ["Undefined", "Accepted", "Preparing", "Submitting",
"Queuing", "Running", "Finishing", "Finished", "Hold", "Killed",
"Failed", "Deleted", "Other"]
for conf in self.actconfs:
if conf:
os.environ['ACTCONFIGARC'] = conf
db=aCTDBArc.aCTDBArc(self.actlog)
c=db.db.conn.cursor()
c.execute("select jobid,state from arcjobs")
rows=c.fetchall()
for r in rows:
reg=re.search('.+//([^:]+)',str(r[0]))
cl=""
try:
cl=reg.group(1)
except:
cl='WaitingSubmission'
jid=str(r[1])
if jid == 'None':
jid="Other"
try:
rep[cl][jid]+=1
except:
try:
rep[cl][jid]=1
except:
rep[cl]={}
rep[cl][jid]=1
try:
rtot[jid]+=1
except:
rtot[jid]=1
if sum(rtot.values()) == 0:
return
self.log(f"All ARC jobs: {sum(rtot.values())}")
self.log(f"{'':39} {' '.join([f'{s:>9}' for s in states])}")
for k in sorted(rep, key=lambda x: x.split('.')[-1]):
log=f"{k:>38.38}:"
for s in states:
try:
log += f'{rep[k][s]:>10}'
except KeyError:
log += f'{"-":>10}'
self.log(log)
log = f"{'Totals':>38}:"
for s in states:
try:
log += f'{rtot[s]:>10}'
except:
log += f'{"-":>10}'
self.log(log+'\n\n')
def CondorJobReport(self):
rep = {}
rtot = {}
condorjobstatemap = ['Undefined', # used before real state is known
'Idle',
'Running',
'Removed',
'Completed',
'Held',
'Transferring',
'Suspended']
for conf in self.actconfs:
if conf:
os.environ['ACTCONFIGARC'] = conf
db=aCTDBArc.aCTDBArc(self.actlog)
c = db.db.conn.cursor()
c.execute("select cluster, JobStatus from condorjobs")
rows = c.fetchall()
for r in rows:
cl = str(r[0])
if not cl:
cl = 'WaitingSubmission'
jid = r[1]
try:
rep[cl][jid]+=1
except:
try:
rep[cl][jid]=1
except:
rep[cl]={}
rep[cl][jid]=1
try:
rtot[jid]+=1
except:
rtot[jid]=1
if sum(rtot.values()) == 0:
return
self.log(f"All Condor jobs: {sum(rtot.values())}")
self.log(f"{'':39} {' '.join([f'{s:>9}' for s in condorjobstatemap])}")
for k in sorted(rep, key=lambda x: x.split('.')[-1]):
log=f"{k:>38.38}:"
for s in range(8):
try:
log += f'{rep[k][s]:>10}'
except KeyError:
log += f'{"-":>10}'
self.log(log)
log = f"{'Totals':>38}:"
for s in range(8):
try:
log += f'{rtot[s]:>10}'
except:
log += f'{"-":>10}'
self.log(log+'\n\n')
def StuckReport(self):
# Query for lost jobs older than lostlimit
lostlimit = 86400
select = "(arcstate='submitted' or arcstate='running') and " \
+ self.db.timeStampLessThan("tarcstate", lostlimit) + \
" order by tarcstate"
columns = ['cluster']
jobs = self.db.getArcJobsInfo(select, columns)
if jobs:
self.log('Found %d jobs not updated in over %d seconds:\n' % (len(jobs), lostlimit))
clustercount = {}
for job in jobs:
try:
host = re.search('.+//([^:]+)', job['cluster']).group(1)
except:
pass
if host in clustercount:
clustercount[host] += 1
else:
clustercount[host] = 1
for cluster, count in clustercount.items():
self.log(f'{count} {cluster}')
self.log()
def end(self):
if self.outfile:
self.log('</pre>')
def main():
parser = argparse.ArgumentParser(description='Report table of aCT jobs.')
parser.add_argument('conffiles', nargs='*', help='list of configuration files')
parser.add_argument('--web', help='Output suitable for web page')
parser.add_argument('--harvester', action='store_true', help='Dummy arg for backwards compatibility')
args = parser.parse_args(sys.argv[1:])
acts = aCTReport(args)
acts.AppReport()
acts.ArcJobReport()
acts.CondorJobReport()
acts.StuckReport()
acts.ProcessReport()
acts.end()
if acts.outfile is None:
sys.stdout.write(acts.output)
else:
f=open(acts.outfile,"w")
f.write(acts.output)
f.close()
if __name__ == '__main__':
main()
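# Hedged usage sketch (not part of the original script): based on the argparse definition
# above, typical invocations might look like the following; the file paths are placeholders.
#
#   python aCTReport.py                          # report for the default aCT DB, printed to stdout
#   python aCTReport.py --web /var/www/act.html  # self-refreshing HTML written to a file
#   python aCTReport.py conf1 conf2              # combined report over several aCT configurations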
|
py | b408d31d9034bcc876d59012bb1758c5aa338661 | """Reducing Functions in Python
These are functions that recombine an iterable recursively, ending up with a single return value
Also called accumulators, aggregators, or folding functions
Example: Finding the maximum value in an iterable
a0, a1, a2, ..., aN-1
max(a, b) -> maximum of a and b
result = a0
result = max(result, a1)
result = max(result, a2)
...
result = max(result, aN-1)
# max value in a0, a1, a2, ..., aN-1
the special case of sequences
(i.e. we can use indexes to access elements in the sequence)
Using a loop
"""
l = [5, 8, 6, 10, 9]                           # result = 5
max_value = lambda a, b: a if a > b else b     # result = max(5, 8) = 8
def max_sequence(sequence):                    # result = max(8, 6) = 8
    result = sequence[0]
    for e in sequence[1:]:                     # result = max(8, 10) = 10
        result = max_value(result, e)          # result = max(10, 9) = 10
    return result                              # result -> 10
Notice the sequence of steps:
l = [5, 8, 6, 10, 9]                           # result = 5
max_value = lambda a, b: a if a > b else b     # result = max(5, 8) = 8
def max_sequence(sequence):                    # result = max(8, 6) = 8
    result = sequence[0]
    for e in sequence[1:]:                     # result = max(8, 10) = 10
        result = max_value(result, e)          # result = max(10, 9) = 10
    return result                              # result -> 10
l = [5, 8, 6, 10, 9]
result = 5
max(5, 8)  -> 8
max(8, 6)  -> 8
max(8, 10) -> 10
max(10, 9) -> 10
result -> 10
To calculate the min:
l = [5, 8, 6, 10, 9]                           # result = 5
min_value = lambda a, b: a if a < b else b     # result = min(5, 8) = 5
def min_sequence(sequence):                    # result = min(5, 6) = 5
    result = sequence[0]
    for e in sequence[1:]:                     # result = min(5, 10) = 5
        result = min_value(result, e)          # result = min(5, 9) = 5
    return result                              # result -> 5
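The same fold can be written with the standard library. A small illustrative sketch
(not part of the original notes) using functools.reduce:
from functools import reduce
l = [5, 8, 6, 10, 9]
print(reduce(lambda a, b: a if a > b else b, l))   # 10 -> max
print(reduce(lambda a, b: a if a < b else b, l))   # 5  -> min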
|
py | b408d328ecc4509391eea1f74cb8871af06a76f5 | import asyncio
import csv  # used by export_csv() below; it was missing from the original import list
import datetime
import itertools
import platform
import sqlite3
import time
from datetime import date, timedelta
from sqlite3.dbapi2 import Cursor
from urllib import parse, request
from uuid import uuid4
import discord
from discord import client
from discord.ext import commands
from matplotlib import pyplot as plt
import numpy as np
from colorama import init
from termcolor import colored
machine = platform.node()
init()
class Logger:
def __init__(self, app):
self.app = app
def info(self, message):
print(colored(f'[{time.asctime(time.localtime())}] [{machine}] [{self.app}] {message}', 'yellow'))
def warning(self, message):
print(colored(f'[{time.asctime(time.localtime())}] [{machine}] [{self.app}] {message}', 'green'))
def error(self, message):
print(colored(f'[{time.asctime(time.localtime())}] [{machine}] [{self.app}] {message}', 'red'))
def color(self, message, color):
print(colored(f'[{time.asctime(time.localtime())}] [{machine}] [{self.app}] {message}', color))
logger = Logger("kourage-Attendance")
def attendance(opening_time, closing_time):
embed = discord.Embed(title="Attendance System",
description="Please react before closing time else the message will disappear ",
color=0x11806a)
embed.set_author(name="Mark you attendance by reacting ⬆️ emoji")
embed.set_thumbnail(
url="https://media.discordapp.net/attachments/700257704723087360/819643015470514236/SYM_TEAL.png?width=455"
"&height=447")
embed.add_field(name="Opening Time", value=opening_time, inline=False)
embed.add_field(name="Closing Time", value=closing_time, inline=False)
embed.set_footer(text="Made with ❤️️ by Koders")
return embed
def simple_embed(title, description):
embed = discord.Embed(
title = title,
description = description,
colour=0x11806a
)
embed.set_thumbnail(url="https://media.discordapp.net/attachments/700257704723087360/819643015470514236/SYM_TEAL.png?width=455&height=447")
embed.set_footer(text="Made with ❤️️ by Koders")
embed.timestamp = datetime.datetime.utcnow()
return embed
def attendance_dm(date, time, day):
embed = discord.Embed(title="Thank you for marking your attendance!", color=0x11806a)
embed.set_author(name="Attendance")
embed.set_thumbnail(
url="https://media.discordapp.net/attachments/700257704723087360/819643015470514236/SYM_TEAL.png?width=455"
"&height=447")
embed.add_field(name="Date", value=date, inline=True)
embed.add_field(name="Time", value=time, inline=True)
embed.add_field(name="Day", value=day, inline=True)
return embed
def attendance_missed_dm(date, time, day):
embed = discord.Embed(title="Yo have not marked your attendance yet. Only 5 minutes remaining", color=0x11806a)
embed.set_author(name="Attendance")
embed.set_thumbnail(
url="https://media.discordapp.net/attachments/700257704723087360/819643015470514236/SYM_TEAL.png?width=455"
"&height=447")
embed.add_field(name="Date", value=date, inline=True)
embed.add_field(name="Time", value=time, inline=True)
embed.add_field(name="Day", value=day, inline=True)
return embed
async def leave_and_attendance(ctx, bot, start_date, end_date, users, mode):
"""
This module shows attendance and leaves
:params: start_date(str), end_date(str), user_id(int), username(str)
:return: None
:mode: 1 - for attendance, 2 - leaves
"""
logger.info("Show attendance called")
conn = sqlite3.connect('db/ATTENDANCE.sqlite')
cur = conn.cursor()
if mode == 1:
cur.execute('''SELECT DATE, SHIFT, ABSENTEES FROM Attendance_table WHERE DATE BETWEEN ? AND ?''',(start_date, end_date))
if mode == 2:
cur.execute('''SELECT DATE, SHIFT, PRESENTEES FROM Attendance_table WHERE DATE BETWEEN ? AND ?''', (start_date,end_date))
data = cur.fetchall()
if not data: # TODO Check if this is working
no_data_embed=discord.Embed(title="No attendance data found between " + str(start_date) + ' ' + str(end_date),description="",colour=0x11806a)
await ctx.send(embed=no_data_embed,delete_after=60)
logger.warning("No attendance data found between those dates")
return None
else:
morning_only, evening_only, full_day = {}, {}, {}
selected_dates = []
for each in data:
if each[1] == "M":
morning_only[each[0]] = set(each[2].strip('"{}').split(', ')) # Adding members
elif each[1] == "E":
evening_only[each[0]] = set(each[2].strip('"{}').split(', ')) # Adding members
selected_dates.append(each[0])
# Calculating all possible dates and adding members accordingly
selected_dates = set(selected_dates) # For finding unique dates
for each_date in selected_dates:
full_day[each_date]=set()
if not morning_only:
logger.warning("no morning data")
break
if not evening_only:
logger.warning("no evening data")
break
for each_person in morning_only[each_date]:
try:
if each_person in evening_only[each_date]:
full_day[each_date].add(each_person)
except Exception as err:
logger.error("Something went wrong while fetching the full day attendance")
# if full day is found. removing from morning and evening
for each_person in full_day[each_date]:
morning_only[each_date].remove(each_person)
evening_only[each_date].remove(each_person)
if users:
if mode == 1:
status = "Absent"
elif mode == 2:
status = "Present"
print(type(users))
for user in users:
print(await bot.fetch_user(int(user)))
day_full=0
morning=0
evening=0
dates=0
message=""
not_there=set()
if not morning_only:
not_there=set(itertools.chain(full_day[each_date],evening_only[each_date]))
elif not evening_only:
not_there=set(itertools.chain(full_day[each_date],morning_only[each_date]))
elif not full_day:
not_there=set(itertools.chain(morning_only[each_date],evening_only[each_date]))
else:
not_there=set(itertools.chain(full_day[each_date],morning_only[each_date],evening_only[each_date]))
for each_date in selected_dates:
dates=dates+1
if str(user) in not_there:
try:
if str(user) in full_day[each_date]:
message += each_date + ": " + status + " full day \n"
day_full=day_full+1
except Exception as e:
pass
try:
if str(user) in morning_only[each_date]:
message += each_date + ": " + status + " in morning only \n"
morning=morning+1
except Exception as e:
pass
try:
if str(user) in evening_only[each_date]:
message += each_date + ": " + status + " in evening only \n"
evening=evening+1
except Exception as e:
pass
else:
message += each_date +": Not "+status+"\n"
#graph
def addlabels(x,y):
for i in range(len(x)):
plt.text(i, y[i], y[i], ha = 'center',
                 bbox=dict(facecolor='grey', alpha=.8))
value = [dates,morning,evening,day_full]
data = ('Total\nDates', 'Morning','Evening','Full')
x_pos = np.arange(len(data))
save_filename='test.png'
plt.bar(x_pos, value, color = ['darkcyan'])
addlabels(data, value)
plt.title(status+' Graph for @'+str(await bot.fetch_user(int(user))))
plt.ylabel('values')
plt.xticks(x_pos, data)
plt.savefig(save_filename,dpi=100)
plt.close()
embed=simple_embed(title="Result for: "+str(await bot.fetch_user(int(user)))+"\n",description="")
embed.add_field(name='Here are the details', value=message, inline=False)
await ctx.send(embed=embed,file=discord.File(save_filename), delete_after = 20)
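# Hedged usage sketch (illustrative only, not part of the original cog): inside a command
# handler this helper might be called roughly as below; the dates and user id are placeholders.
#
#   await leave_and_attendance(ctx, bot, "2021-01-01", "2021-01-31", users=[123456789012345678], mode=1)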
async def export_csv(ctx,start_date,end_date):
logger.info("export csv called")
conn = sqlite3.connect('db/ATTENDANCE.sqlite')
cur = conn.cursor()
cur.execute('''SELECT DATE, SHIFT, ABSENTEES FROM Attendance_table WHERE DATE BETWEEN ? AND ?''',(start_date, end_date))
data = cur.fetchall()
if not data: # TODO Check if this is working
no_data_embed=discord.Embed(title="No attendance data found between "+str(start_date)+" and "+str(end_date),description="",colour=0x11806a)
await ctx.send(embed=no_data_embed,delete_after=60)
logger.warning("No attendance data found between those dates")
return None
else:
try:
fields = ['Date', 'Shift', 'Absentees']
absentees_file = 'Absentees.csv'
embed=simple_embed(title="Absentees CSV FILE ",description="of dates("+str(start_date)+" "+str(end_date)+")")
with open(absentees_file, 'w') as csvfile:
csvwriter = csv.writer(csvfile)
csvwriter.writerow(fields)
csvwriter.writerows(data)
logger.info("CSV write done")
logger.info(data)
await ctx.send(embed=embed,file=discord.File(absentees_file), delete_after = 60)
logger.info("Absentees("+str(start_date)+" "+str(end_date)+") CSV sent")
except Exception as e:
logger.error("Error sending csv")
# NOTE: this second leave_and_attendance() appears to be an earlier draft; being defined later, it shadows the implementation above.
async def leave_and_attendance(ctx, bot, start_date, end_date, users, mode):
"""
This module shows attendance and leaves
:params: start_date(str), end_date(str), user_id(int), username(str)
:return: None
:mode: 1 - for attendance, 2 - leaves
"""
logger.info("Show attendance called")
conn = sqlite3.connect('db/ATTENDANCE.sqlite')
cur = conn.cursor()
if mode == 1:
cur.execute('''SELECT DATE, SHIFT, PRESENTEES FROM ATTENDANCE WHERE DATE BETWEEN ? AND ?''', str(start_date), str(end_date))
if mode == 2:
cur.execute('''SELECT DATE, SHIFT, ABSENTEES FROM ATTENDANCE WHERE DATE BETWEEN ? AND ?''', str(start_date), str(end_date))
data = cur.fetchone()
if not data: # TODO Check if this is working
logger.warning("No attendance data found between those dates")
return None
else:
# TODO - try with string strips
        morning_only, evening_only, full_day = {}, {}, {}
for each in data:
selected_dates = []
if each[1] == "M":
morning_only[each[0]] = set(each[2].strip('"{}').split(', ')) # Adding members
elif each[1] == "E":
evening_only[each[0]] = set(each[2].strip('"{}').split(', ')) # Adding members
selected_dates.append(each[0])
# Calculating all possible dates and adding members accordingly
selected_dates = set(selected_dates) # For finding unique dates
for each_date in selected_dates:
for each_person in morning_only[each_date]:
try:
if each_person in evening_only[each_date]:
full_day[each_date].append(each_person)
# if full day is found. removing from morning and evening
morning_only.remove(each_person)
evening_only.remove(each_person)
except Exception as err:
logger.error("Something went wrong while fetching the full day attendance")
if users:
if mode == 1:
status = "Present"
elif mode == 2:
status = "Absent"
for user in users:
message=""
for each_date in selected_dates:
try:
if user in full_day[each_date]:
message += each_date + ": " + status + " in full day \n"
elif user in morning_only[each_date]:
message += each_date + ": " + status + " in morning only \n"
elif user in evening[each_date]:
message += each_date + ": " + status + " in evening only \n"
except Exception as e:
pass # handling KeyError in dict
embed=simple_embed(title="Result for: "+str(bot.get_user(user)+"\n",description="")
embed.add_field(name='Here are the details', value=message, inline=False)
await ctx.send(embed=embed, delete_after=60)
async def data_input(ctx, bot):
start_date_embed=discord.Embed(title="Enter start date",description="Please enter in this format only 'yyyy-mm-dd'",colour=0x11806a)
start=await ctx.send(embed=start_date_embed,delete_after=60)
start_date1 = await ctx_input(ctx, bot, start)
if not start_date1:
return
end_date_embed=discord.Embed(title="Enter end date",description="Please enter in this format only 'yyyy-mm-dd'",colour=0x11806a)
end=await ctx.send(embed=end_date_embed,delete_after=60)
end_date1 = await ctx_input(ctx, bot, end)
if not end_date1:
return
start_date = datetime.datetime.strptime(start_date1, '%Y-%m-%d')
end_date = datetime.datetime.strptime(end_date1, '%Y-%m-%d')
return start_date, end_date;
attendance_embed=simple_embed(title="Leaves for : "+str(username)+"\n",description="")
attendance_embed.add_field(name='Leaves List', value = attendance_list, inline=False)
return attendance_embed
def attendance(ctx, bot, start_date, end_date, user_id, mode, flag):
"""
This module shows attendance and leaves
:params: start_date(str), end_date(str), user_id(int), username(str)
:return: None
"""
logger.info("Attendance function called")
attendance_list=""
attendance_list=attendance_list+"\n𝗗𝗔𝗧𝗘: "+dates+"\n"
conn = sqlite3.connect('db/ATTENDANCE.sqlite')
cur = conn.cursor()
# TODO
# Fix sql query
if flag == 1:
cur.execute('''SELECT DATE, SHIFT, PRESENTEES FROM ATTENDANCE WHERE ''')
elif flag == 2:
cur.execute('''SELECT DATE, SHIFT, ABSENTEES FROM ATTENDANCE WHERE ''')
data = cur.fetchone()
if not data: # TODO Check if this is working
logger.warning("No attendance data found between those dates")
return None
else:
        morning_only, evening_only, full_day = {}, {}, {}
for each in data:
selected_dates = []
# TODO
# Check strip
if each[1] == "M":
morning_only[each[0]] = set(each[2].strip("'{}'").split(', ')) # Adding members
elif each[1] == "E":
evening_only[each[0]] = set(each[2].strip("'{}'").split(', ')) # Adding members
selected_dates.append(each[0])
selected_dates = set(selected_dates) # Unique dates
for each_date in selected_dates:
for each_person in morning_only[each_date]:
try:
if each_person in evening_only[each_date]:
full_day[each_date].append(each_person) # this should be in try and catch
# if full day is found. removing from morning and evening
morning_only.remove(each_person)
evening_only.remove(each_person)
except Exception as err:
logger.error("Something went wrong while fetching the full day attendance")
return morning_only, evening_only, full_day
    # morning_only[dates] = set of presentees  (author's pseudocode note, kept as a comment so the file parses)
if((present_morning==True) and (present_evening==True)):
attendance_list=attendance_list+"Present full day\n"
elif((present_morning==False) and (present_evening==True)):
attendance_list=attendance_list+"Absent in morning\n"
elif((present_morning==True) and (present_evening==False)):
attendance_list=attendance_list+"Absent in evening\n"
elif((absent_morning==True) and (absent_evening==True)):
attendance_list=attendance_list+"Absent full day\n"
attendance_embed=simple_embed(title="Leaves for : "+str(username)+"\n",description="")
attendance_embed.add_field(name='Leaves List', value = attendance_list, inline=False)
return attendance_embed
#graph
def addlabels(x,y):
for i in range(len(x)):
plt.text(i, y[i], y[i], ha = 'center',
                 bbox=dict(facecolor='grey', alpha=.8))
value = [attendance_dates,full_present,morning_present,evening_present,absent]
data = ('Total\nDates', 'Full Day\nPresent', 'Morning\nPresent','Evening\nPresent','Absent')
x_pos = np.arange(len(data))
save_filename='test.png'
plt.bar(x_pos, value, color = ['darkcyan'])
addlabels(data, value)
plt.title('Attendance Graph for @'+str(username))
plt.ylabel('values')
plt.xticks(x_pos, data)
plt.savefig(save_filename,dpi=100)
plt.close()
    return attendance_embed, save_filename
async def ctx_input(ctx, bot, embed, timeout = 60.0):
try:
msg = await bot.wait_for(
"message",
timeout=timeout,
check=lambda message: message.author == ctx.author
)
if msg:
await embed.delete()
_id = msg.content
await msg.delete()
return _id
except asyncio.TimeoutError as err:
await embed.delete()
await ctx.send('Cancelling due to timeout.', delete_after = timeout)
return None
async def data_input(ctx, bot):
start_date_embed=discord.Embed(title="Enter start date",description="Please enter in this format only 'yyyy-mm-dd'",colour=0x11806a)
start=await ctx.send(embed=start_date_embed,delete_after=60)
start_date = await ctx_input(ctx, bot, start)
if not start_date:
return
end_date_embed=discord.Embed(title="Enter end date",description="Please enter in this format only 'yyyy-mm-dd'",colour=0x11806a)
end=await ctx.send(embed=end_date_embed,delete_after=60)
end_date = await ctx_input(ctx, bot, end)
if not end_date:
return
return start_date, end_date;
def simple_embed(title, description):
embed = discord.Embed(
title = title,
description = description,
colour=0x11806a
)
embed.set_thumbnail(url="https://media.discordapp.net/attachments/700257704723087360/819643015470514236/SYM_TEAL.png?width=455&height=447")
embed.set_footer(text="Made with ❤️️ by Koders")
embed.timestamp = datetime.datetime.utcnow()
return embed
_rxn_no = {'1️⃣':1, '2️⃣':2, '3️⃣':3,'4️⃣':4}
async def take_reaction_no(ctx, rxn_amnt, _embed, bot, timeout=300.0):
rxn = dict()
_i = 1
for i in _rxn_no:
if _i > rxn_amnt:
break
rxn[i] = _i
_i += 1
for i in rxn:
await _embed.add_reaction(i)
def check(reaction, user):
_c1 = user.bot is not True and user == ctx.author
return _c1 and str(reaction.emoji) in rxn
try:
result = await bot.wait_for('reaction_add', check=check, timeout=timeout)
reaction, user = result
ret = (None, rxn[str(reaction)]) [ str(reaction) in rxn ]
return ret, _embed
except asyncio.TimeoutError:
await ctx.delete()
async def ctx_input(ctx, bot, embed, timeout = 60.0):
try:
msg = await bot.wait_for(
"message",
timeout=timeout,
check=lambda message: message.author == ctx.author
)
if msg:
await embed.delete()
_id = msg.content
await msg.delete()
return _id
except asyncio.TimeoutError as err:
await embed.delete()
await ctx.send('Cancelling due to timeout.', delete_after = timeout)
return None
|
py | b408d37d69ac8f96f8fb47ed347be1377e06c3a6 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""Credentials wrapping MSAL applications and delegating token acquisition and caching to them.
This entails monkeypatching MSAL's OAuth client with an adapter substituting an azure-core pipeline for Requests.
"""
import abc
import base64
import json
import logging
import time
import msal
from six.moves.urllib_parse import urlparse
from azure.core.credentials import AccessToken
from azure.core.exceptions import ClientAuthenticationError
from .exception_wrapper import wrap_exceptions
from .msal_transport_adapter import MsalTransportAdapter
from .persistent_cache import load_user_cache
from .._constants import KnownAuthorities
from .._exceptions import AuthenticationRequiredError, CredentialUnavailableError
from .._internal import get_default_authority, normalize_authority
from .._auth_record import AuthenticationRecord
try:
ABC = abc.ABC
except AttributeError: # Python 2.7, abc exists, but not ABC
ABC = abc.ABCMeta("ABC", (object,), {"__slots__": ()}) # type: ignore
try:
from typing import TYPE_CHECKING
except ImportError:
TYPE_CHECKING = False
if TYPE_CHECKING:
# pylint:disable=ungrouped-imports,unused-import
from typing import Any, Mapping, Optional, Type, Union
_LOGGER = logging.getLogger(__name__)
_DEFAULT_AUTHENTICATE_SCOPES = {
"https://" + KnownAuthorities.AZURE_CHINA: ("https://management.core.chinacloudapi.cn//.default",),
"https://" + KnownAuthorities.AZURE_GERMANY: ("https://management.core.cloudapi.de//.default",),
"https://" + KnownAuthorities.AZURE_GOVERNMENT: ("https://management.core.usgovcloudapi.net//.default",),
"https://" + KnownAuthorities.AZURE_PUBLIC_CLOUD: ("https://management.core.windows.net//.default",),
}
def _decode_client_info(raw):
"""Taken from msal.oauth2cli.oidc"""
raw += "=" * (-len(raw) % 4)
raw = str(raw) # On Python 2.7, argument of urlsafe_b64decode must be str, not unicode.
return base64.urlsafe_b64decode(raw).decode("utf-8")
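# Clarifying note (not in the original): base64url values are often transmitted without padding,
# and their length must be a multiple of 4 before decoding; the `raw += "=" * (-len(raw) % 4)`
# line above restores it, e.g. a 10-character value gains "==" because -10 % 4 == 2.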
def _build_auth_record(response):
"""Build an AuthenticationRecord from the result of an MSAL ClientApplication token request"""
try:
id_token = response["id_token_claims"]
if "client_info" in response:
client_info = json.loads(_decode_client_info(response["client_info"]))
home_account_id = "{uid}.{utid}".format(**client_info)
else:
# MSAL uses the subject claim as home_account_id when the STS doesn't provide client_info
home_account_id = id_token["sub"]
return AuthenticationRecord(
authority=urlparse(id_token["iss"]).netloc, # "iss" is the URL of the issuing tenant
client_id=id_token["aud"],
home_account_id=home_account_id,
tenant_id=id_token["tid"], # tenant which issued the token, not necessarily user's home tenant
username=id_token["preferred_username"],
)
except (KeyError, ValueError):
# surprising: msal.ClientApplication always requests an id token, whose shape shouldn't change
return None
class MsalCredential(ABC):
"""Base class for credentials wrapping MSAL applications"""
def __init__(self, client_id, client_credential=None, **kwargs):
# type: (str, Optional[Union[str, Mapping[str, str]]], **Any) -> None
authority = kwargs.pop("authority", None)
self._authority = normalize_authority(authority) if authority else get_default_authority()
self._tenant_id = kwargs.pop("tenant_id", None) or "organizations"
self._client_credential = client_credential
self._client_id = client_id
self._cache = kwargs.pop("_cache", None) # internal, for use in tests
if not self._cache:
if kwargs.pop("enable_persistent_cache", False):
allow_unencrypted = kwargs.pop("allow_unencrypted_cache", False)
self._cache = load_user_cache(allow_unencrypted)
else:
self._cache = msal.TokenCache()
self._adapter = kwargs.pop("msal_adapter", None) or MsalTransportAdapter(**kwargs)
# postpone creating the wrapped application because its initializer uses the network
self._msal_app = None # type: Optional[msal.ClientApplication]
@abc.abstractmethod
def get_token(self, *scopes, **kwargs):
# type: (*str, **Any) -> AccessToken
pass
@abc.abstractmethod
def _get_app(self):
# type: () -> msal.ClientApplication
pass
def _create_app(self, cls):
# type: (Type[msal.ClientApplication]) -> msal.ClientApplication
"""Creates an MSAL application, patching msal.authority to use an azure-core pipeline during tenant discovery"""
# MSAL application initializers use msal.authority to send AAD tenant discovery requests
with self._adapter:
# MSAL's "authority" is a URL e.g. https://login.microsoftonline.com/common
app = cls(
client_id=self._client_id,
client_credential=self._client_credential,
authority="{}/{}".format(self._authority, self._tenant_id),
token_cache=self._cache,
)
# monkeypatch the app to replace requests.Session with MsalTransportAdapter
app.client.session.close()
app.client.session = self._adapter
return app
class ConfidentialClientCredential(MsalCredential):
"""Wraps an MSAL ConfidentialClientApplication with the TokenCredential API"""
@wrap_exceptions
def get_token(self, *scopes, **kwargs): # pylint:disable=unused-argument
# type: (*str, **Any) -> AccessToken
# MSAL requires scopes be a list
scopes = list(scopes) # type: ignore
now = int(time.time())
# First try to get a cached access token or if a refresh token is cached, redeem it for an access token.
# Failing that, acquire a new token.
app = self._get_app()
result = app.acquire_token_silent(scopes, account=None) or app.acquire_token_for_client(scopes)
if "access_token" not in result:
raise ClientAuthenticationError(message="authentication failed: {}".format(result.get("error_description")))
return AccessToken(result["access_token"], now + int(result["expires_in"]))
def _get_app(self):
# type: () -> msal.ConfidentialClientApplication
if not self._msal_app:
self._msal_app = self._create_app(msal.ConfidentialClientApplication)
return self._msal_app
class PublicClientCredential(MsalCredential):
"""Wraps an MSAL PublicClientApplication with the TokenCredential API"""
@abc.abstractmethod
def get_token(self, *scopes, **kwargs): # pylint:disable=unused-argument
# type: (*str, **Any) -> AccessToken
pass
def _get_app(self):
# type: () -> msal.PublicClientApplication
if not self._msal_app:
self._msal_app = self._create_app(msal.PublicClientApplication)
return self._msal_app
class InteractiveCredential(PublicClientCredential):
def __init__(self, **kwargs):
self._disable_automatic_authentication = kwargs.pop("disable_automatic_authentication", False)
self._auth_record = kwargs.pop("authentication_record", None) # type: Optional[AuthenticationRecord]
if self._auth_record:
kwargs.pop("client_id", None) # authentication_record overrides client_id argument
tenant_id = kwargs.pop("tenant_id", None) or self._auth_record.tenant_id
super(InteractiveCredential, self).__init__(
client_id=self._auth_record.client_id,
authority=self._auth_record.authority,
tenant_id=tenant_id,
**kwargs
)
else:
super(InteractiveCredential, self).__init__(**kwargs)
def get_token(self, *scopes, **kwargs):
# type: (*str, **Any) -> AccessToken
"""Request an access token for `scopes`.
.. note:: This method is called by Azure SDK clients. It isn't intended for use in application code.
:param str scopes: desired scopes for the access token. This method requires at least one scope.
:rtype: :class:`azure.core.credentials.AccessToken`
:raises CredentialUnavailableError: the credential is unable to attempt authentication because it lacks
required data, state, or platform support
:raises ~azure.core.exceptions.ClientAuthenticationError: authentication failed. The error's ``message``
attribute gives a reason.
:raises AuthenticationRequiredError: user interaction is necessary to acquire a token, and the credential is
configured not to begin this automatically. Call :func:`authenticate` to begin interactive authentication.
"""
if not scopes:
raise ValueError("'get_token' requires at least one scope")
allow_prompt = kwargs.pop("_allow_prompt", not self._disable_automatic_authentication)
try:
return self._acquire_token_silent(*scopes, **kwargs)
except AuthenticationRequiredError:
if not allow_prompt:
raise
# silent authentication failed -> authenticate interactively
now = int(time.time())
result = self._request_token(*scopes, **kwargs)
if "access_token" not in result:
message = "Authentication failed: {}".format(result.get("error_description") or result.get("error"))
raise ClientAuthenticationError(message=message)
# this may be the first authentication, or the user may have authenticated a different identity
self._auth_record = _build_auth_record(result)
return AccessToken(result["access_token"], now + int(result["expires_in"]))
def authenticate(self, **kwargs):
# type: (**Any) -> AuthenticationRecord
"""Interactively authenticate a user.
:keyword Sequence[str] scopes: scopes to request during authentication, such as those provided by
:func:`AuthenticationRequiredError.scopes`. If provided, successful authentication will cache an access token
for these scopes.
:rtype: ~azure.identity.AuthenticationRecord
:raises ~azure.core.exceptions.ClientAuthenticationError: authentication failed. The error's ``message``
attribute gives a reason.
"""
scopes = kwargs.pop("scopes", None)
if not scopes:
if self._authority not in _DEFAULT_AUTHENTICATE_SCOPES:
# the credential is configured to use a cloud whose ARM scope we can't determine
raise CredentialUnavailableError(
message="Authenticating in this environment requires a value for the 'scopes' keyword argument."
)
scopes = _DEFAULT_AUTHENTICATE_SCOPES[self._authority]
_ = self.get_token(*scopes, _allow_prompt=True, **kwargs)
return self._auth_record # type: ignore
@wrap_exceptions
def _acquire_token_silent(self, *scopes, **kwargs):
# type: (*str, **Any) -> AccessToken
result = None
if self._auth_record:
app = self._get_app()
for account in app.get_accounts(username=self._auth_record.username):
if account.get("home_account_id") != self._auth_record.home_account_id:
continue
now = int(time.time())
result = app.acquire_token_silent_with_error(list(scopes), account=account, **kwargs)
if result and "access_token" in result and "expires_in" in result:
return AccessToken(result["access_token"], now + int(result["expires_in"]))
# if we get this far, result is either None or the content of an AAD error response
if result:
details = result.get("error_description") or result.get("error")
raise AuthenticationRequiredError(scopes, error_details=details)
raise AuthenticationRequiredError(scopes)
@abc.abstractmethod
def _request_token(self, *scopes, **kwargs):
# type: (*str, **Any) -> dict
"""Request an access token via a non-silent MSAL token acquisition method, returning that method's result"""
|
py | b408d5138674c7b99ba490164676c9ef7c9b42ca |
TYPE_STRING = 1
TYPE_TWODOUBLES = 2
TYPE_NUMPY = 3
TYPE_DOUBLEANDNUMPY = 4
TYPE_INTEGER = 5
|
py | b408d6ec10b2441dfdb3c2aad6e2c448312deb21 | import numpy as np
import scipy.interpolate
class Waveform:
def __init__(self, data, time):
self.data = np.array(data, dtype=float)
self.time = np.array(time, dtype=float)
self.interp = scipy.interpolate.interp1d(
time, data,
bounds_error=False, fill_value=(data[0], data[-1])
)
# attach measurement objects in a way that is compatible with pverify
measurements = Measurements(self)
self.Measurements_Base = measurements
self.Measurements_NonPeriodic = measurements
self.Measurements_Periodic = measurements
def check_in_limits(self, wave_lo, wave_hi):
# compute bounds
lo_bnds = wave_lo.interp(self.time)
hi_bnds = wave_hi.interp(self.time)
# uncomment for graphical debugging
# import matplotlib.pyplot as plt
# plt.plot(self.time, lo_bnds)
# plt.plot(self.time, hi_bnds)
# plt.plot(self.time, self.data)
# plt.legend(['lo_bnds', 'hi_bnds', 'data'])
# plt.show()
# find any point where the waveform is out of spec
in_spec = np.logical_and(lo_bnds <= self.data, self.data <= hi_bnds)
if np.all(in_spec):
pass
else:
indices = np.where(np.logical_not(in_spec))
times = self.time[indices]
raise Exception(f'Waveform is out of spec. Check times: {times}')
class Measurements:
def __init__(self, wave: Waveform):
self.wave = wave
def min(self):
return np.min(self.wave.data)
def max(self):
return np.max(self.wave.data)
def peak_to_peak(self):
return self.max() - self.min()
def find_settled(self, lo, hi):
# find first index where the waveform is in the settled range
idx = np.argmax((lo <= self.wave.data) & (self.wave.data <= hi))
# if there is no such value, idx will be zero, so we have to check
# for that condition
if not (lo <= self.wave.data[idx] <= hi):
raise Exception('Waveform did not settle to the specified range.')
return self.wave.time[idx]
def frequency(self, level=0, slope='rise', hysteresis=0):
# find the crossing times
crossings = []
armed = False
for t, v in zip(self.wave.time, self.wave.data):
if slope == 'rise':
if armed and (v > (level + hysteresis)):
crossings.append(t)
armed = False
elif v < (level - hysteresis):
armed = True
elif slope == 'fall':
if armed and (v < (level - hysteresis)):
crossings.append(t)
armed = False
elif v > (level + hysteresis):
armed = True
else:
raise Exception(f"Unknown slope type: {slope}")
# measure time from the first crossing to the last crossing,
# as well as the number of periods
dt = crossings[-1] - crossings[0]
num = len(crossings) - 1
# return the average frequency
return num/dt
def frequency_average(self, *args, **kwargs):
# not sure what this function is supposed to do as compared to the previous
# one, so I'm leaving this as an alias to "frequency" for now
return self.frequency(*args, **kwargs)
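# Minimal usage sketch (illustrative, with made-up sample data; not part of the module):
#
#   import numpy as np
#   t = np.linspace(0, 1e-6, 1001)
#   v = 0.5 * (1 + np.sign(np.sin(2 * np.pi * 5e6 * t)))   # ~5 MHz square wave between 0 and 1
#   wave = Waveform(v, t)
#   print(wave.Measurements_Base.peak_to_peak())            # ~1.0
#   print(wave.Measurements_Base.frequency(level=0.5))      # ~5e6, from the rising-edge crossings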
|
py | b408d76495876d3b3a6553705d0b75958e8df559 | ###############################################################################
# WaterTAP Copyright (c) 2021, The Regents of the University of California,
# through Lawrence Berkeley National Laboratory, Oak Ridge National
# Laboratory, National Renewable Energy Laboratory, and National Energy
# Technology Laboratory (subject to receipt of any required approvals from
# the U.S. Dept. of Energy). All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license
# information, respectively. These files are also available online at the URL
# "https://github.com/watertap-org/watertap/"
#
###############################################################################
import pytest
from pyomo.environ import value
from watertap.examples.flowsheets.case_studies.seawater_RO_desalination.seawater_RO_desalination import (
main,
)
# -----------------------------------------------------------------------------
@pytest.mark.component
def test_seawater_RO_desalination_pressure_exchanger():
m = main(erd_type="pressure_exchanger")
f = m.fs.feed
assert pytest.approx(305.63, rel=1e-4) == value(f.flow_mass_comp[0, "H2O"])
assert pytest.approx(10.822, rel=1e-4) == value(f.flow_mass_comp[0, "tds"])
assert pytest.approx(9.2760e-3, rel=1e-4) == value(f.flow_mass_comp[0, "tss"])
assert pytest.approx(0.3092, rel=1e-4) == value(f.flow_vol[0])
p1 = m.fs.desalination.P1
assert pytest.approx(0.8, rel=1e-4) == value(p1.efficiency_pump[0])
assert pytest.approx(1.1536e6, rel=1e-4) == value(p1.work_mechanical[0])
assert pytest.approx(6.9e6, rel=1e-4) == value(p1.deltaP[0])
assert pytest.approx(70.0, rel=1e-4) == value(p1.ratioP[0])
assert pytest.approx(132.15, rel=1e-4) == value(
p1.inlet.flow_mass_phase_comp[0.0, "Liq", "H2O"]
)
assert pytest.approx(4.6800, rel=1e-4) == value(
p1.inlet.flow_mass_phase_comp[0.0, "Liq", "TDS"]
)
assert pytest.approx(298.00, rel=1e-4) == value(p1.inlet.temperature[0.0])
assert pytest.approx(1.0e5, rel=1e-4) == value(p1.inlet.pressure[0.0])
assert pytest.approx(132.15, rel=1e-4) == value(
p1.outlet.flow_mass_phase_comp[0.0, "Liq", "H2O"]
)
assert pytest.approx(4.6800, rel=1e-4) == value(
p1.outlet.flow_mass_phase_comp[0.0, "Liq", "TDS"]
)
assert pytest.approx(298.00, rel=1e-4) == value(p1.outlet.temperature[0.0])
assert pytest.approx(7.0e6, rel=1e-4) == value(p1.outlet.pressure[0.0])
ro = m.fs.desalination.RO
assert pytest.approx(13914.0, rel=1e-4) == value(ro.area)
assert pytest.approx(0.43681, rel=1e-4) == value(
ro.recovery_mass_phase_comp[0.0, "Liq", "H2O"]
)
assert pytest.approx(0.43293, rel=1e-4) == value(ro.recovery_vol_phase[0.0, "Liq"])
assert pytest.approx(305.57, rel=1e-4) == value(
ro.inlet.flow_mass_phase_comp[0.0, "Liq", "H2O"]
)
assert pytest.approx(10.822, rel=1e-4) == value(
ro.inlet.flow_mass_phase_comp[0.0, "Liq", "TDS"]
)
assert pytest.approx(298.00, rel=1e-4) == value(ro.inlet.temperature[0.0])
assert pytest.approx(7.0e6, rel=1e-4) == value(ro.inlet.pressure[0.0])
assert pytest.approx(172.09, rel=1e-4) == value(
ro.retentate.flow_mass_phase_comp[0.0, "Liq", "H2O"]
)
assert pytest.approx(10.792, rel=1e-4) == value(
ro.retentate.flow_mass_phase_comp[0.0, "Liq", "TDS"]
)
assert pytest.approx(298.02, rel=1e-4) == value(ro.retentate.temperature[0.0])
assert pytest.approx(6.7759e6, rel=1e-4) == value(ro.retentate.pressure[0.0])
assert pytest.approx(133.48, rel=1e-4) == value(
ro.permeate.flow_mass_phase_comp[0.0, "Liq", "H2O"]
)
assert pytest.approx(2.9557e-2, rel=1e-4) == value(
ro.permeate.flow_mass_phase_comp[0.0, "Liq", "TDS"]
)
assert pytest.approx(298.02, rel=1e-4) == value(ro.permeate.temperature[0.0])
assert pytest.approx(1.0132e5, rel=1e-4) == value(ro.permeate.pressure[0.0])
muni = m.fs.municipal
assert pytest.approx(147.59, rel=1e-4) == value(muni.electricity[0])
assert pytest.approx(133.48, rel=1e-4) == value(
muni.inlet.flow_mass_comp[0.0, "H2O"]
)
assert pytest.approx(2.9557e-2, rel=1e-4) == value(
muni.inlet.flow_mass_comp[0.0, "tds"]
)
assert pytest.approx(133.48, rel=1e-4) == value(
muni.outlet.flow_mass_comp[0.0, "H2O"]
)
assert pytest.approx(2.9557e-2, rel=1e-4) == value(
muni.outlet.flow_mass_comp[0.0, "tds"]
)
assert value(m.LCOW) == pytest.approx(0.845200, rel=1e-5)
@pytest.mark.component
def test_seawater_RO_desalination_pump_as_turbine():
m = main(erd_type="pump_as_turbine")
f = m.fs.feed
assert pytest.approx(305.63, rel=1e-4) == value(f.flow_mass_comp[0, "H2O"])
assert pytest.approx(10.822, rel=1e-4) == value(f.flow_mass_comp[0, "tds"])
assert pytest.approx(9.2760e-3, rel=1e-4) == value(f.flow_mass_comp[0, "tss"])
assert pytest.approx(0.3092, rel=1e-4) == value(f.flow_vol[0])
p1 = m.fs.desalination.P1
assert pytest.approx(0.8, rel=1e-4) == value(p1.efficiency_pump[0])
assert pytest.approx(2.6676e6, rel=1e-4) == value(p1.work_mechanical[0])
assert pytest.approx(6.9e6, rel=1e-4) == value(p1.deltaP[0])
assert pytest.approx(70.0, rel=1e-4) == value(p1.ratioP[0])
assert pytest.approx(305.57, rel=1e-4) == value(
p1.inlet.flow_mass_phase_comp[0.0, "Liq", "H2O"]
)
assert pytest.approx(10.822, rel=1e-4) == value(
p1.inlet.flow_mass_phase_comp[0.0, "Liq", "TDS"]
)
assert pytest.approx(298.00, rel=1e-4) == value(p1.inlet.temperature[0.0])
assert pytest.approx(1.0e5, rel=1e-4) == value(p1.inlet.pressure[0.0])
assert pytest.approx(305.57, rel=1e-4) == value(
p1.outlet.flow_mass_phase_comp[0.0, "Liq", "H2O"]
)
assert pytest.approx(10.822, rel=1e-4) == value(
p1.outlet.flow_mass_phase_comp[0.0, "Liq", "TDS"]
)
assert pytest.approx(298.00, rel=1e-4) == value(p1.outlet.temperature[0.0])
assert pytest.approx(7.0e6, rel=1e-4) == value(p1.outlet.pressure[0.0])
ro = m.fs.desalination.RO
assert pytest.approx(13914.0, rel=1e-4) == value(ro.area)
assert pytest.approx(0.43681, rel=1e-4) == value(
ro.recovery_mass_phase_comp[0.0, "Liq", "H2O"]
)
assert pytest.approx(0.43293, rel=1e-4) == value(ro.recovery_vol_phase[0.0, "Liq"])
assert pytest.approx(305.57, rel=1e-4) == value(
ro.inlet.flow_mass_phase_comp[0.0, "Liq", "H2O"]
)
assert pytest.approx(10.822, rel=1e-4) == value(
ro.inlet.flow_mass_phase_comp[0.0, "Liq", "TDS"]
)
assert pytest.approx(298.00, rel=1e-4) == value(ro.inlet.temperature[0.0])
assert pytest.approx(7.0e6, rel=1e-4) == value(ro.inlet.pressure[0.0])
assert pytest.approx(172.09, rel=1e-4) == value(
ro.retentate.flow_mass_phase_comp[0.0, "Liq", "H2O"]
)
assert pytest.approx(10.792, rel=1e-4) == value(
ro.retentate.flow_mass_phase_comp[0.0, "Liq", "TDS"]
)
assert pytest.approx(298.02, rel=1e-4) == value(ro.retentate.temperature[0.0])
assert pytest.approx(6.7759e6, rel=1e-4) == value(ro.retentate.pressure[0.0])
assert pytest.approx(133.48, rel=1e-4) == value(
ro.permeate.flow_mass_phase_comp[0.0, "Liq", "H2O"]
)
assert pytest.approx(2.9557e-2, rel=1e-4) == value(
ro.permeate.flow_mass_phase_comp[0.0, "Liq", "TDS"]
)
assert pytest.approx(298.02, rel=1e-4) == value(ro.permeate.temperature[0.0])
assert pytest.approx(1.0132e5, rel=1e-4) == value(ro.permeate.pressure[0.0])
muni = m.fs.municipal
assert pytest.approx(147.59, rel=1e-4) == value(muni.electricity[0])
assert pytest.approx(133.48, rel=1e-4) == value(
muni.inlet.flow_mass_comp[0.0, "H2O"]
)
assert pytest.approx(2.9557e-2, rel=1e-4) == value(
muni.inlet.flow_mass_comp[0.0, "tds"]
)
assert pytest.approx(133.48, rel=1e-4) == value(
muni.outlet.flow_mass_comp[0.0, "H2O"]
)
assert pytest.approx(2.9557e-2, rel=1e-4) == value(
muni.outlet.flow_mass_comp[0.0, "tds"]
)
assert value(m.LCOW) == pytest.approx(1.115729, rel=1e-5)
|
py | b408da2e64076c3ca3caf0f0edca218c6f813280 | from typing import Dict, Optional
import numpy
from overrides import overrides
import torch
from torch import nn
import torch.nn.functional as F
from allennlp.data import Vocabulary
from allennlp.modules import FeedForward, Seq2SeqEncoder, Seq2VecEncoder, TextFieldEmbedder
from allennlp.models.model import Model
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy
@Model.register("sequence_classification")
class SequenceClassification(Model):
def __init__(self, vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
embedding_dropout: float,
seq2seq_encoder: Seq2SeqEncoder,
classifier_feedforward: FeedForward,
initializer: InitializerApplicator = InitializerApplicator(),
regularizer: Optional[RegularizerApplicator] = None) -> None:
super(SequenceClassification, self).__init__(vocab, regularizer)
self.text_field_embedder = text_field_embedder
self._embedding_dropout = nn.Dropout(embedding_dropout)
self.num_classes = self.vocab.get_vocab_size("label")
self.seq2seq_encoder = seq2seq_encoder
self.self_attentive_pooling_projection = nn.Linear(seq2seq_encoder.get_output_dim(), 1)
self.classifier_feedforward = classifier_feedforward
self.metrics = {
"accuracy": CategoricalAccuracy(),
"accuracy3": CategoricalAccuracy(top_k=3)
}
self.loss = torch.nn.CrossEntropyLoss()
initializer(self)
@overrides
def forward(self, # type: ignore
tokens: Dict[str, torch.LongTensor],
label: torch.LongTensor = None) -> Dict[str, torch.Tensor]:
mask_tokens = util.get_text_field_mask(tokens)
embedded_tokens = self.text_field_embedder(tokens)
dropped_embedded_tokens = self._embedding_dropout(embedded_tokens)
encoded_tokens = self.seq2seq_encoder(dropped_embedded_tokens, mask_tokens)
self_attentive_logits = self.self_attentive_pooling_projection(encoded_tokens).squeeze(2)
self_weights = util.masked_softmax(self_attentive_logits, mask_tokens)
encoding_result = util.weighted_sum(encoded_tokens, self_weights)
logits = self.classifier_feedforward(encoding_result)
class_probabilities = F.softmax(logits, dim=-1)
output_dict = {'logits': logits, 'class_probabilities': class_probabilities}
if label is not None:
loss = self.loss(logits, label)
for metric in self.metrics.values():
metric(logits, label)
output_dict["loss"] = loss
return output_dict
@overrides
def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
"""
Does a simple argmax over the class probabilities, converts indices to string labels, and
adds a ``"label"`` key to the dictionary with the result.
"""
predictions = output_dict["class_probabilities"].cpu().data.numpy()
argmax_indices = numpy.argmax(predictions, axis=-1)
labels = [self.vocab.get_token_from_index(x, namespace="labels")
for x in argmax_indices]
output_dict['label'] = labels
return output_dict
@overrides
def get_metrics(self, reset: bool = False) -> Dict[str, float]:
return {metric_name: metric.get_metric(reset) for metric_name, metric in self.metrics.items()}
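    # Shape sketch of the self-attentive pooling used in forward() (explanatory comment, not original code):
    #   encoded_tokens:        (batch, seq_len, hidden)
    #   self_attentive_logits: (batch, seq_len)   after the 1-unit linear projection and squeeze(2)
    #   self_weights:          (batch, seq_len)   masked softmax over the sequence
    #   encoding_result:       (batch, hidden)    weighted sum fed to classifier_feedforward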
|
py | b408dad9d9a6fb0d96719d40117308624e0dda0c | import time
import sys
import math
sys.path.append('../SX1509-WiringPi-Python')
from SX1509 import SX1509
sys.path.append('../classes')
from WiiMote import WiiMote
from RGBLED import RGBLED
from SoundController import SoundController
sys.path.append('../')
from sabines_utils import hsv2rgb
from threading import Timer
expander = SX1509(0x3E)
expander.reset(False)
engineLED = RGBLED(expander, [0, 1, 2, 3], False)
engineLEDLeft = RGBLED(expander, [3, 4, 5, 6], False)
engineLEDRight = RGBLED(expander, [6, 7, 8, 9], False)
bridgeLED = RGBLED(expander, [9, 10, 11, 12], False)
wiimote = WiiMote(None)
wiimote.init()
sound = SoundController()
animating = False
hue = 0
def engineRed():
engineLED.setColor([255, 0, 0])
engineLEDLeft.setColor([255, 0, 0])
engineLEDRight.setColor([255, 0, 0])
engineLEDRight.setColor([255, 0, 0])
sound.start(SoundController.FILES['RESISTANCE'])
wiimote.on(wiimote.WIIMOTE_KEYS['A'], engineRed)
def engineOff():
global animating
animating = False
engineLED.setColor([0, 0, 0])
engineLEDLeft.setColor([0, 0, 0])
engineLEDRight.setColor([0, 0, 0])
bridgeLED.setColor([0, 0, 0])
sound.stop()
wiimote.on(wiimote.WIIMOTE_KEYS['DOWN'], engineOff)
def imperialMarch():
engineLED.setColor([0, 0, 255])
engineLEDLeft.setColor([0, 0, 255])
engineLEDRight.setColor([0, 0, 255])
bridgeLED.setColor([100, 100, 100])
sound.start(SoundController.FILES['MARCH'])
wiimote.on(WiiMote.WIIMOTE_KEYS['B'], imperialMarch)
def rickrollColor():
global animating, hue, engineOff
if animating:
color = hsv2rgb(hue, 1, 1)
engineLED.setColor([color[0], color[1], color[2]])
hue = hue + 1
Timer(0.01, rickrollColor).start()
def rickroll():
global animating, engineOff
engineOff()
sound.stop()
sound.start(SoundController.FILES['RICKROLL'])
animating = True
Timer(0.01, rickrollColor).start()
wiimote.on(WiiMote.WIIMOTE_KEYS['ONE'], rickroll)
while True:
hue = hue |
py | b408dc19b6d1c3a8a97ff9df014d04d017592749 | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import NotSupported
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class mexc3(Exchange):
def describe(self):
return self.deep_extend(super(mexc3, self).describe(), {
'id': 'mexc3',
'name': 'MEXC Global',
'countries': ['SC'], # Seychelles
'rateLimit': 50, # default rate limit is 20 times per second
'version': 'v3',
'has': {
'CORS': None,
'spot': None,
'margin': None,
'swap': None,
'future': None,
'option': None,
'addMargin': True,
'cancelAllOrders': True,
'cancelOrder': True,
'cancelOrders': None,
'createDepositAddress': None,
'createLimitOrder': None,
'createMarketOrder': None,
'createOrder': True,
'deposit': None,
'editOrder': None,
'fetchAccounts': True,
'fetchBalance': True,
'fetchBidsAsks': True,
'fetchBorrowRate': None,
'fetchBorrowRateHistory': None,
'fetchBorrowRates': None,
'fetchBorrowRatesPerSymbol': None,
'fetchCanceledOrders': True,
'fetchClosedOrder': None,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDeposit': None,
'fetchDepositAddress': True,
'fetchDepositAddresses': None,
'fetchDepositAddressesByNetwork': True,
'fetchDeposits': True,
'fetchFundingHistory': True,
'fetchFundingRate': True,
'fetchFundingRateHistory': True,
'fetchFundingRates': None,
'fetchIndexOHLCV': True,
'fetchL2OrderBook': True,
'fetchLedger': None,
'fetchLedgerEntry': None,
'fetchLeverageTiers': True,
'fetchMarketLeverageTiers': None,
'fetchMarkets': True,
'fetchMarkOHLCV': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrder': None,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrderBooks': None,
'fetchOrders': True,
'fetchOrderTrades': True,
'fetchPosition': True,
'fetchPositions': True,
'fetchPositionsRisk': None,
'fetchPremiumIndexOHLCV': False,
'fetchStatus': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingFee': None,
'fetchTradingFees': True,
'fetchTradingLimits': None,
'fetchTransactionFee': None,
'fetchTransactionFees': None,
'fetchTransactions': None,
'fetchTransfer': True,
'fetchTransfers': True,
'fetchWithdrawal': None,
'fetchWithdrawals': True,
'loadMarkets': None,
'privateAPI': True,
'publicAPI': True,
'reduceMargin': True,
'setLeverage': True,
'setMarginMode': None,
'setPositionMode': None,
'signIn': None,
'transfer': None,
'withdraw': True,
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/137283979-8b2a818d-8633-461b-bfca-de89e8c446b2.jpg',
'api': {
'spot': {
'public': 'https://api.mexc.com',
'private': 'https://api.mexc.com',
},
'spot2': {
'public': 'https://www.mexc.com/open/api/v2',
'private': 'https://www.mexc.com/open/api/v2',
},
'contract': {
'public': 'https://contract.mexc.com/api/v1/contract',
'private': 'https://contract.mexc.com/api/v1/private',
},
},
'www': 'https://www.mexc.com/',
'doc': [
'https://mxcdevelop.github.io/apidocs/spot_v3_en/',
'https://mxcdevelop.github.io/APIDoc/', # v1 & v2 : soon to be deprecated
],
'fees': [
'https://www.mexc.com/fee',
],
'referral': 'https://m.mexc.com/auth/signup?inviteCode=1FQ1G',
},
'api': {
'spot': {
'public': {
'get': {
'ping': 1,
'time': 1,
'exchangeInfo': 1,
'depth': 1,
'trades': 1,
'historicalTrades': 1,
'aggTrades': 1,
'klines': 1,
'avgPrice': 1,
'ticker/24hr': 1,
'ticker/price': 1,
'ticker/bookTicker': 1,
'etf/info': 1,
},
},
'private': {
'get': {
'order': 1,
'openOrders': 1,
'allOrders': 1,
'account': 1,
'myTrades': 1,
'sub-account/list': 1,
'sub-account/apiKey': 1,
},
'post': {
'order': 1,
'order/test': 1,
'sub-account/virtualSubAccount': 1,
'sub-account/apiKey': 1,
},
'delete': {
'order': 1,
'openOrders': 1,
'sub-account/apiKey': 1,
},
},
},
'contract': {
'public': {
'get': {
'ping': 2,
'detail': 2,
'support_currencies': 2, # TODO: should we implement 'fetchCurrencies' solely for swap? because spot doesnt have it atm
'depth/{symbol}': 2,
'depth_commits/{symbol}/{limit}': 2,
'index_price/{symbol}': 2,
'fair_price/{symbol}': 2,
'funding_rate/{symbol}': 2,
'kline/{symbol}': 2,
'kline/index_price/{symbol}': 2,
'kline/fair_price/{symbol}': 2,
'deals/{symbol}': 2,
'ticker': 2,
'risk_reverse': 2,
'risk_reverse/history': 2,
'funding_rate/history': 2,
},
},
'private': {
'get': {
'account/assets': 2,
'account/asset/{currency}': 2,
'account/transfer_record': 2,
'position/list/history_positions': 2,
'position/open_positions': 2,
'position/funding_records': 2,
'order/list/open_orders/{symbol}': 2,
'order/list/history_orders': 2,
'order/external/{symbol}/{external_oid}': 2,
'order/get/{order_id}': 2,
'order/batch_query': 8,
'order/deal_details/{order_id}': 2,
'order/list/order_deals': 2,
'planorder/list/orders': 2,
'stoporder/list/orders': 2,
'stoporder/order_details/{stop_order_id}': 2,
'account/risk_limit': 2, # TO_DO: gets max/min position size, allowed sides, leverage, maintenance margin, initial margin, etc...
'account/tiered_fee_rate': 2, # TO_DO: taker/maker fees for account
},
'post': {
'position/change_margin': 2,
'position/change_leverage': 2,
'order/submit': 2,
'order/submit_batch': 40,
'order/cancel': 2,
'order/cancel_with_external': 2,
'order/cancel_all': 2,
'account/change_risk_level': 2,
'planorder/place': 2,
'planorder/cancel': 2,
'planorder/cancel_all': 2,
'stoporder/cancel': 2,
'stoporder/cancel_all': 2,
'stoporder/change_price': 2,
'stoporder/change_plan_price': 2,
},
},
},
'spot2': {
'public': {
'get': {
'market/symbols': 1,
'market/coin/list': 2,
'common/timestamp': 1,
'common/ping': 1,
'market/ticker': 1,
'market/depth': 1,
'market/deals': 1,
'market/kline': 1,
'market/api_default_symbols': 2,
},
},
'private': {
'get': {
'account/info': 1,
'order/open_orders': 1,
'order/list': 1,
'order/query': 1,
'order/deals': 1,
'order/deal_detail': 1,
'asset/deposit/address/list': 2,
'asset/deposit/list': 2,
'asset/address/list': 2,
'asset/withdraw/list': 2,
'asset/internal/transfer/record': 10,
'account/balance': 10,
'asset/internal/transfer/info': 10,
'market/api_symbols': 2,
},
'post': {
'order/place': 1,
'order/place_batch': 1,
'asset/withdraw': 2,
'asset/internal/transfer': 10,
},
'delete': {
'order/cancel': 1,
'order/cancel_by_symbol': 1,
'asset/withdraw': 2,
},
},
},
},
'precisionMode': TICK_SIZE,
'timeframes': {
'1m': '1m', # spot, swap
'3m': '3m', # spot
'5m': '5m', # spot, swap
'15m': '15m', # spot, swap
'30m': '30m', # spot, swap
'1h': '1h', # spot, swap
'2h': '2h', # spot
'4h': '4h', # spot, swap
'6h': '6h', # spot
'8h': '8h', # spot, swap
'12h': '12h', # spot
'1d': '1d', # spot, swap
'3d': '3d', # spot
'1w': '1w', # spot, swap
'1M': '1M', # spot, swap
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': 0.2 / 100, # maker / taker
'taker': 0.2 / 100,
},
},
'options': {
'createMarketBuyOrderRequiresPrice': True,
'unavailableContracts': {
'BTC/USDT:USDT': True,
'LTC/USDT:USDT': True,
'ETH/USDT:USDT': True,
},
'fetchMarkets': {
'types': {
'spot': True,
'future': {
'linear': False,
'inverse': False,
},
'swap': {
'linear': True,
'inverse': False,
},
},
},
'timeframes': {
'spot': {
'1m': '1m',
'3m': '3m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
'3d': '3d',
'1w': '1w',
'1M': '1M',
},
'swap': {
'1m': 'Min1',
'5m': 'Min5',
'15m': 'Min15',
'30m': 'Min30',
'1h': 'Min60',
'4h': 'Hour4',
'8h': 'Hour8',
'1d': 'Day1',
'1w': 'Week1',
'1M': 'Month1',
},
},
'defaultType': 'spot', # spot, swap
'networks': {
'TRX': 'TRC-20',
'TRC20': 'TRC-20',
'ETH': 'ERC-20',
'ERC20': 'ERC-20',
'BEP20': 'BEP20(BSC)',
'BSC': 'BEP20(BSC)',
},
'networkAliases': {
'BSC(BEP20)': 'BSC',
},
'recvWindow': 5 * 1000, # 5 sec, default
'maxTimeTillEnd': 90 * 86400 * 1000 - 1, # 90 days
},
'commonCurrencies': {
'BEYONDPROTOCOL': 'BEYOND',
'BIFI': 'BIFIF',
'BYN': 'BeyondFi',
'COFI': 'COFIX', # conflict with CoinFi
'DFI': 'DfiStarter',
'DFT': 'dFuture',
'DRK': 'DRK',
'EGC': 'Egoras Credit',
'FLUX1': 'FLUX', # switched places
'FLUX': 'FLUX1', # switched places
'FREE': 'FreeRossDAO', # conflict with FREE Coin
'HERO': 'Step Hero', # conflict with Metahero
'MIMO': 'Mimosa',
'PROS': 'Pros.Finance', # conflict with Prosper
'SIN': 'Sin City Token',
},
'exceptions': {
'exact': {
                    # until mexc migrates fully to v3, it might be worth noting the API version & market type alongside these errors, so obsolete versions' exceptions can be removed more easily in the future
'-1128': BadRequest,
'-2011': BadRequest,
'-1121': BadSymbol,
'2009': InvalidOrder, # {"success":false,"code":2009,"message":"Position is not exists or closed."}
'2011': BadRequest,
'30004': InsufficientFunds,
'1002': InvalidOrder,
'30019': BadRequest,
'30005': InvalidOrder,
'2003': InvalidOrder,
'2005': InsufficientFunds,
'600': BadRequest,
},
'broad': {
'Order quantity error, please try to modify.': BadRequest, # code:2011
'Combination of optional parameters invalid': BadRequest, # code:-2011
'api market order is disabled': BadRequest, #
'Contract not allow place order!': InvalidOrder, # code:1002
'Oversold': InvalidOrder, # code:30005
'Insufficient position': InsufficientFunds, # code:30004
'Insufficient balance!': InsufficientFunds, # code:2005
'Bid price is great than max allow price': InvalidOrder, # code:2003
'Invalid symbol.': BadSymbol, # code:-1121
'Param error!': BadRequest, # code:600
},
},
})
def fetch_status(self, params={}):
"""
the latest known information on the availability of the exchange API
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a `status structure <https://docs.ccxt.com/en/latest/manual.html#exchange-status-structure>`
"""
marketType, query = self.handle_market_type_and_params('fetchStatus', None, params)
response = None
status = None
updated = None
if marketType == 'spot':
response = self.spotPublicGetPing(query)
#
# {}
#
status = self.json(response) if response else 'ok'
elif marketType == 'swap':
response = self.contractPublicGetPing(query)
#
# {"success":true,"code":"0","data":"1648124374985"}
#
status = 'ok' if self.safe_value(response, 'success') else self.json(response)
updated = self.safe_integer(response, 'data')
return {
'status': status,
'updated': updated,
'url': None,
'eta': None,
'info': response,
}
def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
marketType, query = self.handle_market_type_and_params('fetchTime', None, params)
response = None
if marketType == 'spot':
response = self.spotPublicGetTime(query)
#
# {"serverTime": "1647519277579"}
#
return self.safe_integer(response, 'serverTime')
elif marketType == 'swap':
response = self.contractPublicGetPing(query)
#
# {"success":true,"code":"0","data":"1648124374985"}
#
return self.safe_integer(response, 'data')
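    # A commented usage sketch for the two helpers above(illustrative, not part of the class logic):
    # both route through handle_market_type_and_params(), so the market type defaults to
    # options['defaultType']('spot') and can be overridden per call via params['type'].
    # The 'mexc3' id and keyless public access are assumptions made for the example:
    #
    #     import ccxt
    #     exchange = ccxt.mexc3()
    #     exchange.fetch_status()                   # pings the spot API
    #     exchange.fetch_status({'type': 'swap'})   # pings the contract API instead
    #     exchange.fetch_time({'type': 'swap'})     # contract server time in milliseconds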
def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: an associative dictionary of currencies
"""
response = self.spot2PublicGetMarketCoinList(params)
#
# {
# "code":200,
# "data":[
# {
# "currency":"AGLD",
# "coins":[
# {
# "chain":"ERC20",
# "precision":18,
# "fee":8.09,
# "is_withdraw_enabled":true,
# "is_deposit_enabled":true,
# "deposit_min_confirm":16,
# "withdraw_limit_max":500000.0,
# "withdraw_limit_min":14.0
# }
# ],
# "full_name":"Adventure Gold"
# },
# ]
# }
#
data = self.safe_value(response, 'data', [])
result = {}
for i in range(0, len(data)):
currency = data[i]
id = self.safe_string(currency, 'currency')
code = self.safe_currency_code(id)
name = self.safe_string(currency, 'full_name')
currencyActive = False
currencyPrecision = None
currencyFee = None
currencyWithdrawMin = None
currencyWithdrawMax = None
networks = {}
chains = self.safe_value(currency, 'coins', [])
depositEnabled = False
withdrawEnabled = False
for j in range(0, len(chains)):
chain = chains[j]
networkId = self.safe_string(chain, 'chain')
network = self.safe_network(networkId)
isDepositEnabled = self.safe_value(chain, 'is_deposit_enabled', False)
isWithdrawEnabled = self.safe_value(chain, 'is_withdraw_enabled', False)
active = (isDepositEnabled and isWithdrawEnabled)
currencyActive = active or currencyActive
withdrawMin = self.safe_string(chain, 'withdraw_limit_min')
withdrawMax = self.safe_string(chain, 'withdraw_limit_max')
currencyWithdrawMin = withdrawMin if (currencyWithdrawMin is None) else currencyWithdrawMin
currencyWithdrawMax = withdrawMax if (currencyWithdrawMax is None) else currencyWithdrawMax
if Precise.string_gt(currencyWithdrawMin, withdrawMin):
currencyWithdrawMin = withdrawMin
if Precise.string_lt(currencyWithdrawMax, withdrawMax):
currencyWithdrawMax = withdrawMax
if isDepositEnabled:
depositEnabled = True
if isWithdrawEnabled:
withdrawEnabled = True
networks[network] = {
'info': chain,
'id': networkId,
'network': network,
'active': active,
'deposit': isDepositEnabled,
'withdraw': isWithdrawEnabled,
'fee': self.safe_number(chain, 'fee'),
'precision': self.parse_number(self.parse_precision(self.safe_string(chain, 'precision'))),
'limits': {
'withdraw': {
'min': withdrawMin,
'max': withdrawMax,
},
},
}
networkKeys = list(networks.keys())
networkKeysLength = len(networkKeys)
if (networkKeysLength == 1) or ('NONE' in networks):
defaultNetwork = self.safe_value_2(networks, 'NONE', networkKeysLength - 1)
if defaultNetwork is not None:
currencyFee = defaultNetwork['fee']
currencyPrecision = defaultNetwork['precision']
result[code] = {
'id': id,
'code': code,
'info': currency,
'name': name,
'active': currencyActive,
'deposit': depositEnabled,
'withdraw': withdrawEnabled,
'fee': currencyFee,
'precision': currencyPrecision,
'limits': {
'amount': {
'min': None,
'max': None,
},
'withdraw': {
'min': currencyWithdrawMin,
'max': currencyWithdrawMax,
},
},
'networks': networks,
}
return result
def safe_network(self, networkId):
if networkId.find('BSC') >= 0:
return 'BEP20'
parts = networkId.split(' ')
networkId = ''.join(parts)
networkId = networkId.replace('-20', '20')
networksById = {
'ETH': 'ETH',
'ERC20': 'ERC20',
'BEP20(BSC)': 'BEP20',
'TRX': 'TRC20',
}
return self.safe_string(networksById, networkId, networkId)
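    # Illustrative examples of the normalization implemented above, derived directly from the code:
    #
    #     self.safe_network('BEP20(BSC)')  # -> 'BEP20'(contains 'BSC')
    #     self.safe_network('TRC-20')      # -> 'TRC20'(spaces removed, '-20' collapsed to '20')
    #     self.safe_network('TRX')         # -> 'TRC20'(via the networksById map)
    #     self.safe_network('SOMECHAIN')   # -> 'SOMECHAIN'(unknown ids pass through unchanged)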
def fetch_markets(self, params={}):
"""
retrieves data on all markets for mexc3
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
spotMarket = self.fetch_spot_markets(params)
swapMarket = self.fetch_swap_markets(params)
return self.array_concat(spotMarket, swapMarket)
def fetch_spot_markets(self, params={}):
response = self.spotPublicGetExchangeInfo(params)
#
# {
# "timezone": "CST",
# "serverTime": 1647521860402,
# "rateLimits": [],
# "exchangeFilters": [],
# "symbols": [
# {
# "symbol": "OGNUSDT",
# "status": "ENABLED",
# "baseAsset": "OGN",
# "baseAssetPrecision": "2",
# "quoteAsset": "USDT",
# "quoteAssetPrecision": "4",
# "orderTypes": [
# "LIMIT",
# "LIMIT_MAKER"
# ],
# "baseCommissionPrecision": "2",
# "quoteCommissionPrecision": "4",
# "quoteOrderQtyMarketAllowed": False,
# "isSpotTradingAllowed": True,
# "isMarginTradingAllowed": True,
# "permissions": [
# "SPOT",
# "MARGIN"
# ],
# "filters": [],
        #             "baseSizePrecision": "0.01", # seems to be derived from 'baseAssetPrecision'
# "maxQuoteAmount": "5000000",
# "makerCommission": "0.002",
# "takerCommission": "0.002"
        #             "quoteAmountPrecision": "5", # seems to be an unrelated value, as neither the quote nor the base precision is related to this number
# "quotePrecision": "4", # deprecated in favor of 'quoteAssetPrecision'( https://dev.binance.vision/t/what-is-the-difference-between-quoteprecision-and-quoteassetprecision/4333 )
# # note, "icebergAllowed" & "ocoAllowed" fields were recently removed
# },
# ]
# }
#
# Notes:
        # - 'quoteAssetPrecision' & 'baseAssetPrecision' are not the currency's real blockchain precision(to view the currency's actual precision, refer to the fetchCurrencies() method).
#
data = self.safe_value(response, 'symbols', [])
result = []
for i in range(0, len(data)):
market = data[i]
id = self.safe_string(market, 'symbol')
baseId = self.safe_string(market, 'baseAsset')
quoteId = self.safe_string(market, 'quoteAsset')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
status = self.safe_string(market, 'status')
makerCommission = self.safe_number(market, 'makerCommission')
takerCommission = self.safe_number(market, 'takerCommission')
maxQuoteAmount = self.safe_number(market, 'maxQuoteAmount')
result.append({
'id': id,
'symbol': base + '/' + quote,
'base': base,
'quote': quote,
'settle': None,
'baseId': baseId,
'quoteId': quoteId,
'settleId': None,
'type': 'spot',
'spot': True,
'margin': False,
'swap': False,
'future': False,
'option': False,
'active': (status == 'ENABLED'),
'contract': False,
'linear': None,
'inverse': None,
'taker': takerCommission,
'maker': makerCommission,
'contractSize': None,
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': self.parse_number(self.parse_precision(self.safe_string(market, 'baseAssetPrecision'))),
'price': self.parse_number(self.parse_precision(self.safe_string(market, 'quoteAssetPrecision'))),
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': None,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': maxQuoteAmount,
},
},
'info': market,
})
return result
def fetch_swap_markets(self, params={}):
response = self.contractPublicGetDetail(params)
#
# {
# "success":true,
# "code":0,
# "data":[
# {
# "symbol":"BTC_USDT",
# "displayName":"BTC_USDT永续",
# "displayNameEn":"BTC_USDT SWAP",
# "positionOpenType":3,
# "baseCoin":"BTC",
# "quoteCoin":"USDT",
# "settleCoin":"USDT",
# "contractSize":0.0001,
# "minLeverage":1,
# "maxLeverage":125,
# "priceScale":2, # seems useless atm, as it's just how UI shows the price, i.e. 29583.50 for BTC/USDT:USDT, while price ticksize is 0.5
# "volScale":0, # probably: contract amount precision
# "amountScale":4, # probably: quote currency precision
# "priceUnit":0.5, # price tick size
# "volUnit":1, # probably: contract tick size
# "minVol":1,
# "maxVol":1000000,
# "bidLimitPriceRate":0.1,
# "askLimitPriceRate":0.1,
# "takerFeeRate":0.0006,
# "makerFeeRate":0.0002,
# "maintenanceMarginRate":0.004,
# "initialMarginRate":0.008,
# "riskBaseVol":10000,
# "riskIncrVol":200000,
# "riskIncrMmr":0.004,
# "riskIncrImr":0.004,
# "riskLevelLimit":5,
# "priceCoefficientVariation":0.1,
# "indexOrigin":["BINANCE","GATEIO","HUOBI","MXC"],
# "state":0, # 0 enabled, 1 delivery, 2 completed, 3 offline, 4 pause
# "isNew":false,
# "isHot":true,
# "isHidden":false
# },
# ]
# }
#
data = self.safe_value(response, 'data', [])
result = []
for i in range(0, len(data)):
market = data[i]
id = self.safe_string(market, 'symbol')
baseId = self.safe_string(market, 'baseCoin')
quoteId = self.safe_string(market, 'quoteCoin')
settleId = self.safe_string(market, 'settleCoin')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
settle = self.safe_currency_code(settleId)
state = self.safe_string(market, 'state')
result.append({
'id': id,
'symbol': base + '/' + quote + ':' + settle,
'base': base,
'quote': quote,
'settle': settle,
'baseId': baseId,
'quoteId': quoteId,
'settleId': settleId,
'type': 'swap',
'spot': False,
'margin': False,
'swap': True,
'future': False,
'option': False,
'active': (state == '0'),
'contract': True,
'linear': True,
'inverse': False,
'taker': self.safe_number(market, 'takerFeeRate'),
'maker': self.safe_number(market, 'makerFeeRate'),
'contractSize': self.safe_number(market, 'contractSize'),
'expiry': None,
'expiryDatetime': None,
'strike': None,
'optionType': None,
'precision': {
'amount': self.safe_number(market, 'volUnit'),
'price': self.safe_number(market, 'priceUnit'),
},
'limits': {
'leverage': {
'min': self.safe_number(market, 'minLeverage'),
'max': self.safe_number(market, 'maxLeverage'),
},
'amount': {
'min': self.safe_number(market, 'minVol'),
'max': self.safe_number(market, 'maxVol'),
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
},
'info': market,
})
return result
def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
"""
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['limit'] = limit
orderbook = None
if market['spot']:
response = self.spotPublicGetDepth(self.extend(request, params))
#
# {
# "lastUpdateId": "744267132",
# "bids": [
# ["40838.50","0.387864"],
# ["40837.95","0.008400"],
# ],
# "asks": [
# ["40838.61","6.544908"],
# ["40838.88","0.498000"],
# ]
# }
#
orderbook = self.parse_order_book(response, symbol)
orderbook['nonce'] = self.safe_integer(response, 'lastUpdateId')
elif market['swap']:
response = self.contractPublicGetDepthSymbol(self.extend(request, params))
#
# {
# "success":true,
# "code":0,
# "data":{
# "asks":[
# [3445.72,48379,1],
# [3445.75,34994,1],
# ],
# "bids":[
# [3445.55,44081,1],
# [3445.51,24857,1],
# ],
# "version":2827730444,
# "timestamp":1634117846232
# }
# }
#
data = self.safe_value(response, 'data')
timestamp = self.safe_integer(data, 'timestamp')
orderbook = self.parse_order_book(data, symbol, timestamp)
orderbook['nonce'] = self.safe_integer(data, 'version')
return orderbook
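    # A commented usage sketch(assumes a configured ccxt.mexc3 instance named exchange; symbols are only examples):
    #
    #     book = exchange.fetch_order_book('BTC/USDT', 5)             # spot depth, 'limit' forwarded as-is
    #     swap_book = exchange.fetch_order_book('BTC/USDT:USDT', 5)   # linear swap depth endpoint
    #     best_bid = book['bids'][0]   # [price, amount]
    #     nonce = book['nonce']        # 'lastUpdateId' for spot, 'version' for swap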
def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['limit'] = limit
# if since is not None:
# request['startTime'] = since; bug in api, waiting for fix
# }
trades = None
if market['spot']:
method = self.safe_string(self.options, 'fetchTradesMethod', 'spotPublicGetAggTrades')
method = self.safe_string(params, 'method', method) # AggTrades, HistoricalTrades, Trades
trades = getattr(self, method)(self.extend(request, params))
#
# /trades, /historicalTrades
#
# [
# {
# "id": null,
# "price": "40798.94",
# "qty": "0.000508",
# "quoteQty": "20.72586152",
# "time": "1647546934374",
# "isBuyerMaker": True,
# "isBestMatch": True
# },
# ]
#
# /aggrTrades
#
# [
# {
# "a": null,
# "f": null,
# "l": null,
# "p": "40679",
# "q": "0.001309",
# "T": 1647551328000,
# "m": True,
# "M": True
# },
# ]
#
elif market['swap']:
response = self.contractPublicGetDealsSymbol(self.extend(request, params))
#
# {
# "success": True,
# "code": 0,
# "data": [
# {
# "p": 31199,
# "v": 18,
# "T": 1,
# "O": 3,
# "M": 2,
# "t": 1609831235985
# },
# ]
# }
#
trades = self.safe_value(response, 'data')
return self.parse_trades(trades, market, since, limit)
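    # A commented usage sketch: the spot endpoint comes from options['fetchTradesMethod'](default
    # 'spotPublicGetAggTrades') and, judging by the comment above, can apparently be switched via
    # params['method'](the alternative method name below is an assumption based on that comment):
    #
    #     trades = exchange.fetch_trades('BTC/USDT', limit=10)
    #     trades = exchange.fetch_trades('BTC/USDT', limit=10, params={'method': 'spotPublicGetHistoricalTrades'})
    #     swap_trades = exchange.fetch_trades('BTC/USDT:USDT', limit=10)   # contract deals endpoint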
def parse_trade(self, trade, market=None):
id = None
timestamp = None
orderId = None
symbol = None
fee = None
type = None
side = None
takerOrMaker = None
priceString = None
amountString = None
costString = None
# if swap
if 'v' in trade:
#
# swap: fetchTrades
#
# {
# "p": 31199,
# "v": 18,
# "T": 1,
# "O": 3,
# "M": 2,
# "t": 1609831235985
# }
#
timestamp = self.safe_integer(trade, 't')
market = self.safe_market(None, market)
symbol = market['symbol']
priceString = self.safe_string(trade, 'p')
amountString = self.safe_string(trade, 'v')
side = self.parse_order_side(self.safe_string(trade, 'T'))
takerOrMaker = 'taker'
else:
#
# spot: fetchTrades(for aggTrades)
#
# {
# "a": null,
# "f": null,
# "l": null,
# "p": "40679",
# "q": "0.001309",
# "T": 1647551328000,
# "m": True,
# "M": True
# }
#
# spot: fetchMyTrades, fetchOrderTrades
#
# {
# "symbol": "BTCUSDT",
# "id": "133948532984922113",
# "orderId": "133948532531949568",
# "orderListId": "-1",
# "price": "41995.51",
# "qty": "0.0002",
# "quoteQty": "8.399102",
# "commission": "0.016798204",
# "commissionAsset": "USDT",
# "time": "1647718055000",
# "isBuyer": True,
# "isMaker": False,
# "isBestMatch": True
# }
#
# swap: fetchMyTrades, fetchOrderTrades
#
# {
# "id": "299444585",
# "symbol": "STEPN_USDT",
# "side": "1",
# "vol": "1",
# "price": "2.45455",
# "feeCurrency": "USDT",
# "fee": "0.00147273",
# "timestamp": "1648924557000",
# "profit": "0",
# "category": "1",
# "orderId": "265307163526610432",
# "positionMode": "1",
# "taker": True
# }
#
marketId = self.safe_string(trade, 'symbol')
market = self.safe_market(marketId, market)
symbol = market['symbol']
id = self.safe_string_2(trade, 'id', 'a')
priceString = self.safe_string_2(trade, 'price', 'p')
orderId = self.safe_string(trade, 'orderId')
# if swap
if 'positionMode' in trade:
timestamp = self.safe_integer(trade, 'timestamp')
amountString = self.safe_string(trade, 'vol')
side = self.parse_order_side(self.safe_string(trade, 'side'))
fee = {
'cost': self.safe_number(trade, 'fee'),
'currency': self.safe_currency_code(self.safe_string(trade, 'feeCurrency')),
}
takerOrMaker = 'taker' if self.safe_value(trade, 'taker') else 'maker'
else:
timestamp = self.safe_integer_2(trade, 'time', 'T')
amountString = self.safe_string_2(trade, 'qty', 'q')
costString = self.safe_string(trade, 'quoteQty')
isBuyer = self.safe_value(trade, 'isBuyer')
isMaker = self.safe_value(trade, 'isMaker')
buyerMaker = self.safe_string_2(trade, 'isBuyerMaker', 'm')
if isMaker is not None:
takerOrMaker = 'maker' if isMaker else 'taker'
if isBuyer is not None:
side = 'buy' if isBuyer else 'sell'
if buyerMaker is not None:
side = 'sell' if buyerMaker else 'buy'
takerOrMaker = 'taker'
feeAsset = self.safe_string(trade, 'commissionAsset')
if feeAsset is not None:
fee = {
'cost': self.safe_number(trade, 'commission'),
'currency': self.safe_currency_code(feeAsset),
}
if id is None:
id = self.synthetic_trade_id(market, timestamp, side, amountString, priceString, type, takerOrMaker)
return self.safe_trade({
'id': id,
'order': orderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'takerOrMaker': takerOrMaker,
'price': priceString,
'amount': amountString,
'cost': costString,
'fee': fee,
'info': trade,
}, market)
def synthetic_trade_id(self, market=None, timestamp=None, side=None, amount=None, price=None, orderType=None, takerOrMaker=None):
        # TODO: can this be a unified method? this approach is used by multiple exchanges(mexc, woo-coinsbit, dydx, ...)
id = ''
if timestamp is not None:
id = self.number_to_string(timestamp) + '-' + self.safe_string(market, 'id', '_')
if side is not None:
id += '-' + side
if amount is not None:
id += '-' + self.number_to_string(amount)
if price is not None:
id += '-' + self.number_to_string(price)
if takerOrMaker is not None:
id += '-' + takerOrMaker
if orderType is not None:
id += '-' + orderType
return id
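    # For example, with market = {'id': 'BTCUSDT'} the synthetic id is built as(illustrative values):
    #
    #     self.synthetic_trade_id(market, 1647551328000, 'buy', '0.001309', '40679', None, 'taker')
    #     # -> '1647551328000-BTCUSDT-buy-0.001309-40679-taker'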
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
self.load_markets()
market = self.market(symbol)
options = self.safe_value(self.options, 'timeframes', {})
timeframes = self.safe_value(options, market['type'], {})
timeframeValue = self.safe_string(timeframes, timeframe)
request = {
'symbol': market['id'],
'interval': timeframeValue,
}
candles = None
if market['spot']:
if since is not None:
request['startTime'] = since
if limit is not None:
request['limit'] = limit
response = self.spotPublicGetKlines(self.extend(request, params))
#
# [
# [
# 1640804880000,
# "47482.36",
# "47482.36",
# "47416.57",
# "47436.1",
# "3.550717",
# 1640804940000,
# "168387.3"
# ],
# ]
#
candles = response
elif market['swap']:
if since is not None:
request['start'] = int(since / 1000)
priceType = self.safe_string(params, 'price', 'default')
params = self.omit(params, 'price')
method = self.get_supported_mapping(priceType, {
'default': 'contractPublicGetKlineSymbol',
'index': 'contractPublicGetKlineIndexPriceSymbol',
'mark': 'contractPublicGetKlineFairPriceSymbol',
})
response = getattr(self, method)(self.extend(request, params))
#
# {
# "success":true,
# "code":0,
# "data":{
# "time":[1634052300,1634052360,1634052420],
# "open":[3492.2,3491.3,3495.65],
# "close":[3491.3,3495.65,3495.2],
# "high":[3495.85,3496.55,3499.4],
# "low":[3491.15,3490.9,3494.2],
# "vol":[1740.0,351.0,314.0],
# "amount":[60793.623,12260.4885,10983.1375],
# }
# }
#
data = self.safe_value(response, 'data')
candles = self.convert_trading_view_to_ohlcv(data, 'time', 'open', 'high', 'low', 'close', 'vol')
return self.parse_ohlcvs(candles, market, timeframe, since, limit)
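    # A commented usage sketch: spot intervals are sent verbatim('1h' -> '1h'), swap intervals are
    # remapped through options['timeframes']['swap']('1h' -> 'Min60'), and the 'price' param picks
    # the contract kline flavor(default / index / mark, as coded above):
    #
    #     candles = exchange.fetch_ohlcv('BTC/USDT', '1h', limit=100)
    #     swap_candles = exchange.fetch_ohlcv('BTC/USDT:USDT', '1h', limit=100)
    #     mark_candles = exchange.fetch_ohlcv('BTC/USDT:USDT', '1h', params={'price': 'mark'})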
def parse_ohlcv(self, ohlcv, market=None):
return [
self.safe_integer(ohlcv, 0),
self.safe_number(ohlcv, 1),
self.safe_number(ohlcv, 2),
self.safe_number(ohlcv, 3),
self.safe_number(ohlcv, 4),
self.safe_number(ohlcv, 5),
]
def fetch_tickers(self, symbols=None, params={}):
"""
        fetches price tickers for multiple markets, statistical information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
self.load_markets()
request = {}
market = None
isSingularMarket = False
if symbols is not None:
length = len(symbols)
isSingularMarket = length == 1
firstSymbol = self.safe_string(symbols, 0)
market = self.market(firstSymbol)
marketType, query = self.handle_market_type_and_params('fetchTickers', market, params)
tickers = None
if isSingularMarket:
request['symbol'] = market['id']
if marketType == 'spot':
tickers = self.spotPublicGetTicker24hr(self.extend(request, query))
#
# [
# {
# "symbol": "BTCUSDT",
# "priceChange": "184.34",
# "priceChangePercent": "0.00400048",
# "prevClosePrice": "46079.37",
# "lastPrice": "46263.71",
# "lastQty": "",
# "bidPrice": "46260.38",
# "bidQty": "",
# "askPrice": "46260.41",
# "askQty": "",
# "openPrice": "46079.37",
# "highPrice": "47550.01",
# "lowPrice": "45555.5",
# "volume": "1732.461487",
# "quoteVolume": null,
# "openTime": 1641349500000,
# "closeTime": 1641349582808,
# "count": null
# }
# ]
#
elif marketType == 'swap':
response = self.contractPublicGetTicker(self.extend(request, query))
#
# {
# "success":true,
# "code":0,
# "data":[
# {
# "symbol":"ETH_USDT",
# "lastPrice":3581.3,
# "bid1":3581.25,
# "ask1":3581.5,
# "volume24":4045530,
# "amount24":141331823.5755,
# "holdVol":5832946,
# "lower24Price":3413.4,
# "high24Price":3588.7,
# "riseFallRate":0.0275,
# "riseFallValue":95.95,
# "indexPrice":3580.7852,
# "fairPrice":3581.08,
# "fundingRate":0.000063,
# "maxBidPrice":3938.85,
# "minAskPrice":3222.7,
# "timestamp":1634162885016
# },
# ]
# }
#
tickers = self.safe_value(response, 'data', [])
        # when it's a single-symbol request, the returned structure is different(a singular object) for both spot & swap, thus we need to wrap it inside an array
if isSingularMarket:
tickers = [tickers]
return self.parse_tickers(tickers, symbols)
def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
self.load_markets()
market = self.market(symbol)
marketType, query = self.handle_market_type_and_params('fetchTicker', market, params)
ticker = None
request = {
'symbol': market['id'],
}
if marketType == 'spot':
ticker = self.spotPublicGetTicker24hr(self.extend(request, query))
#
# {
# "symbol": "BTCUSDT",
# "priceChange": "184.34",
# "priceChangePercent": "0.00400048",
# "prevClosePrice": "46079.37",
# "lastPrice": "46263.71",
# "lastQty": "",
# "bidPrice": "46260.38",
# "bidQty": "",
# "askPrice": "46260.41",
# "askQty": "",
# "openPrice": "46079.37",
# "highPrice": "47550.01",
# "lowPrice": "45555.5",
# "volume": "1732.461487",
# "quoteVolume": null,
# "openTime": 1641349500000,
# "closeTime": 1641349582808,
# "count": null
# }
#
elif marketType == 'swap':
response = self.contractPublicGetTicker(self.extend(request, query))
#
# {
# "success":true,
# "code":0,
# "data":{
# "symbol":"ETH_USDT",
# "lastPrice":3581.3,
# "bid1":3581.25,
# "ask1":3581.5,
# "volume24":4045530,
# "amount24":141331823.5755,
# "holdVol":5832946,
# "lower24Price":3413.4,
# "high24Price":3588.7,
# "riseFallRate":0.0275,
# "riseFallValue":95.95,
# "indexPrice":3580.7852,
# "fairPrice":3581.08,
# "fundingRate":0.000063,
# "maxBidPrice":3938.85,
# "minAskPrice":3222.7,
# "timestamp":1634162885016
# }
# }
#
ticker = self.safe_value(response, 'data', {})
        # for a single-symbol request the returned structure is already a singular object for both spot & swap, so no extra wrapping is needed here
        return self.parse_ticker(ticker, market)
def parse_ticker(self, ticker, market=None):
marketId = self.safe_string(ticker, 'symbol')
market = self.safe_market(marketId, market)
timestamp = None
bid = None
ask = None
bidVolume = None
askVolume = None
baseVolume = None
quoteVolume = None
open = None
high = None
low = None
changePcnt = None
changeValue = None
prevClose = None
isSwap = self.safe_value(market, 'swap')
# if swap
if isSwap or ('timestamp' in ticker):
#
# {
# "symbol":"ETH_USDT",
# "lastPrice":3581.3,
# "bid1":3581.25,
# "ask1":3581.5,
# "volume24":4045530,
# "amount24":141331823.5755,
# "holdVol":5832946,
# "lower24Price":3413.4,
# "high24Price":3588.7,
# "riseFallRate":0.0275,
# "riseFallValue":95.95,
# "indexPrice":3580.7852,
# "fairPrice":3581.08,
# "fundingRate":0.000063,
# "maxBidPrice":3938.85,
# "minAskPrice":3222.7,
# "timestamp":1634162885016
# }
#
timestamp = self.safe_integer(ticker, 'timestamp')
bid = self.safe_number(ticker, 'bid1')
ask = self.safe_number(ticker, 'ask1')
baseVolume = self.safe_string(ticker, 'volume24')
quoteVolume = self.safe_string(ticker, 'amount24')
high = self.safe_number(ticker, 'high24Price')
low = self.safe_number(ticker, 'lower24Price')
changeValue = self.safe_string(ticker, 'riseFallValue')
changePcnt = self.safe_string(ticker, 'riseFallRate')
changePcnt = self.parse_number(Precise.string_mul(changePcnt, '100'))
else:
#
# {
# "symbol": "BTCUSDT",
# "priceChange": "184.34",
# "priceChangePercent": "0.00400048",
# "prevClosePrice": "46079.37",
# "lastPrice": "46263.71",
# "lastQty": "",
# "bidPrice": "46260.38",
# "bidQty": "",
# "askPrice": "46260.41",
# "askQty": "",
# "openPrice": "46079.37",
# "highPrice": "47550.01",
# "lowPrice": "45555.5",
# "volume": "1732.461487",
# "quoteVolume": null,
# "openTime": 1641349500000,
# "closeTime": 1641349582808,
# "count": null
# }
#
timestamp = self.safe_integer(ticker, 'closeTime')
bid = self.safe_number(ticker, 'bidPrice')
ask = self.safe_number(ticker, 'askPrice')
bidVolume = self.safe_number(ticker, 'bidQty')
askVolume = self.safe_number(ticker, 'askQty')
if bidVolume == 0:
bidVolume = None
if askVolume == 0:
askVolume = None
baseVolume = self.safe_string(ticker, 'volume')
quoteVolume = self.safe_string(ticker, 'quoteVolume')
open = self.safe_string(ticker, 'openPrice')
high = self.safe_number(ticker, 'highPrice')
low = self.safe_number(ticker, 'lowPrice')
prevClose = self.safe_string(ticker, 'prevClosePrice')
changeValue = self.safe_string(ticker, 'priceChange')
changePcnt = self.safe_string(ticker, 'priceChangePercent')
changePcnt = self.parse_number(Precise.string_mul(changePcnt, '100'))
return self.safe_ticker({
'symbol': market['symbol'],
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'open': open,
'high': high,
'low': low,
'close': self.safe_string(ticker, 'lastPrice'),
'bid': bid,
'bidVolume': bidVolume,
'ask': ask,
'askVolume': askVolume,
'vwap': None,
'previousClose': prevClose,
'change': changeValue,
'percentage': changePcnt,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
def fetch_bids_asks(self, symbols=None, params={}):
"""
fetches the bid and ask price and volume for multiple markets
:param [str]|None symbols: unified symbols of the markets to fetch the bids and asks for, all markets are returned if not assigned
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
self.load_markets()
market = None
isSingularMarket = False
if symbols is not None:
length = len(symbols)
isSingularMarket = length == 1
market = self.market(symbols[0])
marketType, query = self.handle_market_type_and_params('fetchBidsAsks', market, params)
tickers = None
if marketType == 'spot':
tickers = self.spotPublicGetTickerBookTicker(query)
#
# [
# {
# "symbol": "AEUSDT",
# "bidPrice": "0.11001",
# "bidQty": "115.59",
# "askPrice": "0.11127",
# "askQty": "215.48"
# },
# ]
#
elif marketType == 'swap':
raise NotSupported(self.id + ' fetchBidsAsks() is not available for ' + marketType + ' markets')
        # when it's a single-symbol request, the returned structure is different(a singular object) for both spot & swap, thus we need to wrap it inside an array
if isSingularMarket:
tickers = [tickers]
return self.parse_tickers(tickers, symbols)
def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
        :param float|None price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
self.load_markets()
market = self.market(symbol)
if market['spot']:
return self.create_spot_order(market, type, side, amount, price, params)
elif market['swap']:
return self.create_swap_order(market, type, side, amount, price, params)
def create_spot_order(self, market, type, side, amount, price=None, params={}):
symbol = market['symbol']
orderSide = 'BUY' if (side == 'buy') else 'SELL'
request = {
'symbol': market['id'],
'side': orderSide,
'type': type.upper(),
}
if orderSide == 'BUY' and type == 'market':
quoteOrderQty = self.safe_number(params, 'quoteOrderQty')
if quoteOrderQty is not None:
amount = quoteOrderQty
elif self.options['createMarketBuyOrderRequiresPrice']:
if price is None:
raise InvalidOrder(self.id + " createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False to supply the cost in the amount argument(the exchange-specific behaviour)")
else:
amount = amount * price
request['quoteOrderQty'] = amount
else:
request['quantity'] = self.amount_to_precision(symbol, amount)
if price is not None:
request['price'] = self.price_to_precision(symbol, price)
clientOrderId = self.safe_string(params, 'clientOrderId')
if clientOrderId is not None:
request['newClientOrderId'] = clientOrderId
params = self.omit(params, ['type', 'clientOrderId'])
response = self.spotPrivatePostOrder(self.extend(request, params))
#
# spot
#
# {
# "symbol": "BTCUSDT",
# "orderId": "123738410679123456",
# "orderListId": -1
# }
#
return self.extend(self.parse_order(response, market), {
'side': side,
'type': type,
'price': price,
'amount': amount,
})
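    # A commented sketch of the market-buy cost handling above(illustrative numbers): with the
    # default options['createMarketBuyOrderRequiresPrice'] = True the cost is derived as
    # amount * price and sent as 'quoteOrderQty'; alternatively the cost can be supplied directly:
    #
    #     exchange.create_order('BTC/USDT', 'market', 'buy', 0.001, 30000)                        # cost = 30 USDT
    #     exchange.create_order('BTC/USDT', 'market', 'buy', None, None, {'quoteOrderQty': 30})   # explicit cost
    #     exchange.options['createMarketBuyOrderRequiresPrice'] = False
    #     exchange.create_order('BTC/USDT', 'market', 'buy', 30)                                  # amount is the cost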
def create_swap_order(self, market, type, side, amount, price=None, params={}):
self.load_markets()
symbol = market['symbol']
unavailableContracts = self.safe_value(self.options, 'unavailableContracts', {})
        isContractUnavailable = self.safe_value(unavailableContracts, symbol, False)
        if isContractUnavailable:
            raise NotSupported(self.id + ' createSwapOrder() does not yet support this symbol: ' + symbol)
openType = None
marginType = self.safe_string_lower(params, 'margin')
if marginType is not None:
if marginType == 'cross':
openType = 2
elif marginType == 'isolated':
openType = 1
else:
raise ArgumentsRequired(self.id + ' createSwapOrder() margin parameter should be either "cross" or "isolated"')
else:
openType = self.safe_integer(params, 'openType', 2) # defaulting to cross margin
if (type != 'limit') and (type != 'market') and (type != 1) and (type != 2) and (type != 3) and (type != 4) and (type != 5) and (type != 6):
            raise InvalidOrder(self.id + ' createSwapOrder() order type must be either limit, market, or 1 for limit orders, 2 for post-only orders, 3 for IOC orders, 4 for FOK orders, 5 for market orders or 6 to convert market price to current price')
postOnly = self.safe_value(params, 'postOnly', False)
if postOnly:
type = 2
elif type == 'limit':
type = 1
elif type == 'market':
type = 6
# TODO: side not unified
if (side != 1) and (side != 2) and (side != 3) and (side != 4):
raise InvalidOrder(self.id + ' createSwapOrder() order side must be 1 open long, 2 close short, 3 open short or 4 close long')
request = {
'symbol': market['id'],
# 'price': float(self.price_to_precision(symbol, price)),
'vol': float(self.amount_to_precision(symbol, amount)),
# 'leverage': int, # required for isolated margin
'side': side, # 1 open long, 2 close short, 3 open short, 4 close long
#
# supported order types
#
# 1 limit
# 2 post only maker(PO)
# 3 transact or cancel instantly(IOC)
# 4 transact completely or cancel completely(FOK)
# 5 market orders
# 6 convert market price to current price
#
'type': type,
'openType': openType, # 1 isolated, 2 cross
            # 'positionId': 1394650, # long, filling in this parameter when closing a position is recommended
# 'externalOid': clientOrderId,
# 'triggerPrice': 10.0, # Required for trigger order
# 'triggerType': 1, # Required for trigger order 1: more than or equal, 2: less than or equal
# 'executeCycle': 1, # Required for trigger order 1: 24 hours,2: 7 days
# 'trend': 1, # Required for trigger order 1: latest price, 2: fair price, 3: index price
# 'orderType': 1, # Required for trigger order 1: limit order,2:Post Only Maker,3: close or cancel instantly ,4: close or cancel completely,5: Market order
}
method = 'contractPrivatePostOrderSubmit'
stopPrice = self.safe_number_2(params, 'triggerPrice', 'stopPrice')
params = self.omit(params, ['stopPrice', 'triggerPrice'])
if stopPrice:
method = 'contractPrivatePostPlanorderPlace'
request['triggerPrice'] = self.price_to_precision(symbol, stopPrice)
request['triggerType'] = self.safe_integer(params, 'triggerType', 1)
request['executeCycle'] = self.safe_integer(params, 'executeCycle', 1)
request['trend'] = self.safe_integer(params, 'trend', 1)
request['orderType'] = self.safe_integer(params, 'orderType', 1)
if (type != 5) and (type != 6) and (type != 'market'):
request['price'] = float(self.price_to_precision(symbol, price))
if openType == 1:
leverage = self.safe_integer(params, 'leverage')
if leverage is None:
raise ArgumentsRequired(self.id + ' createSwapOrder() requires a leverage parameter for isolated margin orders')
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'externalOid')
if clientOrderId is not None:
request['externalOid'] = clientOrderId
params = self.omit(params, ['clientOrderId', 'externalOid', 'postOnly'])
response = getattr(self, method)(self.extend(request, params))
#
# Swap
# {"code":200,"data":"2ff3163e8617443cb9c6fc19d42b1ca4"}
#
# Trigger
# {"success":true,"code":0,"data":259208506303929856}
#
data = self.safe_string(response, 'data')
return self.parse_order(data, market)
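    # A commented sketch of the raw contract parameters accepted above(symbol and numbers are only
    # examples): side is the exchange-specific value(1 open long, 2 close short, 3 open short,
    # 4 close long), 'margin': 'isolated' maps to openType 1 and then requires 'leverage', and a
    # 'stopPrice'/'triggerPrice' reroutes the request to the trigger(planorder) endpoint:
    #
    #     exchange.create_order('STEPN/USDT:USDT', 'limit', 1, 10, 2.4, {'margin': 'isolated', 'leverage': 5})
    #     exchange.create_order('STEPN/USDT:USDT', 'limit', 1, 10, 2.4, {'stopPrice': 2.5})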
def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
        :param str id: order id
        :param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
data = None
if market['spot']:
clientOrderId = self.safe_string(params, 'clientOrderId')
if clientOrderId is not None:
params = self.omit(params, 'clientOrderId')
request['origClientOrderId'] = clientOrderId
else:
request['orderId'] = id
data = self.spotPrivateGetOrder(self.extend(request, params))
#
# {
# "symbol": "BTCUSDT",
# "orderId": "133734823834147272",
# "orderListId": "-1",
# "clientOrderId": null,
# "price": "30000",
# "origQty": "0.0002",
# "executedQty": "0",
# "cummulativeQuoteQty": "0",
# "status": "CANCELED",
# "timeInForce": null,
# "type": "LIMIT",
# "side": "BUY",
# "stopPrice": null,
# "icebergQty": null,
# "time": "1647667102000",
# "updateTime": "1647708567000",
# "isWorking": True,
# "origQuoteOrderQty": "6"
# }
#
elif market['swap']:
request['order_id'] = id
response = self.contractPrivateGetOrderGetOrderId(self.extend(request, params))
#
# {
# "success": True,
# "code": "0",
# "data": {
# "orderId": "264995729269765120",
# "symbol": "STEPN_USDT",
# "positionId": "0",
# "price": "2.2",
# "vol": "15",
# "leverage": "20",
# "side": "1",
# "category": "1",
# "orderType": "1",
# "dealAvgPrice": "0",
# "dealVol": "0",
# "orderMargin": "2.2528",
# "takerFee": "0",
# "makerFee": "0",
# "profit": "0",
# "feeCurrency": "USDT",
# "openType": "1",
# "state": "2",
# "externalOid": "_m_0e9520c256744d64b942985189026d20",
# "errorCode": "0",
# "usedMargin": "0",
# "createTime": "1648850305236",
# "updateTime": "1648850305245",
# "positionMode": "1"
# }
# }
#
data = self.safe_value(response, 'data')
return self.parse_order(data, market)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetches information on multiple orders made by the user
:param str|None symbol: unified market symbol of the market orders were made in
:param int|None since: the earliest time in ms to fetch orders for
        :param int|None limit: the maximum number of order structures to retrieve
:param dict params: extra parameters specific to the mexc3 api endpoint
        :returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
marketType, query = self.handle_market_type_and_params('fetchOrders', market, params)
if marketType == 'spot':
if symbol is None:
                raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument for spot market')
if since is not None:
request['startTime'] = since
if limit is not None:
request['limit'] = limit
response = self.spotPrivateGetAllOrders(self.extend(request, query))
#
# [
# {
# "symbol": "BTCUSDT",
# "orderId": "133949373632483328",
# "orderListId": "-1",
# "clientOrderId": null,
# "price": "45000",
# "origQty": "0.0002",
# "executedQty": "0",
# "cummulativeQuoteQty": "0",
# "status": "NEW",
# "timeInForce": null,
# "type": "LIMIT",
# "side": "SELL",
# "stopPrice": null,
# "icebergQty": null,
# "time": "1647718255000",
# "updateTime": "1647718255000",
# "isWorking": True,
# "origQuoteOrderQty": "9"
# },
# ]
#
return self.parse_orders(response, market, since, limit)
else:
if since is not None:
request['start_time'] = since
end = self.safe_integer(params, 'end_time')
if end is None:
request['end_time'] = self.sum(since, self.options['maxTimeTillEnd'])
if limit is not None:
request['page_size'] = limit
            method = self.safe_string(self.options, 'fetchOrders', 'contractPrivateGetOrderListHistoryOrders') # contractPrivateGetOrderListHistoryOrders, contractPrivateGetPlanorderListOrders
method = self.safe_string(query, 'method', method)
ordersOfRegular = []
ordersOfTrigger = []
if method == 'contractPrivateGetOrderListHistoryOrders':
response = self.contractPrivateGetOrderListHistoryOrders(self.extend(request, query))
#
# {
# "success": True,
# "code": "0",
# "data": [
# {
# "orderId": "265230764677709315",
# "symbol": "STEPN_USDT",
# "positionId": "0",
# "price": "2.1",
# "vol": "102",
# "leverage": "20",
# "side": "1",
# "category": "1",
# "orderType": "1",
# "dealAvgPrice": "0",
# "dealVol": "0",
# "orderMargin": "10.96704",
# "takerFee": "0",
# "makerFee": "0",
# "profit": "0",
# "feeCurrency": "USDT",
# "openType": "1",
# "state": "2",
# "externalOid": "_m_7e42f8df6b324c869e4e200397e2b00f",
# "errorCode": "0",
# "usedMargin": "0",
# "createTime": "1648906342000",
# "updateTime": "1648906342000",
# "positionMode": "1"
# },
# ]
# }
#
ordersOfRegular = self.safe_value(response, 'data')
else:
# the Planorder endpoints work not only for stop-market orders, but also for stop-limit orders that were supposed to have a separate endpoint
response = self.contractPrivateGetPlanorderListOrders(self.extend(request, query))
#
# {
# "success": True,
# "code": "0",
# "data": [
# {
# "symbol": "STEPN_USDT",
# "leverage": "20",
# "side": "1",
# "vol": "13",
# "openType": "1",
# "state": "1",
# "orderType": "1",
# "errorCode": "0",
# "createTime": "1648984276000",
# "updateTime": "1648984276000",
# "id": "265557643326564352",
# "triggerType": "1",
# "triggerPrice": "3",
# "price": "2.9", # not present in stop-market, but in stop-limit order
# "executeCycle": "87600",
# "trend": "1",
# },
# ]
# }
#
ordersOfTrigger = self.safe_value(response, 'data')
merged = self.array_concat(ordersOfTrigger, ordersOfRegular)
return self.parse_orders(merged, market, since, limit, params)
def fetch_orders_by_ids(self, ids, symbol=None, params={}):
self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
marketType, query = self.handle_market_type_and_params('fetchOrdersByIds', market, params)
if marketType == 'spot':
raise BadRequest(self.id + ' fetchOrdersByIds() is not supported for ' + marketType)
else:
request['order_ids'] = ','.join(ids)
response = self.contractPrivateGetOrderBatchQuery(self.extend(request, query))
#
# {
# "success": True,
# "code": "0",
# "data": [
# {
# "orderId": "265230764677709315",
# "symbol": "STEPN_USDT",
# "positionId": "0",
# "price": "2.1",
# "vol": "102",
# "leverage": "20",
# "side": "1",
# "category": "1",
# "orderType": "1",
# "dealAvgPrice": "0",
# "dealVol": "0",
# "orderMargin": "10.96704",
# "takerFee": "0",
# "makerFee": "0",
# "profit": "0",
# "feeCurrency": "USDT",
# "openType": "1",
# "state": "2",
# "externalOid": "_m_7e42f8df6b324c869e4e200397e2b00f",
# "errorCode": "0",
# "usedMargin": "0",
# "createTime": "1648906342000",
# "updateTime": "1648906342000",
# "positionMode": "1"
# }
# ]
# }
#
data = self.safe_value(response, 'data')
return self.parse_orders(data, market)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
        :param int|None limit: the maximum number of open order structures to retrieve
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
marketType, query = self.handle_market_type_and_params('fetchOpenOrders', market, params)
if marketType == 'spot':
if symbol is None:
                raise ArgumentsRequired(self.id + ' fetchOpenOrders() requires a symbol argument for spot market')
response = self.spotPrivateGetOpenOrders(self.extend(request, query))
#
# spot
#
# [
# {
# "symbol": "BTCUSDT",
# "orderId": "133949373632483328",
# "orderListId": "-1",
# "clientOrderId": "",
# "price": "45000",
# "origQty": "0.0002",
# "executedQty": "0",
# "cummulativeQuoteQty": "0",
# "status": "NEW",
# "timeInForce": null,
# "type": "LIMIT",
# "side": "SELL",
# "stopPrice": null,
# "icebergQty": null,
# "time": "1647718255199",
# "updateTime": null,
# "isWorking": True,
# "origQuoteOrderQty": "9"
# }
# ]
#
return self.parse_orders(response, market, since, limit)
else:
            # TO_DO: another possible way is through: open_orders/{symbol}, but as they have the same ratelimits and less granularity, historical orders seem more convenient, as they support more params(however, theoretically, the open-orders endpoint might be slightly faster)
return self.fetch_orders_by_state(2, symbol, since, limit, params)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetches information on multiple closed orders made by the user
:param str|None symbol: unified market symbol of the market orders were made in
:param int|None since: the earliest time in ms to fetch orders for
        :param int|None limit: the maximum number of order structures to retrieve
:param dict params: extra parameters specific to the mexc3 api endpoint
        :returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
return self.fetch_orders_by_state(3, symbol, since, limit, params)
def fetch_canceled_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetches information on multiple canceled orders made by the user
:param str|None symbol: unified market symbol of the market orders were made in
:param int|None since: timestamp in ms of the earliest order, default is None
:param int|None limit: max number of orders to return, default is None
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
return self.fetch_orders_by_state(4, symbol, since, limit, params)
def fetch_orders_by_state(self, state, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
        marketType, params = self.handle_market_type_and_params('fetchOrdersByState', market, params)
if marketType == 'spot':
raise BadRequest(self.id + ' fetchOrdersByState() is not supported for ' + marketType)
else:
params['states'] = state
return self.fetch_orders(symbol, since, limit, params)
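    # The numeric contract states used above line up with parse_order_status() further below:
    # 2 -> 'open'(fetchOpenOrders), 3 -> 'closed'(fetchClosedOrders), 4 -> 'canceled'(fetchCanceledOrders).
    # A commented sketch(the swap symbol is only an example):
    #
    #     open_orders = exchange.fetch_open_orders('STEPN/USDT:USDT')       # contract states=2
    #     closed_orders = exchange.fetch_closed_orders('STEPN/USDT:USDT')   # contract states=3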
def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str|None symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
marketType, query = self.handle_market_type_and_params('cancelOrder', market, params)
data = None
if marketType == 'spot':
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
request = {
'symbol': market['id'],
}
clientOrderId = self.safe_string(params, 'clientOrderId')
if clientOrderId is not None:
params = self.omit(query, 'clientOrderId')
request['origClientOrderId'] = clientOrderId
else:
request['orderId'] = id
data = self.spotPrivateDeleteOrder(self.extend(request, params))
#
# {
# "symbol": "BTCUSDT",
# "orderId": "133734823834447872",
# "price": "30000",
# "origQty": "0.0002",
# "type": "LIMIT",
# "side": "BUY"
# }
#
else:
# TODO: PlanorderCancel endpoint has bug atm. waiting for fix.
method = self.safe_string(self.options, 'cancelOrder', 'contractPrivatePostOrderCancel') # contractPrivatePostOrderCancel, contractPrivatePostPlanorderCancel
method = self.safe_string(query, 'method', method)
response = getattr(self, method)([id]) # the request cannot be changed or extended. This is the only way to send.
#
# {
# "success": True,
# "code": "0",
# "data": [
# {
# "orderId": "264995729269765120",
# "errorCode": "0", # if already canceled: "2041"; if doesn't exist: "2040"
# "errorMsg": "success", # if already canceled: "order state cannot be cancelled"; if doesn't exist: "order not exist"
# }
# ]
# }
#
data = self.safe_value(response, 'data')
order = self.safe_value(data, 0)
errorMsg = self.safe_value(order, 'errorMsg', '')
if errorMsg != 'success':
raise InvalidOrder(self.id + ' cancelOrder() the order with id ' + id + ' cannot be cancelled: ' + errorMsg)
return self.parse_order(data, market)
def cancel_orders(self, ids, symbol=None, params={}):
"""
cancel multiple orders
:param [str] ids: order ids
:param str|None symbol: unified market symbol, default is None
:param dict params: extra parameters specific to the mexc3 api endpoint
        :returns dict: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
self.load_markets()
        market = self.market(symbol) if symbol is not None else None
        marketType, params = self.handle_market_type_and_params('cancelOrders', market, params)
if marketType == 'spot':
raise BadRequest(self.id + ' cancelOrders() is not supported for ' + marketType)
else:
            response = self.contractPrivatePostOrderCancel(ids) # the request cannot be changed or extended. This is the only way to send it.
#
# {
# "success": True,
# "code": "0",
# "data": [
# {
# "orderId": "264995729269765120",
# "errorCode": "0", # if already canceled: "2041"
# "errorMsg": "success", # if already canceled: "order state cannot be cancelled"
# },
# ]
# }
#
data = self.safe_value(response, 'data')
return self.parse_orders(data, market)
def cancel_all_orders(self, symbol=None, params={}):
"""
cancel all open orders
        :param str|None symbol: unified market symbol, only orders in the market of this symbol are cancelled when symbol is not None
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
self.load_markets()
        market = self.market(symbol) if symbol is not None else None
request = {}
marketType, query = self.handle_market_type_and_params('cancelAllOrders', market, params)
if marketType == 'spot':
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelAllOrders() requires a symbol argument on spot')
request['symbol'] = market['id']
response = self.spotPrivateDeleteOpenOrders(self.extend(request, query))
#
# [
# {
# "symbol": "BTCUSDT",
# "orderId": "133926492139692032",
# "price": "30000",
# "origQty": "0.0002",
# "type": "LIMIT",
# "side": "BUY"
# },
# ]
#
return self.parse_orders(response, market)
else:
if symbol is not None:
request['symbol'] = market['id']
# method can be either: contractPrivatePostOrderCancelAll or contractPrivatePostPlanorderCancelAll
            # the Planorder endpoints work not only for stop-market orders but also for stop-limit orders that are supposed to have a separate endpoint
method = self.safe_string(self.options, 'cancelAllOrders', 'contractPrivatePostOrderCancelAll')
method = self.safe_string(query, 'method', method)
response = getattr(self, method)(self.extend(request, query))
#
# {
# "success": True,
# "code": "0"
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_orders(data, market)
def parse_order(self, order, market=None):
#
# spot: createOrder
#
# {"symbol": "BTCUSDT", "orderId": "123738410679123456", "orderListId": -1}
#
# spot: cancelOrder, cancelAllOrders
#
# {
# "symbol": "BTCUSDT",
# "orderId": "133926441921286144",
# "price": "30000",
# "origQty": "0.0002",
# "type": "LIMIT",
# "side": "BUY"
# }
#
# spot: fetchOrder, fetchOpenOrders, fetchOrders
# {
# "symbol": "BTCUSDT",
# "orderId": "133734823834147272",
# "orderListId": "-1",
# "clientOrderId": null,
# "price": "30000",
# "origQty": "0.0002",
# "executedQty": "0",
# "cummulativeQuoteQty": "0",
# "status": "CANCELED",
# "timeInForce": null,
# "type": "LIMIT",
# "side": "BUY",
# "stopPrice": null,
# "icebergQty": null,
# "time": "1647667102000",
# "updateTime": "1647708567000",
# "isWorking": True,
# "origQuoteOrderQty": "6"
# }
#
# swap: createOrder
#
# 2ff3163e8617443cb9c6fc19d42b1ca4
#
# swap: fetchOrder, fetchOrders
#
# regular
# {
# "orderId": "264995729269765120",
# "symbol": "STEPN_USDT",
# "positionId": "0",
# "price": "2.2",
# "vol": "15",
# "leverage": "20",
# "side": "1", # TODO: not unified
# "category": "1",
# "orderType": "1", # TODO: not unified
# "dealAvgPrice": "0",
# "dealVol": "0",
# "orderMargin": "2.2528",
# "takerFee": "0",
# "makerFee": "0",
# "profit": "0",
# "feeCurrency": "USDT",
# "openType": "1",
# "state": "2", # TODO
# "externalOid": "_m_0e9520c256744d64b942985189026d20",
# "errorCode": "0",
# "usedMargin": "0",
# "createTime": "1648850305236",
# "updateTime": "1648850305245",
# "positionMode": "1"
# }
#
# stop
# {
# "id": "265557643326564352",
# "triggerType": "1",
# "triggerPrice": "3",
# "price": "2.9", # not present in stop-market, but in stop-limit order
# "executeCycle": "87600",
# "trend": "1",
# # below keys are same as in regular order structure
# "symbol": "STEPN_USDT",
# "leverage": "20",
# "side": "1",
# "vol": "13",
# "openType": "1",
# "state": "1",
# "orderType": "1",
# "errorCode": "0",
# "createTime": "1648984276000",
# "updateTime": "1648984276000",
# }
#
id = None
if isinstance(order, str):
id = order
else:
id = self.safe_string_2(order, 'orderId', 'id')
marketId = self.safe_string(order, 'symbol')
market = self.safe_market(marketId, market)
timestamp = self.safe_integer_2(order, 'time', 'createTime')
fee = None
feeCurrency = self.safe_string(order, 'feeCurrency')
if feeCurrency is not None:
takerFee = self.safe_string(order, 'takerFee')
makerFee = self.safe_string(order, 'makerFee')
feeSum = Precise.string_add(takerFee, makerFee)
fee = {
'currency': feeCurrency,
'cost': self.parse_number(feeSum),
}
return self.safe_order({
'id': id,
'clientOrderId': self.safe_string(order, 'clientOrderId'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
            'lastTradeTimestamp': None, # TODO: this might be 'updateTime' if order-status is filled, otherwise cancellation time. needs to be checked
'status': self.parse_order_status(self.safe_string_2(order, 'status', 'state')),
'symbol': market['symbol'],
'type': self.parse_order_type(self.safe_string(order, 'type')),
'timeInForce': self.parse_order_time_in_force(self.safe_string(order, 'timeInForce')),
'side': self.parse_order_side(self.safe_string(order, 'side')),
'price': self.safe_number(order, 'price'),
'stopPrice': self.safe_number_2(order, 'stopPrice', 'triggerPrice'),
'average': self.safe_number(order, 'dealAvgPrice'),
'amount': self.safe_number_2(order, 'origQty', 'vol'),
'cost': self.safe_number(order, 'cummulativeQuoteQty'), # 'cummulativeQuoteQty' vs 'origQuoteOrderQty'
'filled': self.safe_number_2(order, 'executedQty', 'dealVol'),
'remaining': None,
'fee': fee,
'trades': None,
'info': order,
}, market)
def parse_order_side(self, status):
statuses = {
'BUY': 'buy',
'SELL': 'sell',
# contracts v1 : TODO
}
return self.safe_string(statuses, status, status)
def parse_order_type(self, status):
statuses = {
'MARKET': 'market',
'LIMIT': 'limit',
'LIMIT_MAKER': 'limit',
}
return self.safe_string(statuses, status, status)
def parse_order_status(self, status):
statuses = {
'NEW': 'open',
'FILLED': 'closed',
'CANCELED': 'canceled',
'PARTIALLY_FILLED': 'open',
'PARTIALLY_CANCELED': 'canceled',
# contracts v1
# '1': 'uninformed', # TODO: wt?
'2': 'open',
'3': 'closed',
'4': 'canceled',
# '5': 'invalid', # TODO: wt?
}
return self.safe_string(statuses, status, status)
def parse_order_time_in_force(self, status):
statuses = {
'GTC': 'GTC',
'FOK': 'FOK',
'IOC': 'IOC',
}
return self.safe_string(statuses, status, status)
def fetch_account_helper(self, type, params):
if type == 'spot':
return self.spotPrivateGetAccount(params)
#
# {
# "makerCommission": "20",
# "takerCommission": "20",
# "buyerCommission": "0",
# "sellerCommission": "0",
# "canTrade": True,
# "canWithdraw": True,
# "canDeposit": True,
# "updateTime": null,
# "accountType": "SPOT",
# "balances": [
# {
# "asset": "BTC",
# "free": "0.002",
# "locked": "0"
# },
# {
# "asset": "USDT",
# "free": "88.120131350620957006",
# "locked": "0"
# },
# ],
# "permissions": [
# "SPOT"
# ]
# }
#
elif type == 'swap':
response = self.contractPrivateGetAccountAssets(params)
#
# {
# "success":true,
# "code":0,
# "data":[
# {
# "currency":"BSV",
# "positionMargin":0,
# "availableBalance":0,
# "cashBalance":0,
# "frozenBalance":0,
# "equity":0,
# "unrealized":0,
# "bonus":0
# },
# ]
# }
#
return self.safe_value(response, 'data')
def fetch_accounts(self, params={}):
"""
fetch all the accounts associated with a profile
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a dictionary of `account structures <https://docs.ccxt.com/en/latest/manual.html#account-structure>` indexed by the account type
"""
        # TODO: are the endpoints below suitable for fetchAccounts?
marketType, query = self.handle_market_type_and_params('fetchAccounts', None, params)
self.load_markets()
response = self.fetch_account_helper(marketType, query)
data = self.safe_value(response, 'balances', [])
result = []
for i in range(0, len(data)):
account = data[i]
currencyId = self.safe_string_2(account, 'asset', 'currency')
code = self.safe_currency_code(currencyId)
result.append({
'id': self.safe_string(account, 'id'),
'type': self.safe_string(account, 'type'),
'code': code,
'info': account,
})
return result
def fetch_trading_fees(self, params={}):
"""
fetch the trading fees for multiple markets
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a dictionary of `fee structures <https://docs.ccxt.com/en/latest/manual.html#fee-structure>` indexed by market symbols
"""
self.load_markets()
response = self.fetch_account_helper('spot', params)
makerFee = self.safe_string(response, 'makerCommission')
takerFee = self.safe_string(response, 'takerCommission')
makerFee = Precise.string_div(makerFee, '1000')
takerFee = Precise.string_div(takerFee, '1000')
result = {}
for i in range(0, len(self.symbols)):
symbol = self.symbols[i]
result[symbol] = {
'symbol': symbol,
'maker': self.parse_number(makerFee),
'taker': self.parse_number(takerFee),
'percentage': True,
'tierBased': False,
'info': response,
}
return result
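    # Worked example of the fee conversion above, using the sample account
    # payload from fetch_account_helper: "makerCommission": "20" and
    # "takerCommission": "20" are each divided by '1000', giving maker = 0.02
    # and taker = 0.02 for every symbol, because the endpoint only reports
    # account-wide commissions rather than per-market fees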
def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
self.load_markets()
marketType, query = self.handle_market_type_and_params('fetchBalance', None, params)
result = {}
response = None
if marketType == 'spot':
response = self.fetch_account_helper('spot', query)
balances = self.safe_value(response, 'balances', [])
for i in range(0, len(balances)):
entry = balances[i]
currencyId = self.safe_string(entry, 'asset')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(entry, 'free')
account['used'] = self.safe_string(entry, 'locked')
result[code] = account
elif marketType == 'swap':
response = self.contractPrivateGetAccountAssets(query)
#
# {
# "success":true,
# "code":0,
# "data":[
# {"currency":"BSV","positionMargin":0,"availableBalance":0,"cashBalance":0,"frozenBalance":0,"equity":0,"unrealized":0,"bonus":0},
# {"currency":"BCH","positionMargin":0,"availableBalance":0,"cashBalance":0,"frozenBalance":0,"equity":0,"unrealized":0,"bonus":0},
# {"currency":"CRV","positionMargin":0,"availableBalance":0,"cashBalance":0,"frozenBalance":0,"equity":0,"unrealized":0,"bonus":0},
# ]
# }
#
data = self.safe_value(response, 'data', [])
for i in range(0, len(data)):
balance = data[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_string(balance, 'availableBalance')
account['used'] = self.safe_string(balance, 'frozenBalance')
result[code] = account
result['info'] = response
return self.safe_balance(result)
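    # Hypothetical usage sketch, assuming an authenticated instance named
    # `exchange`(not defined in this file):
    #
    #     balance = exchange.fetch_balance()               # uses options['defaultType']
    #     swap = exchange.fetch_balance({'type': 'swap'})  # contract assets
    #     print(swap['USDT']['free'], swap['USDT']['used'])
    #
    # the 'type' parameter is resolved by handle_market_type_and_params above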
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
marketType, query = self.handle_market_type_and_params('fetchMyTrades', market, params)
request = {
'symbol': market['id'],
}
trades = None
if marketType == 'spot':
if since is not None:
request['start_time'] = since
if limit is not None:
request['limit'] = limit
trades = self.spotPrivateGetMyTrades(self.extend(request, query))
#
# spot
#
# [
# {
# "symbol": "BTCUSDT",
# "id": "133948532984922113",
# "orderId": "133948532531949568",
# "orderListId": "-1",
# "price": "41995.51",
# "qty": "0.0002",
# "quoteQty": "8.399102",
# "commission": "0.016798204",
# "commissionAsset": "USDT",
# "time": "1647718055000",
# "isBuyer": True,
# "isMaker": False,
# "isBestMatch": True
# }
# ]
#
else:
if since is not None:
request['start_time'] = since
end = self.safe_integer(params, 'end_time')
if end is None:
request['end_time'] = self.sum(since, self.options['maxTimeTillEnd'])
if limit is not None:
request['page_size'] = limit
response = self.contractPrivateGetOrderListOrderDeals(self.extend(request, query))
#
# {
# "success": True,
# "code": "0",
# "data": [
# {
# "id": "299444585",
# "symbol": "STEPN_USDT",
# "side": "1",
# "vol": "1",
# "price": "2.45455",
# "feeCurrency": "USDT",
# "fee": "0.00147273",
# "timestamp": "1648924557000",
# "profit": "0",
# "category": "1",
# "orderId": "265307163526610432",
# "positionMode": "1",
# "taker": True
# }
# ]
# }
#
trades = self.safe_value(response, 'data')
return self.parse_trades(trades, market, since, limit)
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
"""
fetch all the trades made from a single order
:param str id: order id
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades to retrieve
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
marketType, query = self.handle_market_type_and_params('fetchOrderTrades', market, params)
trades = None
if marketType == 'spot':
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrderTrades() requires a symbol argument')
request['symbol'] = market['id']
request['orderId'] = id
trades = self.spotPrivateGetMyTrades(self.extend(request, query))
#
# spot
#
# [
# {
# "symbol": "BTCUSDT",
# "id": "133948532984922113",
# "orderId": "133948532531949568",
# "orderListId": "-1",
# "price": "41995.51",
# "qty": "0.0002",
# "quoteQty": "8.399102",
# "commission": "0.016798204",
# "commissionAsset": "USDT",
# "time": "1647718055000",
# "isBuyer": True,
# "isMaker": False,
# "isBestMatch": True
# }
# ]
#
else:
request['order_id'] = id
response = self.contractPrivateGetOrderDealDetailsOrderId(self.extend(request, query))
#
# {
# "success": True,
# "code": "0",
# "data": [
# {
# "id": "299444585",
# "symbol": "STEPN_USDT",
# "side": "1",
# "vol": "1",
# "price": "2.45455",
# "feeCurrency": "USDT",
# "fee": "0.00147273",
# "timestamp": "1648924557000",
# "profit": "0",
# "category": "1",
# "orderId": "265307163526610432",
# "positionMode": "1",
# "taker": True
# }
# ]
# }
#
trades = self.safe_value(response, 'data')
return self.parse_trades(trades, market, since, limit, query)
def modify_margin_helper(self, symbol, amount, addOrReduce, params={}):
positionId = self.safe_integer(params, 'positionId')
if positionId is None:
raise ArgumentsRequired(self.id + ' modifyMarginHelper() requires a positionId parameter')
self.load_markets()
request = {
'positionId': positionId,
'amount': amount,
'type': addOrReduce,
}
response = self.contractPrivatePostPositionChangeMargin(self.extend(request, params))
#
# {
# "success": True,
# "code": 0
# }
return response
def reduce_margin(self, symbol, amount, params={}):
"""
remove margin from a position
:param str symbol: unified market symbol
:param float amount: the amount of margin to remove
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a `margin structure <https://docs.ccxt.com/en/latest/manual.html#reduce-margin-structure>`
"""
return self.modify_margin_helper(symbol, amount, 'SUB', params)
def add_margin(self, symbol, amount, params={}):
"""
add margin
:param str symbol: unified market symbol
:param float amount: amount of margin to add
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a `margin structure <https://docs.ccxt.com/en/latest/manual.html#add-margin-structure>`
"""
return self.modify_margin_helper(symbol, amount, 'ADD', params)
def set_leverage(self, leverage, symbol=None, params={}):
"""
set the level of leverage for a market
:param float leverage: the rate of leverage
:param str|None symbol: unified market symbol
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: response from the exchange
"""
self.load_markets()
request = {
'leverage': leverage,
}
positionId = self.safe_integer(params, 'positionId')
if positionId is None:
openType = self.safe_number(params, 'openType') # 1 or 2
positionType = self.safe_number(params, 'positionType') # 1 or 2
market = self.market(symbol) if (symbol is not None) else None
if (openType is None) or (positionType is None) or (market is None):
raise ArgumentsRequired(self.id + ' setLeverage() requires a positionId parameter or a symbol argument with openType and positionType parameters, use openType 1 or 2 for isolated or cross margin respectively, use positionType 1 or 2 for long or short positions')
else:
request['openType'] = openType
                request['symbol'] = market['id']
request['positionType'] = positionType
else:
request['positionId'] = positionId
return self.contractPrivatePostPositionChangeLeverage(self.extend(request, params))
def fetch_funding_history(self, symbol=None, since=None, limit=None, params={}):
"""
        fetch the history of funding payments paid and received on this account
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch funding history for
:param int|None limit: the maximum number of funding history structures to retrieve
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a `funding history structure <https://docs.ccxt.com/en/latest/manual.html#funding-history-structure>`
"""
self.load_markets()
market = None
request = {
# 'symbol': market['id'],
# 'position_id': positionId,
# 'page_num': 1,
# 'page_size': limit, # default 20, max 100
}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if limit is not None:
request['page_size'] = limit
response = self.contractPrivateGetPositionFundingRecords(self.extend(request, params))
#
# {
# "success": True,
# "code": 0,
# "data": {
# "pageSize": 20,
# "totalCount": 2,
# "totalPage": 1,
# "currentPage": 1,
# "resultList": [
# {
# "id": 7423910,
# "symbol": "BTC_USDT",
# "positionType": 1,
# "positionValue": 29.30024,
# "funding": 0.00076180624,
# "rate": -0.000026,
# "settleTime": 1643299200000
# },
# {
# "id": 7416473,
# "symbol": "BTC_USDT",
# "positionType": 1,
# "positionValue": 28.9188,
# "funding": 0.0014748588,
# "rate": -0.000051,
# "settleTime": 1643270400000
# }
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
resultList = self.safe_value(data, 'resultList', [])
result = []
for i in range(0, len(resultList)):
entry = resultList[i]
            timestamp = self.safe_integer(entry, 'settleTime')
result.append({
'info': entry,
'symbol': symbol,
'code': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'id': self.safe_number(entry, 'id'),
'amount': self.safe_number(entry, 'funding'),
})
return result
def parse_funding_rate(self, contract, market=None):
#
# {
# "symbol": "BTC_USDT",
# "fundingRate": 0.000014,
# "maxFundingRate": 0.003,
# "minFundingRate": -0.003,
# "collectCycle": 8,
# "nextSettleTime": 1643241600000,
# "timestamp": 1643240373359
# }
#
nextFundingRate = self.safe_number(contract, 'fundingRate')
nextFundingTimestamp = self.safe_integer(contract, 'nextSettleTime')
marketId = self.safe_string(contract, 'symbol')
symbol = self.safe_symbol(marketId, market)
timestamp = self.safe_integer(contract, 'timestamp')
datetime = self.iso8601(timestamp)
return {
'info': contract,
'symbol': symbol,
'markPrice': None,
'indexPrice': None,
'interestRate': None,
'estimatedSettlePrice': None,
'timestamp': timestamp,
'datetime': datetime,
'fundingRate': None,
'fundingTimestamp': None,
'fundingDatetime': None,
'nextFundingRate': nextFundingRate,
'nextFundingTimestamp': nextFundingTimestamp,
'nextFundingDatetime': self.iso8601(nextFundingTimestamp),
'previousFundingRate': None,
'previousFundingTimestamp': None,
'previousFundingDatetime': None,
}
def fetch_funding_rate(self, symbol, params={}):
"""
fetch the current funding rate
:param str symbol: unified market symbol
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a `funding rate structure <https://docs.ccxt.com/en/latest/manual.html#funding-rate-structure>`
"""
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.contractPublicGetFundingRateSymbol(self.extend(request, params))
#
# {
# "success": True,
# "code": 0,
# "data": {
# "symbol": "BTC_USDT",
# "fundingRate": 0.000014,
# "maxFundingRate": 0.003,
# "minFundingRate": -0.003,
# "collectCycle": 8,
# "nextSettleTime": 1643241600000,
# "timestamp": 1643240373359
# }
# }
#
result = self.safe_value(response, 'data', {})
return self.parse_funding_rate(result, market)
def fetch_funding_rate_history(self, symbol=None, since=None, limit=None, params={}):
"""
fetches historical funding rate prices
:param str|None symbol: unified symbol of the market to fetch the funding rate history for
:param int|None since: not used by mexc, but filtered internally by ccxt
:param int|None limit: mexc limit is page_size default 20, maximum is 100
:param dict params: extra parameters specific to the mexc api endpoint
:returns [dict]: a list of `funding rate structures <https://docs.ccxt.com/en/latest/manual.html?#funding-rate-history-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchFundingRateHistory() requires a symbol argument')
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
# 'page_size': limit, # optional
# 'page_num': 1, # optional, current page number, default is 1
}
if limit is not None:
request['page_size'] = limit
response = self.contractPublicGetFundingRateHistory(self.extend(request, params))
#
# {
# "success": True,
# "code": 0,
# "data": {
# "pageSize": 2,
# "totalCount": 21,
# "totalPage": 11,
# "currentPage": 1,
# "resultList": [
# {
# "symbol": "BTC_USDT",
# "fundingRate": 0.000266,
# "settleTime": 1609804800000
# },
# {
# "symbol": "BTC_USDT",
# "fundingRate": 0.00029,
# "settleTime": 1609776000000
# }
# ]
# }
# }
#
data = self.safe_value(response, 'data')
result = self.safe_value(data, 'resultList', [])
rates = []
for i in range(0, len(result)):
entry = result[i]
marketId = self.safe_string(entry, 'symbol')
symbol = self.safe_symbol(marketId)
            timestamp = self.safe_integer(entry, 'settleTime')
rates.append({
'info': entry,
'symbol': symbol,
'fundingRate': self.safe_number(entry, 'fundingRate'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
})
sorted = self.sort_by(rates, 'timestamp')
return self.filter_by_symbol_since_limit(sorted, market['symbol'], since, limit)
def fetch_leverage_tiers(self, symbols=None, params={}):
"""
retrieve information on the maximum leverage, and maintenance margin for trades of varying trade sizes
:param [str]|None symbols: list of unified market symbols
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a dictionary of `leverage tiers structures <https://docs.ccxt.com/en/latest/manual.html#leverage-tiers-structure>`, indexed by market symbols
"""
self.load_markets()
response = self.contractPublicGetDetail(params)
#
# {
# "success":true,
# "code":0,
# "data":[
# {
# "symbol": "BTC_USDT",
# "displayName": "BTC_USDT永续",
# "displayNameEn": "BTC_USDT SWAP",
# "positionOpenType": 3,
# "baseCoin": "BTC",
# "quoteCoin": "USDT",
# "settleCoin": "USDT",
# "contractSize": 0.0001,
# "minLeverage": 1,
# "maxLeverage": 125,
# "priceScale": 2,
# "volScale": 0,
# "amountScale": 4,
# "priceUnit": 0.5,
# "volUnit": 1,
# "minVol": 1,
# "maxVol": 1000000,
# "bidLimitPriceRate": 0.1,
# "askLimitPriceRate": 0.1,
# "takerFeeRate": 0.0006,
# "makerFeeRate": 0.0002,
# "maintenanceMarginRate": 0.004,
# "initialMarginRate": 0.008,
# "riskBaseVol": 10000,
# "riskIncrVol": 200000,
# "riskIncrMmr": 0.004,
# "riskIncrImr": 0.004,
# "riskLevelLimit": 5,
# "priceCoefficientVariation": 0.1,
# "indexOrigin": ["BINANCE","GATEIO","HUOBI","MXC"],
# "state": 0, # 0 enabled, 1 delivery, 2 completed, 3 offline, 4 pause
# "isNew": False,
# "isHot": True,
# "isHidden": False
# },
# ...
# ]
# }
#
data = self.safe_value(response, 'data')
return self.parse_leverage_tiers(data, symbols, 'symbol')
def parse_market_leverage_tiers(self, info, market):
"""
@param info: Exchange response for 1 market
{
"symbol": "BTC_USDT",
"displayName": "BTC_USDT永续",
"displayNameEn": "BTC_USDT SWAP",
"positionOpenType": 3,
"baseCoin": "BTC",
"quoteCoin": "USDT",
"settleCoin": "USDT",
"contractSize": 0.0001,
"minLeverage": 1,
"maxLeverage": 125,
"priceScale": 2,
"volScale": 0,
"amountScale": 4,
"priceUnit": 0.5,
"volUnit": 1,
"minVol": 1,
"maxVol": 1000000,
"bidLimitPriceRate": 0.1,
"askLimitPriceRate": 0.1,
"takerFeeRate": 0.0006,
"makerFeeRate": 0.0002,
"maintenanceMarginRate": 0.004,
"initialMarginRate": 0.008,
"riskBaseVol": 10000,
"riskIncrVol": 200000,
"riskIncrMmr": 0.004,
"riskIncrImr": 0.004,
"riskLevelLimit": 5,
"priceCoefficientVariation": 0.1,
"indexOrigin": ["BINANCE","GATEIO","HUOBI","MXC"],
"state": 0, # 0 enabled, 1 delivery, 2 completed, 3 offline, 4 pause
"isNew": False,
"isHot": True,
"isHidden": False
@param market: CCXT market
"""
maintenanceMarginRate = self.safe_string(info, 'maintenanceMarginRate')
initialMarginRate = self.safe_string(info, 'initialMarginRate')
maxVol = self.safe_string(info, 'maxVol')
riskIncrVol = self.safe_string(info, 'riskIncrVol')
riskIncrMmr = self.safe_string(info, 'riskIncrMmr')
riskIncrImr = self.safe_string(info, 'riskIncrImr')
floor = '0'
tiers = []
quoteId = self.safe_string(info, 'quoteCoin')
        while Precise.string_lt(floor, maxVol):
cap = Precise.string_add(floor, riskIncrVol)
tiers.append({
'tier': self.parse_number(Precise.string_div(cap, riskIncrVol)),
'currency': self.safe_currency_code(quoteId),
'notionalFloor': self.parse_number(floor),
'notionalCap': self.parse_number(cap),
'maintenanceMarginRate': self.parse_number(maintenanceMarginRate),
'maxLeverage': self.parse_number(Precise.string_div('1', initialMarginRate)),
'info': info,
})
initialMarginRate = Precise.string_add(initialMarginRate, riskIncrImr)
maintenanceMarginRate = Precise.string_add(maintenanceMarginRate, riskIncrMmr)
floor = cap
return tiers
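    # Worked example of the tier loop above, using the sample values from the
    # docstring(riskIncrVol 200000, maxVol 1000000, initialMarginRate 0.008,
    # maintenanceMarginRate 0.004, riskIncrImr 0.004, riskIncrMmr 0.004):
    #
    #     tier 1: notional 0 - 200000,       mmr 0.004, maxLeverage 1 / 0.008 = 125
    #     tier 2: notional 200000 - 400000,  mmr 0.008, maxLeverage 1 / 0.012 ~ 83.3
    #     tier 3: notional 400000 - 600000,  mmr 0.012, maxLeverage 1 / 0.016 = 62.5
    #     tier 4: notional 600000 - 800000,  mmr 0.016, maxLeverage 1 / 0.020 = 50
    #     tier 5: notional 800000 - 1000000, mmr 0.020, maxLeverage 1 / 0.024 ~ 41.7
    #
    # i.e. five tiers in total, matching the sample "riskLevelLimit": 5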
def parse_deposit_address(self, depositAddress, currency=None):
#
# {"chain":"ERC-20","address":"0x55cbd73db24eafcca97369e3f2db74b2490586e6"},
# {"chain":"MATIC","address":"0x05aa3236f1970eae0f8feb17ec19435b39574d74"},
# {"chain":"TRC20","address":"TGaPfhW41EXD3sAfs1grLF6DKfugfqANNw"},
# {"chain":"SOL","address":"5FSpUKuh2gjw4mF89T2e7sEjzUA1SkRKjBChFqP43KhV"},
# {"chain":"ALGO","address":"B3XTZND2JJTSYR7R2TQVCUDT4QSSYVAIZYDPWVBX34DGAYATBU3AUV43VU"}
        #
address = self.safe_string(depositAddress, 'address')
code = self.safe_currency_code(None, currency)
networkId = self.safe_string(depositAddress, 'chain')
network = self.safe_network(networkId)
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': None,
'network': network,
'info': depositAddress,
}
def fetch_deposit_addresses_by_network(self, code, params={}):
"""
fetch a dictionary of addresses for a currency, indexed by network
:param str code: unified currency code of the currency for the deposit address
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a dictionary of `address structures <https://docs.ccxt.com/en/latest/manual.html#address-structure>` indexed by the network
"""
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = self.spot2PrivateGetAssetDepositAddressList(self.extend(request, params))
#
# {
# "code":200,
# "data":{
# "currency":"USDC",
# "chains":[
# {"chain":"ERC-20","address":"0x55cbd73db24eafcca97369e3f2db74b2490586e6"},
# {"chain":"MATIC","address":"0x05aa3236f1970eae0f8feb17ec19435b39574d74"},
# {"chain":"TRC20","address":"TGaPfhW41EXD3sAfs1grLF6DKfugfqANNw"},
# {"chain":"SOL","address":"5FSpUKuh2gjw4mF89T2e7sEjzUA1SkRKjBChFqP43KhV"},
# {"chain":"ALGO","address":"B3XTZND2JJTSYR7R2TQVCUDT4QSSYVAIZYDPWVBX34DGAYATBU3AUV43VU"}
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
chains = self.safe_value(data, 'chains', [])
depositAddresses = []
for i in range(0, len(chains)):
depositAddress = self.parse_deposit_address(chains[i], currency)
depositAddresses.append(depositAddress)
return self.index_by(depositAddresses, 'network')
def fetch_deposit_address(self, code, params={}):
"""
        fetch the deposit address for a currency associated with this account
:param str code: unified currency code
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
rawNetwork = self.safe_string_upper(params, 'network')
params = self.omit(params, 'network')
response = self.fetch_deposit_addresses_by_network(code, params)
networks = self.safe_value(self.options, 'networks', {})
network = self.safe_string(networks, rawNetwork, rawNetwork)
result = None
if network is None:
result = self.safe_value(response, code)
if result is None:
alias = self.safe_string(networks, code, code)
result = self.safe_value(response, alias)
if result is None:
defaultNetwork = self.safe_string(self.options, 'defaultNetwork', 'ERC20')
result = self.safe_value(response, defaultNetwork)
if result is None:
values = list(response.values())
result = self.safe_value(values, 0)
if result is None:
raise InvalidAddress(self.id + ' fetchDepositAddress() cannot find deposit address for ' + code)
return result
# TODO: add support for all aliases here
result = self.safe_value(response, rawNetwork)
if result is None:
raise InvalidAddress(self.id + ' fetchDepositAddress() cannot find ' + network + ' deposit address for ' + code)
return result
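    # Hypothetical usage sketch, assuming an authenticated instance named
    # `exchange`(not defined in this file):
    #
    #     addresses = exchange.fetch_deposit_addresses_by_network('USDT')
    #     trc20 = exchange.fetch_deposit_address('USDT', {'network': 'TRC20'})
    #
    # when no network is supplied, the lookup above falls back to the currency
    # code, its alias, the 'defaultNetwork' option and finally the first entry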
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
self.load_markets()
request = {
# 'currency': currency['id'] + network example: USDT-TRX,
# 'state': 'state',
# 'start_time': since, # default 1 day
# 'end_time': self.milliseconds(),
# 'page_num': 1,
# 'page_size': limit, # default 20, maximum 50
}
currency = None
if code is not None:
rawNetwork = self.safe_string(params, 'network')
params = self.omit(params, 'network')
if rawNetwork is None:
raise ArgumentsRequired(self.id + ' fetchDeposits() requires a network parameter when the currency is specified')
            # mexc network names are not unified yet, so certain endpoints may expect TRX while others expect TRC-20
            # therefore the network parameter is passed through as-is, letting the user control the exact value
currency = self.currency(code)
request['currency'] = currency['id'] + '-' + rawNetwork
if since is not None:
request['start_time'] = since
if limit is not None:
request['limit'] = limit
response = self.spot2PrivateGetAssetDepositList(self.extend(request, params))
#
# {
# "code":200,
# "data":{
# "page_size":20,
# "total_page":1,
# "total_size":1,
# "page_num":1,
# "result_list":[
# {
# "currency":"USDC",
# "amount":150.0,
# "fee":0.0,
# "confirmations":19,
# "address":"0x55cbd73db24eafcca97369e3f2db74b2490586e6",
# "state":"SUCCESS",
# "tx_id":"0xc65a9b09e1b71def81bf8bb3ec724c0c1b2b4c82200c8c142e4ea4c1469fd789:0",
# "require_confirmations":12,
# "create_time":"2021-10-11T18:58:25.000+00:00",
# "update_time":"2021-10-11T19:01:06.000+00:00"
# }
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
resultList = self.safe_value(data, 'result_list', [])
return self.parse_transactions(resultList, code, since, limit)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
self.load_markets()
request = {
# 'withdrawal_id': '4b450616042a48c99dd45cacb4b092a7', # string
# 'currency': currency['id'],
# 'state': 'state',
# 'start_time': since, # default 1 day
# 'end_time': self.milliseconds(),
# 'page_num': 1,
# 'page_size': limit, # default 20, maximum 50
}
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if since is not None:
request['start_time'] = since
if limit is not None:
request['limit'] = limit
response = self.spot2PrivateGetAssetWithdrawList(self.extend(request, params))
#
# {
# "code":200,
# "data":{
# "page_size":20,
# "total_page":1,
# "total_size":1,
# "page_num":1,
# "result_list":[
# {
# "id":"4b450616042a48c99dd45cacb4b092a7",
# "currency":"USDT-TRX",
# "address":"TRHKnx74Gb8UVcpDCMwzZVe4NqXfkdtPak",
# "amount":30.0,
# "fee":1.0,
        #                 "remark":"this is my first withdrawal remark",
# "state":"WAIT",
# "create_time":"2021-10-11T20:45:08.000+00:00"
# }
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
resultList = self.safe_value(data, 'result_list', [])
return self.parse_transactions(resultList, code, since, limit)
def parse_transaction(self, transaction, currency=None):
#
# fetchDeposits
#
# {
# "currency":"USDC",
# "amount":150.0,
# "fee":0.0,
# "confirmations":19,
# "address":"0x55cbd73db24eafcca97369e3f2db74b2490586e6",
# "state":"SUCCESS",
# "tx_id":"0xc65a9b09e1b71def81bf8bb3ec724c0c1b2b4c82200c8c142e4ea4c1469fd789:0",
# "require_confirmations":12,
# "create_time":"2021-10-11T18:58:25.000+00:00",
# "update_time":"2021-10-11T19:01:06.000+00:00"
# }
#
# fetchWithdrawals
#
# {
# "id":"4b450616042a48c99dd45cacb4b092a7",
# "currency":"USDT-TRX",
# "address":"TRHKnx74Gb8UVcpDCMwzZVe4NqXfkdtPak",
# "amount":30.0,
# "fee":1.0,
        #     "remark":"this is my first withdrawal remark",
# "state":"WAIT",
# "create_time":"2021-10-11T20:45:08.000+00:00"
# }
#
# withdraw
#
# {
# "withdrawId":"25fb2831fb6d4fc7aa4094612a26c81d"
# }
#
id = self.safe_string_2(transaction, 'id', 'withdrawId')
type = 'deposit' if (id is None) else 'withdrawal'
timestamp = self.parse8601(self.safe_string(transaction, 'create_time'))
updated = self.parse8601(self.safe_string(transaction, 'update_time'))
currencyId = self.safe_string(transaction, 'currency')
network = None
if (currencyId is not None) and (currencyId.find('-') >= 0):
parts = currencyId.split('-')
currencyId = self.safe_string(parts, 0)
networkId = self.safe_string(parts, 1)
network = self.safe_network(networkId)
code = self.safe_currency_code(currencyId, currency)
status = self.parse_transaction_status(self.safe_string(transaction, 'state'))
amountString = self.safe_string(transaction, 'amount')
address = self.safe_string(transaction, 'address')
txid = self.safe_string(transaction, 'tx_id')
fee = None
feeCostString = self.safe_string(transaction, 'fee')
if feeCostString is not None:
fee = {
'cost': self.parse_number(feeCostString),
'currency': code,
}
if type == 'withdrawal':
# mexc withdrawal amount includes the fee
amountString = Precise.string_sub(amountString, feeCostString)
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'network': network,
'address': address,
'addressTo': None,
'addressFrom': None,
'tag': None,
'tagTo': None,
'tagFrom': None,
'type': type,
'amount': self.parse_number(amountString),
'currency': code,
'status': status,
'updated': updated,
'fee': fee,
}
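    # Worked example of the withdrawal adjustment above: for the sample
    # fetchWithdrawals entry with "amount": 30.0 and "fee": 1.0, the unified
    # amount becomes Precise.string_sub('30.0', '1.0') = '29.0', because the
    # raw mexc amount includes the fee while the unified amount excludes it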
def parse_transaction_status(self, status):
statuses = {
'WAIT': 'pending',
'WAIT_PACKAGING': 'pending',
'SUCCESS': 'ok',
}
return self.safe_string(statuses, status, status)
def fetch_position(self, symbol, params={}):
"""
fetch data on a single open contract trade position
:param str symbol: unified market symbol of the market the position is held in, default is None
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a `position structure <https://docs.ccxt.com/en/latest/manual.html#position-structure>`
"""
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.fetch_positions(None, self.extend(request, params))
return self.safe_value(response, 0)
def fetch_positions(self, symbols=None, params={}):
"""
fetch all open positions
:param [str]|None symbols: list of unified market symbols
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns [dict]: a list of `position structure <https://docs.ccxt.com/en/latest/manual.html#position-structure>`
"""
self.load_markets()
response = self.contractPrivateGetPositionOpenPositions(params)
#
# {
# "success": True,
# "code": 0,
# "data": [
# {
# "positionId": 1394650,
# "symbol": "ETH_USDT",
# "positionType": 1,
# "openType": 1,
# "state": 1,
# "holdVol": 1,
# "frozenVol": 0,
# "closeVol": 0,
# "holdAvgPrice": 1217.3,
# "openAvgPrice": 1217.3,
# "closeAvgPrice": 0,
# "liquidatePrice": 1211.2,
# "oim": 0.1290338,
# "im": 0.1290338,
# "holdFee": 0,
# "realised": -0.0073,
# "leverage": 100,
# "createTime": 1609991676000,
# "updateTime": 1609991676000,
# "autoAddIm": False
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_positions(data, symbols)
def parse_position(self, position, market=None):
#
# {
# "positionId": 1394650,
# "symbol": "ETH_USDT",
# "positionType": 1,
# "openType": 1,
# "state": 1,
# "holdVol": 1,
# "frozenVol": 0,
# "closeVol": 0,
# "holdAvgPrice": 1217.3,
# "openAvgPrice": 1217.3,
# "closeAvgPrice": 0,
# "liquidatePrice": 1211.2,
# "oim": 0.1290338,
# "im": 0.1290338,
# "holdFee": 0,
# "realised": -0.0073,
# "leverage": 100,
# "createTime": 1609991676000,
# "updateTime": 1609991676000,
# "autoAddIm": False
# }
#
market = self.safe_market(self.safe_string(position, 'symbol'), market)
symbol = market['symbol']
contracts = self.safe_string(position, 'holdVol')
entryPrice = self.safe_number(position, 'openAvgPrice')
initialMargin = self.safe_string(position, 'im')
rawSide = self.safe_string(position, 'positionType')
side = 'long' if (rawSide == '1') else 'short'
openType = self.safe_string(position, 'margin_mode')
marginType = 'isolated' if (openType == '1') else 'cross'
leverage = self.safe_number(position, 'leverage')
liquidationPrice = self.safe_number(position, 'liquidatePrice')
        timestamp = self.safe_integer(position, 'updateTime')
return {
'info': position,
'symbol': symbol,
'contracts': self.parse_number(contracts),
'contractSize': None,
'entryPrice': entryPrice,
'collateral': None,
'side': side,
'unrealizedProfit': None,
'leverage': self.parse_number(leverage),
'percentage': None,
'marginType': marginType,
'notional': None,
'markPrice': None,
'liquidationPrice': liquidationPrice,
'initialMargin': self.parse_number(initialMargin),
'initialMarginPercentage': None,
'maintenanceMargin': None,
'maintenanceMarginPercentage': None,
'marginRatio': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
}
def fetch_transfer(self, id, since=None, limit=None, params={}):
marketType, query = self.handle_market_type_and_params('fetchTransfer', None, params)
self.load_markets()
if marketType == 'spot':
request = {
'transact_id': id,
}
response = self.spot2PrivateGetAssetInternalTransferInfo(self.extend(request, query))
#
# {
# code: '200',
# data: {
# currency: 'USDT',
# amount: '1',
# transact_id: '954877a2ef54499db9b28a7cf9ebcf41',
# from: 'MAIN',
# to: 'CONTRACT',
# transact_state: 'SUCCESS'
# }
# }
#
data = self.safe_value(response, 'data', {})
return self.parse_transfer(data)
elif marketType == 'swap':
raise BadRequest(self.id + ' fetchTransfer() is not supported for ' + marketType)
def fetch_transfers(self, code=None, since=None, limit=None, params={}):
"""
fetch a history of internal transfers made on an account
:param str|None code: unified currency code of the currency transferred
:param int|None since: the earliest time in ms to fetch transfers for
:param int|None limit: the maximum number of transfers structures to retrieve
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns [dict]: a list of `transfer structures <https://docs.ccxt.com/en/latest/manual.html#transfer-structure>`
"""
marketType, query = self.handle_market_type_and_params('fetchTransfers', None, params)
self.load_markets()
request = {}
currency = None
resultList = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if marketType == 'spot':
if since is not None:
request['start_time'] = since
if limit is not None:
if limit > 50:
                    raise ExchangeError(self.id + ' fetchTransfers() supports a maximum limit of 50')
request['page-size'] = limit
response = self.spot2PrivateGetAssetInternalTransferRecord(self.extend(request, query))
#
# {
# code: '200',
# data: {
# total_page: '1',
# total_size: '5',
# result_list: [{
# currency: 'USDT',
# amount: '1',
# transact_id: '954877a2ef54499db9b28a7cf9ebcf41',
# from: 'MAIN',
# to: 'CONTRACT',
# transact_state: 'SUCCESS'
# },
# ...
# ]
# }
# }
#
data = self.safe_value(response, 'data', {})
resultList = self.safe_value(data, 'result_list', [])
elif marketType == 'swap':
if limit is not None:
request['page_size'] = limit
response = self.contractPrivateGetAccountTransferRecord(self.extend(request, query))
data = self.safe_value(response, 'data')
resultList = self.safe_value(data, 'resultList')
#
# {
# "success": True,
# "code": "0",
# "data": {
# "pageSize": "20",
# "totalCount": "10",
# "totalPage": "1",
# "currentPage": "1",
# "resultList": [
# {
# "id": "2980812",
# "txid": "fa8a1e7bf05940a3b7025856dc48d025",
# "currency": "USDT",
# "amount": "22.90213135",
# "type": "IN",
# "state": "SUCCESS",
# "createTime": "1648849076000",
# "updateTime": "1648849076000"
# },
# ]
# }
# }
#
return self.parse_transfers(resultList, currency, since, limit)
def transfer(self, code, amount, fromAccount, toAccount, params={}):
"""
transfer currency internally between wallets on the same account
:param str code: unified currency code
:param float amount: amount to transfer
:param str fromAccount: account to transfer from
:param str toAccount: account to transfer to
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a `transfer structure <https://docs.ccxt.com/en/latest/manual.html#transfer-structure>`
"""
self.load_markets()
currency = self.currency(code)
accounts = {
'spot': 'MAIN',
'swap': 'CONTRACT',
}
fromId = self.safe_string(accounts, fromAccount)
toId = self.safe_string(accounts, toAccount)
if fromId is None:
keys = list(accounts.keys())
raise ExchangeError(self.id + ' fromAccount must be one of ' + ', '.join(keys))
if toId is None:
keys = list(accounts.keys())
raise ExchangeError(self.id + ' toAccount must be one of ' + ', '.join(keys))
request = {
'currency': currency['id'],
'amount': amount,
'from': fromId,
'to': toId,
}
response = self.spot2PrivatePostAssetInternalTransfer(self.extend(request, params))
#
# {
# code: '200',
# data: {
# currency: 'USDT',
# amount: '1',
# transact_id: 'b60c1df8e7b24b268858003f374ecb75',
# from: 'MAIN',
# to: 'CONTRACT',
# transact_state: 'WAIT'
# }
# }
#
data = self.safe_value(response, 'data', {})
return self.parse_transfer(data, currency)
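    # Hypothetical usage sketch, assuming an authenticated instance named
    # `exchange`(not defined in this file):
    #
    #     entry = exchange.transfer('USDT', 1, 'spot', 'swap')
    #
    # 'spot' and 'swap' are mapped to the exchange-specific MAIN and CONTRACT
    # account ids above; any other account name raises an ExchangeError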
def parse_transfer(self, transfer, currency=None):
#
# spot:
#
# {
# currency: 'USDT',
# amount: '1',
# transact_id: 'b60c1df8e7b24b268858003f374ecb75',
# from: 'MAIN',
# to: 'CONTRACT',
# transact_state: 'WAIT'
# }
#
# swap
#
# {
# "currency": "USDT",
# "amount": "22.90213135",
# "txid": "fa8a1e7bf05940a3b7025856dc48d025",
# "id": "2980812",
# "type": "IN",
# "state": "SUCCESS",
# "createTime": "1648849076000",
# "updateTime": "1648849076000"
# }
#
currencyId = self.safe_string(transfer, 'currency')
id = self.safe_string_2(transfer, 'transact_id', 'txid')
timestamp = self.safe_integer(transfer, 'createTime')
datetime = self.iso8601(timestamp) if (timestamp is not None) else None
direction = self.safe_string(transfer, 'type')
accountFrom = None
accountTo = None
if direction is not None:
accountFrom = 'MAIN' if (direction == 'IN') else 'CONTRACT'
accountTo = 'CONTRACT' if (direction == 'IN') else 'MAIN'
else:
accountFrom = self.safe_string(transfer, 'from')
accountTo = self.safe_string(transfer, 'to')
return {
'info': transfer,
'id': id,
'timestamp': timestamp,
'datetime': datetime,
'currency': self.safe_currency_code(currencyId, currency),
'amount': self.safe_number(transfer, 'amount'),
'fromAccount': self.parse_account_id(accountFrom),
'toAccount': self.parse_account_id(accountTo),
'status': self.parse_transfer_status(self.safe_string_2(transfer, 'transact_state', 'state')),
}
def parse_account_id(self, status):
statuses = {
'MAIN': 'spot',
'CONTRACT': 'swap',
}
return self.safe_string(statuses, status, status)
def parse_transfer_status(self, status):
statuses = {
'SUCCESS': 'ok',
'FAILED': 'failed',
'WAIT': 'pending',
}
return self.safe_string(statuses, status, status)
def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag:
:param dict params: extra parameters specific to the mexc3 api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
networks = self.safe_value(self.options, 'networks', {})
        network = self.safe_string_2(params, 'network', 'chain')  # this line allows the user to specify either ERC20 or ETH
network = self.safe_string(networks, network, network) # handle ETH > ERC-20 alias
self.check_address(address)
self.load_markets()
currency = self.currency(code)
if tag is not None:
address += ':' + tag
request = {
'currency': currency['id'],
'address': address,
'amount': amount,
}
if network is not None:
request['chain'] = network
params = self.omit(params, ['network', 'chain'])
response = self.spot2PrivatePostAssetWithdraw(self.extend(request, params))
#
# {
# "code":200,
# "data": {
# "withdrawId":"25fb2831fb6d4fc7aa4094612a26c81d"
# }
# }
#
data = self.safe_value(response, 'data', {})
return self.parse_transaction(data, currency)
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
section, access = api
path, params = self.resolve_path(path, params)
url = None
if section == 'spot':
url = self.urls['api'][section][access] + '/api/' + self.version + '/' + path
paramsEncoded = ''
if access == 'private':
params['timestamp'] = self.milliseconds()
params['recvWindow'] = self.safe_integer(self.options, 'recvWindow', 5000)
if params:
paramsEncoded = self.urlencode(params)
url += '?' + paramsEncoded
if access == 'private':
self.check_required_credentials()
signature = self.hmac(self.encode(paramsEncoded), self.encode(self.secret), hashlib.sha256)
url += '&' + 'signature=' + signature
headers = {
'X-MEXC-APIKEY': self.apiKey,
}
if method == 'POST':
headers['Content-Type'] = 'application/json'
elif section == 'contract' or section == 'spot2':
url = self.urls['api'][section][access] + '/' + self.implode_params(path, params)
params = self.omit(params, self.extract_params(path))
if access == 'public':
if params:
url += '?' + self.urlencode(params)
else:
self.check_required_credentials()
timestamp = str(self.milliseconds())
auth = ''
headers = {
'ApiKey': self.apiKey,
'Request-Time': timestamp,
'Content-Type': 'application/json',
}
if method == 'POST':
auth = self.json(params)
body = auth
else:
params = self.keysort(params)
if params:
auth += self.urlencode(params)
url += '?' + auth
auth = self.apiKey + timestamp + auth
signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha256)
headers['Signature'] = signature
return {'url': url, 'method': method, 'body': body, 'headers': headers}
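    # Illustrative sketch of the signatures built above, with placeholder
    # credentials: for a contract-section GET request with params
    # {'symbol': 'BTC_USDT'} the signed string is
    #
    #     auth = apiKey + timestamp + 'symbol=BTC_USDT'
    #     headers['Signature'] = hmac_sha256(secret, auth)
    #
    # while the spot section signs the url-encoded query(which includes
    # 'timestamp' and 'recvWindow') and appends it as '&signature=...'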
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return
# spot
# {"code":-1128,"msg":"Combination of optional parameters invalid.","_extend":null}
# {"success":false,"code":123456,"message":"Order quantity error...."}
#
# contract
#
# {"code":10232,"msg":"The currency not exist"}
# {"code":10216,"msg":"No available deposit address"}
# {"success":true, "code":0, "data":1634095541710}
#
success = self.safe_value(response, 'success', False) # v1
if success is True:
return
responseCode = self.safe_string(response, 'code', None)
if (responseCode is not None) and (responseCode != '200') and (responseCode != '0'):
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], body, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], responseCode, feedback)
raise ExchangeError(feedback)
|
py | b408dc6f3ce96e2b5cfa454031f7ae233e455725 | """
WiderFace evaluation code
author: wondervictor
mail: [email protected]
copyright@wondervictor
MIT License
Copyright (c) 2018 Vic Chan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from distutils.core import setup, Extension
import numpy
from Cython.Build import cythonize
package = Extension(
'bbox',
['box_overlaps.pyx'],
include_dirs=[
numpy.get_include()])
setup(ext_modules=cythonize([package]))
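# A typical build invocation for this extension, assuming Cython and numpy are
# installed, is:
#
#     python setup.py build_ext --inplace
#
# which compiles box_overlaps.pyx and produces an importable `bbox` module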
|
py | b408ddca2f6177d25a1ea2b91305351811d525da | import functools as ft
import itertools as it
import pathlib
import numpy as np
from joblib import delayed, Parallel
from scipy.stats import norm
from sklearn.utils.extmath import cartesian
from src.models import RBFFourierBasis, GP
class PoolBO:
def __init__(self, af: str, max_iteration: int, n_init: int = 5, seed: int = 0, noise_var: float = 1e-4,
save_inference_r: bool = False):
self.__known_af = {'random': self.random,
'pi': self.pi,
'ei': self.ei,
'ucb': ft.partial(self.ucb, beta_rt=3),
'gp-ucb': ft.partial(self.ucb, beta_rt=3),
'mes': ft.partial(self.mes, n_sample=100)}
# known_af = {'pi', 'ei', 'ucb', 'gp-ucb', 'random', 'gp-ucb', 'mes'}
assert af in self.__known_af.keys(), f'Unknown acquisition function: {af}'
self.__save_inference_r = save_inference_r
self.__n_init = n_init
self.__seed = seed
self.__rng = np.random.default_rng(seed)
self.__af = af
self.__iteration = 0
self.__input_dim = 2
self.__max_iteration = max_iteration
self.__noise_var = noise_var
self.__noise_std = np.sqrt(noise_var)
n_features = 2000
self.__rff = RBFFourierBasis(n_features, self.input_dim, rng=np.random.default_rng(seed))
self.__w = self.rng.normal(size=n_features)
grid_1d = np.linspace(-5, 5, 70)
self.__x_pool = cartesian([grid_1d for _ in range(self.input_dim)])
self.__observed_idx = self.rng.choice(np.arange(self.x_pool.shape[0]), size=n_init, replace=True).tolist()
x_train = self.__x_pool[self.observed_idx].reshape(n_init, self.input_dim)
y_train = self.obj_f(x_train).reshape(n_init, 1)
self.__f_best = np.max(y_train)
y_train += self.rng.normal(scale=self.noise_std, size=(n_init, 1))
self.__y_best = np.max(y_train)
self.__gp = GP(x_train=x_train, y_train=y_train, noise_var=self.noise_var, standardize=False)
self.__f_opt = np.max(self.obj_f(self.x_pool))
self.__simple_r = [float(self.f_opt - self.f_best)]
self.__inference_r = []
if self.save_inference_r:
m, _ = self.gp.predict(self.x_pool)
max_i = np.argmax(m)
self.__inference_r.append(float(self.f_opt - self.obj_f(self.x_pool[max_i])))
self.__next_x = None
self.__next_i = None
self.__next_x_af = None
return
@property
def observed_idx(self):
return self.__observed_idx
@property
def input_dim(self):
return self.__input_dim
@property
def x_pool(self):
return self.__x_pool
@property
def af(self):
return self.__af
@property
def x_train(self):
return self.__gp.x_train
@property
def y_train(self):
return self.__gp.y_train
@property
def y_best(self):
return self.__y_best
@property
def f_best(self):
return self.__f_best
@property
def f_opt(self):
return self.__f_opt
@property
def gp(self):
return self.__gp
@property
def rng(self):
return self.__rng
@property
def noise_var(self):
return self.__noise_var
@property
def noise_std(self):
return self.__noise_std
@property
def next_x(self):
return self.__next_x
@property
def next_i(self):
return self.__next_i
@property
def next_x_af(self):
return self.__next_x_af
@property
def iteration(self):
return self.__iteration
@property
def max_iteration(self):
return self.__max_iteration
@property
def simple_r(self):
return self.__simple_r
@property
def inference_r(self):
return self.__inference_r
@property
def save_inference_r(self):
return self.__save_inference_r
@property
def known_af_name(self):
return self.__known_af.keys()
def next_observation(self):
self.__known_af[self.af]()
next_f = self.obj_f(self.__next_x)
self.__f_best = float(max(self.__f_best, next_f))
next_f += self.rng.normal(scale=self.noise_std)
self.__y_best = float(max(self.__y_best, next_f))
self.gp.add_observation(self.__next_x, next_f)
self.__simple_r.append(float(self.f_opt - self.f_best))
if self.save_inference_r:
m, _ = self.gp.predict(self.x_pool)
max_i = np.argmax(m)
self.__inference_r.append(float(self.f_opt - self.obj_f(self.x_pool[max_i])))
# self.__x_pool = self.rng.permutation(self.__x_pool)
self.observed_idx.append(int(self.next_i))
self.__iteration += 1
return
def obj_f(self, x):
return self.__rff.transform(x.reshape(-1, self.input_dim)) @ self.__w
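    # The objective is a random draw from an approximate RBF-kernel GP prior:
    # obj_f(x) = phi(x) @ w, where phi(x) are the 2000 random Fourier features
    # built in __init__ and w ~ N(0, I), so f_opt is simply the maximum of this
    # sample path over the candidate pool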
def random(self):
self.__next_i = self.rng.choice(np.setdiff1d(np.arange(self.x_pool.shape[0]), self.observed_idx))
self.__next_x = self.x_pool[self.next_i]
return
def pi(self, maximize=True):
mean, var = self.gp.predict(self.x_pool)
std = np.sqrt(var)
if maximize:
z = (mean - self.y_best) / std
else:
z = (self.y_best - mean) / std
af = norm.cdf(z)
af[self.observed_idx] = -1
max_i = np.argmax(af)
self.__next_i = max_i
self.__next_x = self.x_pool[max_i]
self.__next_x_af = af[max_i]
return
def ei(self, maximize=True):
mean, var = self.gp.predict(self.x_pool)
std = np.sqrt(var)
if maximize:
z = (mean - self.y_best) / std
else:
z = (self.y_best - mean) / std
af = std * (z * norm.cdf(z) + norm.pdf(z))
af[self.observed_idx] = -1
max_i = np.argmax(af)
self.__next_i = max_i
self.__next_x = self.x_pool[max_i]
self.__next_x_af = af[max_i]
return
def ucb(self, beta_rt=3):
mean, var = self.gp.predict(self.x_pool)
std = np.sqrt(var)
af = mean + beta_rt * std
af[self.observed_idx] = np.min(af) - 1
max_i = np.argmax(af)
self.__next_i = max_i
self.__next_x = self.x_pool[max_i]
self.__next_x_af = af[max_i]
return
def mes(self, n_sample: int = 10, maximize=True):
mean, cov = self.gp.predict(self.x_pool, fullcov=True)
std = np.sqrt(np.diag(cov))
if not maximize:
mean = -mean
sample_path = self.rng.multivariate_normal(mean, cov, method='cholesky', size=n_sample)
y_max = np.max(sample_path, axis=1).flatten()
gamma = (y_max[:, None] - mean) / std
gamma_cdf = norm.cdf(gamma)
af = np.mean(gamma * norm.pdf(gamma) / (2 * gamma_cdf) - np.log(gamma_cdf), axis=0)
af[self.observed_idx] = np.min(af) - 1
max_i = np.argmax(af)
self.__next_i = max_i
self.__next_x = self.x_pool[max_i]
self.__next_x_af = af[max_i]
return
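    # Rough usage sketch of the class(hypothetical argument values):
    #
    #     bo = PoolBO('ei', max_iteration=50, n_init=10, seed=0)
    #     while bo.iteration < bo.max_iteration:
    #         bo.next_observation()
    #     print(bo.simple_r[-1])
    #
    # each call picks the next pool point with the chosen acquisition function,
    # observes a noisy value and appends the simple regret f_opt - f_best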
def main():
max_itr = 100
af_list = ['mes-s', 'mes-g', 'pi', 'random', 'ei']
# af_list=['ucb', 'gp-ucb']
# af_list = ['random']
seeds = np.arange(20)
def one_exp(af, seed):
bo = PoolBO(af, seed=seed, max_iteration=max_itr, n_init=10, save_inference_r=True)
while bo.iteration < max_itr:
# print(f'{bo.simple_r[-1]},{bo.inference_r[-1]}, x:{bo.next_x}, af:{bo.next_x_af}')
bo.next_observation()
np.savez(result_dir / f'{af}_seed{seed}', sr=bo.simple_r, ir=bo.inference_r)
Parallel(n_jobs=40, verbose=10)([delayed(one_exp)(AF, SEED) for AF, SEED in it.product(af_list, seeds)])
return
if __name__ == '__main__':
result_dir = pathlib.Path(__file__).parent.parent / 'image/bo_result'
main()
|
py | b408de42738408c4e16ae5b21f5e1dac484bc1af | from typing import Union, List, Optional, Sequence
import warnings
import torch
from apex.transformer import parallel_state
from apex.transformer.enums import ModelType
from apex.transformer.pipeline_parallel import p2p_communication
from apex.transformer.pipeline_parallel.p2p_communication import FutureTensor
from apex.transformer.pipeline_parallel.utils import get_kth_microbatch
from apex.transformer.pipeline_parallel.utils import listify_model
from apex.transformer.pipeline_parallel.utils import get_num_microbatches
from apex.transformer.pipeline_parallel.utils import get_model_type
from apex.transformer.pipeline_parallel.schedules.common import Batch
from apex.transformer.pipeline_parallel.schedules.common import FwdStepFunc
from apex.transformer.pipeline_parallel.schedules.common import backward_step
from apex.transformer.pipeline_parallel.schedules.common import forward_step
from apex.transformer.pipeline_parallel.schedules.common import free_output_tensor
from apex.transformer.log_util import get_transformer_logger
__all__ = ["forward_backward_pipelining_without_interleaving"]
_logger = get_transformer_logger(__name__)
def get_tensor_shapes(
rank: int,
model_type: ModelType,
*,
tensor_shape: Union[List[int], torch.Size],
decoder_sequence_length: Optional[int] = None,
) -> Sequence[Sequence[int]]:
# Determine right tensor sizes (based on position of rank with respect to split
# rank) and model size.
# Send two tensors if model is T5 and rank is in decoder stage:
# first tensor is decoder (pre-transpose),
# second tensor is encoder (post-transpose).
# If model is T5 and rank is at the boundary:
# send one tensor (post-transpose from encoder).
# Otherwise, send one tensor (pre-transpose).
assert (
len(tensor_shape) == 3
), f"`tensor_shape` should be [sequence_length, micro_batch_size, hidden_size] but {tensor_shape}"
sequence_length, micro_batch_size, hidden_size = tensor_shape
tensor_shapes = []
if model_type == ModelType.encoder_and_decoder:
if decoder_sequence_length is None:
raise ValueError("`decoder_sequence_length` is required for `ModelType.encoder_and_decoder`")
if parallel_state.is_pipeline_stage_before_split(rank):
# If next rank is after split, then need transpose for encoder_hidden_state.
if parallel_state.is_pipeline_stage_before_split(rank + 1):
tensor_shapes.append((sequence_length, micro_batch_size, hidden_size))
else:
tensor_shapes.append((micro_batch_size, sequence_length, hidden_size))
else:
tensor_shapes.append((decoder_sequence_length, micro_batch_size, hidden_size))
tensor_shapes.append((micro_batch_size, sequence_length, hidden_size))
else:
tensor_shapes.append((sequence_length, micro_batch_size, hidden_size))
return tensor_shapes
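# Illustrative example of the shape logic above, with hypothetical sizes
# tensor_shape=(512, 4, 1024) and decoder_sequence_length=128 for an
# encoder_and_decoder model: an encoder rank whose successor is still before
# the split sends [(512, 4, 1024)], the last encoder rank sends the
# post-transpose [(4, 512, 1024)], and a decoder rank sends
# [(128, 4, 1024), (4, 512, 1024)]; every other model type uses the single
# pre-transpose shape [(512, 4, 1024)]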
def recv_forward(
tensor_shapes: List[Union[None, List[int]]],
*,
dtype: Optional[torch.dtype] = None,
async_comm: bool = False,
) -> List[Union[None, torch.Tensor, FutureTensor]]:
input_tensors = []
for tensor_shape in tensor_shapes:
if tensor_shape is None:
input_tensors.append(None)
else:
input_tensors.append(p2p_communication.recv_forward(tensor_shape=tensor_shape, dtype=dtype, async_comm=async_comm))
return input_tensors
def recv_backward(
tensor_shapes: List[Union[None, List[int]]],
*,
dtype: Optional[torch.dtype] = None,
async_comm: bool = False,
) -> List[Union[None, torch.Tensor, FutureTensor]]:
output_tensor_grads = []
for tensor_shape in tensor_shapes:
if tensor_shape is None:
output_tensor_grads.append(None)
else:
output_tensor_grads.append(p2p_communication.recv_backward(tensor_shape=tensor_shape, dtype=dtype, async_comm=async_comm))
return output_tensor_grads
def send_forward(
output_tensors: Union[torch.Tensor, List[Union[None, torch.Tensor]]],
tensor_shapes: List[Union[None, List[int]]],
*,
dtype: Optional[torch.dtype] = None,
async_comm: bool = False,
) -> None:
if not isinstance(output_tensors, list):
output_tensors = [output_tensors]
for (output_tensor, tensor_shape) in zip(output_tensors, tensor_shapes):
if tensor_shape is None:
continue
p2p_communication.send_forward(output_tensor, tensor_shape=tensor_shape, dtype=dtype, async_comm=async_comm)
def send_backward(
input_tensor_grads: Union[torch.Tensor, List[Union[None, torch.Tensor]]],
tensor_shapes: List[Union[None, List[int]]],
*,
dtype: Optional[torch.dtype] = None,
async_comm: bool = False,
) -> None:
if not isinstance(input_tensor_grads, list):
input_tensor_grads = [input_tensor_grads]
for (input_tensor_grad, tensor_shape) in zip(input_tensor_grads, tensor_shapes):
if tensor_shape is None:
continue
p2p_communication.send_backward(input_tensor_grad, tensor_shape=tensor_shape, dtype=dtype, async_comm=async_comm)
def send_forward_recv_backward(
output_tensors: Union[torch.Tensor, List[Union[None, torch.Tensor]]],
tensor_shapes: List[Union[None, List[int]]],
*,
dtype: Optional[torch.dtype] = None,
async_comm: bool = False,
) -> List[Union[None, torch.Tensor, FutureTensor]]:
if not isinstance(output_tensors, list):
output_tensors = [output_tensors]
output_tensor_grads = []
for (output_tensor, tensor_shape) in zip(output_tensors, tensor_shapes):
if tensor_shape is None:
output_tensor_grads.append(None)
continue
output_tensor_grad = p2p_communication.send_forward_recv_backward(output_tensor, tensor_shape=tensor_shape, dtype=dtype, async_comm=async_comm)
output_tensor_grads.append(output_tensor_grad)
return output_tensor_grads
def send_backward_recv_forward(
input_tensor_grads: Union[torch.Tensor, List[Union[None, torch.Tensor]]],
tensor_shapes: List[Union[None, List[int]]],
*,
dtype: Optional[torch.dtype] = None,
async_comm: bool = False,
) -> List[Union[None, torch.Tensor, FutureTensor]]:
if not isinstance(input_tensor_grads, list):
input_tensor_grads = [input_tensor_grads]
input_tensors = []
for (input_tensor_grad, tensor_shape) in zip(input_tensor_grads, tensor_shapes):
if tensor_shape is None:
input_tensors.append(None)
continue
input_tensor = p2p_communication.send_backward_recv_forward(input_tensor_grad, tensor_shape=tensor_shape, dtype=dtype, async_comm=async_comm)
input_tensors.append(input_tensor)
return input_tensors
def forward_backward_pipelining_without_interleaving(
forward_step_func: FwdStepFunc,
batch: Optional[Batch],
model: Union[torch.nn.Module, List[torch.nn.Module]],
*,
forward_only: bool,
tensor_shape: Optional[Union[List[int], torch.Size]] = None,
decoder_sequence_length: Optional[int] = None,
dtype: Optional[torch.dtype] = None,
grad_scaler: Optional[torch.cuda.amp.GradScaler] = None,
disable_autocast: bool = False,
deallocate_pipeline_outputs: bool = False,
async_comm: bool = False,
**kwargs,
) -> List[Union[torch.Tensor, Sequence[torch.Tensor]]]:
"""Run non-interleaved 1F1B schedule, with communication between pipeline stages.
This pipeline parallel scheduling consists of three steps:
1. warmup
2. 1F1B a.k.a. steady state
3. cooldown if not forward_only
Args:
forward_step_func: A function which takes a minibatch and model as its arguments and
returns model's forward output and the loss function.
The loss function is supposed to take one `torch.Tensor` and
return a `torch.Tensor` of loss and a dictionary of `str` and `torch.Tensor`.
batch: A minibatch, i.e., a list of `torch.Tensor`'s.
model: A `torch.nn.Module` or a list of `torch.nn.Module`.
Keyword args:
        forward_only: If :obj:`True`, run forward passes only; input/output tensors are
            not saved and the backward (cooldown) phase is skipped.
        tensor_shape: Shape of the tensors exchanged between pipeline stages.
            Required for P2P communication.
        dtype: dtype used in p2p communication. If ``None`` (default value),
            torch.float32 will be used even if ``autocast`` is enabled.
        grad_scaler: Gradient scaler passed to the backward step, for use with
            mixed-precision training.
        disable_autocast: If :obj:`True`, disable ``autocast`` in the forward step.
deallocate_pipeline_outputs: If :obj:`True`, free the data of the output tensor of
each pipeline stage. Experimental.
Returns:
a list of loss `torch.Tensor`s if the last stage, empty list otherwise.
"""
# timers = get_timers()
if deallocate_pipeline_outputs:
warnings.warn(
"`deallocate_pipeline_outputs` is experimental and subject to change. "
"This option is not recommended."
)
model: List[torch.nn.Module] = listify_model(model)
if len(model) != 1:
        msg = f"`model` is expected to be a `nn.Module`, but got {type(model)}"
raise RuntimeError(msg)
model: torch.nn.Module = model[0]
# Compute number of warmup microbatches.
num_microbatches: int = get_num_microbatches()
num_warmup_microbatches: int = (
parallel_state.get_pipeline_model_parallel_world_size() - parallel_state.get_pipeline_model_parallel_rank() - 1
)
num_warmup_microbatches: int = min(num_warmup_microbatches, num_microbatches)
num_microbatches_remaining: int = num_microbatches - num_warmup_microbatches
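    # Worked example (illustrative numbers): with a pipeline-parallel world size of 4,
    # this stage being rank 1, and 8 microbatches per global batch:
    #   num_warmup_microbatches    = 4 - 1 - 1 = 2
    #   num_microbatches_remaining = 8 - 2     = 6
    # i.e. 2 warmup forwards, 6 steady-state 1F1B iterations, then 2 cooldown backwards.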
model_type = get_model_type(model)
rank: int = parallel_state.get_pipeline_model_parallel_rank()
recv_tensor_shapes: List[List[int]] = get_tensor_shapes(
rank - 1, model_type, tensor_shape=tensor_shape, decoder_sequence_length=decoder_sequence_length
)
send_tensor_shapes: List[List[int]] = get_tensor_shapes(
rank, model_type, tensor_shape=tensor_shape, decoder_sequence_length=decoder_sequence_length
)
_logger.info(
f"num_microbatches: {num_microbatches}, "
f"num_warmup_microbatches: {num_warmup_microbatches}, "
f"num_microbatches_remaining: {num_microbatches_remaining}"
)
# Input, output tensors only need to be saved when doing backward passes
input_tensors: List[Union[None, torch.Tensor]] = []
output_tensors: List[Union[None, torch.Tensor]] = []
losses_reduced: List[Union[None, torch.Tensor]] = []
###################################################################################################################
# Run warmup forward passes.
###################################################################################################################
_logger.info("Warmup")
for i in range(num_warmup_microbatches):
_logger.debug(f"warmup iter: {i} / {num_warmup_microbatches}")
_logger.debug("receive fwd")
input_tensor = recv_forward(tensor_shapes=recv_tensor_shapes, dtype=dtype, async_comm=async_comm)
cur_microbatch: Optional[torch.Tensor] = get_kth_microbatch(batch, i)
output_tensor = forward_step(
forward_step_func,
cur_microbatch,
model,
input_tensor,
losses_reduced,
dtype,
disable_autocast,
)
_logger.debug("send fwd")
send_forward(output_tensor, tensor_shapes=send_tensor_shapes, dtype=dtype, async_comm=async_comm)
if not forward_only:
input_tensors.append(input_tensor)
output_tensors.append(output_tensor)
free_output_tensor(output_tensor, deallocate_pipeline_outputs)
# Before running 1F1B, need to receive first forward tensor.
# If all microbatches are run in warmup / cooldown phase, then no need to
# receive this tensor here.
if num_microbatches_remaining > 0:
_logger.debug("recv_forward before steady state start")
input_tensor: List[Union[None, torch.Tensor, FutureTensor]] = recv_forward(tensor_shapes=recv_tensor_shapes, dtype=dtype, async_comm=async_comm)
###################################################################################################################
# Run 1F1B in steady state.
###################################################################################################################
_logger.info("Steady phase")
for i in range(num_microbatches_remaining):
_logger.debug(f"steady iter: {i} / {num_microbatches_remaining}")
last_iteration: bool = i == (num_microbatches_remaining - 1)
cur_microbatch: Optional[torch.Tensor] = get_kth_microbatch(batch, i + num_warmup_microbatches)
output_tensor: Union[torch.Tensor, Sequence[torch.Tensor]] = forward_step(
forward_step_func,
cur_microbatch,
model,
input_tensor,
losses_reduced,
dtype,
disable_autocast,
)
if forward_only:
_logger.debug("send fwd")
send_forward(output_tensor, tensor_shapes=send_tensor_shapes, dtype=dtype, async_comm=async_comm)
if not last_iteration:
_logger.debug("receive fwd (last iteration)")
input_tensor = recv_forward(tensor_shapes=recv_tensor_shapes, dtype=dtype, async_comm=async_comm)
else:
_logger.debug("send fwd & receive bwd")
output_tensor_grad = send_forward_recv_backward(output_tensor, tensor_shapes=send_tensor_shapes, dtype=dtype, async_comm=async_comm)
# Add input_tensor and output_tensor to end of list.
input_tensors.append(input_tensor)
output_tensors.append(output_tensor)
free_output_tensor(output_tensor, deallocate_pipeline_outputs)
# Pop input_tensor and output_tensor from the start of the list for the backward pass.
input_tensor = input_tensors.pop(0)
output_tensor = output_tensors.pop(0)
input_tensor_grad = backward_step(
input_tensor,
output_tensor,
output_tensor_grad,
model_type=model_type,
grad_scaler=grad_scaler,
deallocate_pipeline_outputs=deallocate_pipeline_outputs,
)
if last_iteration:
input_tensor = None
_logger.debug("send bwd")
send_backward(input_tensor_grad, tensor_shapes=recv_tensor_shapes, dtype=dtype, async_comm=async_comm)
else:
_logger.debug("send bwd and receive fwd")
input_tensor = send_backward_recv_forward(input_tensor_grad, tensor_shapes=recv_tensor_shapes, dtype=dtype, async_comm=async_comm)
###################################################################################################################
# Run cooldown backward passes.
###################################################################################################################
_logger.info("Cooldown phase")
if not forward_only:
for i in range(num_warmup_microbatches):
_logger.debug(f"cooldown iter: {i} / {num_warmup_microbatches}")
input_tensor = input_tensors.pop(0)
output_tensor = output_tensors.pop(0)
_logger.debug("receive bwd")
output_tensor_grad = recv_backward(tensor_shapes=send_tensor_shapes, dtype=dtype, async_comm=async_comm)
input_tensor_grad = backward_step(
input_tensor,
output_tensor,
output_tensor_grad,
model_type=model_type,
grad_scaler=grad_scaler,
deallocate_pipeline_outputs=deallocate_pipeline_outputs,
)
_logger.debug("send bwd")
send_backward(input_tensor_grad, tensor_shapes=recv_tensor_shapes, dtype=dtype, async_comm=async_comm)
return losses_reduced
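# Usage sketch (illustrative only; the model, batch, and parallel_state/microbatch
# setup are assumed to have been initialized by the caller):
#
#     losses = forward_backward_pipelining_without_interleaving(
#         forward_step_func,
#         batch,
#         model,
#         forward_only=False,
#         tensor_shape=(sequence_length, micro_batch_size, hidden_size),
#         dtype=torch.float32,
#     )
#
# On the last pipeline stage `losses` holds one entry per microbatch; on every other
# stage it is an empty list.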
|
py | b408dfd3d3bcc6926be116f854dccb7f83445696 | """
Galaxy job handler, prepares, runs, tracks, and finishes Galaxy jobs
"""
import os
import time
import logging
import threading
from Queue import Queue, Empty
from sqlalchemy.sql.expression import and_, or_, select, func, true, null
from galaxy import model
from galaxy.util.sleeper import Sleeper
from galaxy.jobs import JobWrapper, TaskWrapper, JobDestination
from galaxy.jobs.mapper import JobNotReadyException
log = logging.getLogger( __name__ )
# States for running a job. These are NOT the same as data states
JOB_WAIT, JOB_ERROR, JOB_INPUT_ERROR, JOB_INPUT_DELETED, JOB_READY, JOB_DELETED, JOB_ADMIN_DELETED, JOB_USER_OVER_QUOTA = 'wait', 'error', 'input_error', 'input_deleted', 'ready', 'deleted', 'admin_deleted', 'user_over_quota'
DEFAULT_JOB_PUT_FAILURE_MESSAGE = 'Unable to run job due to a misconfiguration of the Galaxy job running system. Please contact a site administrator.'
class JobHandler( object ):
"""
Handle the preparation, running, tracking, and finishing of jobs
"""
def __init__( self, app ):
self.app = app
# The dispatcher launches the underlying job runners
self.dispatcher = DefaultJobDispatcher( app )
# Queues for starting and stopping jobs
self.job_queue = JobHandlerQueue( app, self.dispatcher )
self.job_stop_queue = JobHandlerStopQueue( app, self.dispatcher )
def start( self ):
self.job_queue.start()
def shutdown( self ):
self.job_queue.shutdown()
self.job_stop_queue.shutdown()
class JobHandlerQueue( object ):
"""
Job Handler's Internal Queue, this is what actually implements waiting for
jobs to be runnable and dispatching to a JobRunner.
"""
STOP_SIGNAL = object()
def __init__( self, app, dispatcher ):
"""Initializes the Job Handler Queue, creates (unstarted) monitoring thread"""
self.app = app
self.dispatcher = dispatcher
self.sa_session = app.model.context
self.track_jobs_in_database = self.app.config.track_jobs_in_database
# Initialize structures for handling job limits
self.__clear_job_count()
# Keep track of the pid that started the job manager, only it
# has valid threads
self.parent_pid = os.getpid()
# Contains new jobs. Note this is not used if track_jobs_in_database is True
self.queue = Queue()
# Contains jobs that are waiting (only use from monitor thread)
self.waiting_jobs = []
# Contains wrappers of jobs that are limited or ready (so they aren't created unnecessarily/multiple times)
self.job_wrappers = {}
# Helper for interruptable sleep
self.sleeper = Sleeper()
self.running = True
self.monitor_thread = threading.Thread( name="JobHandlerQueue.monitor_thread", target=self.__monitor )
self.monitor_thread.setDaemon( True )
def start( self ):
"""
Starts the JobHandler's thread after checking for any unhandled jobs.
"""
# Recover jobs at startup
self.__check_jobs_at_startup()
# Start the queue
self.monitor_thread.start()
log.info( "job handler queue started" )
def job_wrapper( self, job, use_persisted_destination=False ):
return JobWrapper( job, self, use_persisted_destination=use_persisted_destination )
def job_pair_for_id( self, id ):
job = self.sa_session.query( model.Job ).get( id )
return job, self.job_wrapper( job, use_persisted_destination=True )
def __check_jobs_at_startup( self ):
"""
Checks all jobs that are in the 'new', 'queued' or 'running' state in
the database and requeues or cleans up as necessary. Only run as the
job handler starts.
        If user activation is enforced, jobs belonging to inactive users are filtered out.
"""
jobs_at_startup = []
if self.track_jobs_in_database:
in_list = ( model.Job.states.QUEUED,
model.Job.states.RUNNING )
else:
in_list = ( model.Job.states.NEW,
model.Job.states.QUEUED,
model.Job.states.RUNNING )
if self.app.config.user_activation_on:
jobs_at_startup = self.sa_session.query( model.Job ).enable_eagerloads( False ) \
.outerjoin( model.User ) \
.filter( model.Job.state.in_( in_list ) &
( model.Job.handler == self.app.config.server_name ) &
or_( ( model.Job.user_id == null() ), ( model.User.active == true() ) ) ).all()
else:
jobs_at_startup = self.sa_session.query( model.Job ).enable_eagerloads( False ) \
.filter( model.Job.state.in_( in_list ) &
( model.Job.handler == self.app.config.server_name ) ).all()
for job in jobs_at_startup:
if not self.app.toolbox.has_tool( job.tool_id, job.tool_version, exact=True ):
log.warning( "(%s) Tool '%s' removed from tool config, unable to recover job" % ( job.id, job.tool_id ) )
self.job_wrapper( job ).fail( 'This tool was disabled before the job completed. Please contact your Galaxy administrator.' )
elif job.job_runner_name is not None and job.job_runner_external_id is None:
# This could happen during certain revisions of Galaxy where a runner URL was persisted before the job was dispatched to a runner.
log.debug( "(%s) Job runner assigned but no external ID recorded, adding to the job handler queue" % job.id )
job.job_runner_name = None
if self.track_jobs_in_database:
job.set_state( model.Job.states.NEW )
else:
self.queue.put( ( job.id, job.tool_id ) )
elif job.job_runner_name is not None and job.job_runner_external_id is not None and job.destination_id is None:
# This is the first start after upgrading from URLs to destinations, convert the URL to a destination and persist
job_wrapper = self.job_wrapper( job )
job_destination = self.dispatcher.url_to_destination(job.job_runner_name)
if job_destination.id is None:
job_destination.id = 'legacy_url'
job_wrapper.set_job_destination(job_destination, job.job_runner_external_id)
self.dispatcher.recover( job, job_wrapper )
log.info('(%s) Converted job from a URL to a destination and recovered' % (job.id))
elif job.job_runner_name is None:
# Never (fully) dispatched
log.debug( "(%s) No job runner assigned and job still in '%s' state, adding to the job handler queue" % ( job.id, job.state ) )
if self.track_jobs_in_database:
job.set_state( model.Job.states.NEW )
else:
self.queue.put( ( job.id, job.tool_id ) )
else:
# Already dispatched and running
job_wrapper = self.job_wrapper( job )
# Use the persisted destination as its params may differ from
# what's in the job_conf xml
job_destination = JobDestination(id=job.destination_id, runner=job.job_runner_name, params=job.destination_params)
# resubmits are not persisted (it's a good thing) so they
# should be added back to the in-memory destination on startup
try:
config_job_destination = self.app.job_config.get_destination( job.destination_id )
job_destination.resubmit = config_job_destination.resubmit
except KeyError:
log.warning( '(%s) Recovered destination id (%s) does not exist in job config (but this may be normal in the case of a dynamically generated destination)', job.id, job.destination_id )
job_wrapper.job_runner_mapper.cached_job_destination = job_destination
self.dispatcher.recover( job, job_wrapper )
if self.sa_session.dirty:
self.sa_session.flush()
def __monitor( self ):
"""
        Continually iterate the waiting jobs, checking if each is ready to
run and dispatching if so.
"""
while self.running:
try:
# If jobs are locked, there's nothing to monitor and we skip
# to the sleep.
if not self.app.job_manager.job_lock:
self.__monitor_step()
except:
log.exception( "Exception in monitor_step" )
# Sleep
self.sleeper.sleep( 1 )
def __monitor_step( self ):
"""
Called repeatedly by `monitor` to process waiting jobs. Gets any new
jobs (either from the database or from its own queue), then iterates
over all new and waiting jobs to check the state of the jobs each
depends on. If the job has dependencies that have not finished, it
goes to the waiting queue. If the job has dependencies with errors,
it is marked as having errors and removed from the queue. If the job
belongs to an inactive user it is ignored.
Otherwise, the job is dispatched.
"""
# Pull all new jobs from the queue at once
jobs_to_check = []
resubmit_jobs = []
if self.track_jobs_in_database:
# Clear the session so we get fresh states for job and all datasets
self.sa_session.expunge_all()
# Fetch all new jobs
hda_not_ready = self.sa_session.query(model.Job.id).enable_eagerloads(False) \
.join(model.JobToInputDatasetAssociation) \
.join(model.HistoryDatasetAssociation) \
.join(model.Dataset) \
.filter(and_( (model.Job.state == model.Job.states.NEW ),
or_( ( model.HistoryDatasetAssociation._state == model.HistoryDatasetAssociation.states.FAILED_METADATA ),
( model.HistoryDatasetAssociation.deleted == true() ),
( model.Dataset.state != model.Dataset.states.OK ),
( model.Dataset.deleted == true() ) ) ) ).subquery()
ldda_not_ready = self.sa_session.query(model.Job.id).enable_eagerloads(False) \
.join(model.JobToInputLibraryDatasetAssociation) \
.join(model.LibraryDatasetDatasetAssociation) \
.join(model.Dataset) \
.filter(and_((model.Job.state == model.Job.states.NEW),
or_((model.LibraryDatasetDatasetAssociation._state != null()),
(model.LibraryDatasetDatasetAssociation.deleted == true()),
(model.Dataset.state != model.Dataset.states.OK),
(model.Dataset.deleted == true())))).subquery()
if self.app.config.user_activation_on:
jobs_to_check = self.sa_session.query(model.Job).enable_eagerloads(False) \
.outerjoin( model.User ) \
.filter(and_((model.Job.state == model.Job.states.NEW),
or_((model.Job.user_id == null()), (model.User.active == true())),
(model.Job.handler == self.app.config.server_name),
~model.Job.table.c.id.in_(hda_not_ready),
~model.Job.table.c.id.in_(ldda_not_ready))) \
.order_by(model.Job.id).all()
else:
jobs_to_check = self.sa_session.query(model.Job).enable_eagerloads(False) \
.filter(and_((model.Job.state == model.Job.states.NEW),
(model.Job.handler == self.app.config.server_name),
~model.Job.table.c.id.in_(hda_not_ready),
~model.Job.table.c.id.in_(ldda_not_ready))) \
.order_by(model.Job.id).all()
# Fetch all "resubmit" jobs
resubmit_jobs = self.sa_session.query(model.Job).enable_eagerloads(False) \
.filter(and_((model.Job.state == model.Job.states.RESUBMITTED),
(model.Job.handler == self.app.config.server_name))) \
.order_by(model.Job.id).all()
else:
# Get job objects and append to watch queue for any which were
# previously waiting
for job_id in self.waiting_jobs:
jobs_to_check.append( self.sa_session.query( model.Job ).get( job_id ) )
try:
while 1:
message = self.queue.get_nowait()
if message is self.STOP_SIGNAL:
return
# Unpack the message
job_id, tool_id = message
# Get the job object and append to watch queue
jobs_to_check.append( self.sa_session.query( model.Job ).get( job_id ) )
except Empty:
pass
# Ensure that we get new job counts on each iteration
self.__clear_job_count()
# Check resubmit jobs first so that limits of new jobs will still be enforced
for job in resubmit_jobs:
log.debug( '(%s) Job was resubmitted and is being dispatched immediately', job.id )
# Reassemble resubmit job destination from persisted value
jw = self.job_wrapper( job )
jw.job_runner_mapper.cached_job_destination = JobDestination( id=job.destination_id, runner=job.job_runner_name, params=job.destination_params )
self.increase_running_job_count(job.user_id, jw.job_destination.id)
self.dispatcher.put( jw )
# Iterate over new and waiting jobs and look for any that are
# ready to run
new_waiting_jobs = []
for job in jobs_to_check:
try:
# Check the job's dependencies, requeue if they're not done.
# Some of these states will only happen when using the in-memory job queue
job_state = self.__check_job_state( job )
if job_state == JOB_WAIT:
new_waiting_jobs.append( job.id )
elif job_state == JOB_INPUT_ERROR:
log.info( "(%d) Job unable to run: one or more inputs in error state" % job.id )
elif job_state == JOB_INPUT_DELETED:
log.info( "(%d) Job unable to run: one or more inputs deleted" % job.id )
elif job_state == JOB_READY:
self.dispatcher.put( self.job_wrappers.pop( job.id ) )
log.info( "(%d) Job dispatched" % job.id )
elif job_state == JOB_DELETED:
log.info( "(%d) Job deleted by user while still queued" % job.id )
elif job_state == JOB_ADMIN_DELETED:
log.info( "(%d) Job deleted by admin while still queued" % job.id )
elif job_state == JOB_USER_OVER_QUOTA:
log.info( "(%d) User (%s) is over quota: job paused" % ( job.id, job.user_id ) )
job.set_state( model.Job.states.PAUSED )
for dataset_assoc in job.output_datasets + job.output_library_datasets:
dataset_assoc.dataset.dataset.state = model.Dataset.states.PAUSED
dataset_assoc.dataset.info = "Execution of this dataset's job is paused because you were over your disk quota at the time it was ready to run"
self.sa_session.add( dataset_assoc.dataset.dataset )
self.sa_session.add( job )
elif job_state == JOB_ERROR:
log.error( "(%d) Error checking job readiness" % job.id )
else:
log.error( "(%d) Job in unknown state '%s'" % ( job.id, job_state ) )
new_waiting_jobs.append( job.id )
except Exception:
log.exception( "failure running job %d" % job.id )
# Update the waiting list
if not self.track_jobs_in_database:
self.waiting_jobs = new_waiting_jobs
# Remove cached wrappers for any jobs that are no longer being tracked
for id in self.job_wrappers.keys():
if id not in new_waiting_jobs:
del self.job_wrappers[id]
# Flush, if we updated the state
self.sa_session.flush()
# Done with the session
self.sa_session.remove()
def __check_job_state( self, job ):
"""
Check if a job is ready to run by verifying that each of its input
datasets is ready (specifically in the OK state). If any input dataset
has an error, fail the job and return JOB_INPUT_ERROR. If any input
dataset is deleted, fail the job and return JOB_INPUT_DELETED. If all
input datasets are in OK state, return JOB_READY indicating that the
job can be dispatched. Otherwise, return JOB_WAIT indicating that input
datasets are still being prepared.
"""
if not self.track_jobs_in_database:
in_memory_not_ready_state = self.__verify_in_memory_job_inputs( job )
if in_memory_not_ready_state:
return in_memory_not_ready_state
# Else, if tracking in the database, job.state is guaranteed to be NEW and
# the inputs are guaranteed to be OK.
# Create the job wrapper so that the destination can be set
job_id = job.id
job_wrapper = self.job_wrappers.get( job_id, None )
if not job_wrapper:
job_wrapper = self.job_wrapper( job )
self.job_wrappers[ job_id ] = job_wrapper
# If state == JOB_READY, assume job_destination also set - otherwise
# in case of various error or cancelled states do not assume
# destination has been set.
state, job_destination = self.__verify_job_ready( job, job_wrapper )
if state == JOB_READY:
# PASS. increase usage by one job (if caching) so that multiple jobs aren't dispatched on this queue iteration
self.increase_running_job_count(job.user_id, job_destination.id )
return state
def __verify_job_ready( self, job, job_wrapper ):
""" Compute job destination and verify job is ready at that
destination by checking job limits and quota. If this method
return a job state of JOB_READY - it MUST also return a job
destination.
"""
job_destination = None
try:
assert job_wrapper.tool is not None, 'This tool was disabled before the job completed. Please contact your Galaxy administrator.'
# Cause the job_destination to be set and cached by the mapper
job_destination = job_wrapper.job_destination
except AssertionError as e:
log.warning( "(%s) Tool '%s' removed from tool config, unable to run job" % ( job.id, job.tool_id ) )
job_wrapper.fail( e )
return JOB_ERROR, job_destination
except JobNotReadyException as e:
job_state = e.job_state or JOB_WAIT
return job_state, None
except Exception as e:
failure_message = getattr( e, 'failure_message', DEFAULT_JOB_PUT_FAILURE_MESSAGE )
if failure_message == DEFAULT_JOB_PUT_FAILURE_MESSAGE:
log.exception( 'Failed to generate job destination' )
else:
log.debug( "Intentionally failing job with message (%s)" % failure_message )
job_wrapper.fail( failure_message )
return JOB_ERROR, job_destination
# job is ready to run, check limits
# TODO: these checks should be refactored to minimize duplication and made more modular/pluggable
state = self.__check_destination_jobs( job, job_wrapper )
if state == JOB_READY:
state = self.__check_user_jobs( job, job_wrapper )
if state == JOB_READY and self.app.config.enable_quotas:
quota = self.app.quota_agent.get_quota( job.user )
if quota is not None:
try:
usage = self.app.quota_agent.get_usage( user=job.user, history=job.history )
if usage > quota:
return JOB_USER_OVER_QUOTA, job_destination
except AssertionError as e:
pass # No history, should not happen with an anon user
return state, job_destination
def __verify_in_memory_job_inputs( self, job ):
""" Perform the same checks that happen via SQL for in-memory managed
jobs.
"""
if job.state == model.Job.states.DELETED:
return JOB_DELETED
elif job.state == model.Job.states.ERROR:
return JOB_ADMIN_DELETED
for dataset_assoc in job.input_datasets + job.input_library_datasets:
idata = dataset_assoc.dataset
if not idata:
continue
# don't run jobs for which the input dataset was deleted
if idata.deleted:
self.job_wrappers.pop(job.id, self.job_wrapper( job )).fail( "input data %s (file: %s) was deleted before the job started" % ( idata.hid, idata.file_name ) )
return JOB_INPUT_DELETED
# an error in the input data causes us to bail immediately
elif idata.state == idata.states.ERROR:
self.job_wrappers.pop(job.id, self.job_wrapper( job )).fail( "input data %s is in error state" % ( idata.hid ) )
return JOB_INPUT_ERROR
elif idata.state == idata.states.FAILED_METADATA:
self.job_wrappers.pop(job.id, self.job_wrapper( job )).fail( "input data %s failed to properly set metadata" % ( idata.hid ) )
return JOB_INPUT_ERROR
elif idata.state != idata.states.OK and not ( idata.state == idata.states.SETTING_METADATA and job.tool_id is not None and job.tool_id == self.app.datatypes_registry.set_external_metadata_tool.id ):
# need to requeue
return JOB_WAIT
# All inputs ready to go.
return None
def __clear_job_count( self ):
self.user_job_count = None
self.user_job_count_per_destination = None
self.total_job_count_per_destination = None
def get_user_job_count(self, user_id):
self.__cache_user_job_count()
# This could have been incremented by a previous job dispatched on this iteration, even if we're not caching
rval = self.user_job_count.get(user_id, 0)
if not self.app.config.cache_user_job_count:
result = self.sa_session.execute(select([func.count(model.Job.table.c.id)])
.where(and_(model.Job.table.c.state.in_((model.Job.states.QUEUED,
model.Job.states.RUNNING,
model.Job.states.RESUBMITTED)),
(model.Job.table.c.user_id == user_id))))
for row in result:
# there should only be one row
rval += row[0]
return rval
def __cache_user_job_count( self ):
# Cache the job count if necessary
if self.user_job_count is None and self.app.config.cache_user_job_count:
self.user_job_count = {}
query = self.sa_session.execute(select([model.Job.table.c.user_id, func.count(model.Job.table.c.user_id)])
.where(and_(model.Job.table.c.state.in_((model.Job.states.QUEUED,
model.Job.states.RUNNING,
model.Job.states.RESUBMITTED)),
(model.Job.table.c.user_id != null())))
.group_by(model.Job.table.c.user_id))
for row in query:
self.user_job_count[row[0]] = row[1]
elif self.user_job_count is None:
self.user_job_count = {}
def get_user_job_count_per_destination(self, user_id):
self.__cache_user_job_count_per_destination()
cached = self.user_job_count_per_destination.get(user_id, {})
if self.app.config.cache_user_job_count:
rval = cached
else:
# The cached count is still used even when we're not caching, it is
# incremented when a job is run by this handler to ensure that
# multiple jobs can't get past the limits in one iteration of the
# queue.
rval = {}
rval.update(cached)
result = self.sa_session.execute(select([model.Job.table.c.destination_id, func.count(model.Job.table.c.destination_id).label('job_count')])
.where(and_(model.Job.table.c.state.in_((model.Job.states.QUEUED, model.Job.states.RUNNING)), (model.Job.table.c.user_id == user_id)))
.group_by(model.Job.table.c.destination_id))
for row in result:
# Add the count from the database to the cached count
rval[row['destination_id']] = rval.get(row['destination_id'], 0) + row['job_count']
return rval
def __cache_user_job_count_per_destination(self):
# Cache the job count if necessary
if self.user_job_count_per_destination is None and self.app.config.cache_user_job_count:
self.user_job_count_per_destination = {}
result = self.sa_session.execute(select([model.Job.table.c.user_id, model.Job.table.c.destination_id, func.count(model.Job.table.c.user_id).label('job_count')])
.where(and_(model.Job.table.c.state.in_((model.Job.states.QUEUED, model.Job.states.RUNNING))))
.group_by(model.Job.table.c.user_id, model.Job.table.c.destination_id))
for row in result:
if row['user_id'] not in self.user_job_count_per_destination:
self.user_job_count_per_destination[row['user_id']] = {}
self.user_job_count_per_destination[row['user_id']][row['destination_id']] = row['job_count']
elif self.user_job_count_per_destination is None:
self.user_job_count_per_destination = {}
def increase_running_job_count(self, user_id, destination_id):
if self.app.job_config.limits.registered_user_concurrent_jobs or \
self.app.job_config.limits.anonymous_user_concurrent_jobs or \
self.app.job_config.limits.destination_user_concurrent_jobs:
if self.user_job_count is None:
self.user_job_count = {}
if self.user_job_count_per_destination is None:
self.user_job_count_per_destination = {}
self.user_job_count[user_id] = self.user_job_count.get(user_id, 0) + 1
if user_id not in self.user_job_count_per_destination:
self.user_job_count_per_destination[user_id] = {}
self.user_job_count_per_destination[user_id][destination_id] = self.user_job_count_per_destination[user_id].get(destination_id, 0) + 1
if self.app.job_config.limits.destination_total_concurrent_jobs:
if self.total_job_count_per_destination is None:
self.total_job_count_per_destination = {}
self.total_job_count_per_destination[destination_id] = self.total_job_count_per_destination.get(destination_id, 0) + 1
def __check_user_jobs( self, job, job_wrapper ):
# TODO: Update output datasets' _state = LIMITED or some such new
# state, so the UI can reflect what jobs are waiting due to concurrency
# limits
if job.user:
# Check the hard limit first
if self.app.job_config.limits.registered_user_concurrent_jobs:
count = self.get_user_job_count(job.user_id)
# Check the user's number of dispatched jobs against the overall limit
if count >= self.app.job_config.limits.registered_user_concurrent_jobs:
return JOB_WAIT
# If we pass the hard limit, also check the per-destination count
id = job_wrapper.job_destination.id
count_per_id = self.get_user_job_count_per_destination(job.user_id)
if id in self.app.job_config.limits.destination_user_concurrent_jobs:
count = count_per_id.get(id, 0)
# Check the user's number of dispatched jobs in the assigned destination id against the limit for that id
if count >= self.app.job_config.limits.destination_user_concurrent_jobs[id]:
return JOB_WAIT
# If we pass the destination limit (if there is one), also check limits on any tags (if any)
if job_wrapper.job_destination.tags:
for tag in job_wrapper.job_destination.tags:
# Check each tag for this job's destination
if tag in self.app.job_config.limits.destination_user_concurrent_jobs:
# Only if there's a limit defined for this tag
count = 0
for id in [ d.id for d in self.app.job_config.get_destinations(tag) ]:
# Add up the aggregate job total for this tag
count += count_per_id.get(id, 0)
if count >= self.app.job_config.limits.destination_user_concurrent_jobs[tag]:
return JOB_WAIT
elif job.galaxy_session:
# Anonymous users only get the hard limit
if self.app.job_config.limits.anonymous_user_concurrent_jobs:
count = self.sa_session.query( model.Job ).enable_eagerloads( False ) \
.filter( and_( model.Job.session_id == job.galaxy_session.id,
or_( model.Job.state == model.Job.states.RUNNING,
model.Job.state == model.Job.states.QUEUED ) ) ).count()
if count >= self.app.job_config.limits.anonymous_user_concurrent_jobs:
return JOB_WAIT
else:
log.warning( 'Job %s is not associated with a user or session so job concurrency limit cannot be checked.' % job.id )
return JOB_READY
def __cache_total_job_count_per_destination( self ):
# Cache the job count if necessary
if self.total_job_count_per_destination is None:
self.total_job_count_per_destination = {}
result = self.sa_session.execute(select([model.Job.table.c.destination_id, func.count(model.Job.table.c.destination_id).label('job_count')])
.where(and_(model.Job.table.c.state.in_((model.Job.states.QUEUED, model.Job.states.RUNNING))))
.group_by(model.Job.table.c.destination_id))
for row in result:
self.total_job_count_per_destination[row['destination_id']] = row['job_count']
def get_total_job_count_per_destination(self):
self.__cache_total_job_count_per_destination()
# Always use caching (at worst a job will have to wait one iteration,
# and this would be more fair anyway as it ensures FIFO scheduling,
# insofar as FIFO would be fair...)
return self.total_job_count_per_destination
def __check_destination_jobs( self, job, job_wrapper ):
if self.app.job_config.limits.destination_total_concurrent_jobs:
id = job_wrapper.job_destination.id
count_per_id = self.get_total_job_count_per_destination()
if id in self.app.job_config.limits.destination_total_concurrent_jobs:
count = count_per_id.get(id, 0)
# Check the number of dispatched jobs in the assigned destination id against the limit for that id
if count >= self.app.job_config.limits.destination_total_concurrent_jobs[id]:
return JOB_WAIT
# If we pass the destination limit (if there is one), also check limits on any tags (if any)
if job_wrapper.job_destination.tags:
for tag in job_wrapper.job_destination.tags:
# Check each tag for this job's destination
if tag in self.app.job_config.limits.destination_total_concurrent_jobs:
# Only if there's a limit defined for this tag
count = 0
for id in [ d.id for d in self.app.job_config.get_destinations(tag) ]:
# Add up the aggregate job total for this tag
count += count_per_id.get(id, 0)
if count >= self.app.job_config.limits.destination_total_concurrent_jobs[tag]:
return JOB_WAIT
return JOB_READY
def put( self, job_id, tool_id ):
"""Add a job to the queue (by job identifier)"""
if not self.track_jobs_in_database:
self.queue.put( ( job_id, tool_id ) )
self.sleeper.wake()
def shutdown( self ):
"""Attempts to gracefully shut down the worker thread"""
if self.parent_pid != os.getpid():
# We're not the real job queue, do nothing
return
else:
log.info( "sending stop signal to worker thread" )
self.running = False
if not self.app.config.track_jobs_in_database:
self.queue.put( self.STOP_SIGNAL )
self.sleeper.wake()
log.info( "job handler queue stopped" )
self.dispatcher.shutdown()
class JobHandlerStopQueue( object ):
"""
A queue for jobs which need to be terminated prematurely.
"""
STOP_SIGNAL = object()
def __init__( self, app, dispatcher ):
self.app = app
self.dispatcher = dispatcher
self.sa_session = app.model.context
# Keep track of the pid that started the job manager, only it
# has valid threads
self.parent_pid = os.getpid()
# Contains new jobs. Note this is not used if track_jobs_in_database is True
self.queue = Queue()
# Contains jobs that are waiting (only use from monitor thread)
self.waiting = []
# Helper for interruptable sleep
self.sleeper = Sleeper()
self.running = True
self.monitor_thread = threading.Thread( name="JobHandlerStopQueue.monitor_thread", target=self.monitor )
self.monitor_thread.setDaemon( True )
self.monitor_thread.start()
log.info( "job handler stop queue started" )
def monitor( self ):
"""
        Continually iterate the waiting jobs, stopping any that are found.
"""
# HACK: Delay until after forking, we need a way to do post fork notification!!!
time.sleep( 10 )
while self.running:
try:
self.monitor_step()
except:
log.exception( "Exception in monitor_step" )
# Sleep
self.sleeper.sleep( 1 )
def monitor_step( self ):
"""
Called repeatedly by `monitor` to stop jobs.
"""
# Pull all new jobs from the queue at once
jobs_to_check = []
if self.app.config.track_jobs_in_database:
# Clear the session so we get fresh states for job and all datasets
self.sa_session.expunge_all()
# Fetch all new jobs
newly_deleted_jobs = self.sa_session.query( model.Job ).enable_eagerloads( False ) \
.filter( ( model.Job.state == model.Job.states.DELETED_NEW ) &
( model.Job.handler == self.app.config.server_name ) ).all()
for job in newly_deleted_jobs:
jobs_to_check.append( ( job, job.stderr ) )
# Also pull from the queue (in the case of Administrative stopped jobs)
try:
while 1:
message = self.queue.get_nowait()
if message is self.STOP_SIGNAL:
return
# Unpack the message
job_id, error_msg = message
# Get the job object and append to watch queue
jobs_to_check.append( ( self.sa_session.query( model.Job ).get( job_id ), error_msg ) )
except Empty:
pass
for job, error_msg in jobs_to_check:
if ( job.state not in
( job.states.DELETED_NEW,
job.states.DELETED ) and
job.finished ):
# terminated before it got here
log.debug('Job %s already finished, not deleting or stopping', job.id)
continue
final_state = job.states.DELETED
if error_msg is not None:
final_state = job.states.ERROR
job.info = error_msg
job.set_final_state( final_state )
self.sa_session.add( job )
self.sa_session.flush()
if job.job_runner_name is not None:
# tell the dispatcher to stop the job
self.dispatcher.stop( job )
def put( self, job_id, error_msg=None ):
if not self.app.config.track_jobs_in_database:
self.queue.put( ( job_id, error_msg ) )
def shutdown( self ):
"""Attempts to gracefully shut down the worker thread"""
if self.parent_pid != os.getpid():
# We're not the real job queue, do nothing
return
else:
log.info( "sending stop signal to worker thread" )
self.running = False
if not self.app.config.track_jobs_in_database:
self.queue.put( self.STOP_SIGNAL )
self.sleeper.wake()
log.info( "job handler stop queue stopped" )
class DefaultJobDispatcher( object ):
def __init__( self, app ):
self.app = app
self.job_runners = self.app.job_config.get_job_runner_plugins( self.app.config.server_name )
# Once plugins are loaded, all job destinations that were created from
# URLs can have their URL params converted to the destination's param
# dict by the plugin.
self.app.job_config.convert_legacy_destinations(self.job_runners)
log.debug( "Loaded job runners plugins: " + ':'.join( self.job_runners.keys() ) )
def __get_runner_name( self, job_wrapper ):
if job_wrapper.can_split():
runner_name = "tasks"
else:
runner_name = job_wrapper.job_destination.runner
return runner_name
def url_to_destination( self, url ):
"""This is used by the runner mapper (a.k.a. dynamic runner) and
recovery methods to have runners convert URLs to destinations.
New-style runner plugin IDs must match the URL's scheme for this to work.
"""
runner_name = url.split(':', 1)[0]
try:
return self.job_runners[runner_name].url_to_destination(url)
except Exception as e:
log.exception("Unable to convert legacy job runner URL '%s' to job destination, destination will be the '%s' runner with no params: %s" % (url, runner_name, e))
return JobDestination(runner=runner_name)
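    # Illustrative example (the legacy URL form and the `dispatcher` instance name are
    # assumptions):
    #
    #     dest = dispatcher.url_to_destination( "local:///" )
    #
    # routes to the runner plugin registered as "local"; if conversion fails, a bare
    # JobDestination(runner="local") is returned instead.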
def put( self, job_wrapper ):
runner_name = self.__get_runner_name( job_wrapper )
try:
if isinstance(job_wrapper, TaskWrapper):
# DBTODO Refactor
log.debug( "(%s) Dispatching task %s to %s runner" % ( job_wrapper.job_id, job_wrapper.task_id, runner_name ) )
else:
log.debug( "(%s) Dispatching to %s runner" % ( job_wrapper.job_id, runner_name ) )
self.job_runners[runner_name].put( job_wrapper )
except KeyError:
log.error( 'put(): (%s) Invalid job runner: %s' % ( job_wrapper.job_id, runner_name ) )
job_wrapper.fail( DEFAULT_JOB_PUT_FAILURE_MESSAGE )
def stop( self, job ):
"""
Stop the given job. The input variable job may be either a Job or a Task.
"""
# The Job and Task classes have been modified so that their accessors
# will return the appropriate value.
# Note that Jobs and Tasks have runner_names, which are distinct from
# the job_runner_name and task_runner_name.
if ( isinstance( job, model.Job ) ):
log.debug( "Stopping job %d:", job.get_id() )
elif( isinstance( job, model.Task ) ):
log.debug( "Stopping job %d, task %d"
% ( job.get_job().get_id(), job.get_id() ) )
else:
log.debug( "Unknown job to stop" )
# The runner name is not set until the job has started.
# If we're stopping a task, then the runner_name may be
# None, in which case it hasn't been scheduled.
if ( job.get_job_runner_name() is not None ):
runner_name = ( job.get_job_runner_name().split( ":", 1 ) )[ 0 ]
if ( isinstance( job, model.Job ) ):
log.debug( "stopping job %d in %s runner" % ( job.get_id(), runner_name ) )
elif ( isinstance( job, model.Task ) ):
log.debug( "Stopping job %d, task %d in %s runner"
% ( job.get_job().get_id(), job.get_id(), runner_name ) )
try:
self.job_runners[runner_name].stop_job( job )
except KeyError:
log.error( 'stop(): (%s) Invalid job runner: %s' % ( job.get_id(), runner_name ) )
# Job and output dataset states have already been updated, so nothing is done here.
def recover( self, job, job_wrapper ):
runner_name = ( job.job_runner_name.split(":", 1) )[0]
log.debug( "recovering job %d in %s runner" % ( job.get_id(), runner_name ) )
try:
self.job_runners[runner_name].recover( job, job_wrapper )
except KeyError:
log.error( 'recover(): (%s) Invalid job runner: %s' % ( job_wrapper.job_id, runner_name ) )
job_wrapper.fail( DEFAULT_JOB_PUT_FAILURE_MESSAGE )
def shutdown( self ):
for runner in self.job_runners.itervalues():
runner.shutdown()
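# Usage sketch (illustrative only; `app` is assumed to be a fully configured Galaxy
# application object):
#
#     handler = JobHandler( app )
#     handler.start()       # starts the job queue's monitor thread
#     ...
#     handler.shutdown()    # stops both the job queue and the stop queue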
|
py | b408e13a5b28b5264fda4cb870c623d4ae68bb8e | #!/usr/bin/python
from __future__ import print_function
import cProfile
import copy
import sys
import gc
import datetime
import logging
logging.basicConfig(level=logging.INFO)
# own modules
from datalogger import DataLoggerWeb as DataLoggerWeb
#from commons import *
def main(project, tablename, datestring, datalogger):
#caches = datalogger.get_caches(datestring)
caches = datalogger.get_caches(project, tablename, datestring)
suffix = "%s/%s/%s\t" % (datestring, project, tablename)
if caches["tsa"]["raw"] is None:
print(suffix, "Nothing could be done without RAW data")
else:
#print("RAW filename : %s" % caches["tsa"]["raw"])
if len(caches["tsa"]["keys"]) == 0:
print(suffix, "TSA Archive missing, calling get_tsa and get_tsastats")
#datalogger.get_tsa(project, tablename, datestring)
datalogger.get_tsastats(project, tablename, datestring)
else:
#print("TSA filename : %s" % caches["tsa"]["keys"])
if len(caches["tsastat"]["keys"]) == 0:
print(suffix, "TSASTAT Archive missing, calling get_tsastats")
datalogger.get_tsastats(project, tablename, datestring)
else:
#print("TSASTAT filename : %s" % caches["tsastat"]["keys"])
if len(caches["ts"]["keys"]) == 0:
print(suffix, "there are no ts archives, something went wrong, or tsa is completely empty, calling get_tsastats")
datalogger.get_tsastats(project, tablename, datestring)
else:
#print("TS filename : %s" % len(caches["ts"]["keys"]))
#print("TSSTAT filename : %s" % len(caches["tsstat"]["keys"]))
print(suffix, "All fine")
if __name__ == "__main__":
datalogger = DataLoggerWeb()
#for datestring in DataLogger.datewalker("2015-09-01", datalogger.get_last_business_day_datestring()):
yesterday_datestring = (datetime.date.today() - datetime.timedelta(1)).isoformat()
    four_weeks_ago_datestring = (datetime.date.today() - datetime.timedelta(28)).isoformat()
    for datestring in datalogger.get_datewalk(four_weeks_ago_datestring, yesterday_datestring):
for project in datalogger.get_projects():
for tablename in datalogger.get_tablenames(project):
#datalogger = DataLogger(BASEDIR, project, tablename)
main(project, tablename, datestring, datalogger)
#cProfile.run("main()")
|
py | b408e20667c700d1f8eb23b073131a44327d9ad9 | """
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Any, Dict, List, Optional, TypeVar, Union, overload, TYPE_CHECKING
from .permissions import Permissions
from .errors import InvalidArgument
from .colour import Colour
from .mixins import Hashable
from .utils import snowflake_time, _get_as_snowflake, MISSING
__all__ = (
"RoleTags",
"Role",
)
if TYPE_CHECKING:
import datetime
from .types.role import (
Role as RolePayload,
RoleTags as RoleTagPayload,
)
from .types.guild import RolePositionUpdate
from .guild import Guild
from .member import Member
from .state import ConnectionState
class RoleTags:
"""Represents tags on a role.
A role tag is a piece of extra information attached to a managed role
that gives it context for the reason the role is managed.
    While this can be accessed, a useful interface is also provided in the
    :class:`Role` and :class:`Guild` classes.
.. versionadded:: 1.6
Attributes
------------
bot_id: Optional[:class:`int`]
The bot's user ID that manages this role.
integration_id: Optional[:class:`int`]
The integration ID that manages the role.
"""
__slots__ = (
"bot_id",
"integration_id",
"_premium_subscriber",
)
def __init__(self, data: RoleTagPayload):
self.bot_id: Optional[int] = _get_as_snowflake(data, "bot_id")
self.integration_id: Optional[int] = _get_as_snowflake(data, "integration_id")
# NOTE: The API returns "null" for this if it's valid, which corresponds to None.
# This is different from other fields where "null" means "not there".
# So in this case, a value of None is the same as True.
# Which means we would need a different sentinel.
self._premium_subscriber: Optional[Any] = data.get("premium_subscriber", MISSING)
def is_bot_managed(self) -> bool:
""":class:`bool`: Whether the role is associated with a bot."""
return self.bot_id is not None
def is_premium_subscriber(self) -> bool:
""":class:`bool`: Whether the role is the premium subscriber, AKA "boost", role for the guild."""
return self._premium_subscriber is None
def is_integration(self) -> bool:
""":class:`bool`: Whether the role is managed by an integration."""
return self.integration_id is not None
def __repr__(self) -> str:
return (
f"<RoleTags bot_id={self.bot_id} integration_id={self.integration_id} "
f"premium_subscriber={self.is_premium_subscriber()}>"
)
R = TypeVar("R", bound="Role")
class Role(Hashable):
"""Represents a Discord role in a :class:`Guild`.
.. container:: operations
.. describe:: x == y
Checks if two roles are equal.
.. describe:: x != y
Checks if two roles are not equal.
.. describe:: x > y
Checks if a role is higher than another in the hierarchy.
.. describe:: x < y
Checks if a role is lower than another in the hierarchy.
.. describe:: x >= y
Checks if a role is higher or equal to another in the hierarchy.
.. describe:: x <= y
Checks if a role is lower or equal to another in the hierarchy.
.. describe:: hash(x)
Return the role's hash.
.. describe:: str(x)
Returns the role's name.
.. describe:: int(x)
Returns the role's ID.
Attributes
----------
id: :class:`int`
The ID for the role.
name: :class:`str`
The name of the role.
guild: :class:`Guild`
The guild the role belongs to.
hoist: :class:`bool`
Indicates if the role will be displayed separately from other members.
position: :class:`int`
The position of the role. This number is usually positive. The bottom
role has a position of 0.
.. warning::
Multiple roles can have the same position number. As a consequence
of this, comparing via role position is prone to subtle bugs if
checking for role hierarchy. The recommended and correct way to
compare for roles in the hierarchy is using the comparison
operators on the role objects themselves.
managed: :class:`bool`
Indicates if the role is managed by the guild through some form of
integrations such as Twitch.
mentionable: :class:`bool`
Indicates if the role can be mentioned by users.
tags: Optional[:class:`RoleTags`]
The role tags associated with this role.
"""
__slots__ = (
"id",
"name",
"_permissions",
"_colour",
"position",
"managed",
"mentionable",
"hoist",
"guild",
"tags",
"_state",
)
def __init__(self, *, guild: Guild, state: ConnectionState, data: RolePayload):
self.guild: Guild = guild
self._state: ConnectionState = state
self.id: int = int(data["id"])
self._update(data)
def __str__(self) -> str:
return self.name
def __int__(self) -> int:
return self.id
def __repr__(self) -> str:
return f"<Role id={self.id} name={self.name!r}>"
def __lt__(self: R, other: R) -> bool:
if not isinstance(other, Role) or not isinstance(self, Role):
return NotImplemented
if self.guild != other.guild:
raise RuntimeError("cannot compare roles from two different guilds.")
# the @everyone role is always the lowest role in hierarchy
guild_id = self.guild.id
if self.id == guild_id:
# everyone_role < everyone_role -> False
return other.id != guild_id
if self.position < other.position:
return True
if self.position == other.position:
return int(self.id) > int(other.id)
return False
def __le__(self: R, other: R) -> bool:
r = Role.__lt__(other, self)
if r is NotImplemented:
return NotImplemented
return not r
def __gt__(self: R, other: R) -> bool:
return Role.__lt__(other, self)
def __ge__(self: R, other: R) -> bool:
r = Role.__lt__(self, other)
if r is NotImplemented:
return NotImplemented
return not r
def _update(self, data: RolePayload):
self.name: str = data["name"]
self._permissions: int = int(data.get("permissions", 0))
self.position: int = data.get("position", 0)
self._colour: int = data.get("color", 0)
self.hoist: bool = data.get("hoist", False)
self.managed: bool = data.get("managed", False)
self.mentionable: bool = data.get("mentionable", False)
self.tags: Optional[RoleTags]
try:
self.tags = RoleTags(data["tags"])
except KeyError:
self.tags = None
def is_default(self) -> bool:
""":class:`bool`: Checks if the role is the default role."""
return self.guild.id == self.id
def is_bot_managed(self) -> bool:
""":class:`bool`: Whether the role is associated with a bot.
.. versionadded:: 1.6
"""
return self.tags is not None and self.tags.is_bot_managed()
def is_premium_subscriber(self) -> bool:
""":class:`bool`: Whether the role is the premium subscriber, AKA "boost", role for the guild.
.. versionadded:: 1.6
"""
return self.tags is not None and self.tags.is_premium_subscriber()
def is_integration(self) -> bool:
""":class:`bool`: Whether the role is managed by an integration.
.. versionadded:: 1.6
"""
return self.tags is not None and self.tags.is_integration()
def is_assignable(self) -> bool:
""":class:`bool`: Whether the role is able to be assigned or removed by the bot.
.. versionadded:: 2.0
"""
me = self.guild.me
return not self.is_default() and not self.managed and (me.top_role > self or me.id == self.guild.owner_id)
@property
def permissions(self) -> Permissions:
""":class:`Permissions`: Returns the role's permissions."""
return Permissions(self._permissions)
@property
def colour(self) -> Colour:
""":class:`Colour`: Returns the role colour. An alias exists under ``color``."""
return Colour(self._colour)
@property
def color(self) -> Colour:
""":class:`Colour`: Returns the role color. An alias exists under ``colour``."""
return self.colour
@property
def created_at(self) -> datetime.datetime:
""":class:`datetime.datetime`: Returns the role's creation time in UTC."""
return snowflake_time(self.id)
@property
def mention(self) -> str:
""":class:`str`: Returns a string that allows you to mention a role."""
return f"<@&{self.id}>"
@property
def members(self) -> List[Member]:
"""List[:class:`Member`]: Returns all the members with this role."""
all_members = self.guild.members
if self.is_default():
return all_members
role_id = self.id
return [member for member in all_members if member._roles.has(role_id)]
async def _move(self, position: int, reason: Optional[str]) -> None:
if position <= 0:
raise InvalidArgument("Cannot move role to position 0 or below")
if self.is_default():
raise InvalidArgument("Cannot move default role")
if self.position == position:
return # Save discord the extra request.
http = self._state.http
change_range = range(min(self.position, position), max(self.position, position) + 1)
roles = [r.id for r in self.guild.roles[1:] if r.position in change_range and r.id != self.id]
if self.position > position:
roles.insert(0, self.id)
else:
roles.append(self.id)
payload: List[RolePositionUpdate] = [{"id": z[0], "position": z[1]} for z in zip(roles, change_range)]
await http.move_role_position(self.guild.id, payload, reason=reason)
async def edit(
self,
*,
name: str = MISSING,
permissions: Permissions = MISSING,
colour: Union[Colour, int] = MISSING,
color: Union[Colour, int] = MISSING,
hoist: bool = MISSING,
mentionable: bool = MISSING,
position: int = MISSING,
reason: Optional[str] = MISSING,
) -> Optional[Role]:
"""|coro|
Edits the role.
You must have the :attr:`~Permissions.manage_roles` permission to
use this.
All fields are optional.
.. versionchanged:: 1.4
Can now pass ``int`` to ``colour`` keyword-only parameter.
.. versionchanged:: 2.0
Edits are no longer in-place, the newly edited role is returned instead.
Parameters
-----------
name: :class:`str`
The new role name to change to.
permissions: :class:`Permissions`
The new permissions to change to.
colour: Union[:class:`Colour`, :class:`int`]
The new colour to change to. (aliased to color as well)
hoist: :class:`bool`
Indicates if the role should be shown separately in the member list.
mentionable: :class:`bool`
Indicates if the role should be mentionable by others.
position: :class:`int`
The new role's position. This must be below your top role's
position or it will fail.
reason: Optional[:class:`str`]
The reason for editing this role. Shows up on the audit log.
Raises
-------
Forbidden
You do not have permissions to change the role.
HTTPException
Editing the role failed.
InvalidArgument
An invalid position was given or the default
role was asked to be moved.
Returns
--------
:class:`Role`
The newly edited role.
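        Example
        --------
        A minimal, illustrative sketch (assumes ``role`` is an existing role and the
        bot has the :attr:`~Permissions.manage_roles` permission)::
            role = await role.edit(name="Moderators", hoist=True, reason="Renaming role")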
"""
if position is not MISSING:
await self._move(position, reason=reason)
payload: Dict[str, Any] = {}
if color is not MISSING:
colour = color
if colour is not MISSING:
if isinstance(colour, int):
payload["color"] = colour
else:
payload["color"] = colour.value
if name is not MISSING:
payload["name"] = name
if permissions is not MISSING:
payload["permissions"] = permissions.value
if hoist is not MISSING:
payload["hoist"] = hoist
if mentionable is not MISSING:
payload["mentionable"] = mentionable
data = await self._state.http.edit_role(self.guild.id, self.id, reason=reason, **payload)
return Role(guild=self.guild, data=data, state=self._state)
async def delete(self, *, reason: Optional[str] = None) -> None:
"""|coro|
Deletes the role.
You must have the :attr:`~Permissions.manage_roles` permission to
use this.
Parameters
-----------
reason: Optional[:class:`str`]
The reason for deleting this role. Shows up on the audit log.
Raises
--------
Forbidden
You do not have permissions to delete the role.
HTTPException
Deleting the role failed.
"""
await self._state.http.delete_role(self.guild.id, self.id, reason=reason)
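# Illustrative note: as recommended in the class docstring, role hierarchy checks should
# use the comparison operators rather than raw positions, e.g. (assuming `role_a` and
# `role_b` are Role objects from the same guild):
#
#     if role_a > role_b:
#         ...  # role_a is higher in the hierarchy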
|
py | b408e217a0e36e05a5668650cd1993550894fe4a | """
test_get_references.py -- Given a URI, get the references for the URI
Version 0.1 MC 2013-12-27
-- Initial version.
Version 0.2 MC 2014-09-18
-- Update for PEP 8 and Tools 2
"""
__author__ = "Michael Conlon"
__copyright__ = "Copyright 2014, University of Florida"
__license__ = "BSD 3-Clause license"
__version__ = "0.2"
from vivofoundation import get_references
from datetime import datetime
# Test cases for access and display functions
print datetime.now(), "Start"
print "\nDateTime"
print get_references("http://vivo.ufl.edu/individual/n7860108656")
print "\nDateTimeInterval"
print get_references("http://vivo.ufl.edu/individual/n182882417")
print "\nOrganization"
print get_references("http://vivo.ufl.edu/individual/n8763427")
print "\nAuthorship"
print get_references("http://vivo.ufl.edu/individual/n148010391")
print "\nRole"
print get_references("http://vivo.ufl.edu/individual/n1864549239")
print "\nPerson"
print get_references("http://vivo.ufl.edu/individual/n39051")
print "\nNot Found"
print get_references("http://vivo.ufl.edu/notfound")
print "\nPublication Venue"
print get_references("http://vivo.ufl.edu/individual/n378789540")
print "\nPaper"
print get_references("http://vivo.ufl.edu/individual/n4703866415")
print "\nGrant"
print get_references("http://vivo.ufl.edu/individual/n614029206")
print datetime.now(), "Finish"
|
py | b408e25972c9fe6d7a7263bf0b44d4ac6738a5f9 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class SourceRegistryCredentials(Model):
"""Describes the credential parameters for accessing the source registry.
:param login_mode: The authentication mode which determines the source
registry login scope. The credentials for the source registry
     will be generated using the given scope. These credentials will be used
     to log in to the source registry during the run. Possible values
     include: 'None', 'Default'
:type login_mode: str or
~azure.mgmt.containerregistry.v2019_05_01.models.SourceRegistryLoginMode
"""
_attribute_map = {
'login_mode': {'key': 'loginMode', 'type': 'str'},
}
def __init__(self, *, login_mode=None, **kwargs) -> None:
super(SourceRegistryCredentials, self).__init__(**kwargs)
self.login_mode = login_mode
|
py | b408e3830fb5025c67312bc7bc301f249b443098 | # Copyright 2020 Zadara Storage, Inc.
# Originally authored by Jeremy Brown - https://github.com/jwbrown77
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from zadarapy.validators import verify_start_limit, verify_load_balancer_name
def get_load_balancer_groups(session, start=None, limit=None, return_type=None,
**kwargs):
"""
Get a list of Load Balancer Groups.
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type start: int
:param start: The offset to start displaying accounts from. Optional.
    :type limit: int
:param limit: The maximum number of accounts to return. Optional.
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
parameters = verify_start_limit(start=start, limit=limit)
path = "/api/zios/load_balancer_groups.json"
return session.get_api(path=path, parameters=parameters,
return_type=return_type, **kwargs)
def get_load_balancer_group(session, name, return_type=None, **kwargs):
"""
Get a single Load Balancer Group.
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type name: str
:param name: load balancer name (id). Required.
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
verify_load_balancer_name(name=name)
path = f"/api/zios/load_balancer_groups/{name}.json"
return session.get_api(path=path, return_type=return_type, **kwargs)
def get_iop_metering_load_balancer_group(session, name, service, interval=None, count=None, return_type=None, **kwargs):
"""
Shows IOPs metering of a load balancer group.
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type name: str
:param name: load balancer name (id). Required.
:type service: str
:param service: Proxy. Required.
:type interval: int
:type count: int
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
verify_load_balancer_name(name=name)
body_values = {"service": service}
if interval is not None:
body_values["interval"] = interval
if count is not None:
body_values["count"] = count
path = f"/api/zios/load_balancer_groups/{name}/iops.json?service=proxy"
return session.get_api(path=path, body_values=body_values, return_type=return_type, **kwargs)
def get_latency_metering_load_balancer_group(session, name, service,
interval=None, count=None, return_type=None, **kwargs):
"""
Shows latency metering of a load balancer group.
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type name: str
:param name: load balancer name (id). Required.
:type service: str
:param service: Proxy. Required.
:type interval: int
:type count: int
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
verify_load_balancer_name(name=name)
body_values = {"service": service}
if interval is not None:
body_values["interval"] = interval
if count is not None:
body_values["count"] = count
path = f"/api/zios/load_balancer_groups/{name}/latency.json?service=proxy"
return session.get_api(path=path, body_values=body_values, return_type=return_type, **kwargs)
def get_throughput_metering_load_balancer_group(session, name, service,
interval=None, count=None, return_type=None, **kwargs):
"""
Shows throughput metering of a load balancer group.
:type session: zadarapy.session.Session
:param session: A valid zadarapy.session.Session object. Required.
:type name: str
:param name: load balancer name (id). Required.
:type service: str
:param service: Proxy. Required.
:type interval: int
:type count: int
:type return_type: str
:param return_type: If this is set to the string 'json', this function
will return a JSON string. Otherwise, it will return a Python
dictionary. Optional (will return a Python dictionary by default).
:rtype: dict, str
:returns: A dictionary or JSON data set as a string depending on
return_type parameter.
"""
verify_load_balancer_name(name=name)
body_values = {"service": service}
if interval is not None:
body_values["interval"] = interval
if count is not None:
body_values["count"] = count
path = f"/api/zios/load_balancer_groups/{name}/throughput.json?service=proxy"
return session.get_api(path=path, body_values=body_values, return_type=return_type, **kwargs)
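def _example_metering_calls(session):
    """Illustrative usage sketch (not part of the original module).

    ``session`` is assumed to be a valid ``zadarapy.session.Session`` and the
    group name ``lbg-00000001`` is a made-up example value.
    """
    groups = get_load_balancer_groups(session, start=0, limit=10)
    iops = get_iop_metering_load_balancer_group(
        session, name='lbg-00000001', service='proxy', interval=60, count=10)
    return groups, iops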
|
py | b408e42542cfa31bea5d240f294151b433a7b4cb | from __future__ import print_function
import argparse
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import os
import math
import data_loader
import ResNet as models
from torch.utils import model_zoo
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# Training settings
batch_size = 32
epochs = 200
lr = 0.01
momentum = 0.9
no_cuda =False
seed = 8
log_interval = 10
l2_decay = 5e-4
root_path = "./dataset/"
source_name = "amazon"
target_name = "webcam"
cuda = not no_cuda and torch.cuda.is_available()
torch.manual_seed(seed)
if cuda:
torch.cuda.manual_seed(seed)
kwargs = {'num_workers': 1, 'pin_memory': True} if cuda else {}
source_loader = data_loader.load_training(root_path, source_name, batch_size, kwargs)
target_train_loader = data_loader.load_training(root_path, target_name, batch_size, kwargs)
target_test_loader = data_loader.load_testing(root_path, target_name, batch_size, kwargs)
len_source_dataset = len(source_loader.dataset)
len_target_dataset = len(target_test_loader.dataset)
len_source_loader = len(source_loader)
len_target_loader = len(target_train_loader)
def load_pretrain(model):
url = 'https://download.pytorch.org/models/resnet50-19c8e357.pth'
pretrained_dict = model_zoo.load_url(url)
model_dict = model.state_dict()
for k, v in model_dict.items():
if not "cls_fc" in k and not "domain_fc" in k:
model_dict[k] = pretrained_dict[k[k.find(".") + 1:]]
model.load_state_dict(model_dict)
return model
def train(epoch, model):
    # The final fully-connected layer uses a learning rate 10x higher than the earlier (shared) layers
LEARNING_RATE = lr / math.pow((1 + 10 * (epoch - 1) / epochs), 0.75)
print("learning rate:", LEARNING_RATE)
optimizer_fea = torch.optim.SGD([
{'params': model.sharedNet.parameters()},
{'params': model.cls_fc.parameters(), 'lr': LEARNING_RATE},
], lr=LEARNING_RATE / 10, momentum=momentum, weight_decay=l2_decay)
optimizer_critic = torch.optim.SGD([
{'params': model.domain_fc.parameters(), 'lr': LEARNING_RATE}
], lr=LEARNING_RATE, momentum=momentum, weight_decay=l2_decay)
data_source_iter = iter(source_loader)
data_target_iter = iter(target_train_loader)
dlabel_src = Variable(torch.ones(batch_size).long().cuda())
dlabel_tgt = Variable(torch.zeros(batch_size).long().cuda())
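    # Added note (not in the original script): dlabel_src/dlabel_tgt are the
    # domain labels (1 = source batch, 0 = target batch). They are used below
    # both to train the domain classifier (critic_loss_*) and to build the
    # "confusion" losses that push the shared features toward domain
    # invariance, in the spirit of the RevGrad/DANN model loaded above.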
i = 1
while i <= len_source_loader:
model.train()
source_data, source_label = data_source_iter.next()
if cuda:
source_data, source_label = source_data.cuda(), source_label.cuda()
source_data, source_label = Variable(source_data), Variable(source_label)
clabel_src, dlabel_pred_src = model(source_data)
label_loss = F.nll_loss(F.log_softmax(clabel_src, dim=1), source_label)
critic_loss_src = F.nll_loss(F.log_softmax(dlabel_pred_src, dim=1), dlabel_src)
confusion_loss_src = 0.5 * ( F.nll_loss(F.log_softmax(dlabel_pred_src, dim=1), dlabel_src) + F.nll_loss(F.log_softmax(dlabel_pred_src, dim=1), dlabel_tgt) )
target_data, target_label = data_target_iter.next()
if i % len_target_loader == 0:
data_target_iter = iter(target_train_loader)
if cuda:
target_data, target_label = target_data.cuda(), target_label.cuda()
target_data = Variable(target_data)
clabel_tgt, dlabel_pred_tgt = model(target_data)
critic_loss_tgt = F.nll_loss(F.log_softmax(dlabel_pred_tgt, dim=1), dlabel_tgt)
confusion_loss_tgt = 0.5 * (F.nll_loss(F.log_softmax(dlabel_pred_tgt, dim=1), dlabel_src) + F.nll_loss(
F.log_softmax(dlabel_pred_tgt, dim=1), dlabel_tgt))
confusion_loss_total = (confusion_loss_src + confusion_loss_tgt) / 2
fea_loss_total = confusion_loss_total + label_loss
critic_loss_total = (critic_loss_src + critic_loss_tgt) / 2
optimizer_fea.zero_grad()
fea_loss_total.backward(retain_graph=True)
optimizer_fea.step()
optimizer_fea.zero_grad()
optimizer_critic.zero_grad()
critic_loss_total.backward()
optimizer_critic.step()
if i % log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tconfusion_Loss: {:.6f}\tlabel_Loss: {:.6f}\tdomain_Loss: {:.6f}'.format(
epoch, i * len(source_data),len_source_dataset,
100. * i / len_source_loader, confusion_loss_total.data[0], label_loss.data[0], critic_loss_total.data[0]))
i = i + 1
def test(model):
model.eval()
test_loss = 0
correct = 0
for data, target in target_test_loader:
if cuda:
data, target = data.cuda(), target.cuda()
data, target = Variable(data, volatile=True), Variable(target)
s_output, t_output = model(data)
test_loss += F.nll_loss(F.log_softmax(s_output, dim = 1), target, size_average=False).data[0] # sum up batch loss
pred = s_output.data.max(1)[1] # get the index of the max log-probability
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
test_loss /= len_target_dataset
print('\n{} set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
target_name, test_loss, correct, len_target_dataset,
100. * correct / len_target_dataset))
return correct
if __name__ == '__main__':
model = models.RevGrad(num_classes=31)
correct = 0
print(model)
if cuda:
model.cuda()
model = load_pretrain(model)
for epoch in range(1, epochs + 1):
train(epoch, model)
t_correct = test(model)
if t_correct > correct:
correct = t_correct
print('source: {} to target: {} max correct: {} max accuracy{: .2f}%\n'.format(
source_name, target_name, correct, 100. * correct / len_target_dataset )) |
py | b408e474b03c394cea4a60b853f2efa8ebb4c850 | import glob
import os
import numpy as np
import pandas as pd
from utils.data_and_nn_loader import ROOT
from utils.logger import logger
def make_output_folders(nn, out_dataset):
os.makedirs(
"{}/results/scores/{}/{}".format(ROOT, nn, out_dataset),
exist_ok=True,
)
os.makedirs(
"{}/results/figures/{}/{}".format(ROOT, nn, out_dataset),
exist_ok=True,
)
os.makedirs(
"{}/results/metrics/{}/{}".format(ROOT, nn, out_dataset),
exist_ok=True,
)
def make_image_dataset_folder(dataset_name):
os.makedirs("{}/datasets/{}".format(ROOT, dataset_name), exist_ok=True)
def make_tensor_folder(nn_name, dataset_name):
os.makedirs(
"{}/tensors/{}/{}".format(ROOT, nn_name, dataset_name),
exist_ok=True,
)
def make_metric_folder(nn, out_dataset):
os.makedirs(
"{}/results/metrics/{}/{}".format(ROOT, nn, out_dataset),
exist_ok=True,
)
def make_score_file(nn, out_dataset, filename):
make_output_folders(nn, out_dataset)
return open(
"{}/results/scores/{}/{}/{}".format(ROOT, nn, out_dataset, filename),
"w",
)
def write_score_file(f, data):
np.savetxt(f, data, delimiter=",")
f.close()
def load_score_file(nn, dataset_name, filename):
path = "{}/results/scores/{}/{}/{}".format(ROOT, nn, dataset_name, filename)
logger.info("loading scores from {}".format(path))
return np.loadtxt(path, delimiter=",")
def find_score_file(nn, dataset_name, query):
logger.info("searching for file {}/{}/{}".format(nn, dataset_name, query))
prefix = "{}/results/scores/{}/{}/".format(ROOT, nn, dataset_name)
path = glob.glob(prefix + query)
if len(path) > 0:
return path[0].split("/")[-1]
logger.warn("file not found")
return
def check_existence_score_file(nn_name, dataset_name, filename):
path = glob.glob(
"{}/results/scores/{}/{}/{}".format(ROOT, nn_name, dataset_name, filename)
)
return len(path) > 0
def make_evaluation_metrics_file(nn, out_dataset, filename):
make_metric_folder(nn, out_dataset)
return open(
"{}/results/metrics/{}/{}/{}".format(ROOT, nn, out_dataset, filename),
"w",
)
def write_evaluation_metrics_file(f, header, content):
for item in header:
f.write("%s\n" % item)
for item in content:
f.write("%s\n" % item)
return f
def clean_title(title):
return "_".join(title.lower().split(" ")) + ".txt"
def append_results_to_file(
nn_name,
out_dataset_name,
method_name,
eps,
temperature,
fpr_at_tpr_in,
fpr_at_tpr_out,
detection,
auroc,
aupr_in,
aupr_out,
filename="results",
):
results = pd.DataFrame.from_dict(
{
"nn": [nn_name],
"out_dataset": [out_dataset_name],
"method": [method_name],
"eps": [eps],
"T": [temperature],
"fpr_at_tpr95_in": [fpr_at_tpr_in],
"fpr_at_tpr95_out": [fpr_at_tpr_out],
"detection": [detection],
"auroc": [auroc],
"aupr_in": [aupr_in],
"aupr_out": [aupr_out],
}
)
filename = "{}/results/{}.csv".format(ROOT, filename)
if not os.path.isfile(filename):
results.to_csv(filename, header=True, index=False)
else: # else it exists so append without writing the header
results.to_csv(filename, mode="a", header=False, index=False)
def remove_duplicates(filename):
filename = "{}/results/{}.csv".format(ROOT, filename)
df = pd.read_csv(filename)
logger.info("df has length {}".format(len(df)))
df.drop_duplicates(
subset=["nn", "out_dataset", "method", "eps", "T"], keep="last", inplace=True
)
df.to_csv(filename, index=False, header=True)
logger.info("length reduced to {}".format(len(df)))
|
py | b408e6e80450ef309bfcde0d06752d8f48d8f57e | #!/usr/bin/env python
# Copyright 2015 The PDFium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import difflib
import sys
def main(argv):
if len(argv) != 3:
print('%s: invalid arguments' % argv[0])
return 2
filename1 = argv[1]
filename2 = argv[2]
try:
with open(filename1, "r") as f1:
str1 = f1.readlines()
with open(filename2, "r") as f2:
str2 = f2.readlines()
diffs = difflib.unified_diff(
str1, str2, fromfile=filename1, tofile=filename2)
except Exception as e:
print("something went astray: %s" % e)
return 1
status_code = 0
for diff in diffs:
sys.stdout.write(diff)
status_code = 1
return status_code
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
py | b408e7d3b64aca75ce45bf03e34a2762fad04e5d | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 7
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_2_0
from isi_sdk_8_2_0.models.service_policies_extended import ServicePoliciesExtended # noqa: E501
from isi_sdk_8_2_0.rest import ApiException
class TestServicePoliciesExtended(unittest.TestCase):
"""ServicePoliciesExtended unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testServicePoliciesExtended(self):
"""Test ServicePoliciesExtended"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_2_0.models.service_policies_extended.ServicePoliciesExtended() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | b408e8070146e43b51c220be2353df7e7077b1f2 | from sklearn import metrics as skmetrics
import numpy as np
from ._metric import Metric
class ClassificationMetric(Metric):
"""
Classification Metric
Computes an evalution of data based on a specific classification metric
"""
def __init__(self, name: str):
"""
Generates the ClassificationMetric
Parameters
---------
name : str
the name of the metric
"""
self.name = name
def __call__(self, y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""
Gets the evaluation score of the y_true and y_pred values
Parameters
----------
y_true : np.ndarray
the actual values
y_pred : np.ndarray
the predicted values
"""
return self._compute(y_true, y_pred)
def _compute(self, y_true: np.ndarray, y_pred: np.ndarray) -> float:
"""
Computes the evalution score
Parameters
----------
y_true : np.ndarray
the actual values
y_pred : np.ndarray
the predicted values
"""
if self.name == 'accuracy':
return skmetrics.accuracy_score(y_true=y_true, y_pred=y_pred)
elif self.name == 'f1':
return skmetrics.f1_score(y_true=y_true, y_pred=y_pred)
elif self.name == 'precision':
return skmetrics.precision_score(y_true=y_true, y_pred=y_pred)
elif self.name == 'recall':
return skmetrics.recall_score(y_true=y_true, y_pred=y_pred)
elif self.name == 'auc':
return skmetrics.roc_auc_score(y_true=y_true, y_score=y_pred)
elif self.name == 'auc_multi':
return skmetrics.roc_auc_score(y_true=y_true, y_score=y_pred, multi_class='ovr')
elif self.name == 'logloss':
return skmetrics.log_loss(y_true=y_true, y_pred=y_pred)
else:
raise Exception("Not Implemented")
|
py | b408e8ea49f8387df1b6ccb21fb5c74e9a78591a | #!/usr/bin/env python3
import cgi
import cgitb
import os
import json
cgitb.enable()
print("Content-Type: text/html\n")
print()
print("<!doctype html><title>Hello</title><h2>Hello World</h2>")
#print(os.environ)
env_json = {}
for key, value in os.environ.items():
env_json[key] = value
print(json.dumps(env_json))
print("<h2>" + os.environ.get("QUERY_STRING") + "</h2>")
print("<h2>" + os.environ.get("HTTP_USER_AGENT") + "</h2>") |
py | b408ea3ce2dbd86096640e314dc590aa110a5b03 | # qubit number=4
# total number=39
import cirq
import qiskit
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2
import numpy as np
import networkx as nx
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[3]) # number=16
prog.cz(input_qubit[0],input_qubit[3]) # number=17
prog.h(input_qubit[3]) # number=18
prog.x(input_qubit[3]) # number=13
prog.h(input_qubit[3]) # number=24
prog.cz(input_qubit[0],input_qubit[3]) # number=25
prog.h(input_qubit[3]) # number=26
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=3
prog.h(input_qubit[3]) # number=4
prog.h(input_qubit[0]) # number=5
oracle = build_oracle(n-1, f)
prog.append(oracle.to_gate(),[input_qubit[i] for i in range(n-1)]+[input_qubit[n-1]])
prog.h(input_qubit[1]) # number=6
prog.h(input_qubit[2]) # number=30
prog.cz(input_qubit[0],input_qubit[2]) # number=31
prog.h(input_qubit[2]) # number=32
prog.cx(input_qubit[0],input_qubit[2]) # number=36
prog.x(input_qubit[2]) # number=37
prog.cx(input_qubit[0],input_qubit[2]) # number=38
prog.cx(input_qubit[0],input_qubit[2]) # number=29
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.h(input_qubit[0]) # number=9
prog.cx(input_qubit[3],input_qubit[2]) # number=22
prog.cx(input_qubit[2],input_qubit[0]) # number=10
prog.h(input_qubit[0]) # number=19
prog.cz(input_qubit[2],input_qubit[0]) # number=20
prog.h(input_qubit[0]) # number=21
prog.h(input_qubit[3]) # number=33
prog.cz(input_qubit[2],input_qubit[3]) # number=34
prog.h(input_qubit[3]) # number=35
# circuit end
return prog
if __name__ == '__main__':
a = "111"
b = "0"
f = lambda rep: bitwise_xor(bitwise_dot(a, rep), b)
prog = make_circuit(4,f)
backend = BasicAer.get_backend('statevector_simulator')
sample_shot =8000
info = execute(prog, backend=backend).result().get_statevector()
qubits = round(log2(len(info)))
info = {
np.binary_repr(i, qubits): round((info[i]*(info[i].conjugate())).real,3)
for i in range(2 ** qubits)
}
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_Class2364.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.__len__(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
py | b408ea79adb5ca9fcae16f06c02ef44cd0e219bc | from abc import abstractmethod
import torch
class BaseMetric():
def __init__(self, name):
self._num_samples = 0
self.name = name
super().__init__()
@abstractmethod
def reset(self):
self._num_samples = 0
@abstractmethod
def update(self, input, target):
pass
@abstractmethod
def compute(self):
pass
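class _ExampleAccuracy(BaseMetric):
    # Illustrative sketch only (not part of the original module): one plausible
    # way a concrete metric could fill in the abstract hooks above. The
    # running-count attributes are assumptions about how update()/compute()
    # are intended to cooperate.
    def __init__(self):
        super().__init__(name="accuracy")
        self._correct = 0

    def reset(self):
        super().reset()
        self._correct = 0

    def update(self, input, target):
        preds = input.argmax(dim=1)
        self._correct += (preds == target).sum().item()
        self._num_samples += target.numel()

    def compute(self):
        return self._correct / max(self._num_samples, 1)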
|
py | b408eb0903178bbc182f0739374d2e134f75ef7b | """This is the main script of the project, starting the main loop"""
import tkinter
import exergen
root = tkinter.Tk()
app = exergen.MainWindow(root)
root.mainloop()
|
py | b408eb36a99fe8d47506de1c2fd70b91a1e49cec | # Copyright 2021 Nikita Melekhin. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import glob
import sys
import json
import subprocess
from datetime import datetime
OBJCOPY_TOOL = ""
OBJCOPY_TARGET = ""
def shell(cmd, cwd=None):
return subprocess.check_output(cmd, shell=True, cwd=cwd).decode("ascii")
inpath = sys.argv[1]
outpath = sys.argv[2]
arch = sys.argv[3]
board = sys.argv[4]
host = sys.argv[5]
path_to_bins = sys.argv[6]
if path_to_bins == "__EMPTY_PATH_":
path_to_bins = ""
if len(path_to_bins) != 0:
if path_to_bins[-1] != '/':
path_to_bins += "/"
if (arch == "aarch32"):
if host == "gnu":
OBJCOPY_TOOL = "{0}arm-none-eabi-objcopy".format(path_to_bins)
OBJCOPY_TARGET = "elf32-littlearm"
elif host == "llvm":
OBJCOPY_TOOL = "{0}llvm-objcopy".format(path_to_bins)
OBJCOPY_TARGET = "elf32-littlearm"
elif (arch == "x86"):
if host == "gnu":
OBJCOPY_TOOL = "{0}i686-elf-objcopy".format(path_to_bins)
OBJCOPY_TARGET = "elf32-i386"
elif host == "llvm":
OBJCOPY_TOOL = "{0}llvm-objcopy".format(path_to_bins)
OBJCOPY_TARGET = "elf32-i386"
else:
print("Unsupported arch {0}".format(arch))
exit(1)
run_from = os.getcwd() + '/../utils/compilers/DevTreeCompiler'
inpath_abs = os.getcwd() + '/' + inpath
outpath_abs = os.getcwd() + '/' + outpath
obj_outpath_abs = outpath_abs + "o"
shell("python3 . {0} {1}".format(inpath_abs, outpath_abs), run_from)
shell("{0} -I binary -O {1} --rename-section .data=.odt {2} {3}".format(
OBJCOPY_TOOL, OBJCOPY_TARGET, outpath_abs, obj_outpath_abs))
|
py | b408ec40d8e4404936ef219a1eb08a773e8d6072 | from discord.ext import commands
import discord
import os
import asyncio
import traceback
from schedule import show_schedule
bot = commands.Bot(command_prefix='/')
token = os.environ['DISCORD_BOT_TOKEN']
@bot.event
async def on_command_error(ctx, error):
orig_error = getattr(error, "original", error)
error_msg = ''.join(traceback.TracebackException.from_exception(orig_error).format())
await ctx.send(error_msg)
@bot.command()
async def ping(ctx):
await ctx.send('pong')
@bot.command()
async def schedule(ctx):
msg = show_schedule()
await ctx.send(msg)
@bot.command()
async def testing(ctx):
channel = bot.get_channel(477180851994624000)
message = await channel.fetch_message(801855318970859572)
pushmsg = show_schedule()
await message.edit(content=pushmsg)
bot.run(token) |
py | b408ed38e378b072d0bb244eb7be004776651fe5 | # Generated by Django 3.0 on 2022-04-28 13:08
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('main', '0004_shippingcompany_freedays'),
]
operations = [
migrations.CreateModel(
name='Demurage',
fields=[
('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False, unique=True)),
('start_day', models.IntegerField()),
('end_day', models.IntegerField()),
('price_per_day', models.FloatField()),
('size', models.CharField(choices=[('20 feet', '20 feet'), ('40 feet', '40 feet')], max_length=255)),
('is_active', models.BooleanField(default=True)),
('date_created', models.DateTimeField(auto_now_add=True)),
('shipping_company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='demurages', to='main.ShippingCompany')),
],
),
]
|
py | b408edf9ad1150b0b69ac0fb046c2a16e2ff5be5 | from collections import Counter
import pandas as pd
import simdata
import sqlite3
JIRA_DB = \
"E:\OneDrive\phd2\jira_db\issue_repository.db"
def get_issue_type(issue_key):
connection = sqlite3.connect(JIRA_DB)
sql_query = "Select t.name from Issue i, IssueType t where i.key='{key}' and i.issueTypeId = t.id".format(
key=issue_key)
cursor = connection.cursor()
cursor.execute(sql_query)
issue_type = cursor.fetchone()
connection.close()
return issue_type[0]
def main():
all_issues = pd.read_csv(simdata.ALL_ISSUES_CSV)
print "total_issues: ", len(all_issues)
issue_types = []
for _, issue_row in all_issues.iterrows():
issue_key = issue_row[simdata.ISSUE_KEY_COLUMN]
issue_type = get_issue_type(issue_key)
# print "issue_type", issue_type
issue_types.append(issue_type)
type_counter = Counter(issue_types)
print "type_counter", type_counter
print "Bug percetage", type_counter["Bug"] / float(len(all_issues)) * 100
if __name__ == "__main__":
main()
|
py | b408ee5d9e9a0a08ca569bada1fa9892aabfb92a | import json
import requests
from time import sleep
from db.db_handler import DbHandler
def insert_db_data(db_handler: DbHandler, disease: str):
print(f"Started inserting {disease} genes into database")
with open(f"scripts/data/{disease}.json") as f:
data = json.load(f)
for gene_obj in data:
db_handler.create_gene(gene_obj["Gene"], gene_obj["Gene description"])
print("Finished")
def insert_ppi_to_db(db_handler: DbHandler):
print("Started mapping protein-protein interaction network to db")
data = db_handler.get_all_genes()
for i, gene in enumerate(data):
print(f"Mapping protein interaction for gene {gene.symbol} [{i}/{len(data)}]")
sleep(1)
response = requests.get(f"https://string-db.org/api/json/network?identifiers={gene.symbol}")
response_data = json.loads(response.content.decode("utf-8"))
if response.status_code != 200:
continue
for interaction in response_data:
gene_symbol_a = interaction["preferredName_A"]
gene_symbol_b = interaction["preferredName_B"]
gene_a_id = _get_gene_or_create(db_handler, gene_symbol_a)
gene_b_id = _get_gene_or_create(db_handler, gene_symbol_b)
if not db_handler.interact(gene_a_id, gene_b_id):
db_handler.create_interaction(gene_a_id, gene_b_id)
print("Finished")
def map_genes_to_drugs(db_handler: DbHandler):
print("Started mapping protein-drug interaction network to db")
data = db_handler.get_all_genes_with_cluster()
genes = list(map(lambda g: g.symbol, data))
for i in range(0, len(genes), 10):
print(f"Running batch number [{int(i/10)+1}/{int(len(genes)/10)+1}]")
genes_batch = genes[i:i+10]
response = requests.get(f"https://dgidb.org/api/v2/interactions.json?genes={','.join(genes_batch)}&fda_approved_drug=true")
response_data = json.loads(response.content.decode("utf-8"))
if response.status_code != 200:
raise Exception
for gene_obj in response_data["matchedTerms"]:
print(f"Mapping protein-drug interaction for gene: {gene_obj['geneName']}")
gene = db_handler.get_by_symbol(gene_obj['searchTerm'])
for interaction in gene_obj["interactions"]:
if interaction["score"] < 0.8:
continue
drug_symbol = interaction["drugName"]
drug_id = _get_drug_or_create(db_handler, drug_symbol)
if not db_handler.gene_drug_interact(gene.id, drug_id):
db_handler.create_gd_interaction(gene.id, drug_id)
print("Finished")
def _get_gene_or_create(db_handler: DbHandler, gene_symbol: str) -> int:
gene_db = db_handler.get_by_symbol(gene_symbol)
if gene_db is None:
print(f"Adding gene {gene_symbol} into database")
return db_handler.create_gene(gene_symbol, "Added from interaction")
return gene_db.id
def _get_drug_or_create(db_handler: DbHandler, drug_symbol: str) -> int:
drug_db = db_handler.get_drug_by_symbol(drug_symbol)
if drug_db is None:
print(f"Adding drug {drug_symbol} into database")
return db_handler.create_drug(drug_symbol)
return drug_db.id
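def _example_pipeline(db_handler: DbHandler):
    # Illustrative sketch only (not part of the original script): the order in
    # which the steps above are presumably meant to run end-to-end. The
    # disease name "parkinson" is a made-up assumption and must match a file
    # under scripts/data/.
    insert_db_data(db_handler, "parkinson")
    insert_ppi_to_db(db_handler)
    map_genes_to_drugs(db_handler)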
|
py | b408eeeaec183c35458c8ea0619e1ec8dfb285b7 | # Copyright (c) 2019 Graphcore Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import random
import pickle
import json
import fractions
import math
import subprocess
from logging import getLogger
from functools import reduce
from .dataset import DataSet
from .data_sampler import SequentialSampler, ShuffledSampler, DistributedDataSampler
from .tokenization import FullTokenizer
from .squad_utils import read_squad_examples, convert_examples_to_features, RawResult, write_predictions, InputFeatures
logger = getLogger(__name__)
def generate_random_features(sequence_length, vocab_length, batch_size):
features = []
for i in range(batch_size):
features.append(InputFeatures(
i,
None,
None,
None,
None,
None,
np.random.randint(0, vocab_length, size=sequence_length),
None,
np.random.randint(0, 2, size=sequence_length),
0,
None,
None,
np.random.randint(0, sequence_length, size=1),
np.random.randint(0, sequence_length, size=1),
None,
np.random.randint(0, sequence_length+1, size=1)
))
return features
class SquadDataLoader(object):
def __init__(self,
features,
sequence_length=None,
batch_size=1,
dtype=np.int32,
sampler=None):
self.features = features
self.batch_size = batch_size
self.dtype = dtype
self.sequence_length = sequence_length
self.sampler = sampler
if sampler is None:
self.sampler = SequentialSampler(features)
self.num_batches = len(self.sampler)//self.batch_size
def __len__(self):
return self.num_batches
def __iter__(self):
self.feature_iterator = iter([self.features[idx] for idx in self.sampler])
return self
def __next__(self):
items = [next(self.feature_iterator) for _ in range(self.batch_size)]
indicies = []
positions = []
segments = []
sequence_mask_idx = []
start_pos = []
end_pos = []
uid = []
for item in items:
indicies.append(item.input_ids)
padding_max = self.sequence_length if self.sequence_length is not None else len(item.input_ids)
padding_length = len(item.input_ids) - item.padding_start_index
position_padding = np.full(padding_length, padding_max)
position_ids = np.arange(0, item.padding_start_index)
positions.append(np.concatenate((position_ids, position_padding)).astype(np.int32))
segments.append(item.segment_ids)
sequence_mask_idx.append(item.padding_start_index)
start_pos.append(item.start_position)
end_pos.append(item.end_position)
uid.append(item.unique_id)
# Including impossible samples during training is under investigation. T12851
# if item.is_impossible:
# logger.warning("Impossible sample exists in the dataset. "
# f"start pos: {item.start_position}, end pos: {item.end_position}")
inputs = []
for i in [indicies, positions, segments, sequence_mask_idx, start_pos, end_pos, uid]:
inputs.append(np.stack(i))
return inputs
class BertDataTransform(object):
'''
Masks the indices that are larger than the vocab_length
'''
def __init__(self, dataloader, vocab_length, sequence_length, embedding_dict, positional_dict, merge_both_embeddings, is_training=True):
self.dataloader = dataloader
self.vocab_length = vocab_length
self.sequence_length = sequence_length
self.is_training = is_training
self.embedding_dict = embedding_dict
self.positional_dict = positional_dict
self.merge_both_embeddings = merge_both_embeddings
def __len__(self):
return len(self.dataloader)
def __iter__(self):
self.dataloader_iterator = iter(self.dataloader)
return self
def __next__(self):
items = next(self.dataloader_iterator)
# Specific BERT Post Processing. TODO: Find a better place for this processing
# The vocab_length may be smaller than the original vocab... In this case with the custom_op
# Out of Bounds indicies over a certain threshold will cause numerical issues.
# 100 is unknown token [UNK]
# 0 in the label is padding
OOB = items[0] >= self.vocab_length
items[0][OOB] = 100
# Force use of uint32 for all inputs.
for i in range(len(items)):
if self.is_training or i < 4:
items[i] = items[i].astype(np.uint32)
if self.embedding_dict is not None:
items[0] = np.take(self.embedding_dict, items[0], 0)
if self.positional_dict is not None:
positional_expanded = np.take(self.positional_dict, items[1], 0)
if self.merge_both_embeddings:
items[0] += positional_expanded
else:
items[1] = positional_expanded
return items
def load_or_cache_features(input_file,
vocab_file,
sequence_length,
is_training=True,
cache_file=None,
overwrite_cache=False,
do_lower_case=False):
if cache_file is None:
cache_file = input_file + f".{sequence_length}.cache"
if os.path.exists(cache_file) and not overwrite_cache:
examples = None
logger.info(f"Loading Cache {cache_file}")
with open(cache_file, "rb") as f:
features = pickle.load(f)
else:
logger.info("Reading Examples")
examples = read_squad_examples(input_file=input_file,
is_training=is_training,
version_2_with_negative=False)
# google-research/bert uses sequence_length 384 with doc_stride 128
# TODO: Find a good value for the doc_stride with sequence_length <384
doc_stride = 128
if sequence_length < 384:
doc_stride = 64
logger.info("Converting to Features")
features = convert_examples_to_features(examples=examples,
tokenizer=FullTokenizer(vocab_file, do_lower_case=do_lower_case),
max_seq_length=sequence_length,
doc_stride=doc_stride,
max_query_length=64,
is_training=is_training)
logger.info(f"Saving Cache {cache_file}")
with open(cache_file, "wb") as f:
pickle.dump(features, f)
return features, examples
class SquadDataSet(DataSet):
def __init__(self,
features,
examples,
input_file,
is_training,
output_dir=None,
evaluate_script=None,
do_lower_case=False,
n_extra=0,
**kwargs):
super().__init__(**kwargs)
self.features = features
self.examples = examples
self.is_training = is_training
self.input_file = input_file
self.output_dir = output_dir
self.do_lower_case = do_lower_case
if not self.is_training and self.output_dir is not None:
os.makedirs(self.output_dir, exist_ok=True)
# If examples is None, features was loaded from the cache
# So the examples need to be recreated.
if self.examples is None:
self.examples = read_squad_examples(input_file=self.input_file,
is_training=self.is_training,
version_2_with_negative=False)
self.results = []
self.evaluate_script = evaluate_script
self.n_extra = n_extra
def add_results(self, data, logits):
# Results will be batched. Flatten to individual results
start_logits, end_logits = [
logit.reshape(-1, logit.shape[-1]).tolist()
for logit in logits]
for i, unique_id in enumerate(data["uid"]):
self.results.append(RawResult(
unique_id=unique_id,
start_logits=start_logits[i],
end_logits=end_logits[i]
))
def write_predictions(self, epoch=None):
if self.is_training:
raise RuntimeError("Predictions cannot be written for training datasets")
if self.output_dir is None:
raise RuntimeError("Predictions cannot be written when output_dir is None")
suffix = f"_{epoch}" if epoch is not None else ""
predictions_file = os.path.join(self.output_dir, f"predictions{suffix}.json")
nbest_file = os.path.join(self.output_dir, f"nbest_predictions{suffix}.json")
null_log_odds_file = os.path.join(self.output_dir, f"null_odds{suffix}.json")
self.results = self.results[:len(self.results) - self.n_extra]
write_predictions(self.examples,
self.features,
self.results,
20, 30,
self.do_lower_case,
predictions_file,
nbest_file,
null_log_odds_file,
True,
False, 0)
if self.evaluate_script is not None:
evaluation = subprocess.check_output(["python", self.evaluate_script, self.input_file, predictions_file])
evaluation = json.loads(evaluation)
f1 = evaluation["f1"]
exact_match = evaluation["exact_match"]
status_string = f"F1 Score: {f1} | Exact Match: {exact_match}"
if epoch is not None:
status_string = f"Epoch: {epoch:3}{args.epochs - 1} | " + status_string
logger.info(status_string)
def get_bert_dataset(tensor_shapes,
input_file,
output_dir,
sequence_length,
vocab_file,
vocab_length,
batch_size,
batches_per_step,
embedding_dict,
positional_dict,
merge_both_embeddings=False,
replication_factor=1,
accumulation_factor=1,
shuffle=True,
is_training=True,
overwrite_cache=False,
no_drop_remainder=False,
evaluate_script=None,
generated_data=False,
do_lower_case=False,
max_pipeline_stage=1,
seed=0,
mpi_size=1,
mpi_rank=0,
is_distributed=False):
samples_per_step = batch_size * batches_per_step * \
replication_factor * accumulation_factor
div_factor = batch_size * replication_factor * accumulation_factor * batches_per_step
pad = 0
if generated_data:
features = generate_random_features(
sequence_length, vocab_length, samples_per_step)
examples = None
output_dir = None
logger.info("Generating random dataset")
else:
features, examples = load_or_cache_features(
input_file,
vocab_file,
sequence_length,
is_training,
overwrite_cache=overwrite_cache,
do_lower_case=do_lower_case)
if no_drop_remainder and not generated_data:
# dataset will be padded to be divisible by batch-size and samples-per-step
pad = int(np.ceil(len(features)/div_factor)) * div_factor - len(features)
if is_distributed:
sampler = DistributedDataSampler(
features, seed, shuffle,
mpi_size, mpi_rank, padding=False, padding_sub=pad, div_factor=div_factor)
pad = sampler.get_subpadding_size()
elif shuffle:
sampler = ShuffledSampler(features, seed, pad)
else:
sampler = SequentialSampler(features, pad)
if no_drop_remainder and not generated_data:
logger.info(f"no_drop_remainder: Dataset padded by {pad} samples")
dl = SquadDataLoader(
features,
sequence_length=sequence_length,
batch_size=samples_per_step,
sampler=sampler
)
bert_ds = BertDataTransform(
dl,
vocab_length,
sequence_length,
embedding_dict,
positional_dict,
merge_both_embeddings,
is_training=is_training)
if not is_training:
# Add uid to the data dictionary so evaluation script can be run
tensor_shapes += [
("start", None),
("end", None),
("uid", None)]
ds = SquadDataSet(
features,
examples,
input_file,
is_training,
output_dir,
evaluate_script,
do_lower_case=do_lower_case,
n_extra=pad,
loader=bert_ds,
tensor_shapes=tensor_shapes,
batches_per_step=batches_per_step,
replication_factor=replication_factor,
accumulation_factor=accumulation_factor)
return ds
|
py | b408eeecbe53b1503910f1e53e9e9a775e81a630 | import numpy
import pytest
from matchms import Spectrum
from matchms.filtering import normalize_intensities
from matchms.similarity import CosineGreedyVectorial
def test_cosine_greedy_without_parameters():
spectrum_1 = Spectrum(mz=numpy.array([100, 150, 200, 300, 500, 510, 1100], dtype="float"),
intensities=numpy.array([700, 200, 100, 1000, 200, 5, 500], dtype="float"))
spectrum_2 = Spectrum(mz=numpy.array([100, 140, 190, 300, 490, 510, 1090], dtype="float"),
intensities=numpy.array([700, 200, 100, 1000, 200, 5, 500], dtype="float"))
norm_spectrum_1 = normalize_intensities(spectrum_1)
norm_spectrum_2 = normalize_intensities(spectrum_2)
cosine_greedy = CosineGreedyVectorial()
score, n_matches = cosine_greedy(norm_spectrum_1, norm_spectrum_2)
assert score == pytest.approx(0.81421, 0.0001), "Expected different cosine score."
assert n_matches == 3
def test_cosine_score_greedy_with_tolerance_0_2():
spectrum_1 = Spectrum(mz=numpy.array([100, 150, 200, 300, 500, 510, 1100], dtype="float"),
intensities=numpy.array([700, 200, 100, 1000, 200, 5, 500], dtype="float"),
metadata=dict())
spectrum_2 = Spectrum(mz=numpy.array([50, 100, 200, 299.5, 489.5, 510.5, 1040], dtype="float"),
intensities=numpy.array([700, 200, 100, 1000, 200, 5, 500], dtype="float"),
metadata=dict())
norm_spectrum_1 = normalize_intensities(spectrum_1)
norm_spectrum_2 = normalize_intensities(spectrum_2)
cosine_greedy = CosineGreedyVectorial(tolerance=0.2)
score, n_matches = cosine_greedy(norm_spectrum_1, norm_spectrum_2)
assert score == pytest.approx(0.081966, 0.0001), "Expected different cosine score."
assert n_matches == 2
def test_cosine_score_greedy_with_tolerance_2_0():
spectrum_1 = Spectrum(mz=numpy.array([100, 200, 299, 300, 301, 500, 510], dtype="float"),
intensities=numpy.array([10, 10, 500, 100, 200, 20, 100], dtype="float"),
metadata=dict())
spectrum_2 = Spectrum(mz=numpy.array([100, 200, 300, 301, 500, 512], dtype="float"),
intensities=numpy.array([10, 10, 500, 100, 20, 100], dtype="float"),
metadata=dict())
norm_spectrum_1 = normalize_intensities(spectrum_1)
norm_spectrum_2 = normalize_intensities(spectrum_2)
cosine_greedy = CosineGreedyVectorial(tolerance=2.0)
score, n_matches = cosine_greedy(norm_spectrum_1, norm_spectrum_2)
assert score == pytest.approx(0.903412, 0.0001), "Expected different cosine score."
assert n_matches == 6
def test_cosine_score_greedy_order_of_arguments():
spectrum_1 = Spectrum(mz=numpy.array([100, 200, 299, 300, 301, 500, 510], dtype="float"),
intensities=numpy.array([10, 10, 500, 100, 200, 20, 100], dtype="float"),
metadata=dict())
spectrum_2 = Spectrum(mz=numpy.array([100, 200, 300, 301, 500, 512], dtype="float"),
intensities=numpy.array([10, 10, 500, 100, 20, 100], dtype="float"),
metadata=dict())
norm_spectrum_1 = normalize_intensities(spectrum_1)
norm_spectrum_2 = normalize_intensities(spectrum_2)
cosine_greedy = CosineGreedyVectorial(tolerance=2.0)
score_1_2, n_matches_1_2 = cosine_greedy(norm_spectrum_1, norm_spectrum_2)
score_2_1, n_matches_2_1 = cosine_greedy(norm_spectrum_2, norm_spectrum_1)
assert score_1_2 == score_2_1, "Expected that the order of the arguments would not matter."
assert n_matches_1_2 == n_matches_2_1, "Expected that the order of the arguments would not matter."
|
py | b408eff35942fd2443f1373f823f56f2f75bc8dd |
from ..utils import Object
class InputPassportElementAddress(Object):
"""
A Telegram Passport element to be saved containing the user's address
Attributes:
ID (:obj:`str`): ``InputPassportElementAddress``
Args:
address (:class:`telegram.api.types.address`):
The address to be saved
Returns:
InputPassportElement
Raises:
:class:`telegram.Error`
"""
ID = "inputPassportElementAddress"
def __init__(self, address, **kwargs):
self.address = address # Address
@staticmethod
def read(q: dict, *args) -> "InputPassportElementAddress":
address = Object.read(q.get('address'))
return InputPassportElementAddress(address)
|
py | b408f062bd0f080bc19ce6d7deafb0ccdf2f1c09 | #!/usr/bin/env python3
"""Project Euler - Problem 1 Module"""
def collatz_rec(p, nr_steps):
"""Recursive Collatz Step Calculation"""
nr_steps += 1
if (p <= 1):
return nr_steps
if p % 2 == 0:
return collatz_rec(int(p/2), nr_steps)
else:
return collatz_rec(int(3*p + 1), nr_steps)
collatzbuffer = {}
def collatz_buffer( p ):
"""Buffered Collatz Step Calculation"""
if p <= 1:
return 1
next = 0
if (p % 2 == 0):
next = int(p/2)
else:
next = int(3*p + 1)
# Check buffer
next_collatz = -1
if not (next in collatzbuffer):
next_collatz = collatz_buffer(next)
else:
next_collatz = collatzbuffer[next]
# Fill Buffer;
collatzbuffer[p] = 1 + next_collatz
return 1 + next_collatz
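# Added worked example (not in the original file): collatz_buffer(6) walks
# 6 -> 3 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1 and returns 9, caching the step
# count of every intermediate value so later calls can reuse them.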
def problem14(max_starting_number):
"""Problem 14 - Longest Collatz sequence"""
max_steps = 0
max_col_number = 0
for cur in range(1, max_starting_number):
#c = collatz_rec(x, 0) # Slow Version
c = collatz_buffer(cur)
if c > max_steps:
max_steps = c
max_col_number = cur
return max_col_number
def run():
"""Default Run Method"""
return problem14(1000000)
# return collatz_rec(13,0)
if __name__ == '__main__':
print("Result: ", run())
|
py | b408f269b9d11b1d251ec9664d6f02aa8c526c0d | # Function to create "HOLA MUNDO" (Hello World)
print("HOLA MUNDO CON PYTHON")
miVariable = "22222"
print( miVariable)
print(miVariable)
print(miVariable)
miVariable = 500000
print(miVariable)
x = 2000
y = 2120
z = x + y
print(z)
print(x)
print(y)
# The id function is used to find the memory location of variables
print(id(x))
print(id(y))
print(id(z))
|
py | b408f500dbd0944defecc000877e4a7a4c07b840 | """
WSGI config for memebuilder project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "memebuilder.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
py | b408f5f5e529a324cdcb26b9901e7f0705485711 | # General imports
import sys
import os
#import glob
import pandas as pd
import hashlib
#%% Logging
import logging
loggers_dict = logging.Logger.manager.loggerDict
logger = logging.getLogger()
logger.handlers = []
# Set level
logger.setLevel(logging.DEBUG)
# Create formatter
#FORMAT = "%(asctime)s - %(levelno)s - %(module)-15s - %(funcName)-15s - %(message)s"
FORMAT = "%(asctime)s L%(levelno)s: %(message)s"
DATE_FMT = "%Y-%m-%d %H:%M:%S"
formatter = logging.Formatter(FORMAT, DATE_FMT)
# Create handler and assign
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(formatter)
logger.handlers = [handler]
logger.critical("Logging started")
#%% IO
PATH_PROJECT_BASE = r"/home/batman/ocn/plankton-datascience"
assert os.path.exists(PATH_PROJECT_BASE)
PATH_DATA_CATALOGUE = os.path.join(PATH_PROJECT_BASE,'planktonDS_data_seeding/OceanDataSets_master catalog r00.csv')
assert os.path.exists(PATH_DATA_CATALOGUE)
PATH_DATA_CATALOGUE_CLEAN = os.path.join(PATH_PROJECT_BASE,'planktonDS_data_seeding/OceanDataSets_master catalog clean.csv')
#%% Load the data catalogue
df = pd.read_csv(PATH_DATA_CATALOGUE)
df.columns
#%%############################################################################
# Column: SizeGB
###############################################################################
# Cleanup function
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
# Drop non-numeric size
mask_numeric = df.loc[:,'SizeGB'].apply(is_number)
df = df.drop(df[mask_numeric==False].index)
logging.debug("SizeGB Col: Dropped {} non-numeric sizes".format(sum(mask_numeric==False)))
# Convert size to numeric
df.loc[:,'SizeGB'] = pd.to_numeric(df.loc[:,'SizeGB'])
logging.debug("SizeGB Col: Converted size to {}".format(df.loc[:,'SizeGB'].dtype))
# Drop NaN sizes
mask_size_not_na = df.loc[:,'SizeGB'].isna() == False
num_dropped = sum(mask_size_not_na == False)
df = df[mask_size_not_na]
logging.debug("SizeGB Col: Dropped {} size=NaN records".format(num_dropped))
#%%
#df_describe = df.describe(include='all')
#df_info = df.info()
#%%############################################################################
# Column: Download Link
###############################################################################
# Drop non-unique links
num_records = len(df)
#df = df.loc[:,'Download Link'].drop_duplicates(subset='Download Link',keep='first')
df = df.drop_duplicates(subset='Download Link',keep='first')
logging.debug("Download Link Col: {} records with duplicate links dropped".format(num_records-len(df)))
# Drop NAN
num_records = len(df)
df = df.dropna(axis=0, subset = ['Download Link'])
logging.debug("Download Link Col: {} records with duplicate links dropped".format(num_records-len(df)))
#%%
df_describe = df.describe(include='all')
#%%############################################################################
# Column: Formats
###############################################################################
# Cleaning function
def rename_value(df,colname,original_str, new_str):
mask= df.loc[:,colname] == original_str
df.loc[mask,colname] = new_str
logging.debug("Formats Col: Renamed {} {} records from {} to {}".format(sum(mask),colname,original_str, new_str))
return df
df = rename_value(df,'Format','.tar', 'tar')
df = rename_value(df,'Format','.tgz', 'tgz')
# Subset on formats
select_formats=['zip','csv','gzip','tar','tgz']
df_sub = df[df.loc[:,'Format'].isin(select_formats)]
len(df_sub)
logging.debug("Formats Col: Selected {} of {} records, formats: {}".format(len(df_sub), len(df), select_formats))
#%%############################################################################
# Summarize and save
###############################################################################
df_sub_describe = df_sub.describe(include='all')
#%% Add a hash
df_sub['hash'] = df_sub.apply(lambda x: hashlib.sha256(repr(tuple(x)).encode('utf-8')).hexdigest(), axis = 1)
assert(sum(df_sub['hash'].duplicated())==0)
logging.debug("Added a HASH ID column".format())
#%% Save the file
df_sub.to_csv(PATH_DATA_CATALOGUE_CLEAN)
|
py | b408f6564597f6c7f3e31f66c2bf98389650ae9f | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Tests for the `license info` function
"""
import pytest
from choosealicense.cli import info
@pytest.mark.usefixtures('mock_api')
class TestInfo():
def test_show_license_info(self, runner):
all_the_licenses = {
'agpl-3.0': ('GNU Affero General Public License v3.0', 12),
'apache-2.0': ('Apache License 2.0', 10),
'bsd-2-clause': ('BSD 2-Clause "Simplified" License', 7),
'bsd-3-clause': ('BSD 3-Clause "New" or "Revised" License', 7),
'epl-2.0': ('Eclipse Public License 2.0', 10),
'gpl-2.0': ('GNU General Public License v2.0', 10),
'gpl-3.0': ('GNU General Public License v3.0', 11),
'lgpl-2.1': ('GNU Lesser General Public License v2.1', 10),
'lgpl-3.0': ('GNU Lesser General Public License v3.0', 11),
'mit': ('MIT License', 7),
'mpl-2.0': ('Mozilla Public License 2.0', 11),
'unlicense': ('The Unlicense', 6)
}
for short_name, fullname_and_rules_number in all_the_licenses.items():
result = runner.invoke(info, [short_name])
output, exit_code = result.output, result.exit_code
rules = output.split('Limitations\n')[1].split('\n')
flat_rules = sum([item.split() for item in rules], [])
fullname, rules_number = fullname_and_rules_number
assert exit_code == 0
assert '</a>' not in output
assert fullname in output
assert '{0:<25}{1:<25}{2}'.format(
'Permissions', 'Conditions', 'Limitations') in output
assert rules_number == len(flat_rules)
def test_show_invalid_license_info(self, runner):
result = runner.invoke(info, ['invalid'])
output, exit_code = result.output, result.exit_code
assert exit_code != 0
assert output == ("Error: Invalid license name, use `license show` "
"to get the all available licenses.\n")
|
py | b408f8164048ecfd62c4ead3d31674932337b078 | class RTreeEventArgs(EventArgs):
# no doc
Cancel=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Cancel(self: RTreeEventArgs) -> bool
Set: Cancel(self: RTreeEventArgs)=value
"""
Id=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Id(self: RTreeEventArgs) -> int
"""
IdB=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IdB(self: RTreeEventArgs) -> int
"""
IdBPtr=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IdBPtr(self: RTreeEventArgs) -> IntPtr
"""
IdPtr=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: IdPtr(self: RTreeEventArgs) -> IntPtr
"""
SearchBoundingBox=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: SearchBoundingBox(self: RTreeEventArgs) -> BoundingBox
Set: SearchBoundingBox(self: RTreeEventArgs)=value
"""
SearchSphere=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: SearchSphere(self: RTreeEventArgs) -> Sphere
Set: SearchSphere(self: RTreeEventArgs)=value
"""
Tag=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Tag(self: RTreeEventArgs) -> object
Set: Tag(self: RTreeEventArgs)=value
"""
|
py | b408f82da9f94cd029a48ef9b2ba52bb4d2b63ca | from dataclasses import dataclass
from reamber.base.Hit import Hit
from reamber.o2jam.O2JNoteMeta import O2JNoteMeta
@dataclass
class O2JHit(Hit, O2JNoteMeta):
""" Defines the O2Jam Hit Object
The O2Jam Hit Object is stored in binary file .ojn
"""
pass
|
py | b408fa6626547a638c78b8f7e37c87486756896e | import time
def fibonacci(n):
if n == 1 or n == 2:
return 1
else:
return fibonacci(n - 1) + fibonacci(n - 2)
while True:
print(fibonacci(42))
time.sleep(1)
|
py | b408fa800a39deabf7faa4df3accf47de62f9f2d | from datetime import datetime, timedelta
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, Date
from sqlalchemy.orm import sessionmaker
Base = declarative_base()
menu = """
1) Today's tasks
2) Week's tasks
3) All tasks
4) Missed tasks
5) Add task
6) Delete task
0) Exit
"""
class Task(Base):
__tablename__ = 'task'
id = Column(Integer, primary_key=True)
task = Column(String, default='default_value')
deadline = Column(Date, default=datetime.today())
def __repr__(self):
return self.string_field
if __name__ == '__main__':
engine = create_engine('sqlite:///todo.db?check_same_thread=False')
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
today = datetime.today().date()
option = ""
while option != '0':
option = input(menu)
if option == '1':
tasks = session.query(Task).filter(Task.deadline == today).all()
print("Today {} {}:".format(today.day, today.strftime('%b')))
if len(tasks) == 0:
print("Nothing to do!")
else:
for num, task in enumerate(tasks):
print("{}. {}".format(num, task.task))
if option == '2':
for dates_ago in range(7):
date = today + timedelta(days = dates_ago)
tasks = session.query(Task).filter(Task.deadline == date).all()
print("{} {} {}:".format(date.strftime('%A'), date.day, date.strftime('%b')))
if len(tasks) == 0:
print("Nothing to do!")
else:
for num, task in enumerate(tasks):
print("{}. {}".format(num, task.task))
print()
if option == '3':
tasks = session.query(Task).all()
print("All tasks:")
if len(tasks) == 0:
print("Nothing to do!")
else:
for num, task in enumerate(tasks):
print("{}. {}. {} {}".format(num, task.task, task.deadline.day, task.deadline.strftime('%b')))
if option == '4':
tasks = session.query(Task).filter(Task.deadline < today).all()
print("Missed tasks:")
if len(tasks) == 0:
print("Nothing is missed!")
else:
for num, task in enumerate(tasks):
print("{}. {}. {} {}".format(num, task.task, task.deadline.day, task.deadline.strftime('%b')))
if option == '5':
task_def = input("Enter task: ")
date_def = input("Enter deadline: ").strip(" ")
deadline = datetime.strptime(date_def, '%Y-%m-%d')
new_task = Task(task=task_def, deadline=deadline)
session.add(new_task)
session.commit()
print("The task has been added!")
if option == '6':
tasks = session.query(Task).all()
if len(tasks) == 0:
print("Nothing to do!")
else:
for num, task in enumerate(tasks):
print("{}. {}. {} {}".format(num, task.task, task.deadline.day, task.deadline.strftime('%b')))
                delete = int(input("Choose the number of the task you want to delete: "))
                session.delete(tasks[delete])
                session.commit()
                print("The task has been deleted!")
print()
print("Bye!") |
py | b408facf81c2b15adb3ec369477b3c9d3f9b071a | """
GridFTP Server class. Interact with a server using the GridFTP protocol
"""
# pylint: disable=super-init-not-called
from CIME.XML.standard_module_setup import *
from CIME.Servers.generic_server import GenericServer
from CIME.utils import run_cmd
logger = logging.getLogger(__name__)
class GridFTP(GenericServer):
def __init__(self, address, user='', passwd=''):
self._root_address = address
def fileexists(self, rel_path):
stat,out,err = run_cmd("globus-url-copy -list {}".format(os.path.join(self._root_address, os.path.dirname(rel_path))+os.sep))
if stat or os.path.basename(rel_path) not in out:
logging.warning("FAIL: File {} not found.\nstat={} error={}".format(rel_path, stat, err))
return False
return True
def getfile(self, rel_path, full_path):
stat, _,err = run_cmd("globus-url-copy -v {} file://{}".format(os.path.join(self._root_address, rel_path), full_path))
if (stat != 0):
logging.warning("FAIL: GridFTP repo '{}' does not have file '{}' error={}\n".
format(self._root_address,rel_path, err))
return False
return True
def getdirectory(self, rel_path, full_path):
stat, _,err = run_cmd("globus-url-copy -v -r {}{} file://{}{}".format(os.path.join(self._root_address, rel_path), os.sep, full_path, os.sep))
if (stat != 0):
logging.warning("FAIL: GridFTP repo '{}' does not have directory '{}' error={}\n".
format(self._root_address,rel_path, err))
return False
return True
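# A minimal usage sketch (hypothetical endpoint and paths, not part of CIME itself);
# the class only needs the GridFTP root address, and user/passwd are accepted but unused:
#   server = GridFTP("gsiftp://gridftp.example.org/inputdata")
#   if server.fileexists("cesm/file.nc"):
#       server.getfile("cesm/file.nc", "/tmp/file.nc")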
|
py | b408fb852ff2b19c048aa241ca20052ba1444b7c | """
This example shows that the push forward of the eigenvalue decomposition indeed
fails for symmetric matrices with degenerate eigenvalues.
"""
import numpy
from algopy.utpm import *
# Build Symmetric Matrix with degenerate eigenvalues
D,P,N = 3,1,4
A = UTPM(numpy.zeros((D,P,N,N)))
V = UTPM(numpy.random.rand(D,P,N,N))
# A.data[0,0] = numpy.diag([2,2,3,3,2,5.])
# A.data[1,0] = numpy.diag([5,5,3,1,1,3.])
# A.data[2,0] = numpy.diag([3,1,3,1,1,3.])
A.data[0,0] = numpy.diag([2,2,2,5.])
A.data[1,0] = numpy.diag([5,5,6,6.])
A.data[2,0] = numpy.diag([1,1,1,1.])
V,Rtilde = UTPM.qr(V)
A = UTPM.dot(UTPM.dot(V.T, A), V)
# sanity check: solution of the zero'th coefficient using numpy
A0 = A.data[0,0]
l0,Q0 = numpy.linalg.eigh(A0)
L0 = numpy.diag(l0)
B0 = numpy.dot(numpy.dot(Q0,L0), Q0.T)
# pushforward: general UTPM solution
l,Q = UTPM.eigh(A)
L = UTPM.diag(l)
# pullback
lbar = UTPM(numpy.random.rand(*(D,P,N)))
Qbar = UTPM(numpy.random.rand(*(D,P,N,N)))
Abar = UTPM.pb_eigh( lbar, Qbar, A, l, Q)
Abar = Abar.data[0,0]
Adot = A.data[1,0]
Lbar = UTPM._diag(lbar.data)[0,0]
Ldot = UTPM._diag(l.data)[1,0]
Qbar = Qbar.data[0,0]
Qdot = Q.data[1,0]
# print l
# print 'check pushforward:'
print('Q.T A Q - L =\n', UTPM.dot(Q.T, UTPM.dot(A,Q)) - L)
# print 'Q.T Q - I =\n', UTPM.dot(Q.T, Q) - numpy.eye(N)
# print 'check pullback:'
# print 'error measure of the pullback = ', numpy.trace(numpy.dot(Abar.T, Adot)) - numpy.trace( numpy.dot(Lbar.T, Ldot) + numpy.dot(Qbar.T, Qdot))
|
py | b408fc5f69a5145fdf1e8944b36ae767e732d095 | class Nim:
def __init__(self, *heaps):
self._heaps = [h for h in heaps]
self._takeOut0Heaps()
def _takeOut0Heaps(self):
while(True):
try:
self._heaps.remove(0)
except ValueError:
return
def __str__(self):
ret = ""
for i in range(len(self._heaps)):
ret += str(i)+" " + "|"*self._heaps[i] + "\n"
return ret
def getHeaps(self):
return self._heaps
def takeFrom(self, Heap, HowMuch):
if HowMuch<=0:
raise ValueError
self._heaps[Heap]-=HowMuch
if self._heaps[Heap] < 0:
self._heaps[Heap] = 0
self._takeOut0Heaps()
def checkIfFinished(self):
return len(self._heaps) == 0
from random import *
class NimBot:
def __init__(self, nim, mode = "last Wins"):
self._nim = nim
def playMove(self):
heaps = self._nim.getHeaps()
nimSum = 0
take = 0
fromHeap = 0
for h in heaps:
nimSum ^= h
if nimSum == 0:
fromHeap, take = self.getRandomFromTake()
else:
for i in range(len(heaps)):
if heaps[i]^nimSum < heaps[i]:
take = heaps[i] - (heaps[i]^nimSum)
fromHeap = i
break
print "The Bot took " + str(take) + " from " + str(fromHeap)
self._nim.takeFrom(fromHeap, take)
def getRandomFromTake(self):
heaps = self._nim.getHeaps()
fromHeap = randrange(0, len(heaps))
take = randrange(1, heaps[fromHeap]+1)
return fromHeap, take
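# Strategy sketch: playMove() follows the standard normal-play Nim strategy --
# XOR all heap sizes (the "nim-sum"); if it is non-zero, shrink one heap so the
# overall XOR becomes zero, otherwise fall back to a random legal move.
# e.g. heaps [3, 4, 5] give 3 ^ 4 ^ 5 = 2, and taking 2 from the heap of 3
# (since 3 ^ 2 = 1 < 3) leaves [1, 4, 5] with nim-sum 0.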
if __name__ == "__main__":
numOfHeaps = randrange(2, 7)
heaps = [randrange(1, 20) for i in range(numOfHeaps)]
theGame = Nim(*heaps)
theBot = NimBot(theGame)
print "Welcome to Nim!"
print theGame
goFirst = raw_input("Want to go first? (y/n): ")
if(goFirst == 'n'):
print ""
theBot.playMove()
print ""
while(theGame.checkIfFinished() == False):
print "Your Turn!"
print theGame
temp = True
while(temp):
try:
temp = False
heap = int(raw_input("Which Heap?: "))
                test = theGame.getHeaps()[heap]
            except (ValueError, IndexError):
print "Type an integer in range"
temp = True
temp = True
while(temp):
try:
temp = False
take = int(raw_input("How much Will you take?: "))
theGame.takeFrom(heap, take)
except ValueError:
print "Type an integer in range"
temp = True
if theGame.checkIfFinished():
k = raw_input("You win!")
break
print theGame
theBot.playMove()
print ""
if theGame.checkIfFinished():
k = raw_input("You Lost!")
break
|
py | b408fd5bd8cef3c50e7ea2ef04e5ae2930e2cb08 | from __future__ import unicode_literals
import re
from setuptools import find_packages, setup
def get_version(filename):
with open(filename) as fh:
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", fh.read()))
return metadata['version']
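# Worked example of the version regex (illustrative only): a line such as
# __version__ = '1.2.0' in mopidy_hearthisat/__init__.py makes re.findall(...)
# return [('version', '1.2.0')], so get_version() yields '1.2.0'.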
setup(
name='Mopidy-HearThisAt',
version=get_version('mopidy_hearthisat/__init__.py'),
url='https://github.com/simare/mopidy-hearthisat',
license='Apache License, Version 2.0',
author='Ramon Zöllner',
author_email='[email protected]',
description='playing music from hearthis.at',
long_description=open('README.rst').read(),
packages=find_packages(exclude=['tests', 'tests.*']),
zip_safe=False,
include_package_data=True,
install_requires=[
'setuptools',
'Mopidy >= 1.0',
'Pykka >= 1.1',
],
entry_points={
'mopidy.ext': [
'hearthisat = mopidy_hearthisat:Extension',
],
},
classifiers=[
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Topic :: Multimedia :: Sound/Audio :: Players',
],
)
|
py | b408fd676843fc7dc73dca5c8aac4f935dcf490c | import sys
class Point:
def __init__(self, x, y):
self.x = x
self.y = y
def __add__(self, other):
return Point(self.x + other.x, self.y + other.y)
def __repr__(self):
return f"Point<{self.x},{self.y}>"
class Segment:
def __init__(self, start, end):
self.start = start
self.end = end
def __repr__(self):
return f"Segment<{self.start}; {self.end}>"
def __len__(self):
return distance(self.start, self.end)
class Intersection:
def __init__(self, point, steps=0):
self.point = point
self.steps = steps
def distance(a: Point, b: Point):
"""Find Manhattan distance between two points"""
return abs(a.x - b.x) + abs(a.y - b.y)
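# e.g. distance(Point(0, 0), Point(3, 4)) == 7 -- Manhattan distance, not Euclidean.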
def build_segments(wire):
wire_segments = []
point = Point(0, 0)
new_point = point
for item in wire:
direction = item[0]
dist = int(item[1:])
if direction == "U":
new_point = point + Point(0, dist)
if direction == "D":
new_point = point + Point(0, -dist)
if direction == "L":
new_point = point + Point(-dist, 0)
if direction == "R":
new_point = point + Point(dist, 0)
wire_segments.append(Segment(point, new_point))
point = new_point
return wire_segments
def vertical(s: Segment):
"""Return True if segment is vertical, False if horizontal-"""
return s.start.x == s.end.x
def intersect(a: Segment, b: Segment):
"""Find point of intersection between horizontal and vertical segments"""
if vertical(a) and not vertical(b):
return intersect(b, a)
x_intersect = b.start.x in range(*sorted([a.start.x, a.end.x]))
y_intersect = a.start.y in range(*sorted([b.start.y, b.end.y]))
if x_intersect and y_intersect:
return Point(b.start.x, a.start.y)
return None
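# Worked example: a horizontal and a vertical segment that cross.
#   intersect(Segment(Point(0, 0), Point(5, 0)), Segment(Point(2, -2), Point(2, 2)))
#   -> Point<2,0>
# Note that range() excludes its stop value, so a crossing that falls exactly on
# the larger x (or y) bound of a segment is not reported by this check.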
wires = [line.strip().split(',') for line in sys.stdin.readlines()]
segments = [build_segments(wire) for wire in wires]
intersections = []
for i, a in enumerate(segments[0]):
for j, b in enumerate(segments[1]):
p = intersect(a, b)
if p is not None:
steps_a = sum(len(s) for s in segments[0][:i] + [Segment(a.start, p)])
steps_b = sum(len(s) for s in segments[1][:j] + [Segment(b.start, p)])
steps = steps_a + steps_b
intersections.append(Intersection(p, steps))
answer = min(
distance(Point(0, 0), intersection.point)
for intersection in intersections
)
print("part 1")
print(answer)
answer = min(intersection.steps for intersection in intersections)
print("part 2")
print(answer)
|
py | b408fe10c73702978d695fe49a60cf4ca9c95b7e | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'rainymotion'
copyright = '2019, Georgy Ayzel'
author = 'Georgy Ayzel'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['nbsphinx']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', '**.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme' #'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'rainymotiondoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'rainymotion.tex', 'rainymotion Documentation',
'Georgy Ayzel', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'rainymotion', 'rainymotion Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'rainymotion', 'rainymotion Documentation',
author, 'rainymotion', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
|
py | b408fe97085e9872070b9c52e81445ec52b3a991 | # Dependencies
import numpy as np
import pandas as pd
from bs4 import BeautifulSoup as bs
import requests
from splinter import Browser
import re
import time
# Initialize browser
def init_browser():
executable_path = {"executable_path": "/usr/local/bin/chromedriver"}
#executable_path = {'executable_path': 'chromedriver.exe'}
return Browser("chrome", **executable_path, headless=False)
def scrape():
browser = init_browser()
url = 'https://mars.nasa.gov/news/'
browser.visit(url)
html = browser.html
soup = bs(html, 'html.parser')
news_title = soup.find('div', class_='content_title').text
news_p = soup.find('div', class_='article_teaser_body').text
url = 'https://www.jpl.nasa.gov/spaceimages/'
browser.visit(url)
html = browser.html
soup = bs(html, 'html.parser')
base_url = 'https://www.jpl.nasa.gov'
image_url = soup.find("a", class_="button fancybox")["data-fancybox-href"]
featured_image_url = base_url + image_url
url = 'https://twitter.com/marswxreport?lang=en'
browser.visit(url)
html = browser.html
soup = bs(html, "html.parser")
mars_weather = soup.find(text=re.compile("InSight sol"))
url = 'https://space-facts.com/mars/'
browser.visit(url)
tables = pd.read_html(url)
facts_df = tables[0]
facts_df.columns = ['Fact', 'Value']
facts_df['Fact'] = facts_df['Fact'].str.replace(':', '')
facts_df.reset_index(drop=True, inplace=True)
facts_html = facts_df.to_html()
hemisphere_img_urls = []
url = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(url)
browser.click_link_by_partial_text('Cerberus')
html = browser.html
soup = bs(html, 'html.parser')
results_link_01 = soup.find(class_="downloads").a['href']
results_title_01 = soup.find('h2', class_="title").text
results = soup.find_all('div', class_="description")
base_url = 'https://astrogeology.usgs.gov/'
sites = []
for result in results:
link = result.find('a', class_="itemLink product-item")
link_text = link['href']
hemispheres_url = base_url + link_text
sites.append(hemispheres_url)
hemispheres = []
for site in sites:
browser.visit(site)
html = browser.html
soup = bs(html, 'html.parser')
title = soup.find('h2', class_="title").text.strip()
url = soup.find_all('a', target="_blank", href=True)[0]['href']
hemispheres.append({"title": title, "img_url": url})
output = {
"news_title": news_title,
"news_p": news_p,
"featured_image_url": featured_image_url,
"mars_weather": mars_weather,
"facts_html": facts_html,
"hemispheres": hemispheres
}
return output
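# A minimal usage sketch (assumption -- the Flask app that consumes this lives elsewhere):
#   data = scrape()          # opens a Chrome browser via splinter
#   print(data["news_title"], data["featured_image_url"])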
|
py | b408febb856420924b8ddedcaa725d25ba27be63 | import cv2
videoCapture = cv2.VideoCapture('MyInputVid.avi')
fps = videoCapture.get(cv2.cv.CV_CAP_PROP_FPS)
size = (int(videoCapture.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)),
int(videoCapture.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)))
videoWriter = cv2.VideoWriter(
'MyOutputVid.avi', cv2.cv.CV_FOURCC('I','4','2','0'), fps, size)
success, frame = videoCapture.read()
while success: # Loop until there are no more frames.
videoWriter.write(frame)
success, frame = videoCapture.read() |
py | b408ff74cc09d9ccc957e933e3ade6181a90ff88 | # -*- coding: utf-8 -*-
"""
__init__.py
~~~~~~~~~~~~~~
Views package definition.
:copyright: (c) 2016 by fengweimin.
:date: 16/6/11
"""
from admin import admin
from blog import blog
from crud import crud
from public import public
from seo import seo
|
py | b40900e85016953e50e152771842717b3e63e74f | from typing import List
from sqlalchemy import and_
from sqlalchemy.orm import Session
from fastapi import (
APIRouter,
Depends,
HTTPException,
status,
)
from CTFe.config.database import dal
from CTFe.operations import (
contributor_ops,
auth_ops,
challenge_ops,
)
from CTFe.models import (
User,
Challenge,
)
from CTFe.schemas import (
contributor_schemas,
challenge_schemas,
)
router = APIRouter()
@router.get("/list-challenges", response_model=List[challenge_schemas.Details])
def list_challenges(
*,
db_contributor: User = Depends(auth_ops.get_current_user),
session: Session = Depends(dal.get_session),
) -> challenge_schemas.Details:
""" List all challenges created by the current contributor """
conditions = and_(
Challenge.owner_id == db_contributor.id,
)
db_challenges = challenge_ops.query_challenges_by_(
session, conditions).all()
return db_challenges
@router.get("/username/{username}", response_model=contributor_schemas.Details)
async def get_contributor_by_username(
*,
username: str,
session: Session = Depends(dal.get_session)
) -> contributor_schemas.Details:
""" Get contributor record from DB """
conditions = and_(
User.username == username,
)
db_contributor = contributor_ops.query_contributors_by_(
session, conditions).first()
if db_contributor is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="Contributor not found",
)
return db_contributor
@router.get("/{id}", response_model=contributor_schemas.Details)
async def get_contributor(
*,
id: int,
session: Session = Depends(dal.get_session)
) -> contributor_schemas.Details:
""" Get contributor record from DB """
conditions = and_(
User.id == id,
)
db_contributor = contributor_ops.query_contributors_by_(
session, conditions).first()
if db_contributor is None:
raise HTTPException(
status_code=status.HTTP_404_NOT_FOUND,
detail="Contributor not found",
)
return db_contributor
@router.get("/", response_model=List[contributor_schemas.Details])
async def get_all_contributors(
*,
session: Session = Depends(dal.get_session)
) -> List[contributor_schemas.Details]:
""" Get all contributor records from DB """
db_contributors = contributor_ops.query_contributors_by_(session).all()
return db_contributors
@router.put("/", response_model=contributor_schemas.Details)
async def update_contributor(
*,
contributor_update: contributor_schemas.Update,
db_contributor: User = Depends(auth_ops.get_current_user),
session: Session = Depends(dal.get_session)
) -> contributor_schemas.Details:
""" Update contributor record from DB """
db_contributor = contributor_ops.update_contributor(
session, db_contributor, contributor_update)
return db_contributor
@router.delete("/", status_code=204)
async def delete_contributor(
*,
db_contributor: User = Depends(auth_ops.get_current_user),
session: Session = Depends(dal.get_session)
):
""" Delete contributor record from DB """
contributor_ops.delete_contributor(session, db_contributor)
@router.post("/create-challenge", response_model=contributor_schemas.Details)
def create_challenge(
*,
challenge_create: challenge_schemas.Create,
db_contributor: User = Depends(auth_ops.get_current_user),
session: Session = Depends(dal.get_session),
) -> contributor_schemas.Details:
""" Create challenge and assign this contributor to it """
from CTFe.operations import challenge_ops
# Challenge name is already taken
conditions = and_(
Challenge.name == challenge_create.name,
)
db_challenge = challenge_ops.query_challenges_by_(
session, conditions).first()
if db_challenge is not None:
raise HTTPException(
status_code=status.HTTP_409_CONFLICT,
detail=f"The name: { challenge_create.name } is already taken",
)
contributor_ops.create_challenge(
session, challenge_create, db_contributor)
return db_contributor
@router.put("/update-challenge/{challenge_id}", response_model=contributor_schemas.Details)
def update_challenge(
*,
challenge_id: int,
challenge_update: challenge_schemas.Update,
db_contributor: User = Depends(auth_ops.get_current_user),
session: Session = Depends(dal.get_session),
) -> contributor_schemas.Details:
""" Update contributor's challenge """
conditions = and_(
Challenge.id == challenge_id,
Challenge.owner_id == db_contributor.id,
)
db_challenge = challenge_ops.query_challenges_by_(session, conditions).first()
if db_challenge is None:
raise HTTPException(
status.HTTP_404_NOT_FOUND,
detail="Challenge not found",
)
challenge_ops.update_challenge(session, db_challenge, challenge_update)
return db_contributor
@router.post("/remove-challenge/{challenge_id}", response_model=contributor_schemas.Details)
def remove_challenge(
*,
challenge_id: int,
db_contributor: User = Depends(auth_ops.get_current_user),
session: Session = Depends(dal.get_session),
) -> contributor_schemas.Details:
""" Delete a challenge related to this contributor """
conditions = and_(
Challenge.id == challenge_id,
Challenge.owner_id == db_contributor.id,
)
db_challenge = challenge_ops.query_challenges_by_(
session, conditions).first()
if db_challenge is None:
raise HTTPException(
status.HTTP_404_NOT_FOUND,
detail="Challenge not found",
)
challenge_ops.delete_challenge(session, db_challenge)
return db_contributor
|
py | b40901363a5b9c7cf3d5f242374b3c917e5072e5 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Scripts for loading test case data."""
import numpy as np
def load_fourier_spectrum(fname):
"""Load Fourier amplitude spectrum file created by fas_drvr.exe.
Inputs
------
fname : string
File name of the data file
Returns
-------
event : dict
Dictionary containing the event
"""
assert fname.endswith('_fs.col')
rows = np.loadtxt(fname, skiprows=2)
return dict(
mag=rows[0, 0],
dist=rows[0, 1],
freqs=rows[:, 4],
fourier_amps=rows[:, 8], )
def load_rvt_response_spectrum(fname):
"""Load response spectrum file created by fas_drvr.exe.
Inputs
------
fname : string
File name of the data file
Returns
-------
event : dict
Dictionary containing the event
"""
assert fname.endswith('_rs.rv.col')
rows = np.loadtxt(fname, skiprows=2)
return dict(
damping=rows[0, 0],
mag=rows[0, 3],
dist=rows[0, 4],
duration=rows[0, 16],
freqs=rows[:, 2],
spec_accels=rows[:, 11])
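# A minimal usage sketch (hypothetical file names, not shipped with this module):
#   fas = load_fourier_spectrum('test_case_01_fs.col')
#   rs = load_rvt_response_spectrum('test_case_01_rs.rv.col')
#   print(fas['mag'], fas['dist'], rs['duration'], len(rs['freqs']))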
|
py | b4090247e11312ebf1efc83066ebf7567e95436a | # Copyright 2015-2016 Internap.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from collections import namedtuple
from fake_switches import group_sequences
from fake_switches.command_processing.base_command_processor import \
BaseCommandProcessor
from fake_switches.switch_configuration import VlanPort, AggregatedPort
class DellEnabledCommandProcessor(BaseCommandProcessor):
def __init__(self, config):
super(DellEnabledCommandProcessor, self).__init__()
self.config_processor = config
def get_prompt(self):
return "%s#" % self.switch_configuration.name
def do_exit(self, *_):
self.is_done = True
def do_copy(self, *_):
self.write_line("")
self.write_line("This operation may take a few minutes.")
self.write_line("Management interfaces will not be available during this time.")
self.write_line("")
self.write("Are you sure you want to save? (y/n) ")
self.on_keystroke(self.continue_validate_copy)
def continue_validate_copy(self, character):
self.write_line("")
self.write_line("")
if character == 'y':
self.switch_configuration.commit()
self.write_line("Configuration Saved!")
else:
self.write_line("Configuration Not Saved!")
self.show_prompt()
def do_configure(self, *_):
self.move_to(self.config_processor)
def do_show(self, *args):
if "running-config".startswith(args[0]):
if len(args) == 1:
self.write_line('!Current Configuration:')
self.write_line('!System Description "PowerConnect 6224P, 3.3.7.3, VxWorks 6.5"')
self.write_line('!System Software Version 3.3.7.3')
self.write_line('!Cut-through mode is configured as disabled')
self.write_line('!')
self.write_line('configure')
self.write_line('vlan database')
if len(self.switch_configuration.vlans) > 0:
self.write_line('vlan %s' % ','.join(sorted([str(v.number) for v in self.switch_configuration.vlans])))
self.write_line('exit')
for port in self.switch_configuration.ports:
port_config = self.get_port_configuration(port)
if len(port_config) > 0:
self.write_line('interface %s' % port.name)
for item in port_config:
self.write_line(item)
self.write_line('exit')
self.write_line('!')
self.write_line('exit')
elif "interface".startswith(args[1]):
interface_name = ' '.join(args[2:])
port = self.switch_configuration.get_port_by_partial_name(interface_name)
if port:
if isinstance(port, VlanPort):
config = self.get_vlan_port_configuration(port)
else:
config = self.get_port_configuration(port)
if len(config) > 0:
for line in config:
self.write_line(line)
else:
self.write_line("")
self.write_line("")
else:
self.write_line("\nERROR: Invalid input!\n")
elif "vlan".startswith(args[0]):
if len(args) == 1:
self.show_vlan_page(list(self.switch_configuration.vlans))
elif args[1] == "id":
if len(args) < 3:
self.write_line("")
self.write_line("Command not found / Incomplete command. Use ? to list commands.")
self.write_line("")
elif not _is_vlan_id(args[2]):
self.write_line(" ^")
self.write_line("Invalid input. Please specify an integer in the range 1 to 4093.")
self.write_line("")
else:
vlan = self.switch_configuration.get_vlan(int(args[2]))
if vlan is None:
self.write_line("")
self.write_line("ERROR: This VLAN does not exist.")
self.write_line("")
else:
self.show_vlan_page([vlan])
elif "interfaces".startswith(args[0]) and "status".startswith(args[1]):
self.show_page(self.get_interfaces_status_output())
elif "version".startswith(args[0]):
self.show_version()
def get_port_configuration(self, port):
conf = []
if port.shutdown:
conf.append('shutdown')
if port.description:
conf.append("description '{}'".format(port.description))
if port.mode and port.mode != "access":
conf.append('switchport mode {}'.format(port.mode))
if port.access_vlan:
conf.append('switchport access vlan {}'.format(port.access_vlan))
if port.trunk_native_vlan:
conf.append('switchport general pvid {}'.format(port.trunk_native_vlan))
if port.trunk_vlans:
conf.append('switchport {} allowed vlan add {}'.format(port.mode, to_vlan_ranges(port.trunk_vlans)))
if port.spanning_tree is False:
conf.append("spanning-tree disable")
if port.spanning_tree_portfast:
conf.append("spanning-tree portfast")
if port.mtu:
conf.append("mtu {}".format(port.mtu))
if port.lldp_transmit is False:
conf.append('no lldp transmit')
if port.lldp_receive is False:
conf.append('no lldp receive')
if port.lldp_med_transmit_capabilities is False:
conf.append('no lldp med transmit-tlv capabilities')
if port.lldp_med_transmit_network_policy is False:
conf.append('no lldp med transmit-tlv network-policy')
return conf
def get_vlan_port_configuration(self, port):
conf = ["interface {}".format(port.name)]
vlan = self.switch_configuration.get_vlan(port.vlan_id)
if vlan.name:
conf.append('name "{}"'.format(vlan.name))
conf.append('exit')
return conf
def get_interfaces_status_output(self):
output_lines = [
"",
"Port Type Duplex Speed Neg Link Flow Control",
" State Status",
"----- ------------------------------ ------ ------- ---- --------- ------------",
]
interfaces = []
bonds = []
for port in self.switch_configuration.ports:
if isinstance(port, AggregatedPort):
bonds.append(port)
elif not isinstance(port, VlanPort):
interfaces.append(port)
for port in sorted(interfaces, key=lambda e: e.name):
output_lines.append(
"{name: <5} {type: <30} {duplex: <6} {speed: <7} {neg: <4} {state: <9} {flow}".format(
name=port.name.split(" ")[-1], type="10G - Level" if "x" in port.name else "Gigabit - Level",
duplex="Full", speed="Unknown", neg="Auto", state="Down", flow="Inactive"))
output_lines += [
"",
"",
"Ch Type Link",
" State",
"--- ------------------------------ -----",
]
for port in sorted(bonds, key=lambda e: int(e.name.split(" ")[-1])):
output_lines.append("ch{name: <2} {type: <30} {state}".format(
name=port.name.split(" ")[-1], type="Link Aggregate", state="Down", flow="Inactive"))
output_lines += [
"",
"Flow Control:Enabled",
]
return output_lines
def show_vlan_page(self, vlans):
lines_per_pages = 18
self.write_line("")
self.write_line("VLAN Name Ports Type Authorization")
self.write_line("----- --------------- ------------- ----- -------------")
line_count = 0
while len(vlans) > 0 and line_count < lines_per_pages:
vlan = vlans.pop(0)
ports_strings = self._build_port_strings(self.get_ports_for_vlan(vlan))
self.write_line("{number: <5} {name: <32} {ports: <13} {type: <8} {auth: <13}".format(
number=vlan.number, name=vlan_name(vlan), ports=ports_strings[0],
type="Default" if vlan.number == 1 else "Static", auth="Required"))
line_count += 1
for port_string in ports_strings[1:]:
self.write_line("{number: <5} {name: <32} {ports: <13} {type: <8} {auth: <13}".format(
number="", name="", ports=port_string, type="", auth=""))
line_count += 1
self.write_line("")
if len(vlans) > 0:
self.write("--More-- or (q)uit")
self.on_keystroke(self.continue_vlan_pages, vlans)
def get_ports_for_vlan(self, vlan):
ports = []
for port in self.switch_configuration.ports:
if not isinstance(port, VlanPort):
if (port.trunk_vlans and vlan.number in port.trunk_vlans) or port.access_vlan == vlan.number:
ports.append(port)
return ports
def _build_port_strings(self, ports):
port_range_list = group_sequences(ports, are_in_sequence=self._are_in_sequence)
port_list = []
for port_range in port_range_list:
first_details = self._get_interface_details(port_range[0].name)
if len(port_range) == 1:
port_list.append("{}{}".format(first_details.port_prefix, first_details.port))
else:
port_list.append("{0}{1}-{0}{2}".format(first_details.port_prefix, first_details.port, self._get_interface_details(port_range[-1].name).port))
return _assemble_elements_on_lines(port_list, max_line_char=13)
def _get_interface_details(self, interface_name):
interface_descriptor = namedtuple('InterfaceDescriptor', "interface port_prefix port")
        re_port_number = re.compile(r'(\d/[a-zA-Z]+)(\d+)')
interface, slot_descriptor = interface_name.split(" ")
port_prefix, port = re_port_number.match(slot_descriptor).groups()
return interface_descriptor(interface, port_prefix, int(port))
def _are_in_sequence(self, a, b):
details_a = self._get_interface_details(a.name)
details_b = self._get_interface_details(b.name)
return details_a.port + 1 == details_b.port and details_a.port_prefix == details_b.port_prefix
def continue_vlan_pages(self, lines, _):
self.write_line("\r ")
self.write_line("")
self.show_vlan_page(lines)
if not self.awaiting_keystroke:
self.show_prompt()
def show_page(self, lines):
lines_per_pages = 23
line = 0
while len(lines) > 0 and line < lines_per_pages:
self.write_line(lines.pop(0))
line += 1
if len(lines) > 0:
self.write("--More-- or (q)uit")
self.on_keystroke(self.continue_pages, lines)
def continue_pages(self, lines, _):
self.write_line("")
self.show_page(lines)
if not self.awaiting_keystroke:
self.write_line("")
self.show_prompt()
def show_version(self):
self.write_line("")
self.write_line("Image Descriptions")
self.write_line("")
self.write_line(" image1 : default image")
self.write_line(" image2 :")
self.write_line("")
self.write_line("")
self.write_line(" Images currently available on Flash")
self.write_line("")
self.write_line("--------------------------------------------------------------------")
self.write_line(" unit image1 image2 current-active next-active")
self.write_line("--------------------------------------------------------------------")
self.write_line("")
self.write_line(" 1 3.3.7.3 3.3.7.3 image1 image1")
self.write_line(" 2 3.3.7.3 3.3.13.1 image1 image1")
self.write_line("")
def vlan_name(vlan):
if vlan.number == 1:
return "Default"
elif vlan.name is not None:
return vlan.name
else:
return ""
def to_vlan_ranges(vlans):
if len(vlans) == 0:
return "none"
ranges = group_sequences(vlans, are_in_sequence=lambda a, b: a + 1 == b)
return ",".join([to_range_string(r) for r in ranges])
def to_range_string(range_array):
if len(range_array) < 2:
return ",".join([str(n) for n in range_array])
else:
return "%s-%s" % (range_array[0], range_array[-1])
def _is_vlan_id(text):
try:
number = int(text)
except ValueError:
return False
return 1 <= number <= 4093
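# Illustrative expected values for the small helpers above:
#   to_range_string([1])        -> "1"
#   to_range_string([3, 4, 5])  -> "3-5"
#   _is_vlan_id("4093")         -> True
#   _is_vlan_id("0")            -> False;  _is_vlan_id("abc") -> False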
def _assemble_elements_on_lines(elements, max_line_char, separator=','):
lines = [""]
for element in elements:
if len(lines[-1]) > 1:
lines[-1] += separator
new_line_length = len(lines[-1]) + len(element)
if new_line_length <= max_line_char:
lines[-1] += element
else:
lines.append(element)
return lines
|