Dataset schema (29 columns, one record per source file; ⌀ marks nullable columns):

| Column | Type | Range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 1 to 1.03M |
| ext | string | 10 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 3 to 239 |
| max_stars_repo_name | string | length 5 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | sequence | length 1 to 10 |
| max_stars_count | int64 ⌀ | 1 to 191k |
| max_stars_repo_stars_event_min_datetime | string ⌀ | length 24 |
| max_stars_repo_stars_event_max_datetime | string ⌀ | length 24 |
| max_issues_repo_path | string | length 3 to 239 |
| max_issues_repo_name | string | length 5 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | sequence | length 1 to 10 |
| max_issues_count | int64 ⌀ | 1 to 67k |
| max_issues_repo_issues_event_min_datetime | string ⌀ | length 24 |
| max_issues_repo_issues_event_max_datetime | string ⌀ | length 24 |
| max_forks_repo_path | string | length 3 to 239 |
| max_forks_repo_name | string | length 5 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | sequence | length 1 to 10 |
| max_forks_count | int64 ⌀ | 1 to 105k |
| max_forks_repo_forks_event_min_datetime | string ⌀ | length 24 |
| max_forks_repo_forks_event_max_datetime | string ⌀ | length 24 |
| content | string | length 1 to 1.03M |
| avg_line_length | float64 | 1 to 958k |
| max_line_length | int64 | 1 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |

Each record below lists these fields in this order, pipe-delimited, with the file text inlined in the `content` column and followed by `avg_line_length | max_line_length | alphanum_fraction`.
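For orientation, here is a minimal sketch of how records with this schema could be streamed and filtered. It assumes a Hugging Face-style `datasets` loader; the dataset path, star threshold and license filter are placeholders, not values taken from this dump.

```python
# Sketch only: "org/some-code-dataset" is a placeholder path; the column names
# are the ones listed in the schema above.
from datasets import load_dataset

rows = load_dataset("org/some-code-dataset", split="train", streaming=True)

for row in rows:
    stars = row["max_stars_count"] or 0               # nullable (⌀) column
    licenses = row["max_stars_repo_licenses"] or []
    if row["ext"] == "py" and stars >= 10 and "MIT" in licenses:
        print(row["max_stars_repo_path"], row["size"], row["alphanum_fraction"])
        break
```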
793ffb0ad8f004bda92665ad58c3b30880a75953 | 183 | py | Python | src/front/urls.py | python-krasnodar/clinic-crm | 1d7b84fbf4f1e1510db303df956fa19db8c01b2b | ["BSD-2-Clause"] | 3 | 2018-09-14T16:50:49.000Z | 2019-05-13T23:37:33.000Z | src/front/urls.py | sweetlearn/clinic-crm | 1d7b84fbf4f1e1510db303df956fa19db8c01b2b | ["BSD-2-Clause"] | null | null | null | src/front/urls.py | sweetlearn/clinic-crm | 1d7b84fbf4f1e1510db303df956fa19db8c01b2b | ["BSD-2-Clause"] | 6 | 2018-04-02T04:39:00.000Z | 2021-03-14T11:42:44.000Z |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^details/(?P<pk>\d+)/$', views.details, name='details'),
]
| 20.333333 | 66 | 0.622951 |
793ffb40079ee93413042a72829f55058adfcdf2 | 9,619 | py | Python | lib/tests/streamlit/file_util_test.py | pohlt/streamlit | 852764f4f7d2bc06ddf932632df06c9104bf0a35 | ["Apache-2.0"] | 5 | 2020-07-06T21:29:56.000Z | 2022-03-12T20:04:27.000Z | lib/tests/streamlit/file_util_test.py | pohlt/streamlit | 852764f4f7d2bc06ddf932632df06c9104bf0a35 | ["Apache-2.0"] | null | null | null | lib/tests/streamlit/file_util_test.py | pohlt/streamlit | 852764f4f7d2bc06ddf932632df06c9104bf0a35 | ["Apache-2.0"] | 3 | 2020-07-14T23:32:51.000Z | 2021-12-04T16:49:29.000Z |
# Copyright 2018-2020 Streamlit Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import patch, mock_open, MagicMock
import errno
import os
import pytest
import unittest
from streamlit import env_util
from streamlit import file_util
from streamlit import util
FILENAME = "/some/cache/file"
mock_get_path = MagicMock(return_value=FILENAME)
class FileUtilTest(unittest.TestCase):
def setUp(self):
self.patch1 = patch("streamlit.file_util.os.stat")
self.os_stat = self.patch1.start()
def tearDown(self):
self.patch1.stop()
@patch("streamlit.file_util.get_streamlit_file_path", mock_get_path)
@patch("streamlit.file_util.open", mock_open(read_data="data"))
def test_streamlit_read(self):
"""Test streamlitfile_util.streamlit_read."""
with file_util.streamlit_read(FILENAME) as input:
data = input.read()
self.assertEqual("data", data)
@patch("streamlit.file_util.get_streamlit_file_path", mock_get_path)
@patch("streamlit.file_util.open", mock_open(read_data=b"\xaa\xbb"))
def test_streamlit_read_binary(self):
"""Test streamlitfile_util.streamlit_read."""
with file_util.streamlit_read(FILENAME, binary=True) as input:
data = input.read()
self.assertEqual(b"\xaa\xbb", data)
@patch("streamlit.file_util.get_streamlit_file_path", mock_get_path)
@patch("streamlit.file_util.open", mock_open(read_data="data"))
def test_streamlit_read_zero_bytes(self):
"""Test streamlitfile_util.streamlit_read."""
self.os_stat.return_value.st_size = 0
with pytest.raises(util.Error) as e:
with file_util.streamlit_read(FILENAME) as input:
data = input.read()
self.assertEqual(str(e.value), 'Read zero byte file: "/some/cache/file"')
@patch("streamlit.file_util.get_streamlit_file_path", mock_get_path)
def test_streamlit_write(self):
"""Test streamlitfile_util.streamlit_write."""
dirname = os.path.dirname(file_util.get_streamlit_file_path(FILENAME))
# patch streamlit.*.os.makedirs instead of os.makedirs for py35 compat
with patch("streamlit.file_util.open", mock_open()) as open, patch(
"streamlit.util.os.makedirs"
) as makedirs, file_util.streamlit_write(FILENAME) as output:
output.write("some data")
open().write.assert_called_once_with("some data")
makedirs.assert_called_once_with(dirname, exist_ok=True)
@patch("streamlit.file_util.get_streamlit_file_path", mock_get_path)
@patch("streamlit.env_util.IS_DARWIN", True)
def test_streamlit_write_exception(self):
"""Test streamlitfile_util.streamlit_write."""
with patch("streamlit.file_util.open", mock_open()) as p, patch(
"streamlit.util.os.makedirs"
):
p.side_effect = OSError(errno.EINVAL, "[Errno 22] Invalid argument")
with pytest.raises(util.Error) as e, file_util.streamlit_write(
FILENAME
) as output:
output.write("some data")
error_msg = (
"Unable to write file: /some/cache/file\n"
"Python is limited to files below 2GB on OSX. "
"See https://bugs.python.org/issue24658"
)
self.assertEqual(str(e.value), error_msg)
def test_get_project_streamlit_file_path(self):
expected = os.path.join(
os.getcwd(), file_util.CONFIG_FOLDER_NAME, "some/random/file"
)
self.assertEqual(
expected, file_util.get_project_streamlit_file_path("some/random/file")
)
self.assertEqual(
expected,
file_util.get_project_streamlit_file_path("some", "random", "file"),
)
class FileIsInFolderTest(unittest.TestCase):
def test_file_in_folder(self):
# Test with and without trailing slash
ret = file_util.file_is_in_folder_glob("/a/b/c/foo.py", "/a/b/c/")
self.assertTrue(ret)
ret = file_util.file_is_in_folder_glob("/a/b/c/foo.py", "/a/b/c")
self.assertTrue(ret)
def test_file_in_subfolder(self):
# Test with and without trailing slash
ret = file_util.file_is_in_folder_glob("/a/b/c/foo.py", "/a")
self.assertTrue(ret)
ret = file_util.file_is_in_folder_glob("/a/b/c/foo.py", "/a/")
self.assertTrue(ret)
ret = file_util.file_is_in_folder_glob("/a/b/c/foo.py", "/a/b")
self.assertTrue(ret)
ret = file_util.file_is_in_folder_glob("/a/b/c/foo.py", "/a/b/")
self.assertTrue(ret)
def test_file_not_in_folder(self):
# Test with and without trailing slash
ret = file_util.file_is_in_folder_glob("/a/b/c/foo.py", "/d/e/f/")
self.assertFalse(ret)
ret = file_util.file_is_in_folder_glob("/a/b/c/foo.py", "/d/e/f")
self.assertFalse(ret)
def test_rel_file_not_in_folder(self):
# Test with and without trailing slash
ret = file_util.file_is_in_folder_glob("foo.py", "/d/e/f/")
self.assertFalse(ret)
ret = file_util.file_is_in_folder_glob("foo.py", "/d/e/f")
self.assertFalse(ret)
def test_file_in_folder_glob(self):
ret = file_util.file_is_in_folder_glob("/a/b/c/foo.py", "**/c")
self.assertTrue(ret)
def test_file_not_in_folder_glob(self):
ret = file_util.file_is_in_folder_glob("/a/b/c/foo.py", "**/f")
self.assertFalse(ret)
def test_rel_file_not_in_folder_glob(self):
ret = file_util.file_is_in_folder_glob("foo.py", "**/f")
self.assertFalse(ret)
    def test_rel_file_in_folder_glob_empty(self):
ret = file_util.file_is_in_folder_glob("foo.py", "")
self.assertTrue(ret)
class FileInPythonPathTest(unittest.TestCase):
@staticmethod
def _make_it_absolute(path):
# Use manual join instead of os.abspath to test against non normalized paths
return os.path.join(os.getcwd(), path)
def test_no_pythonpath(self):
with patch("os.environ", {}) as d:
self.assertFalse(
file_util.file_in_pythonpath(
self._make_it_absolute("../something/dir1/dir2/module")
)
)
def test_empty_pythonpath(self):
with patch("os.environ", {"PYTHONPATH": ""}):
self.assertFalse(
file_util.file_in_pythonpath(
self._make_it_absolute("something/dir1/dir2/module")
)
)
def test_python_path_relative(self):
with patch("os.environ", {"PYTHONPATH": "something"}):
self.assertTrue(
file_util.file_in_pythonpath(
self._make_it_absolute("something/dir1/dir2/module")
)
)
self.assertFalse(
file_util.file_in_pythonpath(
self._make_it_absolute("something_else/module")
)
)
self.assertFalse(
file_util.file_in_pythonpath(
self._make_it_absolute("../something/dir1/dir2/module")
)
)
def test_python_path_absolute(self):
with patch("os.environ", {"PYTHONPATH": self._make_it_absolute("something")}):
self.assertTrue(
file_util.file_in_pythonpath(
self._make_it_absolute("something/dir1/dir2/module")
)
)
self.assertFalse(
file_util.file_in_pythonpath(
self._make_it_absolute("something_else/module")
)
)
self.assertFalse(
file_util.file_in_pythonpath(
self._make_it_absolute("../something/dir1/dir2/module")
)
)
def test_python_path_mixed(self):
with patch(
"os.environ",
{
"PYTHONPATH": os.pathsep.join(
[self._make_it_absolute("something"), "something"]
)
},
):
self.assertTrue(
file_util.file_in_pythonpath(
self._make_it_absolute("something/dir1/dir2/module")
)
)
self.assertFalse(
file_util.file_in_pythonpath(
self._make_it_absolute("something_else/module")
)
)
def test_current_directory(self):
with patch("os.environ", {"PYTHONPATH": "."}):
self.assertTrue(
file_util.file_in_pythonpath(
self._make_it_absolute("something/dir1/dir2/module")
)
)
self.assertTrue(
file_util.file_in_pythonpath(
self._make_it_absolute("something_else/module")
)
)
self.assertFalse(
file_util.file_in_pythonpath(
self._make_it_absolute("../something_else/module")
)
)
| 37.574219 | 86 | 0.612538 |
793ffe548d975c300956ba463cacf66c0e7d291d | 3,803 | py | Python | cli/kamonohashi/cli/object_storage.py | netresj/kamonohashi | f43bd98f41cbcf00dac815142e38b6113fb6f0a9 | ["Apache-2.0"] | 100 | 2019-05-28T10:23:30.000Z | 2022-03-05T06:45:25.000Z | cli/kamonohashi/cli/object_storage.py | netresj/kamonohashi | f43bd98f41cbcf00dac815142e38b6113fb6f0a9 | ["Apache-2.0"] | 354 | 2019-05-28T07:34:10.000Z | 2022-03-31T06:25:34.000Z | cli/kamonohashi/cli/object_storage.py | netresj/kamonohashi | f43bd98f41cbcf00dac815142e38b6113fb6f0a9 | ["Apache-2.0"] | 35 | 2019-05-29T02:50:36.000Z | 2021-10-08T06:13:13.000Z |
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, with_statement
import logging
import os
import os.path
import click
from kamonohashi.cli import util
from kamonohashi.op import rest
from kamonohashi.op.rest.rest import ApiException
def progressbar(length, label):
bar = click.progressbar(length=length, label=label)
bar.short_limit = -1
return bar
def to_int(s):
try:
return int(s)
except (TypeError, ValueError):
return None
def upload_file(api_client, file_path, file_type):
"""Upload a file to object storage.
:param rest.ApiClient api_client:
:param str file_path: source file path
:param str file_type: file type in object storage
:rtype: rest.StorageLogicModelsMultiPartUploadModel
"""
chunk_size = 1024 * 1024 * 16
length = (os.path.getsize(file_path) - 1) // chunk_size + 1
api = rest.StorageApi(api_client)
upload_parameter = api.get_upload_paramater(os.path.basename(file_path), length, file_type)
part_e_tags = []
pool_manager = api_client.rest_client.pool_manager
with progressbar(length=length, label=os.path.basename(file_path)) as bar:
logging.info('open %s', file_path)
with open(file_path, 'rb') as f:
logging.info('begin io %s', file_path)
for i in range(length):
data = f.read(chunk_size)
response = pool_manager.request('PUT', upload_parameter.uris[i], body=data,
headers={'Content-Type': 'application/x-www-form-urlencoded'})
e_tag = response.getheader('ETag')
if not (200 <= response.status <= 299 and e_tag):
raise ApiException(http_resp=response)
part_e_tags.append('{}+{}'.format(i + 1, e_tag))
bar.update(1)
logging.info('end io %s', file_path)
model = rest.StorageLogicModelsCompleteMultiplePartUploadInputModel(
key=upload_parameter.key,
part_e_tags=part_e_tags,
upload_id=upload_parameter.upload_id,
)
api.complete_upload(model=model)
return upload_parameter
def download_file(pool_manager, url, dir_path, file_name):
"""Download a file from object storage.
:param urllib3.PoolManager pool_manager:
:param str url: download url
:param str dir_path: destination directory path
:param str file_name: destination file name
"""
file_name = os.path.basename(file_name)
chunk_size = 1024 * 1024 * 16
if not os.path.exists(dir_path):
os.makedirs(dir_path)
file_path = os.path.join(dir_path, file_name)
with util.release_conn(pool_manager.request('GET', url, preload_content=False)) as response:
if not 200 <= response.status <= 299:
raise ApiException(http_resp=response)
content_length = to_int(response.getheader('Content-Length'))
if content_length is not None:
length = (content_length - 1) // chunk_size + 1
with progressbar(length=length, label=file_name) as bar:
logging.info('open %s', file_path)
with open(file_path, 'wb') as f:
logging.info('begin io %s', file_path)
for chunk in response.stream(chunk_size):
f.write(chunk)
bar.update(1)
logging.info('end io %s', file_path)
else:
print('Downloading', file_name)
logging.info('open %s', file_path)
with open(file_path, 'wb') as f:
logging.info('begin io %s', file_path)
for chunk in response.stream(chunk_size):
f.write(chunk)
logging.info('end io %s', file_path)
| 36.219048 | 110 | 0.626874 |
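The two helpers in the file above implement the usual presigned multipart pattern: request per-part upload URLs, PUT each chunk, then complete the upload with the collected part ETags. A minimal usage sketch, assuming an already configured kamonohashi `rest.ApiClient`, with placeholder paths, URL and file-type label:

```python
# Sketch only: `api_client` is assumed to be a configured kamonohashi rest.ApiClient;
# the local paths, the presigned URL and the 'Data' file-type label are placeholders.
upload_info = upload_file(api_client, '/tmp/dataset.zip', 'Data')
print('uploaded as key:', upload_info.key)

# Reuse the client's urllib3 pool manager for the chunked download.
download_file(api_client.rest_client.pool_manager,
              'https://storage.example.com/presigned-get-url',
              '/tmp/downloads', 'dataset.zip')
```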
793ffe9e7283959a003b69599e52faba82ac08bc | 684 | py | Python | feedjack/migrations/0003_auto_20150203_0425.py | allo-/feedjack | 8112474504b7265d6722b74b89171df0e6dc7688 | ["BSD-3-Clause"] | 2 | 2017-12-19T17:11:04.000Z | 2020-08-19T21:15:51.000Z | feedjack/migrations/0003_auto_20150203_0425.py | allo-/feedjack | 8112474504b7265d6722b74b89171df0e6dc7688 | ["BSD-3-Clause"] | 32 | 2016-03-12T13:57:28.000Z | 2017-03-02T11:11:59.000Z | feedjack/migrations/0003_auto_20150203_0425.py | allo-/feedjack | 8112474504b7265d6722b74b89171df0e6dc7688 | ["BSD-3-Clause"] | 2 | 2018-04-06T11:55:47.000Z | 2020-01-12T00:22:04.000Z |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('feedjack', '0002_auto_20150203_0401'),
]
operations = [
migrations.AlterField(
model_name='feed',
name='verify_tls_certs',
            field=models.BooleanField(default=True, help_text='If https connections are used, this option allows to disable TLS certificate verification. Has no effect with python versions before 2.7.9, where TLS certs are never checked.', verbose_name='verify TLS certificates, if any'),
preserve_default=True,
),
]
| 32.571429 | 288 | 0.676901 |
793fff029e5b6bc1d4746278febf57d334c73ac5 | 15,112 | py | Python | core/cli.py | Raul1718/Kunlun-M | e7e475f2c96b6f5b64f6adc385821ade97e6fdd1 | ["MIT"] | 1,059 | 2020-08-06T13:32:10.000Z | 2022-03-31T07:20:27.000Z | core/cli.py | xj84684/Kunlun-M | e7e475f2c96b6f5b64f6adc385821ade97e6fdd1 | ["MIT"] | 87 | 2020-09-08T06:34:45.000Z | 2022-03-28T05:52:36.000Z | core/cli.py | xj84684/Kunlun-M | e7e475f2c96b6f5b64f6adc385821ade97e6fdd1 | ["MIT"] | 171 | 2020-08-13T11:53:47.000Z | 2022-03-30T03:23:07.000Z |
# -*- coding: utf-8 -*-
"""
cli
~~~
Implements CLI mode
:author: Feei <[email protected]>
:homepage: https://github.com/wufeifei/cobra
:license: MIT, see LICENSE for more details.
:copyright: Copyright (c) 2017 Feei. All rights reserved
"""
import os
import codecs
import pprint
import traceback
from prettytable import PrettyTable
from .detection import Detection
from .engine import scan, Running
from .vendors import Vendors
from core.pretreatment import ast_object
from utils.export import write_to_file
from utils.log import logger, logger_console
from utils.file import Directory, load_kunlunmignore
from utils.utils import show_context
from utils.utils import ParseArgs
from utils.utils import md5, random_generator
from core.vendors import get_project_by_version, get_and_save_vendor_vuls
from Kunlun_M.settings import RULES_PATH
from Kunlun_M.const import VUL_LEVEL, VENDOR_VUL_LEVEL
from web.index.models import ScanTask, ScanResultTask, Rules, NewEvilFunc, Project, ProjectVendors, VendorVulns
from web.index.models import get_resultflow_class, get_and_check_scantask_project_id, check_and_new_project_id, get_and_check_scanresult
def get_sid(target, is_a_sid=False):
target = target
if isinstance(target, list):
target = ';'.join(target)
sid = md5(target)[:5]
if is_a_sid:
pre = 'a'
else:
pre = 's'
sid = '{p}{sid}{r}'.format(p=pre, sid=sid, r=random_generator())
return sid.lower()
def check_scantask(task_name, target_path, parameter_config, project_origin, project_des="", auto_yes=False):
s = ScanTask.objects.filter(task_name=task_name, target_path=target_path, parameter_config=parameter_config, is_finished=1).order_by("-id").first()
if s and not auto_yes:
logger.warning("[INIT] ScanTask for {} has been executed.".format(task_name))
logger.warning("[INIT] whether rescan Task {}?(Y/N) (Default N)".format(task_name))
if input().lower() != 'y':
logger.warning("[INIT] whether Show Last Scan Result?(Y/N) (Default Y)")
if input().lower() != 'n':
display_result(s.id, is_ask=True)
else:
s = ScanTask(task_name=task_name, target_path=target_path, parameter_config=parameter_config)
s.save()
# check and new project
check_and_new_project_id(scantask_id=s.id, task_name=task_name, project_origin=project_origin, project_des=project_des)
else:
s = ScanTask(task_name=task_name, target_path=target_path, parameter_config=parameter_config)
s.save()
# check and new project
check_and_new_project_id(s.id, task_name=task_name, project_origin=project_origin, project_des=project_des)
return s
def display_result(scan_id, is_ask=False):
table = PrettyTable(
['#', 'CVI', 'Rule(ID/Name)', 'Lang/CVE-id', 'Level', 'Target-File:Line-Number',
'Commit(Author)', 'Source Code Content', 'Analysis'])
table.align = 'l'
# check unconfirm
if is_ask:
logger.warning("[INIT] whether Show Unconfirm Result?(Y/N) (Default Y)")
project_id = get_and_check_scantask_project_id(scan_id)
if is_ask:
if input().lower() != 'n':
srs = get_and_check_scanresult(scan_id).objects.filter(scan_project_id=project_id, is_active=True)
else:
srs = get_and_check_scanresult(scan_id).objects.filter(scan_project_id=project_id, is_active=True,
is_unconfirm=False)
else:
srs = get_and_check_scanresult(scan_id).objects.filter(scan_project_id=project_id, is_active=True,
is_unconfirm=False)
logger.info("[INIT] Project ID is {}".format(project_id))
if srs:
logger.info("[MainThread] Scan id {} Result: ".format(scan_id))
for sr in srs:
# for vendor scan
if sr.cvi_id == '9999':
vendor_vuls_id = int(sr.vulfile_path.split(':')[-1])
vv = VendorVulns.objects.filter(id=vendor_vuls_id).first()
if vv:
rule_name = vv.title
author = 'SCA'
level = VENDOR_VUL_LEVEL[int(vv.severity)]
# sr.source_code = vv.description
else:
rule_name = 'SCA Scan'
author = 'SCA'
level = VENDOR_VUL_LEVEL[1]
else:
rule = Rules.objects.filter(svid=sr.cvi_id).first()
rule_name = rule.rule_name
author = rule.author
level = VUL_LEVEL[rule.level]
row = [sr.id, sr.cvi_id, rule_name, sr.language, level, sr.vulfile_path,
author, sr.source_code, sr.result_type]
table.add_row(row)
# show Vuls Chain
ResultFlow = get_resultflow_class(scan_id)
rfs = ResultFlow.objects.filter(vul_id=sr.id)
logger.info("[Chain] Vul {}".format(sr.id))
for rf in rfs:
logger.info("[Chain] {}, {}, {}:{}".format(rf.node_type, rf.node_content, rf.node_path, rf.node_lineno))
try:
if not show_context(rf.node_path, rf.node_lineno):
logger_console.info(rf.node_source)
except:
logger.error("[SCAN] Error: {}".format(traceback.print_exc()))
continue
logger.info(
"[SCAN] ending\r\n -------------------------------------------------------------------------")
logger.info("[SCAN] Trigger Vulnerabilities ({vn})\r\n{table}".format(vn=len(srs), table=table))
# show New evil Function
nfs = NewEvilFunc.objects.filter(project_id=project_id, is_active=1)
if nfs:
table2 = PrettyTable(
['#', 'NewFunction', 'OriginFunction', 'Related Rules id'])
table2.align = 'l'
idy = 1
for nf in nfs:
row = [idy, nf.func_name, nf.origin_func_name, nf.svid]
table2.add_row(row)
idy += 1
logger.info("[MainThread] New evil Function list by NewCore:\r\n{table}".format(table=table2))
else:
logger.info("[MainThread] Scan id {} has no Result.".format(scan_id))
def start(target, formatter, output, special_rules, a_sid=None, language=None, tamper_name=None, black_path=None, is_unconfirm=False, is_unprecom=False):
"""
Start CLI
:param black_path:
:param tamper_name:
:param language:
:param target: File, FOLDER, GIT
:param formatter:
:param output:
:param special_rules:
:param a_sid: all scan id
:return:
"""
global ast_object
# generate single scan id
s_sid = get_sid(target)
r = Running(a_sid)
data = (s_sid, target)
r.init_list(data=target)
r.list(data)
report = '?sid={a_sid}'.format(a_sid=a_sid)
d = r.status()
d['report'] = report
r.status(d)
task_id = a_sid
    # load kunlunmignore
load_kunlunmignore()
# parse target mode and output mode
pa = ParseArgs(target, formatter, output, special_rules, language, black_path, a_sid=None)
target_mode = pa.target_mode
output_mode = pa.output_mode
black_path_list = pa.black_path_list
# target directory
try:
target_directory = pa.target_directory(target_mode)
logger.info('[CLI] Target : {d}'.format(d=target_directory))
# static analyse files info
files, file_count, time_consume = Directory(target_directory, black_path_list).collect_files()
# vendor check
project_id = get_and_check_scantask_project_id(task_id)
Vendors(task_id, project_id, target_directory, files)
# detection main language and framework
if not language:
dt = Detection(target_directory, files)
main_language = dt.language
main_framework = dt.framework
else:
main_language = pa.language
main_framework = pa.language
logger.info('[CLI] [STATISTIC] Language: {l} Framework: {f}'.format(l=",".join(main_language), f=main_framework))
logger.info('[CLI] [STATISTIC] Files: {fc}, Extensions:{ec}, Consume: {tc}'.format(fc=file_count,
ec=len(files),
tc=time_consume))
if pa.special_rules is not None:
logger.info('[CLI] [SPECIAL-RULE] only scan used by {r}'.format(r=','.join(pa.special_rules)))
# Pretreatment ast object
ast_object.init_pre(target_directory, files)
ast_object.pre_ast_all(main_language, is_unprecom=is_unprecom)
# scan
scan(target_directory=target_directory, a_sid=a_sid, s_sid=s_sid, special_rules=pa.special_rules,
language=main_language, framework=main_framework, file_count=file_count, extension_count=len(files),
files=files, tamper_name=tamper_name, is_unconfirm=is_unconfirm)
# show result
display_result(task_id)
except KeyboardInterrupt as e:
logger.error("[!] KeyboardInterrupt, exit...")
exit()
except Exception:
result = {
'code': 1002,
'msg': 'Exception'
}
Running(s_sid).data(result)
raise
    # write the results to the output file
write_to_file(target=target, sid=s_sid, output_format=formatter, filename=output)
def show_info(type, key):
"""
    Show rule or tamper information.
"""
def list_parse(rules_path, istamp=False):
files = os.listdir(rules_path)
result = []
for f in files:
if f.startswith("_") or f.endswith("pyc"):
continue
if os.path.isdir(os.path.join(rules_path, f)):
if f not in ['test', 'tamper']:
result.append(f)
if f.startswith("CVI_"):
result.append(f)
if istamp:
if f not in ['test.py', 'demo.py', 'none.py']:
result.append(f)
return result
info_dict = {}
if type == "rule":
rule_lan_list = list_parse(RULES_PATH)
rule_dict = {}
if key == "all":
# show all
for lan in rule_lan_list:
info_dict[lan] = []
rule_lan_path = os.path.join(RULES_PATH, lan)
info_dict[lan] = list_parse(rule_lan_path)
elif key in rule_lan_list:
info_dict[key] = []
rule_lan_path = os.path.join(RULES_PATH, key)
info_dict[key] = list_parse(rule_lan_path)
elif str(int(key)) == key:
for lan in rule_lan_list:
info_dict[lan] = []
rule_lan_path = os.path.join(RULES_PATH, lan)
info_dict[lan] = list_parse(rule_lan_path)
for lan in info_dict:
if "CVI_{}.py".format(key) in info_dict[lan]:
f = codecs.open(os.path.join(RULES_PATH, lan, "CVI_{}.py".format(key)), encoding='utf-8', errors="ignore")
return f.read()
logger.error('[Show] no CVI id {}.'.format(key))
return ""
else:
logger.error('[Show] error language/CVI id input.')
return ""
i = 0
table = PrettyTable(
['#', 'CVI', 'Lang/CVE-id', 'Rule(ID/Name)', 'Match', 'Status'])
table.align = 'l'
for lan in info_dict:
for rule in info_dict[lan]:
i += 1
rulename = rule.split('.')[0]
rulefile = "rules." + lan + "." + rulename
rule_obj = __import__(rulefile, fromlist=rulename)
p = getattr(rule_obj, rulename)
ruleclass = p()
table.add_row([i, ruleclass.svid, ruleclass.language, ruleclass.vulnerability, ruleclass.match, ruleclass.status])
return table
elif type == "tamper":
table = PrettyTable(
['#', 'TampName', 'FilterFunc', 'InputControl'])
table.align = 'l'
i = 0
tamp_path = os.path.join(RULES_PATH, 'tamper/')
tamp_list = list_parse(tamp_path, True)
if key == "all":
for tamp in tamp_list:
i += 1
tampname = tamp.split('.')[0]
tampfile = "rules.tamper." + tampname
tamp_obj = __import__(tampfile, fromlist=tampname)
filter_func = getattr(tamp_obj, tampname)
input_control = getattr(tamp_obj, tampname + "_controlled")
table.add_row([i, tampname, filter_func, input_control])
return table
elif key + ".py" in tamp_list:
tampname = key
tampfile = "rules.tamper." + tampname
tamp_obj = __import__(tampfile, fromlist=tampname)
filter_func = getattr(tamp_obj, tampname)
input_control = getattr(tamp_obj, tampname + "_controlled")
return """
Tamper Name:
{}
Filter Func:
{}
Input Control:
{}
""".format(tampname, pprint.pformat(filter_func, indent=4), pprint.pformat(input_control, indent=4))
else:
logger.error("[Info] no tamper name {]".format(key))
return ""
def search_project(search_type, keyword, keyword_value, with_vuls=False):
"""
    Search project information by the given keyword.
:param with_vuls:
:param search_type:
:param keyword:
:param keyword_value:
:return:
"""
if search_type == 'vendor':
ps = get_project_by_version(keyword, keyword_value)
table = PrettyTable(
['#', 'ProjectId', 'Project Name', 'Project Origin', 'Vendor', 'Version'])
table.align = 'l'
table2 = PrettyTable(
['#', 'Vuln ID', 'Title', 'level', 'CVE', 'Reference', 'Vendor', 'Affected Version'])
table2.align = 'l'
i = 0
j = 0
if not ps:
return False
for p in ps:
pid = p.id
pname = p.project_name
porigin = p.project_origin
vs = ps[p]
for v in vs:
i += 1
vendor_name = v.name
vendor_vension = v.version
table.add_row([i, pid, pname, porigin, vendor_name, vendor_vension])
if with_vuls:
vvs = get_and_save_vendor_vuls(0, vendor_name, vendor_vension, v.language, v.ext)
for vv in vvs:
j += 1
table2.add_row([i, vv.vuln_id, vv.title, VENDOR_VUL_LEVEL[vv.severity], vv.cves, vv.reference, vv.vendor_name, vv.affected_versions])
logger.info("Project List (Small than {} {}):\n{}".format(keyword, keyword_value, table))
logger.info("Vendor {}:{} Vul List:\n{}".format(keyword, keyword_value, table2))
return True
| 32.568966 | 157 | 0.57762 |
793fff1f00944362dc45671cfb4d0b7ebd3abae6 | 1,448 | py | Python | tests/st/ops/ascend/test_tbe_ops/test_addn.py | ythlml/mindspore | 028ae212624164044cfaa84f347fc502cb7fcb0f | ["Apache-2.0"] | 7 | 2020-05-24T03:19:26.000Z | 2020-05-24T03:20:00.000Z | tests/st/ops/ascend/test_tbe_ops/test_addn.py | ythlml/mindspore | 028ae212624164044cfaa84f347fc502cb7fcb0f | ["Apache-2.0"] | null | null | null | tests/st/ops/ascend/test_tbe_ops/test_addn.py | ythlml/mindspore | 028ae212624164044cfaa84f347fc502cb7fcb0f | ["Apache-2.0"] | null | null | null |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor
from mindspore.common.api import ms_function
from mindspore.common.initializer import initializer
from mindspore.common.parameter import Parameter
from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
class Net(nn.Cell):
def __init__(self):
super(Net, self).__init__()
self.add = P.AddN()
def construct(self, x, y):
return self.add((x, y))
def test_net():
x = np.random.randn(1, 3, 3, 4).astype(np.float32)
y = np.random.randn(1, 3, 3, 4).astype(np.float32)
add = Net()
output = add(Tensor(x), Tensor(y))
print(x)
print(y)
print(output.asnumpy())
| 32.177778 | 78 | 0.687845 |
793fff306ada0a6def213e73af270101d74e3dc6 | 1,083 | py | Python | setup.py | fcivaner/hashdir | 74a5ff7319fc02ff5f9a63c51771fbfface446ec | ["MIT"] | null | null | null | setup.py | fcivaner/hashdir | 74a5ff7319fc02ff5f9a63c51771fbfface446ec | ["MIT"] | null | null | null | setup.py | fcivaner/hashdir | 74a5ff7319fc02ff5f9a63c51771fbfface446ec | ["MIT"] | null | null | null |
import os
from setuptools import setup, find_packages
import hashdir
this_directory = os.path.abspath(os.path.dirname(__file__))
def read_file(path):
with open(os.path.join(this_directory, path), encoding="utf-8") as f:
return f.read()
long_description = read_file("README.md")
setup(
name=hashdir.name,
version=hashdir.__version__,
url="https://github.com/fcivaner/hashdir",
description=hashdir.description,
description_content_type="text/markdown",
long_description=long_description,
long_description_content_type="text/markdown",
keywords="hash directory imohash md5",
author="Fırat Civaner",
author_email="[email protected]",
license="MIT License",
packages=find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
entry_points={"console_scripts": ["hashdir=hashdir.__main__:main"]},
install_requires=["imohash"],
setup_requires=["setuptools>=41.0.1", "wheel>=0.33.4"],
)
| 29.27027 | 73 | 0.696214 |
793fff7afd858b9e162d61669748df5f925984a0 | 4,812 | py | Python | gen/argo/events/client/models/v1_volume_photon_persistent_disk.py | argoproj-labs/argo-events-client-python | 3d6e3dffca4a12a490c2963f4ac90c8894948bb5 | ["Apache-2.0"] | null | null | null | gen/argo/events/client/models/v1_volume_photon_persistent_disk.py | argoproj-labs/argo-events-client-python | 3d6e3dffca4a12a490c2963f4ac90c8894948bb5 | ["Apache-2.0"] | null | null | null | gen/argo/events/client/models/v1_volume_photon_persistent_disk.py | argoproj-labs/argo-events-client-python | 3d6e3dffca4a12a490c2963f4ac90c8894948bb5 | ["Apache-2.0"] | null | null | null |
# coding: utf-8
"""
Argo Events
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from argo.events.client.configuration import Configuration
class V1VolumePhotonPersistentDisk(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'fs_type': 'str',
'pd_id': 'str'
}
attribute_map = {
'fs_type': 'fsType',
'pd_id': 'pdID'
}
def __init__(self, fs_type=None, pd_id=None, local_vars_configuration=None): # noqa: E501
"""V1VolumePhotonPersistentDisk - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._fs_type = None
self._pd_id = None
self.discriminator = None
if fs_type is not None:
self.fs_type = fs_type
self.pd_id = pd_id
@property
def fs_type(self):
"""Gets the fs_type of this V1VolumePhotonPersistentDisk. # noqa: E501
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. # noqa: E501
:return: The fs_type of this V1VolumePhotonPersistentDisk. # noqa: E501
:rtype: str
"""
return self._fs_type
@fs_type.setter
def fs_type(self, fs_type):
"""Sets the fs_type of this V1VolumePhotonPersistentDisk.
Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. # noqa: E501
:param fs_type: The fs_type of this V1VolumePhotonPersistentDisk. # noqa: E501
:type: str
"""
self._fs_type = fs_type
@property
def pd_id(self):
"""Gets the pd_id of this V1VolumePhotonPersistentDisk. # noqa: E501
ID that identifies Photon Controller persistent disk # noqa: E501
:return: The pd_id of this V1VolumePhotonPersistentDisk. # noqa: E501
:rtype: str
"""
return self._pd_id
@pd_id.setter
def pd_id(self, pd_id):
"""Sets the pd_id of this V1VolumePhotonPersistentDisk.
ID that identifies Photon Controller persistent disk # noqa: E501
:param pd_id: The pd_id of this V1VolumePhotonPersistentDisk. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and pd_id is None: # noqa: E501
raise ValueError("Invalid value for `pd_id`, must not be `None`") # noqa: E501
self._pd_id = pd_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1VolumePhotonPersistentDisk):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1VolumePhotonPersistentDisk):
return True
return self.to_dict() != other.to_dict()
| 31.657895 | 197 | 0.604738 |
793fffcb15f3f3432198b56c54c86c555f81a30e | 3,003 | py | Python | Behavioral-Patterns/Visitor/classic.py | PratikRamdasi/Design-Patterns-in-Python | 689677ab861c96f4908bfc5f5fa77585e14f814c | ["MIT"] | null | null | null | Behavioral-Patterns/Visitor/classic.py | PratikRamdasi/Design-Patterns-in-Python | 689677ab861c96f4908bfc5f5fa77585e14f814c | ["MIT"] | null | null | null | Behavioral-Patterns/Visitor/classic.py | PratikRamdasi/Design-Patterns-in-Python | 689677ab861c96f4908bfc5f5fa77585e14f814c | ["MIT"] | null | null | null |
# taken from https://tavianator.com/the-visitor-pattern-in-python/
def _qualname(obj):
"""Get the fully-qualified name of an object (including module)."""
return obj.__module__ + '.' + obj.__qualname__
def _declaring_class(obj):
"""Get the name of the class that declared an object."""
name = _qualname(obj)
return name[:name.rfind('.')]
# Stores the actual visitor methods
_methods = {}
# Delegating visitor implementation
def _visitor_impl(self, arg):
"""Actual visitor method implementation."""
method = _methods[(_qualname(type(self)), type(arg))]
return method(self, arg)
# The actual @visitor decorator
def visitor(arg_type):
"""Decorator that creates a visitor method."""
def decorator(fn):
declaring_class = _declaring_class(fn)
_methods[(declaring_class, arg_type)] = fn
# Replace all decorated methods with _visitor_impl
return _visitor_impl
return decorator
# ↑↑↑ LIBRARY CODE ↑↑↑
# most common - Gang Of Four
# using => Double Dispatch -> overloaded `visit` method
# with self as argument
# more flexible, more scalable
class DoubleExpression:
def __init__(self, value):
self.value = value
def accept(self, visitor):
visitor.visit(self)
class AdditionExpression:
def __init__(self, left, right):
self.left = left
self.right = right
def accept(self, visitor):
visitor.visit(self)
class ExpressionPrinter:
def __init__(self):
self.buffer = []
@visitor(DoubleExpression)
def visit(self, de):
self.buffer.append(str(de.value))
@visitor(AdditionExpression)
def visit(self, ae):
self.buffer.append('(')
# ae.left.accept(self) # better
self.visit(ae.left) # not better than accept
self.buffer.append('+')
# ae.right.accept(self) # better
self.visit(ae.right) # not better than accept
self.buffer.append(')')
def __str__(self):
return ''.join(self.buffer)
class ExpressionEvaluator:
def __init__(self):
self.value = None
@visitor(DoubleExpression)
def visit(self, de):
self.value = de.value
@visitor(AdditionExpression)
def visit(self, ae): # stateful visitor
self.visit(ae.left)
temp = self.value # cache
self.visit(ae.right) # over writes self.value
# so we saved it in temp variable
self.value += temp
if __name__ == '__main__':
# represents 1+(2+3)
e = AdditionExpression(
DoubleExpression(1),
AdditionExpression(
DoubleExpression(2),
DoubleExpression(3)
)
)
# buffer = []
printer = ExpressionPrinter()
printer.visit(e)
evaluator = ExpressionEvaluator()
evaluator.visit(e)
print(f'{printer} = {evaluator.value}') # (1+(2+3)) = 6
| 25.235294 | 72 | 0.606061 |
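The comments in the file above call the decorator-based double dispatch "more flexible, more scalable"; one concrete illustration is that a new visitor can be added without touching the expression classes. The sketch below is a hypothetical extension that reuses the `@visitor` decorator, `DoubleExpression` and `AdditionExpression` defined above:

```python
# Hypothetical extra visitor built on the definitions above; the expression
# classes themselves do not change.
class ExpressionDepthCounter:
    @visitor(DoubleExpression)
    def visit(self, de):
        return 1  # a leaf contributes depth 1

    @visitor(AdditionExpression)
    def visit(self, ae):
        # depth of a node is 1 plus the deeper of its two subtrees
        return 1 + max(self.visit(ae.left), self.visit(ae.right))


counter = ExpressionDepthCounter()
expr = AdditionExpression(DoubleExpression(1),
                          AdditionExpression(DoubleExpression(2), DoubleExpression(3)))
print(counter.visit(expr))  # 3
```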
794000347b4999adfaafda1406a685b8d095259c | 129,363 | py | Python | fkie_node_manager/src/fkie_node_manager/main_window.py | ETHZ-RobotX/multimaster_fkie | 00fd99f210f7a1e15a24b6c531465042c3075f11 | ["BSD-3-Clause"] | null | null | null | fkie_node_manager/src/fkie_node_manager/main_window.py | ETHZ-RobotX/multimaster_fkie | 00fd99f210f7a1e15a24b6c531465042c3075f11 | ["BSD-3-Clause"] | null | null | null | fkie_node_manager/src/fkie_node_manager/main_window.py | ETHZ-RobotX/multimaster_fkie | 00fd99f210f7a1e15a24b6c531465042c3075f11 | ["BSD-3-Clause"] | 1 | 2021-09-12T20:44:20.000Z | 2021-09-12T20:44:20.000Z |
# encoding: utf-8
#
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Fraunhofer FKIE/US, Alexander Tiderko
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Fraunhofer nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from datetime import datetime
from docutils import examples
from fkie_multimaster_msgs.msg import MasterState
from python_qt_binding import loadUi, QT_BINDING_VERSION
from python_qt_binding.QtCore import QFile, QPoint, QSize, Qt, QTimer, QUrl, Signal
from python_qt_binding.QtGui import QDesktopServices, QIcon, QKeySequence, QPixmap
from python_qt_binding.QtGui import QPalette, QColor
import getpass
import grpc
import os
import rospy
import socket
import time
import uuid
try:
import xmlrpclib as xmlrpcclient
except ImportError:
import xmlrpc.client as xmlrpcclient
import ruamel.yaml
from fkie_master_discovery.common import resolve_url, subdomain, masteruri_from_master, masteruri_from_ros
from fkie_node_manager_daemon.common import utf8, get_pkg_path
from fkie_node_manager_daemon.host import get_hostname
from fkie_node_manager_daemon import screen
from fkie_node_manager_daemon import url as nmdurl
import fkie_node_manager as nm
from .capability_table import CapabilityTable
from .detailed_msg_box import MessageBox
from .discovery_listener import MasterListService, MasterStateTopic, MasterStatisticTopic, OwnMasterMonitoring
from .editor.editor import Editor
from .launch_enhanced_line_edit import EnhancedLineEdit
from .launch_files_widget import LaunchFilesWidget
from .log_widget import LogWidget
from .master_list_model import MasterModel, MasterIconsDelegate
from .master_view_proxy import MasterViewProxy
from .menu_rqt import MenuRqt
from .network_discovery_dialog import NetworkDiscoveryDialog
from .parameter_dialog import ParameterDialog
from .profile_widget import ProfileWidget
from .progress_queue import ProgressQueue
from .logscreen.screen_dock import ScreenDock
from .select_dialog import SelectDialog
from .sync_dialog import SyncDialog
from .update_handler import UpdateHandler
try:
from python_qt_binding.QtGui import QApplication, QFileDialog, QMainWindow, QStackedLayout, QWidget, QStyle
from python_qt_binding.QtGui import QShortcut, QVBoxLayout, QColorDialog, QDialog, QRadioButton, QDockWidget
except Exception:
from python_qt_binding.QtWidgets import QApplication, QFileDialog, QMainWindow, QStackedLayout, QWidget, QStyle
from python_qt_binding.QtWidgets import QShortcut, QVBoxLayout, QColorDialog, QDialog, QRadioButton, QDockWidget
try:
from diagnostic_msgs.msg import DiagnosticArray, DiagnosticStatus
DIAGNOSTICS_AVAILABLE = True
except Exception:
import sys
sys.stderr.write("Cannot import 'diagnostic_msgs', feature disabled.")
DIAGNOSTICS_AVAILABLE = False
class MainWindow(QMainWindow):
'''
The class to create the main window of the application.
'''
close_signal = Signal()
DELAYED_NEXT_REQ_ON_ERR = 5.0
if DIAGNOSTICS_AVAILABLE:
diagnostics_signal = Signal(DiagnosticStatus)
'''@ivar: the signal is emitted if a message on topic nm_notifier was
        received (DiagnosticStatus)'''
def __init__(self, files=[], restricted_to_one_master=False, monitor_port=22622, parent=None):
'''
Creates the window, connects the signals and init the class.
'''
QMainWindow.__init__(self)
self.close_event_count = 0
self.default_load_launch = os.path.abspath(resolve_url(files[0])) if files else ''
self.default_profile_load = os.path.isfile(self.default_load_launch) and self.default_load_launch.endswith('.nmprofile')
restricted_to_one_master = False
self._finished = False
self._history_selected_robot = ''
self.__icons = {'empty': (QIcon(), ''),
'default_pc': (nm.settings().icon('crystal_clear_miscellaneous.png'), nm.settings().icon_path('crystal_clear_miscellaneous.png')),
'log_warning': (nm.settings().icon('crystal_clear_no_io.png'), nm.settings().icon_path('crystal_clear_no_io.png')),
'show_io': (nm.settings().icon('crystal_clear_show_io.png'), nm.settings().icon_path('crystal_clear_show_io.png'))
}
self.__current_icon = None
self.__current_master_label_name = None
self._syncs_to_start = [] # hostnames
self._accept_next_update = False
self._last_window_state = False
self._description_history = []
self._description_accept = ''
# self.setAttribute(Qt.WA_AlwaysShowToolTips, True)
# setup main window frame
self.setObjectName('MainWindow')
# self = mainWindow = QMainWindow()
# self = mainWindow = loader.load(":/forms/MainWindow.ui")
ui_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ui', 'MainWindow.ui')
loadUi(ui_file, self)
self.setObjectName('MainUI')
self.setDockOptions(QMainWindow.AllowNestedDocks | QMainWindow.AllowTabbedDocks | QMainWindow.AnimatedDocks | QMainWindow.VerticalTabs)
# set icons
self.logButton.setIcon(nm.settings().icon('crystal_clear_show_io.png'))
self.settingsButton.setIcon(nm.settings().icon('crystal_clear_settings.png'))
self.infoButton.setIcon(nm.settings().icon('crystal_clear_info.png'))
self.simTimeLabel.setPixmap(nm.settings().pixmap('crystal_clear_xclock.png'))
self.launchServerLabel.setPixmap(nm.settings().pixmap('crystal_clear_launch_server.png'))
self.user_label.setPixmap(nm.settings().pixmap('crystal_clear_user.png'))
self.setTimeButton.setIcon(nm.settings().icon('crystal_clear_set_clock.png'))
self.refreshHostButton.setIcon(nm.settings().icon('oxygen_view_refresh.png'))
self.runButton.setIcon(nm.settings().icon('crystal_clear_clicknrun.png'))
self.syncButton.setIcon(nm.settings().icon('irondevil_sync.png'))
self.progressCancelButton_sync.setIcon(nm.settings().icon('crystal_clear_button_close.png'))
self.progressCancelButton.setIcon(nm.settings().icon('crystal_clear_button_close.png'))
self.refreshAllButton.setIcon(nm.settings().icon('oxygen_view_refresh.png'))
self.discoveryButton.setIcon(nm.settings().icon('crystal_clear_discovery.png'))
self.masterLogButton.setIcon(nm.settings().icon('crystal_clear_show_log.png'))
self.startRobotButton.setIcon(nm.settings().icon('crystal_clear_run_zeroconf.png'))
self.close_signal.connect(self.close)
self.close_without_ask = False
self.user_frame.setVisible(False)
self._add_user_to_combo(getpass.getuser())
self.userComboBox.editTextChanged.connect(self.on_user_changed)
self.masterInfoFrame.setEnabled(False)
self.infoButton.clicked.connect(self.on_info_clicked)
self.setTimeButton.clicked.connect(self.on_set_time_clicked)
self.refreshHostButton.clicked.connect(self.on_refresh_master_clicked)
self.masterLogButton.clicked.connect(self.on_master_log_clicked)
self.runButton.clicked.connect(self.on_run_node_clicked)
self.syncButton.released.connect(self.on_sync_dialog_released)
menu_rqt = MenuRqt(self.rqtButton)
menu_rqt.start_rqt_plugin_signal.connect(self.on_rqt_plugin_start)
pal = self.expert_tab.palette()
self._default_color = pal.color(QPalette.Window)
self._set_custom_colors()
# setup settings widget
self.profiler = ProfileWidget(self, self)
self.addDockWidget(Qt.LeftDockWidgetArea, self.profiler)
# setup logger widget
self.log_dock = LogWidget()
self.log_dock.added_signal.connect(self._on_log_added)
self.log_dock.cleared_signal.connect(self._on_log_cleared)
self.log_dock.setVisible(False)
self.addDockWidget(Qt.BottomDockWidgetArea, self.log_dock)
self.logButton.clicked.connect(self._on_log_button_clicked)
self.settingsButton.clicked.connect(self._on_settings_button_clicked)
# setup screen dock
self.screen_dock = ScreenDock(self)
self.addDockWidget(Qt.BottomDockWidgetArea, self.screen_dock)
self.screen_dock.hide()
# setup the launch files view
self.launch_dock = LaunchFilesWidget()
self.launch_dock.load_signal.connect(self.on_load_launch_file)
self.launch_dock.load_profile_signal.connect(self.profiler.on_load_profile_file)
self.launch_dock.edit_signal.connect(self.on_launch_edit)
self.launch_dock.transfer_signal.connect(self.on_launch_transfer)
self.launch_dock.save_profile_signal.connect(self.profiler.on_save_profile)
self.addDockWidget(Qt.LeftDockWidgetArea, self.launch_dock)
self.mIcon = nm.settings().icon('crystal_clear_prop_run.png')
# self.style().standardIcon(QStyle.SP_FileIcon)
self.setWindowTitle("Node Manager")
self.setWindowIcon(self.mIcon)
# self.setCentralWidget(mainWindow)
# init the stack layout which contains the information about different ros master
self.stackedLayout = QStackedLayout()
self.stackedLayout.setObjectName('stackedLayout')
self.tabWidget.currentChanged.connect(self.on_currentChanged_tab)
self.tabLayout = QVBoxLayout(self.tabPlace)
self.tabLayout.setObjectName("tabLayout")
self.tabLayout.setContentsMargins(0, 0, 0, 0)
self.tabLayout.addLayout(self.stackedLayout)
# initialize the progress queue
self._progress_queue = ProgressQueue(self.progressFrame, self.progressBar, self.progressCancelButton, 'Network')
self._progress_queue_sync = ProgressQueue(self.progressFrame_sync, self.progressBar_sync, self.progressCancelButton_sync, 'Sync')
# initialize the view for the discovered ROS master
self.master_model = MasterModel(self.getMasteruri())
self.master_model.sync_start.connect(self.on_sync_start)
self.master_model.sync_stop.connect(self.on_sync_stop)
self.master_delegate = MasterIconsDelegate()
self.masterTableView.setItemDelegateForColumn(1, self.master_delegate)
self.masterTableView.setModel(self.master_model)
self.master_model.parent_view = self.masterTableView
# self.masterTableView.setAlternatingRowColors(True)
# self.masterTableView.clicked.connect(self.on_master_table_clicked)
# self.masterTableView.pressed.connect(self.on_master_table_pressed)
self.masterTableView.activated.connect(self.on_master_table_activated)
sm = self.masterTableView.selectionModel()
sm.currentRowChanged.connect(self.on_masterTableView_selection_changed)
for i, (_, width) in enumerate(MasterModel.header): # _:=name
self.masterTableView.setColumnWidth(i, width)
self.refreshAllButton.clicked.connect(self.on_all_master_refresh_clicked)
self.discoveryButton.clicked.connect(self.on_discover_network_clicked)
self.startRobotButton.clicked.connect(self.on_start_robot_clicked)
# stores the widget to a
self.masters = dict() # masteruri : MasterViewProxy
self.currentMaster = None # MasterViewProxy
self._close_on_exit = True
############################################################################
self.capabilitiesTable = CapabilityTable(self.capabilities_tab)
self.capabilitiesTable.setObjectName("capabilitiesTable")
self.capabilitiesTable.start_nodes_signal.connect(self.on_start_nodes)
self.capabilitiesTable.stop_nodes_signal.connect(self.on_stop_nodes)
self.capabilitiesTable.description_requested_signal.connect(self.on_description_update_cap)
self.capabilities_tab.layout().addWidget(self.capabilitiesTable)
self.descriptionTextEdit.setOpenLinks(False)
self.descriptionTextEdit.anchorClicked.connect(self.on_description_anchorClicked)
self._shortcut_copy = QShortcut(QKeySequence(self.tr("Ctrl+Shift+C", "copy selected description")), self.descriptionTextEdit)
self._shortcut_copy.activated.connect(self.descriptionTextEdit.copy)
self.tabifyDockWidget(self.launch_dock, self.descriptionDock)
self.launch_dock.raise_()
flags = self.windowFlags()
self.setWindowFlags(flags | Qt.WindowContextHelpButtonHint)
self._discover_dialog = None
self.restricted_to_one_master = restricted_to_one_master
if restricted_to_one_master:
self.syncButton.setEnabled(False)
self.refreshAllButton.setEnabled(False)
self.discoveryButton.setEnabled(False)
self.startRobotButton.setEnabled(False)
self._sync_dialog = SyncDialog()
self._shortcut_focus = QShortcut(QKeySequence(self.tr("Ctrl+Shift+F", "switch to next focus area")), self)
self._shortcut_focus.activated.connect(self._show_section_menu)
self.editor_dialogs = dict() # [file] = Editor
'''@ivar: stores the open Editor '''
self.simTimeLabel.setVisible(False)
self.launchServerLabel.setVisible(False)
# since the is_local method is threaded for host names, call it to cache the localhost
nm.is_local("localhost")
# add help page
self.ui_help_web_view.page().setLinkDelegationPolicy(self.ui_help_web_view.page().DelegateAllLinks)
self.ui_help_web_view.linkClicked.connect(self._on_help_link_clicked)
self._help_history = []
self._help_history_idx = -1
self._help_root_url = QUrl('file://%s' % nm.settings().HELP_FILE)
self._on_help_go_home()
self.ui_help_home.clicked.connect(self._on_help_go_home)
self.ui_help_back.clicked.connect(self._on_help_go_back)
self.ui_help_forward.clicked.connect(self._on_help_go_forward)
if self.ui_help_home.icon().isNull():
self.ui_help_home.setText("Home")
if self.ui_help_back.icon().isNull():
self.ui_help_back.setText("Back")
if self.ui_help_forward.icon().isNull():
self.ui_help_forward.setText("Forward")
try:
screen.test_screen()
except Exception as e:
rospy.logerr("No SCREEN available! You can't launch nodes.")
# MessageBox.warning(self, "No SCREEN",
# "No SCREEN available! You can't launch nodes.",
# '%s'%utf8(e))
self.imageLabel.mouseDoubleClickEvent = self.image_mouseDoubleClickEvent
self.masternameLabel.mouseDoubleClickEvent = self.mastername_mouseDoubleClickEvent
try:
self.readSettings()
self.launch_dock.raise_()
except Exception as e:
rospy.logwarn("Error while read settings: %s" % e)
# setup the hide button, which hides the docks on left side
docks = self._dock_widget_in(Qt.LeftDockWidgetArea, only_visible=True)
if not docks:
self.hideDocksButton.toggle()
self.on_hide_docks_toggled(True)
self.hideDocksButton.clicked.connect(self.on_hide_docks_toggled)
if not nm.settings().movable_dock_widgets:
self.networkDock.setFeatures(self.networkDock.NoDockWidgetFeatures)
self.launch_dock.setFeatures(self.launch_dock.NoDockWidgetFeatures)
self.descriptionDock.setFeatures(self.descriptionDock.NoDockWidgetFeatures)
self.log_dock.setFeatures(self.log_dock.NoDockWidgetFeatures)
# =============================
# Initialize the update handler
# =============================
# initialize the class to get the state of discovering of other ROS master
self._update_handler = UpdateHandler()
self._update_handler.master_info_signal.connect(self.on_master_info_retrieved)
self._update_handler.master_errors_signal.connect(self.on_master_errors_retrieved)
self._update_handler.timediff_signal.connect(self.on_master_timediff_retrieved)
self._update_handler.username_signal.connect(self.on_master_username_retrieved)
self._update_handler.error_signal.connect(self.on_master_info_error)
# this monitor class is used, if no master_discovery node is running to get the state of the local ROS master
self.own_master_monitor = OwnMasterMonitoring()
self.own_master_monitor.init(monitor_port)
self.own_master_monitor.state_signal.connect(self.on_master_state_changed)
self.own_master_monitor.err_signal.connect(self.on_master_monitor_err)
# get the name of the service and topic of the discovery node. The name are determine by the message type of those topics
self.masterlist_service = masterlist_service = MasterListService()
masterlist_service.masterlist_signal.connect(self.on_master_list_retrieved)
masterlist_service.masterlist_err_signal.connect(self.on_master_list_err_retrieved)
self.state_topic = MasterStateTopic()
self.state_topic.state_signal.connect(self.on_master_state_changed)
self.stats_topic = MasterStatisticTopic()
self.stats_topic.stats_signal.connect(self.on_conn_stats_updated)
# timer to update the showed update time of the ros state
self.master_timecheck_timer = QTimer()
self.master_timecheck_timer.timeout.connect(self.on_master_timecheck)
self.master_timecheck_timer.start(1000)
self._refresh_time = time.time()
self._last_time_view_update = time.time()
self._con_tries = dict()
self._subscribe()
nm.nmd().monitor.system_diagnostics_signal.connect(self._callback_system_diagnostics)
nm.nmd().monitor.remote_diagnostics_signal.connect(self._callback_diagnostics)
nm.nmd().monitor.username_signal.connect(self._callback_username)
self._select_index = 0
self._shortcut_restart_nodes = QShortcut(QKeySequence(self.tr("Ctrl+Shift+R", "restart selected nodes")), self)
self._shortcut_restart_nodes.activated.connect(self._restart_nodes)
self._shortcut_restart_nodes_g = QShortcut(QKeySequence(self.tr("Ctrl+Shift+Alt+R", "restart selected nodes and reload global parameter")), self)
self._shortcut_restart_nodes_g.activated.connect(self._restart_nodes_g)
nm.nmd().error.connect(self.on_nmd_err)
nm.nmd().settings.yaml_config_signal.connect(self._nmd_yaml_cfg)
def _dock_widget_in(self, area=Qt.LeftDockWidgetArea, only_visible=False):
result = []
docks = [self.launch_dock, self.descriptionDock, self.networkDock]
for dock in docks:
if self.dockWidgetArea(dock) == area:
if not only_visible or (only_visible and dock.isVisibleTo(self)):
result.append(dock)
return result
def _on_log_button_clicked(self):
self.log_dock.setVisible(not self.log_dock.isVisible())
def _on_settings_button_clicked(self):
params = nm.settings().yaml()
dia = ParameterDialog(params, store_geometry="settings_dialog")
dia.setWindowTitle('Node Manager Configuration')
if dia.exec_():
try:
params = dia.getKeywords(only_changed=True, with_tags=True)
nm.settings().set_yaml(params)
except Exception as err:
import traceback
print(traceback.format_exc())
MessageBox.warning(self, "Configuration error",
'Error while set parameter',
'%s' % utf8(err))
def _on_log_added(self, info, warn, err, fatal):
self.logButton.setEnabled(True)
def _on_log_cleared(self):
self.logButton.setIcon(self.__icons['show_io'][0])
self.logButton.setText('')
# self.logButton.setEnabled(False)
def on_hide_docks_toggled(self, checked):
if self.dockWidgetArea(self.launch_dock) == Qt.LeftDockWidgetArea:
self.launch_dock.setVisible(not checked)
if self.dockWidgetArea(self.descriptionDock) == Qt.LeftDockWidgetArea:
self.descriptionDock.setVisible(not checked)
if self.dockWidgetArea(self.networkDock) == Qt.LeftDockWidgetArea:
self.networkDock.setVisible(not checked)
self.hideDocksButton.setArrowType(Qt.RightArrow if checked else Qt.LeftArrow)
def on_currentChanged_tab(self, index):
pass
# if index == self.tabWidget.widget(0):
# self.networkDock.show()
# self.launch_dock.show()
# else:
# self.networkDock.hide()
# self.launch_dock.hide()
def readSettings(self):
if nm.settings().store_geometry:
settings = nm.settings().qsettings(nm.settings().CFG_GUI_FILE)
self._history_selected_robot = settings.value("selected_robot", '')
settings.beginGroup("mainwindow")
maximized = settings.value("maximized", 'false') == 'true'
if maximized:
self.showMaximized()
else:
self.resize(settings.value("size", QSize(1024, 720)))
self.move(settings.value("pos", QPoint(0, 0)))
try:
self.restoreState(settings.value("window_state"))
except Exception:
pass
settings.endGroup()
def storeSetting(self):
if nm.settings().store_geometry:
settings = nm.settings().qsettings(nm.settings().CFG_GUI_FILE)
settings.beginGroup("mainwindow")
settings.setValue("size", self.size())
settings.setValue("pos", self.pos())
settings.setValue("maximized", self.isMaximized())
settings.setValue("window_state", self.saveState())
settings.endGroup()
def closeEvent(self, event):
if self.close_event_count > 0:
# we handle force first
self.finish()
QMainWindow.closeEvent(self, event)
return
self.close_event_count += 1
# ask to close nodes on exit
        # self.close_without_ask is changed in the on_shutdown method in __init__.py
if self._close_on_exit and nm.settings().confirm_exit_when_closing and not self.close_without_ask:
masters = [uri for uri, m in self.masters.items() if m.online]
res = SelectDialog.getValue('Stop nodes?', "Select masters where to stop:",
masters, False, False, '', parent=self,
select_if_single=False,
checkitem1="don't show this dialog again",
closein=nm.settings().timeout_close_dialog,
store_geometry='stop_nodes')
masters2stop, self._close_on_exit = res[0], res[1]
nm.settings().confirm_exit_when_closing = not res[2]
if self._close_on_exit or rospy.is_shutdown():
self.on_finish = True
self._stop_local_master = None
for uri in masters2stop:
try:
m = self.masters[uri]
if m is not None:
if m.is_local:
self._stop_updating()
self._stop_local_master = m
m.stop_nodes_by_name(m.get_nodes_runningIfLocal(), True, [rospy.get_name(), '/rosout'])
if not m.is_local:
m.killall_roscore()
except Exception as e:
rospy.logwarn("Error while stop nodes on %s: %s" % (uri, utf8(e)))
QTimer.singleShot(200, self._test_for_finish)
if masters2stop:
event.ignore()
else:
event.accept()
else:
self._close_on_exit = True
self.close_event_count = 0
event.ignore()
elif self._are_master_in_process():
QTimer.singleShot(200, self._test_for_finish)
self.masternameLabel.setText('<span style=" font-size:14pt; font-weight:600;">%s ...closing...</span>' % self.masternameLabel.text())
rospy.loginfo("Wait for running processes are finished...")
event.ignore()
if event.isAccepted():
self.on_finish = True
self.master_timecheck_timer.stop()
try:
self.storeSetting()
except Exception as e:
rospy.logwarn("Error while store settings: %s" % e)
self.finish()
QMainWindow.closeEvent(self, event)
def _are_master_in_process(self):
for _uri, m in self.masters.items():
m.stop_echo_dialogs()
if m.in_process():
return True
return False
def _test_for_finish(self):
        # this method is called on exit and checks whether process queues with stopping jobs are still running
if self._are_master_in_process():
QTimer.singleShot(200, self._test_for_finish)
return
if hasattr(self, '_stop_local_master') and self._stop_local_master is not None:
self.finish()
self._stop_local_master.killall_roscore()
del self._stop_local_master
self._close_on_exit = False
self.close()
def _stop_updating(self):
if hasattr(self, "_discover_dialog") and self._discover_dialog is not None:
self._discover_dialog.stop()
self.masterlist_service.stop()
self._progress_queue.stop()
self._progress_queue_sync.stop()
self._update_handler.stop()
self.state_topic.stop()
self.stats_topic.stop()
self.own_master_monitor.stop()
self.launch_dock.stop()
self.log_dock.stop()
def finish(self):
if not self._finished:
self._finished = True
print("Mainwindow finish...")
self.screen_dock.finish()
self._stop_updating()
try:
editors = [e for e in self.editor_dialogs.values()]
for editor in editors:
editor.close()
except Exception as _err:
import traceback
print(traceback.format_exc())
for _, master in self.masters.items():
try:
master.close()
except Exception as _err:
import traceback
print(traceback.format_exc())
print("Mainwindow finished!")
def getMasteruri(self):
'''
Requests the ROS master URI from the ROS master through the RPC interface and
returns it. The 'materuri' attribute will be set to the requested value.
@return: ROS master URI
@rtype: C{str} or C{None}
'''
if not hasattr(self, 'materuri') or self.materuri is None:
masteruri = masteruri_from_ros()
master = xmlrpcclient.ServerProxy(masteruri)
_, _, self.materuri = master.getUri(rospy.get_name()) # _:=code, message
nm.is_local(get_hostname(self.materuri))
return self.materuri
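    # Illustrative sketch (not part of the original logic): the URI above is resolved
    # through the standard ROS Master XML-RPC API; the URI and caller name below are
    # hypothetical values.
    #
    #   proxy = xmlrpcclient.ServerProxy('http://localhost:11311')
    #   code, message, uri = proxy.getUri('/node_manager')
    #   # code == 1 means success; 'uri' is the URI the master reports for itself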
def setMasterOnline(self, masteruri, online=True):
if masteruri in self.masters:
self.masters[masteruri].online = online
def removeMaster(self, masteruri):
'''
        Removes the master with the given master URI from the list.
@param masteruri: the URI of the ROS master
@type masteruri: C{str}
'''
if masteruri in self.masters:
if self.currentMaster is not None and self.currentMaster.masteruri == masteruri:
self.setCurrentMaster(None)
self.masters[masteruri].stop()
self.masters[masteruri].updateHostRequest.disconnect()
self.masters[masteruri].host_description_updated.disconnect()
self.masters[masteruri].capabilities_update_signal.disconnect()
self.masters[masteruri].remove_config_signal.disconnect()
self.masters[masteruri].description_signal.disconnect()
self.masters[masteruri].request_xml_editor.disconnect()
self.masters[masteruri].stop_nodes_signal.disconnect()
self.masters[masteruri].robot_icon_updated.disconnect()
if DIAGNOSTICS_AVAILABLE:
self.diagnostics_signal.disconnect(self.masters[masteruri].append_diagnostic)
self.stackedLayout.removeWidget(self.masters[masteruri])
self.tabPlace.layout().removeWidget(self.masters[masteruri])
for cfg in self.masters[masteruri].default_cfgs:
self.capabilitiesTable.removeConfig(cfg)
self.masters[masteruri].setParent(None)
del self.masters[masteruri]
def getMaster(self, masteruri, create_new=True):
'''
        @return: the Widget which represents the master of the given ROS master URI. If no
        Widget for the given URI is available, a new one will be created.
@rtype: L{MasterViewProxy}
'''
if masteruri not in self.masters:
if not create_new:
return None
self.masters[masteruri] = MasterViewProxy(masteruri, self)
self.masters[masteruri].updateHostRequest.connect(self.on_host_update_request)
self.masters[masteruri].host_description_updated.connect(self.on_host_description_updated)
self.masters[masteruri].capabilities_update_signal.connect(self.on_capabilities_update)
self.masters[masteruri].remove_config_signal.connect(self.on_remove_config)
self.masters[masteruri].description_signal.connect(self.on_description_update)
self.masters[masteruri].request_xml_editor.connect(self.on_launch_edit)
self.masters[masteruri].stop_nodes_signal.connect(self.on_stop_nodes)
self.masters[masteruri].robot_icon_updated.connect(self._on_robot_icon_changed)
if DIAGNOSTICS_AVAILABLE:
self.diagnostics_signal.connect(self.masters[masteruri].append_diagnostic)
self.stackedLayout.addWidget(self.masters[masteruri])
if masteruri == self.getMasteruri():
self.masters[masteruri].default_load_launch = self.default_load_launch
return self.masters[masteruri]
def on_host_update_request(self, host):
for key, value in self.masters.items():
if get_hostname(key) == host and value.master_state is not None:
self._update_handler.requestMasterInfo(value.master_state.uri, value.master_state.monitoruri)
def on_host_description_updated(self, masteruri, host, descr):
# self.master_model.update_description(nm.nameres().mastername(masteruri, host), descr)
pass
def on_capabilities_update(self, masteruri, address, config_node, descriptions):
for d in descriptions:
self.capabilitiesTable.updateCapabilities(masteruri, config_node, d)
if masteruri is not None:
master = self.getMaster(masteruri)
self.capabilitiesTable.updateState(masteruri, master.master_info)
def on_remove_config(self, cfg):
self.capabilitiesTable.removeConfig(cfg)
def open_screen_dock(self, masteruri, screen_name, nodename, user=''):
self.screen_dock.connect(masteruri, screen_name, nodename, user)
# ======================================================================================================================
# Handling of local monitoring
# (Backup, if no master_discovery node is running)
# ======================================================================================================================
def _subscribe(self):
'''
        Try to subscribe to the topics of the master_discovery node. If it fails, the
        local monitoring of the ROS master state will be enabled.
'''
if not self.restricted_to_one_master:
try:
self.masterlist_service.retrieveMasterList(self.getMasteruri(), False)
except Exception:
pass
else:
self._setLocalMonitoring(True)
def _setLocalMonitoring(self, on, discoverer=''):
'''
        Enables the local monitoring of the ROS master state and disables the view of
        the discovered ROS masters.
@param on: the enable / disable the local monitoring
@type on: C{boolean}
'''
if self.own_master_monitor.is_running() != on:
self.master_delegate.set_enabled(not on)
self.masterTableView.setEnabled(not on)
self.refreshAllButton.setEnabled(not on)
self.own_master_monitor.pause(not on)
if on:
self.masterTableView.setToolTip("use 'Start' button to enable the master discovering")
self.networkDock.setWindowTitle("ROS Network [disabled]")
else:
self.masterTableView.setToolTip('')
if on:
            # remove discovered ROS masters and set the local master as selected
for uri in self.masters.keys():
master = self.masters[uri]
if nm.is_local(get_hostname(uri)) or uri == self.getMasteruri():
if not self._history_selected_robot or master.mastername == self._history_selected_robot:
self.setCurrentMaster(master)
else:
if master.master_state is not None:
self.master_model.removeMaster(master.master_state.name)
else:
try:
# determine the ROS network ID
mcast_group = rospy.get_param(rospy.names.ns_join(discoverer, 'mcast_port'))
self.networkDock.setWindowTitle("ROS Network [id: %d]" % (mcast_group - 11511))
self._subscribe()
except Exception:
# try to get the multicast port of master discovery from log
port = 0
network_id = -1
import re
with open(screen.get_ros_logfile(node=discoverer.rstrip('/')), 'r') as mdfile:
for line in mdfile:
if line.find("Listen for multicast at") > -1:
                            port = int(re.findall(r'\d+', line)[-1])
elif line.find("Network ID") > -1:
                            network_id = int(re.findall(r'\d+', line)[-1])
port = 11511 + network_id
if port > 0:
self.networkDock.setWindowTitle("ROS Network [id: %d]" % (port - 11511))
else:
self.networkDock.setWindowTitle("ROS Network")
def on_master_list_err_retrieved(self, masteruri, error):
'''
        The callback method connected to the signal which is emitted on an error
        while calling the service to determine the discovered ROS masters. On error
        the local monitoring will be enabled.
'''
if 'no service' not in error:
rospy.logwarn(error)
self._setLocalMonitoring(True)
def hasDiscoveryService(self, minfo):
'''
        Test whether the newly retrieved MasterInfo contains the master_discovery node.
        This is identified by the name of a contained 'list_masters' service.
@param minfo: the ROS master Info
@type minfo: U{fkie_master_discovery.MasterInfo<http://docs.ros.org/api/fkie_master_discovery/html/modules.html#module-fkie_master_discovery.master_info>}
'''
# use no discovery services, if roscore is running on a remote host
if self.restricted_to_one_master:
return False
for service in minfo.services.keys():
if service.endswith('list_masters'):
return True
return False
# ======================================================================================================================
# Handling of received ROS master state messages
# ======================================================================================================================
    def on_master_list_retrieved(self, masteruri, service_name, master_list):
'''
        Handle the retrieved list of ROS masters.
1. update the ROS Network view
@param master_list: a list with ROS masters
@type master_list: C{[U{fkie_master_discovery.msg.MasterState<http://docs.ros.org/api/fkie_multimaster_msgs/html/msg/MasterState.html>}]}
'''
result_1 = self.state_topic.registerByROS(self.getMasteruri(), False)
result_2 = self.stats_topic.registerByROS(self.getMasteruri(), False)
local_mon = not result_1 or not result_2
self._setLocalMonitoring(local_mon, rospy.names.namespace(result_1))
self._con_tries[masteruri] = 0
        # remove ROS masters which are not in the new list
new_uris = [m.uri for m in master_list if m.uri is not None]
for uri in self.masters.keys():
if uri not in new_uris:
master = self.masters[uri]
if not (nm.is_local(get_hostname(uri)) or uri == self.getMasteruri()):
if master.master_state is not None:
self.master_model.removeMaster(master.master_state.name)
self.setMasterOnline(uri, False)
# self.removeMaster(uri)
# add or update master
for m in master_list:
if m.uri is not None:
host = get_hostname(m.uri)
nm.nameres().add_master_entry(m.uri, m.name, host)
m.name = nm.nameres().mastername(m.uri)
master = self.getMaster(m.uri)
master.master_state = m
master.online = True
master.force_next_update()
self._assigne_icon(m.name)
self.master_model.updateMaster(m)
self._update_handler.requestMasterInfo(m.uri, m.monitoruri)
def on_master_state_changed(self, msg):
'''
Handle the received master state message.
1. update the ROS Network view
2. enable local master monitoring, if all masters are removed (the local master too)
@param msg: the ROS message with new master state
@type msg: U{fkie_master_discovery.msg.MasterState<http://docs.ros.org/api/fkie_multimaster_msgs/html/msg/MasterState.html>}
'''
# do not update while closing
if hasattr(self, "on_finish"):
rospy.logdebug("ignore changes on %s, because currently on closing...", msg.master.uri)
return
host = get_hostname(msg.master.uri)
if msg.state == MasterState.STATE_CHANGED:
nm.nameres().add_master_entry(msg.master.uri, msg.master.name, host)
msg.master.name = nm.nameres().mastername(msg.master.uri)
self.getMaster(msg.master.uri).master_state = msg.master
self._assigne_icon(msg.master.name)
self.master_model.updateMaster(msg.master)
if nm.settings().autoupdate:
self._update_handler.requestMasterInfo(msg.master.uri, msg.master.monitoruri)
else:
rospy.loginfo("Autoupdate disabled, the data will not be updated for %s" % msg.master.uri)
if not msg.master.online:
host = get_hostname(msg.master.uri)
rospy.loginfo("remove SSH connection for '%s' because the master is now offline" % host)
nm.ssh().remove(host)
if msg.state == MasterState.STATE_NEW:
            # if a new master with the URI of the local master is received, update the master list
if msg.master.uri == self.getMasteruri():
self.masterlist_service.retrieveMasterList(msg.master.uri, False)
nm.nameres().add_master_entry(msg.master.uri, msg.master.name, host)
msg.master.name = nm.nameres().mastername(msg.master.uri)
self.getMaster(msg.master.uri).master_state = msg.master
self._assigne_icon(msg.master.name)
self.master_model.updateMaster(msg.master)
if nm.settings().autoupdate:
self._update_handler.requestMasterInfo(msg.master.uri, msg.master.monitoruri)
else:
rospy.loginfo("Autoupdate disabled, the data will not be updated for %s" % msg.master.uri)
if msg.state == MasterState.STATE_REMOVED:
if msg.master.uri == self.getMasteruri():
                # switch to local monitoring, if the local master discovery was removed
nm.nameres().remove_master_entry(msg.master.uri)
self._setLocalMonitoring(True)
else:
nm.nameres().remove_master_entry(msg.master.uri)
self.master_model.removeMaster(msg.master.name)
self.setMasterOnline(msg.master.uri, False)
# self.removeMaster(msg.master.uri)
        # start master_sync, if it was selected in the start dialog to start with master_discovery
if self._syncs_to_start:
if msg.state in [MasterState.STATE_NEW, MasterState.STATE_CHANGED]:
                # we don't know which host name was used to start master discovery
if host in self._syncs_to_start:
self._syncs_to_start.remove(host)
self.on_sync_start(msg.master.uri)
elif msg.master.name in self._syncs_to_start:
self._syncs_to_start.remove(msg.master.name)
self.on_sync_start(msg.master.uri)
else:
addresses = nm.nameres().addresses(msg.master.uri)
for address in addresses:
if address in self._syncs_to_start:
self._syncs_to_start.remove(address)
self.on_sync_start(msg.master.uri)
# if len(self.masters) == 0:
# self._setLocalMonitoring(True)
def _assigne_icon(self, name, path=None):
'''
        Sets the new icon for the given robot. If the path is `None`, search for a
        .png file with the robot name.
:param name: robot name
:type name: str
:param path: path of the icon (Default: None)
:type path: str
'''
icon_path = path if path else nm.settings().robot_image_file(name)
if name not in self.__icons or self.__icons[name][1] != path:
if QFile.exists(icon_path):
self.__icons[name] = (QIcon(icon_path), icon_path)
elif name in self.__icons:
del self.__icons[name]
def on_master_monitor_err(self, msg):
self._con_tries[self.getMasteruri()] += 1
def on_master_info_retrieved(self, minfo):
'''
Integrate the received master info.
@param minfo: the ROS master Info
@type minfo: U{fkie_master_discovery.MasterInfo<http://docs.ros.org/api/fkie_master_discovery/html/modules.html#module-fkie_master_discovery.master_info>}
'''
if hasattr(self, "on_finish"):
rospy.logdebug("ignore changes on %s, because currently on closing...", minfo.masteruri)
return
rospy.logdebug("MASTERINFO from %s (%s) received", minfo.mastername, minfo.masteruri)
self._con_tries[minfo.masteruri] = 0
# cputimes_m = os.times()
# cputime_init_m = cputimes_m[0] + cputimes_m[1]
if minfo.masteruri in self.masters:
for _, master in self.masters.items(): # _:=uri
try:
if not master.online and master.masteruri != minfo.masteruri:
continue
# check for running discovery service
new_info = master.master_info is None or master.master_info.timestamp < minfo.timestamp
# cputimes = os.times()
# cputime_init = cputimes[0] + cputimes[1]
master.master_info = minfo
# cputimes = os.times()
# cputime = cputimes[0] + cputimes[1] - cputime_init
# print master.master_state.name, cputime
if master.master_info is not None:
if self._history_selected_robot == minfo.mastername and self._history_selected_robot == master.mastername and self.currentMaster != master:
if self.currentMaster is not None and not self.currentMaster.is_local:
self.setCurrentMaster(master)
# elif nm.is_local(get_hostname(master.master_info.masteruri)) or self.restricted_to_one_master:
elif master.master_info.masteruri == masteruri_from_master() or self.restricted_to_one_master:
if new_info:
has_discovery_service = self.hasDiscoveryService(minfo)
if (not self.own_master_monitor.isPaused() or not self.masterTableView.isEnabled()) and has_discovery_service:
self._subscribe()
if self.currentMaster is None and (not self._history_selected_robot or self._history_selected_robot == minfo.mastername):
self.setCurrentMaster(master)
# this info is collected by daemon
# if not hasattr(self, "_sub_extended_log"):
# agg_suffix = '_agg' if nm.settings().use_diagnostics_agg else ''
# self._sub_extended_log = rospy.Subscriber('/diagnostics_agg' % agg_suffix, DiagnosticArray, self._callback_diagnostics)
# update the list view, whether master is synchronized or not
if master.master_info.masteruri == minfo.masteruri:
self.master_model.setChecked(master.master_state.name, not minfo.getNodeEndsWith('master_sync') is None)
if self.default_profile_load:
self.default_profile_load = False
QTimer.singleShot(2000, self._load_default_profile_slot)
self.capabilitiesTable.updateState(minfo.masteruri, minfo)
except Exception as e:
rospy.logwarn("Error while process received master info from %s: %s", minfo.masteruri, utf8(e))
# update the duplicate nodes
self.updateDuplicateNodes()
# update the buttons, whether master is synchronized or not
if self.currentMaster is not None and self.currentMaster.master_info is not None and not self.restricted_to_one_master:
self.syncButton.setEnabled(True)
self.syncButton.setChecked(not self.currentMaster.master_info.getNodeEndsWith('master_sync') is None)
else:
self.masterlist_service.retrieveMasterList(minfo.masteruri, False)
self.profiler.update_progress()
# cputimes_m = os.times()
# cputime_m = cputimes_m[0] + cputimes_m[1] - cputime_init_m
# print "ALL:", cputime_m
def _load_default_profile_slot(self):
if not hasattr(self, "on_finish"):
self.profiler.on_load_profile_file(self.default_load_launch)
def on_master_errors_retrieved(self, masteruri, error_list):
self.master_model.updateMasterErrors(nm.nameres().mastername(masteruri), error_list)
def on_master_timediff_retrieved(self, masteruri, timediff):
self.master_model.updateTimeDiff(nm.nameres().mastername(masteruri), timediff)
def on_master_username_retrieved(self, masteruri, username):
master = self.getMaster(masteruri, create_new=False)
if master is not None:
master.current_user = username
def on_master_info_error(self, masteruri, error):
if masteruri not in self._con_tries:
self._con_tries[masteruri] = 0
self._con_tries[masteruri] += 1
if masteruri == self.getMasteruri():
rospy.logwarn("Error while connect to local master_discovery %s: %s", masteruri, error)
# switch to local monitoring after 3 timeouts
if self._con_tries[masteruri] > 2:
self._setLocalMonitoring(True)
master = self.getMaster(masteruri, False)
if master and master.master_state is not None and master.online:
self._update_handler.requestMasterInfo(master.master_state.uri, master.master_state.monitoruri, self.DELAYED_NEXT_REQ_ON_ERR)
def on_conn_stats_updated(self, stats):
'''
Handle the retrieved connection statistics.
1. update the ROS Network view
@param stats: a list with connection statistics
@type stats: C{[U{fkie_master_discovery.msg.LinkState<http://docs.ros.org/api/fkie_multimaster_msgs/html/msg/LinkState.html>}]}
'''
for stat in stats.links:
self.master_model.updateMasterStat(stat.destination, stat.quality)
# ======================================================================================================================
# Handling of master info frame
# ======================================================================================================================
def on_info_clicked(self):
text = '<dl>'
text = '%s<dt><b>Maintainer</b>: Alexander Tiderko <font color=gray>[email protected]</font></dt>' % text
text = '%s<dt><b>Author</b>: Alexander Tiderko, Timo Roehling</dt>' % text
text = '%s<dt><b>License</b>: BSD, some icons are licensed under the GNU Lesser General Public License (LGPL) or Creative Commons Attribution-Noncommercial 3.0 License</dt>' % text
text = '%s</dl>' % text
if nm.__date__ == 'unknown':
text = '%s<dt><b>Version</b>: %s</dt>' % (text, nm.__version__)
else:
text = '%s<dt><b>Version</b>: %s (%s)</dt>' % (text, nm.__version__, nm.__date__)
text = '%s<dt><b>URL</b>: <a href="https://github.com/fkie/multimaster_fkie">https://github.com/fkie/multimaster_fkie</a></dt>' % (text)
MessageBox.about(self, 'About Node Manager', text)
def on_master_log_clicked(self):
'''
Tries to get the log of master_discovery node on the machine requested by a dialog.
'''
# get the history list
user_list = [self.userComboBox.itemText(i) for i in reversed(range(self.userComboBox.count()))]
user_list.insert(0, 'last used')
params = {'Host': {':type': 'string', ':value': 'localhost'},
'Show master discovery log': {':type': 'bool', ':value': True},
'Show master sync log': {':type': 'bool', ':value': False},
'Show daemon log': {':type': 'bool', ':value': False},
'Username': {':type': 'string', ':value': user_list},
'Only screen log': {':type': 'bool', ':value': True, ':hint': 'There are two logs: ROS-Log and SCREEN-Log'},
# 'Optional Parameter': ('list', params_optional)
}
dia = ParameterDialog(params, sidebar_var='Host', store_geometry="master_log_dialog")
dia.setFilterVisible(False)
dia.setWindowTitle('Show log')
dia.setFocusField('Host')
if dia.exec_():
try:
params = dia.getKeywords(only_changed=False, with_tags=False)
print("params", params)
hostnames = params['Host'] if isinstance(params['Host'], list) else [params['Host']]
log_master_discovery = params['Show master discovery log']
log_master_sync = params['Show master sync log']
log_nm_daemon = params['Show daemon log']
username = params['Username']
screen_only = params['Only screen log']
for hostname in hostnames:
try:
usr = username
if username == 'last used':
usr = nm.settings().host_user(hostname)
else:
nm.settings().set_host_user(hostname, usr)
if log_master_discovery:
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'%s: show log of master discovery' % hostname,
nm.starter().openLog,
{'nodename' : '/master_discovery',
'host': hostname,
'user': usr,
'only_screen': screen_only
})
if log_master_sync:
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'%s: show log of master sync' % hostname,
nm.starter().openLog,
{'nodename' : '/master_sync',
'host': hostname,
'user': usr,
'only_screen': screen_only
})
if log_nm_daemon:
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'%s: show log of nm daemon' % hostname,
nm.starter().openLog,
{'nodename' : '/node_manager_daemon',
'host': hostname,
'user': usr,
'only_screen': screen_only
})
except (Exception, nm.StartException) as err:
import traceback
print(traceback.format_exc(1))
rospy.logwarn("Error while show LOG for master_discovery %s: %s" % (utf8(hostname), utf8(err)))
MessageBox.warning(self, "Show log error",
'Error while show log of master_discovery',
'%s' % utf8(err))
self._progress_queue.start()
except Exception as err:
MessageBox.warning(self, "Show log error",
'Error while parse parameter',
'%s' % utf8(err))
def on_set_time_clicked(self):
if self.currentMaster is not None: # and not self.currentMaster.is_local:
time_dialog = QDialog()
ui_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ui', 'TimeInput.ui')
loadUi(ui_file, time_dialog)
host = get_hostname(self.currentMaster.master_state.uri)
time_dialog.setWindowTitle('Set time on %s' % host)
time_dialog.hostsComboBox.addItems(nm.history().cachedParamValues('/ntp'))
if self.currentMaster.is_local:
time_dialog.dateFrame.setVisible(False)
if time_dialog.exec_():
running_nodes = self.currentMaster.get_nodes_runningIfLocal(remove_system_nodes=True)
if running_nodes:
ret = MessageBox.question(self, 'Set Time', 'There are running nodes. Stop them?', buttons=MessageBox.Yes | MessageBox.No)
if ret == MessageBox.Yes:
self.currentMaster.stop_nodes_by_name(running_nodes)
if time_dialog.dateRadioButton.isChecked():
try:
rospy.loginfo("Set remote host time to local time: %s" % self.currentMaster.master_state.uri)
socket.setdefaulttimeout(10)
p = xmlrpcclient.ServerProxy(self.currentMaster.master_state.monitoruri)
uri, success, newtime, errormsg = p.setTime(time.time())
if not success:
if errormsg.find('password') > -1:
errormsg += "\nPlease modify /etc/sudoers with sudoedit and add user privilege, e.g:"
errormsg += "\n%s ALL=NOPASSWD: /bin/date" % self.currentMaster.current_user
errormsg += "\n!!!needed to be at the very end of file, don't forget a new line at the end!!!"
errormsg += "\n\nBe aware, it does not replace the time synchronization!"
errormsg += "\nIt sets approximate time without undue delays on communication layer."
MessageBox.warning(self, "Time set error",
'Error while set time on %s' % uri, '%s' % utf8(errormsg))
else:
timediff = time.time() - newtime
rospy.loginfo(" New time difference to %s is approx.: %.3fs" % (self.currentMaster.master_state.uri, timediff))
self.on_master_timediff_retrieved(self.currentMaster.master_state.uri, timediff)
except Exception as e:
errormsg = '%s' % e
if errormsg.find('setTime') > -1:
errormsg += "\nUpdate remote fkie_multimaster!"
rospy.logwarn("Error while set time on %s: %s" % (self.currentMaster.master_state.uri, utf8(errormsg)))
MessageBox.warning(self, "Time sync error",
'Error while set time on %s' % self.currentMaster.master_state.uri,
'%s' % utf8(errormsg))
finally:
socket.setdefaulttimeout(None)
elif time_dialog.ntpdateRadioButton.isChecked():
ntp_host = time_dialog.hostsComboBox.currentText()
nm.history().addParamCache('/ntp', ntp_host)
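                    # ntpdate flags: '-v' verbose, '-u' use an unprivileged source port, '-t 1' one second timeout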
cmd = "%s %s" % ('sudo ntpdate -v -u -t 1', ntp_host)
nm.starter().ntpdate(host, cmd)
def on_refresh_master_clicked(self):
if self.currentMaster is not None:
rospy.loginfo("Request an update from %s", utf8(self.currentMaster.master_state.monitoruri))
if self.currentMaster.master_info is not None:
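                # move the timestamp slightly back (keeping check_ts) so the next
                # received master info is considered newer and the view is refreshed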
check_ts = self.currentMaster.master_info.check_ts
self.currentMaster.master_info.timestamp = self.currentMaster.master_info.timestamp - 1.0
self.currentMaster.master_info.check_ts = check_ts
self.currentMaster.perform_master_checks()
if self.currentMaster.master_state is not None:
self._update_handler.requestMasterInfo(self.currentMaster.master_state.uri, self.currentMaster.master_state.monitoruri)
self.currentMaster.force_next_update()
# self.currentMaster.remove_all_def_configs()
def on_run_node_clicked(self):
'''
Open a dialog to run a ROS node without a configuration
'''
from .run_dialog import RunDialog
if self.currentMaster is not None:
dia = RunDialog(get_hostname(self.currentMaster.masteruri), self.currentMaster.masteruri)
if dia.exec_():
params = dia.run_params()
if params:
params['use_nmd'] = True
params['auto_pw_request'] = False # autorequest must be false
params['user'] = self.currentMaster.current_user
try:
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'run `%s` on %s' % (params['binary'], params['host']),
nm.starter().runNodeWithoutConfig,
params)
self._progress_queue.start()
except (Exception, nm.StartException) as e:
rospy.logwarn("Error while run `%s` on %s: %s" % (params['binary'], params['host'], utf8(e)))
MessageBox.warning(self, "Run error",
'Error while run node %s [%s]' % (params['binary'], params['package']),
utf8(e))
else:
MessageBox.critical(self, "Run error",
'No binary specified')
def on_rqt_plugin_start(self, name, plugin):
if self.currentMaster is not None:
try:
if name == 'Terminal':
host = get_hostname(self.currentMaster.master_state.uri)
nm.starter().open_terminal(host)
return
args = []
package = 'rqt_gui'
binary = 'rqt_gui'
prefix = 'rqt_'
suffix = ''
if name == 'RViz':
prefix = 'rviz_'
package = 'rviz'
binary = 'rviz'
if plugin:
args = ['-s', plugin]
if name == 'rosbag record':
package = 'rosbag'
binary = 'record'
prefix = ''
topic_names = []
current_tab = self.currentMaster.ui.tabWidget.tabText(self.currentMaster.ui.tabWidget.currentIndex())
if (current_tab == 'Nodes'):
nodes = self.currentMaster.nodesFromIndexes(self.currentMaster.ui.nodeTreeView.selectionModel().selectedIndexes())
if nodes:
for n in nodes:
topic_names.extend(n.published)
else:
topics = self.currentMaster.topicsFromIndexes(self.currentMaster.ui.topicsView.selectionModel().selectedIndexes())
if topics:
topic_names.extend([t.name for t in topics])
count_topics = 'ALL'
if topic_names:
args = [' '.join(topic_names)]
count_topics = '%d selected' % len(topic_names)
else:
args = ['-a']
ret = MessageBox.question(self, 'Start rosbag', 'Start rosbag record with %s topics to %s/record_TIMESTAMP?' % (count_topics, nm.settings().LOG_PATH), buttons=MessageBox.Yes | MessageBox.No)
if ret == MessageBox.No:
return
args.append("-o %s/record" % nm.settings().LOG_PATH)
suffix = "_%d" % int(time.time())
node_name = '%s%s_%s%s' % (prefix, name.lower().replace(' ', '_'),
self.currentMaster.master_state.name, suffix)
self.currentMaster._progress_queue.add2queue(utf8(uuid.uuid4()),
'start %s' % name,
nm.starter().runNodeWithoutConfig,
{'host': 'localhost',
'package': package,
'binary': binary,
'name': nm.nameres().normalize_name(node_name),
'args': args,
'masteruri': '%s' % self.currentMaster.master_state.uri,
'use_nmd': True,
'auto_pw_request': False
})
except (Exception, nm.StartException) as e:
import traceback
print(utf8(traceback.format_exc(1)))
rospy.logwarn("Error while start %s: %s" % (name, utf8(e)))
MessageBox.warning(self, "Start error",
'Error while start %s' % name,
'%s' % utf8(e))
self.currentMaster._progress_queue.start()
def on_sync_dialog_released(self, released=False, masteruri=None, external_call=False):
self.syncButton.setEnabled(False)
master = self.currentMaster
sync_node = None
if masteruri is not None:
master = self.getMaster(masteruri, False)
if master is not None and master.master_info is not None:
sync_node = master.master_info.getNodeEndsWith('master_sync')
if master is not None and (sync_node is None or external_call):
self._sync_dialog.resize(350, 190)
if self._sync_dialog.exec_():
try:
host = get_hostname(master.masteruri)
if self._sync_dialog.interface_filename is not None and not nm.is_local(host):
nmd_uri = nmdurl.nmduri(master.masteruri)
sync_file = nmdurl.join(nmdurl.nmduri(), self._sync_dialog.interface_filename)
# copy the interface file to remote machine
self._progress_queue_sync.add2queue(utf8(uuid.uuid4()),
'Transfer sync interface to %s' % nmd_uri,
nm.starter().transfer_file_nmd,
{'grpc_url': '%s' % nmd_uri,
'path': sync_file,
'auto_pw_request': False,
'user': master.current_user
})
self._progress_queue_sync.add2queue(utf8(uuid.uuid4()),
'Start sync on %s' % host,
nm.starter().runNodeWithoutConfig,
{'host': '%s' % host,
'package': 'fkie_master_sync',
'binary': 'master_sync',
'name': 'master_sync',
'args': self._sync_dialog.sync_args,
'masteruri': '%s' % master.masteruri,
'use_nmd': False,
'auto_pw_request': False,
'user': master.current_user
})
self._progress_queue_sync.start()
except Exception:
import traceback
MessageBox.warning(self, "Start sync error",
"Error while start sync node",
utf8(traceback.format_exc(1)))
else:
self.syncButton.setChecked(False)
elif sync_node is not None:
master.stop_nodes([sync_node])
self.syncButton.setEnabled(True)
def on_sync_start(self, masteruri=None):
'''
Enable or disable the synchronization of the master cores
'''
key_mod = QApplication.keyboardModifiers()
if (key_mod & Qt.ShiftModifier or key_mod & Qt.ControlModifier):
self.on_sync_dialog_released(masteruri=masteruri, external_call=True)
# if not master.master_info is None:
# node = master.master_info.getNodeEndsWith('master_sync')
# self.syncButton.setChecked(not node is None)
else:
self.syncButton.setEnabled(False)
master = self.currentMaster
if masteruri is not None:
master = self.getMaster(masteruri, False)
if master is not None:
# ask the user to start the master_sync with loaded launch file
if master.master_info is not None:
node = master.getNode('/master_sync')
if node and node[0].has_configs():
def_cfg_info = '\nNote: default_cfg parameter will be changed!' if node[0].has_default_cfgs(node[0].cfgs) else ''
ret = MessageBox.question(self, 'Start synchronization', 'Start the synchronization using loaded configuration?\n\n `No` starts the master_sync with default parameter.%s' % def_cfg_info, buttons=MessageBox.Yes | MessageBox.No)
if ret == MessageBox.Yes:
master.start_nodes([node[0]])
return
# start the master sync with default settings
default_sync_args = ["_interface_url:='.'",
'_sync_topics_on_demand:=False',
'_ignore_hosts:=[]', '_sync_hosts:=[]',
'_ignore_nodes:=[]', '_sync_nodes:=[]',
'_ignore_topics:=[]', '_sync_topics:=[]',
'_ignore_services:=[]', '_sync_services:=[]',
'_sync_remote_nodes:=False']
try:
host = get_hostname(master.masteruri)
self._progress_queue_sync.add2queue(utf8(uuid.uuid4()),
'start sync on ' + utf8(host),
nm.starter().runNodeWithoutConfig,
{'host': utf8(host),
'package': 'fkie_master_sync',
'binary': 'master_sync',
'name': 'master_sync',
'args': default_sync_args,
'masteruri': utf8(master.masteruri),
'use_nmd': False,
'auto_pw_request': False,
'user': master.current_user
})
self._progress_queue_sync.start()
except Exception:
pass
self.syncButton.setEnabled(True)
def on_sync_stop(self, masteruri=None):
master = self.currentMaster
if masteruri is not None:
master = self.getMaster(masteruri, False)
if master is not None and master.master_info is not None:
node = master.master_info.getNodeEndsWith('master_sync')
if node is not None:
master.stop_nodes([node])
def on_master_timecheck(self):
# HACK: sometimes the local monitoring will not be activated. This is the detection.
if len(self.masters) < 2 and self.currentMaster is None:
self._subscribe()
return
# update the info panel of the robot. If the node manager is not selected the updates are rarer.
current_time = time.time()
if self.isActiveWindow() or current_time - self._last_time_view_update > 15:
self._last_time_view_update = current_time
if self.currentMaster is not None and self.currentMaster.master_state is not None:
master = self.getMaster(self.currentMaster.master_state.uri)
name = master.master_state.name
masteruri = master.master_state.uri
if self.restricted_to_one_master:
name = ''.join([name, ' <span style=" color:red;">(restricted)</span>'])
if not self.masternameLabel.toolTip():
                        self.masternameLabel.setToolTip('The multimaster options are disabled, because the roscore is running on a remote host!')
if master.master_info is not None:
self.showMasterName(masteruri, name, self.timestampStr(master.master_info.check_ts), master.master_state.online)
pass
elif master.master_state is not None:
text = 'Try to get info!!!'
if not nm.settings().autoupdate:
text = 'Press F5 or click on reload to get info'
self.showMasterName(masteruri, name, text, master.master_state.online)
self.userComboBox.setEditText(self.currentMaster.current_user)
if not master.is_valid_user_master_daemon():
self.showMasterName(masteruri, name, self.timestampStr(master.master_info.check_ts), master.master_state.online, 'daemon is running with different user: %s' % master.daemon_user)
else:
self.showMasterName('', 'No robot selected', None, False)
if (current_time - self._refresh_time > 30.0):
masteruri = self.getMasteruri()
if masteruri is not None:
master = self.getMaster(masteruri)
if master is not None and master.master_state is not None and nm.settings().autoupdate:
self._update_handler.requestMasterInfo(master.master_state.uri, master.master_state.monitoruri)
self._refresh_time = current_time
for _, master in self.masters.items():
if master == self.currentMaster:
master.perform_diagnostic_requests()
elif int(current_time) % 3 == 0:
master.perform_diagnostic_requests()
def showMasterName(self, masteruri, name, timestamp, online=True, force_warning=''):
'''
Update the view of the info frame.
'''
con_err = ''
user_warning = ''
force_color_update = False
if not force_warning:
force_color_update = 'daemon is running with different user:' in self.masterInfoLabel.text()
else:
user_warning = '<span style=" color:red;"> %s</span>' % utf8(force_warning)
try:
tries = self._con_tries[masteruri]
if tries > 1:
con_err = '<span style=" color:red;">connection problems (%s tries)! </span>' % utf8(tries)
except Exception:
pass
if self.__current_master_label_name != name or force_color_update:
self.__current_master_label_name = name
show_name = name if nm.settings().show_domain_suffix else subdomain(name)
self.masternameLabel.setText('<span style=" font-size:14pt; font-weight:600; color:black">%s</span>' % show_name)
color = QColor.fromRgb(nm.settings().host_color(self.__current_master_label_name, self._default_color.rgb()))
self._new_color(color)
ts = 'updated: %s' % utf8(timestamp) if timestamp is not None else ''
if not nm.settings().autoupdate:
ts = '%s<span style=" color:orange;"> AU off</span>' % ts
self.masterInfoLabel.setText('<span style=" font-size:8pt; color:black">%s%s%s</span>' % (con_err, ts, user_warning))
# load the robot image, if one exists
if self.masternameLabel.isEnabled():
if name in self.__icons:
if self.__icons[name][0] != self.__current_icon:
icon = self.__icons[name][0]
self.__current_icon = icon
self.imageLabel.setPixmap(icon.pixmap(self.nameFrame.size()))
self.imageLabel.setToolTip(''.join(['<html><head></head><body><img src="', self.__icons[name][1], '" alt="', name, '"></body></html>']))
elif self.__icons['default_pc'][0] != self.__current_icon:
icon = self.__icons['default_pc'][0]
self.__current_icon = icon
self.imageLabel.setPixmap(icon.pixmap(self.nameFrame.size()))
self.imageLabel.setToolTip('')
# set sim_time info
master = self.getMaster(masteruri, False)
sim_time_enabled = self.masternameLabel.isEnabled() and master is not None and master.use_sim_time
self.simTimeLabel.setVisible(bool(sim_time_enabled))
launch_server_enabled = self.masternameLabel.isEnabled() and (master is not None) and master.has_launch_server()
self.launchServerLabel.setVisible(launch_server_enabled)
self.masternameLabel.setEnabled(online)
self.masterInfoFrame.setEnabled((timestamp is not None))
# update warning symbol / text
if not self.log_dock.isVisible() and self.log_dock.count_warn():
if self.logButton.text():
self.logButton.setIcon(self.__icons['log_warning'][0])
self.logButton.setText('')
else:
self.logButton.setText('%d' % self.log_dock.count_warn())
self.logButton.setIcon(self.__icons['empty'][0])
def timestampStr(self, timestamp):
dt = datetime.fromtimestamp(timestamp)
diff = time.time() - timestamp
diff_dt = datetime.fromtimestamp(diff)
before = '0 sec'
if (diff < 60):
before = diff_dt.strftime('%S sec')
elif (diff < 3600):
before = diff_dt.strftime('%M:%S min')
elif (diff < 86400):
            before = diff_dt.strftime('%H:%M:%S h')
else:
before = diff_dt.strftime('%d Day(s) %H:%M:%S')
return '%s (%s)' % (dt.strftime('%H:%M:%S'), before)
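    # Illustrative examples of the returned format (clock values are hypothetical):
    #   '14:03:12 (35 sec)' for differences below a minute, '14:03:12 (01:15 min)' above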
def updateDuplicateNodes(self):
# update the duplicate nodes
running_nodes = dict()
for _, m in self.masters.items():
if m.online and m.master_state is not None and m.master_state.online:
running_nodes.update(m.get_nodes_runningIfLocal())
for _, m in self.masters.items():
if m.master_state is not None:
m.set_duplicate_nodes(running_nodes)
# ======================================================================================================================
# Handling of master list view
# ======================================================================================================================
def on_master_table_pressed(self, selected):
pass
def on_master_table_clicked(self, selected):
'''
On click on the sync item, the master_sync node will be started or stopped,
depending on run state.
'''
pass
# item = self.master_model.itemFromIndex(selected)
# if isinstance(item, MasterSyncItem):
# pass
def on_master_table_activated(self, selected):
item = self.master_model.itemFromIndex(selected)
MessageBox.information(self, item.name, item.toolTip())
def on_master_selection_changed(self, selected):
'''
        If a master was selected, set the corresponding widget of the stacked layout
        as the current widget and show the state of the selected master.
'''
# si = self.masterTableView.selectedIndexes()
# for index in si:
# if index.row() == selected.row():
item = self.master_model.itemFromIndex(selected)
if item is not None:
self._history_selected_robot = item.master.name
self.setCurrentMaster(item.master.uri)
if not nm.nmd().file.get_packages(item.master.uri):
nm.nmd().file.list_packages_threaded(nmdurl.nmduri(item.master.uri))
if self.currentMaster.master_info is not None and not self.restricted_to_one_master:
node = self.currentMaster.master_info.getNodeEndsWith('master_sync')
self.syncButton.setEnabled(True)
self.syncButton.setChecked(node is not None)
else:
self.syncButton.setEnabled(False)
return
self.launch_dock.raise_()
def setCurrentMaster(self, master):
'''
Changes the view of the master.
:param master: the MasterViewProxy object or masteruri
:type master: MasterViewProxy or str
'''
show_user_field = False
if isinstance(master, MasterViewProxy):
self.currentMaster = master
self.stackedLayout.setCurrentWidget(master)
show_user_field = not master.is_local
self._add_user_to_combo(self.currentMaster.current_user)
self.userComboBox.setEditText(self.currentMaster.current_user)
elif master is None:
self.currentMaster = None
self.stackedLayout.setCurrentIndex(0)
else: # it's masteruri
self.currentMaster = self.getMaster(master)
if self.currentMaster is not None:
self.stackedLayout.setCurrentWidget(self.currentMaster)
show_user_field = not self.currentMaster.is_local
self._add_user_to_combo(self.currentMaster.current_user)
self.userComboBox.setEditText(self.currentMaster.current_user)
else:
self.stackedLayout.setCurrentIndex(0)
if self.currentMaster is not None:
self.launch_dock.set_current_master(self.currentMaster.masteruri, self.currentMaster.master_state.name)
self.user_frame.setVisible(show_user_field)
self.on_master_timecheck()
def _add_user_to_combo(self, user):
for i in range(self.userComboBox.count()):
if user.lower() == self.userComboBox.itemText(i).lower():
return
self.userComboBox.addItem(user)
def on_user_changed(self, user):
if self.currentMaster is not None:
self.currentMaster.current_user = user
def on_masterTableView_selection_changed(self, selected, deselected):
'''
        Called when the selection in the master list view changes.
'''
if selected.isValid():
self.on_master_selection_changed(selected)
def on_all_master_refresh_clicked(self):
'''
        Retrieves from the master_discovery node the list of all discovered ROS
        masters and gets their current state.
'''
# set the timestamp of the current master info back
for _, m in self.masters.items():
if m.master_info is not None:
check_ts = m.master_info.check_ts
m.master_info.timestamp = m.master_info.timestamp - 1.0
m.master_info.check_ts = check_ts
self.masterlist_service.refresh(self.getMasteruri(), False)
def on_discover_network_clicked(self):
try:
self._discover_dialog.raise_()
except Exception:
mcast_group = rospy.get_param('/master_discovery/mcast_group', '226.0.0.0')
self._discover_dialog = NetworkDiscoveryDialog(mcast_group, 11511, 100, self)
self._discover_dialog.network_join_request.connect(self._join_network)
self._discover_dialog.show()
def on_start_robot_clicked(self):
'''
Tries to start the master_discovery node on the machine requested by a dialog.
'''
# get the history list
user_list = [self.userComboBox.itemText(i) for i in reversed(range(self.userComboBox.count()))]
user_list.insert(0, 'last used')
params_optional = {'Discovery type': {':type': 'string', ':value': ['master_discovery', 'zeroconf']},
'ROS Master Name': {':type': 'string', ':value': 'autodetect'},
'ROS Master URI': {':type': 'string', ':value': 'ROS_MASTER_URI'},
'Robot hosts': {':type': 'string', ':value': ''},
'Username': {':type': 'string', ':value': user_list},
'MCast Group': {':type': 'string', ':value': '226.0.0.0'},
'Heartbeat [Hz]': {':type': 'float', ':value': 0.5}
}
params = {'Host': {':type': 'string', ':value': 'localhost'},
'Network(0..99)': {':type': 'int', ':value': '0'},
'Start sync': {':type': 'bool', ':value': nm.settings().start_sync_with_discovery},
'Optional Parameter': params_optional
}
dia = ParameterDialog(params, sidebar_var='Host', store_geometry="start_robot_dialog")
dia.setFilterVisible(False)
dia.setWindowTitle('Start discovery')
dia.setFocusField('Host')
if dia.exec_():
try:
params = dia.getKeywords(only_changed=False)
hostnames = params['Host'] if isinstance(params['Host'], list) else [params['Host']]
port = params['Network(0..99)']
start_sync = params['Start sync']
discovery_type = params['Optional Parameter']['Discovery type']
mastername = 'autodetect'
masteruri = 'ROS_MASTER_URI'
if len(hostnames) < 2:
mastername = params['Optional Parameter']['ROS Master Name']
masteruri = params['Optional Parameter']['ROS Master URI']
robot_hosts = params['Optional Parameter']['Robot hosts']
username = params['Optional Parameter']['Username']
mcast_group = params['Optional Parameter']['MCast Group']
heartbeat_hz = params['Optional Parameter']['Heartbeat [Hz]']
if robot_hosts:
robot_hosts = robot_hosts.replace(' ', ',')
robot_hosts = robot_hosts.replace(',,', ',')
robot_hosts = robot_hosts.replace('[', '')
robot_hosts = robot_hosts.replace(']', '')
for hostname in hostnames:
try:
args = []
if port is not None and port and int(port) < 100 and int(port) >= 0:
args.append('_mcast_port:=%s' % (11511 + int(port)))
else:
args.append('_mcast_port:=%s' % (11511))
if not mastername == 'autodetect':
args.append('_name:=%s' % (mastername))
args.append('_mcast_group:=%s' % mcast_group)
args.append('_robot_hosts:=[%s]' % robot_hosts)
args.append('_heartbeat_hz:=%s' % heartbeat_hz)
# TODO: remove the name parameter from the ROS parameter server
usr = username
if username == 'last used':
usr = nm.settings().host_user(hostname)
else:
nm.settings().set_host_user(hostname, usr)
muri = None if masteruri == 'ROS_MASTER_URI' else utf8(masteruri)
                        # stop master_discovery if it is already running
self._append_stop_for('/%s' % utf8(discovery_type), hostname, muri, self._progress_queue)
self._progress_queue_sync.start()
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'start discovering on %s' % hostname,
nm.starter().runNodeWithoutConfig,
{'host': utf8(hostname),
'package': 'fkie_master_discovery',
'binary': utf8(discovery_type),
'name': utf8(discovery_type),
'args': args,
'masteruri': muri,
'use_nmd': False,
'auto_pw_request': False,
'user': usr
})
# start the master sync with default settings
if start_sync:
if nm.is_local(hostname):
default_sync_args = ["_interface_url:='.'",
'_sync_topics_on_demand:=False',
'_ignore_hosts:=[]', '_sync_hosts:=[]',
'_ignore_nodes:=[]', '_sync_nodes:=[]',
'_ignore_topics:=[]', '_sync_topics:=[]',
'_ignore_services:=[]', '_sync_services:=[]',
'_sync_remote_nodes:=False']
self._append_stop_for('/master_sync', hostname, muri, self._progress_queue_sync)
self._progress_queue_sync.start()
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'start sync on %s' % hostname,
nm.starter().runNodeWithoutConfig,
{'host': utf8(hostname),
'package': 'fkie_master_sync',
'binary': 'master_sync',
'name': 'master_sync',
'args': default_sync_args,
'masteruri': muri,
'use_nmd': False,
'auto_pw_request': False,
'user': usr
})
else:
if hostname not in self._syncs_to_start:
self._syncs_to_start.append(hostname)
except (Exception, nm.StartException) as e:
import traceback
print(traceback.format_exc(1))
rospy.logwarn("Error while start master_discovery for %s: %s" % (utf8(hostname), utf8(e)))
MessageBox.warning(self, "Start error",
'Error while start master_discovery',
utf8(e))
self._progress_queue.start()
except Exception as e:
MessageBox.warning(self, "Start error",
'Error while parse parameter',
utf8(e))
def _append_stop_for(self, nodename, hostname, muri, queue):
'''
        Appends a stop command for the given node to the given queue.
'''
cmuri = muri
if hostname == 'localhost':
lmuri = self.getMasteruri()
if cmuri is None:
cmuri = lmuri
else:
cmuri = cmuri.replace('localhost', get_hostname(lmuri))
elif cmuri is None:
cmuri = nm.nameres().masteruri(nm.nameres().hostname(utf8(hostname)))
if cmuri is not None:
master = self.getMaster(cmuri.rstrip('/') + '/', create_new=False)
if master is not None:
found_nodes = master._get_nodes_by_name([nodename])
for node in found_nodes:
queue.add2queue(utf8(uuid.uuid4()), 'stop %s' % node.name, master.stop_node, {'node': node, 'force': True})
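    # Usage sketch (hypothetical values): before (re)starting a discovery node the
    # caller queues a stop job for a possibly running instance, e.g.
    #   self._append_stop_for('/master_discovery', 'robot1', None, self._progress_queue)
    # which resolves the master URI for 'robot1' and adds stop_node() jobs for all
    # matching nodes to the given queue.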
def _join_network(self, network):
try:
master = self.getMaster(self.getMasteruri())
if master is not None:
                # we need to stop the master_discovery node first. Otherwise the new (and the old) one will be stopped by ROS if one is running.
master.stop_nodes_by_name(['/master_discovery'])
time.sleep(0.5)
hostname = 'localhost'
args = []
if network < 100 and network >= 0:
args.append(''.join(['_mcast_port:=', utf8(11511 + int(network))]))
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'start discovering on ' + utf8(hostname),
nm.starter().runNodeWithoutConfig,
{'host': utf8(hostname),
'package': 'fkie_master_discovery',
'binary': 'master_discovery',
'name': 'master_discovery',
'args': args,
'masteruri': None,
'use_nmd': False,
'auto_pw_request': False
})
self._progress_queue.start()
except (Exception, nm.StartException) as e:
rospy.logwarn("Error while start master_discovery for %s: %s", utf8(hostname), utf8(e))
MessageBox.warning(self, "Start error",
'Error while start master_discovery',
utf8(e))
def poweroff_host(self, host):
try:
if nm.is_local(utf8(host)):
ret = MessageBox.warning(self, "ROS Node Manager",
"Do you really want to shutdown localhost?",
buttons=MessageBox.Ok | MessageBox.Cancel)
if ret == MessageBox.Cancel:
return
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'poweroff `%s`' % host,
nm.starter().poweroff,
{'host': '%s' % host})
masteruris = nm.nameres().masterurisbyaddr(host)
for masteruri in masteruris:
master = self.getMaster(masteruri)
master.stop_nodes_by_name(['/master_discovery'])
self._progress_queue.start()
self.on_description_update('Description', '')
self.launch_dock.raise_()
except (Exception, nm.StartException) as e:
rospy.logwarn("Error while poweroff %s: %s", host, utf8(e))
MessageBox.warning(self, "Run error",
'Error while poweroff %s' % host,
'%s' % utf8(e))
def rosclean(self, masteruri):
try:
host = get_hostname(masteruri)
nuri = nmdurl.nmduri(masteruri)
ret = MessageBox.warning(self, "ROS Node Manager",
"Do you really want delete all logs on `%s`?" % host,
buttons=MessageBox.Ok | MessageBox.Cancel)
if ret == MessageBox.Cancel:
return
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'rosclean `%s`' % nuri,
nm.starter().rosclean,
{'grpc_uri': '%s' % nuri})
master = self.getMaster(masteruri, create_new=False)
if master is not None:
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'update `%s`' % nuri,
master.perform_nmd_requests,
{})
self._progress_queue.start()
self.launch_dock.raise_()
except (Exception, nm.StartException) as e:
rospy.logwarn("Error while rosclean %s: %s", masteruri, utf8(e))
MessageBox.warning(self, "Run error",
'Error while rosclean %s' % masteruri,
'%s' % utf8(e))
# ======================================================================================================================
# Handling of the launch view signals
# ======================================================================================================================
def on_load_launch_file(self, path, args={}, masteruri=None):
'''
Load the launch file. A ROS master must be selected first.
:param str path: the path of the launch file.
'''
master_proxy = None
if masteruri is not None:
master_proxy = self.getMaster(masteruri, False)
if master_proxy is None:
master_proxy = self.stackedLayout.currentWidget()
if isinstance(master_proxy, MasterViewProxy):
try:
master_proxy.launchfiles = (path, args)
except Exception as e:
import traceback
print(utf8(traceback.format_exc(1)))
MessageBox.warning(self, "Loading launch file", path, '%s' % utf8(e))
# self.setCursor(cursor)
else:
MessageBox.information(self, "Load of launch file", "Select a master first!",)
def on_launch_edit(self, grpc_path, search_text='', trynr=1):
'''
        Opens the given path in an editor. If the file is already open, select that editor.
        If a search text is given, search for the text in the files and go to the line.
:param str grpc_path: path with grpc prefix
:param str search_text: A string to search in file
'''
if grpc_path:
if grpc_path in self.editor_dialogs:
try:
self.editor_dialogs[grpc_path].on_load_request(grpc_path, search_text, only_launch=True)
self.editor_dialogs[grpc_path].raise_()
self.editor_dialogs[grpc_path].activateWindow()
except Exception:
if trynr > 1:
raise
import traceback
print(traceback.format_exc())
del self.editor_dialogs[grpc_path]
self.on_launch_edit(grpc_path, search_text, 2)
else:
editor = Editor([grpc_path], search_text, master_name=self.launch_dock.path2mastername(grpc_path))
if editor.tabWidget.count() > 0:
self.editor_dialogs[grpc_path] = editor
editor.finished_signal.connect(self._editor_dialog_closed)
editor.show()
def _editor_dialog_closed(self, files):
'''
        If an editor dialog is closed, remove it from the list of open editors.
'''
if files[0] in self.editor_dialogs:
del self.editor_dialogs[files[0]]
def on_launch_transfer(self, files):
'''
        Copies the selected files to a remote host.
        :param files: a list with paths
        :type files: [str]
'''
# use node manager daemon
if files:
nmd_url = nmdurl.nmduri()
if self.currentMaster is not None:
nmd_url = get_hostname(self.currentMaster.masteruri)
params = {'master': {':type': 'string', ':value': self.currentMaster.masteruri},
'recursive': {':type': 'bool', ':value': False}
}
dia = ParameterDialog(params, store_geometry="launch_transfer_dialog")
dia.setFilterVisible(False)
dia.setWindowTitle('Transfer file')
dia.setFocusField('master')
if dia.exec_():
try:
params = dia.getKeywords()
nmd_url = params['master']
recursive = params['recursive']
for path in files:
nmd_url = nmdurl.nmduri(nmd_url)
rospy.loginfo("TRANSFER to %s: %s" % (nmd_url, path))
self.launch_dock.progress_queue.add2queue('%s' % uuid.uuid4(),
'transfer files to %s' % nmd_url,
nm.starter().transfer_file_nmd,
{'grpc_url': '%s' % nmd_url,
'path': path,
'auto_pw_request': False
})
if recursive:
self.launch_dock.progress_queue.add2queue('%s' % uuid.uuid4(),
"transfer recursive '%s' to %s" % (path, nmd_url),
self._recursive_transfer,
{'path': path, 'nmd_url': nmd_url})
self.launch_dock.progress_queue.start()
except Exception as e:
MessageBox.warning(self, "Transfer error",
                                   'Error while transferring files', '%s' % utf8(e))
def _recursive_transfer(self, path, nmd_url):
includes = nm.nmd().launch.get_included_files_set(path, True, search_in_ext=nm.settings().SEARCH_IN_EXT)
copy_set = set()
for inc_file in includes:
copy_set.add(inc_file)
for cppath in copy_set:
self.launch_dock.progress_queue.add2queue(utf8(uuid.uuid4()),
'transfer file %s to %s' % (cppath, nmd_url),
nm.starter().transfer_file_nmd,
{'grpc_url': '%s' % nmd_url,
'path': cppath
})
self.launch_dock.progress_queue.start()
def _reload_globals_at_next_start(self, launch_file):
if self.currentMaster is not None:
self.currentMaster.reload_global_parameter_at_next_start(launch_file)
# ======================================================================================================================
# Change file detection
# ======================================================================================================================
def changeEvent(self, event):
'''
'''
if self.isActiveWindow() and self.isActiveWindow() != self._last_window_state:
if hasattr(self, 'currentMaster') and self.currentMaster is not None:
# perform delayed checks for changed files or multiple screens
QTimer.singleShot(250, self.currentMaster.perform_master_checks)
self._last_window_state = self.isActiveWindow()
QMainWindow.changeEvent(self, event)
def enterEvent(self, event):
'''
        Check for changed files when the main GUI is entered.
'''
QMainWindow.enterEvent(self, event)
# ======================================================================================================================
# Capabilities handling
# ======================================================================================================================
def on_start_nodes(self, masteruri, cfg, nodes):
if masteruri is not None:
master = self.getMaster(masteruri)
master.start_nodes_by_name(nodes, cfg)
def on_stop_nodes(self, masteruri, nodes):
if masteruri is not None:
master = self.getMaster(masteruri)
master.stop_nodes_by_name(nodes)
def on_description_update(self, title, text, force=False):
        # ignore updates if we are currently browsing in the text dialog
if self._description_accept:
if self._description_accept != title:
if not force:
return
elif not title.endswith(' diagnostic'): # add 'back'-link if title ends with ' diagnostic'
self._description_accept = ''
wtitle = self.descriptionDock.windowTitle().replace('&', '')
same_title = wtitle == title
valid_sender = self.sender() == self.currentMaster or not isinstance(self.sender(), MasterViewProxy)
no_focus = not self.descriptionTextEdit.hasFocus()
if (valid_sender) and (same_title or no_focus or self._accept_next_update):
self._accept_next_update = False
# _description_accept is set to True on click on link of {node, topic, service}
if not same_title:
if self._description_accept:
self._description_history.append((wtitle, self.descriptionTextEdit.toHtml()))
else:
del self._description_history[:]
            # prepend a 'back' link to the text
if self._description_history:
if len(self._description_history) > 15:
self._description_history.pop(0)
text = '<a href="back://" title="back">back</a>%s' % text
self.descriptionDock.setWindowTitle(title)
vbar = self.descriptionTextEdit.verticalScrollBar()
stored_vpos = vbar.value()
self.descriptionTextEdit.setText(text)
vbar.setValue(stored_vpos)
if text and force:
self.descriptionDock.raise_()
def on_description_update_cap(self, title, text):
self.descriptionDock.setWindowTitle(title)
self.descriptionTextEdit.setText(text)
def on_description_anchorClicked(self, url):
self._description_accept = self.descriptionDock.windowTitle().replace('&', '')
self._accept_next_update = True
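        # The description panel emits pseudo-URL schemes (open-sync-dialog://, node://,
        # topic://, service://, launch://, poweroff://, rosclean://, back://, ...);
        # the chain below dispatches each scheme to the matching master or GUI action.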
if url.toString().startswith('open-sync-dialog://'):
self.on_sync_dialog_released(False, url.toString().replace('open-sync-dialog', 'http'), True)
elif url.toString().startswith('show-all-screens://'):
master = self.getMaster(url.toString().replace('show-all-screens', 'http'), False)
if master is not None:
master.on_show_all_screens()
elif url.toString().startswith('remove-all-launch-server://'):
master = self.getMaster(url.toString().replace('remove-all-launch-server', 'http'), False)
if master is not None:
master.on_remove_all_launch_server()
elif url.toString().startswith('node://'):
if self.currentMaster is not None:
self._description_accept = self._url_path(url)
self.currentMaster.on_node_selection_changed(None, None, True, self._description_accept)
elif url.toString().startswith('topic://'):
if self.currentMaster is not None:
self._description_accept = self._url_path(url)
self.currentMaster.on_topic_selection_changed(None, None, True, self._description_accept)
elif url.toString().startswith('topicecho://'):
if self.currentMaster is not None:
self.currentMaster.show_topic_output(self._url_path(url), False)
elif url.toString().startswith('topichz://'):
if self.currentMaster is not None:
self.currentMaster.show_topic_output(self._url_path(url), True)
elif url.toString().startswith('topichzssh://'):
if self.currentMaster is not None:
self.currentMaster.show_topic_output(self._url_path(url), True, use_ssh=True)
elif url.toString().startswith('topicpub://'):
if self.currentMaster is not None:
self.currentMaster.start_publisher(self._url_path(url))
elif url.toString().startswith('topicrepub://'):
if self.currentMaster is not None:
self.currentMaster.start_publisher(self._url_path(url), True)
elif url.toString().startswith('topicstop://'):
if self.currentMaster is not None:
self.currentMaster.on_topic_pub_stop_clicked(self._url_path(url))
elif url.toString().startswith('service://'):
if self.currentMaster is not None:
self._description_accept = self._url_path(url)
self.currentMaster.on_service_selection_changed(None, None, True, self._description_accept)
elif url.toString().startswith('servicecall://'):
if self.currentMaster is not None:
self.currentMaster.service_call(self._url_path(url))
elif url.toString().startswith('unregister-node://'):
if self.currentMaster is not None:
self.currentMaster.on_unregister_nodes()
elif url.toString().startswith('start-node://'):
if self.currentMaster is not None:
self.currentMaster.on_start_clicked()
elif url.toString().startswith('restart-node://'):
if self.currentMaster is not None:
self.currentMaster.on_force_start_nodes()
elif url.toString().startswith('restart-node-g://'):
if self.currentMaster is not None:
self.currentMaster.on_force_start_nodes(True)
elif url.toString().startswith('start-node-at-host://'):
if self.currentMaster is not None:
self.currentMaster.on_start_nodes_at_host()
elif url.toString().startswith('start-node-adv://'):
if self.currentMaster is not None:
self.currentMaster.on_start_alt_clicked()
elif url.toString().startswith('kill-node://'):
if self.currentMaster is not None:
self.currentMaster.on_kill_nodes()
elif url.toString().startswith('kill-pid://pid'):
if self.currentMaster is not None:
self.currentMaster.on_kill_pid(int(url.toString().replace('kill-pid://pid', '')))
elif url.toString().startswith('kill-screen://'):
if self.currentMaster is not None:
self.currentMaster.on_kill_screens()
elif url.toString().startswith('copy-log-path://'):
if self.currentMaster is not None:
self.currentMaster.on_log_path_copy()
elif url.toString().startswith('copy://'):
QApplication.clipboard().setText(url.toString().replace('copy://', ''))
elif url.toString().startswith('launch://'):
self.on_launch_edit(self._url_path(url), '')
elif url.toString().startswith('reload-globals://'):
self._reload_globals_at_next_start(url.toString().replace('reload-globals://', 'grpc://'))
elif url.toString().startswith('poweroff://'):
self.poweroff_host(self._url_host(url))
elif url.toString().startswith('rosclean://'):
self.rosclean(url.toString().replace('rosclean', 'http'))
elif url.toString().startswith('sysmon-switch://'):
self.sysmon_active_update(url.toString().replace('sysmon-switch', 'http'))
elif url.toString().startswith('nmd-cfg://'):
self.nmd_cfg(url.toString().replace('nmd-cfg', 'http'))
elif url.toString().startswith('nm-cfg://'):
self._on_settings_button_clicked()
elif url.toString().startswith('show-all-diagnostics://'):
if self.currentMaster is not None:
self.currentMaster.show_diagnostic_messages(self._url_path(url))
elif url.toString().startswith('open-edit://'):
self.on_launch_edit(url.toString().replace('open-edit://', 'grpc://'))
elif url.toString().startswith('back://'):
if self._description_history:
                # show the last description when 'back' is clicked
title, text = self._description_history[-1]
self._description_accept = title
del self._description_history[-1]
self.descriptionDock.setWindowTitle(title)
self.descriptionTextEdit.setText(text)
else:
try:
from python_qt_binding.QtGui import QDesktopServices
QDesktopServices.openUrl(url)
except Exception as err:
rospy.logwarn("can't open url %s: %s" % (url, err))
self._accept_next_update = False
def _url_path(self, url):
        '''Helper for Qt4/Qt5 compatibility: return the path component of the URL as a string.'''
if hasattr(url, 'encodedPath'):
return utf8(url.encodedPath())
else:
return utf8(url.path())
def _url_host(self, url):
        '''Helper for Qt4/Qt5 compatibility: return the host component of the URL as a string.'''
if hasattr(url, 'encodedHost'):
return utf8(url.encodedHost())
else:
return utf8(url.host())
def _restart_nodes(self):
if self.currentMaster is not None:
self.currentMaster.on_force_start_nodes()
def _restart_nodes_g(self):
if self.currentMaster is not None:
self.currentMaster.on_force_start_nodes(True)
def keyPressEvent(self, event):
'''
'''
QMainWindow.keyPressEvent(self, event)
if event == QKeySequence.Find:
focus_widget = QApplication.focusWidget()
if not isinstance(focus_widget, EnhancedLineEdit):
# set focus to filter line
if self.currentMaster is not None:
self.currentMaster.focus_filter_line()
def _show_section_menu(self, event=None):
# self._timer_alt = None
if self._select_index == 0:
if self.currentMaster is not None:
if self.currentMaster._is_current_tab_name('tabNodes'):
self.currentMaster.ui.nodeTreeView.setFocus(Qt.TabFocusReason)
elif self.currentMaster._is_current_tab_name('tabTopics'):
self.currentMaster.ui.topicsView.setFocus(Qt.TabFocusReason)
elif self.currentMaster._is_current_tab_name('tabServices'):
self.currentMaster.ui.servicesView.setFocus(Qt.TabFocusReason)
elif self.currentMaster._is_current_tab_name('tabParameter'):
self.currentMaster.ui.parameterView.setFocus(Qt.TabFocusReason)
elif self._select_index == 1:
self.launch_dock.raise_()
self.launch_dock.ui_file_view.setFocus(Qt.TabFocusReason)
elif self._select_index == 2:
self.descriptionDock.raise_()
self.descriptionTextEdit.setFocus(Qt.TabFocusReason)
elif self._select_index == 3:
self.startRobotButton.setFocus(Qt.TabFocusReason)
elif self._select_index == 4:
self.hideDocksButton.setFocus(Qt.TabFocusReason)
else:
self._select_index = -1
self._select_index += 1
def keyReleaseEvent(self, event):
'''
        Defines some of the shortcuts for navigation/management in the launch
        list view or the topics view.
'''
key_mod = QApplication.keyboardModifiers()
if self.currentMaster is not None and self.currentMaster.ui.nodeTreeView.hasFocus():
if event.key() == Qt.Key_F4 and not key_mod:
if self.currentMaster.ui.editConfigButton.isEnabled():
self.currentMaster.on_edit_config_clicked()
elif self.currentMaster.ui.editRosParamButton.isEnabled():
self.currentMaster.on_edit_rosparam_clicked()
elif event.key() == Qt.Key_F3 and not key_mod and self.currentMaster.ui.ioButton.isEnabled():
self.currentMaster.on_io_clicked()
QMainWindow.keyReleaseEvent(self, event)
def image_mouseDoubleClickEvent(self, event):
'''
Set the robot image
'''
if self.currentMaster:
try:
if not os.path.isdir(nm.settings().robots_path):
os.makedirs(nm.settings().robots_path)
(fileName, _) = QFileDialog.getOpenFileName(self,
"Set robot image",
nm.settings().robots_path,
"Image files (*.bmp *.gif *.jpg *.jpeg *.png *.pbm *.xbm);;All files (*)")
if fileName and self.__current_master_label_name:
p = QPixmap(fileName)
p.save(nm.settings().robot_image_file(self.__current_master_label_name))
if self.__current_master_label_name in self.__icons:
del self.__icons[self.__current_master_label_name]
self._assigne_icon(self.__current_master_label_name)
except Exception as e:
MessageBox.warning(self, "Error",
'Set robot image for %s failed!' % utf8(self.__current_master_label_name),
'%s' % utf8(e))
rospy.logwarn("Error while set robot image for %s: %s", utf8(self.__current_master_label_name), utf8(e))
def _set_custom_colors(self):
colors = [self._default_color, QColor(87, 93, 94), QColor(60, 116, 96)]
# QT4 compatibility hack (expected type by QT4 is QRgb, Qt5 is QColor)
if QT_BINDING_VERSION.startswith("4"):
colors = [c.rgb() for c in colors]
QColorDialog.setStandardColor(0, colors[0])
QColorDialog.setStandardColor(1, colors[1])
QColorDialog.setStandardColor(2, colors[2])
def _new_color(self, color):
bg_style = "QWidget#expert_tab { background-color: qlineargradient(x1: 0, y1: 0, x2: 0, y2: 1, stop: 0 %s, stop: 0.7 %s);}" % (color.name(), self._default_color.name())
self.expert_tab.setStyleSheet("%s" % (bg_style))
def mastername_mouseDoubleClickEvent(self, event):
'''
Set the robot color
'''
if self.currentMaster:
try:
prev_color = QColor.fromRgb(nm.settings().host_color(self.__current_master_label_name, self._default_color.rgb()))
cdiag = QColorDialog(prev_color)
cdiag.currentColorChanged.connect(self._new_color)
if cdiag.exec_():
nm.settings().set_host_color(self.__current_master_label_name, cdiag.selectedColor().rgb())
else:
self._new_color(prev_color)
except Exception as e:
MessageBox.warning(self, "Error",
'Set robot color for %s failed!' % utf8(self.__current_master_label_name),
'%s' % utf8(e))
rospy.logwarn("Error while set robot color for %s: %s", utf8(self.__current_master_label_name), utf8(e))
def _on_robot_icon_changed(self, masteruri, path):
'''
One of the robot icons was changed. Update the icon.
'''
master = self.getMaster(masteruri, False)
if master:
self._assigne_icon(master.mastername, resolve_url(path))
def _callback_system_diagnostics(self, data, grpc_url=''):
try:
muri = nmdurl.masteruri(grpc_url)
master = self.getMaster(muri, create_new=False)
if master:
master.update_system_diagnostics(data)
self.master_model.update_master_diagnostic(nm.nameres().mastername(muri), data)
except Exception as err:
            rospy.logwarn('Error while processing diagnostic messages: %s' % utf8(err))
def _callback_diagnostics(self, data, grpc_url=''):
try:
for diagnostic in data.status:
self.diagnostics_signal.emit(diagnostic)
except Exception as err:
            rospy.logwarn('Error while processing diagnostic messages: %s' % utf8(err))
def _callback_username(self, username, grpc_url=''):
try:
muri = nmdurl.masteruri(grpc_url)
master = self.getMaster(muri, create_new=False)
if master:
master.daemon_user = username
except Exception as err:
            rospy.logwarn('Error while processing username from daemon: %s' % utf8(err))
def sysmon_active_update(self, masteruri):
master = self.getMaster(masteruri, create_new=False)
if master is not None:
master.sysmon_active_update()
def nmd_cfg(self, masteruri):
nmd_uri = nmdurl.nmduri(masteruri)
nm.nmd().settings.get_config_threaded(nmd_uri)
def _nmd_yaml_cfg(self, data, nmdurl):
params = {}
try:
params = ruamel.yaml.load(data, Loader=ruamel.yaml.Loader)
except Exception as err:
rospy.logwarn("Error while parse daemon configuration: %s" % utf8(err))
dia = ParameterDialog(params, store_geometry="nmd_cfg_dialog")
dia.setWindowTitle('Daemon Configuration')
dia.setFocusField('load_warn_level')
if dia.exec_():
try:
params = dia.getKeywords(with_tags=True)
buf = ruamel.yaml.compat.StringIO()
ruamel.yaml.dump(params, buf, Dumper=ruamel.yaml.RoundTripDumper)
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'%s: set configuration for daemon' % nmdurl,
nm.nmd().settings.set_config,
{'grpc_url': nmdurl,
'data': buf.getvalue()
})
self._progress_queue.add2queue(utf8(uuid.uuid4()),
'%s: get system diagnostics' % nmdurl,
nm.nmd().monitor.get_system_diagnostics_threaded,
{'grpc_url': nmdurl})
self._progress_queue.start()
except Exception as err:
import traceback
print(traceback.format_exc())
MessageBox.warning(self, "Daemon configuration error",
                                   'Error while parsing parameters',
'%s' % utf8(err))
def on_nmd_err(self, method, url, path, error):
'''
Handles the error messages from node_manager_daemon.
        :param str method: name of the method that caused this error.
        :param str url: the URI of the node manager daemon.
        :param str path: the path involved in the failed request.
        :param Exception error: the exception that occurred.
'''
muri = nmdurl.masteruri(url)
master = self.getMaster(muri, False)
if master is not None and not master._has_nmd:
# no daemon for this master available, ignore errors
return
reason = method
if method == '_get_nodes':
reason = 'get launch configuration'
rospy.logwarn("Error while %s from %s: %s" % (reason, url, utf8(error)))
if hasattr(error, 'code'):
if error.code() == grpc.StatusCode.UNIMPLEMENTED:
muri = nmdurl.masteruri(url)
master = self.getMaster(muri, create_new=False)
if master:
self.master_model.add_master_error(nm.nameres().mastername(muri), 'node_manager_daemon has unimplemented methods! Please update!')
master.set_diagnostic_warn('/node_manager_daemon', 'unimplemented methods detected! Please update!')
# ======================================================================================================================
# Help site handling
# ======================================================================================================================
def _on_help_go_back(self):
self._on_help_link_clicked(QUrl(''), history_idx=-1)
def _on_help_go_home(self):
self._on_help_link_clicked(self._help_root_url)
def _on_help_go_forward(self):
self._on_help_link_clicked(QUrl(''), history_idx=1)
def _on_help_link_clicked(self, link, history_idx=0):
if link.isEmpty():
# read from history if given link is empty
try:
link = self._help_history[self._help_history_idx + history_idx]
self._help_history_idx += history_idx
except Exception:
pass
if not link.isEmpty():
if history_idx == 0:
# it was not a history request -> add link to history
current_link = self.ui_help_web_view.url()
if current_link != link:
# if we navigate in the history previously remove forward items
if len(self._help_history) - 1 > self._help_history_idx:
self._help_history = self._help_history[:self._help_history_idx + 1]
self._help_history_idx += 1
self._help_history.append(link)
if link.scheme() == 'file':
try:
fpath = link.toLocalFile()
if fpath.endswith('.rst'):
# render .rst files
with open(fpath) as f:
self.ui_help_web_view.setHtml(examples.html_body(utf8(f.read())), link)
else:
self.ui_help_web_view.setUrl(link)
except Exception:
import traceback
msg = "Error while generate help: %s" % traceback.format_exc(2)
rospy.logwarn(msg)
else:
QDesktopServices.openUrl(link)
# update navigation buttons
self.ui_help_back.setEnabled(self._help_history_idx > 0)
self.ui_help_forward.setEnabled(self._help_history_idx < len(self._help_history) - 1)
| 53.833958 | 250 | 0.565046 |
794000deec5b6b8ac744fbc5125dec54cce1908b | 9,859 | py | Python | resources_rc.py | AlliedToasters/detect-craters | 6aa9074bf2b339b62863dec739882a356630466c | ["BSD-3-Clause"] | null | null | null | resources_rc.py | AlliedToasters/detect-craters | 6aa9074bf2b339b62863dec739882a356630466c | ["BSD-3-Clause"] | null | null | null | resources_rc.py | AlliedToasters/detect-craters | 6aa9074bf2b339b62863dec739882a356630466c | ["BSD-3-Clause"] | null | null | null
# -*- coding: utf-8 -*-
# Resource object code
#
# Created by: The Resource Compiler for PyQt5 (Qt v5.15.2)
#
# WARNING! All changes made in this file will be lost!
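# This module was generated by the PyQt5 resource compiler (pyrcc5) from a .qrc file.
# Importing it registers the embedded icons (resource paths such as
# ':/plugins/CircleCraters/icon.png') with Qt's resource system via the
# qInitResources() call at the bottom of the file.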
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x01\x64\
\x89\
\x50\x4e\x47\x0d\x0a\x1a\x0a\x00\x00\x00\x0d\x49\x48\x44\x52\x00\
\x00\x00\x18\x00\x00\x00\x18\x08\x04\x00\x00\x00\x4a\x7e\xf5\x73\
\x00\x00\x00\x20\x63\x48\x52\x4d\x00\x00\x7a\x26\x00\x00\x80\x84\
\x00\x00\xfa\x00\x00\x00\x80\xe8\x00\x00\x75\x30\x00\x00\xea\x60\
\x00\x00\x3a\x98\x00\x00\x17\x70\x9c\xba\x51\x3c\x00\x00\x00\x02\
\x62\x4b\x47\x44\x00\xff\x87\x8f\xcc\xbf\x00\x00\x00\x09\x70\x48\
\x59\x73\x00\x00\x0b\x13\x00\x00\x0b\x13\x01\x00\x9a\x9c\x18\x00\
\x00\x00\xdc\x49\x44\x41\x54\x38\xcb\xbd\x92\x41\x0e\x82\x30\x10\
\x45\x5f\x5c\xe0\x21\x24\x1e\xca\x04\x23\x91\x70\x26\x20\x26\xde\
\x41\xa3\x47\xc2\x34\xe0\x46\xbc\x00\x6c\x70\xa5\x9d\x99\x56\x97\
\xce\xac\xfe\xf4\xbf\x64\xfa\x5b\xf8\x77\x2d\x29\xb9\xe2\x98\x98\
\x70\x5c\x28\x58\xfe\xb2\xef\xb8\x33\x9b\xee\xd9\xc6\xcd\x0b\x8e\
\x81\xf9\xdd\x07\x16\x21\x20\xed\x0f\x2a\x6a\x06\x85\x04\xcb\x48\
\xfb\x0a\x80\x54\x21\x99\xbe\xaa\xdc\xbd\xfa\xcc\x1b\x31\xed\x48\
\x3c\x50\xaa\x8d\xeb\x28\x30\xb3\xf7\xc0\x55\x1d\x0c\xa4\x00\xac\
\x79\xaa\xf9\xd9\x03\xce\xa4\x32\xd0\xd0\x18\xfb\xcc\xcd\x03\xd3\
\xd7\x40\x65\x8f\x21\x60\xe3\xd4\x7a\xb4\x2b\xd9\x38\xad\x6e\x3d\
\x70\x89\xc6\x69\xf5\xc9\x03\x45\x34\x4e\xab\x73\xf9\x70\x7d\x24\
\x4e\xad\x9d\x7c\x38\xd8\x46\xe3\x94\x7a\x63\x7f\xd3\xe1\x67\xa4\
\x75\xec\x7b\x7f\x47\xaa\xd8\xf7\x06\xc8\xe8\x02\xb3\x0b\x97\x91\
\x95\xb0\xe7\xcc\x8d\x91\x91\x96\x13\xb9\xbe\xea\x3f\xea\x05\xa7\
\xf0\xfd\xeb\x14\xb8\xd5\x70\x00\x00\x00\x00\x49\x45\x4e\x44\xae\
\x42\x60\x82\
\x00\x00\x01\x13\
\x3c\
\x73\x76\x67\x20\x78\x6d\x6c\x6e\x73\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
\x30\x2f\x73\x76\x67\x22\x20\x77\x69\x64\x74\x68\x3d\x22\x34\x38\
\x22\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x34\x38\x22\x20\x76\x69\
\x65\x77\x42\x6f\x78\x3d\x22\x30\x20\x30\x20\x34\x38\x20\x34\x38\
\x22\x3e\x3c\x70\x61\x74\x68\x20\x64\x3d\x22\x4d\x32\x36\x20\x31\
\x34\x68\x2d\x34\x76\x38\x68\x2d\x38\x76\x34\x68\x38\x76\x38\x68\
\x34\x76\x2d\x38\x68\x38\x76\x2d\x34\x68\x2d\x38\x76\x2d\x38\x7a\
\x4d\x32\x34\x20\x34\x43\x31\x32\x2e\x39\x35\x20\x34\x20\x34\x20\
\x31\x32\x2e\x39\x35\x20\x34\x20\x32\x34\x73\x38\x2e\x39\x35\x20\
\x32\x30\x20\x32\x30\x20\x32\x30\x20\x32\x30\x2d\x38\x2e\x39\x35\
\x20\x32\x30\x2d\x32\x30\x53\x33\x35\x2e\x30\x35\x20\x34\x20\x32\
\x34\x20\x34\x7a\x6d\x30\x20\x33\x36\x63\x2d\x38\x2e\x38\x32\x20\
\x30\x2d\x31\x36\x2d\x37\x2e\x31\x38\x2d\x31\x36\x2d\x31\x36\x53\
\x31\x35\x2e\x31\x38\x20\x38\x20\x32\x34\x20\x38\x73\x31\x36\x20\
\x37\x2e\x31\x38\x20\x31\x36\x20\x31\x36\x2d\x37\x2e\x31\x38\x20\
\x31\x36\x2d\x31\x36\x20\x31\x36\x7a\x22\x2f\x3e\x3c\x2f\x73\x76\
\x67\x3e\
\x00\x00\x01\x58\
\x3c\
\x73\x76\x67\x20\x78\x6d\x6c\x6e\x73\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
\x30\x2f\x73\x76\x67\x22\x20\x77\x69\x64\x74\x68\x3d\x22\x34\x38\
\x22\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x34\x38\x22\x20\x76\x69\
\x65\x77\x42\x6f\x78\x3d\x22\x30\x20\x30\x20\x34\x38\x20\x34\x38\
\x22\x3e\x3c\x70\x61\x74\x68\x20\x64\x3d\x22\x4d\x34\x31\x2e\x30\
\x39\x20\x31\x30\x2e\x34\x35\x6c\x2d\x32\x2e\x37\x37\x2d\x33\x2e\
\x33\x36\x43\x33\x37\x2e\x37\x36\x20\x36\x2e\x34\x33\x20\x33\x36\
\x2e\x39\x33\x20\x36\x20\x33\x36\x20\x36\x48\x31\x32\x63\x2d\x2e\
\x39\x33\x20\x30\x2d\x31\x2e\x37\x36\x2e\x34\x33\x2d\x32\x2e\x33\
\x31\x20\x31\x2e\x30\x39\x6c\x2d\x32\x2e\x37\x37\x20\x33\x2e\x33\
\x36\x43\x36\x2e\x33\x34\x20\x31\x31\x2e\x31\x35\x20\x36\x20\x31\
\x32\x2e\x30\x33\x20\x36\x20\x31\x33\x76\x32\x35\x63\x30\x20\x32\
\x2e\x32\x31\x20\x31\x2e\x37\x39\x20\x34\x20\x34\x20\x34\x68\x32\
\x38\x63\x32\x2e\x32\x31\x20\x30\x20\x34\x2d\x31\x2e\x37\x39\x20\
\x34\x2d\x34\x56\x31\x33\x63\x30\x2d\x2e\x39\x37\x2d\x2e\x33\x34\
\x2d\x31\x2e\x38\x35\x2d\x2e\x39\x31\x2d\x32\x2e\x35\x35\x7a\x4d\
\x32\x34\x20\x33\x35\x4c\x31\x33\x20\x32\x34\x68\x37\x76\x2d\x34\
\x68\x38\x76\x34\x68\x37\x4c\x32\x34\x20\x33\x35\x7a\x4d\x31\x30\
\x2e\x32\x35\x20\x31\x30\x6c\x31\x2e\x36\x33\x2d\x32\x68\x32\x34\
\x6c\x31\x2e\x38\x37\x20\x32\x68\x2d\x32\x37\x2e\x35\x7a\x22\x2f\
\x3e\x3c\x2f\x73\x76\x67\x3e\
\x00\x00\x01\x58\
\x3c\
\x73\x76\x67\x20\x78\x6d\x6c\x6e\x73\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
\x30\x2f\x73\x76\x67\x22\x20\x77\x69\x64\x74\x68\x3d\x22\x34\x38\
\x22\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x34\x38\x22\x20\x76\x69\
\x65\x77\x42\x6f\x78\x3d\x22\x30\x20\x30\x20\x34\x38\x20\x34\x38\
\x22\x3e\x3c\x70\x61\x74\x68\x20\x64\x3d\x22\x4d\x33\x39\x2e\x36\
\x32\x20\x32\x39\x2e\x39\x38\x4c\x34\x32\x20\x32\x38\x2e\x31\x33\
\x6c\x2d\x32\x2e\x38\x35\x2d\x32\x2e\x38\x35\x2d\x32\x2e\x33\x38\
\x20\x31\x2e\x38\x35\x20\x32\x2e\x38\x35\x20\x32\x2e\x38\x35\x7a\
\x6d\x2d\x2e\x38\x39\x2d\x39\x2e\x34\x33\x4c\x34\x32\x20\x31\x38\
\x20\x32\x34\x20\x34\x6c\x2d\x35\x2e\x38\x33\x20\x34\x2e\x35\x33\
\x20\x31\x35\x2e\x37\x35\x20\x31\x35\x2e\x37\x35\x20\x34\x2e\x38\
\x31\x2d\x33\x2e\x37\x33\x7a\x4d\x36\x2e\x35\x35\x20\x32\x4c\x34\
\x20\x34\x2e\x35\x35\x6c\x38\x2e\x34\x34\x20\x38\x2e\x34\x34\x4c\
\x36\x20\x31\x38\x6c\x33\x2e\x32\x36\x20\x32\x2e\x35\x33\x4c\x32\
\x34\x20\x33\x32\x6c\x34\x2e\x31\x39\x2d\x33\x2e\x32\x36\x20\x32\
\x2e\x38\x35\x20\x32\x2e\x38\x35\x2d\x37\x2e\x30\x36\x20\x35\x2e\
\x34\x39\x4c\x39\x2e\x32\x34\x20\x32\x35\x2e\x36\x31\x20\x36\x20\
\x32\x38\x2e\x31\x33\x6c\x31\x38\x20\x31\x34\x20\x39\x2e\x38\x39\
\x2d\x37\x2e\x37\x4c\x34\x31\x2e\x34\x36\x20\x34\x32\x20\x34\x34\
\x20\x33\x39\x2e\x34\x35\x20\x36\x2e\x35\x35\x20\x32\x7a\x22\x2f\
\x3e\x3c\x2f\x73\x76\x67\x3e\
\x00\x00\x00\xda\
\x3c\
\x73\x76\x67\x20\x78\x6d\x6c\x6e\x73\x3d\x22\x68\x74\x74\x70\x3a\
\x2f\x2f\x77\x77\x77\x2e\x77\x33\x2e\x6f\x72\x67\x2f\x32\x30\x30\
\x30\x2f\x73\x76\x67\x22\x20\x77\x69\x64\x74\x68\x3d\x22\x34\x38\
\x22\x20\x68\x65\x69\x67\x68\x74\x3d\x22\x34\x38\x22\x20\x76\x69\
\x65\x77\x42\x6f\x78\x3d\x22\x30\x20\x30\x20\x34\x38\x20\x34\x38\
\x22\x3e\x3c\x70\x61\x74\x68\x20\x64\x3d\x22\x4d\x32\x33\x2e\x39\
\x39\x20\x33\x37\x2e\x30\x38\x4c\x39\x2e\x32\x34\x20\x32\x35\x2e\
\x36\x31\x20\x36\x20\x32\x38\x2e\x31\x33\x6c\x31\x38\x20\x31\x34\
\x20\x31\x38\x2d\x31\x34\x2d\x33\x2e\x32\x36\x2d\x32\x2e\x35\x33\
\x2d\x31\x34\x2e\x37\x35\x20\x31\x31\x2e\x34\x38\x7a\x4d\x32\x34\
\x20\x33\x32\x6c\x31\x34\x2e\x37\x33\x2d\x31\x31\x2e\x34\x35\x4c\
\x34\x32\x20\x31\x38\x20\x32\x34\x20\x34\x20\x36\x20\x31\x38\x6c\
\x33\x2e\x32\x36\x20\x32\x2e\x35\x33\x4c\x32\x34\x20\x33\x32\x7a\
\x22\x2f\x3e\x3c\x2f\x73\x76\x67\x3e\
"
qt_resource_name = b"\
\x00\x07\
\x07\x3b\xe0\xb3\
\x00\x70\
\x00\x6c\x00\x75\x00\x67\x00\x69\x00\x6e\x00\x73\
\x00\x0d\
\x0e\x99\xe8\x93\
\x00\x43\
\x00\x69\x00\x72\x00\x63\x00\x6c\x00\x65\x00\x43\x00\x72\x00\x61\x00\x74\x00\x65\x00\x72\x00\x73\
\x00\x08\
\x0a\x61\x5a\xa7\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x2e\x00\x70\x00\x6e\x00\x67\
\x00\x05\
\x00\x6f\xa6\x53\
\x00\x69\
\x00\x63\x00\x6f\x00\x6e\x00\x73\
\x00\x1e\
\x0d\xce\xb3\x87\
\x00\x69\
\x00\x63\x00\x5f\x00\x61\x00\x64\x00\x64\x00\x5f\x00\x63\x00\x69\x00\x72\x00\x63\x00\x6c\x00\x65\x00\x5f\x00\x6f\x00\x75\x00\x74\
\x00\x6c\x00\x69\x00\x6e\x00\x65\x00\x5f\x00\x34\x00\x38\x00\x70\x00\x78\x00\x2e\x00\x73\x00\x76\x00\x67\
\x00\x13\
\x0f\x34\x1f\x27\
\x00\x69\
\x00\x63\x00\x5f\x00\x61\x00\x72\x00\x63\x00\x68\x00\x69\x00\x76\x00\x65\x00\x5f\x00\x34\x00\x38\x00\x70\x00\x78\x00\x2e\x00\x73\
\x00\x76\x00\x67\
\x00\x18\
\x02\xe6\x9c\xa7\
\x00\x69\
\x00\x63\x00\x5f\x00\x6c\x00\x61\x00\x79\x00\x65\x00\x72\x00\x73\x00\x5f\x00\x63\x00\x6c\x00\x65\x00\x61\x00\x72\x00\x5f\x00\x34\
\x00\x38\x00\x70\x00\x78\x00\x2e\x00\x73\x00\x76\x00\x67\
\x00\x12\
\x0b\x0d\xf4\x27\
\x00\x69\
\x00\x63\x00\x5f\x00\x6c\x00\x61\x00\x79\x00\x65\x00\x72\x00\x73\x00\x5f\x00\x34\x00\x38\x00\x70\x00\x78\x00\x2e\x00\x73\x00\x76\
\x00\x67\
"
qt_resource_struct_v1 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x02\x00\x00\x00\x03\
\x00\x00\x00\x4a\x00\x02\x00\x00\x00\x04\x00\x00\x00\x05\
\x00\x00\x00\x34\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x00\xc8\x00\x00\x00\x00\x00\x01\x00\x00\x03\xdb\
\x00\x00\x00\xfe\x00\x00\x00\x00\x00\x01\x00\x00\x05\x37\
\x00\x00\x00\x5a\x00\x00\x00\x00\x00\x01\x00\x00\x01\x68\
\x00\x00\x00\x9c\x00\x00\x00\x00\x00\x01\x00\x00\x02\x7f\
"
qt_resource_struct_v2 = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x14\x00\x02\x00\x00\x00\x02\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x4a\x00\x02\x00\x00\x00\x04\x00\x00\x00\x05\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x34\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01\x77\x32\xaa\x4d\xf3\
\x00\x00\x00\xc8\x00\x00\x00\x00\x00\x01\x00\x00\x03\xdb\
\x00\x00\x01\x77\x32\xaa\x4d\xf3\
\x00\x00\x00\xfe\x00\x00\x00\x00\x00\x01\x00\x00\x05\x37\
\x00\x00\x01\x77\x32\xaa\x4d\xf3\
\x00\x00\x00\x5a\x00\x00\x00\x00\x00\x01\x00\x00\x01\x68\
\x00\x00\x01\x77\x32\xaa\x4d\xf3\
\x00\x00\x00\x9c\x00\x00\x00\x00\x00\x01\x00\x00\x02\x7f\
\x00\x00\x01\x77\x32\xaa\x4d\xf3\
"
qt_version = [int(v) for v in QtCore.qVersion().split('.')]
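# Qt 5.8 introduced the version-2 rcc format (each entry carries a last-modified
# timestamp), so register the v2 structure on newer Qt and fall back to v1 otherwise.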
if qt_version < [5, 8, 0]:
rcc_version = 1
qt_resource_struct = qt_resource_struct_v1
else:
rcc_version = 2
qt_resource_struct = qt_resource_struct_v2
def qInitResources():
QtCore.qRegisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(rcc_version, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| 46.947619 | 129 | 0.723299 |
79400212e6207882501f101dd79477965e3837e7 | 2,040 | py | Python | game-of-life.py | buha/game-of-life | 8f125631987142bae72d8b725d0973e80bb01755 | ["MIT"] | 1 | 2020-01-10T13:38:34.000Z | 2020-01-10T13:38:34.000Z | game-of-life.py | buha/game-of-life | 8f125631987142bae72d8b725d0973e80bb01755 | ["MIT"] | null | null | null | game-of-life.py | buha/game-of-life | 8f125631987142bae72d8b725d0973e80bb01755 | ["MIT"] | null | null | null
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow
from PyQt5.QtCore import Qt
import design
from random import randint
class MainWindow(QMainWindow):
def __init__(self, screen_width, screen_height, c1, c2):
super().__init__()
# build ui
self.ui = design.Ui_MainWindow()
self.ui.setupUi(self)
# resize the main window to a sensible value
self.resize(screen_width / 2, screen_height / 2)
# resize the graphics scene to match the window
uv = self.ui.graphicsView
uv.resize(screen_width, screen_height)
# create a random initial state for the universe
initial = [[(randint(0, 10) == 9) for i in range(uv.cols)] for j in range(uv.rows)]
uv.initialize(initial, c1, c2)
# start the animation directly
uv.start()
def resizeEvent(self, QResizeEvent):
        # resize the graphics scene to the full screen resolution
        # (uses the module-level screen_resolution set in the __main__ block below)
self.ui.graphicsView.resize(screen_resolution.width(), screen_resolution.height())
def keyPressEvent(self, QKeyEvent):
if QKeyEvent.key() == Qt.Key_F11:
if self.windowState() == Qt.WindowFullScreen:
self.showNormal()
else:
self.showFullScreen()
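# Command-line usage: game-of-life.py [c1 c2], where c1 and c2 are integers between
# 1 and 9 that are forwarded to the universe widget's initialize() call above.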
if __name__ == '__main__':
c1 = 2
c2 = 3
try:
c1 = int(sys.argv[1])
if not (c1 > 0 and c1 < 10):
raise ValueError
c2 = int(sys.argv[2])
if not (c2 > 0 and c2 < 10):
raise ValueError
except IndexError:
pass
except ValueError:
print("c1 and c2 must be positive integers between 1 and 9\ngame-of-life [c1 c2]")
sys.exit(0)
# set up graphics
app = QApplication(sys.argv)
# get screen resolution and create the main window
screen_resolution = app.desktop().screenGeometry()
main = MainWindow(screen_resolution.width(), screen_resolution.height(), c1, c2)
# draw, launch qt app
main.show()
s = app.exec_()
# finish properly
sys.exit(s)
| 28.333333 | 91 | 0.621078 |
7940023c29266fd50e3a7e61955bfcda72eb1be3 | 1,596 | py | Python | rvusite/rvu/urls.py | craighagan/rvumanager | b313833bd49cdb36806a4ca4a33039f3d4bcf82e | ["Apache-2.0"] | null | null | null | rvusite/rvu/urls.py | craighagan/rvumanager | b313833bd49cdb36806a4ca4a33039f3d4bcf82e | ["Apache-2.0"] | null | null | null | rvusite/rvu/urls.py | craighagan/rvumanager | b313833bd49cdb36806a4ca4a33039f3d4bcf82e | ["Apache-2.0"] | null | null | null
from django.conf.urls import url
from django.utils import timezone
from django.views.generic import RedirectView
from . import views
def get_current_date_str():
now = timezone.now()
return timezone.template_localtime(now).strftime("%Y-%m-%d")
urlpatterns = [
url(r'^$', views.index, name='index'),
# ex: /patient_visit/5/
url(r'visit/edit/(?P<pk>[0-9]+)/$', views.PatientVisitUpdateView.as_view(), name='patient-visit-edit'),
url(r'visit/delete/(?P<pk>[0-9]+)/$', views.PatientVisitDeleteView.as_view(), name='patient-visit-delete'),
url(r'visit/(?P<pk>[0-9]+)/$', views.PatientVisitDetailView.as_view(), name='patient-visit-detail'),
url(r'visit/all/$', views.PatientVisitListView.as_view(), name='all_patient_visits'),
url(r'visit/fyear/$', views.PatientVisitListThisFiscalYearView.as_view(), name='patient_visits'),
url(r'visit/(?P<visit_date>\d\d\d\d-\d\d-\d\d)/$', views.PatientVisitListView.as_view(), name='patient_visits_day'),
url(r'visit/today/$', RedirectView.as_view(permanent=False, url='/rvu/visit/%s' % get_current_date_str()), name='patient_visits_today'),
url(r'billing_codes/$', views.BillingCodeListView.as_view(), name='billing_codes'),
url(r'providers/$', views.ProviderListView.as_view(), name='providers'),
url(r'visit/new/$', views.CreatePatientVisitView.as_view(), name='patient-visit-create'),
url(r'report/daily/$', views.daily_report, name="daily_report"),
url(r'report/weekly/$', views.weekly_report, name="weekly_report"),
url(r'report/monthly/$', views.monthly_report, name="monthly_report"),
]
| 59.111111 | 140 | 0.708647 |
794002515a5b8cca82b9aac07ba69e56177f6077 | 762 | py | Python | tests/weight_test.py | vfurci200/module9_loan | 6239cc9e03bcfb1371b14d7967e490f3ee11b26a | ["MIT"] | null | null | null | tests/weight_test.py | vfurci200/module9_loan | 6239cc9e03bcfb1371b14d7967e490f3ee11b26a | ["MIT"] | null | null | null | tests/weight_test.py | vfurci200/module9_loan | 6239cc9e03bcfb1371b14d7967e490f3ee11b26a | ["MIT"] | null | null | null
from brownie import accounts, Bank, chain
import pytest
def test_lender_weight_calculation(bank, chain):
    balance = bank.balance()
    bank.deposit("10 ether", {'from': accounts[0], 'amount': "10 ether"})
    w_ = bank.percent(100000000, 10000000000000000000, 16)
    assert w_ > 0
# def test_lenders_weight_calculation(bank,chain):
# bank.deposit( "40 ether", {'from': accounts[0],'amount': "40 ether"})
# bank.deposit( "20 ether", {'from': accounts[3],'amount': "20 ether"})
# bank.borrow( "10 ether", {'from': accounts[1]})
# balance0 = accounts[0].balance();
# balance3 = accounts[3].balance();
# w_0 = bank.calculate_weight(accounts[0])
# w_3= bank.calculate_weight(accounts[3])
# assert w_0 == 66
# assert w_3 == 33
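# These tests rely on a `bank` pytest fixture provided elsewhere (typically in a
# conftest.py). A minimal sketch of such a fixture, with hypothetical details:
#
#   @pytest.fixture
#   def bank(Bank, accounts):
#       return Bank.deploy({'from': accounts[0]})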
| 34.636364 | 75 | 0.653543 |
79400266eb1092298125055baa4bb287d72e0f64 | 548 | py | Python | utils/scripts/OOOlevelGen/src/levels/Big_Fella.py | fullscreennl/monkeyswipe | c56192e202674dd5ab18023f6cf14cf51e95fbd0 | ["MIT"] | null | null | null | utils/scripts/OOOlevelGen/src/levels/Big_Fella.py | fullscreennl/monkeyswipe | c56192e202674dd5ab18023f6cf14cf51e95fbd0 | ["MIT"] | null | null | null | utils/scripts/OOOlevelGen/src/levels/Big_Fella.py | fullscreennl/monkeyswipe | c56192e202674dd5ab18023f6cf14cf51e95fbd0 | ["MIT"] | null | null | null
import LevelBuilder
from sprites import *
def render(name,bg):
lb = LevelBuilder.LevelBuilder(name+".plist",background=bg)
lb.addObject(Friend.FriendSprite(x=263, y=142,width=268,height=268,angle='0',restitution=0.2,static='false',friction=0.5,density=20 ))
lb.addObject(Enemy.EnemySprite(x=263, y=303,width=32,height=32,angle='0',restitution=0.2,static='false',friction=0.5,density=20 ))
lb.addObject(Star.StarSprite(x=432, y=26,width=32,height=32))
lb.addObject(Hero.HeroSprite(x=29, y=21,width=32,height=32))
lb.render()
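# Presumably invoked by the OOOlevelGen driver with a level name and background,
# e.g. render('Big_Fella', 'jungle_bg'), which writes the level out as Big_Fella.plist.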
| 54.8 | 138 | 0.729927 |
794002af4f8ac7a6ac3b496a55e8e01ff48c1a40 | 5,911 | py | Python | tests/test_general_linear.py | DanielBrooks75/geomstats | 7cbb90b25c719c635413e26be1888b1286623e1a | ["MIT"] | null | null | null | tests/test_general_linear.py | DanielBrooks75/geomstats | 7cbb90b25c719c635413e26be1888b1286623e1a | ["MIT"] | null | null | null | tests/test_general_linear.py | DanielBrooks75/geomstats | 7cbb90b25c719c635413e26be1888b1286623e1a | ["MIT"] | null | null | null
"""Unit tests for the General Linear group."""
import warnings
import geomstats.backend as gs
import geomstats.tests
from geomstats.geometry.general_linear import GeneralLinear
RTOL = 1e-5
class TestGeneralLinear(geomstats.tests.TestCase):
def setUp(self):
gs.random.seed(1234)
self.n = 3
self.n_samples = 2
self.group = GeneralLinear(n=self.n)
warnings.simplefilter('ignore', category=ImportWarning)
def test_belongs_shape(self):
mat = gs.eye(3)
result = self.group.belongs(mat)
self.assertAllClose(gs.shape(result), ())
mat = gs.ones((3, 3))
result = self.group.belongs(mat)
self.assertAllClose(gs.shape(result), ())
def test_belongs(self):
mat = gs.eye(3)
result = self.group.belongs(mat)
expected = True
self.assertAllClose(result, expected)
mat = gs.ones((3, 3))
result = self.group.belongs(mat)
expected = False
self.assertAllClose(result, expected)
def test_belongs_vectorization_shape(self):
mats = gs.array([gs.eye(3), gs.ones((3, 3))])
result = self.group.belongs(mats)
self.assertAllClose(gs.shape(result), (2,))
def test_belongs_vectorization(self):
mats = gs.array([gs.eye(3), gs.ones((3, 3))])
result = self.group.belongs(mats)
expected = gs.array([True, False])
self.assertAllClose(result, expected)
def test_random_and_belongs(self):
point = self.group.random_point()
result = self.group.belongs(point)
expected = True
self.assertAllClose(result, expected)
def test_random_and_belongs_vectorization(self):
n_samples = 4
point = self.group.random_point(n_samples)
result = self.group.belongs(point)
expected = gs.array([True] * n_samples)
self.assertAllClose(result, expected)
def test_replace_values(self):
points = gs.ones((3, 3, 3))
new_points = gs.zeros((2, 3, 3))
indcs = [True, False, True]
update = self.group._replace_values(points, new_points, indcs)
self.assertAllClose(update, gs.stack(
[gs.zeros((3, 3)), gs.ones((3, 3)), gs.zeros((3, 3))]))
def test_compose(self):
mat1 = gs.array([
[1., 0.],
[0., 2.]])
mat2 = gs.array([
[2., 0.],
[0., 1.]])
result = self.group.compose(mat1, mat2)
expected = 2. * GeneralLinear(2).identity
self.assertAllClose(result, expected)
def test_inv(self):
mat_a = gs.array([
[1., 2., 3.],
[4., 5., 6.],
[7., 8., 10.]])
imat_a = 1. / 3. * gs.array([
[-2., -4., 3.],
[-2., 11., -6.],
[3., -6., 3.]])
expected = imat_a
result = self.group.inverse(mat_a)
self.assertAllClose(result, expected)
def test_inv_vectorized(self):
mat_a = gs.array([
[0., 1., 0.],
[1., 0., 0.],
[0., 0., 1.]])
mat_b = - gs.eye(3, 3)
result = self.group.inverse(gs.array([mat_a, mat_b]))
expected = gs.array([mat_a, mat_b])
self.assertAllClose(result, expected)
@geomstats.tests.np_and_tf_only
def test_log_and_exp(self):
point = 5 * gs.eye(self.n)
group_log = self.group.log(point)
result = self.group.exp(group_log)
expected = point
self.assertAllClose(result, expected)
def test_exp_vectorization(self):
point = gs.array([[[2., 0., 0.],
[0., 3., 0.],
[0., 0., 4.]],
[[1., 0., 0.],
[0., 5., 0.],
[0., 0., 6.]]])
expected = gs.array([[[7.38905609, 0., 0.],
[0., 20.0855369, 0.],
[0., 0., 54.5981500]],
[[2.718281828, 0., 0.],
[0., 148.413159, 0.],
[0., 0., 403.42879349]]])
result = self.group.exp(point)
self.assertAllClose(result, expected, rtol=1e-3)
@geomstats.tests.np_and_tf_only
def test_log_vectorization(self):
point = gs.array([[[2., 0., 0.],
[0., 3., 0.],
[0., 0., 4.]],
[[1., 0., 0.],
[0., 5., 0.],
[0., 0., 6.]]])
expected = gs.array([[[0.693147180, 0., 0.],
[0., 1.09861228866, 0.],
[0., 0., 1.38629436]],
[[0., 0., 0.],
[0., 1.609437912, 0.],
[0., 0., 1.79175946]]])
result = self.group.log(point)
self.assertAllClose(result, expected, atol=1e-4)
@geomstats.tests.np_and_tf_only
def test_orbit(self):
point = gs.array([
[gs.exp(4.), 0.],
[0., gs.exp(2.)]])
sqrt = gs.array([
[gs.exp(2.), 0.],
[0., gs.exp(1.)]])
idty = GeneralLinear(2).identity
path = GeneralLinear(2).orbit(point)
time = gs.linspace(0., 1., 3)
result = path(time)
expected = gs.array([idty, sqrt, point])
self.assertAllClose(result, expected)
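        # The orbit is the one-parameter subgroup t -> exp(t * log(point)), so the
        # path's midpoint (t = 0.5) is the matrix square root checked above.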
@geomstats.tests.np_and_tf_only
def test_expm_and_logm_vectorization_symmetric(self):
point = gs.array([[[2., 0., 0.],
[0., 3., 0.],
[0., 0., 4.]],
[[1., 0., 0.],
[0., 5., 0.],
[0., 0., 6.]]])
result = self.group.exp(self.group.log(point))
expected = point
self.assertAllClose(result, expected)
| 33.207865 | 70 | 0.48469 |
794004570de33bd832ba79b10400d6d31492a292 | 2,237 | py | Python | samples/demo_lcdscreen_move.py | PierreRust/apig_charlcd | 9b5a5881e9fd523f6b559a7570f9f6d0d9ff7e42 | ["Apache-2.0"] | 2 | 2015-10-14T10:40:40.000Z | 2016-01-04T04:43:55.000Z | samples/demo_lcdscreen_move.py | PierreRust/apig_charlcd | 9b5a5881e9fd523f6b559a7570f9f6d0d9ff7e42 | ["Apache-2.0"] | null | null | null | samples/demo_lcdscreen_move.py | PierreRust/apig_charlcd | 9b5a5881e9fd523f6b559a7570f9f6d0d9ff7e42 | ["Apache-2.0"] | null | null | null
import asyncio
from apig_charlcd import LcdScreen
import apigpio as apig
# Define GPIO to LCD mapping
LCD_RS = 22
LCD_E = 27
LCD_D4 = 19
LCD_D5 = 13
LCD_D6 = 6
LCD_D7 = 5
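# Pin numbers follow the Broadcom (BCM) numbering used by pigpio; only the upper
# data lines D4-D7 are wired, i.e. the display is driven in 4-bit mode.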
@asyncio.coroutine
def write_diag(lcd_screen):
yield from lcd_screen.move_to(0, 0)
yield from lcd_screen.write_char(ord('A'))
yield from lcd_screen.move_to(1, 1)
yield from lcd_screen.write_char(ord('B'))
yield from lcd_screen.move_to(2, 2)
yield from lcd_screen.write_char(ord('C'))
yield from lcd_screen.move_to(3, 3)
yield from lcd_screen.write_char(ord('D'))
@asyncio.coroutine
def write_at(lcd_screen):
yield from lcd_screen.write_at(0, 0, 'T-L')
yield from lcd_screen.write_at(17, 0, 'T-R')
yield from lcd_screen.write_at(0, 3, 'B-L')
yield from lcd_screen.write_at(17, 3, 'B-R')
@asyncio.coroutine
def write_move_home(lcd_screen):
# Write anywhere on the screen
yield from lcd_screen.move_to(10, 2)
yield from lcd_screen.write_char(0b01111111)
yield from lcd_screen.move_to(11, 2)
yield from lcd_screen.write_char(0b01111110)
# get back to 0,0 and write again
yield from lcd_screen.home()
yield from lcd_screen.write_char(ord('a'))
@asyncio.coroutine
def demo(pi, address):
yield from pi.connect(address)
lcd_screen = LcdScreen(pi, LCD_E, LCD_RS, LCD_D4, LCD_D5, LCD_D6, LCD_D7)
yield from lcd_screen.init()
while True:
yield from asyncio.sleep(1)
yield from write_diag(lcd_screen)
yield from asyncio.sleep(1)
yield from lcd_screen.clear()
yield from asyncio.sleep(1)
yield from write_move_home(lcd_screen)
yield from asyncio.sleep(1)
yield from lcd_screen.clear()
yield from asyncio.sleep(1)
yield from write_at(lcd_screen)
yield from asyncio.sleep(1)
yield from lcd_screen.clear()
yield from asyncio.sleep(1)
if __name__ == '__main__':
p = None
loop = asyncio.get_event_loop()
try:
p = apig.Pi(loop)
a = ('192.168.1.12', 8888)
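        # (host, port) of the remote pigpiod daemon; 8888 is pigpiod's default port,
        # adjust the address to match your setup.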
loop.run_until_complete(demo(p, a))
except KeyboardInterrupt:
pass
finally:
if p is not None:
loop.run_until_complete(p.stop())
| 22.826531 | 77 | 0.669647 |
794004d91b39ebd16d6d81a93e05b50bccb20bbe | 2,183 | py | Python | imcsdk/mometa/aaa/AaaUserPolicy.py | kgrozis/UCS-CIMC-Scripts | 44069ee853299fe5aeed023e8c998ce2534b8d8b | ["Apache-2.0"] | null | null | null | imcsdk/mometa/aaa/AaaUserPolicy.py | kgrozis/UCS-CIMC-Scripts | 44069ee853299fe5aeed023e8c998ce2534b8d8b | ["Apache-2.0"] | null | null | null | imcsdk/mometa/aaa/AaaUserPolicy.py | kgrozis/UCS-CIMC-Scripts | 44069ee853299fe5aeed023e8c998ce2534b8d8b | ["Apache-2.0"] | null | null | null
"""This module contains the general information for AaaUserPolicy ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import ImcVersion, MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class AaaUserPolicyConsts():
pass
class AaaUserPolicy(ManagedObject):
"""This is AaaUserPolicy class."""
consts = AaaUserPolicyConsts()
naming_props = set([])
mo_meta = MoMeta("AaaUserPolicy", "aaaUserPolicy", "policy", VersionMeta.Version209c, "InputOutput", 0x1f, [], ["admin", "user"], [u'aaaUserEp'], [], [None])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version209c, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version209c, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
"password_policy_rules": MoPropertyMeta("password_policy_rules", "passwordPolicyRules", "string", VersionMeta.Version209c, MoPropertyMeta.READ_ONLY, None, 0, 2500, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version209c, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version209c, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"user_password_policy": MoPropertyMeta("user_password_policy", "userPasswordPolicy", "string", VersionMeta.Version209c, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["Disabled", "Enabled", "disabled", "enabled"], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"passwordPolicyRules": "password_policy_rules",
"rn": "rn",
"status": "status",
"userPasswordPolicy": "user_password_policy",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.password_policy_rules = None
self.status = None
self.user_password_policy = None
ManagedObject.__init__(self, "AaaUserPolicy", parent_mo_or_dn, **kwargs)
| 46.446809 | 232 | 0.669262 |
794004e5fe370528dfa998dad206758c4046611d | 131,139 | py | Python | pygsti/report/workspace.py | drewrisinger/pyGSTi | dd4ad669931c7f75e026456470cf33ac5b682d0d | ["Apache-2.0"] | 1 | 2021-12-19T15:11:09.000Z | 2021-12-19T15:11:09.000Z | pygsti/report/workspace.py | drewrisinger/pyGSTi | dd4ad669931c7f75e026456470cf33ac5b682d0d | ["Apache-2.0"] | null | null | null | pygsti/report/workspace.py | drewrisinger/pyGSTi | dd4ad669931c7f75e026456470cf33ac5b682d0d | ["Apache-2.0"] | null | null | null
""" Defines the Workspace class and supporting functionality."""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import itertools as _itertools
import collections as _collections
import os as _os
import shutil as _shutil
import numpy as _np
#import uuid as _uuid
import random as _random
import inspect as _inspect
import pickle as _pickle
import subprocess as _subprocess
from .. import objects as _objs
from ..tools import compattools as _compat
from ..objects.smartcache import CustomDigestError as _CustomDigestError
from . import plotly_plot_ex as _plotly_ex
from . import merge_helpers as _merge
from pprint import pprint as _pprint
_PYGSTI_WORKSPACE_INITIALIZED = False
DEFAULT_PLOTLY_TEMPLATE = 'none'
def in_ipython_notebook():
"""Returns true if called from within an IPython/jupyter notebook"""
try:
# 'ZMQInteractiveShell' in a notebook, 'TerminalInteractiveShell' in IPython REPL, and fails elsewhere.
shell = get_ipython().__class__.__name__
return shell == 'ZMQInteractiveShell'
except NameError:
return False
def display_ipynb(content):
"""Render HTML content to an IPython notebook cell display"""
from IPython.core.display import display, HTML
display(HTML(content))
def enable_plotly_pickling():
"""
Hacks the plotly python library so that figures may be pickled and
un-pickled. This hack should be used only temporarily - so all pickling
and un-pickling should be done between calls to
:func:`enable_plotly_pickling` and :func:`disable_plotly_pickling`.
"""
import plotly
if int(plotly.__version__.split(".")[0]) >= 3: # plotly version 3 or higher
BLT = plotly.basedatatypes.BaseLayoutType
def fix_getattr(self, prop):
if '_subplotid_props' not in self.__dict__:
self._subplotid_props = set()
return self.__saved_getattr__(prop)
if hasattr(BLT, '__getattr__'):
BLT.__saved_getattr__ = BLT.__getattr__
del BLT.__getattr__
BLT.__getattr__ = fix_getattr
else:
def setitem(self, key, value, _raise=True):
"""Sets an item of a dict using the standard dict's __setitem__
to restore normal dict behavior"""
return dict.__setitem__(self, key, value)
plotlyDictClass = plotly.graph_objs.Figure.__bases__[0]
if hasattr(plotlyDictClass, '__setitem__'):
plotlyDictClass.__saved_setitem__ = plotlyDictClass.__setitem__
if hasattr(plotlyDictClass, '__getattr__'):
plotlyDictClass.__saved_getattr__ = plotlyDictClass.__getattr__
del plotlyDictClass.__getattr__
if hasattr(plotlyDictClass, '__setattr__'):
plotlyDictClass.__saved_setattr__ = plotlyDictClass.__setattr__
del plotlyDictClass.__setattr__
plotlyDictClass.__setitem__ = setitem
def disable_plotly_pickling():
""" Reverses the effect of :func:`enable_plotly_pickling` """
import plotly
if int(plotly.__version__.split(".")[0]) >= 3: # plotly version 3 or higher
BLT = plotly.basedatatypes.BaseLayoutType
if hasattr(BLT, '__saved_getattr__'):
BLT.__getattr__ = BLT.__saved_getattr__
del BLT.__saved_getattr__
else:
plotlyDictClass = plotly.graph_objs.Figure.__bases__[0]
if hasattr(plotlyDictClass, '__saved_setitem__'):
plotlyDictClass.__setitem__ = plotlyDictClass.__saved_setitem__
del plotlyDictClass.__saved_setitem__
if hasattr(plotlyDictClass, '__saved_getattr__'):
plotlyDictClass.__getattr__ = plotlyDictClass.__saved_getattr__
del plotlyDictClass.__saved_getattr__
if hasattr(plotlyDictClass, '__saved_setattr__'):
plotlyDictClass.__setattr__ = plotlyDictClass.__saved_setattr__
del plotlyDictClass.__saved_setattr__
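# Typical pattern (this mirrors what Workspace.save_cache/load_cache do below):
#
#   enable_plotly_pickling()
#   with open('workspace_cache.pkl', 'wb') as f:
#       _pickle.dump(obj_containing_plotly_figures, f)
#   disable_plotly_pickling()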
def ws_custom_digest(md5, v):
""" A "digest" function for hashing several special types"""
if isinstance(v, NotApplicable):
md5.update("NOTAPPLICABLE".encode('utf-8'))
elif isinstance(v, SwitchValue):
md5.update(v.base.tostring()) # don't recurse to parent switchboard
else:
raise _CustomDigestError()
def randomID():
""" Returns a random DOM ID """
return str(int(1000000 * _random.random()))
#return str(_uuid.uuid4().hex) #alternative
class Workspace(object):
"""
Central to data analysis, Workspace objects facilitate the building
of reports and dashboards. In particular, they serve as a:
- factory for tables, plots, and other types of output
- cache manager to optimize the construction of such output
- serialization manager for saving and loading analysis variables
Workspace objects are typically used either 1) within an ipython
notebook to interactively build a report/dashboard, or 2) within
a script to build a hardcoded ("fixed") report/dashboard.
"""
def __init__(self, cachefile=None):
"""
Initialize a Workspace object.
Parameters
----------
cachefile : str, optional
filename with cached workspace results
"""
self._register_components(False)
self.smartCache = _objs.SmartCache()
if cachefile is not None:
self.load_cache(cachefile)
self.smartCache.add_digest(ws_custom_digest)
def save_cache(self, cachefile, showUnpickled=False):
"""
Save this Workspace's cache to a file.
Parameters
----------
cachefile : str
The filename to save the cache to.
showUnpickled : bool, optional
Whether to print quantities (keys) of cache that could not be
saved because they were not pickle-able.
Returns
-------
None
"""
with open(cachefile, 'wb') as outfile:
enable_plotly_pickling()
_pickle.dump(self.smartCache, outfile)
disable_plotly_pickling()
if showUnpickled:
print('Unpickled keys:')
_pprint(self.smartCache.unpickleable)
def load_cache(self, cachefile):
"""
Load this Workspace's cache from `cachefile`.
Parameters
----------
cachefile : str
The filename to load the cache from.
Returns
-------
None
"""
with open(cachefile, 'rb') as infile:
enable_plotly_pickling()
oldCache = _pickle.load(infile).cache
disable_plotly_pickling()
for v in oldCache.values():
if isinstance(v, WorkspaceOutput): # hasattr(v,'ws') == True for plotly dicts (why?)
print('Updated {} object to set ws to self'.format(type(v)))
v.ws = self
self.smartCache.cache.update(oldCache)
def __getstate__(self):
return {'smartCache': self.smartCache}
def __setstate__(self, state_dict):
self._register_components(False)
self.smartCache = state_dict['smartCache']
def _makefactory(self, cls, autodisplay): # , printer=_objs.VerbosityPrinter(1)):
# XXX this indirection is so wild -- can we please rewrite directly?
#Manipulate argument list of cls.__init__
argspec = _inspect.getargspec(cls.__init__)
argnames = argspec[0]
assert(argnames[0] == 'self' and argnames[1] == 'ws'), \
"__init__ must begin with (self, ws, ...)"
factoryfn_argnames = argnames[2:] # strip off self & ws args
newargspec = (factoryfn_argnames,) + argspec[1:]
#Define a new factory function with appropriate signature
signature = _inspect.formatargspec(
formatvalue=lambda val: "", *newargspec)
signature = signature[1:-1] # strip off parenthesis from ends of "(signature)"
if autodisplay:
factory_func_def = (
'def factoryfn(%(signature)s):\n'
' ret = cls(self, %(signature)s); ret.display(); return ret' %
{'signature': signature})
else:
factory_func_def = (
'def factoryfn(%(signature)s):\n'
' return cls(self, %(signature)s)' %
{'signature': signature})
#print("FACTORY FN DEF = \n",new_func)
exec_globals = {'cls': cls, 'self': self}
exec(factory_func_def, exec_globals)
factoryfn = exec_globals['factoryfn']
#Copy cls.__init__ info over to factory function
factoryfn.__name__ = cls.__init__.__name__
factoryfn.__doc__ = cls.__init__.__doc__
factoryfn.__module__ = cls.__init__.__module__
factoryfn.__dict__ = cls.__init__.__dict__
factoryfn.__defaults__ = cls.__init__.__defaults__
return factoryfn
def _register_components(self, autodisplay):
# "register" components
from . import workspacetables as _wt
from . import workspaceplots as _wp
from . import workspacetexts as _wtxt
def makefactory(cls): return self._makefactory(cls, autodisplay)
self.Switchboard = makefactory(Switchboard)
self.NotApplicable = makefactory(NotApplicable)
#Tables
# Circuits
self.CircuitTable = makefactory(_wt.CircuitTable)
# Spam & Gates
self.SpamTable = makefactory(_wt.SpamTable)
self.SpamParametersTable = makefactory(_wt.SpamParametersTable)
self.GatesTable = makefactory(_wt.GatesTable)
self.ChoiTable = makefactory(_wt.ChoiTable)
# Spam & Gates vs. a target
self.SpamVsTargetTable = makefactory(_wt.SpamVsTargetTable)
self.ModelVsTargetTable = makefactory(_wt.ModelVsTargetTable)
self.GatesVsTargetTable = makefactory(_wt.GatesVsTargetTable)
self.GatesSingleMetricTable = makefactory(_wt.GatesSingleMetricTable)
self.GateEigenvalueTable = makefactory(_wt.GateEigenvalueTable)
self.ErrgenTable = makefactory(_wt.ErrgenTable)
self.GaugeRobustErrgenTable = makefactory(_wt.GaugeRobustErrgenTable)
self.GaugeRobustModelTable = makefactory(_wt.GaugeRobustModelTable)
self.GaugeRobustMetricTable = makefactory(_wt.GaugeRobustMetricTable)
self.NQubitErrgenTable = makefactory(_wt.NQubitErrgenTable)
self.StandardErrgenTable = makefactory(_wt.StandardErrgenTable)
# Specific to 1Q gates
self.GateDecompTable = makefactory(_wt.GateDecompTable)
self.old_GateDecompTable = makefactory(_wt.old_GateDecompTable)
self.old_RotationAxisVsTargetTable = makefactory(_wt.old_RotationAxisVsTargetTable)
self.old_RotationAxisTable = makefactory(_wt.old_RotationAxisTable)
# goodness of fit
self.FitComparisonTable = makefactory(_wt.FitComparisonTable)
self.WildcardBudgetTable = makefactory(_wt.WildcardBudgetTable)
#Specifically designed for reports
self.BlankTable = makefactory(_wt.BlankTable)
self.DataSetOverviewTable = makefactory(_wt.DataSetOverviewTable)
self.GaugeOptParamsTable = makefactory(_wt.GaugeOptParamsTable)
self.MetadataTable = makefactory(_wt.MetadataTable)
self.SoftwareEnvTable = makefactory(_wt.SoftwareEnvTable)
self.ProfilerTable = makefactory(_wt.ProfilerTable)
self.ExampleTable = makefactory(_wt.ExampleTable)
#Plots
self.ColorBoxPlot = makefactory(_wp.ColorBoxPlot)
self.BoxKeyPlot = makefactory(_wp.BoxKeyPlot)
self.MatrixPlot = makefactory(_wp.MatrixPlot)
self.GateMatrixPlot = makefactory(_wp.GateMatrixPlot)
self.PolarEigenvaluePlot = makefactory(_wp.PolarEigenvaluePlot)
self.ProjectionsBoxPlot = makefactory(_wp.ProjectionsBoxPlot)
self.ChoiEigenvalueBarPlot = makefactory(_wp.ChoiEigenvalueBarPlot)
self.GramMatrixBarPlot = makefactory(_wp.GramMatrixBarPlot)
self.FitComparisonBarPlot = makefactory(_wp.FitComparisonBarPlot)
self.FitComparisonBoxPlot = makefactory(_wp.FitComparisonBoxPlot)
self.DatasetComparisonHistogramPlot = makefactory(_wp.DatasetComparisonHistogramPlot)
self.DatasetComparisonSummaryPlot = makefactory(_wp.DatasetComparisonSummaryPlot)
self.RandomizedBenchmarkingPlot = makefactory(_wp.RandomizedBenchmarkingPlot)
#Text blocks
self.StdoutText = makefactory(_wtxt.StdoutText)
#Extras
from ..extras import idletomography as _idt
self.IdleTomographyIntrinsicErrorsTable = makefactory(_idt.IdleTomographyIntrinsicErrorsTable)
self.IdleTomographyObservedRatePlot = makefactory(_idt.IdleTomographyObservedRatePlot)
self.IdleTomographyObservedRatesTable = makefactory(_idt.IdleTomographyObservedRatesTable)
self.IdleTomographyObservedRatesForIntrinsicRateTable = makefactory(
_idt.IdleTomographyObservedRatesForIntrinsicRateTable)
from ..extras.drift import driftreport as _driftrpt
self.DriftSummaryTable = makefactory(_driftrpt.DriftSummaryTable)
self.DriftDetailsTable = makefactory(_driftrpt.DriftDetailsTable)
self.PowerSpectraPlot = makefactory(_driftrpt.PowerSpectraPlot)
self.ProbTrajectoriesPlot = makefactory(_driftrpt.ProbTrajectoriesPlot)
self.GermFiducialProbTrajectoriesPlot = makefactory(_driftrpt.GermFiducialProbTrajectoriesPlot)
self.GermFiducialPowerSpectraPlot = makefactory(_driftrpt.GermFiducialPowerSpectraPlot)
def init_notebook_mode(self, connected=False, autodisplay=False):
"""
Initialize this Workspace for use in an iPython notebook environment.
This function should be called prior to using the Workspace when
working within an iPython notebook.
Parameters
----------
connected : bool (optional)
Whether to assume you are connected to the internet. If you are,
then setting this to `True` allows initialization to rely on web-
hosted resources which will reduce the overall size of your
notebook.
autodisplay : bool (optional)
Whether to automatically display workspace objects after they are
created.
Returns
-------
None
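        Examples
        --------
        A minimal sketch of typical notebook setup (this assumes the code is
        run from a Jupyter/IPython notebook cell and that the Workspace was
        constructed with default arguments)::
            ws = Workspace()
            ws.init_notebook_mode(connected=False, autodisplay=True)
            # subsequently-created outputs (tables/plots) display themselves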
"""
if not in_ipython_notebook():
raise ValueError('Only run `init_notebook_mode` from inside an IPython Notebook.')
global _PYGSTI_WORKSPACE_INITIALIZED
script = ""
if not connected:
_merge.rsync_offline_dir(_os.getcwd())
#If offline, add JS to head that will load local requireJS and/or
# jQuery if needed (jupyter-exported html files always use CDN
# for these).
if not connected:
script += "<script src='offline/jupyterlibload.js'></script>\n"
#Load our custom plotly extension functions
script += _merge.insert_resource(connected, None, "pygsti_plotly_ex.js")
script += "<script type='text/javascript'> window.plotman = new PlotManager(); </script>"
# Load style sheets for displaying tables
script += _merge.insert_resource(connected, None, "pygsti_dataviz.css")
#jQueryUI_CSS = "https://code.jquery.com/ui/1.12.1/themes/base/jquery-ui.css"
jQueryUI_CSS = "https://code.jquery.com/ui/1.12.1/themes/smoothness/jquery-ui.css"
script += _merge.insert_resource(connected, jQueryUI_CSS, "smoothness-jquery-ui.css")
#To fix the UI tooltips within Jupyter (b/c they use an old/custom JQueryUI css file)
if connected:
imgURL = "https://code.jquery.com/ui/1.12.1/themes/smoothness/images/ui-icons_222222_256x240.png"
else:
imgURL = "offline/images/ui-icons_222222_256x240.png"
script += "<style>\n" + \
".tooltipbuttons .ui-button { padding: 0; border: 0; background: transparent; }\n" + \
".tooltipbuttons .ui-icon { background-image: url(\"%s\"); margin-top: 0; }\n" % imgURL + \
"</style>"
# Note: within a jupyter notebook, the requireJS base path appears
# to be "/static", so just setting the path to "offline/myfile"
# would attempt to load "/static/offline/myfile.js" which points
# somewhere like .../site-packages/notebook/static/offline/myfile".
# So:
# - when in a notebook, the path needs to be "../files" followed
# by the notebook's path, which we can obtain via the notebook JS
# object.
# - when *not* in a notebook, the requireJS base defaults to the
# current page, so just using "offline/myfile" works fine then.
#Tell require.js where jQueryUI and Katex are
if connected:
reqscript = (
"<script>"
"console.log('ONLINE - using CDN paths');"
"requirejs.config({{ "
" paths: {{ 'jquery-UI': ['{jqueryui}'],"
" 'katex': ['{katex}'],"
" 'autorender': ['{auto}'] }},"
"}});"
"require(['jquery', 'jquery-UI'],function($,ui) {{"
" window.jQueryUI=ui; console.log('jquery-UI loaded'); }});"
"require(['katex', 'autorender'],function(katex,auto) {{"
" window.katex=katex; console.log('Katex loaded'); }});"
"</script>"
).format(jqueryui="https://code.jquery.com/ui/1.12.1/jquery-ui.min",
katex="https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.7.1/katex.min.js",
auto="https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.7.1/contrib/auto-render.min.js")
else:
reqscript = (
"<script>"
"var pth;"
"if(typeof IPython !== 'undefined') {{"
" var nb = IPython.notebook;"
" var relpath = nb.notebook_path.substr(0, nb.notebook_path.lastIndexOf('/') + 1 );"
" jqueryui_pth = '../files' + nb.base_url + relpath + '{jqueryui}';"
" katex_pth = '../files' + nb.base_url + relpath + '{katex}';"
" auto_pth = '../files' + nb.base_url + relpath + '{auto}';"
" console.log('IPYTHON DETECTED - using path ' + jqueryui_pth);"
"}}"
"else {{"
" jqueryui_pth = '{jqueryui}';"
" katex_pth = '{katex}';"
" auto_pth = '{auto}';"
" console.log('NO IPYTHON DETECTED - using path ' + jqueryui_pth);"
"}}"
"requirejs.config({{ "
" paths: {{ 'jquery-UI': [jqueryui_pth], 'katex': [katex_pth], 'autorender': [auto_pth] }},"
"}});"
"require(['jquery', 'jquery-UI'],function($,ui) {{"
" window.jQueryUI=ui; console.log('jquery & jquery-UI loaded'); }});"
"require(['katex', 'autorender'],function(katex,auto) {{"
" window.katex=katex; console.log('Katex loaded'); }});"
"</script>"
).format(jqueryui="offline/jquery-ui.min",
katex="offline/katex.min",
auto="offline/auto-render.min")
script += reqscript
#Initialize Katex as a fallback if MathJax is unavailable (offline), OR,
# if MathJax is loaded, wait for plotly to load before rendering SVG text
# so math shows up properly in plots (maybe we could just use a require
# statement for this instead of polling?)
script += _merge.insert_resource(
connected, "https://cdnjs.cloudflare.com/ajax/libs/KaTeX/0.7.1/katex.min.css",
"katex.css")
script += (
"\n<script>"
"require(['jquery','katex','autorender'],function($,katex,renderMathInElement) {\n"
" var mathjaxTimer = setInterval( function() {\n"
" if(document.readyState === 'complete' || document.readyState === 'loaded') {\n"
" clearInterval(mathjaxTimer);\n"
" if(typeof MathJax === 'undefined') {\n"
" console.log('MATHJAX not found - attempting to typeset with Katex');\n"
" renderMathInElement(document.body, { delimiters: [\n"
" {left: '$$', right: '$$', display: true},\n"
" {left: '$', right: '$', display: false},\n"
" ] } );\n"
" }\n"
" else { //Mathjax is alive - wait for plotly\n"
" var waitForPlotly = setInterval( function() {\n"
" if( typeof(window.Plotly) !== 'undefined' ){\n"
" MathJax.Hub.Config({ SVG: { font: 'STIX-Web' }, displayAlign: 'center' });\n"
" MathJax.Hub.Queue(['setRenderer', MathJax.Hub, 'SVG']);\n"
" clearInterval(waitForPlotly);\n"
" }\n"
" }, 500 );\n"
" }\n"
" } //end readyState check \n"
" }, 500); //end setInterval \n"
"});\n"
'</script>\n')
# Initialize Plotly libraries
script += _plotly_ex.init_notebook_mode_ex(connected)
# Perform check to see what has been loaded
script += (
"<div id='notebook_load_status' style='font-style:italic;color:blue'>Loading...</div>\n"
"<script type='text/javascript'>\n"
" require(['jquery','jquery-UI','plotly','katex', 'autorender'],\n"
" function($,ui,Plotly,katex,auto) {\n"
" $(document).ready( function() {\n"
" var txt = '';\n"
" if( typeof($('#notebook_load_status').resizable) === 'undefined') {\n"
" txt += '<span class=\"failmsg\">JQueryUI not loaded correctly</span><br>';\n"
" }\n"
" if( typeof(Plotly.newPlot) === 'undefined') {\n"
" txt += '<span class=\"failmsg\">Plotly not loaded correctly</span><br>';\n"
" }\n"
" if(txt.length == 0) {\n"
" txt += '<span class=\"successmsg\">Notebook Initialization Complete</span>';\n"
" if( typeof MathJax !== 'undefined') {\n"
" txt += '<span class=\"successmsg2\"> (+MathJax)</span>';\n"
" } else {\n"
" txt += '<span class=\"successmsg2\"> (+KaTeX)</span>';\n"
" }\n"
" }\n"
" $('#notebook_load_status').html(txt);\n"
" }); });\n"
"</script>\n")
display_ipynb(script) # single call to display keeps things simple
_PYGSTI_WORKSPACE_INITIALIZED = True
self._register_components(autodisplay)
return
def switchedCompute(self, fn, *args):
"""
        Computes a function when some or all of its arguments are
        SwitchValue objects.
Caching is employed to avoid duplicating function evaluations which have
the same arguments. Note that the function itself doesn't need to deal
with SwitchValue objects, as this routine resolves such objects into a
series of function evaluations using the underlying value(s) within the
SwitchValue. This routine is primarily used internally for the
computation of tables and plots.
        If any of the arguments is an instance of `NotApplicable` then `fn`
is *not* evaluated and the instance is returned as the evaluation
result. If multiple arguments are `NotApplicable` instances, the
first is used as the result.
Parameters
----------
fn : function
The function to evaluate
args : list
The function's arguments
Returns
-------
fn_values : list
The function return values for all relevant sets of arguments.
Denote the length of this list by N.
switchboards : list
A list of all the relevant Switchboards used during the function
evaluation. Denote the length of this list by M.
switchboard_switch_indices : list
A list of length M whose elements are tuples containing the 0-based
indices of the relevant switches (i.e. those used by any of the
arguments) for each switchboard (element of `switchboards`).
switchpos_map : dict
A dictionary whose keys are switch positions, and whose values are
integers between 0 and N which index the element of `fn_values`
corresponding to the given switch positions. Each
"switch positions" key is a tuple of length M whose elements (one
per switchboard) are tuples of 0-based switch-position indices
indicating the position of the relevant switches of that
switchboard. Thus,
`len(key[i]) = len(switchboard_switch_indices[i])`, where `key`
is a dictionary key.
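        Examples
        --------
        A minimal sketch (the function `add_one` and the switchboard value
        `sb.x` below are hypothetical)::
            def add_one(x): return x + 1
            results = ws.switchedCompute(add_one, sb.x)
            fn_values, switchboards, switch_inds, pos_map = results
            # fn_values[pos_map[pos]] is add_one's result for switch positions `pos`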
"""
# Computation functions get stripped-down *value* args
        # (strip SwitchValue stuff away)
switchboards = []
switchBdInfo = []
nonSwitchedArgs = []
switchpos_map = {}
storedKeys = {}
resultValues = []
for i, arg in enumerate(args):
if isinstance(arg, SwitchValue):
isb = None
for j, sb in enumerate(switchboards):
if arg.parent is sb:
isb = j; break
else:
isb = len(switchboards)
switchboards.append(arg.parent)
switchBdInfo.append({
'argument indices': [], # indices of arguments that are children of this switchboard
                        'value names': [],  # names of switchboard values corresponding to each argument index
'switch indices': set() # indices of the switches that are actually used by the args
})
assert(isb is not None)
info = switchBdInfo[isb]
info['argument indices'].append(i)
info['value names'].append(arg.name)
info['switch indices'].update(arg.dependencies)
else:
nonSwitchedArgs.append((i, arg))
#print("DB: %d arguments" % len(args))
#print("DB: found %d switchboards" % len(switchboards))
#print("DB: switchBdInfo = ", switchBdInfo)
#print("DB: nonSwitchedArgs = ", nonSwitchedArgs)
#Create a list of lists, each list holding all of the relevant switch positions for each board
switch_positions = []
for isb, sb in enumerate(switchboards):
info = switchBdInfo[isb]
info['switch indices'] = list(info['switch indices']) # set -> list so definite order
switch_ranges = [list(range(len(sb.positionLabels[i])))
for i in info['switch indices']]
sb_switch_positions = list(_itertools.product(*switch_ranges))
# a list of all possible positions for the switches being
# used for the *single* board sb
switch_positions.append(sb_switch_positions)
#loop over all relevant switch configurations (across multiple switchboards)
for pos in _itertools.product(*switch_positions):
# pos[i] gives the switch configuration for the i-th switchboard
#fill in the arguments for our function call
argVals = [None] * len(args)
#first, iterate over all the switchboards
for sw_pos, sb, info in zip(pos, switchboards, switchBdInfo):
# sw_pos is a tuple of the info['switch indices'] switch positions for sb
sis = info['switch indices']
for nm, j in zip(info["value names"], info["argument indices"]):
value_swpos = [sw_pos[sis.index(k)] for k in sb[nm].dependencies]
# potentially a subset of sw_pos, contains only the switch positions
# relevant to the particular SwitchedValue named nm (also the j-th argument)
argVals[j] = sb[nm][tuple(value_swpos)] # tuple needed for proper indexing
#next, fill in the non-switched arguments
for j, arg in nonSwitchedArgs:
argVals[j] = arg
for v in argVals:
if isinstance(v, NotApplicable):
key = "NA"; result = v; break
else:
key, result = self.smartCache.cached_compute(fn, argVals)
if key not in storedKeys or key == 'INEFFECTIVE':
switchpos_map[pos] = len(resultValues)
storedKeys[key] = len(resultValues)
resultValues.append(result)
else:
switchpos_map[pos] = storedKeys[key]
switchboard_switch_indices = [info['switch indices'] for info in switchBdInfo]
return resultValues, switchboards, switchboard_switch_indices, switchpos_map
class Switchboard(_collections.OrderedDict):
"""
Encapsulates a render-able set of user-interactive switches
for controlling visualized output.
Outwardly a Switchboard looks like a dictionary of SwitchValue
objects, which in turn look like appropriately sized numpy arrays
of values for some quantity. Different switch positions select
different values and thereby what data is visualized in various
outputs (e.g. tables and plots).
"""
def __init__(self, ws, switches, positions, types, initial_pos=None,
descriptions=None, show="all", ID=None, use_loadable_items=False):
"""
Create a new Switchboard.
Parameters
----------
switches : list
A list of switch names. The length of this list is
the number of switches.
positions : list
Elements are lists of position labels, one per switch.
Length must be equal to `len(switches)`.
types : list of {'buttons','dropdown','slider','numslider'}
A list of switch-type strings specifying what type of switch
each switch is.
- 'buttons': a set of toggle buttons
- 'dropdown': a drop-down (or combo-box)
- 'slider': a horizontal slider (equally spaced items)
- 'numslider': a horizontal slider (spaced by numeric value)
initial_pos : list or None (optional)
A list of 0-based integer indices giving the initial
position of each of the `len(switches)` switches. None
defaults to the first (0-th) position for each switch.
descriptions : list (optional)
A string description for each of the `len(switches)` switches.
show : list (optional)
A list of boolean (one for each of the `len(switches)` switches)
indicating whether or not that switch should be rendered. The
special values "all" and "none" show all or none of the switches,
respectively.
ID : str (optional)
A DOM identifier to use when rendering this Switchboard to HTML.
Usually leaving this value as `None` is best, in which case a
random identifier is created.
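        Examples
        --------
        A minimal sketch (normally built via the `ws.Switchboard(...)` factory,
        which supplies the `ws` argument automatically; the switch name and
        position labels below are hypothetical)::
            sb = ws.Switchboard(["Dataset"], [["DS0", "DS1"]], ["buttons"])
            sb.add("ds_value", (0,))  # a value that depends on switch 0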
"""
# Note: intentionally leave off ws argument desc. in docstring
assert(len(switches) == len(positions))
self.ID = randomID() if (ID is None) else ID
self.ws = ws # Workspace
self.switchNames = switches
self.switchTypes = types
self.switchIDs = ["switchbd%s_%d" % (self.ID, i)
for i in range(len(switches))]
self.positionLabels = positions
self.use_loadable_items = use_loadable_items
if initial_pos is None:
self.initialPositions = _np.array([0] * len(switches), _np.int64)
else:
assert(len(initial_pos) == len(switches))
self.initialPositions = _np.array(initial_pos, _np.int64)
self.descriptions = descriptions
if show == "all":
self.show = [True] * len(switches)
elif show == "none":
self.show = [False] * len(switches)
else:
assert(len(show) == len(switches))
self.show = show
self.widget = None
super(Switchboard, self).__init__([])
def add(self, varname, dependencies):
"""
Adds a new switched-value to this Switchboard.
Parameters
----------
varname : str
A name for the variable being added. This name will be used to
access the new variable (as either a dictionary key or as an
object member).
dependencies : list or tuple
The (0-based) switch-indices specifying which switch positions
the new variable is dependent on. For example, if the Switchboard
has two switches, one for "amplitude" and one for "frequency", and
this value is only dependent on frequency, then `dependencies`
should be set to `(1,)` or `[1]`.
Returns
-------
None
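        Examples
        --------
        A minimal sketch, continuing the hypothetical two-switch ("amplitude",
        "frequency") board described above::
            sb.add("freq_only_value", (1,))   # depends only on the "frequency" switch
            sb["freq_only_value"][0] = 1.0e3  # value at frequency position 0
            sb["freq_only_value"][1] = 2.0e3  # value at frequency position 1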
"""
super(Switchboard, self).__setitem__(varname, SwitchValue(self, varname, dependencies))
def add_unswitched(self, varname, value):
"""
Adds a new non-switched-value to this Switchboard.
This can be convenient for attaching related non-switched data to
a :class:`Switchboard`.
Parameters
----------
varname : str
A name for the variable being added. This name will be used to
access the new variable (as either a dictionary key or as an
object member).
value : object
The un-switched value to associate with `varname`.
Returns
-------
None
"""
super(Switchboard, self).__setitem__(varname, value)
def __setitem__(self, key, val):
        raise KeyError("Use add(...) to add an item to this switchboard")
def render(self, typ="html"):
"""
Render this Switchboard into the requested format.
        The returned string(s) are intended to be used to embed a
visualization of this object within a larger document.
Parameters
----------
typ : {"html"}
The format to render as. Currently only HTML is supported.
Returns
-------
dict
A dictionary of strings whose keys indicate which portion of
the embeddable output the value is. Keys will vary for different
`typ`. For `"html"`, keys are `"html"` and `"js"` for HTML and
            Javascript code, respectively.
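        Examples
        --------
        A minimal sketch of embedding the rendered pieces (the surrounding page
        is assumed to already load the jQuery/jQueryUI resources this output
        expects)::
            out = sb.render("html")
            snippet = out['html'] + "<script>" + out['js'] + "</script>"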
"""
return self._render_base(typ, None, self.show)
def _render_base(self, typ, view_suffix, show):
"""
Break off this implementation so SwitchboardViews can use.
"""
assert(typ == "html"), "Can't render Switchboards as anything but HTML"
switch_html = []; switch_js = []
for name, baseID, styp, posLbls, ipos, bShow in zip(
self.switchNames, self.switchIDs, self.switchTypes,
self.positionLabels, self.initialPositions, show):
ID = (baseID + view_suffix) if view_suffix else baseID
style = "" if bShow else " style='display: none'"
if styp == "buttons":
html = "<div class='switch_container'%s>\n" % style
html += "<fieldset id='%s'>\n" % ID
if name:
html += "<legend>%s: </legend>\n" % name
for k, lbl in enumerate(posLbls):
checked = " checked='checked'" if k == ipos else ""
html += "<label for='%s-%d'>%s</label>\n" % (ID, k, lbl)
html += "<input type='radio' name='%s' id='%s-%d' value=%d%s>\n" \
% (ID, ID, k, k, checked)
html += "</fieldset></div>\n"
js = " $('#%s > input').checkboxradio({ icon: false });" % ID
if view_suffix:
js += "\n".join((
"function connect_%s_to_base(){" % ID,
" if( $('#%s').hasClass('initializedSwitch') ) {" % baseID, # "if base switch is ready"
" $('#%s').on('change', function(event, ui) {" % baseID,
" var v = $(\"#%s > input[name='%s']:checked\").val();" % (baseID, baseID),
" var el = $(\"#%s > input[name='%s'][value=\" + v + \"]\");" % (ID, ID),
" if( el.is(':checked') == false ) { ",
" el.click();",
" }",
" });"
" $('#%s').on('change', function(event, ui) {" % ID,
" var v = $(\"#%s > input[name='%s']:checked\").val();" % (ID, ID),
" var el = $(\"#%s > input[name='%s'][value=\" + v + \"]\");" % (baseID, baseID),
" if( el.is(':checked') == false ) { ",
" el.click();",
" }",
" });",
" $('#%s').trigger('change');" % baseID,
" }",
" else {", # need to wait for base switch
" setTimeout(connect_%s_to_base, 500);" % ID,
" console.log('%s base NOT initialized: Waiting...');" % ID,
" }",
"};",
"connect_%s_to_base();" % ID # start trying to connect
))
elif styp == "dropdown":
html = "<div class='switch_container'%s><fieldset>\n" % style
if name:
html += "<label for='%s'>%s</label>\n" % (ID, name)
html += "<select name='%s' id='%s'>\n" % (ID, ID)
for k, lbl in enumerate(posLbls):
selected = " selected='selected'" if k == ipos else ""
html += "<option value=%d%s>%s</option>\n" % (k, selected, lbl)
html += "</select>\n</fieldset></div>\n"
js = " $('#%s').selectmenu();" % ID
if view_suffix:
js += "\n".join((
"function connect_%s_to_base(){" % ID,
" if( $('#%s').hasClass('initializedSwitch') ) {" % baseID, # "if base switch is ready"
" $('#%s').on('selectmenuchange', function(event, ui) {" % baseID,
" var v = $('#%s').val();" % baseID,
" var el = $('#%s');" % ID,
" if( el.val() != v ) { ",
" el.val(v).selectmenu('refresh');",
" }",
" });"
" $('#%s').on('selectmenuchange', function(event, ui) {" % ID,
" var v = $('#%s').val();" % ID,
" var el = $('#%s');" % baseID,
" if( el.val() != v ) { ",
" el.val(v).selectmenu('refresh').trigger('selectmenuchange');",
" }",
" });",
" $('#%s').trigger('selectmenuchange');" % baseID,
" console.log('%s connected to base');\n" % ID,
" }",
" else {", # need to wait for base switch
" setTimeout(connect_%s_to_base, 500);" % ID,
" console.log('%s base NOT initialized: Waiting...');" % ID,
" }",
"};",
"connect_%s_to_base();" % ID # start trying to connect
))
elif styp == "slider" or styp == "numslider":
if styp == "numslider":
float_vals = list(map(float, posLbls))
m, M = min(float_vals), max(float_vals)
else:
float_vals = list(range(len(posLbls)))
m, M = 0, len(posLbls) - 1
#ml = max(list(map(len,posLbls)))
w = 3.0 # 1.0*ml
html = "<div id='%s-container' class='switch_container'%s>\n" \
% (ID, style)
html += "<fieldset>\n"
if name:
html += "<label for='%s' class='pygsti-slider-label'>%s</label>\n" % (ID, name)
html += "<div name='%s' id='%s'>\n" % (ID, ID)
html += "<div id='%s-handle' class='ui-slider-handle'></div>" % ID
html += "</div>\n</fieldset></div>\n"
# " $('#%s-container').css({'margin-top':'%fem'});" % (ID,1.7/2),
js = ""
if view_suffix is None:
js = "var %s_float_values = [" % ID + \
",".join(map(str, float_vals)) + "];\n"
js += "var %s_str_values = [" % ID + \
",".join(["'%s'" % s for s in posLbls]) + "];\n"
js += "window.%s_float_values = %s_float_values;\n" % (ID, ID) # ensure declared globally
js += "window.%s_str_values = %s_str_values;\n" % (ID, ID) # ensure declared globally
js += "\n".join((
"function findNearest_%s(includeLeft, includeRight, value) {" % ID,
" var nearest = null;",
" var diff = null;",
" for (var i = 0; i < %s_float_values.length; i++) {" % ID,
" if ((includeLeft && %s_float_values[i] <= value) ||" % ID,
" (includeRight && %s_float_values[i] >= value)) {" % ID,
" var newDiff = Math.abs(value - %s_float_values[i]);" % ID,
" if (diff == null || newDiff < diff) {",
" nearest = i;",
" diff = newDiff;",
" }",
" }",
" }",
" return nearest;",
"}",
"window.findNearest_%s = findNearest_%s;\n" % (ID, ID)))
#allow ipos = something (e.g. -1) when there aren't any position labels
if len(posLbls) == 0:
float_val = 0.0; posLabel = "--"
else:
float_val = float_vals[ipos]
posLabel = posLbls[ipos]
js += "\n".join((
" $('#%s').slider({" % ID,
" orientation: 'horizontal', range: false,",
" min: %f, max: %f, step: %f," % (m, M, (M - m) / 100.0),
" value: %f," % float_val,
" create: function() {",
" $('#%s-handle').text('%s');" % (ID, posLabel),
" $('#%s-handle').css({'width':'%fem','height':'%fem'});" % (ID, w, 1.7),
" $('#%s-handle').css({'margin-left':'%fem','top':'%fem'});" % (ID, -w / 2, -1.7 / 2 + 0.4),
" $('#%s-handle').css({'text-align':'center','line-height':'1.5em'});" % ID,
" $('#%s').css({'margin-left':'%fem', 'margin-top':'0.4em'});" % (ID, w / 2),
" },",
" slide: function(event, ui) {",
" var includeLeft = event.keyCode != $.ui.keyCode.RIGHT;",
" var includeRight = event.keyCode != $.ui.keyCode.LEFT;",
" var iValue = findNearest_%s(includeLeft, includeRight, ui.value);" % baseID,
" if($('#%s').slider('value') != %s_float_values[iValue]) {" % (ID, baseID),
" $('#%s-handle').text(%s_str_values[iValue]);" % (baseID, baseID),
" $('#%s').slider('value', %s_float_values[iValue]);" % (baseID, baseID),
" }"
" return false;"
" },",
" });",
))
if view_suffix:
# slide events always change *base* (non-view) slider (see above),
# which causes a change event to fire. Views handle this event
# to update their own slider values.
js += "\n".join((
"function connect_%s_to_base(){" % ID,
" if( $('#%s').hasClass('initializedSwitch') ) {" % baseID, # "if base switch is ready"
" $('#%s').on('slidechange', function(event, ui) {" % baseID,
" $('#%s').slider('value', ui.value);" % ID,
" $('#%s-handle').text( $('#%s-handle').text() );" % (ID, baseID),
" });",
" var mock_ui = { value: $('#%s').slider('value') };" % baseID, # b/c handler uses ui.value
" $('#%s').trigger('slidechange', mock_ui);" % baseID,
" }",
" else {", # need to wait for base switch
" setTimeout(connect_%s_to_base, 500);" % ID,
" console.log('%s base NOT initialized: Waiting...');" % ID,
" }",
"};",
"connect_%s_to_base();" % ID # start trying to connect
))
else:
raise ValueError("Unknown switch type: %s" % styp)
js += "$('#%s').addClass('initializedSwitch');\n" % ID
switch_html.append(html)
switch_js.append(js)
html = "\n".join(switch_html)
if not self.use_loadable_items: # run JS as soon as the document is ready
js = "$(document).ready(function() {\n" + \
"\n".join(switch_js) + "\n});"
else: # in a report, where we have a 'loadable' parent and might not want to load right away
js = "$(document).ready(function() {\n" + \
"$('#%s').closest('.loadable').on('load_loadable_item', function(){\n" % ID + \
"\n".join(switch_js) + "\n}); });"
return {'html': html, 'js': js}
def get_switch_change_handlerjs(self, switchIndex):
"""
Returns the Javascript needed to begin an on-change handler
for a particular switch.
Parameters
----------
switchIndex : int
The 0-based index of which switch to get handler JS for.
Returns
-------
str
"""
ID = self.switchIDs[switchIndex]
typ = self.switchTypes[switchIndex]
if typ == "buttons":
return "$('#%s').on('change', function() {" % ID
elif typ == "dropdown":
return "$('#%s').on('selectmenuchange', function() {" % ID
elif typ == "slider" or typ == "numslider":
return "$('#%s').on('slidechange', function() {" % ID # only when slider stops
#return "$('#%s').on('slide', function() {" % ID # continuous on
# mouse move - but doesn't respond correctly to arrows, so seems
# better to use 'slidechange'
else:
raise ValueError("Unknown switch type: %s" % typ)
def get_switch_valuejs(self, switchIndex):
"""
Returns the Javascript needed to get the value of a particular switch.
Parameters
----------
switchIndex : int
The 0-based index of which switch to get value-extracting JS for.
Returns
-------
str
"""
ID = self.switchIDs[switchIndex]
typ = self.switchTypes[switchIndex]
if typ == "buttons":
return "$(\"#%s > input[name='%s']:checked\").val()" % (ID, ID)
elif typ == "dropdown":
return "$('#%s').val()" % ID
elif typ == "slider" or typ == "numslider":
#return "%s_float_values.indexOf($('#%s').slider('option', 'value'))" % (ID,ID)
return "findNearest_%s(true,true,$('#%s').slider('option', 'value'))" % (ID, ID)
else:
raise ValueError("Unknown switch type: %s" % typ)
def display(self):
"""
Display this switchboard within an iPython notebook.
Calling this function requires that you are in an
iPython environment, and really only makes sense
within a notebook.
Returns
-------
None
"""
if not in_ipython_notebook():
raise ValueError('Only run `display` from inside an IPython Notebook.')
#if self.widget is None:
# self.widget = _widgets.HTMLMath(value="?",
# placeholder='Switch HTML',
# description='Switch HTML',
# disabled=False)
out = self.render("html")
content = "<script>\n" + \
"require(['jquery','jquery-UI'],function($,ui) {" + \
out['js'] + " });</script>" + out['html']
#self.widget.value = content
display_ipynb(content) # self.widget)
def view(self, switches="all", idsuffix="auto"):
"""
Return a view of this Switchboard.
Parameters
----------
switches : list, optional
The names of the switches to include in this view. The special
value "all" includes all of the switches in the view.
Alternatively, this can be an array of boolean values, one
for each switch.
idsuffix : str, optional
A suffix to append to the DOM ID of this switchboard when
rendering the view. If "auto", a random suffix is used.
Returns
-------
SwitchboardView
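        Examples
        --------
        A minimal sketch (switch names are hypothetical)::
            full_view = sb.view()               # duplicates all switches
            freq_view = sb.view(["frequency"])  # shows only the "frequency" switch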
"""
if switches == "all":
show = [True] * len(self.switchNames)
elif all([isinstance(b, bool) for b in switches]):
assert(len(switches) == len(self.switchNames))
show = switches
else:
show = [False] * len(self.switchNames)
for nm in switches:
show[self.switchNames.index(nm)] = True
return SwitchboardView(self, idsuffix, show)
def __getattr__(self, attr):
if attr in self:
return self[attr]
return getattr(self.__dict__, attr)
class SwitchboardView(object):
"""
A duplicate or "view" of an existing switchboard which logically
represents the *same* set of switches. Thus, when switches are
moved on the duplicate board, switches will move on the original
(and vice versa).
"""
def __init__(self, switchboard, idsuffix="auto", show="all"):
"""
Create a new SwitchboardView
Parameters
----------
switchboard : Switchboard
The base switch board.
idsuffix : str, optional
A suffix to append to the DOM ID of this switchboard
when rendering the view. If "auto", a random suffix
is used.
show : list (optional)
A list of booleans indicating which switches should be rendered.
The special values "all" and "none" show all or none of the
switches, respectively.
"""
if idsuffix == "auto":
self.idsuffix = "v" + randomID()
else:
self.idsuffix = idsuffix
if show == "all":
self.show = [True] * len(switchboard.switchNames)
elif show == "none":
self.show = [False] * len(switchboard.switchNames)
else:
assert(len(show) == len(switchboard.switchNames))
self.show = show
self.switchboard = switchboard
def render(self, typ="html"):
"""
Render this Switchboard into the requested format.
        The returned string(s) are intended to be used to embed a
visualization of this object within a larger document.
Parameters
----------
typ : {"html"}
The format to render as. Currently only HTML is supported.
Returns
-------
dict
A dictionary of strings whose keys indicate which portion of
the embeddable output the value is. Keys will vary for different
`typ`. For `"html"`, keys are `"html"` and `"js"` for HTML and
            Javascript code, respectively.
"""
return self.switchboard._render_base(typ, self.idsuffix, self.show)
def display(self):
"""
Display this switchboard within an iPython notebook.
Calling this function requires that you are in an
iPython environment, and really only makes sense
within a notebook.
Returns
-------
None
"""
if not in_ipython_notebook():
raise ValueError('Only run `display` from inside an IPython Notebook.')
out = self.render("html")
content = "<script>\n" + \
"require(['jquery','jquery-UI'],function($,ui) {" + \
out['js'] + " });</script>" + out['html']
display_ipynb(content)
class SwitchValue(object):
"""
Encapsulates a "switched value", which is essentially a value (i.e. some
quantity, usually one used as an argument to visualization functions) that
is controlled by the switches of a single Switchboard.
The paradigm is one of a Switchboard being a collection of switches along
with a dictionary of SwitchValues, whereby each SwitchValue is a mapping
of switch positions to values. For efficiency, a SwitchValue need only map
a "subspace" of the switch positions, that is, the position-space spanned
by only a subset of the switches. Which switch-positions are mapped is
given by the "dependencies" of a SwitchValue.
SwitchValue behaves much like a numpy array of values in terms of
element access.
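    Examples
    --------
    A minimal sketch (assumes `sb` is a Switchboard with a single two-position
    switch and that `sb.add("val", (0,))` was called)::
        sb.val[0] = 3.14  # value selected when the switch is in position 0
        sb.val[1] = 2.72  # value selected when the switch is in position 1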
"""
def __init__(self, parent_switchboard, name, dependencies):
"""
Creates a new SwitchValue.
Parameters
----------
parent_switchboard : Switchboard
The switch board this value is associated with.
name : str
The name of this value, which is also the key or member
name used to access this value from its parent `Switchboard`.
dependencies : iterable
The 0-based indices identifying which switches this value
depends upon, and correspondingly, which switch positions
the different axes of the new `SwitchValue` correspond to.
"""
self.ws = parent_switchboard.ws # workspace
self.parent = parent_switchboard
self.name = name
self.dependencies = dependencies
shape = [len(self.parent.positionLabels[i]) for i in dependencies]
self.base = _np.empty(shape, dtype=_np.object)
index_all = (slice(None, None),) * len(shape)
self.base[index_all] = NotApplicable(self.ws)
#Access to underlying ndarray
def __getitem__(self, key):
return self.base.__getitem__(key)
def __getslice__(self, i, j):
return self.__getitem__(slice(i, j)) # Called for A[:]
def __setitem__(self, key, val):
return self.base.__setitem__(key, val)
def __getattr__(self, attr):
#use __dict__ so no chance for recursive __getattr__
return getattr(self.__dict__['base'], attr)
def __len__(self): return len(self.base)
#Future - arithmetic ops should return a new SwitchValue
#def __add__(self,x): return self.base + x
#def __sub__(self,x): return self.base - x
#def __mul__(self,x): return self.base * x
#def __truediv__(self, x): return self.base / x
class WorkspaceOutput(object):
"""
Base class for all forms of data-visualization within a `Workspace` context.
WorkspaceOutput sets a common interface for performing data visualization
using a Workspace. In particular, `render` is used to create embeddable
output in various formats, and `display` is used to show the object within
an iPython notebook.
"""
default_render_options = {
#General
'output_dir': False,
'precision': None,
'output_name': False,
'switched_item_mode': 'inline', # or 'separate files'
'switched_item_id_overrides': {},
#HTML specific
'global_requirejs': False,
'use_loadable_items': False,
'click_to_display': False,
'render_math': True,
'resizable': True,
'autosize': 'none',
'link_to': None,
'valign': 'top',
#Latex specific
'latex_cmd': "pdflatex",
'latex_flags': ["-interaction=nonstopmode", "-halt-on-error", "-shell-escape"],
'page_size': (6.5, 8.0),
'render_includes': True,
'leave_includes_src': False,
}
def __init__(self, ws):
"""
Create a new WorkspaceOutput object. Usually not called directly.
Parameters
----------
ws : Workspace
The workspace containing the new object.
"""
self.ws = ws
self.ID = randomID() # maybe allow overriding this in the FUTURE
self.options = WorkspaceOutput.default_render_options.copy()
def set_render_options(self, **kwargs):
"""
Sets rendering options, which affect how render() behaves.
The reason render options are set via this function rather
than passed directly as arguments to the render(...) call
is twofold. First, it allows for global 'default' options
to be set before creating `WorkspaceOutput`-derived objects;
        second, it allows the user to set render options right after
an object is constructed, separately from the rendering process
(which is sometimes desirable).
Parameters
----------
output_dir : str or False
The name of the output directory under which all output files
            should be created. The names of these files are just the IDs of the
items being rendered.
precision : int or dict, optional
The amount of precision to display. A dictionary with keys
"polar", "sci", and "normal" can separately specify the
precision for complex angles, numbers in scientific notation, and
            everything else, respectively. If an integer is given, this
same value is taken for all precision types. If None, then
`{'normal': 6, 'polar': 3, 'sci': 0}` is used.
switched_item_mode : {'inline','separate files'}, optional
Whether switched items should be rendered inline within the 'html'
and 'js' blocks of the return value of :func:`render`, or whether
each switched item (corresponding to a single "switch position")
should be rendered in a separate file and loaded on-demand only
when it is needed.
switched_item_id_overrides : dict, optional
A dictionary of *index*:*id* pairs, where *index* is a 0-based index
into the list of switched items (plots or tables), and *id* is a
string ID. Since the ID is used as the filename when saving files,
overriding the ID is useful when writing a single plot or table to
a specific filename.
global_requirejs : bool, optional
Whether the table is going to be embedded in an environment
with a globally defined RequireJS library. If True, then
rendered output will make use of RequireJS.
click_to_display : bool, optional
If True, table plots are not initially created but must
be clicked to prompt creation. This is False by default,
and can be useful to set to True for tables with
especially complex plots whose creation would slow down
page loading significantly.
resizable : bool, optional
Whether or not to place table inside a JQueryUI
resizable widget (only applies when `typ == "html"`).
autosize : {'none', 'initial', 'continual'}, optional
Whether tables and plots should be resized either
initially, i.e. just upon first rendering (`"initial"`) or whenever
the browser window is resized (`"continual"`). This option only
applies for html rendering.
link_to : tuple of {"tex", "pdf", "pkl"} or None, optional
If not None, a list of one or more items from the given set
indicating whether or not to include links to Latex, PDF, and
Python pickle files, respectively. Note that setting this
render option does not automatically *create/render* additional
formats of this output object (you need to make multiple `render`
calls for that) - it just creates the *links* to these files when
rendering as "html".
valign : {"top","bottom"}
Whether the switched items should be vertically aligned by their
tops or bottoms (when they're different heights).
latex_cmd : str, optional
The system command or executable used to compile LaTeX documents.
Usually `"pdflatex"`.
latex_flags : list, optional
A list of (string-valued) flags to pass to `latex_cmd` when
compiling LaTeX documents. Defaults to
`["-interaction=nonstopmode", "-halt-on-error", "-shell-escape"]`
page_size : tuple
The usable page size for LaTeX documents, as (*width*,*height*)
where *width* and *height* are in inches. Note that this does not
include margins. Defaults to `(6.5,8.0)`.
render_includes : bool, optional
When rendering as "latex", whether included files should also be
rendered (either by compiling latex to PDF or saving plots as PDFs).
leave_includes_src : bool, optional
            Whether the source "*.tex" files should be kept after LaTeX
            compilation is done. If `False`, they are removed.
Returns
-------
None
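        Examples
        --------
        A minimal sketch (assumes `tbl` is some WorkspaceOutput-derived object,
        e.g. a table created from a Workspace factory)::
            tbl.set_render_options(precision=4, resizable=False)
            out = tbl.render("html")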
"""
for key, val in kwargs.items():
if key in self.options:
self.options[key] = val
else:
raise ValueError("Invalid render option: %s\nValid options are:\n" % key
+ '\n'.join(self.options.keys()))
def __getstate__(self):
state_dict = self.__dict__.copy()
del state_dict['ws']
return state_dict
def __setstate__(self, d):
self.__dict__.update(d)
if 'ws' not in self.__dict__:
self.__dict__['ws'] = None
# Note: hashing not needed because these objects are not *inputs* to
    # other WorkspaceOutput objects or computation functions - these objects
# are cached using call_key.
def render(self, typ="html"):
"""
        Renders this object into the specified format, specifically for
embedding it within a larger document.
Parameters
----------
typ : str
The format to render as. Currently `"html"` is widely supported
and `"latex"` is supported for tables.
Returns
-------
dict
A dictionary of strings whose keys indicate which portion of
the embeddable output the value is. Keys will vary for different
`typ`. For `"html"`, keys are `"html"` and `"js"` for HTML and
            Javascript code, respectively.
"""
raise NotImplementedError("Derived classes must implement their own render()")
def display(self):
"""
Display this object within an iPython notebook.
"""
if not in_ipython_notebook():
raise ValueError('Only run `display` from inside an IPython Notebook.')
self.set_render_options(global_requirejs=True,
output_dir=None) # b/c jupyter uses require.js
out = self.render("html")
content = "<script>\n" + \
"require(['jquery','jquery-UI','plotly'],function($,ui,Plotly) {" + \
out['js'] + " });</script>" + out['html']
display_ipynb(content)
def saveas(self, filename, index=None, verbosity=0):
"""
Saves this workspace output object to a file.
The type of file that is saved is determined automatically by the
extension of `filename`. Recognized extensions are `pdf` (PDF),
`tex` (LaTeX), `pkl` (Python pickle) and `html` (HTML). Since this
object may contain different instances of its data based on switch
        positions; when there are multiple instances the user must specify
the `index` argument to disambiguate.
Parameters
----------
filename : str
The destination filename. Its extension determines what type
of file is saved.
index : int, optional
An absolute index into the list of different switched "versions"
of this object's data. In most cases, the object being saved
doesn't depend on any switch boards and has only a single "version",
            in which case this can be left as the default.
verbosity : int, optional
Controls the level of detail printed to stdout.
Returns
-------
None
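        Examples
        --------
        A minimal sketch (the filenames are hypothetical and a derived class
        must implement `saveas`)::
            tbl.saveas("my_table.pdf")            # object with a single version
            tbl.saveas("my_table.html", index=0)  # select a switched version explicitly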
"""
raise NotImplementedError()
def _ccompute(self, fn, *args, **kwargs):
""" Cached-computation using self.ws's smart cache """
return self.ws.smartCache.cached_compute(fn, args, kwargs)[1]
def _create_onready_handler(self, content, ID):
global_requirejs = self.options.get('global_requirejs', False)
use_loadable_items = self.options.get('use_loadable_items', False)
ret = ""
if global_requirejs:
ret += "require(['jquery','jquery-UI','plotly','autorender'],function($,ui,Plotly,renderMathInElement) {\n"
ret += ' $(document).ready(function() {\n'
if use_loadable_items:
ret += " $('#%s').closest('.loadable').on('load_loadable_item', function(){\n" % ID
ret += content
if use_loadable_items:
ret += " });" # end load_loadable_item handler
ret += '}); //end on-ready or on-load handler\n'
if global_requirejs:
ret += '}); //end require block\n'
return ret
def _render_html(self, ID, div_htmls, div_jss, div_ids, switchpos_map,
switchboards, switchIndices, div_css_classes=None,
link_to=None, link_to_files_dir=None, embed_figures=True):
"""
Helper rendering function, which takes care of the (complex)
        common logic which takes a series of HTML div blocks corresponding
to the results of a Workspace.switchedCompute(...) call and
builds the HTML and JS necessary for toggling the visibility of
these divs in response to changes in switch position(s).
Parameters
----------
        ID : str
The identifier to use when constructing DOM ids.
div_htmls : list
            The html content for each switched block (typically the elements are
"<div>...</div>" blocks themselves). This is the content that
is switched between.
div_jss : list
Javascript content to accompany each switched block.
div_ids : list
A list giving the DOM ids for the div blocks given by `div_html`.
switchpos_map : dict
A dictionary mapping switch positions to div-index. Keys are switch
tuples of per-switchboard positions (i.e. a tuple of tuples), giving
the positions of each switch specified in `switchIndices`. Values
are integer indices into `html_divs`.
switchboards : list
            A list of relevant Switchboard objects.
switchIndices : list
A list of tuples, one per Switchboard object, giving the relevant
switch indices (integers) within that Switchboard.
div_css_classes : list, optional
A list of (string) CSS classes to add to the div elements created
by this function.
link_to : list, optional
If not None, a list of one or more items from the set
{"tex", "pdf", "pkl"} indicating whether or not to
include links to Latex, PDF, and Python pickle files,
respectively.
link_to_files_dir : str, optional
The directory to place linked-to files in. Only used when
`link_to` is not None.
        embed_figures : bool, optional
If True (default), figures will be embedded directly into
the report HTML. Otherwise, figures will be written to
`link_to_files_dir` and dynamically loaded into the report
with AJAX requests.
Returns
-------
dict
A dictionary of strings whose keys indicate which portion of
the embeddable output the value is. Keys are `"html"` and `"js"`.
"""
# within_report = self.options.get('within_report', False)
#Build list of CSS classes for the created divs
classes = ['single_switched_value']
if div_css_classes is not None:
classes.extend(div_css_classes)
cls = ' '.join(classes)
#build HTML as container div containing one or more plot divs
        # Note: 'display: none' doesn't always work in firefox... (polar plots in particular)
# style='display: none' or 'visibility: hidden'
html = "<div id='%s' class='pygsti-wsoutput-group'>\n" % ID
div_contents = []
if div_jss is None: div_jss = [""] * len(div_htmls)
for divHTML, divJS in zip(div_htmls, div_jss):
scriptJS = "<script>\n%s\n</script>\n" % divJS if divJS else ""
div_contents.append(("{script}{html}".format(
script=scriptJS, html=divHTML)))
if embed_figures:
#Inline div contents
html += "\n".join(["<div class='%s' id='%s'>\n%s\n</div>\n" %
(cls, divID, divContent) for divID, divContent
in zip(div_ids, div_contents)])
else:
html += "\n".join(["<div class='%s' id='%s'></div>\n" %
(cls, divID) for divID in div_ids])
#build a list of filenames based on the divIDs
div_filenames = [(divID + ".html") for divID in div_ids]
#Create separate files with div contents
for divContent, divFilenm in zip(div_contents, div_filenames):
with open(_os.path.join(str(link_to_files_dir), divFilenm), 'w') as f:
f.write(divContent)
html += "\n</div>\n" # ends pygsti-wsoutput-group div
#build javascript to map switch positions to div_ids
js = "var switchmap_%s = new Array();\n" % ID
for switchPositions, iDiv in switchpos_map.items():
#switchPositions is a tuple of tuples of position indices, one tuple per switchboard
div_id = div_ids[iDiv]
flatPositions = []
for singleBoardSwitchPositions in switchPositions:
flatPositions.extend(singleBoardSwitchPositions)
js += "switchmap_%s[ [%s] ] = '%s';\n" % \
(ID, ",".join(map(str, flatPositions)), div_id)
js += "window.switchmap_%s = switchmap_%s;\n" % (ID, ID) # ensure a *global* variable
js += "\n"
cnd = " && ".join(["$('#switchbd%s_%d').hasClass('initializedSwitch')"
% (sb.ID, switchIndex)
for sb, switchInds in zip(switchboards, switchIndices)
for switchIndex in switchInds])
if len(cnd) == 0: cnd = "true"
#define fn to "connect" output object to switchboard, i.e.
# register event handlers for relevant switches so output object updates
js += "function connect_%s_to_switches(){\n" % ID
js += " if(%s) {\n" % cnd # "if switches are ready"
# loop below adds event bindings to the body of this if-block
#build a handler function to get all of the relevant switch positions,
# build a (flattened) position array, and perform the lookup. Note that
# this function does the same thing regardless of *which* switch was
# changed, and so is called by all relevant switch change handlers.
onchange_name = "%s_onchange" % ID
handler_js = "function %s() {\n" % onchange_name
handler_js += " var tabdiv = $( '#%s' ).closest('.tabcontent');\n" % ID
handler_js += " if( tabdiv.length > 0 && !tabdiv.hasClass('active') ) return;\n" # short-circuit
handler_js += " var curSwitchPos = new Array();\n"
for sb, switchInds in zip(switchboards, switchIndices):
for switchIndex in switchInds:
handler_js += " curSwitchPos.push(%s);\n" % sb.get_switch_valuejs(switchIndex)
handler_js += " var idToShow = switchmap_%s[ curSwitchPos ];\n" % ID
handler_js += " $( '#%s' ).children().hide();\n" % ID
handler_js += " divToShow = $( '#' + idToShow );\n"
#Javascript to switch to a new div
if embed_figures:
handler_js += " divToShow.show();\n"
handler_js += " divToShow.parentsUntil('#%s').show();\n" % ID
handler_js += " caption = divToShow.closest('figure').children('figcaption:first');\n"
handler_js += " caption.css('width', Math.round(divToShow.width()*0.9) + 'px');\n"
else:
handler_js += " if( divToShow.children().length == 0 ) {\n"
handler_js += " $(`#${idToShow}`).load(`figures/${idToShow}.html`, function() {\n"
handler_js += " divToShow = $( '#' + idToShow );\n"
handler_js += " divToShow.show();\n"
handler_js += " divToShow.parentsUntil('#%s').show();\n" % ID
if link_to and ('tex' in link_to):
handler_js += " divToShow.append('<a class=\"dlLink\" href=\"figures/'"
handler_js += " + idToShow + '.tex\" target=\"_blank\">▼TEX</a>');\n"
if link_to and ('pdf' in link_to):
handler_js += " divToShow.append('<a class=\"dlLink\" href=\"figures/'"
handler_js += " + idToShow + '.pdf\" target=\"_blank\">▼PDF</a>');\n"
if link_to and ('pkl' in link_to):
handler_js += " divToShow.append('<a class=\"dlLink\" href=\"figures/'"
handler_js += " + idToShow + '.pkl\" target=\"_blank\">▼PKL</a>');\n"
handler_js += " caption = divToShow.closest('figure').children('figcaption:first');\n"
handler_js += " caption.css('width', Math.round(divToShow.width()*0.9) + 'px');\n"
handler_js += " });\n" # end load-complete handler
handler_js += " }\n"
handler_js += " else {\n"
handler_js += " divToShow.show();\n"
handler_js += " divToShow.parentsUntil('#%s').show();\n" % ID
handler_js += " caption = divToShow.closest('figure').children('figcaption:first');\n"
handler_js += " caption.css('width', Math.round(divToShow.width()*0.9) + 'px');\n"
handler_js += " }\n"
handler_js += "}\n" # end <ID>_onchange function
#build change event listener javascript
for sb, switchInds in zip(switchboards, switchIndices):
# switchInds is a tuple containing the "used" switch indices of sb
for switchIndex in switchInds:
# part of if-block ensuring switches are ready (i.e. created)
js += " " + sb.get_switch_change_handlerjs(switchIndex) + \
"%s(); });\n" % onchange_name
#bind onchange call to custom 'tabchange' event that we trigger when tab changes
js += " $( '#%s' ).closest('.tabcontent').on('tabchange', function(){\n" % ID
js += "%s(); });\n" % onchange_name
js += " %s();\n" % onchange_name # call onchange function *once* at end to update visibility
# end if-block
js += " console.log('Switches initialized: %s handlers set');\n" % ID
js += " $( '#%s' ).show()\n" % ID # visibility updates are done: show parent container
js += " }\n" # ends if-block
js += " else {\n" # switches aren't ready - so wait
js += " setTimeout(connect_%s_to_switches, 500);\n" % ID
js += " console.log('%s switches NOT initialized: Waiting...');\n" % ID
js += " }\n"
js += "};\n" # end of connect function
#on-ready handler starts trying to connect to switches
# - note this is already in a 'load_loadable_item' handler, so no need for that here
js += "$(document).ready(function() {\n"
js += " connect_%s_to_switches();\n" % ID
if link_to:
# Add download links for all divs at once since they're all ready
rel_figure_dir = _os.path.basename(str(link_to_files_dir))
if 'tex' in link_to:
for div_id in div_ids:
js += " $('#%s').append('<a class=\"dlLink\" href=\"%s/" % (div_id, rel_figure_dir)
js += "%s.tex\" target=\"_blank\">▼TEX</a>');\n" % div_id
if 'pdf' in link_to:
for div_id in div_ids:
js += " $('#%s').append('<a class=\"dlLink\" href=\"%s/" % (div_id, rel_figure_dir)
js += "%s.pdf\" target=\"_blank\">▼PDF</a>');\n" % div_id
if 'pkl' in link_to:
for div_id in div_ids:
js += " $('#%s').append('<a class=\"dlLink\" href=\"%s/" % (div_id, rel_figure_dir)
js += "%s.pkl\" target=\"_blank\">▼PKL</a>');\n" % div_id
js += "});\n\n" # end on-ready handler
js += handler_js
return {'html': html, 'js': js}
class NotApplicable(WorkspaceOutput):
"""
    Class signifying that a given set of arguments is not applicable
to a function being evaluated.
"""
def __init__(self, ws):
"""
Create a new NotApplicable object.
"""
super(NotApplicable, self).__init__(ws)
def render(self, typ="html", ID=None):
"""
        Renders this object into the specified format, specifically for
embedding it within a larger document.
Parameters
----------
typ : str
The format to render as. Allowed options are `"html"`,
`"latex"`, and `"python"`.
ID : str, optional
            A DOM ID used in place of the object's internal ID.
Returns
-------
dict
A dictionary of strings whose keys indicate which portion of
the embeddable output the value is. Keys will vary for different
`typ`. For `"html"`, keys are `"html"` and `"js"` for HTML and
            Javascript code, respectively.
"""
if ID is None: ID = self.ID
if typ == "html":
return {'html': "<div id='%s' class='notapplicable'>[NO DATA or N/A]</div>" % ID, 'js': ""}
elif typ == "latex":
return {'latex': "Not applicable"}
elif typ == "python":
return "Not Applicable"
else:
raise ValueError("NotApplicable render type not supported: %s" % typ)
class WorkspaceTable(WorkspaceOutput):
"""
Encapsulates a table within a `Workspace` context.
A base class which provides the logic required to take a
single table-generating function and make it into a legitimate
`WorkspaceOutput` object for using within workspaces.
"""
def __init__(self, ws, fn, *args):
"""
Create a new WorkspaceTable. Usually not called directly.
Parameters
----------
ws : Workspace
The workspace containing the new object.
fn : function
A table-creating function.
args : various
The arguments to `fn`.
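        Examples
        --------
        Derived tables are normally created via the factory methods registered
        on a Workspace (a minimal sketch; constructor arguments vary by table
        type, and `BlankTable` is shown assuming it takes no extra arguments)::
            tbl = ws.BlankTable()
            tbl.display()  # show within an IPython notebook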
"""
super(WorkspaceTable, self).__init__(ws)
self.tablefn = fn
self.initargs = args
self.tables, self.switchboards, self.sbSwitchIndices, self.switchpos_map = \
self.ws.switchedCompute(self.tablefn, *self.initargs)
def render(self, typ):
"""
        Renders this table into the specified format, specifically for
embedding it within a larger document.
Parameters
----------
typ : str
The format to render as. Currently `"html"`, `"latex"`
and `"python"` are supported.
Returns
-------
dict
A dictionary of strings giving the different portions of the
embeddable output. For `"html"`, keys are `"html"` and `"js"`.
For `"latex"`, there is a single key `"latex"`.
"""
resizable = self.options.get('resizable', True)
autosize = self.options.get('autosize', 'none')
precision = self.options.get('precision', None)
switched_item_mode = self.options.get('switched_item_mode', 'inline')
overrideIDs = self.options.get('switched_item_id_overrides', {})
output_dir = self.options.get('output_dir', None)
if precision is None:
precDict = {'normal': 6, 'polar': 3, 'sci': 0}
elif _compat.isint(precision):
precDict = {'normal': precision, 'polar': precision, 'sci': precision}
else:
assert('normal' in precision), "Must at least specify 'normal' precision"
p = precision['normal']
precDict = {'normal': p,
'polar': precision.get('polar', p),
'sci': precision.get('sci', p)}
ID = self.ID
tableID = "table_" + ID
if typ == "html":
divHTML = []
divIDs = []
divJS = []
for i, table in enumerate(self.tables):
tableDivID = tableID + "_%d" % i
if i in overrideIDs: tableDivID = overrideIDs[i]
if isinstance(table, NotApplicable):
table_dict = table.render("html", tableDivID)
else:
table_dict = table.render("html", tableID=tableDivID + "_tbl",
tableclass="dataTable",
precision=precDict['normal'],
polarprecision=precDict['polar'],
sciprecision=precDict['sci'],
resizable=resizable, autosize=(autosize == "continual"),
click_to_display=self.options['click_to_display'],
link_to=self.options['link_to'],
output_dir=output_dir)
if switched_item_mode == 'separate files':
divJS.append(self._form_table_js(tableDivID, table_dict['html'], table_dict['js'], None))
else:
#otherwise just add plot handers (table_dict['js']) to divJS for later
divJS.append(table_dict['js'])
divHTML.append(table_dict['html'])
divIDs.append(tableDivID)
if switched_item_mode == 'inline':
base = self._render_html(tableID, divHTML, None, divIDs, self.switchpos_map,
self.switchboards, self.sbSwitchIndices, None,
self.options.get('link_to', None), output_dir) # no JS yet...
js = self._form_table_js(tableID, base['html'], '\n'.join(divJS), base['js'])
# creates JS for everything: plot creation, switchboard init, autosize
elif switched_item_mode == 'separate files':
base = self._render_html(tableID, divHTML, divJS, divIDs, self.switchpos_map,
self.switchboards, self.sbSwitchIndices, None,
self.options.get('link_to', None), output_dir, embed_figures=False)
js = self._form_table_js(tableID, None, None, base['js'])
else:
raise ValueError("Invalid `switched_item_mode` render option: %s" %
switched_item_mode)
return {'html': base['html'], 'js': js}
elif typ == "latex":
render_includes = self.options.get('render_includes', True)
leave_src = self.options.get('leave_includes_src', False)
W, H = self.options.get('page_size', (6.5, 8.0))
printer = _objs.VerbosityPrinter(1) # TEMP - add verbosity arg?
#Note: in both cases output_dir needs to be the *relative* path
# between the current directory and the output directory if
# \includegraphics statements are to work. If this isn't needed
# (e.g. if just the standalone files are needed) then output_dir
# can be an absolute path as well.
# table rendering returned in ret dict
if switched_item_mode == 'inline':
# Assume current directory is where generated latex
# code will reside and output_dir is where figs go.
tablefig_output_dir = output_dir # (can be None, in
#which case an error will be raised if table has figs)
render_dir = None # no need to chdir for table render
#render each switched "item" as a separate standalone file
elif switched_item_mode == 'separate files':
# Assume current directory is where \includegraphics{...}
# latex will go, and that separate table TEX files *and*
# figures go in `output_dir`. The table latex is given an
# output_dir of '.' because figure files will be in the same
# directory.
assert(output_dir), "Cannot render a table as 'latex' with " + \
"switched items as separate files without a valid " + \
"'output_dir' render option"
tablefig_output_dir = '.'
render_dir = output_dir
else:
raise ValueError("Invalid `switched_item_mode` render option: %s" %
switched_item_mode)
if render_dir is not None and not _os.path.exists(render_dir):
_os.mkdir(render_dir)
cwd = _os.getcwd()
latex_list = []
for i, table in enumerate(self.tables):
tableDivID = tableID + "_%d" % i
if i in overrideIDs: tableDivID = overrideIDs[i]
if isinstance(table, NotApplicable): continue
if render_dir: _os.chdir(render_dir)
table_dict = table.render("latex",
precision=precDict['normal'],
polarprecision=precDict['polar'],
sciprecision=precDict['sci'],
output_dir=tablefig_output_dir,
render_includes=render_includes)
if render_dir: _os.chdir(cwd)
if switched_item_mode == 'inline':
latex_list.append(table_dict['latex'])
elif switched_item_mode == 'separate files':
if render_includes or leave_src:
d = {'toLatex': table_dict['latex']}
_merge.merge_latex_template(d, "standalone.tex",
_os.path.join(str(output_dir), "%s.tex" % tableDivID))
if render_includes:
assert('latex_cmd' in self.options and self.options['latex_cmd']), \
"Cannot render latex include files without a valid 'latex_cmd' render option"
try:
_os.chdir(render_dir)
latex_cmd = self.options['latex_cmd']
latex_call = [latex_cmd] + self.options.get('latex_flags', []) \
+ ["%s.tex" % tableDivID]
stdout, stderr, returncode = _merge.process_call(latex_call)
_merge.evaluate_call(latex_call, stdout, stderr, returncode, printer)
if not _os.path.isfile("%s.pdf" % tableDivID):
raise Exception("File %s.pdf was not created by %s"
% (tableDivID, latex_cmd))
if not leave_src: _os.remove("%s.tex" % tableDivID)
_os.remove("%s.log" % tableDivID)
_os.remove("%s.aux" % tableDivID)
except _subprocess.CalledProcessError as e:
printer.error("%s returned code %d " % (latex_cmd, e.returncode)
+ "trying to render standalone %s.tex. " % tableDivID
+ "Check %s.log to see details." % tableDivID)
finally:
_os.chdir(cwd)
latex_list.append("\\includegraphics[width=%.2fin,height=%.2fin,keepaspectratio]{%s}" %
(W, H, _os.path.join(str(output_dir), "%s.pdf" % tableDivID)))
elif leave_src:
latex_list.append("\\input{%s}" % _os.path.join(str(output_dir), "%s.tex" % tableDivID))
else:
                        latex_list.append("%% Didn't generate anything for tableID=%s" % tableDivID)
else:
raise ValueError("Invalid `switched_item_mode` render option: %s" %
switched_item_mode) # pragma: no cover
return {'latex': "\n".join(latex_list)}
elif typ == "python":
if switched_item_mode == 'separate files':
assert(output_dir), "Cannot render tables as 'python' in separate" \
+ " files without a valid 'output_dir' render option"
tables_python = _collections.OrderedDict()
for i, table in enumerate(self.tables):
if isinstance(table, NotApplicable): continue
tableDivID = tableID + "_%d" % i
if i in overrideIDs: tableDivID = overrideIDs[i]
if switched_item_mode == "inline":
table_dict = table.render("python", output_dir=None)
tables_python[tableDivID] = table_dict['python']
elif switched_item_mode == "separate files":
outputFilename = _os.path.join(str(output_dir), "%s.pkl" % tableDivID)
table_dict = table.render("python", output_dir=output_dir)
#( setting output_dir generates separate files for plots in table )
table_dict['python'].to_pickle(outputFilename) # a DataFrame
tables_python[tableDivID] = "df_%s = pd.read_pickle('%s')" \
% (tableDivID, outputFilename)
else:
raise ValueError("Invalid `switched_item_mode` render option: %s" %
switched_item_mode)
return {'python': tables_python}
else:
assert(len(self.tables) == 1), \
"Can only render %s format for a non-switched table" % typ
return {typ: self.tables[0].render(typ)}
def saveas(self, filename, index=None, verbosity=0):
"""
Saves this workspace table object to a file.
The type of file that is saved is determined automatically by the
extension of `filename`. Recognized extensions are `pdf` (PDF),
`tex` (LaTeX), `pkl` (Python pickle) and `html` (HTML). Since this
object may contain different instances of its data based on switch
        positions, when there are multiple instances the user must specify
the `index` argument to disambiguate.
Parameters
----------
filename : str
The destination filename. Its extension determines what type
of file is saved.
index : int, optional
An absolute index into the list of different switched "versions"
of this object's data. In most cases, the object being saved
            doesn't depend on any switchboards and has only a single "version",
            in which case this can be left as the default.
verbosity : int, optional
Controls the level of detail printed to stdout.
Returns
-------
None
"""
N = len(self.tables)
if filename.endswith(".html"):
if index is None:
if N == 1:
index = 0
else:
raise ValueError("Must supply `index` argument for a non-trivially-switched WorkspaceTable")
saved_switchposmap = self.switchpos_map
saved_switchboards = self.switchboards
saved_switchinds = self.sbSwitchIndices
#Temporarily pretend we don't depend on any switchboards and
# by default display the user-specified index
self.switchboards = []
self.sbSwitchIndices = []
self.switchpos_map = {(): index}
qtys = {'title': _os.path.splitext(_os.path.basename(str(filename)))[0],
'singleItem': self}
_merge.merge_jinja_template(qtys, filename, templateName="standalone.html",
verbosity=verbosity)
self.switchpos_map = saved_switchposmap
self.switchboards = saved_switchboards
self.sbSwitchIndices = saved_switchinds
elif filename.endswith(".pkl"):
if index is None and N == 1: index = 0
overrides = {i: "index%d" % i for i in range(N)}
self.set_render_options(switched_item_mode="inline",
switched_item_id_overrides=overrides)
render_out = self.render("python")
if index is not None: # just pickle a single element
to_pickle = render_out['python']['index%d' % index]
else: # pickle dictionary of all indices
to_pickle = render_out['python']
with open(str(filename), 'wb') as f:
_pickle.dump(to_pickle, f)
else:
if index is None:
if N == 1:
index = 0
else:
raise ValueError("Must supply `index` argument for a non-trivially-switched WorkspaceTable")
output_dir = _os.path.dirname(str(filename))
filebase, ext = _os.path.splitext(_os.path.basename(str(filename)))
tempDir = _os.path.join(str(output_dir), "%s_temp" % filebase)
if not _os.path.exists(tempDir): _os.mkdir(tempDir)
self.set_render_options(switched_item_mode="separate files",
switched_item_id_overrides={index: filebase},
output_dir=tempDir)
if ext == ".tex":
self.set_render_options(render_includes=False,
leave_includes_src=True)
elif ext == ".pdf":
self.set_render_options(render_includes=True,
leave_includes_src=False)
else:
raise ValueError("Unknown file type for %s" % filename)
self.render("latex") # renders everything in temp dir
_os.rename(_os.path.join(str(tempDir), "%s%s" % (filebase, ext)),
_os.path.join(str(output_dir), "%s%s" % (filebase, ext)))
#remove all other files
_shutil.rmtree(tempDir)
def _form_table_js(self, tableID, table_html, table_plot_handlers,
switchboard_init_js):
resizable = self.options.get('resizable', True)
autosize = self.options.get('autosize', 'none')
create_table_plots = bool(table_plot_handlers is not None)
queue_math_render = bool(table_html and '$' in table_html
and self.options.get('render_math', True))
add_autosize_handler = bool(switchboard_init_js is not None)
#only add ws-table-wide autosize handler when initializing the table's switchboard (once
# per workspace table)
content = ""
# put plot handlers *above* switchboard init JS
if table_plot_handlers: content += table_plot_handlers
if switchboard_init_js: content += switchboard_init_js
#Table initialization javascript: this will either be within the math-rendering (queued) function
# (if '$' in ret['html']) or else at the *end* of the ready handler (if no math needed rendering).
init_table_js = ''
if create_table_plots and resizable: # make a resizable widget on *entire* plot
# (will only act on first call, but wait until first plots are created)
init_table_js += ' make_wstable_resizable("{tableID}");\n'.format(tableID=tableID)
if add_autosize_handler and autosize == "continual":
init_table_js += ' make_wsobj_autosize("{tableID}");\n'.format(tableID=tableID)
if create_table_plots:
init_table_js += ' trigger_wstable_plot_creation("{tableID}",{initautosize});\n'.format(
tableID=tableID, initautosize=str(autosize in ("initial", "continual")).lower())
if queue_math_render:
# then there is math text that needs rendering,
# so queue this, *then* trigger plot creation
content += (' plotman.enqueue(function() {{ \n'
' renderMathInElement(document.getElementById("{tableID}"), {{ delimiters: [\n'
' {{left: "$$", right: "$$", display: true}},\n'
' {{left: "$", right: "$", display: false}},\n'
' ] }} );\n').format(tableID=tableID)
content += init_table_js
content += ' }}, "Rendering math in {tableID}" );\n'.format(tableID=tableID) # end enqueue
else:
#Note: this MUST be below plot handler init, when it triggers plot creation
content += init_table_js
return self._create_onready_handler(content, tableID)
class WorkspacePlot(WorkspaceOutput):
"""
Encapsulates a plot within a `Workspace` context.
A base class which provides the logic required to take a
single plot.ly figure-generating function and make it into a
    legitimate `WorkspaceOutput` object for use within workspaces.
"""
def __init__(self, ws, fn, *args):
"""
        Create a new WorkspacePlot. Usually not called directly.
Parameters
----------
ws : Workspace
The workspace containing the new object.
fn : function
            A plotly-figure-creating function.
args : various
The arguments to `fn`.
"""
super(WorkspacePlot, self).__init__(ws)
'''
# LSaldyt: removed plotfn for easier pickling? It doesn't seem to be used anywhere
self.plotfn = fn
self.initargs = args
self.figs, self.switchboards, self.sbSwitchIndices, self.switchpos_map = \
self.ws.switchedCompute(self.plotfn, *self.initargs)
'''
self.initargs = args
self.figs, self.switchboards, self.sbSwitchIndices, self.switchpos_map = \
self.ws.switchedCompute(fn, *self.initargs)
def render(self, typ="html", ID=None):
"""
        Renders this plot into the specified format, specifically for
embedding it within a larger document.
Parameters
----------
typ : str
The format to render as. Currently `"html"`, `"latex"`
and `"python"` are supported.
ID : str, optional
A base ID to use when rendering. If None, the object's
            persistent ID is used, which is usually what you want.
Returns
-------
dict
A dictionary of strings giving the HTML and Javascript portions
of the embeddable output. Keys are `"html"` and `"js"`.
"""
resizable = self.options.get('resizable', True)
valign = self.options.get('valign', 'top')
overrideIDs = self.options.get('switched_item_id_overrides', {})
switched_item_mode = self.options.get('switched_item_mode', 'inline')
output_dir = self.options.get('output_dir', None)
if valign == 'top':
abswrap_cls = 'abswrap'
relwrap_cls = 'relwrap'
elif valign == 'bottom':
abswrap_cls = 'bot_abswrap'
relwrap_cls = 'bot_relwrap'
else:
raise ValueError("Invalid 'valign' value: %s" % valign)
if ID is None: ID = self.ID
plotID = "plot_" + ID
if typ == "html":
#def getPlotlyDivID(html):
# #could make this more robust using lxml or something later...
# iStart = html.index('div id="')
# iEnd = html.index('"', iStart+8)
# return html[iStart+8:iEnd]
##pick "master" plot, whose resizing dictates the resizing of other plots,
## as the largest-height plot.
#iMaster = None; maxH = 0;
#for i, fig in enumerate(self.figs):
# if isinstance(fig, NotApplicable):
# continue
# NOTE: master=None below, but it's unclear whether this will later be needed.
# "handlers only" mode is when plot is embedded in something
# larger (e.g. a table) that takes responsibility for putting
# the JS returned into an on-ready handler and triggering the
# initialization and creation of the plots.
handlersOnly = bool(resizable == "handlers only")
divHTML = []
divIDs = []
divJS = []
for i, fig in enumerate(self.figs):
plotDivID = plotID + "_%d" % i
if i in overrideIDs: plotDivID = overrideIDs[i]
if isinstance(fig, NotApplicable):
fig_dict = fig.render(typ, plotDivID)
else:
#use auto-sizing (fluid layout)
fig.plotlyfig.update_layout(template=DEFAULT_PLOTLY_TEMPLATE)
fig_dict = _plotly_ex.plot_ex(
fig.plotlyfig, show_link=False, resizable=resizable,
lock_aspect_ratio=True, master=True, # bool(i==iMaster)
click_to_display=self.options['click_to_display'],
link_to=self.options['link_to'], link_to_id=plotDivID,
rel_figure_dir=_os.path.basename(
str(output_dir)) if not (output_dir in (None, False)) else None)
if switched_item_mode == 'separate files':
assert(handlersOnly is False) # doesn't make sense to put only handlers in a separate file
divJS.append(self._form_plot_js(plotDivID, fig_dict['js'], None))
else:
divJS.append(fig_dict['js'])
divIDs.append(plotDivID)
divHTML.append("<div class='%s'>%s</div>" % (abswrap_cls, fig_dict['html']))
if switched_item_mode == 'inline':
base = self._render_html(plotID, divHTML, None, divIDs, self.switchpos_map,
self.switchboards, self.sbSwitchIndices, [relwrap_cls])
# Don't link_to b/c plots will all have download buttons
if handlersOnly:
js = '\n'.join(divJS) + base['js'] # insert plot handlers above switchboard init JS
else:
js = self._form_plot_js(plotID, '\n'.join(divJS), base['js'])
elif switched_item_mode == 'separate files':
base = self._render_html(plotID, divHTML, divJS, divIDs, self.switchpos_map,
self.switchboards, self.sbSwitchIndices, [relwrap_cls],
None, self.options['output_dir'], embed_figures=False)
js = self._form_plot_js(plotID, None, base['js'])
else:
raise ValueError("Invalid `switched_item_mode` render option: %s" %
switched_item_mode)
return {'html': base['html'], 'js': js}
elif typ == "latex":
assert('output_dir' in self.options and self.options['output_dir']), \
"Cannot render a plot as 'latex' without a valid " +\
"'output_dir' render option (regardless of switched_item_mode)"
if switched_item_mode not in ('inline', 'separate files'):
raise ValueError("Invalid `switched_item_mode` render option: %s" %
switched_item_mode) # for uniformity with other cases,
# even though it's not used.
from .mpl_colormaps import plotly_to_matplotlib as _plotly_to_matplotlib
output_dir = self.options['output_dir']
maxW, maxH = self.options.get('page_size', (6.5, 8.0))
includes = []
for i, fig in enumerate(self.figs):
if isinstance(fig, NotApplicable): continue
plotDivID = plotID + "_%d" % i
if i in overrideIDs: plotDivID = overrideIDs[i]
if self.options.get('render_includes', True):
filename = _os.path.join(str(output_dir), plotDivID + ".pdf")
_plotly_to_matplotlib(fig, filename)
W, H = maxW, maxH
if 'mpl_fig_size' in fig.metadata: # added by plotly_to_matplotlib call above
figW, figH = fig.metadata['mpl_fig_size'] # gives the "normal size" of the figure
W = min(W, figW)
                        H = min(H, figH)
del fig.metadata['mpl_fig_size']
includes.append("\\includegraphics[width=%.2fin,height=%.2fin,keepaspectratio]{%s}" %
(W, H, filename))
else:
includes.append("%% Didn't render plotID=%s" % plotDivID)
return {'latex': '\n'.join(includes)}
elif typ == "python":
if switched_item_mode == 'separate files':
assert(output_dir), "Cannot render plots as 'python' in separate" \
+ " files without a valid 'output_dir' render option"
plots_python = _collections.OrderedDict()
for i, fig in enumerate(self.figs):
plotDivID = plotID + "_%d" % i
if i in overrideIDs: plotDivID = overrideIDs[i]
if isinstance(fig, NotApplicable): continue
if fig.pythonvalue is not None:
data = {'value': fig.pythonvalue}
if "pythonErrorBar" in fig.metadata:
data['errorbar'] = fig.metadata['pythonErrorBar']
else:
data = {'value': "Opaque Figure"}
if switched_item_mode == "inline":
plots_python[plotDivID] = data
elif switched_item_mode == "separate files":
outputFilename = _os.path.join(str(output_dir), "%s.pkl" % plotDivID)
with open(outputFilename, "wb") as fPkl:
_pickle.dump(data, fPkl)
plots_python[plotDivID] = "data_%s = pickle.load(open('%s','rb'))" \
% (plotDivID, outputFilename)
else:
raise ValueError("Invalid `switched_item_mode` render option: %s" %
switched_item_mode)
return {'python': plots_python}
else:
raise NotImplementedError("Invalid rendering format: %s" % typ)
def saveas(self, filename, index=None, verbosity=0):
"""
Saves this workspace plot object to a file.
The type of file that is saved is determined automatically by the
extension of `filename`. Recognized extensions are `pdf` (PDF),
`pkl` (Python pickle) and `html` (HTML). Since this object may
contain different instances of its data based on switch positions,
        when there are multiple instances the user must specify the `index`
argument to disambiguate.
Parameters
----------
filename : str
The destination filename. Its extension determines what type
of file is saved.
index : int, optional
An absolute index into the list of different switched "versions"
of this object's data. In most cases, the object being saved
            doesn't depend on any switchboards and has only a single "version",
            in which case this can be left as the default.
verbosity : int, optional
Controls the level of detail printed to stdout.
Returns
-------
None
"""
N = len(self.figs)
if filename.endswith(".html"):
#Note: Same as WorkspaceTable except for N
if index is None:
if N == 1:
index = 0
else:
raise ValueError("Must supply `index` argument for a non-trivially-switched WorkspacePlot")
saved_switchposmap = self.switchpos_map
saved_switchboards = self.switchboards
saved_switchinds = self.sbSwitchIndices
#Temporarily pretend we don't depend on any switchboards and
# by default display the user-specified index
self.switchboards = []
self.sbSwitchIndices = []
self.switchpos_map = {(): index}
qtys = {'title': _os.path.splitext(_os.path.basename(str(filename)))[0],
'singleItem': self}
_merge.merge_jinja_template(qtys, filename, templateName="standalone.html",
verbosity=verbosity)
self.switchpos_map = saved_switchposmap
self.switchboards = saved_switchboards
self.sbSwitchIndices = saved_switchinds
elif filename.endswith(".pkl"):
#Note: Same as WorkspaceTable except for N
if index is None and N == 1: index = 0
overrides = {i: "index%d" % i for i in range(N)}
self.set_render_options(switched_item_mode="inline",
switched_item_id_overrides=overrides)
render_out = self.render("python")
if index is not None: # just pickle a single element
to_pickle = render_out['python']['index%d' % index]
else: # pickle dictionary of all indices
to_pickle = render_out['python']
with open(filename, 'wb') as f:
_pickle.dump(to_pickle, f)
elif filename.endswith(".tex"):
raise ValueError("Cannot save a WorkspacePlot as LaTeX - try PDF.")
elif filename.endswith(".pdf"):
from .mpl_colormaps import plotly_to_matplotlib as _plotly_to_matplotlib
if index is None:
if N == 1:
index = 0
else:
raise ValueError("Must supply `index` argument for a non-trivially-switched WorkspacePlot")
_plotly_to_matplotlib(self.figs[index], filename)
else:
raise ValueError("Unknown file type for %s" % filename)
def _form_plot_js(self, plotID, plot_handlers, switchboard_init_js):
resizable = self.options.get('resizable', True)
autosize = self.options.get('autosize', 'none')
create_plots = bool(plot_handlers is not None)
add_autosize_handler = bool(switchboard_init_js is not None)
#only add ws-plot-wide autosize handler when initializing the plot's switchboard (once
# per workspace table)
content = ""
#put plot handlers above switchboard init JS
if plot_handlers: content += plot_handlers
if switchboard_init_js: content += switchboard_init_js
if resizable: # make a resizable widget
content += 'make_wsplot_resizable("{plotID}");\n'.format(plotID=plotID)
if add_autosize_handler and autosize == "continual": # add window resize handler
content += 'make_wsobj_autosize("{plotID}");\n'.format(plotID=plotID)
if create_plots:
#trigger init & create of plots
content += 'trigger_wsplot_plot_creation("{plotID}",{initautosize});\n'.format(
plotID=plotID, initautosize=str(autosize in ("initial", "continual")).lower())
return self._create_onready_handler(content, plotID)
class WorkspaceText(WorkspaceOutput):
"""
Encapsulates a block of text within a `Workspace` context.
A base class which provides the logic required to take a
single text-generating function and make it into a legitimate
    `WorkspaceOutput` object for use within workspaces.
"""
def __init__(self, ws, fn, *args):
"""
Create a new WorkspaceText object. Usually not called directly.
Parameters
----------
ws : Workspace
The workspace containing the new object.
fn : function
A text-creating function.
args : various
The arguments to `fn`.
"""
super(WorkspaceText, self).__init__(ws)
self.textfn = fn
self.initargs = args
self.texts, self.switchboards, self.sbSwitchIndices, self.switchpos_map = \
self.ws.switchedCompute(self.textfn, *self.initargs)
def render(self, typ):
"""
        Renders this text block into the specified format, specifically for
embedding it within a larger document.
Parameters
----------
typ : str
The format to render as. Currently `"html"`, `"latex"`
and `"python"` are supported.
Returns
-------
dict
A dictionary of strings giving the different portions of the
embeddable output. For `"html"`, keys are `"html"` and `"js"`.
For `"latex"`, there is a single key `"latex"`.
"""
switched_item_mode = self.options.get('switched_item_mode', 'inline')
overrideIDs = self.options.get('switched_item_id_overrides', {})
output_dir = self.options.get('output_dir', None)
ID = self.ID
textID = "text_" + ID
if typ == "html":
divHTML = []
divIDs = []
divJS = []
for i, text in enumerate(self.texts):
textDivID = textID + "_%d" % i
if i in overrideIDs: textDivID = overrideIDs[i]
if isinstance(text, NotApplicable):
text_dict = text.render("html", textDivID)
else:
text_dict = text.render("html", textDivID)
if switched_item_mode == 'separate files':
divJS.append(self._form_text_js(textDivID, text_dict['html'], None))
#else: divJS is unused
divHTML.append(text_dict['html'])
divIDs.append(textDivID)
if switched_item_mode == 'inline':
base = self._render_html(textID, divHTML, None, divIDs, self.switchpos_map,
self.switchboards, self.sbSwitchIndices, None,
self.options.get('link_to', None), output_dir) # no JS yet...
js = self._form_text_js(textID, base['html'], base['js'])
# creates JS for everything: plot creation, switchboard init, autosize
elif switched_item_mode == 'separate files':
base = self._render_html(textID, divHTML, divJS, divIDs, self.switchpos_map,
self.switchboards, self.sbSwitchIndices, None,
self.options.get('link_to', None), output_dir, embed_figures=False)
js = self._form_text_js(textID, None, base['js'])
else:
raise ValueError("Invalid `switched_item_mode` render option: %s" %
switched_item_mode)
return {'html': base['html'], 'js': js}
elif typ == "latex":
leave_src = self.options.get('leave_includes_src', False)
render_includes = self.options.get('render_includes', True)
W, H = self.options.get('page_size', (6.5, 8.0))
printer = _objs.VerbosityPrinter(1) # TEMP - add verbosity arg?
#Note: in both cases output_dir needs to be the *relative* path
# between the current directory and the output directory if
# \includegraphics statements are to work. If this isn't needed
# (e.g. if just the standalone files are needed) then output_dir
# can be an absolute path as well.
cwd = _os.getcwd()
latex_list = []
for i, text in enumerate(self.texts):
textDivID = textID + "_%d" % i
if i in overrideIDs: textDivID = overrideIDs[i]
if isinstance(text, NotApplicable): continue
text_dict = text.render("latex")
if switched_item_mode == 'inline':
latex_list.append(text_dict['latex'])
elif switched_item_mode == 'separate files':
if render_includes or leave_src:
d = {'toLatex': text_dict['latex']}
_merge.merge_latex_template(d, "standalone.tex",
_os.path.join(str(output_dir), "%s.tex" % textDivID))
if render_includes:
render_dir = output_dir
assert('latex_cmd' in self.options and self.options['latex_cmd']), \
"Cannot render latex include files without a valid 'latex_cmd' render option"
try:
_os.chdir(render_dir)
latex_cmd = self.options['latex_cmd']
latex_call = [latex_cmd] + self.options.get('latex_flags', []) \
+ ["%s.tex" % textDivID]
stdout, stderr, returncode = _merge.process_call(latex_call)
_merge.evaluate_call(latex_call, stdout, stderr, returncode, printer)
if not _os.path.isfile("%s.pdf" % textDivID):
raise Exception("File %s.pdf was not created by %s"
% (textDivID, latex_cmd))
if not leave_src: _os.remove("%s.tex" % textDivID)
_os.remove("%s.log" % textDivID)
_os.remove("%s.aux" % textDivID)
except _subprocess.CalledProcessError as e:
printer.error("%s returned code %d " % (latex_cmd, e.returncode)
+ "trying to render standalone %s.tex. " % textDivID
+ "Check %s.log to see details." % textDivID)
finally:
_os.chdir(cwd)
latex_list.append("\\includegraphics[width=%.2fin,height=%.2fin,keepaspectratio]{%s}" %
(W, H, _os.path.join(str(output_dir), "%s.pdf" % textDivID)))
elif leave_src:
latex_list.append("\\input{%s}" % _os.path.join(str(output_dir), "%s.tex" % textDivID))
else:
                        latex_list.append("%% Didn't generate anything for textID=%s" % textDivID)
else:
raise ValueError("Invalid `switched_item_mode` render option: %s" %
switched_item_mode)
return {'latex': "\n".join(latex_list)}
elif typ == "python":
if switched_item_mode == 'separate files':
assert(output_dir), "Cannot render texts as 'python' in separate" \
+ " files without a valid 'output_dir' render option"
texts_python = _collections.OrderedDict()
for i, text in enumerate(self.texts):
if isinstance(text, NotApplicable): continue
textDivID = textID + "_%d" % i
if i in overrideIDs: textDivID = overrideIDs[i]
text_dict = text.render("python")
if switched_item_mode == "inline":
texts_python[textDivID] = text_dict['python']
elif switched_item_mode == "separate files":
outputFilename = _os.path.join(str(output_dir), "%s.pkl" % textDivID)
with open(outputFilename, 'wb') as f:
_pickle.dump(text_dict['python'], f)
texts_python[textDivID] = "text_%s = pickle.load(open('%s','rb'))" \
% (textDivID, outputFilename)
else:
raise ValueError("Invalid `switched_item_mode` render option: %s" %
switched_item_mode)
return {'python': texts_python}
else:
assert(len(self.texts) == 1), \
"Can only render %s format for a non-switched text block" % typ
return {typ: self.texts[0].render(typ)}
def saveas(self, filename, index=None, verbosity=0):
"""
Saves this workspace text block object to a file.
The type of file that is saved is determined automatically by the
extension of `filename`. Recognized extensions are `pdf` (PDF),
`tex` (LaTeX), `pkl` (Python pickle) and `html` (HTML). Since this
object may contain different instances of its data based on switch
        positions, when there are multiple instances the user must specify
the `index` argument to disambiguate.
Parameters
----------
filename : str
The destination filename. Its extension determines what type
of file is saved.
index : int, optional
An absolute index into the list of different switched "versions"
of this object's data. In most cases, the object being saved
            doesn't depend on any switchboards and has only a single "version",
            in which case this can be left as the default.
verbosity : int, optional
Controls the level of detail printed to stdout.
Returns
-------
None
"""
N = len(self.texts)
if filename.endswith(".html"):
if index is None:
if N == 1:
index = 0
else:
raise ValueError("Must supply `index` argument for a non-trivially-switched WorkspaceText")
saved_switchposmap = self.switchpos_map
saved_switchboards = self.switchboards
saved_switchinds = self.sbSwitchIndices
#Temporarily pretend we don't depend on any switchboards and
# by default display the user-specified index
self.switchboards = []
self.sbSwitchIndices = []
self.switchpos_map = {(): index}
qtys = {'title': _os.path.splitext(_os.path.basename(str(filename)))[0],
'singleItem': self}
_merge.merge_jinja_template(qtys, filename, templateName="standalone.html",
verbosity=verbosity)
self.switchpos_map = saved_switchposmap
self.switchboards = saved_switchboards
self.sbSwitchIndices = saved_switchinds
elif filename.endswith(".pkl"):
if index is None and N == 1: index = 0
overrides = {i: "index%d" % i for i in range(N)}
self.set_render_options(switched_item_mode="inline",
switched_item_id_overrides=overrides)
render_out = self.render("python")
if index is not None: # just pickle a single element
to_pickle = render_out['python']['index%d' % index]
else: # pickle dictionary of all indices
to_pickle = render_out['python']
with open(filename, 'wb') as f:
_pickle.dump(to_pickle, f)
else:
if index is None:
if N == 1:
index = 0
else:
raise ValueError("Must supply `index` argument for a non-trivially-switched WorkspaceText")
output_dir = _os.path.dirname(filename)
filebase, ext = _os.path.splitext(_os.path.basename(filename))
tempDir = _os.path.join(str(output_dir), "%s_temp" % filebase)
if not _os.path.exists(tempDir): _os.mkdir(tempDir)
self.set_render_options(switched_item_mode="separate files",
switched_item_id_overrides={index: filebase},
output_dir=tempDir)
if ext == ".tex":
self.set_render_options(render_includes=False,
leave_includes_src=True)
elif ext == ".pdf":
self.set_render_options(render_includes=True,
leave_includes_src=False)
else:
raise ValueError("Unknown file type for %s" % filename)
self.render("latex") # renders everything in temp dir
_os.rename(_os.path.join(str(tempDir), "%s%s" % (filebase, ext)),
_os.path.join(str(output_dir), "%s%s" % (filebase, ext)))
#remove all other files
_shutil.rmtree(tempDir)
def _form_text_js(self, textID, text_html, switchboard_init_js):
content = ""
if switchboard_init_js: content += switchboard_init_js
queue_math_render = bool(text_html and '$' in text_html
and self.options.get('render_math', True))
if text_html is not None:
init_text_js = (
'el = $("#{textid}");\n'
'if(el.hasClass("pygsti-wsoutput-group")) {{\n'
' el.children("div.single_switched_value").each( function(i,el){{\n'
' CollapsibleLists.applyTo( $(el).find("ul").first()[0] );\n'
' }});\n'
'}} else if(el.hasClass("single_switched_value")){{\n'
' CollapsibleLists.applyTo(el[0]);\n'
'}}\n'
'caption = el.closest("figure").children("figcaption:first");\n'
'caption.css("width", Math.round(el.width()*0.9) + "px");\n'
).format(textid=textID)
else:
init_text_js = "" # no per-div init needed
if queue_math_render:
# then there is math text that needs rendering,
# so queue this, *then* trigger plot creation
content += (' plotman.enqueue(function() {{ \n'
' renderMathInElement(document.getElementById("{textID}"), {{ delimiters: [\n'
' {{left: "$$", right: "$$", display: true}},\n'
' {{left: "$", right: "$", display: false}},\n'
' ] }} );\n').format(textID=textID)
content += init_text_js
content += ' }}, "Rendering math in {textID}" );\n'.format(textID=textID) # end enqueue
else:
content += init_text_js
return self._create_onready_handler(content, textID)
| 44.42378 | 119 | 0.552894 |
7940056a7840592e5423b5bbd0ec6d2e7ff7c9ae | 233 | py | Python | common/models.py | StarrFnl/CS_django_2122 | 441adb6df7fba17ccde72f9cf0b7803f8aa2621b | [
"MIT"
] | null | null | null | common/models.py | StarrFnl/CS_django_2122 | 441adb6df7fba17ccde72f9cf0b7803f8aa2621b | [
"MIT"
] | 1 | 2022-01-26T08:54:44.000Z | 2022-01-26T08:54:44.000Z | common/models.py | StarrFnl/CS_django_2122 | 441adb6df7fba17ccde72f9cf0b7803f8aa2621b | [
"MIT"
] | 3 | 2022-01-20T14:55:02.000Z | 2022-01-26T11:16:35.000Z | from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class CustomUser(AbstractUser):
id = models.AutoField(primary_key=True)
nickname = models.CharField(max_length=100)
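# Illustrative note (not part of the original file): a custom user model like this is
# normally activated in the project's settings.py, e.g.
#
#     AUTH_USER_MODEL = 'common.CustomUser'
#
# where 'common' is assumed to be this app's label.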
| 29.125 | 51 | 0.7897 |
7940074a7371928505748d90b9c862ee5b2d225e | 82 | py | Python | haystack/nodes/question_generator/__init__.py | mapapa/haystack | 79fdda8a7cf393d774803608a4874f2a6e63cf6f | [
"Apache-2.0"
] | 7 | 2022-01-22T18:58:54.000Z | 2022-03-18T17:06:35.000Z | haystack/nodes/question_generator/__init__.py | mapapa/haystack | 79fdda8a7cf393d774803608a4874f2a6e63cf6f | [
"Apache-2.0"
] | 17 | 2021-12-08T18:00:58.000Z | 2021-12-28T14:03:27.000Z | haystack/nodes/question_generator/__init__.py | mapapa/haystack | 79fdda8a7cf393d774803608a4874f2a6e63cf6f | [
"Apache-2.0"
] | 1 | 2022-01-05T15:24:36.000Z | 2022-01-05T15:24:36.000Z | from haystack.nodes.question_generator.question_generator import QuestionGenerator | 82 | 82 | 0.926829 |
794007ba40c84745eae2f6ada825346f62738407 | 2,696 | py | Python | git/core.py | trobjo/sublime-text-git | 5bd34d60ca5e85defde67cd28532710d734eb367 | [
"MIT"
] | null | null | null | git/core.py | trobjo/sublime-text-git | 5bd34d60ca5e85defde67cd28532710d734eb367 | [
"MIT"
] | null | null | null | git/core.py | trobjo/sublime-text-git | 5bd34d60ca5e85defde67cd28532710d734eb367 | [
"MIT"
] | null | null | null | import sublime
import sublime_plugin
from . import GitWindowCommand, GitTextCommand
class GitCustomCommand(GitWindowCommand):
may_change_files = True
def run(self):
self.get_window().show_input_panel(
"Git command", "",
self.on_input, None, None
)
def on_input(self, command):
command = str(command) # avoiding unicode
if command.strip() == "":
self.panel("No git command provided")
return
import shlex
command_splitted = ['git'] + shlex.split(command)
print(command_splitted)
self.run_command(command_splitted)
class GitRawCommand(GitWindowCommand):
may_change_files = True
def run(self, **args):
self.command = str(args.get('command', ''))
show_in = str(args.get('show_in', 'pane_below'))
if self.command.strip() == "":
self.panel("No git command provided")
return
import shlex
command_split = shlex.split(self.command)
if args.get('append_current_file', False) and self.active_file_name():
command_split.extend(('--', self.active_file_name()))
print(command_split)
self.may_change_files = bool(args.get('may_change_files', True))
if show_in == 'pane_below':
self.run_command(command_split)
elif show_in == 'quick_panel':
self.run_command(command_split, self.show_in_quick_panel)
elif show_in == 'new_tab':
self.run_command(command_split, self.show_in_new_tab)
elif show_in == 'suppress':
self.run_command(command_split, self.do_nothing)
view = self.active_view()
view.run_command('git_branch_status')
def show_in_quick_panel(self, result):
self.results = list(result.rstrip().split('\n'))
if len(self.results):
self.quick_panel(
self.results,
self.do_nothing, sublime.MONOSPACE_FONT
)
else:
sublime.status_message("Nothing to show")
def do_nothing(self, picked):
return
def show_in_new_tab(self, result):
msg = self.window.new_file()
msg.set_scratch(True)
msg.set_name(self.command)
self._output_to_view(msg, result)
msg.sel().clear()
msg.sel().add(sublime.Region(0, 0))
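    # Illustrative note (not part of the original file): by Sublime Text's naming
    # convention this class is exposed as the "git_raw" command, so a keybinding or
    # command palette entry might invoke it with arguments such as
    #
    #     { "command": "git_raw", "args": { "command": "git status", "show_in": "new_tab" } }
    #
    # The specific "git status" invocation is only an example.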
# called by GitWindowCommand
class GitScratchOutputCommand(sublime_plugin.TextCommand):
def run(self, edit, output='', output_file=None, clear=False):
if clear:
region = sublime.Region(0, self.view.size())
self.view.erase(edit, region)
self.view.insert(edit, 0, output)
| 30.636364 | 78 | 0.616098 |
794008123451312f21d6e94c41014c0b94704ef0 | 3,162 | py | Python | pyocd/probe/pydapaccess/interface/common.py | vince-zeng/pyOCD | c9e7bbaee81c2c94b2d8f05a70b6f341457fdae1 | [
"Apache-2.0"
] | 3 | 2019-06-05T01:32:06.000Z | 2020-05-20T08:55:46.000Z | pyocd/probe/pydapaccess/interface/common.py | vince-zeng/pyOCD | c9e7bbaee81c2c94b2d8f05a70b6f341457fdae1 | [
"Apache-2.0"
] | 1 | 2019-07-05T10:13:09.000Z | 2019-07-05T10:51:43.000Z | pyocd/probe/pydapaccess/interface/common.py | vince-zeng/pyOCD | c9e7bbaee81c2c94b2d8f05a70b6f341457fdae1 | [
"Apache-2.0"
] | 1 | 2019-01-21T03:01:53.000Z | 2019-01-21T03:01:53.000Z | # pyOCD debugger
# Copyright (c) 2019 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import usb.util
# USB class codes.
USB_CLASS_COMPOSITE = 0x00
USB_CLASS_COMMUNICATIONS = 0x02
USB_CLASS_HID = 0x03
USB_CLASS_MISCELLANEOUS = 0xef
USB_CLASS_VENDOR_SPECIFIC = 0xff
CMSIS_DAP_USB_CLASSES = [
USB_CLASS_COMPOSITE,
USB_CLASS_MISCELLANEOUS,
]
CMSIS_DAP_HID_USAGE_PAGE = 0xff00
# Various known USB VID/PID values.
ARM_DAPLINK_ID = (0x0d28, 0x0204)
KEIL_ULINKPLUS_ID = (0xc251, 0x2750)
NXP_LPCLINK2_ID = (0x1fc9, 0x0090)
## List of VID/PID pairs for known CMSIS-DAP USB devices.
KNOWN_CMSIS_DAP_IDS = [
ARM_DAPLINK_ID,
KEIL_ULINKPLUS_ID,
NXP_LPCLINK2_ID,
]
def is_known_cmsis_dap_vid_pid(vid, pid):
"""! @brief Test whether a VID/PID pair belong to a known CMSIS-DAP device."""
return (vid, pid) in KNOWN_CMSIS_DAP_IDS
def filter_device_by_class(vid, pid, device_class):
"""! @brief Test whether the device should be ignored by comparing bDeviceClass.
This function checks the device's bDeviceClass to determine whether it is likely to be
a CMSIS-DAP device. It uses the vid and pid for device-specific quirks.
@retval True Skip the device.
@retval False The device is valid.
"""
# Check valid classes for CMSIS-DAP firmware.
if device_class in CMSIS_DAP_USB_CLASSES:
return False
# Old "Mbed CMSIS-DAP" firmware has an incorrect bDeviceClass.
if ((vid, pid) == ARM_DAPLINK_ID) and (device_class == USB_CLASS_COMMUNICATIONS):
return False
# Any other class indicates the device is not CMSIS-DAP.
return True
def filter_device_by_usage_page(vid, pid, usage_page):
"""! @brief Test whether the device should be ignored by comparing the HID usage page.
This function performs device-specific tests to determine whether the device is a CMSIS-DAP
interface. The only current test is for the NXP LPC-Link2, which has extra HID interfaces with
usage pages other than 0xff00. No generic tests are done regardless of VID/PID, because it is
not clear whether all CMSIS-DAP devices have the usage page set to the same value.
@retval True Skip the device.
@retval False The device is valid.
"""
return ((vid, pid) == NXP_LPCLINK2_ID) \
and (usage_page != CMSIS_DAP_HID_USAGE_PAGE)
def check_ep(interface, ep_index, ep_dir, ep_type):
"""! @brief Tests an endpoint type and direction."""
ep = interface[ep_index]
return (usb.util.endpoint_direction(ep.bEndpointAddress) == ep_dir) \
and (usb.util.endpoint_type(ep.bmAttributes) == ep_type)
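# Illustrative sketch (not part of the original file): check_ep() is typically used when
# probing an interface for CMSIS-DAP v2 style bulk endpoints, e.g.
#
#     has_out = check_ep(intf, 0, usb.util.ENDPOINT_OUT, usb.util.ENDPOINT_TYPE_BULK)
#     has_in = check_ep(intf, 1, usb.util.ENDPOINT_IN, usb.util.ENDPOINT_TYPE_BULK)
#
# where `intf` is assumed to be a pyusb Interface object obtained during device discovery.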
| 36.767442 | 98 | 0.734662 |
7940081cefc65ecaac0dc98bca7741f138e49dd3 | 1,418 | py | Python | setup.py | Spratiher9/Netplot | 4c07b6b86450d0fc7dd36f08e0c24ca67dd5330e | [
"MIT"
] | 19 | 2021-09-22T06:25:24.000Z | 2021-11-17T19:18:37.000Z | setup.py | Spratiher9/Netplot | 4c07b6b86450d0fc7dd36f08e0c24ca67dd5330e | [
"MIT"
] | null | null | null | setup.py | Spratiher9/Netplot | 4c07b6b86450d0fc7dd36f08e0c24ca67dd5330e | [
"MIT"
] | 2 | 2021-10-01T11:57:06.000Z | 2021-10-02T13:08:02.000Z | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="netplot", # This is the name of the package
version="0.1.2", # The initial release version
author="Souvik Pratiher", # Full name of the author
description="Ultralight 3D renderer of neural network architecture for TF/Keras Models",
long_description=long_description, # Long description read from the the readme file
long_description_content_type="text/markdown",
url = "https://github.com/Spratiher9/Netplot",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
], # Information to filter the project on PyPi website
python_requires='>=3.6', # Minimum version requirement of the package
py_modules=["netplot"], # Name of the python package
package_dir={'':'src'}, # Directory of the source code of the package
install_requires=[
"cycler==0.10.0",
"kiwisolver==1.3.2",
"matplotlib==3.4.3",
"numpy==1.21.2",
"Pillow==8.3.2",
"pyparsing==2.4.7",
"python-dateutil==2.8.2",
"six==1.16.0"
] # Install other dependencies if any
)
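# Illustrative note (not part of the original file): with this setup.py in place the
# package is typically built and published with commands along the lines of
#
#     python setup.py sdist bdist_wheel
#     twine upload dist/*
#
# (the exact release workflow is an assumption, not specified by this file).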
| 40.514286 | 95 | 0.575458 |
794008e1091212eea37900edf241ccde5bf1fd54 | 1,213 | py | Python | blankmodule.py | shyed2001/Python_Programming | 93ef958e3d8aa77f9191b550972235ce4fe4a6cb | [
"bzip2-1.0.6"
] | 2 | 2019-05-01T04:32:14.000Z | 2019-05-04T11:28:18.000Z | blankmodule.py | shyed2001/python-learning-basics | 93ef958e3d8aa77f9191b550972235ce4fe4a6cb | [
"bzip2-1.0.6"
] | null | null | null | blankmodule.py | shyed2001/python-learning-basics | 93ef958e3d8aa77f9191b550972235ce4fe4a6cb | [
"bzip2-1.0.6"
] | null | null | null | # Created: 01/05/2019
# Copyright: (c) User 2019
# Licence: <your licence>
#-------------------------------------------------------------------------------
def is_divisible(x, y):
""" Test if x is exactly divisible by y """
if x % y == 0:
#return True
print(True)
else:
#return False
print(False)
print(is_divisible(4, 2))
print(is_divisible(5, 2))
def is_divisible(x, y):
""" Test if x is exactly divisible by y """
if x % y == 0:
return "return ok2"
print("print ok2")
else:
return "return not ok2"
print("print not ok2")
print(is_divisible(4, 2))
print(is_divisible(5, 2))
def is_divisible(x, y):
b= x % y == 0
return b # Test if x is exactly divisible by y
if is_divisible(x, y) == True:
return "return ok 3"
print("print ok 3")
else:
return "return not ok 3"
print("print not ok 3")
print(is_divisible(4, 2))
print(is_divisible(5, 2))
def is_divisible(x, y):
b= x % y == 0
return b # Test if x is exactly divisible by y
print(is_divisible(4, 2))
print(is_divisible(5, 2))
def is_divisible(x, y):
return x % y == 0
print(is_divisible(4, 2))
print(is_divisible(5, 2))
| 23.784314 | 81 | 0.556472 |
7940092288d94a46b3433ff0445dbbeb528f9323 | 17,720 | py | Python | shift_detector/precalculations/text_metadata.py | hpi-bp1819-naumann/shift-detector | 5d081d05ec084021f11827aa3fd3e167854b2a2a | [
"Apache-2.0"
] | 3 | 2019-06-21T11:41:08.000Z | 2019-10-24T06:41:51.000Z | shift_detector/precalculations/text_metadata.py | hpi-bp1819-naumann/shift-detector | 5d081d05ec084021f11827aa3fd3e167854b2a2a | [
"Apache-2.0"
] | 63 | 2019-05-16T12:09:57.000Z | 2022-02-10T00:21:01.000Z | shift_detector/precalculations/text_metadata.py | hpi-bp1819-naumann/shift-detector | 5d081d05ec084021f11827aa3fd3e167854b2a2a | [
"Apache-2.0"
] | null | null | null | import logging as logger
import re
import regex
import unicodedata
from abc import abstractmethod
from collections import defaultdict
import pandas as pd
import nltk
# noinspection PyPackageRequirements
from iso639 import languages
from langdetect import detect, DetectorFactory
from nltk.corpus import stopwords
# noinspection PyPackageRequirements
from spellchecker import SpellChecker
from textstat import textstat
from langdetect.lang_detect_exception import LangDetectException
from shift_detector.precalculations.precalculation import Precalculation
from shift_detector.precalculations.text_precalculation import TokenizeIntoLowerWordsPrecalculation
from shift_detector.utils import ucb_list
from shift_detector.utils.column_management import ColumnType
from shift_detector.utils.text_metadata_utils import most_common_n_to_string_frequency, \
most_common_n_to_string_alphabetically, delimiters
class GenericTextMetadata(Precalculation):
def __eq__(self, other):
return isinstance(other, self.__class__)
def __hash__(self):
return hash(self.__class__)
def __lt__(self, other):
return self.metadata_name() < other.metadata_name()
def __le__(self, other):
return self.metadata_name() <= other.metadata_name()
def __gt__(self, other):
return self.metadata_name() > other.metadata_name()
def __ge__(self, other):
return self.metadata_name() >= other.metadata_name()
@staticmethod
@abstractmethod
def metadata_name() -> str:
raise NotImplementedError
@abstractmethod
def metadata_return_type(self) -> ColumnType:
raise NotImplementedError
@abstractmethod
def metadata_function(self, text):
raise NotImplementedError
def process(self, store):
metadata1 = pd.DataFrame()
metadata2 = pd.DataFrame()
df1, df2 = store[ColumnType.text]
columns = store.column_names(ColumnType.text)
for column in columns:
clean1 = df1[column].dropna()
clean2 = df2[column].dropna()
logger.info(self.metadata_name() + ' analysis for ' + column)
metadata1[column] = [self.metadata_function(text) for text in clean1]
metadata2[column] = [self.metadata_function(text) for text in clean2]
return metadata1, metadata2
class GenericTextMetadataWithTokenizing(GenericTextMetadata):
@staticmethod
@abstractmethod
def metadata_name() -> str:
raise NotImplementedError
@abstractmethod
def metadata_return_type(self) -> ColumnType:
raise NotImplementedError
@abstractmethod
def metadata_function(self, words):
raise NotImplementedError
def process(self, store):
metadata1 = pd.DataFrame()
metadata2 = pd.DataFrame()
df1, df2 = store[TokenizeIntoLowerWordsPrecalculation()]
for column in df1.columns:
logger.info(self.metadata_name() + ' analysis for ' + column)
metadata1[column] = [self.metadata_function(words) for words in df1[column]]
metadata2[column] = [self.metadata_function(words) for words in df2[column]]
return metadata1, metadata2
class GenericTextMetadataWithTokenizingAndLanguage(GenericTextMetadata):
def __init__(self, language='en', infer_language=False):
self.language = language
self.infer_language = infer_language
@staticmethod
@abstractmethod
def metadata_name() -> str:
raise NotImplementedError
@abstractmethod
def metadata_return_type(self) -> ColumnType:
raise NotImplementedError
@abstractmethod
def metadata_function(self, language, words):
raise NotImplementedError
def process(self, store):
metadata1 = pd.DataFrame()
metadata2 = pd.DataFrame()
df1, df2 = store[TokenizeIntoLowerWordsPrecalculation()]
columns = store.column_names(ColumnType.text)
if self.infer_language:
lang1, lang2 = store[LanguageMetadata()]
for column in columns:
logger.info(self.metadata_name() + ' analysis for ' + column)
temp_column1 = []
temp_column2 = []
for i in range(len(df1)):
if self.infer_language:
temp_column1.append(self.metadata_function(lang1[column][i], df1[column][i]))
temp_column2.append(self.metadata_function(lang2[column][i], df2[column][i]))
else:
temp_column1.append(self.metadata_function(self.language, df1[column][i]))
temp_column2.append(self.metadata_function(self.language, df2[column][i]))
metadata1[column] = temp_column1
metadata2[column] = temp_column2
return metadata1, metadata2
class GenericTextMetadataWithLanguage(GenericTextMetadata):
def __init__(self, language='en', infer_language=False):
self.language = language
self.infer_language = infer_language
@staticmethod
@abstractmethod
def metadata_name() -> str:
raise NotImplementedError
@abstractmethod
def metadata_return_type(self) -> ColumnType:
raise NotImplementedError
@abstractmethod
def metadata_function(self, language, text):
raise NotImplementedError
def process(self, store):
metadata1 = pd.DataFrame()
metadata2 = pd.DataFrame()
df1, df2 = store[ColumnType.text]
columns = store.column_names(ColumnType.text)
if self.infer_language:
lang1, lang2 = store[LanguageMetadata()]
for column in columns:
logger.info(self.metadata_name() + ' analysis for ' + column)
temp_column1 = []
temp_column2 = []
for i in range(len(df1)):
if self.infer_language:
temp_column1.append(self.metadata_function(lang1[column][i], df1[column][i]))
temp_column2.append(self.metadata_function(lang2[column][i], df2[column][i]))
else:
temp_column1.append(self.metadata_function(self.language, df1[column][i]))
temp_column2.append(self.metadata_function(self.language, df2[column][i]))
metadata1[column] = temp_column1
metadata2[column] = temp_column2
return metadata1, metadata2
class NumCharsMetadata(GenericTextMetadata):
@staticmethod
def metadata_name() -> str:
return 'num_chars'
def metadata_return_type(self) -> ColumnType:
return ColumnType.numerical
def metadata_function(self, text):
if not isinstance(text, str):
return float('nan')
return len(text)
class RatioUppercaseLettersMetadata(GenericTextMetadata):
@staticmethod
def metadata_name() -> str:
return 'ratio_upper'
def metadata_return_type(self) -> ColumnType:
return ColumnType.numerical
def metadata_function(self, text):
if not isinstance(text, str):
return float('nan')
if text == "":
return 0
alpha = sum(1 for c in text if c.isalpha())
upper = sum(1 for c in text if c.isupper())
return upper / alpha
class UnicodeCategoriesMetadata(GenericTextMetadata):
@staticmethod
def metadata_name() -> str:
return 'unicode_categories'
def metadata_return_type(self) -> ColumnType:
return ColumnType.categorical
@staticmethod
def unicode_category_histogram(text):
if not isinstance(text, str):
return float('nan')
characters = defaultdict(int)
for c in text:
category = unicodedata.category(c)
characters[category] += 1
return characters
def metadata_function(self, text):
return most_common_n_to_string_frequency(self.unicode_category_histogram(text), 5)
class UnicodeBlocksMetadata(GenericTextMetadata):
@staticmethod
def metadata_name() -> str:
return 'unicode_blocks'
def metadata_return_type(self) -> ColumnType:
return ColumnType.categorical
@staticmethod
def unicode_block_histogram(text):
if not isinstance(text, str):
return float('nan')
characters = defaultdict(int)
for c in text:
block = ucb_list.block(c)
characters[block] += 1
return characters
def metadata_function(self, text):
return most_common_n_to_string_frequency(self.unicode_block_histogram(text), 5)
class NumWordsMetadata(GenericTextMetadataWithTokenizing):
@staticmethod
def metadata_name() -> str:
return 'num_words'
def metadata_return_type(self) -> ColumnType:
return ColumnType.numerical
def metadata_function(self, words):
if not isinstance(words, list):
return float('nan')
return len(words)
class DistinctWordsRatioMetadata(GenericTextMetadataWithTokenizing):
@staticmethod
def metadata_name() -> str:
return 'distinct_words'
def metadata_return_type(self) -> ColumnType:
return ColumnType.numerical
def metadata_function(self, words):
if not isinstance(words, list):
return float('nan')
distinct_words = set()
if len(words) == 0:
return 0.0
for word in words:
if word not in distinct_words:
distinct_words.add(word)
return len(distinct_words) / len(words)
class UniqueWordsRatioMetadata(GenericTextMetadataWithTokenizing):
@staticmethod
def metadata_name() -> str:
return 'unique_words'
def metadata_return_type(self) -> ColumnType:
return ColumnType.numerical
def metadata_function(self, words):
if not isinstance(words, list):
return float('nan')
if len(words) == 0:
return 0.0
seen_once = set()
seen_often = set()
for word in words:
if word not in seen_often:
if word not in seen_once:
seen_once.add(word)
else:
seen_once.remove(word)
seen_often.add(word)
return len(seen_once) / len(words)
class UnknownWordRatioMetadata(GenericTextMetadataWithTokenizingAndLanguage):
@staticmethod
def metadata_name() -> str:
return 'unknown_word_ratio'
def metadata_return_type(self) -> ColumnType:
return ColumnType.numerical
def metadata_function(self, language, words):
# pyspellchecker supports multiple languages including English, Spanish, German, French, and Portuguese
if not isinstance(words, list):
return float('nan')
try:
spell = SpellChecker(language)
except ValueError as error:
return float('nan')
if len(words) == 0:
return 0.0
misspelled = spell.unknown(words)
return len(misspelled) / len(words)
class StopwordRatioMetadata(GenericTextMetadataWithTokenizingAndLanguage):
@staticmethod
def metadata_name() -> str:
return 'stopword_ratio'
def metadata_return_type(self) -> ColumnType:
return ColumnType.numerical
def metadata_function(self, language, words):
# not working for every language
if not isinstance(words, list):
return float('nan')
stopword_count = 0
try:
stopwords_for_language_lower = stopwords.words(languages.get(part1=language).name.lower())
if len(words) == 0:
return 0.0
for word in words:
if word in stopwords_for_language_lower:
stopword_count += 1
return stopword_count / len(words)
except OSError as error:
return float('nan')
class DelimiterTypeMetadata(GenericTextMetadata):
@staticmethod
def metadata_name() -> str:
return 'delimiter_type'
def metadata_return_type(self) -> ColumnType:
return ColumnType.categorical
def metadata_function(self, text):
if not isinstance(text, str):
return float('nan')
for key, value in delimiters.items():
if regex.compile(value).search(text):
return key
return 'no delimiter'
class NumPartsMetadata(GenericTextMetadata):
# Calculates the delimiter of the text and then splits the text by its delimiter
# to calculate the number of parts in the text
@staticmethod
def metadata_name() -> str:
return 'num_parts'
def metadata_return_type(self) -> ColumnType:
return ColumnType.numerical
def metadata_function(self, text):
if not isinstance(text, str):
return float('nan')
delimiter = DelimiterTypeMetadata().metadata_function(text)
for key, value in delimiters.items():
if key == delimiter:
return len(regex.split(regex.compile(value), text))
return 0
class LanguagePerParagraph(GenericTextMetadata):
# Depending on the texts delimiter splits the text into parts and calculates the language for each part.
# Returns a string with the languages, sorted by their frequency
def __init__(self, seed=0):
self.seed = seed
@staticmethod
def metadata_name() -> str:
return 'language'
def metadata_return_type(self) -> ColumnType:
return ColumnType.categorical
@staticmethod
def detect_languages(text):
if not isinstance(text, str) or len(text) == 0:
return float('nan')
if DelimiterTypeMetadata().metadata_function(text) == 'HTML':
parts = re.split(r'<\s*br\s*/?\s*>', text)
else:
parts = re.split(r'[\n\r]+', text)
parts = [x.strip() for x in parts if x.strip()]
detected_languages = defaultdict(int)
for part in parts:
try:
lang = detect(part)
detected_languages[lang] += 1
except LangDetectException:
continue
if detected_languages == {}:
return float('nan')
return detected_languages
def metadata_function(self, text, seed=0):
DetectorFactory.seed = self.seed
return most_common_n_to_string_alphabetically(self.detect_languages(text), 3)
class LanguageMetadata(GenericTextMetadata):
def __init__(self, seed=0):
self.seed = seed
@staticmethod
def metadata_name() -> str:
return 'language'
def metadata_return_type(self) -> ColumnType:
return ColumnType.categorical
def metadata_function(self, text):
if not isinstance(text, str):
return float('nan')
DetectorFactory.seed = self.seed
return detect(text)
class ComplexityMetadata(GenericTextMetadataWithLanguage):
@staticmethod
def metadata_name() -> str:
return 'complexity'
def metadata_return_type(self) -> ColumnType:
return ColumnType.numerical
def metadata_function(self, language, text):
if not isinstance(text, str) or language != 'en':
return float('nan')
return textstat.text_standard(text, True)
class PartOfSpeechMetadata(GenericTextMetadataWithLanguage):
@staticmethod
def metadata_name() -> str:
return 'part_of_speech_tags'
def metadata_return_type(self) -> ColumnType:
return ColumnType.categorical
@staticmethod
def tag_histogram(text):
tokenized_text = nltk.word_tokenize(text)
tagged_text = nltk.pos_tag(tokenized_text)
simplified_tagged_text = [(word, nltk.map_tag('en-ptb', 'universal', tag)) for word, tag in tagged_text]
tagdict = defaultdict(int)
for word in simplified_tagged_text:
tagdict[word[1]] += 1
return tagdict
def metadata_function(self, language, text):
if not isinstance(text, str) or language != 'en':
return float('nan')
return most_common_n_to_string_frequency(self.tag_histogram(text), 5)
class TextMetadata(Precalculation):
def __init__(self, text_metadata_types=None, language='en', infer_language=False):
if text_metadata_types is None:
self.text_metadata_types = frozenset([NumCharsMetadata(), NumWordsMetadata(), DistinctWordsRatioMetadata()])
else:
self.text_metadata_types = frozenset(text_metadata_types)
if infer_language or language != 'en':
for mdtype in self.text_metadata_types:
if isinstance(mdtype, GenericTextMetadataWithTokenizingAndLanguage):
mdtype.language = language
mdtype.infer_language = infer_language
def __eq__(self, other):
return isinstance(other, self.__class__) and self.text_metadata_types == other.text_metadata_types
def __hash__(self):
return hash((self.__class__, self.text_metadata_types))
def process(self, store):
columns = store.column_names(ColumnType.text)
metadata_names = sorted([mdtype.metadata_name() for mdtype in self.text_metadata_types])
index = pd.MultiIndex.from_product([columns, metadata_names], names=['column', 'metadata'])
metadata1 = pd.DataFrame(columns=index)
metadata2 = pd.DataFrame(columns=index)
for metadata_type in self.text_metadata_types:
md1, md2 = store[metadata_type]
for column in columns:
metadata1[(column, metadata_type.metadata_name())] = md1[column]
metadata2[(column, metadata_type.metadata_name())] = md2[column]
return metadata1, metadata2
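# --- Illustrative usage sketch (editor's addition, not part of the original module) ---
# Each metadata class above exposes `metadata_function`, which maps a raw string to one
# categorical or numerical value. A minimal demonstration, assuming the module-level
# `delimiters` regex table and the imports defined earlier in this file:
def _example_text_metadata_usage():
    sample = "First paragraph.\nSecond paragraph, with a comma."
    delimiter = DelimiterTypeMetadata().metadata_function(sample)   # one of the keys of `delimiters`
    num_parts = NumPartsMetadata().metadata_function(sample)        # parts after splitting on that delimiter
    language = LanguageMetadata(seed=0).metadata_function(sample)   # ISO language code from langdetect
    return delimiter, num_parts, language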
| 32.394881 | 120 | 0.655926 |
79400943f827182a182078c2b0a4b38f8010d57d | 94 | py | Python | apps/crm/models.py | youssriaboelseod/pyerp | 9ef9873e2ff340010656f0c518bccf9d7a14dbaa | [
"MIT"
] | 115 | 2019-08-18T16:12:54.000Z | 2022-03-29T14:17:20.000Z | apps/crm/models.py | youssriaboelseod/pyerp | 9ef9873e2ff340010656f0c518bccf9d7a14dbaa | [
"MIT"
] | 22 | 2019-09-09T01:34:54.000Z | 2022-03-12T00:33:40.000Z | apps/crm/models.py | youssriaboelseod/pyerp | 9ef9873e2ff340010656f0c518bccf9d7a14dbaa | [
"MIT"
] | 83 | 2019-08-17T17:09:20.000Z | 2022-03-25T04:46:53.000Z | # Localfolder Library
from .submodels.lead import PyLead
from .submodels.stage import PyStage
| 23.5 | 36 | 0.829787 |
794009aa62e480e63ea0e8151495d9b79bf823da | 1,255 | py | Python | sdk/policyinsights/azure-mgmt-policyinsights/azure/mgmt/policyinsights/models/operations_list_results_py3.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | sdk/policyinsights/azure-mgmt-policyinsights/azure/mgmt/policyinsights/models/operations_list_results_py3.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | sdk/policyinsights/azure-mgmt-policyinsights/azure/mgmt/policyinsights/models/operations_list_results_py3.py | tzhanl/azure-sdk-for-python | 18cd03f4ab8fd76cc0498f03e80fbc99f217c96e | [
"MIT"
] | 1 | 2019-06-17T22:18:23.000Z | 2019-06-17T22:18:23.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class OperationsListResults(Model):
"""List of available operations.
:param odatacount: OData entity count; represents the number of operations
returned.
:type odatacount: int
:param value: List of available operations.
:type value: list[~azure.mgmt.policyinsights.models.Operation]
"""
_validation = {
'odatacount': {'minimum': 1},
}
_attribute_map = {
'odatacount': {'key': '@odata\\.count', 'type': 'int'},
'value': {'key': 'value', 'type': '[Operation]'},
}
def __init__(self, *, odatacount: int=None, value=None, **kwargs) -> None:
super(OperationsListResults, self).__init__(**kwargs)
self.odatacount = odatacount
self.value = value
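# Illustrative usage sketch (editor's addition, not part of the generated SDK code):
# the class is a plain msrest data model, so it can be constructed and inspected directly.
def _example_operations_list_results():
    result = OperationsListResults(odatacount=1, value=[])  # `value` would normally hold Operation models
    return result.odatacount, result.value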
| 33.026316 | 78 | 0.590438 |
794009b0bf0fcfb17fbd54f1dbf6199a54240dfd | 3,122 | py | Python | misu/unitmachine.py | cjrh/unity | 798423e1563929f6ed7417c477c19c5991ab3a9b | [
"BSD-2-Clause"
] | 10 | 2016-09-08T20:56:51.000Z | 2022-03-27T13:21:58.000Z | misu/unitmachine.py | cjrh/misu | 4520f720590c00e46a952c24be04421b867e8508 | [
"BSD-2-Clause"
] | 33 | 2015-03-18T02:01:28.000Z | 2022-03-27T13:14:48.000Z | misu/unitmachine.py | cjrh/unity | 798423e1563929f6ed7417c477c19c5991ab3a9b | [
"BSD-2-Clause"
] | 5 | 2016-08-28T20:33:04.000Z | 2018-08-28T19:28:00.000Z | from __future__ import division
from __future__ import print_function
import traceback
import parsley
def get_unit_text(value):
if not value[1]:
return ''
else:
return value[1]
def calculate(start, pairs):
print('start={} pairs={}'.format(start, pairs))
result = start
for op, value in pairs:
# value is now a tuple. [0] is the magnitude, [1] is the unit
# add and substract must have same units, else error.
if type(result) != tuple:
result = (result, '')
if type(value) != tuple:
value = (value, '')
u1 = get_unit_text(result)
u2 = get_unit_text(value)
if op == '+':
assert u1 == u2, "Units don't match: {} and {}".format(u1, u2)
result = (result[0] + value[0], u1)
elif op == '-':
assert u1 == u2, "Units don't match: {} and {}".format(u1, u2)
result = (result[0] - value[0], u1)
elif op == '*':
result = (result[0] * value[0], u1 + '*' + u2)
elif op == '/':
result = (result[0] / value[0], u1 + '/(' + u2 + ')')
if type(result) == tuple and result[1] == '':
result = result[0]
print(result)
return result
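# Illustrative worked examples (editor's addition, not part of the original module):
# `calculate` folds a start value and a list of (operator, value) pairs while tracking units.
#   calculate((3.0, 'm'), [('+', (4.0, 'm'))])  -> (7.0, 'm')
#   calculate((6.0, 'm'), [('/', (2.0, 's'))])  -> (3.0, 'm/(s)')
#   calculate((1.0, 'm'), [('+', (1.0, 's'))])  raises AssertionError (mismatched units)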
def join_parens_units(parens_result, units):
if units is None:
units = ''
if type(parens_result) == tuple:
return (parens_result[0], parens_result[1]+'*'+units)
else:
return (parens_result, units)
x = parsley.makeGrammar("""
ws = ' '*
digit = :x ?(x in '0123456789') -> x
digits = <digit*>
digit1_9 = :x ?(x in '123456789') -> x
intPart = (digit1_9:first digits:rest -> first + rest) | digit
floatPart :sign :ds = <('.' digits exponent?) | exponent>:tail
-> float(sign + ds + tail)
exponent = ('e' | 'E') ('+' | '-')? digits
number = spaces ('-' | -> ''):sign (intPart:ds (floatPart(sign ds)
| -> int(sign + ds)))
unit = <letter+>
units = unit:fu (ws ('*'|'/'):op ws unit:u -> op+u)*:rem -> fu+''.join(rem)
parens = '(' ws expr:e ws ')' -> e
value = (
(number:e ws units?:u -> (e,u))
|
(parens:p ws units:u -> join_parens_units(p,u))
|
(parens:p -> p)
)
add = '+' ws expr2:n -> ('+', n)
sub = '-' ws expr2:n -> ('-', n)
mul = '*' ws value:n -> ('*', n)
div = '/' ws value:n -> ('/', n)
addsub = ws (add | sub)
muldiv = ws (mul | div)
expr = expr2:left addsub*:right -> calculate(left, right)
expr2 = value:left muldiv*:right -> calculate(left, right)
""", {"calculate": calculate, "join_parens_units": join_parens_units})
if __name__ == '__main__':
print('Try some operations (q to end):')
print()
print('> ', end=' ')
while True:
expr = input()
if expr.lower() == 'q':
print('Exiting...')
break
try:
print(x(expr).expr())
except:
print()
print("Error: ")
print()
print(traceback.format_exc())
print()
print('> ', end=' ')
#print(x("17+34").expr())
#print(x("18").expr())
| 27.385965 | 75 | 0.508648 |
794009c64314b027a4da9764421c4ae457b458e1 | 20,135 | py | Python | old_models/train_4p_resnetd152b.py | artyompal/imet | 75d708392237a1392ac5fa2f598a747408a88f19 | [
"Apache-2.0"
] | 5 | 2019-06-11T09:11:56.000Z | 2020-05-06T16:05:26.000Z | old_models/train_4p_resnetd152b.py | artyompal/imet | 75d708392237a1392ac5fa2f598a747408a88f19 | [
"Apache-2.0"
] | null | null | null | old_models/train_4p_resnetd152b.py | artyompal/imet | 75d708392237a1392ac5fa2f598a747408a88f19 | [
"Apache-2.0"
] | 2 | 2019-06-12T14:14:35.000Z | 2019-07-18T15:06:14.000Z | #!/usr/bin/python3.6
''' Trains a model. '''
import argparse, hashlib, logging, math, os, pprint, random, sys, time
import multiprocessing
from typing import *
from collections import defaultdict, Counter
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
from sklearn.model_selection import KFold
import PIL
from data_loader_v2_albu import Dataset
from utils import create_logger, AverageMeter, F_score
from debug import dprint, assert_eq, assert_ne
from cosine_scheduler import CosineLRWithRestarts
from tqdm import tqdm
IN_KERNEL = os.environ.get('KAGGLE_WORKING_DIR') is not None
if not IN_KERNEL:
import torchsummary
from pytorchcv.model_provider import get_model
from hyperopt import hp, tpe, fmin
else:
from model_provider import get_model
import albumentations as albu
from easydict import EasyDict as edict # type: ignore
opt = edict()
opt.INPUT = '../input/imet-2019-fgvc6/' if IN_KERNEL else '../input/'
opt.MODEL = edict()
opt.MODEL.ARCH = 'resnetd152b'
# opt.MODEL.IMAGE_SIZE = 256
opt.MODEL.INPUT_SIZE = 352 # crop size
opt.MODEL.VERSION = os.path.splitext(os.path.basename(__file__))[0][6:]
opt.MODEL.DROPOUT = 0.5
opt.MODEL.NUM_CLASSES = 1103
opt.EXPERIMENT_DIR = f'../models/{opt.MODEL.VERSION}'
opt.TRAIN = edict()
opt.TRAIN.NUM_FOLDS = 5
opt.TRAIN.BATCH_SIZE = 6 * torch.cuda.device_count()
opt.TRAIN.LOSS = 'BCE'
opt.TRAIN.SHUFFLE = True
opt.TRAIN.WORKERS = min(12, multiprocessing.cpu_count())
opt.TRAIN.PRINT_FREQ = 100
opt.TRAIN.LEARNING_RATE = 1e-4
opt.TRAIN.PATIENCE = 4
opt.TRAIN.LR_REDUCE_FACTOR = 0.2
opt.TRAIN.MIN_LR = 1e-7
opt.TRAIN.EPOCHS = 70
opt.TRAIN.STEPS_PER_EPOCH = 30000
opt.TRAIN.PATH = opt.INPUT + 'train'
opt.TRAIN.FOLDS_FILE = 'folds.npy'
opt.TRAIN.CSV = opt.INPUT + 'train.csv'
opt.TRAIN.OPTIMIZER = 'Adam'
opt.TRAIN.MIN_IMPROVEMENT = 0.001
opt.TRAIN.COSINE = edict()
opt.TRAIN.COSINE.ENABLE = False
opt.TRAIN.COSINE.LR = 1e-4
opt.TRAIN.COSINE.PERIOD = 10
opt.TRAIN.COSINE.COEFF = 1.2
opt.TEST = edict()
opt.TEST.PATH = opt.INPUT + 'test'
opt.TEST.CSV = opt.INPUT + 'sample_submission.csv'
opt.TEST.NUM_TTAS = 4
opt.TEST.TTA_COMBINE_FUNC = 'mean'
def make_folds(df: pd.DataFrame) -> pd.DataFrame:
cls_counts = Counter(cls for classes in df['attribute_ids'].str.split() for cls in classes)
fold_cls_counts = defaultdict(int) # type: ignore
folds = [-1] * len(df)
for item in tqdm(df.sample(frac=1, random_state=42).itertuples(),
total=len(df), disable=IN_KERNEL):
cls = min(item.attribute_ids.split(), key=lambda cls: cls_counts[cls])
fold_counts = [(f, fold_cls_counts[f, cls]) for f in range(opt.TRAIN.NUM_FOLDS)]
min_count = min([count for _, count in fold_counts])
random.seed(item.Index)
fold = random.choice([f for f, count in fold_counts
if count == min_count])
folds[item.Index] = fold
for cls in item.attribute_ids.split():
fold_cls_counts[fold, cls] += 1
return np.array(folds, dtype=np.uint8)
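# Illustrative sketch (editor's addition, not part of the original training script):
# `make_folds` assigns each sample to the fold that currently holds the fewest examples
# of that sample's rarest class, giving an approximately stratified multi-label split.
# A hypothetical toy call:
#   toy = pd.DataFrame({'attribute_ids': ['1 2', '1', '2 3', '3', '1 3']})
#   toy_folds = make_folds(toy)   # numpy array of fold ids in [0, opt.TRAIN.NUM_FOLDS)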
def train_val_split(df: pd.DataFrame, fold: int) -> Tuple[pd.DataFrame, pd.DataFrame]:
if not os.path.exists(opt.TRAIN.FOLDS_FILE):
folds = make_folds(df)
np.save(opt.TRAIN.FOLDS_FILE, folds)
else:
folds = np.load(opt.TRAIN.FOLDS_FILE)
assert folds.shape[0] == df.shape[0]
return df.loc[folds != fold], df.loc[folds == fold]
def load_data(fold: int, params: Dict[str, Any]) -> Any:
torch.multiprocessing.set_sharing_strategy('file_system')
cudnn.benchmark = True
logger.info('Options:')
logger.info(pprint.pformat(opt))
full_df = pd.read_csv(opt.TRAIN.CSV)
print('full_df', full_df.shape)
train_df, val_df = train_val_split(full_df, fold)
print('train_df', train_df.shape, 'val_df', val_df.shape)
test_df = pd.read_csv(opt.TEST.CSV)
# transform_train = transforms.Compose([
# # transforms.Resize((opt.MODEL.IMAGE_SIZE)), # smaller edge
# transforms.RandomCrop(opt.MODEL.INPUT_SIZE),
# transforms.RandomHorizontalFlip(),
# # transforms.ColorJitter(brightness=0.2, contrast=0.2),
# # transforms.RandomAffine(degrees=20, scale=(0.8, 1.2), shear=10, resample=PIL.Image.BILINEAR),
# # transforms.RandomCrop(opt.MODEL.INPUT_SIZE),
# ])
augs = []
augs.append(albu.HorizontalFlip(.5))
if int(params['vflip']):
augs.append(albu.VerticalFlip(.5))
if int(params['rotate90']):
augs.append(albu.RandomRotate90())
if params['affine'] == 'soft':
augs.append(albu.ShiftScaleRotate(shift_limit=0.075, scale_limit=0.15, rotate_limit=10, p=.75))
elif params['affine'] == 'medium':
augs.append(albu.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.2, rotate_limit=45, p=0.2))
elif params['affine'] == 'hard':
augs.append(albu.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.50, rotate_limit=45, p=.75))
if float(params['noise']) > 0.1:
augs.append(albu.OneOf([
albu.IAAAdditiveGaussianNoise(),
albu.GaussNoise(),
], p=float(params['noise'])))
if float(params['blur']) > 0.1:
augs.append(albu.OneOf([
albu.MotionBlur(p=.2),
albu.MedianBlur(blur_limit=3, p=0.1),
albu.Blur(blur_limit=3, p=0.1),
], p=float(params['blur'])))
if float(params['distortion']) > 0.1:
augs.append(albu.OneOf([
albu.OpticalDistortion(p=0.3),
albu.GridDistortion(p=.1),
albu.IAAPiecewiseAffine(p=0.3),
], p=float(params['distortion'])))
if float(params['color']) > 0.1:
augs.append(albu.OneOf([
albu.CLAHE(clip_limit=2),
albu.IAASharpen(),
albu.IAAEmboss(),
albu.RandomBrightnessContrast(),
], p=float(params['color'])))
transform_train = albu.Compose([
albu.PadIfNeeded(opt.MODEL.INPUT_SIZE, opt.MODEL.INPUT_SIZE),
albu.RandomCrop(height=opt.MODEL.INPUT_SIZE, width=opt.MODEL.INPUT_SIZE),
albu.Compose(augs, p=float(params['aug_global_prob'])),
])
if opt.TEST.NUM_TTAS > 1:
transform_test = albu.Compose([
albu.PadIfNeeded(opt.MODEL.INPUT_SIZE, opt.MODEL.INPUT_SIZE),
albu.RandomCrop(height=opt.MODEL.INPUT_SIZE, width=opt.MODEL.INPUT_SIZE),
albu.HorizontalFlip(),
])
else:
transform_test = albu.Compose([
albu.PadIfNeeded(opt.MODEL.INPUT_SIZE, opt.MODEL.INPUT_SIZE),
albu.CenterCrop(height=opt.MODEL.INPUT_SIZE, width=opt.MODEL.INPUT_SIZE),
])
train_dataset = Dataset(train_df, path=opt.TRAIN.PATH, mode='train',
num_classes=opt.MODEL.NUM_CLASSES, resize=False,
augmentor=transform_train)
val_dataset = Dataset(val_df, path=opt.TRAIN.PATH, mode='val',
# image_size=opt.MODEL.INPUT_SIZE,
num_classes=opt.MODEL.NUM_CLASSES, resize=False,
num_tta=1, # opt.TEST.NUM_TTAS,
augmentor=transform_test)
test_dataset = Dataset(test_df, path=opt.TEST.PATH, mode='test',
# image_size=opt.MODEL.INPUT_SIZE,
num_classes=opt.MODEL.NUM_CLASSES, resize=False,
num_tta=opt.TEST.NUM_TTAS,
augmentor=transform_test)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=opt.TRAIN.BATCH_SIZE, shuffle=True,
num_workers=opt.TRAIN.WORKERS)
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=opt.TRAIN.BATCH_SIZE, shuffle=False, num_workers=opt.TRAIN.WORKERS)
test_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=opt.TRAIN.BATCH_SIZE, shuffle=False, num_workers=opt.TRAIN.WORKERS)
return train_loader, val_loader, test_loader
def create_model(predict_only: bool, dropout: float) -> Any:
logger.info(f'creating a model {opt.MODEL.ARCH}')
model = get_model(opt.MODEL.ARCH, pretrained=not predict_only)
model.features[-1] = nn.AdaptiveAvgPool2d(1)
if opt.MODEL.ARCH == 'pnasnet5large':
if dropout < 0.1:
model.output = nn.Linear(model.output[-1].in_features, opt.MODEL.NUM_CLASSES)
else:
model.output = nn.Sequential(
nn.Dropout(dropout),
nn.Linear(model.output[-1].in_features, opt.MODEL.NUM_CLASSES))
else:
if dropout < 0.1:
model.output = nn.Linear(model.output.in_features, opt.MODEL.NUM_CLASSES)
else:
model.output = nn.Sequential(
nn.Dropout(dropout),
nn.Linear(model.output.in_features, opt.MODEL.NUM_CLASSES))
model = torch.nn.DataParallel(model).cuda()
model.cuda()
return model
def save_checkpoint(state: Dict[str, Any], filename: str, model_dir: str) -> None:
torch.save(state, os.path.join(model_dir, filename))
logger.info(f'A snapshot was saved to {filename}')
def train(train_loader: Any, model: Any, criterion: Any, optimizer: Any,
epoch: int, lr_scheduler: Any) -> None:
logger.info(f'epoch {epoch}')
batch_time = AverageMeter()
losses = AverageMeter()
avg_score = AverageMeter()
model.train()
num_steps = min(len(train_loader), opt.TRAIN.STEPS_PER_EPOCH)
print('total batches:', len(train_loader))
end = time.time()
for i, (input_, target) in enumerate(train_loader):
if i >= opt.TRAIN.STEPS_PER_EPOCH:
break
# compute output
output = model(input_.cuda())
loss = criterion(output, target.cuda())
# get metric
predict = (output.detach() > 0.5).type(torch.FloatTensor)
avg_score.update(F_score(predict, target).item())
# compute gradient and do SGD step
losses.update(loss.data.item(), input_.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
if hasattr(lr_scheduler, 'batch_step'):
lr_scheduler.batch_step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % opt.TRAIN.PRINT_FREQ == 0:
logger.info(f'{epoch} [{i}/{num_steps}]\t'
f'time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
f'loss {losses.val:.4f} ({losses.avg:.4f})\t'
f'F2 {avg_score.val:.4f} ({avg_score.avg:.4f})')
logger.info(f' * average accuracy on train {avg_score.avg:.4f}')
def inference(data_loader: Any, model: Any) -> Tuple[torch.tensor, torch.tensor]:
''' Returns predictions and targets, if any. '''
model.eval()
sigmoid = nn.Sigmoid()
predicts_list, targets_list = [], []
with torch.no_grad():
for i, (input_, target) in enumerate(tqdm(data_loader, disable=IN_KERNEL)):
if opt.TEST.NUM_TTAS != 1 and data_loader.dataset.mode == 'test':
bs, ncrops, c, h, w = input_.size()
input_ = input_.view(-1, c, h, w) # fuse batch size and ncrops
output = model(input_)
output = sigmoid(output)
if opt.TEST.TTA_COMBINE_FUNC == 'max':
output = output.view(bs, ncrops, -1).max(1)[0]
elif opt.TEST.TTA_COMBINE_FUNC == 'mean':
output = output.view(bs, ncrops, -1).mean(1)
else:
assert False
else:
output = model(input_.cuda())
output = sigmoid(output)
predicts_list.append(output.detach().cpu().numpy())
if target is not None:
targets_list.append(target)
predicts = np.concatenate(predicts_list)
targets = np.concatenate(targets_list)
return predicts, targets
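# Illustrative note (editor's addition, not part of the original script): when
# opt.TEST.NUM_TTAS > 1 each test sample arrives as (bs, ncrops, C, H, W); the block above
# flattens it to (bs * ncrops, C, H, W) for the forward pass and then folds the crop axis
# back with .view(bs, ncrops, -1) before taking the per-sample max or mean over crops.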
def validate(val_loader: Any, model: Any, epoch: int) -> Tuple[float, float]:
''' Calculates validation score.
1. Infers predictions
2. Finds optimal threshold
3. Returns the best score and a threshold. '''
logger.info('validate()')
predicts, targets = inference(val_loader, model)
predicts, targets = torch.tensor(predicts), torch.tensor(targets)
best_score, best_thresh = 0.0, 0.0
for threshold in tqdm(np.linspace(0.05, 0.15, 33), disable=IN_KERNEL):
score = F_score(predicts, targets, threshold=threshold).item()
if score > best_score:
best_score, best_thresh = score, threshold
logger.info(f'{epoch} F2 {best_score:.4f} threshold {best_thresh:.4f}')
logger.info(f' * F2 on validation {best_score:.4f}')
return best_score, best_thresh
def generate_submission(val_loader: Any, test_loader: Any, model: Any,
epoch: int, model_path: Any) -> np.ndarray:
score, threshold = validate(val_loader, model, epoch)
predicts, _ = inference(test_loader, model)
filename = f'pred_level1_{os.path.splitext(os.path.basename(model_path))[0]}'
np.savez(filename, predicts=predicts, threshold=threshold)
def set_lr(optimizer: Any, lr: float) -> None:
for param_group in optimizer.param_groups:
param_group['lr'] = lr
param_group['initial_lr'] = lr
def read_lr(optimizer: Any) -> float:
for param_group in optimizer.param_groups:
lr = float(param_group['lr'])
logger.info(f'learning rate: {lr}')
return lr
assert False
def freeze_layers(model: Any) -> None:
''' Freezes all layers but the last one. '''
m = model.module
for layer in m.children():
if layer not in [m.layer4, m.fc]:
for param in layer.parameters():
param.requires_grad = False
# for layer in [m.fc, m.layer4[0][2].conv3, m.layer4[0][2].bn3]:
# for param in layer.parameters():
# param.requires_grad = True
def unfreeze_layers(model: Any) -> None:
for layer in model.module.children():
for param in layer.parameters():
param.requires_grad = True
def train_model(params: Dict[str, Any]) -> float:
np.random.seed(0)
model_dir = opt.EXPERIMENT_DIR
logger.info('=' * 50)
logger.info(f'hyperparameters: {params}')
train_loader, val_loader, test_loader = load_data(args.fold, params)
model = create_model(args.predict, float(params['dropout']))
# freeze_layers(model)
# if torch.cuda.device_count() == 1:
# torchsummary.summary(model, (3, 224, 224))
if opt.TRAIN.OPTIMIZER == 'Adam':
optimizer = optim.Adam(model.parameters(), opt.TRAIN.LEARNING_RATE)
elif opt.TRAIN.OPTIMIZER == 'SGD':
optimizer = optim.SGD(model.parameters(), opt.TRAIN.LEARNING_RATE,
momentum=0.9, nesterov=True)
else:
assert False
if opt.TRAIN.COSINE.ENABLE:
set_lr(optimizer, opt.TRAIN.COSINE.LR)
lr_scheduler = CosineLRWithRestarts(optimizer, opt.TRAIN.BATCH_SIZE,
opt.TRAIN.BATCH_SIZE * opt.TRAIN.STEPS_PER_EPOCH,
restart_period=opt.TRAIN.COSINE.PERIOD, t_mult=opt.TRAIN.COSINE.COEFF)
else:
lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max',
patience=opt.TRAIN.PATIENCE, factor=opt.TRAIN.LR_REDUCE_FACTOR,
verbose=True, min_lr=opt.TRAIN.MIN_LR,
threshold=opt.TRAIN.MIN_IMPROVEMENT, threshold_mode='abs')
if args.weights is None:
last_epoch = 0
logger.info(f'training will start from epoch {last_epoch+1}')
else:
last_checkpoint = torch.load(args.weights)
assert(last_checkpoint['arch']==opt.MODEL.ARCH)
model.load_state_dict(last_checkpoint['state_dict'])
optimizer.load_state_dict(last_checkpoint['optimizer'])
logger.info(f'checkpoint {args.weights} was loaded.')
last_epoch = last_checkpoint['epoch']
logger.info(f'loaded the model from epoch {last_epoch}')
set_lr(optimizer, opt.TRAIN.LEARNING_RATE)
if args.predict:
print('inference mode')
generate_submission(val_loader, test_loader, model, last_epoch, args.weights)
sys.exit(0)
if opt.TRAIN.LOSS == 'BCE':
criterion = nn.BCEWithLogitsLoss()
else:
raise RuntimeError('unknown loss specified')
best_score = 0.0
best_epoch = 0
last_lr = read_lr(optimizer)
best_model_path = None
for epoch in range(last_epoch + 1, opt.TRAIN.EPOCHS + 1):
logger.info('-' * 50)
if not opt.TRAIN.COSINE.ENABLE:
lr = read_lr(optimizer)
if lr < last_lr - 1e-10 and best_model_path is not None:
# reload the best model
last_checkpoint = torch.load(os.path.join(model_dir, best_model_path))
assert(last_checkpoint['arch']==opt.MODEL.ARCH)
model.load_state_dict(last_checkpoint['state_dict'])
optimizer.load_state_dict(last_checkpoint['optimizer'])
logger.info(f'checkpoint {best_model_path} was loaded.')
set_lr(optimizer, lr)
last_lr = lr
if lr < opt.TRAIN.MIN_LR * 1.01:
logger.info('reached minimum LR, stopping')
break
# logger.info(f'lr={lr}, start cosine annealing!')
# set_lr(optimizer, opt.TRAIN.COSINE.LR)
# opt.TRAIN.COSINE.ENABLE = True
#
# lr_scheduler = CosineLRWithRestarts(optimizer, opt.TRAIN.BATCH_SIZE,
# opt.TRAIN.BATCH_SIZE * opt.TRAIN.STEPS_PER_EPOCH,
# restart_period=opt.TRAIN.COSINE.PERIOD, t_mult=opt.TRAIN.COSINE.COEFF)
if opt.TRAIN.COSINE.ENABLE:
lr_scheduler.step()
read_lr(optimizer)
train(train_loader, model, criterion, optimizer, epoch, lr_scheduler)
score, _ = validate(val_loader, model, epoch)
if not opt.TRAIN.COSINE.ENABLE:
lr_scheduler.step(score) # type: ignore
is_best = score > best_score
best_score = max(score, best_score)
if is_best:
best_epoch = epoch
data_to_save = {
'epoch': epoch,
'arch': opt.MODEL.ARCH,
'state_dict': model.state_dict(),
'best_score': best_score,
'score': score,
'optimizer': optimizer.state_dict(),
'options': opt
}
filename = opt.MODEL.VERSION
if is_best:
best_model_path = f'{filename}_f{args.fold}_e{epoch:02d}_{score:.04f}.pth'
save_checkpoint(data_to_save, best_model_path, model_dir)
logger.info(f'best score: {best_score:.04f}')
return -best_score
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--weights', help='model to resume training', type=str)
parser.add_argument('--fold', help='fold number', type=int, default=0)
parser.add_argument('--predict', help='model to resume training', action='store_true')
parser.add_argument('--num_tta', help='number of TTAs', type=int, default=opt.TEST.NUM_TTAS)
args = parser.parse_args()
params = {'affine': 'medium',
'aug_global_prob': 0.5346290229823514,
'blur': 0.1663552826866818,
'color': 0.112355821364934,
'distortion': 0.12486453027371469,
'dropout': 0.3,
'noise': 0.29392632695458587,
'rotate90': 0,
'vflip': 0}
opt.EXPERIMENT_DIR = os.path.join(opt.EXPERIMENT_DIR, f'fold_{args.fold}')
opt.TEST.NUM_TTAS = args.num_tta
if not os.path.exists(opt.EXPERIMENT_DIR):
os.makedirs(opt.EXPERIMENT_DIR)
logger = create_logger(os.path.join(opt.EXPERIMENT_DIR, 'log_training.txt'))
train_model(params)
| 36.809872 | 105 | 0.632133 |
79400a07d426263b1716fc2346086a9e82bd23da | 931 | py | Python | examples/label_prop.py | NucciTheBoss/pytorch_geometric | e220a2c08fa1b2f1672d616c22eac2a67b5c8967 | [
"MIT"
] | 2,350 | 2021-09-12T08:32:50.000Z | 2022-03-31T18:09:36.000Z | examples/label_prop.py | NucciTheBoss/pytorch_geometric | e220a2c08fa1b2f1672d616c22eac2a67b5c8967 | [
"MIT"
] | 588 | 2021-09-12T08:49:08.000Z | 2022-03-31T21:02:13.000Z | examples/label_prop.py | NucciTheBoss/pytorch_geometric | e220a2c08fa1b2f1672d616c22eac2a67b5c8967 | [
"MIT"
] | 505 | 2021-09-13T13:13:32.000Z | 2022-03-31T15:54:00.000Z | import os.path as osp
from ogb.nodeproppred import Evaluator, PygNodePropPredDataset
import torch_geometric.transforms as T
from torch_geometric.nn import LabelPropagation
root = osp.join(osp.dirname(osp.realpath(__file__)), '..', 'data', 'OGB')
dataset = PygNodePropPredDataset(
'ogbn-arxiv', root, transform=T.Compose([
T.ToUndirected(),
T.ToSparseTensor(),
]))
split_idx = dataset.get_idx_split()
evaluator = Evaluator(name='ogbn-arxiv')
data = dataset[0]
model = LabelPropagation(num_layers=3, alpha=0.9)
out = model(data.y, data.adj_t, mask=split_idx['train'])
y_pred = out.argmax(dim=-1, keepdim=True)
val_acc = evaluator.eval({
'y_true': data.y[split_idx['valid']],
'y_pred': y_pred[split_idx['valid']],
})['acc']
test_acc = evaluator.eval({
'y_true': data.y[split_idx['test']],
'y_pred': y_pred[split_idx['test']],
})['acc']
print(f'Val: {val_acc:.4f}, Test: {test_acc:.4f}')
| 28.212121 | 73 | 0.691729 |
79400aa1b6d5c6fbefabea771733e5f504750efd | 73 | py | Python | count_min_sketch/__init__.py | AWNystrom/CountMinSketch | c4a6eb3320e81c6e4ccedbcaed70854949e5bdd1 | [
"Apache-2.0"
] | 14 | 2016-06-03T16:33:26.000Z | 2021-07-16T09:01:29.000Z | count_min_sketch/__init__.py | AWNystrom/CountMinSketch | c4a6eb3320e81c6e4ccedbcaed70854949e5bdd1 | [
"Apache-2.0"
] | 1 | 2015-07-23T18:19:02.000Z | 2015-07-23T18:19:02.000Z | count_min_sketch/__init__.py | AWNystrom/CountMinSketch | c4a6eb3320e81c6e4ccedbcaed70854949e5bdd1 | [
"Apache-2.0"
] | 10 | 2016-05-12T15:08:59.000Z | 2021-12-23T17:01:56.000Z | from count_min_sketch import CountMinSketch
__all__ = ["CountMinSketch"]
| 24.333333 | 43 | 0.835616 |
79400aceb63ac1b24f2d9988ff46b97123863721 | 2,829 | py | Python | python/mm/selection.py | FumeauxG/mm-api | bb59e448562ff30349d7b1b831ee0db1465a9e6b | [
"MIT"
] | 67 | 2015-03-27T17:30:34.000Z | 2022-03-01T11:08:24.000Z | python/mm/selection.py | FumeauxG/mm-api | bb59e448562ff30349d7b1b831ee0db1465a9e6b | [
"MIT"
] | 15 | 2015-03-17T07:46:58.000Z | 2021-07-10T06:14:16.000Z | python/mm/selection.py | FumeauxG/mm-api | bb59e448562ff30349d7b1b831ee0db1465a9e6b | [
"MIT"
] | 35 | 2015-03-16T15:32:46.000Z | 2022-03-19T17:53:06.000Z | import mmapi;
from convert import *;
from tool import *;
def clear_face_selection(remote):
"""Clear the current face selection (if there is one) and exit the Selection Tool"""
cancel_tool(remote)
def select_all(remote):
"""Select all faces of the active object (will initialize a Selection Tool if necessary)"""
cmd = mmapi.StoredCommands()
cmd.AppendCompleteToolCommand("cancel")
cmd.AppendBeginToolCommand("select")
cmd.AppendSelectCommand_All()
remote.runCommand(cmd)
def selection_utility_command(remote, command_name):
"""Run a selection utility command (see ::AppendSelectUtilityCommand in StoredCommands.h)"""
cmd = mmapi.StoredCommands()
cmd.AppendSelectUtilityCommand( command_name )
remote.runCommand(cmd)
def select_facegroups(remote, groups_list):
"""Select the faces which have a facegroup ID in groups_list"""
cmd = mmapi.StoredCommands()
vgroups = mmapi.vectori();
for group in groups_list:
vgroups.push_back(group);
cmd2 = mmapi.StoredCommands()
cmd2.AppendSelectCommand_ByFaceGroups(vgroups)
remote.runCommand(cmd2)
def select_inside_sphere(remote, sphere_center, sphere_radius):
"""Select the faces inside the sphere with given center/radius. Requires active Selection Tool."""
cmd = mmapi.StoredCommands()
cmd.AppendSelectCommand_InsideSphere( sphere_center[0],sphere_center[1],sphere_center[2], sphere_radius )
remote.runCommand(cmd)
def select_hole(remote, hole_index, mode = 0):
"""Select the faces which have a facegroup ID in groups_list"""
cmd = mmapi.StoredCommands()
cmd.AppendSelectCommand_HoleBorderRing(hole_index, mode)
remote.runCommand(cmd)
def select_triangles(remote, triangles_list, mode = 0):
"""Select the listed triangles"""
cmd = mmapi.StoredCommands()
vtris = mmapi.vectori();
for t in triangles_list:
vtris.push_back(t);
cmd2 = mmapi.StoredCommands()
cmd2.AppendSelectCommand_ByTriangleID(vtris, mode)
remote.runCommand(cmd2)
def list_selected_groups(remote):
"""Returns a list of unique facegroup IDs for the current face selection (requires an active selection)"""
cmd1 = mmapi.StoredCommands()
key1 = cmd1.AppendSelectCommand_ListSelectedFaceGroups()
remote.runCommand(cmd1)
groups1 = mmapi.vectori()
cmd1.GetSelectCommandResult_ListSelectedFaceGroups(key1, groups1);
return vectori_to_list(groups1);
def list_selected_triangles(remote):
"""Returns a list of currently-selected triangles (requires an active selection)"""
cmd1 = mmapi.StoredCommands()
key1 = cmd1.AppendSelectCommand_ListSelectedTriangles()
remote.runCommand(cmd1)
groups1 = mmapi.vectori()
cmd1.GetSelectCommandResult_ListSelectedTriangles(key1, groups1);
return vectori_to_list(groups1);
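# Illustrative usage sketch (editor's addition, not part of the original module),
# assuming `remote` is a connection object accepted by remote.runCommand, as used above:
#   select_triangles(remote, [0, 1, 2])          # select three faces by triangle ID
#   groups = list_selected_groups(remote)        # unique facegroup IDs under the selection
#   tris = list_selected_triangles(remote)       # triangle IDs currently selected
#   clear_face_selection(remote)                 # drop the selection and exit the tool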
| 36.269231 | 110 | 0.743019 |
79400b4490c2421fbf20988b0cd940520f1b6165 | 11,016 | py | Python | contest/TestCase.py | Lnk2past/contest | c05dfe5314a41cf352b20f5771aefb50e5640e11 | [
"MIT"
] | 3 | 2019-10-24T00:22:04.000Z | 2019-10-26T19:30:53.000Z | contest/TestCase.py | Lnk2past/contest | c05dfe5314a41cf352b20f5771aefb50e5640e11 | [
"MIT"
] | 4 | 2018-07-02T02:23:03.000Z | 2021-09-11T02:13:55.000Z | contest/TestCase.py | Lnk2past/contest | c05dfe5314a41cf352b20f5771aefb50e5640e11 | [
"MIT"
] | 1 | 2019-10-15T02:09:19.000Z | 2019-10-15T02:09:19.000Z | import os
import pathlib
import shutil
from itertools import zip_longest
from subprocess import run, PIPE, TimeoutExpired
from PIL import Image, ImageChops
from contest.utilities import chdir
from contest.utilities.importer import import_from_source
from contest.utilities.logger import logger, logger_format_fields
class TestCase:
def __init__(
self,
case_name,
exe,
return_code,
argv,
stdin,
stdout,
stderr,
ofstreams,
env,
extra_tests,
timeout,
test_home,
resources,
setup
):
"""Initialize test case inputs
Arguments:
case_name (str): name of the test
exe (str): executable to test
return_code (int): return code of the execution
argv (list): list of command line arguments
stdin (list): list of inputs that are passed to stdin
stdout (str): expected output to stdout
stderr (str): expected output to stderr
ofstreams (list): list of pairs of file names and content
env (dict): dictionary of environment variables to set in the execution space
extra_tests (list): list of additional modules to load for testing
test_home (str): directory to run the test out of
resources (list): list of resources to copy to the test directory
setup (list): list of commands to run before executing the core test
"""
logger_format_fields['test_case'] = case_name
logger.debug(f'Constructing test case {case_name}', extra=logger_format_fields)
self.case_name = case_name
self.exe = exe
self.return_code = return_code
self.argv = argv
self.stdin = self._setup_istream(stdin)
self.stdout = self._setup_ostream(stdout)
self.stderr = self._setup_ostream(stderr)
self.ofstreams = [self._setup_ofstream(ofs) for ofs in ofstreams]
self.env = env
self.extra_tests = extra_tests
self.timeout = timeout
self.test_home = test_home
self.setup = setup
shutil.rmtree(self.test_home, ignore_errors=True)
pathlib.Path(self.test_home).mkdir(parents=True, exist_ok=True)
for resource in resources:
shutil.copytree(resource['src'], pathlib.Path(self.test_home)/resource['dst'])
self.test_args = self._setup_test_process(self.exe, self.argv)
for step in self.setup:
step = self._setup_test_process(step)
with chdir.ChangeDirectory(self.test_home):
logger.debug(f'Running setup: {step}', extra=logger_format_fields)
run(step, stdout=PIPE, stderr=PIPE, cwd=pathlib.Path.cwd())
def _setup_istream(self, stream):
if isinstance(stream, list):
return os.linesep.join(stream)
elif isinstance(stream, str):
return stream
raise RuntimeError('input streams must be a string or a list!')
def _setup_ostream(self, stream):
spec = stream if isinstance(stream, dict) else {}
if isinstance(stream, str):
spec['text'] = stream.splitlines(keepends=True)
elif isinstance(stream, list):
spec['text'] = stream
elif isinstance(stream, dict) and 'text' in spec:
spec['text'] = spec['text'].splitlines(keepends=True)
elif not isinstance(stream, dict):
raise RuntimeError('output streams must be a dictionary, string, or a list!')
if 'start' not in spec:
spec['start'] = 0
if 'count' not in spec:
spec['count'] = -1
return spec
def _setup_ofstream(self, stream):
if isinstance(stream, dict):
if 'file' in stream:
stream['file'] = os.path.join('..', '..', stream['file'])
return self._setup_ostream(stream)
raise RuntimeError('output file streams must be a dictionary!')
def _setup_test_process(self, cmd, argv=[]):
"""Properly sets the relative paths for the executable and contructs the
argument list for the executable.
Returns:
list of the executable and arguments to be passed to subprocess.run
"""
splexe = cmd.split()
splexe.extend(argv)
for idx, sp in enumerate(splexe):
sp = pathlib.Path(self.test_home, '..', '..', sp)
if sp.exists():
sp = sp.resolve()
splexe[idx] = str(sp)
return splexe
def execute(self):
"""Execute the test
Returns:
Number of errors encountered
"""
logger_format_fields['test_case'] = self.case_name
logger.critical('Starting test', extra=logger_format_fields)
logger.debug(f'Test Home: {self.test_home}', extra=logger_format_fields)
logger.debug(f'Running: {self.test_args}', extra=logger_format_fields)
with chdir.ChangeDirectory(self.test_home):
errors = 0
try:
proc = run(self.test_args, input=self.stdin, stdout=PIPE, stderr=PIPE, cwd=pathlib.Path.cwd(),
timeout=self.timeout, universal_newlines=True, env=self.env)
except TimeoutExpired:
logger.critical('Your program took too long to run! Perhaps you have an infinite loop?', extra=logger_format_fields)
                errors += 1
                # `proc` is never assigned when the run times out, so stop here rather than crash below
                return errors
if self.return_code and int(self.return_code) != proc.returncode:
logger.critical(f'FAILURE:\n Expected return code {self.return_code}, received {proc.returncode}', extra=logger_format_fields)
errors += 1
if 'file' in self.stdout:
self.stdout['text'] = open(self.stdout['file'])
if 'file' in self.stderr:
self.stderr['text'] = open(self.stderr['file'])
errors += self.check_streams('stdout', self.stdout, proc.stdout.splitlines(keepends=True))
errors += self.check_streams('stderr', self.stderr, proc.stderr.splitlines(keepends=True))
try:
for ofstream in self.ofstreams:
file_type = ofstream.get('type', 'text')
logger.debug(f'Performing {file_type} comparison', extra=logger_format_fields)
if file_type == 'text':
if 'file' in ofstream:
ofstream['text'] = open(ofstream['file'], 'r')
errs = self.check_streams(ofstream['test-file'], ofstream, open(ofstream['test-file'], 'r'))
if errs:
logger.critical(f'Errors found checking streams: {errs}', extra=logger_format_fields)
errors += errs
elif file_type == 'binary':
if 'file' in ofstream:
ofstream['text'] = open(ofstream['file'], 'rb')
errs = self.check_streams(ofstream['test-file'], ofstream, open(ofstream['test-file'], 'rb'))
if errs:
logger.critical(f'Errors found checking binary streams: {errs}', extra=logger_format_fields)
errors += errs
elif file_type == 'image':
f_image = Image.open(ofstream['file'])
t_image = Image.open(ofstream['test-file'])
diff = ImageChops.difference(f_image, t_image)
if diff.getbbox():
errors += 1
logger.critical('Errors found checking images', extra=logger_format_fields)
except FileNotFoundError:
logger.critical(f'FAILURE:\n Could not find output file {ofstream["test-file"]}', extra=logger_format_fields)
errors += 1
for extra_test in self.extra_tests:
logger.debug(f'Running extra test: {extra_test}', extra=logger_format_fields)
extra_test = import_from_source(extra_test)
if not extra_test.test():
errors += 1
logger.critical('Failed!', extra=logger_format_fields)
if not errors:
logger.critical('OK!', extra=logger_format_fields)
return int(errors > 0)
@staticmethod
def check_streams(stream, expected, received):
"""Compares two output streams, line by line
Arguments:
stream (str): name of stream being tested
expected (dict): expected content of stream and details for comparing
received (str): stream output from the test
Returns:
0 for no error, 1 for error
"""
logger.debug(f'Comparing {stream} streams line by line', extra=logger_format_fields)
if 'empty' in expected:
if expected['empty'] and received:
logger.critical(f'FAILURE:\nExpected {stream} to be empty', extra=logger_format_fields)
return 1
elif not expected['empty'] and not received:
logger.critical(f'FAILURE:\nExpected {stream} to be nonempty', extra=logger_format_fields)
return 1
return 0
if 'ignore' in expected:
logger.debug('Ignoring stream', extra=logger_format_fields)
return 0
for line_number, (e, r) in enumerate(zip_longest(expected['text'], received)):
if line_number < expected['start']:
continue
logger.debug(f'{stream} line {line_number}:\n"{e}"\n"{r}"\n', extra=logger_format_fields)
if e != r:
if None in [e, r]:
logger.critical('ERROR: Expected and received streams do not have equal length!', extra=logger_format_fields)
e = '' if e is None else e
r = '' if r is None else r
i = 0
while True:
s1 = e[i:i+5]
s2 = r[i:i+5]
if not s1 == s2:
for idx, (a, b) in enumerate(zip(s1, s2)):
if not a == b:
i = i + idx
break
else:
i = i + min(len(s1), len(s2))
break
i = i + 5
e = f' Expected "{e}"'
r = f' Received "{r}"'
error_location = (' '*18) + (' '*i) + '^ ERROR'
logger.critical(f'FAILURE:\n{e}\n{r}\n{error_location}', extra=logger_format_fields)
return 1
if line_number - expected['start'] + 1 == expected['count']:
logger.debug(f'Checked {expected["count"]} lines, breaking', extra=logger_format_fields)
break
return 0
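# Illustrative sketch (editor's addition, not part of the original module): the
# output-stream spec consumed by check_streams is the dict built by _setup_ostream.
# The keys shown are the ones handled above; the concrete values here are hypothetical.
def _example_stream_spec():
    spec = {
        'text': ['line 1\n', 'line 2\n'],   # expected lines, split with keepends=True
        'start': 0,                          # first line index to compare
        'count': -1,                         # -1 means compare every remaining line
    }
    received = ['line 1\n', 'line 2\n']
    return TestCase.check_streams('stdout', spec, received)  # 0 -> streams match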
| 43.714286 | 150 | 0.560911 |
79400b56fb66d27ccd605540343566ad8c630265 | 8,148 | py | Python | lib/data_formats/read_events.py | tub-rip/event_utils | 1ae06397b17bca32036155b80da64d295d4fe09f | [
"MIT"
] | 43 | 2021-01-12T14:59:15.000Z | 2022-03-31T04:36:17.000Z | lib/data_formats/read_events.py | TimoStoff/event_utils | dc0a0712156bb0c3659d90b33e211fa58a83a75f | [
"MIT"
] | 1 | 2021-11-24T18:21:41.000Z | 2021-11-24T18:21:41.000Z | lib/data_formats/read_events.py | tub-rip/event_utils | 1ae06397b17bca32036155b80da64d295d4fe09f | [
"MIT"
] | 11 | 2020-12-17T11:58:51.000Z | 2022-02-11T17:51:43.000Z | import h5py
import numpy as np
import os
def compute_indices(event_stamps, frame_stamps):
"""
Given event timestamps and frame timestamps as arrays,
find the event indices that correspond to the beginning and
end period of each frames
@param event_stamps The event timestamps
@param frame_stamps The frame timestamps
@returns The indices as a 2xN numpy array (N=number of frames)
"""
indices_first = np.searchsorted(event_stamps[:,0], frame_stamps[1:])
indices_last = np.searchsorted(event_stamps[:,0], frame_stamps[:-1])
index = np.stack([indices_first, indices_last], -1)
return index
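# Illustrative sketch (editor's addition, not part of the original module):
# event timestamps are expected as an (N, 1) array sorted in time; the result pairs,
# for every frame interval, the searchsorted indices of its end and start stamps.
def _example_compute_indices():
    event_stamps = np.array([[0.10], [0.20], [0.35], [0.50]])
    frame_stamps = np.array([0.0, 0.3, 0.6])
    return compute_indices(event_stamps, frame_stamps)  # shape (len(frame_stamps) - 1, 2)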
def read_memmap_events(memmap_path, skip_frames=1, return_events=False, images_file = 'images.npy',
images_ts_file = 'timestamps.npy', optic_flow_file = 'optic_flow.npy',
optic_flow_ts_file = 'optic_flow_timestamps.npy', events_xy_file = 'xy.npy',
events_p_file = 'p.npy', events_t_file = 't.npy'):
"""
Given a path to an RPG-style memmap, read the events it contains.
These memmaps break images, timestamps, optic flow, xy, p and t
components of events into separate files.
@param memmap_path Path to the root directory of the memmap
@param skip_frames Skip reading every 'skip_frames'th frame, default=1
@param return_events If True, return the events as numpy arrays, else return
a handle to the event data files (which can be indexed, but does not load
events into RAM)
@param images_file The file containing images
@param images_ts_file The file containing image timestamps
@param optic_flow_file The file containing optic flow frames
@param optic_flow_ts_file The file containing optic flow frame timestamps
@param events_xy_file The file containing event coordinate data
@param events_p_file The file containing the event polarities
@param events_ts_file The file containing the event timestamps
@return dict with event data:
data = {
"index": index mapping image index to event idx
"frame_stamps": frame timestamps
"images": images
"optic_flow": optic flow
"optic_flow_stamps": of timestamps
"t": event timestamps
"xy": event coords
"p": event polarities
"t0": t0
"""
assert os.path.isdir(memmap_path), '%s is not a valid memmap_pathectory' % memmap_path
data = {}
has_flow = False
for subroot, _, fnames in sorted(os.walk(memmap_path)):
for fname in sorted(fnames):
path = os.path.join(subroot, fname)
if fname.endswith(".npy"):
if fname=="index.npy": # index mapping image index to event idx
indices = np.load(path) # N x 2
assert len(indices.shape) == 2 and indices.shape[1] == 2
indices = indices.astype("int64") # ignore event indices which are 0 (before first image)
data["index"] = indices.T
elif fname==images_ts_file:
data["frame_stamps"] = np.load(path)[::skip_frames,...]
elif fname==images_file:
data["images"] = np.load(path, mmap_mode="r")[::skip_frames,...]
elif fname==optic_flow_file:
data["optic_flow"] = np.load(path, mmap_mode="r")[::skip_frames,...]
has_flow = True
elif fname==optic_flow_ts_file:
data["optic_flow_stamps"] = np.load(path)[::skip_frames,...]
handle = np.load(path, mmap_mode="r")
if fname==events_t_file: # timestamps
data["t"] = handle[:].squeeze() if return_events else handle
data["t0"] = handle[0]
elif fname==events_xy_file: # coordinates
data["xy"] = handle[:].squeeze() if return_events else handle
elif fname==events_p_file: # polarity
data["p"] = handle[:].squeeze() if return_events else handle
if len(data) > 0:
data['path'] = subroot
if "t" not in data:
raise Exception(f"Ignoring memmap_pathectory {subroot} since no events")
if not (len(data['p']) == len(data['xy']) and len(data['p']) == len(data['t'])):
raise Exception(f"Events from {subroot} invalid")
data["num_events"] = len(data['p'])
if "index" not in data and "frame_stamps" in data:
data["index"] = compute_indices(data["t"], data['frame_stamps'])
return data
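# Illustrative usage sketch (editor's addition, not part of the original module),
# assuming `memmap_dir` points at an RPG-style memmap directory as described above:
def _example_read_memmap(memmap_dir):
    data = read_memmap_events(memmap_dir, skip_frames=2, return_events=False)
    # with return_events=False the event arrays stay memory-mapped and can be sliced lazily
    first_xy = data['xy'][:1000]
    return data['num_events'], data['t0'], first_xy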
def read_memmap_events_dict(memmap_path, skip_frames=1, return_events=False, images_file = 'images.npy',
images_ts_file = 'timestamps.npy', optic_flow_file = 'optic_flow.npy',
optic_flow_ts_file = 'optic_flow_timestamps.npy', events_xy_file = 'xy.npy',
events_p_file = 'p.npy', events_t_file = 't.npy'):
"""
Read memmap file events and return them in a dict
"""
data = read_memmap_events(memmap_path, skip_frames, return_events, images_file, images_ts_file,
optic_flow_file, optic_flow_ts_file, events_xy_file, events_p_file, events_t_file)
events = {
'xs':data['xy'][:,0].squeeze(),
'ys':data['xy'][:,1].squeeze(),
        'ts':data['t'][:].squeeze(),
        'ps':data['p'][:].squeeze()}
return events
def read_h5_events(hdf_path):
"""
Read events from HDF5 file (Monash style).
@param hdf_path Path to HDF5 file
@returns Events as 4xN numpy array (N=num events)
"""
f = h5py.File(hdf_path, 'r')
if 'events/x' in f:
#legacy
events = np.stack((f['events/x'][:], f['events/y'][:], f['events/ts'][:], np.where(f['events/p'][:], 1, -1)), axis=1)
else:
events = np.stack((f['events/xs'][:], f['events/ys'][:], f['events/ts'][:], np.where(f['events/ps'][:], 1, -1)), axis=1)
return events
def read_h5_event_components(hdf_path):
"""
Read events from HDF5 file (Monash style).
@param hdf_path Path to HDF5 file
@returns Events as four np arrays with the event components
"""
f = h5py.File(hdf_path, 'r')
if 'events/x' in f:
#legacy
return (f['events/x'][:], f['events/y'][:], f['events/ts'][:], np.where(f['events/p'][:], 1, -1))
else:
return (f['events/xs'][:], f['events/ys'][:], f['events/ts'][:], np.where(f['events/ps'][:], 1, -1))
def read_h5_events_dict(hdf_path, read_frames=True):
"""
Read events from HDF5 file (Monash style).
@param hdf_path Path to HDF5 file
@returns Events as a dict with entries 'xs', 'ys', 'ts', 'ps' containing the event components,
'frames' containing the frames, 'frame_timestamps' containing frame timestamps and
'frame_event_indices' containing the indices of the corresponding event for each frame
"""
f = h5py.File(hdf_path, 'r')
if 'events/x' in f:
#legacy
events = {
'xs':f['events/x'][:],
'ys':f['events/y'][:],
'ts':f['events/ts'][:],
'ps':np.where(f['events/p'][:], 1, -1)
}
return events
else:
events = {
'xs':f['events/xs'][:],
'ys':f['events/ys'][:],
'ts':f['events/ts'][:],
'ps':np.where(f['events/ps'][:], 1, -1)
}
if read_frames:
images = []
image_stamps = []
image_event_indices = []
for key in f['images']:
frame = f['images/{}'.format(key)][:]
images.append(frame)
image_stamps.append(f['images/{}'.format(key)].attrs['timestamp'])
image_event_indices.append(f['images/{}'.format(key)].attrs['event_idx'])
events['frames'] = images
#np.concatenate(images, axis=2).swapaxes(0,2) if len(frame.shape)==3 else np.stack(images, axis=0)
events['frame_timestamps'] = np.array(image_stamps)
events['frame_event_indices'] = np.array(image_event_indices)
return events
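# Illustrative usage sketch (editor's addition, not part of the original module):
def _example_read_h5(hdf_path):
    events = read_h5_events_dict(hdf_path, read_frames=False)
    # xs, ys, ts, ps are aligned 1-D arrays; polarities are mapped to {-1, 1} above
    return events['xs'].shape, events['ts'][0], events['ps'].min()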
| 46.033898 | 128 | 0.592047 |
79400ca308f57260b4d0d6af045764e9be68743c | 3,327 | py | Python | edge/graphics/subplotter/sample_subplotter.py | Data-Science-in-Mechanical-Engineering/edge | 586eaba2f0957e75940f4f19fa774603f57eae89 | [
"MIT"
] | null | null | null | edge/graphics/subplotter/sample_subplotter.py | Data-Science-in-Mechanical-Engineering/edge | 586eaba2f0957e75940f4f19fa774603f57eae89 | [
"MIT"
] | null | null | null | edge/graphics/subplotter/sample_subplotter.py | Data-Science-in-Mechanical-Engineering/edge | 586eaba2f0957e75940f4f19fa774603f57eae89 | [
"MIT"
] | null | null | null | from . import Subplotter
import numpy as np
def masked(to_mask, mask):
return [item for item, keep in zip(to_mask, mask) if keep]
class SampleSubplotter(Subplotter):
def __init__(self, colors):
super(SampleSubplotter, self).__init__(colors)
self.failed_samples = []
self.unfailed_samples = []
self.failed_colors = []
self.unfailed_colors = []
self.failed_markers = []
self.unfailed_markers = []
def incur_sample(self, state, action, failed, color=None, marker=None):
if color is None:
color = [0.9, 0.3, 0.3]
# States and actions are stored in np arrays of shape (1,) (since we
# are plotting them)
if failed:
marker = marker if marker is not None else 'x'
self.failed_samples.append((state[0], action[0]))
self.failed_colors.append(color)
self.failed_markers.append(marker)
else:
marker = marker if marker is not None else '.'
self.unfailed_samples.append((state[0], action[0]))
self.unfailed_colors.append(color)
self.unfailed_markers.append(marker)
def flush_samples(self):
self.failed_samples = []
self.unfailed_samples = []
self.failed_colors = []
self.unfailed_colors = []
self.failed_markers = []
self.unfailed_markers = []
def ensure_samples_in_at_least_one(self, *datasets):
dataset = np.unique(
np.vstack(datasets),
axis=0
)
def is_in_dataset(to_check):
return [np.isclose(x, dataset).all(axis=1).any() for x in to_check]
failed_in = is_in_dataset(self.failed_samples)
unfailed_in = is_in_dataset(self.unfailed_samples)
def filter_list(to_filter, keep_bools):
return [x for x, keep in zip(to_filter, keep_bools) if keep]
self.failed_samples = filter_list(self.failed_samples, failed_in)
self.unfailed_samples = filter_list(self.unfailed_samples, unfailed_in)
self.failed_colors = filter_list(self.failed_colors, failed_in)
self.unfailed_colors = filter_list(self.unfailed_colors, unfailed_in)
self.failed_markers = filter_list(self.failed_markers, failed_in)
        self.unfailed_markers = filter_list(self.unfailed_markers, unfailed_in)
def draw_on_axs(self, ax_Q):
def scatter_stateactions(stateactions, colors, markers):
markers_set = set(markers)
for marker in markers_set:
fltr = [m == marker for m in markers]
if any(fltr):
states, actions = zip(*masked(stateactions, fltr))
ax_Q.scatter(
actions,
states,
color=masked(colors, fltr),
s=60,
marker=marker,
edgecolors='none'
)
if len(self.failed_samples) > 0:
scatter_stateactions(self.failed_samples, self.failed_colors,
self.failed_markers)
if len(self.unfailed_samples) > 0:
scatter_stateactions(self.unfailed_samples, self.unfailed_colors,
self.unfailed_markers)
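# Illustrative usage sketch (editor's addition, not part of the original module),
# assuming `ax_q` is a matplotlib Axes with actions on the x axis and states on the y axis:
#   sub = SampleSubplotter(colors={})
#   sub.incur_sample(np.array([0.3]), np.array([0.7]), failed=False)
#   sub.incur_sample(np.array([0.5]), np.array([0.2]), failed=True)
#   sub.draw_on_axs(ax_q)
#   sub.flush_samples()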
| 39.141176 | 79 | 0.593628 |
79400cc84ba38b4bef421de53e2fccf55beed4db | 5,542 | py | Python | openwisp_users/admin.py | ShubhamKushwah/openwisp-users | 5ea599e776c6053ea3acbeb6beb2ea62538945f6 | [
"BSD-3-Clause"
] | null | null | null | openwisp_users/admin.py | ShubhamKushwah/openwisp-users | 5ea599e776c6053ea3acbeb6beb2ea62538945f6 | [
"BSD-3-Clause"
] | null | null | null | openwisp_users/admin.py | ShubhamKushwah/openwisp-users | 5ea599e776c6053ea3acbeb6beb2ea62538945f6 | [
"BSD-3-Clause"
] | null | null | null | from allauth.account.models import EmailAddress
from django import forms
from django.apps import apps
from django.contrib import admin
from django.contrib.auth.admin import GroupAdmin as BaseGroupAdmin
from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
from django.contrib.auth.forms import UserChangeForm as BaseUserChangeForm
from django.contrib.auth.forms import UserCreationForm as BaseUserCreationForm
from django.utils.translation import ugettext_lazy as _
from organizations.base_admin import (BaseOrganizationAdmin,
BaseOrganizationOwnerAdmin,
BaseOrganizationUserAdmin,
BaseOwnerInline)
from .base import BaseAdmin
from .models import (Group, Organization, OrganizationOwner, OrganizationUser,
User)
class EmailAddressInline(admin.StackedInline):
model = EmailAddress
extra = 0
readonly_fields = ['email']
def has_add_permission(self, *args, **kwargs):
"""
Do not let admins add new email objects via inlines
in order to not mess the coherence of the database.
Admins can still change the main email field of the User model,
that will automatically add a new email address object and
send a confirmation email, see ``UserAdmin.save_model``
"""
return False
class OrganizationUserInline(admin.StackedInline):
model = OrganizationUser
extra = 0
class EmailRequiredMixin(forms.ModelForm):
email = forms.EmailField(label=_('Email'), max_length=254, required=True)
class UserCreationForm(EmailRequiredMixin, BaseUserCreationForm):
pass
class UserChangeForm(EmailRequiredMixin, BaseUserChangeForm):
pass
class UserAdmin(BaseUserAdmin, BaseAdmin):
add_form = UserCreationForm
form = UserChangeForm
readonly_fields = ['last_login', 'date_joined']
list_display = ('username', 'email', 'is_superuser', 'date_joined', 'last_login')
inlines = [EmailAddressInline, OrganizationUserInline]
save_on_top = True
def get_readonly_fields(self, request, obj=None):
# retrieve readonly fields
fields = super(UserAdmin, self).get_readonly_fields(request, obj)
# do not allow operators to escalate their privileges
if not request.user.is_superuser:
# copy to avoid modifying reference
fields = fields[:] + ['is_superuser', 'user_permissions']
return fields
def has_change_permission(self, request, obj=None):
# do not allow operators to edit details of superusers
# returns 403 if trying to access the change form of a superuser
if obj and obj.is_superuser and not request.user.is_superuser:
return False
return super(UserAdmin, self).has_change_permission(request, obj)
def get_queryset(self, request):
qs = super(UserAdmin, self).get_queryset(request)
# hide superusers from operators (they can't edit their details)
if not request.user.is_superuser:
qs = qs.filter(is_superuser=False)
return qs
def get_inline_instances(self, request, obj=None):
"""
Avoid displaying inline objects when adding a new user
"""
if obj:
return super(UserAdmin, self).get_inline_instances(request, obj)
return []
def save_model(self, request, obj, form, change):
"""
Automatically creates email addresses for users
added/changed via the django-admin interface
"""
super(UserAdmin, self).save_model(request, obj, form, change)
if obj.email:
EmailAddress.objects.add_email(request,
user=obj,
email=obj.email,
confirm=True,
signup=True)
base_fields = list(UserAdmin.fieldsets[1][1]['fields'])
additional_fields = ['bio', 'url', 'company', 'location']
UserAdmin.fieldsets[1][1]['fields'] = base_fields + additional_fields
UserAdmin.add_fieldsets[0][1]['fields'] = ('username', 'email', 'password1', 'password2')
class GroupAdmin(BaseGroupAdmin, BaseAdmin):
pass
class OwnerInline(BaseOwnerInline):
model = OrganizationOwner
class OrganizationAdmin(BaseOrganizationAdmin, BaseAdmin):
view_on_site = False
inlines = [OwnerInline]
class OrganizationUserAdmin(BaseOrganizationUserAdmin, BaseAdmin):
view_on_site = False
class OrganizationOwnerAdmin(BaseOrganizationOwnerAdmin, BaseAdmin):
list_display = ('get_user', 'organization')
def get_user(self, obj):
return obj.organization_user.user
admin.site.register(User, UserAdmin)
admin.site.register(Organization, OrganizationAdmin)
admin.site.register(OrganizationUser, OrganizationUserAdmin)
admin.site.register(OrganizationOwner, OrganizationOwnerAdmin)
# unregister auth.Group
base_group_model = apps.get_model('auth', 'Group')
admin.site.unregister(base_group_model)
# register openwisp_users.Group proxy model
admin.site.register(Group, GroupAdmin)
# unregister some admin components to keep the admin interface simple
# we can re-enable these models later when they will be really needed
for model in [('account', 'EmailAddress'),
('socialaccount', 'SocialApp'),
('socialaccount', 'SocialToken'),
('socialaccount', 'SocialAccount')]:
admin.site.unregister(apps.get_model(*model))
| 36.222222 | 89 | 0.686034 |
79400cf6ff7ec61671266dc62fea56ba1c74695b | 4,357 | py | Python | tests/testModelStudy.py | sys-bio/SBstoat | 31b184176a7f19074c905db76e6e6ac8e4fc36a8 | [
"MIT"
] | null | null | null | tests/testModelStudy.py | sys-bio/SBstoat | 31b184176a7f19074c905db76e6e6ac8e4fc36a8 | [
"MIT"
] | 18 | 2020-09-14T19:30:38.000Z | 2021-04-13T18:45:18.000Z | tests/testModelStudy.py | sys-bio/SBStoat | 31b184176a7f19074c905db76e6e6ac8e4fc36a8 | [
"MIT"
] | 3 | 2021-01-29T04:00:25.000Z | 2021-03-03T23:43:39.000Z | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 19, 2020
@author: joseph-hellerstein
"""
from SBstoat.modelStudy import ModelStudy, mkDataSourceDct
import tests._testHelpers as th
from tests import _testConstants as tcn
import matplotlib
import numpy as np
import os
import shutil
import unittest
IGNORE_TEST = False
COLNAME = "V"
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_PATH = os.path.join(THIS_DIR, "tst_data.txt")
DATA_PATH2 = os.path.join(THIS_DIR, "data_file2.csv")
DATA_PATHS = [DATA_PATH, DATA_PATH, DATA_PATH]
SERIALIZE_DIR = os.path.join(THIS_DIR, "modelStudy")
DIRS = [SERIALIZE_DIR]
FILES = []
IS_PLOT = False
PARAMETERS_TO_FIT = [v for v in th.PARAMETER_DCT.keys()]
TIMESERIES = th.getTimeseries()
class TestModelStudy(unittest.TestCase):
def setUp(self):
self._remove()
self.parametersToFit = list(th.PARAMETER_DCT.keys())
self.study = ModelStudy(th.ANTIMONY_MODEL, DATA_PATHS,
parametersToFit=PARAMETERS_TO_FIT,
dirStudyPath=SERIALIZE_DIR, isPlot=IS_PLOT, useSerialized=True)
def tearDown(self):
self._remove()
def _remove(self):
for ffile in FILES:
if os.path.isfile(ffile):
os.remove(ffile)
for ddir in DIRS:
if os.path.isdir(ddir):
shutil.rmtree(ddir)
def testConstructor1(self):
if IGNORE_TEST:
return
self.assertGreater(len(self.study.fitterDct.values()), 0)
# Ensure that ModelFitters are serialized correctly
study = ModelStudy(th.ANTIMONY_MODEL, DATA_PATHS,
parametersToFit=self.parametersToFit,
dirStudyPath=SERIALIZE_DIR, isPlot=IS_PLOT)
for name in self.study.instanceNames:
self.assertEqual(study.fitterDct[name].modelSpecification,
self.study.fitterDct[name].modelSpecification)
def testFitModel(self):
if IGNORE_TEST:
return
self.study.fitModel()
names = [v for v in self.study.fitterDct.keys()]
params0 = self.study.fitterDct[names[0]].params
params1 = self.study.fitterDct[names[1]].params
dct0 = params0.valuesdict()
dct1 = params1.valuesdict()
if tcn.IGNORE_ACCURACY:
return
for key, value in dct0.items():
self.assertTrue(np.isclose(value, dct1[key], rtol=0.5))
def testFitBootstrap(self):
if IGNORE_TEST:
return
study = ModelStudy(th.ANTIMONY_MODEL, DATA_PATHS,
parametersToFit=PARAMETERS_TO_FIT,
dirStudyPath=SERIALIZE_DIR, isPlot=IS_PLOT, useSerialized=False)
study.bootstrap(numIteration=10)
for fitter in study.fitterDct.values():
self.assertIsNotNone(fitter.bootstrapResult)
def testPlotFitAll(self):
if IGNORE_TEST:
return
self.study.fitModel()
self.study.plotFitAll()
#
self.study.bootstrap()
self.study.plotFitAll()
def testPlotParameterEstimates(self):
if IGNORE_TEST:
return
self.study.bootstrap(numIteration=20)
self.study.plotParameterEstimates()
class TestFunctions(unittest.TestCase):
def setUp(self):
pass
def testMkDataSourceDct(self):
if IGNORE_TEST:
return
def test(dataSourceNames=None):
dataSourceDct = mkDataSourceDct(DATA_PATH2, "V",
dataSourceNames=dataSourceNames)
trues = [d.colnames[0] == COLNAME for d in dataSourceDct.values()]
self.assertTrue(all(trues))
keys = [k for k in dataSourceDct.keys()]
firstTS = dataSourceDct[keys[0]]
            trues = [len(d) == len(firstTS) for d in dataSourceDct.values()]
            self.assertTrue(all(trues))
test()
test(dataSourceNames=["P%d" % d for d in range(6)])
def testMkDataSourceDctTimeRows(self):
if IGNORE_TEST:
return
dataSourceDct1 = mkDataSourceDct(DATA_PATH2, "V", isTimeColumns=True)
dataSourceDct2 = mkDataSourceDct(DATA_PATH2, "V", isTimeColumns=False)
keys1 = list(dataSourceDct1)
keys2 = list(dataSourceDct2)
self.assertEqual(len(dataSourceDct1[keys1[0]]), len(keys2))
if __name__ == '__main__':
matplotlib.use('TkAgg')
unittest.main()
| 31.80292 | 78 | 0.638513 |
79400d3fdc94a1c62de0a2c7fd1f816241e53970 | 995 | py | Python | tests/app_management/admin.py | W1ldPo1nter/django-queryable-properties | 9bb4ecb4fbdd7a9e0f610f937c8101a643027fb1 | [
"BSD-3-Clause"
] | 36 | 2019-10-22T11:44:37.000Z | 2022-03-15T21:27:03.000Z | tests/app_management/admin.py | W1ldPo1nter/django-queryable-properties | 9bb4ecb4fbdd7a9e0f610f937c8101a643027fb1 | [
"BSD-3-Clause"
] | 6 | 2020-10-03T15:13:26.000Z | 2021-09-25T14:05:50.000Z | tests/app_management/admin.py | W1ldPo1nter/django-queryable-properties | 9bb4ecb4fbdd7a9e0f610f937c8101a643027fb1 | [
"BSD-3-Clause"
] | 3 | 2021-04-26T08:30:46.000Z | 2021-08-18T09:04:49.000Z | # -*- coding: utf-8 -*-
from queryable_properties.admin import QueryablePropertiesAdmin, QueryablePropertiesTabularInline
from ..app_management.models import VersionWithClassBasedProperties
class VersionAdmin(QueryablePropertiesAdmin):
list_display = ('version', 'application', 'is_supported')
list_filter = ('application', 'major')
search_fields = ('changes',)
date_hierarchy = 'supported_from'
ordering = ('application', 'major', 'minor', 'patch')
class VersionInline(QueryablePropertiesTabularInline):
model = VersionWithClassBasedProperties
list_select_properties = ('changes_or_default',)
ordering = ('version',)
class ApplicationAdmin(QueryablePropertiesAdmin):
list_display = ('name', 'highest_version', 'version_count')
list_filter = ('common_data', 'support_start_date')
list_select_properties = ('version_count',)
search_fields = ('name', 'highest_version')
ordering = ('name', 'version_count')
inlines = (VersionInline,)
| 35.535714 | 97 | 0.739698 |
79400e0280adae8528b03689a40d0db330670e10 | 1,687 | py | Python | semantic_search/schemas.py | BaderLab/semantic-search | 26abcf7d22df6f0991f17d3c854f104cd8a762f1 | [
"MIT"
] | 1 | 2020-05-08T19:17:46.000Z | 2020-05-08T19:17:46.000Z | semantic_search/schemas.py | BaderLab/semantic-search | 26abcf7d22df6f0991f17d3c854f104cd8a762f1 | [
"MIT"
] | 1 | 2020-05-12T14:50:44.000Z | 2020-05-12T14:50:44.000Z | semantic_search/schemas.py | BaderLab/semantic-search | 26abcf7d22df6f0991f17d3c854f104cd8a762f1 | [
"MIT"
] | null | null | null | from typing import List, Optional
import faiss
from pydantic import BaseModel, Field
from transformers import PreTrainedModel, PreTrainedTokenizer
UID = str
# See: https://fastapi.tiangolo.com/tutorial/body/ for more details on creating a Request Body.
class Document(BaseModel):
uid: UID
text: Optional[str] = None
class Search(BaseModel):
query: Document
documents: List[Document] = []
top_k: int = Field(10, gt=0, description="top_k must be greater than 0")
docs_only: bool = False
class Config:
schema_extra = {
"example": {
"query": {
"uid": "0",
"text": "It has recently been shown that Craf is essential for Kras G12D-induced NSCLC.",
},
"documents": [
{
"uid": "1",
"text": "Craf is essential for the onset of Kras-driven non-small cell lung cancer.",
},
{
"uid": "2",
"text": "Tumorigenesis is a multistage process that involves multiple cell types.",
},
{
"uid": "3",
"text": "Only concomitant ablation of ERK1 and ERK2 impairs tumor growth.",
},
],
"top_k": 3,
}
}
class TopMatch(BaseModel):
uid: UID
score: float
class Model(BaseModel):
    tokenizer: PreTrainedTokenizer = None
    model: PreTrainedModel = None
index: faiss.Index = None
class Config:
arbitrary_types_allowed = True
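# Minimal usage sketch. Assumptions: this block is illustrative only, it relies on the
# pydantic v1-style .json() API, and the uids/texts simply echo the schema_extra example
# above rather than real service data.
if __name__ == "__main__":
    example_query = Document(uid="0", text="It has recently been shown that Craf is essential for Kras G12D-induced NSCLC.")
    example_docs = [
        Document(uid="1", text="Craf is essential for the onset of Kras-driven non-small cell lung cancer."),
        Document(uid="2", text="Tumorigenesis is a multistage process that involves multiple cell types."),
    ]
    body = Search(query=example_query, documents=example_docs, top_k=2)
    # This JSON string is the request body a client would POST to the search endpoint.
    print(body.json(indent=2))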
| 26.777778 | 109 | 0.515708 |
79400e3e4e3ade9b39242a00a264f97f5a12697e | 992 | py | Python | create_index/create_squad_document.py | khoavq/BertSquadSearch | 260b9890fe14523c1c98a6cfeb8f668b6027a41e | [
"MIT"
] | 2 | 2020-08-07T10:58:06.000Z | 2020-09-08T06:30:26.000Z | create_index/create_squad_document.py | khoavq/BertSquadSearch | 260b9890fe14523c1c98a6cfeb8f668b6027a41e | [
"MIT"
] | 4 | 2020-11-13T18:54:30.000Z | 2022-02-10T01:54:13.000Z | create_index/create_squad_document.py | khoavq/BertSquadSearch | 260b9890fe14523c1c98a6cfeb8f668b6027a41e | [
"MIT"
] | 1 | 2020-09-16T08:40:21.000Z | 2020-09-16T08:40:21.000Z | import json
from bert_serving.client import BertClient
from pprint import pprint
bc = BertClient(output_fmt='list', check_length=False)
def create_question_document(d_id, question, is_impossible, emb):
return {
'_op_type': 'index',
'_index': 'squad2.0',
'context_id': d_id,
'question': question,
'is_impossible': is_impossible,
'text_vector': emb
}
def main():
f = open("./dataset/questions/all_questions.json", encoding="utf8")
docs = json.load(f)
questions = docs.get("data")
with open("squad_question_2.0.jsonl", 'w') as f:
for question in questions:
print(question)
q = question.get("question")
is_impossible = question.get("is_impossible")
d_id = question.get('id')
a = create_question_document(d_id, q, is_impossible, bc.encode([q])[0])
pprint(a)
f.write(json.dumps(a) + '\n')
if __name__ == '__main__':
main()
| 27.555556 | 83 | 0.606855 |
79400ef9115bdec2d35e638391190dfc21cfc220 | 656 | py | Python | src/loadsave.py | antonrufino/battleship | 76cee1d0d8a53c57e6fbdacee7e6f45cf8859d3d | [
"MIT"
] | null | null | null | src/loadsave.py | antonrufino/battleship | 76cee1d0d8a53c57e6fbdacee7e6f45cf8859d3d | [
"MIT"
] | null | null | null | src/loadsave.py | antonrufino/battleship | 76cee1d0d8a53c57e6fbdacee7e6f45cf8859d3d | [
"MIT"
] | null | null | null | # loadsave.py
# The loadsave module is used to write and read data from files in
# JSON format.
import json
# Based on documentation from https://docs.python.org/2/library/json.html
# Converts data to a JSON string and saves it to a file
def saveToFile(fileName, data):
jsonString = json.dumps(data) #convert data to json string
fh = open(fileName, "w")
fh.write(jsonString)
fh.close()
# Loads a JSON string from a file and converts it to a Python object
def loadFromFile(fileName):
fh = open(fileName, "r")
jsonString = fh.read()
fh.close()
return json.loads(jsonString) #convert json string to python obj
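# Minimal round-trip sketch for the two helpers above. The file name and payload are
# illustrative assumptions, not values used elsewhere in the game.
if __name__ == "__main__":
    example_state = {"turn": 3, "hits": [[0, 0], [4, 7]]}
    saveToFile("example_save.json", example_state)    # dict -> JSON text on disk
    restored = loadFromFile("example_save.json")      # JSON text -> Python objects
    print(restored == example_state)                  # True: the structure survives the round trip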
| 28.521739 | 72 | 0.698171 |
79400f001af4e8b379d6bd713255474ee6c9dd7b | 10,295 | py | Python | aldryn_forms/south_migrations/0007_auto__add_field_formplugin_redirect_type__add_field_formplugin_page__a.py | protoroto/aldryn-forms | 076365f391c096805c5ffea26f40ab728a6fcd2a | [
"BSD-3-Clause"
] | null | null | null | aldryn_forms/south_migrations/0007_auto__add_field_formplugin_redirect_type__add_field_formplugin_page__a.py | protoroto/aldryn-forms | 076365f391c096805c5ffea26f40ab728a6fcd2a | [
"BSD-3-Clause"
] | null | null | null | aldryn_forms/south_migrations/0007_auto__add_field_formplugin_redirect_type__add_field_formplugin_page__a.py | protoroto/aldryn-forms | 076365f391c096805c5ffea26f40ab728a6fcd2a | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'FormPlugin.redirect_type'
db.add_column(u'cmsplugin_formplugin', 'redirect_type',
self.gf('django.db.models.fields.CharField')(default='redirect_to_page', max_length=20),
keep_default=False)
# Adding field 'FormPlugin.page'
db.add_column(u'cmsplugin_formplugin', 'page',
self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cms.Page'], null=True, blank=True),
keep_default=False)
# Adding field 'FormPlugin.url'
db.add_column(u'cmsplugin_formplugin', 'url',
self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'FormPlugin.redirect_type'
db.delete_column(u'cmsplugin_formplugin', 'redirect_type')
# Deleting field 'FormPlugin.page'
db.delete_column(u'cmsplugin_formplugin', 'page_id')
# Deleting field 'FormPlugin.url'
db.delete_column(u'cmsplugin_formplugin', 'url')
models = {
u'aldryn_forms.buttonplugin': {
'Meta': {'object_name': 'ButtonPlugin', 'db_table': "u'cmsplugin_buttonplugin'", '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'aldryn_forms.fieldplugin': {
'Meta': {'object_name': 'FieldPlugin', 'db_table': "u'cmsplugin_fieldplugin'", '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'help_text': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'max_value': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'min_value': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'placeholder_text': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'required': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'required_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'aldryn_forms.fieldsetplugin': {
'Meta': {'object_name': 'FieldsetPlugin', 'db_table': "u'cmsplugin_fieldsetplugin'", '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'legend': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
},
u'aldryn_forms.formplugin': {
'Meta': {'object_name': 'FormPlugin', 'db_table': "u'cmsplugin_formplugin'", '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'error_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'redirect_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
},
u'aldryn_forms.option': {
'Meta': {'object_name': 'Option'},
'field': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['aldryn_forms.FieldPlugin']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('publisher_is_draft', 'application_namespace'),)", 'object_name': 'Page'},
'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['aldryn_forms'] | 76.259259 | 184 | 0.57863 |
79400fc8d4676bf55ba03dbbc402ba01f7c44924 | 210 | py | Python | WEEKS/wk17/CodeSignal-Solutions/12_-_sortByHeight.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | WEEKS/wk17/CodeSignal-Solutions/12_-_sortByHeight.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | WEEKS/wk17/CodeSignal-Solutions/12_-_sortByHeight.py | webdevhub42/Lambda | b04b84fb5b82fe7c8b12680149e25ae0d27a0960 | [
"MIT"
] | null | null | null | def sortByHeight(a):
treePositions = [x for x in range(len(a)) if a[x] == -1]
people = sorted([x for x in a if x != -1])
for tree in treePositions:
people.insert(tree, -1)
return people
| 30 | 60 | 0.595238 |
79400fea822b9ff6ea33922dd280de797360c319 | 1,292 | py | Python | nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py | Conxz/nipype | 1281723ae56eacd103597ff4081a205583706e62 | [
"Apache-2.0"
] | null | null | null | nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py | Conxz/nipype | 1281723ae56eacd103597ff4081a205583706e62 | [
"Apache-2.0"
] | 2 | 2017-10-05T21:08:38.000Z | 2018-10-09T23:01:23.000Z | nipype/interfaces/semtools/diffusion/tests/test_auto_extractNrrdVectorIndex.py | Conxz/nipype | 1281723ae56eacd103597ff4081a205583706e62 | [
"Apache-2.0"
] | 1 | 2016-10-11T19:18:53.000Z | 2016-10-11T19:18:53.000Z | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from .....testing import assert_equal
from ..gtract import extractNrrdVectorIndex
def test_extractNrrdVectorIndex_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
inputVolume=dict(argstr='--inputVolume %s',
),
numberOfThreads=dict(argstr='--numberOfThreads %d',
),
outputVolume=dict(argstr='--outputVolume %s',
hash_files=False,
),
setImageOrientation=dict(argstr='--setImageOrientation %s',
),
terminal_output=dict(nohash=True,
),
vectorIndex=dict(argstr='--vectorIndex %d',
),
)
inputs = extractNrrdVectorIndex.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_extractNrrdVectorIndex_outputs():
output_map = dict(outputVolume=dict(),
)
outputs = extractNrrdVectorIndex.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
yield assert_equal, getattr(outputs.traits()[key], metakey), value
| 29.363636 | 78 | 0.675697 |
794010e969b9b4297b6e55f409514a91ce49cc68 | 43 | py | Python | webpath.py | liu-jian/seos | e14fc217bbbb6fac91857d7283f5fe93d3d36128 | [
"MIT"
] | null | null | null | webpath.py | liu-jian/seos | e14fc217bbbb6fac91857d7283f5fe93d3d36128 | [
"MIT"
] | null | null | null | webpath.py | liu-jian/seos | e14fc217bbbb6fac91857d7283f5fe93d3d36128 | [
"MIT"
] | null | null | null | webpath = {
'web-1-04': '/var/www/',
}
| 10.75 | 28 | 0.44186 |
794010f011272d5dc9a089a0d0b3635f91a2cf86 | 170 | py | Python | tests/model_control/detailed/transf_Anscombe/model_control_one_enabled_Anscombe_ConstantTrend_Seasonal_DayOfMonth_MLP.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | tests/model_control/detailed/transf_Anscombe/model_control_one_enabled_Anscombe_ConstantTrend_Seasonal_DayOfMonth_MLP.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | 1 | 2019-11-30T23:39:38.000Z | 2019-12-01T04:34:35.000Z | tests/model_control/detailed/transf_Anscombe/model_control_one_enabled_Anscombe_ConstantTrend_Seasonal_DayOfMonth_MLP.py | jmabry/pyaf | afbc15a851a2445a7824bf255af612dc429265af | [
"BSD-3-Clause"
] | null | null | null | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Anscombe'] , ['ConstantTrend'] , ['Seasonal_DayOfMonth'] , ['MLP'] ); | 42.5 | 92 | 0.764706 |
79401111b0d32d264521ef722783be652dbf9b4e | 5,096 | py | Python | tests/integration/ASR_CTC/example_asr_ctc_experiment.py | Chaanks/speechbrain | 6447bde54f6e3fb07fdb934ab535f17cadfbad53 | [
"Apache-2.0"
] | null | null | null | tests/integration/ASR_CTC/example_asr_ctc_experiment.py | Chaanks/speechbrain | 6447bde54f6e3fb07fdb934ab535f17cadfbad53 | [
"Apache-2.0"
] | null | null | null | tests/integration/ASR_CTC/example_asr_ctc_experiment.py | Chaanks/speechbrain | 6447bde54f6e3fb07fdb934ab535f17cadfbad53 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env/python3
"""This minimal example trains a CTC-based speech recognizer on a tiny dataset.
The encoder is based on a combination of convolutional, recurrent, and
feed-forward networks (CRDNN) that predict phonemes. A greedy search is used on
top of the output probabilities.
Given the tiny dataset, the expected behavior is to overfit the training dataset
(with a validation performance that stays high).
"""
import pathlib
import speechbrain as sb
from hyperpyyaml import load_hyperpyyaml
class CTCBrain(sb.Brain):
def compute_forward(self, batch, stage):
"Given an input batch it computes the output probabilities."
batch = batch.to(self.device)
wavs, lens = batch.sig
feats = self.modules.compute_features(wavs)
feats = self.modules.mean_var_norm(feats, lens)
x = self.modules.model(feats)
x = self.modules.lin(x)
outputs = self.hparams.softmax(x)
return outputs, lens
def compute_objectives(self, predictions, batch, stage):
"Given the network predictions and targets computed the CTC loss."
predictions, lens = predictions
phns, phn_lens = batch.phn_encoded
loss = self.hparams.compute_cost(predictions, phns, lens, phn_lens)
if stage != sb.Stage.TRAIN:
seq = sb.decoders.ctc_greedy_decode(
predictions, lens, blank_id=self.hparams.blank_index
)
self.per_metrics.append(batch.id, seq, phns, target_len=phn_lens)
return loss
def on_stage_start(self, stage, epoch=None):
"Gets called when a stage (either training, validation, test) starts."
if stage != sb.Stage.TRAIN:
self.per_metrics = self.hparams.per_stats()
def on_stage_end(self, stage, stage_loss, epoch=None):
"""Gets called at the end of a stage."""
if stage == sb.Stage.TRAIN:
self.train_loss = stage_loss
if stage == sb.Stage.VALID and epoch is not None:
print("Epoch %d complete" % epoch)
print("Train loss: %.2f" % self.train_loss)
if stage != sb.Stage.TRAIN:
print(stage, "loss: %.2f" % stage_loss)
print(stage, "PER: %.2f" % self.per_metrics.summarize("error_rate"))
def data_prep(data_folder, hparams):
"Creates the datasets and their data processing pipelines."
# 1. Declarations:
train_data = sb.dataio.dataset.DynamicItemDataset.from_json(
json_path=data_folder / "../annotation/ASR_train.json",
replacements={"data_root": data_folder},
)
valid_data = sb.dataio.dataset.DynamicItemDataset.from_json(
json_path=data_folder / "../annotation/ASR_dev.json",
replacements={"data_root": data_folder},
)
datasets = [train_data, valid_data]
label_encoder = sb.dataio.encoder.CTCTextEncoder()
# 2. Define audio pipeline:
@sb.utils.data_pipeline.takes("wav")
@sb.utils.data_pipeline.provides("sig")
def audio_pipeline(wav):
sig = sb.dataio.dataio.read_audio(wav)
return sig
sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
# 3. Define text pipeline:
@sb.utils.data_pipeline.takes("phn")
@sb.utils.data_pipeline.provides("phn_list", "phn_encoded")
def text_pipeline(phn):
phn_list = phn.strip().split()
yield phn_list
phn_encoded = label_encoder.encode_sequence_torch(phn_list)
yield phn_encoded
sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
    # 4. Fit encoder:
# NOTE: In this minimal example, also update from valid data
label_encoder.insert_blank(index=hparams["blank_index"])
label_encoder.update_from_didataset(train_data, output_key="phn_list")
label_encoder.update_from_didataset(valid_data, output_key="phn_list")
    # 5. Set output:
sb.dataio.dataset.set_output_keys(datasets, ["id", "sig", "phn_encoded"])
return train_data, valid_data
def main(device="cpu"):
experiment_dir = pathlib.Path(__file__).resolve().parent
hparams_file = experiment_dir / "hyperparams.yaml"
data_folder = "../../samples/ASR/"
data_folder = (experiment_dir / data_folder).resolve()
# Load model hyper parameters:
with open(hparams_file) as fin:
hparams = load_hyperpyyaml(fin)
# Dataset creation
train_data, valid_data = data_prep(data_folder, hparams)
# Trainer initialization
ctc_brain = CTCBrain(
hparams["modules"],
hparams["opt_class"],
hparams,
run_opts={"device": device},
)
# Training/validation loop
ctc_brain.fit(
range(hparams["N_epochs"]),
train_data,
valid_data,
train_loader_kwargs=hparams["dataloader_options"],
valid_loader_kwargs=hparams["dataloader_options"],
)
# Evaluation is run separately (now just evaluating on valid data)
ctc_brain.evaluate(valid_data)
# Check if model overfits for integration test
assert ctc_brain.train_loss < 1.0
if __name__ == "__main__":
main()
def test_error(device):
main(device)
| 33.973333 | 80 | 0.67759 |
794011182d1bd92892fb47a1e8da2622e6d928ad | 1,588 | py | Python | py/cidoc_crm_types/properties/p139_has_alternative_form.py | minorg/cidoc-crm-types | 9018bdbf0658e4d28a87bc94543e467be45d8aa5 | [
"Apache-2.0"
] | null | null | null | py/cidoc_crm_types/properties/p139_has_alternative_form.py | minorg/cidoc-crm-types | 9018bdbf0658e4d28a87bc94543e467be45d8aa5 | [
"Apache-2.0"
] | null | null | null | py/cidoc_crm_types/properties/p139_has_alternative_form.py | minorg/cidoc-crm-types | 9018bdbf0658e4d28a87bc94543e467be45d8aa5 | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
@dataclass
class P139HasAlternativeForm:
"""
Scope note:
This property establishes a relationship of equivalence between two instances of E41 Appellation independent from any item identified by them. It is a dynamic asymmetric relationship, where the range expresses the derivative, if such a direction can be established. Otherwise, the relationship is symmetric.
The relationship is not transitive.
The equivalence applies to all cases of use of an instance of E41 Appellation. Multiple names assigned to an object, which are not equivalent for all things identified with a specific instance of E41 Appellation, should be modelled as repeated values of P1 is identified by (identifies).
    P139.1 has type allows the type of derivation, such as “transliteration from Latin 1 to ASCII”, to be refined.
Examples:
- "Martin Doerr" (E41) has alternative form "Martin Dörr" (E41) has type Alternate spelling (E55)
- "Гончарова, Наталья Сергеевна" (E41) has alternative form "Gončarova, Natal´â Sergeevna" (E41) has type ISO 9:1995 transliteration (E55)
- "Αθήνα" has alternative form "Athina" has type transcription.
In First Order Logic:
P139(x,y) ⊃ E41(x)
P139 (x,y) ⊃ E41(y)
P139(x,y,z) ⊃ [P139(x,y) ∧ E55(z)]
P139(x,y) ⊃ P139(y,x)
"""
URI = "http://erlangen-crm.org/current/P139_has_alternative_form"
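# Minimal usage sketch. Assumptions: rdflib is available and the example resource IRIs
# below are invented; the original module only defines the URI constant above.
if __name__ == "__main__":
    from rdflib import Graph, Namespace, URIRef

    EX = Namespace("http://example.org/appellation/")
    graph = Graph()
    # "Martin Doerr" P139 has alternative form "Martin Dörr", as in the scope note examples.
    graph.add((EX["martin-doerr"], URIRef(P139HasAlternativeForm.URI), EX["martin-doerr-alternate"]))
    print(graph.serialize(format="turtle"))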
| 52.933333 | 307 | 0.729219 |
7940112fbaa9f2e66776e5e2dbdc2075505fdd3d | 299 | py | Python | test/test_dash/test_callbacks.py | tschm/pyweb | 4a4fc0190bd7d5ef5f17b6ce094c3e945b3cc821 | [
"MIT"
] | null | null | null | test/test_dash/test_callbacks.py | tschm/pyweb | 4a4fc0190bd7d5ef5f17b6ce094c3e945b3cc821 | [
"MIT"
] | null | null | null | test/test_dash/test_callbacks.py | tschm/pyweb | 4a4fc0190bd7d5ef5f17b6ce094c3e945b3cc821 | [
"MIT"
] | null | null | null | from pyweb.pydash.pydash.callbacks import Cache
from test.settings import read
import pandas.testing as pt
def test_cache():
prices = read("price.csv", index_col=0, parse_dates=True)
x = Cache.to_json(frame=prices)
pt.assert_frame_equal(prices, Cache.read_json(x), check_names=False)
| 27.181818 | 72 | 0.759197 |
7940115d216623ea75ace1f77a6044495a9eb51e | 14,799 | py | Python | data_extraction.py | cbjuan/opensim-arff | f36357e56f9ac7c876ccfcafd3ed958e4f349ffb | [
"MIT"
] | null | null | null | data_extraction.py | cbjuan/opensim-arff | f36357e56f9ac7c876ccfcafd3ed958e4f349ffb | [
"MIT"
] | null | null | null | data_extraction.py | cbjuan/opensim-arff | f36357e56f9ac7c876ccfcafd3ed958e4f349ffb | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Python Libraries
import MySQLdb
import json
import io
import datetime
import re
# Importing other code
from classes import *
def saveJson(json_name, objects_list, encoder_class):
with io.open(''+json_name+'.json', 'w', encoding='utf-8') as jsonTemp:
jsonTemp.write(unicode(json.dumps(objects_list, jsonTemp, cls=encoder_class, indent=2)))
jsonTemp.close()
'''
Get users and data from database
'''
print "\nGet data from database\n"
# Set the proper params
database_host = ""
database_user = ""
database_passwd = ""
database_name = ""
db = MySQLdb.connect(host=database_host,user=database_user,passwd=database_passwd, db=database_name)
users_list = []
groups_list = []
membership_list = []
friendship_list = []
cur = db.cursor()
# SQL Query
cur.execute("""SELECT PrincipalID, FirstName, LastName, Email from UserAccounts """)
for row in cur.fetchall():
p = User(user_id=row[0], user_name=row[1].replace("*pending* ", ""), user_surname=row[2], email=row[3])
users_list.append(p)
saveJson("users", users_list, UserEncoder)
'''
Other queries that could be used
- These queries produce json files with the info.
- You can format these data to ARFF using utilities available in this code
# Interesting fields in the "UserAccounts" table
# PrincipalID
# FirstName
# LastName
# Email
# UserLevel -> superadmins have a 100 user level. Common users an user level = 0
# UserTitle -> the label for an user, not all have one.
# "friends" Table
# PrincipalID
# Friend
# Flags ?
# Offered ?
# The "griduser" table keeps the users' positions within the virtual world
# "osgroup" table
# GroupID
# Name (of the group)
# OwnerRoleID
# "osgroupmembership" Table
# GroupID
# AgentID
# Get groups
cur.execute("""SELECT GroupID, Name, OwnerRoleID from osgroup """)
for row in cur.fetchall():
print row[0]
print row[1]
print row[2]
g = Group(group_id=row[0], group_name=row[1], group_owner=row[2])
groups_list.append(g)
with io.open('path/groups.json', 'w', encoding='utf-8') as groups_json:
groups_json.write(unicode(json.dumps(groups_list, groups_json, cls=GroupEncoder, indent=2)))
groups_json.close()
# Get members of groups
cur.execute("""SELECT GroupID, AgentID from osgroupmembership """)
for row in cur.fetchall():
print row[0]
print row[1]
g = Member(group_id=row[0], user_id=row[1], )
membership_list.append(g)
with io.open('path/groupsMembers.json', 'w', encoding='utf-8') as members_json:
members_json.write(unicode(json.dumps(membership_list, members_json, cls=MembersEncoder, indent=2)))
members_json.close()
# Get friends
cur.execute("""SELECT PrincipalID, Friend from friends """)
for row in cur.fetchall():
print row[0]
print row[1]
g = Friend(user_id1=row[0], user_id2=row[1], )
friendship_list.append(g)
with io.open('path/friends.json', 'w', encoding='utf-8') as json_friends:
json_friends.write(unicode(json.dumps(friendship_list, json_friends, cls=FriendsEncoder, indent=2)))
json_friends.close()
'''
'''
Get sessions from Opensim log
'''
session_list = []
date_temp = []
hour_temp = []
h2_temp = []
begin_session = 0
fin_session = 0
print "\nReal Sessions\n"
# path to Opensim Robust.log
log_path = ""
f = open(log_path)
line = f.readline()
for line in f:
if line.find("OpenSim.Services.PresenceService.PresenceService [PRESENCE SERVICE]: LoginAgent") >= 0:
begin_session = begin_session+1
session_init = re.search("(.+).LoginAgent (.+).with session (.+).and ssession (.+)", line)
if session_init is not None:
date_beg_session = re.split(" ", session_init.group(1))
date_temp = date_beg_session[0].split("-")
hour_temp = date_beg_session[1].split(":")
h2_temp=hour_temp[2].split(",",1)
p = Session(session_id=str(session_init.group(3)), user_id=str(session_init.group(2)),
date_init=str(date_beg_session[0]), year_init=str(date_temp[0]),
month_init=str(date_temp[1]), day_init=str(date_temp[2]),
hour_init=str(date_beg_session[1]), hours_init=str(hour_temp[0]),
minutes_init=str(hour_temp[1]), seconds_init=str(h2_temp[0]), date_fin=0,
year_fin=0, month_fin=0, day_fin=0, hour_fin=0, hours_fin=0,
minutes_fin=0, seconds_fin=0, session_t=0)
session_list.append(p)
elif line.find("OpenSim.Services.PresenceService.PresenceService [PRESENCE SERVICE]: Session") >= 0:
fin_session = fin_session+1
endSession = re.search("(.+).Session (.+).logout", line)
if endSession is not None:
date_e_session = re.split(" ", endSession.group(1))
for x in session_list:
if x.session_id == endSession.group(2):
x.date_fin = str(date_e_session[0])
x.hour_fin = str(date_e_session[1])
date_temp = date_e_session[0].split("-")
x.year_fin = str(date_temp[0])
x.month_fin = str(date_temp[1])
x.day_fin = str(date_temp[2])
hour_temp = x.hour_fin.split(":")
h2_temp = hour_temp[2].split(",",1)
x.hours_fin = str(hour_temp[0])
x.minutes_fin = str(hour_temp[1])
x.seconds_fin = str(h2_temp[0])
d1 = datetime.datetime.strptime(x.date_init+" "+x.hours_init+":"+x.minutes_init
+ ":"+x.seconds_init, "%Y-%m-%d %H:%M:%S")
d2 = datetime.datetime.strptime(x.date_fin+" "+x.hours_fin+":"+x.minutes_fin
+ ":"+x.seconds_fin, "%Y-%m-%d %H:%M:%S")
result = d2 - d1
x.session_t = str(result.total_seconds())
time_total_sec = 0
time_total_hours = 0.0
time_average_min = 0.0
for x in session_list:
time_total_sec += float(x.session_t)
tMedioSeg = time_total_sec/len(session_list)
time_total_hours = time_total_sec/3600
time_average_min = tMedioSeg/60
print "With real users"
print "How many sessions started %d, complete sessions: %d" % (begin_session, fin_session)
print "Users have employed %d seconds or %.2f hours in the world" % (time_total_sec, time_total_hours)
print "Average time of %d seconds or %.2f minutes per user" % (tMedioSeg, time_average_min)
saveJson("sessions", session_list, SessionEncoder)
print "\n\nReal sessions for Weka\n\n"
arff = open ("sessions.arff", "w")
arff.write("% Title: Info about sessions\n% Creator: Juan Cruz-Benito\n% Date: June, 2013\n\n")
arff.write("@relation 'Sessions'\n")
arff.write("\n@attribute CLASE {Login, Logout}\n")
arff.write("@attribute user_id string\n@attribute fecha DATE yyyy-MM-dd HH:mm:ss\n")
arff.write("\n\n@data\n")
for x in session_list:
if x.date_fin != 0:
arff.write("Login,"+str(x.user_id)+","+str(x.date_init)+" "+str(x.hours_init)+":"
+str(x.minutes_init)+":"+str(x.seconds_init)+"\n")
arff.write("Logout,"+str(x.user_id)+","+str(x.date_fin)+" "+str(x.hours_fin)+":"
+str(x.minutes_fin)+":"+str(x.seconds_fin)+"\n")
arff.close()
'''
Get movements from Virtual World's log
'''
teleports_requests_counter = 0
teleports_incomplete_counter = 0
teleports_complete_counter = 0
teleports_list = []
out_terrain_counter = 0
close_terrain_counter = 0
close_connection_counter = 0
arrival_terrain_counter = 0
print "\nReal Teleports\n"
# path to Opensim Opensim.log
log_path = ""
f = open(log_path)
line = f.readline()
for line in f:
if line.find("Request Teleport to") >= 0:
# Example
# 2012-07-05 09:43:34,697 DEBUG - OpenSim.Region.CoreModules.Framework.EntityTransfer.EntityTransferModule
# [ENTITY TRANSFER MODULE]: Request Teleport to http://212.128.146.39:1935/ (http://212.128.146.39:1935/)
# USALPHARMA/<128, 128, 1.5>
teleport_request = re.search("(.+).Request Teleport to http://(.+)/ \(http://(.+)/\) (.+)/.", line)
if teleport_request is not None:
teleport_date = re.split(" ", teleport_request.group(1))
teleport_region_dest_o = teleport_request.group(4)
teleports_requests_counter = teleports_requests_counter+1
for line in f:
if line.find("Closing child agents. Checking") >= 0:
# Example
# 2012-07-05 09:35:02,498 DEBUG - OpenSim.Region.Framework.Scenes.ScenePresence [SCENE PRESENCE]:
# Closing child agents. Checking 1 regions in USAL SIPPE
teleport_o = re.search("(.+).Closing child agents. Checking (.+) regions in (.+)", line)
if teleport_o is not None:
teleport_date = re.split(" ", teleport_request.group(1))
source_region_teleport = teleport_o.group(3)
horaTemp = teleport_date[1].split(",",1)
hour_initTemp = horaTemp[0]
p = Teleport(user_name="", date_init=str(teleport_date[0]), hour_init=str(hour_initTemp),
teleport_source=str(source_region_teleport.replace(" ", "_")), teleport_dest="")
teleports_list.append(p)
posLisAct = len(teleports_list)
out_terrain_counter = out_terrain_counter+1
elif line.find("Upgrading child to root agent") >= 0:
# Example
# 2012-07-05 09:35:04,490 DEBUG - OpenSim.Region.Framework.Scenes.ScenePresence [SCENE]:
# Upgrading child to root agent for Admin SIPPE in USALBIO
teleport_d = re.search("Upgrading child to root agent for (.+) in (.+)", line)
if teleport_d is not None:
                        arrival_terrain_counter = arrival_terrain_counter+1
if teleport_d.group(2) == teleport_region_dest_o:
regionteleport_d = teleport_d.group(2)
teleports_complete_counter = teleports_complete_counter+1
teleports_list[posLisAct-1].teleport_dest = str(regionteleport_d.replace (" ", "_"))
teleports_list[posLisAct-1].user_name = str(teleport_d.group(1).replace (" ", "_"))
else:
teleports_incomplete_counter = teleports_incomplete_counter+1
break
elif line.find("Closing child agents. Checking") >= 0:
# Example
# 2012-07-05 09:35:02,498 DEBUG - OpenSim.Region.Framework.Scenes.ScenePresence [SCENE PRESENCE]: Closing
# child agents. Checking 1 regions in USAL SIPPE
teleport_source = re.search("(.+).Closing child agents. Checking (.+) regions in (.+)", line)
if teleport_source is not None:
out_terrain_counter = out_terrain_counter+1
elif line.find("Removing root agent") >= 0:
# Example
# 2012-12-03 14:49:16,846 DEBUG - OpenSim.Region.Framework.Scenes.Scene [SCENE]: Removing root agent
# Patricia Gonzalez f09f6a7e-2baf-4cb4-a9af-db3ca7714ad5 from USALBIO
terrain_close = re.search(".Removing root agent (.+) (.+)from (.+)", line)
if terrain_close is not None:
close_terrain_counter = close_terrain_counter+1
elif line.find("Removing child agent") >= 0:
# Example
# 2012-12-03 14:49:16,863 DEBUG - OpenSim.Region.Framework.Scenes.Scene [SCENE]: Removing child agent
# Patricia Gonzalez f09f6a7e-2baf-4cb4-a9af-db3ca7714ad5 from Animal Recovery Center
connection_close = re.search(".Removing child agent (.+) (.+)from (.+)", line)
if connection_close is not None:
close_connection_counter = close_connection_counter+1
elif line.find("Upgrading child to root agent") >= 0:
# Example
# 2012-07-05 09:35:04,490 DEBUG - OpenSim.Region.Framework.Scenes.ScenePresence [SCENE]: Upgrading child
# to root agent for Admin SIPPE in USALBIO
teleport_dest = re.search(".Upgrading child to root agent for (.+) in (.+)", line)
if teleport_dest is not None:
arrival_terrain_counter = arrival_terrain_counter+1
teleport_source_list = []
teleport_dest_list = []
for x in teleports_list:
if x.teleport_source not in teleport_source_list:
teleport_source_list.append(x.teleport_source)
if x.teleport_dest not in teleport_dest_list:
teleport_dest_list.append(x.teleport_dest)
teleport_source_list_str = str(teleport_source_list).replace("[", "")
teleport_source_list_str = teleport_source_list_str.replace("]", "")
teleport_dest_list_str = str(teleport_dest_list).replace("[", "")
teleport_dest_list_str = teleport_dest_list_str.replace("]", "")
print "\n\nWeka Teleports\n\n"
arff = open ("teleports.arff", "w")
arff.write("% Title: Info about teleports\n% Creator: Juan Cruz-Benito\n% Date: June, 2013\n\n")
arff.write("@relation 'Teleports'\n")
arff.write("\n@attribute CLASE {CompleteTeleport, IncompleteTeleport}\n")
arff.write("@attribute user_name string\n@attribute teleport_source {"+teleport_source_list_str+"}"
"\n@attribute teleport_dest {"+teleport_dest_list_str+"}\n@attribute date "
"DATE yyyy-MM-dd HH:mm:ss")
arff.write("\n\n@data\n")
for x in teleports_list:
if (x.user_name != "") or (x.teleport_dest != ""):
arff.write("CompleteTeleport," + str(x.user_name) + "," + str(x.teleport_source) + "," + str(x.teleport_dest)
+ "," + str(x.date_init) + " " + str(x.hour_init)+"\n")
elif x.user_name != "":
arff.write("IncompleteTeleport,?" + ","+str(x.teleport_source) + "," + str(x.teleport_dest)
+ "," + str(x.date_init) + " " + str(x.hour_init)+"\n")
elif x.teleport_dest != "":
arff.write("IncompleteTeleport," + str(x.user_name)+"," + str(x.teleport_source)
+ ",?," + str(x.date_init)+" " + str(x.hour_init)+"\n")
else:
arff.write("IncompleteTeleport,?" + "," + str(x.teleport_source) + ",?," + str(x.date_init)
+ " " + str(x.hour_init) + "\n")
arff.close()
print "Number of teleport requests %d" % teleports_requests_counter
print "Complete teleports: %d" % teleports_complete_counter
print "Incomplete teleports: %d" % teleports_incomplete_counter
print "Departures from terrain/island: %d" % out_terrain_counter
print "Clossing connections in a terrain: %d" % close_terrain_counter
print "Clossing connections: %d" % close_connection_counter
print "Arrivals to terrain/island: %d" % arrival_terrain_counter
saveJson("real_movements", teleports_list, TeleportEncoder)
| 39.889488 | 117 | 0.638692 |
7940116303ba9b4c83df2c70c546fa5b793008c3 | 13,391 | py | Python | electrum_mona/gui/kivy/uix/dialogs/tx_dialog.py | Redstoneguy129/electrum-mona | c9a417b2a6ddb359faedef7d7c639add3bcab954 | [
"MIT"
] | null | null | null | electrum_mona/gui/kivy/uix/dialogs/tx_dialog.py | Redstoneguy129/electrum-mona | c9a417b2a6ddb359faedef7d7c639add3bcab954 | [
"MIT"
] | null | null | null | electrum_mona/gui/kivy/uix/dialogs/tx_dialog.py | Redstoneguy129/electrum-mona | c9a417b2a6ddb359faedef7d7c639add3bcab954 | [
"MIT"
] | null | null | null | import copy
from datetime import datetime
from typing import NamedTuple, Callable, TYPE_CHECKING
from functools import partial
from kivy.app import App
from kivy.factory import Factory
from kivy.properties import ObjectProperty
from kivy.lang import Builder
from kivy.clock import Clock
from kivy.uix.label import Label
from kivy.uix.dropdown import DropDown
from kivy.uix.button import Button
from .question import Question
from electrum_mona.gui.kivy.i18n import _
from electrum_mona.util import InvalidPassword
from electrum_mona.address_synchronizer import TX_HEIGHT_LOCAL
from electrum_mona.wallet import CannotBumpFee, CannotDoubleSpendTx
from electrum_mona.transaction import Transaction, PartialTransaction
from electrum_mona.network import NetworkException
from ...util import address_colors
if TYPE_CHECKING:
from ...main_window import ElectrumWindow
Builder.load_string('''
#:import KIVY_GUI_PATH electrum_mona.gui.kivy.KIVY_GUI_PATH
<TxDialog>
id: popup
title: _('Transaction')
is_mine: True
can_sign: False
can_broadcast: False
can_rbf: False
fee_str: ''
feerate_str: ''
date_str: ''
date_label:''
amount_str: ''
tx_hash: ''
status_str: ''
description: ''
outputs_str: ''
BoxLayout:
orientation: 'vertical'
ScrollView:
scroll_type: ['bars', 'content']
bar_width: '25dp'
GridLayout:
height: self.minimum_height
size_hint_y: None
cols: 1
spacing: '10dp'
padding: '10dp'
GridLayout:
height: self.minimum_height
size_hint_y: None
cols: 1
spacing: '10dp'
BoxLabel:
text: _('Status')
value: root.status_str
BoxLabel:
text: _('Description') if root.description else ''
value: root.description
BoxLabel:
text: root.date_label
value: root.date_str
BoxLabel:
text: _('Amount sent') if root.is_mine else _('Amount received')
value: root.amount_str
BoxLabel:
text: _('Transaction fee') if root.fee_str else ''
value: root.fee_str
BoxLabel:
text: _('Transaction fee rate') if root.feerate_str else ''
value: root.feerate_str
TopLabel:
text: _('Transaction ID') + ':' if root.tx_hash else ''
TxHashLabel:
data: root.tx_hash
name: _('Transaction ID')
TopLabel:
text: _('Outputs') + ':'
OutputList:
id: output_list
Widget:
size_hint: 1, 0.1
BoxLayout:
size_hint: 1, None
height: '48dp'
Button:
id: action_button
size_hint: 0.5, None
height: '48dp'
text: ''
disabled: True
opacity: 0
on_release: root.on_action_button_clicked()
IconButton:
size_hint: 0.5, None
height: '48dp'
icon: f'atlas://{KIVY_GUI_PATH}/theming/light/qrcode'
on_release: root.show_qr()
Button:
size_hint: 0.5, None
height: '48dp'
text: _('Label')
on_release: root.label_dialog()
Button:
size_hint: 0.5, None
height: '48dp'
text: _('Close')
on_release: root.dismiss()
''')
class ActionButtonOption(NamedTuple):
text: str
func: Callable
enabled: bool
class TxDialog(Factory.Popup):
def __init__(self, app, tx):
Factory.Popup.__init__(self)
self.app = app # type: ElectrumWindow
self.wallet = self.app.wallet
self.tx = tx # type: Transaction
self._action_button_fn = lambda btn: None
# If the wallet can populate the inputs with more info, do it now.
# As a result, e.g. we might learn an imported address tx is segwit,
# or that a beyond-gap-limit address is is_mine.
# note: this might fetch prev txs over the network.
# note: this is a no-op for complete txs
tx.add_info_from_wallet(self.wallet)
def on_open(self):
self.update()
def update(self):
format_amount = self.app.format_amount_and_units
tx_details = self.wallet.get_tx_info(self.tx)
tx_mined_status = tx_details.tx_mined_status
exp_n = tx_details.mempool_depth_bytes
amount, fee = tx_details.amount, tx_details.fee
self.status_str = tx_details.status
self.description = tx_details.label
self.can_broadcast = tx_details.can_broadcast
self.can_rbf = tx_details.can_bump
self.can_dscancel = tx_details.can_dscancel
self.tx_hash = tx_details.txid or ''
if tx_mined_status.timestamp:
self.date_label = _('Date')
self.date_str = datetime.fromtimestamp(tx_mined_status.timestamp).isoformat(' ')[:-3]
elif exp_n is not None:
self.date_label = _('Mempool depth')
self.date_str = _('{} from tip').format('%.2f MB'%(exp_n/1000000))
else:
self.date_label = ''
self.date_str = ''
self.can_sign = self.wallet.can_sign(self.tx)
if amount is None:
self.amount_str = _("Transaction unrelated to your wallet")
elif amount > 0:
self.is_mine = False
self.amount_str = format_amount(amount)
else:
self.is_mine = True
self.amount_str = format_amount(-amount)
risk_of_burning_coins = (isinstance(self.tx, PartialTransaction)
and self.can_sign
and fee is not None
and bool(self.wallet.get_warning_for_risk_of_burning_coins_as_fees(self.tx)))
if fee is not None and not risk_of_burning_coins:
self.fee_str = format_amount(fee)
fee_per_kb = fee / self.tx.estimated_size() * 1000
self.feerate_str = self.app.format_fee_rate(fee_per_kb)
else:
self.fee_str = _('unknown')
self.feerate_str = _('unknown')
self.ids.output_list.update(self.tx.outputs())
for dict_entry in self.ids.output_list.data:
dict_entry['color'], dict_entry['background_color'] = address_colors(self.wallet, dict_entry['address'])
self.can_remove_tx = tx_details.can_remove
self.update_action_button()
def update_action_button(self):
action_button = self.ids.action_button
options = (
ActionButtonOption(text=_('Sign'), func=lambda btn: self.do_sign(), enabled=self.can_sign),
ActionButtonOption(text=_('Broadcast'), func=lambda btn: self.do_broadcast(), enabled=self.can_broadcast),
ActionButtonOption(text=_('Bump fee'), func=lambda btn: self.do_rbf(), enabled=self.can_rbf),
ActionButtonOption(text=_('Cancel (double-spend)'), func=lambda btn: self.do_dscancel(), enabled=self.can_dscancel),
ActionButtonOption(text=_('Remove'), func=lambda btn: self.remove_local_tx(), enabled=self.can_remove_tx),
)
num_options = sum(map(lambda o: bool(o.enabled), options))
# if no options available, hide button
if num_options == 0:
action_button.disabled = True
action_button.opacity = 0
return
action_button.disabled = False
action_button.opacity = 1
if num_options == 1:
# only one option, button will correspond to that
for option in options:
if option.enabled:
action_button.text = option.text
self._action_button_fn = option.func
else:
# multiple options. button opens dropdown which has one sub-button for each
dropdown = DropDown()
action_button.text = _('Options')
self._action_button_fn = dropdown.open
for option in options:
if option.enabled:
btn = Button(text=option.text, size_hint_y=None, height='48dp')
btn.bind(on_release=option.func)
dropdown.add_widget(btn)
def on_action_button_clicked(self):
action_button = self.ids.action_button
self._action_button_fn(action_button)
def _add_info_to_tx_from_wallet_and_network(self, tx: PartialTransaction) -> bool:
"""Returns whether successful."""
# note side-effect: tx is being mutated
assert isinstance(tx, PartialTransaction)
try:
# note: this might download input utxos over network
# FIXME network code in gui thread...
tx.add_info_from_wallet(self.wallet, ignore_network_issues=False)
except NetworkException as e:
self.app.show_error(repr(e))
return False
return True
def do_rbf(self):
from .bump_fee_dialog import BumpFeeDialog
tx = self.tx
txid = tx.txid()
assert txid
if not isinstance(tx, PartialTransaction):
tx = PartialTransaction.from_tx(tx)
if not self._add_info_to_tx_from_wallet_and_network(tx):
return
fee = tx.get_fee()
assert fee is not None
size = tx.estimated_size()
cb = partial(self._do_rbf, tx=tx, txid=txid)
d = BumpFeeDialog(self.app, fee, size, cb)
d.open()
def _do_rbf(
self,
new_fee_rate,
is_final,
*,
tx: PartialTransaction,
txid: str,
):
if new_fee_rate is None:
return
try:
new_tx = self.wallet.bump_fee(
tx=tx,
txid=txid,
new_fee_rate=new_fee_rate,
)
except CannotBumpFee as e:
self.app.show_error(str(e))
return
new_tx.set_rbf(not is_final)
self.tx = new_tx
self.update()
self.do_sign()
def do_dscancel(self):
from .dscancel_dialog import DSCancelDialog
tx = self.tx
txid = tx.txid()
assert txid
if not isinstance(tx, PartialTransaction):
tx = PartialTransaction.from_tx(tx)
if not self._add_info_to_tx_from_wallet_and_network(tx):
return
fee = tx.get_fee()
assert fee is not None
size = tx.estimated_size()
cb = partial(self._do_dscancel, tx=tx)
d = DSCancelDialog(self.app, fee, size, cb)
d.open()
def _do_dscancel(
self,
new_fee_rate,
*,
tx: PartialTransaction,
):
if new_fee_rate is None:
return
try:
new_tx = self.wallet.dscancel(
tx=tx,
new_fee_rate=new_fee_rate,
)
except CannotDoubleSpendTx as e:
self.app.show_error(str(e))
return
self.tx = new_tx
self.update()
self.do_sign()
def do_sign(self):
self.app.protected(_("Sign this transaction?"), self._do_sign, ())
def _do_sign(self, password):
self.status_str = _('Signing') + '...'
Clock.schedule_once(lambda dt: self.__do_sign(password), 0.1)
def __do_sign(self, password):
try:
self.app.wallet.sign_transaction(self.tx, password)
except InvalidPassword:
self.app.show_error(_("Invalid PIN"))
self.update()
def do_broadcast(self):
self.app.broadcast(self.tx)
def show_qr(self):
original_raw_tx = str(self.tx)
qr_data = self.tx.to_qr_data()
self.app.qr_dialog(_("Raw Transaction"), qr_data, text_for_clipboard=original_raw_tx)
def remove_local_tx(self):
txid = self.tx.txid()
num_child_txs = len(self.wallet.get_depending_transactions(txid))
question = _("Are you sure you want to remove this transaction?")
if num_child_txs > 0:
question = (_("Are you sure you want to remove this transaction and {} child transactions?")
.format(num_child_txs))
def on_prompt(b):
if b:
self.wallet.remove_transaction(txid)
self.wallet.save_db()
self.app._trigger_update_wallet() # FIXME private...
self.dismiss()
d = Question(question, on_prompt)
d.open()
def label_dialog(self):
from .label_dialog import LabelDialog
key = self.tx.txid()
text = self.app.wallet.get_label_for_txid(key)
def callback(text):
self.app.wallet.set_label(key, text)
self.update()
self.app.history_screen.update()
d = LabelDialog(_('Enter Transaction Label'), text, callback)
d.open()
| 35.709333 | 128 | 0.574341 |
7940118b5ed28d8ccbe93e60e28b2498bef9b421 | 2,214 | py | Python | uwcoursesearch/helpers/CourseData.py | ConMur/UWCourseSearch | ecd5005e7a2a153fe02c015893910a4e60899770 | [
"MIT"
] | null | null | null | uwcoursesearch/helpers/CourseData.py | ConMur/UWCourseSearch | ecd5005e7a2a153fe02c015893910a4e60899770 | [
"MIT"
] | null | null | null | uwcoursesearch/helpers/CourseData.py | ConMur/UWCourseSearch | ecd5005e7a2a153fe02c015893910a4e60899770 | [
"MIT"
] | null | null | null | class CourseInfo:
def __init__(self, course_code, text):
self.code = course_code
self.text = text
class TermInfo:
"""
Represents a term. Contains the id of the term (eg. 1179) and the name
of the term (eg. Fall 2017)
"""
def __init__(self, term_id, term_name):
self.id = term_id
self.name = term_name
class Reserves:
def __init__(self, reserve_group, enrollment_capacity, enrollment_total):
self.reserve_group = reserve_group
self.enrollment_capacity = enrollment_capacity
self.enrollment_total = enrollment_total
class Classes:
def __init__(self, start_time, end_time, weekdays, start_date, end_date,
is_tba, is_cancelled, is_closed, building, room, instructors):
self.start_time = start_time
self.end_time = end_time
self.weekdays = weekdays
self.start_date = start_date
self.end_date = end_date
self.is_tba = is_tba
self.is_cancelled = is_cancelled
self.is_closed = is_closed
self.building = building
self.room = room
self.instructors = instructors
class Course:
"""
    Represents a course. Contains the subject, catalog number, units, title, enrollment figures, class schedule and related metadata for one course offering.
"""
def __init__(self, subject, catalog_number, units, title, note, class_number,
section, campus, associated_class, related_component_1, related_component_2,
enrollment_capacity, enrollment_total, topic, reserves, classes, held_with,
term, academic_level, last_updated):
self.subject = subject
self.catalog_number = catalog_number
self.units = units
self.title = title
self.note = note
self.class_number = class_number
self.section = section
self.campus = campus
self.associated_class = associated_class
self.related_component_1 = related_component_1
self.related_component_2 = related_component_2
self.enrollment_capacity = enrollment_capacity
self.enrollment_total = enrollment_total
self.topic = topic
self.classes = classes
self.held_with = held_with
self.term = term
self.academic_level = academic_level
self.last_updated = last_updated
| 34.59375 | 81 | 0.676152 |
794011c18d97ea10a4f94469c8bcf33c592a1ae0 | 2,202 | py | Python | {{cookiecutter.project_name|capitalize }}Proj/playground/app/{{cookiecutter.project_name}}.py | papermerge/cookiecutter-js | 1d66ffc8d71ee5f7beade26794ccdf32196738fd | [
"Apache-2.0"
] | null | null | null | {{cookiecutter.project_name|capitalize }}Proj/playground/app/{{cookiecutter.project_name}}.py | papermerge/cookiecutter-js | 1d66ffc8d71ee5f7beade26794ccdf32196738fd | [
"Apache-2.0"
] | null | null | null | {{cookiecutter.project_name|capitalize }}Proj/playground/app/{{cookiecutter.project_name}}.py | papermerge/cookiecutter-js | 1d66ffc8d71ee5f7beade26794ccdf32196738fd | [
"Apache-2.0"
] | null | null | null | import os
import time
import random
from flask import (
Blueprint,
render_template,
request
)
global_context = {
'features': [
{
'url': '/01-sample',
'title': '01 - Sample'
},
{
'url': '/02-sample',
'title': '02 - Sample'
},
]
}
def _get_template_name(req):
name_with_slashes = req.url_rule.rule
template_name = name_with_slashes.split('/')[1]
return f"{template_name}.html"
def _folder_abs_path(folder_name):
"""
Returns absolute path given folder name.
Example:
_folder_abs_path("static") => absolute path to static folder
_folder_abs_path("media") => absolute path to media folder
"""
abs_path = os.path.join(
os.path.dirname(__file__),
'..',
folder_name
)
return abs_path
def create_blueprint(name, request_delay=0):
"""
Create a blueprint with options.
    A blueprint, in the Flask sense, is a reusable app in Django's sense.
`request_delay` is the number of seconds to delay handling of the
request. With `request_delay` > 0 we simulate slow requests.
"""
    # Reusable app. It provides views for the following URLs:
# - /
# - /upload/
blueprint = Blueprint(
name, # unique name
name, # import_name
template_folder='templates', # same folder as for the main app
static_folder=_folder_abs_path("static") # same as for main app
)
@blueprint.route('/')
def browser():
template_name = f"features/{_get_template_name(request)}"
time.sleep(request_delay)
return render_template(
template_name,
**global_context
)
@blueprint.route('/upload/', methods=['POST'])
def upload():
time.sleep(request_delay)
file = request.files['file']
parent_id = request.form['parent_id']
lang = request.form['lang']
return {
'document': {
'id': random.randint(0, 1000),
'title': file.filename,
'parent_id': parent_id,
'lang': lang
}
}
return blueprint
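# Minimal wiring sketch for the factory above. Assumptions: the url prefixes and the
# 2-second delay are arbitrary example values, not taken from the playground project.
if __name__ == "__main__":
    from flask import Flask

    demo_app = Flask(__name__)
    demo_app.register_blueprint(create_blueprint("feature-01"), url_prefix="/01-sample")
    # A deliberately slow variant: every request sleeps for 2 seconds before responding.
    demo_app.register_blueprint(create_blueprint("feature-02", request_delay=2), url_prefix="/02-sample")
    demo_app.run(debug=True)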
| 23.178947 | 72 | 0.573569 |
794012c47a1bebe98b3142b33b899c983b88cadd | 2,512 | py | Python | hred-tensorflow-master/main.py | hzhwcmhf/contk_docs | d4874cce5347bcf9f33d9fe99756c7145f181b88 | [
"MIT"
] | 5 | 2019-03-14T14:56:47.000Z | 2021-09-26T05:31:02.000Z | hred-tensorflow-master/main.py | hzhwcmhf/contk_docs | d4874cce5347bcf9f33d9fe99756c7145f181b88 | [
"MIT"
] | 10 | 2020-01-28T22:51:46.000Z | 2022-02-10T00:25:23.000Z | hred-tensorflow-master/main.py | hzhwcmhf/contk_docs | d4874cce5347bcf9f33d9fe99756c7145f181b88 | [
"MIT"
] | 3 | 2019-08-27T10:13:54.000Z | 2021-09-21T00:58:30.000Z | import os
import json
import numpy as np
import tensorflow as tf
from cotk.dataloader import MultiTurnDialog
from cotk.wordvector import WordVector, Glove
from utils import debug, try_cache
from model import HredModel
def create_model(sess, data, args, embed):
with tf.variable_scope(args.name):
model = HredModel(data, args, embed)
model.print_parameters()
latest_dir = '%s/checkpoint_latest' % args.model_dir
best_dir = '%s/checkpoint_best' % args.model_dir
if tf.train.get_checkpoint_state(latest_dir) and args.restore == "last":
print("Reading model parameters from %s" % latest_dir)
model.latest_saver.restore(sess, tf.train.latest_checkpoint(latest_dir))
else:
if tf.train.get_checkpoint_state(best_dir) and args.restore == "best":
print('Reading model parameters from %s' % best_dir)
model.best_saver.restore(sess, tf.train.latest_checkpoint(best_dir))
else:
print("Created model with fresh parameters.")
global_variable = [gv for gv in tf.global_variables() if args.name in gv.name]
sess.run(tf.variables_initializer(global_variable))
return model
def main(args):
if args.debug:
debug()
if args.cuda:
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
else:
config = tf.ConfigProto(device_count={'GPU': 0})
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
data_class = MultiTurnDialog.load_class(args.dataset)
wordvec_class = WordVector.load_class(args.wvclass)
if wordvec_class == None:
wordvec_class = Glove
if args.cache:
data = try_cache(data_class, (args.datapath,), args.cache_dir)
vocab = data.frequent_vocab_list
embed = try_cache(lambda wv, ez, vl: wordvec_class(wv).load_matrix(ez, vl),
(args.wvpath, args.embedding_size, vocab),
args.cache_dir, wordvec_class.__name__)
else:
data = data_class(args.datapath,
min_frequent_vocab_times=args.min_frequent_vocab_times,
max_sent_length=args.max_sent_length,
max_turn_length=args.max_turn_length)
wv = wordvec_class(args.wvpath)
vocab = data.frequent_vocab_list
embed = wv.load_matrix(args.embedding_size, vocab)
embed = np.array(embed, dtype = np.float32)
with tf.Session(config=config) as sess:
model = create_model(sess, data, args, embed)
if args.mode == "train":
model.train_process(sess, data, args)
else:
test_res = model.test_process(sess, data, args)
for key, val in test_res.items():
if isinstance(val, bytes):
test_res[key] = str(val)
json.dump(test_res, open("./result.json", "w")) | 33.493333 | 82 | 0.739252 |
794013068f0dd0c979ee6c5b45e6acce98abbf8f | 1,371 | py | Python | publish/utils.py | nacady/django-publish | a9b0b0b0ce0a2cd664d256edc4c819180dc882df | [
"BSD-3-Clause"
] | null | null | null | publish/utils.py | nacady/django-publish | a9b0b0b0ce0a2cd664d256edc4c819180dc882df | [
"BSD-3-Clause"
] | null | null | null | publish/utils.py | nacady/django-publish | a9b0b0b0ce0a2cd664d256edc4c819180dc882df | [
"BSD-3-Clause"
] | 1 | 2021-06-28T03:59:45.000Z | 2021-06-28T03:59:45.000Z | # -*- coding: utf-8 -*-
class NestedSet(object):
'''
a class that can be used a bit like a set,
    but will let us store hierarchy too
'''
def __init__(self):
self._root_elements = []
self._children = {}
def add(self, item, parent=None):
if parent is None:
self._root_elements.append(item)
else:
self._children[parent].append(item)
self._children[item] = []
def __contains__(self, item):
return item in self._children
def __len__(self):
return len(self._children)
def __iter__(self):
return iter(self._children)
def original(self, item):
# return the original item added
# or this item if that's not the case
for child in self:
if child == item:
return child
return item
def _add_nested_items(self, items, nested):
for item in items:
nested.append(item)
children = self._nested_children(item)
if children:
nested.append(children)
def _nested_children(self, item):
children = []
self._add_nested_items(self._children[item], children)
return children
def nested_items(self):
items = []
self._add_nested_items(self._root_elements, items)
return items
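# Illustrative usage sketch (not part of the original module) showing how the
# hierarchy recorded by add() is flattened by nested_items():
#
#   ns = NestedSet()
#   ns.add("a")             # root element
#   ns.add("b", parent="a")
#   ns.add("c", parent="b")
#   ns.add("d")             # second root element
#   ns.nested_items()       # -> ["a", ["b", ["c"]], "d"]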
| 25.388889 | 62 | 0.577681 |
7940136ed83445ba0e1f42d0751dc2b7f74000a1 | 23,272 | py | Python | cwt/cose.py | dajiaji/python-cwt | 61723510663dc4cd5a5171ff3a78994cac5f5213 | [
"MIT"
] | 11 | 2021-04-29T13:48:15.000Z | 2022-01-31T22:27:14.000Z | cwt/cose.py | dajiaji/python-cwt | 61723510663dc4cd5a5171ff3a78994cac5f5213 | [
"MIT"
] | 185 | 2021-04-23T22:14:50.000Z | 2022-03-28T06:27:35.000Z | cwt/cose.py | dajiaji/python-cwt | 61723510663dc4cd5a5171ff3a78994cac5f5213 | [
"MIT"
] | 5 | 2021-08-09T02:21:18.000Z | 2022-01-05T11:39:08.000Z | from typing import Any, Dict, List, Optional, Union
from asn1crypto import pem
from cbor2 import CBORTag
from .cbor_processor import CBORProcessor
from .const import COSE_ALGORITHMS_RECIPIENT
from .cose_key_interface import COSEKeyInterface
from .recipient_interface import RecipientInterface
from .recipients import Recipients
from .signer import Signer
from .utils import to_cose_header
class COSE(CBORProcessor):
"""
    A COSE (CBOR Object Signing and Encryption) Implementation built on top of
`cbor2 <https://cbor2.readthedocs.io/en/stable/>`_.
"""
def __init__(
self,
alg_auto_inclusion: bool = False,
kid_auto_inclusion: bool = False,
verify_kid: bool = False,
ca_certs: str = "",
):
if not isinstance(alg_auto_inclusion, bool):
raise ValueError("alg_auto_inclusion should be bool.")
self._alg_auto_inclusion = alg_auto_inclusion
if not isinstance(kid_auto_inclusion, bool):
raise ValueError("kid_auto_inclusion should be bool.")
self._kid_auto_inclusion = kid_auto_inclusion
if not isinstance(verify_kid, bool):
raise ValueError("verify_kid should be bool.")
self._verify_kid = verify_kid
self._ca_certs = []
if ca_certs:
if not isinstance(ca_certs, str):
raise ValueError("ca_certs should be str.")
self._trust_roots: List[bytes] = []
with open(ca_certs, "rb") as f:
for _, _, der_bytes in pem.unarmor(f.read(), multiple=True):
self._ca_certs.append(der_bytes)
@classmethod
def new(
cls,
alg_auto_inclusion: bool = False,
kid_auto_inclusion: bool = False,
verify_kid: bool = False,
ca_certs: str = "",
):
"""
Constructor.
Args:
alg_auto_inclusion(bool): The indicator whether ``alg`` parameter is included
in a proper header bucket automatically or not.
kid_auto_inclusion(bool): The indicator whether ``kid`` parameter is included
in a proper header bucket automatically or not.
verify_kid(bool): The indicator whether ``kid`` verification is mandatory or
not.
ca_certs(str): The path to a file which contains a concatenated list
of trusted root certificates. You should specify private CA
certificates in your target system. There should be no need to
use the public CA certificates for the Web PKI.
"""
return cls(alg_auto_inclusion, kid_auto_inclusion, verify_kid, ca_certs)
@property
def alg_auto_inclusion(self) -> bool:
"""
If this property is True, an encode_and_*() function will automatically
set the ``alg`` parameter in the header from the COSEKey argument.
"""
return self._alg_auto_inclusion
@alg_auto_inclusion.setter
def alg_auto_inclusion(self, alg_auto_inclusion: bool):
self._alg_auto_inclusion = alg_auto_inclusion
return
@property
def kid_auto_inclusion(self) -> bool:
"""
If this property is True, an encode_and_*() function will automatically
set the ``kid`` parameter in the header from the COSEKey argument.
"""
return self._kid_auto_inclusion
@kid_auto_inclusion.setter
def kid_auto_inclusion(self, kid_auto_inclusion: bool):
self._kid_auto_inclusion = kid_auto_inclusion
return
@property
def verify_kid(self) -> bool:
"""
If this property is True, the decode() function will perform the verification
and decoding process only if the ``kid`` of the COSE data to be decoded and
        one of the ``kid`` values in the key list given as an argument matches exactly.
"""
return self._verify_kid
@verify_kid.setter
def verify_kid(self, verify_kid: bool):
self._verify_kid = verify_kid
return
def encode_and_mac(
self,
payload: bytes,
key: COSEKeyInterface,
protected: Optional[Union[dict, bytes]] = None,
unprotected: Optional[dict] = None,
recipients: Optional[List[RecipientInterface]] = None,
external_aad: bytes = b"",
out: str = "",
) -> Union[bytes, CBORTag]:
"""
Encodes data with MAC.
Args:
payload (bytes): A content to be MACed.
key (COSEKeyInterface): A COSE key as a MAC Authentication key.
protected (Optional[Union[dict, bytes]]): Parameters that are to be cryptographically
protected.
unprotected (Optional[dict]): Parameters that are not cryptographically protected.
recipients (Optional[List[RecipientInterface]]): A list of recipient information structures.
external_aad(bytes): External additional authenticated data supplied by application.
out(str): An output format. Only ``"cbor2/CBORTag"`` can be used. If ``"cbor2/CBORTag"``
is specified. This function will return encoded data as
`cbor2 <https://cbor2.readthedocs.io/en/stable/>`_'s ``CBORTag`` object.
If any other value is specified, it will return encoded data as bytes.
Returns:
Union[bytes, CBORTag]: A byte string of the encoded COSE or a cbor2.CBORTag object.
Raises:
ValueError: Invalid arguments.
EncodeError: Failed to encode data.
"""
p: Union[Dict[int, Any], bytes] = to_cose_header(protected) if not isinstance(protected, bytes) else protected
u = to_cose_header(unprotected)
ctx = "MAC0" if not recipients else "MAC"
b_protected = b""
# MAC0
if not recipients:
if isinstance(p, bytes):
b_protected = p
else:
if self._alg_auto_inclusion:
p[1] = key.alg
b_protected = self._dumps(p)
if self._kid_auto_inclusion and key.kid:
u[4] = key.kid
mac_structure = [ctx, b_protected, external_aad, payload]
tag = key.sign(self._dumps(mac_structure))
res = CBORTag(17, [b_protected, u, payload, tag])
return res if out == "cbor2/CBORTag" else self._dumps(res)
# MAC
recs = []
for rec in recipients:
recs.append(rec.to_list())
if recipients[0].alg in COSE_ALGORITHMS_RECIPIENT.values():
if not isinstance(p, bytes):
if self._alg_auto_inclusion:
p[1] = key.alg
if self._kid_auto_inclusion and key.kid:
u[4] = key.kid
else:
raise NotImplementedError("Algorithms other than direct are not supported for recipients.")
if isinstance(p, bytes):
b_protected = p
else:
b_protected = self._dumps(p) if p else b""
mac_structure = [ctx, b_protected, external_aad, payload]
tag = key.sign(self._dumps(mac_structure))
cose_mac: List[Any] = [b_protected, u, payload, tag]
cose_mac.append(recs)
res = CBORTag(97, cose_mac)
return res if out == "cbor2/CBORTag" else self._dumps(res)
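    # Illustrative sketch (not part of the original module): a minimal MAC0
    # round trip with encode_and_mac()/decode(). It assumes this package's
    # COSEKey helper exposes from_symmetric_key(); exact key-creation details
    # may differ.
    #
    #   from cwt import COSE, COSEKey
    #
    #   mac_key = COSEKey.from_symmetric_key(alg="HS256", kid="01")
    #   ctx = COSE.new(alg_auto_inclusion=True, kid_auto_inclusion=True)
    #   encoded = ctx.encode_and_mac(b"Hello world!", mac_key)
    #   assert ctx.decode(encoded, mac_key) == b"Hello world!"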
def encode_and_sign(
self,
payload: bytes,
key: Optional[COSEKeyInterface] = None,
protected: Optional[Union[dict, bytes]] = None,
unprotected: Optional[dict] = None,
signers: List[Signer] = [],
external_aad: bytes = b"",
out: str = "",
) -> Union[bytes, CBORTag]:
"""
Encodes data with signing.
Args:
payload (bytes): A content to be signed.
key (Optional[COSEKeyInterface]): A signing key for single signer
cases. When the ``signers`` parameter is set, this ``key`` will
be ignored and should not be set.
protected (Optional[Union[dict, bytes]]): Parameters that are to be
cryptographically protected.
unprotected (Optional[dict]): Parameters that are not cryptographically
protected.
signers (List[Signer]): A list of signer information objects for
multiple signer cases.
external_aad(bytes): External additional authenticated data supplied
by application.
out(str): An output format. Only ``"cbor2/CBORTag"`` can be used. If
``"cbor2/CBORTag"`` is specified. This function will return encoded
data as `cbor2 <https://cbor2.readthedocs.io/en/stable/>`_'s
``CBORTag`` object. If any other value is specified, it will return
encoded data as bytes.
Returns:
Union[bytes, CBORTag]: A byte string of the encoded COSE or a
cbor2.CBORTag object.
Raises:
ValueError: Invalid arguments.
EncodeError: Failed to encode data.
"""
p: Union[Dict[int, Any], bytes] = to_cose_header(protected) if not isinstance(protected, bytes) else protected
u = to_cose_header(unprotected)
ctx = "Signature" if signers else "Signature1"
if not signers and key is not None:
if isinstance(p, dict) and self._alg_auto_inclusion:
p[1] = key.alg
if self._kid_auto_inclusion and key.kid:
u[4] = key.kid
b_protected = b""
if isinstance(p, bytes):
b_protected = p
else:
b_protected = self._dumps(p) if p else b""
# Signature1
if not signers and key is not None:
sig_structure = [ctx, b_protected, external_aad, payload]
sig = key.sign(self._dumps(sig_structure))
res = CBORTag(18, [b_protected, u, payload, sig])
return res if out == "cbor2/CBORTag" else self._dumps(res)
# Signature
sigs = []
for s in signers:
sig_structure = [ctx, b_protected, s.protected, external_aad, payload]
s.sign(self._dumps(sig_structure))
sigs.append([s.protected, s.unprotected, s.signature])
res = CBORTag(98, [b_protected, u, payload, sigs])
return res if out == "cbor2/CBORTag" else self._dumps(res)
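    # Illustrative sketch (not part of the original module): single-signer use
    # of encode_and_sign(). "priv_key"/"pub_key" stand for asymmetric COSE keys
    # created elsewhere (e.g. via this package's COSEKey helpers); they are
    # assumptions here.
    #
    #   ctx = COSE.new(alg_auto_inclusion=True, kid_auto_inclusion=True)
    #   encoded = ctx.encode_and_sign(b"Hello world!", priv_key)
    #   assert ctx.decode(encoded, pub_key) == b"Hello world!"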
def encode_and_encrypt(
self,
payload: bytes,
key: COSEKeyInterface,
protected: Optional[Union[dict, bytes]] = None,
unprotected: Optional[dict] = None,
nonce: bytes = b"",
recipients: Optional[List[RecipientInterface]] = None,
external_aad: bytes = b"",
out: str = "",
) -> bytes:
"""
Encodes data with encryption.
Args:
payload (bytes): A content to be encrypted.
key (COSEKeyInterface): A COSE key as an encryption key.
protected (Optional[Union[dict, bytes]]): Parameters that are to be
cryptographically protected.
unprotected (Optional[dict]): Parameters that are not cryptographically
protected.
nonce (bytes): A nonce for encryption.
recipients (Optional[List[RecipientInterface]]): A list of recipient
information structures.
external_aad(bytes): External additional authenticated data supplied
by application.
out(str): An output format. Only ``"cbor2/CBORTag"`` can be used. If
``"cbor2/CBORTag"`` is specified. This function will return encoded
data as `cbor2 <https://cbor2.readthedocs.io/en/stable/>`_'s
``CBORTag`` object. If any other value is specified, it will return
encoded data as bytes.
Returns:
Union[bytes, CBORTag]: A byte string of the encoded COSE or a
cbor2.CBORTag object.
Raises:
ValueError: Invalid arguments.
EncodeError: Failed to encode data.
"""
p: Union[Dict[int, Any], bytes] = to_cose_header(protected) if not isinstance(protected, bytes) else protected
u = to_cose_header(unprotected)
ctx = "Encrypt0" if not recipients else "Encrypt"
if not nonce:
try:
nonce = key.generate_nonce()
except NotImplementedError:
raise ValueError("Nonce generation is not supported for the key. Set a nonce explicitly.")
# Encrypt0
if not recipients:
if isinstance(p, bytes):
b_protected = p
else:
if self._alg_auto_inclusion:
p[1] = key.alg
b_protected = self._dumps(p) if p else b""
if self._kid_auto_inclusion and key.kid:
u[4] = key.kid
u[5] = nonce
enc_structure = [ctx, b_protected, external_aad]
aad = self._dumps(enc_structure)
ciphertext = key.encrypt(payload, nonce, aad)
res = CBORTag(16, [b_protected, u, ciphertext])
return res if out == "cbor2/CBORTag" else self._dumps(res)
# Encrypt
recs = []
for rec in recipients:
recs.append(rec.to_list())
if recipients[0].alg in COSE_ALGORITHMS_RECIPIENT.values():
if not isinstance(p, bytes) and self._alg_auto_inclusion:
p[1] = key.alg
if self._kid_auto_inclusion and key.kid:
u[4] = key.kid
u[5] = nonce
else:
raise NotImplementedError("Algorithms other than direct are not supported for recipients.")
if isinstance(p, bytes):
b_protected = p
else:
b_protected = self._dumps(p) if p else b""
enc_structure = [ctx, b_protected, external_aad]
aad = self._dumps(enc_structure)
ciphertext = key.encrypt(payload, nonce, aad)
cose_enc: List[Any] = [b_protected, u, ciphertext]
cose_enc.append(recs)
res = CBORTag(96, cose_enc)
return res if out == "cbor2/CBORTag" else self._dumps(res)
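    # Illustrative sketch (not part of the original module): an Encrypt0 round
    # trip. The symmetric key construction is an assumption (see COSEKey in
    # this package); the nonce is generated by the key when omitted.
    #
    #   enc_key = COSEKey.from_symmetric_key(alg="ChaCha20/Poly1305", kid="02")
    #   ctx = COSE.new(alg_auto_inclusion=True, kid_auto_inclusion=True)
    #   encoded = ctx.encode_and_encrypt(b"Hello world!", enc_key)
    #   assert ctx.decode(encoded, enc_key) == b"Hello world!"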
def decode(
self,
data: Union[bytes, CBORTag],
keys: Union[COSEKeyInterface, List[COSEKeyInterface]],
context: Optional[Union[Dict[str, Any], List[Any]]] = None,
external_aad: bytes = b"",
) -> bytes:
"""
Verifies and decodes COSE data.
Args:
data (Union[bytes, CBORTag]): A byte string or cbor2.CBORTag of an
encoded data.
keys (Union[COSEKeyInterface, List[COSEKeyInterface]]): COSE key(s)
to verify and decrypt the encoded data.
context (Optional[Union[Dict[str, Any], List[Any]]]): A context information
                structure for key derivation functions.
external_aad(bytes): External additional authenticated data supplied by
application.
Returns:
bytes: A byte string of decoded payload.
Raises:
ValueError: Invalid arguments.
DecodeError: Failed to decode data.
VerifyError: Failed to verify data.
"""
if isinstance(data, bytes):
data = self._loads(data)
if not isinstance(data, CBORTag):
raise ValueError("Invalid COSE format.")
if not isinstance(keys, list):
if not isinstance(keys, COSEKeyInterface):
raise ValueError("key in keys should have COSEKeyInterface.")
keys = [keys]
if data.tag == 16:
keys = self._filter_by_key_ops(keys, 4)
if not isinstance(data.value, list) or len(data.value) != 3:
raise ValueError("Invalid Encrypt0 format.")
elif data.tag == 96:
keys = self._filter_by_key_ops(keys, 4)
if not isinstance(data.value, list) or len(data.value) != 4:
raise ValueError("Invalid Encrypt format.")
elif data.tag == 17:
keys = self._filter_by_key_ops(keys, 10)
if not isinstance(data.value, list) or len(data.value) != 4:
raise ValueError("Invalid MAC0 format.")
elif data.tag == 97:
keys = self._filter_by_key_ops(keys, 10)
if not isinstance(data.value, list) or len(data.value) != 5:
raise ValueError("Invalid MAC format.")
elif data.tag == 18:
keys = self._filter_by_key_ops(keys, 2)
if not isinstance(data.value, list) or len(data.value) != 4:
raise ValueError("Invalid Signature1 format.")
elif data.tag == 98:
keys = self._filter_by_key_ops(keys, 2)
if not isinstance(data.value, list) or len(data.value) != 4:
raise ValueError("Invalid Signature format.")
else:
raise ValueError(f"Unsupported or unknown CBOR tag({data.tag}).")
protected = self._loads(data.value[0]) if data.value[0] else b""
unprotected = data.value[1]
if not isinstance(unprotected, dict):
raise ValueError("unprotected header should be dict.")
alg = self._get_alg(protected)
err: Exception = ValueError("key is not found.")
# Encrypt0
if data.tag == 16:
kid = self._get_kid(protected, unprotected)
aad = self._dumps(["Encrypt0", data.value[0], external_aad])
nonce = unprotected.get(5, None)
if kid:
for _, k in enumerate(keys):
if k.kid != kid:
continue
try:
return k.decrypt(data.value[2], nonce, aad)
except Exception as e:
err = e
raise err
for _, k in enumerate(keys):
try:
return k.decrypt(data.value[2], nonce, aad)
except Exception as e:
err = e
raise err
# Encrypt
if data.tag == 96:
aad = self._dumps(["Encrypt", data.value[0], external_aad])
nonce = unprotected.get(5, None)
rs = Recipients.from_list(data.value[3], self._verify_kid)
enc_key = rs.extract(keys, context, alg)
return enc_key.decrypt(data.value[2], nonce, aad)
# MAC0
if data.tag == 17:
kid = self._get_kid(protected, unprotected)
msg = self._dumps(["MAC0", data.value[0], external_aad, data.value[2]])
if kid:
for _, k in enumerate(keys):
if k.kid != kid:
continue
try:
k.verify(msg, data.value[3])
return data.value[2]
except Exception as e:
err = e
raise err
for _, k in enumerate(keys):
try:
k.verify(msg, data.value[3])
return data.value[2]
except Exception as e:
err = e
raise err
# MAC
if data.tag == 97:
to_be_maced = self._dumps(["MAC", data.value[0], external_aad, data.value[2]])
rs = Recipients.from_list(data.value[4], self._verify_kid)
mac_auth_key = rs.extract(keys, context, alg)
mac_auth_key.verify(to_be_maced, data.value[3])
return data.value[2]
# Signature1
if data.tag == 18:
kid = self._get_kid(protected, unprotected)
to_be_signed = self._dumps(["Signature1", data.value[0], external_aad, data.value[2]])
if kid:
for _, k in enumerate(keys):
if k.kid != kid:
continue
try:
if self._ca_certs:
k.validate_certificate(self._ca_certs)
k.verify(to_be_signed, data.value[3])
return data.value[2]
except Exception as e:
err = e
raise err
for _, k in enumerate(keys):
try:
if self._ca_certs:
k.validate_certificate(self._ca_certs)
k.verify(to_be_signed, data.value[3])
return data.value[2]
except Exception as e:
err = e
raise err
# Signature
# if data.tag == 98:
sigs = data.value[3]
if not isinstance(sigs, list):
raise ValueError("Invalid Signature format.")
for sig in sigs:
if not isinstance(sig, list) or len(sig) != 3:
raise ValueError("Invalid Signature format.")
protected = self._loads(sig[0]) if sig[0] else b""
unprotected = sig[1]
if not isinstance(unprotected, dict):
raise ValueError("unprotected header in signature structure should be dict.")
kid = self._get_kid(protected, unprotected)
if kid:
for _, k in enumerate(keys):
if k.kid != kid:
continue
try:
to_be_signed = self._dumps(
[
"Signature",
data.value[0],
sig[0],
external_aad,
data.value[2],
]
)
k.verify(to_be_signed, sig[2])
return data.value[2]
except Exception as e:
err = e
continue
for _, k in enumerate(keys):
try:
to_be_signed = self._dumps(
[
"Signature",
data.value[0],
sig[0],
external_aad,
data.value[2],
]
)
k.verify(to_be_signed, sig[2])
return data.value[2]
except Exception as e:
err = e
raise err
def _filter_by_key_ops(self, keys: List[COSEKeyInterface], op: int) -> List[COSEKeyInterface]:
res: List[COSEKeyInterface] = []
for k in keys:
if op in k.key_ops:
res.append(k)
if len(res) == 0:
res = keys
return res
def _get_alg(self, protected: Any) -> int:
return protected[1] if isinstance(protected, dict) and 1 in protected else 0
def _get_kid(self, protected: Any, unprotected: dict) -> bytes:
kid = b""
if isinstance(protected, dict) and 4 in protected:
kid = protected[4]
elif 4 in unprotected:
kid = unprotected[4]
elif self._verify_kid:
raise ValueError("kid should be specified.")
return kid
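    # Illustrative sketch (not part of the original module): when several keys
    # are supplied to decode(), the ``kid`` in the header (extracted by
    # _get_kid above) is used to pick the matching key.
    #
    #   ctx = COSE.new(alg_auto_inclusion=True, kid_auto_inclusion=True)
    #   encoded = ctx.encode_and_mac(b"Hello world!", key2)   # key2 has kid "02"
    #   assert ctx.decode(encoded, [key1, key2]) == b"Hello world!"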
| 40.262976 | 118 | 0.553799 |
7940165d0d55b0abc445a778840227163b95ace5 | 4,897 | py | Python | uploadhaddocks/util.py | rcook/upload-haddocks | a33826be1873da68ba073a42ec828c8ec150d576 | [
"MIT"
] | null | null | null | uploadhaddocks/util.py | rcook/upload-haddocks | a33826be1873da68ba073a42ec828c8ec150d576 | [
"MIT"
] | 2 | 2017-12-29T03:26:52.000Z | 2017-12-30T20:17:13.000Z | uploadhaddocks/util.py | rcook/upload-haddocks | a33826be1873da68ba073a42ec828c8ec150d576 | [
"MIT"
] | null | null | null | ############################################################
#
# uploadhaddocks.util
# Copyright (C) 2017, Richard Cook
# Released under MIT License
# https://github.com/rcook/upload-haddocks
#
############################################################
from __future__ import print_function
import fnmatch
import json
import os
from pyprelude.file_system import make_path
from pyprelude.process import execute
from pyprelude.temp_util import temp_cwd, temp_dir
from pyprelude.util import unpack_args
import re
import shutil
import tarfile
_NAME_PATTERN = re.compile("^name\s*:\s*(?P<name>.*)$")
_VERSION_PATTERN = re.compile("^version\s*:\s*(?P<version>.*)$")
def _get_credentials(path):
obj = json.load(open(path, "rt"))
return obj["username"], obj["password"]
def _get_cabal_path(project_dir):
paths = fnmatch.filter(os.listdir(project_dir), "*.cabal")
if len(paths) == 0:
raise RuntimeError("Could not find a .cabal file")
if len(paths) > 1:
raise RuntimeError("Found more than one .cabal file")
return make_path(project_dir, paths[0])
def _get_stack_yaml_path(project_dir):
path = make_path(project_dir, "stack.yaml")
if not os.path.isfile(path):
raise RuntimeError("Could not find a stack.yaml file")
return path
def _get_package_info(cabal_path):
name = None
version = None
with open(cabal_path, "rt") as f:
for line in f.readlines():
if name is not None and version is not None:
                break
l = line.strip()
m = _NAME_PATTERN.match(l)
if m:
name = m.groups("name")[0]
m = _VERSION_PATTERN.match(l)
if m:
version = m.groups("version")[0]
if name is not None and version is not None:
return name, version
raise RuntimeError("Could not parse name and version from {}".format(cabal_path))
def _system(*args):
command_fragments = unpack_args(*args)
command = " ".join(command_fragments)
status = os.system(command)
if status != 0:
raise RuntimeError()
def _generate_docs(project_dir):
with temp_cwd(project_dir):
_system("stack", "build", "--haddock")
def _get_doc_root(project_dir):
return execute("stack", "path", "--local-doc-root").strip()
def _copy_docs(temp_dir, project_dir, package_name, package_version):
doc_root = _get_doc_root(project_dir)
base_name = "{}-{}".format(package_name, package_version)
doc_dir = make_path(temp_dir, "{}-docs".format(base_name))
shutil.copytree(make_path(doc_root, base_name), doc_dir)
return doc_dir
def _munge_file(path):
output = re.sub("href=\"\\.\\.\/([^/]*)\/", "href=\"../../\\1/docs/", open(path, "rt").read())
open(path, "wt").write(output)
def _munge_docs(doc_dir):
for d, _, file_names in os.walk(doc_dir):
for file_name in file_names:
p = make_path(d, file_name)
ext = os.path.splitext(p)[1]
if ext == ".html":
_munge_file(p)
def _create_archive(doc_dir):
parent_dir = os.path.dirname(doc_dir)
subdir = os.path.basename(doc_dir)
tar_path = "{}.tar.gz".format(doc_dir)
with tarfile.open(tar_path, "w:gz", format=tarfile.USTAR_FORMAT) as tf:
for d, _, file_names in os.walk(doc_dir):
for file_name in file_names:
p = make_path(d, file_name)
arc_name = os.path.join(subdir, os.path.relpath(p, doc_dir))
ti = tf.gettarinfo(p, arc_name)
ti.uid = 0
ti.gid = 0
with open(p, "rb") as f:
tf.addfile(ti, f)
return tar_path
def _upload_archive(user_name, password, tar_path, package_name, package_version):
command = [
"curl",
"-X",
"PUT",
"-H",
"Content-Type: application/x-tar",
"-H",
"Content-Encoding: gzip",
"-u",
"{}:{}".format(user_name, password),
"--data-binary",
"@{}".format(tar_path),
"https://hackage.haskell.org/package/{}-{}/docs".format(package_name, package_version)]
output = execute(command)
print(output)
def upload_haddocks(credentials_path, project_dir):
user_name, password = _get_credentials(credentials_path)
stack_yaml_path = _get_stack_yaml_path(project_dir)
cabal_path = _get_cabal_path(project_dir)
package_name, package_version = _get_package_info(cabal_path)
print("* Generate")
_generate_docs(project_dir)
with temp_dir() as d:
print("* Copy")
doc_dir = _copy_docs(d, project_dir, package_name, package_version)
print("* Munge")
_munge_docs(doc_dir)
print("* Pack")
tar_path = _create_archive(doc_dir)
print("* Upload")
_upload_archive(user_name, password, tar_path, package_name, package_version)
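# Illustrative usage sketch (not part of the original module). The credentials
# file is assumed to be JSON of the form {"username": "...", "password": "..."}
# as read by _get_credentials() above.
#
#   from uploadhaddocks.util import upload_haddocks
#
#   upload_haddocks("hackage-credentials.json", "/path/to/haskell/project")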
| 32.430464 | 98 | 0.616908 |
79401814190bd28e9f30ac44117e886cc00c8c35 | 5,828 | py | Python | src/reader/data.py | nboukraa/openqa | 876d56c074f425ec6eff53fe7dda9a1a807bd0c2 | [
"MIT"
] | null | null | null | src/reader/data.py | nboukraa/openqa | 876d56c074f425ec6eff53fe7dda9a1a807bd0c2 | [
"MIT"
] | null | null | null | src/reader/data.py | nboukraa/openqa | 876d56c074f425ec6eff53fe7dda9a1a807bd0c2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Edit from DrQA"""
import numpy as np
import logging
import unicodedata
from torch.utils.data import Dataset
from torch.utils.data.sampler import Sampler
from .vector import vectorize, vectorize_with_doc
from .vector import num_docs
logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
# Dictionary class for tokens.
# ------------------------------------------------------------------------------
class Dictionary(object):
NULL = '<NULL>'
UNK = '<UNK>'
START = 2
@staticmethod
def normalize(token):
return unicodedata.normalize('NFD', token)
def __init__(self):
self.tok2ind = {self.NULL: 0, self.UNK: 1}
self.ind2tok = {0: self.NULL, 1: self.UNK}
def __len__(self):
return len(self.tok2ind)
def __iter__(self):
return iter(self.tok2ind)
def __contains__(self, key):
if type(key) == int:
return key in self.ind2tok
elif type(key) == str:
return self.normalize(key) in self.tok2ind
def __getitem__(self, key):
if type(key) == int:
return self.ind2tok.get(key, self.UNK)
if type(key) == str:
return self.tok2ind.get(self.normalize(key),
self.tok2ind.get(self.UNK))
def __setitem__(self, key, item):
if type(key) == int and type(item) == str:
self.ind2tok[key] = item
elif type(key) == str and type(item) == int:
self.tok2ind[key] = item
else:
raise RuntimeError('Invalid (key, item) types.')
def add(self, token):
token = self.normalize(token)
if token not in self.tok2ind:
index = len(self.tok2ind)
self.tok2ind[token] = index
self.ind2tok[index] = token
def tokens(self):
"""Get dictionary tokens.
Return all the words indexed by this dictionary, except for special
tokens.
"""
tokens = [k for k in self.tok2ind.keys()
if k not in {'<NULL>', '<UNK>'}]
return tokens
# ------------------------------------------------------------------------------
# PyTorch dataset class for SQuAD (and SQuAD-like) data.
# ------------------------------------------------------------------------------
class ReaderDataset(Dataset):
def __init__(self, examples, model, single_answer=False):
self.model = model
self.examples = examples
self.single_answer = single_answer
def __len__(self):
return len(self.examples)
def __getitem__(self, index):
return vectorize(self.examples[index], self.model, self.single_answer)
def lengths(self):
return [(len(ex['document']), len(ex['question']))
for ex in self.examples]
def has_answer(answer, text):
"""Check if a text contains an answer string."""
for single_answer in answer:
for i in range(0, len(text) - len(single_answer) + 1):
if single_answer == text[i: i + len(single_answer)]:
return True
return False
class ReaderDataset_with_Doc(Dataset):
def __init__(self, examples, model, docs, single_answer=False):
self.model = model
self.examples = examples
self.single_answer = single_answer
self.docs = docs
#for i in range(len(self.examples)):
# for j in range(0, len(self.docs_by_question[i])):
# self.docs_by_question[i]['has_answer'] = has_answer(self.examples[i]['answer'], self.docs_by_question[i]['document'])
#print (self.docs_by_question.keys())
def __len__(self):
return len(self.examples)
def __getitem__(self, index):
#question = self.examples[index]['question']
#logger.info("%d\t%s", index, question)
#logger.info(self.docs_by_question[question])
#assert("\n" not in question)
#if (question not in self.docs_by_question):
# logger.info("Question not found:%s", question)
# return []
return vectorize_with_doc(self.examples[index],
index,
self.model,
self.single_answer,
self.docs[index])
def lengths(self):
#return [(len(ex['document']), len(ex['question'])) for ex in self.examples]
return [(len(doc[num_docs-1]['document']),
len(doc[num_docs-1]['question'])) for doc in self.docs]
# ------------------------------------------------------------------------------
# PyTorch sampler returning batched of sorted lengths (by doc and question).
# ------------------------------------------------------------------------------
class SortedBatchSampler(Sampler):
def __init__(self, lengths, batch_size, shuffle=True):
self.lengths = lengths
self.batch_size = batch_size
self.shuffle = shuffle
def __iter__(self):
lengths = np.array(
[(-l[0], -l[1], np.random.random()) for l in self.lengths],
dtype=[('l1', np.int_), ('l2', np.int_), ('rand', np.float_)]
)
indices = np.argsort(lengths, order=('l1', 'l2', 'rand'))
batches = [indices[i:i + self.batch_size]
for i in range(0, len(indices), self.batch_size)]
if self.shuffle:
np.random.shuffle(batches)
return iter([i for batch in batches for i in batch])
def __len__(self):
return len(self.lengths)
| 32.377778 | 134 | 0.541695 |
7940187fac1752fc7baa2568ecf349be24197d40 | 3,199 | py | Python | example/settings.py | miceno/django-categories | ef58cae61ee8d0c0920302305f019d76896e72fb | [
"Apache-2.0"
] | null | null | null | example/settings.py | miceno/django-categories | ef58cae61ee8d0c0920302305f019d76896e72fb | [
"Apache-2.0"
] | null | null | null | example/settings.py | miceno/django-categories | ef58cae61ee8d0c0920302305f019d76896e72fb | [
"Apache-2.0"
] | null | null | null | # Django settings for sample project.
import os
import sys
import django
APP = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
PROJ_ROOT = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, APP)
DEBUG = True
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'dev.db',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.flatpages',
'categories',
'categories.editor',
'mptt',
'simpletext',
)
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
USE_I18N = True
MEDIA_ROOT = os.path.abspath(os.path.join(PROJ_ROOT, 'media', 'uploads'))
MEDIA_URL = '/uploads/'
STATIC_ROOT = os.path.abspath(os.path.join(PROJ_ROOT, 'media', 'static'))
STATIC_URL = '/static/'
STATICFILES_DIRS = ()
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
SECRET_KEY = 'bwq#m)-zsey-fs)0#4*o=2z(v5g!ei=zytl9t-1hesh4b&-u^d'
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [os.path.abspath(os.path.join(os.path.dirname(__file__), 'templates'))],
'OPTIONS': {
'debug': DEBUG,
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
}
}
]
CATEGORIES_SETTINGS = {
'ALLOW_SLUG_CHANGE': True,
'RELATION_MODELS': ['simpletext.simpletext', 'flatpages.flatpage'],
'FK_REGISTRY': {
'flatpages.flatpage': 'category',
'simpletext.simpletext': (
'primary_category',
{'name': 'secondary_category', 'related_name': 'simpletext_sec_cat'},
),
},
'M2M_REGISTRY': {
'simpletext.simpletext': {'name': 'categories', 'related_name': 'm2mcats'},
'flatpages.flatpage': (
{'name': 'other_categories', 'related_name': 'other_cats'},
{'name': 'more_categories', 'related_name': 'more_cats'},
),
},
}
if django.VERSION[1] > 5:
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
| 26.882353 | 88 | 0.632385 |
79401882e359264ec49111bae57969ea26bebca0 | 4,810 | py | Python | src/azure-cli/azure/cli/command_modules/acs/_consts.py | allanpedroni/azure-cli | 4223f08e758e9fe16223564ba9fc77aee6315751 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/acs/_consts.py | allanpedroni/azure-cli | 4223f08e758e9fe16223564ba9fc77aee6315751 | [
"MIT"
] | null | null | null | src/azure-cli/azure/cli/command_modules/acs/_consts.py | allanpedroni/azure-cli | 4223f08e758e9fe16223564ba9fc77aee6315751 | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from enum import Enum
# consts for AgentPool
# priority
CONST_SCALE_SET_PRIORITY_REGULAR = "Regular"
CONST_SCALE_SET_PRIORITY_SPOT = "Spot"
# eviction policy
CONST_SPOT_EVICTION_POLICY_DELETE = "Delete"
CONST_SPOT_EVICTION_POLICY_DEALLOCATE = "Deallocate"
# Scale Down Mode
CONST_SCALE_DOWN_MODE_DELETE = "Delete"
CONST_SCALE_DOWN_MODE_DEALLOCATE = "Deallocate"
# os disk type
CONST_OS_DISK_TYPE_MANAGED = "Managed"
CONST_OS_DISK_TYPE_EPHEMERAL = "Ephemeral"
# mode
CONST_NODEPOOL_MODE_SYSTEM = "System"
CONST_NODEPOOL_MODE_USER = "User"
# os type
CONST_DEFAULT_NODE_OS_TYPE = "Linux"
# os sku
CONST_OS_SKU_UBUNTU = "Ubuntu"
CONST_OS_SKU_CBLMARINER = "CBLMariner"
# vm set type
CONST_VIRTUAL_MACHINE_SCALE_SETS = "VirtualMachineScaleSets"
CONST_AVAILABILITY_SET = "AvailabilitySet"
# vm size
CONST_DEFAULT_NODE_VM_SIZE = "Standard_DS2_v2"
CONST_DEFAULT_WINDOWS_NODE_VM_SIZE = "Standard_D2s_v3"
# consts for ManagedCluster
# load balancer sku
CONST_LOAD_BALANCER_SKU_BASIC = "basic"
CONST_LOAD_BALANCER_SKU_STANDARD = "standard"
# outbound type
CONST_OUTBOUND_TYPE_LOAD_BALANCER = "loadBalancer"
CONST_OUTBOUND_TYPE_USER_DEFINED_ROUTING = "userDefinedRouting"
CONST_OUTBOUND_TYPE_MANAGED_NAT_GATEWAY = "managedNATGateway"
CONST_OUTBOUND_TYPE_USER_ASSIGNED_NAT_GATEWAY = "userAssignedNATGateway"
# private dns zone mode
CONST_PRIVATE_DNS_ZONE_SYSTEM = "system"
CONST_PRIVATE_DNS_ZONE_NONE = "none"
# used to set identity profile (for kubelet)
CONST_MANAGED_IDENTITY_OPERATOR_ROLE = 'Managed Identity Operator'
CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID = 'f1a07417-d97a-45cb-824c-7a7467783830'
# upgrade channel
CONST_RAPID_UPGRADE_CHANNEL = "rapid"
CONST_STABLE_UPGRADE_CHANNEL = "stable"
CONST_PATCH_UPGRADE_CHANNEL = "patch"
CONST_NODE_IMAGE_UPGRADE_CHANNEL = "node-image"
CONST_NONE_UPGRADE_CHANNEL = "none"
# network plugin
CONST_NETWORK_PLUGIN_KUBENET = "kubenet"
CONST_NETWORK_PLUGIN_AZURE = "azure"
# consts for addons
# http application routing
CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME = "httpApplicationRouting"
# monitoring
CONST_MONITORING_ADDON_NAME = "omsagent"
CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID = "logAnalyticsWorkspaceResourceID"
CONST_MONITORING_USING_AAD_MSI_AUTH = "useAADAuth"
# virtual node
CONST_VIRTUAL_NODE_ADDON_NAME = "aciConnector"
CONST_VIRTUAL_NODE_SUBNET_NAME = "SubnetName"
# dashboard
CONST_KUBE_DASHBOARD_ADDON_NAME = "kubeDashboard"
# azure policy
CONST_AZURE_POLICY_ADDON_NAME = "azurepolicy"
# ingress application gateway
CONST_INGRESS_APPGW_ADDON_NAME = "ingressApplicationGateway"
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME = "applicationGatewayName"
CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID = "applicationGatewayId"
CONST_INGRESS_APPGW_SUBNET_ID = "subnetId"
CONST_INGRESS_APPGW_SUBNET_CIDR = "subnetCIDR"
CONST_INGRESS_APPGW_WATCH_NAMESPACE = "watchNamespace"
# confcom
CONST_CONFCOM_ADDON_NAME = "ACCSGXDevicePlugin"
CONST_ACC_SGX_QUOTE_HELPER_ENABLED = "ACCSGXQuoteHelperEnabled"
# open service mesh
CONST_OPEN_SERVICE_MESH_ADDON_NAME = "openServiceMesh"
# azure keyvault secrets provider
CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME = "azureKeyvaultSecretsProvider"
CONST_SECRET_ROTATION_ENABLED = "enableSecretRotation"
CONST_ROTATION_POLL_INTERVAL = "rotationPollInterval"
# all supported addons
ADDONS = {
'http_application_routing': CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME,
'monitoring': CONST_MONITORING_ADDON_NAME,
'virtual-node': CONST_VIRTUAL_NODE_ADDON_NAME,
'kube-dashboard': CONST_KUBE_DASHBOARD_ADDON_NAME,
'azure-policy': CONST_AZURE_POLICY_ADDON_NAME,
'ingress-appgw': CONST_INGRESS_APPGW_ADDON_NAME,
"confcom": CONST_CONFCOM_ADDON_NAME,
'open-service-mesh': CONST_OPEN_SERVICE_MESH_ADDON_NAME,
'azure-keyvault-secrets-provider': CONST_AZURE_KEYVAULT_SECRETS_PROVIDER_ADDON_NAME
}
# consts for check-acr command
CONST_CANIPULL_IMAGE = "mcr.microsoft.com/aks/canipull:0.0.4-alpha"
# consts for decorator pattern
class DecoratorMode(Enum):
"""Enumerations used to distinguish whether to handle creation or update.
"""
CREATE = 1
UPDATE = 2
class AgentPoolDecoratorMode(Enum):
"""Enumerations used to distinguish whether to deal with the default system agentpool in the context of the cluster
or any specific agentpool.
"""
MANAGED_CLUSTER = 1
STANDALONE = 2
# custom exception for decorator pattern, used for gracefully exit
class DecoratorEarlyExitException(Exception):
pass
| 32.281879 | 119 | 0.795634 |
794018c29845469dc6032916e51f7dbe8ae4b85b | 1,050 | py | Python | experimental/soundwave/services/pinpoint_service.py | bopopescu/chromium72-third-party-catapult | 774e1355b871e13bb858147a136e9cb476f55030 | [
"BSD-3-Clause"
] | 1 | 2019-01-04T10:08:58.000Z | 2019-01-04T10:08:58.000Z | experimental/soundwave/services/pinpoint_service.py | kind-john/catapult | 29635376119833f172a58a48a3282d353ce55d2b | [
"BSD-3-Clause"
] | null | null | null | experimental/soundwave/services/pinpoint_service.py | kind-john/catapult | 29635376119833f172a58a48a3282d353ce55d2b | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
from services import luci_auth
from services import request
SERVICE_URL = 'https://pinpoint-dot-chromeperf.appspot.com/api'
def Request(endpoint, **kwargs):
"""Send a request to some pinpoint endpoint."""
kwargs.setdefault('use_auth', True)
return json.loads(request.Request(SERVICE_URL + endpoint, **kwargs))
def Job(job_id, with_state=False, with_tags=False):
  """Get job information from its id."""
params = []
if with_state:
params.append(('o', 'STATE'))
if with_tags:
params.append(('o', 'TAGS'))
return Request('/job/%s' % job_id, params=params)
def Jobs():
"""List jobs for the authenticated user."""
return Request('/jobs')
def NewJob(**kwargs):
"""Create a new pinpoint job."""
if 'user' not in kwargs:
kwargs['user'] = luci_auth.GetUserEmail()
return Request('/new', method='POST', data=kwargs)
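# Illustrative usage sketch (not part of the original module); the job id and
# keyword arguments below are placeholders, not real Pinpoint values.
#
#   jobs = Jobs()                            # jobs for the authenticated user
#   info = Job('1234567890abcdef', with_state=True)
#   new_job = NewJob(name='example run')     # 'user' is filled in automatically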
| 25.609756 | 72 | 0.698095 |
794018d0f6669996b339580171f8a2e0334d7880 | 2,020 | py | Python | examples/example_mdrpd.py | teixeirafilipe/cpd | 9bd801fa0e9f81807af48cb089f5481c26332994 | [
"MIT"
] | null | null | null | examples/example_mdrpd.py | teixeirafilipe/cpd | 9bd801fa0e9f81807af48cb089f5481c26332994 | [
"MIT"
] | null | null | null | examples/example_mdrpd.py | teixeirafilipe/cpd | 9bd801fa0e9f81807af48cb089f5481c26332994 | [
"MIT"
] | null | null | null | #! /usr/bin/env python3
# MIT License
#
#Copyright 2020 Filipe Teixeira
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
# from cpd import Partial_Dependence
# this is a workaround to avoid importing cpd
exec(open('../cpd.py','r').read())
data = pd.read_csv('compound_activity.csv')
y = data['Activity'].to_numpy()
X = pd.get_dummies(data.drop('Activity',axis=1))
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.75)
# This is a very crude model without any hyper-parameter optimization
rf_model = RandomForestRegressor()
rf_model.fit(X_train, y_train)
print(f"Score on the Train Set: {rf_model.score(X_train,y_train):6.4f}")
print(f"Score on the Test Set: {rf_model.score(X_test,y_test):6.4f}")
pd_data = Partial_Dependence(rf_model, X_train, real_features=['Spec'])
#print(pd_data._ascii())
#pd_data.to_csv('tmp.csv')
pd_data.plot()
| 34.827586 | 79 | 0.769307 |
79401908b63d09320b018c46b1bc2b7824bb9ddd | 2,801 | py | Python | test_ramanujan_generator.py | csfulop/sicp_python | 52335542de5beb2323ac8339d399d33706e7f1e0 | [
"MIT"
] | null | null | null | test_ramanujan_generator.py | csfulop/sicp_python | 52335542de5beb2323ac8339d399d33706e7f1e0 | [
"MIT"
] | null | null | null | test_ramanujan_generator.py | csfulop/sicp_python | 52335542de5beb2323ac8339d399d33706e7f1e0 | [
"MIT"
] | null | null | null | from unittest import TestCase
from ramanujan_generator import integers_from, interleave, generator_map, pairs, merge_weighted, generator_filter, \
weighted_pairs, merge_with_next_item, ramanujan_numbers
class TestRamanujanGenerator(TestCase):
def test_integers_from(self):
g = integers_from(1)
for i in range(1, 20):
self.assertEqual(next(g), i)
def test_interleave(self):
g = interleave(integers_from(1), integers_from(101))
for i in range(1, 20):
self.assertEqual(next(g), i)
self.assertEqual(next(g), 100 + i)
def test_generator_map(self):
g = generator_map(lambda x: 2 * x, integers_from(1))
for i in range(1, 20):
self.assertEqual(next(g), 2 * i)
def test_generator_filter(self):
g = generator_filter(lambda x: x % 2 == 0, integers_from(1))
for i in range(1, 10):
self.assertEqual(next(g), 2 * i)
# FIXME: assert
def test_pairs(self):
g = pairs(integers_from(1), integers_from(1))
for i in range(20):
print(next(g))
def test_merge_weighted(self):
g = merge_weighted(generator_filter(lambda x: x % 2 == 0, integers_from(1)),
generator_filter(lambda x: x % 2 == 1, integers_from(1)),
lambda x: x)
for i in range(1, 20):
self.assertEqual(next(g), i)
def test_weighted_pairs(self):
g = weighted_pairs(integers_from(1), integers_from(1), lambda x, y: x + y)
a = next(g)
for i in range(1, 50):
a, b = next(g), a
self.assertGreaterEqual(a[0], b[0])
self.assertLessEqual(a[1], a[2])
def test_weighted_pairs_with_ramanujan(self):
g = weighted_pairs(integers_from(1), integers_from(1), lambda x, y: x ** 3 + y ** 3)
a = next(g)
for i in range(1, 100):
a, b = next(g), a
print(b)
self.assertGreaterEqual(a[0], b[0])
self.assertLessEqual(a[1], a[2])
def test_merge_with_next_item(self):
g = merge_with_next_item(integers_from(1))
for i in range(20):
a = next(g)
self.assertEqual(a[0], a[1] + 1)
def test_ramanujan_numbers(self):
"""
Execution results:
Up to 10: execution time: 23ms
Up to 100: execution time: 752ms; Memory usage=1.5GB
Up to 200: execution time: 3sec; Memory usage=10MB
"""
g = ramanujan_numbers()
for i in range(200):
a = next(g)
print(a)
self.assertEqual(a[0][0], a[1][0])
self.assertEqual(a[0][0], a[0][1] ** 3 + a[0][2] ** 3)
self.assertEqual(a[1][0], a[1][1] ** 3 + a[1][2] ** 3)
| 35.0125 | 116 | 0.559443 |
79401955cbdb176325edeae6386425c77a0354c5 | 28,962 | py | Python | qcodes/instrument_drivers/american_magnetics/AMI430.py | ctschnur/Qcodes | 623e39aa3774da8b7aa062734a85f4e1f79a635b | [
"MIT"
] | 1 | 2020-10-19T08:09:04.000Z | 2020-10-19T08:09:04.000Z | qcodes/instrument_drivers/american_magnetics/AMI430.py | ctschnur/Qcodes | 623e39aa3774da8b7aa062734a85f4e1f79a635b | [
"MIT"
] | 206 | 2020-10-01T06:33:56.000Z | 2022-03-29T16:08:08.000Z | qcodes/instrument_drivers/american_magnetics/AMI430.py | Akshita07/Qcodes | f75e4786e268f415935aa4658d92526279c7a102 | [
"MIT"
] | null | null | null | import collections
import logging
import time
from functools import partial
from typing import Union, Iterable, Callable
import numbers
import numpy as np
from qcodes import Instrument, IPInstrument, InstrumentChannel
from qcodes.utils.deprecate import deprecate
from qcodes.math_utils.field_vector import FieldVector
from qcodes.utils.validators import Bool, Numbers, Ints, Anything
log = logging.getLogger(__name__)
CartesianFieldLimitFunction = \
Callable[[numbers.Real, numbers.Real, numbers.Real], bool]
class AMI430Exception(Exception):
pass
class AMI430Warning(UserWarning):
pass
class AMI430SwitchHeater(InstrumentChannel):
class _Decorators:
@classmethod
def check_enabled(cls, f):
def check_enabled_decorator(self, *args, **kwargs):
if not self.check_enabled():
raise AMI430Exception("Switch not enabled")
return f(self, *args, **kwargs)
return check_enabled_decorator
def __init__(self, parent: 'AMI430') -> None:
super().__init__(parent, "SwitchHeater")
# Add state parameters
self.add_parameter('enabled',
label='Switch Heater Enabled',
get_cmd=self.check_enabled,
set_cmd=lambda x: (self.enable() if x
else self.disable()),
vals=Bool())
self.add_parameter('state',
label='Switch Heater On',
get_cmd=self.check_state,
set_cmd=lambda x: (self.on() if x
else self.off()),
vals=Bool())
self.add_parameter('in_persistent_mode',
label='Persistent Mode',
get_cmd="PERS?",
val_mapping={True: 1, False: 0})
# Configuration Parameters
self.add_parameter('current',
label='Switch Heater Current',
unit='mA',
get_cmd='PS:CURR?',
get_parser=float,
set_cmd='CONF:PS:CURR {}',
vals=Numbers(0, 125))
self.add_parameter('heat_time',
label='Heating Time',
unit='s',
get_cmd='PS:HTIME?',
get_parser=int,
set_cmd='CONF:PS:HTIME {}',
vals=Ints(5, 120))
self.add_parameter('cool_time',
label='Cooling Time',
unit='s',
get_cmd='PS:CTIME?',
get_parser=int,
set_cmd='CONF:PS:CTIME {}',
vals=Ints(5, 3600))
def disable(self):
"""Turn measurement off"""
self.write('CONF:PS 0')
self._enabled = False
def enable(self):
"""Turn measurement on"""
self.write('CONF:PS 1')
self._enabled = True
def check_enabled(self):
return bool(self.ask('PS:INST?').strip())
@_Decorators.check_enabled
def on(self):
self.write("PS 1")
while self._parent.ramping_state() == "heating switch":
self._parent._sleep(0.5)
@_Decorators.check_enabled
def off(self):
self.write("PS 0")
while self._parent.ramping_state() == "cooling switch":
self._parent._sleep(0.5)
@_Decorators.check_enabled
def check_state(self):
return bool(self.ask("PS?").strip())
class AMI430(IPInstrument):
"""
Driver for the American Magnetics Model 430 magnet power supply programmer.
This class controls a single magnet power supply. In order to use two or
three magnets simultaneously to set field vectors, first instantiate the
individual magnets using this class and then pass them as arguments to
either the AMI430_2D or AMI430_3D virtual instrument classes.
Args:
name (str): a name for the instrument
address (str): IP address of the power supply programmer
current_ramp_limit: A current ramp limit, in units of A/s
"""
_SHORT_UNITS = {'seconds': 's', 'minutes': 'min',
'tesla': 'T', 'kilogauss': 'kG'}
_DEFAULT_CURRENT_RAMP_LIMIT = 0.06 # [A/s]
def __init__(self, name, address=None, port=None,
reset=False, terminator='\r\n',
current_ramp_limit=None, has_current_rating=False,
**kwargs):
super().__init__(name, address, port, terminator=terminator,
write_confirmation=False, **kwargs)
self._parent_instrument = None
self.has_current_rating = has_current_rating
# Add reset function
self.add_function('reset', call_cmd='*RST')
if reset:
self.reset()
# Add parameters setting instrument units
self.add_parameter("ramp_rate_units",
get_cmd='RAMP:RATE:UNITS?',
set_cmd=(lambda units:
self._update_units(ramp_rate_units=units)),
val_mapping={'seconds': 0,
'minutes': 1})
self.add_parameter('field_units',
get_cmd='FIELD:UNITS?',
set_cmd=(lambda units:
self._update_units(field_units=units)),
val_mapping={'kilogauss': 0,
'tesla': 1})
# Set programmatic safety limits
self.add_parameter('current_ramp_limit',
get_cmd=lambda: self._current_ramp_limit,
set_cmd=self._update_ramp_rate_limit,
unit="A/s")
self.add_parameter('field_ramp_limit',
get_cmd=lambda: self.current_ramp_limit(),
set_cmd=lambda x: self.current_ramp_limit(x),
scale=1/float(self.ask("COIL?")),
unit="T/s")
if current_ramp_limit is None:
self._update_ramp_rate_limit(AMI430._DEFAULT_CURRENT_RAMP_LIMIT,
update=False)
else:
self._update_ramp_rate_limit(current_ramp_limit, update=False)
# Add solenoid parameters
self.add_parameter('coil_constant',
get_cmd=self._update_coil_constant,
set_cmd=self._update_coil_constant,
vals=Numbers(0.001, 999.99999))
# TODO: Not all AMI430s expose this setting. Currently, we
# don't know why, but this most likely a firmware version issue,
# so eventually the following condition will be something like
# if firmware_version > XX
if has_current_rating:
self.add_parameter('current_rating',
get_cmd="CURR:RATING?",
get_parser=float,
set_cmd="CONF:CURR:RATING {}",
unit="A",
vals=Numbers(0.001, 9999.9999))
self.add_parameter('field_rating',
get_cmd=lambda: self.current_rating(),
set_cmd=lambda x: self.current_rating(x),
scale=1/float(self.ask("COIL?")))
self.add_parameter('current_limit',
unit="A",
set_cmd="CONF:CURR:LIMIT {}",
get_cmd='CURR:LIMIT?',
get_parser=float,
vals=Numbers(0, 80)) # what are good numbers here?
self.add_parameter('field_limit',
set_cmd=self.current_limit.set,
get_cmd=self.current_limit.get,
scale=1/float(self.ask("COIL?")))
# Add current solenoid parameters
# Note that field is validated in set_field
self.add_parameter('field',
get_cmd='FIELD:MAG?',
get_parser=float,
set_cmd=self.set_field)
self.add_parameter('ramp_rate',
get_cmd=self._get_ramp_rate,
set_cmd=self._set_ramp_rate)
self.add_parameter('setpoint',
get_cmd='FIELD:TARG?',
get_parser=float)
self.add_parameter('is_quenched',
get_cmd='QU?',
val_mapping={True: 1, False: 0})
self.add_function('reset_quench', call_cmd='QU 0')
self.add_function('set_quenched', call_cmd='QU 1')
self.add_parameter('ramping_state',
get_cmd='STATE?',
get_parser=int,
val_mapping={
'ramping': 1,
'holding': 2,
'paused': 3,
'manual up': 4,
'manual down': 5,
'zeroing current': 6,
'quench detected': 7,
'at zero current': 8,
'heating switch': 9,
'cooling switch': 10,
})
# Add persistent switch
switch_heater = AMI430SwitchHeater(self)
self.add_submodule("switch_heater", switch_heater)
# Add interaction functions
self.add_function('get_error', call_cmd='SYST:ERR?')
self.add_function('ramp', call_cmd='RAMP')
self.add_function('pause', call_cmd='PAUSE')
self.add_function('zero', call_cmd='ZERO')
# Correctly assign all units
self._update_units()
self.connect_message()
def _sleep(self, t):
"""
        Sleep for a number of seconds t. If we are using
        the PyVISA 'sim' backend, omit this
"""
simmode = getattr(self, 'visabackend', False) == 'sim'
if simmode:
return
else:
time.sleep(t)
def _can_start_ramping(self):
"""
Check the current state of the magnet to see if we can start ramping
"""
if self.is_quenched():
logging.error(__name__ + ': Could not ramp because of quench')
return False
if self.switch_heater.in_persistent_mode():
logging.error(__name__ + ': Could not ramp because persistent')
return False
state = self.ramping_state()
if state == 'ramping':
# If we don't have a persistent switch, or it's warm
if not self.switch_heater.enabled():
return True
elif self.switch_heater.state():
return True
elif state in ['holding', 'paused', 'at zero current']:
return True
logging.error(__name__ + ': Could not ramp, state: {}'.format(state))
return False
def set_field(self, value, *, block=True, perform_safety_check=True):
"""
Ramp to a certain field
Args:
            block (bool): Whether to wait until the field has finished setting
perform_safety_check (bool): Whether to set the field via a parent
driver (if present), which might perform additional safety
checks.
"""
# Check we aren't violating field limits
field_lim = float(self.ask("COIL?"))*self.current_limit()
if np.abs(value) > field_lim:
msg = 'Aborted _set_field; {} is higher than limit of {}'
raise ValueError(msg.format(value, field_lim))
# If part of a parent driver, set the value using that driver
if self._parent_instrument is not None and perform_safety_check:
self._parent_instrument._request_field_change(self, value)
return
# Check we can ramp
if not self._can_start_ramping():
raise AMI430Exception("Cannot ramp in current state")
# Then, do the actual ramp
self.pause()
# Set the ramp target
self.write('CONF:FIELD:TARG {}'.format(value))
# If we have a persistent switch, make sure it is resistive
if self.switch_heater.enabled():
if not self.switch_heater.state():
raise AMI430Exception("Switch heater is not on")
self.ramp()
# Check if we want to block
if not block:
return
# Otherwise, wait until no longer ramping
self.log.debug(f'Starting blocking ramp of {self.name} to {value}')
while self.ramping_state() == 'ramping':
self._sleep(0.3)
self._sleep(2.0)
state = self.ramping_state()
self.log.debug(f'Finished blocking ramp')
# If we are now holding, it was successful
if state != 'holding':
msg = '_set_field({}) failed with state: {}'
raise AMI430Exception(msg.format(value, state))
@deprecate(alternative='set_field with named parameter block=False')
def ramp_to(self, value, block=False):
"""User accessible method to ramp to field."""
if self._parent_instrument is not None:
if not block:
msg = (": Initiating a blocking instead of non-blocking "
" function because this magnet belongs to a parent "
"driver")
logging.warning(__name__ + msg)
self._parent_instrument._request_field_change(self, value)
else:
self.set_field(value, block=False)
def _get_ramp_rate(self):
""" Return the ramp rate of the first segment in Tesla per second """
results = self.ask('RAMP:RATE:FIELD:1?').split(',')
return float(results[0])
def _set_ramp_rate(self, rate):
""" Set the ramp rate of the first segment in Tesla per second """
if rate > self.field_ramp_limit():
raise ValueError(f"{rate} {self.ramp_rate.unit} "
f"is above the ramp rate limit of "
f"{self.field_ramp_limit()} "
f"{self.field_ramp_limit()}")
self.write('CONF:RAMP:RATE:SEG 1')
self.write('CONF:RAMP:RATE:FIELD 1,{},0'.format(rate))
def _connect(self):
"""
Append the IPInstrument connect to flush the welcome message of the AMI
430 programmer
:return: None
"""
super()._connect()
self.flush_connection()
def _update_ramp_rate_limit(self, new_current_rate_limit, update=True):
"""
Update the maximum current ramp rate
The value passed here is scaled by the units set in
self.ramp_rate_units
"""
# Update ramp limit
self._current_ramp_limit = new_current_rate_limit
# And update instrument limits
if update:
field_ramp_limit = self.field_ramp_limit()
if self.ramp_rate() > field_ramp_limit:
self.ramp_rate(field_ramp_limit)
def _update_coil_constant(self, new_coil_constant=None):
"""
Update the coil constant and relevant scaling factors.
If new_coil_constant is none, query the coil constant from the
instrument
"""
# Query coil constant from instrument
if new_coil_constant is None:
new_coil_constant = float(self.ask("COIL?"))
else:
self.write("CONF:COIL {}".format(new_coil_constant))
# Update scaling factors
self.field_ramp_limit.scale = 1/new_coil_constant
self.field_limit.scale = 1/new_coil_constant
if self.has_current_rating:
self.field_rating.scale = 1/new_coil_constant
# Return new coil constant
return new_coil_constant
def _update_units(self, ramp_rate_units=None, field_units=None):
# Get or set units on device
if ramp_rate_units is None:
ramp_rate_units = self.ramp_rate_units()
else:
self.write("CONF:RAMP:RATE:UNITS {}".format(ramp_rate_units))
ramp_rate_units = self.ramp_rate_units.\
inverse_val_mapping[ramp_rate_units]
if field_units is None:
field_units = self.field_units()
else:
self.write("CONF:FIELD:UNITS {}".format(field_units))
field_units = self.field_units.inverse_val_mapping[field_units]
# Map to shortened unit names
ramp_rate_units = AMI430._SHORT_UNITS[ramp_rate_units]
field_units = AMI430._SHORT_UNITS[field_units]
# And update all units
self.coil_constant.unit = "{}/A".format(field_units)
self.field_limit.unit = f"{field_units}"
self.field.unit = "{}".format(field_units)
self.setpoint.unit = "{}".format(field_units)
self.ramp_rate.unit = "{}/{}".format(field_units, ramp_rate_units)
self.current_ramp_limit.unit = "A/{}".format(ramp_rate_units)
self.field_ramp_limit.unit = f"{field_units}/{ramp_rate_units}"
# And update scaling factors
# Note: we don't update field_ramp_limit scale as it redirects
# to ramp_rate_limit; we don't update ramp_rate units as
# the instrument stores changed units
if ramp_rate_units == "min":
self.current_ramp_limit.scale = 1/60
else:
self.current_ramp_limit.scale = 1
# If the field units change, the value of the coil constant also
# changes, hence we read the new value of the coil constant from the
# instrument via the `coil_constant` parameter (which in turn also
# updates settings of some parameters due to the fact that the coil
# constant changes)
self.coil_constant()
class AMI430_3D(Instrument):
def __init__(self, name,
instrument_x, instrument_y, instrument_z,
field_limit: Union[numbers.Real,
Iterable[CartesianFieldLimitFunction]],
**kwargs):
super().__init__(name, **kwargs)
if not isinstance(name, str):
raise ValueError("Name should be a string")
instruments = [instrument_x, instrument_y, instrument_z]
if not all([isinstance(instrument, AMI430)
for instrument in instruments]):
raise ValueError("Instruments need to be instances "
"of the class AMI430")
self._instrument_x = instrument_x
self._instrument_y = instrument_y
self._instrument_z = instrument_z
self._field_limit: Union[float, Iterable[CartesianFieldLimitFunction]]
if isinstance(field_limit, collections.abc.Iterable):
self._field_limit = field_limit
elif isinstance(field_limit, numbers.Real):
# Conversion to float makes related driver logic simpler
self._field_limit = float(field_limit)
else:
raise ValueError("field limit should either be a number or "
"an iterable of callable field limit functions.")
self._set_point = FieldVector(
x=self._instrument_x.field(),
y=self._instrument_y.field(),
z=self._instrument_z.field()
)
# Get-only parameters that return a measured value
self.add_parameter(
'cartesian_measured',
get_cmd=partial(self._get_measured, 'x', 'y', 'z'),
unit='T'
)
self.add_parameter(
'x_measured',
get_cmd=partial(self._get_measured, 'x'),
unit='T'
)
self.add_parameter(
'y_measured',
get_cmd=partial(self._get_measured, 'y'),
unit='T'
)
self.add_parameter(
'z_measured',
get_cmd=partial(self._get_measured, 'z'),
unit='T'
)
self.add_parameter(
'spherical_measured',
get_cmd=partial(
self._get_measured,
'r',
'theta',
'phi'
),
unit='T'
)
self.add_parameter(
'phi_measured',
get_cmd=partial(self._get_measured, 'phi'),
unit='deg'
)
self.add_parameter(
'theta_measured',
get_cmd=partial(self._get_measured, 'theta'),
unit='deg'
)
self.add_parameter(
'field_measured',
get_cmd=partial(self._get_measured, 'r'),
unit='T')
self.add_parameter(
'cylindrical_measured',
get_cmd=partial(self._get_measured,
'rho',
'phi',
'z'),
unit='T')
self.add_parameter(
'rho_measured',
get_cmd=partial(self._get_measured, 'rho'),
unit='T'
)
# Get and set parameters for the set points of the coordinates
self.add_parameter(
'cartesian',
get_cmd=partial(self._get_setpoints, ('x', 'y', 'z')),
set_cmd=partial(self._set_setpoints, ('x', 'y', 'z')),
unit='T',
vals=Anything()
)
self.add_parameter(
'x',
get_cmd=partial(self._get_setpoints, ('x',)),
set_cmd=partial(self._set_setpoints, ('x',)),
unit='T',
vals=Numbers()
)
self.add_parameter(
'y',
get_cmd=partial(self._get_setpoints, ('y',)),
set_cmd=partial(self._set_setpoints, ('y',)),
unit='T',
vals=Numbers()
)
self.add_parameter(
'z',
get_cmd=partial(self._get_setpoints, ('z',)),
set_cmd=partial(self._set_setpoints, ('z',)),
unit='T',
vals=Numbers()
)
self.add_parameter(
'spherical',
get_cmd=partial(
self._get_setpoints, ('r', 'theta', 'phi')
),
set_cmd=partial(
self._set_setpoints, ('r', 'theta', 'phi')
),
unit='tuple?',
vals=Anything()
)
self.add_parameter(
'phi',
get_cmd=partial(self._get_setpoints, ('phi',)),
set_cmd=partial(self._set_setpoints, ('phi',)),
unit='deg',
vals=Numbers()
)
self.add_parameter(
'theta',
get_cmd=partial(self._get_setpoints, ('theta',)),
set_cmd=partial(self._set_setpoints, ('theta',)),
unit='deg',
vals=Numbers()
)
self.add_parameter(
'field',
get_cmd=partial(self._get_setpoints, ('r',)),
set_cmd=partial(self._set_setpoints, ('r',)),
unit='T',
vals=Numbers()
)
self.add_parameter(
'cylindrical',
get_cmd=partial(
self._get_setpoints, ('rho', 'phi', 'z')
),
set_cmd=partial(
self._set_setpoints, ('rho', 'phi', 'z')
),
unit='tuple?',
vals=Anything()
)
self.add_parameter(
'rho',
get_cmd=partial(self._get_setpoints, ('rho',)),
set_cmd=partial(self._set_setpoints, ('rho',)),
unit='T',
vals=Numbers()
)
self.add_parameter(
'block_during_ramp',
set_cmd=None,
initial_value=True,
unit='',
vals=Bool()
)
def _verify_safe_setpoint(self, setpoint_values):
if isinstance(self._field_limit, float):
return np.linalg.norm(setpoint_values) < self._field_limit
answer = any([limit_function(*setpoint_values) for
limit_function in self._field_limit])
return answer
def _adjust_child_instruments(self, values):
"""
Set the fields of the x/y/z magnets. This function is called
whenever the field is changed and performs several safety checks
to make sure no limits are exceeded.
Args:
values (tuple): a tuple of cartesian coordinates (x, y, z).
"""
self.log.debug("Checking whether fields can be set")
# Check if exceeding the global field limit
if not self._verify_safe_setpoint(values):
raise ValueError("_set_fields aborted; field would exceed limit")
# Check if the individual instruments are ready
for name, value in zip(["x", "y", "z"], values):
instrument = getattr(self, "_instrument_{}".format(name))
if instrument.ramping_state() == "ramping":
msg = '_set_fields aborted; magnet {} is already ramping'
raise AMI430Exception(msg.format(instrument))
# Now that we know we can proceed, call the individual instruments
self.log.debug("Field values OK, proceeding")
for operator in [np.less, np.greater]:
# First ramp the coils that are decreasing in field strength.
# This will ensure that we are always in a safe region as
# far as the quenching of the magnets is concerned
for name, value in zip(["x", "y", "z"], values):
instrument = getattr(self, "_instrument_{}".format(name))
current_actual = instrument.field()
# If the new set point is practically equal to the
# current one then do nothing
if np.isclose(value, current_actual, rtol=0, atol=1e-8):
continue
# evaluate if the new set point is smaller or larger
# than the current value
if not operator(abs(value), abs(current_actual)):
continue
instrument.set_field(value, perform_safety_check=False,
block=self.block_during_ramp.get())
def _request_field_change(self, instrument, value):
"""
This method is called by the child x/y/z magnets if they are set
individually. It results in additional safety checks being
performed by this 3D driver.
"""
if instrument is self._instrument_x:
self._set_x(value)
elif instrument is self._instrument_y:
self._set_y(value)
elif instrument is self._instrument_z:
self._set_z(value)
else:
            msg = "This magnet doesn't belong to its specified parent {}"
raise NameError(msg.format(self))
def _get_measured(self, *names):
x = self._instrument_x.field()
y = self._instrument_y.field()
z = self._instrument_z.field()
measured_values = FieldVector(x=x, y=y, z=z).get_components(*names)
# Convert angles from radians to degrees
d = dict(zip(names, measured_values))
# Do not do "return list(d.values())", because then there is
        # no guarantee that the order in which the values are returned
# is the same as the original intention
return_value = [d[name] for name in names]
if len(names) == 1:
return_value = return_value[0]
return return_value
def _get_setpoints(self, names):
measured_values = self._set_point.get_components(*names)
# Convert angles from radians to degrees
d = dict(zip(names, measured_values))
return_value = [d[name] for name in names]
# Do not do "return list(d.values())", because then there is
# no guarantee that the order in which the values are returned
# is the same as the original intention
if len(names) == 1:
return_value = return_value[0]
return return_value
def _set_setpoints(self, names, values):
kwargs = dict(zip(names, np.atleast_1d(values)))
set_point = FieldVector()
set_point.copy(self._set_point)
if len(kwargs) == 3:
set_point.set_vector(**kwargs)
else:
set_point.set_component(**kwargs)
self._adjust_child_instruments(
set_point.get_components("x", "y", "z")
)
self._set_point = set_point
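# --- Illustrative usage sketch (added; not part of the original driver) --------
# Minimal sketch of driving the vector magnet through AMI430_3D, assuming three
# single-axis AMI430 instances (ami_x, ami_y, ami_z) have already been created
# with this module's AMI430 class. The instance names and the 1.0 T limit are
# assumptions for illustration only.
def _example_vector_ramp(ami_x, ami_y, ami_z):
    # A scalar field_limit bounds the norm of the combined field vector (in T).
    magnet = AMI430_3D('vector_magnet', ami_x, ami_y, ami_z, field_limit=1.0)
    # Writing a Cartesian set point ramps the individual coils; axes whose field
    # magnitude decreases are ramped first (see _adjust_child_instruments above).
    magnet.cartesian((0.1, 0.0, 0.2))
    return magnet.cartesian_measured()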
| 36.568182 | 79 | 0.544576 |
79401a1bd561dadb01efaeb2624bc4eb9ddf567d | 1,578 | py | Python | share/rpcauth/rpcauth.py | OpenBullet-Coin/OpenBullet | 4887371728d143ab50c19c59ddcae76204eca112 | [
"MIT"
] | 2 | 2021-04-25T12:13:06.000Z | 2021-07-27T16:09:45.000Z | share/rpcauth/rpcauth.py | OpenBullet-Coin/OpenBullet | 4887371728d143ab50c19c59ddcae76204eca112 | [
"MIT"
] | null | null | null | share/rpcauth/rpcauth.py | OpenBullet-Coin/OpenBullet | 4887371728d143ab50c19c59ddcae76204eca112 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from argparse import ArgumentParser
from base64 import urlsafe_b64encode
from binascii import hexlify
from getpass import getpass
from os import urandom
import hmac
def generate_salt(size):
"""Create size byte hex salt"""
return hexlify(urandom(size)).decode()
def generate_password():
"""Create 32 byte b64 password"""
return urlsafe_b64encode(urandom(32)).decode('utf-8')
def password_to_hmac(salt, password):
m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), 'SHA256')
return m.hexdigest()
def main():
parser = ArgumentParser(description='Create login credentials for a JSON-RPC user')
parser.add_argument('username', help='the username for authentication')
parser.add_argument('password', help='leave empty to generate a random password or specify "-" to prompt for password', nargs='?')
args = parser.parse_args()
if not args.password:
args.password = generate_password()
elif args.password == '-':
args.password = getpass()
# Create 16 byte hex salt
salt = generate_salt(16)
password_hmac = password_to_hmac(salt, args.password)
print('String to be appended to openbullet.conf:')
print('rpcauth={0}:{1}${2}'.format(args.username, salt, password_hmac))
print('Your password:\n{0}'.format(args.password))
if __name__ == '__main__':
main()
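# --- Illustrative example (added; not part of the original script) -------------
# Minimal sketch of the credential flow main() performs, using only the helpers
# defined above. The username is an assumption for illustration only.
def _example_rpcauth(username='alice'):
    password = generate_password()            # random url-safe base64 password
    salt = generate_salt(16)                   # 16-byte salt, hex encoded
    digest = password_to_hmac(salt, password)
    # The line appended to openbullet.conf pairs the username with
    # "salt$HMAC-SHA256(salt, password)".
    return 'rpcauth={0}:{1}${2}'.format(username, salt, digest), password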
| 33.574468 | 134 | 0.714829 |
79401a947b4a9f8ff58dbba2971b114f0ffd9425 | 3,283 | py | Python | make/photon/prepare/utils/registry.py | thechristschn/harbor | dbec54573c4c50c62b08c317d3b3241d0ce9912b | [
"Apache-2.0"
] | 1 | 2020-07-31T15:00:54.000Z | 2020-07-31T15:00:54.000Z | make/photon/prepare/utils/registry.py | thechristschn/harbor | dbec54573c4c50c62b08c317d3b3241d0ce9912b | [
"Apache-2.0"
] | 3 | 2020-03-09T15:24:24.000Z | 2020-09-10T08:54:35.000Z | make/photon/prepare/utils/registry.py | thechristschn/harbor | dbec54573c4c50c62b08c317d3b3241d0ce9912b | [
"Apache-2.0"
] | 1 | 2020-12-10T06:46:23.000Z | 2020-12-10T06:46:23.000Z | import copy
import os
import subprocess
from g import config_dir, templates_dir, DEFAULT_GID, DEFAULT_UID, data_dir
from urllib.parse import urlsplit
from utils.jinja import render_jinja
from utils.misc import prepare_dir
registry_config_dir = os.path.join(config_dir, "registry")
registry_config_template_path = os.path.join(templates_dir, "registry", "config.yml.jinja")
registry_conf = os.path.join(config_dir, "registry", "config.yml")
registry_passwd_path = os.path.join(config_dir, "registry", "passwd")
registry_data_dir = os.path.join(data_dir, 'registry')
levels_map = {
'debug': 'debug',
'info': 'info',
'warning': 'warn',
'error': 'error',
'fatal': 'fatal'
}
def prepare_registry(config_dict):
prepare_dir(registry_data_dir, uid=DEFAULT_UID, gid=DEFAULT_GID)
prepare_dir(registry_config_dir)
gen_passwd_file(config_dict)
storage_provider_info = get_storage_provider_info(
config_dict['storage_provider_name'],
config_dict['storage_provider_config'])
# process redis info
redis_ops = parse_redis(config_dict['redis_url_reg'])
render_jinja(
registry_config_template_path,
registry_conf,
uid=DEFAULT_UID,
gid=DEFAULT_GID,
level=levels_map[config_dict['log_level']],
storage_provider_info=storage_provider_info,
**config_dict, **redis_ops)
def parse_redis(redis_url):
u = urlsplit(redis_url)
if not u.scheme or u.scheme == 'redis':
return {
'redis_host': u.netloc.split('@')[-1],
'redis_password': u.password or '',
'redis_db_index_reg': u.path and int(u.path[1:]) or 0,
}
elif u.scheme == 'redis+sentinel':
return {
'sentinel_master_set': u.path.split('/')[1],
'redis_host': u.netloc.split('@')[-1],
'redis_password': u.password or '',
'redis_db_index_reg': len(u.path.split('/')) == 3 and int(u.path.split('/')[2]) or 0,
}
else:
raise Exception('bad redis url for registry:' + redis_url)
def get_storage_provider_info(provider_name, provider_config):
provider_config_copy = copy.deepcopy(provider_config)
if provider_name == "filesystem":
if not (provider_config_copy and ('rootdirectory' in provider_config_copy)):
provider_config_copy['rootdirectory'] = '/storage'
if provider_name == 'gcs' and provider_config_copy.get('keyfile'):
provider_config_copy['keyfile'] = '/etc/registry/gcs.key'
# generate storage configuration section in yaml format
storage_provider_conf_list = [provider_name + ':']
for config in provider_config_copy.items():
if config[1] is None:
value = ''
elif config[1] == True:
value = 'true'
else:
value = config[1]
storage_provider_conf_list.append('{}: {}'.format(config[0], value))
storage_provider_info = ('\n' + ' ' * 4).join(storage_provider_conf_list)
return storage_provider_info
def gen_passwd_file(config_dict):
return subprocess.call(["/usr/bin/htpasswd", "-bcB", registry_passwd_path, config_dict['registry_username'],
config_dict['registry_password']], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
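# --- Illustrative example (added; not part of the original module) -------------
# Minimal sketch of the two URL forms parse_redis() accepts. Hosts, password and
# database indices below are assumptions for illustration only.
def _example_parse_redis():
    # Plain redis URL -> host[:port], optional password, optional db index.
    plain = parse_redis('redis://:[email protected]:6379/1')
    # Sentinel URL -> the master set name is the first path segment, db index the second.
    sentinel = parse_redis('redis+sentinel://:[email protected]:26379/mymaster/2')
    return plain, sentinel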
| 37.306818 | 115 | 0.670728 |
79401ab6ce68ecae49c4b517d83c3b0492e7f65b | 1,625 | py | Python | mopidy_gmusic/__init__.py | sebsebmc/mopidy-gmusic | 2a081de654e7f1fa90ac5e840df3e75dc0815d05 | [
"Apache-2.0"
] | null | null | null | mopidy_gmusic/__init__.py | sebsebmc/mopidy-gmusic | 2a081de654e7f1fa90ac5e840df3e75dc0815d05 | [
"Apache-2.0"
] | null | null | null | mopidy_gmusic/__init__.py | sebsebmc/mopidy-gmusic | 2a081de654e7f1fa90ac5e840df3e75dc0815d05 | [
"Apache-2.0"
] | 1 | 2019-05-02T21:59:05.000Z | 2019-05-02T21:59:05.000Z | from __future__ import unicode_literals
import os
from mopidy import config, ext
__version__ = '4.0.0'
class GMusicExtension(ext.Extension):
dist_name = 'Mopidy-GMusic'
ext_name = 'gmusic'
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
return config.read(conf_file)
def get_config_schema(self):
schema = super(GMusicExtension, self).get_config_schema()
schema['initial_code'] = config.Secret(optional=True)
schema['refresh_token'] = config.Secret(optional=True)
schema['bitrate'] = config.Integer(choices=(128, 160, 320))
schema['deviceid'] = config.String(optional=True)
schema['all_access'] = config.Boolean(optional=True)
schema['refresh_library'] = config.Integer(minimum=-1, optional=True)
schema['refresh_playlists'] = config.Integer(minimum=-1, optional=True)
schema['radio_stations_in_browse'] = config.Boolean(optional=True)
schema['radio_stations_as_playlists'] = config.Boolean(optional=True)
schema['radio_stations_count'] = config.Integer(
minimum=1, optional=True)
schema['radio_tracks_count'] = config.Integer(minimum=1, optional=True)
schema['top_tracks_count'] = config.Integer(minimum=1, optional=True)
return schema
def setup(self, registry):
from .backend import GMusicBackend
from .scrobbler_frontend import GMusicScrobblerFrontend
registry.add('backend', GMusicBackend)
registry.add('frontend', GMusicScrobblerFrontend)
| 31.862745 | 79 | 0.690462 |
79401ac2c02e754404a2814c0e7c0a163c086971 | 427 | py | Python | tasks.py | dmitrypol/redis101 | 49e5ddccff6b799eefc5fcaaf7e1a5335ba570a4 | [
"MIT"
] | 1 | 2019-04-17T07:46:50.000Z | 2019-04-17T07:46:50.000Z | tasks.py | dmitrypol/redis101 | 49e5ddccff6b799eefc5fcaaf7e1a5335ba570a4 | [
"MIT"
] | null | null | null | tasks.py | dmitrypol/redis101 | 49e5ddccff6b799eefc5fcaaf7e1a5335ba570a4 | [
"MIT"
] | null | null | null | import os
import time
from redis import Redis
redis_conn = Redis(host=os.environ['REDIS_HOST'], port=os.environ['REDIS_PORT'])
from rq.decorators import job
@job('default', connection=redis_conn)
def generate_report(input_param):
time.sleep(5)
return {'input_param': input_param}
@job('default', connection=redis_conn)
def download_data(input_param):
time.sleep(5)
return {'download_data': input_param} | 23.722222 | 80 | 0.744731 |
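# --- Illustrative usage (added; not part of the original module) ---------------
# The @job decorator gives each function a .delay() helper that enqueues it on
# the 'default' queue instead of running it inline; an RQ worker attached to the
# same Redis instance then executes it. The arguments are assumptions.
def example_enqueue():
    report_job = generate_report.delay('monthly report')
    download_job = download_data.delay('dataset-1')
    return report_job.id, download_job.id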
79401adf5a088c952359b9d305fdc6ccbfa3384f | 2,055 | py | Python | test_typographie.py | Delegation-numerique-en-sante/mesconseilscovid | 3101bc0e272f3c1c337d93af5df56a9a5344e9b6 | [
"MIT"
] | 26 | 2020-05-08T07:52:32.000Z | 2021-12-27T10:25:44.000Z | test_typographie.py | Delegation-numerique-en-sante/mesconseilscovid | 3101bc0e272f3c1c337d93af5df56a9a5344e9b6 | [
"MIT"
] | 419 | 2020-05-10T10:28:26.000Z | 2022-03-31T13:19:41.000Z | test_typographie.py | Delegation-numerique-en-sante/mesconseilscovid | 3101bc0e272f3c1c337d93af5df56a9a5344e9b6 | [
"MIT"
] | 19 | 2020-05-10T10:23:16.000Z | 2021-12-03T19:48:04.000Z | import pytest
@pytest.mark.parametrize(
"in_,out_",
[
("", ""),
(" ", " "),
("\u00a0", "\u00a0"),
("\u202f", "\u202f"),
("ici !", "ici !"),
("non ?", "non ?"),
("infos :", "infos :"),
("entre « guillemets »", "entre « guillemets »"),
(
'entre « <a href="">guillemets avec lien</a> »',
'entre « <a href="">guillemets avec lien</a> »',
),
("18 h", "18 h"),
("24 heures", "24 heures"),
("24 heures", "24 heures"),
("18 hibous", "18 hibous"),
("1 j", "1 j"),
("1 jour", "1 jour"),
("2 j", "2 j"),
("2 jours", "2 jours"),
("65 ans", "65 ans"),
("150 g", "150 g"),
("150 g de farine", "150 g de farine"),
("150 gibbons", "150 gibbons"),
("200 mg", "200 mg"),
("à 10 000 kilomètres", "à 10 000 kilomètres"),
("100 %", "100 %"),
("pour 100 % des cas", "pour 100 % des cas"),
(
'<h2 itemprop="name">Est-ce que je peux voyager ?</h2>',
'<h2 itemprop="name">Est-ce que je peux voyager ?</h2>',
),
("Covid-19 :", "Covid-19 :"),
("35,5 °C", "35,5 °C"),
(
"« Comment mettre son masque ? »",
"« Comment mettre son masque ? »",
),
(
"« Comment mettre son masque ! »",
"« Comment mettre son masque ! »",
),
(
"« Comment mettre son masque. »",
"« Comment mettre son masque. »",
),
(
"« Comment mettre son masque… »",
"« Comment mettre son masque… »",
),
],
)
def test_espaces_insecables(in_, out_):
from typographie import typographie
assert typographie(in_) == out_
| 32.619048 | 74 | 0.451095 |
79401ae47d13502daa08de30f27bfcaf909cc099 | 4,806 | py | Python | lldb/test/API/lit.cfg.py | medismailben/llvm-project | e334a839032fe500c3bba22bf976ab7af13ce1c1 | [
"Apache-2.0"
] | null | null | null | lldb/test/API/lit.cfg.py | medismailben/llvm-project | e334a839032fe500c3bba22bf976ab7af13ce1c1 | [
"Apache-2.0"
] | null | null | null | lldb/test/API/lit.cfg.py | medismailben/llvm-project | e334a839032fe500c3bba22bf976ab7af13ce1c1 | [
"Apache-2.0"
] | null | null | null | # -*- Python -*-
# Configuration file for the 'lit' test runner.
import os
import platform
import shlex
import shutil
import lit.formats
# name: The name of this test suite.
config.name = 'lldb-api'
# suffixes: A list of file extensions to treat as test files.
config.suffixes = ['.py']
# test_source_root: The root path where tests are located.
# test_exec_root: The root path where tests should be run.
config.test_source_root = os.path.join(config.lldb_src_root, 'packages',
'Python', 'lldbsuite', 'test')
config.test_exec_root = config.test_source_root
if 'Address' in config.llvm_use_sanitizer:
config.environment['ASAN_OPTIONS'] = 'detect_stack_use_after_return=1'
# macOS flags needed for LLDB built with address sanitizer.
if 'Darwin' in config.host_os and 'x86' in config.host_triple:
import subprocess
resource_dir = subprocess.check_output(
[config.cmake_cxx_compiler,
'-print-resource-dir']).decode('utf-8').strip()
runtime = os.path.join(resource_dir, 'lib', 'darwin',
'libclang_rt.asan_osx_dynamic.dylib')
config.environment['DYLD_INSERT_LIBRARIES'] = runtime
def find_shlibpath_var():
if platform.system() in ['Linux', 'FreeBSD', 'NetBSD', 'SunOS']:
yield 'LD_LIBRARY_PATH'
elif platform.system() == 'Darwin':
yield 'DYLD_LIBRARY_PATH'
elif platform.system() == 'Windows':
yield 'PATH'
# Shared library build of LLVM may require LD_LIBRARY_PATH or equivalent.
if config.shared_libs:
for shlibpath_var in find_shlibpath_var():
# In stand-alone build llvm_shlib_dir specifies LLDB's lib directory while
# llvm_libs_dir specifies LLVM's lib directory.
shlibpath = os.path.pathsep.join(
(config.llvm_shlib_dir, config.llvm_libs_dir,
config.environment.get(shlibpath_var, '')))
config.environment[shlibpath_var] = shlibpath
else:
lit_config.warning("unable to inject shared library path on '{}'".format(
platform.system()))
# Propagate LLDB_CAPTURE_REPRODUCER
if 'LLDB_CAPTURE_REPRODUCER' in os.environ:
config.environment['LLDB_CAPTURE_REPRODUCER'] = os.environ[
'LLDB_CAPTURE_REPRODUCER']
# Clean the module caches in the test build directory. This is necessary in an
# incremental build whenever clang changes underneath, so doing it once per
# lit.py invocation is close enough.
for cachedir in [config.clang_module_cache, config.lldb_module_cache]:
if os.path.isdir(cachedir):
print("Deleting module cache at %s." % cachedir)
shutil.rmtree(cachedir)
# Set a default per-test timeout of 10 minutes. Setting a timeout per test
# requires that killProcessAndChildren() is supported on the platform and
# lit complains if the value is set but it is not supported.
supported, errormsg = lit_config.maxIndividualTestTimeIsSupported
if supported:
lit_config.maxIndividualTestTime = 600
else:
lit_config.warning("Could not set a default per-test timeout. " + errormsg)
# Build dotest command.
dotest_cmd = [config.dotest_path]
dotest_cmd += ['--arch', config.test_arch]
dotest_cmd.extend(config.dotest_args_str.split(';'))
# Library path may be needed to locate just-built clang.
if config.llvm_libs_dir:
dotest_cmd += ['--env', 'LLVM_LIBS_DIR=' + config.llvm_libs_dir]
# Forward ASan-specific environment variables to tests, as a test may load an
# ASan-ified dylib.
for env_var in ('ASAN_OPTIONS', 'DYLD_INSERT_LIBRARIES'):
if env_var in config.environment:
dotest_cmd += ['--inferior-env', env_var + '=' + config.environment[env_var]]
if config.lldb_build_directory:
dotest_cmd += ['--build-dir', config.lldb_build_directory]
if config.lldb_module_cache:
dotest_cmd += ['--lldb-module-cache-dir', config.lldb_module_cache]
if config.clang_module_cache:
dotest_cmd += ['--clang-module-cache-dir', config.clang_module_cache]
if config.lldb_executable:
dotest_cmd += ['--executable', config.lldb_executable]
if config.test_compiler:
dotest_cmd += ['--compiler', config.test_compiler]
if config.dsymutil:
dotest_cmd += ['--dsymutil', config.dsymutil]
if config.filecheck:
dotest_cmd += ['--filecheck', config.filecheck]
if config.lldb_libs_dir:
dotest_cmd += ['--lldb-libs-dir', config.lldb_libs_dir]
# We don't want to force users passing arguments to lit to use `;` as a
# separator. We use Python's simple lexical analyzer to turn the args into a
# list. Pass there arguments last so they can override anything that was
# already configured.
if config.dotest_lit_args_str:
dotest_cmd.extend(shlex.split(config.dotest_lit_args_str))
# Load LLDB test format.
sys.path.append(os.path.join(config.lldb_src_root, "test", "API"))
import lldbtest
# testFormat: The test format to use to interpret tests.
config.test_format = lldbtest.LLDBTest(dotest_cmd)
| 35.865672 | 81 | 0.738452 |
79401af043debb768909871bf4e5a5238afcb4b6 | 3,063 | py | Python | examples/decoding/plot_decoding_spoc_CMC.py | mehrdad-shokri/mne-python | 4c44bd6ae90dd4d5c6c2e925d93f1b2fbf7bda6a | [
"BSD-3-Clause"
] | 3 | 2021-01-04T08:45:56.000Z | 2021-05-19T12:25:59.000Z | examples/decoding/plot_decoding_spoc_CMC.py | mehrdad-shokri/mne-python | 4c44bd6ae90dd4d5c6c2e925d93f1b2fbf7bda6a | [
"BSD-3-Clause"
] | 1 | 2015-10-07T14:19:29.000Z | 2015-10-07T14:19:29.000Z | examples/decoding/plot_decoding_spoc_CMC.py | mehrdad-shokri/mne-python | 4c44bd6ae90dd4d5c6c2e925d93f1b2fbf7bda6a | [
"BSD-3-Clause"
] | 2 | 2021-04-28T11:52:52.000Z | 2021-05-05T02:36:32.000Z | """
====================================
Continuous Target Decoding with SPoC
====================================
Source Power Comodulation (SPoC) [1]_ allows identifying the composition of
orthogonal spatial filters that maximally correlate with a continuous target.
SPoC can be seen as an extension of the CSP for continuous variables.
Here, SPoC is applied to decode the (continuous) fluctuation of an
electromyogram from MEG beta activity using data from
`Cortico-Muscular Coherence example of FieldTrip
<http://www.fieldtriptoolbox.org/tutorial/coherence>`_
References
----------
.. [1] Dahne, S., et al (2014). SPoC: a novel framework for relating the
amplitude of neuronal oscillations to behaviorally relevant parameters.
NeuroImage, 86, 111-122.
"""
# Author: Alexandre Barachant <[email protected]>
# Jean-Remi King <[email protected]>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne import Epochs
from mne.decoding import SPoC
from mne.datasets.fieldtrip_cmc import data_path
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import Ridge
from sklearn.model_selection import KFold, cross_val_predict
# Define parameters
fname = data_path() + '/SubjectCMC.ds'
raw = mne.io.read_raw_ctf(fname)
raw.crop(50., 250.) # crop for memory purposes
# Filter muscular activity to only keep high frequencies
emg = raw.copy().pick_channels(['EMGlft']).load_data()
emg.filter(20., None, fir_design='firwin')
# Filter MEG data to focus on beta band
raw.pick_types(meg=True, ref_meg=True, eeg=False, eog=False).load_data()
raw.filter(15., 30., fir_design='firwin')
# Build epochs as sliding windows over the continuous raw file
events = mne.make_fixed_length_events(raw, id=1, duration=.250)
# Epoch length is 1.5 seconds
meg_epochs = Epochs(raw, events, tmin=0., tmax=1.500, baseline=None,
detrend=1, decim=8)
emg_epochs = Epochs(emg, events, tmin=0., tmax=1.500, baseline=None)
# Prepare classification
X = meg_epochs.get_data()
y = emg_epochs.get_data().var(axis=2)[:, 0] # target is EMG power
# Classification pipeline with SPoC spatial filtering and Ridge Regression
spoc = SPoC(n_components=2, log=True, reg='oas', rank='full')
clf = make_pipeline(spoc, Ridge())
# Define a two fold cross-validation
cv = KFold(n_splits=2, shuffle=False)
# Run cross validaton
y_preds = cross_val_predict(clf, X, y, cv=cv)
# Plot the True EMG power and the EMG power predicted from MEG data
fig, ax = plt.subplots(1, 1, figsize=[10, 4])
times = raw.times[meg_epochs.events[:, 0] - raw.first_samp]
ax.plot(times, y_preds, color='b', label='Predicted EMG')
ax.plot(times, y, color='r', label='True EMG')
ax.set_xlabel('Time (s)')
ax.set_ylabel('EMG Power')
ax.set_title('SPoC MEG Predictions')
plt.legend()
mne.viz.tight_layout()
plt.show()
##############################################################################
# Plot the contributions to the detected components (i.e., the forward model)
spoc.fit(X, y)
spoc.plot_patterns(meg_epochs.info)
| 34.033333 | 78 | 0.709109 |
79401b7bb4c65d981c9ef943cdf8975d7fbbb11c | 2,522 | py | Python | artic/vcfextract.py | ColinAnthony/fieldbioinformatics | 41f9881218ffe22476662e35ac7786ecbd950696 | [
"MIT"
] | null | null | null | artic/vcfextract.py | ColinAnthony/fieldbioinformatics | 41f9881218ffe22476662e35ac7786ecbd950696 | [
"MIT"
] | null | null | null | artic/vcfextract.py | ColinAnthony/fieldbioinformatics | 41f9881218ffe22476662e35ac7786ecbd950696 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import vcf
import sys
import subprocess
import csv
import os
from collections import defaultdict
from operator import attrgetter
def read_vcf(fn):
vcfinfo = {}
vcf_reader = vcf.Reader(open(fn, 'r'))
for record in vcf_reader:
vcfinfo[record.POS] = record
return vcfinfo
def collect_depths(bamfile):
if not os.path.exists(bamfile):
raise SystemExit("bamfile %s doesn't exist" % (bamfile,))
p = subprocess.Popen(['samtools', 'depth', bamfile], stdout=subprocess.PIPE)
out, err = p.communicate()
depths = defaultdict(dict)
for ln in out.decode('utf-8').split("\n"):
if ln:
contig, pos, depth = ln.split("\t")
depths[int(pos)] = int(depth)
return depths
def main():
positions = {}
for sample_tag in sys.argv[1:]:
for vcfset in ['', '.primertrimmed']:
vcffn = "%s%s.vcf" % (sample_tag, vcfset)
if not os.path.exists(vcffn):
continue
print((vcffn, sys.stderr))
vcf_reader = vcf.Reader(filename=vcffn)
for record in vcf_reader:
if len(record.ALT[0]) == 1 and len(record.REF) == 1:
positions[record.POS] = 'snp'
else:
positions[record.POS] = 'indel'
print("pos\tset\tsample\tvartype\tdepth\tsupportfraction\tbasecalledfrequency")
#for run, samples in runs.iteritems():
# for sample_tag in samples.keys():
for sample_tag in sys.argv[1:]:
for vcfset in ['', '.primertrimmed']:
vcffn = "%s%s.vcf" % (sample_tag, vcfset)
if not os.path.exists(vcffn):
print(("%s does not exist" % (vcffn)))
continue
vcffile = read_vcf(vcffn)
bamfn = "%s.primertrimmed.sorted.bam" % (sample_tag)
depths = collect_depths(bamfn)
#1-based pyvcf
for pos, variant_type in list(positions.items()):
if pos-1 in depths:
depth = depths[pos-1]
else:
depth = 0
if pos in vcffile:
info = vcffile[pos].INFO
print(("%s\t%s\t%s\t%s\t%s\t%s\t%s" % (pos, vcfset, sample_tag, variant_type, depth, info['SupportFraction'], info['BaseCalledFraction'])))
else:
print(("%s\t%s\t%s\tinvariant\t%s\t0\t0" % (pos, vcfset, sample_tag, depth)))
if __name__ == "__main__":
main()
| 31.135802 | 159 | 0.55115 |
79401c17bf0b1efa7f09fd1b26b9d1f9142b0e1b | 2,049 | py | Python | compiler/languages.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 2 | 2020-08-02T11:50:27.000Z | 2020-09-21T05:10:32.000Z | compiler/languages.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | compiler/languages.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | import os
# import subprocess
from subprocess import PIPE, Popen
class Language(object):
_FORMAT = None
def __init__(self, fullpath):
self.fullpath = fullpath
self.path, _ = os.path.split(fullpath)
@classmethod
def format(cls):
if cls._FORMAT:
return cls._FORMAT
def parse_path(self):
return os.path.split(self.fullpath)
def run(self, command):
'''run command'''
# with Popen(command, shell=True, stdout=PIPE, stderr=PIPE) as p:
# stdout, errors = p.communicate()
p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE, close_fds=True)
stdout, errors = p.communicate()
p.stdout.close()
return stdout, errors
def compile(self):
'''compile 接口'''
raise NotImplementedError('The inheritance class must implement {} interface'.format('compile'))
def show(self, stdout, errors):
'''show result message'''
if stdout:
print(str(stdout, encoding='utf-8'))
if errors:
print(str(errors, encoding='utf-8'))
class Python(Language):
_FORMAT = '*.py'
def compile(self):
stdout, errors = self.run('/usr/bin/python {}'.format(self.fullpath))
self.show(stdout, errors)
class Cpp(Language):
_FORMAT = '*.cpp'
def compile(self):
path, filename = self.parse_path()
files = [f.lower() for f in os.listdir(path) if os.path.isfile(os.path.join(path, f))]
if 'makefile' in files:
makefile = Makefile(self.fullpath)
return makefile.compile()
else:
name_without_ext, _ = os.path.splitext(filename)
stdout, errors = self.run('clang++ -g -Wall {} -o {}'.format(filename, name_without_ext))
self.show(stdout, errors)
cstdout, cerrors = self.run('{}'.format(name_without_ext))
self.show(cstdout, cerrors)
class Makefile(Language):
_FORMAT = 'Makefile'
def compile(self):
print("Compile Makefile")
| 28.068493 | 104 | 0.599317 |
79401cef0e420ddeb37e4508e745339915bcc118 | 6,101 | py | Python | tests/test_base.py | complexsplit/messer | c272c43aee52d83809d7a9a725c321d67a58345c | [
"Apache-2.0"
] | 2 | 2018-12-27T09:59:33.000Z | 2020-08-29T14:41:13.000Z | tests/test_base.py | complexsplit/messer | c272c43aee52d83809d7a9a725c321d67a58345c | [
"Apache-2.0"
] | 5 | 2019-08-14T13:29:38.000Z | 2019-10-16T18:08:46.000Z | tests/test_base.py | complexsplit/messer | c272c43aee52d83809d7a9a725c321d67a58345c | [
"Apache-2.0"
] | 5 | 2019-01-16T16:13:22.000Z | 2020-09-27T12:03:47.000Z | """
(c) 2018 Adobe. All rights reserved.
This file is licensed to you under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License. You may obtain a copy
of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under
the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS
OF ANY KIND, either express or implied. See the License for the specific language
governing permissions and limitations under the License.
"""
__author__ = 'Jed Glazner, Sandeep Srivastav Vaddiparthy, Florian Noeding, Heiko Hahn'
import os
import types
import inspect
from messer import parse_args, get_default_config
from messer.abstracts import MesserAWSConfig, MesserAzureConfig
from messer.configure import AWSConfigure, AzureConfigure
CFG_FILE = "/".join([os.path.dirname(os.path.realpath(__file__)), 'tests.ini'])
# secret1 and secret2 will be uploaded during tests
DATA_BAG_ITEM_NAME = "secret1"
DATA_BAG_ITEM_FILE = "/".join([os.path.dirname(os.path.realpath(__file__)), 'resources', 'secret1.json'])
DATA_BAG_ITEM_NAME_2 = "secret2"
DATA_BAG_ITEM_FILE_2 = "/".join([os.path.dirname(os.path.realpath(__file__)), 'resources', 'secret2.json'])
# secret3 and secret4 are pre-uploaded and used to verify that access is restricted
DATA_BAG_ITEM_NAME_3 = "secret3"
DATA_BAG_ITEM_NAME_4 = "secret4"
DATA_BAG = "unittest"
SECRETS_FILE = 'adobe-messer-unittest'
def test_create_aws_config():
"""
Ensures that specifying a new config file creates the file.
:return:
"""
args = parse_args(['configure', 'aws', '-c' 'new.ini', '-m' 'adobe-test', '-e' 'Dev', '-r' 'us-east-2', '-b' 'test-bucket'])
cmd = AWSConfigure(args)
cmd.execute()
config = MesserAWSConfig(args.config)
assert config.master_key == 'adobe-test'
assert config.region == 'us-east-2'
# make sure dev gets converted to lower case
assert config.tier == 'dev'
assert config.secrets_bucket == 'test-bucket'
os.remove(config.filename)
def test_config():
"""
Ensures that our configuration is set properly
"""
config = MesserAWSConfig(open(CFG_FILE))
assert config.keys_bucket == "adobe-envelope-keys-bucket-useast1"
assert config.secrets_bucket == "adobe-secrets-bucket-useast1"
assert config.master_key == "adobe-messer-unittest"
assert config.role_arn == "arn:aws:iam::123456789101:role/adobe-messer-unittest"
assert config.role_session_name == "messer_unittest"
assert config.encryption_context == 'messer_unittest_context'
def test_parse_use_default_config():
"""
Ensures that when no config is specified as an argument it attempts to use the file installed via pip
"""
args = parse_args(['data', 'bag', 'create', 'aws', DATA_BAG])
assert isinstance(args.config, types.FileType) is True
assert args.config.name == get_default_config()
def test_parse_use_specified_config():
"""
Ensures that the the -c argument correctly returns a file type
"""
args = parse_args(['data', 'bag', 'create', 'aws', DATA_BAG, '-c', CFG_FILE, ])
assert isinstance(args.config, types.FileType) is True
assert args.config.name == CFG_FILE
def test_parse_create_data_bag():
"""
Make sure that all of the options are available for creating data bags
"""
args = parse_args(['data', 'bag', 'create', 'aws', DATA_BAG])
assert inspect.isfunction(args.command) is True
assert args.command.__name__ == "create_data_bag_aws"
assert args.name == DATA_BAG
def test_parse_create_data_bag_item():
"""
make sure that all of the options are available for creating encrypted data bag items
"""
args = parse_args(['data', 'bag', 'from', 'file', 'aws', DATA_BAG, DATA_BAG_ITEM_FILE,
'--secret-file', SECRETS_FILE])
assert isinstance(args.item, types.FileType)
assert args.item.name == DATA_BAG_ITEM_FILE
assert args.name == DATA_BAG
assert inspect.isfunction(args.command) is True
assert args.command.__name__ == 'upload_data_bag_aws'
assert args.secret_file == SECRETS_FILE
def test_parse_show_data_bag_item():
"""
make sure that all of the options are available for viewing encrypted data bag items
"""
args = parse_args(['data', 'bag', 'show', 'aws', DATA_BAG, DATA_BAG_ITEM_NAME])
assert args.item == DATA_BAG_ITEM_NAME
assert args.name == DATA_BAG
assert inspect.isfunction(args.command) is True
assert args.command.__name__ == 'show_data_bag_aws'
def test_parse_delete_data_bag_item():
"""
Make sure all the options are present for deleting a data bag items
"""
args = parse_args(['data', 'bag', 'delete', 'aws', DATA_BAG, DATA_BAG_ITEM_NAME])
assert args.item == DATA_BAG_ITEM_NAME
assert args.name == DATA_BAG
assert inspect.isfunction(args.command) is True
assert args.command.__name__ == 'delete_data_bag_aws'
def test_parse_delete_data_bag():
"""
Make sure all the options are present for deleting a data bags
"""
args = parse_args(['data', 'bag', 'delete', 'aws', DATA_BAG])
assert args.name == DATA_BAG
assert inspect.isfunction(args.command) is True
assert args.command.__name__ == 'delete_data_bag_aws'
def test_parse_encryption_create():
"""
Make sure all the options are present for creating new cipher text keys
"""
args = parse_args(['encryption', 'create', 'aws', 'adobe-messer-unittest'])
assert args.key_name == 'adobe-messer-unittest'
assert inspect.isfunction(args.command) is True
assert args.command.__name__ == 'create_key_aws'
def test_parse_encryption_rotate():
"""
Make sure all the options are present for rotating existing cipher text keys
"""
args = parse_args(['encryption', 'increment', 'aws', 'adobe-messer-unittest'])
assert args.key_name == 'adobe-messer-unittest'
assert inspect.isfunction(args.command) is True
assert args.command.__name__ == 'increment_key_version_aws'
| 35.47093 | 128 | 0.714801 |
79401e19f73712372f6b0eb5f931237f47f5c84a | 249 | py | Python | backend/university/viewsets/university.py | andriyandrushko0/univowl | da613316021f7b41b133b5b6e360cc6b9db60504 | [
"MIT"
] | null | null | null | backend/university/viewsets/university.py | andriyandrushko0/univowl | da613316021f7b41b133b5b6e360cc6b9db60504 | [
"MIT"
] | null | null | null | backend/university/viewsets/university.py | andriyandrushko0/univowl | da613316021f7b41b133b5b6e360cc6b9db60504 | [
"MIT"
] | null | null | null | from rest_framework import viewsets
from ..models import University
from ..serializers import UniversitySerializer
class UniversityViewSet(viewsets.ModelViewSet):
queryset = University.objects.all()
serializer_class = UniversitySerializer
| 27.666667 | 47 | 0.823293 |
79401e6a27a491bdb3dff453b4b06d12fc8957aa | 17,439 | py | Python | parcels/particlefile.py | jelletreep/parcels | 7ba3e08de7de046474373f6a5fe02835fc99a8dc | [
"MIT"
] | 1 | 2020-10-01T02:27:00.000Z | 2020-10-01T02:27:00.000Z | parcels/particlefile.py | jelletreep/parcels | 7ba3e08de7de046474373f6a5fe02835fc99a8dc | [
"MIT"
] | null | null | null | parcels/particlefile.py | jelletreep/parcels | 7ba3e08de7de046474373f6a5fe02835fc99a8dc | [
"MIT"
] | null | null | null | """Module controlling the writing of ParticleSets to NetCDF file"""
import os
import random
import shutil
import string
from datetime import timedelta as delta
from glob import glob
import netCDF4
import numpy as np
from parcels.tools.error import ErrorCode
from parcels.tools.loggers import logger
try:
from mpi4py import MPI
except:
MPI = None
try:
from parcels._version import version as parcels_version
except:
raise EnvironmentError('Parcels version can not be retrieved. Have you run ''python setup.py install''?')
try:
from os import getuid
except:
# Windows does not have getuid(), so define to simply return 'tmp'
def getuid():
return 'tmp'
__all__ = ['ParticleFile']
def _is_particle_started_yet(particle, time):
"""We don't want to write a particle that is not started yet.
Particle will be written if:
* particle.time is equal to time argument of pfile.write()
* particle.time is before time (in case particle was deleted between previous export and current one)
"""
return (particle.dt*particle.time <= particle.dt*time or np.isclose(particle.time, time))
def _set_calendar(origin_calendar):
if origin_calendar == 'np_datetime64':
return 'standard'
else:
return origin_calendar
class ParticleFile(object):
"""Initialise trajectory output.
:param name: Basename of the output file
:param particleset: ParticleSet to output
:param outputdt: Interval which dictates the update frequency of file output
while ParticleFile is given as an argument of ParticleSet.execute()
It is either a timedelta object or a positive double.
:param write_ondelete: Boolean to write particle data only when they are deleted. Default is False
:param convert_at_end: Boolean to convert npy files to netcdf at end of run. Default is True
:param tempwritedir: directories to write temporary files to during executing.
Default is out-XXXXXX where Xs are random capitals. Files for individual
processors are written to subdirectories 0, 1, 2 etc under tempwritedir
:param pset_info: dictionary of info on the ParticleSet, stored in tempwritedir/XX/pset_info.npy,
used to create NetCDF file from npy-files.
"""
def __init__(self, name, particleset, outputdt=np.infty, write_ondelete=False, convert_at_end=True,
tempwritedir=None, pset_info=None):
self.write_ondelete = write_ondelete
self.convert_at_end = convert_at_end
self.outputdt = outputdt
self.lasttime_written = None # variable to check if time has been written already
self.dataset = None
self.metadata = {}
if pset_info is not None:
for v in pset_info.keys():
setattr(self, v, pset_info[v])
else:
self.name = name
self.particleset = particleset
self.parcels_mesh = self.particleset.fieldset.gridset.grids[0].mesh
self.time_origin = self.particleset.time_origin
self.lonlatdepth_dtype = self.particleset.lonlatdepth_dtype
self.var_names = []
self.var_names_once = []
for v in self.particleset.ptype.variables:
if v.to_write == 'once':
self.var_names_once += [v.name]
elif v.to_write is True:
self.var_names += [v.name]
if len(self.var_names_once) > 0:
self.written_once = []
self.file_list_once = []
self.file_list = []
self.time_written = []
self.maxid_written = -1
if tempwritedir is None:
tempwritedir = os.path.join(os.path.dirname(str(self.name)), "out-%s"
% ''.join(random.choice(string.ascii_uppercase) for _ in range(8)))
if MPI:
mpi_rank = MPI.COMM_WORLD.Get_rank()
self.tempwritedir_base = MPI.COMM_WORLD.bcast(tempwritedir, root=0)
else:
self.tempwritedir_base = tempwritedir
mpi_rank = 0
self.tempwritedir = os.path.join(self.tempwritedir_base, "%d" % mpi_rank)
if pset_info is None: # otherwise arrive here from convert_npydir_to_netcdf
self.delete_tempwritedir()
def open_netcdf_file(self, data_shape):
"""Initialise NetCDF4.Dataset for trajectory output.
The output follows the format outlined in the Discrete Sampling Geometries
section of the CF-conventions:
http://cfconventions.org/cf-conventions/v1.6.0/cf-conventions.html#discrete-sampling-geometries
The current implementation is based on the NCEI template:
http://www.nodc.noaa.gov/data/formats/netcdf/v2.0/trajectoryIncomplete.cdl
:param data_shape: shape of the variables in the NetCDF4 file
"""
extension = os.path.splitext(str(self.name))[1]
fname = self.name if extension in ['.nc', '.nc4'] else "%s.nc" % self.name
if os.path.exists(str(fname)):
os.remove(str(fname))
self.dataset = netCDF4.Dataset(fname, "w", format="NETCDF4")
self.dataset.createDimension("obs", data_shape[1])
self.dataset.createDimension("traj", data_shape[0])
coords = ("traj", "obs")
self.dataset.feature_type = "trajectory"
self.dataset.Conventions = "CF-1.6/CF-1.7"
self.dataset.ncei_template_version = "NCEI_NetCDF_Trajectory_Template_v2.0"
self.dataset.parcels_version = parcels_version
self.dataset.parcels_mesh = self.parcels_mesh
# Create ID variable according to CF conventions
self.id = self.dataset.createVariable("trajectory", "i4", coords,
fill_value=-2**(31)) # maxint32 fill_value
self.id.long_name = "Unique identifier for each particle"
self.id.cf_role = "trajectory_id"
# Create time, lat, lon and z variables according to CF conventions:
self.time = self.dataset.createVariable("time", "f8", coords, fill_value=np.nan)
self.time.long_name = ""
self.time.standard_name = "time"
if self.time_origin.calendar is None:
self.time.units = "seconds"
else:
self.time.units = "seconds since " + str(self.time_origin)
self.time.calendar = 'standard' if self.time_origin.calendar == 'np_datetime64' else self.time_origin.calendar
self.time.axis = "T"
if self.lonlatdepth_dtype is np.float64:
lonlatdepth_precision = "f8"
else:
lonlatdepth_precision = "f4"
self.lat = self.dataset.createVariable("lat", lonlatdepth_precision, coords, fill_value=np.nan)
self.lat.long_name = ""
self.lat.standard_name = "latitude"
self.lat.units = "degrees_north"
self.lat.axis = "Y"
self.lon = self.dataset.createVariable("lon", lonlatdepth_precision, coords, fill_value=np.nan)
self.lon.long_name = ""
self.lon.standard_name = "longitude"
self.lon.units = "degrees_east"
self.lon.axis = "X"
self.z = self.dataset.createVariable("z", lonlatdepth_precision, coords, fill_value=np.nan)
self.z.long_name = ""
self.z.standard_name = "depth"
self.z.units = "m"
self.z.positive = "down"
for vname in self.var_names:
if vname not in ['time', 'lat', 'lon', 'depth', 'id']:
setattr(self, vname, self.dataset.createVariable(vname, "f4", coords, fill_value=np.nan))
getattr(self, vname).long_name = ""
getattr(self, vname).standard_name = vname
getattr(self, vname).units = "unknown"
for vname in self.var_names_once:
setattr(self, vname, self.dataset.createVariable(vname, "f4", "traj", fill_value=np.nan))
getattr(self, vname).long_name = ""
getattr(self, vname).standard_name = vname
getattr(self, vname).units = "unknown"
for name, message in self.metadata.items():
setattr(self.dataset, name, message)
def __del__(self):
if self.convert_at_end:
self.close()
def close(self, delete_tempfiles=True):
"""Close the ParticleFile object by exporting and then deleting
the temporary npy files"""
self.export()
mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0
if mpi_rank == 0:
if delete_tempfiles:
self.delete_tempwritedir(tempwritedir=self.tempwritedir_base)
self.convert_at_end = False
def add_metadata(self, name, message):
"""Add metadata to :class:`parcels.particleset.ParticleSet`
:param name: Name of the metadata variabale
:param message: message to be written
"""
if self.dataset is None:
self.metadata[name] = message
else:
setattr(self.dataset, name, message)
def convert_pset_to_dict(self, pset, time, deleted_only=False):
"""Convert all Particle data from one time step to a python dictionary.
:param pset: ParticleSet object to write
:param time: Time at which to write ParticleSet
:param deleted_only: Flag to write only the deleted Particles
returns two dictionaries: one for all variables to be written each outputdt,
and one for all variables to be written once
"""
data_dict = {}
data_dict_once = {}
time = time.total_seconds() if isinstance(time, delta) else time
if self.lasttime_written != time and \
(self.write_ondelete is False or deleted_only is True):
if pset.size == 0:
logger.warning("ParticleSet is empty on writing as array at time %g" % time)
else:
if deleted_only:
pset_towrite = pset
elif pset[0].dt > 0:
pset_towrite = [p for p in pset if time <= p.time < time + p.dt and np.isfinite(p.id)]
else:
pset_towrite = [p for p in pset if time + p.dt < p.time <= time and np.isfinite(p.id)]
if len(pset_towrite) > 0:
for var in self.var_names:
data_dict[var] = np.array([getattr(p, var) for p in pset_towrite])
self.maxid_written = np.max([self.maxid_written, np.max(data_dict['id'])])
pset_errs = [p for p in pset_towrite if p.state != ErrorCode.Delete and abs(time-p.time) > 1e-3]
for p in pset_errs:
logger.warning_once(
'time argument in pfile.write() is %g, but a particle has time % g.' % (time, p.time))
if time not in self.time_written:
self.time_written.append(time)
if len(self.var_names_once) > 0:
first_write = [p for p in pset if (p.id not in self.written_once) and _is_particle_started_yet(p, time)]
data_dict_once['id'] = np.array([p.id for p in first_write])
for var in self.var_names_once:
data_dict_once[var] = np.array([getattr(p, var) for p in first_write])
self.written_once += [p.id for p in first_write]
if not deleted_only:
self.lasttime_written = time
return data_dict, data_dict_once
def dump_dict_to_npy(self, data_dict, data_dict_once):
"""Buffer data to set of temporary numpy files, using np.save"""
if not os.path.exists(self.tempwritedir):
os.makedirs(self.tempwritedir)
if len(data_dict) > 0:
tmpfilename = os.path.join(self.tempwritedir, str(len(self.file_list)) + ".npy")
with open(tmpfilename, 'wb') as f:
np.save(f, data_dict)
self.file_list.append(tmpfilename)
if len(data_dict_once) > 0:
tmpfilename = os.path.join(self.tempwritedir, str(len(self.file_list)) + '_once.npy')
with open(tmpfilename, 'wb') as f:
np.save(f, data_dict_once)
self.file_list_once.append(tmpfilename)
def dump_psetinfo_to_npy(self):
pset_info = {}
attrs_to_dump = ['name', 'var_names', 'var_names_once', 'time_origin', 'lonlatdepth_dtype',
'file_list', 'file_list_once', 'maxid_written', 'time_written', 'parcels_mesh',
'metadata']
for a in attrs_to_dump:
if hasattr(self, a):
pset_info[a] = getattr(self, a)
with open(os.path.join(self.tempwritedir, 'pset_info.npy'), 'wb') as f:
np.save(f, pset_info)
def write(self, pset, time, deleted_only=False):
"""Write all data from one time step to a temporary npy-file
using a python dictionary. The data is saved in the folder 'out'.
:param pset: ParticleSet object to write
:param time: Time at which to write ParticleSet
:param deleted_only: Flag to write only the deleted Particles
"""
data_dict, data_dict_once = self.convert_pset_to_dict(pset, time, deleted_only=deleted_only)
self.dump_dict_to_npy(data_dict, data_dict_once)
self.dump_psetinfo_to_npy()
def read_from_npy(self, file_list, time_steps, var):
"""Read NPY-files for one variable using a loop over all files.
:param file_list: List that contains all file names in the output directory
:param time_steps: Number of time steps that were written in out directory
:param var: name of the variable to read
"""
data = np.nan * np.zeros((self.maxid_written+1, time_steps))
time_index = np.zeros(self.maxid_written+1, dtype=int)
t_ind_used = np.zeros(time_steps, dtype=int)
# loop over all files
for npyfile in file_list:
try:
data_dict = np.load(npyfile, allow_pickle=True).item()
except NameError:
raise RuntimeError('Cannot combine npy files into netcdf file because your ParticleFile is '
'still open on interpreter shutdown.\nYou can use '
'"parcels_convert_npydir_to_netcdf %s" to convert these to '
'a NetCDF file yourself.\nTo avoid this error, make sure you '
'close() your ParticleFile at the end of your script.' % self.tempwritedir)
id_ind = np.array(data_dict["id"], dtype=int)
t_ind = time_index[id_ind] if 'once' not in file_list[0] else 0
t_ind_used[t_ind] = 1
data[id_ind, t_ind] = data_dict[var]
time_index[id_ind] = time_index[id_ind] + 1
# remove rows and columns that are completely filled with nan values
tmp = data[time_index > 0, :]
return tmp[:, t_ind_used == 1]
def export(self):
"""Exports outputs in temporary NPY-files to NetCDF file"""
if MPI:
# The export can only start when all threads are done.
MPI.COMM_WORLD.Barrier()
if MPI.COMM_WORLD.Get_rank() > 0:
                return  # export only on thread 0
# Retrieve all temporary writing directories and sort them in numerical order
temp_names = sorted(glob(os.path.join("%s" % self.tempwritedir_base, "*")),
key=lambda x: int(os.path.basename(x)))
if len(temp_names) == 0:
raise RuntimeError("No npy files found in %s" % self.tempwritedir_base)
global_maxid_written = -1
global_time_written = []
global_file_list = []
if len(self.var_names_once) > 0:
global_file_list_once = []
for tempwritedir in temp_names:
if os.path.exists(tempwritedir):
pset_info_local = np.load(os.path.join(tempwritedir, 'pset_info.npy'), allow_pickle=True).item()
global_maxid_written = np.max([global_maxid_written, pset_info_local['maxid_written']])
global_time_written += pset_info_local['time_written']
global_file_list += pset_info_local['file_list']
if len(self.var_names_once) > 0:
global_file_list_once += pset_info_local['file_list_once']
self.maxid_written = global_maxid_written
self.time_written = np.unique(global_time_written)
for var in self.var_names:
data = self.read_from_npy(global_file_list, len(self.time_written), var)
if var == self.var_names[0]:
self.open_netcdf_file(data.shape)
varout = 'z' if var == 'depth' else var
getattr(self, varout)[:, :] = data
if len(self.var_names_once) > 0:
for var in self.var_names_once:
getattr(self, var)[:] = self.read_from_npy(global_file_list_once, 1, var)
self.dataset.close()
def delete_tempwritedir(self, tempwritedir=None):
"""Deleted all temporary npy files
:param tempwritedir Optional path of the directory to delete
"""
if tempwritedir is None:
tempwritedir = self.tempwritedir
if os.path.exists(tempwritedir):
shutil.rmtree(tempwritedir)
| 44.601023 | 124 | 0.617123 |
79401eca6dfe77303b43913f338473ceb10db6da | 1,773 | py | Python | pythonCodeSnippets/TextFileViewer.py | erichstuder/random | 233cf7538a04b4144b700fee7e955744efd204ce | [
"MIT"
] | 1 | 2018-12-08T07:42:04.000Z | 2018-12-08T07:42:04.000Z | pythonCodeSnippets/TextFileViewer.py | erichstuder/random | 233cf7538a04b4144b700fee7e955744efd204ce | [
"MIT"
] | null | null | null | pythonCodeSnippets/TextFileViewer.py | erichstuder/random | 233cf7538a04b4144b700fee7e955744efd204ce | [
"MIT"
] | null | null | null |
"""
IT - Internal Tracer
Copyright (C) 2019 Erich Studer
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
import tkinter as tk
import threading
import os
class TextFileViewer:
__root = None
__textWidget = None
__filePath = None
def __worker(self):
self.__root = tk.Tk()
self.__root.title(self.__filePath)
# root.iconbitmap('dummy.ico')
self.__textWidget = tk.Text(self.__root, height=20, width=120)
self.__textWidget.configure(background='black', foreground='white', state='disabled')
self.__textWidget.pack()
self.__updater()
self.__root.mainloop()
def __updater(self):
with open(self.__filePath, 'r') as file:
self.__textWidget.configure(state='normal')
self.__textWidget.delete('1.0', tk.END)
self.__textWidget.insert(tk.END, file.read())
self.__textWidget.configure(state='disabled')
self.__root.after(1000, self.__updater)
def __init__(self, filePath):
if not os.path.exists(filePath):
raise FileNotFoundError
self.__filePath = filePath
threading.Thread(target=self.__worker).start()
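# Minimal usage sketch (assumes a readable text file at the given path):
#   TextFileViewer('trace.log')  # spawns a background thread that opens a Tk window
#                                # and re-reads the file every second via root.after(1000, ...)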
| 34.096154 | 93 | 0.697688 |
79401fad2ff11175b447e05dfe763807c624f061 | 11,586 | py | Python | eval_scores_rgb_flow.py | Ewenwan/DTPP | 0a10dd8c61596d5326fbbe70dcac0eae59088c27 | [
"BSD-2-Clause"
] | 1 | 2019-05-07T01:00:18.000Z | 2019-05-07T01:00:18.000Z | eval_scores_rgb_flow.py | Ewenwan/DTPP | 0a10dd8c61596d5326fbbe70dcac0eae59088c27 | [
"BSD-2-Clause"
] | null | null | null | eval_scores_rgb_flow.py | Ewenwan/DTPP | 0a10dd8c61596d5326fbbe70dcac0eae59088c27 | [
"BSD-2-Clause"
] | 1 | 2019-09-18T05:27:50.000Z | 2019-09-18T05:27:50.000Z |
import argparse
import sys
import numpy as np
import scipy.io as sio
from pyActionRecog.utils.video_funcs import default_aggregation_func
from pyActionRecog.utils.metrics import mean_class_accuracy
# parser = argparse.ArgumentParser()
# parser.add_argument('score_files', nargs='+', type=str)
# parser.add_argument('--score_weights', nargs='+', type=float, default=None)
# parser.add_argument('--crop_agg', type=str, choices=['max', 'mean'], default='mean')
# args = parser.parse_args()
# score_files = ["ucf101_split_1_rgb_seg124_3dropout_stage_2_lr_0.00001_iter_2100.npz", "ucf101_split_1_flow_seg124_3dropout_stage_2_lr_0.00001_iter_1500.npz"]
# score_files = ["ucf101_split_1_rgb_seg124_3dropout_imagenet_stage_2_lr_0.0001_iter_600.npz", "ucf101_split_1_flow_seg124_3dropout_stage_2_lr_0.00001_iter_1500.npz"]
# score_files = ["ucf101_split_1_rgb_seg124_3dropout_stage_2_lr_0.00001_iter_2100.npz", "ucf101_split_1_flow_seg124_dropout_0.9_lr_0.00001_iter_900.npz"]
#score_files = ["ucf101_split_2_rgb_seg124_3dropout_stage_2_imagenet_snapshot_lr_0.0001_iter_4500.npz", "ucf101_split_2_flow_seg124_3dropout_stage_2_lr_0.00001_iter_1200.npz"]
# score_files = ["ucf101_split_3_rgb_seg124_3dropout_imagenet_lr_0.0001_iter_600.npz", "ucf101_split_3_flow_seg124_3dropout_snapshot_stage_2_lr_0.0001_iter_900.npz"]
# score_files = ["ucf101_split_1_rgb_tpp_delete_dropout_lr_0.00001_iter_600.npz", "ucf101_split_1_flow_tpp_delete_dropout_lr_0.00001_iter_1500.npz"]
# score_files = ["hmdb51_split_1_rgb_tpp_delete_dropout_lr_0.0001_iter_112.npz", "hmdb51_split_1_flow_tpp_delete_dropout_lr_0.0001_iter_672.npz"]
# score_files = ["hmdb51_split_2_rgb_tpp_delete_dropout_lr_0.00001_iter_112.npz", "hmdb51_split_2_flow_tpp_delete_dropout_lr_0.0001_iter_112.npz"]
# score_files = ["ucf101_split_2_rgb_tpp_delete_dropout_lr_0.0001_iter_1800.npz", "ucf101_split_2_flow_tpp_delete_dropout_sec_lr_0.00001_iter_1200.npz"]
# score_files = ["ucf101_split_3_rgb_tpp_delete_dropout_lr_0.0001_iter_900.npz", "ucf101_split_3_flow_tpp_delete_dropout_lr_0.00001_iter_1800.npz"]
# score_files = ["hmdb51_split_3_rgb_tpp_delete_dropout_lr_0.001_iter_2016.npz", "hmdb51_split_3_flow_tpp_delete_dropout_lr_0.0001_iter_224.npz"]
# score_files = ["ucf101_split_1_rgb_tpp_p1_lr_0.00001_iter_600.npz", "ucf101_split_1_flow_tpp_p1_lr_0.0001_iter_600.npz"]
# score_files = ["ucf101_split_1_rgb_tpp_delete_dropout_lr_0.00001_iter_600_varied_32.npz", "ucf101_split_1_flow_tpp_delete_dropout_lr_0.00001_iter_1500_varied_32.npz"]
# score_files = ["ucf101_split_1_rgb_tpp_p12_lr_0.00001_iter_1200.npz", "ucf101_split_1_flow_tpp_p12_lr_0.00001_iter_1200.npz"]
# score_files = ['ucf101_split_1_rgb_seg3_lr_0.00001_iter_300.npz', 'ucf101_split_1_flow_seg3_lr_0.0001_iter_600.npz']
# score_files = ['hmdb51_split_1_rgb_tpp_kinetics_lr_0.00001_iter_336.npz', 'hmdb51_split_1_flow_tpp_kinetics_lr_0.0001_iter_112.npz']
# score_files = ['hmdb51_split_2_rgb_tpp_kinetics_snapshot_lr_0.00001_iter_224.npz', 'hmdb51_split_2_flow_tpp_kinetics_lr_0.00001_iter_224.npz']
score_files = ['hmdb51_split_3_rgb_tpp_kinetics_lr_0.0001_iter_672.npz',
'hmdb51_split_3_flow_tpp_kinetics_lr_0.00001_iter_224.npz']
# score_files = ['ucf101_split_1_rgb_tpp_p1248_lr_0.001_iter_600.npz', 'ucf101_split_1_flow_tpp_p1248_lr_0.00001_iter_1200.npz']
# score_files = ["ucf101_split_1_rgb_tpp_delete_dropout_lr_0.00001_iter_600.npz", 'ucf101_split_1_flow_tpp_imagenet_lr_0.00001_iter_900.npz']
# score_files = ['ucf101_split_1_rgb_tpp_p124_ave_snapshot_lr_0.00001_iter_600.npz', 'ucf101_split_1_flow_tpp_p124_ave_lr_0.00001_iter_600.npz']
# score_files = ["ucf101_split_1_rgb_tpp_freeze_cnn_lr_0.00001_iter_600.npz", 'ucf101_split_1_flow_tpp_freeze_cnn_lr_0.00001_iter_1200.npz']
# score_files = ['ucf101_split_1_rgb_tpp_kinetics_lr_0.0001_iter_1200.npz', 'ucf101_split_1_flow_tpp_kinetics_lr_0.0001_iter_600.npz'] ### 97.7%
score_files = ['ucf101_split_2_rgb_tpp_kinetics_lr_0.00001_iter_300.npz', 'ucf101_split_2_flow_tpp_kinetics_lr_0.0001_iter_600.npz']
score_files = ['ucf101_split_2_rgb_tpp_kinetics_st_0.001_lr_0.00001_iter_600.npz', 'ucf101_split_2_flow_tpp_kinetics_lr_0.0001_iter_600.npz'] ### 97.8 %
score_files = ['ucf101_split_1_rgb_tpp_kinetics_lr_0.0001_iter_1200.npz', 'ucf101_split_1_flow_tpp_kinetics_st_0.001_lr_0.0001_iter_900.npz'] ### 97.8%
score_files = ["hmdb51_split_1_rgb_tpp_delete_dropout_lr_0.0001_iter_112.npz", "hmdb51_split_1_flow_tpp_delete_dropout_lr_0.0001_iter_672.npz"]
score_files = ['ucf101_split_2_rgb_tpp_kinetics_st_0.001_lr_0.00001_iter_600.npz', 'ucf101_split_2_flow_tpp_kinetics_st_0.001_lr_0.0001_iter_600.npz']
score_files = ['ucf101_split_1_rgb_tpp_seg_7_lr_0.00001_iter_300_varied_50.npz',
'ucf101_split_1_flow_tpp_seg_7_lr_0.0001_iter_900_varied_50.npz']
score_files = ["ucf101_split_1_rgb_tpp_seg_4_lr_0.0001_iter_900_varied_50.npz",
"ucf101_split_1_flow_tpp_seg_4_lr_0.00001_iter_1200_varied_50.npz"]
score_files = ["ucf101_split_1_rgb_tpp_delete_dropout_lr_0.00001_iter_600_varied_50.npz",
"ucf101_split_1_flow_tpp_delete_dropout_lr_0.00001_iter_1500_varied_50.npz"]
score_files = ['ucf101_split_1_rgb_tpp_kinetics_lr_0.0001_iter_1200.npz', 'ucf101_split_1_flow_tpp_kinetics_st_0.001_lr_0.0001_iter_900.npz']
score_files = ['ucf101_split_2_rgb_tpp_kinetics_st_0.001_lr_0.00001_iter_600.npz', 'ucf101_split_2_flow_tpp_kinetics_st_0.001_lr_0.0001_iter_600.npz']
score_files = ['ucf101_split_3_rgb_tpp_kinetics_lr_0.0001_iter_300.npz', 'ucf101_split_3_flow_tpp_kinetics_st_0.001_lr_0.00001_iter_300.npz']
score_files = ['ucf101_split_3_rgb_tpp_kinetics_st_0.001_lr_0.0001_iter_900.npz', 'ucf101_split_3_flow_tpp_kinetics_st_0.001_lr_0.00001_iter_300.npz']
score_files = ["hmdb51_split_1_rgb_tpp_delete_dropout_lr_0.0001_iter_112.npz", "hmdb51_split_1_flow_tpp_delete_dropout_lr_0.0001_iter_672.npz"]
score_files = ["hmdb51_split_1_tsn_rgb_reference_bn_inception_new.npz", "hmdb51_split_1_tsn_flow_reference_bn_inception_new.npz"]
score_files = ['ucf101_split_3_rgb_tpp_kinetics_st_0.01_lr_0.0001_iter_1500.npz', 'ucf101_split_3_flow_tpp_kinetics_st_0.001_lr_0.00001_iter_300.npz']
# save_scores = 'ucf101_split_2_rgb_tpp_kinetics_lr_0.00001_iter_300' ### 92.5%
# save_scores = 'ucf101_split_2_flow_tpp_kinetics_lr_0.0001_iter_600' ### 96.7%
# save_scores = ### 93.81%
# score_files = ['ucf101_split_3_rgb_tpp_kinetics_lr_0.0001_iter_300.npz', 'ucf101_split_3_flow_tpp_kinetics_lr_0.0001_iter_1800.npz']
crop_agg = "mean"
xxxx = 0.5
score_npz_files = [np.load(x) for x in score_files]
score_list = [x['scores'][:, 0] for x in score_npz_files]
label_list = [x['labels'] for x in score_npz_files]
# label verification
# score_aggregation
agg_score_list = []
for score_vec in score_list:
agg_score_vec = [default_aggregation_func(x, normalization=False, crop_agg=getattr(np, crop_agg)) for x in score_vec]
agg_score_list.append(np.array(agg_score_vec))
split = score_files[0].split("_")[2]
score_weights = [xxxx, 1.0 - xxxx]
# #
# if score_weights is None:
# score_weights = [1] * len(score_npz_files)
# else:
# score_weights = score_weights
# if len(score_weights) != len(score_npz_files):
# raise ValueError("Only {} weight specifed for a total of {} score files"
# .format(len(score_weights), len(score_npz_files)))
#
# final_scores = np.zeros_like(agg_score_list[0])
# for i, agg_score in enumerate(agg_score_list):
# final_scores += agg_score * score_weights[i]
#
# print "split: ", split
# # accuracy
# # for x in final_scores:
# # xx = x[0]
# # xxx = xx[0]
# ff = [x[0][0] for x in final_scores]
# acc, class_acc = mean_class_accuracy(ff, label_list[0])
# print 'Final accuracy {:02f}%'.format(acc * 100)
# print "rgb_score_weight: ", xxxx
# print class_acc
# print "\n"
# deep temporal pyramid pooling
## only network prediction
for ii in xrange(0,11):
xxxx = ii * 1.0 /10
score_weights = [xxxx, 1.0-xxxx]
if score_weights is None:
score_weights = [1] * len(score_npz_files)
else:
score_weights = score_weights
if len(score_weights) != len(score_npz_files):
raise ValueError("Only {} weight specifed for a total of {} score files"
.format(len(score_weights), len(score_npz_files)))
final_scores = np.zeros_like(agg_score_list[0])
for i, agg_score in enumerate(agg_score_list):
final_scores += agg_score * score_weights[i]
print "split: ", split
# accuracy
# for x in final_scores:
# xx = x[0]
# xxx = xx[0]
ff = [x[0][0] for x in final_scores]
acc, class_acc = mean_class_accuracy(ff, label_list[0])
print 'Final accuracy {:02f}%'.format(acc * 100)
print "rgb_score_weight: ", xxxx
print "\n"
# MIFS fusion with our method ####
# xxxx = 0.4
# score_weights = [xxxx, 1.0-xxxx]
# if score_weights is None:
# score_weights = [1] * len(score_npz_files)
# else:
# score_weights = score_weights
# if len(score_weights) != len(score_npz_files):
# raise ValueError("Only {} weight specifed for a total of {} score files"
# .format(len(score_weights), len(score_npz_files)))
#
# final_scores = np.zeros_like(agg_score_list[0])
# for i, agg_score in enumerate(agg_score_list):
# final_scores += agg_score * score_weights[i]
#
# print "split: ", split
# ff = [x[0][0] for x in final_scores]
# acc = mean_class_accuracy(ff, label_list[0])
# print 'Final accuracy {:02f}%'.format(acc * 100)
# print "rgb_score_weight: ", xxxx
# print "\n"
#
# test_score = "test_score_" + split
# matfn="MIFS_scores/hmdb/" + test_score
# data=sio.loadmat(matfn)
# MIFS_score = np.array(data[test_score])
# MIFS_score = MIFS_score.transpose(1,0)
# MIFS_score = MIFS_score.reshape(-1,1,1,51)
# for i in xrange(0,11):
# MIFS_score_weight = i * 1.0 / 10
# final_scores = (1-MIFS_score_weight) * final_scores + MIFS_score_weight * MIFS_score
# # accuracy
# # for x in final_scores:
# # xx = x[0]
# # xxx = xx[0]
# ff = [x[0][0] for x in final_scores]
# # ff = final_scores
# acc = mean_class_accuracy(ff, label_list[0])
#
# print 'Final accuracy {:02f}%'.format(acc * 100)
# print "MIFS_score_weight: ", MIFS_score_weight
# print "\n"
# iDT fusion with our method ####
#
# xxxx = 0.5
# score_weights = [xxxx, 1.0 - xxxx]
# if score_weights is None:
# score_weights = [1] * len(score_npz_files)
# else:
# score_weights = score_weights
# if len(score_weights) != len(score_npz_files):
# raise ValueError("Only {} weight specifed for a total of {} score files"
# .format(len(score_weights), len(score_npz_files)))
#
# final_scores = np.zeros_like(agg_score_list[0])
# for i, agg_score in enumerate(agg_score_list):
# final_scores += agg_score * score_weights[i]
#
# test_score = "idt_hmdb_test_score_" + split
# matfn= "iDT_scores/" + test_score + ".mat"
# data=sio.loadmat(matfn)
# iDT_score = np.array(data[test_score])
# iDT_score = iDT_score.transpose(1,0)
# iDT_score = iDT_score.reshape(-1,1,1,51)
#
# for i in xrange(0,11):
# iDT_score_weight = i * 1.0 / 10
# final_scores = (1-iDT_score_weight) * final_scores + iDT_score_weight * iDT_score
# # accuracy
# # for x in final_scores:
# # xx = x[0]
# # xxx = xx[0]
# ff = [x[0][0] for x in final_scores]
# # ff = final_scores
# acc = mean_class_accuracy(ff, label_list[0])
# print 'Final accuracy {:02f}%'.format(acc * 100)
# print "iDT_score_weight: ", iDT_score_weight
# print '\n'
| 49.512821 | 175 | 0.763421 |
79402228fac0b4ad572b6c4c3d87e04145bf7f23 | 436 | py | Python | .history/spider/car_spider_20201124003716.py | KustomApe/nerdape | aef6fb2d1f8c364b26d91bf8570b4487a24de69a | [
"MIT"
] | null | null | null | .history/spider/car_spider_20201124003716.py | KustomApe/nerdape | aef6fb2d1f8c364b26d91bf8570b4487a24de69a | [
"MIT"
] | null | null | null | .history/spider/car_spider_20201124003716.py | KustomApe/nerdape | aef6fb2d1f8c364b26d91bf8570b4487a24de69a | [
"MIT"
] | null | null | null |
from selenium import webdriver
import pandas as pd
import time
"""[Initial setting]
Initial settings
"""
options = webdriver.ChromeOptions()
options.add_argument('--headless')
options.add_argument('--disable-gpu')
options.add_argument('--lang=ja')
browser = webdriver.Chrome(chrome_options=options, executable_path='./chromedriver')
df = pd.DataFrame(columns=['name', 'image', 'price', 'category', 'car'])
url = 'https://motorz-garage.com/parts/'
| 31.142857 | 84 | 0.745413 |
794022ac5116d3c5a1b194d33516876384f2375c | 77,618 | py | Python | rasa/shared/core/domain.py | alexyuwen/rasa | cf04a71c980675db9968a79bb8900c96c70cc78c | [
"Apache-2.0"
] | 1 | 2021-04-29T18:21:20.000Z | 2021-04-29T18:21:20.000Z | rasa/shared/core/domain.py | alexyuwen/rasa | cf04a71c980675db9968a79bb8900c96c70cc78c | [
"Apache-2.0"
] | null | null | null | rasa/shared/core/domain.py | alexyuwen/rasa | cf04a71c980675db9968a79bb8900c96c70cc78c | [
"Apache-2.0"
] | 1 | 2021-04-29T18:27:46.000Z | 2021-04-29T18:27:46.000Z |
import copy
import collections
import json
import logging
import os
from enum import Enum
from pathlib import Path
from typing import (
Any,
Dict,
List,
NamedTuple,
NoReturn,
Optional,
Set,
Text,
Tuple,
Union,
TYPE_CHECKING,
Iterable,
)
from rasa.shared.constants import (
DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES,
DEFAULT_CARRY_OVER_SLOTS_TO_NEW_SESSION,
DOMAIN_SCHEMA_FILE,
DOCS_URL_DOMAINS,
DOCS_URL_FORMS,
DOCS_URL_MIGRATION_GUIDE,
LATEST_TRAINING_DATA_FORMAT_VERSION,
UTTER_PREFIX,
DOCS_URL_RESPONSES,
REQUIRED_SLOTS_KEY,
IGNORED_INTENTS,
)
import rasa.shared.core.constants
from rasa.shared.exceptions import RasaException, YamlException, YamlSyntaxException
import rasa.shared.nlu.constants
import rasa.shared.utils.validation
import rasa.shared.utils.io
import rasa.shared.utils.common
from rasa.shared.core.events import SlotSet, UserUttered
from rasa.shared.core.slots import Slot, CategoricalSlot, TextSlot, AnySlot
from rasa.shared.utils.validation import KEY_TRAINING_DATA_FORMAT_VERSION
if TYPE_CHECKING:
from rasa.shared.core.trackers import DialogueStateTracker
CARRY_OVER_SLOTS_KEY = "carry_over_slots_to_new_session"
SESSION_EXPIRATION_TIME_KEY = "session_expiration_time"
SESSION_CONFIG_KEY = "session_config"
USED_ENTITIES_KEY = "used_entities"
USE_ENTITIES_KEY = "use_entities"
IGNORE_ENTITIES_KEY = "ignore_entities"
IS_RETRIEVAL_INTENT_KEY = "is_retrieval_intent"
ENTITY_ROLES_KEY = "roles"
ENTITY_GROUPS_KEY = "groups"
KEY_SLOTS = "slots"
KEY_INTENTS = "intents"
KEY_ENTITIES = "entities"
KEY_RESPONSES = "responses"
KEY_ACTIONS = "actions"
KEY_FORMS = "forms"
KEY_E2E_ACTIONS = "e2e_actions"
KEY_RESPONSES_TEXT = "text"
ALL_DOMAIN_KEYS = [
KEY_SLOTS,
KEY_FORMS,
KEY_ACTIONS,
KEY_ENTITIES,
KEY_INTENTS,
KEY_RESPONSES,
KEY_E2E_ACTIONS,
]
PREV_PREFIX = "prev_"
# State is a dictionary with keys (USER, PREVIOUS_ACTION, SLOTS, ACTIVE_LOOP)
# representing the origin of a SubState;
# the values are SubStates, that contain the information needed for featurization
SubState = Dict[Text, Union[Text, Tuple[Union[float, Text]]]]
State = Dict[Text, SubState]
logger = logging.getLogger(__name__)
class InvalidDomain(RasaException):
"""Exception that can be raised when domain is not valid."""
class ActionNotFoundException(ValueError, RasaException):
"""Raised when an action name could not be found."""
class SessionConfig(NamedTuple):
"""The Session Configuration."""
session_expiration_time: float # in minutes
carry_over_slots: bool
@staticmethod
def default() -> "SessionConfig":
"""Returns the SessionConfig with the default values."""
return SessionConfig(
DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES,
DEFAULT_CARRY_OVER_SLOTS_TO_NEW_SESSION,
)
def are_sessions_enabled(self) -> bool:
"""Returns a boolean value depending on the value of session_expiration_time."""
return self.session_expiration_time > 0
class Domain:
"""The domain specifies the universe in which the bot's policy acts.
A Domain subclass provides the actions the bot can take, the intents
and entities it can recognise."""
@classmethod
def empty(cls) -> "Domain":
return cls([], [], [], {}, [], {})
@classmethod
def load(cls, paths: Union[List[Union[Path, Text]], Text, Path]) -> "Domain":
if not paths:
raise InvalidDomain(
"No domain file was specified. Please specify a path "
"to a valid domain file."
)
elif not isinstance(paths, list) and not isinstance(paths, set):
paths = [paths]
domain = Domain.empty()
for path in paths:
other = cls.from_path(path)
domain = domain.merge(other)
return domain
@classmethod
def from_path(cls, path: Union[Text, Path]) -> "Domain":
path = os.path.abspath(path)
if os.path.isfile(path):
domain = cls.from_file(path)
elif os.path.isdir(path):
domain = cls.from_directory(path)
else:
raise InvalidDomain(
"Failed to load domain specification from '{}'. "
"File not found!".format(os.path.abspath(path))
)
return domain
@classmethod
def from_file(cls, path: Text) -> "Domain":
"""Loads the `Domain` from a YAML file."""
return cls.from_yaml(rasa.shared.utils.io.read_file(path), path)
@classmethod
def from_yaml(cls, yaml: Text, original_filename: Text = "") -> "Domain":
"""Loads the `Domain` from YAML text after validating it."""
try:
rasa.shared.utils.validation.validate_yaml_schema(yaml, DOMAIN_SCHEMA_FILE)
data = rasa.shared.utils.io.read_yaml(yaml)
if not rasa.shared.utils.validation.validate_training_data_format_version(
data, original_filename
):
return Domain.empty()
return cls.from_dict(data)
except YamlException as e:
e.filename = original_filename
raise e
@classmethod
def from_dict(cls, data: Dict) -> "Domain":
"""Deserializes and creates domain.
Args:
data: The serialized domain.
Returns:
The instantiated `Domain` object.
"""
responses = data.get(KEY_RESPONSES, {})
slots = cls.collect_slots(data.get(KEY_SLOTS, {}))
additional_arguments = data.get("config", {})
session_config = cls._get_session_config(data.get(SESSION_CONFIG_KEY, {}))
intents = data.get(KEY_INTENTS, {})
forms = data.get(KEY_FORMS, {})
_validate_slot_mappings(forms)
return cls(
intents,
data.get(KEY_ENTITIES, {}),
slots,
responses,
data.get(KEY_ACTIONS, []),
data.get(KEY_FORMS, {}),
data.get(KEY_E2E_ACTIONS, []),
session_config=session_config,
**additional_arguments,
)
@staticmethod
def _get_session_config(session_config: Dict) -> SessionConfig:
session_expiration_time_min = session_config.get(SESSION_EXPIRATION_TIME_KEY)
if session_expiration_time_min is None:
session_expiration_time_min = DEFAULT_SESSION_EXPIRATION_TIME_IN_MINUTES
carry_over_slots = session_config.get(
CARRY_OVER_SLOTS_KEY, DEFAULT_CARRY_OVER_SLOTS_TO_NEW_SESSION,
)
return SessionConfig(session_expiration_time_min, carry_over_slots)
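# Example (domain YAML, illustrative): `session_config: {session_expiration_time: 60,
# carry_over_slots_to_new_session: true}` resolves to SessionConfig(60, True); missing keys
# fall back to the defaults used above.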
@classmethod
def from_directory(cls, path: Text) -> "Domain":
"""Loads and merges multiple domain files recursively from a directory tree."""
domain = Domain.empty()
for root, _, files in os.walk(path, followlinks=True):
for file in files:
full_path = os.path.join(root, file)
if Domain.is_domain_file(full_path):
other = Domain.from_file(full_path)
domain = other.merge(domain)
return domain
def merge(self, domain: Optional["Domain"], override: bool = False) -> "Domain":
"""Merge this domain with another one, combining their attributes.
List attributes like ``intents`` and ``actions`` will be deduped
and merged. Single attributes will be taken from `self` unless
override is `True`, in which case they are taken from `domain`."""
if not domain or domain.is_empty():
return self
if self.is_empty():
return domain
domain_dict = domain.as_dict()
combined = self.as_dict()
def merge_dicts(
d1: Dict[Text, Any],
d2: Dict[Text, Any],
override_existing_values: bool = False,
) -> Dict[Text, Any]:
if override_existing_values:
a, b = d1.copy(), d2.copy()
else:
a, b = d2.copy(), d1.copy()
a.update(b)
return a
def merge_lists(l1: List[Any], l2: List[Any]) -> List[Any]:
return sorted(list(set(l1 + l2)))
def merge_lists_of_dicts(
dict_list1: List[Dict],
dict_list2: List[Dict],
override_existing_values: bool = False,
) -> List[Dict]:
dict1 = {list(i.keys())[0]: i for i in dict_list1}
dict2 = {list(i.keys())[0]: i for i in dict_list2}
merged_dicts = merge_dicts(dict1, dict2, override_existing_values)
return list(merged_dicts.values())
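# e.g. merge_lists_of_dicts([{"greet": {...}}], [{"greet": {...}}, {"bye": {}}]) keys each
# list by intent name, so a duplicated "greet" entry is merged (which side wins depends on
# `override`) rather than appearing twice in the combined domain.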
if override:
config = domain_dict["config"]
for key, val in config.items():
combined["config"][key] = val
if override or self.session_config == SessionConfig.default():
combined[SESSION_CONFIG_KEY] = domain_dict[SESSION_CONFIG_KEY]
combined[KEY_INTENTS] = merge_lists_of_dicts(
combined[KEY_INTENTS], domain_dict[KEY_INTENTS], override
)
# remove existing forms from new actions
for form in combined[KEY_FORMS]:
if form in domain_dict[KEY_ACTIONS]:
domain_dict[KEY_ACTIONS].remove(form)
for key in [KEY_ENTITIES, KEY_ACTIONS, KEY_E2E_ACTIONS]:
combined[key] = merge_lists(combined[key], domain_dict[key])
for key in [KEY_FORMS, KEY_RESPONSES, KEY_SLOTS]:
combined[key] = merge_dicts(combined[key], domain_dict[key], override)
return self.__class__.from_dict(combined)
@staticmethod
def collect_slots(slot_dict: Dict[Text, Any]) -> List[Slot]:
slots = []
# make a copy to not alter the input dictionary
slot_dict = copy.deepcopy(slot_dict)
# Don't sort the slots, see https://github.com/RasaHQ/rasa-x/issues/3900
for slot_name in slot_dict:
slot_type = slot_dict[slot_name].pop("type", None)
slot_class = Slot.resolve_by_type(slot_type)
slot = slot_class(slot_name, **slot_dict[slot_name])
slots.append(slot)
return slots
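# Example (illustrative slot definition): {"cuisine": {"type": "text",
# "influence_conversation": True}} resolves "text" via Slot.resolve_by_type and yields
# TextSlot("cuisine", influence_conversation=True).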
@staticmethod
def _transform_intent_properties_for_internal_use(
intent: Dict[Text, Any],
entities: List[Text],
roles: Dict[Text, List[Text]],
groups: Dict[Text, List[Text]],
) -> Dict[Text, Any]:
"""Transforms the intent's parameters in a format suitable for internal use.
When an intent is retrieved from the `domain.yml` file, it contains two
parameters, the `use_entities` and the `ignore_entities` parameter. With
the values of these two parameters the Domain class is updated, a new
parameter is added to the intent called `used_entities` and the two
previous parameters are deleted. This happens because internally only the
parameter `used_entities` is needed to list all the entities that should be
used for this intent.
Args:
intent: The intent as retrieved from the `domain.yml` file thus having two
parameters, the `use_entities` and the `ignore_entities` parameter.
entities: All entities as provided by a domain file.
roles: All roles for entities as provided by a domain file.
groups: All groups for entities as provided by a domain file.
Returns:
The intent with the new format thus having only one parameter called
`used_entities` since this is the expected format of the intent
when used internally.
"""
name, properties = list(intent.items())[0]
if properties:
properties.setdefault(USE_ENTITIES_KEY, True)
else:
raise InvalidDomain(
f"In the `domain.yml` file, the intent '{name}' cannot have value of"
f" `{type(properties)}`. If you have placed a ':' character after the"
f" intent's name without adding any additional parameters to this"
f" intent then you would need to remove the ':' character. Please see"
f" {rasa.shared.constants.DOCS_URL_DOMAINS} for more information on how"
f" to correctly add `intents` in the `domain` and"
f" {rasa.shared.constants.DOCS_URL_INTENTS} for examples on"
f" when to use the ':' character after an intent's name."
)
properties.setdefault(IGNORE_ENTITIES_KEY, [])
if not properties[USE_ENTITIES_KEY]: # this covers False, None and []
properties[USE_ENTITIES_KEY] = []
# `use_entities` is either a list of explicitly included entities
# or `True` if all should be included
# if the listed entities have a role or group label, concatenate the entity
# label with the corresponding role or group label to make sure roles and
# groups can also influence the dialogue predictions
if properties[USE_ENTITIES_KEY] is True:
included_entities = set(entities)
included_entities.update(Domain.concatenate_entity_labels(roles))
included_entities.update(Domain.concatenate_entity_labels(groups))
else:
included_entities = set(properties[USE_ENTITIES_KEY])
for entity in list(included_entities):
included_entities.update(
Domain.concatenate_entity_labels(roles, entity)
)
included_entities.update(
Domain.concatenate_entity_labels(groups, entity)
)
excluded_entities = set(properties[IGNORE_ENTITIES_KEY])
for entity in list(excluded_entities):
excluded_entities.update(Domain.concatenate_entity_labels(roles, entity))
excluded_entities.update(Domain.concatenate_entity_labels(groups, entity))
used_entities = list(included_entities - excluded_entities)
used_entities.sort()
# Only print warning for ambiguous configurations if entities were included
# explicitly.
explicitly_included = isinstance(properties[USE_ENTITIES_KEY], list)
ambiguous_entities = included_entities.intersection(excluded_entities)
if explicitly_included and ambiguous_entities:
rasa.shared.utils.io.raise_warning(
f"Entities: '{ambiguous_entities}' are explicitly included and"
f" excluded for intent '{name}'."
f"Excluding takes precedence in this case. "
f"Please resolve that ambiguity.",
docs=f"{DOCS_URL_DOMAINS}",
)
properties[USED_ENTITIES_KEY] = used_entities
del properties[USE_ENTITIES_KEY]
del properties[IGNORE_ENTITIES_KEY]
return intent
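# Example of the transformation (assuming domain entities ["city", "name"], no roles/groups):
# {"greet": {"use_entities": ["name"], "ignore_entities": []}} is rewritten in place to
# {"greet": {"used_entities": ["name"]}} -- the two user-facing keys collapse into the single
# internal "used_entities" list.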
@rasa.shared.utils.common.lazy_property
def retrieval_intents(self) -> List[Text]:
"""List retrieval intents present in the domain."""
return [
intent
for intent in self.intent_properties
if self.intent_properties[intent].get(IS_RETRIEVAL_INTENT_KEY)
]
@classmethod
def collect_entity_properties(
cls, domain_entities: List[Union[Text, Dict[Text, Any]]]
) -> Tuple[List[Text], Dict[Text, List[Text]], Dict[Text, List[Text]]]:
"""Get entity properties for a domain from what is provided by a domain file.
Args:
domain_entities: The entities as provided by a domain file.
Returns:
A list of entity names.
A dictionary of entity names to roles.
A dictionary of entity names to groups.
"""
entities: List[Text] = []
roles: Dict[Text, List[Text]] = {}
groups: Dict[Text, List[Text]] = {}
for entity in domain_entities:
if isinstance(entity, str):
entities.append(entity)
elif isinstance(entity, dict):
for _entity, sub_labels in entity.items():
entities.append(_entity)
if sub_labels:
if ENTITY_ROLES_KEY in sub_labels:
roles[_entity] = sub_labels[ENTITY_ROLES_KEY]
if ENTITY_GROUPS_KEY in sub_labels:
groups[_entity] = sub_labels[ENTITY_GROUPS_KEY]
else:
raise InvalidDomain(
f"In the `domain.yml` file, the entity '{_entity}' cannot"
f" have value of `{type(sub_labels)}`. If you have placed a"
f" ':' character after the entity `{_entity}` without"
f" adding any additional parameters to this entity then you"
f" would need to remove the ':' character. Please see"
f" {rasa.shared.constants.DOCS_URL_DOMAINS} for more"
f" information on how to correctly add `entities` in the"
f" `domain` and {rasa.shared.constants.DOCS_URL_ENTITIES}"
f" for examples on when to use the ':' character after an"
f" entity's name."
)
else:
raise InvalidDomain(
f"Invalid domain. Entity is invalid, type of entity '{entity}' "
f"not supported: '{type(entity).__name__}'"
)
return entities, roles, groups
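# Example (illustrative): the domain list ["price", {"city": {"roles": ["from", "to"]}}]
# yields entities ["price", "city"], roles {"city": ["from", "to"]} and groups {}.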
@classmethod
def collect_intent_properties(
cls,
intents: List[Union[Text, Dict[Text, Any]]],
entities: List[Text],
roles: Dict[Text, List[Text]],
groups: Dict[Text, List[Text]],
) -> Dict[Text, Dict[Text, Union[bool, List]]]:
"""Get intent properties for a domain from what is provided by a domain file.
Args:
intents: The intents as provided by a domain file.
entities: All entities as provided by a domain file.
roles: The roles of entities as provided by a domain file.
groups: The groups of entities as provided by a domain file.
Returns:
The intent properties to be stored in the domain.
"""
# make a copy to not alter the input argument
intents = copy.deepcopy(intents)
intent_properties = {}
duplicates = set()
for intent in intents:
intent_name, properties = cls._intent_properties(
intent, entities, roles, groups
)
if intent_name in intent_properties.keys():
duplicates.add(intent_name)
intent_properties.update(properties)
if duplicates:
raise InvalidDomain(
f"Intents are not unique! Found multiple intents with name(s) {sorted(duplicates)}. "
f"Either rename or remove the duplicate ones."
)
cls._add_default_intents(intent_properties, entities, roles, groups)
return intent_properties
@classmethod
def _intent_properties(
cls,
intent: Union[Text, Dict[Text, Any]],
entities: List[Text],
roles: Dict[Text, List[Text]],
groups: Dict[Text, List[Text]],
) -> Tuple[Text, Dict[Text, Any]]:
if not isinstance(intent, dict):
intent_name = intent
intent = {intent_name: {USE_ENTITIES_KEY: True, IGNORE_ENTITIES_KEY: []}}
else:
intent_name = list(intent.keys())[0]
return (
intent_name,
cls._transform_intent_properties_for_internal_use(
intent, entities, roles, groups
),
)
@classmethod
def _add_default_intents(
cls,
intent_properties: Dict[Text, Dict[Text, Union[bool, List]]],
entities: List[Text],
roles: Optional[Dict[Text, List[Text]]],
groups: Optional[Dict[Text, List[Text]]],
) -> None:
for intent_name in rasa.shared.core.constants.DEFAULT_INTENTS:
if intent_name not in intent_properties:
_, properties = cls._intent_properties(
intent_name, entities, roles, groups
)
intent_properties.update(properties)
def __init__(
self,
intents: Union[Set[Text], List[Text], List[Dict[Text, Any]]],
entities: List[Union[Text, Dict[Text, Any]]],
slots: List[Slot],
responses: Dict[Text, List[Dict[Text, Any]]],
action_names: List[Text],
forms: Union[Dict[Text, Any], List[Text]],
action_texts: Optional[List[Text]] = None,
store_entities_as_slots: bool = True,
session_config: SessionConfig = SessionConfig.default(),
) -> None:
"""Creates a `Domain`.
Args:
intents: Intent labels.
entities: The names of entities which might be present in user messages.
slots: Slots to store information during the conversation.
responses: Bot responses. If an action with the same name is executed, it
will send the matching response to the user.
action_names: Names of custom actions.
forms: Form names and their slot mappings.
action_texts: End-to-End bot utterances from end-to-end stories.
store_entities_as_slots: If `True` Rasa will automatically create `SlotSet`
events for entities if there are slots with the same name as the entity.
session_config: Configuration for conversation sessions. Conversations are
restarted at the end of a session.
"""
self.entities, self.roles, self.groups = self.collect_entity_properties(
entities
)
self.intent_properties = self.collect_intent_properties(
intents, self.entities, self.roles, self.groups
)
self.overridden_default_intents = self._collect_overridden_default_intents(
intents
)
self.form_names, self.forms, overridden_form_actions = self._initialize_forms(
forms
)
action_names += overridden_form_actions
self.responses = responses
self.action_texts = action_texts or []
self.session_config = session_config
self._custom_actions = action_names
# only includes custom actions and utterance actions
self.user_actions = self._combine_with_responses(action_names, responses)
# includes all action names (custom, utterance, default actions and forms)
# and action texts from end-to-end bot utterances
self.action_names_or_texts = (
self._combine_user_with_default_actions(self.user_actions)
+ [
form_name
for form_name in self.form_names
if form_name not in self._custom_actions
]
+ self.action_texts
)
self._user_slots = copy.copy(slots)
self.slots = slots
self._add_default_slots()
self.store_entities_as_slots = store_entities_as_slots
self._check_domain_sanity()
def __deepcopy__(self, memo: Optional[Dict[int, Any]]) -> "Domain":
"""Enables making a deep copy of the `Domain` using `copy.deepcopy`.
See https://docs.python.org/3/library/copy.html#copy.deepcopy
for more details.
Args:
memo: Optional dictionary of objects already copied during the current
copying pass.
Returns:
A deep copy of the current domain.
"""
domain_dict = self.as_dict()
return self.__class__.from_dict(copy.deepcopy(domain_dict, memo))
@staticmethod
def _collect_overridden_default_intents(
intents: Union[Set[Text], List[Text], List[Dict[Text, Any]]]
) -> List[Text]:
"""Collects the default intents overridden by the user.
Args:
intents: User-provided intents.
Returns:
User-defined intents that are default intents.
"""
intent_names: Set[Text] = {
list(intent.keys())[0] if isinstance(intent, dict) else intent
for intent in intents
}
return sorted(intent_names & set(rasa.shared.core.constants.DEFAULT_INTENTS))
@staticmethod
def _initialize_forms(
forms: Union[Dict[Text, Any], List[Text]]
) -> Tuple[List[Text], Dict[Text, Any], List[Text]]:
"""Retrieves the initial values for the Domain's form fields.
Args:
forms: Form names (if forms are a list) or a form dictionary. Forms
provided in dictionary format have the form names as keys, and either
empty dictionaries as values, or objects containing
`SlotMapping`s.
Returns:
The form names, a mapping of form names and slot mappings, and custom
actions.
Returning custom actions for each form means that Rasa Open Source should
not use the default `FormAction` for the forms, but rather a custom action
for it. This can e.g. be used to run the deprecated Rasa Open Source 1
`FormAction` which is implemented in the Rasa SDK.
"""
if isinstance(forms, dict):
for form_name, form_data in forms.items():
if form_data is not None and REQUIRED_SLOTS_KEY not in form_data:
forms[form_name] = {REQUIRED_SLOTS_KEY: form_data}
# dict with slot mappings
return list(forms.keys()), forms, []
if isinstance(forms, list) and (not forms or isinstance(forms[0], str)):
# list of form names (Rasa Open Source 1 format)
rasa.shared.utils.io.raise_warning(
"The `forms` section in the domain used the old Rasa Open Source 1 "
"list format to define forms. Rasa Open Source will be configured to "
"use the deprecated `FormAction` within the Rasa SDK. If you want to "
"use the new Rasa Open Source 2 `FormAction` adapt your `forms` "
"section as described in the documentation. Support for the "
"deprecated `FormAction` in the Rasa SDK will be removed in Rasa Open "
"Source 3.0.",
docs=rasa.shared.constants.DOCS_URL_FORMS,
category=FutureWarning,
)
return forms, {form_name: {} for form_name in forms}, forms
rasa.shared.utils.io.raise_warning(
f"The `forms` section in the domain needs to contain a dictionary. "
f"Instead found an object of type '{type(forms)}'.",
docs=DOCS_URL_FORMS,
)
return [], {}, []
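# Example (illustrative): {"booking_form": {"cuisine": [...]}} (mapping-only style) is
# normalised to {"booking_form": {"required_slots": {"cuisine": [...]}}}, whereas the plain
# list ["booking_form"] takes the Rasa Open Source 1 compatibility path above and is also
# returned as an overridden custom action.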
def __hash__(self) -> int:
"""Returns a unique hash for the domain."""
return int(self.fingerprint(), 16)
def fingerprint(self) -> Text:
"""Returns a unique hash for the domain which is stable across python runs.
Returns:
fingerprint of the domain
"""
self_as_dict = self.as_dict()
self_as_dict[
KEY_INTENTS
] = rasa.shared.utils.common.sort_list_of_dicts_by_first_key(
self_as_dict[KEY_INTENTS]
)
self_as_dict[KEY_ACTIONS] = self.action_names_or_texts
return rasa.shared.utils.io.get_dictionary_fingerprint(self_as_dict)
@rasa.shared.utils.common.lazy_property
def user_actions_and_forms(self) -> List[Text]:
"""Returns combination of user actions and forms."""
return self.user_actions + self.form_names
@rasa.shared.utils.common.lazy_property
def action_names(self) -> List[Text]:
"""Returns action names or texts."""
# Raise `DeprecationWarning` instead of `FutureWarning` as we only want to
# notify developers about the deprecation (e.g. developers who are using the
# Python API or writing custom policies). End users can't change anything
# about this warning except making their developers change any custom code
# which calls this.
rasa.shared.utils.io.raise_warning(
f"{Domain.__name__}.{Domain.action_names.__name__} "
f"is deprecated and will be removed version 3.0.0.",
category=DeprecationWarning,
)
return self.action_names_or_texts
@rasa.shared.utils.common.lazy_property
def num_actions(self) -> int:
"""Returns the number of available actions."""
# noinspection PyTypeChecker
return len(self.action_names_or_texts)
@rasa.shared.utils.common.lazy_property
def num_states(self) -> int:
"""Number of used input states for the action prediction."""
return len(self.input_states)
@rasa.shared.utils.common.lazy_property
def retrieval_intent_templates(self) -> Dict[Text, List[Dict[Text, Any]]]:
"""Return only the responses which are defined for retrieval intents."""
rasa.shared.utils.io.raise_deprecation_warning(
"The terminology 'template' is deprecated and replaced by 'response', call `retrieval_intent_responses` instead of `retrieval_intent_templates`.",
docs=f"{DOCS_URL_MIGRATION_GUIDE}#rasa-23-to-rasa-24",
)
return self.retrieval_intent_responses
@rasa.shared.utils.common.lazy_property
def retrieval_intent_responses(self) -> Dict[Text, List[Dict[Text, Any]]]:
"""Return only the responses which are defined for retrieval intents."""
return dict(
filter(
lambda x: self.is_retrieval_intent_response(x), self.responses.items()
)
)
@rasa.shared.utils.common.lazy_property
def templates(self) -> Dict[Text, List[Dict[Text, Any]]]:
"""Temporary property before templates become completely deprecated."""
rasa.shared.utils.io.raise_deprecation_warning(
"The terminology 'template' is deprecated and replaced by 'response'. Instead of using the `templates` property, please use the `responses` property instead.",
docs=f"{DOCS_URL_MIGRATION_GUIDE}#rasa-23-to-rasa-24",
)
return self.responses
@staticmethod
def is_retrieval_intent_template(
response: Tuple[Text, List[Dict[Text, Any]]]
) -> bool:
"""Check if the response is for a retrieval intent.
These templates have a `/` symbol in their name. Use that to filter them from
the rest.
"""
rasa.shared.utils.io.raise_deprecation_warning(
"The terminology 'template' is deprecated and replaced by 'response', call `is_retrieval_intent_response` instead of `is_retrieval_intent_template`.",
docs=f"{DOCS_URL_MIGRATION_GUIDE}#rasa-23-to-rasa-24",
)
return rasa.shared.nlu.constants.RESPONSE_IDENTIFIER_DELIMITER in response[0]
@staticmethod
def is_retrieval_intent_response(
response: Tuple[Text, List[Dict[Text, Any]]]
) -> bool:
"""Check if the response is for a retrieval intent.
These responses have a `/` symbol in their name. Use that to filter them from
the rest.
"""
return rasa.shared.nlu.constants.RESPONSE_IDENTIFIER_DELIMITER in response[0]
def _add_default_slots(self) -> None:
"""Sets up the default slots and slot values for the domain."""
self._add_requested_slot()
self._add_knowledge_base_slots()
self._add_categorical_slot_default_value()
self._add_session_metadata_slot()
def _add_categorical_slot_default_value(self) -> None:
"""Add a default value to all categorical slots.
All unseen values found for the slot will be mapped to this default value
for featurization.
"""
for slot in [s for s in self.slots if isinstance(s, CategoricalSlot)]:
slot.add_default_value()
def add_categorical_slot_default_value(self) -> None:
"""See `_add_categorical_slot_default_value` for docstring."""
rasa.shared.utils.io.raise_deprecation_warning(
f"'{self.add_categorical_slot_default_value.__name__}' is deprecated and "
f"will be removed in Rasa Open Source 3.0.0. This method is now "
f"automatically called when the Domain is created which makes a manual "
f"call superfluous."
)
self._add_categorical_slot_default_value()
def _add_requested_slot(self) -> None:
"""Add a slot called `requested_slot` to the list of slots.
The value of this slot will hold the name of the slot which the user
needs to fill in next (either explicitly or implicitly) as part of a form.
"""
if self.form_names and rasa.shared.core.constants.REQUESTED_SLOT not in [
s.name for s in self.slots
]:
self.slots.append(
TextSlot(
rasa.shared.core.constants.REQUESTED_SLOT,
influence_conversation=False,
)
)
def add_requested_slot(self) -> None:
"""See `_add_categorical_slot_default_value` for docstring."""
rasa.shared.utils.io.raise_deprecation_warning(
f"'{self.add_requested_slot.__name__}' is deprecated and "
f"will be removed in Rasa Open Source 3.0.0. This method is now "
f"automatically called when the Domain is created which makes a manual "
f"call superfluous."
)
self._add_requested_slot()
def _add_knowledge_base_slots(self) -> None:
"""Add slots for the knowledge base action to slots.
Slots are only added if the default knowledge base action name is present.
As soon as the knowledge base action is not experimental anymore, we should
consider creating a new section in the domain file dedicated to knowledge
base slots.
"""
if (
rasa.shared.core.constants.DEFAULT_KNOWLEDGE_BASE_ACTION
in self.action_names_or_texts
):
logger.warning(
"You are using an experiential feature: Action '{}'!".format(
rasa.shared.core.constants.DEFAULT_KNOWLEDGE_BASE_ACTION
)
)
slot_names = [s.name for s in self.slots]
knowledge_base_slots = [
rasa.shared.core.constants.SLOT_LISTED_ITEMS,
rasa.shared.core.constants.SLOT_LAST_OBJECT,
rasa.shared.core.constants.SLOT_LAST_OBJECT_TYPE,
]
for s in knowledge_base_slots:
if s not in slot_names:
self.slots.append(TextSlot(s, influence_conversation=False))
def add_knowledge_base_slots(self) -> None:
"""See `_add_categorical_slot_default_value` for docstring."""
rasa.shared.utils.io.raise_deprecation_warning(
f"'{self.add_knowledge_base_slots.__name__}' is deprecated and "
f"will be removed in Rasa Open Source 3.0.0. This method is now "
f"automatically called when the Domain is created which makes a manual "
f"call superfluous."
)
self._add_knowledge_base_slots()
def _add_session_metadata_slot(self) -> None:
self.slots.append(
AnySlot(rasa.shared.core.constants.SESSION_START_METADATA_SLOT,)
)
def index_for_action(self, action_name: Text) -> int:
"""Looks up which action index corresponds to this action name."""
try:
return self.action_names_or_texts.index(action_name)
except ValueError:
self.raise_action_not_found_exception(action_name)
def raise_action_not_found_exception(self, action_name_or_text: Text) -> NoReturn:
"""Raises exception if action name or text not part of the domain or stories.
Args:
action_name_or_text: Name of an action or its text in case it's an
end-to-end bot utterance.
Raises:
ActionNotFoundException: If `action_name_or_text` are not part of this
domain.
"""
action_names = "\n".join([f"\t - {a}" for a in self.action_names_or_texts])
raise ActionNotFoundException(
f"Cannot access action '{action_name_or_text}', "
f"as that name is not a registered "
f"action for this domain. "
f"Available actions are: \n{action_names}"
)
def random_template_for(self, utter_action: Text) -> Optional[Dict[Text, Any]]:
"""Returns a random response for an action name.
Args:
utter_action: The name of the utter action.
Returns:
A response for an utter action.
"""
import numpy as np
# Raise `DeprecationWarning` instead of `FutureWarning` as we only want to
# notify developers about the deprecation (e.g. developers who are using the
# Python API or writing custom policies). End users can't change anything
# about this warning except making their developers change any custom code
# which calls this.
rasa.shared.utils.io.raise_warning(
f"'{Domain.__name__}.{Domain.random_template_for.__class__}' "
f"is deprecated and will be removed version 3.0.0.",
category=DeprecationWarning,
)
if utter_action in self.responses:
return np.random.choice(self.responses[utter_action])
else:
return None
# noinspection PyTypeChecker
@rasa.shared.utils.common.lazy_property
def slot_states(self) -> List[Text]:
"""Returns all available slot state strings."""
return [
f"{slot.name}_{feature_index}"
for slot in self.slots
for feature_index in range(0, slot.feature_dimensionality())
]
# noinspection PyTypeChecker
@rasa.shared.utils.common.lazy_property
def entity_states(self) -> List[Text]:
"""Returns all available entity state strings."""
entity_states = copy.deepcopy(self.entities)
entity_states.extend(Domain.concatenate_entity_labels(self.roles))
entity_states.extend(Domain.concatenate_entity_labels(self.groups))
return entity_states
@staticmethod
def concatenate_entity_labels(
entity_labels: Dict[Text, List[Text]], entity: Optional[Text] = None
) -> List[Text]:
"""Concatenates the given entity labels with their corresponding sub-labels.
If a specific entity label is given, only this entity label will be
concatenated with its corresponding sub-labels.
Args:
entity_labels: A map of an entity label to its sub-label list.
entity: If present, only this entity will be considered.
Returns:
A list of labels.
"""
if entity is not None and entity not in entity_labels:
return []
if entity:
return [
f"{entity}{rasa.shared.core.constants.ENTITY_LABEL_SEPARATOR}{sub_label}"
for sub_label in entity_labels[entity]
]
return [
f"{entity_label}{rasa.shared.core.constants.ENTITY_LABEL_SEPARATOR}{entity_sub_label}"
for entity_label, entity_sub_labels in entity_labels.items()
for entity_sub_label in entity_sub_labels
]
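# Example (illustrative): with entity_labels {"city": ["from", "to"]} this returns
# ["city<SEP>from", "city<SEP>to"], where <SEP> stands for ENTITY_LABEL_SEPARATOR
# ("#" in current Rasa versions); passing entity="name" would return [] because "name"
# has no sub-labels.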
@rasa.shared.utils.common.lazy_property
def input_state_map(self) -> Dict[Text, int]:
"""Provide a mapping from state names to indices."""
return {f: i for i, f in enumerate(self.input_states)}
@rasa.shared.utils.common.lazy_property
def input_states(self) -> List[Text]:
"""Returns all available states."""
return (
self.intents
+ self.entity_states
+ self.slot_states
+ self.action_names_or_texts
+ self.form_names
)
def _get_featurized_entities(self, latest_message: UserUttered) -> Set[Text]:
intent_name = latest_message.intent.get(
rasa.shared.nlu.constants.INTENT_NAME_KEY
)
intent_config = self.intent_config(intent_name)
entities = latest_message.entities
# If Entity Roles and Groups is used, we also need to make sure the roles and
# groups get featurized. We concatenate the entity label with the role/group
# label using a special separator to make sure that the resulting label is
# unique (as you can have the same role/group label for different entities).
entity_names = (
set(entity["entity"] for entity in entities if "entity" in entity.keys())
| set(
f"{entity['entity']}"
f"{rasa.shared.core.constants.ENTITY_LABEL_SEPARATOR}{entity['role']}"
for entity in entities
if "entity" in entity.keys() and "role" in entity.keys()
)
| set(
f"{entity['entity']}"
f"{rasa.shared.core.constants.ENTITY_LABEL_SEPARATOR}{entity['group']}"
for entity in entities
if "entity" in entity.keys() and "group" in entity.keys()
)
)
# the USED_ENTITIES_KEY of an intent also contains the entity labels and the
# concatenated entity labels with their corresponding roles and groups labels
wanted_entities = set(intent_config.get(USED_ENTITIES_KEY, entity_names))
return entity_names & wanted_entities
def _get_user_sub_state(
self, tracker: "DialogueStateTracker"
) -> Dict[Text, Union[Text, Tuple[Text]]]:
"""Turn latest UserUttered event into a substate containing intent,
text and set entities if present
Args:
tracker: dialog state tracker containing the dialog so far
Returns:
a dictionary containing intent, text and set entities
"""
# proceed with values only if the user of the bot has done something
# at the previous step, i.e. when the state is not empty.
latest_message = tracker.latest_message
if not latest_message or latest_message.is_empty():
return {}
sub_state = latest_message.as_sub_state()
# filter entities based on intent config
# sub_state will be transformed to frozenset therefore we need to
# convert the set to the tuple
# sub_state is transformed to frozenset because we will later hash it
# for deduplication
entities = tuple(
self._get_featurized_entities(latest_message)
& set(sub_state.get(rasa.shared.nlu.constants.ENTITIES, ()))
)
if entities:
sub_state[rasa.shared.nlu.constants.ENTITIES] = entities
else:
sub_state.pop(rasa.shared.nlu.constants.ENTITIES, None)
return sub_state
@staticmethod
def _get_slots_sub_state(
tracker: "DialogueStateTracker", omit_unset_slots: bool = False,
) -> Dict[Text, Union[Text, Tuple[float]]]:
"""Sets all set slots with the featurization of the stored value.
Args:
tracker: dialog state tracker containing the dialog so far
omit_unset_slots: If `True` do not include the initial values of slots.
Returns:
a dictionary mapping slot names to their featurization
"""
slots = {}
for slot_name, slot in tracker.slots.items():
if slot is not None and slot.as_feature():
if omit_unset_slots and not slot.has_been_set:
continue
if slot.value == rasa.shared.core.constants.SHOULD_NOT_BE_SET:
slots[slot_name] = rasa.shared.core.constants.SHOULD_NOT_BE_SET
elif any(slot.as_feature()):
# only add slot if some of the features are not zero
slots[slot_name] = tuple(slot.as_feature())
return slots
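# Example (illustrative): a filled TextSlot "cuisine" contributes slots["cuisine"] = (1.0,),
# an unset one featurizes to all zeros and is skipped, and a slot whose value is
# SHOULD_NOT_BE_SET keeps that sentinel string instead of a feature tuple.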
@staticmethod
def _get_prev_action_sub_state(
tracker: "DialogueStateTracker",
) -> Dict[Text, Text]:
"""Turn the previous taken action into a state name.
Args:
tracker: dialog state tracker containing the dialog so far
Returns:
a dictionary with the information on latest action
"""
return tracker.latest_action
@staticmethod
def _get_active_loop_sub_state(
tracker: "DialogueStateTracker",
) -> Dict[Text, Text]:
"""Turn tracker's active loop into a state name.
Args:
tracker: dialog state tracker containing the dialog so far
Returns:
a dictionary mapping "name" to active loop name if present
"""
# we don't use tracker.active_loop_name
# because we need to keep should_not_be_set
active_loop: Optional[Text] = tracker.active_loop.get(
rasa.shared.core.constants.LOOP_NAME
)
if active_loop:
return {rasa.shared.core.constants.LOOP_NAME: active_loop}
else:
return {}
@staticmethod
def _clean_state(state: State) -> State:
return {
state_type: sub_state
for state_type, sub_state in state.items()
if sub_state
}
def get_active_states(
self, tracker: "DialogueStateTracker", omit_unset_slots: bool = False,
) -> State:
"""Returns a bag of active states from the tracker state.
Args:
tracker: dialog state tracker containing the dialog so far
omit_unset_slots: If `True` do not include the initial values of slots.
Returns `State` containing all active states.
"""
state = {
rasa.shared.core.constants.USER: self._get_user_sub_state(tracker),
rasa.shared.core.constants.SLOTS: self._get_slots_sub_state(
tracker, omit_unset_slots=omit_unset_slots
),
rasa.shared.core.constants.PREVIOUS_ACTION: self._get_prev_action_sub_state(
tracker
),
rasa.shared.core.constants.ACTIVE_LOOP: self._get_active_loop_sub_state(
tracker
),
}
return self._clean_state(state)
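# A resulting State looks roughly like (key names are the constants used above):
#   {"user": {"intent": "greet", "entities": ("name",)},
#    "slots": {"name": (1.0,)},
#    "prev_action": {"action_name": "action_listen"},
#    "active_loop": {"name": "booking_form"}}
# with empty sub-states dropped by _clean_state.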
@staticmethod
def _remove_rule_only_features(
state: State, rule_only_data: Optional[Dict[Text, Any]],
) -> None:
if not rule_only_data:
return
rule_only_slots = rule_only_data.get(
rasa.shared.core.constants.RULE_ONLY_SLOTS, []
)
rule_only_loops = rule_only_data.get(
rasa.shared.core.constants.RULE_ONLY_LOOPS, []
)
# remove slots which only occur in rules but not in stories
if rule_only_slots:
for slot in rule_only_slots:
state.get(rasa.shared.core.constants.SLOTS, {}).pop(slot, None)
# remove active loop which only occur in rules but not in stories
if (
rule_only_loops
and state.get(rasa.shared.core.constants.ACTIVE_LOOP, {}).get(
rasa.shared.core.constants.LOOP_NAME
)
in rule_only_loops
):
del state[rasa.shared.core.constants.ACTIVE_LOOP]
@staticmethod
def _substitute_rule_only_user_input(state: State, last_ml_state: State) -> None:
if not rasa.shared.core.trackers.is_prev_action_listen_in_state(state):
if not last_ml_state.get(rasa.shared.core.constants.USER) and state.get(
rasa.shared.core.constants.USER
):
del state[rasa.shared.core.constants.USER]
elif last_ml_state.get(rasa.shared.core.constants.USER):
state[rasa.shared.core.constants.USER] = last_ml_state[
rasa.shared.core.constants.USER
]
def states_for_tracker_history(
self,
tracker: "DialogueStateTracker",
omit_unset_slots: bool = False,
ignore_rule_only_turns: bool = False,
rule_only_data: Optional[Dict[Text, Any]] = None,
) -> List[State]:
"""List of states for each state of the trackers history.
Args:
tracker: Dialogue state tracker containing the dialogue so far.
omit_unset_slots: If `True` do not include the initial values of slots.
ignore_rule_only_turns: If True ignore dialogue turns that are present
only in rules.
rule_only_data: Slots and loops,
which only occur in rules but not in stories.
Return:
A list of states.
"""
states = []
last_ml_action_sub_state = None
turn_was_hidden = False
for tr, hide_rule_turn in tracker.generate_all_prior_trackers():
if ignore_rule_only_turns:
# remember previous ml action based on the last non hidden turn
# we need this to override previous action in the ml state
if not turn_was_hidden:
last_ml_action_sub_state = self._get_prev_action_sub_state(tr)
# followup action or happy path loop prediction
# don't change the fact whether dialogue turn should be hidden
if (
not tr.followup_action
and not tr.latest_action_name == tr.active_loop_name
):
turn_was_hidden = hide_rule_turn
if turn_was_hidden:
continue
state = self.get_active_states(tr, omit_unset_slots=omit_unset_slots)
if ignore_rule_only_turns:
# clean state from only rule features
self._remove_rule_only_features(state, rule_only_data)
# make sure user input is the same as for previous state
# for non action_listen turns
if states:
self._substitute_rule_only_user_input(state, states[-1])
# substitute previous rule action with last_ml_action_sub_state
if last_ml_action_sub_state:
state[
rasa.shared.core.constants.PREVIOUS_ACTION
] = last_ml_action_sub_state
states.append(self._clean_state(state))
return states
def slots_for_entities(self, entities: List[Dict[Text, Any]]) -> List[SlotSet]:
"""Creates slot events for entities if auto-filling is enabled.
Args:
entities: The list of entities.
Returns:
A list of `SlotSet` events.
"""
if self.store_entities_as_slots:
slot_events = []
for s in self.slots:
if s.auto_fill:
matching_entities = [
e.get("value") for e in entities if e.get("entity") == s.name
]
if matching_entities:
if s.type_name == "list":
slot_events.append(SlotSet(s.name, matching_entities))
else:
slot_events.append(SlotSet(s.name, matching_entities[-1]))
return slot_events
else:
return []
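# Example (illustrative): with a TextSlot "cuisine" (auto_fill enabled) and
# entities=[{"entity": "cuisine", "value": "greek"}], this returns
# [SlotSet("cuisine", "greek")]; for a list-type slot the full list of matching values
# would be set instead.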
def persist_specification(self, model_path: Text) -> None:
"""Persist the domain specification to storage."""
domain_spec_path = os.path.join(model_path, "domain.json")
rasa.shared.utils.io.create_directory_for_file(domain_spec_path)
metadata = {"states": self.input_states}
rasa.shared.utils.io.dump_obj_as_json_to_file(domain_spec_path, metadata)
@classmethod
def load_specification(cls, path: Text) -> Dict[Text, Any]:
"""Load a domains specification from a dumped model directory."""
metadata_path = os.path.join(path, "domain.json")
return json.loads(rasa.shared.utils.io.read_file(metadata_path))
def compare_with_specification(self, path: Text) -> bool:
"""Compare the domain spec of the current and the loaded domain.
Throws an exception if the loaded domain specification differs
from the current domain.
"""
loaded_domain_spec = self.load_specification(path)
states = loaded_domain_spec["states"]
if set(states) != set(self.input_states):
missing = ",".join(set(states) - set(self.input_states))
additional = ",".join(set(self.input_states) - set(states))
raise InvalidDomain(
f"Domain specification has changed. "
f"You MUST retrain the policy. "
f"Detected mismatch in domain specification. "
f"The following states have been \n"
f"\t - removed: {missing} \n"
f"\t - added: {additional} "
)
else:
return True
def _slot_definitions(self) -> Dict[Any, Dict[str, Any]]:
# Only persist slots defined by the user. We add the default slots on the
# fly when loading the domain.
return {slot.name: slot.persistence_info() for slot in self._user_slots}
def as_dict(self) -> Dict[Text, Any]:
"""Return serialized `Domain`."""
return {
"config": {"store_entities_as_slots": self.store_entities_as_slots},
SESSION_CONFIG_KEY: {
SESSION_EXPIRATION_TIME_KEY: self.session_config.session_expiration_time,
CARRY_OVER_SLOTS_KEY: self.session_config.carry_over_slots,
},
KEY_INTENTS: self._transform_intents_for_file(),
KEY_ENTITIES: self._transform_entities_for_file(),
KEY_SLOTS: self._slot_definitions(),
KEY_RESPONSES: self.responses,
KEY_ACTIONS: self._custom_actions,
KEY_FORMS: self.forms,
KEY_E2E_ACTIONS: self.action_texts,
}
@staticmethod
def get_responses_with_multilines(
responses: Dict[Text, List[Dict[Text, Any]]]
) -> Dict[Text, List[Dict[Text, Any]]]:
"""Returns `responses` with preserved multilines in the `text` key.
Args:
responses: Original `responses`.
Returns:
`responses` with preserved multilines in the `text` key.
"""
from ruamel.yaml.scalarstring import LiteralScalarString
final_responses = responses.copy()
for utter_action, examples in final_responses.items():
for i, example in enumerate(examples):
response_text = example.get(KEY_RESPONSES_TEXT, "")
if not response_text or "\n" not in response_text:
continue
# Has new lines, use `LiteralScalarString`
final_responses[utter_action][i][
KEY_RESPONSES_TEXT
] = LiteralScalarString(response_text)
return final_responses
def _transform_intents_for_file(self) -> List[Union[Text, Dict[Text, Any]]]:
"""Transform intent properties for displaying or writing into a domain file.
Internally, there is a property `used_entities` that lists all entities to be
used. In domain files, `use_entities` or `ignore_entities` is used instead to
list individual entities to ex- or include, because this is easier to read.
Returns:
The intent properties as they are used in domain files.
"""
intent_properties = copy.deepcopy(self.intent_properties)
intents_for_file = []
for intent_name, intent_props in intent_properties.items():
if (
intent_name in rasa.shared.core.constants.DEFAULT_INTENTS
and intent_name not in self.overridden_default_intents
):
# Default intents should be not dumped with the domain
continue
            # `use_entities` and `ignore_entities` in the domain file do not consider
            # the role and group labels; remove them from the list to make sure they
            # do not end up in the domain file
use_entities = set(
entity
for entity in intent_props[USED_ENTITIES_KEY]
if rasa.shared.core.constants.ENTITY_LABEL_SEPARATOR not in entity
)
ignore_entities = set(self.entities) - use_entities
if len(use_entities) == len(self.entities):
intent_props[USE_ENTITIES_KEY] = True
elif len(use_entities) <= len(self.entities) / 2:
intent_props[USE_ENTITIES_KEY] = list(use_entities)
else:
intent_props[IGNORE_ENTITIES_KEY] = list(ignore_entities)
intent_props.pop(USED_ENTITIES_KEY)
intents_for_file.append({intent_name: intent_props})
return intents_for_file
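    # Illustrative example (made-up entity and intent names): with
    # self.entities == ["city", "name"], an intent whose used_entities is
    # ["city", "name"] is dumped as {"use_entities": True}, while an intent
    # using only ["city"] is dumped as {"use_entities": ["city"]} because it
    # uses no more than half of the declared entities.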
def _transform_entities_for_file(self) -> List[Union[Text, Dict[Text, Any]]]:
"""Transform entity properties for displaying or writing to a domain file.
Returns:
The entity properties as they are used in domain files.
"""
entities_for_file = []
for entity in self.entities:
if entity in self.roles and entity in self.groups:
entities_for_file.append(
{
entity: {
ENTITY_GROUPS_KEY: self.groups[entity],
ENTITY_ROLES_KEY: self.roles[entity],
}
}
)
elif entity in self.roles:
entities_for_file.append(
{entity: {ENTITY_ROLES_KEY: self.roles[entity]}}
)
elif entity in self.groups:
entities_for_file.append(
{entity: {ENTITY_GROUPS_KEY: self.groups[entity]}}
)
else:
entities_for_file.append(entity)
return entities_for_file
def cleaned_domain(self) -> Dict[Text, Any]:
"""Fetch cleaned domain to display or write into a file.
The internal `used_entities` property is replaced by `use_entities` or
`ignore_entities` and redundant keys are replaced with default values
        to make the domain easier to read.
Returns:
A cleaned dictionary version of the domain.
"""
domain_data = self.as_dict()
# remove e2e actions from domain before we display it
domain_data.pop(KEY_E2E_ACTIONS, None)
for idx, intent_info in enumerate(domain_data[KEY_INTENTS]):
for name, intent in intent_info.items():
if intent.get(USE_ENTITIES_KEY) is True:
del intent[USE_ENTITIES_KEY]
if not intent.get(IGNORE_ENTITIES_KEY):
intent.pop(IGNORE_ENTITIES_KEY, None)
if len(intent) == 0:
domain_data[KEY_INTENTS][idx] = name
for slot in domain_data[KEY_SLOTS].values():
if slot["initial_value"] is None:
del slot["initial_value"]
if slot["auto_fill"]:
del slot["auto_fill"]
if slot["type"].startswith("rasa.shared.core.slots"):
slot["type"] = Slot.resolve_by_type(slot["type"]).type_name
if domain_data["config"]["store_entities_as_slots"]:
del domain_data["config"]["store_entities_as_slots"]
# clean empty keys
return {
k: v
for k, v in domain_data.items()
if v != {} and v != [] and v is not None
}
def persist(self, filename: Union[Text, Path]) -> None:
"""Write domain to a file."""
as_yaml = self.as_yaml(clean_before_dump=False)
rasa.shared.utils.io.write_text_file(as_yaml, filename)
def persist_clean(self, filename: Union[Text, Path]) -> None:
"""Write cleaned domain to a file."""
as_yaml = self.as_yaml(clean_before_dump=True)
rasa.shared.utils.io.write_text_file(as_yaml, filename)
def as_yaml(self, clean_before_dump: bool = False) -> Text:
"""Dump the `Domain` object as a YAML string.
This function preserves the orders of the keys in the domain.
Args:
clean_before_dump: When set to `True`, this method returns
a version of the domain without internal
information. Defaults to `False`.
Returns:
A string in YAML format representing the domain.
"""
# setting the `version` key first so that it appears at the top of YAML files
# thanks to the `should_preserve_key_order` argument
# of `dump_obj_as_yaml_to_string`
domain_data: Dict[Text, Any] = {
KEY_TRAINING_DATA_FORMAT_VERSION: LATEST_TRAINING_DATA_FORMAT_VERSION
}
if clean_before_dump:
domain_data.update(self.cleaned_domain())
else:
domain_data.update(self.as_dict())
if domain_data.get(KEY_RESPONSES, {}):
domain_data[KEY_RESPONSES] = self.get_responses_with_multilines(
domain_data[KEY_RESPONSES]
)
return rasa.shared.utils.io.dump_obj_as_yaml_to_string(
domain_data, should_preserve_key_order=True
)
def intent_config(self, intent_name: Text) -> Dict[Text, Any]:
"""Return the configuration for an intent."""
return self.intent_properties.get(intent_name, {})
@rasa.shared.utils.common.lazy_property
def intents(self) -> List[Text]:
"""Returns sorted list of intents."""
return sorted(self.intent_properties.keys())
@property
def _slots_for_domain_warnings(self) -> List[Text]:
"""Fetch names of slots that are used in domain warnings.
Excludes slots which aren't featurized.
"""
return [s.name for s in self._user_slots if s.influence_conversation]
@property
def _actions_for_domain_warnings(self) -> List[Text]:
"""Fetch names of actions that are used in domain warnings.
Includes user and form actions, but excludes those that are default actions.
"""
return [
a
for a in self.user_actions_and_forms
if a not in rasa.shared.core.constants.DEFAULT_ACTION_NAMES
]
@staticmethod
def _get_symmetric_difference(
domain_elements: Union[List[Text], Set[Text]],
training_data_elements: Optional[Union[List[Text], Set[Text]]],
) -> Dict[Text, Set[Text]]:
"""Get symmetric difference between a set of domain elements and a set of
training data elements.
Returns a dictionary containing a list of items found in the `domain_elements`
but not in `training_data_elements` at key `in_domain`, and a list of items
found in `training_data_elements` but not in `domain_elements` at key
        `in_training_data`.
"""
if training_data_elements is None:
training_data_elements = set()
in_domain_diff = set(domain_elements) - set(training_data_elements)
in_training_data_diff = set(training_data_elements) - set(domain_elements)
return {"in_domain": in_domain_diff, "in_training_data": in_training_data_diff}
@staticmethod
def _combine_with_responses(
actions: List[Text], responses: Dict[Text, Any]
) -> List[Text]:
"""Combines actions with utter actions listed in responses section."""
unique_utter_actions = [
a for a in sorted(list(responses.keys())) if a not in actions
]
return actions + unique_utter_actions
@staticmethod
def _combine_user_with_default_actions(user_actions: List[Text]) -> List[Text]:
# remove all user actions that overwrite default actions
# this logic is a bit reversed, you'd think that we should remove
# the action name from the default action names if the user overwrites
# the action, but there are some locations in the code where we
# implicitly assume that e.g. "action_listen" is always at location
# 0 in this array. to keep it that way, we remove the duplicate
# action names from the users list instead of the defaults
unique_user_actions = [
a
for a in user_actions
if a not in rasa.shared.core.constants.DEFAULT_ACTION_NAMES
]
return rasa.shared.core.constants.DEFAULT_ACTION_NAMES + unique_user_actions
def domain_warnings(
self,
intents: Optional[Union[List[Text], Set[Text]]] = None,
entities: Optional[Union[List[Text], Set[Text]]] = None,
actions: Optional[Union[List[Text], Set[Text]]] = None,
slots: Optional[Union[List[Text], Set[Text]]] = None,
) -> Dict[Text, Dict[Text, Set[Text]]]:
"""Generate domain warnings from intents, entities, actions and slots.
Returns a dictionary with entries for `intent_warnings`,
`entity_warnings`, `action_warnings` and `slot_warnings`. Excludes domain slots
from domain warnings in case they are not featurized.
"""
intent_warnings = self._get_symmetric_difference(self.intents, intents)
entity_warnings = self._get_symmetric_difference(self.entities, entities)
action_warnings = self._get_symmetric_difference(
self._actions_for_domain_warnings, actions
)
slot_warnings = self._get_symmetric_difference(
self._slots_for_domain_warnings, slots
)
return {
"intent_warnings": intent_warnings,
"entity_warnings": entity_warnings,
"action_warnings": action_warnings,
"slot_warnings": slot_warnings,
}
def _check_domain_sanity(self) -> None:
"""Make sure the domain is properly configured.
If the domain contains any duplicate slots, intents, actions
or entities, an InvalidDomain error is raised. This error
is also raised when intent-action mappings are incorrectly
named or a response is missing.
"""
def get_duplicates(my_items: Iterable[Any]) -> List[Any]:
"""Returns a list of duplicate items in my_items."""
return [
item
for item, count in collections.Counter(my_items).items()
if count > 1
]
def check_mappings(
intent_properties: Dict[Text, Dict[Text, Union[bool, List]]]
) -> List[Tuple[Text, Text]]:
"""Checks whether intent-action mappings use valid action names or texts."""
incorrect = []
for intent, properties in intent_properties.items():
if "triggers" in properties:
triggered_action = properties.get("triggers")
if triggered_action not in self.action_names_or_texts:
incorrect.append((intent, str(triggered_action)))
return incorrect
def get_exception_message(
duplicates: Optional[List[Tuple[List[Text], Text]]] = None,
mappings: List[Tuple[Text, Text]] = None,
) -> Text:
"""Return a message given a list of error locations."""
message = ""
if duplicates:
message += get_duplicate_exception_message(duplicates)
if mappings:
if message:
message += "\n"
message += get_mapping_exception_message(mappings)
return message
def get_mapping_exception_message(mappings: List[Tuple[Text, Text]]) -> Text:
"""Return a message given a list of duplicates."""
message = ""
for name, action_name in mappings:
if message:
message += "\n"
message += (
"Intent '{}' is set to trigger action '{}', which is "
"not defined in the domain.".format(name, action_name)
)
return message
def get_duplicate_exception_message(
duplicates: List[Tuple[List[Text], Text]]
) -> Text:
"""Return a message given a list of duplicates."""
message = ""
for d, name in duplicates:
if d:
if message:
message += "\n"
message += (
f"Duplicate {name} in domain. "
f"These {name} occur more than once in "
f"the domain: '{', '.join(d)}'."
)
return message
duplicate_actions = get_duplicates(self.action_names_or_texts)
duplicate_slots = get_duplicates([s.name for s in self.slots])
duplicate_entities = get_duplicates(self.entities)
incorrect_mappings = check_mappings(self.intent_properties)
if (
duplicate_actions
or duplicate_slots
or duplicate_entities
or incorrect_mappings
):
raise InvalidDomain(
get_exception_message(
[
(duplicate_actions, KEY_ACTIONS),
(duplicate_slots, KEY_SLOTS),
(duplicate_entities, KEY_ENTITIES),
],
incorrect_mappings,
)
)
def check_missing_templates(self) -> None:
"""Warn user of utterance names which have no specified response."""
rasa.shared.utils.io.raise_deprecation_warning(
"The terminology 'template' is deprecated and replaced by 'response'. Please use `check_missing_responses` instead of `check_missing_templates`.",
docs=f"{DOCS_URL_MIGRATION_GUIDE}#rasa-23-to-rasa-24",
)
self.check_missing_responses()
def check_missing_responses(self) -> None:
"""Warn user of utterance names which have no specified response."""
utterances = [
a for a in self.action_names_or_texts if a.startswith(UTTER_PREFIX)
]
missing_responses = [t for t in utterances if t not in self.responses.keys()]
if missing_responses:
for response in missing_responses:
rasa.shared.utils.io.raise_warning(
f"Action '{response}' is listed as a "
f"response action in the domain file, but there is "
f"no matching response defined. Please check your domain.",
docs=DOCS_URL_RESPONSES,
)
def is_empty(self) -> bool:
"""Check whether the domain is empty."""
return self.as_dict() == Domain.empty().as_dict()
@staticmethod
def is_domain_file(filename: Text) -> bool:
"""Checks whether the given file path is a Rasa domain file.
Args:
filename: Path of the file which should be checked.
Returns:
`True` if it's a domain file, otherwise `False`.
Raises:
YamlException: if the file seems to be a YAML file (extension) but
can not be read / parsed.
"""
from rasa.shared.data import is_likely_yaml_file
if not is_likely_yaml_file(filename):
return False
try:
content = rasa.shared.utils.io.read_yaml_file(filename)
except (RasaException, YamlSyntaxException):
return False
return any(key in content for key in ALL_DOMAIN_KEYS)
def slot_mapping_for_form(self, form_name: Text) -> Dict[Text, Any]:
"""Retrieve the slot mappings for a form which are defined in the domain.
Options:
- an extracted entity
- intent: value pairs
- trigger_intent: value pairs
- a whole message
or a list of them, where the first match will be picked
Args:
form_name: The name of the form.
Returns:
The slot mapping or an empty dictionary in case no mapping was found.
"""
        return self.forms.get(form_name, {}).get(REQUIRED_SLOTS_KEY, {})
class SlotMapping(Enum):
"""Defines the available slot mappings."""
FROM_ENTITY = 0
FROM_INTENT = 1
FROM_TRIGGER_INTENT = 2
FROM_TEXT = 3
def __str__(self) -> Text:
"""Returns a string representation of the object."""
return self.name.lower()
@staticmethod
def validate(mapping: Dict[Text, Any], form_name: Text, slot_name: Text) -> None:
"""Validates a slot mapping.
Args:
mapping: The mapping which is validated.
form_name: The name of the form which uses this slot mapping.
slot_name: The name of the slot which is mapped by this mapping.
Raises:
InvalidDomain: In case the slot mapping is not valid.
"""
if not isinstance(mapping, dict):
raise InvalidDomain(
f"Please make sure that the slot mappings for slot '{slot_name}' in "
f"your form '{form_name}' are valid dictionaries. Please see "
f"{DOCS_URL_FORMS} for more information."
)
validations = {
str(SlotMapping.FROM_ENTITY): ["entity"],
str(SlotMapping.FROM_INTENT): ["value"],
str(SlotMapping.FROM_TRIGGER_INTENT): ["value"],
str(SlotMapping.FROM_TEXT): [],
}
mapping_type = mapping.get("type")
required_keys = validations.get(mapping_type)
if required_keys is None:
raise InvalidDomain(
f"Your form '{form_name}' uses an invalid slot mapping of type "
f"'{mapping_type}' for slot '{slot_name}'. Please see "
f"{DOCS_URL_FORMS} for more information."
)
for required_key in required_keys:
if mapping.get(required_key) is None:
raise InvalidDomain(
f"You need to specify a value for the key "
f"'{required_key}' in the slot mapping of type '{mapping_type}' "
f"for slot '{slot_name}' in the form '{form_name}'. Please see "
f"{DOCS_URL_FORMS} for more information."
)
def _validate_slot_mappings(forms: Union[Dict, List]) -> None:
if isinstance(forms, list):
if not all(isinstance(form_name, str) for form_name in forms):
raise InvalidDomain(
f"If you use the deprecated list syntax for forms, "
f"all form names have to be strings. Please see "
f"{DOCS_URL_FORMS} for more information."
)
return
if not isinstance(forms, dict):
raise InvalidDomain("Forms have to be specified as dictionary.")
for form_name, form_data in forms.items():
if form_data is None:
continue
if not isinstance(form_data, Dict):
raise InvalidDomain(
f"The contents of form '{form_name}' were specified "
f"as '{type(form_data)}'. They need to be specified "
f"as dictionary. Please see {DOCS_URL_FORMS} "
f"for more information."
)
if IGNORED_INTENTS in form_data and REQUIRED_SLOTS_KEY not in form_data:
raise InvalidDomain(
f"If you use the `{IGNORED_INTENTS}` parameter in your form, then "
f"the keyword `{REQUIRED_SLOTS_KEY}` should precede the definition "
f"of your slot mappings. Please see {DOCS_URL_FORMS} "
f"for more information."
)
if REQUIRED_SLOTS_KEY in form_data:
slots = forms[form_name].get(REQUIRED_SLOTS_KEY)
else:
rasa.shared.utils.io.raise_deprecation_warning(
f"The definition of slot mappings in your form "
f"should always be preceded by the keyword `{REQUIRED_SLOTS_KEY}`. "
f"The lack of this keyword will be deprecated in "
f"Rasa Open Source 3.0.0. Please see {DOCS_URL_FORMS} "
f"for more information.",
)
slots = form_data
if not isinstance(slots, Dict):
raise InvalidDomain(
f"The slots for form '{form_name}' were specified "
f"as '{type(slots)}'. They need to be specified "
f"as dictionary. Please see {DOCS_URL_FORMS} "
f"for more information."
)
for slot_name, slot_mappings in slots.items():
if not isinstance(slot_mappings, list):
raise InvalidDomain(
f"The slot mappings for slot '{slot_name}' in "
f"form '{form_name}' have type '{type(slot_mappings)}'. "
f"It is required to provide a list of slot "
f"mappings. Please see {DOCS_URL_FORMS} "
f"for more information."
)
for slot_mapping in slot_mappings:
SlotMapping.validate(slot_mapping, form_name, slot_name)
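# Illustrative example (made-up slot and form names): a mapping such as
# {"type": "from_entity", "entity": "cuisine"} passes SlotMapping.validate,
# whereas {"type": "from_entity"} raises InvalidDomain because the required
# "entity" key is missing for that mapping type.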
| 39.844969 | 171 | 0.613131 |
794024b3a013e6bf20a0116927ca83547f33eb65 | 137 | py | Python | examples/debugging/error.py | msarahan/guildai | 99bdd09683291dbc206b6dde1b327d47401d29eb | [
"Apache-2.0"
] | 694 | 2018-11-30T01:06:30.000Z | 2022-03-31T14:46:26.000Z | examples/debugging/error.py | msarahan/guildai | 99bdd09683291dbc206b6dde1b327d47401d29eb | [
"Apache-2.0"
] | 323 | 2018-11-05T17:44:34.000Z | 2022-03-31T16:56:41.000Z | examples/debugging/error.py | msarahan/guildai | 99bdd09683291dbc206b6dde1b327d47401d29eb | [
"Apache-2.0"
] | 68 | 2019-04-01T04:24:47.000Z | 2022-02-24T17:22:04.000Z |
"""Illustrates an error that can be debugged using post mortem debugging."""
print("hello")
foo = 123
zero = 0
foo / zero
print("done")
| 17.125 | 76 | 0.70073 |
794024c4af2ec20e845f0d9d227bc2cd8c798cda | 3,272 | py | Python | tools/mkatlas.py | ChenThread/fromage | 0feec3fd17a67da4de9fbcb34c77da4ef6d24211 | [
"Zlib"
] | 54 | 2017-11-19T00:42:58.000Z | 2022-03-07T21:45:04.000Z | tools/mkatlas.py | ChenThread/fromage | 0feec3fd17a67da4de9fbcb34c77da4ef6d24211 | [
"Zlib"
] | 2 | 2020-03-25T02:32:49.000Z | 2021-12-05T19:35:26.000Z | tools/mkatlas.py | ChenThread/fromage | 0feec3fd17a67da4de9fbcb34c77da4ef6d24211 | [
"Zlib"
] | 9 | 2017-11-19T14:40:57.000Z | 2022-01-10T05:04:12.000Z |
import sys, struct
import numpy as np
import scipy.cluster.vq as vq
from PIL import Image, ImageOps, ImageColor
im = Image.open(sys.argv[1])
fp = open(sys.argv[2], "wb")
clut = [None] * 256
single_pixel_colors = [None] * 256
imgdata = [None] * (256*256)
imgwidth = 256
mipmap_levels = 4
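# Overview (inferred from the code below, since the script has no docstring):
# it appears to slice a 256x256 atlas image into 256 tiles of 16x16 pixels,
# quantise each tile to a 16-colour palette with k-means (scipy.cluster.vq),
# and pack 4-bit pixel indices, per-tile CLUTs and mipmap levels into the
# binary file named by the second command-line argument.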
def draw_4bit(im, ix, iy, iw, ih, tx, ty):
# generate palette
img = [None] * (mipmap_levels+1)
img_data = [None] * (mipmap_levels+1)
img_translucent = [None] * (mipmap_levels+1)
img[0] = im.crop((ix, iy, ix+iw, iy+ih)).convert("RGBA")
img_data[0] = np.zeros((iw * ih, 3))
img_translucent[0] = np.zeros((iw * ih))
for irm in range(1,mipmap_levels+1):
img[irm] = img[0].resize((iw>>irm, ih>>irm), Image.ANTIALIAS)
img_data[irm] = np.zeros(((iw>>irm) * (ih>>irm), 3))
img_translucent[irm] = np.zeros(((iw>>irm) * (ih>>irm)))
has_translucent = False
for irm in range(0,mipmap_levels+1):
for iry in range(ih>>irm):
for irx in range(iw>>irm):
img_pixel = img[irm].getpixel((irx, iry))
img_data[irm][iry * (iw>>irm) + irx] = [
int(img_pixel[0] * 31 / 255.0),
int(img_pixel[1] * 31 / 255.0),
int(img_pixel[2] * 31 / 255.0)
]
if img_pixel[3] <= 1:
img_translucent[irm][iry * (iw>>irm) + irx] = 1
has_translucent = True
centroids,_ = vq.kmeans(img_data[0], 15 if has_translucent else 16)
palette = [0x0000] * 16
for pl in range(len(centroids)):
r = max(0, min(31, int(centroids[pl][0] + 0.5)))
g = max(0, min(31, int(centroids[pl][1] + 0.5)))
b = max(0, min(31, int(centroids[pl][2] + 0.5)))
palette[pl] = 0x8000|(r<<0)|(g<<5)|(b<<10)
# TODO: floyd-steinberg
for irm in range(0,mipmap_levels+1):
indexes,_ = vq.vq(img_data[irm],centroids)
for iry in range(ih>>irm):
for irx in range(iw>>irm):
iridx = iry * (iw>>irm) + irx
impc = 15
if img_translucent[irm][iridx] == 0:
impc = indexes[iridx]
imgdata[((ty>>irm)+iry)*imgwidth + ((tx>>irm)+irx)] = impc
single_pixel = img[mipmap_levels].getpixel((0, 0))
single_pixel_color = 0
single_pixel_color |= int(single_pixel[2]) << 16
single_pixel_color |= int(single_pixel[1]) << 8
single_pixel_color |= int(single_pixel[0])
return palette, single_pixel_color
# imgp = img.convert(mode='P', palette=Image.ADAPTIVE, colors=16)
# imgpalr = imgp.getpalette()
# palette = [None] * 16
# for col in range(16):
# palette[col] = [imgpalr[col * 3], imgpalr[col * 3 + 1], imgpalr[col * 3 + 2], 255]
# for iry in range(ih):
# for irx in range(iw):
# impc = imgp.getpixel((irx, iry))
# impp = img.getpixel((irx, iry))
# if len(impp) > 3:
# palette[impc][3] = impp[3]
# imgdata[(ty+iry)*imgwidth + tx+irx] = impc
# return palette
def write_palette(fp, palette):
for ip in range(16):
fp.write(struct.pack("<H", palette[ip]))
def add_texture(im, ix, iy, i):
tx = (i & 15) << 4
ty = (i & 240)
clut[i], single_pixel_colors[i] = draw_4bit(im, ix, iy, 16, 16, tx, ty)
for i in range(256):
tx = (i & 15) << 4
ty = (i & 240)
add_texture(im, tx, ty, i)
for iy in range(128):
fp.write(struct.pack("<I", single_pixel_colors[iy + 128]))
for iy in range(256):
for ix in range(0, imgwidth, 2):
v = (imgdata[iy*imgwidth+ix+1] << 4) | (imgdata[iy*imgwidth+ix] << 0)
fp.write(struct.pack("<B", v))
write_palette(fp, clut[iy])
fp.close()
| 32.078431 | 85 | 0.627139 |
794025019423ba8528ac7801e16f4532f255a79d | 2,686 | py | Python | bionlp/evaluate/postprocess.py | abhyudaynj/LSTM-CRF-models | a29780c24f19512177bcec837bc06ea839e8108c | [
"MIT"
] | 191 | 2016-08-19T08:20:59.000Z | 2021-09-07T09:42:00.000Z | bionlp/evaluate/postprocess.py | gabrielStanovsky/LSTM-CRF-models | a29780c24f19512177bcec837bc06ea839e8108c | [
"MIT"
] | 3 | 2017-02-17T06:01:19.000Z | 2020-05-05T13:33:02.000Z | bionlp/evaluate/postprocess.py | gabrielStanovsky/LSTM-CRF-models | a29780c24f19512177bcec837bc06ea839e8108c | [
"MIT"
] | 69 | 2016-09-28T10:10:22.000Z | 2021-05-18T09:37:51.000Z |
import pickle, sys, random, logging, os, json
from operator import itemgetter
logger=logging.getLogger(__name__)
IGNORE_TAG='None'
def prepare_document_report(o,l,p,encoded_documents,output_dir):
logger.info('Preparing the documents reports with the IGNORE_TAG = \'{0}\' ( This is case sensitive)'.format(IGNORE_TAG))
untouched_tokens=sum([len(sent_token) for sent_token in l])
logger.info('Total Tokens {0}'.format(untouched_tokens))
doc_list={}
for s_id,(sentence,sent_token) in enumerate(o):
if len(p[s_id]) ==0:
continue
for t_id,token in enumerate(sent_token):
tem=p[s_id]
if t_id >= len(p[s_id]):
print t_id, len(p[s_id])
token.attr['predicted']=tem[t_id]
produced_sentence=[(tk.value,(tk.attr['offset'],tk.attr['offset']+tk.attr['length']),tk.attr['predicted']) for tk in sent_token]
if sent_token[0].attr['document'] in doc_list:
doc_list[sent_token[0].attr['document']].append(label_aggregator(produced_sentence))
else:
doc_list[sent_token[0].attr['document']]=[label_aggregator(produced_sentence)]
logger.info('Writing the predicted annotations to files in {0}'.format(output_dir))
for idxs,document in enumerate(encoded_documents.value):
doc_id=document.id
if doc_id not in doc_list:
logger.warning('Could not find the Document {0} in the processed output. Please verify'.format(doc_id))
continue
doc_text=document.attr['raw_text']
filename='-'.join(subname for subname in str(doc_id).split('/') if subname.strip()!='')
with open(os.path.join(output_dir,'{0}.json'.format(filename)),'w') as fout:
ann_id=0
json_list=[]
for res in doc_list[doc_id]:
for tok in res:
if tok[2]!=IGNORE_TAG:
json_list.append({'id':ann_id,'type':tok[2],'begin':tok[1][0],'end':tok[1][1],'tokenized_text':tok[0],'text':doc_text[tok[1][0]:tok[1][1]]})
ann_id+=1
json_obj={}
json_obj['file_id']=doc_id
json_obj['predictions']=json_list
json.dump(json_list,fout)
with open(os.path.join(output_dir,'{0}.txt'.format(filename)),'w') as fout:
fout.write(doc_text)
fout.write('\n')
def label_aggregator(sent):
idx =0
while idx < len(sent)-1:
if sent[idx][2] == sent[idx+1][2] and sent[idx][2]!=IGNORE_TAG :
sent[idx:idx+2]=[(sent[idx][0]+' '+sent[idx+1][0],(sent[idx][1][0],sent[idx+1][1][1]),sent[idx][2])]
else:
idx+=1
return sent
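# Illustrative example (made-up tokens):
#   label_aggregator([("New", (0, 3), "LOC"), ("York", (4, 8), "LOC"),
#                     ("is", (9, 11), "None")])
# returns [("New York", (0, 8), "LOC"), ("is", (9, 11), "None")]: adjacent
# tokens with the same non-IGNORE_TAG label are merged, others are untouched.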
| 47.122807 | 164 | 0.605733 |
794025492dc046c980c7d6a1696cf7c753b657d0 | 7,486 | py | Python | Sketches/MH/MobileReframe/Chassis.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 12 | 2015-10-20T10:22:01.000Z | 2021-07-19T10:09:44.000Z | Sketches/MH/MobileReframe/Chassis.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 2 | 2015-10-20T10:22:55.000Z | 2017-02-13T11:05:25.000Z | Sketches/MH/MobileReframe/Chassis.py | sparkslabs/kamaelia_orig | 24b5f855a63421a1f7c6c7a35a7f4629ed955316 | [
"Apache-2.0"
] | 6 | 2015-03-09T12:51:59.000Z | 2020-03-01T13:06:21.000Z |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
=======================================================
Inbox size limiting Pipelines, Graphlines and Carousels
=======================================================
Extended versions of Kamaelia.Chassis.Pipeline, Kamaelia.Chassis.Graphline and
Kamaelia.Chassis.Carousel that add the ability to specify size limits for
inboxes of components.
Example Usages
--------------
A pipeline with inbox size limits on 3 of the components' "inbox" inboxes::
Pipeline( 5, MyComponent(), # 'inbox' inbox limited to 5 items
2, MyComponent(), # 'inbox' inbox limited to 2 items
MyComponent(), # 'inbox' inbox unlimited
28, MyComponent() # 'inbox' inbox limited to 28 items
)
A graphline where component 'A' has a size limit of 5 on its "inbox" inbox; and
component 'C' has a size limit of 17 on its "control" inbox::
Graphline( A = MyComponent(),
B = MyComponent(),
C = MyComponent(),
linkages = { ... },
boxsizes = {
("A","inbox") : 5,
("C","control") : 17
}
)
A Carousel, where the child component will have a size limit of 5 on its "inbox"
inbox::
Carousel( MyComponent(), boxsize=5 )
Decoding a Dirac video file and saving each frame in a separate file::
Pipeline(
RateControlledFileReader("video.dirac", ... ),
DiracDecoder(),
TagWithSequenceNumber(),
InboxControlledCarousel(
lambda (seqnum, frame) :
Pipeline( OneShot(frame),
FrameToYUV4MPEG(),
SimpleFileWriter("%08d.yuv4mpeg" % seqnum),
)
),
)
More details
------------
The behaviour of these three components/prefabs is identical to their original
counterparts (Kamaelia.Chassis.Pipeline, Kamaelia.Chassis.Graphline and
Kamaelia.Chassis.Carousel).
*For Pipelines*, if you want to size limit the "inbox" inbox of a particular
component in the pipeline, then put the size limit as an integer before it.
Any component without an integer before it is left with the default of an
unlimited "inbox" inbox.
The behaviour therefore reduces back to be identical to that of the normal
Pipeline component.
*For Graphlines*, if you want to size limit particular inboxes, supply the
"boxsizes" argument with a dictionary that maps (componentName, boxName) keys
to the size limit for that box.
Again, if you don't specify a "boxsizes" argument, then behaviour is identical
to that of the normal Graphline component.
*For Carousels*, if you want a size limit on the "inbox" inbox of the child
component (created by the factory function), then specify it using the
"boxsizes" argument.
Again, if you don't specify a "boxsizes" argument, then behaviour is identical
to that of the normal Carousel component.
*InboxControlledCarousel* behaves identically to Carousel.
The "inbox" inbox is equivalent to the "next" inbox of Carousel.
The "data_inbox" inbox is equivalent to the "inbox" inbox of Carousel.
"""
from Kamaelia.Chassis.Pipeline import Pipeline as _Pipeline
def Pipeline(*components):
"""\
Pipeline(\*components) -> new Pipeline component.
Encapsulates the specified set of components and wires them up in a chain
(a Pipeline) in the order you provided them.
Keyword arguments:
- components -- the components you want, in the order you want them wired up.
Any Integers set the "inbox" inbox size limit for the component that follows them.
"""
truecomponents = []
boxsize=False
for item in components:
if isinstance(item,int):
boxsize=item
elif item is None:
boxsize=item
else:
component=item
if boxsize != False:
component.inboxes['inbox'].setSize(boxsize)
boxsize=False
truecomponents.append(component)
return _Pipeline(*truecomponents)
from Kamaelia.Chassis.Graphline import Graphline as _Graphline
def Graphline(linkages = None, boxsizes = None,**components):
"""\
Graphline([linkages][,boxsizes],\*\*components) -> new Graphline component
Encapsulates the specified set of components and wires them up with the
specified linkages.
Keyword arguments:
- linkages -- dictionary mapping ("componentname","boxname") to ("componentname","boxname")
- boxsizes -- dictionary mapping ("componentname","boxname") to size limit for inbox
- components -- dictionary mapping names to component instances (default is nothing)
"""
g = _Graphline(linkages,**components)
if boxsizes is not None:
for ((componentname,boxname),size) in boxsizes.items():
components[componentname].inboxes[boxname].setSize(size)
return g
#from Kamaelia.Chassis.Carousel import Carousel as _Carousel
from CarouselRewrite import Carousel as _Carousel
def Carousel(componentFactory, make1stRequest=False, boxsize=None):
"""\
    Carousel(componentFactory, [make1stRequest], [boxsize]) -> new Carousel component
Create a Carousel component that makes child components one at a time (in
carousel fashion) using the supplied factory function.
Keyword arguments:
- componentFactory -- function that takes a single argument and returns a component
- make1stRequest -- if True, Carousel will send an initial "NEXT" request. (default=False)
- boxsize -- size limit for "inbox" inbox of the created child component
"""
if boxsize is not None:
def setBoxSize(component):
component.inboxes['inbox'].setSize(boxsize)
return component
newComponentFactory = lambda meta : setBoxSize(componentFactory(meta))
else:
newComponentFactory = componentFactory
return _Carousel(newComponentFactory, make1stRequest)
def InboxControlledCarousel(*argv, **argd):
return Graphline( CAROUSEL = Carousel( *argv, **argd ),
linkages = {
("", "inbox") : ("CAROUSEL", "next"),
("", "data_inbox") : ("CAROUSEL", "inbox"),
("", "control") : ("CAROUSEL", "control"),
("CAROUSEL", "outbox") : ("", "outbox"),
("CAROUSEL", "signal") : ("", "signal"),
}
)
__kamaelia_prefabs__ = ( Pipeline, Graphline, Carousel, )
| 34.497696 | 97 | 0.637991 |
7940255f38d8b4d03577f0a5891a7e03e07f7bf2 | 5,007 | py | Python | molecule/scenario.py | RuriRyan/molecule | 952552a807e5c3873e39d181cb12565a1db4008d | [
"MIT"
] | 1 | 2020-11-05T18:12:36.000Z | 2020-11-05T18:12:36.000Z | molecule/scenario.py | RuriRyan/molecule | 952552a807e5c3873e39d181cb12565a1db4008d | [
"MIT"
] | 8 | 2020-09-16T07:32:49.000Z | 2020-09-20T15:17:32.000Z | molecule/scenario.py | RuriRyan/molecule | 952552a807e5c3873e39d181cb12565a1db4008d | [
"MIT"
] | null | null | null |
# Copyright (c) 2015-2017 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
from molecule import logger
from molecule import scenarios
LOG = logger.get_logger(__name__)
class Scenario(object):
"""
    A scenario allows Molecule to test a role in a particular way; this is a
    fundamental change from Molecule v1.
A scenario is a self-contained directory containing everything necessary
for testing the role in a particular way. The default scenario is named
`default`, and every role should contain a default scenario.
Any option set in this section will override the defaults.
.. code-block:: yaml
scenario:
name: default
create_sequence:
- create
- prepare
check_sequence:
- destroy
- create
- prepare
- converge
- check
- destroy
converge_sequence:
- create
- prepare
- converge
destroy_sequence:
- destroy
test_sequence:
- lint
- destroy
- dependency
- syntax
- create
- prepare
- converge
- idempotence
- side_effect
- verify
- destroy
""" # noqa
def __init__(self, config):
"""
Initialize a new scenario class and returns None.
:param config: An instance of a Molecule config.
:return: None
"""
self.config = config
self._setup()
@property
def name(self):
return self.config.config['scenario']['name']
@property
def directory(self):
return os.path.dirname(self.config.molecule_file)
@property
def ephemeral_directory(self):
return ephemeral_directory(self.directory)
@property
def check_sequence(self):
return self.config.config['scenario']['check_sequence']
@property
def converge_sequence(self):
return self.config.config['scenario']['converge_sequence']
@property
def create_sequence(self):
return self.config.config['scenario']['create_sequence']
@property
def dependency_sequence(self):
return ['dependency']
@property
def destroy_sequence(self):
return self.config.config['scenario']['destroy_sequence']
@property
def idempotence_sequence(self):
return ['idempotence']
@property
def lint_sequence(self):
return ['lint']
@property
def prepare_sequence(self):
return ['prepare']
@property
def side_effect_sequence(self):
return ['side_effect']
@property
def syntax_sequence(self):
return ['syntax']
@property
def test_sequence(self):
return self.config.config['scenario']['test_sequence']
@property
def verify_sequence(self):
return ['verify']
@property
def sequence(self):
"""
        Select the sequence based on this scenario's name and the configured
        subcommand and return it as a list.
:return: list
"""
s = scenarios.Scenarios([self.config])
matrix = s._get_matrix()
try:
return matrix[self.name][self.config.subcommand]
except KeyError:
# TODO(retr0h): May change this handling in the future.
return []
def _setup(self):
"""
Prepare the scenario for Molecule and returns None.
:return: None
"""
if not os.path.isdir(self.ephemeral_directory):
os.mkdir(self.ephemeral_directory)
def ephemeral_directory(path):
d = os.getenv('MOLECULE_EPHEMERAL_DIRECTORY')
if d:
return os.path.join(path, d)
return os.path.join(path, '.molecule')
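# Illustrative example (made-up path): with MOLECULE_EPHEMERAL_DIRECTORY unset,
# ephemeral_directory('/projects/role/molecule/default') returns
# '/projects/role/molecule/default/.molecule'; with the variable set to
# '.cache' the same call returns '/projects/role/molecule/default/.cache'.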
| 28.129213 | 79 | 0.632115 |
79402561fde5e6e7c76111aeffaa5c2a9f1f4c37 | 68,497 | py | Python | pysc2/lib/features.py | AdamSchunk/pysc2 | c793aeb3a9bf70ffa9880205018a21d5c29b9582 | [
"Apache-2.0"
] | 1 | 2019-09-20T05:22:56.000Z | 2019-09-20T05:22:56.000Z | pysc2/lib/features.py | AdamSchunk/pysc2 | c793aeb3a9bf70ffa9880205018a21d5c29b9582 | [
"Apache-2.0"
] | null | null | null | pysc2/lib/features.py | AdamSchunk/pysc2 | c793aeb3a9bf70ffa9880205018a21d5c29b9582 | [
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Render feature layers from SC2 Observation protos into numpy arrays."""
# pylint: disable=g-complex-comprehension
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl import logging
import random
import enum
import numpy as np
import six
from pysc2.lib import actions
from pysc2.lib import colors
from pysc2.lib import named_array
from pysc2.lib import point
from pysc2.lib import static_data
from pysc2.lib import stopwatch
from pysc2.lib import transform
from s2clientprotocol import raw_pb2 as sc_raw
from s2clientprotocol import sc2api_pb2 as sc_pb
sw = stopwatch.sw
EPSILON = 1e-5
class FeatureType(enum.Enum):
SCALAR = 1
CATEGORICAL = 2
class PlayerRelative(enum.IntEnum):
"""The values for the `player_relative` feature layers."""
NONE = 0
SELF = 1
ALLY = 2
NEUTRAL = 3
ENEMY = 4
class Visibility(enum.IntEnum):
"""Values for the `visibility` feature layers."""
HIDDEN = 0
SEEN = 1
VISIBLE = 2
class Effects(enum.IntEnum):
"""Values for the `effects` feature layer."""
# pylint: disable=invalid-name
none = 0
PsiStorm = 1
GuardianShield = 2
TemporalFieldGrowing = 3
TemporalField = 4
ThermalLance = 5
ScannerSweep = 6
NukeDot = 7
LiberatorDefenderZoneSetup = 8
LiberatorDefenderZone = 9
BlindingCloud = 10
CorrosiveBile = 11
LurkerSpines = 12
# pylint: enable=invalid-name
class ScoreCumulative(enum.IntEnum):
"""Indices into the `score_cumulative` observation."""
score = 0
idle_production_time = 1
idle_worker_time = 2
total_value_units = 3
total_value_structures = 4
killed_value_units = 5
killed_value_structures = 6
collected_minerals = 7
collected_vespene = 8
collection_rate_minerals = 9
collection_rate_vespene = 10
spent_minerals = 11
spent_vespene = 12
class ScoreByCategory(enum.IntEnum):
"""Indices for the `score_by_category` observation's first dimension."""
food_used = 0
killed_minerals = 1
killed_vespene = 2
lost_minerals = 3
lost_vespene = 4
friendly_fire_minerals = 5
friendly_fire_vespene = 6
used_minerals = 7
used_vespene = 8
total_used_minerals = 9
total_used_vespene = 10
class ScoreCategories(enum.IntEnum):
"""Indices for the `score_by_category` observation's second dimension."""
none = 0
army = 1
economy = 2
technology = 3
upgrade = 4
class ScoreByVital(enum.IntEnum):
"""Indices for the `score_by_vital` observation's first dimension."""
total_damage_dealt = 0
total_damage_taken = 1
total_healed = 2
class ScoreVitals(enum.IntEnum):
"""Indices for the `score_by_vital` observation's second dimension."""
life = 0
shields = 1
energy = 2
class Player(enum.IntEnum):
"""Indices into the `player` observation."""
player_id = 0
minerals = 1
vespene = 2
food_used = 3
food_cap = 4
food_army = 5
food_workers = 6
idle_worker_count = 7
army_count = 8
warp_gate_count = 9
larva_count = 10
class UnitLayer(enum.IntEnum):
"""Indices into the unit layers in the observations."""
unit_type = 0
player_relative = 1
health = 2
shields = 3
energy = 4
transport_slots_taken = 5
build_progress = 6
class UnitCounts(enum.IntEnum):
"""Indices into the `unit_counts` observations."""
unit_type = 0
count = 1
class FeatureUnit(enum.IntEnum):
"""Indices for the `feature_unit` observations."""
unit_type = 0
alliance = 1
health = 2
shield = 3
energy = 4
cargo_space_taken = 5
build_progress = 6
health_ratio = 7
shield_ratio = 8
energy_ratio = 9
display_type = 10
owner = 11
x = 12
y = 13
facing = 14
radius = 15
cloak = 16
is_selected = 17
is_blip = 18
is_powered = 19
mineral_contents = 20
vespene_contents = 21
cargo_space_max = 22
assigned_harvesters = 23
ideal_harvesters = 24
weapon_cooldown = 25
order_length = 26 # If zero, the unit is idle.
order_id_0 = 27
order_id_1 = 28
tag = 29 # Unique identifier for a unit (only populated for raw units).
hallucination = 30
buff_id_0 = 31
buff_id_1 = 32
addon_unit_type = 33
active = 34
is_on_screen = 35
order_progress_0 = 36
order_progress_1 = 37
order_id_2 = 38
order_id_3 = 39
is_in_cargo = 40
buff_duration_remain = 41
buff_duration_max = 42
attack_upgrade_level = 43
armor_upgrade_level = 44
shield_upgrade_level = 45
class EffectPos(enum.IntEnum):
"""Positions of the active effects."""
effect = 0
alliance = 1
owner = 2
radius = 3
x = 4
y = 5
class Radar(enum.IntEnum):
"""Positions of the Sensor towers."""
x = 0
y = 1
radius = 2
class ProductionQueue(enum.IntEnum):
"""Indices for the `production_queue` observations."""
ability_id = 0
build_progress = 1
class Feature(collections.namedtuple(
"Feature", ["index", "name", "layer_set", "full_name", "scale", "type",
"palette", "clip"])):
"""Define properties of a feature layer.
Attributes:
index: Index of this layer into the set of layers.
name: The name of the layer within the set.
layer_set: Which set of feature layers to look at in the observation proto.
full_name: The full name including for visualization.
scale: Max value (+1) of this layer, used to scale the values.
type: A FeatureType for scalar vs categorical.
palette: A color palette for rendering.
clip: Whether to clip the values for coloring.
"""
__slots__ = ()
dtypes = {
1: np.uint8,
8: np.uint8,
16: np.uint16,
32: np.int32,
}
def unpack(self, obs):
"""Return a correctly shaped numpy array for this feature."""
planes = getattr(obs.feature_layer_data, self.layer_set)
plane = getattr(planes, self.name)
return self.unpack_layer(plane)
@staticmethod
@sw.decorate
def unpack_layer(plane):
"""Return a correctly shaped numpy array given the feature layer bytes."""
size = point.Point.build(plane.size)
if size == (0, 0):
# New layer that isn't implemented in this SC2 version.
return None
data = np.frombuffer(plane.data, dtype=Feature.dtypes[plane.bits_per_pixel])
if plane.bits_per_pixel == 1:
data = np.unpackbits(data)
if data.shape[0] != size.x * size.y:
# This could happen if the correct length isn't a multiple of 8, leading
# to some padding bits at the end of the string which are incorrectly
# interpreted as data.
data = data[:size.x * size.y]
return data.reshape(size.y, size.x)
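  # Worked example (illustrative sizes only): a 1 bit-per-pixel plane for a
  # 5x3 layer arrives as 2 bytes; np.unpackbits yields 16 values, the trailing
  # padding bit is dropped to leave 5 * 3 = 15 values, and the result is
  # reshaped to (size.y, size.x) == (3, 5).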
@staticmethod
@sw.decorate
def unpack_rgb_image(plane):
"""Return a correctly shaped numpy array given the image bytes."""
assert plane.bits_per_pixel == 24, "{} != 24".format(plane.bits_per_pixel)
size = point.Point.build(plane.size)
data = np.frombuffer(plane.data, dtype=np.uint8)
return data.reshape(size.y, size.x, 3)
@sw.decorate
def color(self, plane):
if self.clip:
plane = np.clip(plane, 0, self.scale - 1)
return self.palette[plane]
class ScreenFeatures(collections.namedtuple("ScreenFeatures", [
"height_map", "visibility_map", "creep", "power", "player_id",
"player_relative", "unit_type", "selected", "unit_hit_points",
"unit_hit_points_ratio", "unit_energy", "unit_energy_ratio", "unit_shields",
"unit_shields_ratio", "unit_density", "unit_density_aa", "effects",
"hallucinations", "cloaked", "blip", "buffs", "buff_duration", "active",
"build_progress", "pathable", "buildable", "placeholder"])):
"""The set of screen feature layers."""
__slots__ = ()
def __new__(cls, **kwargs):
feats = {}
for name, (scale, type_, palette, clip) in six.iteritems(kwargs):
feats[name] = Feature(
index=ScreenFeatures._fields.index(name),
name=name,
layer_set="renders",
full_name="screen " + name,
scale=scale,
type=type_,
palette=palette(scale) if callable(palette) else palette,
clip=clip)
return super(ScreenFeatures, cls).__new__(cls, **feats) # pytype: disable=missing-parameter
class MinimapFeatures(collections.namedtuple("MinimapFeatures", [
"height_map", "visibility_map", "creep", "camera", "player_id",
"player_relative", "selected", "unit_type", "alerts", "pathable",
"buildable"])):
"""The set of minimap feature layers."""
__slots__ = ()
def __new__(cls, **kwargs):
feats = {}
for name, (scale, type_, palette) in six.iteritems(kwargs):
feats[name] = Feature(
index=MinimapFeatures._fields.index(name),
name=name,
layer_set="minimap_renders",
full_name="minimap " + name,
scale=scale,
type=type_,
palette=palette(scale) if callable(palette) else palette,
clip=False)
return super(MinimapFeatures, cls).__new__(cls, **feats) # pytype: disable=missing-parameter
SCREEN_FEATURES = ScreenFeatures(
height_map=(256, FeatureType.SCALAR, colors.height_map, False),
visibility_map=(4, FeatureType.CATEGORICAL,
colors.VISIBILITY_PALETTE, False),
creep=(2, FeatureType.CATEGORICAL, colors.CREEP_PALETTE, False),
power=(2, FeatureType.CATEGORICAL, colors.POWER_PALETTE, False),
player_id=(17, FeatureType.CATEGORICAL,
colors.PLAYER_ABSOLUTE_PALETTE, False),
player_relative=(5, FeatureType.CATEGORICAL,
colors.PLAYER_RELATIVE_PALETTE, False),
unit_type=(max(static_data.UNIT_TYPES) + 1, FeatureType.CATEGORICAL,
colors.unit_type, False),
selected=(2, FeatureType.CATEGORICAL, colors.SELECTED_PALETTE, False),
unit_hit_points=(1600, FeatureType.SCALAR, colors.hot, True),
unit_hit_points_ratio=(256, FeatureType.SCALAR, colors.hot, False),
unit_energy=(1000, FeatureType.SCALAR, colors.hot, True),
unit_energy_ratio=(256, FeatureType.SCALAR, colors.hot, False),
unit_shields=(1000, FeatureType.SCALAR, colors.hot, True),
unit_shields_ratio=(256, FeatureType.SCALAR, colors.hot, False),
unit_density=(16, FeatureType.SCALAR, colors.hot, True),
unit_density_aa=(256, FeatureType.SCALAR, colors.hot, False),
effects=(16, FeatureType.CATEGORICAL, colors.effects, False),
hallucinations=(2, FeatureType.CATEGORICAL, colors.POWER_PALETTE, False),
cloaked=(2, FeatureType.CATEGORICAL, colors.POWER_PALETTE, False),
blip=(2, FeatureType.CATEGORICAL, colors.POWER_PALETTE, False),
buffs=(max(static_data.BUFFS) + 1, FeatureType.CATEGORICAL,
colors.buffs, False),
buff_duration=(256, FeatureType.SCALAR, colors.hot, False),
active=(2, FeatureType.CATEGORICAL, colors.POWER_PALETTE, False),
build_progress=(256, FeatureType.SCALAR, colors.hot, False),
pathable=(2, FeatureType.CATEGORICAL, colors.winter, False),
buildable=(2, FeatureType.CATEGORICAL, colors.winter, False),
placeholder=(2, FeatureType.CATEGORICAL, colors.winter, False),
)
MINIMAP_FEATURES = MinimapFeatures(
height_map=(256, FeatureType.SCALAR, colors.height_map),
visibility_map=(4, FeatureType.CATEGORICAL, colors.VISIBILITY_PALETTE),
creep=(2, FeatureType.CATEGORICAL, colors.CREEP_PALETTE),
camera=(2, FeatureType.CATEGORICAL, colors.CAMERA_PALETTE),
player_id=(17, FeatureType.CATEGORICAL, colors.PLAYER_ABSOLUTE_PALETTE),
player_relative=(5, FeatureType.CATEGORICAL,
colors.PLAYER_RELATIVE_PALETTE),
selected=(2, FeatureType.CATEGORICAL, colors.winter),
unit_type=(max(static_data.UNIT_TYPES) + 1, FeatureType.CATEGORICAL,
colors.unit_type),
alerts=(2, FeatureType.CATEGORICAL, colors.winter),
pathable=(2, FeatureType.CATEGORICAL, colors.winter),
buildable=(2, FeatureType.CATEGORICAL, colors.winter),
)
def _to_point(dims):
"""Convert (width, height) or size -> point.Point."""
assert dims
if isinstance(dims, (tuple, list)):
if len(dims) != 2:
raise ValueError(
"A two element tuple or list is expected here, got {}.".format(dims))
else:
width = int(dims[0])
height = int(dims[1])
if width <= 0 or height <= 0:
raise ValueError("Must specify +ve dims, got {}.".format(dims))
else:
return point.Point(width, height)
else:
size = int(dims)
if size <= 0:
raise ValueError(
"Must specify a +ve value for size, got {}.".format(dims))
else:
return point.Point(size, size)
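# Illustrative examples (arbitrary sizes): _to_point(64) -> point.Point(64, 64),
# _to_point((64, 48)) -> point.Point(64, 48), and _to_point(0) raises ValueError.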
class Dimensions(object):
"""Screen and minimap dimensions configuration.
Both screen and minimap must be specified. Sizes must be positive.
Screen size must be greater than or equal to minimap size in both dimensions.
Attributes:
screen: A (width, height) int tuple or a single int to be used for both.
minimap: A (width, height) int tuple or a single int to be used for both.
"""
def __init__(self, screen=None, minimap=None):
if not screen or not minimap:
raise ValueError(
"screen and minimap must both be set, screen={}, minimap={}".format(
screen, minimap))
self._screen = _to_point(screen)
self._minimap = _to_point(minimap)
@property
def screen(self):
return self._screen
@property
def minimap(self):
return self._minimap
def __repr__(self):
return "Dimensions(screen={}, minimap={})".format(self.screen, self.minimap)
def __eq__(self, other):
return (isinstance(other, Dimensions) and self.screen == other.screen and
self.minimap == other.minimap)
def __ne__(self, other):
return not self == other
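# Illustrative usage (arbitrary sizes): Dimensions(screen=(84, 84), minimap=64)
# describes an 84x84 screen with a square 64x64 minimap, while Dimensions(84)
# raises ValueError because the minimap size is missing.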
class AgentInterfaceFormat(object):
"""Observation and action interface format specific to a particular agent."""
def __init__(
self,
feature_dimensions=None,
rgb_dimensions=None,
raw_resolution=None,
action_space=None,
camera_width_world_units=None,
use_feature_units=False,
use_raw_units=False,
use_raw_actions=False,
max_raw_actions=512,
max_selected_units=30,
use_unit_counts=False,
use_camera_position=False,
show_cloaked=False,
show_burrowed_shadows=False,
show_placeholders=False,
hide_specific_actions=True,
action_delay_fn=None,
send_observation_proto=False,
crop_to_playable_area=False,
raw_crop_to_playable_area=False,
allow_cheating_layers=False,
add_cargo_to_units=False):
"""Initializer.
Args:
feature_dimensions: Feature layer `Dimension`s. Either this or
rgb_dimensions (or both) must be set.
rgb_dimensions: RGB `Dimension`. Either this or feature_dimensions
(or both) must be set.
raw_resolution: Discretize the `raw_units` observation's x,y to this
resolution. Default is the map_size.
action_space: If you pass both feature and rgb sizes, then you must also
specify which you want to use for your actions as an ActionSpace enum.
camera_width_world_units: The width of your screen in world units. If your
feature_dimensions.screen=(64, 48) and camera_width is 24, then each
px represents 24 / 64 = 0.375 world units in each of x and y.
It'll then represent a camera of size (24, 0.375 * 48) = (24, 18)
world units.
use_feature_units: Whether to include feature_unit observations.
use_raw_units: Whether to include raw unit data in observations. This
differs from feature_units because it includes units outside the
screen and hidden units, and because unit positions are given in
terms of world units instead of screen units.
use_raw_actions: [bool] Whether to use raw actions as the interface.
Same as specifying action_space=ActionSpace.RAW.
max_raw_actions: [int] Maximum number of raw actions
max_selected_units: [int] The maximum number of selected units in the
raw interface.
use_unit_counts: Whether to include unit_counts observation. Disabled by
default since it gives information outside the visible area.
use_camera_position: Whether to include the camera's position (in minimap
coordinates) in the observations.
show_cloaked: Whether to show limited information for cloaked units.
show_burrowed_shadows: Whether to show limited information for burrowed
units that leave a shadow on the ground (ie widow mines and moving
roaches and infestors).
show_placeholders: Whether to show buildings that are queued for
construction.
hide_specific_actions: [bool] Some actions (eg cancel) have many
specific versions (cancel this building, cancel that spell) and can
be represented in a more general form. If a specific action is
available, the general will also be available. If you set
`hide_specific_actions` to False, the specific versions will also be
available, but if it's True, the specific ones will be hidden.
Similarly, when transforming back, a specific action will be returned
as the general action. This simplifies the action space, though can
lead to some actions in replays not being exactly representable using
only the general actions.
action_delay_fn: A callable which when invoked returns a delay in game
loops to apply to a requested action. Defaults to None, meaning no
delays are added (actions will be executed on the next game loop,
hence with the minimum delay of 1).
send_observation_proto: Whether or not to send the raw observation
response proto in the observations.
crop_to_playable_area: Crop the feature layer minimap observations down
from the full map area to just the playable area. Also improves the
heightmap rendering.
raw_crop_to_playable_area: Crop the raw units to the playable area. This
means units will show up closer to the origin with less dead space
around their valid locations.
allow_cheating_layers: Show the unit types and potentially other cheating
layers on the minimap.
add_cargo_to_units: Whether to add the units that are currently in cargo
to the feature_units and raw_units lists.
Raises:
ValueError: if the parameters are inconsistent.
"""
if not (feature_dimensions or rgb_dimensions or use_raw_units):
raise ValueError("Must set either the feature layer or rgb dimensions, "
"or use raw units.")
if action_space:
if not isinstance(action_space, actions.ActionSpace):
raise ValueError("action_space must be of type ActionSpace.")
if action_space == actions.ActionSpace.RAW:
use_raw_actions = True
elif ((action_space == actions.ActionSpace.FEATURES and
not feature_dimensions) or
(action_space == actions.ActionSpace.RGB and
not rgb_dimensions)):
raise ValueError(
"Action space must match the observations, action space={}, "
"feature_dimensions={}, rgb_dimensions={}".format(
action_space, feature_dimensions, rgb_dimensions))
else:
if use_raw_actions:
action_space = actions.ActionSpace.RAW
elif feature_dimensions and rgb_dimensions:
raise ValueError(
"You must specify the action space if you have both screen and "
"rgb observations.")
elif feature_dimensions:
action_space = actions.ActionSpace.FEATURES
else:
action_space = actions.ActionSpace.RGB
if raw_resolution:
raw_resolution = _to_point(raw_resolution)
if use_raw_actions:
if not use_raw_units:
raise ValueError(
"You must set use_raw_units if you intend to use_raw_actions")
if action_space != actions.ActionSpace.RAW:
raise ValueError(
"Don't specify both an action_space and use_raw_actions.")
if (rgb_dimensions and
(rgb_dimensions.screen.x < rgb_dimensions.minimap.x or
rgb_dimensions.screen.y < rgb_dimensions.minimap.y)):
raise ValueError(
"RGB Screen (%s) can't be smaller than the minimap (%s)." % (
rgb_dimensions.screen, rgb_dimensions.minimap))
self._feature_dimensions = feature_dimensions
self._rgb_dimensions = rgb_dimensions
self._action_space = action_space
self._camera_width_world_units = camera_width_world_units or 24
self._use_feature_units = use_feature_units
self._use_raw_units = use_raw_units
self._raw_resolution = raw_resolution
self._use_raw_actions = use_raw_actions
self._max_raw_actions = max_raw_actions
self._max_selected_units = max_selected_units
self._use_unit_counts = use_unit_counts
self._use_camera_position = use_camera_position
self._show_cloaked = show_cloaked
self._show_burrowed_shadows = show_burrowed_shadows
self._show_placeholders = show_placeholders
self._hide_specific_actions = hide_specific_actions
self._action_delay_fn = action_delay_fn
self._send_observation_proto = send_observation_proto
self._add_cargo_to_units = add_cargo_to_units
self._crop_to_playable_area = crop_to_playable_area
self._raw_crop_to_playable_area = raw_crop_to_playable_area
self._allow_cheating_layers = allow_cheating_layers
if action_space == actions.ActionSpace.FEATURES:
self._action_dimensions = feature_dimensions
else:
self._action_dimensions = rgb_dimensions
@property
def feature_dimensions(self):
return self._feature_dimensions
@property
def rgb_dimensions(self):
return self._rgb_dimensions
@property
def action_space(self):
return self._action_space
@property
def camera_width_world_units(self):
return self._camera_width_world_units
@property
def use_feature_units(self):
return self._use_feature_units
@property
def use_raw_units(self):
return self._use_raw_units
@property
def raw_resolution(self):
return self._raw_resolution
@raw_resolution.setter
def raw_resolution(self, value):
self._raw_resolution = value
@property
def use_raw_actions(self):
return self._use_raw_actions
@property
def max_raw_actions(self):
return self._max_raw_actions
@property
def max_selected_units(self):
return self._max_selected_units
@property
def use_unit_counts(self):
return self._use_unit_counts
@property
def use_camera_position(self):
return self._use_camera_position
@property
def show_cloaked(self):
return self._show_cloaked
@property
def show_burrowed_shadows(self):
return self._show_burrowed_shadows
@property
def show_placeholders(self):
return self._show_placeholders
@property
def hide_specific_actions(self):
return self._hide_specific_actions
@property
def action_delay_fn(self):
return self._action_delay_fn
@property
def send_observation_proto(self):
return self._send_observation_proto
@property
def add_cargo_to_units(self):
return self._add_cargo_to_units
@property
def action_dimensions(self):
return self._action_dimensions
@property
def crop_to_playable_area(self):
return self._crop_to_playable_area
@property
def raw_crop_to_playable_area(self):
return self._raw_crop_to_playable_area
@property
def allow_cheating_layers(self):
return self._allow_cheating_layers
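def _example_agent_interface_format():
  """A hedged usage sketch (not part of the original module).

  Shows one plausible way to construct an AgentInterfaceFormat directly with
  the options documented above. The resolutions and the uniform 1-3 game loop
  action delay are illustrative assumptions, not recommended values.
  """
  return AgentInterfaceFormat(
      feature_dimensions=Dimensions(screen=84, minimap=64),
      use_feature_units=True,  # adds "feature_units" to the observations
      use_raw_units=True,  # adds "raw_units" and "raw_effects"
      raw_resolution=128,  # resolution of the raw unit coordinate space
      action_space=actions.ActionSpace.FEATURES,
      action_delay_fn=lambda: random.randint(1, 3))  # delay in game loops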
def parse_agent_interface_format(
feature_screen=None,
feature_minimap=None,
rgb_screen=None,
rgb_minimap=None,
action_space=None,
action_delays=None,
**kwargs):
"""Creates an AgentInterfaceFormat object from keyword args.
Convenient when using dictionaries or command-line arguments for config.
Note that the feature_* and rgb_* properties define the respective spatial
observation dimensions and accept:
* None or 0 to disable that spatial observation.
* A single int for a square observation with that side length.
* A (int, int) tuple for a rectangular (width, height) observation.
Args:
feature_screen: If specified, so must feature_minimap be.
feature_minimap: If specified, so must feature_screen be.
rgb_screen: If specified, so must rgb_minimap be.
rgb_minimap: If specified, so must rgb_screen be.
action_space: ["FEATURES", "RGB", "RAW"].
action_delays: List of relative frequencies for each of [1, 2, 3, ...]
game loop delays on executed actions. Only used when the environment
is non-realtime. Intended to simulate the delays which can be
experienced when playing in realtime. Note that 1 is the minimum
      possible delay, as actions can only ever be executed on a subsequent
game loop.
**kwargs: Anything else is passed through to AgentInterfaceFormat.
Returns:
An `AgentInterfaceFormat` object.
Raises:
ValueError: If an invalid parameter is specified.
"""
if feature_screen or feature_minimap:
feature_dimensions = Dimensions(feature_screen, feature_minimap)
else:
feature_dimensions = None
if rgb_screen or rgb_minimap:
rgb_dimensions = Dimensions(rgb_screen, rgb_minimap)
else:
rgb_dimensions = None
def _action_delay_fn(delays):
"""Delay frequencies per game loop delay -> fn returning game loop delay."""
if not delays:
return None
else:
total = sum(delays)
cumulative_sum = np.cumsum([delay / total for delay in delays])
def fn():
sample = random.uniform(0, 1) - EPSILON
for i, cumulative in enumerate(cumulative_sum):
if sample <= cumulative:
return i + 1
raise ValueError("Failed to sample action delay??")
return fn
return AgentInterfaceFormat(
feature_dimensions=feature_dimensions,
rgb_dimensions=rgb_dimensions,
action_space=(action_space and actions.ActionSpace[action_space.upper()]),
action_delay_fn=_action_delay_fn(action_delays),
**kwargs)
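def _example_parse_agent_interface_format():
  """A hedged usage sketch (not part of the original module).

  Demonstrates the keyword forms described in the docstring above: a single
  int for a square resolution, a (width, height) tuple, a string action space
  name, and relative action delay frequencies. The values are illustrative.
  """
  return parse_agent_interface_format(
      feature_screen=84,  # single int -> square 84x84 screen
      feature_minimap=(64, 64),  # explicit (width, height)
      action_space="FEATURES",  # upper-cased into actions.ActionSpace
      action_delays=[5, 3, 2],  # delays of 1, 2, 3 loops at 5:3:2 frequency
      use_feature_units=True)  # extra kwargs pass through to the constructor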
def features_from_game_info(game_info, agent_interface_format=None,
map_name=None, **kwargs):
"""Construct a Features object using data extracted from game info.
Args:
game_info: A `sc_pb.ResponseGameInfo` from the game.
agent_interface_format: an optional AgentInterfaceFormat.
map_name: an optional map name, which overrides the one in game_info.
**kwargs: Anything else is passed through to AgentInterfaceFormat. It's an
error to send any kwargs if you pass an agent_interface_format.
Returns:
A features object matching the specified parameterisation.
Raises:
ValueError: if you pass both agent_interface_format and kwargs.
ValueError: if you pass an agent_interface_format that doesn't match
game_info's resolutions.
"""
if not map_name:
map_name = game_info.map_name
if game_info.options.HasField("feature_layer"):
fl_opts = game_info.options.feature_layer
feature_dimensions = Dimensions(
screen=(fl_opts.resolution.x, fl_opts.resolution.y),
minimap=(fl_opts.minimap_resolution.x, fl_opts.minimap_resolution.y))
camera_width_world_units = game_info.options.feature_layer.width
else:
feature_dimensions = None
camera_width_world_units = None
if game_info.options.HasField("render"):
rgb_opts = game_info.options.render
rgb_dimensions = Dimensions(
screen=(rgb_opts.resolution.x, rgb_opts.resolution.y),
minimap=(rgb_opts.minimap_resolution.x, rgb_opts.minimap_resolution.y))
else:
rgb_dimensions = None
map_size = game_info.start_raw.map_size
requested_races = {
info.player_id: info.race_requested for info in game_info.player_info
if info.type != sc_pb.Observer}
if agent_interface_format:
if kwargs:
raise ValueError(
"Either give an agent_interface_format or kwargs, not both.")
aif = agent_interface_format
if (aif.rgb_dimensions != rgb_dimensions or
aif.feature_dimensions != feature_dimensions or
(feature_dimensions and
aif.camera_width_world_units != camera_width_world_units)):
raise ValueError("""
The supplied agent_interface_format doesn't match the resolutions computed from
the game_info:
rgb_dimensions: %s != %s
feature_dimensions: %s != %s
camera_width_world_units: %s != %s
""" % (aif.rgb_dimensions, rgb_dimensions,
aif.feature_dimensions, feature_dimensions,
aif.camera_width_world_units, camera_width_world_units))
else:
agent_interface_format = AgentInterfaceFormat(
feature_dimensions=feature_dimensions,
rgb_dimensions=rgb_dimensions,
camera_width_world_units=camera_width_world_units,
**kwargs)
return Features(
agent_interface_format=agent_interface_format,
map_size=map_size,
map_name=map_name,
requested_races=requested_races)
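def _example_features_from_game_info(controller):
  """A hedged usage sketch (not part of the original module).

  `controller` is assumed to be a started pysc2 controller whose `game_info()`
  call returns a `sc_pb.ResponseGameInfo`; the extra kwargs below are
  illustrative and are forwarded to AgentInterfaceFormat as documented above.
  """
  game_info = controller.game_info()
  feats = features_from_game_info(
      game_info,
      use_raw_units=True,
      use_camera_position=True)
  return feats.observation_spec(), feats.action_spec()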
def _init_valid_functions(action_dimensions):
"""Initialize ValidFunctions and set up the callbacks."""
sizes = {
"screen": tuple(int(i) for i in action_dimensions.screen),
"screen2": tuple(int(i) for i in action_dimensions.screen),
"minimap": tuple(int(i) for i in action_dimensions.minimap),
}
types = actions.Arguments(*[
actions.ArgumentType.spec(t.id, t.name, sizes.get(t.name, t.sizes))
for t in actions.TYPES])
functions = actions.Functions([
actions.Function.spec(f.id, f.name, tuple(types[t.id] for t in f.args))
for f in actions.FUNCTIONS])
return actions.ValidActions(types, functions)
def _init_valid_raw_functions(raw_resolution, max_selected_units):
"""Initialize ValidFunctions and set up the callbacks."""
sizes = {
"world": tuple(int(i) for i in raw_resolution),
"unit_tags": (max_selected_units,),
}
types = actions.RawArguments(*[
actions.ArgumentType.spec(t.id, t.name, sizes.get(t.name, t.sizes))
for t in actions.RAW_TYPES])
functions = actions.Functions([
actions.Function.spec(f.id, f.name, tuple(types[t.id] for t in f.args))
for f in actions.RAW_FUNCTIONS])
return actions.ValidActions(types, functions)
class Features(object):
"""Render feature layers from SC2 Observation protos into numpy arrays.
  This has the implementation details of how to render a StarCraft
  environment. It translates between agent action/observation formats and
  StarCraft action/observation formats, which should not be exposed to agent
  authors. The StarCraft protos contain more information than agents should
  have access to.
This is outside of the environment so that it can also be used in other
contexts, eg a supervised dataset pipeline.
"""
def __init__(self, agent_interface_format=None, map_size=None,
requested_races=None, map_name="unknown"):
"""Initialize a Features instance matching the specified interface format.
Args:
agent_interface_format: See the documentation for `AgentInterfaceFormat`.
map_size: The size of the map in world units, needed for feature_units.
requested_races: Optional. Dict mapping `player_id`s to that player's
requested race. If present, will send player races in observation.
map_name: Optional name of the map, to be added to the observation.
Raises:
ValueError: if agent_interface_format isn't specified.
ValueError: if map_size isn't specified when use_feature_units or
use_camera_position is.
"""
if not agent_interface_format:
raise ValueError("Please specify agent_interface_format")
self._agent_interface_format = agent_interface_format
aif = self._agent_interface_format
if not aif.raw_resolution and map_size:
aif.raw_resolution = point.Point.build(map_size)
self._map_size = map_size
self._map_name = map_name
if (aif.use_feature_units
or aif.use_camera_position
or aif.use_raw_units):
self.init_camera(
aif.feature_dimensions,
map_size,
aif.camera_width_world_units,
aif.raw_resolution)
self._send_observation_proto = aif.send_observation_proto
self._raw = aif.use_raw_actions
if self._raw:
self._valid_functions = _init_valid_raw_functions(
aif.raw_resolution, aif.max_selected_units)
self._raw_tags = []
else:
self._valid_functions = _init_valid_functions(aif.action_dimensions)
self._requested_races = requested_races
if requested_races is not None:
assert len(requested_races) <= 2
def init_camera(
self, feature_dimensions, map_size, camera_width_world_units,
raw_resolution):
"""Initialize the camera (especially for feature_units).
This is called in the constructor and may be called repeatedly after
`Features` is constructed, since it deals with rescaling coordinates and not
changing environment/action specs.
Args:
feature_dimensions: See the documentation in `AgentInterfaceFormat`.
map_size: The size of the map in world units.
camera_width_world_units: See the documentation in `AgentInterfaceFormat`.
raw_resolution: See the documentation in `AgentInterfaceFormat`.
Raises:
ValueError: If map_size or camera_width_world_units are falsey (which
should mainly happen if called by the constructor).
"""
if not map_size or not camera_width_world_units:
raise ValueError(
"Either pass the game_info with raw enabled, or map_size and "
"camera_width_world_units in order to use feature_units or camera"
"position.")
map_size = point.Point.build(map_size)
self._world_to_world_tl = transform.Linear(point.Point(1, -1),
point.Point(0, map_size.y))
self._world_tl_to_world_camera_rel = transform.Linear(offset=-map_size / 4)
if feature_dimensions:
world_camera_rel_to_feature_screen = transform.Linear(
feature_dimensions.screen / camera_width_world_units,
feature_dimensions.screen / 2)
self._world_to_feature_screen_px = transform.Chain(
self._world_to_world_tl,
self._world_tl_to_world_camera_rel,
world_camera_rel_to_feature_screen,
transform.PixelToCoord())
# If we don't have a specified raw resolution, we do no transform.
world_tl_to_feature_minimap = transform.Linear(
scale=raw_resolution / map_size.max_dim() if raw_resolution else None)
self._world_to_minimap_px = transform.Chain(
self._world_to_world_tl,
world_tl_to_feature_minimap,
transform.PixelToCoord())
self._camera_size = (
raw_resolution / map_size.max_dim() * camera_width_world_units)
def _update_camera(self, camera_center):
"""Update the camera transform based on the new camera center."""
self._world_tl_to_world_camera_rel.offset = (
-self._world_to_world_tl.fwd_pt(camera_center) *
self._world_tl_to_world_camera_rel.scale)
def observation_spec(self):
"""The observation spec for the SC2 environment.
    It's worth noting that the image-like observations are in y,x/row,column
    order, which is different from the actions, which are in x,y order. This is
due to conflicting conventions, and to facilitate printing of the images.
Returns:
The dict of observation names to their tensor shapes. Shapes with a 0 can
vary in length, for example the number of valid actions depends on which
units you have selected.
"""
# pytype: disable=wrong-arg-types
obs_spec = named_array.NamedDict({
"action_result": (0,), # See error.proto: ActionResult.
"alerts": (0,), # See sc2api.proto: Alert.
"build_queue": (0, len(UnitLayer)),
"cargo": (0, len(UnitLayer)),
"cargo_slots_available": (1,),
"control_groups": (10, 2),
"game_loop": (1,),
"last_actions": (0,),
"map_name": (0,),
"multi_select": (0, len(UnitLayer)),
"player": (len(Player),),
"production_queue": (0, len(ProductionQueue)),
"radar": (0, len(Radar)),
"score_cumulative": (len(ScoreCumulative),),
"score_by_category": (len(ScoreByCategory), len(ScoreCategories)),
"score_by_vital": (len(ScoreByVital), len(ScoreVitals)),
"single_select": (0, len(UnitLayer)), # Only (n, 7) for n in (0, 1).
})
# pytype: enable=wrong-arg-types
if not self._raw:
obs_spec["available_actions"] = (0,)
aif = self._agent_interface_format
if aif.feature_dimensions:
obs_spec["feature_screen"] = (len(SCREEN_FEATURES),
aif.feature_dimensions.screen.y,
aif.feature_dimensions.screen.x)
obs_spec["feature_minimap"] = (len(MINIMAP_FEATURES),
aif.feature_dimensions.minimap.y,
aif.feature_dimensions.minimap.x)
if aif.rgb_dimensions:
obs_spec["rgb_screen"] = (aif.rgb_dimensions.screen.y,
aif.rgb_dimensions.screen.x,
3)
obs_spec["rgb_minimap"] = (aif.rgb_dimensions.minimap.y,
aif.rgb_dimensions.minimap.x,
3)
if aif.use_feature_units:
obs_spec["feature_units"] = (0, len(FeatureUnit)) # pytype: disable=wrong-arg-types
obs_spec["feature_effects"] = (0, len(EffectPos))
if aif.use_raw_units:
obs_spec["raw_units"] = (0, len(FeatureUnit))
obs_spec["raw_effects"] = (0, len(EffectPos))
obs_spec["upgrades"] = (0,)
if aif.use_unit_counts:
obs_spec["unit_counts"] = (0, len(UnitCounts))
if aif.use_camera_position:
obs_spec["camera_position"] = (2,)
obs_spec["camera_size"] = (2,)
if self._send_observation_proto:
obs_spec["_response_observation"] = (0,)
obs_spec["home_race_requested"] = (1,)
obs_spec["away_race_requested"] = (1,)
return obs_spec
def action_spec(self):
"""The action space pretty complicated and fills the ValidFunctions."""
return self._valid_functions
@property
def map_size(self):
return self._map_size
@property
def requested_races(self):
return self._requested_races
@sw.decorate
def transform_obs(self, obs):
"""Render some SC2 observations into something an agent can handle."""
empty_unit = np.array([], dtype=np.int32).reshape((0, len(UnitLayer)))
out = named_array.NamedDict({ # Fill out some that are sometimes empty.
"single_select": empty_unit,
"multi_select": empty_unit,
"build_queue": empty_unit,
"cargo": empty_unit,
"production_queue": np.array([], dtype=np.int32).reshape(
(0, len(ProductionQueue))),
"last_actions": np.array([], dtype=np.int32),
"cargo_slots_available": np.array([0], dtype=np.int32),
"home_race_requested": np.array([0], dtype=np.int32),
"away_race_requested": np.array([0], dtype=np.int32),
"map_name": self._map_name,
})
def or_zeros(layer, size):
if layer is not None:
return layer.astype(np.int32, copy=False)
else:
return np.zeros((size.y, size.x), dtype=np.int32)
aif = self._agent_interface_format
if aif.feature_dimensions:
with sw("feature_screen"):
out["feature_screen"] = named_array.NamedNumpyArray(
np.stack([or_zeros(f.unpack(obs.observation),
aif.feature_dimensions.screen)
for f in SCREEN_FEATURES]),
names=[ScreenFeatures, None, None])
with sw("feature_minimap"):
out["feature_minimap"] = named_array.NamedNumpyArray(
np.stack([or_zeros(f.unpack(obs.observation),
aif.feature_dimensions.minimap)
for f in MINIMAP_FEATURES]),
names=[MinimapFeatures, None, None])
if aif.rgb_dimensions:
with sw("rgb_screen"):
out["rgb_screen"] = Feature.unpack_rgb_image(
obs.observation.render_data.map).astype(np.int32)
with sw("rgb_minimap"):
out["rgb_minimap"] = Feature.unpack_rgb_image(
obs.observation.render_data.minimap).astype(np.int32)
if not self._raw:
with sw("last_actions"):
out["last_actions"] = np.array(
[self.reverse_action(a).function for a in obs.actions],
dtype=np.int32)
out["action_result"] = np.array([o.result for o in obs.action_errors],
dtype=np.int32)
out["alerts"] = np.array(obs.observation.alerts, dtype=np.int32)
out["game_loop"] = np.array([obs.observation.game_loop], dtype=np.int32)
with sw("score"):
score_details = obs.observation.score.score_details
out["score_cumulative"] = named_array.NamedNumpyArray([
obs.observation.score.score,
score_details.idle_production_time,
score_details.idle_worker_time,
score_details.total_value_units,
score_details.total_value_structures,
score_details.killed_value_units,
score_details.killed_value_structures,
score_details.collected_minerals,
score_details.collected_vespene,
score_details.collection_rate_minerals,
score_details.collection_rate_vespene,
score_details.spent_minerals,
score_details.spent_vespene,
], names=ScoreCumulative, dtype=np.int32)
def get_score_details(key, details, categories):
row = getattr(details, key.name)
return [getattr(row, category.name) for category in categories]
out["score_by_category"] = named_array.NamedNumpyArray([
get_score_details(key, score_details, ScoreCategories)
for key in ScoreByCategory
], names=[ScoreByCategory, ScoreCategories], dtype=np.int32)
out["score_by_vital"] = named_array.NamedNumpyArray([
get_score_details(key, score_details, ScoreVitals)
for key in ScoreByVital
], names=[ScoreByVital, ScoreVitals], dtype=np.int32)
player = obs.observation.player_common
out["player"] = named_array.NamedNumpyArray([
player.player_id,
player.minerals,
player.vespene,
player.food_used,
player.food_cap,
player.food_army,
player.food_workers,
player.idle_worker_count,
player.army_count,
player.warp_gate_count,
player.larva_count,
], names=Player, dtype=np.int32)
def unit_vec(u):
return np.array((
u.unit_type,
u.player_relative,
u.health,
u.shields,
u.energy,
u.transport_slots_taken,
int(u.build_progress * 100), # discretize
), dtype=np.int32)
ui = obs.observation.ui_data
with sw("ui"):
groups = np.zeros((10, 2), dtype=np.int32)
for g in ui.groups:
groups[g.control_group_index, :] = (g.leader_unit_type, g.count)
out["control_groups"] = groups
if ui.HasField("single"):
out["single_select"] = named_array.NamedNumpyArray(
[unit_vec(ui.single.unit)], [None, UnitLayer])
elif ui.HasField("multi"):
out["multi_select"] = named_array.NamedNumpyArray(
[unit_vec(u) for u in ui.multi.units], [None, UnitLayer])
elif ui.HasField("cargo"):
out["single_select"] = named_array.NamedNumpyArray(
[unit_vec(ui.cargo.unit)], [None, UnitLayer])
out["cargo"] = named_array.NamedNumpyArray(
[unit_vec(u) for u in ui.cargo.passengers], [None, UnitLayer])
out["cargo_slots_available"] = np.array([ui.cargo.slots_available],
dtype=np.int32)
elif ui.HasField("production"):
out["single_select"] = named_array.NamedNumpyArray(
[unit_vec(ui.production.unit)], [None, UnitLayer])
if ui.production.build_queue:
out["build_queue"] = named_array.NamedNumpyArray(
[unit_vec(u) for u in ui.production.build_queue],
[None, UnitLayer], dtype=np.int32)
if ui.production.production_queue:
out["production_queue"] = named_array.NamedNumpyArray(
[(item.ability_id, item.build_progress * 100)
for item in ui.production.production_queue],
[None, ProductionQueue], dtype=np.int32)
tag_types = {} # Only populate the cache if it's needed.
def get_addon_type(tag):
if not tag_types:
for u in raw.units:
tag_types[u.tag] = u.unit_type
return tag_types.get(tag, 0)
def full_unit_vec(u, pos_transform, is_raw=False):
"""Compute unit features."""
screen_pos = pos_transform.fwd_pt(
point.Point.build(u.pos))
screen_radius = pos_transform.fwd_dist(u.radius)
def raw_order(i):
if len(u.orders) > i:
# TODO(tewalds): Return a generalized func id.
return actions.RAW_ABILITY_ID_TO_FUNC_ID.get(
u.orders[i].ability_id, 0)
return 0
features = [
# Match unit_vec order
u.unit_type,
u.alliance, # Self = 1, Ally = 2, Neutral = 3, Enemy = 4
u.health,
u.shield,
u.energy,
u.cargo_space_taken,
int(u.build_progress * 100), # discretize
# Resume API order
int(u.health / u.health_max * 255) if u.health_max > 0 else 0,
int(u.shield / u.shield_max * 255) if u.shield_max > 0 else 0,
int(u.energy / u.energy_max * 255) if u.energy_max > 0 else 0,
u.display_type, # Visible = 1, Snapshot = 2, Hidden = 3
u.owner, # 1-15, 16 = neutral
screen_pos.x,
screen_pos.y,
u.facing,
screen_radius,
u.cloak, # Cloaked = 1, CloakedDetected = 2, NotCloaked = 3
u.is_selected,
u.is_blip,
u.is_powered,
u.mineral_contents,
u.vespene_contents,
# Not populated for enemies or neutral
u.cargo_space_max,
u.assigned_harvesters,
u.ideal_harvesters,
u.weapon_cooldown,
len(u.orders),
raw_order(0),
raw_order(1),
u.tag if is_raw else 0,
u.is_hallucination,
u.buff_ids[0] if len(u.buff_ids) >= 1 else 0,
u.buff_ids[1] if len(u.buff_ids) >= 2 else 0,
get_addon_type(u.add_on_tag) if u.add_on_tag else 0,
u.is_active,
u.is_on_screen,
int(u.orders[0].progress * 100) if len(u.orders) >= 1 else 0,
int(u.orders[1].progress * 100) if len(u.orders) >= 2 else 0,
raw_order(2),
raw_order(3),
0,
u.buff_duration_remain,
u.buff_duration_max,
u.attack_upgrade_level,
u.armor_upgrade_level,
u.shield_upgrade_level,
]
return features
raw = obs.observation.raw_data
if aif.use_feature_units:
with sw("feature_units"):
# Update the camera location so we can calculate world to screen pos
self._update_camera(point.Point.build(raw.player.camera))
feature_units = [full_unit_vec(u, self._world_to_feature_screen_px)
for u in raw.units if u.is_on_screen]
out["feature_units"] = named_array.NamedNumpyArray(
feature_units, [None, FeatureUnit], dtype=np.int64)
feature_effects = []
feature_screen_size = aif.feature_dimensions.screen
for effect in raw.effects:
for pos in effect.pos:
screen_pos = self._world_to_feature_screen_px.fwd_pt(
point.Point.build(pos))
if (0 <= screen_pos.x < feature_screen_size.x and
0 <= screen_pos.y < feature_screen_size.y):
feature_effects.append([
effect.effect_id,
effect.alliance,
effect.owner,
effect.radius,
screen_pos.x,
screen_pos.y,
])
out["feature_effects"] = named_array.NamedNumpyArray(
feature_effects, [None, EffectPos], dtype=np.int32)
if aif.use_raw_units:
with sw("raw_units"):
with sw("to_list"):
raw_units = [full_unit_vec(u, self._world_to_minimap_px, is_raw=True)
for u in raw.units]
with sw("to_numpy"):
out["raw_units"] = named_array.NamedNumpyArray(
raw_units, [None, FeatureUnit], dtype=np.int64)
if raw_units:
self._raw_tags = out["raw_units"][:, FeatureUnit.tag]
else:
self._raw_tags = np.array([])
raw_effects = []
for effect in raw.effects:
for pos in effect.pos:
raw_pos = self._world_to_minimap_px.fwd_pt(point.Point.build(pos))
raw_effects.append([
effect.effect_id,
effect.alliance,
effect.owner,
effect.radius,
raw_pos.x,
raw_pos.y,
])
out["raw_effects"] = named_array.NamedNumpyArray(
raw_effects, [None, EffectPos], dtype=np.int32)
out["upgrades"] = np.array(raw.player.upgrade_ids, dtype=np.int32)
def cargo_units(u, pos_transform, is_raw=False):
"""Compute unit features."""
screen_pos = pos_transform.fwd_pt(
point.Point.build(u.pos))
features = []
for v in u.passengers:
features.append([
v.unit_type,
u.alliance, # Self = 1, Ally = 2, Neutral = 3, Enemy = 4
v.health,
v.shield,
v.energy,
0, # cargo_space_taken
0, # build_progress
int(v.health / v.health_max * 255) if v.health_max > 0 else 0,
int(v.shield / v.shield_max * 255) if v.shield_max > 0 else 0,
int(v.energy / v.energy_max * 255) if v.energy_max > 0 else 0,
0, # display_type
u.owner, # 1-15, 16 = neutral
screen_pos.x,
screen_pos.y,
0, # facing
0, # screen_radius
0, # cloak
0, # is_selected
0, # is_blip
0, # is powered
0, # mineral_contents
0, # vespene_contents
0, # cargo_space_max
0, # assigned_harvesters
0, # ideal_harvesters
0, # weapon_cooldown
0, # order_length
0, # order_id_0
0, # order_id_1
v.tag if is_raw else 0,
0, # is hallucination
0, # buff_id_1
0, # buff_id_2
0, # addon_unit_type
0, # active
0, # is_on_screen
0, # order_progress_1
0, # order_progress_2
0, # order_id_2
0, # order_id_3
1, # is_in_cargo
0, # buff_duration_remain
0, # buff_duration_max
0, # attack_upgrade_level
0, # armor_upgrade_level
0, # shield_upgrade_level
])
return features
if aif.add_cargo_to_units:
with sw("add_cargo_to_units"):
if aif.use_feature_units:
with sw("feature_units"):
with sw("to_list"):
feature_cargo_units = []
for u in raw.units:
if u.is_on_screen:
feature_cargo_units += cargo_units(
u, self._world_to_feature_screen_px)
with sw("to_numpy"):
if feature_cargo_units:
all_feature_units = np.array(
feature_cargo_units, dtype=np.int64)
all_feature_units = np.concatenate(
[out["feature_units"], feature_cargo_units], axis=0)
out["feature_units"] = named_array.NamedNumpyArray(
all_feature_units, [None, FeatureUnit], dtype=np.int64)
if aif.use_raw_units:
with sw("raw_units"):
with sw("to_list"):
raw_cargo_units = []
for u in raw.units:
raw_cargo_units += cargo_units(
u, self._world_to_minimap_px, is_raw=True)
with sw("to_numpy"):
if raw_cargo_units:
raw_cargo_units = np.array(raw_cargo_units, dtype=np.int64)
all_raw_units = np.concatenate(
[out["raw_units"], raw_cargo_units], axis=0)
out["raw_units"] = named_array.NamedNumpyArray(
all_raw_units, [None, FeatureUnit], dtype=np.int64)
self._raw_tags = out["raw_units"][:, FeatureUnit.tag]
if aif.use_unit_counts:
with sw("unit_counts"):
unit_counts = collections.defaultdict(int)
for u in raw.units:
if u.alliance == sc_raw.Self:
unit_counts[u.unit_type] += 1
out["unit_counts"] = named_array.NamedNumpyArray(
sorted(unit_counts.items()), [None, UnitCounts], dtype=np.int32)
if aif.use_camera_position:
camera_position = self._world_to_minimap_px.fwd_pt(
point.Point.build(raw.player.camera))
out["camera_position"] = np.array((camera_position.x, camera_position.y),
dtype=np.int32)
out["camera_size"] = np.array((self._camera_size.x, self._camera_size.y),
dtype=np.int32)
if not self._raw:
out["available_actions"] = np.array(
self.available_actions(obs.observation), dtype=np.int32)
if self._requested_races is not None:
out["home_race_requested"] = np.array(
(self._requested_races[player.player_id],), dtype=np.int32)
for player_id, race in self._requested_races.items():
if player_id != player.player_id:
out["away_race_requested"] = np.array((race,), dtype=np.int32)
def transform_radar(radar):
p = self._world_to_minimap_px.fwd_pt(point.Point.build(radar.pos))
return p.x, p.y, radar.radius
out["radar"] = named_array.NamedNumpyArray(
list(map(transform_radar, obs.observation.raw_data.radar)),
[None, Radar], dtype=np.int32)
# Send the entire proto as well (in a function, so it isn't copied).
if self._send_observation_proto:
out["_response_observation"] = lambda: obs
return out
@sw.decorate
def available_actions(self, obs):
"""Return the list of available action ids."""
available_actions = set()
hide_specific_actions = self._agent_interface_format.hide_specific_actions
for i, func in six.iteritems(actions.FUNCTIONS_AVAILABLE):
if func.avail_fn(obs):
available_actions.add(i)
for a in obs.abilities:
if a.ability_id not in actions.ABILITY_IDS:
logging.warning("Unknown ability %s seen as available.", a.ability_id)
continue
found_applicable = False
for func in actions.ABILITY_IDS[a.ability_id]:
if func.function_type in actions.POINT_REQUIRED_FUNCS[a.requires_point]:
if func.general_id == 0 or not hide_specific_actions:
available_actions.add(func.id)
found_applicable = True
if func.general_id != 0: # Always offer generic actions.
for general_func in actions.ABILITY_IDS[func.general_id]:
if general_func.function_type is func.function_type:
# Only the right type. Don't want to expose the general action
# to minimap if only the screen version is available.
available_actions.add(general_func.id)
found_applicable = True
break
if not found_applicable:
raise ValueError("Failed to find applicable action for {}".format(a))
return list(available_actions)
@sw.decorate
def transform_action(self, obs, func_call, skip_available=False):
"""Transform an agent-style action to one that SC2 can consume.
Args:
obs: a `sc_pb.Observation` from the previous frame.
func_call: a `FunctionCall` to be turned into a `sc_pb.Action`.
skip_available: If True, assume the action is available. This should only
be used for testing or if you expect to make actions that weren't
valid at the last observation.
Returns:
a corresponding `sc_pb.Action`.
Raises:
ValueError: if the action doesn't pass validation.
"""
# Ignore sc_pb.Action's to make the env more flexible, eg raw actions.
if isinstance(func_call, sc_pb.Action):
return func_call
func_id = func_call.function
try:
if self._raw:
func = actions.RAW_FUNCTIONS[func_id]
else:
func = actions.FUNCTIONS[func_id]
except KeyError:
raise ValueError("Invalid function id: %s." % func_id)
# Available?
if not (skip_available or self._raw or
func_id in self.available_actions(obs)):
raise ValueError("Function %s/%s is currently not available" % (
func_id, func.name))
# Right number of args?
if len(func_call.arguments) != len(func.args):
raise ValueError(
"Wrong number of arguments for function: %s, got: %s" % (
func, func_call.arguments))
# Args are valid?
aif = self._agent_interface_format
for t, arg in zip(func.args, func_call.arguments):
if t.count:
if 1 <= len(arg) <= t.count:
continue
else:
raise ValueError(
"Wrong number of values for argument of %s, got: %s" % (
func, func_call.arguments))
if t.name in ("screen", "screen2"):
sizes = aif.action_dimensions.screen
elif t.name == "minimap":
sizes = aif.action_dimensions.minimap
elif t.name == "world":
sizes = aif.raw_resolution
else:
sizes = t.sizes
if len(sizes) != len(arg):
raise ValueError(
"Wrong number of values for argument of %s, got: %s" % (
func, func_call.arguments))
for s, a in zip(sizes, arg):
        if not (np.all(0 <= a) and np.all(a < s)):
raise ValueError("Argument is out of range for %s, got: %s" % (
func, func_call.arguments))
# Convert them to python types.
kwargs = {type_.name: type_.fn(a)
for type_, a in zip(func.args, func_call.arguments)}
# Call the right callback to get an SC2 action proto.
sc2_action = sc_pb.Action()
kwargs["action"] = sc2_action
if func.ability_id:
kwargs["ability_id"] = func.ability_id
if self._raw:
if "world" in kwargs:
kwargs["world"] = self._world_to_minimap_px.back_pt(kwargs["world"])
def find_original_tag(position):
if position >= len(self._raw_tags): # Assume it's a real unit tag.
return position
original_tag = self._raw_tags[position]
if original_tag == 0:
logging.warning("Tag not found: %s", original_tag)
return original_tag
if "target_unit_tag" in kwargs:
kwargs["target_unit_tag"] = find_original_tag(
kwargs["target_unit_tag"][0])
if "unit_tags" in kwargs:
kwargs["unit_tags"] = [find_original_tag(t)
for t in kwargs["unit_tags"]]
actions.RAW_FUNCTIONS[func_id].function_type(**kwargs)
else:
kwargs["action_space"] = aif.action_space
actions.FUNCTIONS[func_id].function_type(**kwargs)
return sc2_action
@sw.decorate
def reverse_action(self, action):
"""Transform an SC2-style action into an agent-style action.
This should be the inverse of `transform_action`.
Args:
action: a `sc_pb.Action` to be transformed.
Returns:
A corresponding `actions.FunctionCall`.
Raises:
ValueError: if it doesn't know how to transform this action.
"""
FUNCTIONS = actions.FUNCTIONS # pylint: disable=invalid-name
aif = self._agent_interface_format
def func_call_ability(ability_id, cmd_type, *args):
"""Get the function id for a specific ability id and action type."""
if ability_id not in actions.ABILITY_IDS:
logging.warning("Unknown ability_id: %s. This is probably dance or "
"cheer, or some unknown new or map specific ability. "
"Treating it as a no-op.", ability_id)
return FUNCTIONS.no_op()
if aif.hide_specific_actions:
general_id = next(iter(actions.ABILITY_IDS[ability_id])).general_id
if general_id:
ability_id = general_id
for func in actions.ABILITY_IDS[ability_id]:
if func.function_type is cmd_type:
return FUNCTIONS[func.id](*args)
raise ValueError("Unknown ability_id: %s, type: %s. Likely a bug." % (
ability_id, cmd_type.__name__))
if action.HasField("action_ui"):
act_ui = action.action_ui
if act_ui.HasField("multi_panel"):
return FUNCTIONS.select_unit(act_ui.multi_panel.type - 1,
act_ui.multi_panel.unit_index)
if act_ui.HasField("control_group"):
return FUNCTIONS.select_control_group(
act_ui.control_group.action - 1,
act_ui.control_group.control_group_index)
if act_ui.HasField("select_idle_worker"):
return FUNCTIONS.select_idle_worker(act_ui.select_idle_worker.type - 1)
if act_ui.HasField("select_army"):
return FUNCTIONS.select_army(act_ui.select_army.selection_add)
if act_ui.HasField("select_warp_gates"):
return FUNCTIONS.select_warp_gates(
act_ui.select_warp_gates.selection_add)
if act_ui.HasField("select_larva"):
return FUNCTIONS.select_larva()
if act_ui.HasField("cargo_panel"):
return FUNCTIONS.unload(act_ui.cargo_panel.unit_index)
if act_ui.HasField("production_panel"):
return FUNCTIONS.build_queue(act_ui.production_panel.unit_index)
if act_ui.HasField("toggle_autocast"):
return func_call_ability(act_ui.toggle_autocast.ability_id,
actions.autocast)
if (action.HasField("action_feature_layer") or
action.HasField("action_render")):
act_sp = actions.spatial(action, aif.action_space)
if act_sp.HasField("camera_move"):
coord = point.Point.build(act_sp.camera_move.center_minimap)
return FUNCTIONS.move_camera(coord)
if act_sp.HasField("unit_selection_point"):
select_point = act_sp.unit_selection_point
coord = point.Point.build(select_point.selection_screen_coord)
return FUNCTIONS.select_point(select_point.type - 1, coord)
if act_sp.HasField("unit_selection_rect"):
select_rect = act_sp.unit_selection_rect
# TODO(tewalds): After looking at some replays we should decide if
# this is good enough. Maybe we need to simulate multiple actions or
# merge the selection rects into a bigger one.
tl = point.Point.build(select_rect.selection_screen_coord[0].p0)
br = point.Point.build(select_rect.selection_screen_coord[0].p1)
return FUNCTIONS.select_rect(select_rect.selection_add, tl, br)
if act_sp.HasField("unit_command"):
cmd = act_sp.unit_command
queue = int(cmd.queue_command)
if cmd.HasField("target_screen_coord"):
coord = point.Point.build(cmd.target_screen_coord)
return func_call_ability(cmd.ability_id, actions.cmd_screen,
queue, coord)
elif cmd.HasField("target_minimap_coord"):
coord = point.Point.build(cmd.target_minimap_coord)
return func_call_ability(cmd.ability_id, actions.cmd_minimap,
queue, coord)
else:
return func_call_ability(cmd.ability_id, actions.cmd_quick, queue)
if action.HasField("action_raw") or action.HasField("action_render"):
raise ValueError("Unknown action:\n%s" % action)
return FUNCTIONS.no_op()
@sw.decorate
def reverse_raw_action(self, action, prev_obs):
"""Transform an SC2-style action into an agent-style action.
This should be the inverse of `transform_action`.
Args:
action: a `sc_pb.Action` to be transformed.
prev_obs: an obs to figure out tags.
Returns:
A corresponding `actions.FunctionCall`.
Raises:
ValueError: if it doesn't know how to transform this action.
"""
aif = self._agent_interface_format
raw_tags = prev_obs["raw_units"][:, FeatureUnit.tag]
def find_tag_position(original_tag):
for i, tag in enumerate(raw_tags):
if tag == original_tag:
return i
logging.warning("Not found tag! %s", original_tag)
return -1
def func_call_ability(ability_id, cmd_type, *args):
"""Get the function id for a specific ability id and action type."""
if ability_id not in actions.RAW_ABILITY_IDS:
logging.warning("Unknown ability_id: %s. This is probably dance or "
"cheer, or some unknown new or map specific ability. "
"Treating it as a no-op.", ability_id)
return actions.RAW_FUNCTIONS.no_op()
if aif.hide_specific_actions:
general_id = next(iter(actions.RAW_ABILITY_IDS[ability_id])).general_id
if general_id:
ability_id = general_id
for func in actions.RAW_ABILITY_IDS[ability_id]:
if func.function_type is cmd_type:
return actions.RAW_FUNCTIONS[func.id](*args)
raise ValueError("Unknown ability_id: %s, type: %s. Likely a bug." % (
ability_id, cmd_type.__name__))
if action.HasField("action_raw"):
raw_act = action.action_raw
if raw_act.HasField("unit_command"):
uc = raw_act.unit_command
ability_id = uc.ability_id
queue_command = uc.queue_command
unit_tags = (find_tag_position(t) for t in uc.unit_tags)
# Remove invalid units.
unit_tags = [t for t in unit_tags if t != -1]
if not unit_tags:
return actions.RAW_FUNCTIONS.no_op()
if uc.HasField("target_unit_tag"):
target_unit_tag = find_tag_position(uc.target_unit_tag)
if target_unit_tag == -1:
return actions.RAW_FUNCTIONS.no_op()
return func_call_ability(ability_id, actions.raw_cmd_unit,
queue_command, unit_tags, target_unit_tag)
if uc.HasField("target_world_space_pos"):
coord = point.Point.build(uc.target_world_space_pos)
coord = self._world_to_minimap_px.fwd_pt(coord)
return func_call_ability(ability_id, actions.raw_cmd_pt,
queue_command, unit_tags, coord)
else:
return func_call_ability(ability_id, actions.raw_cmd,
queue_command, unit_tags)
if raw_act.HasField("toggle_autocast"):
uc = raw_act.toggle_autocast
ability_id = uc.ability_id
unit_tags = (find_tag_position(t) for t in uc.unit_tags)
# Remove invalid units.
unit_tags = [t for t in unit_tags if t != -1]
if not unit_tags:
return actions.RAW_FUNCTIONS.no_op()
return func_call_ability(ability_id, actions.raw_autocast, unit_tags)
if raw_act.HasField("unit_command"):
raise ValueError("Unknown action:\n%s" % action)
if raw_act.HasField("camera_move"):
coord = point.Point.build(raw_act.camera_move.center_world_space)
coord = self._world_to_minimap_px.fwd_pt(coord)
return actions.RAW_FUNCTIONS.raw_move_camera(coord)
return actions.RAW_FUNCTIONS.no_op()
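def _example_features_round_trip(feats, response_observation):
  """A hedged usage sketch (not part of the original module).

  Illustrates the agent-side round trip documented above, assuming `feats`
  was built without raw actions: render a `sc_pb.ResponseObservation` into
  numpy-friendly observations, then convert a `FunctionCall` back into an
  SC2 action proto. The no_op choice is just an always-valid placeholder.
  """
  obs = feats.transform_obs(response_observation)
  func_call = actions.FUNCTIONS.no_op()  # function id 0 is always available
  sc2_action = feats.transform_action(
      response_observation.observation, func_call)
  return obs, sc2_action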
| 36.68827 | 97 | 0.661971 |
794026175d8d4cf4400493492e2a55a96dc0b38f | 639 | py | Python | Q069.py | Linchin/python_leetcode_git | 3d08ab04bbdbd2ce268f33c501fbb149662872c7 | [
"MIT"
] | null | null | null | Q069.py | Linchin/python_leetcode_git | 3d08ab04bbdbd2ce268f33c501fbb149662872c7 | [
"MIT"
] | null | null | null | Q069.py | Linchin/python_leetcode_git | 3d08ab04bbdbd2ce268f33c501fbb149662872c7 | [
"MIT"
] | null | null | null | """
069
sqrt(x)
easy
Given a non-negative integer x, compute and return the square root of x.
Since the return type is an integer, the decimal digits are truncated,
and only the integer part of the result is returned.
"""
class Solution:
def mySqrt(self, x: int) -> int:
        # Binary search for the integer square root, keeping l and r as
        # loose lower/upper brackets around sqrt(x).
        l = 0
        r = x
        while l < r - 1:
            m = (l + r) // 2
            if m * m == x:
                return m
            elif m * m > x:
                r = m
            else:
                l = m
        # The search leaves two candidates; return the larger one whose
        # square still fits within x.
        if (l + 1) ** 2 <= x:
            return l + 1
        else:
            return l
x = 27
sol = Solution()
print(sol.mySqrt(x))
| 15.585366 | 72 | 0.466354 |
7940274c7ca37fbcfc1fd714a6a0bce0a9a093f9 | 1,053 | py | Python | roma/via/library/__init__.py | WilliamRo/roma | c23e6717b791dfbbb1b76d0a765f5814d92f8b1b | [
"Apache-2.0"
] | 2 | 2020-11-17T13:29:21.000Z | 2021-09-08T13:29:08.000Z | roma/via/library/__init__.py | WilliamRo/roma | c23e6717b791dfbbb1b76d0a765f5814d92f8b1b | [
"Apache-2.0"
] | null | null | null | roma/via/library/__init__.py | WilliamRo/roma | c23e6717b791dfbbb1b76d0a765f5814d92f8b1b | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 William Ro. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ====-========================================================================-
"""Build-in functions for a city is maintained in this package"""
from .scroll import Scroll
from .basic import ExitHall
from .commu import SendMsg
from .. import city
CONTENT = (
ExitHall,
SendMsg,
)
def register(city_, func_dict: dict):
assert isinstance(city_, city.City)
for s in CONTENT:
assert issubclass(s, Scroll)
s(city_).register(func_dict)
| 27.710526 | 80 | 0.687559 |
79402936ee8c57ef0f611a9d88693dd7d9f2c397 | 29 | py | Python | db/search/resolvers/__init__.py | matchd-ch/matchd-backend | 84be4aab1b4708cae50a8988301b15df877c8db0 | [
"Apache-2.0"
] | 1 | 2022-03-03T09:55:57.000Z | 2022-03-03T09:55:57.000Z | db/search/resolvers/__init__.py | matchd-ch/matchd-backend | 84be4aab1b4708cae50a8988301b15df877c8db0 | [
"Apache-2.0"
] | 7 | 2022-02-09T10:44:53.000Z | 2022-03-28T03:29:43.000Z | db/search/resolvers/__init__.py | matchd-ch/matchd-backend | 84be4aab1b4708cae50a8988301b15df877c8db0 | [
"Apache-2.0"
] | null | null | null | from .hit import HitResolver
| 14.5 | 28 | 0.827586 |
794029627bdef95beb5ef63ccf63f95b93492e2a | 1,722 | py | Python | src/helloworldservice/app.py | brymck/brymck-io-old | 436e0104fbb25e4ef2d74184632c96af52bdd0b7 | [
"MIT"
] | null | null | null | src/helloworldservice/app.py | brymck/brymck-io-old | 436e0104fbb25e4ef2d74184632c96af52bdd0b7 | [
"MIT"
] | 7 | 2020-09-06T07:23:27.000Z | 2022-02-26T14:41:46.000Z | src/helloworldservice/app.py | brymck/brymck-io-old | 436e0104fbb25e4ef2d74184632c96af52bdd0b7 | [
"MIT"
] | null | null | null | import os
import time
from concurrent.futures import ThreadPoolExecutor
import grpc
from opencensus.trace.exporters.stackdriver_exporter import StackdriverExporter
from opencensus.trace.ext.grpc.server_interceptor import OpenCensusServerInterceptor
from opencensus.trace.samplers.always_on import AlwaysOnSampler
import demo_pb2
import demo_pb2_grpc
from grpc_health.v1.health_pb2 import HealthCheckResponse
from grpc_health.v1.health_pb2_grpc import add_HealthServicer_to_server
from logger import get_json_logger
logger = get_json_logger("helloworldservice")
class HelloWorldService(demo_pb2_grpc.HelloWorldServiceServicer):
def Greet(self, request, context):
response = demo_pb2.GreetResponse()
response.message = f"Hello, {request.name}"
return response
def Check(self, request, context):
return HealthCheckResponse(status=HealthCheckResponse.SERVING)
def serve():
logger.info("initializing frontend")
try:
sampler = AlwaysOnSampler()
exporter = StackdriverExporter()
tracer_interceptor = OpenCensusServerInterceptor(sampler, exporter)
    except Exception:
        # Fall back to a tracer interceptor without Stackdriver export
        # (e.g. when running outside GCP without credentials).
        tracer_interceptor = OpenCensusServerInterceptor()
    port = os.environ.get("PORT", "9090")
    server = grpc.server(
        ThreadPoolExecutor(max_workers=3), interceptors=(tracer_interceptor,)
    )
service = HelloWorldService()
demo_pb2_grpc.add_HelloWorldServiceServicer_to_server(service, server)
add_HealthServicer_to_server(service, server)
logger.info(f"listening on port: {port}")
server.add_insecure_port(f"[::]:{port}")
server.start()
try:
while True:
time.sleep(10000)
except KeyboardInterrupt:
server.stop(0)
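def example_greet_client(host="localhost", port="9090"):
    """A hedged usage sketch (not part of the original service).

    A minimal client for the HelloWorldService defined above. The request
    message name `GreetRequest` and its `name` field are assumptions based on
    the Greet handler; check the demo proto for the real message names.
    """
    channel = grpc.insecure_channel(f"{host}:{port}")
    stub = demo_pb2_grpc.HelloWorldServiceStub(channel)
    response = stub.Greet(demo_pb2.GreetRequest(name="world"))
    return response.message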
if __name__ == "__main__":
serve()
| 28.229508 | 84 | 0.753194 |
79402a43b667587b869016d60580e9257801956a | 2,393 | py | Python | streamdeck_ui/display/filter.py | WMRamadan/streamdeck-ui | e366752af183bfb02a7d3d95d900cdd2190ad4bc | [
"MIT"
] | null | null | null | streamdeck_ui/display/filter.py | WMRamadan/streamdeck-ui | e366752af183bfb02a7d3d95d900cdd2190ad4bc | [
"MIT"
] | null | null | null | streamdeck_ui/display/filter.py | WMRamadan/streamdeck-ui | e366752af183bfb02a7d3d95d900cdd2190ad4bc | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
from fractions import Fraction
from typing import Callable, Tuple
from PIL import Image
class Filter(ABC):
"""
A filter transforms a given input image to the desired output image. A filter can signal that it
is complete and will be removed from the pipeline.
:param str name: The name of the filter. The name is useful for debugging purposes.
"""
size: Tuple[int, int]
"The image size (width, height) in pixels that this filter transforms."
is_complete: bool
"Indicates if the filter is complete and should no longer be processed."
def __init__(self):
self.is_complete = False
@abstractmethod
def initialize(self, size: Tuple[int, int]):
"""Initializes the filter with the provided frame size. Since the construction
of the filter can happen before the size of the display is known, initialization
should be done here.
:param size: The filter image size
:type size: Tuple[int, int]
"""
pass
@abstractmethod
def transform(self, get_input: Callable[[], Image.Image], get_output: Callable[[int], Image.Image], input_changed: bool, time: Fraction) -> Tuple[Image.Image, int]:
"""
        Transforms the given input image to the desired output image.
        The default behaviour is to return the original image.
:param Callable[[], PIL.Image] get_input: A function that returns the input image to transform. Note that calling
this will create a copy of the input image, and it is safe to manipulate directly.
:param Callable[[int], PIL.Image] get_output: Provide the hashcode of the new frame and it will
return the output frame if it already exists. This avoids having to redraw an output frame that is already
cached.
:param bool input_changed: True if the input is different from previous run, False otherwise.
When true, you have to return an Image.
:param Fraction time: The current time in seconds, expressed as a fractional number since
the start of the pipeline.
:rtype: PIL.Image
:return: The transformed output image. If this filter did not modify the input, return None. This signals to the
pipeline manager that there was no change and a cached version will be moved to the next stage.
"""
pass
| 39.883333 | 168 | 0.691601 |
79402be215be1cf4e27544179de1b332e6d90875 | 50,017 | py | Python | napari/layers/points/_tests/test_points.py | Zac-HD/napari | 102a7e8f845893c874d2b86f9371d41130100b89 | [
"BSD-3-Clause"
] | 1 | 2021-04-24T10:10:54.000Z | 2021-04-24T10:10:54.000Z | napari/layers/points/_tests/test_points.py | Zac-HD/napari | 102a7e8f845893c874d2b86f9371d41130100b89 | [
"BSD-3-Clause"
] | 2 | 2021-05-17T02:15:08.000Z | 2022-03-12T21:19:52.000Z | napari/layers/points/_tests/test_points.py | Zac-HD/napari | 102a7e8f845893c874d2b86f9371d41130100b89 | [
"BSD-3-Clause"
] | null | null | null | from copy import copy
from itertools import cycle, islice
import numpy as np
import pandas as pd
import pytest
from vispy.color import get_colormap
from napari._tests.utils import check_layer_world_data_extent
from napari.layers import Points
from napari.layers.points._points_utils import points_to_squares
from napari.utils.colormaps.standardize_color import transform_color
def _make_cycled_properties(values, length):
"""Helper function to make property values
Parameters
----------
values
The values to be cycled.
length : int
The length of the resulting property array
Returns
-------
cycled_properties : np.ndarray
The property array comprising the cycled values.
"""
cycled_properties = np.array(list(islice(cycle(values), 0, length)))
return cycled_properties
def test_empty_points():
pts = Points()
assert pts.data.shape == (0, 2)
def test_empty_points_with_properties():
"""Test instantiating an empty Points layer with properties
See: https://github.com/napari/napari/pull/1069
"""
properties = {
'label': np.array(['label1', 'label2']),
'cont_prop': np.array([0], dtype=float),
}
pts = Points(properties=properties)
current_props = {k: v[0] for k, v in properties.items()}
np.testing.assert_equal(pts.current_properties, current_props)
# verify the property datatype is correct
assert pts.properties['cont_prop'].dtype == float
# add two points and verify the default property was applied
pts.add([10, 10])
pts.add([20, 20])
props = {
'label': np.array(['label1', 'label1']),
'cont_prop': np.array([0, 0], dtype=float),
}
np.testing.assert_equal(pts.properties, props)
def test_empty_points_with_properties_list():
"""Test instantiating an empty Points layer with properties
stored in a list
See: https://github.com/napari/napari/pull/1069
"""
properties = {'label': ['label1', 'label2'], 'cont_prop': [0]}
pts = Points(properties=properties)
current_props = {k: np.asarray(v[0]) for k, v in properties.items()}
np.testing.assert_equal(pts.current_properties, current_props)
# add two points and verify the default property was applied
pts.add([10, 10])
pts.add([20, 20])
props = {
'label': np.array(['label1', 'label1']),
'cont_prop': np.array([0, 0], dtype=float),
}
np.testing.assert_equal(pts.properties, props)
def test_empty_layer_with_face_colormap():
"""Test creating an empty layer where the face color is a colormap
See: https://github.com/napari/napari/pull/1069
"""
default_properties = {'point_type': np.array([1.5], dtype=float)}
layer = Points(
properties=default_properties,
face_color='point_type',
face_colormap='gray',
)
assert layer.face_color_mode == 'colormap'
# verify the current_face_color is correct
face_color = np.array([1, 1, 1, 1])
np.testing.assert_allclose(layer._face.current_color, face_color)
def test_empty_layer_with_edge_colormap():
"""Test creating an empty layer where the face color is a colormap
See: https://github.com/napari/napari/pull/1069
"""
default_properties = {'point_type': np.array([1.5], dtype=float)}
layer = Points(
properties=default_properties,
edge_color='point_type',
edge_colormap='gray',
)
assert layer.edge_color_mode == 'colormap'
    # verify the current_edge_color is correct
edge_color = np.array([1, 1, 1, 1])
np.testing.assert_allclose(layer._edge.current_color, edge_color)
def test_empty_layer_with_text_properties():
"""Test initializing an empty layer with text defined"""
default_properties = {'point_type': np.array([1.5], dtype=float)}
text_kwargs = {'text': 'point_type', 'color': 'red'}
layer = Points(
properties=default_properties,
text=text_kwargs,
)
np.testing.assert_equal(layer.text.values, np.empty(0))
np.testing.assert_allclose(layer.text.color, [1, 0, 0, 1])
# add a point and check that the appropriate text value was added
layer.add([1, 1])
np.testing.assert_equal(layer.text.values, ['1.5'])
np.testing.assert_allclose(layer.text.color, [1, 0, 0, 1])
def test_empty_layer_with_text_formatted():
"""Test initializing an empty layer with text defined"""
default_properties = {'point_type': np.array([1.5], dtype=float)}
layer = Points(
properties=default_properties,
text='point_type: {point_type:.2f}',
)
np.testing.assert_equal(layer.text.values, np.empty(0))
# add a point and check that the appropriate text value was added
layer.add([1, 1])
np.testing.assert_equal(layer.text.values, ['point_type: 1.50'])
def test_random_points():
"""Test instantiating Points layer with random 2D data."""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Points(data)
assert np.all(layer.data == data)
assert layer.ndim == shape[1]
assert layer._view_data.ndim == 2
assert len(layer.data) == 10
assert len(layer.selected_data) == 0
def test_integer_points():
"""Test instantiating Points layer with integer data."""
shape = (10, 2)
np.random.seed(0)
data = np.random.randint(20, size=(10, 2))
layer = Points(data)
assert np.all(layer.data == data)
assert layer.ndim == shape[1]
assert layer._view_data.ndim == 2
assert len(layer.data) == 10
def test_negative_points():
"""Test instantiating Points layer with negative data."""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape) - 10
layer = Points(data)
assert np.all(layer.data == data)
assert layer.ndim == shape[1]
assert layer._view_data.ndim == 2
assert len(layer.data) == 10
def test_empty_points_array():
"""Test instantiating Points layer with empty array."""
shape = (0, 2)
data = np.empty(shape)
layer = Points(data)
assert np.all(layer.data == data)
assert layer.ndim == shape[1]
assert layer._view_data.ndim == 2
assert len(layer.data) == 0
def test_3D_points():
"""Test instantiating Points layer with random 3D data."""
shape = (10, 3)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Points(data)
assert np.all(layer.data == data)
assert layer.ndim == shape[1]
assert layer._view_data.ndim == 2
assert len(layer.data) == 10
def test_4D_points():
"""Test instantiating Points layer with random 4D data."""
shape = (10, 4)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Points(data)
assert np.all(layer.data == data)
assert layer.ndim == shape[1]
assert layer._view_data.ndim == 2
assert len(layer.data) == 10
def test_changing_points():
"""Test changing Points data."""
shape_a = (10, 2)
shape_b = (20, 2)
np.random.seed(0)
data_a = 20 * np.random.random(shape_a)
data_b = 20 * np.random.random(shape_b)
layer = Points(data_a)
layer.data = data_b
assert np.all(layer.data == data_b)
assert layer.ndim == shape_b[1]
assert layer._view_data.ndim == 2
assert len(layer.data) == 20
def test_selecting_points():
"""Test selecting points."""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Points(data)
layer.mode = 'select'
data_to_select = {1, 2}
layer.selected_data = data_to_select
assert layer.selected_data == data_to_select
# test switching to 3D
layer._slice_dims(ndisplay=3)
assert layer.selected_data == data_to_select
# select different points while in 3D mode
other_data_to_select = {0}
layer.selected_data = other_data_to_select
assert layer.selected_data == other_data_to_select
# selection should persist when going back to 2D mode
layer._slice_dims(ndisplay=2)
assert layer.selected_data == other_data_to_select
    # selection should persist when switching between select and pan_zoom
layer.mode = 'pan_zoom'
assert layer.selected_data == other_data_to_select
layer.mode = 'select'
assert layer.selected_data == other_data_to_select
# add mode should clear the selection
layer.mode = 'add'
assert layer.selected_data == set()
def test_adding_points():
"""Test adding Points data."""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Points(data)
assert len(layer.data) == 10
coord = [20, 20]
layer.add(coord)
assert len(layer.data) == 11
assert np.all(layer.data[10] == coord)
# the added point should be selected
assert layer.selected_data == {10}
# test adding multiple points
coords = [[10, 10], [15, 15]]
layer.add(coords)
assert len(layer.data) == 13
assert np.all(layer.data[11:, :] == coords)
# test that the last added points can be deleted
layer.remove_selected()
np.testing.assert_equal(layer.data, np.vstack((data, coord)))
def test_adding_points_to_empty():
"""Test adding Points data to empty."""
shape = (0, 2)
data = np.empty(shape)
layer = Points(data)
assert len(layer.data) == 0
coord = [20, 20]
layer.add(coord)
assert len(layer.data) == 1
assert np.all(layer.data[0] == coord)
def test_removing_selected_points():
"""Test selecting points."""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Points(data)
# With nothing selected no points should be removed
layer.remove_selected()
assert len(layer.data) == shape[0]
# Select two points and remove them
layer.selected_data = {0, 3}
layer.remove_selected()
assert len(layer.data) == shape[0] - 2
assert len(layer.selected_data) == 0
keep = [1, 2] + list(range(4, 10))
assert np.all(layer.data == data[keep])
# Select another point and remove it
layer.selected_data = {4}
layer.remove_selected()
assert len(layer.data) == shape[0] - 3
def test_move():
"""Test moving points."""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
unmoved = copy(data)
layer = Points(data)
# Move one point relative to an initial drag start location
layer._move([0], [0, 0])
layer._move([0], [10, 10])
layer._drag_start = None
assert np.all(layer.data[0] == unmoved[0] + [10, 10])
assert np.all(layer.data[1:] == unmoved[1:])
# Move two points relative to an initial drag start location
layer._move([1, 2], [2, 2])
layer._move([1, 2], np.add([2, 2], [-3, 4]))
assert np.all(layer.data[1:2] == unmoved[1:2] + [-3, 4])
def test_changing_modes():
"""Test changing modes."""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Points(data)
assert layer.mode == 'pan_zoom'
assert layer.interactive is True
layer.mode = 'add'
assert layer.mode == 'add'
layer.mode = 'select'
assert layer.mode == 'select'
assert layer.interactive is False
layer.mode = 'pan_zoom'
assert layer.mode == 'pan_zoom'
assert layer.interactive is True
with pytest.raises(ValueError):
layer.mode = 'not_a_mode'
def test_name():
"""Test setting layer name."""
np.random.seed(0)
data = 20 * np.random.random((10, 2))
layer = Points(data)
assert layer.name == 'Points'
layer = Points(data, name='random')
assert layer.name == 'random'
layer.name = 'pts'
assert layer.name == 'pts'
def test_visibility():
"""Test setting layer visibility."""
np.random.seed(0)
data = 20 * np.random.random((10, 2))
layer = Points(data)
assert layer.visible is True
layer.visible = False
assert layer.visible is False
layer = Points(data, visible=False)
assert layer.visible is False
layer.visible = True
assert layer.visible is True
def test_opacity():
"""Test setting layer opacity."""
np.random.seed(0)
data = 20 * np.random.random((10, 2))
layer = Points(data)
assert layer.opacity == 1.0
layer.opacity = 0.5
assert layer.opacity == 0.5
layer = Points(data, opacity=0.6)
assert layer.opacity == 0.6
layer.opacity = 0.3
assert layer.opacity == 0.3
def test_blending():
"""Test setting layer blending."""
np.random.seed(0)
data = 20 * np.random.random((10, 2))
layer = Points(data)
assert layer.blending == 'translucent'
layer.blending = 'additive'
assert layer.blending == 'additive'
layer = Points(data, blending='additive')
assert layer.blending == 'additive'
layer.blending = 'opaque'
assert layer.blending == 'opaque'
def test_symbol():
"""Test setting symbol."""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Points(data)
assert layer.symbol == 'disc'
layer.symbol = 'cross'
assert layer.symbol == 'cross'
layer = Points(data, symbol='star')
assert layer.symbol == 'star'
properties_array = {'point_type': _make_cycled_properties(['A', 'B'], 10)}
properties_list = {'point_type': list(_make_cycled_properties(['A', 'B'], 10))}
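# The two module-level fixtures above hold the same per-point property values, once as a
# numpy array and once as a plain list; the parametrized tests below are expected to behave
# identically for either form. (_make_cycled_properties is assumed to repeat the given
# values out to the requested length, e.g. ['A', 'B', 'A', 'B', ...] for length 10.)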
@pytest.mark.parametrize("properties", [properties_array, properties_list])
def test_properties(properties):
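    """Test point properties: initialization, removal, selection, addition, and copy/paste."""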
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Points(data, properties=copy(properties))
np.testing.assert_equal(layer.properties, properties)
current_prop = {'point_type': np.array(['B'])}
assert layer.current_properties == current_prop
# test removing points
layer.selected_data = {0, 1}
layer.remove_selected()
remove_properties = properties['point_type'][2::]
assert len(layer.properties['point_type']) == (shape[0] - 2)
assert np.all(layer.properties['point_type'] == remove_properties)
# test selection of properties
layer.selected_data = {0}
selected_annotation = layer.current_properties['point_type']
assert len(selected_annotation) == 1
assert selected_annotation[0] == 'A'
# test adding points with properties
layer.add([10, 10])
add_annotations = np.concatenate((remove_properties, ['A']), axis=0)
assert np.all(layer.properties['point_type'] == add_annotations)
# test copy/paste
layer.selected_data = {0, 1}
layer._copy_data()
assert np.all(layer._clipboard['properties']['point_type'] == ['A', 'B'])
layer._paste_data()
paste_annotations = np.concatenate((add_annotations, ['A', 'B']), axis=0)
assert np.all(layer.properties['point_type'] == paste_annotations)
@pytest.mark.parametrize("attribute", ['edge', 'face'])
def test_adding_properties(attribute):
"""Test adding properties to an existing layer"""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Points(data)
# add properties
properties = {'point_type': _make_cycled_properties(['A', 'B'], shape[0])}
layer.properties = properties
np.testing.assert_equal(layer.properties, properties)
# add properties as a dataframe
properties_df = pd.DataFrame(properties)
layer.properties = properties_df
np.testing.assert_equal(layer.properties, properties)
# add properties as a dictionary with list values
properties_list = {
'point_type': list(_make_cycled_properties(['A', 'B'], shape[0]))
}
layer.properties = properties_list
assert isinstance(layer.properties['point_type'], np.ndarray)
# removing a property that was the _*_color_property should give a warning
color_manager = getattr(layer, f'_{attribute}')
color_manager.color_properties = {
'name': 'point_type',
'values': np.empty(0),
'current_value': 'A',
}
properties_2 = {
'not_point_type': _make_cycled_properties(['A', 'B'], shape[0])
}
with pytest.warns(RuntimeWarning):
layer.properties = properties_2
def test_properties_dataframe():
"""Test if properties can be provided as a DataFrame"""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
properties = {'point_type': _make_cycled_properties(['A', 'B'], shape[0])}
properties_df = pd.DataFrame(properties)
properties_df = properties_df.astype(properties['point_type'].dtype)
layer = Points(data, properties=properties_df)
np.testing.assert_equal(layer.properties, properties)
def test_add_points_with_properties_as_list():
# test adding points initialized with properties as list
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
properties = {
'point_type': list(_make_cycled_properties(['A', 'B'], shape[0]))
}
layer = Points(data, properties=copy(properties))
coord = [18, 18]
layer.add(coord)
new_prop = {'point_type': np.append(properties['point_type'], 'B')}
np.testing.assert_equal(layer.properties, new_prop)
def test_updating_points_properties():
# test adding points initialized with properties
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
properties = {'point_type': _make_cycled_properties(['A', 'B'], shape[0])}
layer = Points(data, properties=copy(properties))
layer.mode = 'select'
    layer.selected_data = {len(data) - 1}
layer.current_properties = {'point_type': np.array(['A'])}
updated_properties = properties
updated_properties['point_type'][-1] = 'A'
np.testing.assert_equal(layer.properties, updated_properties)
properties_array = {'point_type': _make_cycled_properties(['A', 'B'], 10)}
properties_list = {'point_type': list(_make_cycled_properties(['A', 'B'], 10))}
@pytest.mark.parametrize("properties", [properties_array, properties_list])
def test_text_from_property_value(properties):
"""Test setting text from a property value"""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Points(data, properties=copy(properties), text='point_type')
np.testing.assert_equal(layer.text.values, properties['point_type'])
@pytest.mark.parametrize("properties", [properties_array, properties_list])
def test_text_from_property_fstring(properties):
"""Test setting text with an f-string from the property value"""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Points(
data, properties=copy(properties), text='type: {point_type}'
)
expected_text = ['type: ' + v for v in properties['point_type']]
np.testing.assert_equal(layer.text.values, expected_text)
# test updating the text
layer.text = 'type-ish: {point_type}'
expected_text_2 = ['type-ish: ' + v for v in properties['point_type']]
np.testing.assert_equal(layer.text.values, expected_text_2)
# copy/paste
layer.selected_data = {0}
layer._copy_data()
layer._paste_data()
expected_text_3 = expected_text_2 + ['type-ish: A']
np.testing.assert_equal(layer.text.values, expected_text_3)
# add point
layer.selected_data = {0}
new_shape = np.random.random((1, 2))
layer.add(new_shape)
expected_text_4 = expected_text_3 + ['type-ish: A']
np.testing.assert_equal(layer.text.values, expected_text_4)
@pytest.mark.parametrize("properties", [properties_array, properties_list])
def test_set_text_with_kwarg_dict(properties):
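    """Test initializing text from a kwargs dict and that each text attribute is applied."""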
text_kwargs = {
'text': 'type: {point_type}',
'color': [0, 0, 0, 1],
'rotation': 10,
'translation': [5, 5],
'anchor': 'upper_left',
'size': 10,
'visible': True,
}
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Points(data, properties=copy(properties), text=text_kwargs)
expected_text = ['type: ' + v for v in properties['point_type']]
np.testing.assert_equal(layer.text.values, expected_text)
    for prop, value in text_kwargs.items():
        if prop == 'text':
            continue
        layer_value = getattr(layer._text, prop)
        np.testing.assert_equal(layer_value, value)
@pytest.mark.parametrize("properties", [properties_array, properties_list])
def test_text_error(properties):
"""creating a layer with text as the wrong type should raise an error"""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
# try adding text as the wrong type
with pytest.raises(TypeError):
Points(data, properties=copy(properties), text=123)
def test_refresh_text():
"""Test refreshing the text after setting new properties"""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
properties = {'point_type': ['A'] * shape[0]}
layer = Points(data, properties=copy(properties), text='point_type')
new_properties = {'point_type': ['B'] * shape[0]}
layer.properties = new_properties
np.testing.assert_equal(layer.text.values, new_properties['point_type'])
def test_points_errors():
shape = (3, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
# try adding properties with the wrong number of properties
with pytest.raises(ValueError):
annotations = {'point_type': np.array(['A', 'B'])}
Points(data, properties=copy(annotations))
def test_edge_width():
"""Test setting edge width."""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Points(data)
assert layer.edge_width == 1
layer.edge_width = 2
assert layer.edge_width == 2
layer = Points(data, edge_width=3)
assert layer.edge_width == 3
def test_n_dimensional():
"""Test setting n_dimensional flag for 2D and 4D data."""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Points(data)
assert layer.n_dimensional is False
layer.n_dimensional = True
assert layer.n_dimensional is True
layer = Points(data, n_dimensional=True)
assert layer.n_dimensional is True
shape = (10, 4)
data = 20 * np.random.random(shape)
layer = Points(data)
assert layer.n_dimensional is False
layer.n_dimensional = True
assert layer.n_dimensional is True
layer = Points(data, n_dimensional=True)
assert layer.n_dimensional is True
@pytest.mark.filterwarnings("ignore:elementwise comparison fail:FutureWarning")
@pytest.mark.parametrize("attribute", ['edge', 'face'])
def test_switch_color_mode(attribute):
"""Test switching between color modes"""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
# create a continuous property with a known value in the last element
continuous_prop = np.random.random((shape[0],))
continuous_prop[-1] = 1
properties = {
'point_truthiness': continuous_prop,
'point_type': _make_cycled_properties(['A', 'B'], shape[0]),
}
initial_color = [1, 0, 0, 1]
color_cycle = ['red', 'blue']
color_kwarg = f'{attribute}_color'
colormap_kwarg = f'{attribute}_colormap'
color_cycle_kwarg = f'{attribute}_color_cycle'
args = {
color_kwarg: initial_color,
colormap_kwarg: 'gray',
color_cycle_kwarg: color_cycle,
}
layer = Points(data, properties=properties, **args)
layer_color_mode = getattr(layer, f'{attribute}_color_mode')
layer_color = getattr(layer, f'{attribute}_color')
assert layer_color_mode == 'direct'
np.testing.assert_allclose(
layer_color, np.repeat([initial_color], shape[0], axis=0)
)
# there should not be an edge_color_property
color_manager = getattr(layer, f'_{attribute}')
color_property = color_manager.color_properties
assert color_property is None
# transitioning to colormap should raise a warning
# because there isn't an edge color property yet and
# the first property in points.properties is being automatically selected
with pytest.warns(UserWarning):
setattr(layer, f'{attribute}_color_mode', 'colormap')
color_manager = getattr(layer, f'_{attribute}')
color_property_name = color_manager.color_properties.name
assert color_property_name == next(iter(properties))
layer_color = getattr(layer, f'{attribute}_color')
np.testing.assert_allclose(layer_color[-1], [1, 1, 1, 1])
# switch to color cycle
setattr(layer, f'{attribute}_color_mode', 'cycle')
setattr(layer, f'{attribute}_color', 'point_type')
color = getattr(layer, f'{attribute}_color')
layer_color = transform_color(color_cycle * int(shape[0] / 2))
np.testing.assert_allclose(color, layer_color)
# switch back to direct, edge_colors shouldn't change
setattr(layer, f'{attribute}_color_mode', 'direct')
new_edge_color = getattr(layer, f'{attribute}_color')
np.testing.assert_allclose(new_edge_color, color)
@pytest.mark.parametrize("attribute", ['edge', 'face'])
def test_colormap_without_properties(attribute):
"""Setting the colormode to colormap should raise an exception"""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Points(data)
with pytest.raises(ValueError):
setattr(layer, f'{attribute}_color_mode', 'colormap')
@pytest.mark.parametrize("attribute", ['edge', 'face'])
def test_colormap_with_categorical_properties(attribute):
"""Setting the colormode to colormap should raise an exception"""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
properties = {'point_type': _make_cycled_properties(['A', 'B'], shape[0])}
layer = Points(data, properties=properties)
with pytest.raises(TypeError):
with pytest.warns(UserWarning):
setattr(layer, f'{attribute}_color_mode', 'colormap')
@pytest.mark.parametrize("attribute", ['edge', 'face'])
def test_add_colormap(attribute):
"""Test directly adding a vispy Colormap object"""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
annotations = {'point_type': _make_cycled_properties([0, 1.5], shape[0])}
color_kwarg = f'{attribute}_color'
colormap_kwarg = f'{attribute}_colormap'
args = {color_kwarg: 'point_type', colormap_kwarg: 'viridis'}
layer = Points(data, properties=annotations, **args)
setattr(layer, f'{attribute}_colormap', get_colormap('gray'))
layer_colormap = getattr(layer, f'{attribute}_colormap')
assert 'unnamed colormap' in layer_colormap.name
@pytest.mark.parametrize("attribute", ['edge', 'face'])
def test_add_point_direct(attribute: str):
"""Test adding points to layer directly"""
layer = Points()
assert len(getattr(layer, f'{attribute}_color')) == 0
setattr(layer, f'current_{attribute}_color', 'red')
coord = [18, 18]
layer.add(coord)
np.testing.assert_allclose(
[[1, 0, 0, 1]], getattr(layer, f'{attribute}_color')
)
@pytest.mark.parametrize("attribute", ['edge', 'face'])
def test_color_direct(attribute: str):
"""Test setting colors directly"""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer_kwargs = {f'{attribute}_color': 'black'}
layer = Points(data, **layer_kwargs)
color_array = transform_color(['black'] * shape[0])
current_color = getattr(layer, f'current_{attribute}_color')
layer_color = getattr(layer, f'{attribute}_color')
assert current_color == 'black'
assert len(layer.edge_color) == shape[0]
np.testing.assert_allclose(color_array, layer_color)
# With no data selected changing color has no effect
setattr(layer, f'current_{attribute}_color', 'blue')
current_color = getattr(layer, f'current_{attribute}_color')
assert current_color == 'blue'
np.testing.assert_allclose(color_array, layer_color)
# Select data and change edge color of selection
selected_data = {0, 1}
layer.selected_data = {0, 1}
current_color = getattr(layer, f'current_{attribute}_color')
assert current_color == 'black'
setattr(layer, f'current_{attribute}_color', 'green')
colorarray_green = transform_color(['green'] * len(layer.selected_data))
color_array[list(selected_data)] = colorarray_green
layer_color = getattr(layer, f'{attribute}_color')
np.testing.assert_allclose(color_array, layer_color)
# Add new point and test its color
coord = [18, 18]
    layer.selected_data = set()
setattr(layer, f'current_{attribute}_color', 'blue')
layer.add(coord)
color_array = np.vstack([color_array, transform_color('blue')])
layer_color = getattr(layer, f'{attribute}_color')
assert len(layer_color) == shape[0] + 1
np.testing.assert_allclose(color_array, layer_color)
# Check removing data adjusts colors correctly
layer.selected_data = {0, 2}
layer.remove_selected()
assert len(layer.data) == shape[0] - 1
layer_color = getattr(layer, f'{attribute}_color')
assert len(layer_color) == shape[0] - 1
np.testing.assert_allclose(
layer_color,
np.vstack((color_array[1], color_array[3:])),
)
color_cycle_str = ['red', 'blue']
color_cycle_rgb = [[1, 0, 0], [0, 0, 1]]
color_cycle_rgba = [[1, 0, 0, 1], [0, 0, 1, 1]]
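# The same two-color cycle expressed three ways (color names, RGB, and RGBA); the
# parametrized tests below should yield identical colors for each representation.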
@pytest.mark.parametrize("attribute", ['edge', 'face'])
@pytest.mark.parametrize(
"color_cycle",
[color_cycle_str, color_cycle_rgb, color_cycle_rgba],
)
def test_color_cycle(attribute, color_cycle):
"""Test setting edge/face color with a color cycle list"""
# create Points using list color cycle
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
properties = {'point_type': _make_cycled_properties(['A', 'B'], shape[0])}
points_kwargs = {
'properties': properties,
f'{attribute}_color': 'point_type',
f'{attribute}_color_cycle': color_cycle,
}
layer = Points(data, **points_kwargs)
assert layer.properties == properties
color_array = transform_color(
list(islice(cycle(color_cycle), 0, shape[0]))
)
layer_color = getattr(layer, f'{attribute}_color')
np.testing.assert_allclose(layer_color, color_array)
# Add new point and test its color
coord = [18, 18]
layer.selected_data = {0}
layer.add(coord)
layer_color = getattr(layer, f'{attribute}_color')
assert len(layer_color) == shape[0] + 1
np.testing.assert_allclose(
layer_color,
np.vstack((color_array, transform_color('red'))),
)
# Check removing data adjusts colors correctly
layer.selected_data = {0, 2}
layer.remove_selected()
assert len(layer.data) == shape[0] - 1
layer_color = getattr(layer, f'{attribute}_color')
assert len(layer_color) == shape[0] - 1
np.testing.assert_allclose(
layer_color,
np.vstack((color_array[1], color_array[3:], transform_color('red'))),
)
# test adding a point with a new property value
    layer.selected_data = set()
current_properties = layer.current_properties
current_properties['point_type'] = np.array(['new'])
layer.current_properties = current_properties
layer.add([10, 10])
color_manager = getattr(layer, f'_{attribute}')
color_cycle_map = color_manager.categorical_colormap.colormap
assert 'new' in color_cycle_map
np.testing.assert_allclose(
color_cycle_map['new'], np.squeeze(transform_color(color_cycle[0]))
)
@pytest.mark.parametrize("attribute", ['edge', 'face'])
def test_color_cycle_dict(attribute):
"""Test setting edge/face color with a color cycle dict"""
data = np.array([[0, 0], [100, 0], [0, 100]])
properties = {'my_colors': [2, 6, 3]}
points_kwargs = {
'properties': properties,
f'{attribute}_color': 'my_colors',
f'{attribute}_color_cycle': {1: 'green', 2: 'red', 3: 'blue'},
}
layer = Points(data, **points_kwargs)
color_manager = getattr(layer, f'_{attribute}')
color_cycle_map = color_manager.categorical_colormap.colormap
np.testing.assert_allclose(color_cycle_map[2], [1, 0, 0, 1]) # 2 is red
np.testing.assert_allclose(color_cycle_map[3], [0, 0, 1, 1]) # 3 is blue
np.testing.assert_allclose(color_cycle_map[6], [1, 1, 1, 1]) # 6 is white
@pytest.mark.parametrize("attribute", ['edge', 'face'])
def test_add_color_cycle_to_empty_layer(attribute):
"""Test adding a point to an empty layer when edge/face color is a color cycle
See: https://github.com/napari/napari/pull/1069
"""
default_properties = {'point_type': np.array(['A'])}
color_cycle = ['red', 'blue']
points_kwargs = {
'properties': default_properties,
f'{attribute}_color': 'point_type',
f'{attribute}_color_cycle': color_cycle,
}
layer = Points(**points_kwargs)
# verify the current_edge_color is correct
expected_color = transform_color(color_cycle[0])[0]
color_manager = getattr(layer, f'_{attribute}')
current_color = color_manager.current_color
np.testing.assert_allclose(current_color, expected_color)
# add a point
layer.add([10, 10])
props = {'point_type': np.array(['A'])}
expected_color = np.array([[1, 0, 0, 1]])
np.testing.assert_equal(layer.properties, props)
attribute_color = getattr(layer, f'{attribute}_color')
np.testing.assert_allclose(attribute_color, expected_color)
# add a point with a new property
    layer.selected_data = set()
layer.current_properties = {'point_type': np.array(['B'])}
layer.add([12, 12])
new_color = np.array([0, 0, 1, 1])
expected_color = np.vstack((expected_color, new_color))
new_properties = {'point_type': np.array(['A', 'B'])}
attribute_color = getattr(layer, f'{attribute}_color')
np.testing.assert_allclose(attribute_color, expected_color)
np.testing.assert_equal(layer.properties, new_properties)
@pytest.mark.parametrize("attribute", ['edge', 'face'])
def test_adding_value_color_cycle(attribute):
"""Test that adding values to properties used to set a color cycle
and then calling Points.refresh_colors() performs the update and adds the
new value to the face/edge_color_cycle_map.
See: https://github.com/napari/napari/issues/988
"""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
properties = {'point_type': _make_cycled_properties(['A', 'B'], shape[0])}
color_cycle = ['red', 'blue']
points_kwargs = {
'properties': properties,
f'{attribute}_color': 'point_type',
f'{attribute}_color_cycle': color_cycle,
}
layer = Points(data, **points_kwargs)
# make point 0 point_type C
props = layer.properties
point_types = props['point_type']
point_types[0] = 'C'
props['point_type'] = point_types
layer.properties = props
color_manager = getattr(layer, f'_{attribute}')
color_cycle_map = color_manager.categorical_colormap.colormap
color_map_keys = [*color_cycle_map]
assert 'C' in color_map_keys
@pytest.mark.parametrize("attribute", ['edge', 'face'])
def test_color_colormap(attribute):
"""Test setting edge/face color with a colormap"""
    # create Points with a colormap
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
properties = {'point_type': _make_cycled_properties([0, 1.5], shape[0])}
points_kwargs = {
'properties': properties,
f'{attribute}_color': 'point_type',
f'{attribute}_colormap': 'gray',
}
layer = Points(data, **points_kwargs)
assert layer.properties == properties
color_mode = getattr(layer, f'{attribute}_color_mode')
assert color_mode == 'colormap'
color_array = transform_color(['black', 'white'] * int(shape[0] / 2))
attribute_color = getattr(layer, f'{attribute}_color')
assert np.all(attribute_color == color_array)
# change the color cycle - face_color should not change
setattr(layer, f'{attribute}_color_cycle', ['red', 'blue'])
attribute_color = getattr(layer, f'{attribute}_color')
assert np.all(attribute_color == color_array)
# Add new point and test its color
coord = [18, 18]
layer.selected_data = {0}
layer.add(coord)
attribute_color = getattr(layer, f'{attribute}_color')
assert len(attribute_color) == shape[0] + 1
np.testing.assert_allclose(
attribute_color,
np.vstack((color_array, transform_color('black'))),
)
# Check removing data adjusts colors correctly
layer.selected_data = {0, 2}
layer.remove_selected()
assert len(layer.data) == shape[0] - 1
attribute_color = getattr(layer, f'{attribute}_color')
assert len(attribute_color) == shape[0] - 1
np.testing.assert_allclose(
attribute_color,
np.vstack(
(
color_array[1],
color_array[3:],
transform_color('black'),
)
),
)
# adjust the clims
setattr(layer, f'{attribute}_contrast_limits', (0, 3))
attribute_color = getattr(layer, f'{attribute}_color')
np.testing.assert_allclose(attribute_color[-2], [0.5, 0.5, 0.5, 1])
# change the colormap
new_colormap = 'viridis'
setattr(layer, f'{attribute}_colormap', new_colormap)
attribute_colormap = getattr(layer, f'{attribute}_colormap')
assert attribute_colormap.name == new_colormap
def test_size():
"""Test setting size with scalar."""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Points(data)
assert layer.current_size == 10
assert layer.size.shape == shape
assert np.unique(layer.size)[0] == 10
# Add a new point, it should get current size
coord = [17, 17]
layer.add(coord)
assert layer.size.shape == (11, 2)
assert np.unique(layer.size)[0] == 10
# Setting size affects newly added points not current points
layer.current_size = 20
assert layer.current_size == 20
assert layer.size.shape == (11, 2)
assert np.unique(layer.size)[0] == 10
# Add new point, should have new size
coord = [18, 18]
layer.add(coord)
assert layer.size.shape == (12, 2)
assert np.unique(layer.size[:11])[0] == 10
assert np.all(layer.size[11] == [20, 20])
# Select data and change size
layer.selected_data = {0, 1}
assert layer.current_size == 10
layer.current_size = 16
assert layer.size.shape == (12, 2)
assert np.unique(layer.size[2:11])[0] == 10
assert np.unique(layer.size[:2])[0] == 16
# Select data and size changes
layer.selected_data = {11}
assert layer.current_size == 20
def test_size_with_arrays():
"""Test setting size with arrays."""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Points(data)
sizes = 5 * np.random.random(shape)
layer.size = sizes
assert np.all(layer.size == sizes)
# Test broadcasting of sizes
sizes = [5, 5]
layer.size = sizes
assert np.all(layer.size[0] == sizes)
# Test broadcasting of transposed sizes
sizes = np.random.randint(low=1, high=5, size=shape[::-1])
layer.size = sizes
np.testing.assert_equal(layer.size, sizes.T)
# Un-broadcastable array should raise an exception
bad_sizes = np.random.randint(low=1, high=5, size=(3, 8))
with pytest.raises(ValueError):
layer.size = bad_sizes
# Create new layer with new size array data
sizes = 5 * np.random.random(shape)
layer = Points(data, size=sizes)
assert layer.current_size == 10
assert layer.size.shape == shape
assert np.all(layer.size == sizes)
# Create new layer with new size array data
sizes = [5, 5]
layer = Points(data, size=sizes)
assert layer.current_size == 10
assert layer.size.shape == shape
assert np.all(layer.size[0] == sizes)
# Add new point, should have new size
coord = [18, 18]
layer.current_size = 13
layer.add(coord)
assert layer.size.shape == (11, 2)
assert np.unique(layer.size[:10])[0] == 5
assert np.all(layer.size[10] == [13, 13])
# Select data and change size
layer.selected_data = {0, 1}
assert layer.current_size == 5
layer.current_size = 16
assert layer.size.shape == (11, 2)
assert np.unique(layer.size[2:10])[0] == 5
assert np.unique(layer.size[:2])[0] == 16
    # Check removing data adjusts sizes correctly
layer.selected_data = {0, 2}
layer.remove_selected()
assert len(layer.data) == 9
assert len(layer.size) == 9
assert np.all(layer.size[0] == [16, 16])
assert np.all(layer.size[1] == [5, 5])
def test_size_with_3D_arrays():
"""Test setting size with 3D arrays."""
shape = (10, 3)
np.random.seed(0)
data = 20 * np.random.random(shape)
data[:2, 0] = 0
layer = Points(data)
assert layer.current_size == 10
assert layer.size.shape == shape
assert np.unique(layer.size)[0] == 10
sizes = 5 * np.random.random(shape)
layer.size = sizes
assert np.all(layer.size == sizes)
# Test broadcasting of sizes
sizes = [1, 5, 5]
layer.size = sizes
assert np.all(layer.size[0] == sizes)
# Create new layer with new size array data
sizes = 5 * np.random.random(shape)
layer = Points(data, size=sizes)
assert layer.current_size == 10
assert layer.size.shape == shape
assert np.all(layer.size == sizes)
# Create new layer with new size array data
sizes = [1, 5, 5]
layer = Points(data, size=sizes)
assert layer.current_size == 10
assert layer.size.shape == shape
assert np.all(layer.size[0] == sizes)
# Add new point, should have new size in last dim only
coord = [4, 18, 18]
layer.current_size = 13
layer.add(coord)
assert layer.size.shape == (11, 3)
assert np.unique(layer.size[:10, 1:])[0] == 5
assert np.all(layer.size[10] == [1, 13, 13])
# Select data and change size
layer.selected_data = {0, 1}
assert layer.current_size == 5
layer.current_size = 16
assert layer.size.shape == (11, 3)
assert np.unique(layer.size[2:10, 1:])[0] == 5
assert np.all(layer.size[0] == [16, 16, 16])
# Create new 3D layer with new 2D points size data
sizes = [0, 5, 5]
layer = Points(data, size=sizes)
assert layer.current_size == 10
assert layer.size.shape == shape
assert np.all(layer.size[0] == sizes)
# Add new point, should have new size only in last 2 dimensions
coord = [4, 18, 18]
layer.current_size = 13
layer.add(coord)
assert layer.size.shape == (11, 3)
assert np.all(layer.size[10] == [0, 13, 13])
# Select data and change size
layer.selected_data = {0, 1}
assert layer.current_size == 5
layer.current_size = 16
assert layer.size.shape == (11, 3)
assert np.unique(layer.size[2:10, 1:])[0] == 5
assert np.all(layer.size[0] == [0, 16, 16])
def test_copy_and_paste():
"""Test copying and pasting selected points."""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
layer = Points(data)
# Clipboard starts empty
assert layer._clipboard == {}
# Pasting empty clipboard doesn't change data
layer._paste_data()
assert len(layer.data) == 10
    # Copying with nothing selected leaves the clipboard empty
layer._copy_data()
assert layer._clipboard == {}
# Copying and pasting with two points selected adds to clipboard and data
layer.selected_data = {0, 1}
layer._copy_data()
layer._paste_data()
assert len(layer._clipboard.keys()) > 0
assert len(layer.data) == shape[0] + 2
assert np.all(layer.data[:2] == layer.data[-2:])
# Pasting again adds two more points to data
layer._paste_data()
assert len(layer.data) == shape[0] + 4
assert np.all(layer.data[:2] == layer.data[-2:])
# Unselecting everything and copying and pasting will empty the clipboard
# and add no new data
    layer.selected_data = set()
layer._copy_data()
layer._paste_data()
assert layer._clipboard == {}
assert len(layer.data) == shape[0] + 4
def test_value():
"""Test getting the value of the data at the current coordinates."""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
data[-1] = [0, 0]
layer = Points(data)
value = layer.get_value((0, 0))
assert value == 9
layer.data = layer.data + 20
value = layer.get_value((0, 0))
assert value is None
def test_message():
"""Test converting value and coords to message."""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
data[-1] = [0, 0]
layer = Points(data)
msg = layer.get_status((0,) * 2)
assert type(msg) == str
def test_thumbnail():
"""Test the image thumbnail for square data."""
shape = (10, 2)
np.random.seed(0)
data = 20 * np.random.random(shape)
data[0] = [0, 0]
data[-1] = [20, 20]
layer = Points(data)
layer._update_thumbnail()
assert layer.thumbnail.shape == layer._thumbnail_shape
def test_thumbnail_with_n_points_greater_than_max():
"""Test thumbnail generation with n_points > _max_points_thumbnail
see: https://github.com/napari/napari/pull/934
"""
# 2D
max_points = Points._max_points_thumbnail * 2
bigger_data = np.random.randint(10, 100, (max_points, 2))
big_layer = Points(bigger_data)
big_layer._update_thumbnail()
assert big_layer.thumbnail.shape == big_layer._thumbnail_shape
    # 3D
bigger_data_3d = np.random.randint(10, 100, (max_points, 3))
bigger_layer_3d = Points(bigger_data_3d)
bigger_layer_3d._slice_dims(ndisplay=3)
bigger_layer_3d._update_thumbnail()
assert bigger_layer_3d.thumbnail.shape == bigger_layer_3d._thumbnail_shape
def test_view_data():
coords = np.array([[0, 1, 1], [0, 2, 2], [1, 3, 3], [3, 3, 3]])
layer = Points(coords)
layer._slice_dims([0, slice(None), slice(None)])
assert np.all(
layer._view_data == coords[np.ix_([0, 1], layer._dims_displayed)]
)
layer._slice_dims([1, slice(None), slice(None)])
assert np.all(
layer._view_data == coords[np.ix_([2], layer._dims_displayed)]
)
layer._slice_dims([1, slice(None), slice(None)], ndisplay=3)
assert np.all(layer._view_data == coords)
def test_view_size():
coords = np.array([[0, 1, 1], [0, 2, 2], [1, 3, 3], [3, 3, 3]])
sizes = np.array([[3, 5, 5], [3, 5, 5], [3, 3, 3], [2, 2, 3]])
layer = Points(coords, size=sizes, n_dimensional=False)
layer._slice_dims([0, slice(None), slice(None)])
assert np.all(
layer._view_size == sizes[np.ix_([0, 1], layer._dims_displayed)]
)
layer._slice_dims([1, slice(None), slice(None)])
assert np.all(
layer._view_size == sizes[np.ix_([2], layer._dims_displayed)]
)
layer.n_dimensional = True
assert len(layer._view_size) == 3
# test a slice with no points
layer.n_dimensional = False
layer._slice_dims([2, slice(None), slice(None)])
assert np.all(layer._view_size == [])
def test_view_colors():
coords = [[0, 1, 1], [0, 2, 2], [1, 3, 3], [3, 3, 3]]
face_color = np.array(
[[1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1], [0, 0, 1, 1]]
)
edge_color = np.array(
[[0, 0, 1, 1], [1, 0, 0, 1], [0, 1, 0, 1], [0, 0, 1, 1]]
)
layer = Points(coords, face_color=face_color, edge_color=edge_color)
layer._slice_dims([0, slice(None), slice(None)])
assert np.all(layer._view_face_color == face_color[[0, 1]])
assert np.all(layer._view_edge_color == edge_color[[0, 1]])
layer._slice_dims([1, slice(None), slice(None)])
assert np.all(layer._view_face_color == face_color[[2]])
assert np.all(layer._view_edge_color == edge_color[[2]])
# view colors should return empty array if there are no points
layer._slice_dims([2, slice(None), slice(None)])
assert len(layer._view_face_color) == 0
assert len(layer._view_edge_color) == 0
def test_interaction_box():
"""Test the boxes calculated for selected points"""
data = [[3, 3]]
size = 2
layer = Points(data, size=size)
# get a box with no points selected
index = []
box = layer.interaction_box(index)
assert box is None
# get a box with a point selected
index = [0]
expected_box = points_to_squares(data, size)
box = layer.interaction_box(index)
    assert np.all([np.isin(p, expected_box) for p in box])
def test_world_data_extent():
"""Test extent after applying transforms."""
data = [(7, -5, 0), (-2, 0, 15), (4, 30, 12)]
min_val = (-2, -5, 0)
max_val = (7, 30, 15)
layer = Points(data)
extent = np.array((min_val, max_val))
check_layer_world_data_extent(layer, extent, (3, 1, 1), (10, 20, 5))
def test_slice_data():
data = [
(10, 2, 4),
(10 + 2 * 1e-7, 4, 6),
(8, 1, 7),
(10.1, 7, 2),
(10 - 2 * 1e-7, 1, 6),
]
layer = Points(data)
assert len(layer._slice_data((8, slice(None), slice(None)))[0]) == 1
assert len(layer._slice_data((10, slice(None), slice(None)))[0]) == 3
assert (
len(layer._slice_data((10 + 2 * 1e-12, slice(None), slice(None)))[0])
== 3
)
assert len(layer._slice_data((10.1, slice(None), slice(None)))[0]) == 1
def test_scale_init():
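    """Test Points construction with a scale for None, empty, and ndim-mismatched data."""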
layer = Points(None, scale=(1, 1, 1, 1))
assert layer.ndim == 4
layer1 = Points([], scale=(1, 1, 1, 1))
assert layer1.ndim == 4
layer2 = Points([])
assert layer2.ndim == 2
with pytest.raises(ValueError):
Points([[1, 1, 1]], scale=(1, 1, 1, 1))
def test_update_none():
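    """Test that setting data to None clears the points while preserving ndim."""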
layer = Points([(1, 2, 3), (1, 3, 2)])
assert layer.ndim == 3
assert layer.data.size == 6
layer.data = None
assert layer.ndim == 3
assert layer.data.size == 0
layer.data = [(1, 2, 3), (1, 3, 2)]
assert layer.ndim == 3
assert layer.data.size == 6
| 32.227448 | 82 | 0.655417 |
79402c8f2c493f51569912048672b73d3c2685d8 | 15,527 | py | Python | tests/rule_based_profiler/test_rule_based_profiler.py | alecsaunders/great_expectations | 4227c970fd93cd66a9ad3b8ee5cd5e09763a4432 | [
"Apache-2.0"
] | 1 | 2022-01-26T18:51:29.000Z | 2022-01-26T18:51:29.000Z | tests/rule_based_profiler/test_rule_based_profiler.py | taylorfturner/great_expectations | e4964894fb97b933cac713ef1f1a78e33d362ff3 | [
"Apache-2.0"
] | null | null | null | tests/rule_based_profiler/test_rule_based_profiler.py | taylorfturner/great_expectations | e4964894fb97b933cac713ef1f1a78e33d362ff3 | [
"Apache-2.0"
] | null | null | null | from typing import Any, Dict, List, Optional
from great_expectations.rule_based_profiler.rule import Rule
from great_expectations.rule_based_profiler.types import ParameterContainer
from great_expectations.util import deep_filter_properties_iterable
def test_reconcile_profiler_variables_no_overrides(
profiler_with_placeholder_args,
variables_multi_part_name_parameter_container,
):
variables: Dict[str, Any] = {}
effective_variables: Optional[
ParameterContainer
] = profiler_with_placeholder_args.reconcile_profiler_variables(variables=variables)
assert effective_variables == variables_multi_part_name_parameter_container
def test_reconcile_profiler_variables_with_overrides(
profiler_with_placeholder_args,
):
variables: Dict[str, Any] = {
"false_positive_threshold": 2.0e-2,
"sampling_method": "bootstrap",
"mostly": 8.0e-1,
}
effective_variables: Optional[
ParameterContainer
] = profiler_with_placeholder_args.reconcile_profiler_variables(variables=variables)
assert effective_variables.to_dict()["parameter_nodes"]["variables"][
"variables"
] == {
"false_positive_threshold": 2.0e-2,
"sampling_method": "bootstrap",
"mostly": 8.0e-1,
}
def test_reconcile_profiler_rules_no_overrides(
profiler_with_placeholder_args,
):
rules: Dict[str, Dict[str, Any]] = {}
effective_rules: List[
Rule
] = profiler_with_placeholder_args.reconcile_profiler_rules(rules=rules)
assert effective_rules == profiler_with_placeholder_args.rules
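# The override tests below share a pattern: a partial `rules` dict keyed by rule name is
# reconciled against the profiler's existing configuration, the effective rules are
# serialized via to_json_dict(), empty values are stripped with
# deep_filter_properties_iterable(), and the result is compared against the expected configs.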
def test_reconcile_profiler_rules_new_rule_override(
profiler_with_placeholder_args,
):
rules: Dict[str, Dict[str, Any]] = {
"rule_0": {
"domain_builder": {
"class_name": "ColumnDomainBuilder",
"module_name": "great_expectations.rule_based_profiler.domain_builder",
},
"parameter_builders": [
{
"class_name": "MetricMultiBatchParameterBuilder",
"module_name": "great_expectations.rule_based_profiler.parameter_builder",
"name": "my_parameter",
"metric_name": "my_metric",
},
{
"class_name": "NumericMetricRangeMultiBatchParameterBuilder",
"module_name": "great_expectations.rule_based_profiler.parameter_builder",
"name": "my_other_parameter",
"metric_name": "my_other_metric",
},
],
"expectation_configuration_builders": [
{
"class_name": "DefaultExpectationConfigurationBuilder",
"module_name": "great_expectations.rule_based_profiler.expectation_configuration_builder",
"expectation_type": "expect_column_pair_values_A_to_be_greater_than_B",
},
{
"class_name": "DefaultExpectationConfigurationBuilder",
"module_name": "great_expectations.rule_based_profiler.expectation_configuration_builder",
"expectation_type": "expect_column_min_to_be_between",
},
],
},
}
expected_rules: List[dict] = [
{
"name": "rule_0",
"domain_builder": {},
"parameter_builders": [
{
"name": "my_parameter",
"metric_name": "my_metric",
"enforce_numeric_metric": False,
"replace_nan_with_zero": False,
},
{
"name": "my_other_parameter",
"metric_name": "my_other_metric",
"sampling_method": "bootstrap",
"enforce_numeric_metric": True,
"replace_nan_with_zero": True,
"false_positive_rate": 0.05,
"truncate_values": {},
},
],
"expectation_configuration_builders": [
{
"expectation_type": "expect_column_pair_values_A_to_be_greater_than_B",
},
{
"expectation_type": "expect_column_min_to_be_between",
},
],
},
{
"name": "rule_1",
"domain_builder": {},
"parameter_builders": [
{
"name": "my_parameter",
"metric_name": "my_metric",
"enforce_numeric_metric": False,
"replace_nan_with_zero": False,
},
],
"expectation_configuration_builders": [
{
"expectation_type": "expect_column_pair_values_A_to_be_greater_than_B",
},
],
},
]
effective_rules: List[
Rule
] = profiler_with_placeholder_args.reconcile_profiler_rules(rules=rules)
rule: Rule
effective_rule_configs_actual: dict = {
rule.name: rule.to_json_dict() for rule in effective_rules
}
deep_filter_properties_iterable(effective_rule_configs_actual, inplace=True)
rule_config: dict
effective_rule_configs_expected: dict = {
rule_config["name"]: rule_config for rule_config in expected_rules
}
assert effective_rule_configs_actual == effective_rule_configs_expected
def test_reconcile_profiler_rules_existing_rule_domain_builder_override(
profiler_with_placeholder_args,
):
rules: Dict[str, Dict[str, Any]] = {
"rule_1": {
"domain_builder": {
"class_name": "SimpleColumnSuffixDomainBuilder",
"module_name": "great_expectations.rule_based_profiler.domain_builder",
"column_name_suffixes": [
"_ts",
],
},
},
}
expected_rules: List[dict] = [
{
"name": "rule_1",
"domain_builder": {
"column_name_suffixes": [
"_ts",
],
},
"parameter_builders": [
{
"name": "my_parameter",
"metric_name": "my_metric",
"enforce_numeric_metric": False,
"replace_nan_with_zero": False,
},
],
"expectation_configuration_builders": [
{
"expectation_type": "expect_column_pair_values_A_to_be_greater_than_B",
},
],
},
]
effective_rules: List[
Rule
] = profiler_with_placeholder_args.reconcile_profiler_rules(rules=rules)
rule: Rule
effective_rule_configs_actual: dict = {
rule.name: rule.to_json_dict() for rule in effective_rules
}
deep_filter_properties_iterable(effective_rule_configs_actual, inplace=True)
rule_config: dict
effective_rule_configs_expected: dict = {
rule_config["name"]: rule_config for rule_config in expected_rules
}
assert effective_rule_configs_actual == effective_rule_configs_expected
def test_reconcile_profiler_rules_existing_rule_parameter_builder_overrides(
profiler_with_placeholder_args,
):
rules: Dict[str, Dict[str, Any]] = {
"rule_1": {
"parameter_builders": [
{
"class_name": "MetricMultiBatchParameterBuilder",
"module_name": "great_expectations.rule_based_profiler.parameter_builder",
"name": "my_parameter",
"metric_name": "my_special_metric",
"enforce_numeric_metric": True,
"replace_nan_with_zero": True,
},
{
"class_name": "NumericMetricRangeMultiBatchParameterBuilder",
"module_name": "great_expectations.rule_based_profiler.parameter_builder",
"name": "my_other_parameter",
"metric_name": "my_other_metric",
"enforce_numeric_metric": True,
"replace_nan_with_zero": False,
"false_positive_rate": 0.025,
},
],
},
}
expected_rules: List[dict] = [
{
"name": "rule_1",
"domain_builder": {},
"parameter_builders": [
{
"name": "my_parameter",
"metric_name": "my_special_metric",
"enforce_numeric_metric": True,
"replace_nan_with_zero": True,
},
{
"name": "my_other_parameter",
"metric_name": "my_other_metric",
"sampling_method": "bootstrap",
"enforce_numeric_metric": True,
"replace_nan_with_zero": False,
"false_positive_rate": 0.025,
"truncate_values": {},
},
],
"expectation_configuration_builders": [
{
"expectation_type": "expect_column_pair_values_A_to_be_greater_than_B",
},
],
},
]
effective_rules: List[
Rule
] = profiler_with_placeholder_args.reconcile_profiler_rules(rules=rules)
rule: Rule
effective_rule_configs_actual: dict = {
rule.name: rule.to_json_dict() for rule in effective_rules
}
deep_filter_properties_iterable(effective_rule_configs_actual, inplace=True)
rule_config: dict
effective_rule_configs_expected: dict = {
rule_config["name"]: rule_config for rule_config in expected_rules
}
assert effective_rule_configs_actual == effective_rule_configs_expected
def test_reconcile_profiler_rules_existing_rule_expectation_configuration_builder_overrides(
profiler_with_placeholder_args,
):
rules: Dict[str, Dict[str, Any]] = {
"rule_1": {
"expectation_configuration_builders": [
{
"class_name": "DefaultExpectationConfigurationBuilder",
"module_name": "great_expectations.rule_based_profiler.expectation_configuration_builder",
"expectation_type": "expect_column_pair_values_A_to_be_greater_than_B",
},
{
"class_name": "DefaultExpectationConfigurationBuilder",
"module_name": "great_expectations.rule_based_profiler.expectation_configuration_builder",
"expectation_type": "expect_column_min_to_be_between",
},
],
},
}
expected_rules: List[dict] = [
{
"name": "rule_1",
"domain_builder": {},
"parameter_builders": [
{
"name": "my_parameter",
"metric_name": "my_metric",
"enforce_numeric_metric": False,
"replace_nan_with_zero": False,
},
],
"expectation_configuration_builders": [
{
"expectation_type": "expect_column_pair_values_A_to_be_greater_than_B",
},
{
"expectation_type": "expect_column_min_to_be_between",
},
],
},
]
effective_rules: List[
Rule
] = profiler_with_placeholder_args.reconcile_profiler_rules(rules=rules)
rule: Rule
effective_rule_configs_actual: dict = {
rule.name: rule.to_json_dict() for rule in effective_rules
}
deep_filter_properties_iterable(effective_rule_configs_actual, inplace=True)
rule_config: dict
effective_rule_configs_expected: dict = {
rule_config["name"]: rule_config for rule_config in expected_rules
}
assert effective_rule_configs_actual == effective_rule_configs_expected
def test_reconcile_profiler_rules_existing_rule_full_rule_override(
profiler_with_placeholder_args,
):
rules: Dict[str, Dict[str, Any]] = {
"rule_1": {
"domain_builder": {
"class_name": "ColumnDomainBuilder",
"module_name": "great_expectations.rule_based_profiler.domain_builder",
},
"parameter_builders": [
{
"class_name": "MetricMultiBatchParameterBuilder",
"module_name": "great_expectations.rule_based_profiler.parameter_builder",
"name": "my_parameter",
"metric_name": "my_metric",
},
{
"class_name": "NumericMetricRangeMultiBatchParameterBuilder",
"module_name": "great_expectations.rule_based_profiler.parameter_builder",
"name": "my_other_parameter",
"metric_name": "my_other_metric",
},
],
"expectation_configuration_builders": [
{
"class_name": "DefaultExpectationConfigurationBuilder",
"module_name": "great_expectations.rule_based_profiler.expectation_configuration_builder",
"expectation_type": "expect_column_pair_values_A_to_be_greater_than_B",
},
{
"class_name": "DefaultExpectationConfigurationBuilder",
"module_name": "great_expectations.rule_based_profiler.expectation_configuration_builder",
"expectation_type": "expect_column_min_to_be_between",
},
],
},
}
expected_rules: List[dict] = [
{
"name": "rule_1",
"domain_builder": {},
"parameter_builders": [
{
"name": "my_parameter",
"metric_name": "my_metric",
"enforce_numeric_metric": False,
"replace_nan_with_zero": False,
},
{
"name": "my_other_parameter",
"metric_name": "my_other_metric",
"sampling_method": "bootstrap",
"enforce_numeric_metric": True,
"replace_nan_with_zero": True,
"false_positive_rate": 0.05,
"truncate_values": {},
},
],
"expectation_configuration_builders": [
{
"expectation_type": "expect_column_pair_values_A_to_be_greater_than_B",
},
{
"expectation_type": "expect_column_min_to_be_between",
},
],
},
]
effective_rules: List[
Rule
] = profiler_with_placeholder_args.reconcile_profiler_rules(rules=rules)
rule: Rule
effective_rule_configs_actual: dict = {
rule.name: rule.to_json_dict() for rule in effective_rules
}
deep_filter_properties_iterable(effective_rule_configs_actual, inplace=True)
rule_config: dict
effective_rule_configs_expected: dict = {
rule_config["name"]: rule_config for rule_config in expected_rules
}
assert effective_rule_configs_actual == effective_rule_configs_expected
| 35.859122 | 110 | 0.563792 |
79402d1e3c4809c51b24717a4eae769068710ba9 | 11,693 | py | Python | code/python/FactSetEstimatesReportBuilder/v1/fds/sdk/FactSetEstimatesReportBuilder/model/response.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | 6 | 2022-02-07T16:34:18.000Z | 2022-03-30T08:04:57.000Z | code/python/FactSetEstimatesReportBuilder/v1/fds/sdk/FactSetEstimatesReportBuilder/model/response.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | 2 | 2022-02-07T05:25:57.000Z | 2022-03-07T14:18:04.000Z | code/python/FactSetEstimatesReportBuilder/v1/fds/sdk/FactSetEstimatesReportBuilder/model/response.py | factset/enterprise-sdk | 3fd4d1360756c515c9737a0c9a992c7451d7de7e | [
"Apache-2.0"
] | null | null | null | """
FactSet Estimates Report Builder
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from fds.sdk.FactSetEstimatesReportBuilder.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
OpenApiModel
)
from fds.sdk.FactSetEstimatesReportBuilder.exceptions import ApiAttributeError
def lazy_import():
from fds.sdk.FactSetEstimatesReportBuilder.model.meta import Meta
globals()['Meta'] = Meta
class Response(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'data': (bool, date, datetime, dict, float, int, list, str, none_type,), # noqa: E501
'meta': (Meta,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'data': 'data', # noqa: E501
'meta': 'meta', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""Response - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data (bool, date, datetime, dict, float, int, list, str, none_type): Returns a STACH 2.0 row organized package. [optional] # noqa: E501
meta (Meta): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""Response - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
                composed schema that is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
data (bool, date, datetime, dict, float, int, list, str, none_type): Returns a STACH 2.0 row organized package. [optional] # noqa: E501
meta (Meta): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 43.958647 | 148 | 0.573933 |
79402d84de98eca47b8029ac8673db95142db1b4 | 9,174 | py | Python | custom/icds/location_reassignment/tasks.py | EXTREMOPHILARUM/commcare-hq | b97aa9095615d0c3c5f259db67ad9438afa3d7a5 | [
"BSD-3-Clause"
] | null | null | null | custom/icds/location_reassignment/tasks.py | EXTREMOPHILARUM/commcare-hq | b97aa9095615d0c3c5f259db67ad9438afa3d7a5 | [
"BSD-3-Clause"
] | 1 | 2021-06-02T04:45:16.000Z | 2021-06-02T04:45:16.000Z | custom/icds/location_reassignment/tasks.py | EXTREMOPHILARUM/commcare-hq | b97aa9095615d0c3c5f259db67ad9438afa3d7a5 | [
"BSD-3-Clause"
] | null | null | null | from django.conf import settings
from django.core.mail.message import EmailMessage
from django.template.defaultfilters import linebreaksbr
from celery.task import task
from corehq.apps.userreports.data_source_providers import (
DynamicDataSourceProvider,
StaticDataSourceProvider,
)
from corehq.apps.userreports.specs import EvaluationContext
from corehq.apps.userreports.util import get_indicator_adapter
from corehq.form_processor.backends.sql.dbaccessors import CaseAccessorSQL
from custom.icds.location_reassignment.download import Households, OtherCases
from custom.icds.location_reassignment.models import Transition
from custom.icds.location_reassignment.processor import (
HouseholdReassignmentProcessor,
OtherCasesReassignmentProcessor,
Processor,
)
from custom.icds.location_reassignment.utils import (
get_case_ids_for_reassignment,
get_supervisor_id,
reassign_cases,
reassign_household,
)
@task
def process_location_reassignment(domain, transitions, uploaded_filename, user_email):
try:
Processor(domain, transitions).process()
except Exception as e:
email = EmailMessage(
subject=f"[{settings.SERVER_ENVIRONMENT}] - Location Reassignment Failed",
body=linebreaksbr(
f"The request could not be completed for file {uploaded_filename}. Something went wrong.\n"
f"Error raised : {e}.\n"
"Please report an issue if needed."
),
to=[user_email],
from_email=settings.DEFAULT_FROM_EMAIL
)
email.content_subtype = "html"
email.send()
raise e
else:
email = EmailMessage(
subject=f"[{settings.SERVER_ENVIRONMENT}] - Location Reassignment Completed",
body=f"The request has been successfully completed for file {uploaded_filename}.",
to=[user_email],
from_email=settings.DEFAULT_FROM_EMAIL
)
email.send()
@task(queue=settings.CELERY_LOCATION_REASSIGNMENT_QUEUE)
def reassign_cases_for_owner(domain, old_location_id, new_location_id, deprecation_time):
"""
    finds the relevant case ids, then, for each household case, reassigns the
    household case and all of its child cases to the new location as a group,
    and finally reassigns all other cases as a group
"""
supervisor_id = get_supervisor_id(domain, old_location_id)
child_case_ids_per_household_id, other_case_ids = get_case_ids_for_reassignment(domain, old_location_id)
for household_case_id, household_child_case_ids in child_case_ids_per_household_id.items():
reassign_household(domain, household_case_id, old_location_id, new_location_id, supervisor_id,
deprecation_time=deprecation_time, household_child_case_ids=household_child_case_ids)
if other_case_ids:
reassign_cases(domain, other_case_ids, new_location_id)
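
# Illustrative invocation (editor's sketch; the domain, location ids and timestamp
# are hypothetical). The decorator above queues the task on the dedicated
# location reassignment queue:
#
#     reassign_cases_for_owner.delay(
#         "my-domain",            # domain
#         "old-location-id",      # location being deprecated
#         "new-location-id",      # location receiving the cases
#         datetime.utcnow(),      # deprecation_time
#     )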
@task
def email_household_details(domain, transitions, uploaded_filename, user_email):
try:
transition_objs = [Transition(**transition) for transition in transitions]
filestream = Households(domain).dump(transition_objs)
except Exception as e:
email = EmailMessage(
subject=f"[{settings.SERVER_ENVIRONMENT}] - Location Reassignment Household Dump Failed",
body=linebreaksbr(
f"The request could not be completed for file {uploaded_filename}. Something went wrong.\n"
f"Error raised : {e}.\n"
"Please report an issue if needed."
),
to=[user_email],
from_email=settings.DEFAULT_FROM_EMAIL
)
email.content_subtype = "html"
email.send()
raise e
else:
email = EmailMessage(
subject=f"[{settings.SERVER_ENVIRONMENT}] - Location Reassignment Household Dump Completed",
body=f"The request has been successfully completed for file {uploaded_filename}. ",
to=[user_email],
from_email=settings.DEFAULT_FROM_EMAIL
)
if filestream:
email.attach(filename="Households.xlsx", content=filestream.read())
else:
email.body += "There were no house hold details found. "
email.body += f"Please note that the households are fetched only for " \
f"{', '.join(Households.valid_operations)}."
email.send()
@task
def email_other_cases_details(domain, transitions, uploaded_filename, user_email):
try:
transition_objs = [Transition(**transition) for transition in transitions]
filestream = OtherCases(domain).dump(transition_objs)
except Exception as e:
email = EmailMessage(
subject=f"[{settings.SERVER_ENVIRONMENT}] - Location Reassignment Other Cases Dump Failed",
body=linebreaksbr(
f"The request could not be completed for file {uploaded_filename}. Something went wrong.\n"
f"Error raised : {e}.\n"
"Please report an issue if needed."
),
to=[user_email],
from_email=settings.DEFAULT_FROM_EMAIL
)
email.content_subtype = "html"
email.send()
raise e
else:
email = EmailMessage(
subject=f"[{settings.SERVER_ENVIRONMENT}] - Location Reassignment Other Cases Dump Completed",
body=f"The request has been successfully completed for file {uploaded_filename}. ",
to=[user_email],
from_email=settings.DEFAULT_FROM_EMAIL
)
if filestream:
email.attach(filename="Other Cases.zip", content=filestream.read())
else:
email.body += "There were no cases found. "
email.body += f"Please note that the cases are fetched only for " \
f"{', '.join(OtherCases.valid_operations)}."
email.send()
@task
def process_households_reassignment(domain, reassignments, uploaded_filename, user_email):
try:
HouseholdReassignmentProcessor(domain, reassignments).process()
except Exception as e:
email = EmailMessage(
subject=f"[{settings.SERVER_ENVIRONMENT}] - Household Reassignment Failed",
body=linebreaksbr(
f"The request could not be completed for file {uploaded_filename}. Something went wrong.\n"
f"Error raised : {e}.\n"
"Please report an issue if needed."
),
to=[user_email],
from_email=settings.DEFAULT_FROM_EMAIL
)
email.content_subtype = "html"
email.send()
raise e
else:
email = EmailMessage(
subject=f"[{settings.SERVER_ENVIRONMENT}] - Household Reassignment Completed",
body=f"The request has been successfully completed for file {uploaded_filename}.",
to=[user_email],
from_email=settings.DEFAULT_FROM_EMAIL
)
email.send()
@task
def process_other_cases_reassignment(domain, reassignments, uploaded_filename, user_email):
try:
OtherCasesReassignmentProcessor(domain, reassignments).process()
except Exception as e:
email = EmailMessage(
subject=f"[{settings.SERVER_ENVIRONMENT}] - Other Cases Reassignment Failed",
body=linebreaksbr(
f"The request could not be completed for file {uploaded_filename}. Something went wrong.\n"
f"Error raised : {e}.\n"
"Please report an issue if needed."
),
to=[user_email],
from_email=settings.DEFAULT_FROM_EMAIL
)
email.content_subtype = "html"
email.send()
raise e
else:
email = EmailMessage(
subject=f"[{settings.SERVER_ENVIRONMENT}] - Other Cases Reassignment Completed",
body=f"The request has been successfully completed for file {uploaded_filename}.",
to=[user_email],
from_email=settings.DEFAULT_FROM_EMAIL
)
email.send()
@task(queue=settings.CELERY_LOCATION_REASSIGNMENT_QUEUE)
def process_ucr_changes(domain, case_ids):
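    """
    Rebuild UCR data source rows for the given cases after reassignment: load
    each case, and for every static or dynamic data source in the domain whose
    filter matches the case doc, recompute its rows and save them; if nothing
    is produced for a matching doc, delete its existing rows instead.
    """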
cases = CaseAccessorSQL.get_cases(case_ids)
docs = [case.to_json() for case in cases]
data_source_providers = [DynamicDataSourceProvider(), StaticDataSourceProvider()]
all_configs = [
source
for provider in data_source_providers
for source in provider.by_domain(domain)
]
adapters = [
get_indicator_adapter(config, raise_errors=True, load_source='location_reassignment')
for config in all_configs
]
async_configs_by_doc_id = {}
for doc in docs:
eval_context = EvaluationContext(doc)
for adapter in adapters:
if adapter.config.filter(doc, eval_context):
                # create the per-doc list lazily so the append below cannot raise KeyError
                async_configs_by_doc_id.setdefault(doc['_id'], []).append(adapter.config._id)
rows_to_save = adapter.get_all_values(doc, eval_context)
if rows_to_save:
adapter.save_rows(rows_to_save, use_shard_col=False)
else:
adapter.delete(doc, use_shard_col=False)
| 39.714286 | 112 | 0.661216 |
79402e673c97f0039cfca5d72a689081da629e19 | 421 | py | Python | website_project_django/asgi.py | jakubpliszka/website_project | de6925253fe5397b584086f1b9acc86532301d68 | [
"MIT"
] | null | null | null | website_project_django/asgi.py | jakubpliszka/website_project | de6925253fe5397b584086f1b9acc86532301d68 | [
"MIT"
] | null | null | null | website_project_django/asgi.py | jakubpliszka/website_project | de6925253fe5397b584086f1b9acc86532301d68 | [
"MIT"
] | null | null | null | """
ASGI config for website_project_django project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'website_project_django.settings')
application = get_asgi_application()
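
# To serve this project, point any ASGI server at the module-level callable above,
# e.g. (assuming uvicorn is installed):
#
#     uvicorn website_project_django.asgi:application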
| 24.764706 | 82 | 0.800475 |
79402ebbe8de733badba628aa22a4eba7cbed942 | 1,357 | py | Python | models/segtran_modified/code/networks/deformable_unet/unet.py | indigoYoshimaru/3d-brain-thesis | bc6fd5e85e7e8e88c5a3cccafad098c7f3d7586a | [
"MIT"
] | null | null | null | models/segtran_modified/code/networks/deformable_unet/unet.py | indigoYoshimaru/3d-brain-thesis | bc6fd5e85e7e8e88c5a3cccafad098c7f3d7586a | [
"MIT"
] | null | null | null | models/segtran_modified/code/networks/deformable_unet/unet.py | indigoYoshimaru/3d-brain-thesis | bc6fd5e85e7e8e88c5a3cccafad098c7f3d7586a | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn

from .unet_parts import *
class UNet(nn.Module):
    def __init__(self, n_channels, n_classes, downsize_nb_filters_factor=2):
super(UNet, self).__init__()
self.inc = inconv(n_channels, 64//downsize_nb_filters_factor)
self.down1 = down(64//downsize_nb_filters_factor, 128//downsize_nb_filters_factor)
self.down2 = down(128//downsize_nb_filters_factor, 256//downsize_nb_filters_factor)
self.down3 = down(256//downsize_nb_filters_factor, 512//downsize_nb_filters_factor)
self.down4 = down(512//downsize_nb_filters_factor, 512//downsize_nb_filters_factor)
self.up1 = up(1024//downsize_nb_filters_factor, 256//downsize_nb_filters_factor)
self.up2 = up(512//downsize_nb_filters_factor, 128//downsize_nb_filters_factor)
self.up3 = up(256//downsize_nb_filters_factor, 64//downsize_nb_filters_factor)
self.up4 = up(128//downsize_nb_filters_factor, 64//downsize_nb_filters_factor)
self.outc = outconv(64//downsize_nb_filters_factor, n_classes)
def forward(self, x):
x1 = self.inc(x)
x2 = self.down1(x1)
x3 = self.down2(x2)
x4 = self.down3(x3)
x5 = self.down4(x4)
x = self.up1(x5, x4)
x = self.up2(x, x3)
x = self.up3(x, x2)
x = self.up4(x, x1)
x = self.outc(x)
return torch.sigmoid(x)
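

if __name__ == '__main__':
    # Editor's smoke-test sketch: assumes the inconv/down/up/outconv blocks from
    # unet_parts preserve spatial resolution end to end (as in the usual U-Net
    # building blocks); run as a module so the relative import resolves.
    net = UNet(n_channels=3, n_classes=1)
    dummy = torch.randn(1, 3, 64, 64)
    out = net(dummy)
    print(out.shape)  # with the assumptions above: torch.Size([1, 1, 64, 64])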
| 45.233333 | 91 | 0.680177 |
79402f67d726666a7886a4432d048dc5871e3935 | 42 | py | Python | blossom/learners/__init__.py | phanxuanphucnd/Blossom | e589c2b16c8d5742cec724f1ec1c04bf10c81eef | [
"MIT"
] | 3 | 2021-06-12T20:01:26.000Z | 2021-09-24T08:13:26.000Z | blossom/learners/__init__.py | Arizona-Voice/blossom | 02aedadaeb20fab183ef2bb3e84e2cace4c92f96 | [
"MIT"
] | null | null | null | blossom/learners/__init__.py | Arizona-Voice/blossom | 02aedadaeb20fab183ef2bb3e84e2cace4c92f96 | [
"MIT"
] | null | null | null | from .mhatt_learner import MHAttKWSLearner | 42 | 42 | 0.904762 |
794030d322af122f4be78c5c70c8766736a9825c | 2,581 | py | Python | Geog5222/indexing/pm2quadtree.py | thejayhaykid/Python | 641c33b94762f0cace203dcf4cc121571625ab02 | [
"MIT"
] | null | null | null | Geog5222/indexing/pm2quadtree.py | thejayhaykid/Python | 641c33b94762f0cace203dcf4cc121571625ab02 | [
"MIT"
] | null | null | null | Geog5222/indexing/pm2quadtree.py | thejayhaykid/Python | 641c33b94762f0cace203dcf4cc121571625ab02 | [
"MIT"
] | null | null | null | from xdcel import *
from pmquadtree import *
import pickle
def split_by_edges_pm2(edges, pmq):
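    """
    Recursively insert ``edges`` into the PM2 quadtree node ``pmq``.

    PM2 rule as implemented here: a quadrant with no intersecting edges is
    WHITE; it stays a leaf (BLACK) if it holds at most one edge, if every edge
    passing through it shares a single common endpoint, or if it has reached
    the minimal extent size; otherwise it is split into four children
    (NW, NE, SE, SW) and the intersecting edges are pushed down into each child.
    """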
subedges = []
for e in edges:
if is_intersect(pmq.extent, e):
subedges.append(e)
elif pmq.extent.contains(e.fr) and\
pmq.extent.contains(e.to):
subedges.append(e)
if len(subedges) == 0:
pmq.type = WHITE
return
elif len(subedges) == 1:
pmq.type = BLACK
pmq.edges.append(subedges[0])
return
else:
p1,p2 = subedges[0].fr, subedges[0].to
common_vertex = None
if subedges[1].is_endpoint(p1):
common_vertex = p1
elif subedges[1].is_endpoint(p2):
common_vertex = p2
if common_vertex is not None:
for e in subedges[2:]:
if not e.is_endpoint(common_vertex):
common_vertex = None
break
if common_vertex is not None:
for e in subedges:
pmq.edges.append(e)
pmq.type = BLACK
return
if pmq.extent.is_minimal():
for e in subedges:
pmq.edges.append(e)
pmq.type = BLACK
return
if pmq.is_leaf():
xmin = pmq.extent.xmin
xmax = pmq.extent.xmax
ymin = pmq.extent.ymin
ymax = pmq.extent.ymax
xmid = xmin + (xmax-xmin)/2.0
ymid = ymin + (ymax-ymin)/2.0
exts = [ Extent(xmin, xmid, ymid, ymax), # nw
Extent(xmid, xmax, ymid, ymax), # ne
Extent(xmid, xmax, ymin, ymid), # se
Extent(xmin, xmid, ymin, ymid) # sw
]
pmq.quads = [ PMQuadTreeNode(exts[i].getcenter(),
exts[i])
for i in range(4) ]
if pmq.vertex:
for q in pmq.quads:
if q.extent.contains(pmq.vertex):
q.vertex = pmq.vertex
pmq.vertex = None
for i in range(4):
split_by_edges_pm2(subedges, pmq.quads[i])
def test():
    D = pickle.load(open('../data/mydcel.pickle', 'rb'))
XD = Xdcel(D)
X = [v.x for v in D.vertices]
Y = [v.y for v in D.vertices]
xmin,xmax,ymin,ymax = min(X)-1, max(X)+1,\
min(Y)-1, max(Y)+1
maxmax = max(xmax,ymax)
xmax=ymax=maxmax
extent = Extent(xmin, xmax, ymin, ymax)
pm2q = PMQuadTreeNode(extent.getcenter(), extent)
split_by_points(XD.vertices, pm2q)
split_by_edges_pm2(XD.edges, pm2q)
    print(search_pmquadtree(pm2q, 10, 10))
if __name__ == '__main__':
test()
| 30.364706 | 57 | 0.528477 |